import torch
import networkx as nx
import numpy as np
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import pickle as pkl
import scipy.sparse as sp
import torch.utils.data
import itertools
from collections import Counter
from random import shuffle
import json
#
from networkx.readwrite import json_graph
from argparse import ArgumentParser
import pdb
import time
import random
def parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
    return np.array(mask, dtype=bool)  # np.bool was removed from recent NumPy
# def caveman_special(c=2,k=20,p_path=0.1,p_edge=0.3):
# p = p_path
# # path_count = max(int(np.ceil(p * k)),1)
# path_count = max(int(np.ceil(p * k)),1)
# G = nx.caveman_graph(c, k)
# # remove 50% edges
# p = 1-p_edge
# for (u, v) in list(G.edges()):
# if np.random.rand() < p and ((u < k and v < k) or (u >= k and v >= k)):
# G.remove_edge(u, v)
# # add path_count links
# for i in range(path_count):
# u = np.random.randint(0, k)
# v = np.random.randint(k, k * 2)
# G.add_edge(u, v)
# G = max(nx.connected_component_subgraphs(G), key=len)
# return G
def Graph_load_batch(min_num_nodes = 20, max_num_nodes = 1000, name = 'ENZYMES',node_attributes = True,graph_labels=True):
'''
load many graphs, e.g. enzymes
:return: a list of graphs
'''
print('Loading graph dataset: '+str(name))
G = nx.Graph()
# load data
path = 'data/'+name+'/'
data_adj = np.loadtxt(path+name+'_A.txt', delimiter=',').astype(int)
    data_node_att = None  # keep defined even when node attributes are not loaded
    if node_attributes:
        data_node_att = np.loadtxt(path+name+'_node_attributes.txt', delimiter=',')
data_node_label = np.loadtxt(path+name+'_node_labels.txt', delimiter=',').astype(int)
data_graph_indicator = np.loadtxt(path+name+'_graph_indicator.txt', delimiter=',').astype(int)
if graph_labels:
data_graph_labels = np.loadtxt(path+name+'_graph_labels.txt', delimiter=',').astype(int)
data_tuple = list(map(tuple, data_adj))
# print(len(data_tuple))
# print(data_tuple[0])
# add edges
G.add_edges_from(data_tuple)
# add node attributes
for i in range(data_node_label.shape[0]):
if node_attributes:
G.add_node(i+1, feature = data_node_att[i])
G.add_node(i+1, label = data_node_label[i])
G.remove_nodes_from(list(nx.isolates(G)))
# print(G.number_of_nodes())
# print(G.number_of_edges())
# split into graphs
graph_num = data_graph_indicator.max()
node_list = np.arange(data_graph_indicator.shape[0])+1
graphs = []
max_nodes = 0
for i in range(graph_num):
# find the nodes for each graph
nodes = node_list[data_graph_indicator==i+1]
        G_sub = G.subgraph(nodes).copy()  # copy so each graph's attribute dict is independent
if graph_labels:
G_sub.graph['label'] = data_graph_labels[i]
# print('nodes', G_sub.number_of_nodes())
# print('edges', G_sub.number_of_edges())
# print('label', G_sub.graph)
if G_sub.number_of_nodes()>=min_num_nodes and G_sub.number_of_nodes()<=max_num_nodes:
graphs.append(G_sub)
if G_sub.number_of_nodes() > max_nodes:
max_nodes = G_sub.number_of_nodes()
# print(G_sub.number_of_nodes(), 'i', i)
# print('Graph dataset name: {}, total graph num: {}'.format(name, len(graphs)))
# logging.warning('Graphs loaded, total num: {}'.format(len(graphs)))
print('Loaded')
return graphs, data_node_att, data_node_label
# graphs = Graph_load_batch(name='PROTEINS_full')
# pdb.set_trace()
# def caveman_special(c=2,k=20,p_path=0.01):
# G = nx.caveman_graph(c, k)
# comps = [comp for comp in nx.connected_components(G)]
#
# for edge in list(G.edges()):
# if np.random.rand()<0.5:
# G.remove_edge(edge[0],edge[1])
#
# labels = {}
# for id,comp in enumerate(comps):
#
# for node in comp:
# labels[node] = id
#
# # pdb.set_trace()
# for u in G.nodes():
# for v in G.nodes():
# if labels[u] != labels[v] and np.random.rand()<p_path:
# G.add_edge(u,v)
#
# G = max(nx.connected_component_subgraphs(G), key=len)
# print(G.number_of_nodes(), G.number_of_edges())
# return G,labels
def caveman_special(l=2, k=20, p=0.1):
    G = nx.caveman_graph(l, k)
    comps = [comp for comp in nx.connected_components(G)]
    nodes = list(G.nodes())
    for (u, v) in list(G.edges()):  # iterate over a copy; edges are mutated below
        if random.random() < p:  # rewire the edge
            x = random.choice(nodes)
            if G.has_edge(u, x):
                continue
            G.remove_edge(u, v)
            G.add_edge(u, x)
    labels = {}
    for id, comp in enumerate(comps):
        for node in comp:
            labels[node] = id
    # keep the largest connected component (nx.connected_component_subgraphs was removed in networkx 2.4)
    G = G.subgraph(max(nx.connected_components(G), key=len)).copy()
    return G, labels
# caveman_special(l=20, k=20)
def load_graphs(dataset_str):
if dataset_str == 'grid':
graphs = []
features = []
for _ in range(1):
graph = nx.grid_2d_graph(20, 20)
# graph = nx.grid_2d_graph(100, 100)
graph = nx.convert_node_labels_to_integers(graph)
# node_order = list(range(graph.number_of_nodes()))
# shuffle(node_order)
# order_mapping = dict(zip(graph.nodes(), node_order))
# graph = nx.relabel_nodes(graph, order_mapping, copy=True)
# feature = np.ones((graph.number_of_nodes(),1))
feature = np.identity(graph.number_of_nodes())
# label = nx.adjacency_matrix(graph).toarray()
graphs.append(graph)
features.append(feature)
labels = None
elif dataset_str == 'caveman_single':
graph = nx.connected_caveman_graph(20, 20)
feature = np.ones((graph.number_of_nodes(), 1))
# feature = np.identity(graph.number_of_nodes())
# graphs = [graph for _ in range(10)]
# features = [feature for _ in range(10)]
graphs = [graph]
features = [feature]
labels = None
#
# graphs = []
# features = []
# labels = None
# for k in range(10):
# graphs.append(caveman_special(c=20, k=20, p_edge=0.2, p_path=500))
# features.append(np.ones((400, 1)))
elif dataset_str == 'caveman':
graphs = []
features = []
labels = []
# labels = None
for i in range(50):
community_size = 20
graph = nx.connected_caveman_graph(20, community_size)
# graph,labels_dict = caveman_special(20,20,0)
# node_dict = {}
# for id, node in enumerate(graph.nodes()):
# node_dict[node] = id
p=0.001
count = 0
            for (u, v) in list(graph.edges()):  # iterate over a copy; edges are mutated below
                if random.random() < p:  # rewire the edge
                    x = random.choice(list(graph.nodes()))
if graph.has_edge(u, x):
continue
graph.remove_edge(u, v)
graph.add_edge(u, x)
count += 1
print('rewire:', count)
n = graph.number_of_nodes()
feature = np.ones((n, 1))
label = np.zeros((n,n))
for u in graph.nodes():
for v in graph.nodes():
# if labels_dict[u] == labels_dict[v] and u!=v:
if u//community_size == v//community_size and u!=v:
label[u,v] = 1
# label[node_dict[u],node_dict[v]] = 1
# feature = np.identity(graph.number_of_nodes())
graphs.append(graph)
features.append(feature)
labels.append(label)
elif dataset_str == 'protein':
graphs_all, features_all, labels_all = Graph_load_batch(name='PROTEINS_full')
features_all = (features_all-np.mean(features_all,axis=-1,keepdims=True))/np.std(features_all,axis=-1,keepdims=True)
graphs = []
features = []
labels = []
for graph in graphs_all:
n = graph.number_of_nodes()
label = np.zeros((n, n))
for i,u in enumerate(graph.nodes()):
for j,v in enumerate(graph.nodes()):
if labels_all[u-1] == labels_all[v-1] and u!=v:
label[i,j] = 1
if label.sum() > n*n/2:
continue
graphs.append(graph)
labels.append(label)
idx = [node-1 for node in graph.nodes()]
feature = features_all[idx,:]
# label_dict = labels_all[graph.nodes()]
features.append(feature)
# pdb.set_trace()
print('final num', len(graphs))
elif dataset_str == 'email':
with open('data/email.txt', 'rb') as f:
graph = nx.read_edgelist(f)
label_all = np.loadtxt('data/email_labels.txt')
graph_label_all = label_all.copy()
graph_label_all[:,1] = graph_label_all[:,1]//6
        for edge in list(graph.edges()):  # copy: edges are removed inside the loop
if graph_label_all[int(edge[0])][1] != graph_label_all[int(edge[1])][1]:
graph.remove_edge(edge[0], edge[1])
comps = [comp for comp in nx.connected_components(graph) if len(comp)>10]
graphs = [graph.subgraph(comp) for comp in comps]
labels = []
features = []
for g in graphs:
n = g.number_of_nodes()
feature = np.ones((n, 1))
features.append(feature)
label = np.zeros((n, n))
for i, u in enumerate(g.nodes()):
for j, v in enumerate(g.nodes()):
if label_all[int(u)][1] == label_all[int(v)][1]:
label[i, j] = 1
label = label - np.identity(n)
labels.append(label)
elif dataset_str == 'ppi':
dataset_dir = 'data/ppi'
print("Loading data...")
G = json_graph.node_link_graph(json.load(open(dataset_dir + "/ppi-G.json")))
labels = json.load(open(dataset_dir + "/ppi-class_map.json"))
labels = {int(i): l for i, l in labels.items()}
train_ids = [n for n in G.nodes()]
train_labels = np.array([labels[i] for i in train_ids])
if train_labels.ndim == 1:
train_labels = np.expand_dims(train_labels, 1)
print("Using only features..")
feats = np.load(dataset_dir + "/ppi-feats.npy")
## Logistic gets thrown off by big counts, so log transform num comments and score
feats[:, 0] = np.log(feats[:, 0] + 1.0)
feats[:, 1] = np.log(feats[:, 1] - min(np.min(feats[:, 1]), -1))
feat_id_map = json.load(open(dataset_dir + "/ppi-id_map.json"))
feat_id_map = {int(id): val for id, val in feat_id_map.items()}
train_feats = feats[[feat_id_map[id] for id in train_ids]]
# pdb.set_trace()
node_dict = {}
for id,node in enumerate(G.nodes()):
node_dict[node] = id
comps = [comp for comp in nx.connected_components(G) if len(comp)>10]
graphs = [G.subgraph(comp) for comp in comps]
id_all = []
for comp in comps:
id_temp = []
for node in comp:
id = node_dict[node]
id_temp.append(id)
id_all.append(np.array(id_temp))
features = [train_feats[id_temp,:]+0.1 for id_temp in id_all]
# graphs = [G.subgraph(comp) for comp in ]
# pdb.set_trace()
# real
else:
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
objects.append(pkl.load(f, encoding='latin1'))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range - min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range - min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
graph = nx.from_dict_of_lists(graph)
# keep the max connected component
        nodes_id = sorted(max(nx.connected_components(graph), key=len))
        graph = graph.subgraph(nodes_id).copy()  # nx.connected_component_subgraphs was removed in networkx 2.4
# adj = nx.adjacency_matrix(G)
feature = features[nodes_id, :].toarray()
# feature = np.concatenate((np.identity(graph.number_of_nodes()), feature), axis=-1)
graphs = [graph]
features = [feature]
labels = None
return graphs, features, labels
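# Minimal usage sketch for the loader above; the synthetic 'grid' option needs
# no data files on disk:
# graphs_demo, features_demo, labels_demo = load_graphs('grid')
# print(len(graphs_demo), features_demo[0].shape)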
# load cora, citeseer and pubmed dataset
def load_data(dataset_str):
"""
Loads input data from gcn/data directory
ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
(a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
object;
ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
All objects above must be saved using python pickle module.
:param dataset_str: Dataset name
:return: All data input files loaded (as well the training/test data).
"""
# synthetic
# todo: design node label
labels, idx_train, idx_val, idx_test = None,None,None,None
if dataset_str == 'grid':
G = nx.grid_2d_graph(20, 20)
# G = nx.grid_2d_graph(100, 100)
# features = np.ones((G.number_of_nodes(),1))
features = np.identity(G.number_of_nodes())
labels = np.zeros((G.number_of_nodes(),2))
labels[0:G.number_of_nodes()//2,0] = 1
labels[G.number_of_nodes()//2:,1] = 1
idx = np.random.permutation(G.number_of_nodes())
idx_train = idx[0:G.number_of_nodes()//2]
idx_val = idx[G.number_of_nodes()//2:]
elif dataset_str == 'caveman':
G = nx.connected_caveman_graph(20,20)
features = np.identity(G.number_of_nodes())
# features = np.ones((G.number_of_nodes(),1))
elif dataset_str == 'barabasi':
G = nx.barabasi_albert_graph(1000, 2)
features = np.identity(G.number_of_nodes())
# features = np.ones((G.number_of_nodes(), 1))
# real
else:
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
objects.append(pkl.load(f, encoding='latin1'))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
G = nx.from_dict_of_lists(graph)
# print(all(G.nodes()[i] <= G.nodes()[i + 1] for i in range(len(G.nodes()) - 1))) # check if sorted
# keep the max connected component
        nodes_id = sorted(max(nx.connected_components(G), key=len))
        G = G.subgraph(nodes_id).copy()  # nx.connected_component_subgraphs was removed in networkx 2.4
# adj = nx.adjacency_matrix(G)
features = features[nodes_id,:]
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
labels = labels[nodes_id,:]
# idx_test = test_idx_range.tolist()
# idx_train = range(len(y))
# idx_val = range(len(y), len(y) + 500)
idx_train = range(500)
idx_val = range(500, 1000)
idx_test = range(G.number_of_nodes()-1000,G.number_of_nodes())
return G, features, labels, idx_train, idx_val, idx_test
#
# train_mask = sample_mask(idx_train, labels.shape[0])
# val_mask = sample_mask(idx_val, labels.shape[0])
# test_mask = sample_mask(idx_test, labels.shape[0])
#
# y_train = np.zeros(labels.shape)
# y_val = np.zeros(labels.shape)
# y_test = np.zeros(labels.shape)
# y_train[train_mask, :] = labels[train_mask, :]
# y_val[val_mask, :] = labels[val_mask, :]
# y_test[test_mask, :] = labels[test_mask, :]
#
# return G, adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def get_random_subset(G, p=0.5, return_id = True):
    '''
    get a random subset of nodes
    :param G: input graph
    :param p: prob of including a node
    :param return_id: if True, return node positions (ids) instead of node objects
    :return: a list of nodes, will not be empty
    '''
nodes = G.nodes()
while True:
rand_values = np.random.rand(len(nodes))
if np.any(np.less(rand_values,p)):
break
if return_id:
nodes_return = [id for id,node in enumerate(nodes) if rand_values[id]<p]
else:
nodes_return = [node for id,node in enumerate(nodes) if rand_values[id]<p]
return nodes_return
def get_random_subsets(G, c=1):
    '''
    get c*log^2(n) random subsets of nodes
    :param G: input graph
    :param c: repeat each subset scale S_ij for c*log(n) times
    :return: list of lists of nodes, length fixed for a given graph
    '''
random_subsets = []
for i in range(int(np.log2(G.number_of_nodes()))):
p = 1/np.exp2(i+1)
for j in range(int(np.log2(G.number_of_nodes())*c)):
subset = get_random_subset(G,p)
random_subsets.append(subset)
return random_subsets
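# Minimal usage sketch (hypothetical small graph): each returned subset is an
# anchor set sampled with inclusion probability 1/2^(i+1), Bourgain-style.
# G_demo = nx.connected_caveman_graph(4, 5)
# subsets_demo = get_random_subsets(G_demo, c=1)
# print(len(subsets_demo), 'anchor sets; first set:', subsets_demo[0])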
def get_shortest_dist(shortest_dist, random_subsets):
'''
get the dist from a node to random subsets
:param shortest_dist:
:param node_id:
:param random_subsets:
:return: 2-d array, dist
TODO: may consider different output format
'''
node_dist = np.zeros((1,len(random_subsets)))
node_id = np.zeros((1,len(random_subsets)))
for i, random_subset in enumerate(random_subsets):
dist_min = 1e6 # todo: other aggregation possible: min, mean, sum, etc.
node_min = 0
for node in random_subset:
dist = shortest_dist[node]
if dist<dist_min:
dist_min = dist
node_min = node
node_dist[0, i] = dist_min
node_id[0, i] = node_min
return node_dist, node_id
def get_shortest_dists(shortest_dists, random_subsets, nodes):
'''
get dist for all nodes
:param shortest_dists:
:param random_subsets:
:param nodes: from G.nodes(), used to make sure node order is correct
:return: subset_dists n*m, subset_ids n*m
'''
subset_dists = np.zeros((len(shortest_dists),len(random_subsets)))
subset_ids = np.zeros((len(shortest_dists),len(random_subsets))).astype(int)
for i,node_id in enumerate(nodes):
shortest_dist = shortest_dists[node_id]
node_dist, node_id_new = get_shortest_dist(shortest_dist,random_subsets)
subset_dists[i] = node_dist
subset_ids[i] = node_id_new
return subset_dists, subset_ids
def get_feature(subset_ids, node_feature):
'''
match node ids for each subset with the corresponding features
:param subset_ids: n*m
:param node_feature: n*d
:return: subset_features n*m*d
'''
# subset_features = np.zeros((subset_ids.shape[0],subset_ids.shape[1],
# node_feature.shape[1]))
# for i in range(subset_features.shape[0]):
# subset_features[i,:,:] = node_feature[subset_ids[i,:]]
subset_features = node_feature[subset_ids.flatten(),:]
subset_features = subset_features.reshape((subset_ids.shape[0],subset_ids.shape[1],
node_feature.shape[1]))
return subset_features
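# Minimal shape-check sketch (hypothetical toy arrays): for n=4 nodes, m=3
# subsets and d=2 features, the gathered tensor is n*m*d:
# toy_ids = np.zeros((4, 3), dtype=int)
# toy_features = np.random.rand(4, 2)
# assert get_feature(toy_ids, toy_features).shape == (4, 3, 2)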
class graph_dataset_node_classification(torch.utils.data.Dataset):
def __init__(self, name = 'cora', permute = False):
self.G, self.node_feature, self.label, self.idx_train, self.idx_val, self.idx_test = \
load_data(name)
self.n = self.G.number_of_nodes()
self.subset_types = int(np.log2(self.G.number_of_nodes()))
self.adj = nx.adjacency_matrix(self.G).toarray() + np.identity(self.n)
try:
self.node_feature = self.node_feature.toarray()
except:
pass
self.node_feature = self.node_feature[:, np.newaxis, :]
# G = max(nx.connected_component_subgraphs(G), key=len)
self.G = nx.convert_node_labels_to_integers(self.G)
self.node_label = np.zeros(self.label.shape[0])
for i in range(self.label.shape[0]):
self.node_label[i] = np.where(self.label[i] == 1)[0][0]
self.num_class = self.label.shape[-1]
        self.shortest_dists = dict(nx.shortest_path_length(self.G))  # dict(): a generator in networkx 2.x
self.permute = permute
if not permute:
self.recompute_feature()
def recompute_feature(self):
# compute dist
t1 = time.time()
self.random_subsets = get_random_subsets(self.G, c=0.5)
t2 = time.time()
self.subset_dists, self.subset_ids = get_shortest_dists(self.shortest_dists, self.random_subsets, self.G.nodes())
t3 = time.time()
self.subset_features = get_feature(self.subset_ids, self.node_feature[:,0,:]) # remove extra dim
t4 = time.time()
self.subset_dists = self.subset_dists[:, :, np.newaxis]
t5 = time.time()
print('node num:', self.G.number_of_nodes(), 'subset num:', len(self.random_subsets),
'time:', t5 - t1, t2-t1,t3-t2,t4-t3,t5-t4)
return self.subset_dists, self.subset_features
def __len__(self):
return self.subset_dists.shape[0]
    def __getitem__(self, idx):
        return self.node_feature[idx], self.node_label[idx], self.subset_dists[idx], self.subset_features[idx]
def get_fullbatch_train(self):
if self.permute:
self.recompute_feature()
return self.node_feature[self.idx_train], self.adj, self.node_label[self.idx_train], self.subset_dists[self.idx_train], self.subset_features[self.idx_train], self.subset_ids[self.idx_train]
def get_fullbatch_val(self):
if self.permute:
self.recompute_feature()
return self.node_feature[self.idx_val], self.adj, self.node_label[self.idx_val], self.subset_dists[self.idx_val], self.subset_features[self.idx_val], self.subset_ids[self.idx_val]
def get_fullbatch_test(self):
if self.permute:
self.recompute_feature()
return self.node_feature[self.idx_test], self.adj, self.node_label[self.idx_test], self.subset_dists[self.idx_test], self.subset_features[self.idx_test], self.subset_ids[self.idx_test]
def get_fullbatch(self):
if self.permute:
self.recompute_feature()
return self.node_feature, self.adj, self.node_label, self.subset_dists, self.subset_features, self.subset_ids
class graph_dataset_link_prediction(torch.utils.data.Dataset):
def __init__(self, name = 'cora', test_ratio = 0.2, permute = False, approximate=False):
self.G, self.node_feature, _, _, _, _ = load_data(name)
self.n = self.G.number_of_nodes()
self.subset_types = int(np.log2(self.G.number_of_nodes()))
self.approximate = approximate
# default value
self.subset_dists, self.subset_features = np.zeros((0)), np.zeros((0))
try:
self.node_feature = self.node_feature.toarray()
except:
pass
self.node_feature = self.node_feature[:, np.newaxis, :]
self.G = nx.convert_node_labels_to_integers(self.G)
self.split_dataset(test_ratio)
assert self.G.nodes()==self.G_train.nodes()
if approximate:
self.node_dict = {}
            for i, node in enumerate(self.G.nodes()):  # NodeView is not indexable in networkx 2.x
                self.node_dict[node] = i
else:
            self.shortest_dists = dict(nx.shortest_path_length(self.G_train))  # dict(): a generator in networkx 2.x
self.adj = nx.adjacency_matrix(self.G).toarray() + np.identity(self.n)
self.adj_train = nx.adjacency_matrix(self.G_train).toarray() + np.identity(self.n)
self.adj_test = self.adj - self.adj_train
# mask
num_positive_train = np.sum((self.adj_train>0).astype(int))
self.mask_train = self.adj_train + np.random.rand(self.n, self.n)
self.mask_train = (self.adj_train + (self.mask_train < num_positive_train/(self.n*self.n)).astype(int)).astype(bool).astype(int)
num_positive_test = np.sum((self.adj_test>0).astype(int))
self.mask_test = self.adj + np.random.rand(self.n, self.n)
self.mask_test = (self.adj_test + (self.mask_test < num_positive_test / (self.n * self.n)).astype(int)).astype(bool).astype(int)
self.permute = permute
if not self.permute:
self.recompute_feature()
def split_dataset(self, test_ratio=0.2):
self.G_train = self.G.copy()
        for edge in list(self.G_train.edges()):  # copy: edges are removed/re-added inside the loop
self.G_train.remove_edge(edge[0],edge[1])
if np.random.rand() > test_ratio or not nx.is_connected(self.G_train):
self.G_train.add_edge(edge[0],edge[1])
print('Train:', 'Connected', nx.is_connected(self.G_train),
'Node', self.G_train.number_of_nodes(), 'Edge', self.G_train.number_of_edges())
print('All:', 'Connected', nx.is_connected(self.G),
'Node', self.G.number_of_nodes(), 'Edge', self.G.number_of_edges())
def mask_adj_list(self):
        self.adj_list = [list(neighbors) for _, neighbors in self.G_train.adjacency()]  # adjacency_list() was removed in networkx 2.x
self.adj_count = np.zeros((self.n, self.n))
# self.adj_count = np.zeros((len(self.random_subsets),self.n, self.n))
        # aggregated adj_count
for i,node_list in enumerate(self.adj_list):
adj_list_temp = []
for random_subset in self.random_subsets:
node_list_temp = list(set(node_list) & set(random_subset))
if len(node_list_temp)>0:
# adj_list_temp.append(node_list_temp)
adj_list_temp += node_list_temp
for node in adj_list_temp:
self.adj_count[i, self.node_dict[node]] += 1
# batch version
# for i,node_list in enumerate(self.adj_list):
# for b,random_subset in enumerate(self.random_subsets):
# node_list_temp = list(set(node_list) & set(random_subset))
# if len(node_list_temp)>0:
# for node in node_list_temp:
# self.adj_count[b, i, self.node_dict[node]] += 1
# pdb.set_trace()
def recompute_feature(self):
# compute dist
t1 = time.time()
self.random_subsets = get_random_subsets(self.G_train, c=1)
if self.approximate:
self.mask_adj_list()
else:
self.subset_dists, self.subset_ids = get_shortest_dists(self.shortest_dists, self.random_subsets, self.G_train.nodes())
self.subset_features = get_feature(self.subset_ids, self.node_feature[:,0,:]) # remove extra dim
self.subset_dists = self.subset_dists[:, :, np.newaxis]
t2 = time.time()
print('node num:', self.G_train.number_of_nodes(), 'subset num:', len(self.random_subsets),
'time:', t2 - t1)
# return self.subset_dists, self.subset_features
def __len__(self):
return self.G_train.number_of_nodes()
    def __getitem__(self, idx): # todo: edit for link pred
        return self.node_feature[idx], self.subset_dists[idx], self.subset_features[idx]
def get_fullbatch_train(self):
if self.permute:
self.recompute_feature()
return (self.node_feature, self.adj_train, self.subset_dists, self.subset_features, self.mask_train)
def get_fullbatch_test(self):
if self.permute:
self.recompute_feature()
return (self.node_feature, self.adj_train, self.subset_dists, self.subset_features, self.mask_test, self.adj_test)
def preprocess(A):
# Get size of the adjacency matrix
size = len(A)
# Get the degrees for each node
degrees = []
    for node_adjacency in A:
        num = 0
        for node in node_adjacency:
if node == 1.0:
num = num + 1
# Add an extra for the "self loop"
num = num + 1
degrees.append(num)
# Create diagonal matrix D from the degrees of the nodes
D = np.diag(degrees)
# Cholesky decomposition of D
D = np.linalg.cholesky(D)
# Inverse of the Cholesky decomposition of D
D = np.linalg.inv(D)
# Create an identity matrix of size x size
I = np.eye(size)
    # Turn adjacency matrix into a numpy array
    A = np.asarray(A)
    # Create A hat: the adjacency with self loops, matching the +1 in the degrees above
    A_hat = A + I
    # Return the symmetrically normalized adjacency D^(-1/2) (A + I) D^(-1/2)
    return D @ A_hat @ D
# return A_hat, D
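# Minimal sketch of the normalization on a toy 3-node path graph (assumes a
# dense 0/1 adjacency with zero diagonal, as produced by nx.adjacency_matrix):
# A_demo = np.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])
# print(preprocess(A_demo))  # D^(-1/2) (A + I) D^(-1/2)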
class graphs_dataset_loader():
def __init__(self, name = 'grid', remove_link_ratio = 0.1, graph_test_ratio = 0.2,
permute = True, approximate=-1, normalize_adj = False):
# args
self.name = name
self.remove_link_ratio = remove_link_ratio
self.graph_test_ratio = graph_test_ratio
self.permute = permute
self.approximate = approximate
self.normalize_adj = normalize_adj
# 1 load data
# list of networkx graphs; list of n*m arrays; list of n*n arrays/None(when link prediction)
self.graphs, self.graphs_feature, self.graphs_label = load_graphs(self.name)
        # 2 (Link prediction only) randomly remove edges for graphs, get different labels
if self.remove_link_ratio>0:
self.graphs, self.graphs_label_train, self.graphs_label_test = self.remove_link_graphs()
else:
self.graphs_label_train, self.graphs_label_test = self.graphs_label, self.graphs_label
# 3 get adj
self.graphs_adj = [nx.adjacency_matrix(graph).toarray() for graph in self.graphs]
if self.normalize_adj:
self.graphs_adj = [preprocess(adj) for adj in self.graphs_adj]
# 4 precompute dists for all node pairs for all graphs
self.graphs_dist = self.precompute_dist()
# 5 set up mask for train and test
self.graphs_mask_train, self.graphs_mask_test = self.set_masks()
# 6 set up data index
if len(self.graphs)>1:
self.ids = np.random.permutation(len(self.graphs))
self.ids_test = self.ids[:int(len(self.graphs) * self.graph_test_ratio)]
self.ids_train = self.ids[int(len(self.graphs) * self.graph_test_ratio):]
else: # transductive
self.ids_test = np.array([0])
self.ids_train = np.array([0])
self.counter_train = 0
self.counter_test = 0
self.done_train = False
self.done_test = False
print(name, len(self.graphs))
def set_masks(self):
# for link prediction, two masks are different
# for community detection, two masks are the same
# Note: diag of adj should be 0!!!
if self.remove_link_ratio > 0:
graphs_mask_train = []
graphs_mask_test = []
for i in range(len(self.graphs)):
adj = self.graphs_label_test[i]
adj_train = self.graphs_label_train[i]
adj_test = adj - adj_train
n = adj_train.shape[0]
num_positive_train = np.sum((adj_train > 0).astype(int))
mask_train = adj_train + np.identity(n) + np.random.rand(n, n)
mask_train = (adj_train + (mask_train < num_positive_train / (n * n)).astype(int)).astype(bool).astype(int)
num_positive_test = np.sum((adj_test > 0).astype(int))
mask_test = adj + np.identity(n) + np.random.rand(n, n)
mask_test = (adj_test + (mask_test < num_positive_test / (n * n)).astype(int)).astype(bool).astype(int)
graphs_mask_train.append(mask_train)
graphs_mask_test.append(mask_test)
else:
graphs_mask_train = []
for i in range(len(self.graphs)):
adj = self.graphs_label_train[i]
n = adj.shape[0]
num_positive_train = np.sum((adj > 0).astype(int))
mask_train = adj + np.identity(n) + np.random.rand(n, n)
mask_train = (adj + (mask_train < num_positive_train / (n * n)).astype(int)).astype(bool).astype(int)
graphs_mask_train.append(mask_train)
graphs_mask_test = graphs_mask_train
return graphs_mask_train, graphs_mask_test
def get_batch_train(self):
# reset epoch token
if self.done_train:
self.done_train = False
id = self.ids_train[self.counter_train]
self.counter_train += 1
# check epoch ends
if self.counter_train == len(self.ids_train):
self.counter_train = 0
self.done_train = True
np.random.shuffle(self.ids_train)
# re-sample random subsets
self.random_subsets = get_random_subsets(self.graphs[id], c=1)
self.dist_max, self.dist_argmax = self.get_shortest_dists(self.graphs_dist[id], self.random_subsets)
return (self.graphs_adj[id], self.graphs_feature[id], self.graphs_dist[id], self.graphs_label_train[id], self.graphs_mask_train[id])
def get_batch_test(self):
# reset epoch token
if self.done_test:
self.done_test = False
id = self.ids_test[self.counter_test]
self.counter_test += 1
# check epoch ends
if self.counter_test == len(self.ids_test):
self.counter_test = 0
self.done_test = True
np.random.shuffle(self.ids_test)
# re-sample random subsets
self.random_subsets = get_random_subsets(self.graphs[id], c=1)
self.dist_max, self.dist_argmax = self.get_shortest_dists(self.graphs_dist[id], self.random_subsets)
return (self.graphs_adj[id], self.graphs_feature[id], self.graphs_dist[id], self.graphs_label_test[id],
self.graphs_mask_test[id])
def precompute_dist(self):
        '''
        Here dist is 1/(real_dist+1): higher actually means closer, 0 means disconnected
        :return:
        '''
graphs_dist = []
for graph in self.graphs:
if self.approximate>0:
# dists_array = np.maximum(nx.adjacency_matrix(graph).toarray()*0.5 + np.identity(graph.number_of_nodes()), 0.1)
# dists_array = nx.adjacency_matrix(graph).toarray()*0.5 + np.identity(graph.number_of_nodes())
dists_array = np.zeros((graph.number_of_nodes(), graph.number_of_nodes()))
# todo: consider disconnected graph
                dists_dict = dict(nx.all_pairs_shortest_path_length(graph, cutoff=self.approximate))  # dict(): a generator in networkx 2.x
for i, node_i in enumerate(graph.nodes()):
shortest_dist = dists_dict[node_i]
for j, node_j in enumerate(graph.nodes()):
dist = shortest_dist.get(node_j, -1)
if dist!=-1:
dists_array[i, j] = 1 / (dist + 1)
else:
dists_array = np.zeros((graph.number_of_nodes(), graph.number_of_nodes()))
# todo: consider disconnected graph
                dists_dict = dict(nx.all_pairs_shortest_path_length(graph))  # dict(): a generator in networkx 2.x
for i, node_i in enumerate(graph.nodes()):
shortest_dist = dists_dict[node_i]
for j, node_j in enumerate(graph.nodes()):
dist = shortest_dist.get(node_j, -1)
if dist != -1:
dists_array[i, j] = 1 / (dist + 1)
graphs_dist.append(dists_array)
return graphs_dist
def get_shortest_dists(self, graph_dist, random_subsets):
dist_max = np.zeros((graph_dist.shape[0], len(random_subsets)))
dist_argmax = np.zeros((graph_dist.shape[0], len(random_subsets)))
for id,random_subset in enumerate(random_subsets):
graph_dist_temp = graph_dist[:, random_subset]
dist_max[:,id] = np.amax(graph_dist_temp, axis=-1)
dist_argmax[:,id] = np.argmax(graph_dist_temp, axis=-1)
return dist_max, dist_argmax
def get_ordered_neighbours(self):
pass
def remove_link_graph(self, graph):
graph_removed = graph.copy()
        for edge in list(graph_removed.edges()):  # copy: edges are removed inside the loop
if np.random.rand() < self.remove_link_ratio:
graph_removed.remove_edge(edge[0], edge[1])
if self.name != 'ppi':
if not nx.is_connected(graph_removed):
graph_removed.add_edge(edge[0], edge[1])
print('Before:', 'Connected', nx.is_connected(graph),
'Node', graph.number_of_nodes(), 'Edge', graph.number_of_edges())
print('After:', 'Connected', nx.is_connected(graph_removed),
'Node', graph_removed.number_of_nodes(), 'Edge', graph_removed.number_of_edges())
return graph_removed
def remove_link_graphs(self):
graphs_removed = []
graphs_label_train = []
graphs_label_test = []
for graph in self.graphs:
graph_removed = self.remove_link_graph(graph)
graphs_removed.append(graph_removed)
graphs_label_train.append(nx.adjacency_matrix(graph_removed).toarray())
graphs_label_test.append(nx.adjacency_matrix(graph).toarray())
return graphs_removed, graphs_label_train, graphs_label_test
def read_graphs():
pass
# for explainer project
class graphs_dataset_loader_simple():
def __init__(self, name='grid', remove_link_ratio=0.1, graph_test_ratio=0.2,
permute=True, approximate=-1, normalize_adj=False):
# args
self.name = name
self.remove_link_ratio = remove_link_ratio
self.graph_test_ratio = graph_test_ratio
self.permute = permute
self.approximate = approximate
self.normalize_adj = normalize_adj
# 1 load data
# list of networkx graphs; list of n*m arrays; list of n*n arrays/None(when link prediction)
self.graphs, self.graphs_feature, self.graphs_label = load_graphs(self.name)
# 3 get adj
self.graphs_adj = [nx.adjacency_matrix(graph).toarray() for graph in self.graphs]
if self.normalize_adj:
self.graphs_adj = [preprocess(adj) for adj in self.graphs_adj]
# 6 set up data index
self.counter_train = 0
self.done_train = False
print(name, len(self.graphs))
def get_batch_train(self):
# reset epoch token
if self.done_train:
self.done_train = False
id = self.counter_train
self.counter_train += 1
# check epoch ends
if self.counter_train == len(self.graphs):
self.counter_train = 0
self.done_train = True
return (self.graphs_adj[id], self.graphs_feature[id])
# dataset = graphs_dataset_loader_simple()
# dataset.get_batch_train()
#
# t1 = time.time()
# dataset = graphs_dataset_loader(approximate=-1, name='ppi')
#
# for i in range(10):
# t2 = time.time()
# batch_train = dataset.get_batch_train()
# t3 = time.time()
# print(t3-t2)
# t2 = time.time()
# print(t2-t1)
# batch_test = dataset.get_batch_test()
# pdb.set_trace()
# dataset = graph_dataset_link_prediction(name='grid')
# ========================= (separate source file) =========================
from numpy import cumprod, array
def readf(filename, items):
"""
Read IDL arrays from a file
filename - path of the file
items - iterable of (func, shape) where func is applied to each split string of the file,
then made into an array
e.g.
snaps, vels = readf('sfr.dat', [(int, 131), (float, (131,3))])
"""
    with open(filename, 'r') as fl:
        data = ' '.join(fl.readlines()).split()
i0 = 0
for dfunc, shape in items:
nvals = cumprod(shape)[-1]
arr = array([dfunc(x) for x in data[i0:i0+nvals]]).reshape(shape)
yield arr
i0 += nvals
return
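# Minimal usage sketch with a hypothetical file: two ints followed by four
# floats, read back as a (2,) int array and a (2, 2) float array.
# with open('demo.dat', 'w') as f:
#     f.write('1 2 0.1 0.2 0.3 0.4')
# ints, floats = readf('demo.dat', [(int, 2), (float, (2, 2))])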
# ========================= (separate source file) =========================
import torch
import numpy as np
import matplotlib.pyplot as plt
def convert_tensor_to_RGB(network_output):
    # map each integer class id (0..3) to an RGB color, then move channels first: (3, H, W)
    x = torch.FloatTensor([[.0, .0, .0], [1.0, .0, .0], [.0, .0, 1.0], [.0, 1.0, .0]])
    converted_tensor = torch.nn.functional.embedding(network_output, x).permute(2, 0, 1)
    return converted_tensor
def dice_scores(segmentation, ground_truth, classes):
dice_scores = []
for i in range(1,classes+1):
binary_gt = (ground_truth == i).astype(np.uint8)
binary_seg = (segmentation == i).astype(np.uint8)
intersect = np.logical_and(binary_gt, binary_seg)
sum_binary_gt = np.sum(binary_gt)
sum_binary_seg = np.sum(binary_seg)
if sum_binary_gt == 0:
continue
class_dice_score = np.sum(intersect)*2 / (sum_binary_gt+sum_binary_seg)
dice_scores.append(class_dice_score)
dice_scores = np.array(dice_scores)
return dice_scores
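# Worked example (hypothetical arrays, not from the original source):
#   gt  = np.array([[1, 1], [2, 0]]); seg = np.array([[1, 0], [2, 0]])
#   dice_scores(seg, gt, 2)  # -> array([0.66666667, 1.0]): class 1 overlaps 1 of
#   (2+1) pixels (dice 2/3), class 2 matches exactly (dice 1).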
def image_stats(img):
data_type = type(img[0][0])
    img_height = np.shape(img)[0]  # axis 0 = rows
    img_width = np.shape(img)[1]   # axis 1 = columns
max_pix = np.max(img)
min_pix = np.min(img)
img_mean = np.mean(img)
img_std = np.std(img)
print(f'Type: {data_type}, Width: {img_width}, Height: {img_height}, Max: {max_pix}, Min: {min_pix}, Mean: {img_mean}, Std: {img_std}')
def tensor_stats(tensor_in):
tensor = tensor_in.clone()
tensor = tensor.double()
shape = tensor.shape
tensor_max = torch.max(tensor)
tensor_min = torch.min(tensor)
tensor_mean = torch.mean(tensor)
tensor_std = torch.std(tensor)
print(f"Tensor stats: Shape: {shape} Max: {tensor_max}, Min: {tensor_min}, Mean: {tensor_mean}, Std: {tensor_std}")
def get_mask_from_tensor(tensor, index, mask_index):
tensor_cp = tensor.clone().cpu()
tensor_masks = tensor_cp[index]
tensor_mask = tensor_masks[mask_index]
np_mask = tensor_mask.numpy()
print("np mask in get mask")
image_stats(np_mask)
return np_mask
def dice_loss(logits, target):
input = torch.functional.F.softmax(logits, 1)
smooth = 1.
input = input[:,1,:,:]
#print(input.shape)
#print(target.shape)
iflat = torch.reshape(input, (-1,))
tflat = target.view(-1)
intersection = (iflat * tflat).sum()
return 1 - ((2. * intersection + smooth) /
(iflat.sum() + tflat.sum() + smooth))
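# The ratio (2*intersection + smooth) / (iflat.sum() + tflat.sum() + smooth) is the
# soft Dice coefficient with Laplace smoothing, so perfect overlap drives the loss to 0.
# Quick sanity check (hypothetical shapes, not from the original source):
#   logits = torch.randn(2, 2, 4, 4); target = torch.randint(0, 2, (2, 4, 4)).float()
#   dice_loss(logits, target)  # scalar roughly in [0, 1]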
def weighted_combined_loss(loss_fn1, loss_fn2, weight=0.5):
def combined_loss(pred, Y):
return weight*loss_fn1(pred,Y) + (1-weight)*loss_fn2(pred,Y)
return combined_loss
def mean_dice_score(pred_batch, Y_batch, classes):
assert(pred_batch.size(0) == Y_batch.size(0))
cumulative_scores = np.zeros(classes)
for b_idx in range(pred_batch.size(0)):
mask = predb_to_mask(pred_batch, b_idx).numpy()
gt_tensor = Y_batch[b_idx].clone()
gt = gt_tensor.cpu().numpy()
batch_dice_score = dice_scores(mask, gt, classes)
cumulative_scores += batch_dice_score
avg_dice_scores = cumulative_scores / pred_batch.size(0)
avg_dice_score = np.average(avg_dice_scores)
return avg_dice_score, avg_dice_scores
def mean_pixel_accuracy(pred_batch, Y_batch):
return (pred_batch.argmax(dim=1) == Y_batch.cuda()).float().mean()
def batch_to_img(xb, idx):
img = np.array(xb[idx,0:3])
return img.transpose((1,2,0))
def predb_to_mask(pred_batch, idx):
p = torch.functional.F.softmax(pred_batch[idx], 0)
return p.argmax(0).cpu()
| [
"numpy.mean",
"numpy.logical_and",
"numpy.average",
"torch.mean",
"numpy.std",
"torch.max",
"numpy.max",
"torch.min",
"numpy.array",
"torch.functional.F.softmax",
"torch.reshape",
"numpy.zeros",
"numpy.sum",
"torch.nn.functional.embedding",
"numpy.min",
"numpy.shape",
"torch.std",
... | [((117, 208), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0,\n 1.0, 0.0]])\n', (134, 208), False, 'import torch\n'), ((875, 896), 'numpy.array', 'np.array', (['dice_scores'], {}), '(dice_scores)\n', (883, 896), True, 'import numpy as np\n'), ((1062, 1073), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1068, 1073), True, 'import numpy as np\n'), ((1088, 1099), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (1094, 1099), True, 'import numpy as np\n'), ((1115, 1127), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (1122, 1127), True, 'import numpy as np\n'), ((1142, 1153), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (1148, 1153), True, 'import numpy as np\n'), ((1426, 1443), 'torch.max', 'torch.max', (['tensor'], {}), '(tensor)\n', (1435, 1443), False, 'import torch\n'), ((1461, 1478), 'torch.min', 'torch.min', (['tensor'], {}), '(tensor)\n', (1470, 1478), False, 'import torch\n'), ((1497, 1515), 'torch.mean', 'torch.mean', (['tensor'], {}), '(tensor)\n', (1507, 1515), False, 'import torch\n'), ((1533, 1550), 'torch.std', 'torch.std', (['tensor'], {}), '(tensor)\n', (1542, 1550), False, 'import torch\n'), ((1998, 2035), 'torch.functional.F.softmax', 'torch.functional.F.softmax', (['logits', '(1)'], {}), '(logits, 1)\n', (2024, 2035), False, 'import torch\n'), ((2147, 2174), 'torch.reshape', 'torch.reshape', (['input', '(-1,)'], {}), '(input, (-1,))\n', (2160, 2174), False, 'import torch\n'), ((2659, 2676), 'numpy.zeros', 'np.zeros', (['classes'], {}), '(classes)\n', (2667, 2676), True, 'import numpy as np\n'), ((3046, 3073), 'numpy.average', 'np.average', (['avg_dice_scores'], {}), '(avg_dice_scores)\n', (3056, 3073), True, 'import numpy as np\n'), ((3273, 3295), 'numpy.array', 'np.array', (['xb[idx, 0:3]'], {}), '(xb[idx, 0:3])\n', (3281, 3295), True, 'import numpy as np\n'), ((3374, 3420), 'torch.functional.F.softmax', 'torch.functional.F.softmax', (['pred_batch[idx]', '(0)'], {}), '(pred_batch[idx], 0)\n', (3400, 3420), False, 'import torch\n'), ((556, 593), 'numpy.logical_and', 'np.logical_and', (['binary_gt', 'binary_seg'], {}), '(binary_gt, binary_seg)\n', (570, 593), True, 'import numpy as np\n'), ((618, 635), 'numpy.sum', 'np.sum', (['binary_gt'], {}), '(binary_gt)\n', (624, 635), True, 'import numpy as np\n'), ((661, 679), 'numpy.sum', 'np.sum', (['binary_seg'], {}), '(binary_seg)\n', (667, 679), True, 'import numpy as np\n'), ((997, 1010), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1005, 1010), True, 'import numpy as np\n'), ((1031, 1044), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1039, 1044), True, 'import numpy as np\n'), ((219, 267), 'torch.nn.functional.embedding', 'torch.nn.functional.embedding', (['network_output', 'x'], {}), '(network_output, x)\n', (248, 267), False, 'import torch\n'), ((759, 776), 'numpy.sum', 'np.sum', (['intersect'], {}), '(intersect)\n', (765, 776), True, 'import numpy as np\n')] |
import numpy as np
import networkx as nx
import torch as th
with open('1997.txt', 'r') as f:
l = [[float(num) for num in line.split(' ')[:-1]] for line in f]
mat=np.matrix(l)
mat.resize((15, 15))
#print(mat.shape)
G=nx.from_numpy_matrix(mat, create_using=nx.DiGraph)
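# Note: np.matrix is discouraged by NumPy and nx.from_numpy_matrix was removed in
# networkx 3.0; on modern versions the equivalent call would be
# G = nx.from_numpy_array(np.asarray(mat), create_using=nx.DiGraph)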
| [
"networkx.from_numpy_matrix",
"numpy.matrix"
] | [((167, 179), 'numpy.matrix', 'np.matrix', (['l'], {}), '(l)\n', (176, 179), True, 'import numpy as np\n'), ((221, 271), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['mat'], {'create_using': 'nx.DiGraph'}), '(mat, create_using=nx.DiGraph)\n', (241, 271), True, 'import networkx as nx\n')] |
"""
"""
import copy
import random, sys, time
import torch
import numpy as np
from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, \
mul, add
rand = random_state(random.randrange(sys.maxsize))
digits = 10e8
b = 10
class PrivateKey(object):
def __init__(self, p, q, n):
if p == q:
self.l = p * (p - 1)
else:
self.l = (p - 1) * (q - 1)
try:
self.m = invert(self.l, n)
except ZeroDivisionError as e:
print(e)
exit()
class PublicKey(object):
def __init__(self, n):
self.n = n
self.n_sq = n * n
self.g = n + 1
self.bits = mpz(rint_round(log2(self.n)))
def generate_prime(bits):
"""Will generate an integer of b bits that is prime using the gmpy2 library """
while True:
possible = mpz(2) ** (bits - 1) + mpz_urandomb(rand, bits - 1)
if is_prime(possible):
return possible
def generate_keypair(bits):
""" Will generate a pair of paillier keys bits>5"""
p = generate_prime(bits // 2)
# print(p)
q = generate_prime(bits // 2)
# print(q)
n = p * q
return PrivateKey(p, q, n), PublicKey(n)
def enc(pub, plain): # (public key, plaintext)
r = mpz_urandomb(random_state(random.randrange(sys.maxsize)), pub.bits)
while (gcd(r, pub.n) != 1):
r = mpz_urandomb(random_state(random.randrange(sys.maxsize)), pub.bits)
return f_mod(mul(powmod(pub.g, plain, pub.n_sq), powmod(r, pub.n, pub.n_sq)), pub.n_sq)
# return cipher
def dec(priv, pub, cipher): # (private key, public key, cipher)
x = powmod(cipher, priv.l, pub.n_sq)
L = f_div(sub(x, 1), pub.n)
return f_mod(mul(L, priv.m), pub.n)
# return plain
def enc_add(pub, m1, m2):
"""Add one encrypted integer to another"""
return f_mod(mul(enc(pub, m1), enc(pub, m2)), pub.n_sq)
def enc_add_const(pub, m, c): # to do
"""Add constant n to an encrypted integer"""
return f_mod(mul(powmod(pub.g, c, pub.n_sq), f_mod(enc(pub, m), pub.n_sq)), pub.n_sq)
def enc_mul_const(pub, m, c): # to do
"""Multiplies an encrypted integer by a constant"""
return powmod(enc(pub, m), c, pub.n_sq)
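# Why the three helpers above work (standard Paillier homomorphic identities):
#   dec(enc(m1) * enc(m2) mod n^2)  = m1 + m2 mod n   (enc_add)
#   dec(g^c * enc(m) mod n^2)       = m + c  mod n    (enc_add_const)
#   dec(enc(m)^c mod n^2)           = m * c  mod n    (enc_mul_const)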
def enc_tensor(pub, list, size):
if len(size) == 1:
for i in range(size[0]):
list[i] = enc(pub, int((list[i] + b) * digits))
elif len(size) == 2:
for i in range(size[0]):
for j in range(size[1]):
list[i][j] = enc(pub, int((list[i][j] + b) * digits))
elif len(size) == 3:
for i in range(size[0]):
for j in range(size[1]):
for k in range(size[2]):
list[i][j][k] = enc(pub, int((list[i][j][k] + b) * digits))
elif len(size) == 4:
for i in range(size[0]):
for j in range(size[1]):
for k in range(size[2]):
for l in range(size[3]):
list[i][j][k][l] = enc(pub, int((list[i][j][k][l] + b) * digits))
return list
def dec_tensor(priv, pub, list, size):
if len(size) == 1:
for i in range(size[0]):
list[i] = dec(priv, pub, list[i]) / digits - b
elif len(size) == 2:
for i in range(size[0]):
for j in range(size[1]):
list[i][j] = dec(priv, pub, list[i][j]) / digits - b
elif len(size) == 3:
for i in range(size[0]):
for j in range(size[1]):
for k in range(size[2]):
list[i][j][k] = dec(priv, pub, list[i][j][k]) / digits - b
elif len(size) == 4:
for i in range(size[0]):
for j in range(size[1]):
for k in range(size[2]):
for l in range(size[3]):
list[i][j][k][l] = dec(priv, pub, list[i][j][k][l]) / digits - b
return list
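# Note on the tensor helpers above: they use a simple fixed-point encoding. Each float
# x is shifted by b (assuming x > -b, so the result is non-negative) and scaled by
# `digits`, i.e. m = int((x + b) * digits); dec_tensor inverts this, so values are
# preserved to a resolution of 1/digits (1e-9 here). Both functions mutate their
# `list` argument in place.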
if __name__ == '__main__':
priv, pub = generate_keypair(1024)
"""
test
"""
m1 = mpz(1111111111111)
m2 = mpz(2222222222222)
c1 = enc(pub, m1)
c2 = enc(pub, m2)
dec1 = dec(priv, pub, c1)
dec2 = dec(priv, pub, c2)
print("Cipher1: {}".format(enc(pub, m1)))
print("Dec1: {}".format(dec(priv, pub, c1)))
print("Cipher2: {}".format(enc(pub, m2)))
print("Dec2: {}".format(dec(priv, pub, c2)))
print("Add: {}".format(dec(priv, pub, enc_add(pub, m1, m1))))
print("Add Constant: {}".format(dec(priv, pub, enc_add_const(pub, m1, m1))))
print("Mul Constant: {}".format(dec(priv, pub, enc_mul_const(pub, m1, mpz(1000000000)))))
priv, pub = generate_keypair(1024)
test_number = 100
test_length = [10, 100, 500, 1000]
tests = []
tests_add = []
all_tests_passed = True
for j in range(len(test_length)):
tests.append([])
tests_add.append([])
for i in range(test_number):
tests[j].append(mpz_urandomb(random_state(random.randrange(sys.maxsize)), test_length[j]))
tests_add[j].append(mpz_urandomb(random_state(random.randrange(sys.maxsize)), test_length[j]))
# print(tests)
test_enc_time = []
test_dec_time = []
for j in range(len(test_length)):
test_dec_time.append(0)
test_enc_time.append(0)
for i in range(len(tests[j])):
start = time.time()
enc(pub, tests[j][i])
end = time.time()
test_enc_time[j] += end - start
start = time.time()
dec(priv, pub, tests[j][i])
end = time.time()
test_dec_time[j] += end - start
if tests[j][i] + tests_add[j][i] != dec(priv, pub, enc_add(pub, tests[j][i], tests_add[j][i])):
all_tests_passed = False
test_enc_time[j] /= test_number
test_dec_time[j] /= test_number
print('Average Encrypt time in {} times for {} bits number: {}'.format(test_number, test_length[j],
test_enc_time[j]))
print('Average Decrypt time in {} times for {} bits number: {}'.format(test_number, test_length[j],
test_dec_time[j]))
if all_tests_passed:
print("number tests passed!")
arr1 = np.arange(10 * 1 * 5 * 5).reshape(10, 1, 5, 5)
arr1 = torch.from_numpy(arr1)
size = arr1.size()
    list = arr1.numpy().tolist()
    original = copy.deepcopy(list)  # enc_tensor/dec_tensor mutate `list` in place
start = time.time()
enc_arr = enc_tensor(pub, list, size)
end = time.time()
enc_tensor_time = end - start
start = time.time()
dec_arr = dec_tensor(priv, pub, list, size)
end = time.time()
dec_tensor_time = end - start
new_arr = torch.Tensor(dec_arr)
print('Encrypt time for tensor in shape: {} is: {}'.format(arr1.size(), enc_tensor_time))
print('Decrypt time for tensor in shape: {} is: {}'.format(arr1.size(), dec_tensor_time))
    if dec_arr != original:  # compare against the pristine copy, not the mutated list
all_tests_passed = False
if all_tests_passed:
print("Tensor tests passed!")
"""
update_w_avg.keys():dict_keys(['conv1.weight', 'conv1.bias', 'conv2.weight', 'conv2.bias', 'fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias'])
update_w_avg[k] shape:torch.Size([10, 1, 5, 5])
update_w_avg[k] shape:torch.Size([10])
update_w_avg[k] shape:torch.Size([20, 10, 5, 5])
update_w_avg[k] shape:torch.Size([20])
update_w_avg[k] shape:torch.Size([50, 320])
update_w_avg[k] shape:torch.Size([50])
update_w_avg[k] shape:torch.Size([10, 50])
update_w_avg[k] shape:torch.Size([10])
"""
| [
"random.randrange",
"gmpy2.gcd",
"torch.Tensor",
"gmpy2.powmod",
"torch.from_numpy",
"gmpy2.sub",
"gmpy2.log2",
"gmpy2.mpz_urandomb",
"gmpy2.invert",
"gmpy2.mul",
"gmpy2.is_prime",
"time.time",
"gmpy2.mpz",
"numpy.arange"
] | [((241, 270), 'random.randrange', 'random.randrange', (['sys.maxsize'], {}), '(sys.maxsize)\n', (257, 270), False, 'import random, sys, time\n'), ((1744, 1776), 'gmpy2.powmod', 'powmod', (['cipher', 'priv.l', 'pub.n_sq'], {}), '(cipher, priv.l, pub.n_sq)\n', (1750, 1776), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((4154, 4172), 'gmpy2.mpz', 'mpz', (['(1111111111111)'], {}), '(1111111111111)\n', (4157, 4172), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((4183, 4201), 'gmpy2.mpz', 'mpz', (['(2222222222222)'], {}), '(2222222222222)\n', (4186, 4201), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((6559, 6581), 'torch.from_numpy', 'torch.from_numpy', (['arr1'], {}), '(arr1)\n', (6575, 6581), False, 'import torch\n'), ((6653, 6664), 'time.time', 'time.time', ([], {}), '()\n', (6662, 6664), False, 'import random, sys, time\n'), ((6719, 6730), 'time.time', 'time.time', ([], {}), '()\n', (6728, 6730), False, 'import random, sys, time\n'), ((6779, 6790), 'time.time', 'time.time', ([], {}), '()\n', (6788, 6790), False, 'import random, sys, time\n'), ((6851, 6862), 'time.time', 'time.time', ([], {}), '()\n', (6860, 6862), False, 'import random, sys, time\n'), ((6913, 6934), 'torch.Tensor', 'torch.Tensor', (['dec_arr'], {}), '(dec_arr)\n', (6925, 6934), False, 'import torch\n'), ((1003, 1021), 'gmpy2.is_prime', 'is_prime', (['possible'], {}), '(possible)\n', (1011, 1021), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((1447, 1460), 'gmpy2.gcd', 'gcd', (['r', 'pub.n'], {}), '(r, pub.n)\n', (1450, 1460), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((1792, 1801), 'gmpy2.sub', 'sub', (['x', '(1)'], {}), '(x, 1)\n', (1795, 1801), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((1828, 1842), 'gmpy2.mul', 'mul', (['L', 'priv.m'], {}), '(L, priv.m)\n', (1831, 1842), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((505, 522), 'gmpy2.invert', 'invert', (['self.l', 'n'], {}), '(self.l, n)\n', (511, 522), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((962, 990), 'gmpy2.mpz_urandomb', 'mpz_urandomb', (['rand', '(bits - 1)'], {}), '(rand, bits - 1)\n', (974, 990), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((1393, 1422), 'random.randrange', 'random.randrange', (['sys.maxsize'], {}), '(sys.maxsize)\n', (1409, 1422), False, 'import random, sys, time\n'), ((1573, 1603), 'gmpy2.powmod', 'powmod', (['pub.g', 'plain', 'pub.n_sq'], {}), '(pub.g, plain, pub.n_sq)\n', (1579, 1603), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((1605, 1631), 'gmpy2.powmod', 'powmod', (['r', 'pub.n', 'pub.n_sq'], {}), '(r, pub.n, pub.n_sq)\n', (1611, 1631), False, 'from gmpy2 import mpz, powmod, 
invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((2127, 2153), 'gmpy2.powmod', 'powmod', (['pub.g', 'c', 'pub.n_sq'], {}), '(pub.g, c, pub.n_sq)\n', (2133, 2153), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((5497, 5508), 'time.time', 'time.time', ([], {}), '()\n', (5506, 5508), False, 'import random, sys, time\n'), ((5563, 5574), 'time.time', 'time.time', ([], {}), '()\n', (5572, 5574), False, 'import random, sys, time\n'), ((5641, 5652), 'time.time', 'time.time', ([], {}), '()\n', (5650, 5652), False, 'import random, sys, time\n'), ((5713, 5724), 'time.time', 'time.time', ([], {}), '()\n', (5722, 5724), False, 'import random, sys, time\n'), ((6498, 6523), 'numpy.arange', 'np.arange', (['(10 * 1 * 5 * 5)'], {}), '(10 * 1 * 5 * 5)\n', (6507, 6523), True, 'import numpy as np\n'), ((770, 782), 'gmpy2.log2', 'log2', (['self.n'], {}), '(self.n)\n', (774, 782), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((939, 945), 'gmpy2.mpz', 'mpz', (['(2)'], {}), '(2)\n', (942, 945), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((1507, 1536), 'random.randrange', 'random.randrange', (['sys.maxsize'], {}), '(sys.maxsize)\n', (1523, 1536), False, 'import random, sys, time\n'), ((4728, 4743), 'gmpy2.mpz', 'mpz', (['(1000000000)'], {}), '(1000000000)\n', (4731, 4743), False, 'from gmpy2 import mpz, powmod, invert, is_prime, random_state, mpz_urandomb, rint_round, log2, gcd, f_mod, f_div, sub, mul, add\n'), ((5106, 5135), 'random.randrange', 'random.randrange', (['sys.maxsize'], {}), '(sys.maxsize)\n', (5122, 5135), False, 'import random, sys, time\n'), ((5214, 5243), 'random.randrange', 'random.randrange', (['sys.maxsize'], {}), '(sys.maxsize)\n', (5230, 5243), False, 'import random, sys, time\n')] |
import numpy as np
import vrep
import buffer
class Script:
def __init__(self, my_robot):
self.robot = my_robot
self.buffer = buffer.ReplayMemory(100)
self.client_id = self.robot.client_id
self.states = []
self.object_position = None
self.euler_angles2 = None
self.pickup_position = None
self.pickup_orientation = None
self.image = None
global COUNTER
COUNTER = 0
global COUNTER1
COUNTER1 = 0
def success_rate(self, label):
global COUNTER
global COUNTER1
COUNTER += 1
if label:
COUNTER1 += 1
if COUNTER % 100 == 0:
print('success ratio is: ', (float(COUNTER1)/float(COUNTER)))
def get_sawyer_position(self):
_, sawyer_target_position = vrep.simxGetObjectPosition(self.robot.client_id, \
self.robot.sawyer_target_handle, -1, vrep.simx_opmode_blocking)
return sawyer_target_position
def get_sawyer_orientation(self):
_, sawyer_target_orientation = vrep.simxGetObjectOrientation(self.robot.client_id, \
self.robot.sawyer_target_handle, -1, vrep.simx_opmode_blocking)
return sawyer_target_orientation
def get_object_position(self):
_, object_position = vrep.simxGetObjectPosition(self.robot.client_id, \
self.robot.object_handle[0], -1, vrep.simx_opmode_blocking)
return object_position
def get_object_orientation(self):
_, object_orientation = vrep.simxGetObjectOrientation(self.robot.client_id, \
self.robot.object_handle[0], -1, vrep.simx_opmode_blocking)
return object_orientation
def new_episode(self, label):
self.buffer.push(label, self.pickup_position[0], self.pickup_position[1], \
self.pickup_orientation[1], self.image)
self.buffer.store_at_disk()
self.success_rate(label)
def pick_position(self, exploit=True):
_, self.object_position = vrep.simxGetObjectPosition(self.client_id, \
self.robot.object_handle[0], -1, vrep.simx_opmode_blocking)
_, sawyer_target_position = vrep.simxGetObjectPosition(self.client_id, \
self.robot.sawyer_target_handle, -1, vrep.simx_opmode_blocking)
if exploit == bool(1):
self.pickup_position = np.array([self.object_position[0], self.object_position[1], \
sawyer_target_position[2]])
else:
# random = np.random.normal(0,0.05,3)
# self.pickup_position = np.array([self.object_position[0] + random[0]
# self.object_position[1] + random[1], sawyer_target_position[2]])
action_x = np.random.uniform(low=1.028, high=1.242, size=1)[0]
action_y = np.random.uniform(low=1.1, high=1.278, size=1)[0]
self.pickup_position = np.array([action_x, action_y, sawyer_target_position[2]])
def pick_orientation(self, exploit=True):
_, euler_angles = vrep.simxGetObjectOrientation(self.client_id, \
self.robot.sawyer_target_handle, -1, vrep.simx_opmode_blocking)
_, self.euler_angles2 = vrep.simxGetObjectOrientation(self.client_id, \
self.robot.object_handle[0], -1, vrep.simx_opmode_blocking)
if exploit == bool(1):
self.pickup_orientation = np.array([euler_angles[0], self.euler_angles2[1], \
euler_angles[2]])
else:
ori = np.random.uniform(0.017, 1.553, 1)[0]
self.pickup_orientation = np.array([euler_angles[0], ori, euler_angles[2]])
def set_gripper_position(self):
_, sawyer_target_position = vrep.simxGetObjectPosition(self.client_id, \
self.robot.sawyer_target_handle, -1, vrep.simx_opmode_blocking)
self.image = self.robot.get_image()
move_direction = np.asarray([self.pickup_position[0] - sawyer_target_position[0], \
self.pickup_position[1] - sawyer_target_position[1], self.pickup_position[2] - \
sawyer_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.03*move_direction/move_magnitude
num_move_steps = int(np.floor(move_magnitude/0.03))
remaining_magnitude = -num_move_steps * 0.03 + move_magnitude
remaining_distance = remaining_magnitude * move_direction/move_magnitude
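        # Split the move into num_move_steps fixed-size (0.03, in scene units) steps
        # plus one final remainder step, so the target displacement is reached exactly.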
for step_iter in range(num_move_steps): #selects action and executes action
vrep.simxSetObjectPosition(self.client_id, self.robot.sawyer_target_handle, -1, \
(sawyer_target_position[0] + move_step[0], sawyer_target_position[1] + \
move_step[1], sawyer_target_position[2] + move_step[2]), vrep.simx_opmode_blocking)
_, sawyer_target_position = vrep.simxGetObjectPosition(self.client_id, \
self.robot.sawyer_target_handle, -1, vrep.simx_opmode_blocking)
vrep.simxSynchronousTrigger(self.client_id)
vrep.simxGetPingTime(self.client_id)
vrep.simxSetObjectPosition(self.robot.client_id, self.robot.sawyer_target_handle, -1, \
(sawyer_target_position[0] + remaining_distance[0], sawyer_target_position[1] + \
remaining_distance[1], sawyer_target_position[2]+ remaining_distance[2]), \
vrep.simx_opmode_blocking)
vrep.simxSynchronousTrigger(self.client_id)
vrep.simxGetPingTime(self.client_id)
def set_gripper_orientation(self):
_, sawyer_orientation = vrep.simxGetObjectOrientation(self.client_id, \
self.robot.sawyer_target_handle, -1, vrep.simx_opmode_blocking)
rotation_step = 0.3 if (self.pickup_orientation[1] - sawyer_orientation[1] > 0) \
else -0.3
num_rotation_steps = int(np.floor((self.pickup_orientation[1] - \
sawyer_orientation[1])/rotation_step))
for step_iter in range(num_rotation_steps):
vrep.simxSetObjectOrientation(self.robot.client_id, \
self.robot.sawyer_target_handle, -1, (sawyer_orientation[0], \
sawyer_orientation[1] + rotation_step, sawyer_orientation[2]), \
vrep.simx_opmode_blocking)
_, sawyer_orientation = vrep.simxGetObjectOrientation(self.client_id, \
self.robot.sawyer_target_handle, -1, vrep.simx_opmode_blocking)
vrep.simxSynchronousTrigger(self.client_id)
vrep.simxGetPingTime(self.client_id)
vrep.simxSetObjectOrientation(self.robot.client_id, self.robot.sawyer_target_handle, \
-1, (sawyer_orientation[0], self.pickup_orientation[1], sawyer_orientation[2]), \
vrep.simx_opmode_blocking)
vrep.simxSynchronousTrigger(self.client_id)
vrep.simxGetPingTime(self.client_id)
def move_down(self): #3 time-steps
_, object_position = vrep.simxGetObjectPosition(self.client_id, \
self.robot.object_handle[0], -1, vrep.simx_opmode_blocking)
_, sawyer_target_position = vrep.simxGetObjectPosition(self.client_id, \
self.robot.sawyer_target_handle, -1, vrep.simx_opmode_blocking)
move_direction = np.asarray([self.pickup_position[0] - sawyer_target_position[0], \
self.pickup_position[1] - sawyer_target_position[1], object_position[2] + 0.01 \
- sawyer_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.03*move_direction/move_magnitude
num_move_steps = int(np.floor(move_magnitude/0.03))
remaining_magnitude = -num_move_steps * 0.03 + move_magnitude
remaining_distance = remaining_magnitude * move_direction/move_magnitude
for step_iter in range(num_move_steps):
vrep.simxSetObjectPosition(self.client_id, self.robot.sawyer_target_handle,\
-1, (sawyer_target_position[0] + move_step[0], sawyer_target_position[1] \
+ move_step[1], sawyer_target_position[2] + move_step[2]), \
vrep.simx_opmode_blocking)
_, sawyer_target_position = vrep.simxGetObjectPosition(self.client_id,\
self.robot.sawyer_target_handle, -1, vrep.simx_opmode_blocking)
vrep.simxSynchronousTrigger(self.client_id)
vrep.simxGetPingTime(self.client_id)
vrep.simxSetObjectPosition(self.robot.client_id, self.robot.sawyer_target_handle, \
-1, (sawyer_target_position[0] + remaining_distance[0], sawyer_target_position[1] \
+ remaining_distance[1], sawyer_target_position[2]+ remaining_distance[2]), \
vrep.simx_opmode_blocking)
vrep.simxSynchronousTrigger(self.client_id)
vrep.simxGetPingTime(self.client_id)
def open_hand(self):
self.robot.open_hand()
def close_hand(self):
self.robot.close_hand()
def lift_arm(self):
self.robot.lift_arm()
def successful_grasp(self):
label = self.robot.successful_grasp()
return label
| [
"vrep.simxSynchronousTrigger",
"vrep.simxGetPingTime",
"vrep.simxSetObjectOrientation",
"numpy.asarray",
"numpy.floor",
"vrep.simxGetObjectPosition",
"numpy.array",
"vrep.simxSetObjectPosition",
"numpy.random.uniform",
"numpy.linalg.norm",
"vrep.simxGetObjectOrientation",
"buffer.ReplayMemory"... | [((147, 171), 'buffer.ReplayMemory', 'buffer.ReplayMemory', (['(100)'], {}), '(100)\n', (166, 171), False, 'import buffer\n'), ((811, 928), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.robot.client_id', 'self.robot.sawyer_target_handle', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.robot.client_id, self.robot.\n sawyer_target_handle, -1, vrep.simx_opmode_blocking)\n', (837, 928), False, 'import vrep\n'), ((1054, 1174), 'vrep.simxGetObjectOrientation', 'vrep.simxGetObjectOrientation', (['self.robot.client_id', 'self.robot.sawyer_target_handle', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.robot.client_id, self.robot.\n sawyer_target_handle, -1, vrep.simx_opmode_blocking)\n', (1083, 1174), False, 'import vrep\n'), ((1290, 1403), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.robot.client_id', 'self.robot.object_handle[0]', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.robot.client_id, self.robot.object_handle[0\n ], -1, vrep.simx_opmode_blocking)\n', (1316, 1403), False, 'import vrep\n'), ((1515, 1631), 'vrep.simxGetObjectOrientation', 'vrep.simxGetObjectOrientation', (['self.robot.client_id', 'self.robot.object_handle[0]', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.robot.client_id, self.robot.\n object_handle[0], -1, vrep.simx_opmode_blocking)\n', (1544, 1631), False, 'import vrep\n'), ((1994, 2100), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.client_id', 'self.robot.object_handle[0]', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.object_handle[0], -1,\n vrep.simx_opmode_blocking)\n', (2020, 2100), False, 'import vrep\n'), ((2147, 2257), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.client_id', 'self.robot.sawyer_target_handle', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.sawyer_target_handle,\n -1, vrep.simx_opmode_blocking)\n', (2173, 2257), False, 'import vrep\n'), ((2981, 3095), 'vrep.simxGetObjectOrientation', 'vrep.simxGetObjectOrientation', (['self.client_id', 'self.robot.sawyer_target_handle', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.\n sawyer_target_handle, -1, vrep.simx_opmode_blocking)\n', (3010, 3095), False, 'import vrep\n'), ((3137, 3247), 'vrep.simxGetObjectOrientation', 'vrep.simxGetObjectOrientation', (['self.client_id', 'self.robot.object_handle[0]', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.object_handle[0], \n -1, vrep.simx_opmode_blocking)\n', (3166, 3247), False, 'import vrep\n'), ((3643, 3753), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.client_id', 'self.robot.sawyer_target_handle', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.sawyer_target_handle,\n -1, vrep.simx_opmode_blocking)\n', (3669, 3753), False, 'import vrep\n'), ((3833, 4013), 'numpy.asarray', 'np.asarray', (['[self.pickup_position[0] - sawyer_target_position[0], self.pickup_position[\n 1] - sawyer_target_position[1], self.pickup_position[2] -\n sawyer_target_position[2]]'], {}), '([self.pickup_position[0] - sawyer_target_position[0], self.\n pickup_position[1] - sawyer_target_position[1], self.pickup_position[2] -\n sawyer_target_position[2]])\n', (3843, 4013), True, 'import numpy as np\n'), ((4058, 4088), 'numpy.linalg.norm', 'np.linalg.norm', (['move_direction'], {}), '(move_direction)\n', (4072, 4088), True, 'import numpy as np\n'), ((5002, 5288), 'vrep.simxSetObjectPosition', 
'vrep.simxSetObjectPosition', (['self.robot.client_id', 'self.robot.sawyer_target_handle', '(-1)', '(sawyer_target_position[0] + remaining_distance[0], sawyer_target_position[\n 1] + remaining_distance[1], sawyer_target_position[2] +\n remaining_distance[2])', 'vrep.simx_opmode_blocking'], {}), '(self.robot.client_id, self.robot.\n sawyer_target_handle, -1, (sawyer_target_position[0] +\n remaining_distance[0], sawyer_target_position[1] + remaining_distance[1\n ], sawyer_target_position[2] + remaining_distance[2]), vrep.\n simx_opmode_blocking)\n', (5028, 5288), False, 'import vrep\n'), ((5307, 5350), 'vrep.simxSynchronousTrigger', 'vrep.simxSynchronousTrigger', (['self.client_id'], {}), '(self.client_id)\n', (5334, 5350), False, 'import vrep\n'), ((5359, 5395), 'vrep.simxGetPingTime', 'vrep.simxGetPingTime', (['self.client_id'], {}), '(self.client_id)\n', (5379, 5395), False, 'import vrep\n'), ((5468, 5582), 'vrep.simxGetObjectOrientation', 'vrep.simxGetObjectOrientation', (['self.client_id', 'self.robot.sawyer_target_handle', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.\n sawyer_target_handle, -1, vrep.simx_opmode_blocking)\n', (5497, 5582), False, 'import vrep\n'), ((6429, 6630), 'vrep.simxSetObjectOrientation', 'vrep.simxSetObjectOrientation', (['self.robot.client_id', 'self.robot.sawyer_target_handle', '(-1)', '(sawyer_orientation[0], self.pickup_orientation[1], sawyer_orientation[2])', 'vrep.simx_opmode_blocking'], {}), '(self.robot.client_id, self.robot.\n sawyer_target_handle, -1, (sawyer_orientation[0], self.\n pickup_orientation[1], sawyer_orientation[2]), vrep.simx_opmode_blocking)\n', (6458, 6630), False, 'import vrep\n'), ((6657, 6700), 'vrep.simxSynchronousTrigger', 'vrep.simxSynchronousTrigger', (['self.client_id'], {}), '(self.client_id)\n', (6684, 6700), False, 'import vrep\n'), ((6709, 6745), 'vrep.simxGetPingTime', 'vrep.simxGetPingTime', (['self.client_id'], {}), '(self.client_id)\n', (6729, 6745), False, 'import vrep\n'), ((6815, 6921), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.client_id', 'self.robot.object_handle[0]', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.object_handle[0], -1,\n vrep.simx_opmode_blocking)\n', (6841, 6921), False, 'import vrep\n'), ((6968, 7078), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.client_id', 'self.robot.sawyer_target_handle', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.sawyer_target_handle,\n -1, vrep.simx_opmode_blocking)\n', (6994, 7078), False, 'import vrep\n'), ((7115, 7298), 'numpy.asarray', 'np.asarray', (['[self.pickup_position[0] - sawyer_target_position[0], self.pickup_position[\n 1] - sawyer_target_position[1], object_position[2] + 0.01 -\n sawyer_target_position[2]]'], {}), '([self.pickup_position[0] - sawyer_target_position[0], self.\n pickup_position[1] - sawyer_target_position[1], object_position[2] + \n 0.01 - sawyer_target_position[2]])\n', (7125, 7298), True, 'import numpy as np\n'), ((7342, 7372), 'numpy.linalg.norm', 'np.linalg.norm', (['move_direction'], {}), '(move_direction)\n', (7356, 7372), True, 'import numpy as np\n'), ((8266, 8552), 'vrep.simxSetObjectPosition', 'vrep.simxSetObjectPosition', (['self.robot.client_id', 'self.robot.sawyer_target_handle', '(-1)', '(sawyer_target_position[0] + remaining_distance[0], sawyer_target_position[\n 1] + remaining_distance[1], sawyer_target_position[2] +\n remaining_distance[2])', 'vrep.simx_opmode_blocking'], {}), '(self.robot.client_id, 
self.robot.\n sawyer_target_handle, -1, (sawyer_target_position[0] +\n remaining_distance[0], sawyer_target_position[1] + remaining_distance[1\n ], sawyer_target_position[2] + remaining_distance[2]), vrep.\n simx_opmode_blocking)\n', (8292, 8552), False, 'import vrep\n'), ((8583, 8626), 'vrep.simxSynchronousTrigger', 'vrep.simxSynchronousTrigger', (['self.client_id'], {}), '(self.client_id)\n', (8610, 8626), False, 'import vrep\n'), ((8635, 8671), 'vrep.simxGetPingTime', 'vrep.simxGetPingTime', (['self.client_id'], {}), '(self.client_id)\n', (8655, 8671), False, 'import vrep\n'), ((2334, 2425), 'numpy.array', 'np.array', (['[self.object_position[0], self.object_position[1], sawyer_target_position[2]]'], {}), '([self.object_position[0], self.object_position[1],\n sawyer_target_position[2]])\n', (2342, 2425), True, 'import numpy as np\n'), ((2850, 2907), 'numpy.array', 'np.array', (['[action_x, action_y, sawyer_target_position[2]]'], {}), '([action_x, action_y, sawyer_target_position[2]])\n', (2858, 2907), True, 'import numpy as np\n'), ((3326, 3393), 'numpy.array', 'np.array', (['[euler_angles[0], self.euler_angles2[1], euler_angles[2]]'], {}), '([euler_angles[0], self.euler_angles2[1], euler_angles[2]])\n', (3334, 3393), True, 'import numpy as np\n'), ((3520, 3569), 'numpy.array', 'np.array', (['[euler_angles[0], ori, euler_angles[2]]'], {}), '([euler_angles[0], ori, euler_angles[2]])\n', (3528, 3569), True, 'import numpy as np\n'), ((4173, 4204), 'numpy.floor', 'np.floor', (['(move_magnitude / 0.03)'], {}), '(move_magnitude / 0.03)\n', (4181, 4204), True, 'import numpy as np\n'), ((4452, 4700), 'vrep.simxSetObjectPosition', 'vrep.simxSetObjectPosition', (['self.client_id', 'self.robot.sawyer_target_handle', '(-1)', '(sawyer_target_position[0] + move_step[0], sawyer_target_position[1] +\n move_step[1], sawyer_target_position[2] + move_step[2])', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.sawyer_target_handle,\n -1, (sawyer_target_position[0] + move_step[0], sawyer_target_position[1\n ] + move_step[1], sawyer_target_position[2] + move_step[2]), vrep.\n simx_opmode_blocking)\n', (4478, 4700), False, 'import vrep\n'), ((4763, 4873), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.client_id', 'self.robot.sawyer_target_handle', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.sawyer_target_handle,\n -1, vrep.simx_opmode_blocking)\n', (4789, 4873), False, 'import vrep\n'), ((4900, 4943), 'vrep.simxSynchronousTrigger', 'vrep.simxSynchronousTrigger', (['self.client_id'], {}), '(self.client_id)\n', (4927, 4943), False, 'import vrep\n'), ((4956, 4992), 'vrep.simxGetPingTime', 'vrep.simxGetPingTime', (['self.client_id'], {}), '(self.client_id)\n', (4976, 4992), False, 'import vrep\n'), ((5737, 5815), 'numpy.floor', 'np.floor', (['((self.pickup_orientation[1] - sawyer_orientation[1]) / rotation_step)'], {}), '((self.pickup_orientation[1] - sawyer_orientation[1]) / rotation_step)\n', (5745, 5815), True, 'import numpy as np\n'), ((5894, 6105), 'vrep.simxSetObjectOrientation', 'vrep.simxSetObjectOrientation', (['self.robot.client_id', 'self.robot.sawyer_target_handle', '(-1)', '(sawyer_orientation[0], sawyer_orientation[1] + rotation_step,\n sawyer_orientation[2])', 'vrep.simx_opmode_blocking'], {}), '(self.robot.client_id, self.robot.\n sawyer_target_handle, -1, (sawyer_orientation[0], sawyer_orientation[1] +\n rotation_step, sawyer_orientation[2]), vrep.simx_opmode_blocking)\n', (5923, 6105), False, 'import vrep\n'), ((6187, 6301), 
'vrep.simxGetObjectOrientation', 'vrep.simxGetObjectOrientation', (['self.client_id', 'self.robot.sawyer_target_handle', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.\n sawyer_target_handle, -1, vrep.simx_opmode_blocking)\n', (6216, 6301), False, 'import vrep\n'), ((6327, 6370), 'vrep.simxSynchronousTrigger', 'vrep.simxSynchronousTrigger', (['self.client_id'], {}), '(self.client_id)\n', (6354, 6370), False, 'import vrep\n'), ((6383, 6419), 'vrep.simxGetPingTime', 'vrep.simxGetPingTime', (['self.client_id'], {}), '(self.client_id)\n', (6403, 6419), False, 'import vrep\n'), ((7457, 7488), 'numpy.floor', 'np.floor', (['(move_magnitude / 0.03)'], {}), '(move_magnitude / 0.03)\n', (7465, 7488), True, 'import numpy as np\n'), ((7700, 7948), 'vrep.simxSetObjectPosition', 'vrep.simxSetObjectPosition', (['self.client_id', 'self.robot.sawyer_target_handle', '(-1)', '(sawyer_target_position[0] + move_step[0], sawyer_target_position[1] +\n move_step[1], sawyer_target_position[2] + move_step[2])', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.sawyer_target_handle,\n -1, (sawyer_target_position[0] + move_step[0], sawyer_target_position[1\n ] + move_step[1], sawyer_target_position[2] + move_step[2]), vrep.\n simx_opmode_blocking)\n', (7726, 7948), False, 'import vrep\n'), ((8028, 8138), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.client_id', 'self.robot.sawyer_target_handle', '(-1)', 'vrep.simx_opmode_blocking'], {}), '(self.client_id, self.robot.sawyer_target_handle,\n -1, vrep.simx_opmode_blocking)\n', (8054, 8138), False, 'import vrep\n'), ((8164, 8207), 'vrep.simxSynchronousTrigger', 'vrep.simxSynchronousTrigger', (['self.client_id'], {}), '(self.client_id)\n', (8191, 8207), False, 'import vrep\n'), ((8220, 8256), 'vrep.simxGetPingTime', 'vrep.simxGetPingTime', (['self.client_id'], {}), '(self.client_id)\n', (8240, 8256), False, 'import vrep\n'), ((2690, 2738), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1.028)', 'high': '(1.242)', 'size': '(1)'}), '(low=1.028, high=1.242, size=1)\n', (2707, 2738), True, 'import numpy as np\n'), ((2765, 2811), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1.1)', 'high': '(1.278)', 'size': '(1)'}), '(low=1.1, high=1.278, size=1)\n', (2782, 2811), True, 'import numpy as np\n'), ((3444, 3478), 'numpy.random.uniform', 'np.random.uniform', (['(0.017)', '(1.553)', '(1)'], {}), '(0.017, 1.553, 1)\n', (3461, 3478), True, 'import numpy as np\n')] |
import numpy as np
def xr_merge(ds1, ds2, on, how='left', dim1='dim_0', dim2='dim_0', fill_value=np.nan):
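    """Left-join the variables of ds2 onto ds1 along the shared key column(s) `on`
    (pandas-merge semantics); rows of ds1 with no match are padded with fill_value."""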
if how != 'left':
raise NotImplementedError
ds1 = ds1.copy()
ds1 = ds1.reset_coords().set_coords(on)
ds2 = ds2.reset_coords().set_coords(on)
ds2 = ds2.rename({dim2: dim1})
df1 = ds1.reset_coords()[on].to_dataframe()
df2 = ds2.reset_coords()[on].to_dataframe()
df1['idx1'] = np.arange(len(df1))
df2['idx2'] = np.arange(len(df2))
merge = df1.merge(df2, on=on, how=how)
assert len(merge) == ds1.dims[dim1]
idx1 = merge['idx1'].values
idx2 = merge['idx2'].values
mask = np.isfinite(idx2)
# assert mask.sum() == ds2.dims[dim1]
idx1 = idx1[mask]
    idx2 = idx2[mask].astype(int)  # np.int was removed in NumPy >= 1.24
for k, data_var in ds2.data_vars.items():
array = data_var.values
if isinstance(fill_value, dict):
fill = fill_value.get(k, float('nan'))
else:
fill = fill_value
assert data_var.dims[0] == dim1
shape = list(array.shape)
shape[0] = len(merge)
new_array = np.empty(shape, dtype=np.array(fill).dtype)
new_array[:] = fill
new_array[idx1] = array[idx2]
ds1[k] = data_var.dims, new_array
return ds1
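# A minimal usage sketch (hypothetical data, not from the original source):
# left-join ds2 onto ds1 along the shared key column 'id'.
if __name__ == '__main__':
    import xarray as xr
    ds1 = xr.Dataset({'id': ('dim_0', [1, 2, 3]), 'a': ('dim_0', [10, 20, 30])})
    ds2 = xr.Dataset({'id': ('dim_0', [2, 3]), 'b': ('dim_0', [0.2, 0.3])})
    merged = xr_merge(ds1, ds2, on='id')
    print(merged['b'].values)  # [nan 0.2 0.3]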
| [
"numpy.array",
"numpy.isfinite"
] | [((666, 683), 'numpy.isfinite', 'np.isfinite', (['idx2'], {}), '(idx2)\n', (677, 683), True, 'import numpy as np\n'), ((1160, 1174), 'numpy.array', 'np.array', (['fill'], {}), '(fill)\n', (1168, 1174), True, 'import numpy as np\n')] |
#####################################################################################
# MIT License #
# #
# Copyright (C) 2019 <NAME> #
# #
# This file is part of VQ-VAE-Speech. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
import numpy as np
from python_speech_features.base import mfcc, logfbank
from python_speech_features import delta
class SpeechFeatures(object):
default_rate = 16000
default_filters_number = 13
default_augmented = True
@staticmethod
def mfcc(signal, rate=default_rate, filters_number=default_filters_number, augmented=default_augmented):
mfcc_features = mfcc(signal, rate, numcep=filters_number)
if not augmented:
return mfcc_features
d_mfcc_features = delta(mfcc_features, 2)
a_mfcc_features = delta(d_mfcc_features, 2)
concatenated_features = np.concatenate((
mfcc_features,
d_mfcc_features,
a_mfcc_features
),
axis=1
)
return concatenated_features
@staticmethod
def logfbank(signal, rate=default_rate, filters_number=default_filters_number, augmented=default_augmented):
logfbank_features = logfbank(signal, rate, nfilt=filters_number)
if not augmented:
return logfbank_features
d_logfbank_features = delta(logfbank_features, 2)
a_logfbank_features = delta(d_logfbank_features, 2)
concatenated_features = np.concatenate((
logfbank_features,
d_logfbank_features,
a_logfbank_features
),
axis=1
)
return concatenated_features
@staticmethod
def features_from_name(name, signal, rate=default_rate, filters_number=default_filters_number, augmented=default_augmented):
return getattr(SpeechFeatures, name)(signal, rate, filters_number, augmented)
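# A minimal usage sketch (hypothetical signal, not from the original source):
# augmentation stacks the base features with their deltas and delta-deltas,
# tripling the feature dimension (13 -> 39 columns).
if __name__ == '__main__':
    signal = np.random.randn(16000)  # one second of noise at 16 kHz
    features = SpeechFeatures.features_from_name('mfcc', signal)
    print(features.shape)  # (~99 frames, 39) with the default 10 ms hop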
| [
"python_speech_features.base.mfcc",
"python_speech_features.delta",
"python_speech_features.base.logfbank",
"numpy.concatenate"
] | [((2554, 2595), 'python_speech_features.base.mfcc', 'mfcc', (['signal', 'rate'], {'numcep': 'filters_number'}), '(signal, rate, numcep=filters_number)\n', (2558, 2595), False, 'from python_speech_features.base import mfcc, logfbank\n'), ((2681, 2704), 'python_speech_features.delta', 'delta', (['mfcc_features', '(2)'], {}), '(mfcc_features, 2)\n', (2686, 2704), False, 'from python_speech_features import delta\n'), ((2731, 2756), 'python_speech_features.delta', 'delta', (['d_mfcc_features', '(2)'], {}), '(d_mfcc_features, 2)\n', (2736, 2756), False, 'from python_speech_features import delta\n'), ((2789, 2862), 'numpy.concatenate', 'np.concatenate', (['(mfcc_features, d_mfcc_features, a_mfcc_features)'], {'axis': '(1)'}), '((mfcc_features, d_mfcc_features, a_mfcc_features), axis=1)\n', (2803, 2862), True, 'import numpy as np\n'), ((3143, 3187), 'python_speech_features.base.logfbank', 'logfbank', (['signal', 'rate'], {'nfilt': 'filters_number'}), '(signal, rate, nfilt=filters_number)\n', (3151, 3187), False, 'from python_speech_features.base import mfcc, logfbank\n'), ((3281, 3308), 'python_speech_features.delta', 'delta', (['logfbank_features', '(2)'], {}), '(logfbank_features, 2)\n', (3286, 3308), False, 'from python_speech_features import delta\n'), ((3339, 3368), 'python_speech_features.delta', 'delta', (['d_logfbank_features', '(2)'], {}), '(d_logfbank_features, 2)\n', (3344, 3368), False, 'from python_speech_features import delta\n'), ((3401, 3491), 'numpy.concatenate', 'np.concatenate', (['(logfbank_features, d_logfbank_features, a_logfbank_features)'], {'axis': '(1)'}), '((logfbank_features, d_logfbank_features, a_logfbank_features\n ), axis=1)\n', (3415, 3491), True, 'import numpy as np\n')] |
from solvers.evolution.Chromosome_RandKey import Chromosome_RK
import numpy as np
import random, bisect
class Population(object):
def __init__(self, graph, size):
self.specimen = list()
self.graph = graph
self.edgeList = list()
for edge in graph.edgeList:
self.edgeList.append( edge.toTuple() )
self.size = size
for i in range(size):
c = Chromosome_RK(self)
self.insertSorted( c )
def printPopulation(self, onlyBest=False):
if not onlyBest:
for i,c in enumerate( self.specimen):
print("chromosome %d = (%s, %s) %d" % (i,str(np.argsort(c.nodegene)), str(c.edgegene), c.numCrossings() ) )
else:
c = self.specimen[0]
print("chromosome %d = (%s, %s) %d" % (0,str(np.argsort(c.nodegene)), str(c.edgegene), c.numCrossings() ) )
def insertSorted(self, x):
bisect.insort_left(self.specimen, x)
def selectSingleRoulette(self):
raise Exception("Not implemented")
def selectSingleTournament(self, k):
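        # self.specimen is kept sorted by fitness (see insertSorted), so drawing k
        # uniform random indices and keeping the smallest is a size-k tournament:
        # the best-ranked of k random contestants wins.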
n = len(self.specimen)
bestFit = n
for i in range(k):
r = random.randint(0,n-1)
if r < bestFit:
bestFit = r
return bestFit, self.specimen[bestFit]
def getGraphSize(self):
return len(self.graph.nodes)
def getEdgeList(self):
return self.edgeList
def getPageNumber(self):
        return self.graph.pageNumber
| [
"solvers.evolution.Chromosome_RandKey.Chromosome_RK",
"numpy.argsort",
"bisect.insort_left",
"random.randint"
] | [((954, 990), 'bisect.insort_left', 'bisect.insort_left', (['self.specimen', 'x'], {}), '(self.specimen, x)\n', (972, 990), False, 'import random, bisect\n'), ((416, 435), 'solvers.evolution.Chromosome_RandKey.Chromosome_RK', 'Chromosome_RK', (['self'], {}), '(self)\n', (429, 435), False, 'from solvers.evolution.Chromosome_RandKey import Chromosome_RK\n'), ((1223, 1247), 'random.randint', 'random.randint', (['(0)', '(n - 1)'], {}), '(0, n - 1)\n', (1237, 1247), False, 'import random, bisect\n'), ((839, 861), 'numpy.argsort', 'np.argsort', (['c.nodegene'], {}), '(c.nodegene)\n', (849, 861), True, 'import numpy as np\n'), ((672, 694), 'numpy.argsort', 'np.argsort', (['c.nodegene'], {}), '(c.nodegene)\n', (682, 694), True, 'import numpy as np\n')] |
'''
root/problems/problem_multiGaussian.py
'''
### packages
import os
import numpy as np
import logging
### sys relative to root dir
import sys
from os.path import dirname, realpath
sys.path.append(dirname(dirname(realpath(__file__))))
### absolute imports wrt root
from problems.problem_definition import ProblemDefinition_Abstract
from codes.factory import FactoryDefinition
from data.data_tools import ezData
from codes.utilities.custom_logging import ezLogging
from codes.block_definitions.block_shapemeta import BlockShapeMeta_Gaussian
from codes.block_definitions.block_operators import BlockOperators_Gaussian
from codes.block_definitions.block_arguments import BlockArguments_Gaussian
from codes.block_definitions.block_evaluate import BlockEvaluate_Standard
from codes.block_definitions.block_mutate import BlockMutate_NoFtn
from codes.block_definitions.block_mate import BlockMate_NoMate
from codes.individual_definitions.individual_mutate import IndividualMutate_RollOnEachBlock
from codes.individual_definitions.individual_mate import IndividualMate_RollOnEachBlock
from codes.individual_definitions.individual_evaluate import IndividualEvaluate_Standard
from post_process import save_things
from post_process import plot_things
class Problem(ProblemDefinition_Abstract):
'''
    Not intended to test whether this does a good job at evolving, but rather a quick
    way to try out the different mating, mutating, operators, etc. with multiple blocks.
'''
def __init__(self):
population_size = 52 #must be divisible by 4 if doing mating
number_universe = 1 #10
factory = FactoryDefinition
mpi = False
super().__init__(population_size, number_universe, factory, mpi)
block_def = self.construct_block_def(nickname = "GaussBlock",
shape_def = BlockShapeMeta_Gaussian, #maybe have x2 num of gaussians so 20
operator_def = BlockOperators_Gaussian, #only 1 operator...gauss taking in th right args
argument_def = BlockArguments_Gaussian, #0-100 floats, 0-1 floats, 0-100 ints
evaluate_def = BlockEvaluate_Standard, #ya standard eval
mutate_def = BlockMutate_NoFtn, #maybe not mutate ftn
mate_def = BlockMate_NoMate) #maybe not mate
self.construct_individual_def(block_defs = [block_def],
mutate_def = IndividualMutate_RollOnEachBlock,
mate_def = IndividualMate_RollOnEachBlock,
evaluate_def = IndividualEvaluate_Standard)
# where to put this?
self.construct_dataset()
def construct_dataset(self):
from misc import fake_mixturegauss
x, y, noisy, goal_features = fake_mixturegauss.main()
x = fake_mixturegauss.XLocations(x)
starting_sum = fake_mixturegauss.RollingSum(np.zeros(x.shape))
#self.data = data_loader.load_symbolicRegression([x, starting_sum], [y, noisy, goal_features])
self.train_data = ezData.ezData([x, starting_sum], [y, noisy, goal_features])
self.validate_data = None
def objective_functions(self, indiv):
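        # Fitness is a 3-tuple, all minimized: RMSE against the clean signal,
        # worst-case absolute error, and the active-node count of the evolved genome.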
if indiv.dead:
indiv.fitness.values = (np.inf, np.inf, np.inf)
else:
clean_y, noisy_y, goal_features = self.train_data.y
predict_y = indiv.output[0]
# how to extract the arguments to match to goal_features as well?
error = clean_y-predict_y
rms_error = np.sqrt(np.mean(np.square(error)))
max_error = np.max(np.abs(error))
# YO active nodes includes outputs and input nodes so 10 main nodes + 2 inputs + 1 output
#active_error = np.abs(10+2+1-len(indiv[0].active_nodes)) #maybe cheating by knowing the goal amount ahead of time
active_error = len(indiv[0].active_nodes)
indiv.fitness.values = (rms_error, max_error, active_error)
def check_convergence(self, universe):
GENERATION_LIMIT = 3 #1000
SCORE_MIN = 1e-1
# only going to look at the first objective value which is rmse
# CAREFUL, after we added the ids, the values are now strings not floats
min_firstobjective_index = universe.pop_fitness_scores[:,0].astype(float).argmin()
min_firstobjective = universe.pop_fitness_scores[min_firstobjective_index,:-1].astype(float)
logging.warning("Checking Convergence - generation %i, best score: %s" % (universe.generation, min_firstobjective))
if universe.generation >= GENERATION_LIMIT:
logging.warning("TERMINATING...reached generation limit.")
universe.converged = True
if min_firstobjective[0] < SCORE_MIN:
logging.warning("TERMINATING...reached minimum scores.")
universe.converged = True
def postprocess_generation(self, universe):
'''
I'd say just store an archive of scores
'''
logging.info("Post Processing Generation Run")
save_things.save_fitness_scores(universe)
ith_indiv, _ = self.get_best_indiv(universe, ith_obj=0)
best_indiv = universe.population.population[ith_indiv]
active_count = len(best_indiv[0].active_nodes) - self.indiv_def[0].input_count - self.indiv_def[0].output_count
if hasattr(self, 'roddcustom_bestindiv'):
self.roddcustom_bestindiv.append(best_indiv.id)
self.roddcustom_bestscore.append(best_indiv.fitness.values)
self.roddcustom_bestactive.append(active_count)
else:
self.roddcustom_bestindiv = [best_indiv.id]
self.roddcustom_bestscore = [best_indiv.fitness.values]
self.roddcustom_bestactive = [active_count]
fig, axes = plot_things.plot_init(nrow=2, ncol=1, figsize=(15,10), ylim=(0,self.train_data.y[0].max()*1.25)) #axes always 2dim
plot_things.plot_regression(axes[0,0], best_indiv, self)
plot_things.plot_gaussian(axes[1,0], best_indiv, self)
plot_things.plot_legend()
plot_things.plot_save(fig, name=os.path.join(universe.output_folder, "gen%04d_bestindv.jpg" % universe.generation))
def postprocess_universe(self, universe):
'''
        save the population (and custom run statistics) at the end of the universe run
'''
logging.info("Post Processing Universe Run")
save_things.save_population(universe)
save_things.save_population_asLisp(universe, self.indiv_def)
best_ids = np.array(self.roddcustom_bestindiv)
best_scores = np.array(self.roddcustom_bestscore)
best_activecount = np.array(self.roddcustom_bestactive)
# YO active nodes includes outputs and input nodes so 10 main nodes + 2 inputs + 1 output
output_best_file = os.path.join(universe.output_folder, "custom_stats.npz")
np.savez(output_best_file, ids=best_ids,
scores=best_scores,
active_count=best_activecount,
genome_size=np.array([self.indiv_def[0].main_count]))
# i guess i want to save all the roddcustom_ attributes
# then open all the values for all the universes for each of the different runs
# and plot the different number of genomes in one color
# shoot...if doing more than one universe, need to delete these
self.roddcustom_bestindiv = []
self.roddcustom_bestscore = []
self.roddcustom_bestactive = []
def plot_custom_stats(self, folders):
import glob
import matplotlib.pyplot as plt
if (type(folders) is str) and (os.path.isdir(folders)):
'''# then assume we are looking for folders within this single folder
poss_folders = os.listdir(folders)
folders = []
for poss in poss_folders:
if os.path.isdir(poss):
folders.append(poss)'''
# now that we are using glob below, we are all good...just make this into a list
folders = [folders]
elif type(folders) is list:
# then continue as is
pass
else:
print("we don't know how to handle type %s yet" % (type(folders)))
# now try to find 'custom_stats.npz' in the folders
stats = {}
for folder in folders:
npzs = glob.glob(os.path.join(folder,"*","custom_stats.npz"), recursive=True)
for npz in npzs:
data = np.load(npz)
genome_size = data['genome_size'][0]
if genome_size not in stats:
stats[genome_size] = {'ids': [],
'scores': [],
'active_count': []}
for key in ['ids','scores','active_count']:
stats[genome_size][key].append(data[key])
# now go plot
#plt.figure(figsize=(15,10))
matplotlib_colors = ['b','g','r','c','m','y']
fig, axes = plt.subplots(2, 1, figsize=(16,8))
for ith_size, size in enumerate(stats.keys()):
for row, key in enumerate(['scores','active_count']):
datas = stats[size][key]
for data in datas:
if key == 'scores':
data = data[:,0]
axes[row].plot(data, color=matplotlib_colors[ith_size], linestyle="-", alpha=0.5)
plt.show()
import pdb; pdb.set_trace()
plt.close()
| [
"misc.fake_mixturegauss.XLocations",
"numpy.array",
"logging.info",
"misc.fake_mixturegauss.main",
"matplotlib.pyplot.close",
"os.path.isdir",
"data.data_tools.ezData.ezData",
"post_process.plot_things.plot_regression",
"post_process.save_things.save_fitness_scores",
"numpy.abs",
"logging.warnin... | [((2961, 2985), 'misc.fake_mixturegauss.main', 'fake_mixturegauss.main', ([], {}), '()\n', (2983, 2985), False, 'from misc import fake_mixturegauss\n'), ((2998, 3029), 'misc.fake_mixturegauss.XLocations', 'fake_mixturegauss.XLocations', (['x'], {}), '(x)\n', (3026, 3029), False, 'from misc import fake_mixturegauss\n'), ((3230, 3289), 'data.data_tools.ezData.ezData', 'ezData.ezData', (['[x, starting_sum]', '[y, noisy, goal_features]'], {}), '([x, starting_sum], [y, noisy, goal_features])\n', (3243, 3289), False, 'from data.data_tools import ezData\n'), ((4604, 4724), 'logging.warning', 'logging.warning', (["('Checking Convergence - generation %i, best score: %s' % (universe.\n generation, min_firstobjective))"], {}), "('Checking Convergence - generation %i, best score: %s' % (\n universe.generation, min_firstobjective))\n", (4619, 4724), False, 'import logging\n'), ((5165, 5211), 'logging.info', 'logging.info', (['"""Post Processing Generation Run"""'], {}), "('Post Processing Generation Run')\n", (5177, 5211), False, 'import logging\n'), ((5220, 5261), 'post_process.save_things.save_fitness_scores', 'save_things.save_fitness_scores', (['universe'], {}), '(universe)\n', (5251, 5261), False, 'from post_process import save_things\n'), ((6090, 6147), 'post_process.plot_things.plot_regression', 'plot_things.plot_regression', (['axes[0, 0]', 'best_indiv', 'self'], {}), '(axes[0, 0], best_indiv, self)\n', (6117, 6147), False, 'from post_process import plot_things\n'), ((6155, 6210), 'post_process.plot_things.plot_gaussian', 'plot_things.plot_gaussian', (['axes[1, 0]', 'best_indiv', 'self'], {}), '(axes[1, 0], best_indiv, self)\n', (6180, 6210), False, 'from post_process import plot_things\n'), ((6218, 6243), 'post_process.plot_things.plot_legend', 'plot_things.plot_legend', ([], {}), '()\n', (6241, 6243), False, 'from post_process import plot_things\n'), ((6506, 6550), 'logging.info', 'logging.info', (['"""Post Processing Universe Run"""'], {}), "('Post Processing Universe Run')\n", (6518, 6550), False, 'import logging\n'), ((6559, 6596), 'post_process.save_things.save_population', 'save_things.save_population', (['universe'], {}), '(universe)\n', (6586, 6596), False, 'from post_process import save_things\n'), ((6605, 6665), 'post_process.save_things.save_population_asLisp', 'save_things.save_population_asLisp', (['universe', 'self.indiv_def'], {}), '(universe, self.indiv_def)\n', (6639, 6665), False, 'from post_process import save_things\n'), ((6686, 6721), 'numpy.array', 'np.array', (['self.roddcustom_bestindiv'], {}), '(self.roddcustom_bestindiv)\n', (6694, 6721), True, 'import numpy as np\n'), ((6744, 6779), 'numpy.array', 'np.array', (['self.roddcustom_bestscore'], {}), '(self.roddcustom_bestscore)\n', (6752, 6779), True, 'import numpy as np\n'), ((6807, 6843), 'numpy.array', 'np.array', (['self.roddcustom_bestactive'], {}), '(self.roddcustom_bestactive)\n', (6815, 6843), True, 'import numpy as np\n'), ((6972, 7028), 'os.path.join', 'os.path.join', (['universe.output_folder', '"""custom_stats.npz"""'], {}), "(universe.output_folder, 'custom_stats.npz')\n", (6984, 7028), False, 'import os\n'), ((9236, 9271), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(16, 8)'}), '(2, 1, figsize=(16, 8))\n', (9248, 9271), True, 'import matplotlib.pyplot as plt\n'), ((9660, 9670), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9668, 9670), True, 'import matplotlib.pyplot as plt\n'), ((9691, 9706), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', 
(9704, 9706), False, 'import pdb\n'), ((9715, 9726), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9724, 9726), True, 'import matplotlib.pyplot as plt\n'), ((216, 234), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (224, 234), False, 'from os.path import dirname, realpath\n'), ((3082, 3099), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (3090, 3099), True, 'import numpy as np\n'), ((4785, 4843), 'logging.warning', 'logging.warning', (['"""TERMINATING...reached generation limit."""'], {}), "('TERMINATING...reached generation limit.')\n", (4800, 4843), False, 'import logging\n'), ((4940, 4996), 'logging.warning', 'logging.warning', (['"""TERMINATING...reached minimum scores."""'], {}), "('TERMINATING...reached minimum scores.')\n", (4955, 4996), False, 'import logging\n'), ((7839, 7861), 'os.path.isdir', 'os.path.isdir', (['folders'], {}), '(folders)\n', (7852, 7861), False, 'import os\n'), ((3775, 3788), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (3781, 3788), True, 'import numpy as np\n'), ((6284, 6371), 'os.path.join', 'os.path.join', (['universe.output_folder', "('gen%04d_bestindv.jpg' % universe.generation)"], {}), "(universe.output_folder, 'gen%04d_bestindv.jpg' % universe.\n generation)\n", (6296, 6371), False, 'import os\n'), ((7246, 7286), 'numpy.array', 'np.array', (['[self.indiv_def[0].main_count]'], {}), '([self.indiv_def[0].main_count])\n', (7254, 7286), True, 'import numpy as np\n'), ((8585, 8630), 'os.path.join', 'os.path.join', (['folder', '"""*"""', '"""custom_stats.npz"""'], {}), "(folder, '*', 'custom_stats.npz')\n", (8597, 8630), False, 'import os\n'), ((8698, 8710), 'numpy.load', 'np.load', (['npz'], {}), '(npz)\n', (8705, 8710), True, 'import numpy as np\n'), ((3725, 3741), 'numpy.square', 'np.square', (['error'], {}), '(error)\n', (3734, 3741), True, 'import numpy as np\n')] |
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm, binom
from kmeans import kmeans
#########################
def adc_range(adc):
    # zeros_like copies the input dtype, so request float32 explicitly
adc_low = np.zeros_like(adc, dtype=np.float32)
adc_high = np.zeros_like(adc, dtype=np.float32)
adc_low[0] = -1e2
adc_high[-1] = 1e2
for s in range(len(adc) - 1):
adc_high[s] = (adc[s] + adc[s + 1]) / 2
adc_low[s + 1] = (adc[s] + adc[s + 1]) / 2
return adc_low, adc_high
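# Minimal usage sketch (hypothetical helper, not part of the original module):
def _demo_adc_range():
    lo, hi = adc_range(np.array([0., 1., 2., 3.]))
    # lo == [-100. , 0.5, 1.5, 2.5]; hi == [0.5, 1.5, 2.5, 100.]
    # i.e. midpoints between neighbouring levels, with +/-1e2 as open ends
    return lo, hi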
#########################
def adc_floor(adc):
    # zeros_like copies the input dtype, so request float32 explicitly
adc_thresh = np.zeros_like(adc, dtype=np.float32)
for s in range(len(adc) - 1):
adc_thresh[s] = (adc[s] + adc[s + 1]) / 2
adc_thresh[-1] = adc[-1]
return adc_thresh
#########################
def exp_err(s, p, var, adc, rpr, row):
assert (np.all(p <= 1.))
assert (len(s) == len(p))
adc = sorted(adc)
adc = np.reshape(adc, (-1, 1))
adc_low, adc_high = adc_range(adc)
pe = norm.cdf(adc_high, s, var * np.sqrt(s) + 1e-6) - norm.cdf(adc_low, s, var * np.sqrt(s) + 1e-6)
e = s - adc
    mse = np.sum(np.absolute(p * pe * e * row))  # expected absolute error (despite the name), probability- and row-weighted
return mse
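# Note: exp_err returns the expected row-weighted absolute quantization error
#     E = sum_s p(s) * sum_k P(adc_low[k] < s + noise < adc_high[k]) * |s - adc[k]| * row
# with read noise modelled as N(0, (var * sqrt(s))^2) via norm.cdf.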
#########################
def kmeans_rpr(low, high, params, adc_count, row_count, nrow, q):
rpr_lut = np.zeros(shape=(8, 8), dtype=np.int32)
for wb in range(params['bpw']):
for xb in range(params['bpa']):
rpr_lut[xb][wb] = params['adc']
##############################################
adc_state = np.zeros(shape=(params['adc'], params['adc'], params['adc'] + 1))
adc_thresh = np.zeros(shape=(params['adc'], params['adc'], params['adc'] + 1))
weight = np.arange(params['max_rpr']+1, dtype=np.float32)
nrow_array = np.sum(row_count * weight, axis=2) / (np.sum(row_count, axis=2) + 1e-6)
nrow_array = np.mean(nrow_array, axis=0)
nrow_array = np.ceil(nrow_array)
expected_cycles = np.ceil(nrow / params['wl']) * np.ceil(nrow_array)
rpr_dist = {}
for rpr in range(low, high + 1):
counts = np.sum(adc_count, axis=(0, 1))[rpr][0:rpr+1]
values = np.array(range(rpr+1))
probs = counts / np.sum(counts)
if rpr <= params['adc']:
centroids = np.arange(0, params['adc'] + 1, step=1, dtype=np.float32)
else:
centroids = sorted(kmeans(values=values, counts=counts, n_clusters=params['adc'] + 1))
mse = exp_err(s=values, p=probs, var=params['sigma'], adc=centroids, rpr=rpr, row=expected_cycles[rpr])
rpr_dist[rpr] = (mse, centroids)
for wb in range(params['bpw']):
for xb in range(params['bpa']):
for rpr in range(low, high + 1):
scale = 2**wb * 2**xb
mse, centroids = rpr_dist[rpr]
scaled_mse = (scale / q) * 64. * mse
if (rpr == low) or (scaled_mse < params['thresh']):
rpr_lut[xb][wb] = rpr
adc_state[xb][wb] = 4 * np.array(centroids)
adc_thresh[xb][wb] = adc_floor(centroids)
if rpr == 1: adc_thresh[xb][wb][0] = 0.2
return rpr_lut, adc_state, adc_thresh
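# Note: the selection loop above keeps the largest rpr in [low, high] whose
# bit-position-scaled error (scale / q) * 64. * mse stays below params['thresh'];
# rpr == low is always accepted so every (xb, wb) entry is filled.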
#########################
| [
"numpy.mean",
"numpy.ceil",
"numpy.reshape",
"numpy.sqrt",
"numpy.absolute",
"kmeans.kmeans",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.all",
"numpy.zeros_like",
"numpy.arange"
] | [((254, 290), 'numpy.zeros_like', 'np.zeros_like', (['adc'], {'dtype': 'np.float32'}), '(adc, dtype=np.float32)\n', (267, 290), True, 'import numpy as np\n'), ((306, 342), 'numpy.zeros_like', 'np.zeros_like', (['adc'], {'dtype': 'np.float32'}), '(adc, dtype=np.float32)\n', (319, 342), True, 'import numpy as np\n'), ((695, 731), 'numpy.zeros_like', 'np.zeros_like', (['adc'], {'dtype': 'np.float32'}), '(adc, dtype=np.float32)\n', (708, 731), True, 'import numpy as np\n'), ((957, 973), 'numpy.all', 'np.all', (['(p <= 1.0)'], {}), '(p <= 1.0)\n', (963, 973), True, 'import numpy as np\n'), ((1037, 1061), 'numpy.reshape', 'np.reshape', (['adc', '(-1, 1)'], {}), '(adc, (-1, 1))\n', (1047, 1061), True, 'import numpy as np\n'), ((1395, 1433), 'numpy.zeros', 'np.zeros', ([], {'shape': '(8, 8)', 'dtype': 'np.int32'}), '(shape=(8, 8), dtype=np.int32)\n', (1403, 1433), True, 'import numpy as np\n'), ((1623, 1688), 'numpy.zeros', 'np.zeros', ([], {'shape': "(params['adc'], params['adc'], params['adc'] + 1)"}), "(shape=(params['adc'], params['adc'], params['adc'] + 1))\n", (1631, 1688), True, 'import numpy as np\n'), ((1706, 1771), 'numpy.zeros', 'np.zeros', ([], {'shape': "(params['adc'], params['adc'], params['adc'] + 1)"}), "(shape=(params['adc'], params['adc'], params['adc'] + 1))\n", (1714, 1771), True, 'import numpy as np\n'), ((1786, 1836), 'numpy.arange', 'np.arange', (["(params['max_rpr'] + 1)"], {'dtype': 'np.float32'}), "(params['max_rpr'] + 1, dtype=np.float32)\n", (1795, 1836), True, 'import numpy as np\n'), ((1941, 1968), 'numpy.mean', 'np.mean', (['nrow_array'], {'axis': '(0)'}), '(nrow_array, axis=0)\n', (1948, 1968), True, 'import numpy as np\n'), ((1986, 2005), 'numpy.ceil', 'np.ceil', (['nrow_array'], {}), '(nrow_array)\n', (1993, 2005), True, 'import numpy as np\n'), ((1240, 1269), 'numpy.absolute', 'np.absolute', (['(p * pe * e * row)'], {}), '(p * pe * e * row)\n', (1251, 1269), True, 'import numpy as np\n'), ((1852, 1886), 'numpy.sum', 'np.sum', (['(row_count * weight)'], {'axis': '(2)'}), '(row_count * weight, axis=2)\n', (1858, 1886), True, 'import numpy as np\n'), ((2033, 2061), 'numpy.ceil', 'np.ceil', (["(nrow / params['wl'])"], {}), "(nrow / params['wl'])\n", (2040, 2061), True, 'import numpy as np\n'), ((2064, 2083), 'numpy.ceil', 'np.ceil', (['nrow_array'], {}), '(nrow_array)\n', (2071, 2083), True, 'import numpy as np\n'), ((1890, 1915), 'numpy.sum', 'np.sum', (['row_count'], {'axis': '(2)'}), '(row_count, axis=2)\n', (1896, 1915), True, 'import numpy as np\n'), ((2267, 2281), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (2273, 2281), True, 'import numpy as np\n'), ((2348, 2405), 'numpy.arange', 'np.arange', (['(0)', "(params['adc'] + 1)"], {'step': '(1)', 'dtype': 'np.float32'}), "(0, params['adc'] + 1, step=1, dtype=np.float32)\n", (2357, 2405), True, 'import numpy as np\n'), ((2157, 2187), 'numpy.sum', 'np.sum', (['adc_count'], {'axis': '(0, 1)'}), '(adc_count, axis=(0, 1))\n', (2163, 2187), True, 'import numpy as np\n'), ((2451, 2517), 'kmeans.kmeans', 'kmeans', ([], {'values': 'values', 'counts': 'counts', 'n_clusters': "(params['adc'] + 1)"}), "(values=values, counts=counts, n_clusters=params['adc'] + 1)\n", (2457, 2517), False, 'from kmeans import kmeans\n'), ((1139, 1149), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (1146, 1149), True, 'import numpy as np\n'), ((1187, 1197), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (1194, 1197), True, 'import numpy as np\n'), ((3121, 3140), 'numpy.array', 'np.array', (['centroids'], {}), '(centroids)\n', 
(3129, 3140), True, 'import numpy as np\n')] |
import io
import numpy as np
import pdb
import sys
import tensorflow as tf
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
'''
`variables` is the number of parallel input sequences per sample: each one is
embedded and run through its own LSTM, and the per-variable outputs are stacked
for the final dense layer. Note that only share_embeddings=True is implemented;
the False path would leave embedding_layer undefined.
'''
def get_char_LSTM(batch_size, vocab_size=11, embedding_size=512, variables=1, bidirectional=True, share_embeddings=True):
lstm_features = 512
if share_embeddings:
embedding_layer = tf.keras.layers.Embedding(
vocab_size, embedding_size, embeddings_initializer='uniform',
input_length=None
)
inputs = tf.keras.layers.Input(shape=(variables, None, ), name='input', dtype='int64')
embeddings = embedding_layer(inputs)
print(embeddings)
embeddings = tf.unstack(embeddings, variables, axis=1)
print(embeddings)
lstm_outs = []
for i in range(variables):
if bidirectional:
lstm_outs.append(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm_features)) (embeddings[i]))
else:
lstm_outs.append(tf.keras.layers.LSTM(lstm_features) (embeddings[i]))
print(lstm_outs)
lstm_out = tf.stack(lstm_outs, 1)
print('variables', variables)
lstm_out = tf.reshape(lstm_out, [-1, variables * lstm_features])
predictions = tf.keras.layers.Dense(2, activation='softmax', dtype='float32')(lstm_out)
# if bidirectional:
# lstm_outs = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(lstm_features)) (embeddings)
# else:
# lstm_outs = tf.keras.layers.LSTM(lstm_features) (embeddings)
# layer1 = tf.keras.layers.Dense(128, activation='relu')(lstm_outs)
# predictions = tf.keras.layers.Dense(2, activation='softmax', dtype='float32')(lstm_outs)
# # Build model
return tf.keras.models.Model(inputs=inputs, outputs=predictions), lstm_out
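# Minimal usage sketch (hypothetical; the shapes are the point, values are
# arbitrary):
#   model, lstm_out = get_char_LSTM(32, variables=2, bidirectional=False)
#   probs = model.predict(np.random.randint(11, size=(8, 2, 5)))
#   # probs.shape == (8, 2): softmax over the two classes per sample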
def loss(model, x, y, training, loss_object):
# training=training is needed only if there are layers with different
# behavior during training versus inference (e.g. Dropout).
y_ = model(x, training=training)
# print('x:', x, x.shape)
# print('y:', y_)
return loss_object(y_true=y, y_pred=y_)
def grad(model, inputs, targets, loss_object):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets, True, loss_object)
return loss_value, tape.gradient(loss_value, model.trainable_variables)
def model_fit(model, x_train, y_train, loss_object, optimizer, epochs=1):
# pdb.set_trace()
# Create batches
for e in range(epochs):
for i in range(0, len(x_train), 32):
batch_x = x_train[i : i + 32]
# print(batch_x)
batch_y = y_train[i : i + 32]
loss_value, grads = grad(model, batch_x, batch_y, loss_object)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# for idx, x in enumerate(x_train):
# print(x)
# y = y_train[idx]
# # Optimize the model
# loss_value, grads = grad(model, x, y, loss_object)
# print(loss_value)
# optimizer.apply_gradients(zip(grads, model.trainable_variables))
def accuracy(predictions, values):
correct = 0.0
for prediction, value in zip(predictions, values):
if prediction - value == 0.0:
correct += 1
return correct / len(predictions)
def main():
batch_size = 32
maxlen = batch_size * 500
variables = 2
x_dim = 2
_type = sys.argv[1]
# input_size = 2
if _type == 'func':
# model = get_model_LSTM(10)
num_var = 1
model, lstm_out = get_char_LSTM(batch_size, bidirectional=False, variables=variables)
print('lstm_out:', lstm_out)
x_train = np.random.randint(11, size=(maxlen, variables, x_dim))
        y_train = [1 if np.sum(x) < 40 else 0 for x in x_train]  # four draws from [0, 10] almost never sum to 40, so labels skew heavily toward 1
print(x_train[: batch_size], [np.sum(x) for x in x_train[: batch_size]])
print(y_train[: 50])
# y_train = np.random.randint(2, size=(maxlen))
# print(x_train)
p = model.predict(x_train)
print(p[: batch_size])
x_test = np.random.randint(11, size=(batch_size * 100, variables, x_dim))
y_test = [1 if np.sum(x) < 40 else 0 for x in x_test]
# y_test = np.random.randint(2, size=(32))
import pdb
pdb.set_trace()
print('y_test', y_test)
prediction_probs = model.predict(x_test)
predictions = [int(np.round(p[1])) for p in prediction_probs]
print(prediction_probs)
print(predictions)
acc = accuracy(predictions, y_test)
print('Accuracy:', acc)
# loss_obj = tfa.losses.TripletSemiHardLoss()
    # the network ends in a softmax layer (see get_char_LSTM), so its outputs
    # are probabilities, not logits
    loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
optimizer_obj = tf.keras.optimizers.Adam(0.001)
# Compile the model
# print(model.summary())
# model.compile(
# optimizer=tf.keras.optimizers.Adam(0.001),
# loss=tfa.losses.TripletSemiHardLoss())
# model.compile(
# optimizer=tf.keras.optimizers.Adam(0.001),
# loss=tf.keras.losses.BinaryCrossentropy())
    if _type == 'func' or _type == 'func_star':
        # note: model/x_train/y_train are only built in the 'func' branch above,
        # so the 'func_star' path alone would raise a NameError here
for e in range(3):
model_fit(model, x_train, y_train, loss_obj, optimizer_obj)
# history = model.fit(x_train, y_train, batch_size=32, epochs=8)
prediction_probs = model.predict(x_test)
predictions = [int(np.round(p[1])) for p in prediction_probs]
print(prediction_probs)
print(predictions)
acc = accuracy(predictions, y_test)
print('Accuracy:', acc)
import pdb
pdb.set_trace()
prediction_probs = model.predict(x_test)
np.savetxt("vecs.tsv", prediction_probs, delimiter='\t')
# Save test embeddings for visualization in projector
# test_loss = model.evaluate(x_test, y_test)
# print('test loss: ' + str(test_loss))
# np.savetxt("vecs.tsv", results, delimiter='\t')
# out_m = io.open('meta.tsv', 'w', encoding='utf-8')
# for img, labels in tfds.as_numpy(test_dataset):
# [out_m.write(str(x) + "\n") for x in labels]
# out_m.close()
# try:
# from google.colab import files
# files.download('vecs.tsv')
# files.download('meta.tsv')
# except:
# pass
if __name__ == "__main__":
main()
| [
"tensorflow.unstack",
"tensorflow.keras.layers.Input",
"numpy.round",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Embedding",
"tensorflow.GradientTape",
"numpy.random.randint",
"tensorflow.keras.layers.Dense",
"numpy.sum",
... | [((4705, 4768), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4750, 4768), True, 'import tensorflow as tf\n'), ((4789, 4820), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (4813, 4820), True, 'import tensorflow as tf\n'), ((371, 481), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['vocab_size', 'embedding_size'], {'embeddings_initializer': '"""uniform"""', 'input_length': 'None'}), "(vocab_size, embedding_size,\n embeddings_initializer='uniform', input_length=None)\n", (396, 481), True, 'import tensorflow as tf\n'), ((529, 604), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(variables, None)', 'name': '"""input"""', 'dtype': '"""int64"""'}), "(shape=(variables, None), name='input', dtype='int64')\n", (550, 604), True, 'import tensorflow as tf\n'), ((699, 740), 'tensorflow.unstack', 'tf.unstack', (['embeddings', 'variables'], {'axis': '(1)'}), '(embeddings, variables, axis=1)\n', (709, 740), True, 'import tensorflow as tf\n'), ((1129, 1151), 'tensorflow.stack', 'tf.stack', (['lstm_outs', '(1)'], {}), '(lstm_outs, 1)\n', (1137, 1151), True, 'import tensorflow as tf\n'), ((1209, 1262), 'tensorflow.reshape', 'tf.reshape', (['lstm_out', '[-1, variables * lstm_features]'], {}), '(lstm_out, [-1, variables * lstm_features])\n', (1219, 1262), True, 'import tensorflow as tf\n'), ((1796, 1853), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'inputs', 'outputs': 'predictions'}), '(inputs=inputs, outputs=predictions)\n', (1817, 1853), True, 'import tensorflow as tf\n'), ((2241, 2258), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2256, 2258), True, 'import tensorflow as tf\n'), ((3736, 3790), 'numpy.random.randint', 'np.random.randint', (['(11)'], {'size': '(maxlen, variables, x_dim)'}), '(11, size=(maxlen, variables, x_dim))\n', (3753, 3790), True, 'import numpy as np\n'), ((4130, 4194), 'numpy.random.randint', 'np.random.randint', (['(11)'], {'size': '(batch_size * 100, variables, x_dim)'}), '(11, size=(batch_size * 100, variables, x_dim))\n', (4147, 4194), True, 'import numpy as np\n'), ((4335, 4350), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (4348, 4350), False, 'import pdb\n'), ((5654, 5669), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (5667, 5669), False, 'import pdb\n'), ((5727, 5783), 'numpy.savetxt', 'np.savetxt', (['"""vecs.tsv"""', 'prediction_probs'], {'delimiter': '"""\t"""'}), "('vecs.tsv', prediction_probs, delimiter='\\t')\n", (5737, 5783), True, 'import numpy as np\n'), ((1285, 1348), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""', 'dtype': '"""float32"""'}), "(2, activation='softmax', dtype='float32')\n", (1306, 1348), True, 'import tensorflow as tf\n'), ((3893, 3902), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (3899, 3902), True, 'import numpy as np\n'), ((4459, 4473), 'numpy.round', 'np.round', (['p[1]'], {}), '(p[1])\n', (4467, 4473), True, 'import numpy as np\n'), ((3815, 3824), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (3821, 3824), True, 'import numpy as np\n'), ((4218, 4227), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (4224, 4227), True, 'import numpy as np\n'), ((5424, 5438), 'numpy.round', 'np.round', (['p[1]'], {}), '(p[1])\n', (5432, 5438), True, 'import numpy as np\n'), ((1032, 1067), 'tensorflow.keras.layers.LSTM', 
'tf.keras.layers.LSTM', (['lstm_features'], {}), '(lstm_features)\n', (1052, 1067), True, 'import tensorflow as tf\n'), ((927, 962), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['lstm_features'], {}), '(lstm_features)\n', (947, 962), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
"""Detect vanishing points in non-Manhattan world.
Usage:
eval_nyu.py [options] <yaml-config> <checkpoint>
eval_nyu.py ( -h | --help )
Arguments:
<yaml-config> Path to the yaml hyper-parameter file
<checkpoint> Path to the checkpoint
Options:
-h --help Show this screen
   -d --devices <devices>      Comma separated GPU devices [default: 0]
-o --output <output> Path to the output AA curve [default: error.npz]
--dump <output-dir> Optionally, save the vanishing points to npz format.
--noimshow Do not show result
"""
import os
import sys
import math
import shlex
import pprint
import random
import os.path as osp
import threading
import subprocess
import time
import torch
import matplotlib as mpl
import skimage.io
import numpy as np
import numpy.linalg as LA
import scipy.spatial.distance as scipy_spatial_dist
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
from tqdm import tqdm
from docopt import docopt
import scipy.io as sio
import vpd
import vpd.models.vanishing_net as vn
from vpd.config import C, M
from vpd.datasets import ScanNetDataset, WireframeDataset, YUDDataset, NYUDataset
from vpd.models.sphere.sphere_utils import gold_spiral_sampling_patch
def topk_orthogonal_vps(scores, xyz, num_vps=3):
index = np.argsort(-scores)
vps_idx = [index[0]]
for i in index[1:]:
if len(vps_idx) == num_vps:
break
        # cosine distance: input x: m x p, y: n x p; output: m x n
        ### scipy convention: same 0, opposite 2, orthogonal 1; dist = 1 - AB/(|A||B|)
        dist_cos = scipy_spatial_dist.cdist(xyz[vps_idx], xyz[i][None, :], 'cosine')
        ### after the flip below: same 1, opposite -1, orthogonal 0
dist_cos = np.abs(-1.0*dist_cos+1.0)
dist_cos_arc = np.min(np.arccos(dist_cos))
if dist_cos_arc >= np.pi/num_vps:
vps_idx.append(i)
else:
continue
vps_pd = xyz[vps_idx]
return vps_pd, vps_idx
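# Minimal usage sketch (hypothetical helper, not part of the original script):
# a near-duplicate of the top-scoring direction is rejected because its arc
# distance to the already-selected one is below pi/num_vps.
def _demo_topk_orthogonal_vps():
    xyz = np.array([[1., 0., 0.], [0.9, 0.1, 0.], [0., 1., 0.], [0., 0., 1.]])
    xyz /= LA.norm(xyz, axis=1, keepdims=True)
    scores = np.array([0.9, 0.8, 0.7, 0.6])
    vps_pd, vps_idx = topk_orthogonal_vps(scores, xyz)
    # vps_idx == [0, 2, 3]: the three coordinate axes survive
    return vps_pd, vps_idx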
def compute_error(vps_pd, vps_gt):
error = np.arccos(np.abs(vps_gt @ vps_pd.transpose()).clip(max=1))
error = error.min(axis=1) / np.pi * 180.0 # num_pd x num_gt, axis=1
return error.flatten()
def AA(x, y, threshold):
index = np.searchsorted(x, threshold)
x = np.concatenate([x[:index], [threshold]])
y = np.concatenate([y[:index], [threshold]])
return ((x[1:] - x[:-1]) * y[:-1]).sum() / threshold
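# Worked example: for sorted errors x = [1, 2] with cumulative recall
# y = [0.5, 1.0] and threshold 3, the step function integrates to
# ((2 - 1) * 0.5 + (3 - 2) * 1.0) / 3 = 0.5.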
def main():
args = docopt(__doc__)
config_file = args["<yaml-config>"]
C.update(C.from_yaml(filename=config_file))
C.model.im2col_step = 32 # override im2col_step for evaluation
M.update(C.model)
pprint.pprint(C, indent=4)
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
    # uncomment to render offscreen, e.g. when only saving plots without a display:
    # os.environ['QT_QPA_PLATFORM'] = 'offscreen'
device_name = "cpu"
num_gpus = args["--devices"].count(",") + 1
os.environ["CUDA_VISIBLE_DEVICES"] = args["--devices"]
if torch.cuda.is_available():
device_name = "cuda"
torch.backends.cudnn.deterministic = True
torch.cuda.manual_seed(0)
print("Let's use", torch.cuda.device_count(), "GPU(s)!")
for k in range(0, torch.cuda.device_count()):
print('kth, device name', k, torch.cuda.get_device_name(k))
else:
print("CUDA is not available")
device = torch.device(device_name)
npzfile = np.load(C.io.ht_mapping, allow_pickle=True)
ht_mapping = npzfile['ht_mapping']
ht_mapping[:,2] = npzfile['rho_res'].item() - np.abs(ht_mapping[:,2])
ht_mapping[:,2] /= npzfile['rho_res'].item()
vote_ht_dict={}
vote_ht_dict["vote_mapping"]= torch.tensor(ht_mapping, requires_grad=False).float().contiguous()
vote_ht_dict["im_size"]= (npzfile['rows'], npzfile['cols'])
vote_ht_dict["ht_size"]= (npzfile['h'], npzfile['w'])
print('vote_ht_dict memory MB', vote_ht_dict["vote_mapping"].size(),
vote_ht_dict["vote_mapping"].element_size() * vote_ht_dict["vote_mapping"].nelement() / (1024 * 1024))
npzfile = np.load(C.io.sphere_mapping, allow_pickle=True)
sphere_neighbors = npzfile['sphere_neighbors']
vote_sphere_dict={}
vote_sphere_dict["vote_mapping"]=torch.tensor(sphere_neighbors, requires_grad=False).float().contiguous()
vote_sphere_dict["ht_size"]=(npzfile['h'], npzfile['w'])
vote_sphere_dict["sphere_size"]=npzfile['num_points']
print('vote_sphere_dict memory MB', vote_sphere_dict["sphere_size"], vote_sphere_dict["vote_mapping"].size(),
vote_sphere_dict["vote_mapping"].element_size() * vote_sphere_dict["vote_mapping"].nelement() / (1024 * 1024))
# 2. model
if M.backbone == "stacked_hourglass":
backbone = vpd.models.hg(
planes=128, depth=M.depth, num_stacks=M.num_stacks, num_blocks=M.num_blocks
)
else:
raise NotImplementedError
model = vpd.models.VanishingNet(backbone, vote_ht_dict, vote_sphere_dict)
model = model.to(device)
model = torch.nn.DataParallel(
model, device_ids=list(range(args["--devices"].count(",") + 1))
)
if args["<checkpoint>"] =="None":
checkpoint = None
else:
print('args["<checkpoint>"]', args["<checkpoint>"])
checkpoint = torch.load(args["<checkpoint>"], map_location=lambda storage, loc: storage)
print('checkpoint', checkpoint["iteration"], checkpoint["epoch"])
# print('checkpoint', checkpoint["iteration"])
model.load_state_dict(checkpoint["model_state_dict"])
model.eval()
print('model', model)
##### number of parameters in a model
total_params = sum(p.numel() for p in model.parameters())
##### number of trainable parameters in a model
train_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('num of total parameters', total_params)
print('num of trainable parameters', train_params)
if C.io.dataset.upper() == "WIREFRAME":
Dataset = WireframeDataset
elif C.io.dataset.upper() == "SCANNET":
Dataset = ScanNetDataset
elif C.io.dataset.upper() == "NYU":
Dataset = NYUDataset
elif C.io.dataset.upper() == "YUD":
Dataset = YUDDataset
else:
raise NotImplementedError
# assert C.io.dataset.upper() in ["NYU", "YUD"]
assert C.io.dataset.upper() in ["NYU"]
loader = torch.utils.data.DataLoader(
Dataset(C.io.datadir, split="test"),
batch_size=M.batch_size * num_gpus,
shuffle=False,
num_workers=C.io.num_workers if os.name != "nt" else 0,
pin_memory=True,
)
print('loader size', len(loader))
if args["--dump"] is not None:
os.makedirs(args["--dump"], exist_ok=True)
xyz = gold_spiral_sampling_patch(np.array([0, 0, 1]), alpha=90.0 * np.pi / 180., num_pts=C.io.num_nodes)
for batch_idx, (images, targets, vpts_gt) in enumerate(tqdm(loader)):
images = images.to(device)
targets = targets.to(device)
input_dict = {"image": images, "target": targets, "eval": True}
with torch.no_grad():
result = model(input_dict)
preds = result["prediction"].cpu().numpy()
targets = targets.cpu().numpy()
vpts_gt = vpts_gt.cpu().numpy()
for idx, (pred, target, vpt_gt) in enumerate(zip(preds, targets, vpts_gt)):
### save predictions at first and then cluster VPs
if args["--dump"]:
index = batch_idx * M.batch_size + idx
np.savez(
os.path.join(args["--dump"], f"{index:06d}.npz"),
vpts_sphere=pred,
)
if __name__ == "__main__":
main()
| [
"vpd.config.M.update",
"numpy.arccos",
"vpd.config.C.io.dataset.upper",
"torch.cuda.device_count",
"numpy.argsort",
"numpy.array",
"torch.cuda.is_available",
"pprint.pprint",
"docopt.docopt",
"vpd.models.VanishingNet",
"numpy.searchsorted",
"numpy.random.seed",
"numpy.concatenate",
"numpy.... | [((1383, 1402), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (1393, 1402), True, 'import numpy as np\n'), ((2287, 2316), 'numpy.searchsorted', 'np.searchsorted', (['x', 'threshold'], {}), '(x, threshold)\n', (2302, 2316), True, 'import numpy as np\n'), ((2325, 2365), 'numpy.concatenate', 'np.concatenate', (['[x[:index], [threshold]]'], {}), '([x[:index], [threshold]])\n', (2339, 2365), True, 'import numpy as np\n'), ((2374, 2414), 'numpy.concatenate', 'np.concatenate', (['[y[:index], [threshold]]'], {}), '([y[:index], [threshold]])\n', (2388, 2414), True, 'import numpy as np\n'), ((2497, 2512), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (2503, 2512), False, 'from docopt import docopt\n'), ((2673, 2690), 'vpd.config.M.update', 'M.update', (['C.model'], {}), '(C.model)\n', (2681, 2690), False, 'from vpd.config import C, M\n'), ((2695, 2721), 'pprint.pprint', 'pprint.pprint', (['C'], {'indent': '(4)'}), '(C, indent=4)\n', (2708, 2721), False, 'import pprint\n'), ((2727, 2741), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (2738, 2741), False, 'import random\n'), ((2746, 2763), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2760, 2763), True, 'import numpy as np\n'), ((2768, 2788), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2785, 2788), False, 'import torch\n'), ((3020, 3045), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3043, 3045), False, 'import torch\n'), ((3413, 3438), 'torch.device', 'torch.device', (['device_name'], {}), '(device_name)\n', (3425, 3438), False, 'import torch\n'), ((3454, 3497), 'numpy.load', 'np.load', (['C.io.ht_mapping'], {'allow_pickle': '(True)'}), '(C.io.ht_mapping, allow_pickle=True)\n', (3461, 3497), True, 'import numpy as np\n'), ((4105, 4152), 'numpy.load', 'np.load', (['C.io.sphere_mapping'], {'allow_pickle': '(True)'}), '(C.io.sphere_mapping, allow_pickle=True)\n', (4112, 4152), True, 'import numpy as np\n'), ((4941, 5006), 'vpd.models.VanishingNet', 'vpd.models.VanishingNet', (['backbone', 'vote_ht_dict', 'vote_sphere_dict'], {}), '(backbone, vote_ht_dict, vote_sphere_dict)\n', (4964, 5006), False, 'import vpd\n'), ((1671, 1736), 'scipy.spatial.distance.cdist', 'scipy_spatial_dist.cdist', (['xyz[vps_idx]', 'xyz[i][None, :]', '"""cosine"""'], {}), "(xyz[vps_idx], xyz[i][None, :], 'cosine')\n", (1695, 1736), True, 'import scipy.spatial.distance as scipy_spatial_dist\n'), ((1803, 1832), 'numpy.abs', 'np.abs', (['(-1.0 * dist_cos + 1.0)'], {}), '(-1.0 * dist_cos + 1.0)\n', (1809, 1832), True, 'import numpy as np\n'), ((2566, 2599), 'vpd.config.C.from_yaml', 'C.from_yaml', ([], {'filename': 'config_file'}), '(filename=config_file)\n', (2577, 2599), False, 'from vpd.config import C, M\n'), ((3134, 3159), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(0)'], {}), '(0)\n', (3156, 3159), False, 'import torch\n'), ((3587, 3611), 'numpy.abs', 'np.abs', (['ht_mapping[:, 2]'], {}), '(ht_mapping[:, 2])\n', (3593, 3611), True, 'import numpy as np\n'), ((4771, 4865), 'vpd.models.hg', 'vpd.models.hg', ([], {'planes': '(128)', 'depth': 'M.depth', 'num_stacks': 'M.num_stacks', 'num_blocks': 'M.num_blocks'}), '(planes=128, depth=M.depth, num_stacks=M.num_stacks,\n num_blocks=M.num_blocks)\n', (4784, 4865), False, 'import vpd\n'), ((5305, 5380), 'torch.load', 'torch.load', (["args['<checkpoint>']"], {'map_location': '(lambda storage, loc: storage)'}), "(args['<checkpoint>'], map_location=lambda storage, loc: storage)\n", (5315, 5380), False, 
'import torch\n'), ((5967, 5987), 'vpd.config.C.io.dataset.upper', 'C.io.dataset.upper', ([], {}), '()\n', (5985, 5987), False, 'from vpd.config import C, M\n'), ((6362, 6382), 'vpd.config.C.io.dataset.upper', 'C.io.dataset.upper', ([], {}), '()\n', (6380, 6382), False, 'from vpd.config import C, M\n'), ((6726, 6768), 'os.makedirs', 'os.makedirs', (["args['--dump']"], {'exist_ok': '(True)'}), "(args['--dump'], exist_ok=True)\n", (6737, 6768), False, 'import os\n'), ((6807, 6826), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6815, 6826), True, 'import numpy as np\n'), ((6939, 6951), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (6943, 6951), False, 'from tqdm import tqdm\n'), ((1860, 1879), 'numpy.arccos', 'np.arccos', (['dist_cos'], {}), '(dist_cos)\n', (1869, 1879), True, 'import numpy as np\n'), ((3187, 3212), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3210, 3212), False, 'import torch\n'), ((3251, 3276), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3274, 3276), False, 'import torch\n'), ((6048, 6068), 'vpd.config.C.io.dataset.upper', 'C.io.dataset.upper', ([], {}), '()\n', (6066, 6068), False, 'from vpd.config import C, M\n'), ((7112, 7127), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7125, 7127), False, 'import torch\n'), ((3320, 3349), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['k'], {}), '(k)\n', (3346, 3349), False, 'import torch\n'), ((6125, 6145), 'vpd.config.C.io.dataset.upper', 'C.io.dataset.upper', ([], {}), '()\n', (6143, 6145), False, 'from vpd.config import C, M\n'), ((3714, 3759), 'torch.tensor', 'torch.tensor', (['ht_mapping'], {'requires_grad': '(False)'}), '(ht_mapping, requires_grad=False)\n', (3726, 3759), False, 'import torch\n'), ((4265, 4316), 'torch.tensor', 'torch.tensor', (['sphere_neighbors'], {'requires_grad': '(False)'}), '(sphere_neighbors, requires_grad=False)\n', (4277, 4316), False, 'import torch\n'), ((6194, 6214), 'vpd.config.C.io.dataset.upper', 'C.io.dataset.upper', ([], {}), '()\n', (6212, 6214), False, 'from vpd.config import C, M\n'), ((7579, 7627), 'os.path.join', 'os.path.join', (["args['--dump']", 'f"""{index:06d}.npz"""'], {}), "(args['--dump'], f'{index:06d}.npz')\n", (7591, 7627), False, 'import os\n')] |
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
import numpy as np
from P2_get_data import get_data
epochs = 150
batch_size = 100
time_steps = 3
def LSTM_train(x_train, y_train, x_test, y_test):
model = Sequential()
model.add(LSTM(50, input_shape=(x_train.shape[1], x_train.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
history = model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(x_test, y_test),
verbose=1, shuffle=False)
# plot history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.ylabel('Loss')
plt.xlabel('Iteration')
plt.savefig('loss_{}.png'.format(time_steps))
plt.show()
return model
def LSTM_predict(model, scaler, index, x_train, x_test):
x_data = np.concatenate((x_train, x_test), axis = 0)
y_pred = model.predict(x_data)
x_data = x_data[:, 0, :].reshape(x_data.shape[0], x_data.shape[2])
    # column 0 of the scaled features is pollution; rebuild full-width rows so
    # the scaler can be inverted, then pull column 0 back out
    all_true = x_data
    all_pred = np.concatenate((y_pred, x_data[:, 1:]), axis=1)
    inv_y_true = scaler.inverse_transform(all_true)[:, 0]
    inv_y_pred = scaler.inverse_transform(all_pred)[:, 0]
x = np.arange(0, x_data.shape[0])
start = x_train.shape[0]
plt.plot(x[start: start+100], inv_y_pred[start: start+100], label='pred')
plt.plot(x[start: start + 100], inv_y_true[start: start + 100], label='true')
plt.ylabel('Pollution')
plt.xlabel('Datetime start from {}'.format(index[start]))
plt.legend()
plt.savefig('predict_{}.png'.format(time_steps))
plt.show()
RMSE(x_train.shape[0], inv_y_pred, inv_y_true)
def RMSE(size, inv_y_pred, inv_y_true):
# calculate RMSE
rmse = sqrt(mean_squared_error(inv_y_true[size:], inv_y_pred[size:]))
print('Test RMSE: %.3f' % rmse)
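# Note: RMSE = sqrt(mean((y_true - y_pred)^2)), evaluated only on the held-out
# span, i.e. indices >= len(x_train).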
if __name__ == '__main__':
scaler, index, x_train, y_train, x_test, y_test = get_data(train_size = 365*24*3, time_steps=time_steps)
print('Training data size: ', x_train.shape, y_train.shape)
print('Validation data size: ', x_test.shape, y_test.shape)
model = LSTM_train(x_train, y_train, x_test, y_test)
    LSTM_predict(model, scaler, index, x_train, x_test)
| [
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"P2_get_data.get_data",
"keras.models.Sequential",
"sklearn.metrics.mean_squared_error",
"keras.layers.LSTM",
"numpy.concatenate",
"keras.layers.Dense",
"matplotlib.pyplot.legend",
"matplotlib.pyp... | [((365, 377), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (375, 377), False, 'from keras.models import Sequential\n'), ((712, 760), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {'label': '"""train"""'}), "(history.history['loss'], label='train')\n", (720, 760), True, 'from matplotlib import pyplot as plt\n'), ((765, 816), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {'label': '"""test"""'}), "(history.history['val_loss'], label='test')\n", (773, 816), True, 'from matplotlib import pyplot as plt\n'), ((821, 833), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (831, 833), True, 'from matplotlib import pyplot as plt\n'), ((838, 856), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (848, 856), True, 'from matplotlib import pyplot as plt\n'), ((861, 884), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (871, 884), True, 'from matplotlib import pyplot as plt\n'), ((939, 949), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (947, 949), True, 'from matplotlib import pyplot as plt\n'), ((1040, 1081), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {'axis': '(0)'}), '((x_train, x_test), axis=0)\n', (1054, 1081), True, 'import numpy as np\n'), ((1228, 1275), 'numpy.concatenate', 'np.concatenate', (['(y_pred, x_data[:, 1:])'], {'axis': '(1)'}), '((y_pred, x_data[:, 1:]), axis=1)\n', (1242, 1275), True, 'import numpy as np\n'), ((1399, 1428), 'numpy.arange', 'np.arange', (['(0)', 'x_data.shape[0]'], {}), '(0, x_data.shape[0])\n', (1408, 1428), True, 'import numpy as np\n'), ((1463, 1538), 'matplotlib.pyplot.plot', 'plt.plot', (['x[start:start + 100]', 'inv_y_pred[start:start + 100]'], {'label': '"""pred"""'}), "(x[start:start + 100], inv_y_pred[start:start + 100], label='pred')\n", (1471, 1538), True, 'from matplotlib import pyplot as plt\n'), ((1541, 1616), 'matplotlib.pyplot.plot', 'plt.plot', (['x[start:start + 100]', 'inv_y_true[start:start + 100]'], {'label': '"""true"""'}), "(x[start:start + 100], inv_y_true[start:start + 100], label='true')\n", (1549, 1616), True, 'from matplotlib import pyplot as plt\n'), ((1623, 1646), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pollution"""'], {}), "('Pollution')\n", (1633, 1646), True, 'from matplotlib import pyplot as plt\n'), ((1713, 1725), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1723, 1725), True, 'from matplotlib import pyplot as plt\n'), ((1783, 1793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1791, 1793), True, 'from matplotlib import pyplot as plt\n'), ((2102, 2158), 'P2_get_data.get_data', 'get_data', ([], {'train_size': '(365 * 24 * 3)', 'time_steps': 'time_steps'}), '(train_size=365 * 24 * 3, time_steps=time_steps)\n', (2110, 2158), False, 'from P2_get_data import get_data\n'), ((392, 450), 'keras.layers.LSTM', 'LSTM', (['(50)'], {'input_shape': '(x_train.shape[1], x_train.shape[2])'}), '(50, input_shape=(x_train.shape[1], x_train.shape[2]))\n', (396, 450), False, 'from keras.layers import LSTM\n'), ((466, 474), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (471, 474), False, 'from keras.layers import Dense\n'), ((1925, 1981), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['inv_y_true[size:]', 'inv_y_pred[size:]'], {}), '(inv_y_true[size:], inv_y_pred[size:])\n', (1943, 1981), False, 'from sklearn.metrics import mean_squared_error\n')] |
import numpy as np
from sklearn.metrics import r2_score as sklearn_r2_score
from tensorflow import convert_to_tensor
from scikeras.wrappers import KerasRegressor
from .mlp_models import dynamic_regressor
def test_kerasregressor_r2_correctness():
"""Test custom R^2 implementation against scikit-learn's."""
n_samples = 50
datasets = []
y_true = np.arange(n_samples, dtype=float)
y_pred = y_true + 1
datasets.append((y_true.reshape(-1, 1), y_pred.reshape(-1, 1)))
y_true = np.random.random_sample(size=y_true.shape)
y_pred = np.random.random_sample(size=y_true.shape)
datasets.append((y_true.reshape(-1, 1), y_pred.reshape(-1, 1)))
def keras_backend_r2(y_true, y_pred):
"""Wrap Keras operations to numpy."""
y_true = convert_to_tensor(y_true)
y_pred = convert_to_tensor(y_pred)
return KerasRegressor.r_squared(y_true, y_pred).numpy()
for (y_true, y_pred) in datasets:
np.testing.assert_almost_equal(
keras_backend_r2(y_true, y_pred),
sklearn_r2_score(y_true, y_pred),
decimal=5,
)
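# Note: the statistic under test is the coefficient of determination,
# R^2 = 1 - sum((y - y_hat)^2) / sum((y - mean(y))^2).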
def test_kerasregressor_r2_as_metric():
"""Test custom R^2 implementation against scikit-learn's."""
est = KerasRegressor(
dynamic_regressor, metrics=[KerasRegressor.r_squared], epochs=10, random_state=0
)
y = np.random.randint(low=0, high=2, size=(1000,))
X = y.reshape((-1, 1))
est.fit(X, y)
current_score = est.score(X, y)
last_hist = est.history_["r_squared"][-1]
np.testing.assert_almost_equal(current_score, last_hist, decimal=3)
current_eval = est.model_.evaluate(X, y, return_dict=True)["r_squared"]
np.testing.assert_almost_equal(current_score, current_eval, decimal=3)
| [
"numpy.random.random_sample",
"scikeras.wrappers.KerasRegressor",
"numpy.random.randint",
"numpy.testing.assert_almost_equal",
"tensorflow.convert_to_tensor",
"scikeras.wrappers.KerasRegressor.r_squared",
"sklearn.metrics.r2_score",
"numpy.arange"
] | [((367, 400), 'numpy.arange', 'np.arange', (['n_samples'], {'dtype': 'float'}), '(n_samples, dtype=float)\n', (376, 400), True, 'import numpy as np\n'), ((506, 548), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'y_true.shape'}), '(size=y_true.shape)\n', (529, 548), True, 'import numpy as np\n'), ((562, 604), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'y_true.shape'}), '(size=y_true.shape)\n', (585, 604), True, 'import numpy as np\n'), ((1233, 1333), 'scikeras.wrappers.KerasRegressor', 'KerasRegressor', (['dynamic_regressor'], {'metrics': '[KerasRegressor.r_squared]', 'epochs': '(10)', 'random_state': '(0)'}), '(dynamic_regressor, metrics=[KerasRegressor.r_squared],\n epochs=10, random_state=0)\n', (1247, 1333), False, 'from scikeras.wrappers import KerasRegressor\n'), ((1353, 1399), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2)', 'size': '(1000,)'}), '(low=0, high=2, size=(1000,))\n', (1370, 1399), True, 'import numpy as np\n'), ((1533, 1600), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['current_score', 'last_hist'], {'decimal': '(3)'}), '(current_score, last_hist, decimal=3)\n', (1563, 1600), True, 'import numpy as np\n'), ((1682, 1752), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['current_score', 'current_eval'], {'decimal': '(3)'}), '(current_score, current_eval, decimal=3)\n', (1712, 1752), True, 'import numpy as np\n'), ((779, 804), 'tensorflow.convert_to_tensor', 'convert_to_tensor', (['y_true'], {}), '(y_true)\n', (796, 804), False, 'from tensorflow import convert_to_tensor\n'), ((822, 847), 'tensorflow.convert_to_tensor', 'convert_to_tensor', (['y_pred'], {}), '(y_pred)\n', (839, 847), False, 'from tensorflow import convert_to_tensor\n'), ((1049, 1081), 'sklearn.metrics.r2_score', 'sklearn_r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1065, 1081), True, 'from sklearn.metrics import r2_score as sklearn_r2_score\n'), ((863, 903), 'scikeras.wrappers.KerasRegressor.r_squared', 'KerasRegressor.r_squared', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (887, 903), False, 'from scikeras.wrappers import KerasRegressor\n')] |
from .conftest import base_config
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import openamundsen as oa
import openamundsen.errors as errors
import pandas as pd
import pytest
import xarray as xr
@pytest.mark.parametrize('fmt', ['netcdf', 'csv', 'memory'])
def test_formats(fmt, tmp_path):
config = base_config()
config.end_date = '2020-01-16'
config.results_dir = tmp_path
config.output_data.timeseries.format = fmt
config.output_data.timeseries.variables = [{'var': 'snow.num_layers'}]
model = oa.OpenAmundsen(config)
model.initialize()
model.run()
point_ids = ['bellavista', 'latschbloder', 'proviantdepot']
if fmt in ('netcdf', 'memory'):
if fmt == 'netcdf':
ds = xr.open_dataset(tmp_path / 'output_timeseries.nc')
elif fmt == 'memory':
ds = model.point_output.data
assert ds.time.to_index().equals(model.dates)
assert_array_equal(ds.point, point_ids)
assert_array_equal(
list(ds.coords.keys()),
['time', 'point', 'lon', 'lat', 'alt', 'x', 'y', 'soil_layer', 'snow_layer'],
)
assert ds.temp.dims == ('time', 'point')
assert ds.snow_thickness.dims == ('time', 'snow_layer', 'point')
assert ds.soil_temp.dims == ('time', 'soil_layer', 'point')
assert ds.temp.dtype == np.float32
assert np.issubdtype(ds.num_layers.dtype, np.integer)
assert np.all(ds.temp > 250.)
elif fmt == 'csv':
for point_id in point_ids:
assert (tmp_path / f'point_{point_id}.csv').exists()
df = pd.read_csv(tmp_path / 'point_bellavista.csv', index_col='time', parse_dates=True)
assert df.index.equals(model.dates)
assert df.temp.dtype == np.float64
assert np.issubdtype(df.num_layers.dtype, np.integer)
assert 'snow_thickness0' in df
assert np.all(df.temp > 250.)
def test_values():
config = base_config()
config.end_date = '2020-01-15'
model = oa.OpenAmundsen(config)
model.initialize()
point = 'proviantdepot'
row = int(model.meteo.sel(station=point).row)
col = int(model.meteo.sel(station=point).col)
data_temp = pd.Series(index=model.dates, dtype=float)
data_soil_temp1 = pd.Series(index=model.dates, dtype=float)
for date in model.dates:
model.run_single()
data_temp[date] = model.state.meteo.temp[row, col]
data_soil_temp1[date] = model.state.soil.temp[1, row, col]
ds = model.point_output.data.sel(point=point)
assert_allclose(ds.temp.values, data_temp)
assert_allclose(ds.soil_temp.isel(soil_layer=1).values, data_soil_temp1)
@pytest.mark.parametrize('write_freq', ['M', '7H', '3H', '10min'])
def test_write_freq(write_freq, tmp_path):
config = base_config()
config.end_date = '2020-01-15'
config.results_dir = tmp_path
config.output_data.timeseries.format = 'netcdf'
config.output_data.timeseries.write_freq = write_freq
model = oa.OpenAmundsen(config)
model.initialize()
model.run()
ds = xr.open_dataset(tmp_path / 'output_timeseries.nc')
assert ds.time.to_index().equals(model.dates)
def test_points():
bc = base_config()
bc.end_date = '2020-01-15 00:00'
config = bc.copy()
model = oa.OpenAmundsen(config)
model.initialize()
model.run()
ds = model.point_output.data
assert_array_equal(ds.point, ['bellavista', 'latschbloder', 'proviantdepot'])
config = bc.copy()
config.output_data.timeseries.add_default_points = False
model = oa.OpenAmundsen(config)
model.initialize()
model.run()
ds = model.point_output.data
assert ds.point.size == 0
config = bc.copy()
config.output_data.timeseries.add_default_points = False
config.output_data.timeseries.points.append({
'x': 640367,
'y': 5182896,
})
config.output_data.timeseries.points.append({
'x': 645378,
'y': 5190907,
'name': 'mypoint',
})
model = oa.OpenAmundsen(config)
model.initialize()
model.run()
ds = model.point_output.data
assert_array_equal(ds.point, ['point1', 'mypoint'])
assert_allclose(ds.alt, [3181.89, 1948.97])
# Duplicate point name
config = bc.copy()
config.output_data.timeseries.points.append({
'x': 640367,
'y': 5182896,
'name': 'bellavista',
})
model = oa.OpenAmundsen(config)
with pytest.raises(errors.ConfigurationError):
model.initialize()
# Point not within grid
config = bc.copy()
config.output_data.timeseries.points.append({
'x': 637152,
'y': 5196427,
})
model = oa.OpenAmundsen(config)
with pytest.raises(errors.ConfigurationError):
model.initialize()
def test_variables():
bc = base_config()
bc.end_date = '2020-01-15 00:00'
bc.output_data.timeseries.variables = []
config = bc.copy()
config.output_data.timeseries.add_default_variables = False
model = oa.OpenAmundsen(config)
model.initialize()
model.run()
ds = model.point_output.data
assert len(ds.data_vars) == 0
config = bc.copy()
config.output_data.timeseries.add_default_variables = False
config.output_data.timeseries.variables.append({'var': 'meteo.spec_heat_cap_moist_air'})
config.output_data.timeseries.variables.append({
'var': 'surface.conductance',
'name': 'myvar',
})
model = oa.OpenAmundsen(config)
model.initialize()
model.run()
ds = model.point_output.data
assert_array_equal(ds.data_vars, ['spec_heat_cap_moist_air', 'myvar'])
# Invalid variable name
config = bc.copy()
config.output_data.timeseries.variables.append({'var': 'meteo.asdf'})
model = oa.OpenAmundsen(config)
with pytest.raises(errors.ConfigurationError):
model.initialize()
# Output name already in use
config = bc.copy()
config.output_data.timeseries.variables.append({'var': 'meteo.temp'})
model = oa.OpenAmundsen(config)
with pytest.raises(errors.ConfigurationError):
model.initialize()
| [
"pandas.Series",
"openamundsen.OpenAmundsen",
"pandas.read_csv",
"numpy.testing.assert_allclose",
"pytest.mark.parametrize",
"numpy.issubdtype",
"pytest.raises",
"numpy.all",
"xarray.open_dataset",
"numpy.testing.assert_array_equal"
] | [((235, 294), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fmt"""', "['netcdf', 'csv', 'memory']"], {}), "('fmt', ['netcdf', 'csv', 'memory'])\n", (258, 294), False, 'import pytest\n'), ((2693, 2758), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""write_freq"""', "['M', '7H', '3H', '10min']"], {}), "('write_freq', ['M', '7H', '3H', '10min'])\n", (2716, 2758), False, 'import pytest\n'), ((559, 582), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (574, 582), True, 'import openamundsen as oa\n'), ((2033, 2056), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (2048, 2056), True, 'import openamundsen as oa\n'), ((2226, 2267), 'pandas.Series', 'pd.Series', ([], {'index': 'model.dates', 'dtype': 'float'}), '(index=model.dates, dtype=float)\n', (2235, 2267), True, 'import pandas as pd\n'), ((2290, 2331), 'pandas.Series', 'pd.Series', ([], {'index': 'model.dates', 'dtype': 'float'}), '(index=model.dates, dtype=float)\n', (2299, 2331), True, 'import pandas as pd\n'), ((2570, 2612), 'numpy.testing.assert_allclose', 'assert_allclose', (['ds.temp.values', 'data_temp'], {}), '(ds.temp.values, data_temp)\n', (2585, 2612), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((3021, 3044), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (3036, 3044), True, 'import openamundsen as oa\n'), ((3094, 3144), 'xarray.open_dataset', 'xr.open_dataset', (["(tmp_path / 'output_timeseries.nc')"], {}), "(tmp_path / 'output_timeseries.nc')\n", (3109, 3144), True, 'import xarray as xr\n'), ((3312, 3335), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (3327, 3335), True, 'import openamundsen as oa\n'), ((3412, 3489), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['ds.point', "['bellavista', 'latschbloder', 'proviantdepot']"], {}), "(ds.point, ['bellavista', 'latschbloder', 'proviantdepot'])\n", (3430, 3489), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((3587, 3610), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (3602, 3610), True, 'import openamundsen as oa\n'), ((4037, 4060), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (4052, 4060), True, 'import openamundsen as oa\n'), ((4137, 4188), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['ds.point', "['point1', 'mypoint']"], {}), "(ds.point, ['point1', 'mypoint'])\n", (4155, 4188), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((4193, 4236), 'numpy.testing.assert_allclose', 'assert_allclose', (['ds.alt', '[3181.89, 1948.97]'], {}), '(ds.alt, [3181.89, 1948.97])\n', (4208, 4236), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((4430, 4453), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (4445, 4453), True, 'import openamundsen as oa\n'), ((4725, 4748), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (4740, 4748), True, 'import openamundsen as oa\n'), ((4991, 5014), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (5006, 5014), True, 'import openamundsen as oa\n'), ((5322, 5345), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (5337, 5345), True, 'import openamundsen as oa\n'), ((5768, 5791), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (5783, 5791), True, 
'import openamundsen as oa\n'), ((5868, 5938), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['ds.data_vars', "['spec_heat_cap_moist_air', 'myvar']"], {}), "(ds.data_vars, ['spec_heat_cap_moist_air', 'myvar'])\n", (5886, 5938), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((6077, 6100), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (6092, 6100), True, 'import openamundsen as oa\n'), ((6322, 6345), 'openamundsen.OpenAmundsen', 'oa.OpenAmundsen', (['config'], {}), '(config)\n', (6337, 6345), True, 'import openamundsen as oa\n'), ((954, 993), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['ds.point', 'point_ids'], {}), '(ds.point, point_ids)\n', (972, 993), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((1406, 1452), 'numpy.issubdtype', 'np.issubdtype', (['ds.num_layers.dtype', 'np.integer'], {}), '(ds.num_layers.dtype, np.integer)\n', (1419, 1452), True, 'import numpy as np\n'), ((1468, 1491), 'numpy.all', 'np.all', (['(ds.temp > 250.0)'], {}), '(ds.temp > 250.0)\n', (1474, 1491), True, 'import numpy as np\n'), ((4463, 4503), 'pytest.raises', 'pytest.raises', (['errors.ConfigurationError'], {}), '(errors.ConfigurationError)\n', (4476, 4503), False, 'import pytest\n'), ((4758, 4798), 'pytest.raises', 'pytest.raises', (['errors.ConfigurationError'], {}), '(errors.ConfigurationError)\n', (4771, 4798), False, 'import pytest\n'), ((5024, 5064), 'pytest.raises', 'pytest.raises', (['errors.ConfigurationError'], {}), '(errors.ConfigurationError)\n', (5037, 5064), False, 'import pytest\n'), ((6110, 6150), 'pytest.raises', 'pytest.raises', (['errors.ConfigurationError'], {}), '(errors.ConfigurationError)\n', (6123, 6150), False, 'import pytest\n'), ((6355, 6395), 'pytest.raises', 'pytest.raises', (['errors.ConfigurationError'], {}), '(errors.ConfigurationError)\n', (6368, 6395), False, 'import pytest\n'), ((769, 819), 'xarray.open_dataset', 'xr.open_dataset', (["(tmp_path / 'output_timeseries.nc')"], {}), "(tmp_path / 'output_timeseries.nc')\n", (784, 819), True, 'import xarray as xr\n'), ((1628, 1714), 'pandas.read_csv', 'pd.read_csv', (["(tmp_path / 'point_bellavista.csv')"], {'index_col': '"""time"""', 'parse_dates': '(True)'}), "(tmp_path / 'point_bellavista.csv', index_col='time',\n parse_dates=True)\n", (1639, 1714), True, 'import pandas as pd\n'), ((1813, 1859), 'numpy.issubdtype', 'np.issubdtype', (['df.num_layers.dtype', 'np.integer'], {}), '(df.num_layers.dtype, np.integer)\n', (1826, 1859), True, 'import numpy as np\n'), ((1914, 1937), 'numpy.all', 'np.all', (['(df.temp > 250.0)'], {}), '(df.temp > 250.0)\n', (1920, 1937), True, 'import numpy as np\n')] |
from matplotlib.pyplot import figure, show
from PIL import ImageDraw
from numpy import array, linspace, meshgrid, pi, cos, sin
def cylinderize(text: str) -> None:
    # canvas sized to the text: ~6 px per character column, 15 px per line
    w, h = (len(max(text.split("\n"), key=len)) + 1) * 6, (text.count("\n") + 1) * 15
    im = ImageDraw.Image.new("L", (w, h))
    ImageDraw.Draw(im).text((0, 0), text, fill=1)
    # wrap the rasterized text around a unit cylinder and shade by pixel value
    THETA, Z = meshgrid(linspace(0, 2 * pi, w), linspace(0, 1, h))
    figure().add_subplot(projection="3d").plot_surface(
        cos(THETA), sin(THETA), Z,
        facecolors=[[[i] * 3 for i in j] for j in array(im)[::-1]],
        rstride=1, cstride=1,
    )
    show()
| [
"PIL.ImageDraw.Image.new",
"numpy.linspace",
"PIL.ImageDraw.Draw",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.array",
"numpy.sin",
"matplotlib.pyplot.show"
] | [((250, 282), 'PIL.ImageDraw.Image.new', 'ImageDraw.Image.new', (['"""L"""', '(w, h)'], {}), "('L', (w, h))\n", (269, 282), False, 'from PIL import ImageDraw\n'), ((565, 571), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (569, 571), False, 'from matplotlib.pyplot import figure, show\n'), ((354, 376), 'numpy.linspace', 'linspace', (['(0)', '(2 * pi)', 'w'], {}), '(0, 2 * pi, w)\n', (362, 376), False, 'from numpy import array, linspace, meshgrid, pi, cos, sin\n'), ((378, 395), 'numpy.linspace', 'linspace', (['(0)', '(1)', 'h'], {}), '(0, 1, h)\n', (386, 395), False, 'from numpy import array, linspace, meshgrid, pi, cos, sin\n'), ((453, 463), 'numpy.cos', 'cos', (['THETA'], {}), '(THETA)\n', (456, 463), False, 'from numpy import array, linspace, meshgrid, pi, cos, sin\n'), ((465, 475), 'numpy.sin', 'sin', (['THETA'], {}), '(THETA)\n', (468, 475), False, 'from numpy import array, linspace, meshgrid, pi, cos, sin\n'), ((286, 304), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (300, 304), False, 'from PIL import ImageDraw\n'), ((402, 410), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (408, 410), False, 'from matplotlib.pyplot import figure, show\n'), ((520, 529), 'numpy.array', 'array', (['im'], {}), '(im)\n', (525, 529), False, 'from numpy import array, linspace, meshgrid, pi, cos, sin\n')] |
import sys
sys.path.insert(0, '.')
import os
import argparse
import torch
import torch.nn as nn
from PIL import Image
import numpy as np
import cv2
import time
import lib.transform_cv2 as T
from lib.models import model_factory
from configs import cfg_factory
from lib.cityscapes_labels import trainId2color
torch.set_grad_enabled(False)
np.random.seed(123)
# args
parse = argparse.ArgumentParser()
parse.add_argument('--cfg-file', dest='cfg_file', type=str, default='bisenetv2', help="specify the name without suffix of config file",)
parse.add_argument('--weight-path', type=str, default='./res/model_final.pth',)
parse.add_argument('--img-path', dest='img_path', type=str, default='./example.png',)
parse.add_argument('--save-path', dest='save_path', type=str, default='./res.jpg',)
args = parse.parse_args()
cfg = cfg_factory[args.cfg_file]
# create the output directory for the result if it does not already exist
save_dir = os.path.dirname(args.save_path)
if save_dir and not os.path.exists(save_dir):
    os.makedirs(save_dir)
palette = np.random.randint(0, 256, (256, 3), dtype=np.uint8)  # random palette (unused below; final colors come from trainId2color)
# define model
net = model_factory[cfg.model_type](19)
net.load_state_dict(torch.load(args.weight_path, map_location='cpu'))
net.eval()
net.cuda()
# prepare data
to_tensor = T.ToTensor(
mean=(0.3257, 0.3690, 0.3223), # city, rgb
std=(0.2112, 0.2148, 0.2115),
)
# im = cv2.imread(args.img_path)[:, :, ::-1]
im = cv2.imread(args.img_path)
im = cv2.resize(im, (1024,512))[:, :, ::-1]
# im = cv2.resize(im, (1024,1024))[:, :, ::-1]
#im = cv2.resize(im, (1920,1024))[:, :, ::-1]
im = to_tensor(dict(im=im, lb=None))['im'].unsqueeze(0).cuda()
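# Note: cv2.imread returns BGR; the [:, :, ::-1] flip above converts to RGB to
# match the rgb mean/std passed to T.ToTensor.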
# inference
start = time.time()
out = net(im)[0].argmax(dim=1).squeeze().detach().cpu() # .numpy()
end = time.time()
# pred = palette[out]
# paint each train id with its Cityscapes color on a white background
color_map = torch.ones((out.shape[0], out.shape[1], 3)) * 255
for train_id in trainId2color:
    color_map[out == train_id] = torch.tensor(trainId2color[train_id]).float()
color_map = color_map.numpy()[:, :, ::-1]  # trainId2color is RGB; cv2.imwrite expects BGR
cv2.imwrite(args.save_path, color_map)
print("total time:", end - start) | [
"os.path.exists",
"lib.transform_cv2.ToTensor",
"sys.path.insert",
"cv2.imwrite",
"argparse.ArgumentParser",
"os.path.makedirs",
"torch.load",
"torch.tensor",
"numpy.random.randint",
"numpy.random.seed",
"time.time",
"torch.set_grad_enabled",
"cv2.resize",
"cv2.imread",
"torch.ones"
] | [((12, 35), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (27, 35), False, 'import sys\n'), ((310, 339), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (332, 339), False, 'import torch\n'), ((340, 359), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (354, 359), True, 'import numpy as np\n'), ((377, 402), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (400, 402), False, 'import argparse\n'), ((853, 883), 'os.path.exists', 'os.path.exists', (['args.save_path'], {}), '(args.save_path)\n', (867, 883), False, 'import os\n'), ((933, 984), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(256, 3)'], {'dtype': 'np.uint8'}), '(0, 256, (256, 3), dtype=np.uint8)\n', (950, 984), True, 'import numpy as np\n'), ((1161, 1231), 'lib.transform_cv2.ToTensor', 'T.ToTensor', ([], {'mean': '(0.3257, 0.369, 0.3223)', 'std': '(0.2112, 0.2148, 0.2115)'}), '(mean=(0.3257, 0.369, 0.3223), std=(0.2112, 0.2148, 0.2115))\n', (1171, 1231), True, 'import lib.transform_cv2 as T\n'), ((1306, 1331), 'cv2.imread', 'cv2.imread', (['args.img_path'], {}), '(args.img_path)\n', (1316, 1331), False, 'import cv2\n'), ((1553, 1564), 'time.time', 'time.time', ([], {}), '()\n', (1562, 1564), False, 'import time\n'), ((1638, 1649), 'time.time', 'time.time', ([], {}), '()\n', (1647, 1649), False, 'import time\n'), ((1855, 1893), 'cv2.imwrite', 'cv2.imwrite', (['args.save_path', 'color_map'], {}), '(args.save_path, color_map)\n', (1866, 1893), False, 'import cv2\n'), ((889, 921), 'os.path.makedirs', 'os.path.makedirs', (['args.save_path'], {}), '(args.save_path)\n', (905, 921), False, 'import os\n'), ((1061, 1109), 'torch.load', 'torch.load', (['args.weight_path'], {'map_location': '"""cpu"""'}), "(args.weight_path, map_location='cpu')\n", (1071, 1109), False, 'import torch\n'), ((1337, 1364), 'cv2.resize', 'cv2.resize', (['im', '(1024, 512)'], {}), '(im, (1024, 512))\n', (1347, 1364), False, 'import cv2\n'), ((1684, 1727), 'torch.ones', 'torch.ones', (['(out.shape[0], out.shape[1], 3)'], {}), '((out.shape[0], out.shape[1], 3))\n', (1694, 1727), False, 'import torch\n'), ((1785, 1816), 'torch.tensor', 'torch.tensor', (['trainId2color[id]'], {}), '(trainId2color[id])\n', (1797, 1816), False, 'import torch\n')] |
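The commented-out `# pred = palette[out]` above hints at the usual vectorized alternative to the per-class loop. A minimal sketch, assuming class ids fit in 0..255; the two-entry `trainId2color` stub is illustrative only:

```python
# Vectorized colorization: build a (256, 3) lookup table once, then index it
# with the whole prediction map instead of looping over class ids.
import numpy as np

trainId2color = {0: (128, 64, 128), 1: (244, 35, 232)}  # illustrative stub

palette = np.zeros((256, 3), dtype=np.uint8)
for train_id, rgb in trainId2color.items():
    palette[train_id] = rgb

out = np.random.randint(0, 2, (512, 1024))   # stand-in prediction map
color_map = palette[out]                       # shape (512, 1024, 3)
```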
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 12:07:57 2019
@author: johnmount
"""
import numpy
import pandas
import vtreat.util
import vtreat.transform
class VarTransform:
"""build a treatment plan for a numeric outcome (regression)"""
def __init__(self, incoming_column_name, derived_column_names, treatment):
self.incoming_column_name_ = incoming_column_name
self.derived_column_names_ = derived_column_names.copy()
self.treatment_ = treatment
self.need_cross_treatment_ = False
self.refitter_ = None
def transform(self, data_frame):
raise NotImplementedError("base method called")
class MappedCodeTransform(VarTransform):
def __init__(self, incoming_column_name, derived_column_name, treatment, code_book):
VarTransform.__init__(
self, incoming_column_name, [derived_column_name], treatment
)
self.code_book_ = code_book
def transform(self, data_frame):
incoming_column_name = self.incoming_column_name_
derived_column_name = self.derived_column_names_[0]
sf = pandas.DataFrame({incoming_column_name: data_frame[incoming_column_name]})
bad_posns = vtreat.util.is_bad(sf[incoming_column_name])
sf.loc[bad_posns, incoming_column_name] = "_NA_"
res = pandas.merge(
sf, self.code_book_, on=[self.incoming_column_name_], how="left", sort=False
) # ordered by left table rows
res = res[[derived_column_name]].copy()
res.loc[vtreat.util.is_bad(res[derived_column_name]), derived_column_name] = 0
return res
class YAwareMappedCodeTransform(MappedCodeTransform):
def __init__(
self,
incoming_column_name,
derived_column_name,
treatment,
code_book,
refitter,
extra_args,
params,
):
MappedCodeTransform.__init__(
self,
incoming_column_name=incoming_column_name,
derived_column_name=derived_column_name,
treatment=treatment,
code_book=code_book,
)
self.need_cross_treatment_ = True
self.refitter_ = refitter
self.extra_args_ = extra_args
self.params_ = params
class CleanNumericTransform(VarTransform):
def __init__(self, incoming_column_name, replacement_value):
VarTransform.__init__(
self, incoming_column_name, [incoming_column_name], "clean_copy"
)
self.replacement_value_ = replacement_value
def transform(self, data_frame):
col = numpy.asarray(data_frame[self.incoming_column_name_].copy()).astype(float)
bad_posns = vtreat.util.is_bad(col)
col[bad_posns] = self.replacement_value_
res = pandas.DataFrame({self.derived_column_names_[0]: col})
return res
class IndicateMissingTransform(VarTransform):
def __init__(self, incoming_column_name, derived_column_name):
VarTransform.__init__(
self, incoming_column_name, [derived_column_name], "missing_indicator"
)
def transform(self, data_frame):
col = vtreat.util.is_bad(data_frame[self.incoming_column_name_])
res = pandas.DataFrame({self.derived_column_names_[0]: col})
return res.astype(float)
def fit_regression_impact_code(*, incoming_column_name, x, y, extra_args, params):
sf = vtreat.util.grouped_by_x_statistics(x, y)
if sf.shape[0] <= 1:
return None
if params["use_hierarchical_estimate"]:
sf["_impact_code"] = sf["_hest"] - sf["_gm"]
else:
sf["_impact_code"] = sf["_group_mean"] - sf["_gm"]
sf = sf.loc[:, ["x", "_impact_code"]].copy()
newcol = incoming_column_name + "_impact_code"
sf.columns = [incoming_column_name, newcol]
return YAwareMappedCodeTransform(
incoming_column_name=incoming_column_name,
derived_column_name=newcol,
treatment="impact_code",
code_book=sf,
refitter=fit_regression_impact_code,
extra_args=extra_args,
params=params,
)
def fit_regression_deviation_code(*, incoming_column_name, x, y, extra_args, params):
sf = vtreat.util.grouped_by_x_statistics(x, y)
if sf.shape[0] <= 1:
return None
sf["_deviation_code"] = numpy.sqrt(sf["_var"])
sf = sf.loc[:, ["x", "_deviation_code"]].copy()
newcol = incoming_column_name + "_deviation_code"
sf.columns = [incoming_column_name, newcol]
return YAwareMappedCodeTransform(
incoming_column_name=incoming_column_name,
derived_column_name=newcol,
treatment="deviation_code",
code_book=sf,
refitter=fit_regression_deviation_code,
extra_args=extra_args,
params=params,
)
def fit_binomial_impact_code(*, incoming_column_name, x, y, extra_args, params):
outcome_target = (extra_args["outcome_target"],)
var_suffix = extra_args["var_suffix"]
y = numpy.asarray(numpy.asarray(y) == outcome_target, dtype=numpy.float64)
sf = vtreat.util.grouped_by_x_statistics(x, y)
if sf.shape[0] <= 1:
return None
eps = 1.0e-3
if params["use_hierarchical_estimate"]:
sf["_logit_code"] = numpy.log((sf["_hest"] + eps) / (sf["_gm"] + eps))
else:
sf["_logit_code"] = numpy.log((sf["_group_mean"] + eps) / (sf["_gm"] + eps))
sf = sf.loc[:, ["x", "_logit_code"]].copy()
newcol = incoming_column_name + "_logit_code" + var_suffix
sf.columns = [incoming_column_name, newcol]
return YAwareMappedCodeTransform(
incoming_column_name=incoming_column_name,
derived_column_name=newcol,
treatment="logit_code",
code_book=sf,
refitter=fit_binomial_impact_code,
extra_args=extra_args,
params=params,
)
class IndicatorCodeTransform(VarTransform):
def __init__(
self,
incoming_column_name,
derived_column_names,
levels,
*,
sparse_indicators=False
):
VarTransform.__init__(
self, incoming_column_name, derived_column_names, "indicator_code"
)
self.levels_ = levels
self.sparse_indicators_ = sparse_indicators
def transform(self, data_frame):
incoming_column_name = self.incoming_column_name_
sf = pandas.DataFrame({incoming_column_name: data_frame[incoming_column_name]})
bad_posns = vtreat.util.is_bad(sf[incoming_column_name])
sf.loc[bad_posns, incoming_column_name] = "_NA_"
col = sf[self.incoming_column_name_]
def f(i):
v = numpy.asarray(col == self.levels_[i]) + 0.0
if self.sparse_indicators_:
v = pandas.SparseArray(v, fill_value=0.0)
return v
res = [
pandas.DataFrame({self.derived_column_names_[i]: f(i)})
for i in range(len(self.levels_))
]
res = pandas.concat(res, axis=1, sort=False)
res.reset_index(inplace=True, drop=True)
return res
def fit_indicator_code(
*, incoming_column_name, x, min_fraction, sparse_indicators=False
):
sf = pandas.DataFrame({incoming_column_name: x})
bad_posns = vtreat.util.is_bad(sf[incoming_column_name])
sf.loc[bad_posns, incoming_column_name] = "_NA_"
counts = sf[incoming_column_name].value_counts()
n = sf.shape[0]
counts = counts[counts >= min_fraction * n] # no more than 1/min_fraction symbols
levels = [v for v in counts.index]
if len(levels) < 1:
return None
return IndicatorCodeTransform(
incoming_column_name,
[incoming_column_name + "_lev_" + lev for lev in levels],
levels=levels,
sparse_indicators=sparse_indicators,
)
def fit_prevalence_code(incoming_column_name, x):
sf = pandas.DataFrame({"x": x})
bad_posns = vtreat.util.is_bad(sf["x"])
sf.loc[bad_posns, "x"] = "_NA_"
sf.reset_index(inplace=True, drop=True)
n = sf.shape[0]
sf["_ni"] = 1.0
sf = pandas.DataFrame(sf.groupby("x")["_ni"].sum())
sf.reset_index(inplace=True, drop=False)
sf["_hest"] = sf["_ni"] / n
sf = sf.loc[:, ["x", "_hest"]].copy()
newcol = incoming_column_name + "_prevalence_code"
sf.columns = [incoming_column_name, newcol]
sf[incoming_column_name] = sf[incoming_column_name].astype(str)
sf.reset_index(inplace=True, drop=True)
return MappedCodeTransform(
incoming_column_name, newcol, treatment="prevalence_code", code_book=sf
)
# noinspection PyPep8Naming
def fit_numeric_outcome_treatment(
*, X, y, var_list, outcome_name, cols_to_copy, params
):
if (var_list is None) or (len(var_list) <= 0):
var_list = [co for co in X.columns]
copy_set = set(cols_to_copy)
var_list = [co for co in var_list if (not (co in copy_set))]
if len(var_list) <= 0:
raise ValueError("no variables")
xforms = []
n = X.shape[0]
all_bad = []
for vi in var_list:
n_bad = sum(vtreat.util.is_bad(X[vi]))
if n_bad >= n:
all_bad = all_bad + [vi]
if (n_bad > 0) and (n_bad < n):
if "missing_indicator" in params["coders"]:
xforms = xforms + [
IndicateMissingTransform(
incoming_column_name=vi, derived_column_name=vi + "_is_bad"
)
]
var_list = [co for co in var_list if (not (co in set(all_bad)))]
num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
cat_list = [co for co in var_list if co not in set(num_list)]
if "clean_copy" in params["coders"]:
for vi in num_list:
summaryi = vtreat.util.characterize_numeric(X[vi])
if summaryi["varies"] and summaryi["has_range"]:
xforms = xforms + [
CleanNumericTransform(
incoming_column_name=vi, replacement_value=summaryi["mean"]
)
]
for vi in cat_list:
if "impact_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_regression_impact_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
y=y,
extra_args=None,
params=params,
)
]
if "deviation_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_regression_deviation_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
y=y,
extra_args=None,
params=params,
)
]
if "prevalence_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
]
if "indicator_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_indicator_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
min_fraction=params["indicator_min_fraction"],
sparse_indicators=params["sparse_indicators"],
)
]
xforms = [xf for xf in xforms if xf is not None]
for stp in params["user_transforms"]:
stp.fit(X=X[var_list], y=y)
return {
"outcome_name": outcome_name,
"cols_to_copy": cols_to_copy,
"xforms": xforms,
}
# noinspection PyPep8Naming
def fit_binomial_outcome_treatment(
*, X, y, outcome_target, var_list, outcome_name, cols_to_copy, params
):
if (var_list is None) or (len(var_list) <= 0):
var_list = [co for co in X.columns]
copy_set = set(cols_to_copy)
var_list = [co for co in var_list if (not (co in copy_set))]
if len(var_list) <= 0:
raise ValueError("no variables")
xforms = []
n = X.shape[0]
    all_bad = []
for vi in var_list:
n_bad = sum(vtreat.util.is_bad(X[vi]))
if n_bad >= n:
            all_bad = all_bad + [vi]
if (n_bad > 0) and (n_bad < n):
if "missing_indicator" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
IndicateMissingTransform(
incoming_column_name=vi, derived_column_name=vi + "_is_bad"
)
]
    var_list = [co for co in var_list if (not (co in set(all_bad)))]
num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
cat_list = [co for co in var_list if co not in set(num_list)]
if "clean_copy" in params["coders"]:
for vi in num_list:
summaryi = vtreat.util.characterize_numeric(X[vi])
if summaryi["varies"] and summaryi["has_range"]:
# noinspection PyTypeChecker
xforms = xforms + [
CleanNumericTransform(
incoming_column_name=vi, replacement_value=summaryi["mean"]
)
]
extra_args = {"outcome_target": outcome_target, "var_suffix": ""}
for vi in cat_list:
if "logit_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_binomial_impact_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
y=y,
extra_args=extra_args,
params=params,
)
]
if "prevalence_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
]
if "indicator_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_indicator_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
min_fraction=params["indicator_min_fraction"],
sparse_indicators=params["sparse_indicators"],
)
]
xforms = [xf for xf in xforms if xf is not None]
for stp in params["user_transforms"]:
stp.fit(X=X[var_list], y=y)
return {
"outcome_name": outcome_name,
"cols_to_copy": cols_to_copy,
"xforms": xforms,
}
# noinspection PyPep8Naming
def fit_multinomial_outcome_treatment(
*, X, y, var_list, outcome_name, cols_to_copy, params
):
if (var_list is None) or (len(var_list) <= 0):
var_list = [co for co in X.columns]
copy_set = set(cols_to_copy)
var_list = [co for co in var_list if (not (co in copy_set))]
if len(var_list) <= 0:
raise ValueError("no variables")
xforms = []
n = X.shape[0]
all_bad = []
for vi in var_list:
n_bad = sum(vtreat.util.is_bad(X[vi]))
if n_bad >= n:
all_bad = all_bad + [vi]
if (n_bad > 0) and (n_bad < n):
if "missing_indicator" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
IndicateMissingTransform(
incoming_column_name=vi, derived_column_name=vi + "_is_bad"
)
]
outcomes = [oi for oi in set(y)]
var_list = [co for co in var_list if (not (co in set(all_bad)))]
num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
cat_list = [co for co in var_list if co not in set(num_list)]
if "clean_copy" in params["coders"]:
for vi in num_list:
summaryi = vtreat.util.characterize_numeric(X[vi])
if summaryi["varies"] and summaryi["has_range"]:
# noinspection PyTypeChecker
xforms = xforms + [
CleanNumericTransform(
incoming_column_name=vi, replacement_value=summaryi["mean"]
)
]
for vi in cat_list:
for outcome in outcomes:
if "impact_code" in params["coders"]:
extra_args = {
"outcome_target": outcome,
"var_suffix": ("_" + str(outcome)),
}
# noinspection PyTypeChecker
xforms = xforms + [
fit_binomial_impact_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
y=y,
extra_args=extra_args,
params=params,
)
]
if "prevalence_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
]
if "indicator_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_indicator_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
min_fraction=params["indicator_min_fraction"],
sparse_indicators=params["sparse_indicators"],
)
]
xforms = [xf for xf in xforms if xf is not None]
if len(xforms) <= 0:
raise ValueError("no variables created")
for stp in params["user_transforms"]:
stp.fit(X=X[var_list], y=y)
return {
"outcome_name": outcome_name,
"cols_to_copy": cols_to_copy,
"xforms": xforms,
}
# noinspection PyPep8Naming
def fit_unsupervised_treatment(*, X, var_list, outcome_name, cols_to_copy, params):
if (var_list is None) or (len(var_list) <= 0):
var_list = [co for co in X.columns]
copy_set = set(cols_to_copy)
var_list = [co for co in var_list if (not (co in copy_set))]
if len(var_list) <= 0:
raise ValueError("no variables")
xforms = []
n = X.shape[0]
all_bad = []
for vi in var_list:
n_bad = sum(vtreat.util.is_bad(X[vi]))
if n_bad >= n:
all_bad = all_bad + [vi]
if (n_bad > 0) and (n_bad < n):
if "missing_indicator" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
IndicateMissingTransform(
incoming_column_name=vi, derived_column_name=vi + "_is_bad"
)
]
var_list = [co for co in var_list if (not (co in set(all_bad)))]
num_list = [co for co in var_list if vtreat.util.can_convert_v_to_numeric(X[co])]
cat_list = [co for co in var_list if co not in set(num_list)]
if "clean_copy" in params["coders"]:
for vi in num_list:
summaryi = vtreat.util.characterize_numeric(X[vi])
if summaryi["varies"] and summaryi["has_range"]:
# noinspection PyTypeChecker
xforms = xforms + [
CleanNumericTransform(
incoming_column_name=vi, replacement_value=summaryi["mean"]
)
]
for vi in cat_list:
if "prevalence_code" in params["coders"]:
# noinspection PyTypeChecker
xforms = xforms + [
fit_prevalence_code(incoming_column_name=vi, x=numpy.asarray(X[vi]))
]
if "indicator_code" in params["coders"]:
xforms = xforms + [
fit_indicator_code(
incoming_column_name=vi,
x=numpy.asarray(X[vi]),
min_fraction=params["indicator_min_fraction"],
sparse_indicators=params["sparse_indicators"],
)
]
xforms = [xf for xf in xforms if xf is not None]
for stp in params["user_transforms"]:
stp.fit(X=X[var_list], y=None)
return {
"outcome_name": outcome_name,
"cols_to_copy": cols_to_copy,
"xforms": xforms,
}
def pre_prep_frame(x, *, col_list, cols_to_copy):
"""Create a copy of pandas.DataFrame x restricted to col_list union cols_to_copy with col_list - cols_to_copy
converted to only string and numeric types. New pandas.DataFrame has trivial indexing. If col_list
is empty it is interpreted as all columns."""
if cols_to_copy is None:
cols_to_copy = []
if (col_list is None) or (len(col_list) <= 0):
col_list = [co for co in x.columns]
x_set = set(x.columns)
col_set = set(col_list)
for ci in cols_to_copy:
if (ci in x_set) and (ci not in col_set):
col_list = col_list + [ci]
col_set = set(col_list)
missing_cols = col_set - x_set
if len(missing_cols) > 0:
raise KeyError("referred to not-present columns " + str(missing_cols))
cset = set(cols_to_copy)
if len(col_list) <= 0:
raise ValueError("no variables")
x = x.loc[:, col_list]
x = x.reset_index(inplace=False, drop=True)
for c in x.columns:
if c in cset:
continue
bad_ind = vtreat.util.is_bad(x[c])
if vtreat.util.can_convert_v_to_numeric(x[c]):
x[c] = numpy.asarray(x[c] + 0, dtype=float)
else:
# https://stackoverflow.com/questions/22231592/pandas-change-data-type-of-series-to-string
x[c] = numpy.asarray(x[c].apply(str), dtype=str)
x.loc[bad_ind, c] = numpy.nan
return x
def perform_transform(*, x, transform, params):
plan = transform.plan_
new_frames = [xfi.transform(x) for xfi in plan["xforms"]]
for stp in params["user_transforms"]:
frm = stp.transform(X=x)
if frm is not None and frm.shape[1] > 0:
new_frames = new_frames + [frm]
# see if we want to copy over any columns
copy_set = set(plan["cols_to_copy"])
to_copy = [ci for ci in x.columns if ci in copy_set]
if len(to_copy) > 0:
cp = x.loc[:, to_copy].copy()
new_frames = [cp] + new_frames
if len(new_frames) <= 0:
raise ValueError("no columns transformed")
res = pandas.concat(new_frames, axis=1, sort=False)
res.reset_index(inplace=True, drop=True)
return res
def limit_to_appropriate_columns(*, res, transform):
plan = transform.plan_
to_copy = set(plan["cols_to_copy"])
if ("filter_to_recommended" in transform.params_.keys()) and transform.params_[
"filter_to_recommended"
]:
to_take = set(
[
ci
for ci in transform.score_frame_["variable"][
transform.score_frame_["recommended"]
]
]
)
else:
to_take = set(
[
ci
for ci in transform.score_frame_["variable"][
transform.score_frame_["has_range"]
]
]
)
cols_to_keep = [ci for ci in res.columns if ci in to_copy or ci in to_take]
if len(cols_to_keep) <= 0:
raise ValueError("no columns retained")
return res[cols_to_keep]
# val_list is a list of single-column pandas data frames
def mean_of_single_column_pandas_list(val_list):
if val_list is None or len(val_list) <= 0:
return numpy.nan
d = pandas.concat(val_list, axis=0, sort=False)
col = d.columns[0]
d = d.loc[numpy.logical_not(vtreat.util.is_bad(d[col])), [col]]
if d.shape[0] < 1:
return numpy.nan
return numpy.mean(d[col])
# assumes each y-aware variable produces one derived column
# also clears out refitter_ values to None
def cross_patch_refit_y_aware_cols(*, x, y, res, plan, cross_plan):
if cross_plan is None or len(cross_plan) <= 1:
for xf in plan["xforms"]:
xf.refitter_ = None
return res
incoming_colset = set(x.columns)
derived_colset = set(res.columns)
for xf in plan["xforms"]:
if not xf.need_cross_treatment_:
continue
incoming_column_name = xf.incoming_column_name_
derived_column_name = xf.derived_column_names_[0]
if derived_column_name not in derived_colset:
continue
if incoming_column_name not in incoming_colset:
raise KeyError("missing required column " + incoming_column_name)
if xf.refitter_ is None:
raise ValueError(
"refitter is None: "
+ incoming_column_name
+ " -> "
+ derived_column_name
)
# noinspection PyPep8Naming
def maybe_transform(*, fit, X):
if fit is None:
return None
return fit.transform(X)
patches = [
maybe_transform(
fit=xf.refitter_(
incoming_column_name=incoming_column_name,
x=x[incoming_column_name][cp["train"]],
y=y[cp["train"]],
extra_args=xf.extra_args_,
params=xf.params_,
),
X=x.loc[cp["app"], [incoming_column_name]],
)
for cp in cross_plan
]
# replace any missing sections with global average (slight data leak potential)
avg = mean_of_single_column_pandas_list(
[pi for pi in patches if pi is not None]
)
if numpy.isnan(avg):
avg = 0
res[derived_column_name] = avg
for i in range(len(cross_plan)):
pi = patches[i]
if pi is None:
continue
pi.reset_index(inplace=True, drop=True)
cp = cross_plan[i]
res.loc[cp["app"], derived_column_name] = numpy.asarray(
pi[derived_column_name]
).reshape((len(pi)))
res.loc[vtreat.util.is_bad(res[derived_column_name]), derived_column_name] = avg
for xf in plan["xforms"]:
xf.refitter_ = None
return res
def cross_patch_user_y_aware_cols(*, x, y, res, params, cross_plan):
if cross_plan is None or len(cross_plan) <= 1:
return res
incoming_colset = set(x.columns)
derived_colset = set(res.columns)
if len(derived_colset) <= 0:
return res
for ut in params["user_transforms"]:
if not ut.y_aware_:
continue
        intersect_in = incoming_colset.intersection(set(ut.incoming_vars_))
        intersect_out = derived_colset.intersection(set(ut.derived_vars_))
        if len(intersect_out) <= 0:
            continue
        if len(intersect_out) != len(ut.derived_vars_):
            raise ValueError("not all derived columns are in res frame")
        if len(intersect_in) != len(ut.incoming_vars_):
raise KeyError("missing required columns")
patches = [
ut.fit(X=x.loc[cp["train"], ut.incoming_vars_], y=y[cp["train"]]).transform(
X=x.loc[cp["app"], ut.incoming_vars_]
)
for cp in cross_plan
]
for col in ut.derived_vars_:
# replace any missing sections with global average (slight data leak potential)
avg = mean_of_single_column_pandas_list(
[pi.loc[:, [col]] for pi in patches if pi is not None]
)
if numpy.isnan(avg):
avg = 0
res[col] = avg
for i in range(len(cross_plan)):
pi = patches[i]
if pi is None:
continue
pi.reset_index(inplace=True, drop=True)
cp = cross_plan[i]
res.loc[cp["app"], col] = numpy.asarray(pi[col]).reshape((len(pi)))
res.loc[vtreat.util.is_bad(res[col]), col] = avg
return res
def score_plan_variables(cross_frame, outcome, plan, params):
def describe_xf(xf):
description = pandas.DataFrame({"variable": xf.derived_column_names_})
description["orig_variable"] = xf.incoming_column_name_
description["treatment"] = xf.treatment_
description["y_aware"] = xf.need_cross_treatment_
return description
def describe_ut(ut):
description = pandas.DataFrame(
{"orig_variable": ut.incoming_vars_, "variable": ut.derived_vars_}
)
description["treatment"] = ut.treatment_
description["y_aware"] = ut.y_aware_
return description
var_table = pandas.concat(
[describe_xf(xf) for xf in plan["xforms"]]
+ [
describe_ut(ut)
for ut in params["user_transforms"]
if len(ut.incoming_vars_) > 0
],
sort=False,
)
var_table.reset_index(inplace=True, drop=True)
sf = vtreat.util.score_variables(
cross_frame, variables=var_table["variable"], outcome=outcome
)
score_frame = pandas.merge(var_table, sf, how="left", on=["variable"], sort=False)
num_treatment_types = len(score_frame["treatment"].unique())
score_frame["_one"] = 1.0
score_frame["vcount"] = score_frame.groupby("treatment")["_one"].transform("sum")
score_frame["default_threshold"] = 1.0 / (
score_frame["vcount"] * num_treatment_types
)
score_frame.drop(["_one"], axis=1, inplace=True)
score_frame["recommended"] = numpy.logical_and(
score_frame["has_range"],
numpy.logical_and(
numpy.logical_not(
numpy.logical_or(
numpy.isnan(score_frame["significance"]),
numpy.isnan(score_frame["PearsonR"]),
)
),
numpy.logical_and(
score_frame["significance"] < score_frame["default_threshold"],
numpy.logical_or(
score_frame["PearsonR"] > 0.0,
numpy.logical_not(score_frame["y_aware"]),
),
),
),
)
return score_frame
def pseudo_score_plan_variables(*, cross_frame, plan, params):
def describe_xf(xf):
description = pandas.DataFrame({"variable": xf.derived_column_names_})
description["orig_variable"] = xf.incoming_column_name_
description["treatment"] = xf.treatment_
description["y_aware"] = xf.need_cross_treatment_
return description
def describe_ut(ut):
description = pandas.DataFrame(
{"orig_variable": ut.incoming_vars_, "variable": ut.derived_vars_}
)
description["treatment"] = ut.treatment_
description["y_aware"] = ut.y_aware_
return description
score_frame = pandas.concat(
[describe_xf(xf) for xf in plan["xforms"]]
+ [
describe_ut(ut)
for ut in params["user_transforms"]
if len(ut.incoming_vars_) > 0
],
sort=False,
)
score_frame.reset_index(inplace=True, drop=True)
def has_range(x):
x = numpy.asarray(x)
return numpy.max(x) > numpy.min(x)
score_frame["has_range"] = [
has_range(cross_frame[c]) for c in score_frame["variable"]
]
score_frame["PearsonR"] = numpy.nan
score_frame["significance"] = numpy.nan
score_frame["recommended"] = score_frame["has_range"].copy()
score_frame["_one"] = 1.0
score_frame["vcount"] = score_frame.groupby("treatment")["_one"].transform("sum")
score_frame.drop(["_one"], axis=1, inplace=True)
return score_frame
| [
"numpy.mean",
"numpy.sqrt",
"pandas.merge",
"numpy.log",
"numpy.asarray",
"pandas.SparseArray",
"numpy.logical_not",
"numpy.max",
"numpy.isnan",
"numpy.min",
"pandas.DataFrame",
"pandas.concat"
] | [((4277, 4299), 'numpy.sqrt', 'numpy.sqrt', (["sf['_var']"], {}), "(sf['_var'])\n", (4287, 4299), False, 'import numpy\n'), ((7099, 7142), 'pandas.DataFrame', 'pandas.DataFrame', (['{incoming_column_name: x}'], {}), '({incoming_column_name: x})\n', (7115, 7142), False, 'import pandas\n'), ((7766, 7792), 'pandas.DataFrame', 'pandas.DataFrame', (["{'x': x}"], {}), "({'x': x})\n", (7782, 7792), False, 'import pandas\n'), ((22338, 22383), 'pandas.concat', 'pandas.concat', (['new_frames'], {'axis': '(1)', 'sort': '(False)'}), '(new_frames, axis=1, sort=False)\n', (22351, 22383), False, 'import pandas\n'), ((23506, 23549), 'pandas.concat', 'pandas.concat', (['val_list'], {'axis': '(0)', 'sort': '(False)'}), '(val_list, axis=0, sort=False)\n', (23519, 23549), False, 'import pandas\n'), ((23700, 23718), 'numpy.mean', 'numpy.mean', (['d[col]'], {}), '(d[col])\n', (23710, 23718), False, 'import numpy\n'), ((29011, 29079), 'pandas.merge', 'pandas.merge', (['var_table', 'sf'], {'how': '"""left"""', 'on': "['variable']", 'sort': '(False)'}), "(var_table, sf, how='left', on=['variable'], sort=False)\n", (29023, 29079), False, 'import pandas\n'), ((1107, 1181), 'pandas.DataFrame', 'pandas.DataFrame', (['{incoming_column_name: data_frame[incoming_column_name]}'], {}), '({incoming_column_name: data_frame[incoming_column_name]})\n', (1123, 1181), False, 'import pandas\n'), ((1318, 1413), 'pandas.merge', 'pandas.merge', (['sf', 'self.code_book_'], {'on': '[self.incoming_column_name_]', 'how': '"""left"""', 'sort': '(False)'}), "(sf, self.code_book_, on=[self.incoming_column_name_], how=\n 'left', sort=False)\n", (1330, 1413), False, 'import pandas\n'), ((2759, 2813), 'pandas.DataFrame', 'pandas.DataFrame', (['{self.derived_column_names_[0]: col}'], {}), '({self.derived_column_names_[0]: col})\n', (2775, 2813), False, 'import pandas\n'), ((3197, 3251), 'pandas.DataFrame', 'pandas.DataFrame', (['{self.derived_column_names_[0]: col}'], {}), '({self.derived_column_names_[0]: col})\n', (3213, 3251), False, 'import pandas\n'), ((5187, 5237), 'numpy.log', 'numpy.log', (["((sf['_hest'] + eps) / (sf['_gm'] + eps))"], {}), "((sf['_hest'] + eps) / (sf['_gm'] + eps))\n", (5196, 5237), False, 'import numpy\n'), ((5276, 5332), 'numpy.log', 'numpy.log', (["((sf['_group_mean'] + eps) / (sf['_gm'] + eps))"], {}), "((sf['_group_mean'] + eps) / (sf['_gm'] + eps))\n", (5285, 5332), False, 'import numpy\n'), ((6289, 6363), 'pandas.DataFrame', 'pandas.DataFrame', (['{incoming_column_name: data_frame[incoming_column_name]}'], {}), '({incoming_column_name: data_frame[incoming_column_name]})\n', (6305, 6363), False, 'import pandas\n'), ((6884, 6922), 'pandas.concat', 'pandas.concat', (['res'], {'axis': '(1)', 'sort': '(False)'}), '(res, axis=1, sort=False)\n', (6897, 6922), False, 'import pandas\n'), ((25581, 25597), 'numpy.isnan', 'numpy.isnan', (['avg'], {}), '(avg)\n', (25592, 25597), False, 'import numpy\n'), ((28047, 28103), 'pandas.DataFrame', 'pandas.DataFrame', (["{'variable': xf.derived_column_names_}"], {}), "({'variable': xf.derived_column_names_})\n", (28063, 28103), False, 'import pandas\n'), ((28350, 28439), 'pandas.DataFrame', 'pandas.DataFrame', (["{'orig_variable': ut.incoming_vars_, 'variable': ut.derived_vars_}"], {}), "({'orig_variable': ut.incoming_vars_, 'variable': ut.\n derived_vars_})\n", (28366, 28439), False, 'import pandas\n'), ((30195, 30251), 'pandas.DataFrame', 'pandas.DataFrame', (["{'variable': xf.derived_column_names_}"], {}), "({'variable': xf.derived_column_names_})\n", (30211, 30251), False, 
'import pandas\n'), ((30498, 30587), 'pandas.DataFrame', 'pandas.DataFrame', (["{'orig_variable': ut.incoming_vars_, 'variable': ut.derived_vars_}"], {}), "({'orig_variable': ut.incoming_vars_, 'variable': ut.\n derived_vars_})\n", (30514, 30587), False, 'import pandas\n'), ((31066, 31082), 'numpy.asarray', 'numpy.asarray', (['x'], {}), '(x)\n', (31079, 31082), False, 'import numpy\n'), ((4945, 4961), 'numpy.asarray', 'numpy.asarray', (['y'], {}), '(y)\n', (4958, 4961), False, 'import numpy\n'), ((21429, 21465), 'numpy.asarray', 'numpy.asarray', (['(x[c] + 0)'], {'dtype': 'float'}), '(x[c] + 0, dtype=float)\n', (21442, 21465), False, 'import numpy\n'), ((27479, 27495), 'numpy.isnan', 'numpy.isnan', (['avg'], {}), '(avg)\n', (27490, 27495), False, 'import numpy\n'), ((31098, 31110), 'numpy.max', 'numpy.max', (['x'], {}), '(x)\n', (31107, 31110), False, 'import numpy\n'), ((31113, 31125), 'numpy.min', 'numpy.min', (['x'], {}), '(x)\n', (31122, 31125), False, 'import numpy\n'), ((6566, 6603), 'numpy.asarray', 'numpy.asarray', (['(col == self.levels_[i])'], {}), '(col == self.levels_[i])\n', (6579, 6603), False, 'import numpy\n'), ((6670, 6707), 'pandas.SparseArray', 'pandas.SparseArray', (['v'], {'fill_value': '(0.0)'}), '(v, fill_value=0.0)\n', (6688, 6707), False, 'import pandas\n'), ((25916, 25954), 'numpy.asarray', 'numpy.asarray', (['pi[derived_column_name]'], {}), '(pi[derived_column_name])\n', (25929, 25954), False, 'import numpy\n'), ((29617, 29657), 'numpy.isnan', 'numpy.isnan', (["score_frame['significance']"], {}), "(score_frame['significance'])\n", (29628, 29657), False, 'import numpy\n'), ((29679, 29715), 'numpy.isnan', 'numpy.isnan', (["score_frame['PearsonR']"], {}), "(score_frame['PearsonR'])\n", (29690, 29715), False, 'import numpy\n'), ((29966, 30007), 'numpy.logical_not', 'numpy.logical_not', (["score_frame['y_aware']"], {}), "(score_frame['y_aware'])\n", (29983, 30007), False, 'import numpy\n'), ((27818, 27840), 'numpy.asarray', 'numpy.asarray', (['pi[col]'], {}), '(pi[col])\n', (27831, 27840), False, 'import numpy\n'), ((10208, 10228), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (10221, 10228), False, 'import numpy\n'), ((10595, 10615), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (10608, 10615), False, 'import numpy\n'), ((10932, 10952), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (10945, 10952), False, 'import numpy\n'), ((11193, 11213), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (11206, 11213), False, 'import numpy\n'), ((13554, 13574), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (13567, 13574), False, 'import numpy\n'), ((13897, 13917), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (13910, 13917), False, 'import numpy\n'), ((14158, 14178), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (14171, 14178), False, 'import numpy\n'), ((17042, 17062), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (17055, 17062), False, 'import numpy\n'), ((17303, 17323), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (17316, 17323), False, 'import numpy\n'), ((19594, 19614), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (19607, 19614), False, 'import numpy\n'), ((19814, 19834), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (19827, 19834), False, 'import numpy\n'), ((16679, 16699), 'numpy.asarray', 'numpy.asarray', (['X[vi]'], {}), '(X[vi])\n', (16692, 16699), False, 'import numpy\n')] |
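`fit_regression_impact_code` in the record above encodes each categorical level by the difference between the level's conditional mean of `y` (or a hierarchical estimate) and the grand mean. A toy sketch of the non-hierarchical arithmetic, with made-up data rather than vtreat's internal `_group_mean`/`_gm` frame:

```python
# Toy impact code: the per-level mean of y minus the grand mean.
import pandas as pd

d = pd.DataFrame({"x": ["a", "a", "b", "b", "b"],
              "y": [1.0, 3.0, 10.0, 12.0, 14.0]})
grand_mean = d["y"].mean()                          # 8.0
code_book = d.groupby("x")["y"].mean() - grand_mean
print(code_book)                                    # a -> -6.0, b -> 4.0
```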
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 9 16:11:56 2019
@author: sjmoneyboss
"""
import pandas as pd
import statsmodels
from statsmodels.tsa.stattools import adfuller
import statsmodels.api as sm
import datetime
import math
import pandas_datareader.data as web
import datetime as dt
from datetime import datetime
import csv
import numpy as np
from pandas_datareader import data as web
from openpyxl import load_workbook
import matplotlib.pyplot as plt
from matplotlib import style
import xlrd
from matplotlib.pyplot import figure
""" In this program, I will be compiling the 3 different programs: models.py, stationarity.py
and cointegration.py to build a very simplestic pairs trading strategy """
"""Part-1 of the Program: Fetching data from sources and storing it in a CSV file """
def get_price(symbol): #YYYY/MM/DD
s_year, s_month, s_day = [int(val) for val in input("Enter the Start Date (YYYY-MM-DD): ").split('-')]
e_year, e_month, e_day = [int(val) for val in input("Enter the End Date (YYYY-MM-DD): ").split('-')]
start = datetime(s_year, s_month, s_day)
end = datetime(e_year, e_month, e_day)
df = web.DataReader(symbol, 'yahoo', start, end)
filename = symbol + ".csv"
df.to_csv(filename)
#print(df.tail())
#df['Close'].plot(grid=True, figsize=(9,6))
#plt.ylabel("Price")
symbol_1 = input("Enter the first Symbol/Ticker: ")
get_price(symbol_1)
symbol_2 = input("Enter the second Symbol/Ticker: ")
get_price(symbol_2)
print()
print()
print("CSV files for the symols requested have been created!")
print()
"""Part-2 of the Program: Fetching the CSV file and storing them in Pandas DataFrames """
filename1 = input("Enter the first filename (eg. SPY.csv): ")
filename2 = input("Enter the second filename (eg. DIA.csv): ")
df1 = pd.read_csv(filename1)
df2 = pd.read_csv(filename2)
data1 = df1['Close']
data1.name = filename1.split('.')[0]
data2 = df2['Close']
data2.name = filename2.split('.')[0]
""" Part=3 of the Program: Checking stationarity for the time series data1 and data2 """
""" Now below we will run Augmented Dickey Fuller test on the series and check it's stationarity"""
print()
result1 = adfuller(data1)
print("Result from the ADF Test (for 1st Security): ")
print()
print('ADF Statistic: %f' % result1[0])
print('p-value: %f' % result1[1])
print('Critical Values:')
for key, value in result1[4].items():
print('\t%s: %.3f' % (key, value))
critical_values = list(result1[4].values())
print()
result2 = adfuller(data2)
print("Result from the ADF Test (for 2nd Security): ")
print()
print('ADF Statistic: %f' % result2[0])
print('p-value: %f' % result2[1])
print('Critical Values:')
for key, value in result2[4].items():
print('\t%s: %.3f' % (key, value))
critical_values = list(result2[4].values())
print()
""" Adding some code on half life """
def half_life(data):
data_lag = np.roll(data,1)
data_lag[0] = 0
data_ret = data - data_lag
data_ret[0] = 0
#adds intercept terms to X variable for regression
data_lag2 = sm.add_constant(data_lag)
    model = sm.OLS(data_ret,data_lag2) #OLS = ordinary least squares, for a regression fit
res = model.fit()
halflife = -(math.log(2))/res.params[1]
print("Half Life for ",data.name, ": ", halflife)
half_life(data1)
half_life(data2)
print()
def hurst(ts):
"""Returns the Hurst Exponent of the time series vector ts"""
# Create the range of lag values
lags = range(2, 100)
# Calculate the array of the variances of the lagged differences
tau = [np.sqrt(np.std(np.subtract(ts[lag:], ts[:-lag]))) for lag in lags]
# Use a linear fit to estimate the Hurst Exponent
poly = np.polyfit(np.log(lags), np.log(tau), 1)
# Return the Hurst exponent from the polyfit output
return poly[0] * 2.0
print("Hurst(for the Data Series-1) | Mean reverting if value < 0.5: %s" % hurst(np.log(data1)))
print("Hurst(for the Data Series-2) | Mean reverting if value < 0.5: %s" % hurst(np.log(data2)))
print()
#print("Price Chart for the 2 securities: ")
#df1['Close'].plot(grid=True, figsize=(9,6))
#df2['Close'].plot(grid=True, figsize=(9,6))
#plt.ylabel("Price")
""" Part-4: Performing Cointegration of the 2 security pairs """
""" let us write the code to calculate the spread first """
data1 = sm.add_constant(data1)
results = sm.OLS(data2, data1).fit()
data1 = data1[filename1.split('.')[0]] #the column name needs to be there inside the square brackets
b = results.params[filename1.split('.')[0]]
spread = data2 - b * data1
""" The code below will plot the prices for SPY and DIA time series, and the ratio
and the zscore time series"""
def zscore(series):
return (series - series.mean()) / np.std(series)
zscore_data = zscore(spread)
zscore(spread).plot()
plt.axhline(zscore(spread).mean(), color='black')
plt.axhline(1.0, color='red', linestyle='--')
plt.axhline(-1.0, color='green', linestyle='--')
plt.legend(['Spread z-score', 'Mean', '+1', '-1'])
""" In our case, we have the following Entry Signals:
- When z-score > 1, short 'data2' and buy 'data1'
- When z-score < -1, buy 'data2' and short 'data1' """
print("Done")
#Pg-109
| [
"datetime.datetime",
"numpy.roll",
"statsmodels.tsa.stattools.adfuller",
"pandas.read_csv",
"pandas_datareader.data.DataReader",
"numpy.log",
"numpy.subtract",
"math.log",
"matplotlib.pyplot.axhline",
"statsmodels.api.add_constant",
"numpy.std",
"statsmodels.api.OLS",
"matplotlib.pyplot.lege... | [((1820, 1842), 'pandas.read_csv', 'pd.read_csv', (['filename1'], {}), '(filename1)\n', (1831, 1842), True, 'import pandas as pd\n'), ((1849, 1871), 'pandas.read_csv', 'pd.read_csv', (['filename2'], {}), '(filename2)\n', (1860, 1871), True, 'import pandas as pd\n'), ((2202, 2217), 'statsmodels.tsa.stattools.adfuller', 'adfuller', (['data1'], {}), '(data1)\n', (2210, 2217), False, 'from statsmodels.tsa.stattools import adfuller\n'), ((2518, 2533), 'statsmodels.tsa.stattools.adfuller', 'adfuller', (['data2'], {}), '(data2)\n', (2526, 2533), False, 'from statsmodels.tsa.stattools import adfuller\n'), ((4324, 4346), 'statsmodels.api.add_constant', 'sm.add_constant', (['data1'], {}), '(data1)\n', (4339, 4346), True, 'import statsmodels.api as sm\n'), ((4848, 4893), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(1.0)'], {'color': '"""red"""', 'linestyle': '"""--"""'}), "(1.0, color='red', linestyle='--')\n", (4859, 4893), True, 'import matplotlib.pyplot as plt\n'), ((4894, 4942), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(-1.0)'], {'color': '"""green"""', 'linestyle': '"""--"""'}), "(-1.0, color='green', linestyle='--')\n", (4905, 4942), True, 'import matplotlib.pyplot as plt\n'), ((4943, 4993), 'matplotlib.pyplot.legend', 'plt.legend', (["['Spread z-score', 'Mean', '+1', '-1']"], {}), "(['Spread z-score', 'Mean', '+1', '-1'])\n", (4953, 4993), True, 'import matplotlib.pyplot as plt\n'), ((1082, 1114), 'datetime.datetime', 'datetime', (['s_year', 's_month', 's_day'], {}), '(s_year, s_month, s_day)\n', (1090, 1114), False, 'from datetime import datetime\n'), ((1125, 1157), 'datetime.datetime', 'datetime', (['e_year', 'e_month', 'e_day'], {}), '(e_year, e_month, e_day)\n', (1133, 1157), False, 'from datetime import datetime\n'), ((1167, 1210), 'pandas_datareader.data.DataReader', 'web.DataReader', (['symbol', '"""yahoo"""', 'start', 'end'], {}), "(symbol, 'yahoo', start, end)\n", (1181, 1210), True, 'from pandas_datareader import data as web\n'), ((2900, 2916), 'numpy.roll', 'np.roll', (['data', '(1)'], {}), '(data, 1)\n', (2907, 2916), True, 'import numpy as np\n'), ((3059, 3084), 'statsmodels.api.add_constant', 'sm.add_constant', (['data_lag'], {}), '(data_lag)\n', (3074, 3084), True, 'import statsmodels.api as sm\n'), ((3098, 3125), 'statsmodels.api.OLS', 'sm.OLS', (['data_ret', 'data_lag2'], {}), '(data_ret, data_lag2)\n', (3104, 3125), True, 'import statsmodels.api as sm\n'), ((3711, 3723), 'numpy.log', 'np.log', (['lags'], {}), '(lags)\n', (3717, 3723), True, 'import numpy as np\n'), ((3725, 3736), 'numpy.log', 'np.log', (['tau'], {}), '(tau)\n', (3731, 3736), True, 'import numpy as np\n'), ((4357, 4377), 'statsmodels.api.OLS', 'sm.OLS', (['data2', 'data1'], {}), '(data2, data1)\n', (4363, 4377), True, 'import statsmodels.api as sm\n'), ((4730, 4744), 'numpy.std', 'np.std', (['series'], {}), '(series)\n', (4736, 4744), True, 'import numpy as np\n'), ((3216, 3227), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (3224, 3227), False, 'import math\n'), ((3907, 3920), 'numpy.log', 'np.log', (['data1'], {}), '(data1)\n', (3913, 3920), True, 'import numpy as np\n'), ((4006, 4019), 'numpy.log', 'np.log', (['data2'], {}), '(data2)\n', (4012, 4019), True, 'import numpy as np\n'), ((3582, 3614), 'numpy.subtract', 'np.subtract', (['ts[lag:]', 'ts[:-lag]'], {}), '(ts[lag:], ts[:-lag])\n', (3593, 3614), True, 'import numpy as np\n')] |
from __future__ import print_function
import pickle
import numpy
import theano
numpy.random.seed(42)
def prepare_data(seqs, labels):
"""Create the matrices from the datasets.
This pad each sequence to the same lenght: the lenght of the
longuest sequence or maxlen.
if maxlen is set, we will cut all sequence to this maximum
lenght.
This swap the axis!
"""
# x: a list of sentences
lengths = [len(s) for s in seqs]
n_samples = len(seqs)
maxlen = numpy.max(lengths)
x = numpy.zeros((maxlen, n_samples)).astype('int64')
x_mask = numpy.ones((maxlen, n_samples)).astype(theano.config.floatX)
for idx, s in enumerate(seqs):
x[:lengths[idx], idx] = s
x_mask *= (1 - (x == 0))
return x, x_mask, labels
def load_data(valid_portion=0.1, maxlen=19, sort_by_len=False):
    '''Loads the dataset.
    The train/test paths are hard-coded below (RSC15 session data).
    :type valid_portion: float
    :param valid_portion: The proportion of the full train set used for
        the validation set.
    :type maxlen: None or positive int
    :param maxlen: the max sequence length we use in the train/valid set.
    :type sort_by_len: bool
    :param sort_by_len: Sort by sequence length for the train,
        valid and test set. This allows faster execution as it causes
        less padding per minibatch. Another mechanism must be used to
        shuffle the train set at each epoch.
    '''
#############
# LOAD DATA #
#############
# Load the dataset
path_train_data = '/content/gdrive/My Drive/Colab Notebooks/RSC15-Raw/NARM/train.pk'
path_test_data = '/content/gdrive/My Drive/Colab Notebooks/RSC15-Raw/NARM/test.pk'
f1 = open(path_train_data, 'rb')
train_set = pickle.load(f1)
f1.close()
f2 = open(path_test_data, 'rb')
test_set = pickle.load(f2)
f2.close()
if maxlen:
new_train_set_x = []
new_train_set_y = []
for x, y in zip(train_set[0], train_set[1]):
if len(x) < maxlen:
new_train_set_x.append(x)
new_train_set_y.append(y)
else:
new_train_set_x.append(x[:maxlen])
new_train_set_y.append(y)
train_set = (new_train_set_x, new_train_set_y)
del new_train_set_x, new_train_set_y
new_test_set_x = []
new_test_set_y = []
for xx, yy in zip(test_set[0], test_set[1]):
if len(xx) < maxlen:
new_test_set_x.append(xx)
new_test_set_y.append(yy)
else:
new_test_set_x.append(xx[:maxlen])
new_test_set_y.append(yy)
test_set = (new_test_set_x, new_test_set_y)
del new_test_set_x, new_test_set_y
# split training set into validation set
train_set_x, train_set_y = train_set
n_samples = len(train_set_x)
sidx = numpy.arange(n_samples, dtype='int32')
numpy.random.shuffle(sidx)
n_train = int(numpy.round(n_samples * (1. - valid_portion)))
valid_set_x = [train_set_x[s] for s in sidx[n_train:]]
valid_set_y = [train_set_y[s] for s in sidx[n_train:]]
train_set_x = [train_set_x[s] for s in sidx[:n_train]]
train_set_y = [train_set_y[s] for s in sidx[:n_train]]
train_set = (train_set_x, train_set_y)
valid_set = (valid_set_x, valid_set_y)
test_set_x, test_set_y = test_set
valid_set_x, valid_set_y = valid_set
train_set_x, train_set_y = train_set
def len_argsort(seq):
return sorted(range(len(seq)), key=lambda x: len(seq[x]))
if sort_by_len:
sorted_index = len_argsort(test_set_x)
test_set_x = [test_set_x[i] for i in sorted_index]
test_set_y = [test_set_y[i] for i in sorted_index]
sorted_index = len_argsort(valid_set_x)
valid_set_x = [valid_set_x[i] for i in sorted_index]
valid_set_y = [valid_set_y[i] for i in sorted_index]
train = (train_set_x, train_set_y)
valid = (valid_set_x, valid_set_y)
test = (test_set_x, test_set_y)
return train, valid, test
| [
"numpy.ones",
"numpy.round",
"pickle.load",
"numpy.max",
"numpy.zeros",
"numpy.random.seed",
"numpy.arange",
"numpy.random.shuffle"
] | [((80, 101), 'numpy.random.seed', 'numpy.random.seed', (['(42)'], {}), '(42)\n', (97, 101), False, 'import numpy\n'), ((496, 514), 'numpy.max', 'numpy.max', (['lengths'], {}), '(lengths)\n', (505, 514), False, 'import numpy\n'), ((1850, 1865), 'pickle.load', 'pickle.load', (['f1'], {}), '(f1)\n', (1861, 1865), False, 'import pickle\n'), ((1933, 1948), 'pickle.load', 'pickle.load', (['f2'], {}), '(f2)\n', (1944, 1948), False, 'import pickle\n'), ((2982, 3020), 'numpy.arange', 'numpy.arange', (['n_samples'], {'dtype': '"""int32"""'}), "(n_samples, dtype='int32')\n", (2994, 3020), False, 'import numpy\n'), ((3025, 3051), 'numpy.random.shuffle', 'numpy.random.shuffle', (['sidx'], {}), '(sidx)\n', (3045, 3051), False, 'import numpy\n'), ((3070, 3116), 'numpy.round', 'numpy.round', (['(n_samples * (1.0 - valid_portion))'], {}), '(n_samples * (1.0 - valid_portion))\n', (3081, 3116), False, 'import numpy\n'), ((524, 556), 'numpy.zeros', 'numpy.zeros', (['(maxlen, n_samples)'], {}), '((maxlen, n_samples))\n', (535, 556), False, 'import numpy\n'), ((586, 617), 'numpy.ones', 'numpy.ones', (['(maxlen, n_samples)'], {}), '((maxlen, n_samples))\n', (596, 617), False, 'import numpy\n')] |
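For reference, `prepare_data` above pads sequences with 0 to the longest length, lays them out time-major as `(maxlen, n_samples)`, and the mask zeroes out the padding (note a genuine item id of 0 would be masked too). A quick usage illustration with toy sessions:

```python
# Usage sketch; assumes prepare_data from the record above is in scope.
x, x_mask, labels = prepare_data([[5, 6, 7], [9]], [1, 0])
print(x)
# [[5 9]
#  [6 0]
#  [7 0]]
print(x_mask)
# [[1. 1.]
#  [1. 0.]
#  [1. 0.]]
print(labels)   # [1, 0]
```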
import math
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
import numpy as np
import torch
import tensorflow as tf
from paragen.generators import AbstractGenerator, register_generator
from paragen.utils.io import remove
from paragen.utils.runtime import Environment
@register_generator
class LightseqTransformerGenerator(AbstractGenerator):
"""
    SequenceGenerator is a combination of a model and a search algorithm.
    It processes in a multi-step fashion while the model processes only one step.
    It is usually separated into an encoder and a search-with-decoder part, and is
    exported and loaded as encoder and search modules.
Args:
path: path to export or load generator
"""
def __init__(self,
batch_size,
path=None, ):
super().__init__(path)
self._batch_size = batch_size
env = Environment()
self._maxlen = getattr(env.configs, 'maxlen', 512)
self._model = None
self._src_special_tokens, self._tgt_special_tokens = None, None
self._lightseq_model = None
def build_from_model(self, model, src_special_tokens, tgt_special_tokens):
"""
Build generator from model and search.
Args:
model (paragen.models.EncoderDecoder): an encoder-decoder model to be wrapped
src_special_tokens (dict): source special token dict
tgt_special_tokens (dict): target special token dict
"""
self._model = model
self._src_special_tokens = src_special_tokens
self._tgt_special_tokens = tgt_special_tokens
def forward(self, encoder, decoder, search=None):
"""
Infer a sample as model in evaluation mode.
Compute encoder output first and decode results with search module
Args:
encoder (tuple): encoder inputs
decoder (tuple): decoder inputs
search (tuple): search states
Returns:
decoder_output: results inferred by search algorithm on decoder
"""
src = encoder[0].cpu().numpy()
output, _ = self._lightseq_model.infer(src)
output = torch.from_numpy(output)
output = output[:, 0, :]
return output
def export(self,
path,
net_input,
lang='en',
**kwargs):
"""
Export self to `path` by export model directly
Args:
path: path to store serialized model
net_input: fake net_input for tracing the model
lang: language
**kwargs:
- beam_size: beam search size
- lenpen: length penalty
- extra_decode_length: maximum_generation_length = min(src_length + extra_decode_length, max_step)
- generation_method: generation method
- topk: top-k candidates
            - topp: top-p sampling threshold
- diverse_lambda: lambda in diverse
"""
assert self._model.encoder._normalize_before and self._model.decoder._normalize_before, 'only pre-norm arch can be exported by LightSeq'
from .transformer_pb2 import Transformer
transformer = Transformer()
encoder_state_dict, decoder_state_dict = self._extract_weight()
self._fill_weight(transformer, encoder_state_dict, decoder_state_dict, lang=lang)
self._fill_in_conf(transformer, self._model.encoder._n_head, **kwargs)
self._write(transformer, path)
def _fill_weight(self, transformer, encoder_state_dict, decoder_state_dict, lang='en'):
dec_var_name_list = list(decoder_state_dict.keys())
enc_var_name_list = list(encoder_state_dict.keys())
# fill each encoder layer's params
enc_tensor_names = {}
for name in enc_var_name_list:
name_split = name.split(".")
if len(name_split) <= 2 or not name_split[2].isdigit():
continue
layer_id = int(name_split[2])
enc_tensor_names.setdefault(layer_id, []).append(name)
for layer_id in sorted(enc_tensor_names.keys()):
fill_layer(
enc_tensor_names[layer_id],
encoder_state_dict,
transformer.encoder_stack.add(),
enc_layer_mapping_dict,
)
# fill each decoder layer's params
dec_tensor_names = {}
for name in dec_var_name_list:
name_split = name.split(".")
if len(name_split) <= 2 or not name.split(".")[2].isdigit():
continue
layer_id = int(name.split(".")[2])
dec_tensor_names.setdefault(layer_id, []).append(name)
for layer_id in sorted(dec_tensor_names.keys()):
fill_layer(
dec_tensor_names[layer_id],
decoder_state_dict,
transformer.decoder_stack.add(),
dec_layer_mapping_dict,
)
# fill src_embedding
fill_layer(
enc_var_name_list,
encoder_state_dict,
transformer.src_embedding,
src_emb_mapping_dict,
)
src_tb = _gather_token_embedding(
enc_var_name_list, encoder_state_dict, "_embed"
)
transformer.src_embedding.token_embedding[:] = src_tb.flatten().tolist()
pos_emb = _get_position_encoding(length=self._maxlen, hidden_size=src_tb.shape[-1])
pos_emb_list = pos_emb.numpy().reshape([-1]).tolist()
transformer.src_embedding.position_embedding[:] = pos_emb_list
logger.info(
"model.encoder.embed_positions.weight -> src_embedding.position_embedding, shape: {}, conversion finished!".format(
(pos_emb.shape)
)
)
# fill trg_embedding
encode_output_mapping_dict = _get_encode_output_mapping_dict(len(dec_tensor_names))
trg_emb_mapping_dict.update(encode_output_mapping_dict)
fill_layer(
dec_var_name_list,
decoder_state_dict,
transformer.trg_embedding,
trg_emb_mapping_dict,
)
# assert lang in LANG2ID
trg_tb = _gather_token_embedding(
dec_var_name_list, decoder_state_dict, "_embed", lang=lang
)
transformer.trg_embedding.token_embedding[:] = trg_tb.transpose().flatten().tolist()
logger.info(
"token_embedding.weight -> trg_embedding.token_embedding, shape: {}, conversion finished!".format(
trg_tb.transpose().shape
)
)
pos_emb = _get_position_encoding(length=self._maxlen, hidden_size=trg_tb.shape[-1])
pos_emb_list = pos_emb.numpy().reshape([-1]).tolist()
transformer.trg_embedding.position_embedding[:] = pos_emb_list
logger.info(
"model.decoder.embed_positions.weight -> trg_embedding.position_embedding, shape: {}, conversion finished!".format(
(pos_emb.shape)
)
)
def _extract_weight(self):
reloaded = self._model.state_dict()
encoder_state_dict = {}
decoder_state_dict = {}
for k in reloaded:
if k.startswith("_encoder."):
encoder_state_dict[k] = reloaded[k]
if k.startswith("_decoder."):
decoder_state_dict[k] = reloaded[k]
decoder_state_dict = split_qkv(decoder_state_dict)
decoder_state_dict['_decoder.shared_bias'] = decoder_state_dict.pop('_decoder._out_proj_bias')
return encoder_state_dict, decoder_state_dict
def _fill_in_conf(self,
transformer,
nhead,
beam_size=4,
length_penalty=0.6,
extra_decode_length=50,
generation_method='beam_search',
topk=1,
topp=0.75,
diverse_lambda=0.,):
# fill in conf to transformer
transformer.model_conf.head_num = nhead
transformer.model_conf.beam_size = beam_size
transformer.model_conf.length_penalty = length_penalty
transformer.model_conf.extra_decode_length = extra_decode_length
transformer.model_conf.src_padding_id = self._src_special_tokens['pad']
transformer.model_conf.trg_start_id = self._tgt_special_tokens['bos']
transformer.model_conf.trg_end_id = self._tgt_special_tokens['eos']
transformer.model_conf.sampling_method = generation_method
transformer.model_conf.topk = topk
transformer.model_conf.topp = topp
transformer.model_conf.diverse_lambda = diverse_lambda
transformer.model_conf.is_post_ln = False
transformer.model_conf.no_scale_embedding = False
transformer.model_conf.use_gelu = False
def _write(self, transformer, path):
logger.info("Writing to {0}".format(path))
try:
with tf.io.gfile.GFile(path, "wb") as fout:
fout.write(transformer.SerializeToString())
except Exception:
logger.info('Saving PB fails. Save HDF5 instead!')
remove(path)
path = path.replace('pb', 'hdf5')
import h5py
f = h5py.File(path, "w")
save_bart_proto_to_hdf5(transformer, f)
f.close()
def load(self):
"""
Load generator from path
"""
import lightseq.inference as lsi
self._lightseq_model = lsi.Transformer(self._path, self._batch_size)
""" key是proto参数的值,value是一个强大的表达式,每个&&分割tensor name的匹配路径或表达式,每个匹配
路径的子pattern用空格分隔,表达式用expression_开头,可以对每个tensor进行单独操作,支持多个表达式。多个匹配路径
和表达式最后会concat,axis=-1 """
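# Illustrative walk-through of one rule (added for clarity, not part of the
# original mapping). For
#   "multihead_project_kernel_output": "self_attn.out_proj.weight&&expression_.transpose(0, 1)"
# fill_layer() below splits the value on "&&", keeps "self_attn.out_proj.weight"
# as the match path (a tensor name must end with it, see check_rule), applies the
# "expression_" suffix to every matched tensor via exec ("ele.transpose(0, 1)"),
# concatenates the results along axis=-1, and writes the flattened list into the
# proto field named by the key.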
enc_layer_mapping_dict = OrderedDict(
{
"multihead_norm_scale": "self_attn_norm.weight",
"multihead_norm_bias": "self_attn_norm.bias",
"multihead_project_kernel_qkv": "self_attn.in_proj_weight&&expression_.transpose(0, 1)",
"multihead_project_bias_qkv": "self_attn.in_proj_bias",
"multihead_project_kernel_output": "self_attn.out_proj.weight&&expression_.transpose(0, 1)",
"multihead_project_bias_output": "self_attn.out_proj.bias",
"ffn_norm_scale": "ffn_norm.weight",
"ffn_norm_bias": "ffn_norm.bias",
"ffn_first_kernel": "ffn._fc1.weight&&expression_.transpose(0, 1)",
"ffn_first_bias": "ffn._fc1.bias",
"ffn_second_kernel": "ffn._fc2.weight&&expression_.transpose(0, 1)",
"ffn_second_bias": "ffn._fc2.bias",
}
)
dec_layer_mapping_dict = OrderedDict(
{
"self_norm_scale": "self_attn_norm.weight",
"self_norm_bias": "self_attn_norm.bias",
"self_project_kernel_qkv": "self_attn.in_proj_weight&&expression_.transpose(0, 1)",
"self_project_bias_qkv": "self_attn.in_proj_bias",
"self_project_kernel_output": "self_attn.out_proj.weight&&expression_.transpose(0, 1)",
"self_project_bias_output": "self_attn.out_proj.bias",
"encdec_norm_scale": "multihead_attn_norm.weight",
"encdec_norm_bias": "multihead_attn_norm.bias",
"encdec_project_kernel_q": "multihead_attn.q_proj_weight&&expression_.transpose(0, 1)",
"encdec_project_bias_q": "multihead_attn.q_proj_bias",
"encdec_project_kernel_output": "multihead_attn.out_proj.weight&&expression_.transpose(0, 1)",
"encdec_project_bias_output": "multihead_attn.out_proj.bias",
"ffn_norm_scale": "ffn_norm.weight",
"ffn_norm_bias": "ffn_norm.bias",
"ffn_first_kernel": "ffn._fc1.weight&&expression_.transpose(0, 1)",
"ffn_first_bias": "ffn._fc1.bias",
"ffn_second_kernel": "ffn._fc2.weight&&expression_.transpose(0, 1)",
"ffn_second_bias": "ffn._fc2.bias",
}
)
src_emb_mapping_dict = OrderedDict(
{
"norm_scale": "_norm.weight",
"norm_bias": "_norm.bias",
}
)
trg_emb_mapping_dict = OrderedDict(
{
"norm_scale": "_norm.weight",
"norm_bias": "_norm.bias",
"shared_bias": "shared_bias",
}
)
def check_rule(tensor_name, rule):
if "Adam" in tensor_name or "adam" in tensor_name:
return False
assert isinstance(rule, str) and rule
r_size = len(rule.split('.'))
t = tensor_name.split('.')
if len(t) < r_size:
return False
return rule == '.'.join(t[-r_size:])
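# Example (hypothetical tensor name, for illustration only):
# check_rule("_decoder._layers.0.self_attn_norm.weight", "self_attn_norm.weight")
# compares only the trailing dot-separated components and returns True, while
# any name containing "Adam"/"adam" (optimizer state) is rejected up front.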
def fill_layer(tensor_names, state_dict, layer, mapping_dict):
for proto_name, ckpt_rule in mapping_dict.items():
expression = [
ele for ele in ckpt_rule.split("&&") if ele.startswith("expression_")
]
ckpt_rule = [
ele for ele in ckpt_rule.split("&&") if not ele.startswith("expression_")
]
assert (len(ckpt_rule) > 0 and len(expression) < 2) or (
len(ckpt_rule) == 0 and len(expression) > 0
)
if len(expression) < 2:
expression = "" if not expression else expression[0].split("_")[1]
else:
expression = [exp.split("_")[1] for exp in expression]
target_tn = []
for cr in ckpt_rule:
tmp = []
for tn in tensor_names:
if check_rule(tn, cr):
tmp.append(tn)
if len(tmp) != 1:
logger.info(f'{tmp} {cr}')
assert len(tmp) == 1
target_tn.extend(tmp)
target_tensor = [state_dict[name] for name in target_tn]
tt = {}
if target_tensor:
exec("tt['save'] = [ele%s for ele in target_tensor]" % expression)
else:
if not isinstance(expression, list):
expression = [expression]
exec("tt['save'] = [%s]" % ",".join(expression))
target_tensor = np.concatenate(tt["save"], axis=-1)
logger.info(
"%s -> %s, shape: %s, convert finished."
% (target_tn if target_tn else "created", proto_name, target_tensor.shape)
)
exec("layer.%s[:]=target_tensor.flatten().tolist()" % proto_name)
def _get_encode_output_mapping_dict(dec_layer_num):
encode_output_kernel_pattern = [
"{0}.multihead_attn.k_proj_weight&&{0}.multihead_attn.v_proj_weight".format(ele)
for ele in range(dec_layer_num)
]
encode_output_bias_pattern = [
"{0}.multihead_attn.k_proj_bias&&{0}.multihead_attn.v_proj_bias".format(ele)
for ele in range(dec_layer_num)
]
return {
"encode_output_project_kernel_kv": "&&".join(
encode_output_kernel_pattern + ["expression_.transpose(0, 1)"]
),
"encode_output_project_bias_kv": "&&".join(encode_output_bias_pattern),
}
def _get_position_encoding(length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
"""Return positional encoding.
Calculates the position encoding as a mix of sine and cosine functions with
geometrically increasing wavelengths.
Defined and formalized in "Attention Is All You Need", section 3.5.
Args:
length: Sequence length.
hidden_size: Size of the hidden dimension (the embedding size).
min_timescale: Minimum scale that will be applied at each position
max_timescale: Maximum scale that will be applied at each position
Returns:
Tensor with shape [length, hidden_size]
"""
with tf.device("/cpu:0"):
position = tf.cast(tf.range(length), tf.float32)
num_timescales = hidden_size // 2
log_timescale_increment = math.log(
float(max_timescale) / float(min_timescale)
) / (tf.cast(num_timescales, tf.float32) - 1)
inv_timescales = min_timescale * tf.exp(
tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment
)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.math.sin(scaled_time), tf.math.cos(scaled_time)], axis=1)
return signal
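# In formula form (matching the code above): with N = hidden_size // 2 and
# k in [0, N), the inverse timescales are
#   inv[k] = min_timescale * (min_timescale / max_timescale) ** (k / (N - 1))
# and signal[pos] = concat(sin(pos * inv), cos(pos * inv)), shape [length, hidden_size].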
def _gather_token_embedding(tensor_names, name2var_dict, tn_pattern, lang="en"):
""" use pattern to diff source and target. """
target_tn = []
for tn in tensor_names:
if (tn_pattern in tn.split(".")) and ("weight" in tn.split(".")):
target_tn.append(tn)
continue
target_tensor = [name2var_dict[name] for name in target_tn]
target_tensor = np.concatenate(target_tensor, axis=0)
target_tensor = target_tensor * (target_tensor.shape[1] ** 0.5)
logger.info(
"token embedding shape is %s, scaled by %s"
% (target_tensor.shape, target_tensor.shape[1] ** 0.5))
return target_tensor
def split_qkv(decoder_state_dict):
state_dict = OrderedDict()
for key, val in decoder_state_dict.items():
if 'multihead_attn.in_proj' in key:
dim = val.size(0) // 3
state_dict[key.replace('multihead_attn.in_proj', 'multihead_attn.q_proj')] = val[:dim]
state_dict[key.replace('multihead_attn.in_proj', 'multihead_attn.k_proj')] = val[dim:dim * 2]
state_dict[key.replace('multihead_attn.in_proj', 'multihead_attn.v_proj')] = val[dim * 2:]
else:
state_dict[key] = val
return state_dict
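# Example: a fused in_proj weight of shape (3 * d_model, d_model) is split
# row-wise into q/k/v blocks of shape (d_model, d_model); biases of shape
# (3 * d_model,) split the same way. This matches the layout PyTorch's
# MultiheadAttention uses for its fused in_proj parameters.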
def save_bart_proto_to_hdf5(transformer, f):
"""Convert bart protobuf to hdf5 format to support larger weight."""
MODEL_CONF_KEYS = [
# model_conf
"head_num",
"beam_size",
"extra_decode_length",
"length_penalty",
"src_padding_id",
"trg_start_id",
"diverse_lambda",
"sampling_method",
"topp",
"topk",
"trg_end_id",
"is_post_ln",
"no_scale_embedding",
"use_gelu",
"is_multilingual",
]
EMBEDDING_KEYS = [
# src_embedding
# trg_embedding
"token_embedding",
"position_embedding",
"norm_scale",
"norm_bias",
"encode_output_project_kernel_kv",
"encode_output_project_bias_kv",
"shared_bias",
"lang_emb",
"trg_vocab_mask",
]
ENCODER_LAYER_KEYS = [
# encoder_stack/{i}
"multihead_norm_scale",
"multihead_norm_bias",
"multihead_project_kernel_qkv",
"multihead_project_bias_qkv",
"multihead_project_kernel_output",
"multihead_project_bias_output",
"ffn_norm_scale",
"ffn_norm_bias",
"ffn_first_kernel",
"ffn_first_bias",
"ffn_second_kernel",
"ffn_second_bias",
]
DECODER_LAYER_KEYS = [
# decoder_stack/{i}
"self_norm_scale",
"self_norm_bias",
"self_project_kernel_qkv",
"self_project_bias_qkv",
"self_project_kernel_output",
"self_project_bias_output",
"encdec_norm_scale",
"encdec_norm_bias",
"encdec_project_kernel_q",
"encdec_project_bias_q",
"encdec_project_kernel_output",
"encdec_project_bias_output",
"ffn_norm_scale",
"ffn_norm_bias",
"ffn_first_kernel",
"ffn_first_bias",
"ffn_second_kernel",
"ffn_second_bias",
]
base_attr_to_keys = {
"src_embedding": EMBEDDING_KEYS,
"trg_embedding": EMBEDDING_KEYS,
"model_conf": MODEL_CONF_KEYS,
}
from operator import attrgetter
logger.info(f"start converting protobuf to hdf5 format.")
# load src_embedding, trg_embedding, model_conf
for base_attr, keys in base_attr_to_keys.items():
for key in keys:
hdf5_key = f"{base_attr}/{key}"
proto_attr = f"{base_attr}.{key}"
if key not in dir(attrgetter(base_attr)(transformer)):
logger.info(f"key {key} not found in {base_attr}, skipping")
continue
logger.info(f"loading transformer {proto_attr} -> {hdf5_key}")
_data = attrgetter(proto_attr)(transformer)
if type(_data) is str:
logger.info(
f"find type str, explicitly convert string to ascii encoded array."
)
# explict convert to array of char (int8) to avoid issues on string reading in C
_data = np.array([ord(c) for c in _data]).astype(np.int8)
f.create_dataset(hdf5_key, data=_data)
# save number of layers metadata
f.create_dataset("model_conf/n_encoder_stack", data=len(transformer.encoder_stack))
f.create_dataset("model_conf/n_decoder_stack", data=len(transformer.decoder_stack))
# load encoder_stack
for layer_id, layer in enumerate(transformer.encoder_stack):
for key in ENCODER_LAYER_KEYS:
hdf5_key = f"encoder_stack/{layer_id}/{key}"
proto_attr = key
logger.info(f"loading transformer.encoder_stack {proto_attr} -> {hdf5_key}")
f.create_dataset(hdf5_key, data=attrgetter(proto_attr)(layer))
# load decoder_stack
for layer_id, layer in enumerate(transformer.decoder_stack):
for key in DECODER_LAYER_KEYS:
hdf5_key = f"decoder_stack/{layer_id}/{key}"
proto_attr = key
logger.info(f"loading transformer.decoder_stack {proto_attr} -> {hdf5_key}")
f.create_dataset(hdf5_key, data=attrgetter(proto_attr)(layer))
logger.info(f"proto to hdf5 conversion completed.")
| [
"logging.getLogger",
"paragen.utils.runtime.Environment",
"collections.OrderedDict",
"tensorflow.device",
"tensorflow.io.gfile.GFile",
"operator.attrgetter",
"tensorflow.math.cos",
"tensorflow.math.sin",
"torch.from_numpy",
"h5py.File",
"tensorflow.range",
"numpy.concatenate",
"paragen.utils... | [((72, 99), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (89, 99), False, 'import logging\n'), ((9774, 10507), 'collections.OrderedDict', 'OrderedDict', (["{'multihead_norm_scale': 'self_attn_norm.weight', 'multihead_norm_bias':\n 'self_attn_norm.bias', 'multihead_project_kernel_qkv':\n 'self_attn.in_proj_weight&&expression_.transpose(0, 1)',\n 'multihead_project_bias_qkv': 'self_attn.in_proj_bias',\n 'multihead_project_kernel_output':\n 'self_attn.out_proj.weight&&expression_.transpose(0, 1)',\n 'multihead_project_bias_output': 'self_attn.out_proj.bias',\n 'ffn_norm_scale': 'ffn_norm.weight', 'ffn_norm_bias': 'ffn_norm.bias',\n 'ffn_first_kernel': 'ffn._fc1.weight&&expression_.transpose(0, 1)',\n 'ffn_first_bias': 'ffn._fc1.bias', 'ffn_second_kernel':\n 'ffn._fc2.weight&&expression_.transpose(0, 1)', 'ffn_second_bias':\n 'ffn._fc2.bias'}"], {}), "({'multihead_norm_scale': 'self_attn_norm.weight',\n 'multihead_norm_bias': 'self_attn_norm.bias',\n 'multihead_project_kernel_qkv':\n 'self_attn.in_proj_weight&&expression_.transpose(0, 1)',\n 'multihead_project_bias_qkv': 'self_attn.in_proj_bias',\n 'multihead_project_kernel_output':\n 'self_attn.out_proj.weight&&expression_.transpose(0, 1)',\n 'multihead_project_bias_output': 'self_attn.out_proj.bias',\n 'ffn_norm_scale': 'ffn_norm.weight', 'ffn_norm_bias': 'ffn_norm.bias',\n 'ffn_first_kernel': 'ffn._fc1.weight&&expression_.transpose(0, 1)',\n 'ffn_first_bias': 'ffn._fc1.bias', 'ffn_second_kernel':\n 'ffn._fc2.weight&&expression_.transpose(0, 1)', 'ffn_second_bias':\n 'ffn._fc2.bias'})\n", (9785, 10507), False, 'from collections import OrderedDict\n'), ((10595, 11721), 'collections.OrderedDict', 'OrderedDict', (["{'self_norm_scale': 'self_attn_norm.weight', 'self_norm_bias':\n 'self_attn_norm.bias', 'self_project_kernel_qkv':\n 'self_attn.in_proj_weight&&expression_.transpose(0, 1)',\n 'self_project_bias_qkv': 'self_attn.in_proj_bias',\n 'self_project_kernel_output':\n 'self_attn.out_proj.weight&&expression_.transpose(0, 1)',\n 'self_project_bias_output': 'self_attn.out_proj.bias',\n 'encdec_norm_scale': 'multihead_attn_norm.weight', 'encdec_norm_bias':\n 'multihead_attn_norm.bias', 'encdec_project_kernel_q':\n 'multihead_attn.q_proj_weight&&expression_.transpose(0, 1)',\n 'encdec_project_bias_q': 'multihead_attn.q_proj_bias',\n 'encdec_project_kernel_output':\n 'multihead_attn.out_proj.weight&&expression_.transpose(0, 1)',\n 'encdec_project_bias_output': 'multihead_attn.out_proj.bias',\n 'ffn_norm_scale': 'ffn_norm.weight', 'ffn_norm_bias': 'ffn_norm.bias',\n 'ffn_first_kernel': 'ffn._fc1.weight&&expression_.transpose(0, 1)',\n 'ffn_first_bias': 'ffn._fc1.bias', 'ffn_second_kernel':\n 'ffn._fc2.weight&&expression_.transpose(0, 1)', 'ffn_second_bias':\n 'ffn._fc2.bias'}"], {}), "({'self_norm_scale': 'self_attn_norm.weight', 'self_norm_bias':\n 'self_attn_norm.bias', 'self_project_kernel_qkv':\n 'self_attn.in_proj_weight&&expression_.transpose(0, 1)',\n 'self_project_bias_qkv': 'self_attn.in_proj_bias',\n 'self_project_kernel_output':\n 'self_attn.out_proj.weight&&expression_.transpose(0, 1)',\n 'self_project_bias_output': 'self_attn.out_proj.bias',\n 'encdec_norm_scale': 'multihead_attn_norm.weight', 'encdec_norm_bias':\n 'multihead_attn_norm.bias', 'encdec_project_kernel_q':\n 'multihead_attn.q_proj_weight&&expression_.transpose(0, 1)',\n 'encdec_project_bias_q': 'multihead_attn.q_proj_bias',\n 'encdec_project_kernel_output':\n 'multihead_attn.out_proj.weight&&expression_.transpose(0, 
1)',\n 'encdec_project_bias_output': 'multihead_attn.out_proj.bias',\n 'ffn_norm_scale': 'ffn_norm.weight', 'ffn_norm_bias': 'ffn_norm.bias',\n 'ffn_first_kernel': 'ffn._fc1.weight&&expression_.transpose(0, 1)',\n 'ffn_first_bias': 'ffn._fc1.bias', 'ffn_second_kernel':\n 'ffn._fc2.weight&&expression_.transpose(0, 1)', 'ffn_second_bias':\n 'ffn._fc2.bias'})\n", (10606, 11721), False, 'from collections import OrderedDict\n'), ((11831, 11901), 'collections.OrderedDict', 'OrderedDict', (["{'norm_scale': '_norm.weight', 'norm_bias': '_norm.bias'}"], {}), "({'norm_scale': '_norm.weight', 'norm_bias': '_norm.bias'})\n", (11842, 11901), False, 'from collections import OrderedDict\n'), ((11955, 12059), 'collections.OrderedDict', 'OrderedDict', (["{'norm_scale': '_norm.weight', 'norm_bias': '_norm.bias', 'shared_bias':\n 'shared_bias'}"], {}), "({'norm_scale': '_norm.weight', 'norm_bias': '_norm.bias',\n 'shared_bias': 'shared_bias'})\n", (11966, 12059), False, 'from collections import OrderedDict\n'), ((16315, 16352), 'numpy.concatenate', 'np.concatenate', (['target_tensor'], {'axis': '(0)'}), '(target_tensor, axis=0)\n', (16329, 16352), True, 'import numpy as np\n'), ((16710, 16723), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16721, 16723), False, 'from collections import OrderedDict\n'), ((902, 915), 'paragen.utils.runtime.Environment', 'Environment', ([], {}), '()\n', (913, 915), False, 'from paragen.utils.runtime import Environment\n'), ((2186, 2210), 'torch.from_numpy', 'torch.from_numpy', (['output'], {}), '(output)\n', (2202, 2210), False, 'import torch\n'), ((9542, 9587), 'lightseq.inference.Transformer', 'lsi.Transformer', (['self._path', 'self._batch_size'], {}), '(self._path, self._batch_size)\n', (9557, 9587), True, 'import lightseq.inference as lsi\n'), ((13783, 13818), 'numpy.concatenate', 'np.concatenate', (["tt['save']"], {'axis': '(-1)'}), "(tt['save'], axis=-1)\n", (13797, 13818), True, 'import numpy as np\n'), ((15311, 15330), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (15320, 15330), True, 'import tensorflow as tf\n'), ((15359, 15375), 'tensorflow.range', 'tf.range', (['length'], {}), '(length)\n', (15367, 15375), True, 'import tensorflow as tf\n'), ((15751, 15778), 'tensorflow.expand_dims', 'tf.expand_dims', (['position', '(1)'], {}), '(position, 1)\n', (15765, 15778), True, 'import tensorflow as tf\n'), ((15781, 15814), 'tensorflow.expand_dims', 'tf.expand_dims', (['inv_timescales', '(0)'], {}), '(inv_timescales, 0)\n', (15795, 15814), True, 'import tensorflow as tf\n'), ((8998, 9027), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['path', '"""wb"""'], {}), "(path, 'wb')\n", (9015, 9027), True, 'import tensorflow as tf\n'), ((9198, 9210), 'paragen.utils.io.remove', 'remove', (['path'], {}), '(path)\n', (9204, 9210), False, 'from paragen.utils.io import remove\n'), ((9297, 9317), 'h5py.File', 'h5py.File', (['path', '"""w"""'], {}), "(path, 'w')\n", (9306, 9317), False, 'import h5py\n'), ((15544, 15579), 'tensorflow.cast', 'tf.cast', (['num_timescales', 'tf.float32'], {}), '(num_timescales, tf.float32)\n', (15551, 15579), True, 'import tensorflow as tf\n'), ((15843, 15867), 'tensorflow.math.sin', 'tf.math.sin', (['scaled_time'], {}), '(scaled_time)\n', (15854, 15867), True, 'import tensorflow as tf\n'), ((15869, 15893), 'tensorflow.math.cos', 'tf.math.cos', (['scaled_time'], {}), '(scaled_time)\n', (15880, 15893), True, 'import tensorflow as tf\n'), ((19894, 19916), 'operator.attrgetter', 'attrgetter', (['proto_attr'], {}), 
'(proto_attr)\n', (19904, 19916), False, 'from operator import attrgetter\n'), ((15654, 15678), 'tensorflow.range', 'tf.range', (['num_timescales'], {}), '(num_timescales)\n', (15662, 15678), True, 'import tensorflow as tf\n'), ((19659, 19680), 'operator.attrgetter', 'attrgetter', (['base_attr'], {}), '(base_attr)\n', (19669, 19680), False, 'from operator import attrgetter\n'), ((20885, 20907), 'operator.attrgetter', 'attrgetter', (['proto_attr'], {}), '(proto_attr)\n', (20895, 20907), False, 'from operator import attrgetter\n'), ((21265, 21287), 'operator.attrgetter', 'attrgetter', (['proto_attr'], {}), '(proto_attr)\n', (21275, 21287), False, 'from operator import attrgetter\n')] |
"""
test_fitting_tanh.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Mon May 12 14:19:33 MDT 2014
Description: Can run this in parallel.
"""
import time, ares
import numpy as np
import matplotlib.pyplot as pl
# These go to every calculation
base_pars = \
{
'problem_type': 101,
'tanh_model': True,
'blob_names': [['tau_e', 'z_B', 'z_C', 'z_D', 'igm_dTb_C', 'igm_dTb_D'],
['cgm_h_2', 'igm_Tk', 'igm_dTb']],
'blob_ivars': [None, np.arange(6, 21)],
'blob_funcs': None,
}
# Initialize fitter
fitter = ares.inference.FitGlobal21cm(**base_pars)
fitter.turning_points = list('BCD')
# Assume default parameters
fitter.data = base_pars
# Set axes of parameter space
fitter.parameters = ['tanh_xz0', 'tanh_xdz', 'tanh_Tz0', 'tanh_Tdz']
fitter.is_log = [False]*4
# Set priors on model parameters (uninformative)
ps = ares.inference.PriorSet()
ps.add_prior(ares.inference.Priors.UniformPrior(5., 20.), 'tanh_xz0')
ps.add_prior(ares.inference.Priors.UniformPrior(0.1, 20.), 'tanh_xdz')
ps.add_prior(ares.inference.Priors.UniformPrior(5., 20.), 'tanh_Tz0')
ps.add_prior(ares.inference.Priors.UniformPrior(0.1, 20.), 'tanh_Tdz')
ps.add_prior(ares.inference.Priors.GaussianPrior(0.066, 0.012), 'tau_e')
fitter.prior_set = ps
# Set errors
fitter.error = {tp:[1.0, 5.] for tp in list('BCD')}
fitter.nwalkers = 128
# Run it!
t1 = time.time()
fitter.run(prefix='test_tanh_extrema', burn=10, steps=50, clobber=True,
save_freq=10)
t2 = time.time()
print("Run complete in {:.4g} minutes.\n".format((t2 - t1) / 60.))
| [
"ares.inference.Priors.UniformPrior",
"ares.inference.PriorSet",
"ares.inference.FitGlobal21cm",
"ares.inference.Priors.GaussianPrior",
"time.time",
"numpy.arange"
] | [((544, 585), 'ares.inference.FitGlobal21cm', 'ares.inference.FitGlobal21cm', ([], {}), '(**base_pars)\n', (572, 585), False, 'import time, ares\n'), ((857, 882), 'ares.inference.PriorSet', 'ares.inference.PriorSet', ([], {}), '()\n', (880, 882), False, 'import time, ares\n'), ((1365, 1376), 'time.time', 'time.time', ([], {}), '()\n', (1374, 1376), False, 'import time, ares\n'), ((1473, 1484), 'time.time', 'time.time', ([], {}), '()\n', (1482, 1484), False, 'import time, ares\n'), ((896, 941), 'ares.inference.Priors.UniformPrior', 'ares.inference.Priors.UniformPrior', (['(5.0)', '(20.0)'], {}), '(5.0, 20.0)\n', (930, 941), False, 'import time, ares\n'), ((966, 1011), 'ares.inference.Priors.UniformPrior', 'ares.inference.Priors.UniformPrior', (['(0.1)', '(20.0)'], {}), '(0.1, 20.0)\n', (1000, 1011), False, 'import time, ares\n'), ((1037, 1082), 'ares.inference.Priors.UniformPrior', 'ares.inference.Priors.UniformPrior', (['(5.0)', '(20.0)'], {}), '(5.0, 20.0)\n', (1071, 1082), False, 'import time, ares\n'), ((1107, 1152), 'ares.inference.Priors.UniformPrior', 'ares.inference.Priors.UniformPrior', (['(0.1)', '(20.0)'], {}), '(0.1, 20.0)\n', (1141, 1152), False, 'import time, ares\n'), ((1178, 1227), 'ares.inference.Priors.GaussianPrior', 'ares.inference.Priors.GaussianPrior', (['(0.066)', '(0.012)'], {}), '(0.066, 0.012)\n', (1213, 1227), False, 'import time, ares\n'), ((472, 488), 'numpy.arange', 'np.arange', (['(6)', '(21)'], {}), '(6, 21)\n', (481, 488), True, 'import numpy as np\n')] |
#-*- coding:utf-8 -*-
"""
GLSZM
Copyright (c) 2016 <NAME>
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
Date: 2016/01/31
"""
import numpy as np
from matplotlib import pyplot as plt
from TextureAnalysis.Utils import normalize
from scipy.ndimage import measurements
class GLSZM:
"""
Gray Level Size Zone Matrix
"""
def __init__(self, img, level_min=1, level_max=256, threshold=None):
"""
initialize
:param img: normalized image
:param level_min: min intensity of normalized image
:param level_max: max intensity of normalized image
:param threshold: threshold of the minimal value
"""
assert len(img.shape) == 2, 'image must be 2D'
self.img, self.slope, self.intercept = \
normalize(img, level_min, level_max, threshold)
self.n_level = (level_max - level_min) + 1
self.level_min = level_min
self.level_max = level_max
self.matrix, self.zone_sizes = self._construct_matrix()
self.features = self._calc_features()
def _calc_features(self):
"""
calculate feature values
:return: feature values
"""
features ={}
mat = self.matrix
zone_sizes = self.zone_sizes
omega = mat.flatten().sum()
min_size = zone_sizes.min()
max_size = zone_sizes.max()
j = np.array(range(min_size, max_size+1))[np.newaxis, :]
j = np.vstack((j,)*mat.shape[0])
i = np.array(range(self.level_min, self.level_max+1))[:, np.newaxis]
i = np.hstack((i,)*mat.shape[1])
small_area_emp = (mat / (j**2)).sum() / omega
large_area_emp = (mat * (j**2)).sum() / omega
low_intensity_emp = (mat / (i**2)).sum() / omega
high_intensity_emp = (mat * (i**2)).sum() / omega
intensity_variability = ((mat / (i**2)).sum(axis=1) ** 2).sum() / omega
size_zone_variability = ((mat / (j**2)).sum(axis=0) ** 2).sum() / omega  # note: the standard SZN definition omits the 1/j**2 factor
zone_percentage = omega / (mat * (j**2)).sum()
low_intensity_small_area_emp = (mat / (i**2) / (j**2)).sum() / omega
high_intensity_small_area_emp = (mat * (i**2) * (j**2)).sum() / omega
low_intensity_large_area_emp = (mat * (j**2) / (i**2)).sum() / omega
high_intensity_large_area_emp = (mat * (i**2) / (j**2)).sum() / omega
features['small_area_emp'] = small_area_emp
features['large_area_emp'] = large_area_emp
features['low_intensity_emp'] = low_intensity_emp
features['high_intensity_emp'] = high_intensity_emp
features['intensity_variability'] = intensity_variability
features['size_zone_variability'] = size_zone_variability
features['zone_percentage'] = zone_percentage
features['low_intensity_small_area_emp'] = low_intensity_small_area_emp
features['high_intensity_small_area_emp'] = high_intensity_small_area_emp
features['low_intensity_large_area_emp'] = low_intensity_large_area_emp
features['high_intensity_large_area_emp'] = high_intensity_large_area_emp
return features
def _construct_matrix(self):
"""
construct GLSZ-Matrix
:return: GLSZ-Matrix
"""
s = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
elements = []
for i in range(self.level_min, self.level_max+1):
assert i >= 0, 'level must be a positive value or 0.'
tmp_img = np.array(self.img)
tmp_img = (tmp_img == i)
labeled_array, num_features = measurements.label(tmp_img,
structure=s)
for label in range(1, num_features+1):
size = (labeled_array.flatten() == label).sum()
elements.append([i, size])
elements = np.array(elements)
min_element_size = elements[:, 1].min()
rows = (self.level_max - self.level_min) + 1
cols = elements[:, 1].max() - min_element_size + 1
mat = np.zeros((rows, cols), dtype=float)  # np.float is deprecated/removed in recent NumPy
zone_sizes = np.unique(elements[:, 1])
for element in elements:
# shift the gray level by level_min so row indices start at 0 (the original indexing went out of bounds at level_max)
mat[element[0]-self.level_min, element[1]-min_element_size] += 1
return mat, zone_sizes
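# Usage sketch (hypothetical 2-D grayscale array with 8 discretized levels):
#   img = np.random.randint(0, 8, size=(64, 64))
#   glszm = GLSZM(img, level_min=1, level_max=8)
#   print(glszm.features['small_area_emp'])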
if __name__ == '__main__':
pass | [
"numpy.unique",
"numpy.hstack",
"scipy.ndimage.measurements.label",
"TextureAnalysis.Utils.normalize",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
] | [((856, 903), 'TextureAnalysis.Utils.normalize', 'normalize', (['img', 'level_min', 'level_max', 'threshold'], {}), '(img, level_min, level_max, threshold)\n', (865, 903), False, 'from TextureAnalysis.Utils import normalize\n'), ((1528, 1558), 'numpy.vstack', 'np.vstack', (['((j,) * mat.shape[0])'], {}), '((j,) * mat.shape[0])\n', (1537, 1558), True, 'import numpy as np\n'), ((1646, 1676), 'numpy.hstack', 'np.hstack', (['((i,) * mat.shape[1])'], {}), '((i,) * mat.shape[1])\n', (1655, 1676), True, 'import numpy as np\n'), ((3895, 3913), 'numpy.array', 'np.array', (['elements'], {}), '(elements)\n', (3903, 3913), True, 'import numpy as np\n'), ((4088, 4126), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {'dtype': 'np.float'}), '((rows, cols), dtype=np.float)\n', (4096, 4126), True, 'import numpy as np\n'), ((4148, 4173), 'numpy.unique', 'np.unique', (['elements[:, 1]'], {}), '(elements[:, 1])\n', (4157, 4173), True, 'import numpy as np\n'), ((3517, 3535), 'numpy.array', 'np.array', (['self.img'], {}), '(self.img)\n', (3525, 3535), True, 'import numpy as np\n'), ((3615, 3655), 'scipy.ndimage.measurements.label', 'measurements.label', (['tmp_img'], {'structure': 's'}), '(tmp_img, structure=s)\n', (3633, 3655), False, 'from scipy.ndimage import measurements\n')] |
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate
import tecplot as tp
import wake_config as conf
import matplotlib.ticker as ticker
width = 3
#from helpers.setup_plot import *
#plot_style = 'THESIS'
#line_style, markers = setup_plot('THESIS', width, width*0.8)
def lumley_map():
# right: axisymmetric boundary (III > 0)
Jx1=np.linspace(0,2./9.,300)
Jy1=(3./2.) * (Jx1 *(4./3.))**(2./3.)
# left: axisymmetric boundary (III < 0)
Jx2=np.linspace(0,-1./36.,50)
Jy2=3./2. * (-Jx2 *4.0/3.0)**(2./3.)
# top: two-component turbulence limit
Jx3=np.linspace(-1./36.,2./9.,300)
Jy3=2./9. + Jx3*2
return Jx1, Jy1, np.flipud(Jx2), np.flipud(Jy2), Jx3, Jy3
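# The three branches above bound the Lumley (anisotropy-invariant) triangle:
# the right/left branches are the axisymmetric states II = (3/2) * (4/3 * |III|)**(2/3)
# (expansion for III > 0, contraction for III < 0, matching the barycentric
# labels used below), and the top line II = 2/9 + 2 * III is the
# two-component-turbulence limit.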
def draw_map(x, z, xlin, zlin, invar2, invar3):
#xi, zi = np.linspace(x0, x0, num), np.linspace(z0, z1, num)
I2 = scipy.interpolate.griddata((x, z), invar2, (xlin, zlin), method='linear')
I3 = scipy.interpolate.griddata((x, z), invar3, (xlin, zlin), method='linear')
print('line x is from ' + str(xlin[0]) + ' to ' + str(xlin[-1]))
Jx1, Jy1, Jx2, Jy2, Jx3, Jy3 = lumley_map()
width = 3
#line_style, markers = setup_plot('THESIS', width, width*0.8)  # setup_plot import is commented out above; calling it would raise NameError
fig, ax = plt.subplots(1,1)
plt.plot(Jx1, Jy1, Jx2, Jy2, Jx3, Jy3, color='k')
pcm = plt.scatter(I3, I2, s=2, c = xlin, cmap = plt.cm.viridis)
#cbar = plt.colorbar(pcm, ticks=[1.05, 1.65])
#cbar.set_label('$(x-x_{TE})/c_{local}$', labelpad=-5)
#cbar.ax.set_yticklabels(['0', '3'])
adjustprops = dict(left=0.14, bottom=0.2, right=0.97, top=0.97, wspace=0.2, hspace=0.2) # Subplot properties
plt.subplots_adjust(**adjustprops)
plt.xlabel(r'$III_a$', labelpad = 5)
plt.ylabel(r'$II_a$', labelpad=-9)
img_name = 'wake_centerline_anisotropy_lumley.pdf'
plt.savefig(img_name)
print('written ' + img_name)
plt.close()
def draw_barycentric(C, xb, yb, x, z, xlin, zlin):
zlin = np.squeeze(zlin)
do_annotate = False
xbi = scipy.interpolate.griddata((x, z), xb, (xlin, zlin), method='linear')
ybi = scipy.interpolate.griddata((x, z), yb, (xlin, zlin), method='linear')
print(x.shape)
print(xlin.shape)
print(zlin.shape)
print(xbi.shape)
width = 2
fig, ax = plt.subplots(1,1)
#connectpoints()
x1, y1 = [0, 0.5], [0, np.sqrt(3.0)/2.0]
x2, y2 = [0, 1], [0, 0]
x3, y3 = [1, 0.5], [0, np.sqrt(3.0)/2.0]
plt.plot(x1, y1, x2, y2, x3, y3, color='k')
#plt.xlim([0,1])
#plt.ylim([0,1])
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
plt.axis('off')
adjustprops = dict(left=0.05, bottom=0.05, right=0.97, top=0.97, wspace=0.2, hspace=0.2) # Subplot properties
plt.subplots_adjust(**adjustprops)
plt.scatter(xbi, ybi, s=2, c = xlin, cmap=plt.cm.viridis)
if do_annotate:
ax.annotate('upstream', xy=(xbi[0], ybi[0]), xytext=(-20,-20),
textcoords='offset points', ha='center', va='top',
bbox=dict(boxstyle='round,pad=0.2', fc='white', alpha=1),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.5',
color='black'), fontsize=11)
ax.annotate('downstream', xy=(xbi[-1], ybi[-1]), xytext=(-40,40),
textcoords='offset points', ha='center', va='top',
bbox=dict(boxstyle='round,pad=0.2', fc='white', alpha=1),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=-0.5',
color='black'), fontsize=11)
#ax.text(0, 0, r'$x_{2c}$', color='black' , fontsize=12, ha='right', va='top')
#ax.text(x2[1], y2[1], r'$x_{1c}$', color='black' , fontsize=12, ha='left', va='top')
#ax.text(x1[1], y1[1]+0.03, r'$x_{3c}$', color='black' , fontsize=12, ha='center', va='bottom')
ax.text(0, 0, r'$2c$', color='black' , fontsize=12, ha='right', va='top')
ax.text(x2[1], y2[1], r'${1c}$', color='black' , fontsize=12, ha='left', va='top')
ax.text(x1[1], y1[1]+0.03, r'${3c}$', color='black' , fontsize=12, ha='center', va='bottom')
# Rotate angle
angle = 45
#trans_angle = plt.gca().transData.transform_angles(np.array((45,)), l2.reshape((1, 2)))[0]
# Plot text
th1 = plt.text(0.2, np.sqrt(3)/4.0+0.05, 'axisymmetric contraction', fontsize=11, rotation=60, rotation_mode='anchor', ha='center', va='center')
th2 = plt.text(0.8, np.sqrt(3)/4.0+0.05, 'axisymmetric expansion', fontsize=11, rotation=-60, rotation_mode='anchor', ha='center', va='center')
adjustprops = dict(left=0.05, bottom=0.07, right=0.95, top=0.93, wspace=0.2, hspace=0.2) # Subplot properties
plt.subplots_adjust(**adjustprops)
plt.gca().set_aspect('equal')
img_name = 'wake_centerline_anisotropy_barycentric.pdf'
plt.savefig(img_name)
print('written ' + img_name)
plt.close()
| [
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"numpy.flipud",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gca",
"numpy.squeeze",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplotlib.pyplo... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((384, 414), 'numpy.linspace', 'np.linspace', (['(0)', '(2.0 / 9.0)', '(300)'], {}), '(0, 2.0 / 9.0, 300)\n', (395, 414), True, 'import numpy as np\n'), ((485, 516), 'numpy.linspace', 'np.linspace', (['(0)', '(-1.0 / 36.0)', '(50)'], {}), '(0, -1.0 / 36.0, 50)\n', (496, 516), True, 'import numpy as np\n'), ((597, 637), 'numpy.linspace', 'np.linspace', (['(-1.0 / 36.0)', '(2.0 / 9.0)', '(300)'], {}), '(-1.0 / 36.0, 2.0 / 9.0, 300)\n', (608, 637), True, 'import numpy as np\n'), ((1205, 1223), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1217, 1223), True, 'import matplotlib.pyplot as plt\n'), ((1227, 1276), 'matplotlib.pyplot.plot', 'plt.plot', (['Jx1', 'Jy1', 'Jx2', 'Jy2', 'Jx3', 'Jy3'], {'color': '"""k"""'}), "(Jx1, Jy1, Jx2, Jy2, Jx3, Jy3, color='k')\n", (1235, 1276), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1340), 'matplotlib.pyplot.scatter', 'plt.scatter', (['I3', 'I2'], {'s': '(2)', 'c': 'xlin', 'cmap': 'plt.cm.viridis'}), '(I3, I2, s=2, c=xlin, cmap=plt.cm.viridis)\n', (1298, 1340), True, 'import matplotlib.pyplot as plt\n'), ((1738, 1772), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {}), '(**adjustprops)\n', (1757, 1772), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1810), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$III_a$"""'], {'labelpad': '(5)'}), "('$III_a$', labelpad=5)\n", (1787, 1810), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1851), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$II_a$"""'], {'labelpad': '(-9)'}), "('$II_a$', labelpad=-9)\n", (1828, 1851), True, 'import matplotlib.pyplot as plt\n'), ((1913, 1934), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_name'], {}), '(img_name)\n', (1924, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1984), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1982, 1984), True, 'import matplotlib.pyplot as plt\n'), ((2048, 2064), 'numpy.squeeze', 'np.squeeze', (['zlin'], {}), '(zlin)\n', (2058, 2064), True, 'import numpy as np\n'), ((2363, 2381), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2375, 2381), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2563), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'y1', 'x2', 'y2', 'x3', 'y3'], {'color': '"""k"""'}), "(x1, y1, x2, y2, x3, y3, color='k')\n", (2528, 2563), True, 'import matplotlib.pyplot as plt\n'), ((2678, 2693), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2686, 2693), True, 'import matplotlib.pyplot as plt\n'), ((2818, 2852), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {}), '(**adjustprops)\n', (2837, 2852), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2913), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xbi', 'ybi'], {'s': '(2)', 'c': 'xlin', 'cmap': 'plt.cm.viridis'}), '(xbi, ybi, s=2, c=xlin, cmap=plt.cm.viridis)\n', (2869, 2913), True, 'import matplotlib.pyplot as plt\n'), ((4886, 4920), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {}), '(**adjustprops)\n', (4905, 4920), True, 'import matplotlib.pyplot as plt\n'), ((5020, 5041), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_name'], {}), '(img_name)\n', (5031, 5041), True, 'import matplotlib.pyplot as plt\n'), ((5080, 5091), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5089, 5091), True, 'import matplotlib.pyplot as plt\n'), ((671, 685), 
'numpy.flipud', 'np.flipud', (['Jx2'], {}), '(Jx2)\n', (680, 685), True, 'import numpy as np\n'), ((687, 701), 'numpy.flipud', 'np.flipud', (['Jy2'], {}), '(Jy2)\n', (696, 701), True, 'import numpy as np\n'), ((4925, 4934), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4932, 4934), True, 'import matplotlib.pyplot as plt\n'), ((2425, 2437), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (2432, 2437), True, 'import numpy as np\n'), ((2498, 2510), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (2505, 2510), True, 'import numpy as np\n'), ((4486, 4496), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (4493, 4496), True, 'import numpy as np\n'), ((4636, 4646), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (4643, 4646), True, 'import numpy as np\n')] |
"""
ColorMatch RGB Colourspace
==========================
Defines the *ColorMatch RGB* colourspace:
- :attr:`colour.models.RGB_COLOURSPACE_COLOR_MATCH_RGB`.
References
----------
- :cite:`Lindbloom2014a` : <NAME>. (2014). RGB Working Space
Information. Retrieved April 11, 2014, from
http://www.brucelindbloom.com/WorkingSpaceInfo.html
"""
from __future__ import annotations
import numpy as np
from functools import partial
from colour.colorimetry import CCS_ILLUMINANTS
from colour.hints import NDArray
from colour.models.rgb import (
RGB_Colourspace,
gamma_function,
normalised_primary_matrix,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"PRIMARIES_COLOR_MATCH_RGB",
"WHITEPOINT_NAME_COLOR_MATCH_RGB",
"CCS_WHITEPOINT_COLOR_MATCH_RGB",
"MATRIX_COLOR_MATCH_RGB_TO_XYZ",
"MATRIX_XYZ_TO_COLOR_MATCH_RGB",
"RGB_COLOURSPACE_COLOR_MATCH_RGB",
]
PRIMARIES_COLOR_MATCH_RGB: NDArray = np.array(
[
[0.6300, 0.3400],
[0.2950, 0.6050],
[0.1500, 0.0750],
]
)
"""*ColorMatch RGB* colourspace primaries."""
WHITEPOINT_NAME_COLOR_MATCH_RGB: str = "D50"
"""*ColorMatch RGB* colourspace whitepoint name."""
CCS_WHITEPOINT_COLOR_MATCH_RGB: NDArray = CCS_ILLUMINANTS[
"CIE 1931 2 Degree Standard Observer"
][WHITEPOINT_NAME_COLOR_MATCH_RGB]
"""*ColorMatch RGB* colourspace whitepoint chromaticity coordinates."""
MATRIX_COLOR_MATCH_RGB_TO_XYZ: NDArray = normalised_primary_matrix(
PRIMARIES_COLOR_MATCH_RGB, CCS_WHITEPOINT_COLOR_MATCH_RGB
)
"""*ColorMatch RGB* colourspace to *CIE XYZ* tristimulus values matrix."""
MATRIX_XYZ_TO_COLOR_MATCH_RGB: NDArray = np.linalg.inv(
MATRIX_COLOR_MATCH_RGB_TO_XYZ
)
"""*CIE XYZ* tristimulus values to *ColorMatch RGB* colourspace matrix."""
RGB_COLOURSPACE_COLOR_MATCH_RGB: RGB_Colourspace = RGB_Colourspace(
"ColorMatch RGB",
PRIMARIES_COLOR_MATCH_RGB,
CCS_WHITEPOINT_COLOR_MATCH_RGB,
WHITEPOINT_NAME_COLOR_MATCH_RGB,
MATRIX_COLOR_MATCH_RGB_TO_XYZ,
MATRIX_XYZ_TO_COLOR_MATCH_RGB,
partial(gamma_function, exponent=1 / 1.8),
partial(gamma_function, exponent=1.8),
)
RGB_COLOURSPACE_COLOR_MATCH_RGB.__doc__ = """
*ColorMatch RGB* colourspace.
References
----------
:cite:`Lindbloom2014a`
"""
| [
"numpy.array",
"colour.models.rgb.normalised_primary_matrix",
"functools.partial",
"numpy.linalg.inv"
] | [((1153, 1208), 'numpy.array', 'np.array', (['[[0.63, 0.34], [0.295, 0.605], [0.15, 0.075]]'], {}), '([[0.63, 0.34], [0.295, 0.605], [0.15, 0.075]])\n', (1161, 1208), True, 'import numpy as np\n'), ((1650, 1738), 'colour.models.rgb.normalised_primary_matrix', 'normalised_primary_matrix', (['PRIMARIES_COLOR_MATCH_RGB', 'CCS_WHITEPOINT_COLOR_MATCH_RGB'], {}), '(PRIMARIES_COLOR_MATCH_RGB,\n CCS_WHITEPOINT_COLOR_MATCH_RGB)\n', (1675, 1738), False, 'from colour.models.rgb import RGB_Colourspace, gamma_function, normalised_primary_matrix\n'), ((1858, 1902), 'numpy.linalg.inv', 'np.linalg.inv', (['MATRIX_COLOR_MATCH_RGB_TO_XYZ'], {}), '(MATRIX_COLOR_MATCH_RGB_TO_XYZ)\n', (1871, 1902), True, 'import numpy as np\n'), ((2253, 2294), 'functools.partial', 'partial', (['gamma_function'], {'exponent': '(1 / 1.8)'}), '(gamma_function, exponent=1 / 1.8)\n', (2260, 2294), False, 'from functools import partial\n'), ((2300, 2337), 'functools.partial', 'partial', (['gamma_function'], {'exponent': '(1.8)'}), '(gamma_function, exponent=1.8)\n', (2307, 2337), False, 'from functools import partial\n')] |
import networkit as nw
from dwave_qbsolv import QBSolv
import matplotlib.pyplot as plt
import random
from datetime import datetime
import argparse
import time
import numpy as np
import networkx as nx
from qiskit import BasicAer
from qiskit import QuantumCircuit, execute, Aer
from qiskit.optimization.applications.ising import max_cut
from qiskit.optimization.applications.ising.common import sample_most_likely
from qiskit.aqua.algorithms import QAOA
import qiskit.aqua.components.optimizers as optimizers
from qiskit.algorithms.optimizers import L_BFGS_B
from QAOAKit.utils import (
qaoa_maxcut_energy,
)
from QAOAKit.qaoa import get_maxcut_qaoa_circuit
from operator import itemgetter  # needed by qaoa() below
from qiskit.providers.aer import AerSimulator  # needed by qaoa() below (qiskit-aer era matching the ising imports above)
import pyomo.environ as pyo  # needed by pyomo() below
from pyomo.opt import SolverFactory
parser = argparse.ArgumentParser()
parser.add_argument("-gname", type = str, default = "None", help = "graph file")
parser.add_argument("-method", type = str, default = "None", help = "ref method")
parser.add_argument("-timer", type = bool, default = False, help = "print times")
parser.add_argument("-showgain", type = bool, default = False, help = "show gain diff")
parser.add_argument("-spsize", type = int, default = 20, help = "size of subproblems")
parser.add_argument("-inputtype", type = str, default = "file", help = "method of graph")
parser.add_argument("-solver", type = str, default = "qbsolv", help = "qubo slver")
parser.add_argument("-optimizer", type = str, default = "COBYLA", help = "qaoa optimizer")
parser.add_argument("-iteration", type = int, default = 1, help = "max iteration for qaoa optimizer")
parser.add_argument("-p", type = int, default = 1, help = "p value of qaoa")
parser.add_argument("-shots", type = int, default = 1000, help = "number of shots for the optimized qaoa circuit")
parser.add_argument("-gformat", type = str, default = "alist", help = "graph format")
parser.add_argument("-nomultilvl", type = bool, default = False, help = "Use/Dont Use Multilevel")
parser.add_argument("-bpA", type = int, default = 5000, help = "size of bipartite graph")
parser.add_argument("-bpD", type = int, default = 4, help = "degree of bipartite graph")
args = parser.parse_args()
useml = args.nomultilvl
method = args.method
timer = args.timer
gname = "graphs/" + args.gname
bpA = args.bpA
bpD = args.bpD
showgain = args.showgain
spsize = args.spsize
solver = args.solver
optimizer = args.optimizer
iteration = args.iteration
p = args.p
shots = args.shots
inputtype = args.inputtype
gformat = args.gformat
mapGain = {}
S = []
T = []
gainTime = 0
pairTime = 0
buildSpTime = 0
solveTime = 0
qSolves = 0
cSolves = 0
ties = 0
minweight = 0
def readGraph():
f = open(gname, "r")
line = f.readline().split()
n = int(line[0])
G = nw.graph.Graph(n=0,weighted=False, directed=False)
G.addNodes(n)
cn = 0
line = f.readline().split()
while(line != []):
for x in line:
G.addEdge(cn, int(x)-1)
cn += 1
line = f.readline().split()
G.removeMultiEdges()
G.indexEdges()
G.removeSelfLoops()
return G
def readGraphEList():
f = open(gname, "r")
line = f.readline().split()
n = int(line[0])
G = nw.graph.Graph(n=n, weighted=True, directed = False)
line = f.readline().split()
while line != []:
u = int(line[0])-1
v = int(line[1])-1
w = int(line[2])
G.addEdge(u, v, w)
line = f.readline().split()
G.removeMultiEdges()
G.indexEdges()
G.removeSelfLoops()
return G
def get_exact_energy(G, p):
def f(theta):
# let's assume first half is gammas, second half is betas
gamma = theta[:p]
beta = theta[p:]
return -qaoa_maxcut_energy(G, beta, gamma)
return f
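# f packs the 2p QAOA angles as theta = [gamma_1..gamma_p, beta_1..beta_p] and
# returns the negative expected cut value, so a classical minimizer over theta
# maximizes the QAOAKit energy estimate.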
def meshGraph():
G = nw.graph.Graph(n=10000, weighted =False, directed=False)
for i in range(10000):
if i % 100 != 99:
G.addEdge(i, i+1)
if i + 100 < 10000:
G.addEdge(i, i+100)
G.removeMultiEdges()
G.indexEdges()
G.removeSelfLoops()
return G
def meshGraphRand():
G = meshGraph()
for i in range(10000):
for j in range(i+1, 10000):
if not G.hasEdge(i, j):
if random.randint(0, 100) < 10:
G.addEdge(i,j)
return G
def pyomo(G):
opt = SolverFactory('gurobi')
model = pyo.ConcreteModel()
model.n = pyo.Param(default=G.numberOfNodes())
model.x = pyo.Var(pyo.RangeSet(0,model.n-1), within=pyo.Binary)
model.obj = pyo.Objective(expr = 0)
model.c = pyo.Constraint(rule=model.x[2]<=1)
for u,v in G.iterEdges():
w = G.weight(u,v)
model.obj.expr += (2 * w * model.x[u] * model.x[v])
model.obj.expr += (-w * model.x[u]) + (-w * model.x[v])
results = opt.solve(model)
solution = {}
for i in range(G.numberOfNodes()):
solution[i] = model.x[i].value
if solution[i] == None:
solution[i] = 0
return solution
def randSampleSolve(G):
bestobj = 0
bestsol = {}
for _ in range(1024):
sol = {}
for i in G.iterNodes():
sol[i] = random.randint(0, 1)
obj = calc_obj(G, sol)
if obj > bestobj:
bestsol = sol.copy()
bestobj = obj
return bestsol
def build_qubo(G):
Q = {}
n = G.numberOfNodes()
for i in range(n):
for j in range(i,n):
Q[(i,j)] = 0
for i in range(n):
for j in range(i+1, n):
if G.hasEdge(i,j):
weight = G.weight(i, j)
Q[(i, i)] -= weight
Q[(j,j)] -= weight
Q[(i, j)] = 2*weight
return Q
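# Worked example: for a single edge (0, 1) with weight w, the QUBO above is
#   Q = {(0, 0): -w, (1, 1): -w, (0, 1): 2*w}
# so x^T Q x = w*(2*x0*x1 - x0 - x1), which is -w when the endpoints differ
# and 0 otherwise; minimizing the QUBO therefore maximizes the cut.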
def fineSolFromCoarseSol(cSol, ctfM):
fSol = {}
for (c, fl) in ctfM.items():
for x in fl:
fSol[x] = cSol[c]
return fSol
def calc_obj(G, solution):
obj = 0
n = G.numberOfNodes()
for i in range(n):
for j in range(i+1, n):
obj += G.weight(i, j)*(2*solution[i]*solution[j] - solution[i] - solution[j])
return -1 * obj
def swapGain(G, sol, u, v):
gain = 0
for x in G.iterNeighbors(u):
if x != v:
if sol[x] == sol[u]:
gain += G.weight(u, x)
else:
gain -= G.weight(u, x)
for x in G.iterNeighbors(v):
if x != u:
if sol[x] == sol[v]:
gain += G.weight(v, x)
else:
gain -= G.weight(v, x)
return gain
def buildGainMap(G, sol):
start = time.perf_counter()
global mapGain
global gainTime
if mapGain != {} or mapGain == None:
mapGain = {}
for x in range(G.numberOfNodes()):
mapGain[x] = 0
for u, v, w in G.iterEdgesWeights():
if sol[u] == sol[v]:
# an uncut edge contributes +w to the flip gain of both endpoints
mapGain[u] += w
mapGain[v] += w
else:
# a cut edge contributes -w to both (flipping either endpoint would un-cut it), matching swapGain above
mapGain[u] -= w
mapGain[v] -= w
end = time.perf_counter()
gainTime += (end - start)
def buildParts(G, sol):
S.clear()
T.clear()
for x in range(G.numberOfNodes()):
if sol[x] == 0:
S.append(x)
elif sol[x] == 1:
T.append(x)
return
def pairwiseGain(G, sol):
global pairTime
pwGain = []
start = time.perf_counter()
for i in range(len(S)):
for j in range(len(T)):
pwGain.append((mapGain[S[i]] + mapGain[T[j]] + 2*G.weight(S[i], T[j]), S[i], T[j]))
end = time.perf_counter()
pairTime += (end - start)
return sorted(pwGain)
def pairwiseSubProb(G, sol, sp_size):
n = G.numberOfNodes()
subprob = nw.graph.Graph(n=2*(1+sp_size), weighted = True, directed = False)
pwGain = pairwiseGain(G, sol)
pwGain.reverse()
used = {}
mapProbToSubProb = {}
totalGain = 0
ct = 0
i = 0
idx = 0
while(ct < sp_size and i < len(pwGain)):
v1 = pwGain[i][1]
v2 = pwGain[i][2]
if v1 not in used and v2 not in used:
mapProbToSubProb[v1] = idx
idx += 1
mapProbToSubProb[v2] = idx
idx += 1
ct += 1
totalGain += pwGain[i][0]
i += 1
for x in G.iterNodes():
if x not in mapProbToSubProb.keys():
if sol[x] == 0:
mapProbToSubProb[x] = idx
if sol[x] == 1:
mapProbToSubProb[x] = idx + 1
for u, v in G.iterEdges():
spu = mapProbToSubProb[u]
spv = mapProbToSubProb[v]
if spu != spv:
subprob.increaseWeight(spu, spv, G.weight(u,v))
return (subprob, mapProbToSubProb, totalGain)
def spectralGain(G, sol):
eigvectors = nw.algebraic.laplacianEigenvectors(G, cutoff=2, reverse=True)
eigvec = eigvectors[1][1]
gainS = []
gainT = []
gain = []
for _ in range(G.numberOfNodes()):
gain.append(0)
for v in S:
vGain = 0
for u in T:
vGain += abs(eigvec[v] - eigvec[u])
gainS.append((vGain, v))
for v in T:
vGain = 0
for u in S:
vGain += abs(eigvec[v] - eigvec[u])
gainT.append((vGain, v))
return (sorted(gainS), sorted(gainT))
def spectralGainSubProb(G, sol, sp_size):
n = G.numberOfNodes()
subprob = nw.graph.Graph(n=2*(1+sp_size), weighted = True, directed = False)
mapProbToSubProb = {}
idx = 0
gain = spectralGain(G, sol)
gainS = gain[0]
gainT = gain[1]
totalGain = 0
gainS.reverse()
gainT.reverse()
for i in range(sp_size):
mapProbToSubProb[gainS[i][1]] = idx
totalGain += gainS[i][0]
idx += 1
for i in range(sp_size):
mapProbToSubProb[gainT[i][1]] = idx
totalGain += gainT[i][0]
idx += 1
for i in range(sp_size, len(gainS)):
mapProbToSubProb[gainS[i][1]] = idx
for i in range(sp_size, len(gainT)):
mapProbToSubProb[gainT[i][1]] = idx+1
for u, v in G.iterEdges():
spu = mapProbToSubProb[u]
spv = mapProbToSubProb[v]
if spu != spv:
subprob.increaseWeight(spu, spv, G.weight(u,v))
return (subprob, mapProbToSubProb, totalGain)
def randSubProb(G, sp_size, sol):
subprob = nw.graph.Graph(n=2*(1 + sp_size), weighted = True, directed = False )
random.shuffle(S)
random.shuffle(T)
mapProbToSubProb = {}
idx = 0
for i in range(sp_size):
mapProbToSubProb[S[i]] = idx
idx += 1
for i in range(sp_size):
mapProbToSubProb[T[i]] = idx
idx += 1
for i in range(sp_size, len(S)):
mapProbToSubProb[S[i]] = idx
for i in range(sp_size, len(T)):
mapProbToSubProb[T[i]] = idx+1
n = G.numberOfNodes()
for u, v in G.iterEdges():
spu = mapProbToSubProb[u]
spv = mapProbToSubProb[v]
if spu != spv:
subprob.increaseWeight(spu, spv, G.weight(u,v))
return (subprob, mapProbToSubProb, 0)
def randPairSubProb(G, sol, sp_size):
subprob = nw.graph.Graph(n=2*(1 + sp_size), weighted = True, directed = False )
rpS = []
rpT = []
sampleSize = 10*sp_size
if len(S) < sampleSize:
rpS = S[0:len(S)]
else:
for _ in range(sampleSize):
i = random.randint(0, len(S)-1)
rpS.append(S[i])
if len(T) < sampleSize:
rpT = T[0:len(T)]
else:
for _ in range(sampleSize):
i = random.randint(0, len(T)-1)
rpT.append(T[i])
pwGain = []
for i in range(len(rpS)):
for j in range(len(rpT)):
pwGain.append((swapGain(G, sol, rpS[i], rpT[j]), rpS[i], rpT[j]))
pwGain = sorted(pwGain)
pwGain.reverse()
used = {}
mapProbToSubProb = {}
totalGain = 0
ct = 0
i = 0
idx = 0
while(ct < sp_size and i < len(pwGain)):
v1 = pwGain[i][1]
v2 = pwGain[i][2]
if v1 not in used and v2 not in used:
mapProbToSubProb[v1] = idx
idx += 1
mapProbToSubProb[v2] = idx
idx += 1
ct += 1
totalGain += pwGain[i][0]
i += 1
for x in G.iterNodes():
if x not in mapProbToSubProb.keys():
if sol[x] == 0:
mapProbToSubProb[x] = idx
if sol[x] == 1:
mapProbToSubProb[x] = idx + 1
for u, v in G.iterEdges():
spu = mapProbToSubProb[u]
spv = mapProbToSubProb[v]
if spu != spv:
subprob.increaseWeight(spu, spv, G.weight(u,v))
return (subprob, mapProbToSubProb, totalGain)
def qaoa(G):
p = 3
nxG = nw.nxadapter.nk2nx(G)
obj = get_exact_energy(nxG, p)
# Lower and upper bounds
lb = np.hstack([np.full(p, -2*np.pi), np.full(p, -2*np.pi)])
ub = np.hstack([np.full(p, 2*np.pi), np.full(p, 2*np.pi)])
np.random.seed(1)
lb_init = np.hstack([np.full(p, -np.pi), np.full(p, -np.pi)])
ub_init = np.hstack([np.full(p, np.pi), np.full(p, np.pi)])
angles = [np.random.uniform(lb, ub, 2*p)]
bounds = [(lb[i], ub[i]) for i in range(2*p)]
results = []
optimizer = L_BFGS_B(maxiter = 100)
for angle in angles:
try:
result = optimizer.optimize(2*p, obj, variable_bounds = bounds, initial_point = angle)
results.append(result)
except AssertionError:
pass
best_result = min(results, key=itemgetter(1))
qc = get_maxcut_qaoa_circuit(nxG, best_result[0][p:], best_result[0][:p])
qc.measure_all()
backend = AerSimulator()
res = backend.run(qc).result().get_counts()
s = max(res, key=res.get)
sol = {}
for i in range(len(s)):
sol[i] = int(s[i])
return sol
def refine(G, sol, sp_size, obj, rmethod, spsolver, sp=None):
global buildSpTime
global solveTime
start = time.perf_counter()
if sp != None:
subprob = sp
elif rmethod == "spectral":
subprob = spectralGainSubProb(G, sol, sp_size)
elif rmethod == "pairwise":
subprob = pairwiseSubProb(G, sol, sp_size)
elif rmethod == "randpair":
subprob = randPairSubProb(G, sol, sp_size)
else:
subprob = randSubProb(G, sp_size, sol)
end = time.perf_counter()
buildSpTime += (end - start)
eGain = subprob[2]
mapProbToSubProb = subprob[1]
start = time.perf_counter()
if spsolver == "qbsolv":
Q = build_qubo(subprob[0])
response = QBSolv().sample_qubo(Q)
solution = response.samples()[0]
elif spsolver == "qaoa":
solution = qaoa(subprob[0])
elif spsolver == "gurobi":
solution = pyomo(subprob[0])
elif spsolver == "sampling":
solution = randSampleSolve(subprob[0])
end = time.perf_counter()
solveTime += (end - start)
if timer:
print(str(end - start) + "s solving subproblem")
n = G.numberOfNodes()
new_sol = {}
changed = set()
for i in range(n):
new_sol[i] = solution[mapProbToSubProb[i]]
if sol[i] != new_sol[i]:
changed.add(i)
new_obj = calc_obj(subprob[0], solution)
rGain = new_obj - obj
if showgain:
print(str(eGain) + " expected gain, " + str(rGain) + " real gain" )
if new_obj > obj:
for x in G.iterNodes():
if sol[x] != new_sol[x]:
if sol[x] == 0:
S.remove(x)
T.append(x)
elif sol[x] == 1:
T.remove(x)
S.append(x)
if rmethod == 'pairwise':
for x in changed:
for y in G.iterNeighbors(x):
if new_sol[x] == new_sol[y]:
mapGain[x] -= 2*G.weight(x, y)
mapGain[y] -= 2*G.weight(x, y)
if new_sol[x] != new_sol[y]:
mapGain[x] += 2*G.weight(x, y)
mapGain[y] += 2*G.weight(x, y)
return (new_sol, new_obj, subprob)
else:
return (sol, obj, subprob)
def calc_imbalance(sol,n):
s = 0
t = 0
for i in range(n):
if sol[i] == 1:
t += 1
if sol[i] == 0:
s += 1
return abs(t - s) / ((s + t)/2)
def spectralCoarsening(G):
eigvectors = nw.algebraic.laplacianEigenvectors(G, cutoff=2, reverse=True)
eigvec = eigvectors[1][1]
orderedNodes = []
for i in range(len(eigvec)):
orderedNodes.append((eigvec[i], i))
orderedNodes.sort()
orderedNodes.reverse()
n = len(orderedNodes)
i = 0
j = int(n/2)
mapCoarseToFine = {}
mapFineToCoarse = {}
idx = 0
while i < int(n/2) and j < n:
u = orderedNodes[i][1]
v = orderedNodes[j][1]
if not G.hasEdge(u, v):
mapCoarseToFine[idx] = [u, v]
mapFineToCoarse[u] = idx
mapFineToCoarse[v] = idx
idx += 1
else:
mapCoarseToFine[idx] = [u]
mapFineToCoarse[u] = idx
idx += 1
mapCoarseToFine[idx] = [v]
mapFineToCoarse[v] = idx
idx += 1
i += 1
j += 1
if n % 2 == 1:
u = orderedNodes[j][1]
mapCoarseToFine[idx] = [u]
mapFineToCoarse[u] = idx
idx += 1
cG = nw.graph.Graph(n=idx, weighted=True, directed=False)
for u,v in G.iterEdges():
cu = mapFineToCoarse[u]
cv = mapFineToCoarse[v]
cG.increaseWeight(cu, cv, G.weight(u, v))
return (cG, mapCoarseToFine)
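# Design note: nodes are sorted by their Fiedler-vector entry and node i is
# paired with node i + n/2 in that order, so each coarse node merges two fine
# nodes that are spectrally far apart (and non-adjacent); spectrally close
# nodes stay separate, which tends to preserve cut structure between levels.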
def randInitialSolution(G):
sol = {}
for x in G.iterNodes():
sol[x] = random.randint(0,1)
return sol
def informedCoarsen(G, sol):
S = []
T = []
for i in range(G.numberOfNodes()):
if sol[i] == 0:
S.append(i)
else:
T.append(i)
mapCoarseToFine = {}
mapFineToCoarse = {}
idx = 0
ct = 0
solu = {}
while ct < len(S) and len(S) >= 2:
u = S[random.randint(0, len(S)-1)]
v = S[random.randint(0, len(S)-1)]
if u == v:
ct += 1
continue
if not G.hasEdge(u, v):
mapCoarseToFine[idx] = [u, v]
mapFineToCoarse[u] = idx
mapFineToCoarse[v] = idx
solu[idx] = 0
idx += 1
ct = 0
S.remove(u)
S.remove(v)
else:
ct += 1
while ct < len(T) and len(T) >= 2:
u = T[random.randint(0, len(T)-1)]
v = T[random.randint(0, len(T)-1)]
if u == v:
continue
if not G.hasEdge(u, v):
mapCoarseToFine[idx] = [u, v]
mapFineToCoarse[u] = idx
mapFineToCoarse[v] = idx
solu[idx] = 1
idx += 1
ct = 0
T.remove(u)
T.remove(v)
else:
ct += 1
for x in S:
mapCoarseToFine[idx] = [x]
mapFineToCoarse[x] = idx
solu[idx] = 0
idx += 1
for x in T:
mapCoarseToFine[idx] = [x]
mapFineToCoarse[x] = idx
solu[idx] = 1
idx += 1
cG = nw.graph.Graph(n=idx, weighted=True, directed=False)
for u,v in G.iterEdges():
cu = mapFineToCoarse[u]
cv = mapFineToCoarse[v]
cG.increaseWeight(cu, cv, G.weight(u, v))
return (cG, mapCoarseToFine , solu)
def iterativeVCycle(G, sol):
global S
global T
global cSolves
global qSolves
global ties
refinements = 0
hierarchy = [G]
hierarchy_map = []
old = G.numberOfNodes()
coarse = informedCoarsen(G, sol)
G = coarse[0]
hierarchy.append(G)
hierarchy_map.append(coarse[1])
new = G.numberOfNodes()
while(abs(new - old) > 2*spsize):
old = G.numberOfNodes()
if old <= 2*(1+spsize):
break
coarse = spectralCoarsening(G)
G = coarse[0]
hierarchy.append(G)
hierarchy_map.append(coarse[1])
new = G.numberOfNodes()
hierarchy_map.reverse()
hierarchy.reverse()
solution = randInitialSolution(G)
obj = 0
for i in range(len(hierarchy_map)):
fG = hierarchy[i+1]
cG = hierarchy[i]
fMap = hierarchy_map[i]
new_solution = {}
for i in range(cG.numberOfNodes()):
for x in fMap[i]:
new_solution[x] = solution[i]
solution = new_solution
obj = calc_obj(fG, solution)
buildParts(fG, solution)
ct = 0
while ct < 5:
if method == 'pairwise':
buildGainMap(fG, solution)
if solver != 'hybrid':
res = refine(fG, solution, spsize, obj, method, solver)
refinements += 1
solution = res[0]
new_obj = res[1]
elif solver == "hybrid":
os = solution.copy()
tS = S[:]
tT = T[:]
res = refine(fG, solution, spsize, obj, method, "qaoa")
solution = res[0]
new_obj = res[1]
tS2 = S[:]
tT2 = T[:]
S = tS
T = tT
print("smapling")
res = refine(fG, os, spsize, obj, method, "sampling")
print("done sampling")
if res[1] > new_obj:
cSolves += 1
solution = res[0]
new_obj = res[1]
else:
if res[1] == new_obj:
ties += 1
else:
qSolves += 1
S = tS2
T = tT2
refinements += 1
if new_obj == obj:
ct += 1
else:
ct = 0
obj = new_obj
if method != 'pairwise':
buildGainMap(fG, solution)
while True:
if solver != 'hybrid':
os = solution.copy()
tS = S[:]
tT = T[:]
res = refine(fG, solution, spsize, obj, method, "qaoa")
solution = res[0]
new_obj = res[1]
tS2 = S[:]
tT2 = T[:]
S = tS
T = tT
res = refine(fG, os, spsize, obj, method, "sampling")
if res[1] > new_obj:
cSolves += 1
solution = res[0]
new_obj = res[1]
else:
if res[1] == new_obj:
ties += 1
else:
qSolves += 1
S = tS2
T = tT2
refinements += 1
if new_obj == obj:
break
obj = new_obj
print(str(fG))
print("Obj Val: " + str(obj))
print("Imbalance: " + str(calc_imbalance(solution, fG.numberOfNodes())))
print(str(refinements) + " iterations of refinement")
return (obj, solution)
def maxcut_solve(G):
global S
global T
global cSolves
global qSolves
global ties
refinements = 0
print(gname)
print(str(G))
print(method)
start = time.perf_counter()
hierarchy = [G]
hierarchy_map = []
old = G.numberOfNodes()
new = 0
while(abs(new - old) > 2*spsize):
old = G.numberOfNodes()
if old <= 2*(1+spsize):
break
coarse = spectralCoarsening(G)
G = coarse[0]
hierarchy.append(G)
hierarchy_map.append(coarse[1])
new = G.numberOfNodes()
end = time.perf_counter()
if timer:
print(str(end - start) + "s coarsening")
hierarchy_map.reverse()
hierarchy.reverse()
solution = randInitialSolution(G)
obj = 0
for i in range(len(hierarchy_map)):
fG = hierarchy[i+1]
cG = hierarchy[i]
fMap = hierarchy_map[i]
new_solution = {}
for i in range(cG.numberOfNodes()):
for x in fMap[i]:
new_solution[x] = solution[i]
solution = new_solution
obj = calc_obj(fG, solution)
buildParts(fG, solution)
ct = 0
while ct < 5:
if method == 'pairwise':
buildGainMap(fG, solution)
if solver != 'hybrid':
res = refine(fG, solution, spsize, obj, method, solver)
refinements += 1
solution = res[0]
new_obj = res[1]
elif solver == "hybrid":
os = solution.copy()
ps = solution.copy()
tS = S[:]
tT = T[:]
res = refine(fG, solution, spsize, obj, method, "qaoa")
sp = res[2]
solution = res[0]
new_obj = res[1]
tS2 = S[:]
tT2 = T[:]
S = tS
T = tT
res = refine(fG, os, spsize, obj, method, "sampling", sp)
if res[1] > new_obj:
cSolves += 1
solution = res[0]
new_obj = res[1]
else:
if res[1] == new_obj:
ties += 1
else:
qSolves += 1
S = tS2
T = tT2
refinements += 1
if new_obj == obj:
ct += 1
else:
ct = 0
obj = new_obj
if method != 'pairwise':
buildGainMap(fG, solution)
while True:
if solver != 'hybrid':
res = refine(fG, solution, spsize, obj, method, solver)
refinements += 1
solution = res[0]
new_obj = res[1]
elif solver == "hybrid":
os = solution.copy()
tS = S[:]
tT = T[:]
res = refine(fG, solution, spsize, obj, method, "qaoa")
solution = res[0]
new_obj = res[1]
sp = res[2]
tS2 = S[:]
tT2 = T[:]
S = tS
T = tT
res = refine(fG, os, spsize, obj, method, "sampling", sp)
if res[1] > new_obj:
cSolves += 1
solution = res[0]
new_obj = res[1]
else:
if res[1] == new_obj:
ties += 1
else:
qSolves += 1
S = tS2
T = tT2
refinements += 1
if new_obj == obj:
break
obj = new_obj
print(str(fG))
print("Obj Val: " + str(obj))
print("Imbalance: " + str(calc_imbalance(solution, fG.numberOfNodes())))
print(str(refinements) + " iterations of refinement")
# best = obj
# bestsol = solution
# print("\n\n1st Iteration with Informed Coarsening\n\n")
# vcycle = iterativeVCycle(fG, solution)
# obj = vcycle[0]
# solution = vcycle[1]
# if obj > best:
# best = obj
# bestsol = solution
# print("\n\n2nd Iteration with Informed Coarsening\n\n")
# vcycle = iterativeVCycle(fG, solution)
# obj = vcycle[0]
# solution = vcycle[1]
# if obj > best:
# best = obj
# bestsol = solution
return obj
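# Illustrative sketch (not part of the original pipeline): the hybrid branches
# above run both back-ends from the same starting solution, keep the better
# refinement, and count which solver won. The core of that bookkeeping is:
def pick_better(run_quantum, run_classical, solution):
    """Run both refiners on copies of `solution`; each is an assumed callable
    returning (solution, obj). Returns (solution, obj, winner)."""
    q_sol, q_obj = run_quantum(dict(solution))
    c_sol, c_obj = run_classical(dict(solution))
    if c_obj > q_obj:
        return c_sol, c_obj, "classical"
    if c_obj == q_obj:
        return q_sol, q_obj, "tie"  # counted as a tie above
    return q_sol, q_obj, "quantum"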
if inputtype == "file":
if gformat == 'alist':
G = readGraph()
elif gformat == 'elist':
G = readGraphEList()
if inputtype == "bipartite":
G = nx.algorithms.bipartite.generators.random_graph(bpA, bpA, 0.1, seed = 0)
G = nw.nxadapter.nx2nk(G)
if inputtype == "mesh":
G = meshGraph()
s = time.perf_counter()
if not useml:
    obj = maxcut_solve(G)
else:
    obj = maxcut_solve_noml(G)
e = time.perf_counter()
print("Found maximum value of " + str(obj) + " " + str(e-s) + "s")
if solver == 'hybrid':
print("Quantum: " + str(qSolves) + " Classical: " + str(cSolves) + " Ties: " + str(ties))
| [
"networkit.nxadapter.nx2nk",
"QAOAKit.qaoa.get_maxcut_qaoa_circuit",
"qiskit.algorithms.optimizers.L_BFGS_B",
"random.randint",
"random.shuffle",
"argparse.ArgumentParser",
"numpy.full",
"time.perf_counter",
"networkit.nxadapter.nk2nx",
"dwave_qbsolv.QBSolv",
"numpy.random.seed",
"numpy.random... | [((670, 695), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (693, 695), False, 'import argparse\n'), ((27983, 28002), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (28000, 28002), False, 'import time\n'), ((28103, 28122), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (28120, 28122), False, 'import time\n'), ((2629, 2680), 'networkit.graph.Graph', 'nw.graph.Graph', ([], {'n': '(0)', 'weighted': '(False)', 'directed': '(False)'}), '(n=0, weighted=False, directed=False)\n', (2643, 2680), True, 'import networkit as nw\n'), ((3066, 3116), 'networkit.graph.Graph', 'nw.graph.Graph', ([], {'n': 'n', 'weighted': '(True)', 'directed': '(False)'}), '(n=n, weighted=True, directed=False)\n', (3080, 3116), True, 'import networkit as nw\n'), ((3661, 3716), 'networkit.graph.Graph', 'nw.graph.Graph', ([], {'n': '(10000)', 'weighted': '(False)', 'directed': '(False)'}), '(n=10000, weighted=False, directed=False)\n', (3675, 3716), True, 'import networkit as nw\n'), ((6427, 6446), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6444, 6446), False, 'import time\n'), ((6760, 6779), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6777, 6779), False, 'import time\n'), ((7086, 7105), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7103, 7105), False, 'import time\n'), ((7272, 7291), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7289, 7291), False, 'import time\n'), ((7432, 7498), 'networkit.graph.Graph', 'nw.graph.Graph', ([], {'n': '(2 * (1 + sp_size))', 'weighted': '(True)', 'directed': '(False)'}), '(n=2 * (1 + sp_size), weighted=True, directed=False)\n', (7446, 7498), True, 'import networkit as nw\n'), ((8477, 8538), 'networkit.algebraic.laplacianEigenvectors', 'nw.algebraic.laplacianEigenvectors', (['G'], {'cutoff': '(2)', 'reverse': '(True)'}), '(G, cutoff=2, reverse=True)\n', (8511, 8538), True, 'import networkit as nw\n'), ((9075, 9141), 'networkit.graph.Graph', 'nw.graph.Graph', ([], {'n': '(2 * (1 + sp_size))', 'weighted': '(True)', 'directed': '(False)'}), '(n=2 * (1 + sp_size), weighted=True, directed=False)\n', (9089, 9141), True, 'import networkit as nw\n'), ((10028, 10094), 'networkit.graph.Graph', 'nw.graph.Graph', ([], {'n': '(2 * (1 + sp_size))', 'weighted': '(True)', 'directed': '(False)'}), '(n=2 * (1 + sp_size), weighted=True, directed=False)\n', (10042, 10094), True, 'import networkit as nw\n'), ((10102, 10119), 'random.shuffle', 'random.shuffle', (['S'], {}), '(S)\n', (10116, 10119), False, 'import random\n'), ((10124, 10141), 'random.shuffle', 'random.shuffle', (['T'], {}), '(T)\n', (10138, 10141), False, 'import random\n'), ((10824, 10890), 'networkit.graph.Graph', 'nw.graph.Graph', ([], {'n': '(2 * (1 + sp_size))', 'weighted': '(True)', 'directed': '(False)'}), '(n=2 * (1 + sp_size), weighted=True, directed=False)\n', (10838, 10890), True, 'import networkit as nw\n'), ((12449, 12470), 'networkit.nxadapter.nk2nx', 'nw.nxadapter.nk2nx', (['G'], {}), '(G)\n', (12467, 12470), True, 'import networkit as nw\n'), ((12668, 12685), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (12682, 12685), True, 'import numpy as np\n'), ((12952, 12973), 'qiskit.algorithms.optimizers.L_BFGS_B', 'L_BFGS_B', ([], {'maxiter': '(100)'}), '(maxiter=100)\n', (12960, 12973), False, 'from qiskit.algorithms.optimizers import L_BFGS_B\n'), ((13262, 13330), 'QAOAKit.qaoa.get_maxcut_qaoa_circuit', 'get_maxcut_qaoa_circuit', (['nxG', 'best_result[0][p:]', 'best_result[0][:p]'], {}), 
'(nxG, best_result[0][p:], best_result[0][:p])\n', (13285, 13330), False, 'from QAOAKit.qaoa import get_maxcut_qaoa_circuit\n'), ((13664, 13683), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (13681, 13683), False, 'import time\n'), ((14044, 14063), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (14061, 14063), False, 'import time\n'), ((14167, 14186), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (14184, 14186), False, 'import time\n'), ((14563, 14582), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (14580, 14582), False, 'import time\n'), ((16116, 16177), 'networkit.algebraic.laplacianEigenvectors', 'nw.algebraic.laplacianEigenvectors', (['G'], {'cutoff': '(2)', 'reverse': '(True)'}), '(G, cutoff=2, reverse=True)\n', (16150, 16177), True, 'import networkit as nw\n'), ((17138, 17190), 'networkit.graph.Graph', 'nw.graph.Graph', ([], {'n': 'idx', 'weighted': '(True)', 'directed': '(False)'}), '(n=idx, weighted=True, directed=False)\n', (17152, 17190), True, 'import networkit as nw\n'), ((18964, 19016), 'networkit.graph.Graph', 'nw.graph.Graph', ([], {'n': 'idx', 'weighted': '(True)', 'directed': '(False)'}), '(n=idx, weighted=True, directed=False)\n', (18978, 19016), True, 'import networkit as nw\n'), ((23215, 23234), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (23232, 23234), False, 'import time\n'), ((23609, 23628), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (23626, 23628), False, 'import time\n'), ((27830, 27900), 'networkx.algorithms.bipartite.generators.random_graph', 'nx.algorithms.bipartite.generators.random_graph', (['bpA', 'bpA', '(0.1)'], {'seed': '(0)'}), '(bpA, bpA, 0.1, seed=0)\n', (27877, 27900), True, 'import networkx as nx\n'), ((27911, 27932), 'networkit.nxadapter.nx2nk', 'nw.nxadapter.nx2nk', (['G'], {}), '(G)\n', (27929, 27932), True, 'import networkit as nw\n'), ((12830, 12862), 'numpy.random.uniform', 'np.random.uniform', (['lb', 'ub', '(2 * p)'], {}), '(lb, ub, 2 * p)\n', (12847, 12862), True, 'import numpy as np\n'), ((17457, 17477), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (17471, 17477), False, 'import random\n'), ((3585, 3619), 'QAOAKit.utils.qaoa_maxcut_energy', 'qaoa_maxcut_energy', (['G', 'beta', 'gamma'], {}), '(G, beta, gamma)\n', (3603, 3619), False, 'from QAOAKit.utils import qaoa_maxcut_energy\n'), ((5010, 5030), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (5024, 5030), False, 'import random\n'), ((12555, 12577), 'numpy.full', 'np.full', (['p', '(-2 * np.pi)'], {}), '(p, -2 * np.pi)\n', (12562, 12577), True, 'import numpy as np\n'), ((12577, 12599), 'numpy.full', 'np.full', (['p', '(-2 * np.pi)'], {}), '(p, -2 * np.pi)\n', (12584, 12599), True, 'import numpy as np\n'), ((12620, 12641), 'numpy.full', 'np.full', (['p', '(2 * np.pi)'], {}), '(p, 2 * np.pi)\n', (12627, 12641), True, 'import numpy as np\n'), ((12641, 12662), 'numpy.full', 'np.full', (['p', '(2 * np.pi)'], {}), '(p, 2 * np.pi)\n', (12648, 12662), True, 'import numpy as np\n'), ((12711, 12729), 'numpy.full', 'np.full', (['p', '(-np.pi)'], {}), '(p, -np.pi)\n', (12718, 12729), True, 'import numpy as np\n'), ((12731, 12749), 'numpy.full', 'np.full', (['p', '(-np.pi)'], {}), '(p, -np.pi)\n', (12738, 12749), True, 'import numpy as np\n'), ((12777, 12794), 'numpy.full', 'np.full', (['p', 'np.pi'], {}), '(p, np.pi)\n', (12784, 12794), True, 'import numpy as np\n'), ((12796, 12813), 'numpy.full', 'np.full', (['p', 'np.pi'], {}), '(p, np.pi)\n', (12803, 12813), 
True, 'import numpy as np\n'), ((14270, 14278), 'dwave_qbsolv.QBSolv', 'QBSolv', ([], {}), '()\n', (14276, 14278), False, 'from dwave_qbsolv import QBSolv\n'), ((4102, 4124), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (4116, 4124), False, 'import random\n')] |
import time
import numpy as np
from airobot import Robot
from airobot import log_warn
from airobot.utils.common import euler2quat
def main():
"""
This function shows an example of block stacking.
"""
np.set_printoptions(precision=4, suppress=True)
robot = Robot('franka')
success = robot.arm.go_home()
if not success:
log_warn('Robot go_home failed!!!')
ori = euler2quat([0, 0, np.pi / 2])
robot.pb_client.load_urdf('table/table.urdf',
[.6, 0, 0.4],
ori,
scaling=0.9)
box_size = 0.03
box_id1 = robot.pb_client.load_geom('box', size=box_size,
mass=0.1,
base_pos=[.5, 0.12, 1.0],
rgba=[1, 0, 0, 1])
box_id2 = robot.pb_client.load_geom('box',
size=box_size,
mass=0.1,
base_pos=[0.3, 0.12, 1.0],
rgba=[0, 0, 1, 1])
robot.arm.eetool.open()
obj_pos = robot.pb_client.get_body_state(box_id1)[0]
move_dir = obj_pos - robot.arm.get_ee_pose()[0]
move_dir[2] = 0
eef_step = 0.025
# an example of using IK with nullspace enabled
ik_kwargs = dict(ns=True)
    robot.arm.move_ee_xyz(move_dir, eef_step=eef_step, ik_kwargs=ik_kwargs)
move_dir = np.zeros(3)
move_dir[2] = obj_pos[2] - robot.arm.get_ee_pose()[0][2]
robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
robot.arm.eetool.close(wait=False)
robot.arm.move_ee_xyz([0, 0, 0.3], eef_step=eef_step)
obj_pos = robot.pb_client.get_body_state(box_id2)[0]
move_dir = obj_pos - robot.arm.get_ee_pose()[0]
move_dir[2] = 0
robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
move_dir = obj_pos - robot.arm.get_ee_pose()[0]
move_dir[2] += box_size * 2
robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
robot.arm.eetool.open()
move_dir[2] = 0.2
robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
time.sleep(10)
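    # Optional sanity check (a sketch, not in the original script): after the
    # place motion the two box centres should roughly share x/y and differ in
    # z by about one box edge; the same pb_client API used above would give:
    # pos1 = robot.pb_client.get_body_state(box_id1)[0]
    # pos2 = robot.pb_client.get_body_state(box_id2)[0]
    # print('stacked offset:', pos1 - pos2)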
if __name__ == '__main__':
main()
| [
"time.sleep",
"numpy.zeros",
"airobot.utils.common.euler2quat",
"airobot.Robot",
"airobot.log_warn",
"numpy.set_printoptions"
] | [((220, 267), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'suppress': '(True)'}), '(precision=4, suppress=True)\n', (239, 267), True, 'import numpy as np\n'), ((280, 295), 'airobot.Robot', 'Robot', (['"""franka"""'], {}), "('franka')\n", (285, 295), False, 'from airobot import Robot\n'), ((404, 433), 'airobot.utils.common.euler2quat', 'euler2quat', (['[0, 0, np.pi / 2]'], {}), '([0, 0, np.pi / 2])\n', (414, 433), False, 'from airobot.utils.common import euler2quat\n'), ((1502, 1513), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1510, 1513), True, 'import numpy as np\n'), ((2159, 2173), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2169, 2173), False, 'import time\n'), ((358, 393), 'airobot.log_warn', 'log_warn', (['"""Robot go_home failed!!!"""'], {}), "('Robot go_home failed!!!')\n", (366, 393), False, 'from airobot import log_warn\n')] |
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from PIL import Image
import numpy as np
import onnx
import torch
from torch.onnx.symbolic_registry import register_op
from torch.onnx.symbolic_helper import parse_args
from torchreid.models import build_model
from torchreid.utils import load_pretrained_weights
from torchreid.data.transforms import build_inference_transform
from scripts.default_config import get_default_config, model_kwargs
@parse_args('v', 'i', 'v', 'v', 'f', 'i')
def group_norm_symbolic(g, input, num_groups, weight, bias, eps, cudnn_enabled):
from torch.onnx.symbolic_opset9 import reshape, mul, add, reshape_as
channels_num = input.type().sizes()[1]
if num_groups == channels_num:
output = g.op('InstanceNormalization', input, weight, bias, epsilon_f=eps)
else:
# Reshape from [n, g * cg, h, w] to [1, n * g, cg * h, w].
x = reshape(g, input, [0, num_groups, -1, 0])
x = reshape(g, x, [1, -1, 0, 0])
# Normalize channel-wise.
x = g.op('MeanVarianceNormalization', x, axes_i=[2, 3])
# Reshape back.
x = reshape_as(g, x, input)
# Apply affine transform.
x = mul(g, x, reshape(g, weight, [1, channels_num, 1, 1]))
output = add(g, x, reshape(g, bias, [1, channels_num, 1, 1]))
return output
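# Shape walk-through (illustrative, assuming input [n=2, c=32, h=8, w=8] and
# num_groups=4, so cg = c // num_groups = 8):
#   reshape([0, 4, -1, 0]) -> [2, 4, cg*h = 64, 8]
#   reshape([1, -1, 0, 0]) -> [1, n*g = 8, 64, 8]
# MeanVarianceNormalization over axes (2, 3) then normalizes each of the n*g
# groups independently, which matches GroupNorm's per-group statistics.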
def parse_num_classes(source_datasets):
num_clustered = 0
num_rest = 0
for src in source_datasets:
if isinstance(src, (tuple, list)):
num_clustered += 1
else:
num_rest += 1
total_num_sources = num_clustered + int(num_rest > 0)
assert total_num_sources > 0
return [0] * total_num_sources # dummy number of classes
def random_image(height, width):
input_size = (height, width, 3)
img = np.random.rand(*input_size).astype(np.float32)
img = np.uint8(img * 255)
out_img = Image.fromarray(img)
return out_img
def reset_config(cfg):
cfg.model.download_weights = False
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config-file', type=str, default='',
help='Path to config file')
parser.add_argument('--output-name', type=str, default='model',
help='Path to save ONNX model')
parser.add_argument('--opset', type=int, default=9)
parser.add_argument('--verbose', default=False, action='store_true',
help='Verbose mode for onnx.export')
parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
help='Modify config options using the command-line')
args = parser.parse_args()
cfg = get_default_config()
cfg.use_gpu = torch.cuda.is_available()
if args.config_file:
cfg.merge_from_file(args.config_file)
reset_config(cfg)
cfg.merge_from_list(args.opts)
cfg.freeze()
num_classes = parse_num_classes(cfg.data.sources)
model = build_model(**model_kwargs(cfg, num_classes))
load_pretrained_weights(model, cfg.model.load_weights)
model.eval()
transform = build_inference_transform(
cfg.data.height,
cfg.data.width,
norm_mean=cfg.data.norm_mean,
norm_std=cfg.data.norm_std,
)
input_img = random_image(cfg.data.height, cfg.data.width)
input_blob = transform(input_img).unsqueeze(0)
input_names = ['data']
output_names = ['reid_embedding']
dynamic_axes = {'data': {0: 'batch_size', 1: 'channels', 2: 'height', 3: 'width'},
'reid_embedding': {0: 'batch_size', 1: 'dim'}}
output_file_path = args.output_name
if not args.output_name.endswith('.onnx'):
output_file_path += '.onnx'
register_op("group_norm", group_norm_symbolic, "", args.opset)
with torch.no_grad():
torch.onnx.export(
model,
input_blob,
output_file_path,
verbose=args.verbose,
export_params=True,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
opset_version=args.opset,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX
)
net_from_onnx = onnx.load(output_file_path)
try:
onnx.checker.check_model(net_from_onnx)
print('ONNX check passed.')
except onnx.onnx_cpp2py_export.checker.ValidationError as ex:
print('ONNX check failed: {}.'.format(ex))
if __name__ == '__main__':
main()
| [
"torch.onnx.symbolic_helper.parse_args",
"numpy.uint8",
"PIL.Image.fromarray",
"torch.onnx.symbolic_opset9.reshape",
"scripts.default_config.get_default_config",
"numpy.random.rand",
"argparse.ArgumentParser",
"torch.onnx.symbolic_registry.register_op",
"torch.onnx.export",
"torchreid.utils.load_p... | [((997, 1037), 'torch.onnx.symbolic_helper.parse_args', 'parse_args', (['"""v"""', '"""i"""', '"""v"""', '"""v"""', '"""f"""', '"""i"""'], {}), "('v', 'i', 'v', 'v', 'f', 'i')\n", (1007, 1037), False, 'from torch.onnx.symbolic_helper import parse_args\n'), ((2395, 2414), 'numpy.uint8', 'np.uint8', (['(img * 255)'], {}), '(img * 255)\n', (2403, 2414), True, 'import numpy as np\n'), ((2430, 2450), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2445, 2450), False, 'from PIL import Image\n'), ((2563, 2642), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (2586, 2642), False, 'import argparse\n'), ((3263, 3283), 'scripts.default_config.get_default_config', 'get_default_config', ([], {}), '()\n', (3281, 3283), False, 'from scripts.default_config import get_default_config, model_kwargs\n'), ((3302, 3327), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3325, 3327), False, 'import torch\n'), ((3590, 3644), 'torchreid.utils.load_pretrained_weights', 'load_pretrained_weights', (['model', 'cfg.model.load_weights'], {}), '(model, cfg.model.load_weights)\n', (3613, 3644), False, 'from torchreid.utils import load_pretrained_weights\n'), ((3679, 3800), 'torchreid.data.transforms.build_inference_transform', 'build_inference_transform', (['cfg.data.height', 'cfg.data.width'], {'norm_mean': 'cfg.data.norm_mean', 'norm_std': 'cfg.data.norm_std'}), '(cfg.data.height, cfg.data.width, norm_mean=cfg.\n data.norm_mean, norm_std=cfg.data.norm_std)\n', (3704, 3800), False, 'from torchreid.data.transforms import build_inference_transform\n'), ((4298, 4360), 'torch.onnx.symbolic_registry.register_op', 'register_op', (['"""group_norm"""', 'group_norm_symbolic', '""""""', 'args.opset'], {}), "('group_norm', group_norm_symbolic, '', args.opset)\n", (4309, 4360), False, 'from torch.onnx.symbolic_registry import register_op\n'), ((4806, 4833), 'onnx.load', 'onnx.load', (['output_file_path'], {}), '(output_file_path)\n', (4815, 4833), False, 'import onnx\n'), ((1444, 1485), 'torch.onnx.symbolic_opset9.reshape', 'reshape', (['g', 'input', '[0, num_groups, -1, 0]'], {}), '(g, input, [0, num_groups, -1, 0])\n', (1451, 1485), False, 'from torch.onnx.symbolic_opset9 import reshape, mul, add, reshape_as\n'), ((1498, 1526), 'torch.onnx.symbolic_opset9.reshape', 'reshape', (['g', 'x', '[1, -1, 0, 0]'], {}), '(g, x, [1, -1, 0, 0])\n', (1505, 1526), False, 'from torch.onnx.symbolic_opset9 import reshape, mul, add, reshape_as\n'), ((1661, 1684), 'torch.onnx.symbolic_opset9.reshape_as', 'reshape_as', (['g', 'x', 'input'], {}), '(g, x, input)\n', (1671, 1684), False, 'from torch.onnx.symbolic_opset9 import reshape, mul, add, reshape_as\n'), ((4370, 4385), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4383, 4385), False, 'import torch\n'), ((4395, 4666), 'torch.onnx.export', 'torch.onnx.export', (['model', 'input_blob', 'output_file_path'], {'verbose': 'args.verbose', 'export_params': '(True)', 'input_names': 'input_names', 'output_names': 'output_names', 'dynamic_axes': 'dynamic_axes', 'opset_version': 'args.opset', 'operator_export_type': 'torch.onnx.OperatorExportTypes.ONNX'}), '(model, input_blob, output_file_path, verbose=args.verbose,\n export_params=True, input_names=input_names, output_names=output_names,\n dynamic_axes=dynamic_axes, opset_version=args.opset,\n 
operator_export_type=torch.onnx.OperatorExportTypes.ONNX)\n', (4412, 4666), False, 'import torch\n'), ((4851, 4890), 'onnx.checker.check_model', 'onnx.checker.check_model', (['net_from_onnx'], {}), '(net_from_onnx)\n', (4875, 4890), False, 'import onnx\n'), ((1741, 1784), 'torch.onnx.symbolic_opset9.reshape', 'reshape', (['g', 'weight', '[1, channels_num, 1, 1]'], {}), '(g, weight, [1, channels_num, 1, 1])\n', (1748, 1784), False, 'from torch.onnx.symbolic_opset9 import reshape, mul, add, reshape_as\n'), ((1813, 1854), 'torch.onnx.symbolic_opset9.reshape', 'reshape', (['g', 'bias', '[1, channels_num, 1, 1]'], {}), '(g, bias, [1, channels_num, 1, 1])\n', (1820, 1854), False, 'from torch.onnx.symbolic_opset9 import reshape, mul, add, reshape_as\n'), ((2338, 2365), 'numpy.random.rand', 'np.random.rand', (['*input_size'], {}), '(*input_size)\n', (2352, 2365), True, 'import numpy as np\n'), ((3554, 3584), 'scripts.default_config.model_kwargs', 'model_kwargs', (['cfg', 'num_classes'], {}), '(cfg, num_classes)\n', (3566, 3584), False, 'from scripts.default_config import get_default_config, model_kwargs\n')] |
import sys
import os
import torch
import yaml
from easydict import EasyDict as edict
from pytorch_transformers.tokenization_bert import BertTokenizer
from vilbert.datasets import ConceptCapLoaderTrain, ConceptCapLoaderVal
from vilbert.vilbert import VILBertForVLTasks, BertConfig, BertForMultiModalPreTraining
from vilbert.task_utils import LoadDatasetEval
import numpy as np
import matplotlib.pyplot as plt
import PIL
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.layers import nms
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
from PIL import Image
import cv2
import argparse
import glob
from types import SimpleNamespace
import pdb
import _pickle as cPickle
class FeatureExtractor:
MAX_SIZE = 1333
MIN_SIZE = 800
def __init__(self):
self.args = self.get_parser().parse_args()
self.detection_model = self._build_detection_model()
os.makedirs(self.args.output_folder, exist_ok=True)
def get_parser(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_file", default=None, type=str, help="Detectron model file"
)
parser.add_argument(
"--config_file", default=None, type=str, help="Detectron config file"
)
parser.add_argument(
"--imdb_gt_file",
default=None,
type=str,
help="Imdb file containing file path and bboxes.",
)
parser.add_argument("--batch_size", type=int, default=2, help="Batch size")
parser.add_argument(
"--num_features",
type=int,
default=100,
help="Number of features to extract.",
)
parser.add_argument(
"--output_folder", type=str, default="./output", help="Output folder"
)
parser.add_argument(
"--feature_name",
type=str,
help="The name of the feature to extract",
default="fc6",
)
parser.add_argument(
"--confidence_threshold",
type=float,
default=0,
help="Threshold of detection confidence above which boxes will be selected",
)
parser.add_argument(
"--background",
action="store_true",
help="The model will output predictions for the background class when set",
)
parser.add_argument(
"--partition", type=int, default=0, help="Partition to download."
)
return parser
def _build_detection_model(self):
cfg.merge_from_file(self.args.config_file)
cfg.freeze()
model = build_detection_model(cfg)
checkpoint = torch.load(self.args.model_file, map_location=torch.device("cpu"))
load_state_dict(model, checkpoint.pop("model"))
model.to("cuda")
model.eval()
return model
def get_batch_proposals(self, images, im_scales, im_infos, proposals):
proposals_batch = []
for idx, img_info in enumerate(im_infos):
boxes_tensor = torch.from_numpy(
proposals[idx]["bbox"][: int(proposals[idx]["num_box"]), 0:]
).to("cuda")
orig_image_size = (img_info["width"], img_info["height"])
boxes = BoxList(boxes_tensor, orig_image_size)
image_size = (images.image_sizes[idx][1], images.image_sizes[idx][0])
boxes = boxes.resize(image_size)
proposals_batch.append(boxes)
return proposals_batch
def _image_transform(self, path):
img = Image.open(path)
im = np.array(img).astype(np.float32)
        # grayscale images lack a channel axis (would raise "too many indices
        # for array"), so replicate them to 3 channels
if len(im.shape) < 3:
im = np.repeat(im[:, :, np.newaxis], 3, axis=2)
im = im[:, :, ::-1]
im -= np.array([102.9801, 115.9465, 122.7717])
im_shape = im.shape
im_height = im_shape[0]
im_width = im_shape[1]
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
# Scale based on minimum size
im_scale = self.MIN_SIZE / im_size_min
# Prevent the biggest axis from being more than max_size
# If bigger, scale it down
if np.round(im_scale * im_size_max) > self.MAX_SIZE:
im_scale = self.MAX_SIZE / im_size_max
im = cv2.resize(
im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR
)
img = torch.from_numpy(im).permute(2, 0, 1)
im_info = {"width": im_width, "height": im_height}
return img, im_scale, im_info
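    # Worked example (illustrative): for a 480 x 2000 image, im_scale starts at
    # MIN_SIZE / 480 = 1.67, but 2000 * 1.67 = 3333 > MAX_SIZE, so the scale is
    # clamped to MAX_SIZE / 2000 = 0.67 and the resized image is ~320 x 1333.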
def _process_feature_extraction(
self, output, im_scales, im_infos, feature_name="fc6", conf_thresh=0
):
batch_size = len(output[0]["proposals"])
n_boxes_per_image = [len(boxes) for boxes in output[0]["proposals"]]
score_list = output[0]["scores"].split(n_boxes_per_image)
score_list = [torch.nn.functional.softmax(x, -1) for x in score_list]
feats = output[0][feature_name].split(n_boxes_per_image)
cur_device = score_list[0].device
feat_list = []
info_list = []
for i in range(batch_size):
dets = output[0]["proposals"][i].bbox / im_scales[i]
scores = score_list[i]
max_conf = torch.zeros((scores.shape[0])).to(cur_device)
conf_thresh_tensor = torch.full_like(max_conf, conf_thresh)
start_index = 1
# Column 0 of the scores matrix is for the background class
if self.args.background:
start_index = 0
for cls_ind in range(start_index, scores.shape[1]):
cls_scores = scores[:, cls_ind]
keep = nms(dets, cls_scores, 0.5)
max_conf[keep] = torch.where(
# Better than max one till now and minimally greater than conf_thresh
(cls_scores[keep] > max_conf[keep])
& (cls_scores[keep] > conf_thresh_tensor[keep]),
cls_scores[keep],
max_conf[keep],
)
feat_list.append(feats[i])
num_boxes = len(feats[i])
bbox = output[0]["proposals"][i]
            bbox = bbox.resize((im_infos[i]["width"], im_infos[i]["height"]))
bbox = bbox.bbox
# Predict the class label using the scores
objects = torch.argmax(scores[:, start_index:], dim=1)
info_list.append(
{
"bbox": bbox.cpu().numpy(),
"num_boxes": num_boxes,
"objects": objects.cpu().numpy(),
"image_width": im_infos[i]["width"],
"image_height": im_infos[i]["height"],
"cls_prob": scores.cpu().numpy(),
}
)
return feat_list, info_list
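    # The loop above is class-wise NMS bookkeeping: for each class column, boxes
    # surviving nms() keep the highest score seen so far, so max_conf[j] ends up
    # as the best post-NMS class score of box j. (In this snippet max_conf is
    # computed but not used to prune the returned boxes.)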
def get_detectron_features(self, image_paths):
img_tensor, im_scales, im_infos, im_bbox = [], [], [], []
for image_path in image_paths:
#print('Image Path ' ,image_path)
# print("image transformations...")
im, im_scale, im_info = self._image_transform(image_path["file_path"])
print("image transformations done")
img_tensor.append(im)
im_scales.append(im_scale)
im_infos.append(im_info)
im_bbox.append(image_path)
# Image dimensions should be divisible by 32, to allow convolutions
# in detector to work
current_img_list = to_image_list(img_tensor, size_divisible=32)
current_img_list = current_img_list.to("cuda")
# print("Infos: curr image, im_scale, img_infos, image_path_bbox: \n",current_img_list, im_scales, im_infos, im_bbox )
# print("Getting batch proposals...")
# print("Infos: curr image, im_scale, img_infos, image_path_bbox: \n",current_img_list, im_scales, im_infos, image_paths['bbox'] )
proposals = self.get_batch_proposals(
current_img_list, im_scales, im_infos, im_bbox
)
print("Getting batch proposals done")
with torch.no_grad():
output = self.detection_model(current_img_list, proposals=proposals)
feat_list = self._process_feature_extraction(
output,
im_scales,
im_infos,
self.args.feature_name,
self.args.confidence_threshold,
)
print("Features extracted!")
return feat_list
def _chunks(self, array, chunk_size):
for i in range(0, len(array), chunk_size):
yield array[i : i + chunk_size]
def _save_feature(self, file_name, feature, info):
file_base_name = str(file_name).split(".")[0]
info["image_id"] = file_base_name
info["features"] = feature.cpu().numpy()
file_base_name = str(file_base_name) + ".npy"
np.save(os.path.join(self.args.output_folder, file_base_name), info)
print("Saved in: "+os.path.join(self.args.output_folder, file_base_name))
def extract_features(self):
files = np.load(self.args.imdb_gt_file, allow_pickle=True)
extracted_features = []
# files = sorted(files)
# files = [files[i: i+1000] for i in range(0, len(files), 1000)][self.args.partition]
cnt = 1
for chunk in self._chunks(files, self.args.batch_size):
try:
print('############## CNT : ', cnt)
cnt += 1
print('Getting features...')
# print(chunk)
features, infos = self.get_detectron_features(chunk)
extracted_features.append((features, infos))
print('Getting batch features done!')
except BaseException:
continue
np.save('gt_feat.npy', extracted_features)
return extracted_features
def tokenize_batch(batch):
return [tokenizer.convert_tokens_to_ids(sent) for sent in batch]
def untokenize_batch(batch):
return [tokenizer.convert_ids_to_tokens(sent) for sent in batch]
def detokenize(sent):
""" Roughly detokenizes (mainly undoes wordpiece) """
new_sent = []
for i, tok in enumerate(sent):
if tok.startswith("##"):
new_sent[len(new_sent) - 1] = new_sent[len(new_sent) - 1] + tok[2:]
else:
new_sent.append(tok)
return new_sent
def printer(sent, should_detokenize=True):
if should_detokenize:
sent = detokenize(sent)[1:-1]
print(" ".join(sent))
def show_boxes2(img_path, boxes, colors, texts=None, masks=None):
    # boxes [[xyxy]]
    img = Image.open(img_path)
    plt.imshow(img)
    ax = plt.gca()
print('boxes: ',boxes)
for k in range(boxes.shape[0]):
box = boxes[k]
xmin, ymin, xmax, ymax = list(box)
coords = (xmin, ymin), xmax - xmin + 1, ymax - ymin + 1
color = colors[k]
ax.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
if texts is not None:
ax.text(xmin, ymin, texts[k], bbox={'facecolor':'blue', 'alpha':0.5},fontsize=8, color='white')
# write an arbitrary string for a given sentence.
def plot_attention_maps(attn_maps, x_labels, y_labels, title, out_file, type):
# create a 1920 x 1080 pixel image
fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(19.2, 10.8))
attn_head_idx = 0
for row in range(0, 2):
for col in range(0, 4):
ax[row][col].imshow(attn_maps[attn_head_idx])
ax[row][col].set_xticks(np.arange(len(x_labels)))
ax[row][col].set_xticklabels(x_labels)
if row == 0:
ax[row][col].xaxis.tick_top()
plt.setp(ax[row][col].get_xticklabels(), rotation=90, ha="left", rotation_mode="anchor")
else:
plt.setp(ax[row][col].get_xticklabels(), rotation=90, ha="right", rotation_mode="anchor")
# show y ticks only on left column
if col == 0:
ax[row][col].set_yticks(np.arange(len(y_labels)))
ax[row][col].set_yticklabels(y_labels)
else:
ax[row][col].set_yticks([])
ax[row][col].set_yticklabels([])
attn_head_idx += 1
fig.tight_layout()
plt.text(24.25, 0, title, size=18, verticalalignment='center', rotation=270)
# move vision on text attention maps more to the top and text on vision attention maps to the bottom such that
# larger words fit into the visualization
if type == 'vis':
plt.subplots_adjust(left=0.1, right=0.98, top=1.0)
else:
plt.subplots_adjust(left=0.1, right=0.98, top=0.9, bottom=0.0)
plt.savefig(out_file)
| [
"maskrcnn_benchmark.config.cfg.merge_from_file",
"torch.from_numpy",
"torch.full_like",
"numpy.array",
"maskrcnn_benchmark.modeling.detector.build_detection_model",
"torch.nn.functional.softmax",
"numpy.save",
"matplotlib.pyplot.imshow",
"numpy.repeat",
"argparse.ArgumentParser",
"numpy.max",
... | [((10870, 10885), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (10880, 10885), True, 'import matplotlib.pyplot as plt\n'), ((10895, 10904), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10902, 10904), True, 'import matplotlib.pyplot as plt\n'), ((11540, 11592), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(4)', 'figsize': '(19.2, 10.8)'}), '(nrows=2, ncols=4, figsize=(19.2, 10.8))\n', (11552, 11592), True, 'import matplotlib.pyplot as plt\n'), ((12513, 12589), 'matplotlib.pyplot.text', 'plt.text', (['(24.25)', '(0)', 'title'], {'size': '(18)', 'verticalalignment': '"""center"""', 'rotation': '(270)'}), "(24.25, 0, title, size=18, verticalalignment='center', rotation=270)\n", (12521, 12589), True, 'import matplotlib.pyplot as plt\n'), ((12919, 12940), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_file'], {}), '(out_file)\n', (12930, 12940), True, 'import matplotlib.pyplot as plt\n'), ((1059, 1110), 'os.makedirs', 'os.makedirs', (['self.args.output_folder'], {'exist_ok': '(True)'}), '(self.args.output_folder, exist_ok=True)\n', (1070, 1110), False, 'import os\n'), ((1155, 1180), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1178, 1180), False, 'import argparse\n'), ((2733, 2775), 'maskrcnn_benchmark.config.cfg.merge_from_file', 'cfg.merge_from_file', (['self.args.config_file'], {}), '(self.args.config_file)\n', (2752, 2775), False, 'from maskrcnn_benchmark.config import cfg\n'), ((2784, 2796), 'maskrcnn_benchmark.config.cfg.freeze', 'cfg.freeze', ([], {}), '()\n', (2794, 2796), False, 'from maskrcnn_benchmark.config import cfg\n'), ((2814, 2840), 'maskrcnn_benchmark.modeling.detector.build_detection_model', 'build_detection_model', (['cfg'], {}), '(cfg)\n', (2835, 2840), False, 'from maskrcnn_benchmark.modeling.detector import build_detection_model\n'), ((3760, 3776), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (3770, 3776), False, 'from PIL import Image\n'), ((4022, 4062), 'numpy.array', 'np.array', (['[102.9801, 115.9465, 122.7717]'], {}), '([102.9801, 115.9465, 122.7717])\n', (4030, 4062), True, 'import numpy as np\n'), ((4176, 4197), 'numpy.min', 'np.min', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (4182, 4197), True, 'import numpy as np\n'), ((4220, 4241), 'numpy.max', 'np.max', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (4226, 4241), True, 'import numpy as np\n'), ((4555, 4644), 'cv2.resize', 'cv2.resize', (['im', 'None', 'None'], {'fx': 'im_scale', 'fy': 'im_scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.\n INTER_LINEAR)\n', (4565, 4644), False, 'import cv2\n'), ((7775, 7819), 'maskrcnn_benchmark.structures.image_list.to_image_list', 'to_image_list', (['img_tensor'], {'size_divisible': '(32)'}), '(img_tensor, size_divisible=32)\n', (7788, 7819), False, 'from maskrcnn_benchmark.structures.image_list import to_image_list\n'), ((9345, 9395), 'numpy.load', 'np.load', (['self.args.imdb_gt_file'], {'allow_pickle': '(True)'}), '(self.args.imdb_gt_file, allow_pickle=True)\n', (9352, 9395), True, 'import numpy as np\n'), ((10057, 10099), 'numpy.save', 'np.save', (['"""gt_feat.npy"""', 'extracted_features'], {}), "('gt_feat.npy', extracted_features)\n", (10064, 10099), True, 'import numpy as np\n'), ((12782, 12832), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'right': '(0.98)', 'top': '(1.0)'}), '(left=0.1, right=0.98, top=1.0)\n', (12801, 12832), True, 'import 
matplotlib.pyplot as plt\n'), ((12851, 12913), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'right': '(0.98)', 'top': '(0.9)', 'bottom': '(0.0)'}), '(left=0.1, right=0.98, top=0.9, bottom=0.0)\n', (12870, 12913), True, 'import matplotlib.pyplot as plt\n'), ((3937, 3979), 'numpy.repeat', 'np.repeat', (['im[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(im[:, :, np.newaxis], 3, axis=2)\n', (3946, 3979), True, 'import numpy as np\n'), ((4440, 4472), 'numpy.round', 'np.round', (['(im_scale * im_size_max)'], {}), '(im_scale * im_size_max)\n', (4448, 4472), True, 'import numpy as np\n'), ((5149, 5183), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['x', '(-1)'], {}), '(x, -1)\n', (5176, 5183), False, 'import torch\n'), ((5598, 5636), 'torch.full_like', 'torch.full_like', (['max_conf', 'conf_thresh'], {}), '(max_conf, conf_thresh)\n', (5613, 5636), False, 'import torch\n'), ((6630, 6674), 'torch.argmax', 'torch.argmax', (['scores[:, start_index:]'], {'dim': '(1)'}), '(scores[:, start_index:], dim=1)\n', (6642, 6674), False, 'import torch\n'), ((8373, 8388), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8386, 8388), False, 'import torch\n'), ((9153, 9206), 'os.path.join', 'os.path.join', (['self.args.output_folder', 'file_base_name'], {}), '(self.args.output_folder, file_base_name)\n', (9165, 9206), False, 'import os\n'), ((11145, 11209), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['*coords'], {'fill': '(False)', 'edgecolor': 'color', 'linewidth': '(2)'}), '(*coords, fill=False, edgecolor=color, linewidth=2)\n', (11158, 11209), True, 'import matplotlib.pyplot as plt\n'), ((2908, 2927), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2920, 2927), False, 'import torch\n'), ((3790, 3803), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3798, 3803), True, 'import numpy as np\n'), ((4676, 4696), 'torch.from_numpy', 'torch.from_numpy', (['im'], {}), '(im)\n', (4692, 4696), False, 'import torch\n'), ((5941, 5967), 'maskrcnn_benchmark.layers.nms', 'nms', (['dets', 'cls_scores', '(0.5)'], {}), '(dets, cls_scores, 0.5)\n', (5944, 5967), False, 'from maskrcnn_benchmark.layers import nms\n'), ((6001, 6135), 'torch.where', 'torch.where', (['((cls_scores[keep] > max_conf[keep]) & (cls_scores[keep] >\n conf_thresh_tensor[keep]))', 'cls_scores[keep]', 'max_conf[keep]'], {}), '((cls_scores[keep] > max_conf[keep]) & (cls_scores[keep] >\n conf_thresh_tensor[keep]), cls_scores[keep], max_conf[keep])\n', (6012, 6135), False, 'import torch\n'), ((9241, 9294), 'os.path.join', 'os.path.join', (['self.args.output_folder', 'file_base_name'], {}), '(self.args.output_folder, file_base_name)\n', (9253, 9294), False, 'import os\n'), ((5519, 5547), 'torch.zeros', 'torch.zeros', (['scores.shape[0]'], {}), '(scores.shape[0])\n', (5530, 5547), False, 'import torch\n')] |
"""
Routines for Fourier transform.
"""
from __future__ import division
from ..datatable.wrapping import wrap
from ..datatable import column
from . import waveforms, specfunc
import numpy as np
import numpy.fft as fft
def truncate_len_pow2(trace, truncate_power=None):
"""
    Truncate trace length to the nearest power of 2.
    If `truncate_power` is not ``None``, it determines the minimal power of 2 that has to divide the length
    (if it is ``None``, then the maximal possible power is used).
"""
if truncate_power==0:
return trace
    if truncate_power is not None and truncate_power<0:
truncate_power=None
l=len(trace)
chunk_l=1
power=0
while chunk_l*2<=l:
chunk_l=chunk_l*2
power=power+1
if truncate_power is not None and power>=truncate_power:
break
l=(l//chunk_l)*chunk_l
return wrap(trace).t[:l]
def normalize_fourier_transform(ft, normalization="none"):
"""
Normalize the Fourier transform data.
    `ft` is a 2D array with 2 columns: frequency and complex amplitude.
`normalization` can be ``'none'`` (none done), ``'sum'`` (the power sum is preserved: ``sum(abs(ft)**2)==sum(abs(trace)**2)``)
or ``'density'`` (power spectral density normalization).
"""
l=len(ft)
if normalization=="sum":
ft=wrap(ft).copy()
ft[:,1]=ft[:,1]/np.sqrt(l)
ft=ft.cont
elif normalization=="density" or normalization=="dBc":
ft=wrap(ft).copy()
norm=np.sqrt(l**2*abs(ft[1,0]-ft[0,0]))
if normalization=="dBc":
norm=norm*ft[len(ft)//2,1]/l
ft[:,1]=ft[:,1]/norm
ft=ft.cont
elif normalization!="none":
raise ValueError("unrecognized normalization mode: {0}".format(normalization))
return ft
def apply_window(trace_values, window="rectangle", window_power_compensate=True):
"""
Apply FT window to the trace.
    If ``window_power_compensate==True``, the data is multiplied by a compensating factor to preserve power in the spectrum.
"""
if window=="rectangle":
return trace_values
window=specfunc.get_window_func(window)
window_trace=window(np.arange(len(trace_values)),len(trace_values),ft_compensated=window_power_compensate)
return trace_values*window_trace
def fourier_transform(trace, truncate=False, truncate_power=None, normalization="none", no_time=False, single_sided=False, window="rectangle", window_power_compensate=True):
"""
Calculate a fourier transform of the trace.
Args:
trace: Time trace to be transformed. Either an ``Nx2`` array, where ``trace[:,0]`` is time and ``trace[:,1]`` is data (real or complex),
or an ``Nx3`` array, where ``trace[:,0]`` is time, ``trace[:,1]`` is the real part of the signal and ``trace[:,2]`` is the imaginary part.
truncate (bool): If ``True``, cut the data to the power of 2.
truncate_power: If ``None``, cut to the nearest power of 2; otherwise, cut to the largest possible length that divides ``2**truncate_power``.
Only relevant if ``truncate==True``.
normalization (str): Fourier transform normalization:
- ``'none'``: no normalization;
            - ``'sum'``: the norm of the data is conserved (``sum(abs(ft[:,1])**2)==sum(abs(trace[:,1])**2)``);
- ``'density'``: power spectral density normalization, in ``x/rtHz`` (``sum(abs(ft[:,1])**2)*df==mean(abs(trace[:,1])**2)``);
- ``'dBc'``: like ``'density'``, but normalized to the mean trace value.
no_time (bool): If ``True``, assume that the time axis is missing and use the standard index instead (if trace is 1D data, `no_time` is always ``True``).
single_sided (bool): If ``True``, only leave positive frequency side of the transform.
window (str): FT window. Can be ``'rectangle'`` (essentially, no window), ``'hann'`` or ``'hamming'``.
window_power_compensate (bool): If ``True``, the data is multiplied by a compensating factor to preserve power in the spectrum.
Returns:
a two-column array, where the first column is frequency, and the second is complex FT data.
"""
wrapped=wrap(trace)
column_names=["frequency","ft_data"]
if trace.ndim==1:
trace_values=wrapped[:]
else:
if wrapped.shape()[1]==(1 if no_time else 2):
trace_values=wrapped[:,-1]
elif wrapped.shape()[1]==(2 if no_time else 3):
trace_values=wrapped[:,-2]+1j*wrapped[:,-1]
else:
raise ValueError("fourier_transform doesn't work for an array with shape {0}".format(wrapped.shape()))
dt=1. if (no_time or wrapped.ndim()==1) else wrapped[1,0]-wrapped[0,0]
if len(trace_values)==0:
return wrapped.from_array(np.zeros((0,2)),column_names,wrapped=False)
if len(trace_values)==1:
return wrapped.from_array(np.array([[0,trace_values[0]]]),column_names,wrapped=False)
if truncate:
trace_values=truncate_len_pow2(trace_values,truncate_power=truncate_power)
trace_values=apply_window(trace_values,window,window_power_compensate=window_power_compensate)
ft=fft.fftshift(fft.fft(trace_values))
df=1./(dt*len(ft))
frequencies=column.crange(-len(ft)/2.,len(ft)/2.)*df
ft=wrapped.from_columns([frequencies.as_array(),ft],column_names,wrapped=False) if wrapped.ndim()>1 else np.column_stack((frequencies,ft))
ft=normalize_fourier_transform(ft,normalization)
if single_sided:
ft=wrap(ft).t[len(ft)//2:,:]
ft[0,0]=0 # numerical error compensation
return ft
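# Usage sketch (illustrative, not part of the module): a 50 Hz sine sampled at
# 1 kHz for 1 s should give a spectrum peaking at the +/-50 Hz bins, and with
# normalization="sum" the power is conserved, sum(abs(ft[:,1])**2)==sum(trace[:,1]**2):
#   t = np.arange(1000)/1000.
#   trace = np.column_stack((t, np.sin(2*np.pi*50*t)))
#   ft = fourier_transform(trace, normalization="sum")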
def flip_fourier_transform(ft):
"""
Flip the fourier transform (analogous to making frequencies negative and flipping the order).
"""
ft=wrap(ft).copy()
if len(ft)%2==1:
ft[:,1]=ft[::-1,1]
else:
ft[1::,1]=ft[:0:-1,1]
return ft.cont
def inverse_fourier_transform(ft, truncate=False, truncate_power=None, no_freq=False, zero_loc=None, symmetric_time=False):
"""
Calculate an inverse fourier transform of the trace.
Args:
ft: Fourier transform data to be inverted. Is an ``Nx2`` array, where ``ft[:,0]`` is frequency and ``ft[:,1]`` is fourier transform (real or complex).
truncate (bool): If ``True``, cut the data to the power of 2.
truncate_power: If ``None``, cut to the nearest power of 2; otherwise, cut to the largest possible length that divides ``2**truncate_power``.
Only relevant if ``truncate==True``.
no_freq (bool): If ``True``, assume that the frequency axis is missing and use the standard index instead (if trace is 1D data, `no_freq` is always ``True``).
zero_loc (bool): Location of the zero frequency point. Can be ``None`` (the one with the value of f-axis closest to zero), ``'center'`` (mid-point)
or an integer index.
symmetric_time (bool): If ``True``, make time axis go from ``(-0.5/df, 0.5/df)`` rather than ``(0, 1./df)``.
Returns:
a two-column array, where the first column is frequency, and the second is the complex-valued trace data.
"""
wrapped=wrap(ft)
column_names=["time","data"]
if len(ft)==0:
return wrapped.from_array(np.zeros((0,2)),column_names,wrapped=False)
if len(ft)==1:
return wrapped.from_array(np.array([[0,wrapped[:,0]]]),column_names,wrapped=False)
no_freq=no_freq or wrapped.ndim()==1
if zero_loc is None:
if no_freq:
zero_freq_point=0
else:
zero_freq_point=waveforms.find_closest_arg(wrapped.c[0],0,ordered=True)
            if zero_freq_point is None:
                raise ValueError("can't find the zero frequency point on the frequency axis")
elif zero_loc=="center":
zero_freq_point=len(ft)//2
else:
zero_freq_point=zero_loc
if wrapped.ndim()==1:
ft_ordered=np.concatenate(( wrapped[zero_freq_point:], wrapped[:zero_freq_point] ))
else:
ft_ordered=np.concatenate(( wrapped[zero_freq_point:,-1], wrapped[:zero_freq_point,-1] ))
if truncate:
ft_ordered=truncate_len_pow2(ft_ordered,truncate_power=truncate_power)
trace=fft.ifft(ft_ordered)
l=len(trace)
df=1. if no_freq else wrapped[1,0]-wrapped[0,0]
dt=1./(df*l)
    times=column.crange(l)*dt
if symmetric_time:
times=times-times[l//2]
trace=np.concatenate((trace[l//2:],trace[:l//2]))
if wrapped.ndim()==1:
return np.column_stack((times,trace))
else:
return wrapped.from_columns([times.as_array(),trace],column_names,wrapped=False)
def power_spectral_density(trace, truncate=False, truncate_power=None, normalization="density", no_time=False, single_sided=False, window="rectangle", window_power_compensate=True):
"""
Calculate a power spectral density of the trace.
Args:
trace: Time trace to be transformed. Either an ``Nx2`` array, where ``trace[:,0]`` is time and ``trace[:,1]`` is data (real or complex),
or an ``Nx3`` array, where ``trace[:,0]`` is time, ``trace[:,1]`` is the real part of the signal and ``trace[:,2]`` is the imaginary part.
truncate (bool): If ``True``, cut the data to the power of 2.
truncate_power: If ``None``, cut to the nearest power of 2; otherwise, cut to the largest possible length that divides ``2**truncate_power``.
Only relevant if ``truncate==True``.
normalization (str): Fourier transform normalization:
- ``'none'``: no normalization;
            - ``'sum'``: the norm of the data is conserved (``sum(PSD[:,1])==sum(abs(trace[:,1])**2)``);
- ``'density'``: power spectral density normalization, in ``x/rtHz`` (``sum(PSD[:,1])*df==mean(abs(trace[:,1])**2)``);
- ``'dBc'``: like ``'density'``, but normalized to the mean trace value.
no_time (bool): If ``True``, assume that the time axis is missing and use the standard index instead (if trace is 1D data, `no_time` is always ``True``).
single_sided (bool): If ``True``, only leave positive frequency side of the PSD.
window (str): FT window. Can be ``'rectangle'`` (essentially, no window), ``'hann'`` or ``'hamming'``.
window_power_compensate (bool): If ``True``, the data is multiplied by a compensating factor to preserve power in the spectrum.
Returns:
a two-column array, where the first column is frequency, and the second is positive PSD.
"""
column_names=["frequency","PSD"]
ft=fourier_transform(trace, truncate=truncate, truncate_power=truncate_power, normalization=normalization, no_time=no_time, single_sided=single_sided, window=window, window_power_compensate=window_power_compensate)
wrapped=wrap(ft)
PSD=wrapped.from_columns((wrapped.c[0].real,abs(wrapped.c[1])**2),column_names,wrapped=False)
return PSD
def get_real_part(ft):
"""
Get the fourier transform of the real part only from the fourier transform of a complex variable.
"""
re_ft=wrap(ft).copy()
re_ft[1:,1]=(ft[1:,1]+ft[:0:-1,1].conjugate())*0.5
re_ft[0,1]=np.real(ft[0,1])
return re_ft.cont
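# Identity behind the slicing above: for x(t) = r(t) + i*s(t) with transform X,
# the transform of the real part is R(f) = (X(f) + conj(X(-f)))/2; pairing index
# j with index N-j implements f <-> -f in this shifted layout, and the
# self-paired bin at index 0 just keeps its real part.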
def get_imag_part(ft):
"""
Get the fourier transform of the imaginary part only from the fourier transform of a complex variable.
"""
im_ft=wrap(ft).copy()
im_ft[1:,1]=(im_ft[1:,1]-im_ft[:0:-1,1].conjugate())/2.j
im_ft[0,1]=im_ft[0,1].imag
return im_ft.cont
def get_correlations(ft_a, ft_b, zero_mean=True, normalization="none"):
"""
Calculate the correlation function of the two variables given their fourier transforms.
Args:
ft_a: first variable fourier transform
ft_b: second variable fourier transform
zero_mean (bool): If ``True``, the value corresponding to the zero frequency is set to zero (only fluctuations around means of a and b are calculated).
normalization (str): Can be ``'whole'`` (correlations are normalized by product of PSDs derived from `ft_a` and `ft_b`)
or ``'individual'`` (normalization is done for each frequency individually, so that the absolute value is always 1).
"""
if len(ft_a)!=len(ft_b):
raise ValueError("transforms should be of the same length")
corr=ft_a.copy()
corr[:,1]=corr[:,1]*ft_b[:,1].conjugate()
    if zero_mean:
        corr[len(corr)//2,1]=0.
    if normalization=="whole":
        norm_a=(abs(ft_a[:,1])**2).sum()-abs(ft_a[len(ft_a)//2,1])**2
        norm_b=(abs(ft_b[:,1])**2).sum()-abs(ft_b[len(ft_b)//2,1])**2
corr[:,1]=corr[:,1]/(norm_a*norm_b)**.5
elif normalization=="individual":
norm_factors=abs(ft_a[:,1]*ft_b[:,1])
corr[:,1]=corr[:,1]/norm_factors
elif normalization!="none":
raise ValueError("unrecognized normalization method: {0}".format(normalization))
    return corr
| [
"numpy.sqrt",
"numpy.fft.fft",
"numpy.column_stack",
"numpy.real",
"numpy.zeros",
"numpy.array",
"numpy.concatenate",
"numpy.fft.ifft"
] | [((8192, 8212), 'numpy.fft.ifft', 'fft.ifft', (['ft_ordered'], {}), '(ft_ordered)\n', (8200, 8212), True, 'import numpy.fft as fft\n'), ((11132, 11149), 'numpy.real', 'np.real', (['ft[0, 1]'], {}), '(ft[0, 1])\n', (11139, 11149), True, 'import numpy as np\n'), ((5181, 5202), 'numpy.fft.fft', 'fft.fft', (['trace_values'], {}), '(trace_values)\n', (5188, 5202), True, 'import numpy.fft as fft\n'), ((5393, 5427), 'numpy.column_stack', 'np.column_stack', (['(frequencies, ft)'], {}), '((frequencies, ft))\n', (5408, 5427), True, 'import numpy as np\n'), ((7905, 7975), 'numpy.concatenate', 'np.concatenate', (['(wrapped[zero_freq_point:], wrapped[:zero_freq_point])'], {}), '((wrapped[zero_freq_point:], wrapped[:zero_freq_point]))\n', (7919, 7975), True, 'import numpy as np\n'), ((8007, 8085), 'numpy.concatenate', 'np.concatenate', (['(wrapped[zero_freq_point:, -1], wrapped[:zero_freq_point, -1])'], {}), '((wrapped[zero_freq_point:, -1], wrapped[:zero_freq_point, -1]))\n', (8021, 8085), True, 'import numpy as np\n'), ((8404, 8452), 'numpy.concatenate', 'np.concatenate', (['(trace[l // 2:], trace[:l // 2])'], {}), '((trace[l // 2:], trace[:l // 2]))\n', (8418, 8452), True, 'import numpy as np\n'), ((8489, 8520), 'numpy.column_stack', 'np.column_stack', (['(times, trace)'], {}), '((times, trace))\n', (8504, 8520), True, 'import numpy as np\n'), ((1365, 1375), 'numpy.sqrt', 'np.sqrt', (['l'], {}), '(l)\n', (1372, 1375), True, 'import numpy as np\n'), ((4795, 4811), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (4803, 4811), True, 'import numpy as np\n'), ((4902, 4934), 'numpy.array', 'np.array', (['[[0, trace_values[0]]]'], {}), '([[0, trace_values[0]]])\n', (4910, 4934), True, 'import numpy as np\n'), ((7226, 7242), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (7234, 7242), True, 'import numpy as np\n'), ((7323, 7353), 'numpy.array', 'np.array', (['[[0, wrapped[:, 0]]]'], {}), '([[0, wrapped[:, 0]]])\n', (7331, 7353), True, 'import numpy as np\n')] |
# Copyright 2021 Institute of Advanced Research in Artificial Intelligence (IARAI) GmbH.
# IARAI licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from pathlib import Path
import numpy as np
from util.h5_util import load_h5_file
from util.h5_util import write_data_to_h5
def test_assign_reload_floats():
data = np.random.random(size=(5, 5, 5)) * 520 - 255
assert data.dtype != np.uint8
assigned = np.zeros(shape=(5, 5, 5), dtype=np.uint8)
assigned[:] = np.clip(data, 0, 255)
assert assigned.dtype == np.uint8
too_large = np.argwhere(data > 255)
too_small = np.argwhere(data < 0)
with tempfile.TemporaryDirectory() as temp_dir:
myh5 = Path(temp_dir) / "my.h5"
write_data_to_h5(data, filename=myh5)
reloaded = load_h5_file(myh5)
for k in list(too_small) + list(too_large):
print(f"{k}: data={data[k[0], k[1], k[2]]} - reloaded={reloaded[k[0], k[1], k[2]]} - assigned={assigned[k[0], k[1], k[2]]}")
assert (reloaded == assigned).all(), f"assigned={assigned}, reloaded={reloaded}"
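# Illustration of the round-trip being tested (assumes the writer stores uint8):
# after np.clip and the uint8 cast, -12.3 -> 0, 301.9 -> 255 and 42.7 -> 42
# (clipping keeps 42.7, the integer cast truncates), so `reloaded` must match
# `assigned` element by element.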
| [
"numpy.clip",
"tempfile.TemporaryDirectory",
"util.h5_util.write_data_to_h5",
"util.h5_util.load_h5_file",
"pathlib.Path",
"numpy.random.random",
"numpy.zeros",
"numpy.argwhere"
] | [((942, 983), 'numpy.zeros', 'np.zeros', ([], {'shape': '(5, 5, 5)', 'dtype': 'np.uint8'}), '(shape=(5, 5, 5), dtype=np.uint8)\n', (950, 983), True, 'import numpy as np\n'), ((1002, 1023), 'numpy.clip', 'np.clip', (['data', '(0)', '(255)'], {}), '(data, 0, 255)\n', (1009, 1023), True, 'import numpy as np\n'), ((1078, 1101), 'numpy.argwhere', 'np.argwhere', (['(data > 255)'], {}), '(data > 255)\n', (1089, 1101), True, 'import numpy as np\n'), ((1118, 1139), 'numpy.argwhere', 'np.argwhere', (['(data < 0)'], {}), '(data < 0)\n', (1129, 1139), True, 'import numpy as np\n'), ((1149, 1178), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1176, 1178), False, 'import tempfile\n'), ((1240, 1277), 'util.h5_util.write_data_to_h5', 'write_data_to_h5', (['data'], {'filename': 'myh5'}), '(data, filename=myh5)\n', (1256, 1277), False, 'from util.h5_util import write_data_to_h5\n'), ((1297, 1315), 'util.h5_util.load_h5_file', 'load_h5_file', (['myh5'], {}), '(myh5)\n', (1309, 1315), False, 'from util.h5_util import load_h5_file\n'), ((848, 880), 'numpy.random.random', 'np.random.random', ([], {'size': '(5, 5, 5)'}), '(size=(5, 5, 5))\n', (864, 880), True, 'import numpy as np\n'), ((1207, 1221), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (1211, 1221), False, 'from pathlib import Path\n')] |
import numpy as np
class Channel:
def __init__(self, inst):
"""Initializes a new device.
Args:
inst -- instrument in which the channels are
"""
self.inst = inst
self.inst.write(":WAV:FORM BYTE")
self.inst.write(":WAV:POIN:MODE MAX")
def get_time_scale(self):
"""Returns current time scale."""
return float(self.inst.ask(":TIM:SCAL?"))
    def set_time_scale(self, scale):
        """Sets time scale to chosen value.
        Args:
            scale -- time scale to set
        """
        self.inst.write(":TIM:SCAL {}".format(scale))
def get_time_delay(self):
"""Returns current time delay."""
return float(self.inst.ask(":TIM:POS?"))
def set_time_delay(self, delay):
"""Sets time delay to chosen value.
Args:
delay -- time delay to set
"""
self.inst.write(":TIM:POS {}".format(delay))
def get_volt_scale(self, channel):
"""Returns current voltage scale of the selected channel.
Args:
channel -- selected channel
"""
return float(self.inst.ask(":CHAN{}:SCAL?".format(channel)))
def set_volt_scale(self, channel, scale):
"""Sets voltage scale of the selected channel to chosen value.
Args:
channel -- selected channel
scale -- voltage scale to set
"""
self.inst.write(":CHAN{}:SCAL {}".format(channel, scale))
def get_volt_offset(self, channel):
"""Returns current voltage offset of the selected channel.
Args:
channel -- selected channel
"""
return float(self.inst.ask(":CHAN{}:OFFS?".format(channel)))
def set_volt_offset(self, channel, offset):
"""Sets voltage offset of the selected channel to chosen value.
Args:
channel -- selected channel
offset -- voltage offset to set
"""
self.inst.write(":CHAN{}:OFFS {}".format(channel, offset))
def auto_trigger(self, channel):
"""Set trigger level to 50% in selected channel.
Args:
channel -- selected channel
"""
self.inst.write(":TRIG:SOUR CHAN{}".format(channel))
self.inst.write(":TRIG:LEV:ASET")
def set_trigger(self, channel, level=0):
"""Set trigger level to chosen value in selected channel.
Args:
channel -- selected channel
level -- trigger level to set in volts (default 0)
"""
self.inst.write(":TRIG:SOUR CHAN{}".format(channel))
self.inst.write(":TRIG:LEV {}".format(level))
def get_vpp(self, channel):
"""Returns peak-to-peak measurement of the selected channel.
Args:
channel -- selected channel
"""
return float(self.inst.ask(":MEAS:VPP? CHAN{}".format(channel)))
def get_vrms(self, channel):
"""Returns rms measurement of the selected channel.
Args:
channel -- selected channel
"""
return float(self.inst.ask(":MEAS:VRMS? CHAN{}".format(channel)))
def get_frequency(self, channel):
"""Returns frequency measurement of the selected channel.
Args:
channel -- selected channel
"""
return float(self.inst.ask(":MEAS:FREQ? CHAN{}".format(channel)))
def get_period(self, channel):
"""Returns period measurement of the selected channel.
Args:
channel -- selected channel
"""
return float(self.inst.ask(":MEAS:PER? CHAN{}".format(channel)))
def get_phase(self, sel_channel, ref_channel):
"""Returns phase difference between selected and reference channel
in degrees.
Args:
sel_channel -- selected channel
ref_channel -- reference channel
"""
return float(
self.inst.ask("MEAS:PHAS? CHAN{},CHAN{}".format(sel_channel, ref_channel))
)
def set_coupling(self, channel, mode):
"""Sets coupling mode of the selected channel to chosen value.
Args:
channel -- selected channel
mode -- "AC" or "DC"
"""
self.inst.write(":CHAN{}:COUP {}".format(channel, mode))
def toggle_channel(self, channel):
"""Toggles selected channel status.
Args:
channel -- selected channel
"""
status = self.inst.ask("CHAN{}:DISP?".format(channel)) == "1"
if status:
self.inst.write("CHAN{}:DISP OFF".format(channel))
else:
self.inst.write("CHAN{}:DISP ON".format(channel))
def get_data(self, channel, points=1000):
"""Returns wave data from selected channel as a numpy array.
Args:
channel -- selected channel
points -- number of points to be acquired
"""
self.inst.write(":DIG CHAN{}".format(channel))
self.inst.write(":WAV:POIN {}".format(points))
self.inst.write(":WAV:SOURCE CHAN{}".format(channel))
self.inst.write(":WAV:DATA?")
rawdata = self.inst.read_raw()
data = np.frombuffer(rawdata[10:-1], "B")
yorigin = float(self.inst.ask(":WAV:YOR?"))
yref = float(self.inst.ask(":WAV:YREF?"))
yinc = float(self.inst.ask(":WAV:YINC?"))
xorigin = float(self.inst.ask(":WAV:XOR?"))
xref = float(self.inst.ask(":WAV:XREF?"))
xinc = float(self.inst.ask(":WAV:XINC?"))
data_y = ((data - yref) * yinc) + yorigin
data_x = np.array(range(len(data)))
        data_x = (data_x - xref) * xinc + xorigin
return data_x, data_y
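# Usage sketch (illustrative, not part of the driver above). The class assumes
# an instrument object exposing write()/ask()/read_raw(), as in older pyvisa
# (newer pyvisa renames ask() to query()); the VISA address below is a
# hypothetical placeholder.
if __name__ == "__main__":
    import pyvisa
    rm = pyvisa.ResourceManager()
    inst = rm.open_resource("USB0::0x0957::0x17A6::MY00000000::INSTR")  # hypothetical address
    if not hasattr(inst, "ask"):
        inst.ask = inst.query  # shim for pyvisa >= 1.12, where ask() was removed
    scope = Channel(inst)
    scope.set_coupling(1, "DC")
    scope.auto_trigger(1)
    data_x, data_y = scope.get_data(1, points=1000)
    print(data_x[:5], data_y[:5])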
| [
"numpy.frombuffer"
] | [((5029, 5063), 'numpy.frombuffer', 'np.frombuffer', (['rawdata[10:-1]', '"""B"""'], {}), "(rawdata[10:-1], 'B')\n", (5042, 5063), True, 'import numpy as np\n')] |
import numpy as np
import utils
def mk_training_matrices(pairs, en_dimension, cat_dimension, english_space, catalan_space):
en_mat = np.zeros((len(pairs),en_dimension))
cat_mat = np.zeros((len(pairs),cat_dimension))
c = 0
for p in pairs:
en_word,cat_word = p.split()
en_mat[c] = english_space[en_word]
cat_mat[c] = catalan_space[cat_word]
c+=1
return en_mat,cat_mat
def linalg(mat_english,mat_catalan):
    w = np.linalg.lstsq(mat_english,mat_catalan,rcond=None)[0] # obtaining the parameters (rcond=None avoids the NumPy FutureWarning)
print(mat_english.shape,mat_catalan.shape,w.shape)
return w
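# Toy check of what linalg() computes (a sketch, not from the original script):
# if B = A @ M for some matrix M, least squares recovers M from (A, B).
# >>> A = np.array([[1., 0.], [0., 1.], [1., 1.]])
# >>> B = A @ np.array([[2., 0.], [0., 3.]])
# >>> np.linalg.lstsq(A, B, rcond=None)[0]        # ~= [[2., 0.], [0., 3.]]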
'''Read semantic spaces'''
english_space = utils.readDM("data/english.subset.dm")
catalan_space = utils.readDM("data/catalan.subset.dm")
utils.run_PCA(english_space,english_space.keys(),"english_space.png")
utils.run_PCA(catalan_space,catalan_space.keys(),"catalan_space.png")
'''Read all word pairs'''
all_pairs = []
f = open("data/pairs.txt")
for l in f:
l = l.rstrip('\n')
all_pairs.append(l)
f.close()
'''Make training/test fold'''
training_pairs = all_pairs[:120]
test_pairs = all_pairs[120:]   # start at index 120, not 121, so no pair is silently skipped
'''Make training/test matrices'''
en_mat, cat_mat = mk_training_matrices(training_pairs, 400, 300, english_space, catalan_space)
params = linalg(en_mat,cat_mat)
'''Test'''
'''Sanity check -- is the regression matrix retrieving the training vectors?'''
#print(training_pairs[0])
#en, cat = training_pairs[0].split()
#predict = np.dot(params.T,english_space[en])
#print(predict[:20])
#print(catalan_space[cat][:20])
'''Loop through test pairs and evaluate translations'''
score = 0
for p in test_pairs:
en, cat = p.split()
predicted_vector = np.dot(params.T,english_space[en])
#print(predicted_vector)
nearest_neighbours = utils.neighbours(catalan_space,predicted_vector,5)
if cat in nearest_neighbours:
score+=1
print(en,cat,nearest_neighbours,"1")
else:
print(en,cat,nearest_neighbours,"0")
print("Precision:",score/len(test_pairs))
| [
"utils.readDM",
"utils.neighbours",
"numpy.dot",
"numpy.linalg.lstsq"
] | [((661, 699), 'utils.readDM', 'utils.readDM', (['"""data/english.subset.dm"""'], {}), "('data/english.subset.dm')\n", (673, 699), False, 'import utils\n'), ((716, 754), 'utils.readDM', 'utils.readDM', (['"""data/catalan.subset.dm"""'], {}), "('data/catalan.subset.dm')\n", (728, 754), False, 'import utils\n'), ((1678, 1713), 'numpy.dot', 'np.dot', (['params.T', 'english_space[en]'], {}), '(params.T, english_space[en])\n', (1684, 1713), True, 'import numpy as np\n'), ((1767, 1819), 'utils.neighbours', 'utils.neighbours', (['catalan_space', 'predicted_vector', '(5)'], {}), '(catalan_space, predicted_vector, 5)\n', (1783, 1819), False, 'import utils\n'), ((474, 515), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['mat_english', 'mat_catalan'], {}), '(mat_english, mat_catalan)\n', (489, 515), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from models.tf_model import TFModel
class RNN(TFModel):
def input_layer(self):
'''
Data and Hyperparameters
'''
with tf.variable_scope("input_layer"):
# Tensor containing word ids
# shape = (batch size, max length of sentence in batch)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None],
name="word_ids")
# Tensor containing the real length of each sentence
# shape = (batch size)
self.sentence_lengths = tf.placeholder(tf.int32, shape=[None],
name="sentence_lengths")
# Tensor containing char ids
# shape = (batch size, max length of sentence, max length of word)
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None],
name="char_ids")
# shape = (batch_size, max_length of sentence)
self.word_lengths = tf.placeholder(tf.int32, shape=[None, None],
name="word_lengths")
# Tensor containing the real length of each word
# shape = (batch size, max length of sentence in batch)
self.labels = tf.placeholder(tf.int32, shape=[None, None],
name="labels")
# Dropout tensors
self.char_drop_input = tf.placeholder_with_default(
input=1.0, shape=(), name="char_drop_input")
self.char_drop_state = tf.placeholder_with_default(
input=1.0, shape=(), name="char_drop_state")
self.char_drop_output = tf.placeholder_with_default(
input=1.0, shape=(), name="char_drop_output")
self.word_drop_input = tf.placeholder_with_default(
input=1.0, shape=(), name="word_drop_input")
self.word_drop_state = tf.placeholder_with_default(
input=1.0, shape=(), name="word_drop_state")
self.word_drop_output = tf.placeholder_with_default(
input=1.0, shape=(), name="word_drop_output")
# Training variables
self.global_step = tf.Variable(0, name="global_step", trainable=False)
# Using a decaying learning rate
self.lr = tf.train.exponential_decay(
learning_rate=self.config.learning["rate"],
global_step=self.global_step,
decay_steps=self.config.learning["decay_steps"],
decay_rate=self.config.learning["decay"],
staircase=self.config.learning["staircase"])
# Create the optimizer/trainer
        # Initialized here once so the same optimizer can be reused for multi-GPU training
self.optimizer = tf.train.AdamOptimizer(self.lr)
def embedding_layer(self):
'''
Embedding matrices
'''
with tf.variable_scope("embedding_layer"):
if self.config.pretrained is None:
# Using randomly initialized vectors
# Word embedding matrix
word_embedding = tf.get_variable(
name="word_embedding",
dtype=tf.float32,
initializer=tf.random_uniform(
shape=[self.config.n_words, self.config.dim_word],
minval=-0.25, maxval=0.25))
else:
word_embedding = tf.get_variable(
name="word_embedding",
initializer=np.asarray(self.config.wordvec_matrix, dtype=np.float32),
dtype=tf.float32,
trainable=self.config.non_static)
if self.config.use_chars:
# Char embedding matrix
char_embedding = tf.get_variable(
name="char_embedding",
dtype=tf.float32,
initializer=tf.random_uniform(
shape=[self.config.n_chars, self.config.dim_char],
minval=-0.25, maxval=0.25))
self.word_vectors = tf.nn.embedding_lookup(
word_embedding, self.word_ids, name="word_matrix")
if self.config.use_chars:
self.char_vectors = tf.nn.embedding_lookup(
char_embedding, self.char_ids, name="char_matrix")
'''
word_embedding = (batch size, max length of sentence in batch, self.config.dim_word)
char_embedding = (batch size, max length of sentence in batch, max length of word, self.config.dim_char)
'''
def RNN_layer(self):
'''
Recurrent Layer
'''
def Cells(num_units, char_cell=False):
'''
Function to build cells
'''
# TODO: Wrappers
if self.config.cells == "rnn":
self.cell_fw = tf.contrib.rnn.BasicRNNCell(num_units=num_units)
if self.config.bidirectional:
self.cell_bw = tf.contrib.rnn.BasicRNNCell(num_units=num_units)
elif self.config.cells == "lstm":
self.cell_fw = tf.contrib.rnn.LSTMCell(num_units=num_units)
if self.config.bidirectional:
self.cell_bw = tf.contrib.rnn.LSTMCell(num_units=num_units)
else:
self.cell_fw = tf.contrib.rnn.GRUCell(num_units=num_units)
if self.config.bidirectional:
self.cell_bw = tf.contrib.rnn.GRUCell(num_units=num_units)
if char_cell:
self.cell_fw = tf.contrib.rnn.DropoutWrapper(
cell=self.cell_fw, input_keep_prob=self.char_drop_input, output_keep_prob=self.char_drop_output, state_keep_prob=self.char_drop_state)
if self.config.bidirectional:
self.cell_bw = tf.contrib.rnn.DropoutWrapper(
cell=self.cell_bw, input_keep_prob=self.char_drop_input, output_keep_prob=self.char_drop_output, state_keep_prob=self.char_drop_state)
else:
self.cell_fw = tf.contrib.rnn.DropoutWrapper(
cell=self.cell_fw, input_keep_prob=self.word_drop_input, output_keep_prob=self.word_drop_output, state_keep_prob=self.word_drop_state)
if self.config.bidirectional:
self.cell_bw = tf.contrib.rnn.DropoutWrapper(
cell=self.cell_bw, input_keep_prob=self.word_drop_input, output_keep_prob=self.word_drop_output, state_keep_prob=self.word_drop_state)
# Word Level Network
if self.config.use_chars:
with tf.variable_scope("word_layer"):
# Put the word length in the axis 1 (time dimension)
s = tf.shape(self.char_vectors)
# new shape = [batch*sentence_length,word_length,char_dim]
self.char_vectors = tf.reshape(self.char_vectors,
shape=[s[0] * s[1], s[-2], self.config.dim_char])
word_lengths = tf.reshape(self.word_lengths, shape=[s[0] * s[1]])
# CELLS
Cells(self.config.cell_char)
# Bidirectional
if self.config.bidirectional:
_, (output_state_fw, output_state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=self.cell_fw, cell_bw=self.cell_bw, inputs=self.char_vectors,
sequence_length=word_lengths, dtype=tf.float32)
if self.config.cells == "lstm":
output_state_fw, output_state_bw = output_state_fw[1], output_state_bw[1]
self.char_output = tf.concat([output_state_fw, output_state_bw], axis=-1)
# Unidirectional
else:
_, output_state_fw = tf.nn.dynamic_rnn(
cell=self.cell_fw, inputs=self.char_vectors,
sequence_length=word_lengths, dtype=tf.float32)
                if self.config.cells == "lstm":  # 'cells' (not 'model'), consistent with the branches above
output_state_fw = output_state_fw[1]
self.char_output = output_state_fw
            # shape = (batch size * max sentence length, char hidden size)
self.h = self.char_output.shape[1].value
self.char_output = tf.reshape(self.char_output, shape=[s[0], s[1], self.h])
self.word_vectors = tf.concat([self.word_vectors, self.char_output], axis=-1)
# Sentence Level Network
with tf.variable_scope("sentence_layer"):
# Create Cells
Cells(self.config.cell_word)
# Bidirectional
if self.config.bidirectional:
(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=self.cell_fw, cell_bw=self.cell_bw, inputs=self.word_vectors,
sequence_length=self.sentence_lengths, dtype=tf.float32)
self.lstm_output = tf.concat([output_fw, output_bw], axis=-1)
# Unidirectional
else:
output_state_fw, _ = tf.nn.dynamic_rnn(
cell=self.cell_fw, inputs=self.word_vectors,
sequence_length=self.sentence_lengths, dtype=tf.float32)
self.lstm_output = output_state_fw
# tf.shape() gets us the dynamic shape of a tensor
# Save the max sentence length
self.nsteps = tf.shape(self.lstm_output)[1]
# .shape on the other hand provides the static shape of a tensor
# Save the hidden length
self.h = self.lstm_output.shape[2].value
# current shape = [batch,max sentence length, hidden size]
# after shape = [batch * max sentence , hidden size]
self.layer_output = tf.reshape(self.lstm_output, [-1, self.h])
def output_layer(self):
with tf.variable_scope("output_layer"):
layer = {
'weights': tf.get_variable(name="W", initializer=tf.truncated_normal([self.h, self.config.n_tags])),
'biases': tf.get_variable(name="b", initializer=tf.truncated_normal([self.config.n_tags]))
}
self.pred = tf.nn.xw_plus_b(
self.layer_output, layer["weights"], layer["biases"], name="preds")
self.logits = tf.reshape(
self.pred, [-1, self.nsteps, self.config.n_tags], name="logits")
def loss_function(self):
with tf.variable_scope("loss_layer"):
if self.config.use_crf:
log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(
self.logits, self.labels, self.sentence_lengths)
self.trans_params = tf.Variable(trans_params, name="trans_params")
self.loss = tf.reduce_mean(-log_likelihood)
else:
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.logits, labels=self.labels)
mask = tf.sequence_mask(self.sentence_lengths)
losses = tf.boolean_mask(losses, mask)
self.loss = tf.reduce_mean(losses)
def train_op(self):
with tf.variable_scope("train_step"):
self.gradient = self.optimizer.compute_gradients(loss=self.loss)
self.train_op = self.optimizer.apply_gradients(grads_and_vars=self.gradient,
global_step=self.global_step)
def build(self):
self.input_layer()
self.embedding_layer()
self.RNN_layer()
self.output_layer()
self.loss_function()
# Generic functions that add training op and initialize session
self.train_op()
self.initialize_session() # now self.sess is defined and vars are init
def load_model(self, dir):
self.initialize_session()
self.saver = tf.train.import_meta_graph("{}.meta".format(dir))
self.saver.restore(self.sess, dir)
# Get the operations easily
graph = tf.get_default_graph()
# INPUT_LAYER
self.word_ids = graph.get_operation_by_name("input_layer/word_ids").outputs[0]
self.sentence_lengths = graph.get_operation_by_name(
"input_layer/sentence_lengths").outputs[0]
self.char_ids = graph.get_operation_by_name("input_layer/char_ids").outputs[0]
self.word_lengths = graph.get_operation_by_name("input_layer/word_lengths").outputs[0]
self.labels = graph.get_operation_by_name("input_layer/labels").outputs[0]
# OUTPUT_LAYER
self.logits = graph.get_operation_by_name("output_layer/logits").outputs[0]
# CRF
if self.config.use_crf:
self.trans_params = graph.get_operation_by_name("loss_layer/trans_params").outputs[0]
def __init__(self, config):
super(RNN, self).__init__(config)
def predict_batch(self, feed):
# Batch Prediction
# CRF Prediction
if self.config.use_crf:
# get tag scores and transition params of CRF
viterbi_sequences = []
logits, trans_params = self.sess.run(
[self.logits, self.trans_params], feed_dict=feed)
# iterate over the sentences because no batching in vitervi_decode
for logit, sentence_length in zip(logits, feed[self.sentence_lengths]):
logit = logit[:sentence_length] # keep only the valid steps
viterbi_seq, _ = tf.contrib.crf.viterbi_decode(
logit, trans_params)
viterbi_sequences.append(viterbi_seq)
return viterbi_sequences
# Softmax Prediction
else:
labels_pred = self.sess.run(self.logits, feed_dict=feed)
# labels_pred = tf.cast(tf.argmax(self.logits, axis=-1), tf.int32)
labels_pred = np.argmax(labels_pred, axis=-1)
return labels_pred
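# Construction sketch (illustrative; the config fields below are inferred from
# their usage in this class, and TFModel is assumed to just store the config):
# from types import SimpleNamespace
# config = SimpleNamespace(
#     cells="lstm", bidirectional=True, use_chars=True, use_crf=True,
#     n_words=10000, n_chars=80, n_tags=9, dim_word=100, dim_char=30,
#     cell_word=128, cell_char=64, pretrained=None, non_static=True,
#     learning={"rate": 1e-3, "decay_steps": 1000, "decay": 0.9, "staircase": True})
# model = RNN(config)
# model.build()   # builds the graph and initializes the session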
| [
"tensorflow.shape",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.boolean_mask",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.contrib.rnn.GRUCell",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.reduce_mean",
"tensorflow.nn.embedding_lookup",
"tensorflow.placeholder... | [((12229, 12251), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (12249, 12251), True, 'import tensorflow as tf\n'), ((202, 234), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input_layer"""'], {}), "('input_layer')\n", (219, 234), True, 'import tensorflow as tf\n'), ((373, 434), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""word_ids"""'}), "(tf.int32, shape=[None, None], name='word_ids')\n", (387, 434), True, 'import tensorflow as tf\n'), ((614, 677), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""sentence_lengths"""'}), "(tf.int32, shape=[None], name='sentence_lengths')\n", (628, 677), True, 'import tensorflow as tf\n'), ((877, 944), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None, None]', 'name': '"""char_ids"""'}), "(tf.int32, shape=[None, None, None], name='char_ids')\n", (891, 944), True, 'import tensorflow as tf\n'), ((1080, 1145), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""word_lengths"""'}), "(tf.int32, shape=[None, None], name='word_lengths')\n", (1094, 1145), True, 'import tensorflow as tf\n'), ((1348, 1407), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""labels"""'}), "(tf.int32, shape=[None, None], name='labels')\n", (1362, 1407), True, 'import tensorflow as tf\n'), ((1515, 1587), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', ([], {'input': '(1.0)', 'shape': '()', 'name': '"""char_drop_input"""'}), "(input=1.0, shape=(), name='char_drop_input')\n", (1542, 1587), True, 'import tensorflow as tf\n'), ((1640, 1712), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', ([], {'input': '(1.0)', 'shape': '()', 'name': '"""char_drop_state"""'}), "(input=1.0, shape=(), name='char_drop_state')\n", (1667, 1712), True, 'import tensorflow as tf\n'), ((1766, 1839), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', ([], {'input': '(1.0)', 'shape': '()', 'name': '"""char_drop_output"""'}), "(input=1.0, shape=(), name='char_drop_output')\n", (1793, 1839), True, 'import tensorflow as tf\n'), ((1892, 1964), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', ([], {'input': '(1.0)', 'shape': '()', 'name': '"""word_drop_input"""'}), "(input=1.0, shape=(), name='word_drop_input')\n", (1919, 1964), True, 'import tensorflow as tf\n'), ((2017, 2089), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', ([], {'input': '(1.0)', 'shape': '()', 'name': '"""word_drop_state"""'}), "(input=1.0, shape=(), name='word_drop_state')\n", (2044, 2089), True, 'import tensorflow as tf\n'), ((2143, 2216), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', ([], {'input': '(1.0)', 'shape': '()', 'name': '"""word_drop_output"""'}), "(input=1.0, shape=(), name='word_drop_output')\n", (2170, 2216), True, 'import tensorflow as tf\n'), ((2299, 2350), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (2310, 2350), True, 'import tensorflow as tf\n'), ((2418, 2668), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', ([], {'learning_rate': "self.config.learning['rate']", 'global_step': 'self.global_step', 'decay_steps': "self.config.learning['decay_steps']", 'decay_rate': "self.config.learning['decay']", 
'staircase': "self.config.learning['staircase']"}), "(learning_rate=self.config.learning['rate'],\n global_step=self.global_step, decay_steps=self.config.learning[\n 'decay_steps'], decay_rate=self.config.learning['decay'], staircase=\n self.config.learning['staircase'])\n", (2444, 2668), True, 'import tensorflow as tf\n'), ((2866, 2897), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (2888, 2897), True, 'import tensorflow as tf\n'), ((2998, 3034), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embedding_layer"""'], {}), "('embedding_layer')\n", (3015, 3034), True, 'import tensorflow as tf\n'), ((4199, 4272), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['word_embedding', 'self.word_ids'], {'name': '"""word_matrix"""'}), "(word_embedding, self.word_ids, name='word_matrix')\n", (4221, 4272), True, 'import tensorflow as tf\n'), ((8686, 8721), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sentence_layer"""'], {}), "('sentence_layer')\n", (8703, 8721), True, 'import tensorflow as tf\n'), ((9981, 10023), 'tensorflow.reshape', 'tf.reshape', (['self.lstm_output', '[-1, self.h]'], {}), '(self.lstm_output, [-1, self.h])\n', (9991, 10023), True, 'import tensorflow as tf\n'), ((10066, 10099), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output_layer"""'], {}), "('output_layer')\n", (10083, 10099), True, 'import tensorflow as tf\n'), ((10386, 10474), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['self.layer_output', "layer['weights']", "layer['biases']"], {'name': '"""preds"""'}), "(self.layer_output, layer['weights'], layer['biases'], name=\n 'preds')\n", (10401, 10474), True, 'import tensorflow as tf\n'), ((10513, 10588), 'tensorflow.reshape', 'tf.reshape', (['self.pred', '[-1, self.nsteps, self.config.n_tags]'], {'name': '"""logits"""'}), "(self.pred, [-1, self.nsteps, self.config.n_tags], name='logits')\n", (10523, 10588), True, 'import tensorflow as tf\n'), ((10649, 10680), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss_layer"""'], {}), "('loss_layer')\n", (10666, 10680), True, 'import tensorflow as tf\n'), ((11370, 11401), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""train_step"""'], {}), "('train_step')\n", (11387, 11401), True, 'import tensorflow as tf\n'), ((14052, 14083), 'numpy.argmax', 'np.argmax', (['labels_pred'], {'axis': '(-1)'}), '(labels_pred, axis=-1)\n', (14061, 14083), True, 'import numpy as np\n'), ((4365, 4438), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['char_embedding', 'self.char_ids'], {'name': '"""char_matrix"""'}), "(char_embedding, self.char_ids, name='char_matrix')\n", (4387, 4438), True, 'import tensorflow as tf\n'), ((5017, 5065), 'tensorflow.contrib.rnn.BasicRNNCell', 'tf.contrib.rnn.BasicRNNCell', ([], {'num_units': 'num_units'}), '(num_units=num_units)\n', (5044, 5065), True, 'import tensorflow as tf\n'), ((5722, 5895), 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', ([], {'cell': 'self.cell_fw', 'input_keep_prob': 'self.char_drop_input', 'output_keep_prob': 'self.char_drop_output', 'state_keep_prob': 'self.char_drop_state'}), '(cell=self.cell_fw, input_keep_prob=self.\n char_drop_input, output_keep_prob=self.char_drop_output,\n state_keep_prob=self.char_drop_state)\n', (5751, 5895), True, 'import tensorflow as tf\n'), ((6228, 6401), 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', ([], {'cell': 'self.cell_fw', 'input_keep_prob': 'self.word_drop_input', 'output_keep_prob': 
'self.word_drop_output', 'state_keep_prob': 'self.word_drop_state'}), '(cell=self.cell_fw, input_keep_prob=self.\n word_drop_input, output_keep_prob=self.word_drop_output,\n state_keep_prob=self.word_drop_state)\n', (6257, 6401), True, 'import tensorflow as tf\n'), ((6766, 6797), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""word_layer"""'], {}), "('word_layer')\n", (6783, 6797), True, 'import tensorflow as tf\n'), ((6888, 6915), 'tensorflow.shape', 'tf.shape', (['self.char_vectors'], {}), '(self.char_vectors)\n', (6896, 6915), True, 'import tensorflow as tf\n'), ((7027, 7106), 'tensorflow.reshape', 'tf.reshape', (['self.char_vectors'], {'shape': '[s[0] * s[1], s[-2], self.config.dim_char]'}), '(self.char_vectors, shape=[s[0] * s[1], s[-2], self.config.dim_char])\n', (7037, 7106), True, 'import tensorflow as tf\n'), ((7185, 7235), 'tensorflow.reshape', 'tf.reshape', (['self.word_lengths'], {'shape': '[s[0] * s[1]]'}), '(self.word_lengths, shape=[s[0] * s[1]])\n', (7195, 7235), True, 'import tensorflow as tf\n'), ((8488, 8544), 'tensorflow.reshape', 'tf.reshape', (['self.char_output'], {'shape': '[s[0], s[1], self.h]'}), '(self.char_output, shape=[s[0], s[1], self.h])\n', (8498, 8544), True, 'import tensorflow as tf\n'), ((8581, 8638), 'tensorflow.concat', 'tf.concat', (['[self.word_vectors, self.char_output]'], {'axis': '(-1)'}), '([self.word_vectors, self.char_output], axis=-1)\n', (8590, 8638), True, 'import tensorflow as tf\n'), ((8907, 9074), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'self.cell_fw', 'cell_bw': 'self.cell_bw', 'inputs': 'self.word_vectors', 'sequence_length': 'self.sentence_lengths', 'dtype': 'tf.float32'}), '(cell_fw=self.cell_fw, cell_bw=self.cell_bw,\n inputs=self.word_vectors, sequence_length=self.sentence_lengths, dtype=\n tf.float32)\n', (8938, 9074), True, 'import tensorflow as tf\n'), ((9143, 9185), 'tensorflow.concat', 'tf.concat', (['[output_fw, output_bw]'], {'axis': '(-1)'}), '([output_fw, output_bw], axis=-1)\n', (9152, 9185), True, 'import tensorflow as tf\n'), ((9270, 9393), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'self.cell_fw', 'inputs': 'self.word_vectors', 'sequence_length': 'self.sentence_lengths', 'dtype': 'tf.float32'}), '(cell=self.cell_fw, inputs=self.word_vectors,\n sequence_length=self.sentence_lengths, dtype=tf.float32)\n', (9287, 9393), True, 'import tensorflow as tf\n'), ((9616, 9642), 'tensorflow.shape', 'tf.shape', (['self.lstm_output'], {}), '(self.lstm_output)\n', (9624, 9642), True, 'import tensorflow as tf\n'), ((10765, 10852), 'tensorflow.contrib.crf.crf_log_likelihood', 'tf.contrib.crf.crf_log_likelihood', (['self.logits', 'self.labels', 'self.sentence_lengths'], {}), '(self.logits, self.labels, self.\n sentence_lengths)\n', (10798, 10852), True, 'import tensorflow as tf\n'), ((10905, 10951), 'tensorflow.Variable', 'tf.Variable', (['trans_params'], {'name': '"""trans_params"""'}), "(trans_params, name='trans_params')\n", (10916, 10951), True, 'import tensorflow as tf\n'), ((10980, 11011), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(-log_likelihood)'], {}), '(-log_likelihood)\n', (10994, 11011), True, 'import tensorflow as tf\n'), ((11055, 11146), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'self.logits', 'labels': 'self.labels'}), '(logits=self.logits, labels=\n self.labels)\n', (11101, 11146), True, 'import tensorflow as tf\n'), ((11186, 11225), 
'tensorflow.sequence_mask', 'tf.sequence_mask', (['self.sentence_lengths'], {}), '(self.sentence_lengths)\n', (11202, 11225), True, 'import tensorflow as tf\n'), ((11251, 11280), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['losses', 'mask'], {}), '(losses, mask)\n', (11266, 11280), True, 'import tensorflow as tf\n'), ((11309, 11331), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), '(losses)\n', (11323, 11331), True, 'import tensorflow as tf\n'), ((13671, 13721), 'tensorflow.contrib.crf.viterbi_decode', 'tf.contrib.crf.viterbi_decode', (['logit', 'trans_params'], {}), '(logit, trans_params)\n', (13700, 13721), True, 'import tensorflow as tf\n'), ((5147, 5195), 'tensorflow.contrib.rnn.BasicRNNCell', 'tf.contrib.rnn.BasicRNNCell', ([], {'num_units': 'num_units'}), '(num_units=num_units)\n', (5174, 5195), True, 'import tensorflow as tf\n'), ((5274, 5318), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'num_units'}), '(num_units=num_units)\n', (5297, 5318), True, 'import tensorflow as tf\n'), ((5495, 5538), 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', ([], {'num_units': 'num_units'}), '(num_units=num_units)\n', (5517, 5538), True, 'import tensorflow as tf\n'), ((5989, 6162), 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', ([], {'cell': 'self.cell_bw', 'input_keep_prob': 'self.char_drop_input', 'output_keep_prob': 'self.char_drop_output', 'state_keep_prob': 'self.char_drop_state'}), '(cell=self.cell_bw, input_keep_prob=self.\n char_drop_input, output_keep_prob=self.char_drop_output,\n state_keep_prob=self.char_drop_state)\n', (6018, 6162), True, 'import tensorflow as tf\n'), ((6495, 6668), 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', ([], {'cell': 'self.cell_bw', 'input_keep_prob': 'self.word_drop_input', 'output_keep_prob': 'self.word_drop_output', 'state_keep_prob': 'self.word_drop_state'}), '(cell=self.cell_bw, input_keep_prob=self.\n word_drop_input, output_keep_prob=self.word_drop_output,\n state_keep_prob=self.word_drop_state)\n', (6524, 6668), True, 'import tensorflow as tf\n'), ((7445, 7598), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'self.cell_fw', 'cell_bw': 'self.cell_bw', 'inputs': 'self.char_vectors', 'sequence_length': 'word_lengths', 'dtype': 'tf.float32'}), '(cell_fw=self.cell_fw, cell_bw=self.cell_bw,\n inputs=self.char_vectors, sequence_length=word_lengths, dtype=tf.float32)\n', (7476, 7598), True, 'import tensorflow as tf\n'), ((7835, 7889), 'tensorflow.concat', 'tf.concat', (['[output_state_fw, output_state_bw]'], {'axis': '(-1)'}), '([output_state_fw, output_state_bw], axis=-1)\n', (7844, 7889), True, 'import tensorflow as tf\n'), ((7987, 8101), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'self.cell_fw', 'inputs': 'self.char_vectors', 'sequence_length': 'word_lengths', 'dtype': 'tf.float32'}), '(cell=self.cell_fw, inputs=self.char_vectors,\n sequence_length=word_lengths, dtype=tf.float32)\n', (8004, 8101), True, 'import tensorflow as tf\n'), ((3339, 3439), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[self.config.n_words, self.config.dim_word]', 'minval': '(-0.25)', 'maxval': '(0.25)'}), '(shape=[self.config.n_words, self.config.dim_word], minval\n =-0.25, maxval=0.25)\n', (3356, 3439), True, 'import tensorflow as tf\n'), ((3628, 3684), 'numpy.asarray', 'np.asarray', (['self.config.wordvec_matrix'], {'dtype': 'np.float32'}), '(self.config.wordvec_matrix, 
dtype=np.float32)\n', (3638, 3684), True, 'import numpy as np\n'), ((4020, 4120), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[self.config.n_chars, self.config.dim_char]', 'minval': '(-0.25)', 'maxval': '(0.25)'}), '(shape=[self.config.n_chars, self.config.dim_char], minval\n =-0.25, maxval=0.25)\n', (4037, 4120), True, 'import tensorflow as tf\n'), ((5400, 5444), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'num_units'}), '(num_units=num_units)\n', (5423, 5444), True, 'import tensorflow as tf\n'), ((5620, 5663), 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', ([], {'num_units': 'num_units'}), '(num_units=num_units)\n', (5642, 5663), True, 'import tensorflow as tf\n'), ((10188, 10237), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[self.h, self.config.n_tags]'], {}), '([self.h, self.config.n_tags])\n', (10207, 10237), True, 'import tensorflow as tf\n'), ((10304, 10345), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[self.config.n_tags]'], {}), '([self.config.n_tags])\n', (10323, 10345), True, 'import tensorflow as tf\n')] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib.lines import Line2D
def plotMeasurement(L, indices, measurements, s=0.25, filename=None, title=None, url=None):
fig = plt.figure()
axes = fig.add_subplot(111, projection='3d')
x, y, z = [], [], []
dx, dy, dz, c = [], [], [], []
l = 1. - s
cmap = plt.get_cmap('plasma')
column_names = []
for j in range(L):
name = '{0}L'.format(str(j+1))
column_names.append(name)
name = '{0}R'.format(str(j+1))
column_names.append(name)
for index, measurement in zip(indices, measurements):
i, si, j, sj, label = index
idx_i = 2*i
if si == 'r':
idx_i += 1
idx_j = 2*j
if sj == 'r':
idx_j += 1
x += [idx_i]
y += [idx_j]
z += [0]
dx += [l]
dy += [l]
dz += [measurement]
c += [cmap(measurement)]
ticks = np.arange(0, 2*L, 1)
axes.set_zlim(0., 1.)
axes.set_xticks(ticks)
axes.set_xticklabels(column_names)
axes.set_yticks(ticks)
axes.set_yticklabels(column_names)
axes.set_xlabel('source')
axes.set_ylabel('target')
axes.set_zlabel('$P$')
axes.bar3d(x, y, z, dx, dy, dz, color=c, zsort='max', shade=True, edgecolor='white')
if url is not None:
axes.text(-0.75, 2*L+1, 0.0, url, 'y', fontsize=7)
    x_txt = axes.text(0, 0, 1.01, r'$135^{\circ}$', 'x')
    y_txt = axes.text(0, 2*L-0.2, 1.015, r'$45^{\circ}$', 'y')
x_title = axes.text(2*L, 2*L, 1.2, title, 'y', fontsize=20)
y_title = axes.text(2*L, 0, 1.25, title, 'x', fontsize=20)
axes.view_init(elev=45, azim=45)
if filename is not None:
x_txt.set_visible(False)
y_txt.set_visible(True)
x_title.set_visible(False)
y_title.set_visible(True)
fig.savefig(filename + '_45.png', transparent=True, bbox_inches='tight', pad_inches=0)
axes.view_init(elev=45, azim=135)
if filename is not None:
x_txt.set_visible(True)
y_txt.set_visible(False)
x_title.set_visible(True)
y_title.set_visible(False)
fig.savefig(filename + '_135.png', transparent=True, bbox_inches='tight', pad_inches=0)
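# Usage sketch (toy values, purely illustrative): each index is a tuple
# (i, side_i, j, side_j, label) with sides 'l'/'r', and each measurement is a
# probability in [0, 1].
# plotMeasurement(L=2, indices=[(0, 'l', 1, 'r', 'P')], measurements=[0.8],
#                 title='toy', filename='toy_measurement')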
def plotExpectations(times, expects, labels, colors, styles, linewidths, title, filename, axvlines=[]):
fig, ax = plt.subplots(1, 1, constrained_layout=True)
ax.set_title(title)
ax.set_xlabel('t')
ax.grid()
for x in axvlines:
ax.axvline(x=x, color='black', linestyle='--')
for measurement, label, color, style, width in zip(expects, labels, colors, styles, linewidths):
ax.plot(times, measurement, label=label, color=color, linestyle=style, linewidth=width)
ax.legend()
fig.savefig(filename, transparent=True)
# from: https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html#sphx-glr-gallery-lines-bars-and-markers-barchart-py
def plotTeleportationOutcomes(outcomes, corrs, labels, title, url=None, filename=None):
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax = plt.subplots(1, 1, constrained_layout=True)
rects1 = ax.bar(x - width/2, outcomes, width, label='not corrected')
rects2 = ax.bar(x + width/2, corrs, width, label='corrected')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('fidelity')
ax.set_title(title)
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
ax.bar_label(rects1, padding=3)
ax.bar_label(rects2, padding=3)
if url is not None:
ax.text(0, -0.1, url, fontsize=7)
fig.tight_layout()
if filename is not None:
fig.savefig(filename, transparent=True)
def plotHbdgSpectrum(L, mus, spectrum, title, mark=None, url=None, url_x=None, url_y=None, filename=None):
fig, ax = plt.subplots(1, 1, constrained_layout=True)
ax.set_title(title)
    ax.set_xlabel(r'$\mu$')
ax.set_ylabel('$E$')
ax.set_xlim(0., mus[-1])
for j in range(2*L):
ax.plot(mus, spectrum[:, j], color='black')
if mark is not None:
ax.axvline(x=mark, color='red', linestyle='--')
if url is not None:
pos_y = -0.1
if url_y is not None:
pos_y = url_y
pos_x = 0.0
if url_x is not None:
pos_x = url_x
ax.text(pos_x, pos_y, url, fontsize=9)
    if filename is not None:   # filename defaults to None, so guard the save
        fig.savefig(filename, transparent=True)
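# Minimal demo (toy data, not from the original scripts):
if __name__ == "__main__":
    L = 4
    mus = np.linspace(0.0, 4.0, 200)
    # fake 2L energy bands, just to exercise the plotting code
    spectrum = np.array([[np.cos(mu + j) for j in range(2 * L)] for mu in mus])
    plotHbdgSpectrum(L, mus, spectrum, title="toy spectrum", mark=2.0,
                     filename="toy_spectrum.png")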
| [
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.get_cmap"
] | [((272, 284), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (282, 284), True, 'import matplotlib.pyplot as plt\n'), ((420, 442), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""plasma"""'], {}), "('plasma')\n", (432, 442), True, 'import matplotlib.pyplot as plt\n'), ((1026, 1048), 'numpy.arange', 'np.arange', (['(0)', '(2 * L)', '(1)'], {}), '(0, 2 * L, 1)\n', (1035, 1048), True, 'import numpy as np\n'), ((2421, 2464), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'constrained_layout': '(True)'}), '(1, 1, constrained_layout=True)\n', (2433, 2464), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3240), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'constrained_layout': '(True)'}), '(1, 1, constrained_layout=True)\n', (3209, 3240), True, 'import matplotlib.pyplot as plt\n'), ((3941, 3984), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'constrained_layout': '(True)'}), '(1, 1, constrained_layout=True)\n', (3953, 3984), True, 'import matplotlib.pyplot as plt\n')] |
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from numpy.linalg import eig
from PIL import Image
import numpy as np
import argparse
import glob
import time
import os
parser = argparse.ArgumentParser()
parser.add_argument("--kernel-type", type=str,
default="rbf", help="kernel function type")
parser.add_argument("--gamma-s", type=float, default=2.5,
help="hyperparameter gamma_s in the rbf kernel")
parser.add_argument("--gamma-c", type=float, default=2.5,
help="hyperparameter gamma_c in the rbf kernel")
parser.add_argument("--sigma", type=float, default=0.1,
help="Sigma value for Laplace rbf kernel")
parser.add_argument("--cut", type=str, default="normalized",
help="ratio or normalized cut")
parser.add_argument("--K", type=int, default=2, help="number of clusters")
parser.add_argument("--init-mode", type=str, default="k-means++",
help="initialize cluster mode")
parser.add_argument("--iterations", type=str, default=50,
help="Maximum iterations for K-means to run")
args = parser.parse_args()
print("".join(f"{k}={v}\n" for k, v in vars(args).items()))
DATA_PATH = "./data/"
SAVE_PATH = "./results/"
def get_kernel(img, h, w):
img = img.reshape(h * w, 3)
img = img / 255.0
coor = []
for i in range(w):
for j in range(h):
coor.append([i, j])
coor = np.array(coor, dtype=float)
coor = coor / 100.0
if args.kernel_type == "rbf":
pix_dist = cdist(img, img, "sqeuclidean")
spatial_dist = cdist(coor, coor, "sqeuclidean")
# e^-gamma_s*spatial_dist x e^-gamma_c*color_dist
g_s = args.gamma_s
g_c = args.gamma_c
gram_matrix = np.multiply(
np.exp(-g_s * spatial_dist), np.exp(-g_c * pix_dist))
elif args.kernel_type == "Laplace_rbf":
sigma = args.sigma
pix_dist = cdist(img, img, metric="minkowski", p=1)
spatial_dist = cdist(coor, coor, metric="minkowski", p=1)
gram_matrix = np.multiply(
np.exp(-1 / sigma * spatial_dist), np.exp(-1/sigma * pix_dist))
return gram_matrix
def get_img_name(img_path):
img_path = os.path.normpath(img_path)
path_list = img_path.split(os.sep)
img_name = path_list[-1][:-4]
return img_name
def get_graph_Laplacian(W):
cut_type = args.cut
d = np.sum(W, axis=1)
D = np.diag(d) # degree matrix D=[dii]
if cut_type == "ratio":
L = D - W
elif cut_type == "normalized":
L = np.sqrt(D) @ (D - W) @ np.sqrt(D)
return L
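# For reference: the "ratio" branch above uses the unnormalized Laplacian
# L = D - W, while the "normalized" branch uses the symmetric normalized
# Laplacian L_sym = D^{-1/2} (D - W) D^{-1/2} of Ng-Jordan-Weiss spectral
# clustering (the per-row normalization of the eigenvector matrix happens
# later, in eigen_decomposition).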
def eigen_decomposition(img_name, L):
cut = args.cut
K = args.K
kernel_type = args.kernel_type
eigval_f = DATA_PATH + f"eigval_{img_name}_{cut}_{kernel_type}.npy"
eigvec_f = DATA_PATH + f"eigvec_{img_name}_{cut}_{kernel_type}.npy"
if os.path.exists(eigval_f):
eigval = np.load(eigval_f)
eigvec = np.load(eigvec_f)
else:
eigval, eigvec = eig(L)
np.save(eigval_f, eigval)
np.save(eigvec_f, eigvec)
order = np.argsort(eigval)
sorted_eigvec = eigvec[:, order]
U = sorted_eigvec[:, 1: K + 1]
T = U.copy()
if cut == "normalized":
for i, u in enumerate(U):
T[i, :] = u / np.sqrt(np.sum(u ** 2))
return T
def init_cluster(data, img):
K = args.K
mode = args.init_mode
if mode == "random":
rand_idx = np.random.choice(data.shape[0], size=K)
mean = data[rand_idx]
dist = cdist(mean, data, metric="sqeuclidean")
cluster = np.argmin(dist, axis=0)
elif mode == "k-means++":
# 1. Choose one center uniformly at random among the data points.
# 2. For each data point x not chosen yet, compute D(x), the distance between x and the nearest center that has already been chosen.
        # 3. Choose one new data point at random as a new center, using a weighted probability distribution where a point x is chosen with probability proportional to D(x)^2.
# 4. Repeat Steps 2 and 3 until k centers have been chosen.
img = img.reshape(-1, 3)
img = img / 255.0
first_mean = np.random.choice(h * w, size=1)
center = np.full(K, first_mean, dtype=int)
center_val = img[center]
for i in range(1, K):
            dist = cdist(center_val, img, metric="sqeuclidean")
            min_dist = np.min(dist, axis=0)   # already the squared distance D(x)^2
            center[i] = np.random.choice(
                h * w, size=1, p=min_dist / np.sum(min_dist))  # weight by D(x)^2, not D(x)^4
center_val = img[center]
dist = cdist(center_val, img, metric="sqeuclidean")
cluster = np.argmin(dist, axis=0)
return cluster
def run(data, h, w, img):
iterations = args.iterations
K = args.K
all_alpha = []
alpha = init_cluster(data, img)
all_alpha.append(alpha.reshape(h, w))
for iter in range(iterations):
cnt = np.zeros(K, dtype=float)
for i in range(K):
cnt[i] = np.count_nonzero(alpha == i)
if cnt[i] == 0:
cnt[i] = 1
        mean = np.zeros((K, data.shape[1]), dtype=float)  # here data.shape[1] == K (the spectral embedding)
for i in range(K):
mean[i] = np.sum(data[alpha == i, :], axis=0)
mean[i] = mean[i]/cnt[i]
dist = cdist(mean, data, metric="sqeuclidean")
new_alpha = np.argmin(dist, axis=0)
all_alpha.append(new_alpha.reshape(h, w))
if np.array_equal(alpha, new_alpha):
print(f"Converge in {iter+1}th iterations!")
break
alpha = new_alpha
all_alpha = np.array(all_alpha)
return all_alpha
def plot_result(all_alpha, img_name, data):
K = args.K
mode = args.init_mode
kernel_type = args.kernel_type
cut = args.cut
img_name += f"_{cut}_k{K}_{kernel_type}_{mode}"
# export video .gif
save_dir = SAVE_PATH + img_name
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
color = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0]], dtype=float)
imgs = []
for i in range(len(all_alpha)):
out_img = color[all_alpha[i]]
out_img = out_img.reshape((h, w, 3))
plt.imsave(f"{save_dir}/{img_name}_{i}.png", out_img)
imgs.append(Image.fromarray(np.uint8(out_img * 255)))
video_path = SAVE_PATH + "spectral_video/" + img_name + ".gif"
imgs[0].save(video_path, format="GIF", append_images=imgs[1:],
loop=0, save_all=True, duration=300)
# plot eigenspace
alpha = all_alpha[-1]
alpha = np.array(alpha)
alpha = alpha.reshape(-1)
if K == 2:
plt.figure(figsize=(10, 10))
plt.scatter(data[alpha == 0, 0], data[alpha == 0, 1], c='yellow')
plt.scatter(data[alpha == 1, 0], data[alpha == 1, 1], c='blue')
plt.title(f"Eigendspace {cut} K={K} {kernel_type} {mode}")
eigen_path = SAVE_PATH+"eigenspace/"+img_name+".png"
plt.savefig(eigen_path)
plt.show()
if __name__ == "__main__":
start_time = time.time()
for img_path in glob.glob(DATA_PATH + "*.png"):
img_name = get_img_name(img_path)
img = Image.open(img_path, "r")
img = np.array(img)
h, w, _ = img.shape
W = get_kernel(img, h, w)
L = get_graph_Laplacian(W)
T = eigen_decomposition(img_name, L)
all_alpha = run(T, h, w, img)
plot_result(all_alpha, img_name, T)
print(f"--- {time.time()-start_time} seconds ---")
| [
"numpy.uint8",
"numpy.sqrt",
"numpy.argsort",
"numpy.array",
"numpy.count_nonzero",
"numpy.save",
"os.path.exists",
"argparse.ArgumentParser",
"os.path.normpath",
"numpy.exp",
"os.path.isdir",
"os.mkdir",
"matplotlib.pyplot.scatter",
"numpy.min",
"numpy.argmin",
"glob.glob",
"matplot... | [((205, 230), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (228, 230), False, 'import argparse\n'), ((1472, 1499), 'numpy.array', 'np.array', (['coor'], {'dtype': 'float'}), '(coor, dtype=float)\n', (1480, 1499), True, 'import numpy as np\n'), ((2256, 2282), 'os.path.normpath', 'os.path.normpath', (['img_path'], {}), '(img_path)\n', (2272, 2282), False, 'import os\n'), ((2438, 2455), 'numpy.sum', 'np.sum', (['W'], {'axis': '(1)'}), '(W, axis=1)\n', (2444, 2455), True, 'import numpy as np\n'), ((2464, 2474), 'numpy.diag', 'np.diag', (['d'], {}), '(d)\n', (2471, 2474), True, 'import numpy as np\n'), ((2901, 2925), 'os.path.exists', 'os.path.exists', (['eigval_f'], {}), '(eigval_f)\n', (2915, 2925), False, 'import os\n'), ((3120, 3138), 'numpy.argsort', 'np.argsort', (['eigval'], {}), '(eigval)\n', (3130, 3138), True, 'import numpy as np\n'), ((5596, 5615), 'numpy.array', 'np.array', (['all_alpha'], {}), '(all_alpha)\n', (5604, 5615), True, 'import numpy as np\n'), ((5967, 6034), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0]]'], {'dtype': 'float'}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0]], dtype=float)\n', (5975, 6034), True, 'import numpy as np\n'), ((6541, 6556), 'numpy.array', 'np.array', (['alpha'], {}), '(alpha)\n', (6549, 6556), True, 'import numpy as np\n'), ((7010, 7021), 'time.time', 'time.time', ([], {}), '()\n', (7019, 7021), False, 'import time\n'), ((7043, 7073), 'glob.glob', 'glob.glob', (["(DATA_PATH + '*.png')"], {}), "(DATA_PATH + '*.png')\n", (7052, 7073), False, 'import glob\n'), ((1578, 1608), 'scipy.spatial.distance.cdist', 'cdist', (['img', 'img', '"""sqeuclidean"""'], {}), "(img, img, 'sqeuclidean')\n", (1583, 1608), False, 'from scipy.spatial.distance import cdist\n'), ((1632, 1664), 'scipy.spatial.distance.cdist', 'cdist', (['coor', 'coor', '"""sqeuclidean"""'], {}), "(coor, coor, 'sqeuclidean')\n", (1637, 1664), False, 'from scipy.spatial.distance import cdist\n'), ((2944, 2961), 'numpy.load', 'np.load', (['eigval_f'], {}), '(eigval_f)\n', (2951, 2961), True, 'import numpy as np\n'), ((2979, 2996), 'numpy.load', 'np.load', (['eigvec_f'], {}), '(eigvec_f)\n', (2986, 2996), True, 'import numpy as np\n'), ((3032, 3038), 'numpy.linalg.eig', 'eig', (['L'], {}), '(L)\n', (3035, 3038), False, 'from numpy.linalg import eig\n'), ((3047, 3072), 'numpy.save', 'np.save', (['eigval_f', 'eigval'], {}), '(eigval_f, eigval)\n', (3054, 3072), True, 'import numpy as np\n'), ((3081, 3106), 'numpy.save', 'np.save', (['eigvec_f', 'eigvec'], {}), '(eigvec_f, eigvec)\n', (3088, 3106), True, 'import numpy as np\n'), ((3470, 3509), 'numpy.random.choice', 'np.random.choice', (['data.shape[0]'], {'size': 'K'}), '(data.shape[0], size=K)\n', (3486, 3509), True, 'import numpy as np\n'), ((3555, 3594), 'scipy.spatial.distance.cdist', 'cdist', (['mean', 'data'], {'metric': '"""sqeuclidean"""'}), "(mean, data, metric='sqeuclidean')\n", (3560, 3594), False, 'from scipy.spatial.distance import cdist\n'), ((3613, 3636), 'numpy.argmin', 'np.argmin', (['dist'], {'axis': '(0)'}), '(dist, axis=0)\n', (3622, 3636), True, 'import numpy as np\n'), ((4957, 4981), 'numpy.zeros', 'np.zeros', (['K'], {'dtype': 'float'}), '(K, dtype=float)\n', (4965, 4981), True, 'import numpy as np\n'), ((5129, 5158), 'numpy.zeros', 'np.zeros', (['(K, K)'], {'dtype': 'float'}), '((K, K), dtype=float)\n', (5137, 5158), True, 'import numpy as np\n'), ((5297, 5336), 'scipy.spatial.distance.cdist', 'cdist', (['mean', 'data'], {'metric': '"""sqeuclidean"""'}), 
"(mean, data, metric='sqeuclidean')\n", (5302, 5336), False, 'from scipy.spatial.distance import cdist\n'), ((5357, 5380), 'numpy.argmin', 'np.argmin', (['dist'], {'axis': '(0)'}), '(dist, axis=0)\n', (5366, 5380), True, 'import numpy as np\n'), ((5443, 5475), 'numpy.array_equal', 'np.array_equal', (['alpha', 'new_alpha'], {}), '(alpha, new_alpha)\n', (5457, 5475), True, 'import numpy as np\n'), ((5903, 5926), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (5916, 5926), False, 'import os\n'), ((5936, 5954), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (5944, 5954), False, 'import os\n'), ((6176, 6229), 'matplotlib.pyplot.imsave', 'plt.imsave', (['f"""{save_dir}/{img_name}_{i}.png"""', 'out_img'], {}), "(f'{save_dir}/{img_name}_{i}.png', out_img)\n", (6186, 6229), True, 'import matplotlib.pyplot as plt\n'), ((6610, 6638), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (6620, 6638), True, 'import matplotlib.pyplot as plt\n'), ((6647, 6712), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[alpha == 0, 0]', 'data[alpha == 0, 1]'], {'c': '"""yellow"""'}), "(data[alpha == 0, 0], data[alpha == 0, 1], c='yellow')\n", (6658, 6712), True, 'import matplotlib.pyplot as plt\n'), ((6721, 6784), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[alpha == 1, 0]', 'data[alpha == 1, 1]'], {'c': '"""blue"""'}), "(data[alpha == 1, 0], data[alpha == 1, 1], c='blue')\n", (6732, 6784), True, 'import matplotlib.pyplot as plt\n'), ((6793, 6851), 'matplotlib.pyplot.title', 'plt.title', (['f"""Eigendspace {cut} K={K} {kernel_type} {mode}"""'], {}), "(f'Eigendspace {cut} K={K} {kernel_type} {mode}')\n", (6802, 6851), True, 'import matplotlib.pyplot as plt\n'), ((6921, 6944), 'matplotlib.pyplot.savefig', 'plt.savefig', (['eigen_path'], {}), '(eigen_path)\n', (6932, 6944), True, 'import matplotlib.pyplot as plt\n'), ((6953, 6963), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6961, 6963), True, 'import matplotlib.pyplot as plt\n'), ((7131, 7156), 'PIL.Image.open', 'Image.open', (['img_path', '"""r"""'], {}), "(img_path, 'r')\n", (7141, 7156), False, 'from PIL import Image\n'), ((7171, 7184), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (7179, 7184), True, 'import numpy as np\n'), ((1824, 1851), 'numpy.exp', 'np.exp', (['(-g_s * spatial_dist)'], {}), '(-g_s * spatial_dist)\n', (1830, 1851), True, 'import numpy as np\n'), ((1853, 1876), 'numpy.exp', 'np.exp', (['(-g_c * pix_dist)'], {}), '(-g_c * pix_dist)\n', (1859, 1876), True, 'import numpy as np\n'), ((1969, 2009), 'scipy.spatial.distance.cdist', 'cdist', (['img', 'img'], {'metric': '"""minkowski"""', 'p': '(1)'}), "(img, img, metric='minkowski', p=1)\n", (1974, 2009), False, 'from scipy.spatial.distance import cdist\n'), ((2033, 2075), 'scipy.spatial.distance.cdist', 'cdist', (['coor', 'coor'], {'metric': '"""minkowski"""', 'p': '(1)'}), "(coor, coor, metric='minkowski', p=1)\n", (2038, 2075), False, 'from scipy.spatial.distance import cdist\n'), ((4205, 4236), 'numpy.random.choice', 'np.random.choice', (['(h * w)'], {'size': '(1)'}), '(h * w, size=1)\n', (4221, 4236), True, 'import numpy as np\n'), ((4254, 4287), 'numpy.full', 'np.full', (['K', 'first_mean'], {'dtype': 'int'}), '(K, first_mean, dtype=int)\n', (4261, 4287), True, 'import numpy as np\n'), ((4626, 4670), 'scipy.spatial.distance.cdist', 'cdist', (['center_val', 'img'], {'metric': '"""sqeuclidean"""'}), "(center_val, img, metric='sqeuclidean')\n", (4631, 4670), False, 'from 
scipy.spatial.distance import cdist\n'), ((4689, 4712), 'numpy.argmin', 'np.argmin', (['dist'], {'axis': '(0)'}), '(dist, axis=0)\n', (4698, 4712), True, 'import numpy as np\n'), ((5030, 5058), 'numpy.count_nonzero', 'np.count_nonzero', (['(alpha == i)'], {}), '(alpha == i)\n', (5046, 5058), True, 'import numpy as np\n'), ((5208, 5243), 'numpy.sum', 'np.sum', (['data[alpha == i, :]'], {'axis': '(0)'}), '(data[alpha == i, :], axis=0)\n', (5214, 5243), True, 'import numpy as np\n'), ((2123, 2156), 'numpy.exp', 'np.exp', (['(-1 / sigma * spatial_dist)'], {}), '(-1 / sigma * spatial_dist)\n', (2129, 2156), True, 'import numpy as np\n'), ((2158, 2187), 'numpy.exp', 'np.exp', (['(-1 / sigma * pix_dist)'], {}), '(-1 / sigma * pix_dist)\n', (2164, 2187), True, 'import numpy as np\n'), ((2616, 2626), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (2623, 2626), True, 'import numpy as np\n'), ((4370, 4414), 'scipy.spatial.distance.cdist', 'cdist', (['center_val', 'img'], {'metric': '"""sqeuclidean"""'}), "(center_val, img, metric='sqeuclidean')\n", (4375, 4414), False, 'from scipy.spatial.distance import cdist\n'), ((4438, 4458), 'numpy.min', 'np.min', (['dist'], {'axis': '(0)'}), '(dist, axis=0)\n', (4444, 4458), True, 'import numpy as np\n'), ((6266, 6289), 'numpy.uint8', 'np.uint8', (['(out_img * 255)'], {}), '(out_img * 255)\n', (6274, 6289), True, 'import numpy as np\n'), ((2593, 2603), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (2600, 2603), True, 'import numpy as np\n'), ((3324, 3338), 'numpy.sum', 'np.sum', (['(u ** 2)'], {}), '(u ** 2)\n', (3330, 3338), True, 'import numpy as np\n'), ((7427, 7438), 'time.time', 'time.time', ([], {}), '()\n', (7436, 7438), False, 'import time\n'), ((4550, 4571), 'numpy.sum', 'np.sum', (['(min_dist ** 2)'], {}), '(min_dist ** 2)\n', (4556, 4571), True, 'import numpy as np\n')] |
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared helpers for different experiment flavours."""
from typing import Mapping, Sequence
from acme import specs
from acme.tf import networks
from acme.tf import utils as tf2_utils
import numpy as np
import sonnet as snt
def make_default_networks(
environment_spec: specs.EnvironmentSpec,
*,
policy_layer_sizes: Sequence[int] = (256, 256, 256),
critic_layer_sizes: Sequence[int] = (512, 512, 256),
policy_init_scale: float = 0.7,
critic_init_scale: float = 1e-3,
critic_num_components: int = 5,
) -> Mapping[str, snt.Module]:
"""Creates networks used by the agent."""
# Unpack the environment spec to get appropriate shapes, dtypes, etc.
act_spec = environment_spec.actions
obs_spec = environment_spec.observations
num_dimensions = np.prod(act_spec.shape, dtype=int)
# Create the observation network and make sure it's a Sonnet module.
observation_network = tf2_utils.batch_concat
observation_network = tf2_utils.to_sonnet_module(observation_network)
# Create the policy network.
policy_network = snt.Sequential([
networks.LayerNormMLP(policy_layer_sizes, activate_final=True),
networks.MultivariateNormalDiagHead(
num_dimensions,
init_scale=policy_init_scale,
use_tfd_independent=True)
])
# The multiplexer concatenates the (maybe transformed) observations/actions.
critic_network = snt.Sequential([
networks.CriticMultiplexer(action_network=networks.ClipToSpec(act_spec)),
networks.LayerNormMLP(critic_layer_sizes, activate_final=True),
networks.GaussianMixtureHead(
num_dimensions=1,
num_components=critic_num_components,
init_scale=critic_init_scale)
])
# Create network variables.
# Get embedding spec by creating observation network variables.
emb_spec = tf2_utils.create_variables(observation_network, [obs_spec])
tf2_utils.create_variables(policy_network, [emb_spec])
tf2_utils.create_variables(critic_network, [emb_spec, act_spec])
return {
'policy': policy_network,
'critic': critic_network,
'observation': observation_network,
}
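# Usage sketch (illustrative spec; the shapes, dtypes and bounds below are
# placeholders, not taken from any real environment):
if __name__ == '__main__':
  env_spec = specs.EnvironmentSpec(
      observations=specs.Array(shape=(10,), dtype=np.float32),
      actions=specs.BoundedArray(shape=(3,), dtype=np.float32,
                                minimum=-1.0, maximum=1.0),
      rewards=specs.Array(shape=(), dtype=np.float32),
      discounts=specs.BoundedArray(shape=(), dtype=np.float32,
                                   minimum=0.0, maximum=1.0))
  nets = make_default_networks(env_spec)
  print({k: type(v).__name__ for k, v in nets.items()})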
| [
"numpy.prod",
"acme.tf.networks.ClipToSpec",
"acme.tf.utils.create_variables",
"acme.tf.networks.MultivariateNormalDiagHead",
"acme.tf.networks.GaussianMixtureHead",
"acme.tf.utils.to_sonnet_module",
"acme.tf.networks.LayerNormMLP"
] | [((1395, 1429), 'numpy.prod', 'np.prod', (['act_spec.shape'], {'dtype': 'int'}), '(act_spec.shape, dtype=int)\n', (1402, 1429), True, 'import numpy as np\n'), ((1573, 1620), 'acme.tf.utils.to_sonnet_module', 'tf2_utils.to_sonnet_module', (['observation_network'], {}), '(observation_network)\n', (1599, 1620), True, 'from acme.tf import utils as tf2_utils\n'), ((2442, 2501), 'acme.tf.utils.create_variables', 'tf2_utils.create_variables', (['observation_network', '[obs_spec]'], {}), '(observation_network, [obs_spec])\n', (2468, 2501), True, 'from acme.tf import utils as tf2_utils\n'), ((2504, 2558), 'acme.tf.utils.create_variables', 'tf2_utils.create_variables', (['policy_network', '[emb_spec]'], {}), '(policy_network, [emb_spec])\n', (2530, 2558), True, 'from acme.tf import utils as tf2_utils\n'), ((2561, 2625), 'acme.tf.utils.create_variables', 'tf2_utils.create_variables', (['critic_network', '[emb_spec, act_spec]'], {}), '(critic_network, [emb_spec, act_spec])\n', (2587, 2625), True, 'from acme.tf import utils as tf2_utils\n'), ((1695, 1757), 'acme.tf.networks.LayerNormMLP', 'networks.LayerNormMLP', (['policy_layer_sizes'], {'activate_final': '(True)'}), '(policy_layer_sizes, activate_final=True)\n', (1716, 1757), False, 'from acme.tf import networks\n'), ((1765, 1877), 'acme.tf.networks.MultivariateNormalDiagHead', 'networks.MultivariateNormalDiagHead', (['num_dimensions'], {'init_scale': 'policy_init_scale', 'use_tfd_independent': '(True)'}), '(num_dimensions, init_scale=\n policy_init_scale, use_tfd_independent=True)\n', (1800, 1877), False, 'from acme.tf import networks\n'), ((2111, 2173), 'acme.tf.networks.LayerNormMLP', 'networks.LayerNormMLP', (['critic_layer_sizes'], {'activate_final': '(True)'}), '(critic_layer_sizes, activate_final=True)\n', (2132, 2173), False, 'from acme.tf import networks\n'), ((2181, 2300), 'acme.tf.networks.GaussianMixtureHead', 'networks.GaussianMixtureHead', ([], {'num_dimensions': '(1)', 'num_components': 'critic_num_components', 'init_scale': 'critic_init_scale'}), '(num_dimensions=1, num_components=\n critic_num_components, init_scale=critic_init_scale)\n', (2209, 2300), False, 'from acme.tf import networks\n'), ((2073, 2102), 'acme.tf.networks.ClipToSpec', 'networks.ClipToSpec', (['act_spec'], {}), '(act_spec)\n', (2092, 2102), False, 'from acme.tf import networks\n')] |
#Script goal: run letter detection on a set of images and export (filename, letter, confidence, (x, y, w, h)) rows to a CSV
#run this script from the command line inside the darknet folder
import cv2
import numpy as np
import glob2
from pandas import DataFrame
import darknet
import sys
import itertools
from tqdm import tqdm
sys.path.insert(0,'/mekeneocr/myUniverse/darknet') #put darknet path in first place in sys (this step is very important)
config= "/mekeneocr/tiny/yolov4-tiny-custom.cfg"
data= "/mekeneocr/tiny/letters.data"
weights= "/mekeneocr/tiny/yolov4-tiny-custom_best.weights"
network, class_names, class_colors = darknet.load_network(
config,
data,
weights,
batch_size=1
)
def image_detection(image_path, network, class_names, class_colors, thresh):
    # Darknet doesn't accept numpy images.
    # Create one reusable darknet image buffer for every detection call
width = darknet.network_width(network)
height = darknet.network_height(network)
darknet_image = darknet.make_image(width, height, 3)
image = cv2.imread(image_path)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_resized = cv2.resize(image_rgb, (width, height),
interpolation=cv2.INTER_LINEAR)
darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
detections = darknet.detect_image(network, class_names, darknet_image, thresh=thresh)
darknet.free_image(darknet_image)
image = darknet.draw_boxes(detections, image_resized, class_colors)
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), detections
liste = [] #create an empty list for the paths of the csv files
liste_df = [] #create an empty list for the dataframes
tiny_images = glob2.glob("/mekeneocr/tiny/tiny_images/*.png") #list all png images
for idx, thing in tqdm(enumerate(tiny_images)): #for every image, collect its detections (the csv path bookkeeping below is commented out)
    #path_to_csv_thing=thing.split(".")[0] + ".csv"
    #liste.append(path_to_csv_thing)
    my_image_detected,det = image_detection(thing, network, class_names, class_colors, 0.9) #for every image, find its detections
print(f"the image size is :{str(my_image_detected.shape)}")
mescoordonnees = [] #create an empty list for the coordinates (letter, confidence, x, y, w, h)
mesvecteurs = [] #create an empty list for the vectors
monvecteur = []
print(det)
print(len(det))
    if len(det) > 0:
        for i in range(len(det)): #for every detection, extract its vector
            try: #try to unpack the detection tuple
                filename = thing.split("/")[4]
                vector_letter = det[i][0]
                vector_confidence = det[i][1]
                vector_x = int(det[i][2][0])
                vector_y = int(det[i][2][1])
                vector_w = int(det[i][2][2])
                vector_h = int(det[i][2][3])
                mescoordonnees.append(filename)
                mescoordonnees.append(vector_letter)
                mescoordonnees.append(vector_confidence)
                mescoordonnees.append(vector_x)
                mescoordonnees.append(vector_y)
                mescoordonnees.append(vector_w)
                mescoordonnees.append(vector_h)
                mesvecteurs.append(mescoordonnees)
                mescoordonnees = []
            except Exception as e:
                print(e)
                #record a row of Nones for a detection without a usable vector
                filename = thing.split("/")[4]
                vector_letter = None
                vector_confidence = None
                vector_x = None
                vector_y = None
                vector_w = None
                vector_h = None
                mescoordonnees.append(filename)
                mescoordonnees.append(vector_letter)
                mescoordonnees.append(vector_confidence)
                mescoordonnees.append(vector_x)
                mescoordonnees.append(vector_y)
                mescoordonnees.append(vector_w)
                mescoordonnees.append(vector_h)
                mesvecteurs.append(mescoordonnees)
                mescoordonnees = []
        liste_df.append(mesvecteurs) #append this image's rows once, after the loop, so rows are not duplicated
else:
filename = thing.split("/")[4]
vector_letter = None
vector_confidence = None
vector_x = None
vector_y = None
vector_w = None
vector_h = None
mescoordonnees.append(filename)
mescoordonnees.append(vector_letter)
mescoordonnees.append(vector_confidence)
mescoordonnees.append(vector_x)
mescoordonnees.append(vector_y)
mescoordonnees.append(vector_w)
mescoordonnees.append(vector_h)
monvecteur.append(mescoordonnees)
mescoordonnees = []
print(monvecteur)
#print(thing)
liste_df.append(monvecteur)
#np_liste_df = np.array(liste_df) #make a numpy array
merged = list(itertools.chain(*liste_df))
merged_array = np.array(merged)
#print(merged_array.shape) #print its dimension
merged_frame = DataFrame(merged_array)
merged_frame.to_csv(r'/mekeneocr/tiny/mycsvmeteor_tresh_0.9.csv', sep =',', index = False, header = True)
| [
"itertools.chain",
"glob2.glob",
"darknet.network_width",
"sys.path.insert",
"darknet.free_image",
"darknet.draw_boxes",
"darknet.load_network",
"numpy.array",
"darknet.detect_image",
"darknet.make_image",
"cv2.cvtColor",
"pandas.DataFrame",
"darknet.network_height",
"cv2.resize",
"cv2.i... | [((311, 362), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/mekeneocr/myUniverse/darknet"""'], {}), "(0, '/mekeneocr/myUniverse/darknet')\n", (326, 362), False, 'import sys\n'), ((620, 677), 'darknet.load_network', 'darknet.load_network', (['config', 'data', 'weights'], {'batch_size': '(1)'}), '(config, data, weights, batch_size=1)\n', (640, 677), False, 'import darknet\n'), ((1719, 1766), 'glob2.glob', 'glob2.glob', (['"""/mekeneocr/tiny/tiny_images/*.png"""'], {}), "('/mekeneocr/tiny/tiny_images/*.png')\n", (1729, 1766), False, 'import glob2\n'), ((5224, 5240), 'numpy.array', 'np.array', (['merged'], {}), '(merged)\n', (5232, 5240), True, 'import numpy as np\n'), ((5306, 5329), 'pandas.DataFrame', 'DataFrame', (['merged_array'], {}), '(merged_array)\n', (5315, 5329), False, 'from pandas import DataFrame\n'), ((901, 931), 'darknet.network_width', 'darknet.network_width', (['network'], {}), '(network)\n', (922, 931), False, 'import darknet\n'), ((945, 976), 'darknet.network_height', 'darknet.network_height', (['network'], {}), '(network)\n', (967, 976), False, 'import darknet\n'), ((997, 1033), 'darknet.make_image', 'darknet.make_image', (['width', 'height', '(3)'], {}), '(width, height, 3)\n', (1015, 1033), False, 'import darknet\n'), ((1047, 1069), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1057, 1069), False, 'import cv2\n'), ((1086, 1124), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1098, 1124), False, 'import cv2\n'), ((1145, 1215), 'cv2.resize', 'cv2.resize', (['image_rgb', '(width, height)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(image_rgb, (width, height), interpolation=cv2.INTER_LINEAR)\n', (1155, 1215), False, 'import cv2\n'), ((1339, 1411), 'darknet.detect_image', 'darknet.detect_image', (['network', 'class_names', 'darknet_image'], {'thresh': 'thresh'}), '(network, class_names, darknet_image, thresh=thresh)\n', (1359, 1411), False, 'import darknet\n'), ((1416, 1449), 'darknet.free_image', 'darknet.free_image', (['darknet_image'], {}), '(darknet_image)\n', (1434, 1449), False, 'import darknet\n'), ((1462, 1521), 'darknet.draw_boxes', 'darknet.draw_boxes', (['detections', 'image_resized', 'class_colors'], {}), '(detections, image_resized, class_colors)\n', (1480, 1521), False, 'import darknet\n'), ((5181, 5207), 'itertools.chain', 'itertools.chain', (['*liste_df'], {}), '(*liste_df)\n', (5196, 5207), False, 'import itertools\n'), ((1533, 1571), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1545, 1571), False, 'import cv2\n')] |
# python3.6
import random
import base64
from paho.mqtt import client as mqtt_client
broker = '172.16.58.3'
port = 1883
topic = "info/12340000000000"
# generate a client ID with the python-mqtt prefix and a random suffix
client_id = f'python-mqtt-{random.randint(0, 100)}'
# username = 'emqx'
# password = '<PASSWORD>'
def ecg(mensagem):
import matplotlib.pyplot
from numpy.core.fromnumeric import size
    #Convert the string into an array of characters
arrayCaracteres = []
for palavra in mensagem:
for letra in palavra:
arrayCaracteres.append(letra)
    #Strip the 32-character header
    payloadCaracteres = arrayCaracteres[32:2532]
    #Regroup into an array of hex character pairs
i=0
payloadParesHexa=[]
while i < size(payloadCaracteres)-1:
aux = payloadCaracteres[i] + '' + payloadCaracteres[i+1]
payloadParesHexa.append(aux)
i +=2
    #Convert from hex to int - values from 0 to 255
payloadInt = []
for val in payloadParesHexa:
payloadInt.append(int(val,16))
    # Generate the time step
    tempo = 5/size(payloadInt)
    #Build an array with all the time values
aux = tempo
arrayTempo = []
while tempo <= 5:
arrayTempo.append(tempo)
tempo+= aux
    #Make sure the two arrays to be plotted have the same length
    if(size(payloadInt)>size(arrayTempo)):
        del(payloadInt[size(payloadInt)-1])
    #Plot the data
matplotlib.pyplot.plot(arrayTempo, payloadInt)
matplotlib.pyplot.xlabel('time in seconds')
matplotlib.pyplot.ylabel('Amplitude (normalised)')
matplotlib.pyplot.title('Heart beat signal Template')
matplotlib.pyplot.show()
def connect_mqtt() -> mqtt_client:
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected to MQTT Broker!")
else:
print("Failed to connect, return code %d\n", rc)
client = mqtt_client.Client(client_id)
client.on_connect = on_connect
client.connect(broker, port)
return client
def subscribe(client: mqtt_client):
def on_message(client, userdata, msg):
print(f"Received `{msg.payload}` from `{msg.topic}` topic and time `")
res = ''.join(format(x, '02x') for x in msg.payload)
print(f"Received `{res}` from `{msg.topic}` topic and time `")
ecg(res)
client.subscribe(topic)
client.on_message = on_message
def run():
client = connect_mqtt()
subscribe(client)
client.loop_forever()
if __name__ == '__main__':
run()
| [
"paho.mqtt.client.Client",
"numpy.core.fromnumeric.size",
"random.randint"
] | [((1923, 1952), 'paho.mqtt.client.Client', 'mqtt_client.Client', (['client_id'], {}), '(client_id)\n', (1941, 1952), True, 'from paho.mqtt import client as mqtt_client\n'), ((226, 248), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (240, 248), False, 'import random\n'), ((1084, 1100), 'numpy.core.fromnumeric.size', 'size', (['payloadInt'], {}), '(payloadInt)\n', (1088, 1100), False, 'from numpy.core.fromnumeric import size\n'), ((1337, 1353), 'numpy.core.fromnumeric.size', 'size', (['payloadInt'], {}), '(payloadInt)\n', (1341, 1353), False, 'from numpy.core.fromnumeric import size\n'), ((1354, 1370), 'numpy.core.fromnumeric.size', 'size', (['arrayTempo'], {}), '(arrayTempo)\n', (1358, 1370), False, 'from numpy.core.fromnumeric import size\n'), ((749, 772), 'numpy.core.fromnumeric.size', 'size', (['payloadCaracteres'], {}), '(payloadCaracteres)\n', (753, 772), False, 'from numpy.core.fromnumeric import size\n'), ((1396, 1412), 'numpy.core.fromnumeric.size', 'size', (['payloadInt'], {}), '(payloadInt)\n', (1400, 1412), False, 'from numpy.core.fromnumeric import size\n')] |
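The parsing inside `ecg()` (drop a 32-character header, read the remaining hex pairs as one-byte samples over a 5-second window) can also be done with the standard library in a few lines. A sketch under those same assumptions:

```python
import numpy as np

def decode_ecg_payload(hex_string, header_chars=32, payload_chars=2500, window_s=5.0):
    payload = hex_string[header_chars:header_chars + payload_chars]
    samples = np.frombuffer(bytes.fromhex(payload), dtype=np.uint8)  # values 0..255
    t = np.linspace(0.0, window_s, num=samples.size)
    return t, samples
```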
from pudding.clustering import kmeans
import pytest
import numpy as np
from sklearn.datasets import make_blobs
import pudding
def testKmeansToyData():
'''
    Test KMeans using a toy dataset
'''
X = [[0.0, 0.0], [0.5, 0.0], [0.5, 1.0], [1.0, 1.0]]
initial_centers = [[0.0, 0.0], [1.0, 1.0]]
expected_membership = [0, 0, 1, 1]
expected_centers = [[0.25, 0.0], [0.75, 1.0]]
expected_iterations = 2
pudding_kmeans = pudding.clustering.KMeans(n_clusters=len(initial_centers), cuda_enabled=False)
pudding_kmeans.fit(np.array(X), initial_centers=initial_centers)
assert pudding_kmeans.membership.tolist() == expected_membership
for center, expected_center in zip(pudding_kmeans.centers, expected_centers):
assert center == pytest.approx(expected_center)
assert expected_iterations == pudding_kmeans.n_iter
pudding_kmeans = pudding.clustering.KMeans(n_clusters=len(initial_centers), cuda_enabled=True)
pudding_kmeans.fit(np.array(X), initial_centers=initial_centers)
assert pudding_kmeans.membership.tolist() == expected_membership
for center, expected_center in zip(pudding_kmeans.centers, expected_centers):
assert center == pytest.approx(expected_center)
assert expected_iterations == pudding_kmeans.n_iter
def testKmeansCPUGPU():
'''
    Check that the CPU and GPU versions of our implementation agree on randomly generated data
'''
# Generate random data
seed = 0
np.random.seed(seed)
n_examples = 3000
truth_centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=n_examples, centers=truth_centers, cluster_std=0.7)
# Our implementation CPU
cpu_kmeans = pudding.clustering.KMeans(n_clusters=3, cuda_enabled=False, rand_seed=seed)
cpu_kmeans.fit(X)
our_cpu_centers, our_cpu_membership, our_cpu_n_iter = cpu_kmeans.centers, cpu_kmeans.membership, cpu_kmeans.n_iter
# Our implementation GPU
gpu_kmeans = pudding.clustering.KMeans(n_clusters=3, cuda_enabled=True, rand_seed=seed)
gpu_kmeans.fit(X)
our_gpu_centers, our_gpu_membership, our_gpu_n_iter = gpu_kmeans.centers, gpu_kmeans.membership, gpu_kmeans.n_iter
# Assertions
assert our_cpu_membership.tolist() == our_gpu_membership.tolist()
for our_cpu_center, our_gpu_center in zip(our_cpu_centers, our_gpu_centers):
assert our_cpu_center == pytest.approx(our_gpu_center)
assert our_cpu_n_iter == our_gpu_n_iter
def testKmeansCPUGPULarge():
'''
    Check that the CPU and GPU versions of our implementation agree on randomly generated data, using a larger number of data points
'''
# Generate random data
seed = 0
np.random.seed(seed)
n_examples = 1000000
truth_centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=n_examples, centers=truth_centers, cluster_std=0.7)
# Our implementation CPU
cpu_kmeans = pudding.clustering.KMeans(n_clusters=3, cuda_enabled=False, rand_seed=seed)
cpu_kmeans.fit(X)
our_cpu_centers = cpu_kmeans.centers
# Our implementation GPU
gpu_kmeans = pudding.clustering.KMeans(n_clusters=3, cuda_enabled=True, rand_seed=seed)
gpu_kmeans.fit(X)
our_gpu_centers = gpu_kmeans.centers
# Assertions
for our_cpu_center, our_gpu_center in zip(our_cpu_centers, our_gpu_centers):
assert our_cpu_center == pytest.approx(our_gpu_center, rel=1e-1)
def testKmeansEmptyCluster():
'''
    Test KMeans when there is an empty cluster
'''
X = [[0.0, 0.0], [0.5, 0.0], [0.5, 1.0], [1.0, 1.0]]
initial_centers = [[0.0, 0.0], [10.0, 10.0]]
expected_membership = [0, 0, 0, 0]
expected_centers = [[0.5, 0.5], [10.0, 10.0]]
expected_iterations = 2
cpu_kmeans = pudding.clustering.KMeans(n_clusters=len(initial_centers), cuda_enabled=False)
cpu_kmeans.fit(np.array(X), initial_centers=initial_centers)
centers, membership, n_iterations = cpu_kmeans.centers, cpu_kmeans.membership, cpu_kmeans.n_iter
assert membership.tolist() == expected_membership
for center, expected_center in zip(centers, expected_centers):
assert center == pytest.approx(expected_center)
assert expected_iterations == n_iterations
gpu_kmeans = pudding.clustering.KMeans(n_clusters=len(initial_centers), cuda_enabled=True)
gpu_kmeans.fit(np.array(X), initial_centers=initial_centers)
centers, membership, n_iterations = gpu_kmeans.centers, gpu_kmeans.membership, gpu_kmeans.n_iter
assert membership.tolist() == expected_membership
for center, expected_center in zip(centers, expected_centers):
assert center == pytest.approx(expected_center)
    assert expected_iterations == n_iterations
| [
"pytest.approx",
"sklearn.datasets.make_blobs",
"numpy.array",
"numpy.random.seed",
"pudding.clustering.KMeans"
] | [((1473, 1493), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1487, 1493), True, 'import numpy as np\n'), ((1575, 1647), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n_examples', 'centers': 'truth_centers', 'cluster_std': '(0.7)'}), '(n_samples=n_examples, centers=truth_centers, cluster_std=0.7)\n', (1585, 1647), False, 'from sklearn.datasets import make_blobs\n'), ((1700, 1775), 'pudding.clustering.KMeans', 'pudding.clustering.KMeans', ([], {'n_clusters': '(3)', 'cuda_enabled': '(False)', 'rand_seed': 'seed'}), '(n_clusters=3, cuda_enabled=False, rand_seed=seed)\n', (1725, 1775), False, 'import pudding\n'), ((1964, 2038), 'pudding.clustering.KMeans', 'pudding.clustering.KMeans', ([], {'n_clusters': '(3)', 'cuda_enabled': '(True)', 'rand_seed': 'seed'}), '(n_clusters=3, cuda_enabled=True, rand_seed=seed)\n', (1989, 2038), False, 'import pudding\n'), ((2681, 2701), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2695, 2701), True, 'import numpy as np\n'), ((2786, 2858), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'n_examples', 'centers': 'truth_centers', 'cluster_std': '(0.7)'}), '(n_samples=n_examples, centers=truth_centers, cluster_std=0.7)\n', (2796, 2858), False, 'from sklearn.datasets import make_blobs\n'), ((2911, 2986), 'pudding.clustering.KMeans', 'pudding.clustering.KMeans', ([], {'n_clusters': '(3)', 'cuda_enabled': '(False)', 'rand_seed': 'seed'}), '(n_clusters=3, cuda_enabled=False, rand_seed=seed)\n', (2936, 2986), False, 'import pudding\n'), ((3097, 3171), 'pudding.clustering.KMeans', 'pudding.clustering.KMeans', ([], {'n_clusters': '(3)', 'cuda_enabled': '(True)', 'rand_seed': 'seed'}), '(n_clusters=3, cuda_enabled=True, rand_seed=seed)\n', (3122, 3171), False, 'import pudding\n'), ((550, 561), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (558, 561), True, 'import numpy as np\n'), ((983, 994), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (991, 994), True, 'import numpy as np\n'), ((3838, 3849), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3846, 3849), True, 'import numpy as np\n'), ((4325, 4336), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4333, 4336), True, 'import numpy as np\n'), ((773, 803), 'pytest.approx', 'pytest.approx', (['expected_center'], {}), '(expected_center)\n', (786, 803), False, 'import pytest\n'), ((1206, 1236), 'pytest.approx', 'pytest.approx', (['expected_center'], {}), '(expected_center)\n', (1219, 1236), False, 'import pytest\n'), ((2383, 2412), 'pytest.approx', 'pytest.approx', (['our_gpu_center'], {}), '(our_gpu_center)\n', (2396, 2412), False, 'import pytest\n'), ((3367, 3405), 'pytest.approx', 'pytest.approx', (['our_gpu_center'], {'rel': '(0.1)'}), '(our_gpu_center, rel=0.1)\n', (3380, 3405), False, 'import pytest\n'), ((4132, 4162), 'pytest.approx', 'pytest.approx', (['expected_center'], {}), '(expected_center)\n', (4145, 4162), False, 'import pytest\n'), ((4619, 4649), 'pytest.approx', 'pytest.approx', (['expected_center'], {}), '(expected_center)\n', (4632, 4649), False, 'import pytest\n')] |
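Note that the CPU/GPU assertions above assume both versions emit the clusters in the same order. If that ever stops holding, an order-independent comparison can match the centroids optimally first; a sketch using SciPy (scipy is an assumed extra test dependency):

```python
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist

def centers_allclose(centers_a, centers_b, atol=1e-6):
    a, b = np.asarray(centers_a), np.asarray(centers_b)
    rows, cols = linear_sum_assignment(cdist(a, b))  # optimal 1-to-1 matching
    return np.allclose(a[rows], b[cols], atol=atol)
```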
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add
import os
import math
from typing import Optional, Dict, Tuple, Any
from game import Game, State
import constants as c
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class NNet:
"""
Neural network consisting of residual convolutional layers splitting into policy and value outputs.
"""
def __init__(self, epochs: int = c.DEFAULT_EPOCHS, learning_rate: float = c.DEFAULT_LEARNING_RATE,
batch_size: int = c.DEFAULT_BATCH_SIZE, model_name: str = c.DEFAULT_MODEL_NAME,
load_data: bool = True):
self.epochs = epochs
self.batch_size = batch_size
self.model_name = model_name
self.model = self._get_model(learning_rate, load_data, model_name)
@classmethod
def _get_model(cls, learning_rate: float, load_data: bool, model_name: str) -> keras.Model:
inputs = Input(shape=(c.ROWS, c.COLUMNS, 6 * 2 + 6))
x = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(inputs)
x = BatchNormalization(axis=3)(x)
x = Activation('relu')(x)
for _ in range(5):
x = cls._res_net(inputs=x, filters=256, kernel_size=(3, 3))
policy = Conv2D(filters=256, kernel_size=(3, 3), padding='valid')(x)
policy = BatchNormalization(axis=3)(policy)
policy = Activation('relu')(policy)
policy = Flatten()(policy)
policy = Dense(256, activation='relu')(policy)
policy = Dense((c.ROWS * c.COLUMNS) ** 2, activation='softmax', name='policy')(policy)
value = Conv2D(filters=128, kernel_size=(3, 3), padding='valid')(x)
value = BatchNormalization(axis=3)(value)
value = Activation('relu')(value)
value = Flatten()(value)
value = Dense(1, activation='sigmoid', name='value')(value)
model = keras.Model(inputs=inputs, outputs=[policy, value])
model.compile(
optimizer=tf.optimizers.Adam(learning_rate=learning_rate),
loss={'value': 'mean_squared_error',
'policy': 'categorical_crossentropy'}
)
if load_data:
try:
model.load_weights(f'{parent_dir}\\weights\\{model_name}\\').expect_partial()
except ValueError:
print('No saved weights found')
return model
@staticmethod
def _res_net(inputs: Any, filters: int, kernel_size: tuple) -> Any:
x_shortcut = inputs
x = Conv2D(filters=filters, kernel_size=kernel_size, padding='same')(inputs)
x = BatchNormalization(axis=3)(x)
x = Activation('relu')(x)
x = Conv2D(filters=filters, kernel_size=kernel_size, padding='same')(x)
x = BatchNormalization(axis=3)(x)
x = Add()([x, x_shortcut])
x = Activation('relu')(x)
return x
def train(self, examples: list, save_data=False) -> None:
"""
Trains the neural network from a list of training examples.
:param examples: Training data as List[(state,(policy,value))]
:param save_data: Always saves the new weights if True
"""
x_train = np.array([self._to_binary_state(example[0]) for example in examples])
y_policy = np.array([self._to_policy_vector(example[1][0], example[0].player) for example in examples])
y_value = np.array([self._get_value(example[1][1], example[0].player) for example in examples])
self.model.fit(x=x_train, y={'policy': y_policy, 'value': y_value},
epochs=self.epochs, batch_size=self.batch_size, shuffle=True)
if save_data:
self.model.save_weights(f'{parent_dir}\\weights\\{self.model_name}\\')
def prediction(self, state: State) -> Tuple[dict, float]:
"""
Returns a policy and value prediction for a given state. Value is from the perspective of the player making
the next move.
:param state: State to evaluate
:return: (policy, vector). Policy is given as probability vector and value between 0 and 1.
"""
binary_state = self._to_binary_state(state)
prediction = self.model.predict(np.array([binary_state]))
policy = self._get_policy(prediction[0][0], state)
value = prediction[1][0][0]
return policy, value
# policy vectors are from the perspective of the player making the move
@classmethod
def _to_policy_vector(cls, move: tuple, player: str) -> np.array:
policy = np.zeros(c.ROWS ** 2 * c.COLUMNS ** 2)
policy[cls._policy_index(move, player)] = 1
return policy
@staticmethod
def _policy_index(move: tuple, player: str) -> int:
if player == 'black':
# mirror row of move
move = ((c.ROWS - move[0][0] - 1, move[0][1]), (c.ROWS - move[1][0] - 1, move[1][1]))
base = (1, c.ROWS, c.ROWS * c.COLUMNS, c.ROWS * c.COLUMNS * c.ROWS)
return move[0][0] * base[0] + move[0][1] * base[1] + move[1][0] * base[2] + move[1][1] * base[3]
@classmethod
def _get_policy(cls, policy: np.ndarray, state: State) -> Dict:
legal_moves = Game.get_legal_moves(state)
policy_dict = {move: policy[cls._policy_index(move, state.player)] for move in legal_moves}
value_sum = sum(policy_dict.values())
return {move: value / value_sum for move, value in policy_dict.items()}
@staticmethod
def _get_value(evaluation, player):
evaluation *= c.ALPHA_SIGMOID
value = 1 / (1 + math.exp(-evaluation))
if player == 'black':
value = 1 - value
return value
# binary states are from the perspective of the player making the move
@classmethod
def _to_binary_state(cls, state: State) -> np.array:
black = state.player == 'black'
bin_state = np.zeros(shape=(c.ROWS, c.COLUMNS, 6 * 2 + 6))
for row in range(c.ROWS):
for column in range(c.COLUMNS):
index = cls._piece_index(state.board[row][column])
if index:
piece_index = index[1] + 6 * int(state.player == index[0])
bin_state[row, column, piece_index] = 1
                for i, right in enumerate(state.castle_rights):
                    if black:
                        i = (i + 2) % 4  # swap the white and black castle-right planes
                    if right:  # encode a castling right only while it is still available
                        bin_state[row, column, 12 + i] = 1
if black:
bin_state[row, column, 17] = 1
if state.en_passant:
bin_state[state.en_passant[0], state.en_passant[1], 16] = 1
if black:
bin_state = np.flip(bin_state, axis=0)
return bin_state
@staticmethod
def _piece_index(piece: str) -> Optional[tuple]:
if piece == 'empty':
return None
piece = piece.split('_')
typing = {
'pawn': 0,
'knight': 1,
'bishop': 2,
'rook': 3,
'queen': 4,
'king': 5
}
return piece[0], typing.get(piece[1])
if __name__ == '__main__':
nn = NNet()
game = Game('8/1P3k2/8/8/8/8/4K3/8 w - - 0 1')
print(nn.prediction(game.state))
| [
"tensorflow.keras.layers.Input",
"numpy.flip",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.BatchNormalization",
"numpy.array",
"numpy.zeros",
"game.Game.get_legal_moves",
"tensorflow.keras.layers.Dense",
"tensorflow.optimize... | [((7261, 7300), 'game.Game', 'Game', (['"""8/1P3k2/8/8/8/8/4K3/8 w - - 0 1"""'], {}), "('8/1P3k2/8/8/8/8/4K3/8 w - - 0 1')\n", (7265, 7300), False, 'from game import Game, State\n'), ((382, 407), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (397, 407), False, 'import os\n'), ((1099, 1142), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(c.ROWS, c.COLUMNS, 6 * 2 + 6)'}), '(shape=(c.ROWS, c.COLUMNS, 6 * 2 + 6))\n', (1104, 1142), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((2042, 2093), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': '[policy, value]'}), '(inputs=inputs, outputs=[policy, value])\n', (2053, 2093), False, 'from tensorflow import keras\n'), ((4682, 4720), 'numpy.zeros', 'np.zeros', (['(c.ROWS ** 2 * c.COLUMNS ** 2)'], {}), '(c.ROWS ** 2 * c.COLUMNS ** 2)\n', (4690, 4720), True, 'import numpy as np\n'), ((5320, 5347), 'game.Game.get_legal_moves', 'Game.get_legal_moves', (['state'], {}), '(state)\n', (5340, 5347), False, 'from game import Game, State\n'), ((6010, 6056), 'numpy.zeros', 'np.zeros', ([], {'shape': '(c.ROWS, c.COLUMNS, 6 * 2 + 6)'}), '(shape=(c.ROWS, c.COLUMNS, 6 * 2 + 6))\n', (6018, 6056), True, 'import numpy as np\n'), ((1156, 1211), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'padding': '"""same"""'}), "(filters=256, kernel_size=(3, 3), padding='same')\n", (1162, 1211), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1232, 1258), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (1250, 1258), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1274, 1292), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1284, 1292), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1414, 1470), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'padding': '"""valid"""'}), "(filters=256, kernel_size=(3, 3), padding='valid')\n", (1420, 1470), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1491, 1517), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (1509, 1517), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1543, 1561), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1553, 1561), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1587, 1596), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1594, 1596), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1622, 1651), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1627, 1651), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1677, 1746), 'tensorflow.keras.layers.Dense', 'Dense', (['((c.ROWS * c.COLUMNS) ** 2)'], {'activation': '"""softmax"""', 'name': 
'"""policy"""'}), "((c.ROWS * c.COLUMNS) ** 2, activation='softmax', name='policy')\n", (1682, 1746), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1772, 1828), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'padding': '"""valid"""'}), "(filters=128, kernel_size=(3, 3), padding='valid')\n", (1778, 1828), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1848, 1874), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (1866, 1874), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1898, 1916), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1908, 1916), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1940, 1949), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1947, 1949), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((1973, 2017), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""value"""'}), "(1, activation='sigmoid', name='value')\n", (1978, 2017), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((2671, 2735), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'filters', 'kernel_size': 'kernel_size', 'padding': '"""same"""'}), "(filters=filters, kernel_size=kernel_size, padding='same')\n", (2677, 2735), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((2756, 2782), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (2774, 2782), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((2798, 2816), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2808, 2816), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((2833, 2897), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'filters', 'kernel_size': 'kernel_size', 'padding': '"""same"""'}), "(filters=filters, kernel_size=kernel_size, padding='same')\n", (2839, 2897), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((2913, 2939), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)'}), '(axis=3)\n', (2931, 2939), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((2956, 2961), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (2959, 2961), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((2991, 3009), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3001, 3009), False, 'from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add\n'), ((4350, 4374), 'numpy.array', 'np.array', (['[binary_state]'], {}), '([binary_state])\n', (4358, 4374), True, 'import numpy as np\n'), ((6778, 6804), 'numpy.flip', 'np.flip', 
(['bin_state'], {'axis': '(0)'}), '(bin_state, axis=0)\n', (6785, 6804), True, 'import numpy as np\n'), ((2140, 2187), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (2158, 2187), True, 'import tensorflow as tf\n'), ((5696, 5717), 'math.exp', 'math.exp', (['(-evaluation)'], {}), '(-evaluation)\n', (5704, 5717), False, 'import math\n')] |
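The `_policy_index` encoding above is a mixed-radix number with bases (1, ROWS, ROWS*COLUMNS, ROWS*COLUMNS*ROWS), so it can be inverted exactly. A small round-trip sketch, assuming an 8x8 board and the white perspective (no row mirroring):

```python
# Decode a policy index back into ((r0, c0), (r1, c1)); the inverse of the
# mixed-radix encoding used by _policy_index.
ROWS = COLUMNS = 8

def policy_index_to_move(index):
    r0 = index % ROWS
    index //= ROWS
    c0 = index % COLUMNS
    index //= COLUMNS
    r1 = index % ROWS
    index //= ROWS
    c1 = index % COLUMNS
    return (r0, c0), (r1, c1)

# Round trip: encode with the same bases as _policy_index, then decode.
move = ((1, 2), (3, 4))
base = (1, ROWS, ROWS * COLUMNS, ROWS * COLUMNS * ROWS)
idx = (move[0][0] * base[0] + move[0][1] * base[1]
       + move[1][0] * base[2] + move[1][1] * base[3])
assert policy_index_to_move(idx) == move
```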
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import Dataset, DataLoader
def generate_df_affect_by_n_days(series, n, index=False):
if len(series) <= n:
raise Exception("The Length of series is %d, while affect by (n=%d)." % (len(series), n))
df = pd.DataFrame()
for i in range(n):
df['c%d' % i] = series.tolist()[i:-(n - i)]
df['y'] = series.tolist()[n:]
if index:
df.index = series.index[n:]
return df
def readData(column='最高价', n=30, all_too=True, index=False, train_end=-300):
df = pd.read_csv("601600.csv", index_col=0, encoding='gb18030')
# df.index = list(map(lambda x: datetime.datetime.strptime(x, "%Y-%m-%d"), df.index))
# df_column = df[column].copy()
df_column = df[column]
df_column_train, df_column_test = df_column[:train_end], df_column[train_end - n:]
df_generate_from_df_column_train = generate_df_affect_by_n_days(df_column_train, n, index=index)
if all_too:
return df_generate_from_df_column_train, df_column, df.index.tolist()
return df_generate_from_df_column_train
class RNN(nn.Module):
def __init__(self, input_size):
super(RNN, self).__init__()
self.rnn = nn.LSTM(
input_size=input_size,
hidden_size=64,
num_layers=1,
batch_first=True
)
self.out = nn.Sequential(
nn.Linear(64, 1)
)
def forward(self, x):
        r_out, (h_n, h_c) = self.rnn(x, None)  # None means the hidden state starts as all zeros
out = self.out(r_out)
return out
class TrainSet(Dataset):
def __init__(self, data):
        # split each stored row into the n input values and the label
self.data, self.label = data[:, :-1].float(), data[:, -1].float()
def __getitem__(self, index):
return self.data[index], self.label[index]
def __len__(self):
return len(self.data)
n = 30
LR = 0.0001
EPOCH = 10
train_end = -500
# build the dataset
df, df_all, df_index = readData('最高价', n=n, train_end=train_end)
df_all = np.array(df_all.tolist())
plt.plot(df_index, df_all, label='real-data')
df_numpy = np.array(df)
df_numpy_mean = np.mean(df_numpy)
df_numpy_std = np.std(df_numpy)
df_numpy = (df_numpy - df_numpy_mean) / df_numpy_std
df_tensor = torch.Tensor(df_numpy)
trainset = TrainSet(df_tensor)
trainloader = DataLoader(trainset, batch_size=10, shuffle=True)
# rnn = torch.load('rnn.pkl')
try:
rnn = torch.load('rnn.pkl')
except FileNotFoundError:
rnn = RNN(n)
except EOFError:
rnn = RNN(n)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR) # optimize all cnn parameters
loss_func = nn.MSELoss()
for step in range(EPOCH):
for tx, ty in trainloader:
output = rnn(torch.unsqueeze(tx, dim=0))
loss = loss_func(torch.squeeze(output), ty)
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # back propagation, compute gradients
optimizer.step()
print(step, loss)
    if step % 10 == 0:
        torch.save(rnn, 'rnn.pkl')  # checkpoint every 10 epochs
torch.save(rnn, 'rnn.pkl')
#
generate_data_train = []
generate_data_test = []
test_index = len(df_all) + train_end
df_all_normal = (df_all - df_numpy_mean) / df_numpy_std
df_all_normal_tensor = torch.Tensor(df_all_normal)
for i in range(n, len(df_all)):
x = df_all_normal_tensor[i - n:i]
x = torch.unsqueeze(torch.unsqueeze(x, dim=0), dim=0)
y = rnn(x)
if i < test_index:
generate_data_train.append(torch.squeeze(y).detach().numpy() * df_numpy_std + df_numpy_mean)
else:
generate_data_test.append(torch.squeeze(y).detach().numpy() * df_numpy_std + df_numpy_mean)
plt.plot(df_index[n:train_end], generate_data_train, label='generate_train')
plt.plot(df_index[train_end:], generate_data_test, label='generate_test')
plt.legend()
plt.show()
plt.cla()
plt.plot(df_index[train_end:-400], df_all[train_end:-400], label='real-data')
plt.plot(df_index[train_end:-400], generate_data_test[:-400], label='generate_test')
plt.legend()
plt.show()
| [
"torch.squeeze",
"numpy.mean",
"pandas.read_csv",
"torch.utils.data.DataLoader",
"torch.nn.LSTM",
"torch.unsqueeze",
"torch.load",
"matplotlib.pyplot.plot",
"torch.Tensor",
"numpy.array",
"torch.nn.MSELoss",
"torch.save",
"numpy.std",
"pandas.DataFrame",
"torch.nn.Linear",
"matplotlib.... | [((2113, 2158), 'matplotlib.pyplot.plot', 'plt.plot', (['df_index', 'df_all'], {'label': '"""real-data"""'}), "(df_index, df_all, label='real-data')\n", (2121, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2171, 2183), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (2179, 2183), True, 'import numpy as np\n'), ((2201, 2218), 'numpy.mean', 'np.mean', (['df_numpy'], {}), '(df_numpy)\n', (2208, 2218), True, 'import numpy as np\n'), ((2234, 2250), 'numpy.std', 'np.std', (['df_numpy'], {}), '(df_numpy)\n', (2240, 2250), True, 'import numpy as np\n'), ((2317, 2339), 'torch.Tensor', 'torch.Tensor', (['df_numpy'], {}), '(df_numpy)\n', (2329, 2339), False, 'import torch\n'), ((2386, 2435), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': '(10)', 'shuffle': '(True)'}), '(trainset, batch_size=10, shuffle=True)\n', (2396, 2435), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2679, 2691), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2689, 2691), True, 'import torch.nn as nn\n'), ((3086, 3112), 'torch.save', 'torch.save', (['rnn', '"""rnn.pkl"""'], {}), "(rnn, 'rnn.pkl')\n", (3096, 3112), False, 'import torch\n'), ((3282, 3309), 'torch.Tensor', 'torch.Tensor', (['df_all_normal'], {}), '(df_all_normal)\n', (3294, 3309), False, 'import torch\n'), ((3687, 3763), 'matplotlib.pyplot.plot', 'plt.plot', (['df_index[n:train_end]', 'generate_data_train'], {'label': '"""generate_train"""'}), "(df_index[n:train_end], generate_data_train, label='generate_train')\n", (3695, 3763), True, 'import matplotlib.pyplot as plt\n'), ((3764, 3837), 'matplotlib.pyplot.plot', 'plt.plot', (['df_index[train_end:]', 'generate_data_test'], {'label': '"""generate_test"""'}), "(df_index[train_end:], generate_data_test, label='generate_test')\n", (3772, 3837), True, 'import matplotlib.pyplot as plt\n'), ((3838, 3850), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3848, 3850), True, 'import matplotlib.pyplot as plt\n'), ((3851, 3861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3859, 3861), True, 'import matplotlib.pyplot as plt\n'), ((3862, 3871), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3869, 3871), True, 'import matplotlib.pyplot as plt\n'), ((3872, 3949), 'matplotlib.pyplot.plot', 'plt.plot', (['df_index[train_end:-400]', 'df_all[train_end:-400]'], {'label': '"""real-data"""'}), "(df_index[train_end:-400], df_all[train_end:-400], label='real-data')\n", (3880, 3949), True, 'import matplotlib.pyplot as plt\n'), ((3950, 4039), 'matplotlib.pyplot.plot', 'plt.plot', (['df_index[train_end:-400]', 'generate_data_test[:-400]'], {'label': '"""generate_test"""'}), "(df_index[train_end:-400], generate_data_test[:-400], label=\n 'generate_test')\n", (3958, 4039), True, 'import matplotlib.pyplot as plt\n'), ((4035, 4047), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4045, 4047), True, 'import matplotlib.pyplot as plt\n'), ((4048, 4058), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4056, 4058), True, 'import matplotlib.pyplot as plt\n'), ((363, 377), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (375, 377), True, 'import pandas as pd\n'), ((639, 697), 'pandas.read_csv', 'pd.read_csv', (['"""601600.csv"""'], {'index_col': '(0)', 'encoding': '"""gb18030"""'}), "('601600.csv', index_col=0, encoding='gb18030')\n", (650, 697), True, 'import pandas as pd\n'), ((2482, 2503), 'torch.load', 'torch.load', (['"""rnn.pkl"""'], {}), "('rnn.pkl')\n", (2492, 2503), False, 'import torch\n'), ((1292, 1370), 
'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': '(64)', 'num_layers': '(1)', 'batch_first': '(True)'}), '(input_size=input_size, hidden_size=64, num_layers=1, batch_first=True)\n', (1299, 1370), True, 'import torch.nn as nn\n'), ((3059, 3085), 'torch.save', 'torch.save', (['rnn', '"""rnn.pkl"""'], {}), "(rnn, 'rnn.pkl')\n", (3069, 3085), False, 'import torch\n'), ((3404, 3429), 'torch.unsqueeze', 'torch.unsqueeze', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (3419, 3429), False, 'import torch\n'), ((1475, 1491), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (1484, 1491), True, 'import torch.nn as nn\n'), ((2771, 2797), 'torch.unsqueeze', 'torch.unsqueeze', (['tx'], {'dim': '(0)'}), '(tx, dim=0)\n', (2786, 2797), False, 'import torch\n'), ((2824, 2845), 'torch.squeeze', 'torch.squeeze', (['output'], {}), '(output)\n', (2837, 2845), False, 'import torch\n'), ((3511, 3527), 'torch.squeeze', 'torch.squeeze', (['y'], {}), '(y)\n', (3524, 3527), False, 'import torch\n'), ((3621, 3637), 'torch.squeeze', 'torch.squeeze', (['y'], {}), '(y)\n', (3634, 3637), False, 'import torch\n')] |
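`generate_df_affect_by_n_days` builds its lag features with a Python loop; the same windows can be produced in one shot with NumPy. A sketch (assumes numpy >= 1.20 for `sliding_window_view`; the layout matches the pandas version, n lagged values followed by the target):

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def make_windows(series, n):
    values = np.asarray(series, dtype=np.float32)
    windows = sliding_window_view(values, n + 1)  # shape: (len(series) - n, n + 1)
    return windows[:, :n], windows[:, n]          # (inputs, targets)
```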
import numpy
from neural_network import NeuralNetwork
NR_INPUT_NODES = 784
NR_HIDDEN_NODES = 200
NR_OUTPUT_NODES = 10
LEARNING_RATE = 0.1
EPOCH = 5
neural_network = NeuralNetwork(NR_INPUT_NODES, NR_HIDDEN_NODES, NR_OUTPUT_NODES, LEARNING_RATE)
training_data_file = open('dataset/training/mnist_train.csv')
training_data_list = training_data_file.readlines()
training_data_file.close()
# train the network, making EPOCH passes over the training set
for e in range(EPOCH):
    for training_data in training_data_list:
        all_values = training_data.split(',')
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        targets = numpy.zeros(NR_OUTPUT_NODES) + 0.01
        targets[int(all_values[0])] = 0.99  # set the one-hot target for the true digit
        neural_network.train(inputs, targets)
test_data_file = open('dataset/test/mnist_test.csv')
test_data_list = test_data_file.readlines()
test_data_file.close()
# query the network on the held-out test set
scorecard = []
for test_data in test_data_list:
    all_values = test_data.split(',')
    correct_label = int(all_values[0])
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    outputs = neural_network.query(inputs)
    network_label = numpy.argmax(outputs)
    print('correct_label: ', correct_label, ' network_answer: ', network_label)
    if (network_label == correct_label):
        scorecard.append(1)
    else:
        scorecard.append(0)
scorecard_array = numpy.asarray(scorecard)
print('network performance = ', scorecard_array.sum() / scorecard_array.size)
| [
"neural_network.NeuralNetwork",
"numpy.asarray",
"numpy.argmax",
"numpy.asfarray",
"numpy.zeros"
] | [((169, 247), 'neural_network.NeuralNetwork', 'NeuralNetwork', (['NR_INPUT_NODES', 'NR_HIDDEN_NODES', 'NR_OUTPUT_NODES', 'LEARNING_RATE'], {}), '(NR_INPUT_NODES, NR_HIDDEN_NODES, NR_OUTPUT_NODES, LEARNING_RATE)\n', (182, 247), False, 'from neural_network import NeuralNetwork\n'), ((1386, 1410), 'numpy.asarray', 'numpy.asarray', (['scorecard'], {}), '(scorecard)\n', (1399, 1410), False, 'import numpy\n'), ((576, 604), 'numpy.zeros', 'numpy.zeros', (['NR_OUTPUT_NODES'], {}), '(NR_OUTPUT_NODES)\n', (587, 604), False, 'import numpy\n'), ((1138, 1159), 'numpy.argmax', 'numpy.argmax', (['outputs'], {}), '(outputs)\n', (1150, 1159), False, 'import numpy\n'), ((508, 538), 'numpy.asfarray', 'numpy.asfarray', (['all_values[1:]'], {}), '(all_values[1:])\n', (522, 538), False, 'import numpy\n'), ((1013, 1043), 'numpy.asfarray', 'numpy.asfarray', (['all_values[1:]'], {}), '(all_values[1:])\n', (1027, 1043), False, 'import numpy\n')] |
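The per-line scaling above keeps the sigmoid inputs in (0.01, 1.0] and the targets away from the saturated 0/1 extremes. The same preprocessing can be vectorised once per file; a sketch assuming the usual MNIST CSV layout (label in column 0, 784 pixel values after it):

```python
import numpy as np

def load_mnist_csv(path, n_outputs=10):
    raw = np.loadtxt(path, delimiter=',')
    labels = raw[:, 0].astype(int)
    inputs = raw[:, 1:] / 255.0 * 0.99 + 0.01     # keep inputs in (0.01, 1.0]
    targets = np.full((len(raw), n_outputs), 0.01)
    targets[np.arange(len(raw)), labels] = 0.99   # one-hot with soft extremes
    return inputs, targets
```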
# notes for this course can be found at:
# https://deeplearningcourses.com/c/data-science-linear-regression-in-python
# https://www.udemy.com/data-science-linear-regression-in-python
import numpy as np
import matplotlib.pyplot as plt
def make_poly(X, deg):
n = len(X)
data = [np.ones(n)]
    for d in range(deg):
data.append(X**(d+1))
return np.vstack(data).T
def fit(X, Y):
return np.linalg.solve(X.T.dot(X), X.T.dot(Y))
def fit_and_display(X, Y, sample, deg):
N = len(X)
train_idx = np.random.choice(N, sample)
Xtrain = X[train_idx]
Ytrain = Y[train_idx]
plt.scatter(Xtrain, Ytrain)
plt.show()
# fit polynomial
Xtrain_poly = make_poly(Xtrain, deg)
w = fit(Xtrain_poly, Ytrain)
# display the polynomial
X_poly = make_poly(X, deg)
Y_hat = X_poly.dot(w)
plt.plot(X, Y)
plt.plot(X, Y_hat)
plt.scatter(Xtrain, Ytrain)
plt.title("deg = %d" % deg)
plt.show()
def get_mse(Y, Yhat):
d = Y - Yhat
return d.dot(d) / len(d)
def plot_train_vs_test_curves(X, Y, sample=20, max_deg=20):
N = len(X)
train_idx = np.random.choice(N, sample)
Xtrain = X[train_idx]
Ytrain = Y[train_idx]
    test_idx = [idx for idx in range(N) if idx not in train_idx]
# test_idx = np.random.choice(N, sample)
Xtest = X[test_idx]
Ytest = Y[test_idx]
mse_trains = []
mse_tests = []
    for deg in range(max_deg+1):
Xtrain_poly = make_poly(Xtrain, deg)
w = fit(Xtrain_poly, Ytrain)
Yhat_train = Xtrain_poly.dot(w)
mse_train = get_mse(Ytrain, Yhat_train)
Xtest_poly = make_poly(Xtest, deg)
Yhat_test = Xtest_poly.dot(w)
mse_test = get_mse(Ytest, Yhat_test)
mse_trains.append(mse_train)
mse_tests.append(mse_test)
plt.plot(mse_trains, label="train mse")
plt.plot(mse_tests, label="test mse")
plt.legend()
plt.show()
plt.plot(mse_trains, label="train mse")
plt.legend()
plt.show()
if __name__ == "__main__":
# make up some data and plot it
N = 100
X = np.linspace(0, 6*np.pi, N)
Y = np.sin(X)
plt.plot(X, Y)
plt.show()
for deg in (5, 6, 7, 8, 9):
fit_and_display(X, Y, 10, deg)
plot_train_vs_test_curves(X, Y)
| [
"numpy.ones",
"numpy.random.choice",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.vstack",
"matplotlib.pyplot.scatter",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((526, 553), 'numpy.random.choice', 'np.random.choice', (['N', 'sample'], {}), '(N, sample)\n', (542, 553), True, 'import numpy as np\n'), ((611, 638), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Xtrain', 'Ytrain'], {}), '(Xtrain, Ytrain)\n', (622, 638), True, 'import matplotlib.pyplot as plt\n'), ((643, 653), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (651, 653), True, 'import matplotlib.pyplot as plt\n'), ((841, 855), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {}), '(X, Y)\n', (849, 855), True, 'import matplotlib.pyplot as plt\n'), ((860, 878), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y_hat'], {}), '(X, Y_hat)\n', (868, 878), True, 'import matplotlib.pyplot as plt\n'), ((883, 910), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Xtrain', 'Ytrain'], {}), '(Xtrain, Ytrain)\n', (894, 910), True, 'import matplotlib.pyplot as plt\n'), ((915, 942), 'matplotlib.pyplot.title', 'plt.title', (["('deg = %d' % deg)"], {}), "('deg = %d' % deg)\n", (924, 942), True, 'import matplotlib.pyplot as plt\n'), ((947, 957), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (955, 957), True, 'import matplotlib.pyplot as plt\n'), ((1121, 1148), 'numpy.random.choice', 'np.random.choice', (['N', 'sample'], {}), '(N, sample)\n', (1137, 1148), True, 'import numpy as np\n'), ((1810, 1849), 'matplotlib.pyplot.plot', 'plt.plot', (['mse_trains'], {'label': '"""train mse"""'}), "(mse_trains, label='train mse')\n", (1818, 1849), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1891), 'matplotlib.pyplot.plot', 'plt.plot', (['mse_tests'], {'label': '"""test mse"""'}), "(mse_tests, label='test mse')\n", (1862, 1891), True, 'import matplotlib.pyplot as plt\n'), ((1896, 1908), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1906, 1908), True, 'import matplotlib.pyplot as plt\n'), ((1913, 1923), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1921, 1923), True, 'import matplotlib.pyplot as plt\n'), ((1929, 1968), 'matplotlib.pyplot.plot', 'plt.plot', (['mse_trains'], {'label': '"""train mse"""'}), "(mse_trains, label='train mse')\n", (1937, 1968), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1985), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1983, 1985), True, 'import matplotlib.pyplot as plt\n'), ((1990, 2000), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1998, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2085, 2113), 'numpy.linspace', 'np.linspace', (['(0)', '(6 * np.pi)', 'N'], {}), '(0, 6 * np.pi, N)\n', (2096, 2113), True, 'import numpy as np\n'), ((2120, 2129), 'numpy.sin', 'np.sin', (['X'], {}), '(X)\n', (2126, 2129), True, 'import numpy as np\n'), ((2135, 2149), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {}), '(X, Y)\n', (2143, 2149), True, 'import matplotlib.pyplot as plt\n'), ((2154, 2164), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2162, 2164), True, 'import matplotlib.pyplot as plt\n'), ((288, 298), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (295, 298), True, 'import numpy as np\n'), ((367, 382), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (376, 382), True, 'import numpy as np\n')] |
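The normal-equation solve in `fit` is ordinary least squares on the polynomial design matrix, so its results can be cross-checked against NumPy's built-in polynomial fit. A short sketch of that check:

```python
import numpy as np

deg = 5
X = np.linspace(0, 6 * np.pi, 100)
Y = np.sin(X)
coeffs = np.polyfit(X, Y, deg)   # highest power first
Yhat = np.polyval(coeffs, X)
mse = np.mean((Y - Yhat) ** 2)
print(f"deg={deg}, mse={mse:.4f}")
```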
"""
Prediction 3
BY 李说啥都对
2018.3
"""
import os
import numpy as np
import tensorflow as tf
from PIL import Image
from cfg_3 import MAX_CAPTCHA, CHAR_SET_LEN, model_path_3_1, model_path_3_2, model_path_3_3
from cnn_sys_3 import crack_captcha_cnn, X, keep_prob
from utils_3 import vec2text, get_clear_bin_image
from PyQt5.QtWidgets import QApplication
def hack_function(sess, predict, captcha_image):
text_list = sess.run(predict, feed_dict={X: [captcha_image], keep_prob: 1.})
text = text_list[0].tolist()
vector = np.zeros(MAX_CAPTCHA * CHAR_SET_LEN)
i = 0
for n in text:
vector[i * CHAR_SET_LEN + n] = 1
i += 1
return vec2text(vector)
def batch_hack_captcha_3(inroad, outroad):
try:
output = crack_captcha_cnn()
predict = tf.argmax(tf.reshape(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)
fw = open(outroad, 'w')
saver = tf.train.Saver()
with tf.Session() as sess:
predict_list = []
# first model predict
saver.restore(sess, tf.train.latest_checkpoint(model_path_3_1))
dirs = os.listdir(inroad)
predict_list1 = []
for i in dirs:
QApplication.processEvents()
image = Image.open(inroad + "/" + i)
image = get_clear_bin_image(image)
image = np.array(image)
image = 1 * image.flatten()
predict_text = hack_function(sess, predict, image)
i = i.split(".")[0]
print("{},{}".format(i, predict_text))
predict_list1.append(predict_text)
# fw.write("{},{}\n".format(i, predict_text))
fw.flush()
# second model predict
saver.restore(sess, tf.train.latest_checkpoint(model_path_3_2))
predict_list2 = []
for i in dirs:
QApplication.processEvents()
image = Image.open(inroad + "/" + i)
image = get_clear_bin_image(image)
image = np.array(image)
image = 1 * image.flatten()
predict_text = hack_function(sess, predict, image)
i = i.split(".")[0]
print("{},{}".format(i, predict_text))
predict_list2.append(predict_text)
# fw.write("{},{}\n".format(i, predict_text))
fw.flush()
            # third model predict
saver.restore(sess, tf.train.latest_checkpoint(model_path_3_3))
predict_list3 = []
for i in dirs:
QApplication.processEvents()
image = Image.open(inroad + "/" + i)
image = get_clear_bin_image(image)
image = np.array(image)
image = 1 * image.flatten()
predict_text = hack_function(sess, predict, image)
i = i.split(".")[0]
print("{},{}".format(i, predict_text))
predict_list3.append(predict_text)
# fw.write("{},{}\n".format(i, predict_text))
fw.flush()
for i in range(len(predict_list1)):
if predict_list1[i] == predict_list2[i] and predict_list1[i] == predict_list3[i]:
predict_list.append(predict_list1[i])
elif predict_list1[i] == predict_list2[i]:
predict_list.append(predict_list1[i])
elif predict_list1[i] == predict_list3[i]:
predict_list.append(predict_list1[i])
elif predict_list2[i] == predict_list3[i]:
predict_list.append(predict_list2[i])
else:
predict_list.append(predict_list1[i])
for dir, i in zip(dirs, range(len(predict_list))):
dir = dir.split(".")[0]
print(dir, predict_list[i])
fw.write("{},{}\n".format(dir, predict_list[i]))
fw.flush()
    except Exception as e:
        print("ERROR!", e)
        return -1
if __name__ == '__main__':
# inroad = r'C:\Users\Servon\Desktop\fff/'
inroad = "E:/Users/Dell/PycharmProjects/CNN-third/all/"
outroad = r'C:\Users\Servon\Desktop\mappings3.txt'
batch_hack_captcha_3(inroad, outroad)
| [
"os.listdir",
"PIL.Image.open",
"tensorflow.Session",
"tensorflow.train.Saver",
"utils_3.vec2text",
"numpy.array",
"numpy.zeros",
"PyQt5.QtWidgets.QApplication.processEvents",
"utils_3.get_clear_bin_image",
"tensorflow.reshape",
"tensorflow.train.latest_checkpoint",
"cnn_sys_3.crack_captcha_cn... | [((536, 572), 'numpy.zeros', 'np.zeros', (['(MAX_CAPTCHA * CHAR_SET_LEN)'], {}), '(MAX_CAPTCHA * CHAR_SET_LEN)\n', (544, 572), True, 'import numpy as np\n'), ((674, 690), 'utils_3.vec2text', 'vec2text', (['vector'], {}), '(vector)\n', (682, 690), False, 'from utils_3 import vec2text, get_clear_bin_image\n'), ((767, 786), 'cnn_sys_3.crack_captcha_cnn', 'crack_captcha_cnn', ([], {}), '()\n', (784, 786), False, 'from cnn_sys_3 import crack_captcha_cnn, X, keep_prob\n'), ((922, 938), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (936, 938), True, 'import tensorflow as tf\n'), ((816, 867), 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, MAX_CAPTCHA, CHAR_SET_LEN]'], {}), '(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN])\n', (826, 867), True, 'import tensorflow as tf\n'), ((953, 965), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (963, 965), True, 'import tensorflow as tf\n'), ((1142, 1160), 'os.listdir', 'os.listdir', (['inroad'], {}), '(inroad)\n', (1152, 1160), False, 'import os\n'), ((1078, 1120), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_path_3_1'], {}), '(model_path_3_1)\n', (1104, 1120), True, 'import tensorflow as tf\n'), ((1240, 1268), 'PyQt5.QtWidgets.QApplication.processEvents', 'QApplication.processEvents', ([], {}), '()\n', (1266, 1268), False, 'from PyQt5.QtWidgets import QApplication\n'), ((1294, 1322), 'PIL.Image.open', 'Image.open', (["(inroad + '/' + i)"], {}), "(inroad + '/' + i)\n", (1304, 1322), False, 'from PIL import Image\n'), ((1348, 1374), 'utils_3.get_clear_bin_image', 'get_clear_bin_image', (['image'], {}), '(image)\n', (1367, 1374), False, 'from utils_3 import vec2text, get_clear_bin_image\n'), ((1400, 1415), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1408, 1415), True, 'import numpy as np\n'), ((1838, 1880), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_path_3_2'], {}), '(model_path_3_2)\n', (1864, 1880), True, 'import tensorflow as tf\n'), ((1961, 1989), 'PyQt5.QtWidgets.QApplication.processEvents', 'QApplication.processEvents', ([], {}), '()\n', (1987, 1989), False, 'from PyQt5.QtWidgets import QApplication\n'), ((2015, 2043), 'PIL.Image.open', 'Image.open', (["(inroad + '/' + i)"], {}), "(inroad + '/' + i)\n", (2025, 2043), False, 'from PIL import Image\n'), ((2069, 2095), 'utils_3.get_clear_bin_image', 'get_clear_bin_image', (['image'], {}), '(image)\n', (2088, 2095), False, 'from utils_3 import vec2text, get_clear_bin_image\n'), ((2121, 2136), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2129, 2136), True, 'import numpy as np\n'), ((2557, 2599), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_path_3_3'], {}), '(model_path_3_3)\n', (2583, 2599), True, 'import tensorflow as tf\n'), ((2680, 2708), 'PyQt5.QtWidgets.QApplication.processEvents', 'QApplication.processEvents', ([], {}), '()\n', (2706, 2708), False, 'from PyQt5.QtWidgets import QApplication\n'), ((2734, 2762), 'PIL.Image.open', 'Image.open', (["(inroad + '/' + i)"], {}), "(inroad + '/' + i)\n", (2744, 2762), False, 'from PIL import Image\n'), ((2788, 2814), 'utils_3.get_clear_bin_image', 'get_clear_bin_image', (['image'], {}), '(image)\n', (2807, 2814), False, 'from utils_3 import vec2text, get_clear_bin_image\n'), ((2840, 2855), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2848, 2855), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# anim_sequence_EI_networks_spont_stim.py
#
# Copyright 2019 <NAME>
# The MIT License
import numpy as np
import pylab as pl
import lib.protocol as protocol
import lib.animation_image as ai
import datetime
landscapes = [
{'mode': 'symmetric'},
{'mode': 'homogeneous', 'specs': {'phi': 3}},
{'mode': 'random'},
{'mode': 'Perlin', 'specs': {'size': 4}},
{'mode': 'Perlin_uniform', 'specs': {'size': 4}},
]
simulation = 'sequence_EI_networks'
params = protocol.get_parameters(simulation).as_dict()
params.update({'landscape': landscapes[-1]})
gids, ts = protocol.get_or_simulate(simulation, params)
nrow = ncol = params['nrowE']
npop = nrow * ncol
offset = 1
idx = gids - offset < npop
gids, ts = gids[idx], ts[idx]
ts_bins = np.arange(500., 2500., 10.)
h = np.histogram2d(ts, gids - offset, bins=[ts_bins, range(npop + 1)])[0]
hh = h.reshape(-1, nrow, ncol)
simulation = 'sequence_EI_networks_stim'
params = protocol.get_parameters(simulation).as_dict()
params.update({'landscape': landscapes[-1]})
gids, ts = protocol.get_or_simulate(simulation, params)
nrow = ncol = params['nrowE']
npop = nrow * ncol
offset = 1
idx = gids - offset < npop
gids, ts = gids[idx], ts[idx]
ts_bins = np.arange(500., 2500., 10.)
h = np.histogram2d(ts, gids - offset, bins=[ts_bins, range(npop + 1)])[0]
hh_stim = h.reshape(-1, nrow, ncol)
fig, ax = pl.subplots(1,2)
a = ai.multiple_animate_image(fig, ax, [hh,hh_stim], vmin=0, vmax=np.max([hh,hh_stim]), cmap='binary')
ax[0].set_title('Spontaneous')
ax[1].set_title('Evoked')
date = datetime.datetime.now()
a.save('sequence_EI_networks_spont_stim-%s.mp4' %(date), fps=12, extra_args=['-vcodec', 'libx264'])
pl.show()
| [
"numpy.max",
"lib.protocol.get_or_simulate",
"datetime.datetime.now",
"lib.protocol.get_parameters",
"pylab.subplots",
"numpy.arange",
"pylab.show"
] | [((601, 645), 'lib.protocol.get_or_simulate', 'protocol.get_or_simulate', (['simulation', 'params'], {}), '(simulation, params)\n', (625, 645), True, 'import lib.protocol as protocol\n'), ((776, 806), 'numpy.arange', 'np.arange', (['(500.0)', '(2500.0)', '(10.0)'], {}), '(500.0, 2500.0, 10.0)\n', (785, 806), True, 'import numpy as np\n'), ((1063, 1107), 'lib.protocol.get_or_simulate', 'protocol.get_or_simulate', (['simulation', 'params'], {}), '(simulation, params)\n', (1087, 1107), True, 'import lib.protocol as protocol\n'), ((1238, 1268), 'numpy.arange', 'np.arange', (['(500.0)', '(2500.0)', '(10.0)'], {}), '(500.0, 2500.0, 10.0)\n', (1247, 1268), True, 'import numpy as np\n'), ((1387, 1404), 'pylab.subplots', 'pl.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (1398, 1404), True, 'import pylab as pl\n'), ((1571, 1594), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1592, 1594), False, 'import datetime\n'), ((1695, 1704), 'pylab.show', 'pl.show', ([], {}), '()\n', (1702, 1704), True, 'import pylab as pl\n'), ((498, 533), 'lib.protocol.get_parameters', 'protocol.get_parameters', (['simulation'], {}), '(simulation)\n', (521, 533), True, 'import lib.protocol as protocol\n'), ((960, 995), 'lib.protocol.get_parameters', 'protocol.get_parameters', (['simulation'], {}), '(simulation)\n', (983, 995), True, 'import lib.protocol as protocol\n'), ((1470, 1491), 'numpy.max', 'np.max', (['[hh, hh_stim]'], {}), '([hh, hh_stim])\n', (1476, 1491), True, 'import numpy as np\n')] |
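The binning step above turns (spike time, neuron id) pairs into 10 ms population frames on the neuron grid via `np.histogram2d` plus a reshape. A self-contained sketch with synthetic spikes:

```python
import numpy as np

nrow = ncol = 10
npop = nrow * ncol
ts = np.random.uniform(500., 2500., size=5000)    # spike times in ms
gids = np.random.randint(0, npop, size=5000)       # neuron ids, 0-based
ts_bins = np.arange(500., 2500., 10.)                # 10 ms frame edges
h = np.histogram2d(ts, gids, bins=[ts_bins, range(npop + 1)])[0]
frames = h.reshape(-1, nrow, ncol)            # (n_frames, nrow, ncol)
```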
import cv2
import numpy as np
import glob
# Load previously saved data
with np.load(r'1. Camera Calibration\camera.py') as X:
mtx, dist, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')]
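# (For reference, a matching save step would look like, e.g.:
#  np.savez('camera.npz', mtx=mtx, dist=dist, rvecs=rvecs, tvecs=tvecs) --
#  np.load expects an archive produced by np.save/np.savez.)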
| [
"numpy.load"
] | [((77, 120), 'numpy.load', 'np.load', (['"""1. Camera Calibration\\\\camera.py"""'], {}), "('1. Camera Calibration\\\\camera.py')\n", (84, 120), True, 'import numpy as np\n')] |
"""
Data loader for TUM RGBD benchmark
@author: <NAME>
@date: March 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys, os, random
import pickle
import numpy as np
import os.path as osp
import torch.utils.data as data
from scipy.misc import imread
from tqdm import tqdm
from transforms3d import quaternions
from cv2 import resize, INTER_NEAREST
"""
The following scripts use the directory structure as:
root
---- fr1
-------- rgbd_dataset_freiburg1_360
-------- rgbd_dataset_freiburg1_desk
-------- rgbd_dataset_freiburg1_desk2
-------- ...
---- fr2
-------- rgbd_dataset_freiburg2_360_hemisphere
-------- rgbd_dataset_freiburg2_360_kidnap
-------- rgbd_dataset_freiburg2_coke
-------- ....
---- fr3
-------- rgbd_dataset_freiburg3_cabinet
-------- rgbd_dataset_freiburg3_long_office_household
-------- rgbd_dataset_freiburg3_nostructure_notexture_far
-------- ....
"""
def tum_trainval_dict():
""" the sequence dictionary of TUM dataset
https://vision.in.tum.de/data/datasets/rgbd-dataset/download
The calibration parameters refers to:
https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats
"""
return {
'fr1': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': ['rgbd_dataset_freiburg1_desk2',
'rgbd_dataset_freiburg1_floor',
'rgbd_dataset_freiburg1_room',
'rgbd_dataset_freiburg1_xyz',
'rgbd_dataset_freiburg1_rpy',
'rgbd_dataset_freiburg1_plant',
'rgbd_dataset_freiburg1_teddy']
},
'fr2': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': ['rgbd_dataset_freiburg2_360_hemisphere',
'rgbd_dataset_freiburg2_large_no_loop',
'rgbd_dataset_freiburg2_large_with_loop',
'rgbd_dataset_freiburg2_pioneer_slam',
'rgbd_dataset_freiburg2_pioneer_slam2',
'rgbd_dataset_freiburg2_pioneer_slam3',
'rgbd_dataset_freiburg2_xyz',
'rgbd_dataset_freiburg2_360_kidnap',
'rgbd_dataset_freiburg2_rpy',
'rgbd_dataset_freiburg2_coke',
'rgbd_dataset_freiburg2_desk_with_person',
'rgbd_dataset_freiburg2_dishes',
'rgbd_dataset_freiburg2_flowerbouquet_brownbackground',
'rgbd_dataset_freiburg2_metallic_sphere2',
'rgbd_dataset_freiburg2_flowerbouquet'
]
},
'fr3': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': [
'rgbd_dataset_freiburg3_walking_halfsphere',
'rgbd_dataset_freiburg3_walking_rpy',
'rgbd_dataset_freiburg3_cabinet',
'rgbd_dataset_freiburg3_nostructure_notexture_far',
'rgbd_dataset_freiburg3_nostructure_notexture_near_withloop',
'rgbd_dataset_freiburg3_nostructure_texture_far',
'rgbd_dataset_freiburg3_nostructure_texture_near_withloop',
'rgbd_dataset_freiburg3_sitting_rpy',
'rgbd_dataset_freiburg3_sitting_static',
'rgbd_dataset_freiburg3_sitting_xyz',
'rgbd_dataset_freiburg3_structure_notexture_near',
'rgbd_dataset_freiburg3_structure_texture_far',
'rgbd_dataset_freiburg3_structure_texture_near',
'rgbd_dataset_freiburg3_teddy']
}
}
def tum_test_dict():
""" the trajectorys held out for testing TUM dataset
"""
return {
'fr1': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': ['rgbd_dataset_freiburg1_360',
'rgbd_dataset_freiburg1_desk']
},
'fr2': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': ['rgbd_dataset_freiburg2_desk',
'rgbd_dataset_freiburg2_pioneer_360']
},
'fr3': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': ['rgbd_dataset_freiburg3_walking_static', # dynamic scene
'rgbd_dataset_freiburg3_walking_xyz', # dynamic scene
'rgbd_dataset_freiburg3_long_office_household']
}
}
class TUM(data.Dataset):
base = 'data'
def __init__(self, root = '', category='train',
keyframes=[1], data_transform=None, select_traj=None):
"""
:param the root directory of the data
:param select the category (train, validation,test)
:param select the number of keyframes
            Test data only supports one keyframe at a time
            Train/validation data supports mixing different keyframes
        :param select one particular trajectory at runtime
            Only supported for testing
"""
super(TUM, self).__init__()
self.image_seq = []
self.depth_seq = []
self.invalid_seq = []
self.cam_pose_seq= []
self.calib = []
self.seq_names = []
self.ids = 0
self.seq_acc_ids = [0]
self.keyframes = keyframes
self.transforms = data_transform
if category == 'test':
self.__load_test(root+'/data_tum', select_traj)
else: # train and validation
self.__load_train_val(root+'/data_tum', category)
# downscale the input image to a quarter
self.fx_s = 0.25
self.fy_s = 0.25
        print('TUM dataloader for {:} using keyframe {:}: {:} valid frames'.format(
            category, keyframes, self.ids))
def __load_train_val(self, root, category):
tum_data = tum_trainval_dict()
for ks, scene in tum_data.items():
for seq_name in scene['seq']:
seq_path = osp.join(ks, seq_name)
self.calib.append(scene['calib'])
# synchronized trajectory file
sync_traj_file = osp.join(root, seq_path, 'sync_trajectory.pkl')
if not osp.isfile(sync_traj_file):
print("The synchronized trajectory file {:} has not been generated.".format(seq_path))
print("Generate it now...")
write_sync_trajectory(root, ks, seq_name)
with open(sync_traj_file, 'rb') as p:
trainval = pickle.load(p)
total_num = len(trainval)
# the ratio to split the train & validation set
if category == 'train':
start_idx, end_idx = 0, int(0.95*total_num)
else:
start_idx, end_idx = int(0.95*total_num), total_num
images = [trainval[idx][1] for idx in range(start_idx, end_idx)]
depths = [trainval[idx][2] for idx in range(start_idx, end_idx)]
extrin = [tq2mat(trainval[idx][0]) for idx in range(start_idx, end_idx)]
self.image_seq.append(images)
self.depth_seq.append(depths)
self.cam_pose_seq.append(extrin)
self.seq_names.append(seq_path)
self.ids += max(0, len(images) - max(self.keyframes))
self.seq_acc_ids.append(self.ids)
def __load_test(self, root, select_traj=None):
""" Note:
The test trajectory is loaded slightly different from the train/validation trajectory.
We only select keyframes from the entire trajectory, rather than use every individual frame.
For a given trajectory of length N, using key-frame 2, the train/validation set will use
[[1, 3], [2, 4], [3, 5],...[N-1, N]],
while test set will use pair
[[1, 3], [3, 5], [5, 7],...[N-1, N]]
        This difference results in a change in the trajectory length when using different keyframes.
        The benefit of sampling keyframes in the test set is that the output is a more reasonable trajectory;
        in training/validation, we fully leverage every pair of images.
"""
tum_data = tum_test_dict()
assert(len(self.keyframes) == 1)
kf = self.keyframes[0]
self.keyframes = [1]
for ks, scene in tum_data.items():
for seq_name in scene['seq']:
seq_path = osp.join(ks, seq_name)
if select_traj is not None:
if seq_path != select_traj: continue
self.calib.append(scene['calib'])
# synchronized trajectory file
sync_traj_file = osp.join(root, seq_path, 'sync_trajectory.pkl')
if not osp.isfile(sync_traj_file):
print("The synchronized trajectory file {:} has not been generated.".format(seq_path))
print("Generate it now...")
write_sync_trajectory(root, ks, seq_name)
with open(sync_traj_file, 'rb') as p:
frames = pickle.load(p)
total_num = len(frames)
images = [frames[idx][1] for idx in range(0, total_num, kf)]
depths = [frames[idx][2] for idx in range(0, total_num, kf)]
extrin = [tq2mat(frames[idx][0]) for idx in range(0, total_num, kf)]
self.image_seq.append(images)
self.depth_seq.append(depths)
self.cam_pose_seq.append(extrin)
self.seq_names.append(seq_path)
self.ids += max(0, len(images)-1)
self.seq_acc_ids.append(self.ids)
if len(self.image_seq) == 0:
raise Exception("The specified trajectory is not in the test set.")
def __getitem__(self, index):
seq_idx = max(np.searchsorted(self.seq_acc_ids, index+1) - 1, 0)
frame_idx = index - self.seq_acc_ids[seq_idx]
this_idx = frame_idx
next_idx = frame_idx + random.choice(self.keyframes)
color0 = self.__load_rgb_tensor(self.image_seq[seq_idx][this_idx])
color1 = self.__load_rgb_tensor(self.image_seq[seq_idx][next_idx])
depth0 = self.__load_depth_tensor(self.depth_seq[seq_idx][this_idx])
depth1 = self.__load_depth_tensor(self.depth_seq[seq_idx][next_idx])
if self.transforms:
color0, color1 = self.transforms([color0, color1])
# normalize the coordinate
calib = np.asarray(self.calib[seq_idx], dtype=np.float32)
calib[0] *= self.fx_s
calib[1] *= self.fy_s
calib[2] *= self.fx_s
calib[3] *= self.fy_s
cam_pose0 = self.cam_pose_seq[seq_idx][this_idx]
cam_pose1 = self.cam_pose_seq[seq_idx][next_idx]
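        # relative camera motion: T maps frame-0 coordinates into frame 1,
        # i.e. T = inv(T1) @ T0 for camera-to-world poses Ti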
transform = np.dot(np.linalg.inv(cam_pose1), cam_pose0).astype(np.float32)
name = '{:}_{:06d}to{:06d}'.format(self.seq_names[seq_idx],
this_idx, next_idx)
return color0, color1, depth0, depth1, transform, calib, name
def __len__(self):
return self.ids
def __load_rgb_tensor(self, path):
""" Load the rgb image
"""
image = imread(path)[:, :, :3]
image = image.astype(np.float32) / 255.0
image = resize(image, None, fx=self.fx_s, fy=self.fy_s)
return image
def __load_depth_tensor(self, path):
""" Load the depth:
The depth images are scaled by a factor of 5000, i.e., a pixel
value of 5000 in the depth image corresponds to a distance of
1 meter from the camera, 10000 to 2 meter distance, etc.
A pixel value of 0 means missing value/no data.
"""
depth = imread(path).astype(np.float32) / 5e3
depth = resize(depth, None, fx=self.fx_s, fy=self.fy_s, interpolation=INTER_NEAREST)
depth = np.clip(depth, a_min=0.5, a_max=5.0) # the accurate range of kinect depth
return depth[np.newaxis, :]
"""
Some utility files to work with the data
"""
def tq2mat(tq):
""" transform translation-quaternion (tq) to (4x4) matrix
"""
tq = np.array(tq)
T = np.eye(4)
T[:3,:3] = quaternions.quat2mat(np.roll(tq[3:], 1))
T[:3, 3] = tq[:3]
return T
def write_sync_trajectory(local_dir, dataset, subject_name):
"""
:param the root of the directory
:param the dataset category 'fr1', 'fr2' or 'fr3'
"""
rgb_file = osp.join(local_dir, dataset, subject_name, 'rgb.txt')
depth_file= osp.join(local_dir, dataset, subject_name, 'depth.txt')
pose_file = osp.join(local_dir, dataset, subject_name, 'groundtruth.txt')
rgb_list = read_file_list(rgb_file)
depth_list=read_file_list(depth_file)
pose_list = read_file_list(pose_file)
matches = associate_three(rgb_list, depth_list, pose_list, offset=0.0, max_difference=0.02)
trajectory_info = []
for (a,b,c) in matches:
pose = [float(x) for x in pose_list[c]]
rgb_file = osp.join(local_dir, dataset, subject_name, rgb_list[a][0])
depth_file = osp.join(local_dir, dataset, subject_name, depth_list[b][0])
trajectory_info.append([pose, rgb_file, depth_file])
dataset_path = osp.join(local_dir, dataset, subject_name, 'sync_trajectory.pkl')
with open(dataset_path, 'wb') as output:
pickle.dump(trajectory_info, output)
txt_path = osp.join(local_dir, dataset, subject_name, 'sync_trajectory.txt')
pickle2txts(dataset_path, txt_path)
def pickle2txts(pickle_file, txt_file):
'''
write the pickle_file into a txt_file
'''
with open(pickle_file, 'rb') as pkl_file:
traj = pickle.load(pkl_file)
with open(txt_file, 'w') as f:
for frame in traj:
f.write(' '.join(['%f ' % x for x in frame[0]]))
f.write(frame[1] + ' ')
f.write(frame[2] + '\n')
"""
The following utility files are provided by TUM RGBD dataset benchmark
Refer: https://vision.in.tum.de/data/datasets/rgbd-dataset/tools
"""
def read_file_list(filename):
"""
Reads a trajectory from a text file.
File format:
The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)
and "d1 d2 d3.." is arbitary data (e.g., a 3D position and 3D orientation) associated to this timestamp.
Input:
filename -- File name
Output:
dict -- dictionary of (stamp,data) tuples
"""
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
list = [[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
list = [(float(l[0]),l[1:]) for l in list if len(l)>1]
return dict(list)
def associate(first_list, second_list,offset,max_difference):
"""
Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim
to find the closest match for every input tuple.
Input:
first_list -- first dictionary of (stamp,data) tuples
second_list -- second dictionary of (stamp,data) tuples
offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
max_difference -- search radius for candidate generation
Output:
matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
"""
    first_keys = list(first_list)   # materialize so .remove() works on Python 3
    second_keys = list(second_list)
potential_matches = [(abs(a - (b + offset)), a, b)
for a in first_keys
for b in second_keys
if abs(a - (b + offset)) < max_difference]
potential_matches.sort()
matches = []
for diff, a, b in potential_matches:
if a in first_keys and b in second_keys:
first_keys.remove(a)
second_keys.remove(b)
matches.append((a, b))
matches.sort()
return matches
def associate_three(first_list, second_list, third_list, offset, max_difference):
"""
Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim to find the closest match for every input tuple.
Input:
first_list -- first dictionary of (stamp,data) tuples (default to be rgb)
second_list -- second dictionary of (stamp,data) tuples (default to be depth)
third_list -- third dictionary of (stamp,data) tuples (default to be pose)
offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
max_difference -- search radius for candidate generation
Output:
matches -- list of matched tuples ((stamp1,data1),(stamp2,data2),(stamp3,data3))
"""
first_keys = list(first_list)
second_keys = list(second_list)
third_keys = list(third_list)
# find the potential matches in (rgb, depth)
potential_matches_ab = [(abs(a - (b + offset)), a, b)
for a in first_keys
for b in second_keys
if abs(a - (b + offset)) < max_difference]
potential_matches_ab.sort()
matches_ab = []
for diff, a, b in potential_matches_ab:
if a in first_keys and b in second_keys:
matches_ab.append((a, b))
matches_ab.sort()
# find the potential matches in (rgb, depth, pose)
potential_matches = [(abs(a - (c + offset)), abs(b - (c + offset)), a,b,c)
for (a,b) in matches_ab
for c in third_keys
if abs(a - (c + offset)) < max_difference and
abs(b - (c + offset)) < max_difference]
potential_matches.sort()
matches_abc = []
for diff_rgb, diff_depth, a, b, c in potential_matches:
if a in first_keys and b in second_keys and c in third_keys:
first_keys.remove(a)
second_keys.remove(b)
third_keys.remove(c)
matches_abc.append((a,b,c))
matches_abc.sort()
return matches_abc
if __name__ == '__main__':
loader = TUM(category='test', keyframes=[1])
import torchvision.utils as torch_utils
torch_loader = data.DataLoader(loader, batch_size=16,
shuffle=False, num_workers=4)
for batch in torch_loader:
color0, color1, depth0, depth1, transform, K, name = batch
B, C, H, W = color0.shape
bcolor0_img = torch_utils.make_grid(color0, nrow=4)
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(bcolor0_img.numpy().transpose(1,2,0))
plt.show() | [
"numpy.clip",
"torch.utils.data.replace",
"numpy.array",
"torchvision.utils.make_grid",
"numpy.searchsorted",
"numpy.asarray",
"scipy.misc.imread",
"numpy.eye",
"random.choice",
"pickle.load",
"os.path.isfile",
"cv2.resize",
"matplotlib.pyplot.show",
"numpy.roll",
"pickle.dump",
"os.pa... | [((12246, 12258), 'numpy.array', 'np.array', (['tq'], {}), '(tq)\n', (12254, 12258), True, 'import numpy as np\n'), ((12267, 12276), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (12273, 12276), True, 'import numpy as np\n'), ((12554, 12607), 'os.path.join', 'osp.join', (['local_dir', 'dataset', 'subject_name', '"""rgb.txt"""'], {}), "(local_dir, dataset, subject_name, 'rgb.txt')\n", (12562, 12607), True, 'import os.path as osp\n'), ((12624, 12679), 'os.path.join', 'osp.join', (['local_dir', 'dataset', 'subject_name', '"""depth.txt"""'], {}), "(local_dir, dataset, subject_name, 'depth.txt')\n", (12632, 12679), True, 'import os.path as osp\n'), ((12696, 12757), 'os.path.join', 'osp.join', (['local_dir', 'dataset', 'subject_name', '"""groundtruth.txt"""'], {}), "(local_dir, dataset, subject_name, 'groundtruth.txt')\n", (12704, 12757), True, 'import os.path as osp\n'), ((13323, 13388), 'os.path.join', 'osp.join', (['local_dir', 'dataset', 'subject_name', '"""sync_trajectory.pkl"""'], {}), "(local_dir, dataset, subject_name, 'sync_trajectory.pkl')\n", (13331, 13388), True, 'import os.path as osp\n'), ((13496, 13561), 'os.path.join', 'osp.join', (['local_dir', 'dataset', 'subject_name', '"""sync_trajectory.txt"""'], {}), "(local_dir, dataset, subject_name, 'sync_trajectory.txt')\n", (13504, 13561), True, 'import os.path as osp\n'), ((18221, 18289), 'torch.utils.data.DataLoader', 'data.DataLoader', (['loader'], {'batch_size': '(16)', 'shuffle': '(False)', 'num_workers': '(4)'}), '(loader, batch_size=16, shuffle=False, num_workers=4)\n', (18236, 18289), True, 'import torch.utils.data as data\n'), ((10601, 10650), 'numpy.asarray', 'np.asarray', (['self.calib[seq_idx]'], {'dtype': 'np.float32'}), '(self.calib[seq_idx], dtype=np.float32)\n', (10611, 10650), True, 'import numpy as np\n'), ((11386, 11433), 'cv2.resize', 'resize', (['image', 'None'], {'fx': 'self.fx_s', 'fy': 'self.fy_s'}), '(image, None, fx=self.fx_s, fy=self.fy_s)\n', (11392, 11433), False, 'from cv2 import resize, INTER_NEAREST\n'), ((11888, 11964), 'cv2.resize', 'resize', (['depth', 'None'], {'fx': 'self.fx_s', 'fy': 'self.fy_s', 'interpolation': 'INTER_NEAREST'}), '(depth, None, fx=self.fx_s, fy=self.fy_s, interpolation=INTER_NEAREST)\n', (11894, 11964), False, 'from cv2 import resize, INTER_NEAREST\n'), ((11981, 12017), 'numpy.clip', 'np.clip', (['depth'], {'a_min': '(0.5)', 'a_max': '(5.0)'}), '(depth, a_min=0.5, a_max=5.0)\n', (11988, 12017), True, 'import numpy as np\n'), ((12313, 12331), 'numpy.roll', 'np.roll', (['tq[3:]', '(1)'], {}), '(tq[3:], 1)\n', (12320, 12331), True, 'import numpy as np\n'), ((13101, 13159), 'os.path.join', 'osp.join', (['local_dir', 'dataset', 'subject_name', 'rgb_list[a][0]'], {}), '(local_dir, dataset, subject_name, rgb_list[a][0])\n', (13109, 13159), True, 'import os.path as osp\n'), ((13181, 13241), 'os.path.join', 'osp.join', (['local_dir', 'dataset', 'subject_name', 'depth_list[b][0]'], {}), '(local_dir, dataset, subject_name, depth_list[b][0])\n', (13189, 13241), True, 'import os.path as osp\n'), ((13443, 13479), 'pickle.dump', 'pickle.dump', (['trajectory_info', 'output'], {}), '(trajectory_info, output)\n', (13454, 13479), False, 'import pickle\n'), ((13762, 13783), 'pickle.load', 'pickle.load', (['pkl_file'], {}), '(pkl_file)\n', (13773, 13783), False, 'import pickle\n'), ((18456, 18493), 'torchvision.utils.make_grid', 'torch_utils.make_grid', (['color0'], {'nrow': '(4)'}), '(color0, nrow=4)\n', (18477, 18493), True, 'import torchvision.utils as torch_utils\n'), ((18543, 
18555), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18553, 18555), True, 'import matplotlib.pyplot as plt\n'), ((18621, 18631), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18629, 18631), True, 'import matplotlib.pyplot as plt\n'), ((10109, 10138), 'random.choice', 'random.choice', (['self.keyframes'], {}), '(self.keyframes)\n', (10122, 10138), False, 'import sys, os, random\n'), ((11298, 11310), 'scipy.misc.imread', 'imread', (['path'], {}), '(path)\n', (11304, 11310), False, 'from scipy.misc import imread\n'), ((5914, 5936), 'os.path.join', 'osp.join', (['ks', 'seq_name'], {}), '(ks, seq_name)\n', (5922, 5936), True, 'import os.path as osp\n'), ((6068, 6115), 'os.path.join', 'osp.join', (['root', 'seq_path', '"""sync_trajectory.pkl"""'], {}), "(root, seq_path, 'sync_trajectory.pkl')\n", (6076, 6115), True, 'import os.path as osp\n'), ((8484, 8506), 'os.path.join', 'osp.join', (['ks', 'seq_name'], {}), '(ks, seq_name)\n', (8492, 8506), True, 'import os.path as osp\n'), ((8742, 8789), 'os.path.join', 'osp.join', (['root', 'seq_path', '"""sync_trajectory.pkl"""'], {}), "(root, seq_path, 'sync_trajectory.pkl')\n", (8750, 8789), True, 'import os.path as osp\n'), ((9943, 9987), 'numpy.searchsorted', 'np.searchsorted', (['self.seq_acc_ids', '(index + 1)'], {}), '(self.seq_acc_ids, index + 1)\n', (9958, 9987), True, 'import numpy as np\n'), ((6140, 6166), 'os.path.isfile', 'osp.isfile', (['sync_traj_file'], {}), '(sync_traj_file)\n', (6150, 6166), True, 'import os.path as osp\n'), ((6471, 6485), 'pickle.load', 'pickle.load', (['p'], {}), '(p)\n', (6482, 6485), False, 'import pickle\n'), ((8814, 8840), 'os.path.isfile', 'osp.isfile', (['sync_traj_file'], {}), '(sync_traj_file)\n', (8824, 8840), True, 'import os.path as osp\n'), ((9143, 9157), 'pickle.load', 'pickle.load', (['p'], {}), '(p)\n', (9154, 9157), False, 'import pickle\n'), ((10913, 10937), 'numpy.linalg.inv', 'np.linalg.inv', (['cam_pose1'], {}), '(cam_pose1)\n', (10926, 10937), True, 'import numpy as np\n'), ((11834, 11846), 'scipy.misc.imread', 'imread', (['path'], {}), '(path)\n', (11840, 11846), False, 'from scipy.misc import imread\n'), ((14597, 14619), 'torch.utils.data.replace', 'data.replace', (['""","""', '""" """'], {}), "(',', ' ')\n", (14609, 14619), True, 'import torch.utils.data as data\n')] |
import rospy
import actionlib
from math import radians
import numpy as np
import scipy.signal
import time
import dynamic_reconfigure.client
from robot_localization.srv import SetPose
from pyquaternion import Quaternion as qt
from std_srvs.srv import Empty
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Quaternion, Pose, PoseWithCovarianceStamped, Twist, PoseStamped
from move_base_msgs.msg import MoveBaseGoal, MoveBaseAction
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import OccupancyGrid, Path, Odometry
from nav_msgs.srv import GetPlan
def _create_MoveBaseGoal(x, y, angle):
"""
Create a MoveBaseGoal with x, y position and yaw rotation (in degrees).
Returns a MoveBaseGoal
"""
mb_goal = MoveBaseGoal()
    mb_goal.target_pose.header.frame_id = 'odom' # note: goals in this setup are expressed in the odom frame rather than map
mb_goal.target_pose.pose.position.x = x
mb_goal.target_pose.pose.position.y = y
mb_goal.target_pose.pose.position.z = 0 # z must be 0.0 (no height in the map)
e = qt(axis = [0, 0, 1], angle = angle).elements
mb_goal.target_pose.pose.orientation = Quaternion(e[1], e[2], e[3], e[0])
return mb_goal
def _create_PoseWithCovarianceStamped():
"""
Create initial pose in odometery frame (used to reset)
"""
a = PoseWithCovarianceStamped()
a.header.frame_id = 'odom'
a.pose.pose.position.x = 0.0
a.pose.pose.position.y = 0.0
a.pose.pose.position.z = 0.0
a.pose.pose.orientation.x = 0.0
a.pose.pose.orientation.y = 0.0
a.pose.pose.orientation.z = 0.0
a.pose.pose.orientation.w = 0.0
return a
class Robot_config():
"""This is a class that tracks the jackal robot status
"""
def __init__(self):
self.X = 0 # inertia frame
self.Y = 0
self.Z = 0
self.PSI = 0
self.global_path = []
self.gx = 0 # body frame
self.gy = 0
self.gp = 0
self.los = 1
self.bad_vel = 0
self.vel_counter = 0
self.qt = (0, 0, 0, 0)
def get_robot_status(self, msg):
q1 = msg.pose.pose.orientation.x
q2 = msg.pose.pose.orientation.y
q3 = msg.pose.pose.orientation.z
q0 = msg.pose.pose.orientation.w
self.X = msg.pose.pose.position.x
self.Y = msg.pose.pose.position.y
self.Z = msg.pose.pose.position.z
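        # yaw from quaternion (ZYX convention): atan2(2(q0*q3 + q1*q2), 1 - 2(q2^2 + q3^2))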
self.PSI = np.arctan2(2 * (q0*q3 + q1*q2), (1 - 2*(q2**2+q3**2)))
self.qt = (q1, q2, q3, q0)
def get_global_path(self, msg):
gp = []
for pose in msg.poses:
gp.append([pose.pose.position.x, pose.pose.position.y])
gp = np.array(gp)
x = gp[:,0]
try:
xhat = scipy.signal.savgol_filter(x, 19, 3)
except:
xhat = x
y = gp[:,1]
try:
yhat = scipy.signal.savgol_filter(y, 19, 3)
except:
yhat = y
gphat = np.column_stack((xhat, yhat))
gphat.tolist()
self.global_path = gphat
def vel_monitor(self, msg):
"""
        Count the total number of velocity commands and the number of commands
        slower than 0.05 m/s (hard-coded below; tracked as self.bad_vel)
"""
vx = msg.linear.x
if vx <= 0.05:
self.bad_vel += 1
self.vel_counter += 1
def transform_lg(wp, X, Y, PSI):
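    # R_r2i is the SE(2) pose of the robot in the inertial frame (rotation by PSI,
    # translation by [X, Y]); its inverse maps an inertial-frame waypoint into the robot frame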
R_r2i = np.matrix([[np.cos(PSI), -np.sin(PSI), X], [np.sin(PSI), np.cos(PSI), Y], [0, 0, 1]])
R_i2r = np.linalg.inv(R_r2i)
pi = np.matrix([[wp[0]], [wp[1]], [1]])
pr = np.matmul(R_i2r, pi)
lg = np.array([pr[0,0], pr[1, 0]])
return lg
def transform_gp(gp, X, Y, PSI):
R_r2i = np.matrix([[np.cos(PSI), -np.sin(PSI), X], [np.sin(PSI), np.cos(PSI), Y], [0, 0, 1]])
R_i2r = np.linalg.inv(R_r2i)
pi = np.concatenate([gp, np.ones_like(gp[:, :1])], axis=-1)
pr = np.matmul(R_i2r, pi.T)
return np.asarray(pr[:2, :])
class MoveBase():
def __init__(self, goal_position = [6, 6, 0], base_local_planner="base_local_planner/TrajectoryPlannerROS"):
self.goal_position = goal_position
self.base_local_planner = base_local_planner.split("/")[-1]
self.planner_client = dynamic_reconfigure.client.Client('/move_base/' + self.base_local_planner)
self.local_costmap_client = dynamic_reconfigure.client.Client('move_base/local_costmap/inflater_layer')
self.global_costmap_client = dynamic_reconfigure.client.Client('move_base/global_costmap/inflater_layer')
self.nav_as = actionlib.SimpleActionClient('/move_base', MoveBaseAction)
self.global_goal = _create_MoveBaseGoal(goal_position[0], goal_position[1], goal_position[2])
self._reset_odom = rospy.ServiceProxy('/set_pose', SetPose)
self._clear_costmap = rospy.ServiceProxy('/move_base/clear_costmaps', Empty)
self._make_plan = rospy.ServiceProxy('/move_base/make_plan', GetPlan)
self.robot_config = Robot_config()
self.sub_robot = rospy.Subscriber("/odometry/filtered", Odometry, self.robot_config.get_robot_status)
# self.sub_gp = rospy.Subscriber("/move_base/" + self.base_local_planner + "/global_plan", Path, self.robot_config.get_global_path)
self.sub_gp = rospy.Subscriber("/move_base/NavfnROS/plan", Path, self.robot_config.get_global_path)
self.sub_vel = rospy.Subscriber("/jackal_velocity_controller/cmd_vel", Twist, self.robot_config.vel_monitor)
self.laser_scan = None
def set_navi_param(self, param_name, param):
if param_name != 'inflation_radius':
self.planner_client.update_configuration({param_name.split("/")[-1]: param})
rospy.set_param('/move_base/' + param_name, param)
if param_name == 'max_vel_theta':
self.planner_client.update_configuration({'min_vel_theta': -param})
rospy.set_param('/move_base/' + 'min_vel_theta', -param)
else:
self.global_costmap_client.update_configuration({param_name: param})
self.local_costmap_client.update_configuration({param_name: param})
rospy.set_param('/move_base/global_costmap/inflater_layer/' + param_name, param)
rospy.set_param('/move_base/local_costmap/inflater_layer/' + param_name, param)
def get_navi_param(self, param_name):
if param_name != 'inflation_radius':
param = rospy.get_param('/move_base/' + param_name)
else:
param = rospy.get_param('/move_base/global_costmap/inflater_layer/' + param_name)
return param
def get_laser_scan(self):
data = None
while data is None:
try:
data = rospy.wait_for_message('front/scan', LaserScan, timeout=5)
except:
pass
self.laser_scan = data
return data
def get_collision(self):
if self.laser_scan is not None:
laser_scan = np.array(self.laser_scan.ranges)
else:
laser_scan = self.get_laser_scan().ranges
self.laser_scan = None
d = np.mean(sorted(laser_scan)[:5])
return d < 0.3
def set_global_goal(self):
self.nav_as.wait_for_server()
try:
self.nav_as.send_goal(self.global_goal)
except (rospy.ServiceException) as e:
print ("/move_base service call failed")
def reset_robot_in_odom(self):
rospy.wait_for_service('/set_pose')
try:
self._reset_odom(_create_PoseWithCovarianceStamped())
except rospy.ServiceException:
print ("/set_pose service call failed")
self.robot_config.X = 0
self.robot_config.Y = 0
self.robot_config.Z = 0
# clear vel count history
self.robot_config.bad_vel = 0
self.robot_config.vel_counter = 0
def clear_costmap(self):
rospy.wait_for_service('/move_base/clear_costmaps')
try:
self._clear_costmap()
except rospy.ServiceException:
print ("/clear_costmaps service call failed")
def make_plan(self):
# get_plan = GetPlan()
start = PoseStamped()
start.header.frame_id = "odom"
start.pose.position.x = self.robot_config.X
start.pose.position.y = self.robot_config.Y
start.pose.position.z = self.robot_config.Z
x, y, z, w = self.robot_config.qt
start.pose.orientation.x = x
start.pose.orientation.y = y
start.pose.orientation.z = z
start.pose.orientation.w = w
goal = PoseStamped()
x, y, angle = self.goal_position
e = qt(axis = [0, 0, 1], angle = angle).elements
goal.header.frame_id = "odom"
goal.pose.position.x = x
goal.pose.position.y = y
goal.pose.position.z = 0
goal.pose.orientation = Quaternion(e[1], e[2], e[3], e[0])
tolerance = 0.5
# get_plan.start = start
# get_plan.goal = goal
rospy.wait_for_service('/move_base/make_plan')
try:
self._make_plan(start, goal, tolerance)
except rospy.ServiceException:
print ("/make_plan service call failed")
def reset_global_goal(self, goal_position = [6, 6, 0]):
self.global_goal = _create_MoveBaseGoal(goal_position[0], goal_position[1], goal_position[2])
def get_bad_vel_num(self):
"""
return the number of bad velocity and reset the count
"""
bad_vel = self.robot_config.bad_vel
vel = self.robot_config.vel_counter
return bad_vel, vel
def get_local_goal(self):
"""Get the local goal coordinate relative to the robot's current location
Returns:
[Pose msg]: pose msg with attributes x, y, and orientaton
"""
gp = self.robot_config.global_path
X = self.robot_config.X
Y = self.robot_config.Y
PSI = self.robot_config.PSI
los = self.robot_config.los
lg_x = 0
lg_y = 0
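        # walk the global path and return the first waypoint farther than the
        # look-ahead distance `los`, expressed in the robot frame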
if len(gp)>0:
lg_flag = 0
for wp in gp:
dist = (np.array(wp)-np.array([X, Y]))**2
dist = np.sum(dist, axis=0)
dist = np.sqrt(dist)
if dist > los:
lg_flag = 1
lg = transform_lg(wp, X, Y, PSI)
lg_x = lg[0]
lg_y = lg[1]
break
if lg_flag == 0:
lg = transform_lg(gp[-1], X, Y, PSI)
lg_x = lg[0]
lg_y = lg[1]
local_goal = Pose()
local_goal.position.x = lg_x
local_goal.position.y = lg_y
local_goal.orientation.w = 1
return local_goal
def get_global_path(self):
gp = self.robot_config.global_path
gp = transform_gp(gp, self.robot_config.X, self.robot_config.Y, self.robot_config.PSI)
return gp.T
def get_costmap(self):
cm = None
while cm is None:
try:
cm = rospy.wait_for_message("/move_base/global_costmap/costmap", OccupancyGrid, timeout=5)
except:
pass
return cm
| [
"numpy.sqrt",
"numpy.column_stack",
"numpy.array",
"numpy.arctan2",
"geometry_msgs.msg.PoseWithCovarianceStamped",
"numpy.sin",
"geometry_msgs.msg.Pose",
"rospy.ServiceProxy",
"numpy.asarray",
"geometry_msgs.msg.Quaternion",
"numpy.matmul",
"rospy.Subscriber",
"move_base_msgs.msg.MoveBaseGoa... | [((750, 764), 'move_base_msgs.msg.MoveBaseGoal', 'MoveBaseGoal', ([], {}), '()\n', (762, 764), False, 'from move_base_msgs.msg import MoveBaseGoal, MoveBaseAction\n'), ((1115, 1149), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['e[1]', 'e[2]', 'e[3]', 'e[0]'], {}), '(e[1], e[2], e[3], e[0])\n', (1125, 1149), False, 'from geometry_msgs.msg import Quaternion, Pose, PoseWithCovarianceStamped, Twist, PoseStamped\n'), ((1295, 1322), 'geometry_msgs.msg.PoseWithCovarianceStamped', 'PoseWithCovarianceStamped', ([], {}), '()\n', (1320, 1322), False, 'from geometry_msgs.msg import Quaternion, Pose, PoseWithCovarianceStamped, Twist, PoseStamped\n'), ((3451, 3471), 'numpy.linalg.inv', 'np.linalg.inv', (['R_r2i'], {}), '(R_r2i)\n', (3464, 3471), True, 'import numpy as np\n'), ((3481, 3515), 'numpy.matrix', 'np.matrix', (['[[wp[0]], [wp[1]], [1]]'], {}), '([[wp[0]], [wp[1]], [1]])\n', (3490, 3515), True, 'import numpy as np\n'), ((3525, 3545), 'numpy.matmul', 'np.matmul', (['R_i2r', 'pi'], {}), '(R_i2r, pi)\n', (3534, 3545), True, 'import numpy as np\n'), ((3555, 3585), 'numpy.array', 'np.array', (['[pr[0, 0], pr[1, 0]]'], {}), '([pr[0, 0], pr[1, 0]])\n', (3563, 3585), True, 'import numpy as np\n'), ((3744, 3764), 'numpy.linalg.inv', 'np.linalg.inv', (['R_r2i'], {}), '(R_r2i)\n', (3757, 3764), True, 'import numpy as np\n'), ((3839, 3861), 'numpy.matmul', 'np.matmul', (['R_i2r', 'pi.T'], {}), '(R_i2r, pi.T)\n', (3848, 3861), True, 'import numpy as np\n'), ((3873, 3894), 'numpy.asarray', 'np.asarray', (['pr[:2, :]'], {}), '(pr[:2, :])\n', (3883, 3894), True, 'import numpy as np\n'), ((1027, 1058), 'pyquaternion.Quaternion', 'qt', ([], {'axis': '[0, 0, 1]', 'angle': 'angle'}), '(axis=[0, 0, 1], angle=angle)\n', (1029, 1058), True, 'from pyquaternion import Quaternion as qt\n'), ((2374, 2438), 'numpy.arctan2', 'np.arctan2', (['(2 * (q0 * q3 + q1 * q2))', '(1 - 2 * (q2 ** 2 + q3 ** 2))'], {}), '(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 ** 2 + q3 ** 2))\n', (2384, 2438), True, 'import numpy as np\n'), ((2629, 2641), 'numpy.array', 'np.array', (['gp'], {}), '(gp)\n', (2637, 2641), True, 'import numpy as np\n'), ((2910, 2939), 'numpy.column_stack', 'np.column_stack', (['(xhat, yhat)'], {}), '((xhat, yhat))\n', (2925, 2939), True, 'import numpy as np\n'), ((4493, 4551), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['"""/move_base"""', 'MoveBaseAction'], {}), "('/move_base', MoveBaseAction)\n", (4521, 4551), False, 'import actionlib\n'), ((4681, 4721), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/set_pose"""', 'SetPose'], {}), "('/set_pose', SetPose)\n", (4699, 4721), False, 'import rospy\n'), ((4752, 4806), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/move_base/clear_costmaps"""', 'Empty'], {}), "('/move_base/clear_costmaps', Empty)\n", (4770, 4806), False, 'import rospy\n'), ((4833, 4884), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/move_base/make_plan"""', 'GetPlan'], {}), "('/move_base/make_plan', GetPlan)\n", (4851, 4884), False, 'import rospy\n'), ((4954, 5043), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/odometry/filtered"""', 'Odometry', 'self.robot_config.get_robot_status'], {}), "('/odometry/filtered', Odometry, self.robot_config.\n get_robot_status)\n", (4970, 5043), False, 'import rospy\n'), ((5201, 5291), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/move_base/NavfnROS/plan"""', 'Path', 'self.robot_config.get_global_path'], {}), "('/move_base/NavfnROS/plan', Path, self.robot_config.\n get_global_path)\n", (5217, 5291), 
False, 'import rospy\n'), ((5310, 5408), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/jackal_velocity_controller/cmd_vel"""', 'Twist', 'self.robot_config.vel_monitor'], {}), "('/jackal_velocity_controller/cmd_vel', Twist, self.\n robot_config.vel_monitor)\n", (5326, 5408), False, 'import rospy\n'), ((7375, 7410), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/set_pose"""'], {}), "('/set_pose')\n", (7397, 7410), False, 'import rospy\n'), ((7829, 7880), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/move_base/clear_costmaps"""'], {}), "('/move_base/clear_costmaps')\n", (7851, 7880), False, 'import rospy\n'), ((8099, 8112), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (8110, 8112), False, 'from geometry_msgs.msg import Quaternion, Pose, PoseWithCovarianceStamped, Twist, PoseStamped\n'), ((8514, 8527), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (8525, 8527), False, 'from geometry_msgs.msg import Quaternion, Pose, PoseWithCovarianceStamped, Twist, PoseStamped\n'), ((8795, 8829), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['e[1]', 'e[2]', 'e[3]', 'e[0]'], {}), '(e[1], e[2], e[3], e[0])\n', (8805, 8829), False, 'from geometry_msgs.msg import Quaternion, Pose, PoseWithCovarianceStamped, Twist, PoseStamped\n'), ((8946, 8992), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/move_base/make_plan"""'], {}), "('/move_base/make_plan')\n", (8968, 8992), False, 'import rospy\n'), ((10555, 10561), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (10559, 10561), False, 'from geometry_msgs.msg import Quaternion, Pose, PoseWithCovarianceStamped, Twist, PoseStamped\n'), ((3795, 3818), 'numpy.ones_like', 'np.ones_like', (['gp[:, :1]'], {}), '(gp[:, :1])\n', (3807, 3818), True, 'import numpy as np\n'), ((5633, 5683), 'rospy.set_param', 'rospy.set_param', (["('/move_base/' + param_name)", 'param'], {}), "('/move_base/' + param_name, param)\n", (5648, 5683), False, 'import rospy\n'), ((6075, 6160), 'rospy.set_param', 'rospy.set_param', (["('/move_base/global_costmap/inflater_layer/' + param_name)", 'param'], {}), "('/move_base/global_costmap/inflater_layer/' + param_name, param\n )\n", (6090, 6160), False, 'import rospy\n'), ((6168, 6247), 'rospy.set_param', 'rospy.set_param', (["('/move_base/local_costmap/inflater_layer/' + param_name)", 'param'], {}), "('/move_base/local_costmap/inflater_layer/' + param_name, param)\n", (6183, 6247), False, 'import rospy\n'), ((6356, 6399), 'rospy.get_param', 'rospy.get_param', (["('/move_base/' + param_name)"], {}), "('/move_base/' + param_name)\n", (6371, 6399), False, 'import rospy\n'), ((6434, 6507), 'rospy.get_param', 'rospy.get_param', (["('/move_base/global_costmap/inflater_layer/' + param_name)"], {}), "('/move_base/global_costmap/inflater_layer/' + param_name)\n", (6449, 6507), False, 'import rospy\n'), ((6894, 6926), 'numpy.array', 'np.array', (['self.laser_scan.ranges'], {}), '(self.laser_scan.ranges)\n', (6902, 6926), True, 'import numpy as np\n'), ((8581, 8612), 'pyquaternion.Quaternion', 'qt', ([], {'axis': '[0, 0, 1]', 'angle': 'angle'}), '(axis=[0, 0, 1], angle=angle)\n', (8583, 8612), True, 'from pyquaternion import Quaternion as qt\n'), ((3365, 3376), 'numpy.cos', 'np.cos', (['PSI'], {}), '(PSI)\n', (3371, 3376), True, 'import numpy as np\n'), ((3397, 3408), 'numpy.sin', 'np.sin', (['PSI'], {}), '(PSI)\n', (3403, 3408), True, 'import numpy as np\n'), ((3410, 3421), 'numpy.cos', 'np.cos', (['PSI'], {}), '(PSI)\n', (3416, 3421), True, 'import numpy as np\n'), ((3658, 3669), 
'numpy.cos', 'np.cos', (['PSI'], {}), '(PSI)\n', (3664, 3669), True, 'import numpy as np\n'), ((3690, 3701), 'numpy.sin', 'np.sin', (['PSI'], {}), '(PSI)\n', (3696, 3701), True, 'import numpy as np\n'), ((3703, 3714), 'numpy.cos', 'np.cos', (['PSI'], {}), '(PSI)\n', (3709, 3714), True, 'import numpy as np\n'), ((5831, 5887), 'rospy.set_param', 'rospy.set_param', (["('/move_base/' + 'min_vel_theta')", '(-param)'], {}), "('/move_base/' + 'min_vel_theta', -param)\n", (5846, 5887), False, 'import rospy\n'), ((6648, 6706), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""front/scan"""', 'LaserScan'], {'timeout': '(5)'}), "('front/scan', LaserScan, timeout=5)\n", (6670, 6706), False, 'import rospy\n'), ((10127, 10147), 'numpy.sum', 'np.sum', (['dist'], {'axis': '(0)'}), '(dist, axis=0)\n', (10133, 10147), True, 'import numpy as np\n'), ((10171, 10184), 'numpy.sqrt', 'np.sqrt', (['dist'], {}), '(dist)\n', (10178, 10184), True, 'import numpy as np\n'), ((10999, 11088), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/move_base/global_costmap/costmap"""', 'OccupancyGrid'], {'timeout': '(5)'}), "('/move_base/global_costmap/costmap', OccupancyGrid,\n timeout=5)\n", (11021, 11088), False, 'import rospy\n'), ((3379, 3390), 'numpy.sin', 'np.sin', (['PSI'], {}), '(PSI)\n', (3385, 3390), True, 'import numpy as np\n'), ((3672, 3683), 'numpy.sin', 'np.sin', (['PSI'], {}), '(PSI)\n', (3678, 3683), True, 'import numpy as np\n'), ((10070, 10082), 'numpy.array', 'np.array', (['wp'], {}), '(wp)\n', (10078, 10082), True, 'import numpy as np\n'), ((10083, 10099), 'numpy.array', 'np.array', (['[X, Y]'], {}), '([X, Y])\n', (10091, 10099), True, 'import numpy as np\n')] |
# pylint: disable=invalid-name
from __future__ import absolute_import, division
__license__ = """MIT License
Copyright (c) 2014-2019 <NAME> and <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import warnings
import numpy as n
from .classes import KDE
class gaussian_kde(KDE):
def __init__(self, data, weights=None, kde_values=None, use_cuda=True,
adaptive=False, weight_adaptive_bw=False, alpha=0.3,
bw_method='silverman'):
        if kde_values is not None:
raise NotImplementedError("`kde_values` is not supported for"
" cudakde.")
KDE.__init__(self, data, use_cuda, weights=weights, alpha=alpha,
method=bw_method)
self.weighted = False if weights is None or len(weights) == 0 else True
if adaptive:
if not self.weighted and weight_adaptive_bw:
warnings.warn("Since `weights` aren't given"
" `weight_adaptive_bw` will have no effect!")
self.calcLambdas(weights=weight_adaptive_bw,
weightedCov=weight_adaptive_bw)
else:
self.lambdas = n.ones(self.n)
def __call__(self, points):
points = n.atleast_2d(points)
self.kde(points, weights=self.weighted, weightedCov=self.weighted)
return n.array(self.values)
class bootstrap_kde(object):
def __init__(self, data, niter=10, weights=None, **kwargs):
assert int(niter) == float(niter)
niter = int(niter)
self.kernels = []
self.bootstrap_indices = []
self.data = n.atleast_2d(data)
self.d, self.n = self.data.shape
self.weighted = False if weights is None or len(weights) == 0 else True
        for _ in range(niter):
indices = n.array(self.get_bootstrap_indices())
self.bootstrap_indices.append(indices)
if self.weighted:
kernel = gaussian_kde(data[..., indices],
weights=weights[indices],
**kwargs)
else:
kernel = gaussian_kde(data[..., indices], **kwargs)
self.kernels.append(kernel)
def __call__(self, points):
return self.evaluate(points)
def evaluate(self, points):
points = n.atleast_2d(points)
_, m = points.shape
means, sqmeans = n.zeros(m), n.zeros(m)
for kernel in self.kernels:
values = kernel(points)
means += values
sqmeans += values**2
means /= len(self.kernels)
sqmeans /= len(self.kernels)
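        # bootstrap spread: std = sqrt(E[f^2] - E[f]^2) across the resampled kernels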
errors = n.sqrt(sqmeans - means**2)
return means, errors
def get_bootstrap_indices(self):
bootstrap_indices = n.random.choice(self.n, size=self.n, replace=True)
return bootstrap_indices
| [
"numpy.atleast_2d",
"numpy.sqrt",
"numpy.ones",
"numpy.random.choice",
"numpy.array",
"numpy.zeros",
"warnings.warn"
] | [((2245, 2265), 'numpy.atleast_2d', 'n.atleast_2d', (['points'], {}), '(points)\n', (2257, 2265), True, 'import numpy as n\n'), ((2356, 2376), 'numpy.array', 'n.array', (['self.values'], {}), '(self.values)\n', (2363, 2376), True, 'import numpy as n\n'), ((2625, 2643), 'numpy.atleast_2d', 'n.atleast_2d', (['data'], {}), '(data)\n', (2637, 2643), True, 'import numpy as n\n'), ((3355, 3375), 'numpy.atleast_2d', 'n.atleast_2d', (['points'], {}), '(points)\n', (3367, 3375), True, 'import numpy as n\n'), ((3674, 3702), 'numpy.sqrt', 'n.sqrt', (['(sqmeans - means ** 2)'], {}), '(sqmeans - means ** 2)\n', (3680, 3702), True, 'import numpy as n\n'), ((3796, 3846), 'numpy.random.choice', 'n.random.choice', (['self.n'], {'size': 'self.n', 'replace': '(True)'}), '(self.n, size=self.n, replace=True)\n', (3811, 3846), True, 'import numpy as n\n'), ((2180, 2194), 'numpy.ones', 'n.ones', (['self.n'], {}), '(self.n)\n', (2186, 2194), True, 'import numpy as n\n'), ((3429, 3439), 'numpy.zeros', 'n.zeros', (['m'], {}), '(m)\n', (3436, 3439), True, 'import numpy as n\n'), ((3441, 3451), 'numpy.zeros', 'n.zeros', (['m'], {}), '(m)\n', (3448, 3451), True, 'import numpy as n\n'), ((1900, 1992), 'warnings.warn', 'warnings.warn', (['"""Since `weights` aren\'t given `weight_adaptive_bw` will have no effect!"""'], {}), '(\n "Since `weights` aren\'t given `weight_adaptive_bw` will have no effect!")\n', (1913, 1992), False, 'import warnings\n')] |
import numpy as np
def CIS(occ, F, C, VeeMOspin):
# Make the spin MO fock matrix
Fspin = np.zeros((len(F)*2,len(F)*2))
Cspin = np.zeros((len(F)*2,len(F)*2))
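    # Expand spatial-orbital matrices into spin-orbital blocks: spin orbital p
    # maps to spatial orbital (p+1)//2 (1-based), and entries vanish unless spins match (p%2 == q%2)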
for p in range(1,len(F)*2+1):
for q in range(1,len(F)*2+1):
Fspin[p-1,q-1] = F[(p+1)//2-1,(q+1)//2-1] * (p%2 == q%2)
Cspin[p-1,q-1] = C[(p+1)//2-1,(q+1)//2-1] * (p%2 == q%2)
FMOspin = np.dot(np.transpose(Cspin),np.dot(Fspin,Cspin))
#Construct hamiltonian
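    # CIS singles block: H[ia,jb] = <aj||ib> + F[a,b]*delta_ij - F[i,j]*delta_ab,
    # where <aj||ib> = VeeMOspin[a,j,i,b] - VeeMOspin[a,j,b,i] is the
    # antisymmetrized two-electron MO integral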
H = np.zeros((occ*(len(Fspin)-occ),occ*(len(Fspin)-occ)))
jbidx = -1
for j in range(0, occ):
for b in range(occ, len(Fspin)):
jbidx += 1
iaidx = -1
for i in range(0, occ):
for a in range(occ, len(Fspin)):
iaidx += 1
H[iaidx,jbidx] = VeeMOspin[a,j,i,b] - VeeMOspin[a,j,b,i]
if i == j:
H[iaidx,jbidx] += FMOspin[a,b]
if a == b:
H[iaidx,jbidx] -= FMOspin[i,j]
Exc = np.linalg.eigvalsh(H)
return Exc
| [
"numpy.linalg.eigvalsh",
"numpy.dot",
"numpy.transpose"
] | [((1072, 1093), 'numpy.linalg.eigvalsh', 'np.linalg.eigvalsh', (['H'], {}), '(H)\n', (1090, 1093), True, 'import numpy as np\n'), ((411, 430), 'numpy.transpose', 'np.transpose', (['Cspin'], {}), '(Cspin)\n', (423, 430), True, 'import numpy as np\n'), ((431, 451), 'numpy.dot', 'np.dot', (['Fspin', 'Cspin'], {}), '(Fspin, Cspin)\n', (437, 451), True, 'import numpy as np\n')] |
# michaelpeterswa
# kulo.py
import csv
import geojson
import datetime
import numpy as np
from shapely.geometry import shape, MultiPolygon, Polygon, Point
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard
input_file = "../data/Washington_Large_Fires_1973-2019.geojson"
def loadData(filename):
"""
Loads GeoJson Data from "filename"
"""
with open(filename) as f:
data = geojson.load(f)
return data
def returnMaxAcreage(fire_data):
"""
return maximum acreage
"""
fire_max = 0
for fire in fire_data:
if fire["properties"]["ACRES"] >= fire_max:
fire_max = fire["properties"]["ACRES"]
return fire_max
def createPolygon(fire):
"""
create a Polygon object from list of points
"""
points = []
for coordinate in fire["geometry"]["coordinates"][0]:
points.append(tuple(coordinate))
polygon = Polygon(points)
return polygon
def createPolygonFromMulti(fire):
"""
https://gis.stackexchange.com/questions/166934/python-library-for-converting-geojson-multi-polygon-to-polygon
"""
multipolygon = [x.buffer(0) for x in shape(fire["geometry"]).buffer(0).geoms]
max_poly = max(multipolygon, key=lambda a: a.area)
return max_poly
def generateCentroid(polygon):
"""
calculate and return centroid of a polygon
"""
return list(polygon.centroid.coords)
def isMultiPolygonal(fire):
"""
return true if the object is a MultiPolygon
"""
return True if fire["geometry"]["type"] == "MultiPolygon" else False
def normalizeFireData(fire_data, max_acres, lat_div=100, long_div=200):
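    # Roughly normalize features for training: latitude/longitude by fixed
    # divisors (defaults 100 and 200) and acreage by the largest observed fire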
fire_data_list = []
for fire in fire_data:
        fire_size = fire[1]["ACRES"] / max_acres
fire_lat = fire[0][0][1] / lat_div
fire_long = fire[0][0][0] / long_div
fire_data_list.append([fire_lat, fire_long, fire_size])
fire_data_nparray = np.array(fire_data_list)
return fire_data_nparray
if __name__ == "__main__":
fire_data = loadData(input_file)
fire_data = fire_data["features"]
max_acreage = returnMaxAcreage(fire_data)
print(max_acreage)
lat_amt = 100
long_amt = 100
results = []
for fire in fire_data:
poly = createPolygonFromMulti(fire) if isMultiPolygonal(fire) else createPolygon(fire)
fire_centroid = generateCentroid(poly)
results.append((fire_centroid, fire["properties"]))
normalized_fire_data = normalizeFireData(results, max_acreage, lat_amt, long_amt)
with open("../data/cleaned_data.csv", 'w', newline='') as f:
csv_obj = csv.writer(f)
csv_obj.writerows(normalized_fire_data)
# #-----------------------------
# X = normalized_fire_data[:,0:2]
# y = normalized_fire_data[:,2]
# model = Sequential()
# model.add(Dense(4, input_dim=2, activation='relu'))
# model.add(Dense(4, activation='relu'))
# model.add(Dense(1, activation='linear'))
# model.compile(loss='mse', optimizer='adam')
# log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
# model.fit(X, y, batch_size=3, epochs=1000, callbacks=[tensorboard_callback], verbose=1)
# results = model.evaluate(X, y)
# model.save("../kulo_model")
# print("Loss: ", results)
# test_lat = 48.383549
# test_long = -120.009935
# samples = [(test_lat / lat_amt, test_long / long_amt)]
# npsamples = np.array(samples)
# predictions = model.predict(samples)
# result_acres = predictions[0][0] * max_acreage
# print("final result for: (", test_lat, ",", test_long, ") at ", result_acres, "acres" ) | [
"csv.writer",
"numpy.array",
"shapely.geometry.Polygon",
"shapely.geometry.shape",
"geojson.load"
] | [((950, 965), 'shapely.geometry.Polygon', 'Polygon', (['points'], {}), '(points)\n', (957, 965), False, 'from shapely.geometry import shape, MultiPolygon, Polygon, Point\n'), ((1960, 1984), 'numpy.array', 'np.array', (['fire_data_list'], {}), '(fire_data_list)\n', (1968, 1984), True, 'import numpy as np\n'), ((454, 469), 'geojson.load', 'geojson.load', (['f'], {}), '(f)\n', (466, 469), False, 'import geojson\n'), ((2644, 2657), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2654, 2657), False, 'import csv\n'), ((1191, 1214), 'shapely.geometry.shape', 'shape', (["fire['geometry']"], {}), "(fire['geometry'])\n", (1196, 1214), False, 'from shapely.geometry import shape, MultiPolygon, Polygon, Point\n')] |
import numpy as np
import theano
import theano.tensor as T
from nose.tools import assert_true
from numpy.testing import assert_equal, assert_array_equal
from smartlearner.interfaces.dataset import Dataset
floatX = theano.config.floatX
ALL_DTYPES = np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']
def test_dataset_used_in_theano_function():
rng = np.random.RandomState(1234)
nb_examples = 10
inputs = (rng.randn(nb_examples, 5) * 100).astype(floatX)
targets = (rng.randn(nb_examples, 1) > 0.5).astype(floatX)
dataset = Dataset(inputs, targets)
input_sqr_norm = T.sum(dataset.symb_inputs**2)
result = input_sqr_norm - dataset.symb_targets
f = theano.function([dataset.symb_inputs, dataset.symb_targets], result)
assert_array_equal(f(inputs, targets), np.sum(inputs**2)-targets)
def test_dataset_without_targets():
rng = np.random.RandomState(1234)
nb_examples = 10
nb_features = 3
sequences_length = 4
nb_channels = 2
image_shape = (5, 5)
# Test creating dataset with different example shapes:
    # scalar feature, vector features, sequence of vector features, multi-channel image features.
for example_shape in [(), (nb_features,), (sequences_length, nb_features), (nb_channels,)+image_shape]:
inputs_shape = (nb_examples,) + example_shape
for dtype in ALL_DTYPES:
inputs = (rng.randn(*inputs_shape) * 100).astype(dtype)
dataset = Dataset(inputs)
# Data should be converted into `floatX`.
assert_equal(dataset.inputs.dtype, floatX)
assert_equal(dataset.symb_inputs.dtype, floatX)
assert_equal(dataset.symb_inputs.ndim, inputs.ndim)
assert_equal(dataset.input_shape, example_shape)
assert_array_equal(dataset.inputs.get_value(), inputs.astype(floatX))
# Everything related to target should be None
assert_true(dataset.targets is None)
assert_true(dataset.symb_targets is None)
assert_true(dataset.target_shape is None)
assert_true(dataset.target_size is None)
# Create dataset from nested Pyton lists.
inputs = [[1, 2, 3]] * nb_examples
dataset = Dataset(inputs)
# Data should be converted into `floatX`.
assert_equal(dataset.inputs.dtype, floatX)
assert_equal(dataset.symb_inputs.dtype, floatX)
assert_equal(dataset.symb_inputs.ndim, 2)
assert_equal(dataset.input_shape, (3,))
assert_array_equal(dataset.inputs.get_value(), np.array(inputs, dtype=floatX))
def test_dataset_with_targets():
rng = np.random.RandomState(1234)
nb_examples = 10
nb_features = 3
sequences_length = 4
nb_channels = 2
image_shape = (5, 5)
# Test creating dataset with different example shapes and target shapes:
    # scalar feature, vector features, sequence of vector features, multi-channel image features.
for target_shape in [(), (nb_features,), (sequences_length, nb_features), (nb_channels,)+image_shape]:
for example_shape in [(), (nb_features,), (sequences_length, nb_features), (nb_channels,)+image_shape]:
inputs_shape = (nb_examples,) + example_shape
targets_shape = (nb_examples,) + target_shape
for example_dtype in ALL_DTYPES:
for target_dtype in ALL_DTYPES:
inputs = (rng.randn(*inputs_shape) * 100).astype(example_dtype)
targets = (rng.randn(*targets_shape) * 100).astype(target_dtype)
dataset = Dataset(inputs, targets)
# Data should be converted into `floatX`.
assert_equal(dataset.inputs.dtype, floatX)
assert_equal(dataset.symb_inputs.dtype, floatX)
assert_equal(dataset.symb_inputs.ndim, inputs.ndim)
assert_equal(dataset.input_shape, example_shape)
assert_array_equal(dataset.inputs.get_value(), inputs.astype(floatX))
assert_equal(dataset.targets.dtype, floatX)
assert_equal(dataset.symb_targets.dtype, floatX)
assert_equal(dataset.symb_targets.ndim, targets.ndim)
assert_equal(dataset.target_shape, target_shape)
assert_array_equal(dataset.targets.get_value(), targets.astype(floatX))
# Create dataset from nested Pyton lists.
inputs = [[1, 2, 3]] * nb_examples
targets = [[1, 2, 3]] * nb_examples
dataset = Dataset(inputs, targets)
# Data should be converted into `floatX`.
assert_equal(dataset.inputs.dtype, floatX)
assert_equal(dataset.symb_inputs.dtype, floatX)
assert_equal(dataset.symb_inputs.ndim, 2)
assert_equal(dataset.input_shape, (3,))
assert_array_equal(dataset.inputs.get_value(), np.array(inputs, dtype=floatX))
assert_equal(dataset.targets.dtype, floatX)
assert_equal(dataset.symb_targets.dtype, floatX)
assert_equal(dataset.symb_targets.ndim, 2)
assert_equal(dataset.target_shape, (3,))
assert_array_equal(dataset.targets.get_value(), np.array(targets, dtype=floatX))
def test_dataset_with_test_value():
rng = np.random.RandomState(1234)
nb_examples = 10
theano.config.compute_test_value = 'warn'
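    # with compute_test_value enabled, Theano eagerly evaluates every symbolic op
    # on the attached test values, so shape/type errors surface at graph-build time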
try:
inputs = (rng.randn(nb_examples, 5) * 100).astype(floatX)
targets = (rng.randn(nb_examples, 1) > 0.5).astype(floatX)
dataset = Dataset(inputs, targets)
input_sqr_norm = T.sum(dataset.symb_inputs**2)
result = input_sqr_norm - dataset.symb_targets
assert_array_equal(result.tag.test_value, np.sum(inputs**2)-targets)
finally:
theano.config.compute_test_value = 'off'
| [
"numpy.testing.assert_equal",
"theano.function",
"theano.tensor.sum",
"smartlearner.interfaces.dataset.Dataset",
"numpy.array",
"numpy.sum",
"nose.tools.assert_true",
"numpy.random.RandomState"
] | [((368, 395), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (389, 395), True, 'import numpy as np\n'), ((558, 582), 'smartlearner.interfaces.dataset.Dataset', 'Dataset', (['inputs', 'targets'], {}), '(inputs, targets)\n', (565, 582), False, 'from smartlearner.interfaces.dataset import Dataset\n'), ((605, 636), 'theano.tensor.sum', 'T.sum', (['(dataset.symb_inputs ** 2)'], {}), '(dataset.symb_inputs ** 2)\n', (610, 636), True, 'import theano.tensor as T\n'), ((694, 762), 'theano.function', 'theano.function', (['[dataset.symb_inputs, dataset.symb_targets]', 'result'], {}), '([dataset.symb_inputs, dataset.symb_targets], result)\n', (709, 762), False, 'import theano\n'), ((882, 909), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (903, 909), True, 'import numpy as np\n'), ((2233, 2248), 'smartlearner.interfaces.dataset.Dataset', 'Dataset', (['inputs'], {}), '(inputs)\n', (2240, 2248), False, 'from smartlearner.interfaces.dataset import Dataset\n'), ((2299, 2341), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.inputs.dtype', 'floatX'], {}), '(dataset.inputs.dtype, floatX)\n', (2311, 2341), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((2346, 2393), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_inputs.dtype', 'floatX'], {}), '(dataset.symb_inputs.dtype, floatX)\n', (2358, 2393), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((2398, 2439), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_inputs.ndim', '(2)'], {}), '(dataset.symb_inputs.ndim, 2)\n', (2410, 2439), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((2444, 2483), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.input_shape', '(3,)'], {}), '(dataset.input_shape, (3,))\n', (2456, 2483), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((2612, 2639), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (2633, 2639), True, 'import numpy as np\n'), ((4520, 4544), 'smartlearner.interfaces.dataset.Dataset', 'Dataset', (['inputs', 'targets'], {}), '(inputs, targets)\n', (4527, 4544), False, 'from smartlearner.interfaces.dataset import Dataset\n'), ((4595, 4637), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.inputs.dtype', 'floatX'], {}), '(dataset.inputs.dtype, floatX)\n', (4607, 4637), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((4642, 4689), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_inputs.dtype', 'floatX'], {}), '(dataset.symb_inputs.dtype, floatX)\n', (4654, 4689), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((4694, 4735), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_inputs.ndim', '(2)'], {}), '(dataset.symb_inputs.ndim, 2)\n', (4706, 4735), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((4740, 4779), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.input_shape', '(3,)'], {}), '(dataset.input_shape, (3,))\n', (4752, 4779), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((4868, 4911), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.targets.dtype', 'floatX'], {}), '(dataset.targets.dtype, floatX)\n', (4880, 4911), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((4916, 4964), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_targets.dtype', 'floatX'], {}), '(dataset.symb_targets.dtype, 
floatX)\n', (4928, 4964), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((4969, 5011), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_targets.ndim', '(2)'], {}), '(dataset.symb_targets.ndim, 2)\n', (4981, 5011), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((5016, 5056), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.target_shape', '(3,)'], {}), '(dataset.target_shape, (3,))\n', (5028, 5056), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((5190, 5217), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (5211, 5217), True, 'import numpy as np\n'), ((2535, 2565), 'numpy.array', 'np.array', (['inputs'], {'dtype': 'floatX'}), '(inputs, dtype=floatX)\n', (2543, 2565), True, 'import numpy as np\n'), ((4831, 4861), 'numpy.array', 'np.array', (['inputs'], {'dtype': 'floatX'}), '(inputs, dtype=floatX)\n', (4839, 4861), True, 'import numpy as np\n'), ((5109, 5140), 'numpy.array', 'np.array', (['targets'], {'dtype': 'floatX'}), '(targets, dtype=floatX)\n', (5117, 5140), True, 'import numpy as np\n'), ((5447, 5471), 'smartlearner.interfaces.dataset.Dataset', 'Dataset', (['inputs', 'targets'], {}), '(inputs, targets)\n', (5454, 5471), False, 'from smartlearner.interfaces.dataset import Dataset\n'), ((5498, 5529), 'theano.tensor.sum', 'T.sum', (['(dataset.symb_inputs ** 2)'], {}), '(dataset.symb_inputs ** 2)\n', (5503, 5529), True, 'import theano.tensor as T\n'), ((807, 826), 'numpy.sum', 'np.sum', (['(inputs ** 2)'], {}), '(inputs ** 2)\n', (813, 826), True, 'import numpy as np\n'), ((1471, 1486), 'smartlearner.interfaces.dataset.Dataset', 'Dataset', (['inputs'], {}), '(inputs)\n', (1478, 1486), False, 'from smartlearner.interfaces.dataset import Dataset\n'), ((1554, 1596), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.inputs.dtype', 'floatX'], {}), '(dataset.inputs.dtype, floatX)\n', (1566, 1596), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((1609, 1656), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_inputs.dtype', 'floatX'], {}), '(dataset.symb_inputs.dtype, floatX)\n', (1621, 1656), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((1669, 1720), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_inputs.ndim', 'inputs.ndim'], {}), '(dataset.symb_inputs.ndim, inputs.ndim)\n', (1681, 1720), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((1733, 1781), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.input_shape', 'example_shape'], {}), '(dataset.input_shape, example_shape)\n', (1745, 1781), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((1935, 1971), 'nose.tools.assert_true', 'assert_true', (['(dataset.targets is None)'], {}), '(dataset.targets is None)\n', (1946, 1971), False, 'from nose.tools import assert_true\n'), ((1984, 2025), 'nose.tools.assert_true', 'assert_true', (['(dataset.symb_targets is None)'], {}), '(dataset.symb_targets is None)\n', (1995, 2025), False, 'from nose.tools import assert_true\n'), ((2038, 2079), 'nose.tools.assert_true', 'assert_true', (['(dataset.target_shape is None)'], {}), '(dataset.target_shape is None)\n', (2049, 2079), False, 'from nose.tools import assert_true\n'), ((2092, 2132), 'nose.tools.assert_true', 'assert_true', (['(dataset.target_size is None)'], {}), '(dataset.target_size is None)\n', (2103, 2132), False, 'from nose.tools import assert_true\n'), ((5633, 5652), 'numpy.sum', 
'np.sum', (['(inputs ** 2)'], {}), '(inputs ** 2)\n', (5639, 5652), True, 'import numpy as np\n'), ((3561, 3585), 'smartlearner.interfaces.dataset.Dataset', 'Dataset', (['inputs', 'targets'], {}), '(inputs, targets)\n', (3568, 3585), False, 'from smartlearner.interfaces.dataset import Dataset\n'), ((3669, 3711), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.inputs.dtype', 'floatX'], {}), '(dataset.inputs.dtype, floatX)\n', (3681, 3711), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((3732, 3779), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_inputs.dtype', 'floatX'], {}), '(dataset.symb_inputs.dtype, floatX)\n', (3744, 3779), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((3800, 3851), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_inputs.ndim', 'inputs.ndim'], {}), '(dataset.symb_inputs.ndim, inputs.ndim)\n', (3812, 3851), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((3872, 3920), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.input_shape', 'example_shape'], {}), '(dataset.input_shape, example_shape)\n', (3884, 3920), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((4032, 4075), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.targets.dtype', 'floatX'], {}), '(dataset.targets.dtype, floatX)\n', (4044, 4075), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((4096, 4144), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_targets.dtype', 'floatX'], {}), '(dataset.symb_targets.dtype, floatX)\n', (4108, 4144), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((4165, 4218), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.symb_targets.ndim', 'targets.ndim'], {}), '(dataset.symb_targets.ndim, targets.ndim)\n', (4177, 4218), False, 'from numpy.testing import assert_equal, assert_array_equal\n'), ((4239, 4287), 'numpy.testing.assert_equal', 'assert_equal', (['dataset.target_shape', 'target_shape'], {}), '(dataset.target_shape, target_shape)\n', (4251, 4287), False, 'from numpy.testing import assert_equal, assert_array_equal\n')] |
import tornado.web
import json
import cStringIO
from collections import defaultdict
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from status.util import dthandler, SafeHandler
#TODO - Have date slider to select range
#TODO - Ask if anyone uses it
class BarcodeVsExpectedDataHandler(SafeHandler):
""" Serves series with number of matched reads to a barcode compared
to the expected number of reads matched to a barcode.
Loaded through /api/v1/expected url
"""
def get(self):
self.set_header("Content-type", "application/json")
self.write(json.dumps(self.yield_difference(), default=dthandler))
def yield_difference(self):
fc_lanes_total_reads = {}
for row in self.application.samples_db.view("barcodes/read_counts",
group_level=2):
fc_lanes_total_reads[tuple(row.key)] = row.value
fc_lanes_unmatched_reads = {}
for row in self.application.flowcells_db.view("lanes/unmatched", reduce=False):
fc_lanes_unmatched_reads[tuple(row.key)] = row.value
fc_lanes_sample_count = {}
for row in self.application.samples_db.view("lanes/count", group_level=2):
fc_lanes_sample_count[tuple(row.key)] = max(row.value - 1, 1)
fc_lane_expected_yield = {}
for k in fc_lanes_total_reads.keys():
fc_lane_expected_yield[k] = \
((fc_lanes_total_reads[k] - fc_lanes_unmatched_reads.get(k, 0))
/ fc_lanes_sample_count[k])
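        # Expected yield per barcode = (total lane reads - unmatched reads)
        # divided by the number of samples in the lane, i.e. the read count
        # each sample would receive if reads were split evenly.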
barcode_relation = defaultdict(list)
for fc_lane, expected_yield in fc_lane_expected_yield.items():
fc_l = list(fc_lane)
rc_view = self.application.samples_db.view("barcodes/read_counts",
reduce=False)
for row in rc_view[fc_l + [""]: fc_l + ["Z"]]:
try:
barcode_relation[row.key[-1]].append(float(row.value) / expected_yield)
except ZeroDivisionError:
pass
processed_relation = barcode_relation.iteritems()
processed_relation = filter(lambda l: len(l[1]) >= 50, processed_relation)
processed_relation.sort(key=lambda l: np.median(l[1]))
return processed_relation
class BarcodeVsExpectedPlotHandler(BarcodeVsExpectedDataHandler):
""" Serves a boxplot of expected yields vs matched yields for top
present barcodes.
Loaded through /api/v1/plot/barcodes_vs_expected([^/]*)$ url
"""
def get(self, graph_type):
processed_relation = self.yield_difference()
# Filter data
plot_data = []
plot_labels = []
for l in processed_relation:
if graph_type == "_single.png" and "-" not in l[0]:
plot_data.append(l[1])
plot_labels.append(l[0])
elif graph_type == "_double.png" and "-" in l[0]:
plot_data.append(l[1])
plot_labels.append(l[0])
elif graph_type == ".png":
plot_data.append(l[1])
plot_labels.append(l[0])
if graph_type == "_single.png":
fig = Figure(figsize=[12, 10])
elif graph_type == "_double.png":
fig = Figure(figsize=[12, 40])
else:
fig = Figure(figsize=[12, 50])
ax = fig.add_axes([0.2, 0.1, 0.8, 0.9])
ax.boxplot(plot_data, 0, '', 0)
ax.set_xlabel("log(matched yield / expected yield)")
ax.set_ylabel("Barcode")
ax.set_yticklabels(plot_labels, family='monospace')
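        # Attach an Agg (raster) canvas so the figure can be rendered into a
        # PNG buffer below without needing a display backend.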
FigureCanvasAgg(fig)
buf = cStringIO.StringIO()
fig.savefig(buf, format="png")
data = buf.getvalue()
self.set_header("Content-Type", "image/png")
self.set_header("Content-Length", len(data))
self.write(data)
class ExpectedHandler(SafeHandler):
""" Serves a page with a boxplots of expected yield compared to matched
yields for all runs of top bar codes.
"""
def get(self):
t = self.application.loader.load("barcode_vs_expected.html")
self.write(t.generate(gs_globals=self.application.gs_globals, user=self.get_current_user_name(), deprecated=True))
| [
"numpy.median",
"cStringIO.StringIO",
"matplotlib.figure.Figure",
"collections.defaultdict",
"matplotlib.backends.backend_agg.FigureCanvasAgg"
] | [((1641, 1658), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1652, 1658), False, 'from collections import defaultdict\n'), ((3703, 3723), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvasAgg', (['fig'], {}), '(fig)\n', (3718, 3723), False, 'from matplotlib.backends.backend_agg import FigureCanvasAgg\n'), ((3739, 3759), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (3757, 3759), False, 'import cStringIO\n'), ((3282, 3306), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '[12, 10]'}), '(figsize=[12, 10])\n', (3288, 3306), False, 'from matplotlib.figure import Figure\n'), ((3367, 3391), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '[12, 40]'}), '(figsize=[12, 40])\n', (3373, 3391), False, 'from matplotlib.figure import Figure\n'), ((3424, 3448), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '[12, 50]'}), '(figsize=[12, 50])\n', (3430, 3448), False, 'from matplotlib.figure import Figure\n'), ((2340, 2355), 'numpy.median', 'np.median', (['l[1]'], {}), '(l[1])\n', (2349, 2355), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
import time
import numpy as np
import os
root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
import sys
sys.path.append(root_dir)
from adversarial_robustness.cnns import *
from adversarial_robustness.datasets.svhn import SVHN
from adversarial_robustness.datasets.notmnist import notMNIST
from adversarial_robustness.datasets.mnist import MNIST
import pickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--savedir", type=str,
help="Place to save model")
parser.add_argument(
"--name", type=str, default="",
help="Model name")
parser.add_argument(
"--dataset", type=str, default="",
help="Dataset")
parser.add_argument(
"--l2cs", type=float, default=0.0,
help="L2 certainty sensitivity penalty")
parser.add_argument(
"--l2dbl", type=float, default=0.0,
help="L2 double backprop penalty")
parser.add_argument(
"--lr", type=float, default=0.0002,
help="learning rate")
parser.add_argument(
"--adameps", type=float, default=1e-04,
help="adam epsilon")
parser.add_argument(
"--advtraineps", type=float, default=0.0,
help="adversarial training epsilon")
parser.add_argument(
"--distilltemp", type=float, default=1.0,
help="temperature for distillation")
parser.add_argument(
"--batchsize", type=int, default=256,
help="batch size")
parser.add_argument(
"--nbatches", type=int, default=15000,
help="number of batches")
FLAGS = parser.parse_args()
name = FLAGS.name
model_dir = FLAGS.savedir
adv_X_dir = root_dir + '/cached/fgsm'
if FLAGS.dataset == 'mnist':
dataset = MNIST()
CNN = MNIST_CNN
fgsm_file = adv_X_dir + '/mnist-normal-fgsm-perturbation.npy'
elif FLAGS.dataset == 'notmnist':
dataset = notMNIST()
CNN = MNIST_CNN
fgsm_file = adv_X_dir + '/notmnist-normal-fgsm-perturbation.npy'
elif FLAGS.dataset == 'svhn':
  dataset = SVHN()
  CNN = SVHN_CNN
  fgsm_file = adv_X_dir + '/svhn-normal-fgsm-perturbation.npy'
else:
  # Fail fast instead of hitting a NameError later when `dataset` is used.
  raise ValueError('Unknown --dataset: {}'.format(FLAGS.dataset))
X = dataset.X
y = dataset.onehot_y
Xt = dataset.Xt[:1024]
yt = dataset.onehot_yt[:1024]
clip_min = dataset.X.min()
clip_max = dataset.X.max()
dX = np.sign(np.load(fgsm_file))[:1024]
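# FGSM adversarial examples: x_adv = clip(x + eps * sign(g), min, max), where
# g is the cached gradient/perturbation loaded above (only its sign is used).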
def _fgsm(eps):
return np.clip(Xt[:len(dX)] + eps * dX, clip_min, clip_max)
fgsm = { 0.1: _fgsm(0.1), 0.2: _fgsm(0.2), 0.3: _fgsm(0.3) }
epses = [0.1, 0.2, 0.3]
scores = {}
train_curves = {}
train_curves['batch_number'] = []
train_curves['batch_accuracy'] = []
train_curves['cross_entropy'] = []
train_curves['l2_grad_logp_true'] = []
train_curves['l2_grad_logp_rest'] = []
train_curves['l2_grad_logp_all'] = []
train_curves['l2_param_grads'] = []
train_curves['adv_accuracy'] = []
train_curves['test_accuracy'] = []
batch_size = FLAGS.batchsize
num_batches = FLAGS.nbatches
num_epochs = int(np.ceil(num_batches / (len(X) / batch_size)))
print(num_epochs)
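# Defensive distillation (active when --distilltemp > 1): first train a
# teacher network at temperature T, then replace the one-hot labels with the
# teacher's temperature-softened softmax outputs before training the main model.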
if FLAGS.distilltemp > 1.01:
print('distillation')
num_batches2 = min(FLAGS.nbatches, 10000)
num_epochs2 = int(np.ceil(num_batches2 / (len(X) / batch_size)))
cnn2 = CNN()
cnn2.fit(X, y, softmax_temperature=FLAGS.distilltemp, learning_rate=FLAGS.lr, epsilon=FLAGS.adameps, num_epochs=num_epochs2, batch_size=batch_size)
yhat = tf.nn.softmax(cnn2.logits/FLAGS.distilltemp)
with tf.Session() as sess:
cnn2.init(sess)
ysmooth = yhat.eval(feed_dict={ cnn2.X: X[:1000] })
for i in range(1000, len(X), 1000):
ysmooth = np.vstack((ysmooth, yhat.eval(feed_dict={ cnn2.X: X[i:i+1000] })))
y = ysmooth
tf.reset_default_graph()
cnn = CNN()
cnn.l2_grad_logp_all = tf.nn.l2_loss(tf.gradients(cnn.logps, cnn.X)[0])
cnn.l2_grad_logp_true = tf.nn.l2_loss(tf.gradients(cnn.logps * cnn.y, cnn.X)[0])
cnn.l2_grad_logp_rest = tf.nn.l2_loss(tf.gradients(cnn.logps * (1-cnn.y), cnn.X)[0])
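# Input-gradient penalties: each l2_grad_logp_* term is the L2 norm of
# d(log p)/dx restricted to all classes, the true class, or the remaining
# classes; they are presumably weighted inside cnn.loss_function via the
# l2cs (certainty sensitivity) and l2dbl (double backprop) flags below.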
optimizer = tf.train.AdamOptimizer(
learning_rate=FLAGS.lr,
epsilon=FLAGS.adameps)
loss_fn = cnn.loss_function(
softmax_temperature=FLAGS.distilltemp,
l2_certainty_sensitivity=FLAGS.l2cs,
l2_double_backprop=FLAGS.l2dbl)
if FLAGS.advtraineps > 1e-06:
print('adversarial training')
adv_loss = cnn.adversarial_training_loss(FLAGS.advtraineps, clip_min, clip_max)
loss_fn = (loss_fn + adv_loss) / 2.0
gradients, variables = zip(*optimizer.compute_gradients(loss_fn))
cnn.l2_param_grads = tf.add_n([tf.nn.l2_loss(g) for g in gradients])
cnn.train_op = optimizer.apply_gradients(zip(gradients, variables))
batches = cnn.minibatches({ 'X': X, 'y': y }, batch_size=batch_size, num_epochs=num_epochs)
t = time.time()
i = 0
checkpoint_interval = 2500
print_interval = 500
curve_interval = 100
filenames = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch in batches:
batch[cnn.is_train] = True
_, loss = sess.run([cnn.train_op, loss_fn], feed_dict=batch)
if i % checkpoint_interval == 0:
cnn.vals = [v.eval() for v in cnn.vars]
filename = model_dir+'/{}-batch{}-cnn.pkl'.format(name, i)
cnn.save(filename)
filenames.append(filename)
with open(model_dir+'/{}-batch{}-train-curves.pkl'.format(name,i), 'wb') as f:
pickle.dump(train_curves, f)
if i % print_interval == 0:
print('Batch {}, loss {}, {}s'.format(i, loss, time.time() - t))
if i % curve_interval == 0:
values = sess.run([
cnn.accuracy,
cnn.l2_grad_logp_true,
cnn.l2_grad_logp_rest,
cnn.l2_grad_logp_all,
cnn.l2_param_grads,
cnn.cross_entropy,
], feed_dict=batch)
train_curves['batch_number'].append(i)
train_curves['batch_accuracy'].append(values[0])
train_curves['l2_grad_logp_true'].append(values[1])
train_curves['l2_grad_logp_rest'].append(values[2])
train_curves['l2_grad_logp_all'].append(values[3])
train_curves['l2_param_grads'].append(values[4])
train_curves['cross_entropy'].append(values[5])
train_curves['adv_accuracy'].append(sess.run(cnn.accuracy, feed_dict={ cnn.X: fgsm[epses[1]][:512], cnn.y: yt[:512] }))
train_curves['test_accuracy'].append(sess.run(cnn.accuracy, feed_dict={ cnn.X: Xt[:512], cnn.y: yt[:512] }))
i += 1
cnn.vals = [v.eval() for v in cnn.vars]
filename = model_dir+'/{}-cnn.pkl'.format(name)
cnn.save(filename)
filenames.append(filename)
for filename in filenames:
cnn2 = CNN()
cnn2.load(filename)
cnn2.save(filename)
with open(model_dir+'/{}-train-curves.pkl'.format(name), 'wb') as f:
pickle.dump(train_curves, f)
for key, values in train_curves.items():
if key == 'batch_number':
continue
fig = plt.figure()
plt.plot(train_curves['batch_number'], values, marker='o', lw=2)
plt.title(key)
plt.xlabel('Minibatch')
plt.ylabel(key)
if 'grad' in key:
plt.yscale('log')
plt.savefig(model_dir+'/{}-traincurves-{}.png'.format(name,key))
plt.close(fig)
scores[(name, 'norm')] = cnn.score(Xt, yt).accuracy
for eps in epses:
scores[(name, eps)] = cnn.score(fgsm[eps], yt[:len(fgsm[eps])]).accuracy
print(scores)
with open(model_dir+'/{}-scores.pkl'.format(name), 'wb') as f:
pickle.dump(scores, f)
with open(model_dir+'/{}-flags.pkl'.format(name), 'wb') as f:
pickle.dump(vars(FLAGS), f)
| [
"matplotlib.pyplot.ylabel",
"tensorflow.gradients",
"adversarial_robustness.datasets.mnist.MNIST",
"tensorflow.nn.softmax",
"sys.path.append",
"adversarial_robustness.datasets.notmnist.notMNIST",
"argparse.ArgumentParser",
"tensorflow.Session",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((222, 247), 'sys.path.append', 'sys.path.append', (['root_dir'], {}), '(root_dir)\n', (237, 247), False, 'import sys\n'), ((502, 527), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (525, 527), False, 'import argparse\n'), ((3533, 3557), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3555, 3557), True, 'import tensorflow as tf\n'), ((3821, 3890), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'FLAGS.lr', 'epsilon': 'FLAGS.adameps'}), '(learning_rate=FLAGS.lr, epsilon=FLAGS.adameps)\n', (3843, 3890), True, 'import tensorflow as tf\n'), ((4535, 4546), 'time.time', 'time.time', ([], {}), '()\n', (4544, 4546), False, 'import time\n'), ((1699, 1706), 'adversarial_robustness.datasets.mnist.MNIST', 'MNIST', ([], {}), '()\n', (1704, 1706), False, 'from adversarial_robustness.datasets.mnist import MNIST\n'), ((3245, 3291), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(cnn2.logits / FLAGS.distilltemp)'], {}), '(cnn2.logits / FLAGS.distilltemp)\n', (3258, 3291), True, 'import tensorflow as tf\n'), ((4642, 4654), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4652, 4654), True, 'import tensorflow as tf\n'), ((6444, 6472), 'pickle.dump', 'pickle.dump', (['train_curves', 'f'], {}), '(train_curves, f)\n', (6455, 6472), False, 'import pickle\n'), ((6564, 6576), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6574, 6576), True, 'import matplotlib.pyplot as plt\n'), ((6579, 6643), 'matplotlib.pyplot.plot', 'plt.plot', (["train_curves['batch_number']", 'values'], {'marker': '"""o"""', 'lw': '(2)'}), "(train_curves['batch_number'], values, marker='o', lw=2)\n", (6587, 6643), True, 'import matplotlib.pyplot as plt\n'), ((6646, 6660), 'matplotlib.pyplot.title', 'plt.title', (['key'], {}), '(key)\n', (6655, 6660), True, 'import matplotlib.pyplot as plt\n'), ((6663, 6686), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Minibatch"""'], {}), "('Minibatch')\n", (6673, 6686), True, 'import matplotlib.pyplot as plt\n'), ((6689, 6704), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['key'], {}), '(key)\n', (6699, 6704), True, 'import matplotlib.pyplot as plt\n'), ((6816, 6830), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6825, 6830), True, 'import matplotlib.pyplot as plt\n'), ((7057, 7079), 'pickle.dump', 'pickle.dump', (['scores', 'f'], {}), '(scores, f)\n', (7068, 7079), False, 'import pickle\n'), ((177, 202), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (192, 202), False, 'import os\n'), ((1835, 1845), 'adversarial_robustness.datasets.notmnist.notMNIST', 'notMNIST', ([], {}), '()\n', (1843, 1845), False, 'from adversarial_robustness.datasets.notmnist import notMNIST\n'), ((2217, 2235), 'numpy.load', 'np.load', (['fgsm_file'], {}), '(fgsm_file)\n', (2224, 2235), True, 'import numpy as np\n'), ((3297, 3309), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3307, 3309), True, 'import tensorflow as tf\n'), ((3607, 3637), 'tensorflow.gradients', 'tf.gradients', (['cnn.logps', 'cnn.X'], {}), '(cnn.logps, cnn.X)\n', (3619, 3637), True, 'import tensorflow as tf\n'), ((3680, 3718), 'tensorflow.gradients', 'tf.gradients', (['(cnn.logps * cnn.y)', 'cnn.X'], {}), '(cnn.logps * cnn.y, cnn.X)\n', (3692, 3718), True, 'import tensorflow as tf\n'), ((3761, 3805), 'tensorflow.gradients', 'tf.gradients', 
(['(cnn.logps * (1 - cnn.y))', 'cnn.X'], {}), '(cnn.logps * (1 - cnn.y), cnn.X)\n', (3773, 3805), True, 'import tensorflow as tf\n'), ((4332, 4348), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['g'], {}), '(g)\n', (4345, 4348), True, 'import tensorflow as tf\n'), ((4675, 4708), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4706, 4708), True, 'import tensorflow as tf\n'), ((6729, 6746), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (6739, 6746), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1979), 'adversarial_robustness.datasets.svhn.SVHN', 'SVHN', ([], {}), '()\n', (1977, 1979), False, 'from adversarial_robustness.datasets.svhn import SVHN\n'), ((5129, 5157), 'pickle.dump', 'pickle.dump', (['train_curves', 'f'], {}), '(train_curves, f)\n', (5140, 5157), False, 'import pickle\n'), ((5243, 5254), 'time.time', 'time.time', ([], {}), '()\n', (5252, 5254), False, 'import time\n')] |
import os, sys
import numpy as np
import time
import argparse
import traceback
import glob
import trimesh
import math
import shutil
import json
import open3d as o3d
from tqdm import tqdm
import ctypes
import logging
from contextlib import closing
import multiprocessing as mp
from multiprocessing import Pool
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import config as cfg
from utils.pcd_utils import BBox
info = mp.get_logger().info
def compute_global_bbox():
logger = mp.log_to_stderr()
logger.setLevel(logging.INFO)
# create shared array for bbox
shared_bbox = mp.Array(ctypes.c_double, N*M)
bbox = to_numpy_array(shared_bbox)
    # By updating bbox, we update shared_bbox as well, since they share memory
bbox = bbox.reshape((N, M))
bbox[:, :] = np.array([[math.inf, math.inf, math.inf], [-math.inf, -math.inf, -math.inf]])
#####################################################################################
# Go over all animations
#####################################################################################
    with closing(mp.Pool(processes=n_jobs, initializer=init, initargs=(shared_bbox,))) as p:
        # many processes access the same slice
        p.map_async(update_bbox, sample_dirs)
        # A pool must be close()d before it can be join()ed; join() then
        # blocks until every queued update_bbox task has finished.
        p.close()
        p.join()
print("done")
final_bbox = to_numpy_array(shared_bbox)
final_bbox = final_bbox.reshape((N, M))
#####################################################################################
#####################################################################################
# assert np.all(np.isfinite(final_bbox)), final_bbox
# Compute current extent
p_min, p_max = final_bbox[0], final_bbox[1]
non_cube_extent = p_max - p_min
# Convert bbox to cube
cube_extent = np.max(non_cube_extent) * np.ones_like(non_cube_extent)
delta = cube_extent - non_cube_extent
half_delta = delta / 2.0
# Cube params
p_min = p_min - half_delta
extent = cube_extent
# Enlarge bbox
p_min = p_min - bbox_displacement
extent = extent + 2.0 * bbox_displacement
# Update bbox
final_bbox[0] = p_min
final_bbox[1] = p_min + extent
# assert np.all(np.isfinite(final_bbox)), final_bbox
# Store bbox
print("Dumping into json file:", dataset_bbox_json)
with open(dataset_bbox_json, 'w') as f:
json.dump(final_bbox.tolist(), f, indent=4)
return final_bbox
def init(shared_bbox_):
global shared_bbox
shared_bbox = shared_bbox_ # must be inherited, not passed as an argument
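# to_numpy_array wraps the shared ctypes buffer with np.frombuffer, so the
# returned ndarray is a zero-copy view: writes made under the lock in
# update_bbox are visible to every worker process.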
def to_numpy_array(mp_arr):
return np.frombuffer(mp_arr.get_obj())
def update_bbox(sample_dir):
    """Synchronized update of the shared bbox with one sample's mesh bounds."""
    print(sample_dir)
with shared_bbox.get_lock(): # synchronize access
info(f"start {sample_dir}")
mesh_raw_path = os.path.join(sample_dir, "mesh_raw.ply")
assert os.path.exists(mesh_raw_path), f"update_bbox: {mesh_raw_path}"
# Load meshes
mesh = trimesh.load_mesh(mesh_raw_path, process=False, maintain_order=True)
# Compute bbox of current mesh
bbox_bounds = mesh.bounds
bbox_min = bbox_bounds[0]
bbox_max = bbox_bounds[1]
# print(bbox_bounds)
assert np.all(np.isfinite(bbox_bounds)), bbox_bounds
# Update the total bbox after having taken into account the alignment to the origin
bbox = to_numpy_array(shared_bbox)
bbox = bbox.reshape((N, M))
bbox[0] = np.minimum(bbox[0], bbox_min)
bbox[1] = np.maximum(bbox[1], bbox_max)
info(f"end {sample_dir}")
################################################################################################
################################################################################################
################################################################################################
def normalize_mesh(mesh):
# Global normalization
if compute_bbox:
        vertices = (mesh.vertices - p_min) / extent  # now in [0, 1]
vertices = vertices - 0.5 # now in [-0.5, 0.5]
else:
vertices = mesh.vertices - trans
vertices = scale * vertices
mesh.vertices = vertices
return mesh
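# Worked example for the 'cape' settings defined below: with scale = 0.4 and
# trans = the predefined bbox center, a vertex v maps to 0.4 * (v - center),
# so the bbox center lands at the origin and all extents shrink by 2.5x.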
def normalize_meshes(sample_dir):
try:
# Normal mesh
mesh_raw_path = os.path.join(sample_dir, "mesh_raw.ply")
if os.path.exists(mesh_raw_path):
normalized_mesh_path = os.path.join(sample_dir, "mesh_normalized.ply")
if OVERWRITE or not os.path.isfile(normalized_mesh_path):
mesh = trimesh.load_mesh(mesh_raw_path, process=False, maintain_order=True)
mesh = normalize_mesh(mesh)
trimesh.Trimesh.export(mesh, normalized_mesh_path, 'ply')
print("\tWriting mesh into:", normalized_mesh_path)
if VIZ:
mesh_o3d = o3d.io.read_triangle_mesh(normalized_mesh_path)
mesh_o3d.compute_vertex_normals()
o3d.visualization.draw_geometries([world_frame, unit_bbox, mesh_o3d])
###################################################################################
# Real scan if exists
real_scan_path = os.path.join(sample_dir, "mesh_real_scan.ply")
if os.path.isfile(real_scan_path):
mesh = trimesh.load_mesh(real_scan_path, process=False, maintain_order=True)
mesh = normalize_mesh(mesh)
trimesh.Trimesh.export(mesh, real_scan_path, 'ply')
print("\t\tWriting real scan into:", real_scan_path)
###################################################################################
###################################################################################
# Body mesh if exists
body_mesh_raw_path = os.path.join(sample_dir, "mesh_body_raw.ply")
if os.path.isfile(body_mesh_raw_path):
body_mesh_normalized_path = os.path.join(sample_dir, "mesh_body_normalized.ply")
if OVERWRITE_BODY or not os.path.isfile(body_mesh_normalized_path):
mesh = trimesh.load_mesh(body_mesh_raw_path, process=False, maintain_order=True)
mesh = normalize_mesh(mesh)
trimesh.Trimesh.export(mesh, body_mesh_normalized_path, 'ply')
print("\t\tWriting body mesh into:", body_mesh_normalized_path)
###################################################################################
except:
print('\t------------ Error with {}: {}'.format(sample_dir, traceback.format_exc()))
if __name__ == '__main__':
try:
n_jobs = int(os.environ['SLURM_CPUS_ON_NODE'])
except:
n_jobs = 4
print()
print(f"Using {n_jobs} jobs")
mp.freeze_support()
p_min = -0.5
p_max = 0.5
# Flag to visualize meshes for debugging
VIZ = False
if VIZ:
unit_bbox = BBox.compute_bbox_from_min_point_and_max_point(
np.array([p_min]*3), np.array([p_max]*3)
)
world_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=0.5, origin=[0, 0, 0]
)
#####################################################################################
#####################################################################################
dataset = 'cape'
#####################################################################################
#####################################################################################
OVERWRITE_BBOX_COMPUTATION = False
OVERWRITE = False # for the general mesh
OVERWRITE_BODY = False # for the body mesh, in case the dataset has such meshes
#####################################################################################
compute_bbox = False # Keep it to False, unless you wanna play with the normalization etc.
#####################################################################################
if not compute_bbox:
input("Using predefined bbox and scale to normalize - Recommened!")
else:
input("Computing dataset-specific bbox to normalize")
target_animations = []
if compute_bbox:
# bbox array dimensions ([[bbox_min], [bbox_max]])
N, M = 2, 3
# bbox displacement
bbox_displacement = 0.01
else:
# scale
scale = 1.0
trans = 0.0
if 'mano' in dataset:
scale = 0.75
bbox_displacement = 0.0
elif 'cape' in dataset:
scale = 0.4
bbox_displacement = 0.0
# Load our precomputed bbox to normalize the dataset to reside within a unit cube
predefined_bbox_json_path = os.path.join("bbox.json")
assert os.path.isfile(predefined_bbox_json_path)
with open(predefined_bbox_json_path, 'r') as f:
predefined_bbox = json.loads(f.read())
predefined_bbox = np.array(predefined_bbox)
trans = (predefined_bbox[0] + predefined_bbox[1]) / 2.
else:
print("dataset is not implemented")
exit()
####################
dataset_dir = os.path.join(cfg.ROOT, "datasets", dataset)
assert os.path.isdir(dataset_dir), dataset_dir
print("dataset_dir:", dataset_dir)
# Prepare the list of all sample dirs
sample_dirs = sorted(glob.glob(dataset_dir + "/*/*/*"))
########################################################################################################
# 1. Compute global bbox
########################################################################################################
if compute_bbox:
dataset_bbox_json = os.path.join(dataset_dir, "bbox.json")
if OVERWRITE_BBOX_COMPUTATION or not os.path.isfile(dataset_bbox_json):
print()
input("Need to compute bbox. Do I go ahead?")
bbox = compute_global_bbox()
else:
print()
input("Already had bbox - Load it?")
with open(dataset_bbox_json, 'r') as f:
bbox = json.loads(f.read())
bbox = np.array(bbox)
print("bbox ready!")
print(bbox)
########################################################################################################
# 2. Normalize meshes to lie within a common bbox
########################################################################################################
print()
print("#"*60)
print(f"Will normalize {len(sample_dirs)} meshes!")
print("#"*60)
input("Continue?")
if compute_bbox:
p_min = bbox[0]
p_max = bbox[1]
extent = p_max - p_min
p_norm = Pool(n_jobs)
p_norm.map(normalize_meshes, sample_dirs)
print("Done!")
| [
"multiprocessing.Array",
"numpy.array",
"multiprocessing.freeze_support",
"open3d.io.read_triangle_mesh",
"numpy.isfinite",
"os.path.exists",
"multiprocessing.log_to_stderr",
"numpy.max",
"multiprocessing.get_logger",
"os.path.isdir",
"trimesh.Trimesh.export",
"open3d.geometry.TriangleMesh.cre... | [((455, 470), 'multiprocessing.get_logger', 'mp.get_logger', ([], {}), '()\n', (468, 470), True, 'import multiprocessing as mp\n'), ((519, 537), 'multiprocessing.log_to_stderr', 'mp.log_to_stderr', ([], {}), '()\n', (535, 537), True, 'import multiprocessing as mp\n'), ((626, 658), 'multiprocessing.Array', 'mp.Array', (['ctypes.c_double', '(N * M)'], {}), '(ctypes.c_double, N * M)\n', (634, 658), True, 'import multiprocessing as mp\n'), ((826, 903), 'numpy.array', 'np.array', (['[[math.inf, math.inf, math.inf], [-math.inf, -math.inf, -math.inf]]'], {}), '([[math.inf, math.inf, math.inf], [-math.inf, -math.inf, -math.inf]])\n', (834, 903), True, 'import numpy as np\n'), ((6882, 6901), 'multiprocessing.freeze_support', 'mp.freeze_support', ([], {}), '()\n', (6899, 6901), True, 'import multiprocessing as mp\n'), ((9386, 9429), 'os.path.join', 'os.path.join', (['cfg.ROOT', '"""datasets"""', 'dataset'], {}), "(cfg.ROOT, 'datasets', dataset)\n", (9398, 9429), False, 'import os, sys\n'), ((9441, 9467), 'os.path.isdir', 'os.path.isdir', (['dataset_dir'], {}), '(dataset_dir)\n', (9454, 9467), False, 'import os, sys\n'), ((10959, 10971), 'multiprocessing.Pool', 'Pool', (['n_jobs'], {}), '(n_jobs)\n', (10963, 10971), False, 'from multiprocessing import Pool\n'), ((1851, 1874), 'numpy.max', 'np.max', (['non_cube_extent'], {}), '(non_cube_extent)\n', (1857, 1874), True, 'import numpy as np\n'), ((1877, 1906), 'numpy.ones_like', 'np.ones_like', (['non_cube_extent'], {}), '(non_cube_extent)\n', (1889, 1906), True, 'import numpy as np\n'), ((2880, 2920), 'os.path.join', 'os.path.join', (['sample_dir', '"""mesh_raw.ply"""'], {}), "(sample_dir, 'mesh_raw.ply')\n", (2892, 2920), False, 'import os, sys\n'), ((2936, 2965), 'os.path.exists', 'os.path.exists', (['mesh_raw_path'], {}), '(mesh_raw_path)\n', (2950, 2965), False, 'import os, sys\n'), ((3037, 3105), 'trimesh.load_mesh', 'trimesh.load_mesh', (['mesh_raw_path'], {'process': '(False)', 'maintain_order': '(True)'}), '(mesh_raw_path, process=False, maintain_order=True)\n', (3054, 3105), False, 'import trimesh\n'), ((3531, 3560), 'numpy.minimum', 'np.minimum', (['bbox[0]', 'bbox_min'], {}), '(bbox[0], bbox_min)\n', (3541, 3560), True, 'import numpy as np\n'), ((3579, 3608), 'numpy.maximum', 'np.maximum', (['bbox[1]', 'bbox_max'], {}), '(bbox[1], bbox_max)\n', (3589, 3608), True, 'import numpy as np\n'), ((4394, 4434), 'os.path.join', 'os.path.join', (['sample_dir', '"""mesh_raw.ply"""'], {}), "(sample_dir, 'mesh_raw.ply')\n", (4406, 4434), False, 'import os, sys\n'), ((4446, 4475), 'os.path.exists', 'os.path.exists', (['mesh_raw_path'], {}), '(mesh_raw_path)\n', (4460, 4475), False, 'import os, sys\n'), ((5306, 5352), 'os.path.join', 'os.path.join', (['sample_dir', '"""mesh_real_scan.ply"""'], {}), "(sample_dir, 'mesh_real_scan.ply')\n", (5318, 5352), False, 'import os, sys\n'), ((5364, 5394), 'os.path.isfile', 'os.path.isfile', (['real_scan_path'], {}), '(real_scan_path)\n', (5378, 5394), False, 'import os, sys\n'), ((5907, 5952), 'os.path.join', 'os.path.join', (['sample_dir', '"""mesh_body_raw.ply"""'], {}), "(sample_dir, 'mesh_body_raw.ply')\n", (5919, 5952), False, 'import os, sys\n'), ((5964, 5998), 'os.path.isfile', 'os.path.isfile', (['body_mesh_raw_path'], {}), '(body_mesh_raw_path)\n', (5978, 5998), False, 'import os, sys\n'), ((7166, 7243), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {'size': '(0.5)', 'origin': '[0, 0, 0]'}), '(size=0.5, 
origin=[0, 0, 0])\n', (7215, 7243), True, 'import open3d as o3d\n'), ((9589, 9622), 'glob.glob', 'glob.glob', (["(dataset_dir + '/*/*/*')"], {}), "(dataset_dir + '/*/*/*')\n", (9598, 9622), False, 'import glob\n'), ((9921, 9959), 'os.path.join', 'os.path.join', (['dataset_dir', '"""bbox.json"""'], {}), "(dataset_dir, 'bbox.json')\n", (9933, 9959), False, 'import os, sys\n'), ((357, 382), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (372, 382), False, 'import os, sys\n'), ((1131, 1199), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'n_jobs', 'initializer': 'init', 'initargs': '(shared_bbox,)'}), '(processes=n_jobs, initializer=init, initargs=(shared_bbox,))\n', (1138, 1199), True, 'import multiprocessing as mp\n'), ((3301, 3325), 'numpy.isfinite', 'np.isfinite', (['bbox_bounds'], {}), '(bbox_bounds)\n', (3312, 3325), True, 'import numpy as np\n'), ((4525, 4572), 'os.path.join', 'os.path.join', (['sample_dir', '"""mesh_normalized.ply"""'], {}), "(sample_dir, 'mesh_normalized.ply')\n", (4537, 4572), False, 'import os, sys\n'), ((4982, 5029), 'open3d.io.read_triangle_mesh', 'o3d.io.read_triangle_mesh', (['normalized_mesh_path'], {}), '(normalized_mesh_path)\n', (5007, 5029), True, 'import open3d as o3d\n'), ((5088, 5157), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[world_frame, unit_bbox, mesh_o3d]'], {}), '([world_frame, unit_bbox, mesh_o3d])\n', (5121, 5157), True, 'import open3d as o3d\n'), ((5415, 5484), 'trimesh.load_mesh', 'trimesh.load_mesh', (['real_scan_path'], {'process': '(False)', 'maintain_order': '(True)'}), '(real_scan_path, process=False, maintain_order=True)\n', (5432, 5484), False, 'import trimesh\n'), ((5545, 5596), 'trimesh.Trimesh.export', 'trimesh.Trimesh.export', (['mesh', 'real_scan_path', '"""ply"""'], {}), "(mesh, real_scan_path, 'ply')\n", (5567, 5596), False, 'import trimesh\n'), ((6053, 6105), 'os.path.join', 'os.path.join', (['sample_dir', '"""mesh_body_normalized.ply"""'], {}), "(sample_dir, 'mesh_body_normalized.ply')\n", (6065, 6105), False, 'import os, sys\n'), ((7092, 7113), 'numpy.array', 'np.array', (['([p_min] * 3)'], {}), '([p_min] * 3)\n', (7100, 7113), True, 'import numpy as np\n'), ((7113, 7134), 'numpy.array', 'np.array', (['([p_max] * 3)'], {}), '([p_max] * 3)\n', (7121, 7134), True, 'import numpy as np\n'), ((4683, 4751), 'trimesh.load_mesh', 'trimesh.load_mesh', (['mesh_raw_path'], {'process': '(False)', 'maintain_order': '(True)'}), '(mesh_raw_path, process=False, maintain_order=True)\n', (4700, 4751), False, 'import trimesh\n'), ((4816, 4873), 'trimesh.Trimesh.export', 'trimesh.Trimesh.export', (['mesh', 'normalized_mesh_path', '"""ply"""'], {}), "(mesh, normalized_mesh_path, 'ply')\n", (4838, 4873), False, 'import trimesh\n'), ((6222, 6295), 'trimesh.load_mesh', 'trimesh.load_mesh', (['body_mesh_raw_path'], {'process': '(False)', 'maintain_order': '(True)'}), '(body_mesh_raw_path, process=False, maintain_order=True)\n', (6239, 6295), False, 'import trimesh\n'), ((6364, 6426), 'trimesh.Trimesh.export', 'trimesh.Trimesh.export', (['mesh', 'body_mesh_normalized_path', '"""ply"""'], {}), "(mesh, body_mesh_normalized_path, 'ply')\n", (6386, 6426), False, 'import trimesh\n'), ((8897, 8922), 'os.path.join', 'os.path.join', (['"""bbox.json"""'], {}), "('bbox.json')\n", (8909, 8922), False, 'import os, sys\n'), ((8942, 8983), 'os.path.isfile', 'os.path.isfile', (['predefined_bbox_json_path'], {}), '(predefined_bbox_json_path)\n', (8956, 8983), False, 'import os, sys\n'), ((10006, 
10039), 'os.path.isfile', 'os.path.isfile', (['dataset_bbox_json'], {}), '(dataset_bbox_json)\n', (10020, 10039), False, 'import os, sys\n'), ((10371, 10385), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (10379, 10385), True, 'import numpy as np\n'), ((4618, 4654), 'os.path.isfile', 'os.path.isfile', (['normalized_mesh_path'], {}), '(normalized_mesh_path)\n', (4632, 4654), False, 'import os, sys\n'), ((6156, 6197), 'os.path.isfile', 'os.path.isfile', (['body_mesh_normalized_path'], {}), '(body_mesh_normalized_path)\n', (6170, 6197), False, 'import os, sys\n'), ((6680, 6702), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6700, 6702), False, 'import traceback\n'), ((9146, 9171), 'numpy.array', 'np.array', (['predefined_bbox'], {}), '(predefined_bbox)\n', (9154, 9171), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..networks.deeplab.aspp import build_aspp, ASPP
from ..networks.deeplab.backbone.resnet import SEResNet50
class NET_GAmap(nn.Module):
# with Indicator encoder
def __init__(self, pretrained=1, resfix=False,):
super(NET_GAmap, self).__init__()
# Sparse-to-Dense Network & Object Feature Extractor
self.encoder_6ch = Encoder_6ch(resfix)
self.decoder_iact = Decoder()
self.converter = Object_Feature_Extractor()
# Interfused Object Feature
self.encoder_3ch = Encoder_3ch(resfix)
self.transfer_module = Attention_Transfer_Module()
# Overlapped Object Feature
self.diff_module = IAware_Diff_Prop_Module()
# Segmentation Head
self.feature_compounder = Feature_compounder()
self.segmentation_head = Segmentation_Head()
self.refer_weight = None
self._initialize_weights(pretrained)
def is_training(self,boolean):
        # The custom `training` attribute on both sub-modules is set directly.
        self.transfer_module.training = boolean
        self.diff_module.training = boolean
def forward_obj_feature_extractor(self, x): # Bx4xHxW to Bx1xHxW
r5, _, r3, r2 = self.encoder_6ch(x)
estimated_mask, m2 = self.decoder_iact(r5, r3, r2, train_prop=False)
r5_indicator = self.converter(r5, r3, m2)
return estimated_mask, r5_indicator
def forward_prop(self, anno_propEnc_r4_list, queframe_3ch, anno_iactEnc_r4_list, r4_neighbor, neighbor_pred_onehot,
anno_fr_list=None, que_fr=None, debug=False): #1/16, 1024
        if not debug:
r4_que, r2_que = self.encoder_3ch(queframe_3ch)
trf_module_out, scoremap = self.transfer_module(anno_propEnc_r4_list, r4_que, anno_iactEnc_r4_list, anno_fr_list, que_fr) # 1/8, 256
diff_module_out = self.diff_module(neighbor_pred_onehot, r4_neighbor, r4_que)
m2 = self.feature_compounder(trf_module_out, diff_module_out, r4_que, r2_que)
estimated_fgbg = self.segmentation_head(m2)
fg_map = (F.softmax(F.interpolate(estimated_fgbg, scale_factor=0.125), dim=0)[:,1] > 0.4).float() # Nobj,H,W
fg_map = torch.max(fg_map, dim=0)[0] #H W
n_fg = fg_map.sum()
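            # Confidence score: average of the mean attention score over the
            # whole frame and its mean over predicted-foreground pixels only
            # (the +0.1 guards against an empty foreground mask).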
score = (float(torch.mean(scoremap) + (torch.sum(fg_map * scoremap)/(n_fg+0.1)).cpu()))/2
return estimated_fgbg, r4_que, score
else:
r4_que, r2_que = self.encoder_3ch(queframe_3ch)
trf_module_out, scoremap, attention = self.transfer_module(anno_propEnc_r4_list, r4_que, anno_iactEnc_r4_list, anno_fr_list, que_fr, True) # 1/8, 256
diff_module_out = self.diff_module(neighbor_pred_onehot, r4_neighbor, r4_que)
m2 = self.feature_compounder(trf_module_out, diff_module_out, r4_que, r2_que)
estimated_fgbg = self.segmentation_head(m2)
fg_map = (F.softmax(F.interpolate(estimated_fgbg, scale_factor=0.125), dim=0)[:,1] > 0.4).float() # Nobj,H,W
fg_map = torch.max(fg_map, dim=0)[0] #H W
n_fg = fg_map.sum()
score = (float(torch.mean(scoremap) + (torch.sum(fg_map * scoremap)/(n_fg+0.1)).cpu()))/2
return estimated_fgbg, r4_que, score, attention
def _initialize_weights(self, pretrained):
for m in self.modules():
if pretrained:
break
else:
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.001)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
class Encoder_3ch(nn.Module):
def __init__(self, resfix):
super(Encoder_3ch, self).__init__()
self.conv0_3ch = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=True)
resnet = SEResNet50(output_stride=8, BatchNorm=nn.BatchNorm2d, pretrained=True)
self.bn1 = resnet.bn1
self.relu = resnet.relu # 1/2, 64
self.maxpool = resnet.maxpool
self.res2 = resnet.layer1 # 1/4, 256
self.res3 = resnet.layer2 # 1/8, 512
self.res4 = resnet.layer3 # 1/8, 1024
# freeze BNs
if resfix:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
for p in m.parameters():
p.requires_grad = False
def forward(self, x):
        # x : [b,3,h,w]
# f = (in_frame - Variable(self.mean)) / Variable(self.std)
#a = torch.unsqueeze(in_a, dim=1).float() # add channel dim
#b = torch.unsqueeze(in_b, dim=1).float() # add channel dim
x = self.conv0_3ch(x) # 1/2, 64
x = self.bn1(x)
c1 = self.relu(x) # 1/2, 64
x = self.maxpool(c1) # 1/4, 64
r2 = self.res2(x) # 1/4, 256
r3 = self.res3(r2) # 1/8, 512
r4 = self.res4(r3) # 1/8, 1024
return r4, r2
class Encoder_6ch(nn.Module):
def __init__(self, resfix):
super(Encoder_6ch, self).__init__()
self.conv0_6ch = nn.Conv2d(6, 64, kernel_size=7, stride=2, padding=3, bias=True)
resnet = SEResNet50(output_stride=16, BatchNorm=nn.BatchNorm2d, pretrained=True)
self.bn1 = resnet.bn1
self.relu = resnet.relu # 1/2, 64
self.maxpool = resnet.maxpool
self.res2 = resnet.layer1 # 1/4, 256
self.res3 = resnet.layer2 # 1/8, 512
self.res4 = resnet.layer3 # 1/16, 1024
self.res5 = resnet.layer4 # 1/16, 2048
# freeze BNs
if resfix:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
for p in m.parameters():
p.requires_grad = False
def forward(self, x):
        # x : [b,6,h,w]
# f = (in_frame - Variable(self.mean)) / Variable(self.std)
#a = torch.unsqueeze(in_a, dim=1).float() # add channel dim
#b = torch.unsqueeze(in_b, dim=1).float() # add channel dim
x = self.conv0_6ch(x) # 1/2, 64
x = self.bn1(x)
c1 = self.relu(x) # 1/2, 64
x = self.maxpool(c1) # 1/4, 64
r2 = self.res2(x) # 1/4, 256
r3 = self.res3(r2) # 1/8, 512
r4 = self.res4(r3) # 1/16, 1024
r5 = self.res5(r4) # 1/16, 2048
return r5, r4, r3, r2
class Refine(nn.Module):
def __init__(self, inplanes, planes, scale_factor=2):
super(Refine, self).__init__()
self.convFS1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1)
self.convFS2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.convFS3 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.convMM1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.convMM2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.scale_factor = scale_factor
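    # Fuses an encoder skip feature `f` with the upsampled coarser decoder
    # feature `pm` through two small residual conv stacks (a refinement block
    # in the style of mask-propagation decoders).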
def forward(self, f, pm):
s = self.convFS1(f)
sr = self.convFS2(F.relu(s))
sr = self.convFS3(F.relu(sr))
s = s + sr
m = s + F.interpolate(pm, scale_factor=self.scale_factor, mode='bilinear',align_corners=True)
mr = self.convMM1(F.relu(m))
mr = self.convMM2(F.relu(mr))
m = m + mr
return m
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
mdim = 256
self.aspp_decoder = ASPP(backbone='res', output_stride=16, BatchNorm=nn.BatchNorm2d, pretrained=1)
self.convG0 = nn.Conv2d(2048, mdim, kernel_size=3, padding=1)
self.convG1 = nn.Conv2d(mdim, mdim, kernel_size=3, padding=1)
self.convG2 = nn.Conv2d(mdim, mdim, kernel_size=3, padding=1)
self.RF3 = Refine(512, mdim) # 1/16 -> 1/8
self.RF2 = Refine(256, mdim) # 1/8 -> 1/4
self.lastconv = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Dropout(0.1),
nn.Conv2d(256, 1, kernel_size=1, stride=1))
def forward(self, r5, que_r3, que_r2, train_prop = True):
aspp_out = self.aspp_decoder(r5) #1/16 mdim
aspp_out = F.interpolate(aspp_out, scale_factor=4, mode='bilinear',align_corners=True) #1/4 mdim
m4 = self.convG0(F.relu(r5)) # out: # 1/16, mdim
m4 = self.convG1(F.relu(m4)) # out: # 1/16, mdim
m4 = self.convG2(F.relu(m4)) # out: # 1/16, mdim
m3 = self.RF3(que_r3, m4) # out: 1/8, mdim
m2 = self.RF2(que_r2, m3) # out: 1/4, mdim
m2 = torch.cat((m2, aspp_out), dim=1) # out: 1/4, mdim*2
if train_prop:
return m2
else:
x = self.lastconv(m2)
x = F.interpolate(x, scale_factor=4, mode='bilinear', align_corners=True)
return x, m2
class IAware_Diff_Prop_Module(nn.Module):
def __init__(self):
super(IAware_Diff_Prop_Module, self).__init__()
self.f_conv_inter = nn.Conv2d(1024, 128, kernel_size=1, padding=0) # 1/8, 128
self.conv1_inter = nn.Conv2d(128, 64, kernel_size=5, padding=2) # 1/8, 128
self.f_conv_diffu = nn.Conv2d(1024, 128, kernel_size=1, padding=0) # 1/8, 128
self.conv1_diffu = nn.Conv2d(129, 64, kernel_size=5, padding=2) # 1/8, 128
self.conv2_diffu = nn.Conv2d(128, 128, kernel_size=5, padding=2) # 1/8, 128
self.conv3_diffu = nn.Conv2d(128, 128, kernel_size=5, padding=2) # 1/8, 128
self.training = True # If testing, batchsize = n_obj
def forward(self, neighbor_pred_onehot, r4_nei, r4_que):
'''
neighbor_pred_onehot:
Train: # B,1,H,W
Test: # Nobj,1,H,W
'''
f_inter = (self.f_conv_inter(r4_nei) - self.f_conv_inter(r4_que))**2 # [B, C, 128, HW]
f_inter = self.conv1_inter(torch.exp(-f_inter))
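        # exp(-||f_nei - f_que||^2) acts as a per-pixel similarity gate:
        # close to 1 where neighbor and query features agree, near 0 where
        # they differ, highlighting regions that changed between frames.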
neighbor_pred_onehot = F.interpolate(neighbor_pred_onehot, scale_factor=0.125, mode='bilinear',align_corners=True) #1/4 mdim
f_diff = torch.cat((self.f_conv_diffu(r4_nei), neighbor_pred_onehot), dim=1)
f_diff = self.conv1_diffu(f_diff)
f_diff = torch.cat((f_diff,f_inter), dim=1)
f_diff = self.conv2_diffu(f_diff)
f_diff = self.conv3_diffu(f_diff)
return f_diff
class Attention_Transfer_Module(nn.Module):
def __init__(self):
super(Attention_Transfer_Module, self).__init__()
self.f_conv_sim = nn.Conv2d(1024, 128, kernel_size=1, padding=0) # 1/8, 128
self.f_conv_att = nn.Conv1d(128, 128, kernel_size=1, padding=0) # 1/8, 128
self.training = True # If testing, batchsize = n_obj
def get_attention(self, anno_f, que_f, similarity_mat, h, w):
'''
anno_f : train [BN, 128, HW'] test [N, 128, HW']
que_f : train [B, 128, HW] test [1, 128, HW]
similarity_mat : train [BN, HW', HW] test [N, HW', HW]
'''
bn, c, hw = anno_f.size()
b, c, hw = que_f.size()
n_feature = int(bn/b)
similarity_mat_self = F.softmax(torch.bmm(que_f.transpose(1, 2), que_f), dim=2)
que_f_transferred = torch.bmm(self.f_conv_att(que_f), similarity_mat_self).unsqueeze(dim=1) # [B, 1, 128, HW]
anno_f_transferred = torch.bmm(self.f_conv_att(anno_f), similarity_mat).reshape(b, n_feature, c, hw) # [B, N, 128, HW]
# diff = (anno_f_transferred - que_f_transferred)**2 # [B, N, 128, HW]
# attention = (torch.max(diff, dim=2, keepdim=True)[0]).reshape(b,n_feature,1,h,w) # [B, N, 1, H, W]
# attention = F.softmax(1/(attention+0.1),dim=1) # [B, N, 1, H, W]
diff = (anno_f_transferred - que_f_transferred)**2 # [B, N, 128, HW]
diff = (torch.max(diff, dim=2, keepdim=True)[0]).reshape(b,n_feature,1,h,w) + 0.1 # [B, N, 1, H, W]
attention_logit = 1/diff
scoremap_logit = attention_logit.clone() # [B, N, 1, H, W]
scoremap_logit = scoremap_logit[0,:,0 ] # [N, H, W]
scoremap = torch.exp(torch.max(scoremap_logit, dim=0)[0]/2-5) # H, W
attention = F.softmax(attention_logit,dim=1) # [B, N, 1, H, W]
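        # Attention over the N reference frames: weight ~ softmax(1 / (d + 0.1)),
        # where d is the max squared channel difference, so references whose
        # transferred features best match the query dominate the fusion.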
return attention, scoremap
def forward(self, anno_feature_list, que_feature, anno_indicator_feature_list, anno_fr_list, que_fr, debug = False):
        '''
        :param anno_feature_list: list of N tensors, each [B,C,H,W]; B = number of objects, N = number of reference rounds
        :param que_feature: [B,C,H,W]
        :param anno_indicator_feature_list: list of N tensors, each [B,C,H,W]
        :return que_mask_feature: [B,C,H,W]
        '''
n_features = len(anno_feature_list)
b, ci, h, w = anno_indicator_feature_list[0].size() # b means n_objs # [B, 256, HxW]
if (n_features >= 4) and (anno_fr_list is not None):
anno_fr_list_tmp = anno_fr_list[1:]
index_adjacent = np.argsort(np.abs(que_fr - anno_fr_list_tmp))[:2]
            # Keep the first reference frame plus the two temporally closest
            # ones (wrapping the scalar in a list so the concatenation works).
            anno_fr_list = [anno_fr_list[0]] + list(anno_fr_list_tmp[index_adjacent])
anno_fr_adjacent_index = [0] + list(1 + index_adjacent)
n_features = 3
else:
anno_fr_adjacent_index = list(range(len(anno_feature_list)))
anno_feature_sim = [] # [BN, C, HW'](train), # [N, C, HW'](test)
anno_indicator_feature = [] # [BN, C, HW']
for f_idx in anno_fr_adjacent_index:
anno_feature_sim.append(self.f_conv_sim(anno_feature_list[f_idx]).reshape(b, 128, h*w)) # [B, 128, HW']
anno_indicator_feature.append(anno_indicator_feature_list[f_idx].reshape(b, 256, h*w)) # [B, 256, HW']
que_feature_sim = self.f_conv_sim(que_feature).reshape(b, 128, h*w) # [B, 128, HW]
anno_feature_sim = torch.stack(anno_feature_sim, dim=1) # [B, N, 128, HW']
anno_indicator_feature = torch.stack(anno_indicator_feature, dim=1) # [B, N, 256, HW']
if self.training:
anno_feature_sim = anno_feature_sim.reshape(b*n_features, 128, h*w) # [BN, 128, HW']
que_feature_sim_tmp = torch.unsqueeze(que_feature_sim,dim=1).expand(-1, n_features, -1, -1).reshape(b*n_features, 128, h*w) # [BN, 128, HW']
similarity_mat = F.softmax(torch.bmm(anno_feature_sim.transpose(1, 2), que_feature_sim_tmp), dim=2) # [BN, HW', HW]
attention, scoremap = self.get_attention(anno_feature_sim, que_feature_sim, similarity_mat, h, w) # [B, N, 1, H, W]
anno_indicator_feature = anno_indicator_feature.reshape(b*n_features, 256, h*w) # [B, N, 256, H, W]
anno_indicator_feature_transferred = torch.bmm(anno_indicator_feature,similarity_mat).reshape(b, n_features, 256, h, w) # [B, N, 256, H, W]
else:
que_feature_sim = que_feature_sim[0]
anno_feature_sim = anno_feature_sim[0].reshape(n_features, 128, h * w) # [N, 128, HW']
que_feature_sim_tmp = torch.unsqueeze(que_feature_sim,dim=0).expand(n_features, -1, -1) # [N, 128, HW']
similarity_mat = F.softmax(torch.bmm(anno_feature_sim.transpose(1, 2), que_feature_sim_tmp), dim=2) # [N, HW', HW]
attention, scoremap = self.get_attention(anno_feature_sim, que_feature_sim.unsqueeze(dim=0), similarity_mat, h, w) # [1, N, 1, H, W]
anno_indicator_feature_transferred = []
for obj_idx in range(b):
anno_indicator_feature_transferred.append(torch.bmm(anno_indicator_feature[obj_idx],similarity_mat))
anno_indicator_feature_transferred = torch.stack(anno_indicator_feature_transferred,dim=0).reshape(b, n_features, 256, h, w) # [B, N, 256, H, W]
que_mask_feature = torch.sum(anno_indicator_feature_transferred * attention, dim=1, keepdim=False) # [B, 256, H, W]
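        # Fuse: the attention-weighted sum over the N transferred indicator
        # features yields a single 256-channel mask feature per object.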
if debug:
return que_mask_feature, scoremap, attention.detach().data[0, :, 0].cpu().numpy()
else:
return que_mask_feature, scoremap
class Feature_compounder(nn.Module):
def __init__(self):
super(Feature_compounder, self).__init__()
mdim = 128
self.que_conv = nn.Conv2d(1024, 256, kernel_size=1, padding=0) # 1/8, 256
self.cat_conv = nn.Conv2d(640, 512, kernel_size=1, padding=0) # 1/8, 256
self.aspp_decoder = ASPP(backbone='res', output_stride=8, BatchNorm=nn.BatchNorm2d, pretrained=1, inplanes=512, outplanes = mdim)
self.convG0 = nn.Conv2d(512, mdim, kernel_size=3, padding=1)
self.convG1 = nn.Conv2d(mdim, mdim, kernel_size=3, padding=1)
self.convG2 = nn.Conv2d(mdim, mdim, kernel_size=3, padding=1)
self.RF2 = Refine(256, mdim) # 1/8 -> 1/4
def forward(self, trf_module_out, diff_module_out, que_r4, que_r2):
que_r4 = self.que_conv(que_r4)
r4 = torch.cat((trf_module_out, diff_module_out, que_r4), dim=1)
r4 = self.cat_conv(r4)
aspp_out = self.aspp_decoder(r4) #1/8 mdim
aspp_out = F.interpolate(aspp_out, scale_factor=2, mode='bilinear',align_corners=True) #1/4 mdim
m4 = self.convG0(F.relu(r4)) # out: # 1/8, mdim
m4 = self.convG1(F.relu(m4)) # out: # 1/8, mdim
m4 = self.convG2(F.relu(m4)) # out: # 1/8, mdim
m2 = self.RF2(que_r2, m4) # out: 1/4, mdim
m2 = torch.cat((m2, aspp_out), dim=1) # out: 1/4, mdim*2
return m2 # out: 1/4, 256
class Segmentation_Head(nn.Module):
def __init__(self):
super(Segmentation_Head, self).__init__()
self.conv1 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(256)
self.conv2 = nn.Conv2d(256, 128, kernel_size=5, padding=2)
self.bn2 = nn.BatchNorm2d(128)
self.conv3 = nn.Conv2d(128, 128, kernel_size=5, padding=2)
self.bn3 = nn.BatchNorm2d(128)
self.conv4 = nn.Conv2d(128, 64, kernel_size=5, padding=2)
self.bn4 = nn.BatchNorm2d(64)
self.conv5 = nn.Conv2d(64, 2, kernel_size=1, stride=1)
def forward(self, seg_feature):
'''
:return:
'''
x = self.bn1(self.conv1(seg_feature))
x = nn.Dropout(0.5)(F.relu(x))
x = self.bn2(self.conv2(x))
x = nn.Dropout(0.1)(F.relu(x))
x = self.bn3(self.conv3(x))
x = nn.Dropout(0.1)(F.relu(x))
x = self.bn4(self.conv4(x))
x = self.conv5(F.relu(x))
x = F.interpolate(x, scale_factor=4, mode='bilinear', align_corners=True)
return x
class Object_Feature_Extractor(nn.Module):
def __init__(self):
super(Object_Feature_Extractor, self).__init__()
# [1/4, 512] to [1/8, 256]
downsample1 = nn.Conv2d(512, 256, kernel_size=1, stride=2, bias=False)
self.block1 = SEBottleneck(512, 64, stride = 2, downsample = downsample1)
# [1/16, 2048] to [1/8, 256]
self.conv16_8 = nn.Conv2d(2048, 256, kernel_size=1, stride=1)
# [1/8, 512] to [1/8, 256]
self.conv8_8 = nn.Conv2d(512, 256, kernel_size=1, stride=1)
self.conv_cat = nn.Conv2d(768, 256, kernel_size=3, stride=1, padding=1) # 1/8, 256
def forward(self, r5, r4, m2):
'''
:param r5: 1/16, 2048
:param r4: 1/8, 1024
:param m2: 1/4, 512
:return:
'''
m4 = self.block1(m2)
r5 = self.conv16_8(r5)
r5_r4 = F.interpolate(r5, scale_factor=2, mode='bilinear',align_corners=True)
r4 = self.conv8_8(r4)
x = torch.cat((r5_r4, r4, m4),dim=1)
x = self.conv_cat(x)
return x # 1/8, 256
class SEBottleneck(nn.Module):
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=nn.BatchNorm2d):
        super(SEBottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               dilation=dilation, padding=dilation, bias=False)
        self.bn2 = BatchNorm(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = BatchNorm(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        # SE (squeeze-and-excitation) branch: global pool, bottleneck down/up, sigmoid gate
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_down = nn.Conv2d(
            planes * 4, planes // 4, kernel_size=1, bias=False)
        self.conv_up = nn.Conv2d(
            planes // 4, planes * 4, kernel_size=1, bias=False)
        self.sig = nn.Sigmoid()
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out1 = self.global_pool(out)
        out1 = self.conv_down(out1)
        out1 = self.relu(out1)
        out1 = self.conv_up(out1)
        out1 = self.sig(out1)
        if self.downsample is not None:
            residual = self.downsample(x)
        res = out1 * out + residual
        res = self.relu(res)
        return res
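# Hypothetical smoke test for SEBottleneck (mirrors downsample1 in
# Object_Feature_Extractor above):
#   blk = SEBottleneck(512, 64, stride=2,
#                      downsample=nn.Conv2d(512, 256, kernel_size=1, stride=2, bias=False))
#   blk(torch.rand(1, 512, 64, 64)).shape  # -> torch.Size([1, 256, 32, 32])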
#
#
# if __name__ == "__main__":
# import torch
# model = ATnet()
# input = torch.rand(1, 3, 512, 512)
# output, low_level_feat = model(input)
# print(output.size())
# print(low_level_feat.size()) | [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.max",
"torch.exp",
"torch.sum",
"torch.nn.functional.interpolate",
"torch.bmm",
"torch.nn.functional.softmax",
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"torch.mean",
"torch.unsqueeze",
"torch.nn.AdaptiveAvgPool2d",
"numpy.abs",
"torch.nn.fun... | [((4173, 4196), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (4193, 4196), True, 'import torch.nn as nn\n'), ((4736, 4799), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(True)'}), '(3, 64, kernel_size=7, stride=2, padding=3, bias=True)\n', (4745, 4799), True, 'import torch.nn as nn\n'), ((6035, 6098), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(True)'}), '(6, 64, kernel_size=7, stride=2, padding=3, bias=True)\n', (6044, 6098), True, 'import torch.nn as nn\n'), ((7448, 7501), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(3)', 'padding': '(1)'}), '(inplanes, planes, kernel_size=3, padding=1)\n', (7457, 7501), True, 'import torch.nn as nn\n'), ((7525, 7576), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'padding': '(1)'}), '(planes, planes, kernel_size=3, padding=1)\n', (7534, 7576), True, 'import torch.nn as nn\n'), ((7600, 7651), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'padding': '(1)'}), '(planes, planes, kernel_size=3, padding=1)\n', (7609, 7651), True, 'import torch.nn as nn\n'), ((7675, 7726), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'padding': '(1)'}), '(planes, planes, kernel_size=3, padding=1)\n', (7684, 7726), True, 'import torch.nn as nn\n'), ((7750, 7801), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'padding': '(1)'}), '(planes, planes, kernel_size=3, padding=1)\n', (7759, 7801), True, 'import torch.nn as nn\n'), ((8451, 8498), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048)', 'mdim'], {'kernel_size': '(3)', 'padding': '(1)'}), '(2048, mdim, kernel_size=3, padding=1)\n', (8460, 8498), True, 'import torch.nn as nn\n'), ((8521, 8568), 'torch.nn.Conv2d', 'nn.Conv2d', (['mdim', 'mdim'], {'kernel_size': '(3)', 'padding': '(1)'}), '(mdim, mdim, kernel_size=3, padding=1)\n', (8530, 8568), True, 'import torch.nn as nn\n'), ((8591, 8638), 'torch.nn.Conv2d', 'nn.Conv2d', (['mdim', 'mdim'], {'kernel_size': '(3)', 'padding': '(1)'}), '(mdim, mdim, kernel_size=3, padding=1)\n', (8600, 8638), True, 'import torch.nn as nn\n'), ((9501, 9577), 'torch.nn.functional.interpolate', 'F.interpolate', (['aspp_out'], {'scale_factor': '(4)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(aspp_out, scale_factor=4, mode='bilinear', align_corners=True)\n", (9514, 9577), True, 'import torch.nn.functional as F\n'), ((9879, 9911), 'torch.cat', 'torch.cat', (['(m2, aspp_out)'], {'dim': '(1)'}), '((m2, aspp_out), dim=1)\n', (9888, 9911), False, 'import torch\n'), ((10290, 10336), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(128)'], {'kernel_size': '(1)', 'padding': '(0)'}), '(1024, 128, kernel_size=1, padding=0)\n', (10299, 10336), True, 'import torch.nn as nn\n'), ((10376, 10420), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(128, 64, kernel_size=5, padding=2)\n', (10385, 10420), True, 'import torch.nn as nn\n'), ((10462, 10508), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(128)'], {'kernel_size': '(1)', 'padding': '(0)'}), '(1024, 128, kernel_size=1, padding=0)\n', (10471, 10508), True, 'import torch.nn as nn\n'), ((10548, 10592), 'torch.nn.Conv2d', 'nn.Conv2d', (['(129)', '(64)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(129, 64, kernel_size=5, padding=2)\n', (10557, 10592), True, 'import 
torch.nn as nn\n'), ((10632, 10677), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(128, 128, kernel_size=5, padding=2)\n', (10641, 10677), True, 'import torch.nn as nn\n'), ((10717, 10762), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(128, 128, kernel_size=5, padding=2)\n', (10726, 10762), True, 'import torch.nn as nn\n'), ((11187, 11283), 'torch.nn.functional.interpolate', 'F.interpolate', (['neighbor_pred_onehot'], {'scale_factor': '(0.125)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(neighbor_pred_onehot, scale_factor=0.125, mode='bilinear',\n align_corners=True)\n", (11200, 11283), True, 'import torch.nn.functional as F\n'), ((11433, 11468), 'torch.cat', 'torch.cat', (['(f_diff, f_inter)'], {'dim': '(1)'}), '((f_diff, f_inter), dim=1)\n', (11442, 11468), False, 'import torch\n'), ((11729, 11775), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(128)'], {'kernel_size': '(1)', 'padding': '(0)'}), '(1024, 128, kernel_size=1, padding=0)\n', (11738, 11775), True, 'import torch.nn as nn\n'), ((11814, 11859), 'torch.nn.Conv1d', 'nn.Conv1d', (['(128)', '(128)'], {'kernel_size': '(1)', 'padding': '(0)'}), '(128, 128, kernel_size=1, padding=0)\n', (11823, 11859), True, 'import torch.nn as nn\n'), ((13369, 13402), 'torch.nn.functional.softmax', 'F.softmax', (['attention_logit'], {'dim': '(1)'}), '(attention_logit, dim=1)\n', (13378, 13402), True, 'import torch.nn.functional as F\n'), ((14970, 15006), 'torch.stack', 'torch.stack', (['anno_feature_sim'], {'dim': '(1)'}), '(anno_feature_sim, dim=1)\n', (14981, 15006), False, 'import torch\n'), ((15059, 15101), 'torch.stack', 'torch.stack', (['anno_indicator_feature'], {'dim': '(1)'}), '(anno_indicator_feature, dim=1)\n', (15070, 15101), False, 'import torch\n'), ((16862, 16941), 'torch.sum', 'torch.sum', (['(anno_indicator_feature_transferred * attention)'], {'dim': '(1)', 'keepdim': '(False)'}), '(anno_indicator_feature_transferred * attention, dim=1, keepdim=False)\n', (16871, 16941), False, 'import torch\n'), ((17290, 17336), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(256)'], {'kernel_size': '(1)', 'padding': '(0)'}), '(1024, 256, kernel_size=1, padding=0)\n', (17299, 17336), True, 'import torch.nn as nn\n'), ((17373, 17418), 'torch.nn.Conv2d', 'nn.Conv2d', (['(640)', '(512)'], {'kernel_size': '(1)', 'padding': '(0)'}), '(640, 512, kernel_size=1, padding=0)\n', (17382, 17418), True, 'import torch.nn as nn\n'), ((17592, 17638), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', 'mdim'], {'kernel_size': '(3)', 'padding': '(1)'}), '(512, mdim, kernel_size=3, padding=1)\n', (17601, 17638), True, 'import torch.nn as nn\n'), ((17661, 17708), 'torch.nn.Conv2d', 'nn.Conv2d', (['mdim', 'mdim'], {'kernel_size': '(3)', 'padding': '(1)'}), '(mdim, mdim, kernel_size=3, padding=1)\n', (17670, 17708), True, 'import torch.nn as nn\n'), ((17731, 17778), 'torch.nn.Conv2d', 'nn.Conv2d', (['mdim', 'mdim'], {'kernel_size': '(3)', 'padding': '(1)'}), '(mdim, mdim, kernel_size=3, padding=1)\n', (17740, 17778), True, 'import torch.nn as nn\n'), ((17957, 18016), 'torch.cat', 'torch.cat', (['(trf_module_out, diff_module_out, que_r4)'], {'dim': '(1)'}), '((trf_module_out, diff_module_out, que_r4), dim=1)\n', (17966, 18016), False, 'import torch\n'), ((18118, 18194), 'torch.nn.functional.interpolate', 'F.interpolate', (['aspp_out'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(aspp_out, scale_factor=2, mode='bilinear', 
align_corners=True)\n", (18131, 18194), True, 'import torch.nn.functional as F\n'), ((18440, 18472), 'torch.cat', 'torch.cat', (['(m2, aspp_out)'], {'dim': '(1)'}), '((m2, aspp_out), dim=1)\n', (18449, 18472), False, 'import torch\n'), ((18659, 18704), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(256, 256, kernel_size=3, padding=1)\n', (18668, 18704), True, 'import torch.nn as nn\n'), ((18724, 18743), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (18738, 18743), True, 'import torch.nn as nn\n'), ((18765, 18810), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(256, 128, kernel_size=5, padding=2)\n', (18774, 18810), True, 'import torch.nn as nn\n'), ((18830, 18849), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (18844, 18849), True, 'import torch.nn as nn\n'), ((18871, 18916), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(128, 128, kernel_size=5, padding=2)\n', (18880, 18916), True, 'import torch.nn as nn\n'), ((18936, 18955), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (18950, 18955), True, 'import torch.nn as nn\n'), ((18977, 19021), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(128, 64, kernel_size=5, padding=2)\n', (18986, 19021), True, 'import torch.nn as nn\n'), ((19041, 19059), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (19055, 19059), True, 'import torch.nn as nn\n'), ((19081, 19122), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(2)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(64, 2, kernel_size=1, stride=1)\n', (19090, 19122), True, 'import torch.nn as nn\n'), ((19519, 19588), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(4)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(x, scale_factor=4, mode='bilinear', align_corners=True)\n", (19532, 19588), True, 'import torch.nn.functional as F\n'), ((19788, 19844), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(1)', 'stride': '(2)', 'bias': '(False)'}), '(512, 256, kernel_size=1, stride=2, bias=False)\n', (19797, 19844), True, 'import torch.nn as nn\n'), ((19989, 20034), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(2048, 256, kernel_size=1, stride=1)\n', (19998, 20034), True, 'import torch.nn as nn\n'), ((20094, 20138), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(512, 256, kernel_size=1, stride=1)\n', (20103, 20138), True, 'import torch.nn as nn\n'), ((20164, 20219), 'torch.nn.Conv2d', 'nn.Conv2d', (['(768)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(768, 256, kernel_size=3, stride=1, padding=1)\n', (20173, 20219), True, 'import torch.nn as nn\n'), ((20472, 20542), 'torch.nn.functional.interpolate', 'F.interpolate', (['r5'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(r5, scale_factor=2, mode='bilinear', align_corners=True)\n", (20485, 20542), True, 'import torch.nn.functional as F\n'), ((20584, 20617), 'torch.cat', 'torch.cat', (['(r5_r4, r4, m4)'], {'dim': '(1)'}), '((r5_r4, r4, m4), dim=1)\n', (20593, 20617), False, 'import torch\n'), ((20902, 20956), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inplanes, planes, kernel_size=1, bias=False)\n', 
(20911, 20956), True, 'import torch.nn as nn\n'), ((21015, 21123), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': 'stride', 'dilation': 'dilation', 'padding': 'dilation', 'bias': '(False)'}), '(planes, planes, kernel_size=3, stride=stride, dilation=dilation,\n padding=dilation, bias=False)\n', (21024, 21123), True, 'import torch.nn as nn\n'), ((21209, 21278), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', '(planes * self.expansion)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(planes, planes * self.expansion, kernel_size=1, bias=False)\n', (21218, 21278), True, 'import torch.nn as nn\n'), ((21353, 21374), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (21360, 21374), True, 'import torch.nn as nn\n'), ((21415, 21438), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (21435, 21438), True, 'import torch.nn as nn\n'), ((21464, 21525), 'torch.nn.Conv2d', 'nn.Conv2d', (['(planes * 4)', '(planes // 4)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(planes * 4, planes // 4, kernel_size=1, bias=False)\n', (21473, 21525), True, 'import torch.nn as nn\n'), ((21562, 21623), 'torch.nn.Conv2d', 'nn.Conv2d', (['(planes // 4)', '(planes * 4)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(planes // 4, planes * 4, kernel_size=1, bias=False)\n', (21571, 21623), True, 'import torch.nn as nn\n'), ((21656, 21668), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (21666, 21668), True, 'import torch.nn as nn\n'), ((4242, 4294), 'torch.nn.Linear', 'nn.Linear', (['channel', '(channel // reduction)'], {'bias': '(False)'}), '(channel, channel // reduction, bias=False)\n', (4251, 4294), True, 'import torch.nn as nn\n'), ((4308, 4329), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4315, 4329), True, 'import torch.nn as nn\n'), ((4343, 4395), 'torch.nn.Linear', 'nn.Linear', (['(channel // reduction)', 'channel'], {'bias': '(False)'}), '(channel // reduction, channel, bias=False)\n', (4352, 4395), True, 'import torch.nn as nn\n'), ((4409, 4421), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4419, 4421), True, 'import torch.nn as nn\n'), ((7928, 7937), 'torch.nn.functional.relu', 'F.relu', (['s'], {}), '(s)\n', (7934, 7937), True, 'import torch.nn.functional as F\n'), ((7965, 7975), 'torch.nn.functional.relu', 'F.relu', (['sr'], {}), '(sr)\n', (7971, 7975), True, 'import torch.nn.functional as F\n'), ((8013, 8103), 'torch.nn.functional.interpolate', 'F.interpolate', (['pm'], {'scale_factor': 'self.scale_factor', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(pm, scale_factor=self.scale_factor, mode='bilinear',\n align_corners=True)\n", (8026, 8103), True, 'import torch.nn.functional as F\n'), ((8125, 8134), 'torch.nn.functional.relu', 'F.relu', (['m'], {}), '(m)\n', (8131, 8134), True, 'import torch.nn.functional as F\n'), ((8162, 8172), 'torch.nn.functional.relu', 'F.relu', (['mr'], {}), '(mr)\n', (8168, 8172), True, 'import torch.nn.functional as F\n'), ((8782, 8849), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(512, 256, kernel_size=3, stride=1, padding=1, bias=False)\n', (8791, 8849), True, 'import torch.nn as nn\n'), ((8889, 8908), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (8903, 8908), True, 'import torch.nn as nn\n'), ((8948, 8957), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8955, 8957), True, 'import torch.nn as nn\n'), ((8997, 9012), 
'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (9007, 9012), True, 'import torch.nn as nn\n'), ((9052, 9119), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(256, 256, kernel_size=3, stride=1, padding=1, bias=False)\n', (9061, 9119), True, 'import torch.nn as nn\n'), ((9159, 9178), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (9173, 9178), True, 'import torch.nn as nn\n'), ((9218, 9227), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9225, 9227), True, 'import torch.nn as nn\n'), ((9267, 9282), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (9277, 9282), True, 'import torch.nn as nn\n'), ((9322, 9364), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(1)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(256, 1, kernel_size=1, stride=1)\n', (9331, 9364), True, 'import torch.nn as nn\n'), ((9612, 9622), 'torch.nn.functional.relu', 'F.relu', (['r5'], {}), '(r5)\n', (9618, 9622), True, 'import torch.nn.functional as F\n'), ((9670, 9680), 'torch.nn.functional.relu', 'F.relu', (['m4'], {}), '(m4)\n', (9676, 9680), True, 'import torch.nn.functional as F\n'), ((9728, 9738), 'torch.nn.functional.relu', 'F.relu', (['m4'], {}), '(m4)\n', (9734, 9738), True, 'import torch.nn.functional as F\n'), ((10041, 10110), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(4)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(x, scale_factor=4, mode='bilinear', align_corners=True)\n", (10054, 10110), True, 'import torch.nn.functional as F\n'), ((11134, 11153), 'torch.exp', 'torch.exp', (['(-f_inter)'], {}), '(-f_inter)\n', (11143, 11153), False, 'import torch\n'), ((18229, 18239), 'torch.nn.functional.relu', 'F.relu', (['r4'], {}), '(r4)\n', (18235, 18239), True, 'import torch.nn.functional as F\n'), ((18286, 18296), 'torch.nn.functional.relu', 'F.relu', (['m4'], {}), '(m4)\n', (18292, 18296), True, 'import torch.nn.functional as F\n'), ((18343, 18353), 'torch.nn.functional.relu', 'F.relu', (['m4'], {}), '(m4)\n', (18349, 18353), True, 'import torch.nn.functional as F\n'), ((19260, 19275), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (19270, 19275), True, 'import torch.nn as nn\n'), ((19276, 19285), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (19282, 19285), True, 'import torch.nn.functional as F\n'), ((19335, 19350), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (19345, 19350), True, 'import torch.nn as nn\n'), ((19351, 19360), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (19357, 19360), True, 'import torch.nn.functional as F\n'), ((19410, 19425), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (19420, 19425), True, 'import torch.nn as nn\n'), ((19426, 19435), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (19432, 19435), True, 'import torch.nn.functional as F\n'), ((19496, 19505), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (19502, 19505), True, 'import torch.nn.functional as F\n'), ((2367, 2391), 'torch.max', 'torch.max', (['fg_map'], {'dim': '(0)'}), '(fg_map, dim=0)\n', (2376, 2391), False, 'import torch\n'), ((3200, 3224), 'torch.max', 'torch.max', (['fg_map'], {'dim': '(0)'}), '(fg_map, dim=0)\n', (3209, 3224), False, 'import torch\n'), ((14143, 14176), 'numpy.abs', 'np.abs', (['(que_fr - anno_fr_list_tmp)'], {}), '(que_fr - anno_fr_list_tmp)\n', (14149, 14176), True, 'import numpy as np\n'), ((15817, 15866), 'torch.bmm', 'torch.bmm', 
(['anno_indicator_feature', 'similarity_mat'], {}), '(anno_indicator_feature, similarity_mat)\n', (15826, 15866), False, 'import torch\n'), ((16117, 16156), 'torch.unsqueeze', 'torch.unsqueeze', (['que_feature_sim'], {'dim': '(0)'}), '(que_feature_sim, dim=0)\n', (16132, 16156), False, 'import torch\n'), ((16618, 16676), 'torch.bmm', 'torch.bmm', (['anno_indicator_feature[obj_idx]', 'similarity_mat'], {}), '(anno_indicator_feature[obj_idx], similarity_mat)\n', (16627, 16676), False, 'import torch\n'), ((16726, 16780), 'torch.stack', 'torch.stack', (['anno_indicator_feature_transferred'], {'dim': '(0)'}), '(anno_indicator_feature_transferred, dim=0)\n', (16737, 16780), False, 'import torch\n'), ((2459, 2479), 'torch.mean', 'torch.mean', (['scoremap'], {}), '(scoremap)\n', (2469, 2479), False, 'import torch\n'), ((3292, 3312), 'torch.mean', 'torch.mean', (['scoremap'], {}), '(scoremap)\n', (3302, 3312), False, 'import torch\n'), ((13020, 13056), 'torch.max', 'torch.max', (['diff'], {'dim': '(2)', 'keepdim': '(True)'}), '(diff, dim=2, keepdim=True)\n', (13029, 13056), False, 'import torch\n'), ((13301, 13333), 'torch.max', 'torch.max', (['scoremap_logit'], {'dim': '(0)'}), '(scoremap_logit, dim=0)\n', (13310, 13333), False, 'import torch\n'), ((15281, 15320), 'torch.unsqueeze', 'torch.unsqueeze', (['que_feature_sim'], {'dim': '(1)'}), '(que_feature_sim, dim=1)\n', (15296, 15320), False, 'import torch\n'), ((2257, 2306), 'torch.nn.functional.interpolate', 'F.interpolate', (['estimated_fgbg'], {'scale_factor': '(0.125)'}), '(estimated_fgbg, scale_factor=0.125)\n', (2270, 2306), True, 'import torch.nn.functional as F\n'), ((3090, 3139), 'torch.nn.functional.interpolate', 'F.interpolate', (['estimated_fgbg'], {'scale_factor': '(0.125)'}), '(estimated_fgbg, scale_factor=0.125)\n', (3103, 3139), True, 'import torch.nn.functional as F\n'), ((2483, 2511), 'torch.sum', 'torch.sum', (['(fg_map * scoremap)'], {}), '(fg_map * scoremap)\n', (2492, 2511), False, 'import torch\n'), ((3316, 3344), 'torch.sum', 'torch.sum', (['(fg_map * scoremap)'], {}), '(fg_map * scoremap)\n', (3325, 3344), False, 'import torch\n')] |
#!/usr/bin/env python
# coding: utf-8
# ### IMPORTING LIBRARIES AND DATASET
# In[1]:
import os
import cv2
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers, optimizers
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, Dropout
from tensorflow.keras.models import Model, load_model
from tensorflow.keras import backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# In[2]:
# Specify training data directory
XRay_Directory = 'Train'
# In[3]:
# List the folders in the directory
os.listdir(XRay_Directory)
# In[4]:
# Use image generator to generate tensor images data and normalize them
# Use 20% of the data for cross-validation
image_generator = ImageDataGenerator(rescale = 1./255, validation_split= 0.2)
# In[5]:
# Generate batches of 40 images
# Total number of images is 133*4 = 532 images
# Training is 428 (80%) and validation is 104 (20%)
# Perform shuffling and image resizing
train_generator = image_generator.flow_from_directory(batch_size = 40, directory= XRay_Directory, shuffle= True, target_size=(256,256), class_mode = 'categorical', subset="training")
# In[6]:
validation_generator = image_generator.flow_from_directory(batch_size = 40, directory= XRay_Directory, shuffle= True, target_size=(256,256), class_mode = 'categorical', subset="validation")
# In[7]:
# Generate a batch of 40 images and labels
train_images, train_labels = next(train_generator)
# In[8]:
train_images.shape
# In[9]:
train_labels.shape
# In[10]:
train_labels
# In[11]:
# Label translator: maps class index to human-readable name
label_names = {0 : 'Covid-19', 1 : 'Normal' , 2: 'Viral Pneumonia', 3 : 'Bacterial Pneumonia'}
# ### VISUALIZING DATASET
# In[12]:
# Creating a grid of 36 images along with their corresponding labels
L = 6
W = 6
fig, axes = plt.subplots(L, W, figsize = (12, 12))
axes = axes.ravel()
for i in np.arange(0, L*W):
    axes[i].imshow(train_images[i])
    axes[i].set_title(label_names[np.argmax(train_labels[i])])
    axes[i].axis('off')
plt.subplots_adjust(wspace = 0.5)
# ### DEEP NEURAL NETWORKS and TRANSFER LEARNING
# ### IMPORTING MODEL WITH PRETRAINED WEIGHTS
# In[13]:
basemodel = ResNet50(weights = 'imagenet', include_top = False, input_tensor = Input(shape=(256,256,3)))
# In[14]:
basemodel.summary()
# In[15]:
# Freeze the model up to stage 4 and re-train only stage 5 (the last 10 layers)
for layer in basemodel.layers[:-10]:
    layer.trainable = False
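# Optional sanity check (hypothetical, not part of the original notebook): confirm
# how many layers remain trainable after freezing.
# print(sum(l.trainable for l in basemodel.layers), 'of', len(basemodel.layers), 'layers trainable')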
# ### BUILDING AND TRAINING DEEP LEARNING MODEL
# In[16]:
headmodel = basemodel.output
headmodel = AveragePooling2D(pool_size = (4,4))(headmodel)
headmodel = Flatten(name= 'flatten')(headmodel)
headmodel = Dense(256, activation = "relu")(headmodel)
headmodel = Dropout(0.3)(headmodel)
headmodel = Dense(128, activation = "relu")(headmodel)
headmodel = Dropout(0.2)(headmodel)
headmodel = Dense(4, activation = 'softmax')(headmodel)
model = Model(inputs = basemodel.input, outputs = headmodel)
# In[17]:
model.compile(loss = 'categorical_crossentropy', optimizer=optimizers.RMSprop(lr = 1e-4, decay = 1e-6), metrics= ["accuracy"])
# In[18]:
# Use early stopping to exit training if the validation loss stops decreasing for `patience` consecutive epochs
earlystopping = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
# Save the checkpoint with the lowest validation loss
checkpointer = ModelCheckpoint(filepath="weights.hdf5", verbose=1, save_best_only=True)
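# (After training, the best checkpoint can be restored with model.load_weights("weights.hdf5").)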
# In[19]:
train_generator = image_generator.flow_from_directory(batch_size = 4, directory= XRay_Directory, shuffle= True, target_size=(256,256), class_mode= 'categorical', subset="training")
val_generator = image_generator.flow_from_directory(batch_size = 4, directory= XRay_Directory, shuffle= True, target_size=(256,256), class_mode= 'categorical', subset="validation")
# In[ ]:
history = model.fit_generator(train_generator, steps_per_epoch= train_generator.n // 4, epochs = 10, validation_data= val_generator, validation_steps= val_generator.n // 4, callbacks=[checkpointer, earlystopping])
# ### EVALUATING TRAINED DEEP LEARNING MODEL
# In[ ]:
history.history.keys()
# In[ ]:
plt.plot(history.history['accuracy'])
plt.plot(history.history['loss'])
plt.title('Model Loss and Accuracy Progress During Training')
plt.xlabel('Epoch')
plt.ylabel('Training Accuracy and Loss')
plt.legend(['Training Accuracy', 'Training Loss'])
# In[ ]:
plt.plot(history.history['val_loss'])
plt.title('Model Loss During Cross-Validation')
plt.xlabel('Epoch')
plt.ylabel('Validation Loss')
plt.legend(['Validation Loss'])
# In[ ]:
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy Progress During Cross-Validation')
plt.xlabel('Epoch')
plt.ylabel('Validation Accuracy')
plt.legend(['Validation Accuracy'])
# In[ ]:
test_directory = 'Test'
# In[ ]:
test_gen = ImageDataGenerator(rescale = 1./255)
test_generator = test_gen.flow_from_directory(batch_size = 40, directory= test_directory, shuffle= True, target_size=(256,256), class_mode= 'categorical')
evaluate = model.evaluate_generator(test_generator, steps=test_generator.n // 40, verbose=1)  # steps matches the batch size of 40
print('Test Accuracy : {}'.format(evaluate[1]))
# In[ ]:
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
prediction = []
original = []
image = []
for i in range(len(os.listdir(test_directory))):
    for item in os.listdir(os.path.join(test_directory, str(i))):
        img = cv2.imread(os.path.join(test_directory, str(i), item))
        img = cv2.resize(img, (256, 256))
        image.append(img)
        img = img / 255
        img = img.reshape(-1, 256, 256, 3)
        predict = model.predict(img)
        predict = np.argmax(predict)
        prediction.append(predict)
        original.append(i)
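# Note: this loop assumes Test/ contains one subfolder per class, named '0'..'3'
# to match label_names, and applies the same 1/255 normalization used in training.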
# In[ ]:
len(original)
# In[ ]:
score = accuracy_score(original,prediction)
print("Test Accuracy : {}".format(score))
# In[ ]:
L = 5
W = 5
fig, axes = plt.subplots(L, W, figsize = (12, 12))
axes = axes.ravel()
for i in np.arange(0, L*W):
    axes[i].imshow(image[i])
    axes[i].set_title('Guess={}\nTrue={}'.format(str(label_names[prediction[i]]), str(label_names[original[i]])))
    axes[i].axis('off')
plt.subplots_adjust(wspace = 1.2)
# In[ ]:
print(classification_report(np.asarray(original), np.asarray(prediction)))
# In[ ]:
cm = confusion_matrix(np.asarray(original), np.asarray(prediction))
ax = plt.subplot()
sns.heatmap(cm, annot = True, ax = ax)
ax.set_xlabel('Predicted')
ax.set_ylabel('Original')
ax.set_title('Confusion Matrix')
| [
"matplotlib.pyplot.ylabel",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.AveragePooling2D",
"numpy.arange",
"tensorflow.keras.layers.Input",
"os.listdir",
"matplotlib.pyplot.xlabel",
... | [((891, 917), 'os.listdir', 'os.listdir', (['XRay_Directory'], {}), '(XRay_Directory)\n', (901, 917), False, 'import os\n'), ((1066, 1125), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'validation_split': '(0.2)'}), '(rescale=1.0 / 255, validation_split=0.2)\n', (1084, 1125), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2160, 2196), 'matplotlib.pyplot.subplots', 'plt.subplots', (['L', 'W'], {'figsize': '(12, 12)'}), '(L, W, figsize=(12, 12))\n', (2172, 2196), True, 'import matplotlib.pyplot as plt\n'), ((2229, 2248), 'numpy.arange', 'np.arange', (['(0)', '(L * W)'], {}), '(0, L * W)\n', (2238, 2248), True, 'import numpy as np\n'), ((2372, 2403), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.5)'}), '(wspace=0.5)\n', (2391, 2403), True, 'import matplotlib.pyplot as plt\n'), ((3257, 3305), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'basemodel.input', 'outputs': 'headmodel'}), '(inputs=basemodel.input, outputs=headmodel)\n', (3262, 3305), False, 'from tensorflow.keras.models import Model, load_model\n'), ((3595, 3664), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'patience': '(20)'}), "(monitor='val_loss', mode='min', verbose=1, patience=20)\n", (3608, 3664), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler\n'), ((3730, 3802), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""weights.hdf5"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath='weights.hdf5', verbose=1, save_best_only=True)\n", (3745, 3802), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler\n'), ((4501, 4538), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (4509, 4538), True, 'import matplotlib.pyplot as plt\n'), ((4539, 4572), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (4547, 4572), True, 'import matplotlib.pyplot as plt\n'), ((4574, 4635), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Loss and Accuracy Progress During Training"""'], {}), "('Model Loss and Accuracy Progress During Training')\n", (4583, 4635), True, 'import matplotlib.pyplot as plt\n'), ((4636, 4655), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (4646, 4655), True, 'import matplotlib.pyplot as plt\n'), ((4656, 4696), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Training Accuracy and Loss"""'], {}), "('Training Accuracy and Loss')\n", (4666, 4696), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4747), 'matplotlib.pyplot.legend', 'plt.legend', (["['Training Accuracy', 'Training Loss']"], {}), "(['Training Accuracy', 'Training Loss'])\n", (4707, 4747), True, 'import matplotlib.pyplot as plt\n'), ((4761, 4798), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (4769, 4798), True, 'import matplotlib.pyplot as plt\n'), ((4799, 4846), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Loss During Cross-Validation"""'], {}), "('Model Loss During Cross-Validation')\n", (4808, 4846), True, 'import matplotlib.pyplot as plt\n'), ((4847, 4866), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), 
"('Epoch')\n", (4857, 4866), True, 'import matplotlib.pyplot as plt\n'), ((4867, 4896), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Validation Loss"""'], {}), "('Validation Loss')\n", (4877, 4896), True, 'import matplotlib.pyplot as plt\n'), ((4897, 4928), 'matplotlib.pyplot.legend', 'plt.legend', (["['Validation Loss']"], {}), "(['Validation Loss'])\n", (4907, 4928), True, 'import matplotlib.pyplot as plt\n'), ((4942, 4983), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (4950, 4983), True, 'import matplotlib.pyplot as plt\n'), ((4984, 5044), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Accuracy Progress During Cross-Validation"""'], {}), "('Model Accuracy Progress During Cross-Validation')\n", (4993, 5044), True, 'import matplotlib.pyplot as plt\n'), ((5045, 5064), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (5055, 5064), True, 'import matplotlib.pyplot as plt\n'), ((5065, 5098), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Validation Accuracy"""'], {}), "('Validation Accuracy')\n", (5075, 5098), True, 'import matplotlib.pyplot as plt\n'), ((5099, 5134), 'matplotlib.pyplot.legend', 'plt.legend', (["['Validation Accuracy']"], {}), "(['Validation Accuracy'])\n", (5109, 5134), True, 'import matplotlib.pyplot as plt\n'), ((5196, 5233), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (5214, 5233), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((6168, 6204), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['original', 'prediction'], {}), '(original, prediction)\n', (6182, 6204), False, 'from sklearn.metrics import confusion_matrix, classification_report, accuracy_score\n'), ((6284, 6320), 'matplotlib.pyplot.subplots', 'plt.subplots', (['L', 'W'], {'figsize': '(12, 12)'}), '(L, W, figsize=(12, 12))\n', (6296, 6320), True, 'import matplotlib.pyplot as plt\n'), ((6353, 6372), 'numpy.arange', 'np.arange', (['(0)', '(L * W)'], {}), '(0, L * W)\n', (6362, 6372), True, 'import numpy as np\n'), ((6540, 6571), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(1.2)'}), '(wspace=1.2)\n', (6559, 6571), True, 'import matplotlib.pyplot as plt\n'), ((6749, 6762), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (6760, 6762), True, 'import matplotlib.pyplot as plt\n'), ((6763, 6797), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'ax': 'ax'}), '(cm, annot=True, ax=ax)\n', (6774, 6797), True, 'import seaborn as sns\n'), ((2915, 2949), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(4, 4)'}), '(pool_size=(4, 4))\n', (2931, 2949), False, 'from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, Dropout\n'), ((2974, 2997), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'name': '"""flatten"""'}), "(name='flatten')\n", (2981, 2997), False, 'from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, Dropout\n'), ((3022, 3051), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (3027, 3051), False, 'from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, 
AveragePooling2D, MaxPooling2D, Dropout\n'), ((3077, 3089), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (3084, 3089), False, 'from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, Dropout\n'), ((3113, 3142), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (3118, 3142), False, 'from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, Dropout\n'), ((3168, 3180), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (3175, 3180), False, 'from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, Dropout\n'), ((3204, 3234), 'tensorflow.keras.layers.Dense', 'Dense', (['(4)'], {'activation': '"""softmax"""'}), "(4, activation='softmax')\n", (3209, 3234), False, 'from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, Dropout\n'), ((6698, 6718), 'numpy.asarray', 'np.asarray', (['original'], {}), '(original)\n', (6708, 6718), True, 'import numpy as np\n'), ((6720, 6742), 'numpy.asarray', 'np.asarray', (['prediction'], {}), '(prediction)\n', (6730, 6742), True, 'import numpy as np\n'), ((2600, 2626), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(256, 256, 3)'}), '(shape=(256, 256, 3))\n', (2605, 2626), False, 'from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, Dropout\n'), ((3383, 3425), 'tensorflow.keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'lr': '(0.0001)', 'decay': '(1e-06)'}), '(lr=0.0001, decay=1e-06)\n', (3401, 3425), False, 'from tensorflow.keras import layers, optimizers\n'), ((5693, 5719), 'os.listdir', 'os.listdir', (['test_directory'], {}), '(test_directory)\n', (5703, 5719), False, 'import os\n'), ((5868, 5895), 'cv2.resize', 'cv2.resize', (['img', '(256, 256)'], {}), '(img, (256, 256))\n', (5878, 5895), False, 'import cv2\n'), ((6039, 6057), 'numpy.argmax', 'np.argmax', (['predict'], {}), '(predict)\n', (6048, 6057), True, 'import numpy as np\n'), ((6616, 6636), 'numpy.asarray', 'np.asarray', (['original'], {}), '(original)\n', (6626, 6636), True, 'import numpy as np\n'), ((6638, 6660), 'numpy.asarray', 'np.asarray', (['prediction'], {}), '(prediction)\n', (6648, 6660), True, 'import numpy as np\n'), ((2318, 2344), 'numpy.argmax', 'np.argmax', (['train_labels[i]'], {}), '(train_labels[i])\n', (2327, 2344), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from mlpipe.processors.i_processor import IPreProcessor
class PreProcessData(IPreProcessor):
    def process(self, raw_data, input_data, ground_truth, piped_params=None):
        ground_truth = np.zeros(10)
        ground_truth[raw_data["label"]] = 1.0
        png_binary = raw_data["img"]
        png_img = np.frombuffer(png_binary, np.uint8)
        input_data = cv2.imdecode(png_img, cv2.IMREAD_COLOR)
        return raw_data, input_data, ground_truth, piped_params
| [
"numpy.frombuffer",
"numpy.zeros",
"cv2.imdecode"
] | [((226, 238), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (234, 238), True, 'import numpy as np\n'), ((341, 376), 'numpy.frombuffer', 'np.frombuffer', (['png_binary', 'np.uint8'], {}), '(png_binary, np.uint8)\n', (354, 376), True, 'import numpy as np\n'), ((398, 437), 'cv2.imdecode', 'cv2.imdecode', (['png_img', 'cv2.IMREAD_COLOR'], {}), '(png_img, cv2.IMREAD_COLOR)\n', (410, 437), False, 'import cv2\n')] |
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
sys.path.append('/home/jwalker/dynamics/python/monsoon-onset')
import os
import numpy as np
import xarray as xray
import pandas as pd
import matplotlib.pyplot as plt
import collections
import atmos as atm
import merra
import indices
import utils
# ----------------------------------------------------------------------
version = 'merra2'
years = np.arange(1980, 2016)
onset_nm = 'CHP_MFC'
plevs = [1000,925,850,775,700,600,500,400,300,250,200,150,100,70,50,30,20]
ind_nm, npre, npost = 'onset', 140, 230
#ind_nm, npre, npost = 'retreat', 270, 100
datadir = atm.homedir() + 'datastore/%s/analysis/' % version
savedir = atm.homedir() + 'datastore/%s/analysis/' % version
filestr = datadir + version + '_ubudget%d_ndays5_60E-100E_%d.nc'
savestr = savedir + version + '_ubudget%d_dailyrel_'
if ind_nm == 'retreat':
    savestr = savestr + 'retreat_'
savestr = savestr + onset_nm + '_ndays5_60E-100E'
datafiles, savefiles = {}, {}
for plev in plevs:
    datafiles[plev] = [filestr % (plev, yr) for yr in years]
    savefiles[plev] = [savestr % plev + '_%d.nc' % yr for yr in years]
yearstr = '%d-%d' % (min(years), max(years))
indfile = savedir + version + '_index_%s_%s.nc' % (onset_nm, yearstr)
# ----------------------------------------------------------------------
# Onset index for each year
print('Opening ' + indfile)
with xray.open_dataset(indfile) as index:
    index.load()
onset = index['onset'].values
retreat = index['retreat'].values
# ----------------------------------------------------------------------
# Get daily data
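# get_data loads one year's daily file and, when the relative window
# [d0 - npre, d0 + npost] spills past the calendar year, stitches in the
# neighbouring years' files via utils.wrapyear.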
def get_data(datafile, year, d0, npre, npost):
    daymin, daymax = d0 - npre, d0 + npost
    ndays = len(atm.season_days('ANN', year))
    file_pre = datafile.replace(str(year), str(year - 1))
    file_post = datafile.replace(str(year), str(year + 1))
    if daymin < 1 and os.path.isfile(file_pre):
        print('---Loading prev year ' + file_pre)
        with xray.open_dataset(file_pre) as ds_pre:
            ds_pre.load()
    else:
        ds_pre = None
    if daymax > ndays and os.path.isfile(file_post):
        print('---Loading next year ' + file_post)
        with xray.open_dataset(file_post) as ds_post:
            ds_post.load()
    else:
        ds_post = None
    print('Loading ' + datafile)
    with xray.open_dataset(datafile) as ds:
        data = utils.wrapyear(ds, ds_pre, ds_post, daymin, daymax, year=year)
        data.attrs = ds.attrs
    return data
for plev in plevs:
    for y, year in enumerate(years):
        datafile = datafiles[plev][y]
        d_onset, d_retreat = onset[y], retreat[y]
        d0 = int(index[ind_nm][y].values)
        ds_rel = xray.Dataset()
        ds = get_data(datafile, year, d0, npre, npost)
        ds_rel.attrs = ds.attrs
        for nm in ds.data_vars:
            var = atm.expand_dims(ds[nm], 'year', year)
            ds_rel[nm] = utils.daily_rel2onset(var, d0, npre, npost)
        ds_rel.attrs['d_onset'] = d_onset
        ds_rel.attrs['d_retreat'] = d_retreat
        savefile = savefiles[plev][y]
        print('Saving to ' + savefile)
        ds_rel.to_netcdf(savefile)
# ----------------------------------------------------------------------
# Compute climatologies and save
yearstr = '%d-%d' % (years.min(), years.max())
for plev in plevs:
    relfiles = savefiles[plev]
    savefile = savestr % plev + '_' + yearstr + '.nc'
    ds = atm.mean_over_files(relfiles)
    ds.attrs['years'] = years
    print('Saving to ' + savefile)
    ds.to_netcdf(savefile)
# ----------------------------------------------------------------------
# Concatenate plevels in climatology and save
files = [savestr % plev + '_' + yearstr + '.nc' for plev in plevs]
ubudget = xray.Dataset()
pname, pdim = 'Height', 1
subset_dict = {'lat' : (-60, 60), 'lon' : (40, 120)}
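# Trim each level to the lat/lon window of interest before concatenating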
for i, plev in enumerate(plevs):
    filenm = files[i]
    print('Loading ' + filenm)
    with xray.open_dataset(filenm) as ds:
        ds = atm.subset(ds, subset_dict)
        ds.load()
    for nm in ds.data_vars:
        ds[nm] = atm.expand_dims(ds[nm], pname, plev, axis=pdim)
    if i == 0:
        ubudget = ds
    else:
        ubudget = xray.concat([ubudget, ds], dim=pname)
ubudget.coords[pname].attrs['units'] = 'hPa'
savefile = files[0]
savefile = savefile.replace('%d' % plevs[0], '')
print('Saving to ' + savefile)
ubudget.to_netcdf(savefile)
| [
"atmos.mean_over_files",
"utils.daily_rel2onset",
"atmos.homedir",
"xarray.Dataset",
"os.path.isfile",
"atmos.expand_dims",
"xarray.concat",
"utils.wrapyear",
"atmos.subset",
"xarray.open_dataset",
"sys.path.append",
"atmos.season_days",
"numpy.arange"
] | [((11, 71), 'sys.path.append', 'sys.path.append', (['"""/home/jwalker/dynamics/python/atmos-tools"""'], {}), "('/home/jwalker/dynamics/python/atmos-tools')\n", (26, 71), False, 'import sys\n'), ((72, 131), 'sys.path.append', 'sys.path.append', (['"""/home/jwalker/dynamics/python/atmos-read"""'], {}), "('/home/jwalker/dynamics/python/atmos-read')\n", (87, 131), False, 'import sys\n'), ((132, 194), 'sys.path.append', 'sys.path.append', (['"""/home/jwalker/dynamics/python/monsoon-onset"""'], {}), "('/home/jwalker/dynamics/python/monsoon-onset')\n", (147, 194), False, 'import sys\n'), ((481, 502), 'numpy.arange', 'np.arange', (['(1980)', '(2016)'], {}), '(1980, 2016)\n', (490, 502), True, 'import numpy as np\n'), ((3805, 3819), 'xarray.Dataset', 'xray.Dataset', ([], {}), '()\n', (3817, 3819), True, 'import xarray as xray\n'), ((693, 706), 'atmos.homedir', 'atm.homedir', ([], {}), '()\n', (704, 706), True, 'import atmos as atm\n'), ((754, 767), 'atmos.homedir', 'atm.homedir', ([], {}), '()\n', (765, 767), True, 'import atmos as atm\n'), ((1463, 1489), 'xarray.open_dataset', 'xray.open_dataset', (['indfile'], {}), '(indfile)\n', (1480, 1489), True, 'import xarray as xray\n'), ((3485, 3514), 'atmos.mean_over_files', 'atm.mean_over_files', (['relfiles'], {}), '(relfiles)\n', (3504, 3514), True, 'import atmos as atm\n'), ((1779, 1807), 'atmos.season_days', 'atm.season_days', (['"""ANN"""', 'year'], {}), "('ANN', year)\n", (1794, 1807), True, 'import atmos as atm\n'), ((1947, 1971), 'os.path.isfile', 'os.path.isfile', (['file_pre'], {}), '(file_pre)\n', (1961, 1971), False, 'import os\n'), ((2159, 2184), 'os.path.isfile', 'os.path.isfile', (['file_post'], {}), '(file_post)\n', (2173, 2184), False, 'import os\n'), ((2393, 2420), 'xarray.open_dataset', 'xray.open_dataset', (['datafile'], {}), '(datafile)\n', (2410, 2420), True, 'import xarray as xray\n'), ((2443, 2505), 'utils.wrapyear', 'utils.wrapyear', (['ds', 'ds_pre', 'ds_post', 'daymin', 'daymax'], {'year': 'year'}), '(ds, ds_pre, ds_post, daymin, daymax, year=year)\n', (2457, 2505), False, 'import utils\n'), ((2757, 2771), 'xarray.Dataset', 'xray.Dataset', ([], {}), '()\n', (2769, 2771), True, 'import xarray as xray\n'), ((3994, 4019), 'xarray.open_dataset', 'xray.open_dataset', (['filenm'], {}), '(filenm)\n', (4011, 4019), True, 'import xarray as xray\n'), ((4040, 4067), 'atmos.subset', 'atm.subset', (['ds', 'subset_dict'], {}), '(ds, subset_dict)\n', (4050, 4067), True, 'import atmos as atm\n'), ((4131, 4178), 'atmos.expand_dims', 'atm.expand_dims', (['ds[nm]', 'pname', 'plev'], {'axis': 'pdim'}), '(ds[nm], pname, plev, axis=pdim)\n', (4146, 4178), True, 'import atmos as atm\n'), ((4243, 4280), 'xarray.concat', 'xray.concat', (['[ubudget, ds]'], {'dim': 'pname'}), '([ubudget, ds], dim=pname)\n', (4254, 4280), True, 'import xarray as xray\n'), ((2036, 2063), 'xarray.open_dataset', 'xray.open_dataset', (['file_pre'], {}), '(file_pre)\n', (2053, 2063), True, 'import xarray as xray\n'), ((2250, 2278), 'xarray.open_dataset', 'xray.open_dataset', (['file_post'], {}), '(file_post)\n', (2267, 2278), True, 'import xarray as xray\n'), ((2909, 2946), 'atmos.expand_dims', 'atm.expand_dims', (['ds[nm]', '"""year"""', 'year'], {}), "(ds[nm], 'year', year)\n", (2924, 2946), True, 'import atmos as atm\n'), ((2972, 3015), 'utils.daily_rel2onset', 'utils.daily_rel2onset', (['var', 'd0', 'npre', 'npost'], {}), '(var, d0, npre, npost)\n', (2993, 3015), False, 'import utils\n')] |
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from scipy.spatial.transform import Rotation
from tadataka.matrix import motion_matrix
from tadataka.rigid_transform import (inv_transform_all, transform_all,
transform_each, Transform, transform_se3)
def test_transform_each():
    points = np.array([
        [1, 2, 5],
        [4, -2, 3],
    ])
    rotations = np.array([
        [[1, 0, 0],
         [0, 0, -1],
         [0, 1, 0]],
        [[0, 0, -1],
         [0, 1, 0],
         [1, 0, 0]]
    ])
    translations = np.array([
        [1, 2, 3],
        [4, 5, 6]
    ])
    expected = np.array([
        [2, -3, 5],  # [ 1, -5, 2] + [ 1, 2, 3]
        [1, 3, 10]   # [ -3, -2, 4] + [ 4, 5, 6]
    ])
    assert_array_equal(
        transform_each(rotations, translations, points),
        expected
    )
def test_transform_all():
    points = np.array([
        [1, 2, 5],
        [4, -2, 3],
        [0, 0, 6]
    ])
    rotations = np.array([
        [[1, 0, 0],
         [0, 0, -1],
         [0, 1, 0]],
        [[0, 0, -1],
         [0, 1, 0],
         [1, 0, 0]]
    ])
    translations = np.array([
        [1, 2, 3],
        [4, 5, 6]
    ])
    expected = np.array([
        [[2, -3, 5],   # [ 1, -5, 2] + [ 1, 2, 3]
         [5, -1, 1],   # [ 4, -3, -2] + [ 1, 2, 3]
         [1, -4, 3]],  # [ 0, -6, 0] + [ 1, 2, 3]
        [[-1, 7, 7],   # [-5, 2, 1] + [ 4, 5, 6]
         [1, 3, 10],   # [-3, -2, 4] + [ 4, 5, 6]
         [-2, 5, 6]]   # [-6, 0, 0] + [ 4, 5, 6]
    ])
    assert_array_equal(transform_all(rotations, translations, points),
                       expected)
def test_inv_transform_all():
    points = np.array([
        [1, 2, 5],
        [4, -2, 3],
        [0, 0, 6]
    ])
    rotations = np.array([
        [[1, 0, 0],
         [0, 0, -1],
         [0, 1, 0]],
        [[0, 0, -1],
         [0, 1, 0],
         [1, 0, 0]]
    ])
    # [R.T for R in rotations]
    # [[1, 0, 0],
    #  [0, 0, 1],
    #  [0, -1, 0]],
    # [[0, 0, 1],
    #  [0, 1, 0],
    #  [-1, 0, 0]]
    translations = np.array([
        [1, 2, 3],
        [4, 5, 6]
    ])
    # p - t
    # [[0, 0, 2],
    #  [3, -4, 0],
    #  [-1, -2, 3]],
    # [[-3, -3, -1],
    #  [0, -7, -3],
    #  [-4, -5, 0]]
    # np.dot(R.T, p-t)
    expected = np.array([
        [[0, 2, 0],
         [3, 0, 4],
         [-1, 3, 2]],
        [[-1, -3, 3],
         [-3, -7, 0],
         [0, -5, 4]]
    ])
    assert_array_equal(inv_transform_all(rotations, translations, points),
                       expected)
def test_transform_class():
    P = np.array([
        [1, 2, 5],
        [4, -2, 3],
    ])
    R = np.array([
        [1, 0, 0],
        [0, 0, -1],
        [0, 1, 0]
    ])
    t = np.array([1, 2, 3])
    assert_array_equal(
        Transform(R, t, s=1.0)(P),
        [[2, -3, 5],  # [ 1 -5 2] + [ 1 2 3]
         [5, -1, 1]]  # [ 4 -3 -2] + [ 1 2 3]
    )
    assert_array_equal(
        Transform(R, t, s=0.1)(P),
        [[1.1, 1.5, 3.2],  # [ 0.1 -0.5 0.2] + [ 1 2 3]
         [1.4, 1.7, 2.8]]  # [ 0.4 -0.3 -0.2] + [ 1 2 3]
    )
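# Note on the test below: R_10 is a random matrix rather than a proper rotation;
# the assertion only exercises the affine identity transform_se3(T, p) == R @ p + t,
# with motion_matrix packing (R_10, t_10) into one homogeneous transform.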
def test_transform_se3():
    R_10 = np.random.random((3, 3))
    t_10 = np.random.random(3)
    T_10 = motion_matrix(R_10, t_10)
    P0 = np.random.uniform(-10, 10, (10, 3))
    P1 = transform_se3(T_10, P0)
    assert_array_almost_equal(P1, np.dot(R_10, P0.T).T + t_10)
| [
"tadataka.matrix.motion_matrix",
"tadataka.rigid_transform.transform_all",
"numpy.random.random",
"tadataka.rigid_transform.Transform",
"numpy.array",
"numpy.dot",
"tadataka.rigid_transform.transform_each",
"tadataka.rigid_transform.transform_se3",
"numpy.random.uniform",
"tadataka.rigid_transform... | [((374, 407), 'numpy.array', 'np.array', (['[[1, 2, 5], [4, -2, 3]]'], {}), '([[1, 2, 5], [4, -2, 3]])\n', (382, 407), True, 'import numpy as np\n'), ((448, 534), 'numpy.array', 'np.array', (['[[[1, 0, 0], [0, 0, -1], [0, 1, 0]], [[0, 0, -1], [0, 1, 0], [1, 0, 0]]]'], {}), '([[[1, 0, 0], [0, 0, -1], [0, 1, 0]], [[0, 0, -1], [0, 1, 0], [1, 0,\n 0]]])\n', (456, 534), True, 'import numpy as np\n'), ((609, 641), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (617, 641), True, 'import numpy as np\n'), ((680, 714), 'numpy.array', 'np.array', (['[[2, -3, 5], [1, 3, 10]]'], {}), '([[2, -3, 5], [1, 3, 10]])\n', (688, 714), True, 'import numpy as np\n'), ((948, 992), 'numpy.array', 'np.array', (['[[1, 2, 5], [4, -2, 3], [0, 0, 6]]'], {}), '([[1, 2, 5], [4, -2, 3], [0, 0, 6]])\n', (956, 992), True, 'import numpy as np\n'), ((1040, 1126), 'numpy.array', 'np.array', (['[[[1, 0, 0], [0, 0, -1], [0, 1, 0]], [[0, 0, -1], [0, 1, 0], [1, 0, 0]]]'], {}), '([[[1, 0, 0], [0, 0, -1], [0, 1, 0]], [[0, 0, -1], [0, 1, 0], [1, 0,\n 0]]])\n', (1048, 1126), True, 'import numpy as np\n'), ((1201, 1233), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (1209, 1233), True, 'import numpy as np\n'), ((1272, 1363), 'numpy.array', 'np.array', (['[[[2, -3, 5], [5, -1, 1], [1, -4, 3]], [[-1, 7, 7], [1, 3, 10], [-2, 5, 6]]]'], {}), '([[[2, -3, 5], [5, -1, 1], [1, -4, 3]], [[-1, 7, 7], [1, 3, 10], [-\n 2, 5, 6]]])\n', (1280, 1363), True, 'import numpy as np\n'), ((1758, 1802), 'numpy.array', 'np.array', (['[[1, 2, 5], [4, -2, 3], [0, 0, 6]]'], {}), '([[1, 2, 5], [4, -2, 3], [0, 0, 6]])\n', (1766, 1802), True, 'import numpy as np\n'), ((1850, 1936), 'numpy.array', 'np.array', (['[[[1, 0, 0], [0, 0, -1], [0, 1, 0]], [[0, 0, -1], [0, 1, 0], [1, 0, 0]]]'], {}), '([[[1, 0, 0], [0, 0, -1], [0, 1, 0]], [[0, 0, -1], [0, 1, 0], [1, 0,\n 0]]])\n', (1858, 1936), True, 'import numpy as np\n'), ((2154, 2186), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (2162, 2186), True, 'import numpy as np\n'), ((2380, 2470), 'numpy.array', 'np.array', (['[[[0, 2, 0], [3, 0, 4], [-1, 3, 2]], [[-1, -3, 3], [-3, -7, 0], [0, -5, 4]]]'], {}), '([[[0, 2, 0], [3, 0, 4], [-1, 3, 2]], [[-1, -3, 3], [-3, -7, 0], [0,\n -5, 4]]])\n', (2388, 2470), True, 'import numpy as np\n'), ((2672, 2705), 'numpy.array', 'np.array', (['[[1, 2, 5], [4, -2, 3]]'], {}), '([[1, 2, 5], [4, -2, 3]])\n', (2680, 2705), True, 'import numpy as np\n'), ((2738, 2782), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 0, -1], [0, 1, 0]]'], {}), '([[1, 0, 0], [0, 0, -1], [0, 1, 0]])\n', (2746, 2782), True, 'import numpy as np\n'), ((2821, 2840), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2829, 2840), True, 'import numpy as np\n'), ((3266, 3290), 'numpy.random.random', 'np.random.random', (['(3, 3)'], {}), '((3, 3))\n', (3282, 3290), True, 'import numpy as np\n'), ((3302, 3321), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (3318, 3321), True, 'import numpy as np\n'), ((3333, 3358), 'tadataka.matrix.motion_matrix', 'motion_matrix', (['R_10', 't_10'], {}), '(R_10, t_10)\n', (3346, 3358), False, 'from tadataka.matrix import motion_matrix\n'), ((3369, 3404), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)', '(10, 3)'], {}), '(-10, 10, (10, 3))\n', (3386, 3404), True, 'import numpy as np\n'), ((3414, 3437), 'tadataka.rigid_transform.transform_se3', 'transform_se3', (['T_10', 'P0'], 
{}), '(T_10, P0)\n', (3427, 3437), False, 'from tadataka.rigid_transform import inv_transform_all, transform_all, transform_each, Transform, transform_se3\n'), ((835, 882), 'tadataka.rigid_transform.transform_each', 'transform_each', (['rotations', 'translations', 'points'], {}), '(rotations, translations, points)\n', (849, 882), False, 'from tadataka.rigid_transform import inv_transform_all, transform_all, transform_each, Transform, transform_se3\n'), ((1632, 1678), 'tadataka.rigid_transform.transform_all', 'transform_all', (['rotations', 'translations', 'points'], {}), '(rotations, translations, points)\n', (1645, 1678), False, 'from tadataka.rigid_transform import inv_transform_all, transform_all, transform_each, Transform, transform_se3\n'), ((2549, 2599), 'tadataka.rigid_transform.inv_transform_all', 'inv_transform_all', (['rotations', 'translations', 'points'], {}), '(rotations, translations, points)\n', (2566, 2599), False, 'from tadataka.rigid_transform import inv_transform_all, transform_all, transform_each, Transform, transform_se3\n'), ((2874, 2896), 'tadataka.rigid_transform.Transform', 'Transform', (['R', 't'], {'s': '(1.0)'}), '(R, t, s=1.0)\n', (2883, 2896), False, 'from tadataka.rigid_transform import inv_transform_all, transform_all, transform_each, Transform, transform_se3\n'), ((3056, 3078), 'tadataka.rigid_transform.Transform', 'Transform', (['R', 't'], {'s': '(0.1)'}), '(R, t, s=0.1)\n', (3065, 3078), False, 'from tadataka.rigid_transform import inv_transform_all, transform_all, transform_each, Transform, transform_se3\n'), ((3472, 3490), 'numpy.dot', 'np.dot', (['R_10', 'P0.T'], {}), '(R_10, P0.T)\n', (3478, 3490), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Evaluate the stability (i.e. agreement) between a set of partitions generated on the same dataset, using Pairwise Normalized Mutual Information (PNMI).
Sample usage:
python eval-partition-stability.py models/base/*partition*.pkl
"""
import os, sys
import logging as log
from optparse import OptionParser
import numpy as np
from prettytable import PrettyTable
from sklearn.metrics.cluster import normalized_mutual_info_score
import unsupervised.nmf, unsupervised.util
# --------------------------------------------------------------
def main():
    parser = OptionParser(usage="usage: %prog [options] partition_file1|directory1 ...")
    parser.add_option("-o","--output", action="store", type="string", dest="out_path", help="path for CSV summary file (by default this is not written)", default=None)
    parser.add_option("--hist", action="store", type="string", dest="hist_out_path", help="path for histogram CSV file (by default this is not written)", default=None)
    # Parse command line arguments
    (options, args) = parser.parse_args()
    if( len(args) < 1 ):
        parser.error( "Must specify one or more partitions/directories" )
    log.basicConfig(level=20, format='%(message)s')
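    # (level 20 == logging.INFO)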
    # Get list of all specified partition files
    file_paths = []
    for path in args:
        if not os.path.exists( path ):
            log.error("No such file or directory: %s" % path )
            sys.exit(1)
        if os.path.isdir(path):
            log.debug("Searching %s for partitions" % path )
            for dir_path, dirs, files in os.walk(path):
                for fname in files:
                    if fname.startswith("partition") and fname.endswith(".pkl"):
                        file_paths.append( os.path.join( dir_path, fname ) )
        else:
            file_paths.append( path )
    file_paths.sort()
    if len(file_paths) == 0:
        log.error("No partition files found to validate")
        sys.exit(1)
    log.info("Processing partitions for %d base topic models ..." % len(file_paths) )
    # Load cached partitions
    all_partitions = []
    for file_path in file_paths:
        log.debug( "Loading partition from %s" % file_path )
        partition,cluster_doc_ids = unsupervised.util.load_partition( file_path )
        all_partitions.append( partition )
    r = len(all_partitions)
    log.info( "Evaluating stability of %d partitions with NMI ..." % r )
    # compute NMI of each pair of partitions
all_scores = []
for i in range(r):
for j in range(i+1,r):
score = normalized_mutual_info_score( all_partitions[i], all_partitions[j] )
all_scores.append( score )
# Get overall score across all pairs
all_scores = np.array( all_scores )
tab = PrettyTable( ["statistic","stability"] )
tab.align["statistic"] = "l"
tab.add_row( [ "mean", "%.3f" % all_scores.mean() ] )
tab.add_row( [ "median", "%.3f" % np.median(all_scores) ] )
tab.add_row( [ "sdev", "%.3f" % all_scores.std() ] )
tab.add_row( [ "min", "%.3f" % all_scores.min() ] )
tab.add_row( [ "max", "%.3f" % all_scores.max() ] )
log.info( tab )
# Write summary to CSV?
if not options.out_path is None:
log.info("Writing summary of results to %s" % options.out_path)
unsupervised.util.write_table( options.out_path, tab )
# Write histogram to CSV?
if not options.hist_out_path is None:
#bins = np.arange(0,1.1,0.1)
bins = np.arange(0,1.01,0.05)
inds = list(np.digitize(all_scores, bins, right=True))
log.info("Writing histogram of results to %s" % options.hist_out_path)
with open(options.hist_out_path,"w") as fout:
fout.write("NMI,Count,Fraction\n")
for ind, b in enumerate(bins):
freq = inds.count(ind)
frac = float(freq)/len(all_scores)
fout.write("%.2f,%d,%.3f\n" % (b, freq, frac ) )
# --------------------------------------------------------------
if __name__ == "__main__":
main()
| [
"logging.basicConfig",
"prettytable.PrettyTable",
"os.path.exists",
"numpy.median",
"logging.debug",
"numpy.digitize",
"os.walk",
"os.path.join",
"optparse.OptionParser",
"numpy.array",
"sklearn.metrics.cluster.normalized_mutual_info_score",
"os.path.isdir",
"sys.exit",
"logging.info",
"... | [((584, 659), 'optparse.OptionParser', 'OptionParser', ([], {'usage': '"""usage: %prog [options] partition_file1|directory1 ..."""'}), "(usage='usage: %prog [options] partition_file1|directory1 ...')\n", (596, 659), False, 'from optparse import OptionParser\n'), ((1152, 1199), 'logging.basicConfig', 'log.basicConfig', ([], {'level': '(20)', 'format': '"""%(message)s"""'}), "(level=20, format='%(message)s')\n", (1167, 1199), True, 'import logging as log\n'), ((2164, 2230), 'logging.info', 'log.info', (["('Evaluating stability of %d partitions with NMI ...' % r)"], {}), "('Evaluating stability of %d partitions with NMI ...' % r)\n", (2172, 2230), True, 'import logging as log\n'), ((2501, 2521), 'numpy.array', 'np.array', (['all_scores'], {}), '(all_scores)\n', (2509, 2521), True, 'import numpy as np\n'), ((2531, 2570), 'prettytable.PrettyTable', 'PrettyTable', (["['statistic', 'stability']"], {}), "(['statistic', 'stability'])\n", (2542, 2570), False, 'from prettytable import PrettyTable\n'), ((2880, 2893), 'logging.info', 'log.info', (['tab'], {}), '(tab)\n', (2888, 2893), True, 'import logging as log\n'), ((1389, 1408), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1402, 1408), False, 'import os, sys\n'), ((1743, 1792), 'logging.error', 'log.error', (['"""No partition files found to validate"""'], {}), "('No partition files found to validate')\n", (1752, 1792), True, 'import logging as log\n'), ((1795, 1806), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1803, 1806), False, 'import os, sys\n'), ((1971, 2021), 'logging.debug', 'log.debug', (["('Loading partition from %s' % file_path)"], {}), "('Loading partition from %s' % file_path)\n", (1980, 2021), True, 'import logging as log\n'), ((2959, 3022), 'logging.info', 'log.info', (["('Writing summary of results to %s' % options.out_path)"], {}), "('Writing summary of results to %s' % options.out_path)\n", (2967, 3022), True, 'import logging as log\n'), ((3187, 3211), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.05)'], {}), '(0, 1.01, 0.05)\n', (3196, 3211), True, 'import numpy as np\n'), ((3269, 3339), 'logging.info', 'log.info', (["('Writing histogram of results to %s' % options.hist_out_path)"], {}), "('Writing histogram of results to %s' % options.hist_out_path)\n", (3277, 3339), True, 'import logging as log\n'), ((1291, 1311), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1305, 1311), False, 'import os, sys\n'), ((1318, 1367), 'logging.error', 'log.error', (["('No such file or directory: %s' % path)"], {}), "('No such file or directory: %s' % path)\n", (1327, 1367), True, 'import logging as log\n'), ((1372, 1383), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1380, 1383), False, 'import os, sys\n'), ((1413, 1460), 'logging.debug', 'log.debug', (["('Searching %s for partitions' % path)"], {}), "('Searching %s for partitions' % path)\n", (1422, 1460), True, 'import logging as log\n'), ((1494, 1507), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (1501, 1507), False, 'import os, sys\n'), ((2349, 2415), 'sklearn.metrics.cluster.normalized_mutual_info_score', 'normalized_mutual_info_score', (['all_partitions[i]', 'all_partitions[j]'], {}), '(all_partitions[i], all_partitions[j])\n', (2377, 2415), False, 'from sklearn.metrics.cluster import normalized_mutual_info_score\n'), ((3224, 3265), 'numpy.digitize', 'np.digitize', (['all_scores', 'bins'], {'right': '(True)'}), '(all_scores, bins, right=True)\n', (3235, 3265), True, 'import numpy as np\n'), ((2693, 2714), 'numpy.median', 
'np.median', (['all_scores'], {}), '(all_scores)\n', (2702, 2714), True, 'import numpy as np\n'), ((1624, 1653), 'os.path.join', 'os.path.join', (['dir_path', 'fname'], {}), '(dir_path, fname)\n', (1636, 1653), False, 'import os, sys\n')] |
# USAGE
# python detection.py --input videos/sample1.mp4 --yolo yolo-coco
import numpy as np
import argparse
import imutils
import time
import cv2
import os
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="path to input video")
ap.add_argument("-y", "--yolo", default="yolo-coco", help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5, help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3, help="threshold when applying non-maxima suppression")
args = vars(ap.parse_args())
labelsPath = os.path.sep.join([args["yolo"], "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
weightsPath = os.path.sep.join([args["yolo"], "yolov3.weights"])
configPath = os.path.sep.join([args["yolo"], "yolov3.cfg"])
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
filename = args["input"][7:-4]
# `point_list` and `mouse_callback` are used below but were never defined in the
# original script; minimal definitions so the region-selection loop can run.
point_list = []
def mouse_callback(event, x, y, flags, param):
    # record each left-click: four region corners first, then a reference point
    if event == cv2.EVENT_LBUTTONDOWN:
        point_list.append((x, y))
cv2.namedWindow('original')
cv2.setMouseCallback('original', mouse_callback)
vs = cv2.VideoCapture(args["input"])
writer1, writer2 = None, None
W = int(vs.get(cv2.CAP_PROP_FRAME_WIDTH))
H = int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT))
print("W, H:", W, H)
# WRITE THE FIRST FRAME
success, image = vs.read()
if success:
cv2.imwrite("./videos/{}.jpg".format(filename), image)
img_original = cv2.imread('./videos/{}.jpg'.format(filename))
while True:
cv2.imshow("original", img_original)
H, W = img_original.shape[:2]
# press SPACEBAR to break
if cv2.waitKey(1)&0xFF == 32:
break
print("point_list:",point_list)
# coordinate order - upper left > upper right > lower left > lower right
pts_src = np.float32([list(point_list[0]), list(point_list[1]), list(point_list[2]), list(point_list[3])])
pts_dst = np.float32([[0,0], [W,0], [0,H], [W,H]])
pts = [list(point_list[0]), list(point_list[1]), list(point_list[3]), list(point_list[2])]
pts = np.array(pts)
pts = pts.reshape((-1, 1, 2))
space = np.int32(pts)
(x, y) = point_list[-1]
_original_coord = np.array([[x, y]], dtype='float32')
original_coord = np.array([_original_coord])
M = cv2.getPerspectiveTransform(pts_src, pts_dst)
HM, status = cv2.findHomography(pts_src, pts_dst)
cv2.destroyAllWindows()
try:
prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() else cv2.CAP_PROP_FRAME_COUNT
total = int(vs.get(prop))
print("[INFO] {} total frames in video".format(total))
except:
print("[INFO] could not determine # of frames in video")
print("[INFO] no approx. completion time can be provided")
total = -1
while True:
(grabbed, frame) = vs.read()
if not grabbed:
break
if W is None or H is None:
(H, W) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
boxes = []
confidences = []
classIDs = []
for output in layerOutputs:
for detection in output:
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
if confidence > args["confidence"]:
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
x1 = int(centerX - (width / 2))
y1 = int(centerY - (height / 2))
x2 = int(centerX + (width / 2))
y2 = int(centerY + (height / 2))
if LABELS[classID] == 'person':
boxes.append([x1, y1, x2, y2])
confidences.append(float(confidence))
classIDs.append(classID)
idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"], args["threshold"])
transformed_frame = cv2.warpPerspective(frame, M, (W, H))
if len(idxs) > 0:
for i in idxs.flatten():
(x1, y1) = (boxes[i][0], boxes[i][1])
(x2, y2) = (boxes[i][2], boxes[i][3])
original_1 = np.array([[[x1, y1]]], dtype='float32')
original_2 = np.array([[[x2, y2]]], dtype='float32')
transformed_1 = cv2.perspectiveTransform(original_1, HM)
transformed_2 = cv2.perspectiveTransform(original_2, HM)
t_x1, t_y1 = int(transformed_1[0][0][0]), int(transformed_1[0][0][1])
t_x2, t_y2 = int(transformed_2[0][0][0]), int(transformed_2[0][0][1])
x_center = int((x1 + x2) / 2)
y_bottom = y2
t_x_center = int((t_x1+t_x2)/2)
t_y_bottom = t_y2
color = [int(c) for c in COLORS[classIDs[i]]]
cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
cv2.polylines(frame, [space], True, (0,255,0), 2)
cv2.circle(frame, (x_center, y_bottom), 5, (0, 0, 255), -1)
cv2.circle(transformed_frame, (t_x_center, t_y_bottom), 5, (0, 0, 255), -1)
text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
cv2.putText(frame, text, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
if writer1 is None and writer2 is None:
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer1 = cv2.VideoWriter('output/{}_detect.avi'.format(filename), fourcc,
30, (frame.shape[1], frame.shape[0]), True)
writer2 = cv2.VideoWriter('output/{}_transform.avi'.format(filename), fourcc,
30, (transformed_frame.shape[1], transformed_frame.shape[0]), True)
if total > 0:
elap = (end - start)
print("[INFO] single frame took {:.4f} seconds".format(elap))
print("[INFO] estimated total time to finish: {:.4f}".format(elap * total))
writer1.write(frame)
writer2.write(transformed_frame)
print("[INFO] cleaning up...")
writer1.release()
writer2.release()
vs.release()
print("[INFO] Finished!")
| [
"cv2.rectangle",
"imutils.is_cv2",
"numpy.int32",
"cv2.imshow",
"numpy.array",
"os.path.sep.join",
"cv2.warpPerspective",
"cv2.destroyAllWindows",
"cv2.dnn.NMSBoxes",
"cv2.setMouseCallback",
"argparse.ArgumentParser",
"numpy.random.seed",
"cv2.VideoWriter_fourcc",
"cv2.perspectiveTransform... | [((164, 189), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (187, 189), False, 'import argparse\n'), ((632, 678), 'os.path.sep.join', 'os.path.sep.join', (["[args['yolo'], 'coco.names']"], {}), "([args['yolo'], 'coco.names'])\n", (648, 678), False, 'import os\n'), ((733, 751), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (747, 751), True, 'import numpy as np\n'), ((840, 890), 'os.path.sep.join', 'os.path.sep.join', (["[args['yolo'], 'yolov3.weights']"], {}), "([args['yolo'], 'yolov3.weights'])\n", (856, 890), False, 'import os\n'), ((904, 950), 'os.path.sep.join', 'os.path.sep.join', (["[args['yolo'], 'yolov3.cfg']"], {}), "([args['yolo'], 'yolov3.cfg'])\n", (920, 950), False, 'import os\n'), ((1000, 1051), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['configPath', 'weightsPath'], {}), '(configPath, weightsPath)\n', (1026, 1051), False, 'import cv2\n'), ((1169, 1196), 'cv2.namedWindow', 'cv2.namedWindow', (['"""original"""'], {}), "('original')\n", (1184, 1196), False, 'import cv2\n'), ((1197, 1245), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""original"""', 'mouse_callback'], {}), "('original', mouse_callback)\n", (1217, 1245), False, 'import cv2\n'), ((1251, 1282), 'cv2.VideoCapture', 'cv2.VideoCapture', (["args['input']"], {}), "(args['input'])\n", (1267, 1282), False, 'import cv2\n'), ((1994, 2038), 'numpy.float32', 'np.float32', (['[[0, 0], [W, 0], [0, H], [W, H]]'], {}), '([[0, 0], [W, 0], [0, H], [W, H]])\n', (2004, 2038), True, 'import numpy as np\n'), ((2133, 2146), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (2141, 2146), True, 'import numpy as np\n'), ((2185, 2198), 'numpy.int32', 'np.int32', (['pts'], {}), '(pts)\n', (2193, 2198), True, 'import numpy as np\n'), ((2243, 2278), 'numpy.array', 'np.array', (['[[x, y]]'], {'dtype': '"""float32"""'}), "([[x, y]], dtype='float32')\n", (2251, 2278), True, 'import numpy as np\n'), ((2296, 2323), 'numpy.array', 'np.array', (['[_original_coord]'], {}), '([_original_coord])\n', (2304, 2323), True, 'import numpy as np\n'), ((2329, 2374), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts_src', 'pts_dst'], {}), '(pts_src, pts_dst)\n', (2356, 2374), False, 'import cv2\n'), ((2388, 2424), 'cv2.findHomography', 'cv2.findHomography', (['pts_src', 'pts_dst'], {}), '(pts_src, pts_dst)\n', (2406, 2424), False, 'import cv2\n'), ((2426, 2449), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2447, 2449), False, 'import cv2\n'), ((1622, 1658), 'cv2.imshow', 'cv2.imshow', (['"""original"""', 'img_original'], {}), "('original', img_original)\n", (1632, 1658), False, 'import cv2\n'), ((2945, 3021), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(1 / 255.0)', '(416, 416)'], {'swapRB': '(True)', 'crop': '(False)'}), '(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n', (2966, 3021), False, 'import cv2\n'), ((3057, 3068), 'time.time', 'time.time', ([], {}), '()\n', (3066, 3068), False, 'import time\n'), ((3114, 3125), 'time.time', 'time.time', ([], {}), '()\n', (3123, 3125), False, 'import time\n'), ((3954, 4029), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', "args['confidence']", "args['threshold']"], {}), "(boxes, confidences, args['confidence'], args['threshold'])\n", (3970, 4029), False, 'import cv2\n'), ((4055, 4092), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame', 'M', '(W, H)'], {}), '(frame, M, (W, H))\n', (4074, 4092), False, 'import cv2\n'), ((2503, 
2519), 'imutils.is_cv2', 'imutils.is_cv2', ([], {}), '()\n', (2517, 2519), False, 'import imutils\n'), ((5402, 5433), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (5424, 5433), False, 'import cv2\n'), ((1730, 1744), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1741, 1744), False, 'import cv2\n'), ((3304, 3321), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (3313, 3321), True, 'import numpy as np\n'), ((4275, 4314), 'numpy.array', 'np.array', (['[[[x1, y1]]]'], {'dtype': '"""float32"""'}), "([[[x1, y1]]], dtype='float32')\n", (4283, 4314), True, 'import numpy as np\n'), ((4340, 4379), 'numpy.array', 'np.array', (['[[[x2, y2]]]'], {'dtype': '"""float32"""'}), "([[[x2, y2]]], dtype='float32')\n", (4348, 4379), True, 'import numpy as np\n'), ((4409, 4449), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['original_1', 'HM'], {}), '(original_1, HM)\n', (4433, 4449), False, 'import cv2\n'), ((4478, 4518), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['original_2', 'HM'], {}), '(original_2, HM)\n', (4502, 4518), False, 'import cv2\n'), ((4898, 4948), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x1, y1)', '(x2, y2)', 'color', '(2)'], {}), '(frame, (x1, y1), (x2, y2), color, 2)\n', (4911, 4948), False, 'import cv2\n'), ((4961, 5012), 'cv2.polylines', 'cv2.polylines', (['frame', '[space]', '(True)', '(0, 255, 0)', '(2)'], {}), '(frame, [space], True, (0, 255, 0), 2)\n', (4974, 5012), False, 'import cv2\n'), ((5023, 5082), 'cv2.circle', 'cv2.circle', (['frame', '(x_center, y_bottom)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(frame, (x_center, y_bottom), 5, (0, 0, 255), -1)\n', (5033, 5082), False, 'import cv2\n'), ((5095, 5170), 'cv2.circle', 'cv2.circle', (['transformed_frame', '(t_x_center, t_y_bottom)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(transformed_frame, (t_x_center, t_y_bottom), 5, (0, 0, 255), -1)\n', (5105, 5170), False, 'import cv2\n'), ((5259, 5338), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x1, y1 - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'color', '(2)'], {}), '(frame, text, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n', (5270, 5338), False, 'import cv2\n'), ((3451, 3473), 'numpy.array', 'np.array', (['[W, H, W, H]'], {}), '([W, H, W, H])\n', (3459, 3473), True, 'import numpy as np\n')] |
# summary function for drawing graph
# ref : https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/04-utils/tensorboard/logger.py
# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
import os
import tensorflow as tf
import numpy as np
import scipy.misc
import torch
from torchvision.transforms import transforms
from ImageLoader import ImageLoader
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
class Logger(object):
def __init__(self, log_dir, is_train=False):
"""Create a summary writer logging to log_dir."""
if is_train:
self.writer = tf.summary.FileWriter(log_dir)
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def image_summary(self, tag, images, step):
"""Log a list of images."""
img_summaries = []
for i, img in enumerate(images):
# Write the image to a string
try:
s = StringIO()
except:
s = BytesIO()
scipy.misc.toimage(img).save(s, format="png")
# Create an Image object
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
# Create and write Summary
summary = tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
def histo_summary(self, tag, values, step, bins=1000):
"""Log a histogram of the tensor of values."""
# Create a histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill the fields of the histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values ** 2))
# Drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush()
def accuracy(self, net, path, epoch, phase, device, log_path, batch_size=64, do_logwrite=False):
try:
os.makedirs(log_path)
except OSError:
pass
category = path.split('/')[-1] + "_" + phase
with torch.no_grad():
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)) # Normalize [-1, 1]
])
dataset = ImageLoader(path, phase, transforms=transform)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
shuffle=False)
correct = 0
for idx, (images, labels) in enumerate(data_loader):
images = images.to(device)
labels = labels.to(device)
outputs = net(images)
_, pred_y = torch.max(outputs.data, 1)
correct += (pred_y == labels).sum().item()
accuracy = (correct / len(dataset)) * 100
print(
'phase:%s --- correct [%d/%d] acc: %.2f%%' %
(category, correct, len(dataset), accuracy))
if do_logwrite:
f = open('%s/%s_log.txt' % (log_path, category), 'a')
f.write('epoch:%d, phase:%s --- correct [%d/%d] %.4f%%\n' % (
epoch, phase, correct, len(dataset), accuracy))
f.close()
return accuracy
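    # FAR below is the false-acceptance rate (fake samples classified as real,
    # measured on the "Fake" split); FRR is the false-rejection rate (real
    # samples classified as fake, measured on the "Real" split).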
def FAR(self, net, path, device, log_path, batch_size=64, do_logwrite=False):
try:
os.makedirs(log_path)
except OSError:
pass
with torch.no_grad():
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)) # Normalize [-1, 1]
])
dataset = ImageLoader(path, "Fake", transforms=transform)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
shuffle=False)
wrong = 0
for idx, (images, labels) in enumerate(data_loader):
images = images.to(device)
labels = labels.to(device)
outputs = net(images)
_, pred_y = torch.max(outputs.data, 1)
wrong += (pred_y != labels).sum().item()
metric = (wrong / len(dataset)) * 100
print_log = '%s --- neg(fake) -> pos(real) Wrong [%d/%d] FAR: %.2f%%' % ("FAR", wrong, len(dataset), metric)
print(print_log)
if do_logwrite:
f = open('%s/%s_log.txt' % (log_path, "test_"), 'a')
f.write(print_log)
f.close()
return metric
def FRR(self, net, path, device, log_path, batch_size=64, do_logwrite=False):
try:
os.makedirs(log_path)
except OSError:
pass
with torch.no_grad():
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)) # Normalize [-1, 1]
])
dataset = ImageLoader(path, "Real", transforms=transform)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
shuffle=False)
wrong = 0
for idx, (images, labels) in enumerate(data_loader):
images = images.to(device)
labels = labels.to(device)
outputs = net(images)
_, pred_y = torch.max(outputs.data, 1)
wrong += (pred_y != labels).sum().item()
metric = (wrong / len(dataset)) * 100
print_log = '%s --- pos(real) -> neg(fake) Wrong [%d/%d] FRR: %.2f%%' % ("FRR", wrong, len(dataset), metric)
print(print_log)
if do_logwrite:
f = open('%s/%s_log.txt' % (log_path, "test_"), 'a')
f.write(print_log)
f.close()
return metric
| [
"StringIO.StringIO",
"numpy.prod",
"numpy.histogram",
"tensorflow.Summary",
"tensorflow.HistogramProto",
"os.makedirs",
"torch.utils.data.DataLoader",
"torch.max",
"io.BytesIO",
"numpy.max",
"numpy.sum",
"ImageLoader.ImageLoader",
"torchvision.transforms.transforms.Normalize",
"torchvision... | [((1700, 1731), 'tensorflow.Summary', 'tf.Summary', ([], {'value': 'img_summaries'}), '(value=img_summaries)\n', (1710, 1731), True, 'import tensorflow as tf\n'), ((1964, 1995), 'numpy.histogram', 'np.histogram', (['values'], {'bins': 'bins'}), '(values, bins=bins)\n', (1976, 1995), True, 'import numpy as np\n'), ((2061, 2080), 'tensorflow.HistogramProto', 'tf.HistogramProto', ([], {}), '()\n', (2078, 2080), True, 'import tensorflow as tf\n'), ((688, 718), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir'], {}), '(log_dir)\n', (709, 718), True, 'import tensorflow as tf\n'), ((2106, 2120), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (2112, 2120), True, 'import numpy as np\n'), ((2147, 2161), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (2153, 2161), True, 'import numpy as np\n'), ((2186, 2207), 'numpy.prod', 'np.prod', (['values.shape'], {}), '(values.shape)\n', (2193, 2207), True, 'import numpy as np\n'), ((2234, 2248), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (2240, 2248), True, 'import numpy as np\n'), ((2283, 2302), 'numpy.sum', 'np.sum', (['(values ** 2)'], {}), '(values ** 2)\n', (2289, 2302), True, 'import numpy as np\n'), ((2864, 2885), 'os.makedirs', 'os.makedirs', (['log_path'], {}), '(log_path)\n', (2875, 2885), False, 'import os\n'), ((2994, 3009), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3007, 3009), False, 'import torch\n'), ((3209, 3255), 'ImageLoader.ImageLoader', 'ImageLoader', (['path', 'phase'], {'transforms': 'transform'}), '(path, phase, transforms=transform)\n', (3220, 3255), False, 'from ImageLoader import ImageLoader\n'), ((3282, 3356), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(dataset, batch_size=batch_size, shuffle=False)\n', (3309, 3356), False, 'import torch\n'), ((4437, 4458), 'os.makedirs', 'os.makedirs', (['log_path'], {}), '(log_path)\n', (4448, 4458), False, 'import os\n'), ((4514, 4529), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4527, 4529), False, 'import torch\n'), ((4729, 4776), 'ImageLoader.ImageLoader', 'ImageLoader', (['path', '"""Fake"""'], {'transforms': 'transform'}), "(path, 'Fake', transforms=transform)\n", (4740, 4776), False, 'from ImageLoader import ImageLoader\n'), ((4803, 4877), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(dataset, batch_size=batch_size, shuffle=False)\n', (4830, 4877), False, 'import torch\n'), ((5786, 5807), 'os.makedirs', 'os.makedirs', (['log_path'], {}), '(log_path)\n', (5797, 5807), False, 'import os\n'), ((5863, 5878), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5876, 5878), False, 'import torch\n'), ((6078, 6125), 'ImageLoader.ImageLoader', 'ImageLoader', (['path', '"""Real"""'], {'transforms': 'transform'}), "(path, 'Real', transforms=transform)\n", (6089, 6125), False, 'from ImageLoader import ImageLoader\n'), ((6152, 6226), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(dataset, batch_size=batch_size, shuffle=False)\n', (6179, 6226), False, 'import torch\n'), ((1169, 1179), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (1177, 1179), False, 'from StringIO import StringIO\n'), ((1589, 1644), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': "('%s/%d' % (tag, i))", 'image': 'img_sum'}), "(tag='%s/%d' % (tag, i), image=img_sum)\n", (1605, 1644), 
True, 'import tensorflow as tf\n'), ((3707, 3733), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (3716, 3733), False, 'import torch\n'), ((5226, 5252), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (5235, 5252), False, 'import torch\n'), ((6575, 6601), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (6584, 6601), False, 'import torch\n'), ((841, 886), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'tag', 'simple_value': 'value'}), '(tag=tag, simple_value=value)\n', (857, 886), True, 'import tensorflow as tf\n'), ((1220, 1229), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1227, 1229), False, 'from io import BytesIO\n'), ((2622, 2659), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'tag', 'histo': 'hist'}), '(tag=tag, histo=hist)\n', (2638, 2659), True, 'import tensorflow as tf\n'), ((3072, 3093), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3091, 3093), False, 'from torchvision.transforms import transforms\n'), ((3111, 3147), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['(0.5,)', '(0.5,)'], {}), '((0.5,), (0.5,))\n', (3131, 3147), False, 'from torchvision.transforms import transforms\n'), ((4592, 4613), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4611, 4613), False, 'from torchvision.transforms import transforms\n'), ((4631, 4667), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['(0.5,)', '(0.5,)'], {}), '((0.5,), (0.5,))\n', (4651, 4667), False, 'from torchvision.transforms import transforms\n'), ((5941, 5962), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5960, 5962), False, 'from torchvision.transforms import transforms\n'), ((5980, 6016), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['(0.5,)', '(0.5,)'], {}), '((0.5,), (0.5,))\n', (6000, 6016), False, 'from torchvision.transforms import transforms\n')] |
# -*- coding:utf-8 -*-
"""
"""
import re
import time
import numpy as np
import pandas as pd
from lightgbm import LGBMRegressor, LGBMClassifier
from sklearn.impute import SimpleImputer
from sklearn.metrics import log_loss, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder, StandardScaler, OneHotEncoder
from sklearn.utils import column_or_1d
from sklearn.utils.validation import check_is_fitted
from tabular_toolbox.column_selector import column_skewness_kurtosis, column_int, column_object_category_bool
from tabular_toolbox.utils import logging, infer_task_type
logger = logging.get_logger(__name__)
def root_mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average', squared=True):
return np.sqrt(
mean_squared_error(y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput, squared=squared))
def subsample(X, y, max_samples, train_samples, task, random_state=9527):
stratify = None
if X.shape[0] > max_samples:
if task != 'regression':
stratify = y
X_train, _, y_train, _ = train_test_split(
X, y, train_size=max_samples, shuffle=True, stratify=stratify
)
if task != 'regression':
stratify = y_train
X_train, X_test, y_train, y_test = train_test_split(
X_train, y_train, train_size=train_samples, shuffle=True, stratify=stratify, random_state=random_state
)
else:
if task != 'regression':
stratify = y
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.5, shuffle=True, stratify=stratify
)
return X_train, X_test, y_train, y_test
class SafeLabelEncoder(LabelEncoder):
def transform(self, y):
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
unseen = len(self.classes_)
y = np.array([np.searchsorted(self.classes_, x) if x in self.classes_ else unseen for x in y])
return y
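# Illustration: after fit(['a', 'b']), transform(['a', 'c']) returns [0, 2] --
# unseen labels encode to len(classes_) instead of raising as LabelEncoder would.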
class MultiLabelEncoder:
def __init__(self):
self.encoders = {}
def fit(self, X, y=None):
assert len(X.shape) == 2
n_features = X.shape[1]
use_iloc = hasattr(X, 'iloc')
for n in range(n_features):
le = SafeLabelEncoder()
data = X.iloc[:, n] if use_iloc else X[:, n]
le.fit(data)
self.encoders[n] = le
return self
def transform(self, X):
assert len(X.shape) == 2
n_features = X.shape[1]
assert n_features == len(self.encoders.items())
for n in range(n_features):
if isinstance(X, np.ndarray):
X[:, n] = self.encoders[n].transform(X[:, n])
elif isinstance(X, pd.DataFrame):
X.iloc[:, n] = self.encoders[n].transform(X.iloc[:, n])
else:
raise NotImplementedError('Not supported type')
return X
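# Note: each column gets its own SafeLabelEncoder, so per-column unseen values
# encode to that column's len(classes_); ndarray input is modified in place.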
class SafeOrdinalEncoder(OrdinalEncoder):
    __doc__ = 'Adapted from sklearn OrdinalEncoder\n' + OrdinalEncoder.__doc__
# def fit(self, X, y=None):
# super().fit(X, y)
# #
# # def make_encoder(categories):
# # unseen = len(categories)
# # m = dict(zip(categories, range(unseen)))
# # vf = np.vectorize(lambda x: m[x] if x in m.keys() else unseen)
# # return vf
# #
# # def make_decoder(categories, dtype):
# # if dtype in (np.float32, np.float64, np.float):
# # default_value = np.nan
# # elif dtype in (np.int32, np.int64, np.int, np.uint32, np.uint64, np.uint):
# # default_value = -1
# # else:
# # default_value = None
# # dtype = np.object
# # unseen = len(categories)
# # vf = np.vectorize(lambda x: categories[x] if unseen > x >= 0 else default_value,
# # otypes=[dtype])
# # return vf
# #
# # self.encoders_ = [make_encoder(cat) for cat in self.categories_]
# # self.decoders_ = [make_decoder(cat, X.dtypes[i]) for i, cat in enumerate(self.categories_)]
#
# return self
def transform(self, X, y=None):
if not isinstance(X, (pd.DataFrame, np.ndarray)):
raise TypeError("Unexpected type {}".format(type(X)))
def make_encoder(categories):
unseen = len(categories)
m = dict(zip(categories, range(unseen)))
vf = np.vectorize(lambda x: m[x] if x in m.keys() else unseen)
return vf
values = X if isinstance(X, np.ndarray) else X.values
encoders_ = [make_encoder(cat) for cat in self.categories_]
result = [encoders_[i](values[:, i]) for i in range(values.shape[1])]
if isinstance(X, pd.DataFrame):
assert len(result) == len(X.columns)
data = {c: result[i] for i, c in enumerate(X.columns)}
result = pd.DataFrame(data, dtype=self.dtype)
else:
result = np.stack(result, axis=1)
if self.dtype != result.dtype:
result = result.astype(self.dtype)
return result
def inverse_transform(self, X):
if not isinstance(X, (pd.DataFrame, np.ndarray)):
raise TypeError("Unexpected type {}".format(type(X)))
def make_decoder(categories, dtype):
if dtype in (np.float32, np.float64, np.float):
default_value = np.nan
elif dtype in (np.int32, np.int64, np.int, np.uint32, np.uint64, np.uint):
default_value = -1
else:
default_value = None
dtype = np.object
unseen = len(categories)
vf = np.vectorize(lambda x: categories[x] if unseen > x >= 0 else default_value,
otypes=[dtype])
return vf
values = X if isinstance(X, np.ndarray) else X.values
decoders_ = [make_decoder(cat, cat.dtype) for i, cat in enumerate(self.categories_)]
result = [decoders_[i](values[:, i]) for i in range(values.shape[1])]
if isinstance(X, pd.DataFrame):
assert len(result) == len(X.columns)
data = {c: result[i] for i, c in enumerate(X.columns)}
result = pd.DataFrame(data)
else:
result = np.stack(result, axis=1)
return result
class SafeOneHotEncoder(OneHotEncoder):
def get_feature_names(self, input_features=None):
"""
Override this method to remove non-alphanumeric chars from feature names
"""
check_is_fitted(self)
cats = self.categories_
if input_features is None:
input_features = ['x%d' % i for i in range(len(cats))]
elif len(input_features) != len(self.categories_):
raise ValueError(
"input_features should have length equal to number of "
"features ({}), got {}".format(len(self.categories_),
len(input_features)))
feature_names = []
for i in range(len(cats)):
names = [input_features[i] + '_' + str(idx) + '_' + re.sub('[^A-Za-z0-9_]+', '_', str(t))
for idx, t in enumerate(cats[i])]
if self.drop_idx_ is not None and self.drop_idx_[i] is not None:
names.pop(self.drop_idx_[i])
feature_names.extend(names)
return np.array(feature_names, dtype=object)
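# e.g. input feature x0 with category value "red/blue" yields the name
# "x0_0_red_blue"; non-alphanumeric characters are replaced with '_' so
# downstream libraries accept the generated column names.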
class LogStandardScaler:
def __init__(self, copy=True, with_mean=True, with_std=True):
self.scaler = StandardScaler(copy=copy, with_mean=with_mean, with_std=with_std)
self.min_values = None
def fit(self, X, y=None):
self.X_min_values = np.min(X)
self.scaler.fit(np.log(X - self.X_min_values + 1))
return self
def transform(self, X):
X = np.log(np.clip(X - self.X_min_values + 1, a_min=1, a_max=None))
X = self.scaler.transform(X)
return X
class SkewnessKurtosisTransformer:
def __init__(self, transform_fn=None, skew_threshold=0.5, kurtosis_threshold=0.5):
self.columns_ = []
self.skewness_threshold = skew_threshold
self.kurtosis_threshold = kurtosis_threshold
if transform_fn is None:
transform_fn = np.log
self.transform_fn = transform_fn
def fit(self, X, y=None):
assert len(X.shape) == 2
self.columns_ = column_skewness_kurtosis(X, skew_threshold=self.skewness_threshold,
kurtosis_threshold=self.kurtosis_threshold)
logger.info(f'SkewnessKurtosisTransformer - selected columns:{self.columns_}')
return self
def transform(self, X):
assert len(X.shape) == 2
if len(self.columns_) > 0:
try:
X[self.columns_] = self.transform_fn(X[self.columns_])
except Exception as e:
logger.error(e)
return X
class FeatureSelectionTransformer():
def __init__(self, task=None, max_train_samples=10000, max_test_samples=10000, max_cols=10000,
ratio_select_cols=0.1,
n_max_cols=100, n_min_cols=10, reserved_cols=None):
self.task = task
if max_cols <= 0:
max_cols = 10000
if max_train_samples <= 0:
max_train_samples = 10000
if max_test_samples <= 0:
max_test_samples = 10000
self.max_train_samples = max_train_samples
self.max_test_samples = max_test_samples
self.max_cols = max_cols
self.ratio_select_cols = ratio_select_cols
self.n_max_cols = n_max_cols
self.n_min_cols = n_min_cols
self.reserved_cols = reserved_cols
self.scores_ = {}
self.columns_ = []
def get_categorical_features(self, X):
cat_cols = column_object_category_bool(X)
int_cols = column_int(X)
for c in int_cols:
if X[c].min() >= 0 and X[c].max() < np.iinfo(np.int32).max:
cat_cols.append(c)
return cat_cols
def feature_score(self, F_train, y_train, F_test, y_test):
if self.task is None:
self.task, _ = infer_task_type(y_train)
if self.task == 'regression':
model = LGBMRegressor()
eval_metric = root_mean_squared_error
else:
model = LGBMClassifier()
eval_metric = log_loss
cat_cols = self.get_categorical_features(F_train)
model.fit(F_train, y_train,
# eval_set=(F_test, y_test),
# early_stopping_rounds=20,
# verbose=0,
# categorical_feature=cat_cols,
# eval_metric=eval_metric,
)
if self.task == 'regression':
y_pred = model.predict(F_test)
else:
y_pred = model.predict_proba(F_test)[:, 1]
score = eval_metric(y_test, y_pred)
return score
def fit(self, X, y):
start_time = time.time()
if self.task is None:
self.task, _ = infer_task_type(y)
columns = X.columns.to_list()
logger.info(f'all columns: {columns}')
if self.reserved_cols is not None:
self.reserved_cols = list(set(self.reserved_cols).intersection(columns))
logger.info(f'exclude reserved columns: {self.reserved_cols}')
columns = list(set(columns) - set(self.reserved_cols))
if len(columns) > self.max_cols:
columns = np.random.choice(columns, self.max_cols, replace=False)
if len(columns) <= 0:
logger.warn('no columns to score')
self.columns_ = self.reserved_cols
self.scores_ = {}
return self
X_score = X[columns]
X_train, X_test, y_train, y_test = subsample(X_score, y,
max_samples=self.max_test_samples + self.max_train_samples,
train_samples=self.max_train_samples,
task=self.task)
if self.task != 'regression' and y_train.dtype != 'int':
le = LabelEncoder()
y_train = le.fit_transform(y_train)
y_test = le.transform(y_test)
cat_cols = column_object_category_bool(X_train)
if len(cat_cols) > 0:
logger.info('ordinal encoding...')
X_train['__datacanvas__source__'] = 'train'
X_test['__datacanvas__source__'] = 'test'
X_all = pd.concat([X_train, X_test], axis=0)
oe = OrdinalEncoder()
X_all[cat_cols] = oe.fit_transform(X_all[cat_cols]).astype('int')
X_train = X_all[X_all['__datacanvas__source__'] == 'train']
X_test = X_all[X_all['__datacanvas__source__'] == 'test']
X_train.pop('__datacanvas__source__')
X_test.pop('__datacanvas__source__')
self.scores_ = {}
for c in columns:
F_train = X_train[[c]]
F_test = X_test[[c]]
self.scores_[c] = self.feature_score(F_train, y_train, F_test, y_test)
logger.info(f'Feature score: {c}={self.scores_[c]}')
sorted_scores = sorted([[col, score] for col, score in self.scores_.items()], key=lambda x: x[1])
logger.info(f'feature scores:{sorted_scores}')
topn = np.min([np.max([int(len(columns) * self.ratio_select_cols), np.min([len(columns), self.n_min_cols])]),
self.n_max_cols])
if self.reserved_cols is not None:
self.columns_ = self.reserved_cols
else:
self.columns_ = []
self.columns_ += [s[0] for s in sorted_scores[:topn]]
logger.info(f'selected columns:{self.columns_}')
logger.info(f'taken {time.time() - start_time}s')
del X_score, X_train, X_test, y_train, y_test
return self
def transform(self, X):
return X[self.columns_]
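# Sketch of intended use (hypothetical DataFrame `df` with target `y`):
#   fst = FeatureSelectionTransformer(task='binary', ratio_select_cols=0.2)
#   df_small = fst.fit(df, y).transform(df)  # reserved columns + top-scoring features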
class FloatOutputImputer(SimpleImputer):
def transform(self, X):
return super().transform(X).astype(np.float64)
| [
"numpy.clip",
"sklearn.preprocessing.LabelEncoder",
"numpy.log",
"lightgbm.LGBMRegressor",
"lightgbm.LGBMClassifier",
"numpy.iinfo",
"numpy.array",
"numpy.searchsorted",
"tabular_toolbox.utils.logging.get_logger",
"numpy.stack",
"numpy.min",
"pandas.DataFrame",
"sklearn.utils.validation.chec... | [((662, 690), 'tabular_toolbox.utils.logging.get_logger', 'logging.get_logger', (['__name__'], {}), '(__name__)\n', (680, 690), False, 'from tabular_toolbox.utils import logging, infer_task_type\n'), ((887, 997), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_true', 'y_pred'], {'sample_weight': 'sample_weight', 'multioutput': 'multioutput', 'squared': 'squared'}), '(y_true, y_pred, sample_weight=sample_weight, multioutput\n =multioutput, squared=squared)\n', (905, 997), False, 'from sklearn.metrics import log_loss, mean_squared_error\n'), ((1214, 1293), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'train_size': 'max_samples', 'shuffle': '(True)', 'stratify': 'stratify'}), '(X, y, train_size=max_samples, shuffle=True, stratify=stratify)\n', (1230, 1293), False, 'from sklearn.model_selection import train_test_split\n'), ((1424, 1548), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'train_size': 'train_samples', 'shuffle': '(True)', 'stratify': 'stratify', 'random_state': 'random_state'}), '(X_train, y_train, train_size=train_samples, shuffle=True,\n stratify=stratify, random_state=random_state)\n', (1440, 1548), False, 'from sklearn.model_selection import train_test_split\n'), ((1678, 1749), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'train_size': '(0.5)', 'shuffle': '(True)', 'stratify': 'stratify'}), '(X, y, train_size=0.5, shuffle=True, stratify=stratify)\n', (1694, 1749), False, 'from sklearn.model_selection import train_test_split\n'), ((1893, 1926), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""classes_"""'], {}), "(self, 'classes_')\n", (1908, 1926), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((1939, 1965), 'sklearn.utils.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (1951, 1965), False, 'from sklearn.utils import column_or_1d\n'), ((6753, 6774), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (6768, 6774), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((7607, 7644), 'numpy.array', 'np.array', (['feature_names'], {'dtype': 'object'}), '(feature_names, dtype=object)\n', (7615, 7644), True, 'import numpy as np\n'), ((7760, 7825), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'copy': 'copy', 'with_mean': 'with_mean', 'with_std': 'with_std'}), '(copy=copy, with_mean=with_mean, with_std=with_std)\n', (7774, 7825), False, 'from sklearn.preprocessing import LabelEncoder, OrdinalEncoder, StandardScaler, OneHotEncoder\n'), ((7916, 7925), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (7922, 7925), True, 'import numpy as np\n'), ((8613, 8728), 'tabular_toolbox.column_selector.column_skewness_kurtosis', 'column_skewness_kurtosis', (['X'], {'skew_threshold': 'self.skewness_threshold', 'kurtosis_threshold': 'self.kurtosis_threshold'}), '(X, skew_threshold=self.skewness_threshold,\n kurtosis_threshold=self.kurtosis_threshold)\n', (8637, 8728), False, 'from tabular_toolbox.column_selector import column_skewness_kurtosis, column_int, column_object_category_bool\n'), ((10039, 10069), 'tabular_toolbox.column_selector.column_object_category_bool', 'column_object_category_bool', (['X'], {}), '(X)\n', (10066, 10069), False, 'from tabular_toolbox.column_selector import column_skewness_kurtosis, column_int, column_object_category_bool\n'), ((10089, 10102), 
'tabular_toolbox.column_selector.column_int', 'column_int', (['X'], {}), '(X)\n', (10099, 10102), False, 'from tabular_toolbox.column_selector import column_skewness_kurtosis, column_int, column_object_category_bool\n'), ((11216, 11227), 'time.time', 'time.time', ([], {}), '()\n', (11225, 11227), False, 'import time\n'), ((12533, 12569), 'tabular_toolbox.column_selector.column_object_category_bool', 'column_object_category_bool', (['X_train'], {}), '(X_train)\n', (12560, 12569), False, 'from tabular_toolbox.column_selector import column_skewness_kurtosis, column_int, column_object_category_bool\n'), ((5100, 5136), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'dtype': 'self.dtype'}), '(data, dtype=self.dtype)\n', (5112, 5136), True, 'import pandas as pd\n'), ((5172, 5196), 'numpy.stack', 'np.stack', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (5180, 5196), True, 'import numpy as np\n'), ((5885, 5980), 'numpy.vectorize', 'np.vectorize', (['(lambda x: categories[x] if unseen > x >= 0 else default_value)'], {'otypes': '[dtype]'}), '(lambda x: categories[x] if unseen > x >= 0 else default_value,\n otypes=[dtype])\n', (5897, 5980), True, 'import numpy as np\n'), ((6441, 6459), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (6453, 6459), True, 'import pandas as pd\n'), ((6495, 6519), 'numpy.stack', 'np.stack', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (6503, 6519), True, 'import numpy as np\n'), ((7950, 7983), 'numpy.log', 'np.log', (['(X - self.X_min_values + 1)'], {}), '(X - self.X_min_values + 1)\n', (7956, 7983), True, 'import numpy as np\n'), ((8053, 8108), 'numpy.clip', 'np.clip', (['(X - self.X_min_values + 1)'], {'a_min': '(1)', 'a_max': 'None'}), '(X - self.X_min_values + 1, a_min=1, a_max=None)\n', (8060, 8108), True, 'import numpy as np\n'), ((10382, 10406), 'tabular_toolbox.utils.infer_task_type', 'infer_task_type', (['y_train'], {}), '(y_train)\n', (10397, 10406), False, 'from tabular_toolbox.utils import logging, infer_task_type\n'), ((10466, 10481), 'lightgbm.LGBMRegressor', 'LGBMRegressor', ([], {}), '()\n', (10479, 10481), False, 'from lightgbm import LGBMRegressor, LGBMClassifier\n'), ((10566, 10582), 'lightgbm.LGBMClassifier', 'LGBMClassifier', ([], {}), '()\n', (10580, 10582), False, 'from lightgbm import LGBMRegressor, LGBMClassifier\n'), ((11285, 11303), 'tabular_toolbox.utils.infer_task_type', 'infer_task_type', (['y'], {}), '(y)\n', (11300, 11303), False, 'from tabular_toolbox.utils import logging, infer_task_type\n'), ((11723, 11778), 'numpy.random.choice', 'np.random.choice', (['columns', 'self.max_cols'], {'replace': '(False)'}), '(columns, self.max_cols, replace=False)\n', (11739, 11778), True, 'import numpy as np\n'), ((12408, 12422), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (12420, 12422), False, 'from sklearn.preprocessing import LabelEncoder, OrdinalEncoder, StandardScaler, OneHotEncoder\n'), ((12778, 12814), 'pandas.concat', 'pd.concat', (['[X_train, X_test]'], {'axis': '(0)'}), '([X_train, X_test], axis=0)\n', (12787, 12814), True, 'import pandas as pd\n'), ((12832, 12848), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {}), '()\n', (12846, 12848), False, 'from sklearn.preprocessing import LabelEncoder, OrdinalEncoder, StandardScaler, OneHotEncoder\n'), ((2025, 2058), 'numpy.searchsorted', 'np.searchsorted', (['self.classes_', 'x'], {}), '(self.classes_, x)\n', (2040, 2058), True, 'import numpy as np\n'), ((10178, 10196), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), 
'(np.int32)\n', (10186, 10196), True, 'import numpy as np\n'), ((14044, 14055), 'time.time', 'time.time', ([], {}), '()\n', (14053, 14055), False, 'import time\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
def calculate_daylight(day: int, latitude: float = 53.551086) -> float:
"""
Calculate number of hours of daylight in a day
Parameters
----------
day : integer (required)
        day of the year by number (1-365)
    latitude : float (optional)
latitude at which number of daylight hours in a day is required
(default is taken as latitude of Hamburg since weather data is also
taken for this city:
https://www.latlong.net/place/hamburg-germany-8766.html)
Returns
-------
daylightamount : float
number of hours of daylight in a day
SOURCE: http://mathforum.org/library/drmath/view/56478.html
"""
ast = 0.9671396
abc = 0.2163108
const = 0.39795
d_shift = day - 186
P = np.arcsin(
const * np.cos(abc + 2 * np.arctan(ast * np.tan(0.00860 * d_shift)))
)
pi = np.pi
numerator = (0.8333 * pi / 180) + np.sin(latitude * pi / 180) * np.sin(P)
denominator = np.cos(latitude * pi / 180) * np.cos(P)
daylightamount = 24 - (24 / pi) * np.arccos(
(np.sin(numerator) / denominator)
)
return daylightamount
def get_too_hot_cold(data, threshold=20):
# METHOD 1 - use GROUP BY
temps = data["temp"] - threshold
data["too_hot"] = temps.mask(temps < 0, other=0).abs()
data["too_cold"] = temps.mask(temps > 0, other=0).abs()
# # METHOD 2 - do not use GROUP BY
# hcs = []
# for c in data["country"].unique():
# temps = data[data["country"] == c]["temp"] - threshold
# too_hot = temps.mask(temps < 0, other=0).abs()
# too_cold = temps.mask(temps > 0, other=0).abs()
# hc = (
# too_hot.rename("too_hot")
# .to_frame()
# .merge(
# too_cold.rename("too_cold").to_frame(),
# left_index=True,
# right_index=True,
# how="left",
# )
# .merge(
# data[data["country"] == c][["ds"]],
# left_index=True,
# right_index=True,
# how="left",
# )
# .assign(country=c)
# )
# hc = hc.sort_values(["ds"])
# hcs.append(hc)
# data = data.merge(
# pd.concat(hcs, ignore_index=True), on=["country", "ds"], how="left"
# )
return data
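# Worked example (threshold=20): temp values [18, 25] give
#   too_hot  = [0, 5]  # degrees above the threshold
#   too_cold = [2, 0]  # degrees below the threshold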
| [
"numpy.sin",
"numpy.tan",
"numpy.cos"
] | [((1056, 1083), 'numpy.cos', 'np.cos', (['(latitude * pi / 180)'], {}), '(latitude * pi / 180)\n', (1062, 1083), True, 'import numpy as np\n'), ((1086, 1095), 'numpy.cos', 'np.cos', (['P'], {}), '(P)\n', (1092, 1095), True, 'import numpy as np\n'), ((998, 1025), 'numpy.sin', 'np.sin', (['(latitude * pi / 180)'], {}), '(latitude * pi / 180)\n', (1004, 1025), True, 'import numpy as np\n'), ((1028, 1037), 'numpy.sin', 'np.sin', (['P'], {}), '(P)\n', (1034, 1037), True, 'import numpy as np\n'), ((1154, 1171), 'numpy.sin', 'np.sin', (['numerator'], {}), '(numerator)\n', (1160, 1171), True, 'import numpy as np\n'), ((911, 935), 'numpy.tan', 'np.tan', (['(0.0086 * d_shift)'], {}), '(0.0086 * d_shift)\n', (917, 935), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : pad.py
# Author : <NAME> <<EMAIL>>
# Date : 01.11.2020
# Last Modified Date: 09.11.2021
# Last Modified By : <NAME> <<EMAIL>>
#
# Copyright (c) 2020, Imperial College, London
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of Imperial College nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# pad sequences to maximum length for batch processing
# code borrowed from keras_preprocessing (comes with MIT license)
import numpy as np
def pad_sequences(sequences, maxlen=None, dtype='int32',
padding='pre', truncating='pre', value=0.):
"""Pads sequences to the same length.
"""
if not hasattr(sequences, '__len__'):
raise ValueError('`sequences` must be iterable.')
total_samples = len(sequences)
lengths = []
sample_shape = ()
flag = True
for x in sequences:
try:
lengths.append(len(x))
if flag and len(x):
sample_shape = np.asarray(x).shape[1:]
flag = False
except TypeError:
raise ValueError('`sequences` must be a list of iterables. '
'Found non-iterable: ' + str(x))
if maxlen is None:
maxlen = np.max(lengths)
is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.unicode_)
if isinstance(value, str) and dtype != object and not is_dtype_str:
raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
"You should set `dtype=object` for variable length strings."
.format(dtype, type(value)))
x = np.full((total_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" '
'not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s '
'is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x
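# Illustrative behavior (default 'pre' padding/truncating, value=0):
#   pad_sequences([[1, 2, 3], [4, 5]], maxlen=4)
#   -> array([[0, 1, 2, 3],
#             [0, 0, 4, 5]], dtype=int32)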
| [
"numpy.issubdtype",
"numpy.full",
"numpy.asarray",
"numpy.max"
] | [((3091, 3158), 'numpy.full', 'np.full', (['((total_samples, maxlen) + sample_shape)', 'value'], {'dtype': 'dtype'}), '((total_samples, maxlen) + sample_shape, value, dtype=dtype)\n', (3098, 3158), True, 'import numpy as np\n'), ((2685, 2700), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (2691, 2700), True, 'import numpy as np\n'), ((2721, 2750), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.str_'], {}), '(dtype, np.str_)\n', (2734, 2750), True, 'import numpy as np\n'), ((2754, 2787), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.unicode_'], {}), '(dtype, np.unicode_)\n', (2767, 2787), True, 'import numpy as np\n'), ((3590, 3620), 'numpy.asarray', 'np.asarray', (['trunc'], {'dtype': 'dtype'}), '(trunc, dtype=dtype)\n', (3600, 3620), True, 'import numpy as np\n'), ((2430, 2443), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2440, 2443), True, 'import numpy as np\n')] |
from collections import defaultdict
from copy import deepcopy
from time import time
from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.rail_env import RailEnv, RailEnvActions
import numpy as np
from flatlander.agents.heuristic_agent import HeuristicPriorityAgent
from flatlander.submission.helper import get_agent_pos, is_done
def promising(possible_transitions, departed):
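    # worth exploring if the agent can still branch (more than one possible
    # transition) or has not departed yet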
return np.count_nonzero(possible_transitions) > 1 or not departed
def epsilon_greedy_plan(env: RailEnv, obs_dict, budget_seconds=60, epsilon=0.1,
policy_agent=HeuristicPriorityAgent()):
start_t = time()
best_actions = []
best_return = -np.inf
best_pc = -np.inf
all_returns = []
all_pcs = []
plan_step = 0
    budget_used = False
    initial_obs = deepcopy(obs_dict)  # each rollout must restart from the initial observations
    while not budget_used:
        local_env = deepcopy(env)
        obs_dict = deepcopy(initial_obs)
episode_return = 0
action_memory = []
dones = defaultdict(lambda: False)
print(f'\nPlanning step {plan_step + 1}')
while not dones['__all__'] and not budget_used:
actions = defaultdict(lambda: None, policy_agent.compute_actions(obs_dict,
env=local_env))
            for agent in local_env.agents:  # use the rollout copy so status/position track the simulated steps
pos = get_agent_pos(agent)
next_possible_moves = local_env.rail.get_transitions(*pos, agent.direction)
departed = agent.status.value != RailAgentStatus.READY_TO_DEPART.value
if np.random.random() < epsilon and promising(next_possible_moves, departed):
possible_actions = set(np.flatnonzero(next_possible_moves))
possible_actions = possible_actions.union({RailEnvActions.STOP_MOVING.value,
RailEnvActions.MOVE_FORWARD.value})
non_default_actions = possible_actions.difference({actions[agent.handle]})
actions[agent.handle] = np.random.choice(list(non_default_actions))
action_memory.append(actions)
obs_dict, all_rewards, dones, info = local_env.step(actions)
episode_return += np.sum(list(all_rewards))
budget_used = (time() - start_t) > budget_seconds
if not budget_used:
all_returns.append(episode_return)
pc = np.sum(np.array([1 for a in local_env.agents if is_done(a)])) / local_env.get_num_agents()
all_pcs.append(pc)
if pc > best_pc:
best_return = episode_return
best_pc = pc
best_actions = action_memory
if pc == 1.0:
print(f'MAX PC: {best_pc}, MIN PC: {np.min(all_pcs)}, MAX RETURN: {best_return}\n')
return best_actions
plan_step += 1
if len(all_pcs) > 0:
print(f'MAX PC: {best_pc}, MIN PC: {np.min(all_pcs)}, MAX RETURN: {best_return}\n')
else:
print(f'Budget reached before any planning step could finish!')
return best_actions if len(best_actions) > 0 else None
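# Illustrative call (parameter values assumed):
#   actions = epsilon_greedy_plan(env, obs_dict, budget_seconds=30, epsilon=0.2)
# Returns the per-step action dicts of the best rollout found within budget, or None.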
| [
"flatlander.submission.helper.get_agent_pos",
"numpy.random.random",
"numpy.flatnonzero",
"numpy.min",
"numpy.count_nonzero",
"collections.defaultdict",
"copy.deepcopy",
"flatlander.submission.helper.is_done",
"flatlander.agents.heuristic_agent.HeuristicPriorityAgent",
"time.time"
] | [((588, 612), 'flatlander.agents.heuristic_agent.HeuristicPriorityAgent', 'HeuristicPriorityAgent', ([], {}), '()\n', (610, 612), False, 'from flatlander.agents.heuristic_agent import HeuristicPriorityAgent\n'), ((629, 635), 'time.time', 'time', ([], {}), '()\n', (633, 635), False, 'from time import time\n'), ((834, 847), 'copy.deepcopy', 'deepcopy', (['env'], {}), '(env)\n', (842, 847), False, 'from copy import deepcopy\n'), ((918, 945), 'collections.defaultdict', 'defaultdict', (['(lambda : False)'], {}), '(lambda : False)\n', (929, 945), False, 'from collections import defaultdict\n'), ((410, 448), 'numpy.count_nonzero', 'np.count_nonzero', (['possible_transitions'], {}), '(possible_transitions)\n', (426, 448), True, 'import numpy as np\n'), ((1291, 1311), 'flatlander.submission.helper.get_agent_pos', 'get_agent_pos', (['agent'], {}), '(agent)\n', (1304, 1311), False, 'from flatlander.submission.helper import get_agent_pos, is_done\n'), ((2245, 2251), 'time.time', 'time', ([], {}), '()\n', (2249, 2251), False, 'from time import time\n'), ((2905, 2920), 'numpy.min', 'np.min', (['all_pcs'], {}), '(all_pcs)\n', (2911, 2920), True, 'import numpy as np\n'), ((1511, 1529), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1527, 1529), True, 'import numpy as np\n'), ((1629, 1664), 'numpy.flatnonzero', 'np.flatnonzero', (['next_possible_moves'], {}), '(next_possible_moves)\n', (1643, 1664), True, 'import numpy as np\n'), ((2723, 2738), 'numpy.min', 'np.min', (['all_pcs'], {}), '(all_pcs)\n', (2729, 2738), True, 'import numpy as np\n'), ((2421, 2431), 'flatlander.submission.helper.is_done', 'is_done', (['a'], {}), '(a)\n', (2428, 2431), False, 'from flatlander.submission.helper import get_agent_pos, is_done\n')] |
# load packages
import random
import yaml
from munch import Munch
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torchaudio
import librosa
import soundfile
import argparse
import shutil
import os
from Utils.ASR.models import ASRCNN
from Utils.JDC.model import JDCNet
from models import Generator, MappingNetwork, StyleEncoder
def numpy2tensor(wave):
to_mel = torchaudio.transforms.MelSpectrogram(
n_mels=80, n_fft=2048, win_length=1200, hop_length=300)
mean, std = -4, 4
wave_tensor = torch.from_numpy(wave).float()
mel_tensor = to_mel(wave_tensor)
mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - mean) / std
return mel_tensor
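# Quick sanity check (assumption: 1 s of 24 kHz audio yields ~81 mel frames):
#   numpy2tensor(np.zeros(24000, dtype=np.float32)).shape == (1, 80, 81)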
def build_model(model_params={}):
args = Munch(model_params)
generator = Generator(args.dim_in, args.style_dim, args.max_conv_dim, w_hpf=args.w_hpf, F0_channel=args.F0_channel)
mapping_network = MappingNetwork(args.latent_dim, args.style_dim, hidden_dim=args.max_conv_dim)
style_encoder = StyleEncoder(args.dim_in, args.style_dim, args.max_conv_dim)
nets_ema = Munch(generator=generator,
mapping_network=mapping_network,
style_encoder=style_encoder)
return nets_ema
def compute_style(reference_path, by_latent=False):
if not by_latent:
wave, sr = librosa.load(reference_path, sr=24000)
        wave, _ = librosa.effects.trim(wave, top_db=30)  # drop leading/trailing silence
if sr != 24000:
wave = librosa.resample(wave, sr, 24000)
mel_tensor = numpy2tensor(wave).to('cuda')
with torch.no_grad():
ref = starganv2.style_encoder(mel_tensor.unsqueeze(1))
else:
latent_dim = starganv2.mapping_network.shared[0].in_features
ref = starganv2.mapping_network(torch.randn(1, latent_dim).to('cuda'))
return ref
def inference(f0_model, vocoder, starganv2, source_path, reference_path, by_latent=False):
# load source wave
audio, source_sr = librosa.load(source_path, sr=24000)
audio /= np.max(np.abs(audio))
source = numpy2tensor(audio).to('cuda')
# load reference wave
reference = compute_style(reference_path, by_latent)
with torch.no_grad():
        f0_feat = f0_model.get_feature_GAN(source.unsqueeze(1))  # use the passed-in F0 model, not the global
out = starganv2.generator(source.unsqueeze(1), reference, F0=f0_feat)
mel = out.transpose(-1, -2).squeeze().to('cuda')
converted = vocoder.inference(mel)
converted = converted.view(-1).to('cpu').detach().numpy()
return converted
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--log_dir", type=str, default="Models/JVS10_normal", help="where models were saved")
parser.add_argument("--source", type=str, default=None, help="source audio file path")
parser.add_argument("--reference", type=str, default=None, help="reference audio file path")
parser.add_argument("--by_latent", action="store_true", help="use mapping network")
args = parser.parse_args()
# load F0 model
F0_model = JDCNet(num_class=1, seq_len=192)
params = torch.load("Utils/JDC/bst.t7")['net']
F0_model.load_state_dict(params)
F0_model.eval().to('cuda')
# load vocoder
from parallel_wavegan.utils import load_model
vocoder = load_model("Vocoder/checkpoint-400000steps.pkl").to('cuda').eval()
vocoder.remove_weight_norm()
vocoder.eval()
# load starganv2
with open(os.path.join(args.log_dir, 'config.yml')) as f:
starganv2_config = yaml.safe_load(f)
model_path = os.path.join(args.log_dir, f'epoch_{starganv2_config["epochs"]:05}.pth')
starganv2 = build_model(model_params=starganv2_config["model_params"])
params = torch.load(model_path, map_location='cpu')['model_ema']
for key in starganv2:
starganv2[key].load_state_dict(params[key])
starganv2[key].eval()
starganv2.style_encoder = starganv2.style_encoder.to('cuda')
starganv2.mapping_network = starganv2.mapping_network.to('cuda')
starganv2.generator = starganv2.generator.to('cuda')
# load reference audio
if args.reference is None and not args.by_latent:
with open('Data/val_list.txt', 'r') as f:
val_list = f.read().split('\n')[:-1]
reference_data = random.choice(val_list)
args.reference, _ = reference_data.split('|')
converted = inference(F0_model, vocoder, starganv2, args.source, args.reference, args.by_latent)
# save results
shutil.copy(args.source, "source.wav")
if args.reference:
shutil.copy(args.reference, "reference.wav")
soundfile.write("converted.wav", converted, 24000)
| [
"torch.from_numpy",
"soundfile.write",
"librosa.resample",
"librosa.effects.trim",
"librosa.load",
"argparse.ArgumentParser",
"models.MappingNetwork",
"parallel_wavegan.utils.load_model",
"Utils.JDC.model.JDCNet",
"torch.randn",
"numpy.abs",
"random.choice",
"shutil.copy",
"munch.Munch",
    ... | [((411, 507), 'torchaudio.transforms.MelSpectrogram', ([], {'n_mels': '(80)', 'n_fft': '(2048)', 'win_length': '(1200)', 'hop_length': '(300)'}), '(n_mels=80, n_fft=2048, win_length=1200,\n hop_length=300)\n', (447, 507), False, 'import torchaudio\n'), ((765, 784), 'munch.Munch', 'Munch', (['model_params'], {}), '(model_params)\n', (770, 784), False, 'from munch import Munch\n'), ((801, 908), 'models.Generator', 'Generator', (['args.dim_in', 'args.style_dim', 'args.max_conv_dim'], {'w_hpf': 'args.w_hpf', 'F0_channel': 'args.F0_channel'}), '(args.dim_in, args.style_dim, args.max_conv_dim, w_hpf=args.w_hpf,\n F0_channel=args.F0_channel)\n', (810, 908), False, 'from models import Generator, MappingNetwork, StyleEncoder\n'), ((927, 1004), 'models.MappingNetwork', 'MappingNetwork', (['args.latent_dim', 'args.style_dim'], {'hidden_dim': 'args.max_conv_dim'}), '(args.latent_dim, args.style_dim, hidden_dim=args.max_conv_dim)\n', (941, 1004), False, 'from models import Generator, MappingNetwork, StyleEncoder\n'), ((1025, 1085), 'models.StyleEncoder', 'StyleEncoder', (['args.dim_in', 'args.style_dim', 'args.max_conv_dim'], {}), '(args.dim_in, args.style_dim, args.max_conv_dim)\n', (1037, 1085), False, 'from models import Generator, MappingNetwork, StyleEncoder\n'), ((1106, 1199), 'munch.Munch', 'Munch', ([], {'generator': 'generator', 'mapping_network': 'mapping_network', 'style_encoder': 'style_encoder'}), '(generator=generator, mapping_network=mapping_network, style_encoder=\n style_encoder)\n', (1111, 1199), False, 'from munch import Munch\n'), ((1993, 2028), 'librosa.load', 'librosa.load', (['source_path'], {'sr': '(24000)'}), '(source_path, sr=24000)\n', (2005, 2028), False, 'import librosa\n'), ((2600, 2625), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2623, 2625), False, 'import argparse\n'), ((3081, 3113), 'Utils.JDC.model.JDCNet', 'JDCNet', ([], {'num_class': '(1)', 'seq_len': '(192)'}), '(num_class=1, seq_len=192)\n', (3087, 3113), False, 'from Utils.JDC.model import JDCNet\n'), ((3589, 3661), 'os.path.join', 'os.path.join', (['args.log_dir', 'f"""epoch_{starganv2_config[\'epochs\']:05}.pth"""'], {}), '(args.log_dir, f"epoch_{starganv2_config[\'epochs\']:05}.pth")\n', (3601, 3661), False, 'import os\n'), ((4520, 4558), 'shutil.copy', 'shutil.copy', (['args.source', '"""source.wav"""'], {}), "(args.source, 'source.wav')\n", (4531, 4558), False, 'import shutil\n'), ((4639, 4689), 'soundfile.write', 'soundfile.write', (['"""converted.wav"""', 'converted', '(24000)'], {}), "('converted.wav', converted, 24000)\n", (4654, 4689), False, 'import soundfile\n'), ((1353, 1391), 'librosa.load', 'librosa.load', (['reference_path'], {'sr': '(24000)'}), '(reference_path, sr=24000)\n', (1365, 1391), False, 'import librosa\n'), ((1415, 1452), 'librosa.effects.trim', 'librosa.effects.trim', (['wave'], {'top_db': '(30)'}), '(wave, top_db=30)\n', (1435, 1452), False, 'import librosa\n'), ((2049, 2062), 'numpy.abs', 'np.abs', (['audio'], {}), '(audio)\n', (2055, 2062), True, 'import numpy as np\n'), ((2202, 2217), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2215, 2217), False, 'import torch\n'), ((3127, 3157), 'torch.load', 'torch.load', (['"""Utils/JDC/bst.t7"""'], {}), "('Utils/JDC/bst.t7')\n", (3137, 3157), False, 'import torch\n'), ((3549, 3566), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (3563, 3566), False, 'import yaml\n'), ((3751, 3793), 'torch.load', 'torch.load', (['model_path'], {'map_location': '"""cpu"""'}), "(model_path, map_location='cpu')\n", (3761, 3793), False, 'import torch\n'), ((4316, 4339), 'random.choice', 'random.choice', (['val_list'], {}), '(val_list)\n', (4329, 4339), False, 'import random\n'), ((4590, 4634), 'shutil.copy', 'shutil.copy', (['args.reference', '"""reference.wav"""'], {}), "(args.reference, 'reference.wav')\n", (4601, 4634), False, 'import shutil\n'), ((554, 576), 'torch.from_numpy', 'torch.from_numpy', (['wave'], {}), '(wave)\n', (570, 576), False, 'import torch\n'), ((1496, 1529), 'librosa.resample', 'librosa.resample', (['wave', 'sr', '(24000)'], {}), '(wave, sr, 24000)\n', (1512, 1529), False, 'import librosa\n'), ((1595, 1610), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1608, 1610), False, 'import torch\n'), ((3474, 3514), 'os.path.join', 'os.path.join', (['args.log_dir', '"""config.yml"""'], {}), "(args.log_dir, 'config.yml')\n", (3486, 3514), False, 'import os\n'), ((1799, 1825), 'torch.randn', 'torch.randn', (['(1)', 'latent_dim'], {}), '(1, latent_dim)\n', (1810, 1825), False, 'import torch\n'), ((3318, 3366), 'parallel_wavegan.utils.load_model', 'load_model', (['"""Vocoder/checkpoint-400000steps.pkl"""'], {}), "('Vocoder/checkpoint-400000steps.pkl')\n", (3328, 3366), False, 'from parallel_wavegan.utils import load_model\n')] |
import os
import numpy as np
import matplotlib.pyplot as plt
from shape import Shape
from visualize import visualize
from normalize import normalize_data, normalize_shape
from dataLoader import import_dataset, import_normalised_data
from featureExtraction import *
from featureMatching import *
from utils import pick_file
DATA_PATH = os.path.join(os.getcwd(), 'data') + os.sep
DATA_SHAPES_PRICETON = DATA_PATH + 'benchmark' + os.sep + 'db' + os.sep
SAVED_DATA = DATA_PATH + 'cache' + os.sep
NORMALIZED_DATA = SAVED_DATA + 'processed_data' + os.sep
try:
    features = np.load(SAVED_DATA + "features.npy", allow_pickle=True)
    features = features.item()
except FileNotFoundError:
    print("No cached features found at " + SAVED_DATA + "features.npy")
    features = {}
#TODO: Include a file picker to show that aspect we also implemented, and the speed of the feature calculation
# Retrieving shape
print("\n----------------------------------------------------")
print("3D Shapes Search Engine")
print("----------------------------------------------------")
while True:
print("Select a shape to search for similar ones. Only OFF and PLY formats are supported. \n")
try:
shape = pick_file()
break
except FileNotFoundError:
print("File not found. Try again.\n")
continue
except FileExistsError:
print("Format not supported. Please select an OFF or PLY file.\n")
continue
print('Normalising query shape . . . ')
shape, new_n_verts, new_n_faces = normalize_shape(shape)
# Calculating features for the shape
print('Calculating features for query shape and standardize them. . .')
shape_features = calculate_single_shape_metrics(shape)
shape_features = standardize_single_shape(shape_features)
# Calculate nearest neighbors via ANN and R-Nearest Neighbors
neighbors = r_neighbors(shape_features, features)
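# r_neighbors presumably returns (ids, distances) with the query shape itself
# at index 0, hence the [1:] slices below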
n_shapes_id, n_distances = neighbors[0][1:], neighbors[1][1:]
# Retrieving shapes from database
n_shapes = []
for shape_id in n_shapes_id:
    filename = NORMALIZED_DATA + "n" + str(shape_id) + ".off"
    with open(filename, 'r') as file:
        verts, faces, n_verts, n_faces = read_off(file)
    mesh = trm.load_mesh(filename)
    shape = Shape(verts, faces, mesh)
    shape.set_id(shape_id)
    n_shapes.append(shape)
visualize(n_shapes)
| [
"os.getcwd",
"visualize.visualize",
"shape.Shape",
"utils.pick_file",
"normalize.normalize_shape",
"numpy.load"
] | [((1420, 1442), 'normalize.normalize_shape', 'normalize_shape', (['shape'], {}), '(shape)\n', (1435, 1442), False, 'from normalize import normalize_data, normalize_shape\n'), ((2178, 2197), 'visualize.visualize', 'visualize', (['n_shapes'], {}), '(n_shapes)\n', (2187, 2197), False, 'from visualize import visualize\n'), ((573, 628), 'numpy.load', 'np.load', (["(SAVED_DATA + 'features.npy')"], {'allow_pickle': '(True)'}), "(SAVED_DATA + 'features.npy', allow_pickle=True)\n", (580, 628), True, 'import numpy as np\n'), ((2103, 2128), 'shape.Shape', 'Shape', (['verts', 'faces', 'mesh'], {}), '(verts, faces, mesh)\n', (2108, 2128), False, 'from shape import Shape\n'), ((350, 361), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (359, 361), False, 'import os\n'), ((1104, 1115), 'utils.pick_file', 'pick_file', ([], {}), '()\n', (1113, 1115), False, 'from utils import pick_file\n')] |
from plantcv.plantcv import fatal_error
from plantcv.plantcv import params
import os
from spectral import *
from osgeo import gdal
from osgeo.gdalconst import *
import numpy as np
def reference2array(path):
"""this function allows you read in hyperspectral reference in raw format and returns it as array that is averaged
(this will be used to normalize the raw hyperspectral image)
Inputs:
path = path to the raw file of reference
Returns:
image_array_all = hyperspectral reference image in array format
gdalhyper = hyperspectral reference image
pixelWidth = pixelWidth
cols = number of cols of raw image
rows = number of rows of raw image
bands = number of bands of raw image
:param hyperimg: spectral object
:param bands: list of band centers
:param path: string
:return filname: string
"""
    params.device += 1  # advance the PlantCV step counter
if os.path.isfile(path) == False:
fatal_error(str(path) + " does not exist")
gdalhyper = gdal.Open(path, GA_ReadOnly)
    if gdalhyper is None:
        fatal_error("Couldn't open this file: " + path)
else:
print ("%s opened successfully" %path)
print ('Get image size')
cols = gdalhyper.RasterXSize
rows = gdalhyper.RasterYSize
bands = gdalhyper.RasterCount
print ("columns: %i" %cols)
print ("rows: %i" %rows)
print ("bands: %i" %bands)
print ('Get georeference information')
geotransform = gdalhyper.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
print ("origin x: %i" %originX)
print ("origin y: %i" %originY)
print ("width: %2.2f" %pixelWidth)
print ("height: %2.2f" %pixelHeight)
# Set pixel offset.....
print ('Convert image to 2D array')
band = gdalhyper.GetRasterBand(1)
image_array = band.ReadAsArray(0, 0, cols, rows)
image_array_name = path
print (type(image_array))
print (image_array.shape)
output_list = []
for i in range(1,bands+1):
band = gdalhyper.GetRasterBand(i)
image_array = band.ReadAsArray(0, 0, cols, rows)
for y in zip(*image_array):
avg_reflectance = sum(y)/len(y)
#print (avg_reflectance)
            output_list.append(avg_reflectance)
#print (output_list)
image_array_ave = np.reshape(output_list, (bands, cols))
print ('Average image width')
print (image_array_ave.shape)
    return image_array_ave, gdalhyper, cols, rows, bands
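# Minimal usage sketch (hypothetical path); the averaged reference can then be
# used to normalize each band of a raw hyperspectral image:
#   ref_avg, ref_ds, cols, rows, bands = reference2array('/data/white_ref.raw')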
| [
"osgeo.gdal.Open",
"numpy.reshape",
"os.path.isfile"
] | [((991, 1019), 'osgeo.gdal.Open', 'gdal.Open', (['path', 'GA_ReadOnly'], {}), '(path, GA_ReadOnly)\n', (1000, 1019), False, 'from osgeo import gdal\n'), ((2495, 2533), 'numpy.reshape', 'np.reshape', (['output_list', '(bands, cols)'], {}), '(output_list, (bands, cols))\n', (2505, 2533), True, 'import numpy as np\n'), ((892, 912), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (906, 912), False, 'import os\n')] |
"""
AutoCal
Automatic analysis of Calcium imaging data
<NAME> 2017
<EMAIL>
"""
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
"""
Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
Modified from Scipy cookbook by <NAME>
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
.. [1] <NAME>, <NAME>, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
:param y: array_like, shape (N,) the values of the time history of the signal.
:param window_size: int, the length of the window. Must be an odd integer number.
:param order: int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
:param deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
    :param rate: the sampling rate (1 / sample spacing); derivatives are scaled by rate**deriv
:return: ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
>>> import numpy as np
>>> savitzky_golay(np.array([1,2,3,4,5,4,3,2,1]), 3, 1)
array([ 1. , 2. , 3. , 4. , 4.33333333,
4. , 3. , 2. , 1.66666667])
"""
import numpy as np
import math
assert type(window_size) == int and type(order) == int, 'Window size and order must be integers'
assert window_size % 2 == 1 and window_size >= 1, 'Window size must be positive odd number'
assert window_size >= order + 2, 'Window size is too small for polynomial order'
order_range = range(order + 1)
half_window = window_size // 2
# Precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * math.factorial(deriv)
# Fill back in the beginning and end signal points with values taken from the signal itself
first_vals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
last_vals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((first_vals, y, last_vals))
# Return the linear convolution
    return np.convolve(m[::-1], y, mode='valid')
| [
"numpy.abs",
"numpy.convolve",
"numpy.linalg.pinv",
"math.factorial",
"numpy.concatenate"
] | [((2257, 2299), 'numpy.concatenate', 'np.concatenate', (['(first_vals, y, last_vals)'], {}), '((first_vals, y, last_vals))\n', (2271, 2299), True, 'import numpy as np\n'), ((2348, 2385), 'numpy.convolve', 'np.convolve', (['m[::-1]', 'y'], {'mode': '"""valid"""'}), "(m[::-1], y, mode='valid')\n", (2359, 2385), True, 'import numpy as np\n'), ((1997, 2018), 'math.factorial', 'math.factorial', (['deriv'], {}), '(deriv)\n', (2011, 2018), False, 'import math\n'), ((2140, 2181), 'numpy.abs', 'np.abs', (['(y[1:half_window + 1][::-1] - y[0])'], {}), '(y[1:half_window + 1][::-1] - y[0])\n', (2146, 2181), True, 'import numpy as np\n'), ((2206, 2250), 'numpy.abs', 'np.abs', (['(y[-half_window - 1:-1][::-1] - y[-1])'], {}), '(y[-half_window - 1:-1][::-1] - y[-1])\n', (2212, 2250), True, 'import numpy as np\n'), ((1954, 1971), 'numpy.linalg.pinv', 'np.linalg.pinv', (['b'], {}), '(b)\n', (1968, 1971), True, 'import numpy as np\n')] |
import numpy as np
import cv2
class Ellipse():
def __init__(self):
"""
A partial ellipse
"""
startAngle = np.random.rand()*360.0
arclength = 60 + np.sum(np.random.rand(2))*140.0
direction = np.random.choice([-1, 1])
self._shape = 'ellipse'
self._centre = np.random.randint(200, 300, (2,)).tolist()
self._radii = np.random.randint(10, 100, (2,)).tolist()
self._angle = np.random.rand()*360.0
self._startAngle = startAngle
self._endAngle = startAngle + (direction*arclength)
def draw(self, img, highlight_ends=False):
cv2.ellipse(img,
center=tuple(self._centre),
axes=tuple(self._radii),
angle=self._angle,
startAngle=self._startAngle,
endAngle=self._endAngle,
color=(255,255,255),
thickness=2)
if highlight_ends:
cv2.ellipse(img,
center=tuple(self._centre),
axes=tuple(self._radii),
angle=self._angle,
startAngle=self._startAngle,
endAngle=self._startAngle+5,
color=(0,255,0), # b g r
thickness=2)
cv2.ellipse(img,
center=tuple(self._centre),
axes=tuple(self._radii),
angle=self._angle,
startAngle=self._endAngle-5,
endAngle=self._endAngle,
color=(0,0,255), # b g r
thickness=2)
def to_dict(self):
return {'shape': 'ellipse',
'centre_x': self._centre[0],
'centre_y': self._centre[1],
'radii_primary': self._radii[0],
'radii_secondary': self._radii[1],
'angle': self._angle,
'startAngle': self._startAngle,
'endAngle': self._endAngle,
}
class EllipseGroup():
def __init__(self, n_subpaths=1):
"""
initialise and store random sate prior
"""
self._n_subpaths = n_subpaths
# this must happen before anyother calls to random are made
self._paths = [Ellipse() for _ in range(self._n_subpaths)]
def n_subpaths(self):
return self._n_subpaths
def to_dict(self):
"""
export to a flat dictionary
"""
settings = {'n_subpaths': len(self._paths)}
for i,p in enumerate(self._paths):
for k, v in p.to_dict().items():
settings[f"path[{i}].{k}"] = v
return settings
def draw(self, img, highlight_ends=False):
"""
draw everything and highlight start and end of path
"""
for p in self._paths:
            p.draw(img, highlight_ends)
def draw_iterative_highlight_ends(self, img):
"""
this should return a generator that will cycle
"""
for i in range(self._n_subpaths):
for I,p in enumerate(self._paths):
p.draw(img,i==I)
yield i
if __name__ == "__main__":
import cv2
img=np.zeros((500, 500, 3), np.uint8)
def new_char():
e_char = EllipseGroup(2)
#bs_char.draw_bspline(img,'centre')
print(e_char.to_dict())
return e_char.draw_iterative_highlight_ends()
char_cycle = iter([])
def cycle_char():
global char_cycle
img[:] = 0
try:
next(char_cycle)
        except StopIteration:
char_cycle = new_char()
cycle_char()
    while True:
cv2.imshow('image', img)
key=cv2.waitKey(20) & 0xFF
if key == 27:
break
if key == 32:
cycle_char()
cv2.destroyAllWindows()
| [
"numpy.random.rand",
"numpy.random.choice",
"cv2.imshow",
"numpy.zeros",
"numpy.random.randint",
"cv2.destroyAllWindows",
"cv2.waitKey"
] | [((3370, 3403), 'numpy.zeros', 'np.zeros', (['(500, 500, 3)', 'np.uint8'], {}), '((500, 500, 3), np.uint8)\n', (3378, 3403), True, 'import numpy as np\n'), ((4008, 4031), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4029, 4031), False, 'import cv2\n'), ((245, 270), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (261, 270), True, 'import numpy as np\n'), ((3856, 3880), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (3866, 3880), False, 'import cv2\n'), ((145, 161), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (159, 161), True, 'import numpy as np\n'), ((474, 490), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (488, 490), True, 'import numpy as np\n'), ((3893, 3908), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (3904, 3908), False, 'import cv2\n'), ((335, 368), 'numpy.random.randint', 'np.random.randint', (['(200)', '(300)', '(2,)'], {}), '(200, 300, (2,))\n', (352, 368), True, 'import numpy as np\n'), ((405, 437), 'numpy.random.randint', 'np.random.randint', (['(10)', '(100)', '(2,)'], {}), '(10, 100, (2,))\n', (422, 437), True, 'import numpy as np\n'), ((200, 217), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (214, 217), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
from glob import glob
import matplotlib.pyplot as plt  # needed for the plots at the end of the script
from skimage.io import imread
from skimage.transform import resize
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_addons as tfa
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from utils import *
#if not in_notebook():
# import argparse
# parser = argparse.ArgumentParser(description='MODEL ACTIVITY ANALYZER.')
# parser.add_argument('--dataset', default='./dataset', type=str, help='path to dataset')
# parser.add_argument('--model', default='model file name', type=str, help='model file name')
# parser.add_argument('--lx', default=0, type=int, help='image length')
# parser.add_argument('--ly', default=0, type=int, help='image width')
# parser.add_argument('--n_sample', default=4, type=int, help='number of sample')
# parser.add_argument('--epochs', default=200, type=int, help='number of epochs')
# parser.add_argument('--BS', default=32, type=int, help='number of epochs')
# parser.add_argument('--prefix', default='', type=str, help='path to save the results')
## parser.add_argument('--deep', default=0, type=int, help='Network depth!')
## parser.add_argument('--dpi', default=200, type=int, help='image dpi')
# parser.add_argument('--restart', action="store_true")
# args = parser.parse_args()
# data_path = args.dataset
# lx,ly = args.lx,args.ly
# n_sample = args.n_sample
# restart = args.restart
# EPOCHS = args.epochs
# BS = args.BS
# pp = args.pp
# reg = args.reg
## dpi = args.dpi
# prefix = args.prefix+'/'
## DEEP = args.deep
#else:
# data_path = 'Alzheimer_dataset/train'
# lx,ly = 128,128
# n_sample = 4
# restart = 0
# EPOCHS = 1
# BS = 32
## dpi = args.dpi
# prefix = 'alz1/'
data_path = '/home/vafaeisa/scratch/plank/'
lx,ly = 256,256
n_sample = 4
restart = 0
EPOCHS = 1
BS = 32
prefix = 'alz1/'
#paths = glob(data_path+'/*')
#fname = '{}-{}-{}'.format(data_path.split('/')[-1],lx,ly)
#if not os.path.isfile(fname+'.npz'):# or restart:
# print("[INFO] reading data and preparation...")
# x = []
# labels = []
# full_path = []
# for path in paths:
# files = glob(path+'/*')
# for fil in files:
# try:
# img = imread(fil)
# if lx*ly!=0:
# img = resize(img,output_shape=(lx,ly))
# if img.ndim==3:
# img = np.mean(img,axis=-1)
# x.append(img)
# labels.append(fil.split('/')[-2])
# full_path.append(fil)
# except:
# print('Something is wrong with',fil,', skipped.')
# print("[INFO] prepared data is saved.")
# np.savez(fname,x=x,labels=labels,full_path=full_path)
# x = np.array(x)
# labels = np.array(labels)
#else:
# data = np.load(fname+'.npz'.format(lx,ly))
# x = data['x']
# labels = data['labels']
# full_path = data['full_path']
# print("[INFO] data is loaded...")
#int_map,lbl_map = int_label(labels)
#vec = [int_map[word] for word in labels]
#vec = np.array(vec)
#y = to_categorical(vec, num_classes=None, dtype='float32')
#x = x[:,:,:,None]/x.max()
#x = 2*x-1
## initialize the training data augmentation object
#trainAug = ImageDataGenerator(
# rotation_range=5,
# width_shift_range=0.03,
# height_shift_range=0.03,
## brightness_range=0.01,
## shear_range=0.0,
# zoom_range=0.03,
## horizontal_flip=True,
## vertical_flip=True,
# fill_mode="nearest")
#describe_labels(y,verbose=1)
#x_us,y_us = balance_aug(x,y,trainAug)
## x_us,y_us = mixup(x,y,alpha=20,beta=1)
#describe_labels(y_us,verbose=1)
#x_us,y_us = shuffle_data(x_us,y_us)
#train_x0 = x_us[y_us[:,0].astype(bool)]
#train_x1 = x_us[y_us[:,1].astype(bool)]
#test_x0 = train_x0[:20]
#test_x1 = train_x1[:20]
#train_x0 = x_us[y_us[:,0].astype(bool)]
#train_x1 = x_us[y_us[:,1].astype(bool)]
#test_x0 = train_x0[:20]
#test_x1 = train_x1[:20]
def blocker(x,nside):
xx = np.array_split(x, nside, axis=1)
xx = np.concatenate(xx,axis=0)
xx = np.array_split(xx, nside, axis=2)
xx = np.concatenate(xx,axis=0)
return xx
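# e.g. blocker(x, 8) on maps of shape (N, H, W) yields (N*64, H/8, W/8):
# each map is cut into an 8x8 grid of tiles stacked along the batch axis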
csep = 'healpix'
train_x0 = np.load(data_path+csep+'.npy')[:100]
csep = 'sevem'
train_x1 = np.load(data_path+csep+'.npy')[:100]
train_x0 = blocker(train_x0,8)
train_x1 = blocker(train_x1,8)
#fig,(ax1,ax2) = plt.subplots(1,2,figsize=(14,5))
#irr = np.random.randint(train_x0.shape[0])
#ax1.imshow(train_x0[irr],cmap='jet')
#ax2.imshow(train_x1[irr],cmap='jet')
#plt.tight_layout()
#plt.savefig('test.jpg')
# min-max normalise each domain to [0, 1], then map to [-1, 1]
train_x0 = (train_x0 - train_x0.min()) / (train_x0.max() - train_x0.min())
train_x0 = 2*train_x0-1
train_x0 = train_x0[:,:,:,None]
train_x1 = (train_x1 - train_x1.min()) / (train_x1.max() - train_x1.min())
train_x1 = 2*train_x1-1
train_x1 = train_x1[:,:,:,None]
test_x0 = train_x0[:20]
test_x1 = train_x1[:20]
print(train_x0.shape,train_x1.shape)
#input_img_size = (256, 256, 1)
input_img_size = train_x0.shape[1:]
buffer_size = 256
batch_size = 10
# Get the generators
gen_G = get_resnet_generator(input_img_size,name="generator_G")
gen_F = get_resnet_generator(input_img_size,name="generator_F")
# Get the discriminators
disc_X = get_discriminator(input_img_size,name="discriminator_X")
disc_Y = get_discriminator(input_img_size,name="discriminator_Y")
# Loss function for evaluating adversarial loss
adv_loss_fn = keras.losses.MeanSquaredError()
# Define the loss function for the generators
def generator_loss_fn(fake):
fake_loss = adv_loss_fn(tf.ones_like(fake), fake)
return fake_loss
# Define the loss function for the discriminators
def discriminator_loss_fn(real, fake):
real_loss = adv_loss_fn(tf.ones_like(real), real)
fake_loss = adv_loss_fn(tf.zeros_like(fake), fake)
return (real_loss + fake_loss) * 0.5
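# (this is the least-squares GAN objective: discriminator outputs for real
# samples are regressed towards 1 and for fakes towards 0)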
# Create cycle gan model
cycle_gan_model = CycleGan(
generator_G=gen_G, generator_F=gen_F, discriminator_X=disc_X, discriminator_Y=disc_Y
)
# Compile the model
cycle_gan_model.compile(
gen_G_optimizer=keras.optimizers.Adam(learning_rate=5e-5, beta_1=0.5),
gen_F_optimizer=keras.optimizers.Adam(learning_rate=5e-5, beta_1=0.5),
disc_X_optimizer=keras.optimizers.Adam(learning_rate=5e-5, beta_1=0.5),
disc_Y_optimizer=keras.optimizers.Adam(learning_rate=5e-5, beta_1=0.5),
gen_loss_fn=generator_loss_fn,
disc_loss_fn=discriminator_loss_fn,
)
# fake_train_x1 = self.gen_G(real_train_x0)
# fake_train_x0 = self.gen_F(real_train_x1)
cycle_gan_model.fit(train_x0, train_x1,
batch_size=10,
epochs=100,
# callbacks=[plotter, model_checkpoint_callback],
)
cycle_gan_model.saveit('model1/')
cycle_gan_model.loadit('model1/')
_, ax = plt.subplots(4, 2, figsize=(10, 15))
#for i, img in enumerate(test_horses.take(4)):
for i in range(4):
img = test_x0[i:i+1]
prediction = np.array(cycle_gan_model.gen_G(img, training=False)[0])
prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
img = (img[0] * 127.5 + 127.5).astype(np.uint8) #.numpy().astype(np.uint8)
ax[i, 0].imshow(img)
ax[i, 1].imshow(prediction)
    ax[i, 0].set_title("Input image")
ax[i, 1].set_title("Translated image")
ax[i, 0].axis("off")
ax[i, 1].axis("off")
prediction = keras.preprocessing.image.array_to_img(prediction)
prediction.save("predicted_img_{i}.png".format(i=i))
plt.tight_layout()
plt.savefig('fig.jpg', dpi=150)  # save before show() so the figure is not blank
plt.show()
| [
"tensorflow.keras.losses.MeanSquaredError",
"numpy.array_split",
"tensorflow.keras.preprocessing.image.array_to_img",
"tensorflow.keras.optimizers.Adam",
"numpy.concatenate",
"tensorflow.ones_like",
"tensorflow.zeros_like",
"numpy.load"
] | [((5504, 5535), 'tensorflow.keras.losses.MeanSquaredError', 'keras.losses.MeanSquaredError', ([], {}), '()\n', (5533, 5535), False, 'from tensorflow import keras\n'), ((4116, 4148), 'numpy.array_split', 'np.array_split', (['x', 'nside'], {'axis': '(1)'}), '(x, nside, axis=1)\n', (4130, 4148), True, 'import numpy as np\n'), ((4158, 4184), 'numpy.concatenate', 'np.concatenate', (['xx'], {'axis': '(0)'}), '(xx, axis=0)\n', (4172, 4184), True, 'import numpy as np\n'), ((4193, 4226), 'numpy.array_split', 'np.array_split', (['xx', 'nside'], {'axis': '(2)'}), '(xx, nside, axis=2)\n', (4207, 4226), True, 'import numpy as np\n'), ((4236, 4262), 'numpy.concatenate', 'np.concatenate', (['xx'], {'axis': '(0)'}), '(xx, axis=0)\n', (4250, 4262), True, 'import numpy as np\n'), ((4306, 4340), 'numpy.load', 'np.load', (["(data_path + csep + '.npy')"], {}), "(data_path + csep + '.npy')\n", (4313, 4340), True, 'import numpy as np\n'), ((4370, 4404), 'numpy.load', 'np.load', (["(data_path + csep + '.npy')"], {}), "(data_path + csep + '.npy')\n", (4377, 4404), True, 'import numpy as np\n'), ((7436, 7486), 'tensorflow.keras.preprocessing.image.array_to_img', 'keras.preprocessing.image.array_to_img', (['prediction'], {}), '(prediction)\n', (7474, 7486), False, 'from tensorflow import keras\n'), ((5640, 5658), 'tensorflow.ones_like', 'tf.ones_like', (['fake'], {}), '(fake)\n', (5652, 5658), True, 'import tensorflow as tf\n'), ((5806, 5824), 'tensorflow.ones_like', 'tf.ones_like', (['real'], {}), '(real)\n', (5818, 5824), True, 'import tensorflow as tf\n'), ((5860, 5879), 'tensorflow.zeros_like', 'tf.zeros_like', (['fake'], {}), '(fake)\n', (5873, 5879), True, 'import tensorflow as tf\n'), ((6139, 6193), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(5e-05)', 'beta_1': '(0.5)'}), '(learning_rate=5e-05, beta_1=0.5)\n', (6160, 6193), False, 'from tensorflow import keras\n'), ((6214, 6268), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(5e-05)', 'beta_1': '(0.5)'}), '(learning_rate=5e-05, beta_1=0.5)\n', (6235, 6268), False, 'from tensorflow import keras\n'), ((6290, 6344), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(5e-05)', 'beta_1': '(0.5)'}), '(learning_rate=5e-05, beta_1=0.5)\n', (6311, 6344), False, 'from tensorflow import keras\n'), ((6366, 6420), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(5e-05)', 'beta_1': '(0.5)'}), '(learning_rate=5e-05, beta_1=0.5)\n', (6387, 6420), False, 'from tensorflow import keras\n')] |
from typing import List, Optional
import numpy as np
import pandas as pd
from temporis.dataset.ts_dataset import AbstractTimeSeriesDataset
def variance_information(
dataset: AbstractTimeSeriesDataset,
features: Optional[List[str]] = None,
) -> pd.DataFrame:
"""
    Return the minimum, mean and maximum standard deviation of each common
    feature across the lives of the dataset
    Parameters
    ----------
    dataset: AbstractTimeSeriesDataset
        The dataset
    features: Optional[List[str]] = None
        Features to select
    Return
    ------
    pd.DataFrame: Dataframe that contains four columns
        ['Feature', 'Min std', 'Mean std', 'Max std'],
        sorted by 'Min std' in ascending order
    dict: string -> list
        The key is the column name and the value is the list of standard
        deviations of that feature, one per life
"""
common_features = dataset.common_features()
if features:
common_features = set(common_features).intersection(set(features))
std_per_life = {}
for life in dataset:
selected_columns = [column for column in common_features if column in life.columns]
d = life[selected_columns].std().to_dict()
for column in selected_columns:
if not isinstance(d[column], float):
continue
std_list = std_per_life.setdefault(column, [])
std_list.append(d[column])
data = [
(
column,
np.min(std_per_life[column]),
np.mean(std_per_life[column]),
np.max(std_per_life[column]),
)
for column in std_per_life.keys()
]
df = pd.DataFrame(
data,
columns=[
"Feature",
"Min std",
"Mean std",
"Max std",
],
)
df.sort_values(by="Min std", inplace=True, ascending=True)
return df, std_per_life
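# Minimal usage sketch (hypothetical dataset object):
#   df, stds = variance_information(dataset, features=['sensor_1', 'sensor_2'])
#   near_constant = df[df['Min std'] < 1e-6]['Feature']  # candidates to drop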
| [
"pandas.DataFrame",
"numpy.mean",
"numpy.max",
"numpy.min"
] | [((1622, 1695), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Feature', 'Min std', 'Mean std', 'Max std']"}), "(data, columns=['Feature', 'Min std', 'Mean std', 'Max std'])\n", (1634, 1695), True, 'import pandas as pd\n'), ((1439, 1467), 'numpy.min', 'np.min', (['std_per_life[column]'], {}), '(std_per_life[column])\n', (1445, 1467), True, 'import numpy as np\n'), ((1481, 1510), 'numpy.mean', 'np.mean', (['std_per_life[column]'], {}), '(std_per_life[column])\n', (1488, 1510), True, 'import numpy as np\n'), ((1524, 1552), 'numpy.max', 'np.max', (['std_per_life[column]'], {}), '(std_per_life[column])\n', (1530, 1552), True, 'import numpy as np\n')] |
"""
Leer los datos del archivo /M1/CLASE_03/Data/sales_data_sample_excercise.csv
Este archivo tiene algunos datos numéricos y otros de tipo cadena de caracteres.
Las columnas son:
ORDERNUMBER: int, id de la orden
SALES: float, monto abonado
MONTH_ID: int, mes
YEAR_ID: int, año
PRODUCTLINE: str, producto
COUNTRY: str, país de venta
¿Recuerdan que todos los elementos de una instancia de ndarray deben ser del mismo tipo? Entonces vamos a leer el archivo y crear una instancia de ndarray de tipo cadena de caracteres.
¿Qué pasaría si intentáramos crear una instancia de tipo int? ¿Y de tipo float?
"""
import numpy as np
import seaborn as sns
file = "Data/sales_data_sample_excercise.csv"
data_str = np.genfromtxt(file, delimiter="\t", skip_header=True, dtype=str)
print("\nSring dtype")
print(data_str)
data_int = np.genfromtxt(file, delimiter="\t", skip_header=True, dtype=int)
print("\nInt dtype")
print(data_int)
data_float = np.genfromtxt(file, delimiter="\t", skip_header=True, dtype=float)
print("\nFloat dtype")
print(data_float)
data = np.genfromtxt(file, delimiter="\t", skip_header=True)
print("\nNo specified dtype")
print(data)
# Crear un array numérico que tenga como valores las columna SALES y otro array de str que tenga como valores la columna COUNTRY
sales = data_float[:, 1]
print(sales)
country = data_str[:, -1]
print(country)
# Sobre los datos de precios de ventas (columna SALES) calcular:
# mínimo máximo promedio cantidad suma
print(f"\nPrecio minimo sales: {sales.min()}")
print(f"Precio máximo sales: {sales.max()}")
print(f"Precio promedio sales: {sales.mean()}")
print(f"Precio cantidad sales: {len(sales)}")
print(f"Precio suma sales: {sales.sum()}")
print("\n¿Cuántas ventas se hicieron en USA?")
usa_mask = country == "USA"
usa_sales = sales[usa_mask]
print(usa_sales.sum())
print(
"\n¿Cuáles son los precios de las 5 ventas que están en las filas 6 a 10 del dataset?"
)
print(sales[6:11])
print(f"Precio media sales: {sales.mean()}")
print(f"Precio mediana sales: {np.median(sales)}")
print(f"Precio desvio sales: {sales.std()}")
print(f"Precio rango sales: {sales.max() - sales.min()}")
def distribution_plotter(data, label):
sns.set(rc={"figure.figsize": (10, 7)})
sns.set_style("white")
dist = sns.distplot(data,
hist_kws={"alpha": 0.2},
kde_kws={"linewidth": 5})
dist.set_title("Distribucion de " + label + "\n", fontsize=16)
random_generator = np.random.default_rng(1234)
birthday = random_generator.integers(low=1, high=366, size=30)
distribution_plotter(birthday, "Cumple")
| [
"seaborn.set",
"numpy.median",
"numpy.random.default_rng",
"seaborn.distplot",
"seaborn.set_style",
"numpy.genfromtxt"
] | [((712, 776), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '"""\t"""', 'skip_header': '(True)', 'dtype': 'str'}), "(file, delimiter='\\t', skip_header=True, dtype=str)\n", (725, 776), True, 'import numpy as np\n'), ((828, 892), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '"""\t"""', 'skip_header': '(True)', 'dtype': 'int'}), "(file, delimiter='\\t', skip_header=True, dtype=int)\n", (841, 892), True, 'import numpy as np\n'), ((944, 1010), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '"""\t"""', 'skip_header': '(True)', 'dtype': 'float'}), "(file, delimiter='\\t', skip_header=True, dtype=float)\n", (957, 1010), True, 'import numpy as np\n'), ((1060, 1113), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '"""\t"""', 'skip_header': '(True)'}), "(file, delimiter='\\t', skip_header=True)\n", (1073, 1113), True, 'import numpy as np\n'), ((2478, 2505), 'numpy.random.default_rng', 'np.random.default_rng', (['(1234)'], {}), '(1234)\n', (2499, 2505), True, 'import numpy as np\n'), ((2194, 2233), 'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (10, 7)}"}), "(rc={'figure.figsize': (10, 7)})\n", (2201, 2233), True, 'import seaborn as sns\n'), ((2238, 2260), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (2251, 2260), True, 'import seaborn as sns\n'), ((2272, 2341), 'seaborn.distplot', 'sns.distplot', (['data'], {'hist_kws': "{'alpha': 0.2}", 'kde_kws': "{'linewidth': 5}"}), "(data, hist_kws={'alpha': 0.2}, kde_kws={'linewidth': 5})\n", (2284, 2341), True, 'import seaborn as sns\n'), ((2026, 2042), 'numpy.median', 'np.median', (['sales'], {}), '(sales)\n', (2035, 2042), True, 'import numpy as np\n')] |
import numpy as np
import torch
import inspect
from collections import OrderedDict
from data_loader import Dataset, get_loader
import config
def random_candidates(test_users, n_candidates=101, seed=37, validate=False):
if not validate:
np.random.seed(seed)
train_data = Dataset.train_val
test_data = Dataset.test
else:
train_data = Dataset.train
test_data = Dataset.val
test_set = {}
    for user in test_users:
target = test_data[user]
user_items = train_data[user]
items = np.random.choice(Dataset.items, len(target) + len(user_items) + n_candidates, replace=False)
negs = set(items) - set(user_items) - set(target)
test_set[user] = list(negs)[:n_candidates - len(target)] + target
return test_set
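# Each test user gets (n_candidates - len(target)) sampled negatives plus the
# true targets, so evaluation ranks the targets among roughly 100 candidates.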
class Evaluator:
def __init__(self, model, k=50, users=-1, n_candidates=-1, timestep=-1, metrics='recall,ndcg', validate=False, repeatable=False):
self.model = model
self.k = k
self.n_candidates = n_candidates
self.instances = []
self.test_users = []
self.metrics = metrics.split(',')
self.validate = validate
self.rtn_timestep = timestep
self.generator = self.create_generator(users)
self.repeatable = repeatable
self.invalid_items = list(set(range(Dataset.n_items)) - set(Dataset.items))
def add(self, target, prediction):
self.instances.append((target, prediction))
def create_generator(self, users):
requires = set(inspect.signature(self.model.predict).parameters) | {'users', 'users_items'}
gen_params = dict()
gen_params['n_neg'] = 0
gen_params['unroll'] = -1
gen_params['timestep'] = -1
return get_loader(requires, batch_size=100, users=users, num_workers=2, process_mode=1 if self.validate else 2, shuffle=False, **gen_params)
def precision(self):
prec = 0
for target, prediction in self.instances:
prec += float(len(set(target) & set(prediction))) / len(prediction)
return prec / len(self.instances)
def recall(self):
recall = 0
count = 0
for target, prediction in self.instances:
rec = float(len(set(target) & set(prediction))) / len(target)
count += 1
recall += rec
return recall / count
def ndcg(self):
ndcg_ = 0.
count = 0
for target, prediction in self.instances:
dcg = 0.
max_dcg = 0.
for i, p in enumerate(prediction):
if i < len(target):
max_dcg += 1. / np.log2(2 + i)
if p in target:
dcg += 1. / np.log2(2 + i)
ndcg_ += dcg / max_dcg
count += 1
return ndcg_ / count
def filter_topk(self, score, users_items, candidates, invalid_items=None):
if candidates is None:
if invalid_items is not None:
score[:, invalid_items] = -float('inf')
if not self.repeatable:
score = score.scatter_(1, users_items, -float('inf'))
scores, topk_items = score.topk(self.k, -1)
if candidates is not None:
topk_items = candidates.gather(1, topk_items)
# print('=============================')
# print(users_items[0], topk_items[0])
return topk_items
def run(self, calculate=True):
self.instances = []
self.test_users = []
self.model.eval()
device = next(self.model.parameters()).device
test_data = Dataset.test
if self.validate:
test_data = Dataset.val
with torch.no_grad():
invalid_items = torch.LongTensor(self.invalid_items).to(device)
for inputs in self.generator:
# inputs -> [multi-batch] multi-batch-> [X, Y, W]
inp = inputs[0][0]
users = inp['users']
if self.n_candidates <= 0:
candidates = None
else:
candidates = []
test_case = random_candidates(users, self.n_candidates, seed=config.random_seed, validate=self.validate)
for user in users:
candidates.append(test_case[user])
candidates = torch.LongTensor(candidates).to(device)
inp = {k: torch.LongTensor(x).to(device) for k, x in inp.items()}
users_items = inp['users_items'].clone()
if self.rtn_timestep > 0:
inp['users_items'] = inp['users_items'][:, -self.rtn_timestep:]
score, hidden = self.model.predict(candidates=candidates, **inp)
prediction = self.filter_topk(score.detach(), users_items=users_items, candidates=candidates, invalid_items=invalid_items)
prediction = prediction.cpu().numpy()
users_items = users_items.cpu().numpy()
for i, user in enumerate(users):
target = test_data[user]
# target = set(users_items[i])
self.test_users.append(user)
self.add(target, prediction[i])
if calculate:
return self.calculate_metrics()
def calculate_metrics(self):
return OrderedDict([(metric, getattr(self, metric)()) for metric in self.metrics])
| [
"torch.LongTensor",
"inspect.signature",
"data_loader.get_loader",
"numpy.random.seed",
"torch.no_grad",
"numpy.log2"
] | [((251, 271), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (265, 271), True, 'import numpy as np\n'), ((1776, 1913), 'data_loader.get_loader', 'get_loader', (['requires'], {'batch_size': '(100)', 'users': 'users', 'num_workers': '(2)', 'process_mode': '(1 if self.validate else 2)', 'shuffle': '(False)'}), '(requires, batch_size=100, users=users, num_workers=2,\n process_mode=1 if self.validate else 2, shuffle=False, **gen_params)\n', (1786, 1913), False, 'from data_loader import Dataset, get_loader\n'), ((3708, 3723), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3721, 3723), False, 'import torch\n'), ((1554, 1591), 'inspect.signature', 'inspect.signature', (['self.model.predict'], {}), '(self.model.predict)\n', (1571, 1591), False, 'import inspect\n'), ((3753, 3789), 'torch.LongTensor', 'torch.LongTensor', (['self.invalid_items'], {}), '(self.invalid_items)\n', (3769, 3789), False, 'import torch\n'), ((2661, 2675), 'numpy.log2', 'np.log2', (['(2 + i)'], {}), '(2 + i)\n', (2668, 2675), True, 'import numpy as np\n'), ((2740, 2754), 'numpy.log2', 'np.log2', (['(2 + i)'], {}), '(2 + i)\n', (2747, 2754), True, 'import numpy as np\n'), ((4376, 4404), 'torch.LongTensor', 'torch.LongTensor', (['candidates'], {}), '(candidates)\n', (4392, 4404), False, 'import torch\n'), ((4442, 4461), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (4458, 4461), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
import pytest
import numpy as np
import pandas as pd
from fluids.numerics import linspace, assert_close, derivative, assert_close1d
from thermo.vapor_pressure import *
from thermo.vapor_pressure import VDI_TABULAR, WAGNER_MCGARRY
from chemicals.identifiers import check_CAS
from math import *
### Main predictor
@pytest.mark.meta_T_dept
def test_VaporPressure():
    # Ethanol, test as many methods as possible at once
EtOH = VaporPressure(Tb=351.39, Tc=514.0, Pc=6137000.0, omega=0.635, CASRN='64-17-5')
methods = list(EtOH.all_methods)
methods.remove(VDI_TABULAR)
Psat_calcs = []
for i in methods:
EtOH.method = i
Psat_calcs.append(EtOH.T_dependent_property(305.))
Psat_exp = [11579.634014300127, 11698.02742876088, 11590.408779316374, 11659.154222044575, 11592.205263402893, 11593.661615921257, 11612.378633936816, 11350.156640503357, 12081.738947110121, 14088.453409816764, 9210.26200064024]
assert_close1d(sorted(Psat_calcs), sorted(Psat_exp))
assert_close(EtOH.calculate(305, VDI_TABULAR), 11690.81660829924, rtol=1E-4)
s = EtOH.as_JSON()
assert 'json_version' in s
obj2 = VaporPressure.from_JSON(s)
assert EtOH == obj2
# Use another chemical to get in ANTOINE_EXTENDED_POLING
a = VaporPressure(CASRN='589-81-1')
Psat_calcs = []
for i in list(a.all_methods):
a.method = i
Psat_calcs.append(a.T_dependent_property(410))
Psat_exp = [162944.82134710113, 162870.44794192078, 162865.5380455795]
assert_close1d(sorted(Psat_calcs), sorted(Psat_exp))
s = a.as_JSON()
obj2 = VaporPressure.from_JSON(s)
assert a == obj2
# Test that methods return None
EtOH = VaporPressure(Tb=351.39, Tc=514.0, Pc=6137000.0, omega=0.635, CASRN='64-17-5')
EtOH.extrapolation = None
for i in list(EtOH.all_methods):
EtOH.method = i
assert EtOH.T_dependent_property(5000) is None
# Test interpolation, extrapolation
w = VaporPressure(Tb=373.124, Tc=647.14, Pc=22048320.0, omega=0.344, CASRN='7732-18-5')
Ts = linspace(300, 350, 10)
Ps = [3533.918074415897, 4865.419832056078, 6612.2351036034115, 8876.854141719203, 11780.097759775277, 15462.98385942125, 20088.570250257424, 25843.747665059742, 32940.95821687677, 41619.81654904555]
w.add_tabular_data(Ts=Ts, properties=Ps)
assert_close(w.T_dependent_property(305.), 4715.122890601165)
w.extrapolation = 'interp1d'
assert_close(w.T_dependent_property(200.), 0.5364148240126076)
# Get a check for Antoine Extended
cycloheptane = VaporPressure(Tb=391.95, Tc=604.2, Pc=3820000.0, omega=0.2384, CASRN='291-64-5')
    cycloheptane.method = 'ANTOINE_EXTENDED_POLING'
cycloheptane.extrapolation = None
assert_close(cycloheptane.T_dependent_property(410), 161647.35219882353)
    assert cycloheptane.T_dependent_property(400) is None
with pytest.raises(Exception):
cycloheptane.test_method_validity(300, 'BADMETHOD')
def test_VaporPressure_linear_extrapolation_non_negative():
ethanol_psat = VaporPressure(Tb=351.39, Tc=514.0, Pc=6137000.0, omega=0.635, CASRN='64-17-5')
# Make sure the constants are set to guard against future changes to defaults
ethanol_psat.method = WAGNER_MCGARRY
ethanol_psat.interpolation_T = (lambda T: 1/T)
ethanol_psat.interpolation_property = (lambda P: log(P))
ethanol_psat.interpolation_property_inv = (lambda P: exp(P))
ethanol_psat.extrapolation = 'linear'
assert_close(ethanol_psat(700), 59005875.32878946, rtol=1e-4)
assert_close(ethanol_psat(100), 1.0475828451230242e-11, rtol=1e-4)
assert ethanol_psat.T_limits['WAGNER_MCGARRY'][0] == ethanol_psat.WAGNER_MCGARRY_Tmin
assert ethanol_psat.T_limits['WAGNER_MCGARRY'][1] == ethanol_psat.WAGNER_MCGARRY_Tc
assert_close(ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tc),
ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tc-1e-6))
assert_close(ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tc),
ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tc+1e-6))
assert_close(ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tmin),
ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tmin-1e-6))
assert_close(ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tmin),
ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tmin+1e-6))
Tmin = ethanol_psat.T_limits[ethanol_psat.method][0]
Ts = linspace(0.7*Tmin, Tmin*(1-1e-10), 10)
Ps = [ethanol_psat(T) for T in Ts]
# Confirms it's linear
# plt.plot(1/np.array(Ts), np.log(Ps))
# plt.show()
def rsquared(x, y):
import scipy.stats
_, _, r_value, _, _ = scipy.stats.linregress(x, y)
return r_value*r_value
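    # under the 'linear' extrapolation, ln(Psat) is expected to be linear in
    # 1/T (Clausius-Clapeyron form), so the r^2 of the regression should be ~1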
assert_close(rsquared(1/np.array(Ts), np.log(Ps)), 1, atol=1e-5)
# TODO make work with different interpolation methods
# assert ethanol_psat == VaporPressure.from_JSON(ethanol_psat.as_JSON())
@pytest.mark.meta_T_dept
def test_VaporPressure_extrapolation_solve_prop():
cycloheptane = VaporPressure(Tb=391.95, Tc=604.2, Pc=3820000.0, omega=0.2384, CASRN='291-64-5')
cycloheptane.method = 'ANTOINE_EXTENDED_POLING'
cycloheptane.extrapolation = 'AntoineAB|DIPPR101_ABC'
cycloheptane.T_dependent_property(T=4000)
assert_close(cycloheptane.solve_property(1), 187.25621087267422)
assert_close(cycloheptane.solve_property(1e-20), 60.677576120119156)
assert_close(cycloheptane.solve_property(1e5), 391.3576035137979)
assert_close(cycloheptane.solve_property(1e6), 503.31772463155266)
assert_close(cycloheptane.solve_property(1e7), 711.8060977006566)
assert_close(cycloheptane.solve_property(3e7), 979.2319342086599)
def test_VaporPressure_bestfit_derivatives():
obj = VaporPressure(poly_fit=(175.7, 512.49, [-1.446088049406911e-19, 4.565038519454878e-16, -6.278051259204248e-13, 4.935674274379539e-10,
-2.443464113936029e-07, 7.893819658700523e-05, -0.016615779444332356, 2.1842496316772264, -134.19766175812708]))
assert_close(obj.T_dependent_property(300), 18601.061401014867, rtol=1e-11)
assert_close(obj.T_dependent_property_derivative(300), 954.1652489206775, rtol=1e-11)
assert_close(obj.T_dependent_property_derivative(300, order=2), 41.8787546283273, rtol=1e-11)
assert_close(derivative(obj.T_dependent_property, 300, dx=300*1e-7), obj.T_dependent_property_derivative(300))
assert_close(derivative(obj.T_dependent_property_derivative, 300, dx=300*1e-7), obj.T_dependent_property_derivative(300, order=2))
@pytest.mark.meta_T_dept
def test_VaporPressure_extrapolation_AB():
obj = VaporPressure(Tb=309.21, Tc=469.7, Pc=3370000.0, omega=0.251, CASRN='109-66-0', load_data=True, extrapolation='AntoineAB')
obj.method = WAGNER_MCGARRY
obj.calculate_derivative(300, WAGNER_MCGARRY)
for extrapolation in ('AntoineAB', 'DIPPR101_ABC', 'AntoineAB|AntoineAB', 'DIPPR101_ABC|DIPPR101_ABC',
'DIPPR101_ABC|AntoineAB', 'AntoineAB|DIPPR101_ABC'):
obj.extrapolation = extrapolation
assert_close(obj.T_dependent_property(obj.WAGNER_MCGARRY_Tc),
obj.T_dependent_property(obj.WAGNER_MCGARRY_Tc-1e-6))
assert_close(obj.T_dependent_property(obj.WAGNER_MCGARRY_Tc),
obj.T_dependent_property(obj.WAGNER_MCGARRY_Tc+1e-6))
assert_close(obj.T_dependent_property(obj.WAGNER_MCGARRY_Tmin),
obj.T_dependent_property(obj.WAGNER_MCGARRY_Tmin-1e-6))
assert_close(obj.T_dependent_property(obj.WAGNER_MCGARRY_Tmin),
obj.T_dependent_property(obj.WAGNER_MCGARRY_Tmin+1e-6))
def test_VaporPressure_fast_Psat_poly_fit():
corr = VaporPressure(poly_fit=(273.17, 647.086, [-2.8478502840358144e-21, 1.7295186670575222e-17, -4.034229148562168e-14, 5.0588958391215855e-11, -3.861625996277003e-08, 1.886271475957639e-05, -0.005928371869421494, 1.1494956887882308, -96.74302379151317]))
# Low temperature values - up to 612 Pa
assert_close(corr.solve_property(1e-5), corr.solve_prop_poly_fit(1e-5), rtol=1e-10)
assert_close(corr.solve_property(1), corr.solve_prop_poly_fit(1), rtol=1e-10)
assert_close(corr.solve_property(100), corr.solve_prop_poly_fit(100), rtol=1e-10)
P_trans = exp(corr.poly_fit_Tmin_value)
assert_close(corr.solve_property(P_trans), corr.solve_prop_poly_fit(P_trans), rtol=1e-10)
assert_close(corr.solve_property(P_trans + 1e-7), corr.solve_prop_poly_fit(P_trans + 1e-7), rtol=1e-10)
# Solver region
assert_close(corr.solve_property(1e5), corr.solve_prop_poly_fit(1e5), rtol=1e-10)
assert_close(corr.solve_property(1e7), corr.solve_prop_poly_fit(1e7), rtol=1e-10)
P_trans = exp(corr.poly_fit_Tmax_value)
assert_close(corr.solve_property(P_trans), corr.solve_prop_poly_fit(P_trans), rtol=1e-10)
assert_close(corr.solve_property(P_trans + 1e-7), corr.solve_prop_poly_fit(P_trans + 1e-7), rtol=1e-10)
# High T
assert_close(corr.solve_property(1e8), corr.solve_prop_poly_fit(1e8), rtol=1e-10)
# Extrapolation
from thermo.vapor_pressure import BESTFIT, BEST_FIT_AB, BEST_FIT_ABC
obj = VaporPressure(poly_fit=(178.01, 591.74, [-8.638045111752356e-20, 2.995512203611858e-16, -4.5148088801006036e-13, 3.8761537879200513e-10, -2.0856828984716705e-07, 7.279010846673517e-05, -0.01641020023565049, 2.2758331029405516, -146.04484159879843]))
assert_close(obj.calculate(1000, BEST_FIT_AB), 78666155.90418352, rtol=1e-10)
assert_close(obj.calculate(1000, BEST_FIT_ABC), 156467764.5930495, rtol=1e-10)
assert_close(obj.calculate(400, BESTFIT), 157199.6909849476, rtol=1e-10)
assert_close(obj.calculate(400, BEST_FIT_AB), 157199.6909849476, rtol=1e-10)
assert_close(obj.calculate(400, BEST_FIT_ABC), 157199.6909849476, rtol=1e-10)
@pytest.mark.meta_T_dept
def test_VaporPressure_extrapolation_no_validation():
N2 = VaporPressure(CASRN='7727-37-9', extrapolation='DIPPR101_ABC')
N2.method = WAGNER_MCGARRY
assert N2(298.15) is not None
assert N2(1000.15) is not None
def test_VaporPressure_fast_Psat_poly_fit_extrapolation():
obj = VaporPressure(poly_fit=(175.7, 512.49, [-1.446088049406911e-19, 4.565038519454878e-16, -6.278051259204248e-13, 4.935674274379539e-10,
-2.443464113936029e-07, 7.893819658700523e-05, -0.016615779444332356, 2.1842496316772264, -134.19766175812708]))
obj.extrapolation = 'AntoineAB|DIPPR101_ABC'
    assert_close(obj.solve_property(1e-13), 3.2040851644645945)
assert_close(obj.solve_property(300), 237.7793675652309)
assert_close(obj.solve_property(1e8), 661.6135315674736)
| [
"fluids.numerics.linspace",
"fluids.numerics.derivative",
"numpy.log",
"numpy.array",
"pytest.raises"
] | [((3229, 3251), 'fluids.numerics.linspace', 'linspace', (['(300)', '(350)', '(10)'], {}), '(300, 350, 10)\n', (3237, 3251), False, 'from fluids.numerics import linspace, assert_close, derivative, assert_close1d\n'), ((5721, 5765), 'fluids.numerics.linspace', 'linspace', (['(0.7 * Tmin)', '(Tmin * (1 - 1e-10))', '(10)'], {}), '(0.7 * Tmin, Tmin * (1 - 1e-10), 10)\n', (5729, 5765), False, 'from fluids.numerics import linspace, assert_close, derivative, assert_close1d\n'), ((4045, 4069), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4058, 4069), False, 'import pytest\n'), ((7629, 7686), 'fluids.numerics.derivative', 'derivative', (['obj.T_dependent_property', '(300)'], {'dx': '(300 * 1e-07)'}), '(obj.T_dependent_property, 300, dx=300 * 1e-07)\n', (7639, 7686), False, 'from fluids.numerics import linspace, assert_close, derivative, assert_close1d\n'), ((7744, 7812), 'fluids.numerics.derivative', 'derivative', (['obj.T_dependent_property_derivative', '(300)'], {'dx': '(300 * 1e-07)'}), '(obj.T_dependent_property_derivative, 300, dx=300 * 1e-07)\n', (7754, 7812), False, 'from fluids.numerics import linspace, assert_close, derivative, assert_close1d\n'), ((6071, 6081), 'numpy.log', 'np.log', (['Ps'], {}), '(Ps)\n', (6077, 6081), True, 'import numpy as np\n'), ((6057, 6069), 'numpy.array', 'np.array', (['Ts'], {}), '(Ts)\n', (6065, 6069), True, 'import numpy as np\n')] |
from ..spacegroup import expand_spacegroup
import numpy as np
def test_expand_spacegroup():
"""test for expand_spacegroup function"""
# try non-integer input
a_float = 31.1
a_string = 'a'
a_list = [1, 2]
# try all non-integer inputs
try:
expand_spacegroup(a_float)
except Exception:
pass
else:
raise Exception('Did not catch case of float input')
try:
expand_spacegroup(a_string)
except Exception:
pass
else:
raise Exception('Did not catch case of string input')
try:
expand_spacegroup(a_list)
except Exception:
pass
else:
raise Exception('Did not catch case of list input')
# check that output is correct length for any integer
    rand_int = np.random.randint(1, 231)  # upper bound is exclusive, so this covers space groups 1-230
results = expand_spacegroup(rand_int)
assert len(results) == 5
return True
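# A more idiomatic formulation of the type checks above, sketched under the
# assumption that pytest is available to this suite:
#
#     import pytest
#     for bad_input in (31.1, 'a', [1, 2]):
#         with pytest.raises(Exception):
#             expand_spacegroup(bad_input)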
| [
"numpy.random.randint"
] | [((782, 807), 'numpy.random.randint', 'np.random.randint', (['(1)', '(230)'], {}), '(1, 230)\n', (799, 807), True, 'import numpy as np\n')] |
import os
import numpy as np
import matplotlib.image as mpimg
def listdir_nohidden(path: str, jpg_only=False) -> list:
"""
returns an alphabetically sorted list of filenames of the unhidden files of a directory
    optional arg: jpg_only. Set to True to return only .jpg or .JPG files
"""
if jpg_only:
return sorted(
[el for el in os.listdir(path) if
not (el.startswith(".") or el.endswith(".MOV") or el.endswith(".mov")) and (
el.endswith(".jpg") or el.endswith(".JPG"))])
else:
return sorted(
[el for el in os.listdir(path) if not (el.startswith(".") or el.endswith(".MOV") or el.endswith(".mov"))])
def data_repartition(zord: str, directory_: str) -> list:
"""
    Returns the count of .jpg or .JPG files from each category folder in the
    directory_ folder, in alphabetical order.
"""
folders = []
if zord == "main_zord":
classes = listdir_nohidden(directory_)
for classe in classes:
dir_ = os.path.join(directory_, classe)
labels = listdir_nohidden(dir_)
tot = 0
for label in labels:
tot += len(listdir_nohidden(diver(dir_ + "/" + label), jpg_only=True))
folders.append(tot)
else:
directory_ = os.path.join(directory_, zord)
for label in listdir_nohidden(directory_):
file_nb = len(listdir_nohidden(diver(directory_ + "/" + label), jpg_only=True))
folders.append(file_nb)
return folders
def weighter(folders: list) -> dict:
"""
    Returns the proportionality coefficients of the sizes of the folders. The largest folder's weight is set to 1.
"""
maximum = max(folders)
class_weight = {}
for i, el in enumerate(folders):
class_weight[i] = float(maximum) / float(el)
return class_weight
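# Worked example: weighter([100, 50, 25]) returns {0: 1.0, 1: 2.0, 2: 4.0}, so
# under-represented classes receive proportionally larger weights.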
def diver(path: str) -> str:
"""
    Descends through directories that contain exactly one visible entry and
    returns the first path with more than one entry.
"""
while len(listdir_nohidden(path)) == 1:
path += "/" + listdir_nohidden(path)[0]
return path
def one4all_labeller(path: str) -> list:
"""
    Builds a list of [folder, jpg_file_count] pairs for the labels under path.
"""
obj_map = []
folders = listdir_nohidden(path)
for folder in folders:
path_ = path + "/" + folder
if len(listdir_nohidden(path, jpg_only=True)) > 1:
file_nb = len(listdir_nohidden(diver(path_), jpg_only=True))
obj_map.append([folder, file_nb])
else:
for label in listdir_nohidden(path_):
path__ = path_ + "/" + label
file_nb = len(listdir_nohidden(diver(path__), jpg_only=True))
obj_map.append([folder, file_nb])
return obj_map
class ImageFromDirectory:
"""
    Imports images from a directory, much like the Keras utilities, except that it returns x and y
    separately as NumPy arrays. This was done to avoid dealing with Keras tensors.
Depends on int_reader and all the functions required to make int_reader work
"""
def __init__(self, path, zord_kind):
int_to_label = {}
im_compt = 0
err_compt = 0
if zord_kind[:9] == "main_zord":
n = sum(data_repartition("main_zord", path))
x, y = np.empty((n, 256, 256, 3)), np.empty((n, 1), dtype="int32")
int_label = int_reader(label="classe")
for classe in listdir_nohidden(path):
path_classe = os.path.join(path, classe)
for label in listdir_nohidden(path_classe):
path_label = os.path.join(path_classe, label)
path_label = (diver(path_label))
for im in listdir_nohidden(path_label, jpg_only=True):
try:
img = mpimg.imread(os.path.join(path_label, im))
x[im_compt] = img
y[im_compt] = int_label[classe]
im_compt += 1
except Exception as e:
print(e.__class__)
err_compt += 1
elif zord_kind == "one4all" or zord_kind == "MegaZord" or zord_kind[:8] == "megazord":
n = sum(data_repartition("main_zord", path))
x, y = np.empty((n, 256, 256, 3)), np.empty((n, 1), dtype="int32")
int_label = labels_in_dir_mz_order()
for classe in listdir_nohidden(path):
path_classe = os.path.join(path, classe)
for label in listdir_nohidden(path_classe):
path_label = os.path.join(path_classe, label)
path_label = diver(path_label)
for im in listdir_nohidden(path_label, jpg_only=True):
try:
img = mpimg.imread(os.path.join(path_label, im))
x[im_compt] = img
y[im_compt] = int_label[label]
im_compt += 1
except Exception as e:
print(e.__class__)
print(label)
err_compt += 1
else:
n = sum(data_repartition(zord_kind, path))
x, y = np.empty((n, 256, 256, 3)), np.empty((n, 1), dtype="int32")
self.classes_names = listdir_nohidden(path)
int_label = labels_in_dir_mz_order()
path_classe = os.path.join(path, zord_kind)
for label in listdir_nohidden(path_classe):
path_label = os.path.join(path_classe, label)
path_label = (diver(path_label))
for im in listdir_nohidden(path_label, jpg_only=True):
try:
img = mpimg.imread(os.path.join(path_label, im))
x[im_compt] = img
y[im_compt] = int_label[label]
im_compt += 1
except Exception as e:
print(e.__class__)
err_compt += 1
assert im_compt + err_compt == n, "Some files have been missed"
print("\n{} files have been imported".format(im_compt))
print("{} errors occured".format(err_compt))
self.x = x[:im_compt]
self.y = y[:im_compt]
self.label_map = int_to_label
def zord_from_pb_file(path: str) -> str:
"""
:param path:
:return: the name of the zord
"""
path = path[:-3]
i = 1
while path[-i] != "/":
i += 1
return path[-i + 1:]
def labeller(path: str) -> None:
f = open("../files/labels.txt", "a")
path += "/train_set"
err_compt = 0
classes_names = listdir_nohidden(path)
for classe in classes_names:
path_classe = os.path.join(path, classe)
for label in listdir_nohidden(path_classe):
path_label = os.path.join(path_classe, label)
path_label = (diver(path_label))
for im in listdir_nohidden(path_label, jpg_only=True):
try:
input_path = "path " + str(
os.path.join(path_label, im)) + " " + "classe " + classe + " " + "label " + label + "\n"
f.write(input_path)
            except Exception:
err_compt += 1
f.close()
def label_reader(label=None) -> dict:
f = open("../files/labels.txt")
lines = f.readlines()
rep = {}
for line in lines:
i = 0
while line[i + 1:i + 4] != "jpg":
i += 1
path = line[5:i]
i += 3
if label == "classe":
while line[i + 1:i + 8] != "classe ":
i += 1
j = 0
while line[i + 8 + j + 1] != " ":
j += 1
value = line[i + 8: i + 8 + j + 1]
else:
while line[i + 1:i + 7] != "label ":
i += 1
j = 0
while line[i + 7 + j + 1:i + 7 + j + 2] != "\n":
j += 1
value = line[i + 7: i + 7 + j + 1]
rep[path] = value
return rep
def int_labeller(label: str) -> None:
dic = label_reader(label)
integer_labels = np.unique(list(dic.values()))
f = open("../files/int_{}.txt".format(label), "a")
f.write(str(integer_labels))
f.close()
def int_reader(label: str) -> dict:
f = open("../files/int_{}.txt".format(label))
txt = f.read()
dic = {}
nb_found = 0
i = 1
while i < len(txt) - 2:
if txt[i] == "'":
i += 1
j = 1
while txt[i + j] != "'":
j += 1
if j > 2:
dic[txt[i:i + j]] = nb_found
nb_found += 1
i += j
else:
i += 1
return dic
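# Illustrative example (hypothetical file contents): if the int_label file holds
# "['cat' 'dog']", int_reader returns {'cat': 0, 'dog': 1}; names of one or two
# characters are skipped by the j > 2 guard above.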
def flatten(t: list) -> list:
l = []
for el in t:
if type(el) == list:
for el_1 in el:
l.append(el_1)
else:
l.append(el)
return l
def labels_in_dir_mz_order() -> dict:
"""
:return: labels in the same order that MegaZord was trained on.
"""
labels = {}
compt = 0
path = "/Users/lucas/swiss_knife/data"
for classe in listdir_nohidden(path):
for label in listdir_nohidden(os.path.join(path, classe)):
labels[label] = compt
compt += 1
return labels
def resizer(path, tgt_size=(256,256)):
import cv2
for img in listdir_nohidden(path, jpg_only=True):
pic = cv2.imread(os.path.join(path, img))
shape=pic.shape[:2]
w = min(shape)
l = max(shape)
d = int((l - w) / 2)
pic = np.asarray(pic)
print(type(pic))
if w == shape[0]:
pic = pic[:, d:d + w]
else:
pic = pic[d:d + w, :]
pic = cv2.resize(pic, tgt_size)
cv2.imwrite(os.path.join(path, img), pic)
def get_path():
cwd = os.getcwd()
if cwd[:-8]=="MegaZord" :
return os.path.join(cwd,"files")
if __name__ == "__main__":
try :
os.remove("../files/labels.txt")
os.remove("../files/int_label.txt")
os.remove("../files/int_classe.txt")
except FileNotFoundError :
pass
labeller("/Volumes/WD_BLACK/ressources")
int_labeller("classe")
int_labeller("label")
# [o_o] | [
"os.listdir",
"numpy.asarray",
"os.path.join",
"os.getcwd",
"numpy.empty",
"cv2.resize",
"os.remove"
] | [((9871, 9882), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9880, 9882), False, 'import os\n'), ((1312, 1342), 'os.path.join', 'os.path.join', (['directory_', 'zord'], {}), '(directory_, zord)\n', (1324, 1342), False, 'import os\n'), ((6749, 6775), 'os.path.join', 'os.path.join', (['path', 'classe'], {}), '(path, classe)\n', (6761, 6775), False, 'import os\n'), ((9604, 9619), 'numpy.asarray', 'np.asarray', (['pic'], {}), '(pic)\n', (9614, 9619), True, 'import numpy as np\n'), ((9767, 9792), 'cv2.resize', 'cv2.resize', (['pic', 'tgt_size'], {}), '(pic, tgt_size)\n', (9777, 9792), False, 'import cv2\n'), ((9929, 9955), 'os.path.join', 'os.path.join', (['cwd', '"""files"""'], {}), "(cwd, 'files')\n", (9941, 9955), False, 'import os\n'), ((10006, 10038), 'os.remove', 'os.remove', (['"""../files/labels.txt"""'], {}), "('../files/labels.txt')\n", (10015, 10038), False, 'import os\n'), ((10047, 10082), 'os.remove', 'os.remove', (['"""../files/int_label.txt"""'], {}), "('../files/int_label.txt')\n", (10056, 10082), False, 'import os\n'), ((10091, 10127), 'os.remove', 'os.remove', (['"""../files/int_classe.txt"""'], {}), "('../files/int_classe.txt')\n", (10100, 10127), False, 'import os\n'), ((1031, 1063), 'os.path.join', 'os.path.join', (['directory_', 'classe'], {}), '(directory_, classe)\n', (1043, 1063), False, 'import os\n'), ((6853, 6885), 'os.path.join', 'os.path.join', (['path_classe', 'label'], {}), '(path_classe, label)\n', (6865, 6885), False, 'import os\n'), ((9224, 9250), 'os.path.join', 'os.path.join', (['path', 'classe'], {}), '(path, classe)\n', (9236, 9250), False, 'import os\n'), ((9462, 9485), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (9474, 9485), False, 'import os\n'), ((9814, 9837), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (9826, 9837), False, 'import os\n'), ((3193, 3219), 'numpy.empty', 'np.empty', (['(n, 256, 256, 3)'], {}), '((n, 256, 256, 3))\n', (3201, 3219), True, 'import numpy as np\n'), ((3221, 3252), 'numpy.empty', 'np.empty', (['(n, 1)'], {'dtype': '"""int32"""'}), "((n, 1), dtype='int32')\n", (3229, 3252), True, 'import numpy as np\n'), ((3384, 3410), 'os.path.join', 'os.path.join', (['path', 'classe'], {}), '(path, classe)\n', (3396, 3410), False, 'import os\n'), ((358, 374), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (368, 374), False, 'import os\n'), ((594, 610), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (604, 610), False, 'import os\n'), ((3504, 3536), 'os.path.join', 'os.path.join', (['path_classe', 'label'], {}), '(path_classe, label)\n', (3516, 3536), False, 'import os\n'), ((4230, 4256), 'numpy.empty', 'np.empty', (['(n, 256, 256, 3)'], {}), '((n, 256, 256, 3))\n', (4238, 4256), True, 'import numpy as np\n'), ((4258, 4289), 'numpy.empty', 'np.empty', (['(n, 1)'], {'dtype': '"""int32"""'}), "((n, 1), dtype='int32')\n", (4266, 4289), True, 'import numpy as np\n'), ((4419, 4445), 'os.path.join', 'os.path.join', (['path', 'classe'], {}), '(path, classe)\n', (4431, 4445), False, 'import os\n'), ((5218, 5244), 'numpy.empty', 'np.empty', (['(n, 256, 256, 3)'], {}), '((n, 256, 256, 3))\n', (5226, 5244), True, 'import numpy as np\n'), ((5246, 5277), 'numpy.empty', 'np.empty', (['(n, 1)'], {'dtype': '"""int32"""'}), "((n, 1), dtype='int32')\n", (5254, 5277), True, 'import numpy as np\n'), ((5524, 5556), 'os.path.join', 'os.path.join', (['path_classe', 'label'], {}), '(path_classe, label)\n', (5536, 5556), False, 'import os\n'), ((4539, 4571), 'os.path.join', 'os.path.join', (['path_classe', 'label'], {}), '(path_classe, label)\n', (4551, 4571), False, 'import os\n'), ((3741, 3769), 'os.path.join', 'os.path.join', (['path_label', 'im'], {}), '(path_label, im)\n', (3753, 3769), False, 'import os\n'), ((5746, 5774), 'os.path.join', 'os.path.join', (['path_label', 'im'], {}), '(path_label, im)\n', (5758, 5774), False, 'import os\n'), ((4774, 4802), 'os.path.join', 'os.path.join', (['path_label', 'im'], {}), '(path_label, im)\n', (4786, 4802), False, 'import os\n'), ((7091, 7119), 'os.path.join', 'os.path.join', (['path_label', 'im'], {}), '(path_label, im)\n', (7103, 7119), False, 'import os\n')] |
# calculating the intragroup average distance, showing that it significantly increases with age;
# TRF is lower than 24-month-old AL. Make a figure.
# note about scaling - the metabolome matrix has the metabolites in rows (animals in columns),
# and scaling normalizes the columns, so we need to scale the transpose of the metabolome matrix.
# Checking old-young groups. Need to expand the code for all 6 pairs!
# Due to the orders of magnitude difference in the reads between different metabolites, we need to scale the data. Each metabolite is normalized to have mean 0, std 1.
# this program calculates the average distance within a group and between groups by calculating the L1 metric between all possible pairs (of layers)
# across all metabolites. We then have a matrix of distances where the diagonal is 0.
# we can calculate the average distance for the group and between groups. Next we keep the distances but permute the names, and test
# what is the probability to have an intra-group average distance higher than the intergroup one.
# specifically, for each pair (a,b) we will compare a to ab, b to ab, and the requirement is that the permuted distance is >= the original distance.
# present the data on a bar graph, p-value according to permutations
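# In symbols, for animals a and b with scaled metabolite vectors x_a and x_b, the
# distance used throughout is d(a, b) = sum_k |x_a[k] - x_b[k]| (the L1 metric).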
def caldistance(lst1, lst2): # calculates the L1 distance between lst1, lst2. They represent two layers (chcknum1/2) with their lists of metabolomic values
dist0 = 0
for ii in range(0, len(lst1)):
tmp = abs(lst1[ii] - lst2[ii])
dist0 = dist0 + tmp
return dist0
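# e.g. caldistance([1, 2, 3], [1, 2, 4]) = |1-1| + |2-2| + |3-4| = 1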
def strtofloat0(lst): # returns a list of floats, given a list of numbers in str type
ans = []
for ii in lst:
tmp = [float(kk) for kk in ii]
ans.append(tmp)
return ans
def getvalues0(polarval0, start1, len1, start2, len2): # returns lists with the values of the groups that we want to compare
grp1 = polarval0[:, start1:(start1+len1)]
grp2 = polarval0[:, start2:(start2+len2)]
grp1lst = grp1.tolist()
grp2lst = grp2.tolist()
return grp1lst, grp2lst
def averageingroupdis(dim0, mtrx0): # calculates the average distance within the same group (array) - sum of all elements, divided by (number of elements minus diagonal)
# dim0 is the number of rows/columns in the array.
# This is a symmetrical matrix with diagonal = 0. element ij is the distance between animal i and j (in the same group)
sm0 = np.sum(mtrx0)
numbrofelmnts = ((dim0*dim0) - dim0)
ans = sm0/numbrofelmnts
return ans
def averageoutgroupdis(dim0, mtrx0): # calculates the average distance between two groups (array) - sum of all elements, divided by number of elements
# dim0 is the number of rows/columns in the array, here the diagonal has no meaning - each row is one group and each column is a second group.
# element ij is the distance between animal i and j (in the different groups!)
sm0 = np.sum(mtrx0)
numbrofelmnts = ((dim0*dim0))
ans = sm0/numbrofelmnts
return ans
def buildidsmatrx(distarr, perm0): # receives the original distance matrix/array and the permutation vector, builds the permuted matrix
permdist0 = []
for ii in perm0:
permtmp = []
for jj in perm0:
tmp = distarr[ii, jj] # need to have the indices starting from 0!
permtmp.append(tmp)
# print('permtmp', permtmp)
permdist0.append(permtmp)
return permdist0
def originaldistmtrx(distarry): # receives the two-group metabolomics data, generates the distance matrix(list)
distlstot0 = []
for ii in range(0, len(distarry)):
rowdist = []
for jj in range(0, len(distarry)):
tmpdist = caldistance(distarry[ii], distarry[jj])
rowdist.append(tmpdist)
distlstot0.append(rowdist)
return distlstot0
def generatepairgroup(group01, group02): # generates the distance matrix (array) for the group01-group02 pair
group01arr = np.array(group01)
group01arrt = group01arr.transpose()
print(len(group01arrt), len(group01arrt[0])) #
group01lst0 = group01arrt.tolist()
group02arr = np.array(group02)
group02arrt = group02arr.transpose()
print(len(group02arrt), len(group02arrt[0])) #
group02lst0 = group02arrt.tolist()
group0102lst0 = group01lst0 + group02lst0
print(len(group0102lst0), len(group0102lst0[0])) #
distlst0 = originaldistmtrx(group0102lst0) # generating the distance matrix (array)
print(len(distlst0), len(distlst0[0]), distlst0[0][0], distlst0[0][1], distlst0[1][1], distlst0[1][0])
return distlst0
def ingpdis(gpnum, gpsize, distmtrx): # receives the distance matrix(list), returns the intragroup distance of gpnum
distmtrxarr = np.array(distmtrx)
if gpnum == 1: # always size 15
tmpdistmtrxarr = distmtrxarr[0:gpsize, 0:gpsize]
sm0 = np.sum(tmpdistmtrxarr)
numbrofelmnts = ((gpsize * gpsize) - gpsize)
ans = sm0 / numbrofelmnts
if gpnum == 2: # should work for size 15 as well as 14
tmpdistmtrxarr = distmtrxarr[15:, 15:] # starts with No. 15 always
sm0 = np.sum(tmpdistmtrxarr)
        numbrofelmnts = ((gpsize * gpsize) - gpsize) # good for sizes 15 and 14 - this is the matrix size
ans = sm0 / numbrofelmnts
return ans
def outgpdis(gset, gpsize, distmtrx): # receives the distance matrix(list), returns the intergroup distance of gset
distmtrxarr = np.array(distmtrx)
if gset[1] != 3:
tmpdistmtrxarr = distmtrxarr[0:gpsize, gpsize:]
sm0 = np.sum(tmpdistmtrxarr)
numbrofelmnts = (gpsize * gpsize)
ans = sm0 / numbrofelmnts
elif gset[1] == 3:
tmpdistmtrxarr = distmtrxarr[0:gpsize, gpsize:]
sm0 = np.sum(tmpdistmtrxarr)
numbrofelmnts = (gpsize * (gpsize-1))
ans = sm0 / numbrofelmnts
return ans
def ingspdistance(distmtrx): # receives the distance matrix (list), returns the intragroup distances for all 4 groups; O/Y/AL/CR
distmtrxarr = np.array(distmtrx)
permdistances = []
for ii in range(0, 4):
if ii != 3: # always size 15
tmpdistmtrxarr = distmtrxarr[(ii*15):((ii+1)*15), (ii*15):((ii+1)*15)]
sm0 = np.sum(tmpdistmtrxarr)
numbrofelmnts = (210) # 15*15 - 15
permdistances.append(sm0 / numbrofelmnts)
elif ii == 3:
tmpdistmtrxarr = distmtrxarr[45:59, 45:59]
sm0 = np.sum(tmpdistmtrxarr)
numbrofelmnts = (182) # 14*14 - 14
permdistances.append(sm0 / numbrofelmnts)
return permdistances
def calcsum0(old, young, al): # receives three group distances, checks for a monotonic trend between young/old/al. if monotonic, returns the sum of abs(difference).
if (young < old < al):
ans = abs(old - young) + abs(old - al)
else:
ans = 0
return ans
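# e.g. calcsum0(old=5, young=3, al=8) returns |5-3| + |5-8| = 5, while any
# ordering that is not young < old < al returns 0.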
def getstderror(in_out, distarr): # calculates the std (population; pstdev) of distance matrix distarr; in_out 0 means intragroup (0s in the diagonal excluded), 1 means intergroup (count all elements)
distlst0 = distarr.tolist()
# print('dist', distlst0)
if in_out == 0: # distarr represents intragroup distances
elements0 = []
for ii in distlst0:
for jj in ii:
if jj != 0:
elements0.append(jj)
elif in_out == 1: # distarr represents intergroup distances
elements0 = []
for ii in distlst0:
for jj in ii:
if jj != 0:
elements0.append(jj)
std0 = statistics.pstdev(elements0)
# print(len(elements0)) # - yey
return std0
def make1dlist(array): # receives a 2-d array and returns a 1-d list of all elements
onedlist = []
for ii in array.tolist():
onedlist = onedlist + ii
return onedlist
from scipy.stats import hypergeom
import statsmodels
import statsmodels.stats.multitest
import statistics
from scipy.stats import mannwhitneyu
from scipy.stats import ttest_ind
from scipy.stats.stats import pearsonr
from scipy.stats.stats import spearmanr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from scipy.stats import stats
# need to do standard scaling (mean 0, std 1) because of the 10 orders of magnitude difference between the polar and lipid values.
print('intergroup distance')
merge0 = pd.read_csv('mergedpolarlipid', header = None) # the metabolomics data for the merged polar & lipid file
print(merge0.head())
merge0val0 = merge0.iloc[:, :].values #
# Feature Scaling
scaler = StandardScaler()
merge0valt = scaler.fit_transform(merge0val0.transpose()) # the scaling is for the columns, so we scale the transpose matrix (col = metabolites)
merge0val = merge0valt.transpose()
valuesoldyoung = getvalues0(merge0val, 0, 15, 15, 15) # returns lists with the values of the groups that we want to compare
oldtmp1 = valuesoldyoung[0]
oldval = strtofloat0(oldtmp1)
youngtmp1 = valuesoldyoung[1]
youngval = strtofloat0(youngtmp1)
valuesALCR = getvalues0(merge0val, 30, 15, 45, 14)
altmp1 = valuesALCR[0]
alval = strtofloat0(altmp1)
crtmp1 = valuesALCR[1]
crval = strtofloat0(crtmp1)
print(len(oldval), len(oldval[0]), oldval[0][0]) # 434 15 v (not scaled 0.000390325) - yey
oldvalarr = np.array(oldval)
oldvalarrt = oldvalarr.transpose()
print(len(oldvalarrt), len(oldvalarrt[0])) # 15 434
oldvallst0 = oldvalarrt.tolist()
youngvalarr = np.array(youngval)
youngvalarrt = youngvalarr.transpose()
print(len(youngvalarrt), len(youngvalarrt[0])) # 15 434
youngvallst0 = youngvalarrt.tolist()
oldyoungvallst0 = oldvallst0 + youngvallst0
print(len(oldyoungvallst0), len(oldyoungvallst0[0])) # 30 434 - yey
# dist1_2 = caldistance([1,2,3], [1,2,4])
# print(dist1_2) # 1 - yey
dist1_1 = caldistance(oldyoungvallst0[0], oldyoungvallst0[0])
dist1_2 = caldistance(oldyoungvallst0[0], oldyoungvallst0[1])
print(dist1_1, dist1_2) # 0.0 360.7584529430399 (not scaled 0.0, 239666801.601786) - looking good!
# going over all possible pairs
distlstot = []
for ii in range(0, len(oldyoungvallst0)):
rowdist = []
for jj in range(0, len(oldyoungvallst0)):
tmpdist = caldistance(oldyoungvallst0[ii], oldyoungvallst0[jj])
rowdist.append(tmpdist)
distlstot.append(rowdist)
print(len(distlstot), len(distlstot[0]), distlstot[0][0], distlstot[0][1], distlstot[1][1], distlstot[1][0]) # 30 30 0.0 360.7584529430399 0.0 360.7584529430399 (not scaled 30 30 0.0 239666801.601786 0.0 239666801.601786)
# distlstot is the matrix/list that consist all the distances between old/young groups!
# intragroup average distance - find all intragroup pairs, find their distance, average over them
# the first group is 'old', has 15 members
distmpmtrx = [[0,3,3], [3,0,3], [3,3,0]] # average distance 3
averpairdist = averageingroupdis(3, distmpmtrx)
print(averpairdist) # 3.0 - yey
distlstotarr = np.array(distlstot)
olddistlst = distlstotarr[0:15, 0:15]
print(len(olddistlst), len(olddistlst[0])) # 15 15 - yey
oldaverdist = averageingroupdis(15, olddistlst)
print(oldaverdist) # 392.91898409453125 (not scaled 246372927.80372265) - no errors, verify
youngdistlst = distlstotarr[15:, 15:]
print(len(youngdistlst), len(youngdistlst[0])) # 15 15 - yey
print(youngdistlst) # looking good - symmetrical with 0's in the diagonal
youngaverdist = averageingroupdis(15, youngdistlst)
print(youngaverdist) # 319.49663046450587 (not scaled 198695619.0538811)
# # now permuting the labels
# permuting the labels, then pick the corresponding distances from the original matrix. For example, if no. 2 is now # 14,
# then all the distances between no.'i' and no. 2 are replaced by the distances between 'i' and 14 (which is the new 2)
# looking at the ingroup average distances, and getting p-value for the difference
getdistmtrx = originaldistmtrx(merge0val.transpose().tolist()) # calculates the distance matrix for all 59 animals
print(len(getdistmtrx), len(getdistmtrx[-1]), getdistmtrx[0][0], getdistmtrx[0][1], getdistmtrx[0][2], getdistmtrx[1][0], getdistmtrx[1][1], getdistmtrx[1][2], getdistmtrx[2][0], getdistmtrx[2][1], getdistmtrx[2][2]) #
# 59 59 0.0 360.7584529430399 385.0680196051907 360.7584529430399 0.0 270.8677751129098 385.0680196051907 270.8677751129098 0.0 - yey, same as mergedscaleddistances
distances = [392.91898409453125, 319.49663046450587, 464.5228587656814, 366.00724671998097] # O,Y,AL,TRF
difftot = abs(distances[0] - distances[1]) + abs(distances[2] - distances[0]) # difftot = (O-Y) + (AL-O)
# running permutations
range0 = np.arange(59)
perm0 = np.random.permutation(range0)
permdisttmp = buildidsmatrx(np.array(getdistmtrx), perm0)
# calculating the permuted-ingroup distances
qcingpsdist = ingspdistance(getdistmtrx) # QC ingroup distances
print(qcingpsdist) # [392.91898409453125, 319.49663046450587, 464.5228587656814, 366.00724671998097] - yey
permingpdist = ingspdistance(permdisttmp)
print(permingpdist) # for one permutation - [555.9641151107118, 405.52211994358356, 424.97273571968947, 399.20275047630844] - not monotonic, good
diffsum0 = calcsum0(permingpdist[0], permingpdist[1], permingpdist[2])
print(diffsum0) # 0 - yey
# lets run 1000 permutations
# ind0 = 0
# for ii in range(0, 1): # 100, 1000
# perm0 = np.random.permutation(range0)
# permdisttmp = buildidsmatrx(np.array(getdistmtrx), perm0)
# permingpdist = ingspdistance(permdisttmp)
# diffsum0 = calcsum0(permingpdist[0], permingpdist[1], permingpdist[2])
# if (diffsum0 > difftot) and (permingpdist[3] < permingpdist[2]):
# ind0 = ind0 + 1
# print(ind0) # 1 perm - 0, 10 perm - 0, 100 perm - 0, 1000 perm - 5, 1000 perm - 5 (p_value = 0.005) - yey
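# i.e. only 5 of 1000 label permutations produced a monotonic young < old < AL
# spread at least as large as the observed one while keeping TRF below AL,
# giving an empirical p-value of about 0.005.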
# barplot
groups00 = ['8 months', '20 months', '24 months AL', '24 months TRF']
x_pos = np.arange(len(groups00))
distances0 = [distances[1], distances[0], distances[2], distances[3]]
# Build the plot
fig, ax = plt.subplots()
ax.bar(x_pos, distances0, align='center', color='blue', capsize=10)
# ax.set_ylabel('Coefficient of Thermal Expansion ($\degree C^{-1}$)')
# ax.set_ylabel('Average Ingroup Distances [AU]')
ax.set_xticks(x_pos)
ax.set_xticklabels(groups00) #, size = 0)
ax.set_title('Average Ingroup Distance With Age') # ax.set_title('Average Ingroup Distance With Age')
# ax.yaxis.grid(True)
# ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
# plt.savefig('intra group distance.pdf')
plt.show()
| [
"pandas.read_csv",
"matplotlib.pyplot.show",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.sum",
"statistics.pstdev",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.random.permutation"
] | [((8529, 8573), 'pandas.read_csv', 'pd.read_csv', (['"""mergedpolarlipid"""'], {'header': 'None'}), "('mergedpolarlipid', header=None)\n", (8540, 8573), True, 'import pandas as pd\n'), ((8726, 8742), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8740, 8742), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9451, 9467), 'numpy.array', 'np.array', (['oldval'], {}), '(oldval)\n', (9459, 9467), True, 'import numpy as np\n'), ((9606, 9624), 'numpy.array', 'np.array', (['youngval'], {}), '(youngval)\n', (9614, 9624), True, 'import numpy as np\n'), ((11085, 11104), 'numpy.array', 'np.array', (['distlstot'], {}), '(distlstot)\n', (11093, 11104), True, 'import numpy as np\n'), ((12764, 12777), 'numpy.arange', 'np.arange', (['(59)'], {}), '(59)\n', (12773, 12777), True, 'import numpy as np\n'), ((12787, 12816), 'numpy.random.permutation', 'np.random.permutation', (['range0'], {}), '(range0)\n', (12808, 12816), True, 'import numpy as np\n'), ((14131, 14145), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (14143, 14145), True, 'import matplotlib.pyplot as plt\n'), ((14655, 14665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14663, 14665), True, 'import matplotlib.pyplot as plt\n'), ((2427, 2440), 'numpy.sum', 'np.sum', (['mtrx0'], {}), '(mtrx0)\n', (2433, 2440), True, 'import numpy as np\n'), ((2916, 2929), 'numpy.sum', 'np.sum', (['mtrx0'], {}), '(mtrx0)\n', (2922, 2929), True, 'import numpy as np\n'), ((3981, 3998), 'numpy.array', 'np.array', (['group01'], {}), '(group01)\n', (3989, 3998), True, 'import numpy as np\n'), ((4152, 4169), 'numpy.array', 'np.array', (['group02'], {}), '(group02)\n', (4160, 4169), True, 'import numpy as np\n'), ((4767, 4785), 'numpy.array', 'np.array', (['distmtrx'], {}), '(distmtrx)\n', (4775, 4785), True, 'import numpy as np\n'), ((5477, 5495), 'numpy.array', 'np.array', (['distmtrx'], {}), '(distmtrx)\n', (5485, 5495), True, 'import numpy as np\n'), ((6054, 6072), 'numpy.array', 'np.array', (['distmtrx'], {}), '(distmtrx)\n', (6062, 6072), True, 'import numpy as np\n'), ((7613, 7641), 'statistics.pstdev', 'statistics.pstdev', (['elements0'], {}), '(elements0)\n', (7630, 7641), False, 'import statistics\n'), ((12846, 12867), 'numpy.array', 'np.array', (['getdistmtrx'], {}), '(getdistmtrx)\n', (12854, 12867), True, 'import numpy as np\n'), ((4896, 4918), 'numpy.sum', 'np.sum', (['tmpdistmtrxarr'], {}), '(tmpdistmtrxarr)\n', (4902, 4918), True, 'import numpy as np\n'), ((5159, 5181), 'numpy.sum', 'np.sum', (['tmpdistmtrxarr'], {}), '(tmpdistmtrxarr)\n', (5165, 5181), True, 'import numpy as np\n'), ((5590, 5612), 'numpy.sum', 'np.sum', (['tmpdistmtrxarr'], {}), '(tmpdistmtrxarr)\n', (5596, 5612), True, 'import numpy as np\n'), ((5787, 5809), 'numpy.sum', 'np.sum', (['tmpdistmtrxarr'], {}), '(tmpdistmtrxarr)\n', (5793, 5809), True, 'import numpy as np\n'), ((6266, 6288), 'numpy.sum', 'np.sum', (['tmpdistmtrxarr'], {}), '(tmpdistmtrxarr)\n', (6272, 6288), True, 'import numpy as np\n'), ((6490, 6512), 'numpy.sum', 'np.sum', (['tmpdistmtrxarr'], {}), '(tmpdistmtrxarr)\n', (6496, 6512), True, 'import numpy as np\n')] |
# SYNCS Hackathon 2020
# jadaj - Circular
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import cv2
from PIL import Image
import io
import base64
# Inputs:
# image (2d array-like): Will consider all non-zero entries in array as part of circle
# centre ((float, float))
# Outputs:
# (int) score from 0 to 100
def evaluate_circle(image, centre):
# Evaluation
centre = centre[::-1]
coords = np.argwhere(image)
shifted = coords - centre
theta, r = np.arctan2(shifted[:,0], shifted[:,1]), np.sqrt(shifted[:,0]**2 + shifted[:,1]**2)
shuffle = np.argsort(theta)
theta = theta[shuffle]
r = r[shuffle]
smoothed = sm.nonparametric.lowess(r,theta,frac=1/30,it=3, return_sorted = False)
theta_samp = np.linspace(min(theta), max(theta),40)
r_spaced = np.interp(theta_samp,theta,smoothed)
radius = np.median(r_spaced)
error = np.mean(abs(r_spaced - radius))/radius
score = 2 - 2/(1+np.exp(-8*error))
score = int(score*1000)/10
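    # The logistic map above sends error = 0 to a score of 1.0 and large relative
    # errors toward 0; int(score*1000)/10 then truncates to one decimal place on
    # a 0-100 scale.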
# Shifting
scale = 1.4
height, width = image.shape
top, bottom = int(centre[0] - radius*scale), int(centre[0] + radius*scale)
left,right = int(centre[1] - radius*scale), int(centre[1] + radius*scale)
toppad, bottompad = 0 - min(0,top), max(height,bottom) - height
leftpad, rightpad = 0 - min(0,left), max(width,right) - width
img_cut = image[max(0,top):bottom, max(0,left):right]
n_height = img_cut.shape[0]
img_stretch = np.hstack([np.zeros([n_height,leftpad]), img_cut, np.zeros([n_height,rightpad])])
new_width = img_stretch.shape[1]
img_stretch = np.vstack([np.zeros([toppad,new_width]), img_stretch, np.zeros([bottompad, new_width])])
new_img = cv2.resize(img_stretch,(800,800))
new_centre = (400,400)
new_radius = 400/scale
# Arrows
coord_set = set(tuple(x) for x in np.argwhere(new_img).tolist())
fig, ax = plt.subplots(figsize = (8,8))
layers = []
new_img = new_img != 0
for x in [(43,255), (45,231), (66,136)]:
a = np.where(new_img==0, x[0], new_img)
b = np.where(a==1, x[1],a)
layers.append(b)
img_col = np.stack(layers)
plt.imshow(img_col.transpose((1,2,0)))
for phi in np.linspace(0, 2*np.pi, 60, endpoint = False):
circle_point = new_radius*np.cos(phi) + new_centre[0], new_radius*np.sin(phi) + new_centre[1]
dx,dy = 0.5*np.cos(phi), 0.5*np.sin(phi)
x,y = new_centre
ray_set = {new_centre}
while (x>=0) and (x<800) and (y>=0) and (y<800):
x += dx
y += dy
ray_set.add((int(x), int(y)))
intersections = list(ray_set & coord_set)
if len(intersections) > 0:
draw_point = np.mean(np.array(intersections), axis = 0)
vector = (circle_point - draw_point)
if np.linalg.norm(vector)/new_radius > 0.03:
plt.arrow(draw_point[1], draw_point[0], vector[1]*3,vector[0]*3, width= 2,head_width=7, head_length=11, fc='w', ec='w')
plt.axis('off')
#overlay = plt.figure()
buf = io.BytesIO()
plt.savefig(buf, bbox_inches='tight')
# plt.savefig('Output.png', bbox_inches='tight')
buf.seek(0)
#image = Image.open(buf)
#print(image)
plt.close()
return score, buf
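# Usage sketch (illustrative values only, not part of the original interface):
#   img = np.zeros((200, 200))
#   phis = np.linspace(0, 2 * np.pi, 720)
#   img[(100 + 60 * np.sin(phis)).astype(int),
#       (100 + 60 * np.cos(phis)).astype(int)] = 1
#   score, png_buf = evaluate_circle(img, (100, 100))  # near-perfect circle -> high score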
# Inputs:
# lineArray (list of 2d array-likes): Individual masks of found lines
# lineMask (2d array-likes): Compiled mask of found lines
# Output:
# angle from parallel (float)
def evaluate_lines(lineArray, lineMask):
min_angle = np.inf
coords = [np.argwhere(l) for l in lineArray]
gradients = [np.polyfit(c[:,0],c[:,1],1)[0] for c in coords]
n = len(gradients)
for i in range(n):
for j in range(i+1,n):
min_angle = min(min_angle,
np.rad2deg(np.arctan(abs(gradients[i]-gradients[j])/(1+gradients[i]*gradients[j]))))
score = int(100*min_angle)/100
# my_string = base64.b64encode(img_file.read())
# buf = io.BytesIO()
# plt.savefig(buf, bbox_inches='tight')
# buf.seek(0)
# #image = Image.open(buf)
# #print(image)
# plt.close()
# return score, buf
return score | [
"numpy.sqrt",
"numpy.polyfit",
"io.BytesIO",
"numpy.argsort",
"numpy.array",
"numpy.arctan2",
"numpy.linalg.norm",
"numpy.sin",
"matplotlib.pyplot.arrow",
"numpy.where",
"matplotlib.pyplot.close",
"numpy.stack",
"numpy.linspace",
"numpy.exp",
"matplotlib.pyplot.axis",
"matplotlib.pyplo... | [((442, 460), 'numpy.argwhere', 'np.argwhere', (['image'], {}), '(image)\n', (453, 460), True, 'import numpy as np\n'), ((603, 620), 'numpy.argsort', 'np.argsort', (['theta'], {}), '(theta)\n', (613, 620), True, 'import numpy as np\n'), ((682, 755), 'statsmodels.api.nonparametric.lowess', 'sm.nonparametric.lowess', (['r', 'theta'], {'frac': '(1 / 30)', 'it': '(3)', 'return_sorted': '(False)'}), '(r, theta, frac=1 / 30, it=3, return_sorted=False)\n', (705, 755), True, 'import statsmodels.api as sm\n'), ((824, 862), 'numpy.interp', 'np.interp', (['theta_samp', 'theta', 'smoothed'], {}), '(theta_samp, theta, smoothed)\n', (833, 862), True, 'import numpy as np\n'), ((879, 898), 'numpy.median', 'np.median', (['r_spaced'], {}), '(r_spaced)\n', (888, 898), True, 'import numpy as np\n'), ((1733, 1768), 'cv2.resize', 'cv2.resize', (['img_stretch', '(800, 800)'], {}), '(img_stretch, (800, 800))\n', (1743, 1768), False, 'import cv2\n'), ((1918, 1946), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1930, 1946), True, 'import matplotlib.pyplot as plt\n'), ((2164, 2180), 'numpy.stack', 'np.stack', (['layers'], {}), '(layers)\n', (2172, 2180), True, 'import numpy as np\n'), ((2239, 2284), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(60)'], {'endpoint': '(False)'}), '(0, 2 * np.pi, 60, endpoint=False)\n', (2250, 2284), True, 'import numpy as np\n'), ((3031, 3046), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3039, 3046), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3097), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3095, 3097), False, 'import io\n'), ((3102, 3139), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buf'], {'bbox_inches': '"""tight"""'}), "(buf, bbox_inches='tight')\n", (3113, 3139), True, 'import matplotlib.pyplot as plt\n'), ((3260, 3271), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3269, 3271), True, 'import matplotlib.pyplot as plt\n'), ((506, 546), 'numpy.arctan2', 'np.arctan2', (['shifted[:, 0]', 'shifted[:, 1]'], {}), '(shifted[:, 0], shifted[:, 1])\n', (516, 546), True, 'import numpy as np\n'), ((546, 594), 'numpy.sqrt', 'np.sqrt', (['(shifted[:, 0] ** 2 + shifted[:, 1] ** 2)'], {}), '(shifted[:, 0] ** 2 + shifted[:, 1] ** 2)\n', (553, 594), True, 'import numpy as np\n'), ((2054, 2091), 'numpy.where', 'np.where', (['(new_img == 0)', 'x[0]', 'new_img'], {}), '(new_img == 0, x[0], new_img)\n', (2062, 2091), True, 'import numpy as np\n'), ((2102, 2127), 'numpy.where', 'np.where', (['(a == 1)', 'x[1]', 'a'], {}), '(a == 1, x[1], a)\n', (2110, 2127), True, 'import numpy as np\n'), ((3552, 3566), 'numpy.argwhere', 'np.argwhere', (['l'], {}), '(l)\n', (3563, 3566), True, 'import numpy as np\n'), ((1504, 1533), 'numpy.zeros', 'np.zeros', (['[n_height, leftpad]'], {}), '([n_height, leftpad])\n', (1512, 1533), True, 'import numpy as np\n'), ((1543, 1573), 'numpy.zeros', 'np.zeros', (['[n_height, rightpad]'], {}), '([n_height, rightpad])\n', (1551, 1573), True, 'import numpy as np\n'), ((1641, 1670), 'numpy.zeros', 'np.zeros', (['[toppad, new_width]'], {}), '([toppad, new_width])\n', (1649, 1670), True, 'import numpy as np\n'), ((1684, 1716), 'numpy.zeros', 'np.zeros', (['[bottompad, new_width]'], {}), '([bottompad, new_width])\n', (1692, 1716), True, 'import numpy as np\n'), ((3604, 3635), 'numpy.polyfit', 'np.polyfit', (['c[:, 0]', 'c[:, 1]', '(1)'], {}), '(c[:, 0], c[:, 1], 1)\n', (3614, 3635), True, 'import numpy as np\n'), ((977, 995), 'numpy.exp', 
'np.exp', (['(-8 * error)'], {}), '(-8 * error)\n', (983, 995), True, 'import numpy as np\n'), ((2408, 2419), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2414, 2419), True, 'import numpy as np\n'), ((2425, 2436), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2431, 2436), True, 'import numpy as np\n'), ((2750, 2773), 'numpy.array', 'np.array', (['intersections'], {}), '(intersections)\n', (2758, 2773), True, 'import numpy as np\n'), ((2907, 3036), 'matplotlib.pyplot.arrow', 'plt.arrow', (['draw_point[1]', 'draw_point[0]', '(vector[1] * 3)', '(vector[0] * 3)'], {'width': '(2)', 'head_width': '(7)', 'head_length': '(11)', 'fc': '"""w"""', 'ec': '"""w"""'}), "(draw_point[1], draw_point[0], vector[1] * 3, vector[0] * 3, width\n =2, head_width=7, head_length=11, fc='w', ec='w')\n", (2916, 3036), True, 'import matplotlib.pyplot as plt\n'), ((2320, 2331), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2326, 2331), True, 'import numpy as np\n'), ((2360, 2371), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2366, 2371), True, 'import numpy as np\n'), ((2849, 2871), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (2863, 2871), True, 'import numpy as np\n'), ((1873, 1893), 'numpy.argwhere', 'np.argwhere', (['new_img'], {}), '(new_img)\n', (1884, 1893), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule
def test_svrg_intermediate_level_api(args):
"""Demonstrates intermediate level SVRGModule API where the training process
    needs to be explicitly defined. KVStore is not explicitly created.
Parameters
----------
args: args
Command line arguments
"""
num_epoch = args.epochs
batch_size = args.batch_size
update_freq = args.update_freq
di, mod = create_network(batch_size, update_freq)
mod.bind(data_shapes=di.provide_data, label_shapes=di.provide_label)
mod.init_params(initializer=mx.init.Uniform(0.01), allow_missing=False, force_init=False, allow_extra=False)
kv = mx.kv.create("local")
mod.init_optimizer(kvstore=kv, optimizer='sgd', optimizer_params=(('learning_rate', 0.025),))
metrics = mx.metric.create("mse")
for e in range(num_epoch):
metrics.reset()
if e % mod.update_freq == 0:
mod.update_full_grads(di)
di.reset()
for batch in di:
mod.forward_backward(data_batch=batch)
mod.update()
mod.update_metric(metrics, batch.label)
mod.logger.info('Epoch[%d] Train cost=%f', e, metrics.get()[1])
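    # SVRG keeps a full-gradient snapshot (update_full_grads) every update_freq
    # epochs and uses it to correct each stochastic gradient, which reduces the
    # gradient variance relative to plain SGD.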
def test_svrg_high_level_api(args):
"""Demonstrates suggested usage of high level SVRGModule API. KVStore is explicitly created.
Parameters
----------
args: args
Command line arguments
"""
num_epoch = args.epochs
batch_size = args.batch_size
update_freq = args.update_freq
di, mod = create_network(batch_size, update_freq)
mod.fit(di, eval_metric='mse', optimizer='sgd', optimizer_params=(('learning_rate', 0.025),), num_epoch=num_epoch,
kvstore='local')
def create_network(batch_size, update_freq):
"""Create a linear regression network for performing SVRG optimization.
Parameters
----------
batch_size: int
Size of data split
update_freq: int
Update Frequency for calculating full gradients
Returns
----------
di: mx.io.NDArrayIter
Data iterator
    mod: SVRGModule
An instance of SVRGModule for performing SVRG optimization
"""
import logging
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
train_data = np.random.randint(1, 5, [1000, 2])
weights = np.array([1.0, 2.0])
train_label = train_data.dot(weights)
di = mx.io.NDArrayIter(train_data, train_label, batch_size=batch_size, shuffle=True, label_name='lin_reg_label')
X = mx.sym.Variable('data')
Y = mx.symbol.Variable('lin_reg_label')
fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
mod = SVRGModule(
symbol=lro,
data_names=['data'],
label_names=['lin_reg_label'], update_freq=update_freq, logger=logging
)
return di, mod
# run as a script
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-e', dest='epochs', default=100, type=int)
parser.add_argument('-bs', dest='batch_size', default=32, type=int)
parser.add_argument('-f', dest="update_freq", default=2, type=int)
args = parser.parse_args()
print("========================== Intermediate Level API ==========================")
test_svrg_intermediate_level_api(args)
print("========================== High Level API ==========================")
test_svrg_high_level_api(args)
| [
"logging.basicConfig",
"argparse.ArgumentParser",
"mxnet.io.NDArrayIter",
"mxnet.symbol.Variable",
"mxnet.sym.Variable",
"mxnet.sym.LinearRegressionOutput",
"mxnet.init.Uniform",
"numpy.array",
"numpy.random.randint",
"mxnet.kv.create",
"mxnet.metric.create",
"mxnet.sym.FullyConnected",
"mxn... | [((1521, 1542), 'mxnet.kv.create', 'mx.kv.create', (['"""local"""'], {}), "('local')\n", (1533, 1542), True, 'import mxnet as mx\n'), ((1655, 1678), 'mxnet.metric.create', 'mx.metric.create', (['"""mse"""'], {}), "('mse')\n", (1671, 1678), True, 'import mxnet as mx\n'), ((3092, 3144), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'head'}), '(level=logging.INFO, format=head)\n', (3111, 3144), False, 'import logging\n'), ((3163, 3197), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)', '[1000, 2]'], {}), '(1, 5, [1000, 2])\n', (3180, 3197), True, 'import numpy as np\n'), ((3212, 3232), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (3220, 3232), True, 'import numpy as np\n'), ((3285, 3397), 'mxnet.io.NDArrayIter', 'mx.io.NDArrayIter', (['train_data', 'train_label'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'label_name': '"""lin_reg_label"""'}), "(train_data, train_label, batch_size=batch_size, shuffle=\n True, label_name='lin_reg_label')\n", (3302, 3397), True, 'import mxnet as mx\n'), ((3401, 3424), 'mxnet.sym.Variable', 'mx.sym.Variable', (['"""data"""'], {}), "('data')\n", (3416, 3424), True, 'import mxnet as mx\n'), ((3433, 3468), 'mxnet.symbol.Variable', 'mx.symbol.Variable', (['"""lin_reg_label"""'], {}), "('lin_reg_label')\n", (3451, 3468), True, 'import mxnet as mx\n'), ((3497, 3552), 'mxnet.sym.FullyConnected', 'mx.sym.FullyConnected', ([], {'data': 'X', 'name': '"""fc1"""', 'num_hidden': '(1)'}), "(data=X, name='fc1', num_hidden=1)\n", (3518, 3552), True, 'import mxnet as mx\n'), ((3563, 3641), 'mxnet.sym.LinearRegressionOutput', 'mx.sym.LinearRegressionOutput', ([], {'data': 'fully_connected_layer', 'label': 'Y', 'name': '"""lro"""'}), "(data=fully_connected_layer, label=Y, name='lro')\n", (3592, 3641), True, 'import mxnet as mx\n'), ((3653, 3772), 'mxnet.contrib.svrg_optimization.svrg_module.SVRGModule', 'SVRGModule', ([], {'symbol': 'lro', 'data_names': "['data']", 'label_names': "['lin_reg_label']", 'update_freq': 'update_freq', 'logger': 'logging'}), "(symbol=lro, data_names=['data'], label_names=['lin_reg_label'],\n update_freq=update_freq, logger=logging)\n", (3663, 3772), False, 'from mxnet.contrib.svrg_optimization.svrg_module import SVRGModule\n'), ((3899, 3924), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3922, 3924), False, 'import argparse\n'), ((1431, 1452), 'mxnet.init.Uniform', 'mx.init.Uniform', (['(0.01)'], {}), '(0.01)\n', (1446, 1452), True, 'import mxnet as mx\n')] |
import numpy as np
import collections
from ..utils.utils_def import FlopyBinaryData
from ..utils.reference import SpatialReference
class MfGrdFile(FlopyBinaryData):
def __init__(self, filename, precision='double', verbose=False):
"""
Class constructor.
"""
super(MfGrdFile, self).__init__()
self.set_float(precision=precision)
self.verbose = verbose
self._initial_len = 50
self._recorddict = collections.OrderedDict()
self._datadict = collections.OrderedDict()
self._recordkeys = []
self.file = open(filename, 'rb')
"""
# read header information
GRID DISV
VERSION 1
NTXT 13
LENTXT 100
"""
# grid type
line = self.read_text(self._initial_len).strip()
t = line.split()
self._grid = t[1]
# version
line = self.read_text(self._initial_len).strip()
t = line.split()
self._version = t[1]
        # number of text records
line = self.read_text(self._initial_len).strip()
t = line.split()
self._ntxt = int(t[1])
# length of text
line = self.read_text(self._initial_len).strip()
t = line.split()
self._lentxt = int(t[1])
# read text strings
for idx in range(self._ntxt):
line = self.read_text(self._lentxt).strip()
t = line.split()
key = t[0]
dt = t[1]
if dt == 'INTEGER':
dtype = np.int32
elif dt == 'SINGLE':
dtype = np.float32
elif dt == 'DOUBLE':
dtype = np.float64
else:
dtype = None
nd = int(t[3])
if nd > 0:
shp = [int(v) for v in t[4:]]
shp = tuple(shp[::-1])
else:
shp = (0,)
self._recorddict[key] = (dtype, nd, shp)
self._recordkeys.append(key)
if self.verbose:
print('read {} records from {}'.format(self._ntxt, filename))
for key in self._recordkeys:
dt, nd, shp = self._recorddict[key]
# read array data
if nd > 0:
count = 1
for v in shp:
count *= v
v = self.read_record(count=count, dtype=dt)
# read variable data
else:
if dt == np.int32:
v = self.read_integer()
elif dt == np.float32:
v = self.read_real()
elif dt == np.float64:
v = self.read_real()
self._datadict[key] = v
# set the spatial reference
self.sr = self._set_spatialreference()
def _set_spatialreference(self):
try:
if self._grid == 'DISV':
sr = None
elif self._grid == 'DIS':
delr, delc = self._datadict['DELR'], self._datadict['DELC']
xorigin, yorigin, rot = self._datadict['XORIGIN'], \
self._datadict['YORIGIN'], \
self._datadict['ANGROT']
sr = SpatialReference(delr=delr, delc=delc,
xll=xorigin, yll=yorigin, rotation=rot)
        except Exception:
sr = None
print('could not set spatial reference for {}'.format(self.file.name))
return sr
def get_spatialreference(self):
return self.sr
def get_centroids(self):
x, y = None, None
try:
if self._grid == 'DISV':
x = self._datadict['CELLX']
y = self._datadict['CELLY']
elif self._grid == 'DIS':
nlay = self._datadict['NLAY']
x = np.tile(self.sr.xcentergrid.flatten(), nlay)
y = np.tile(self.sr.ycentergrid.flatten(), nlay)
        except Exception:
print('could not return centroids' +
' for {}'.format(self.file.name))
return np.column_stack((x, y))
def get_verts(self):
if self._grid == 'DISV':
try:
iverts = []
iavert = self._datadict['IAVERT']
javert = self._datadict['JAVERT']
shpvert = self._recorddict['VERTICES'][2]
for ivert in range(self._datadict['NCPL']):
i0 = iavert[ivert] - 1
i1 = iavert[ivert+1] - 1
iverts.append((javert[i0:i1]-1).tolist())
if self.verbose:
print('returning vertices for {}'.format(self.file.name))
return iverts, self._datadict['VERTICES'].reshape(shpvert)
            except Exception:
print('could not return vertices for {}'.format(self.file.name))
elif self._grid == 'DIS':
try:
nlay, nrow, ncol = self._datadict['NLAY'], \
self._datadict['NROW'], \
self._datadict['NCOL']
iv = 0
verts = []
iverts = []
for k in range(nlay):
for i in range(nrow):
for j in range(ncol):
ivlist = []
v = self.sr.get_vertices(i, j)
for (x, y) in v:
verts.append((x, y))
ivlist.append(iv)
iv += 1
iverts.append(ivlist)
verts = np.array(verts)
return iverts, verts
            except Exception:
print('could not return vertices for {}'.format(self.file.name))
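# Usage sketch (the file name below is hypothetical):
#   grb = MfGrdFile('model.dis.grb', verbose=True)
#   sr = grb.get_spatialreference()
#   iverts, verts = grb.get_verts()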
| [
"numpy.array",
"collections.OrderedDict",
"numpy.column_stack"
] | [((467, 492), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (490, 492), False, 'import collections\n'), ((518, 543), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (541, 543), False, 'import collections\n'), ((4101, 4124), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (4116, 4124), True, 'import numpy as np\n'), ((5680, 5695), 'numpy.array', 'np.array', (['verts'], {}), '(verts)\n', (5688, 5695), True, 'import numpy as np\n')] |
"""Test cases for evaluation scripts."""
import json
import os
import unittest
import numpy as np
from PIL import Image
from ..common.utils import DEFAULT_COCO_CONFIG
from .ins_seg import evaluate_ins_seg
class TestBDD100KInsSegEval(unittest.TestCase):
"""Test cases for BDD100K detection evaluation."""
def test_ins_seg(self) -> None:
"""Check detection evaluation correctness."""
cur_dir = os.path.dirname(os.path.abspath(__file__))
gt_base = "{}/testcases/ins_seg/gt".format(cur_dir)
pred_base = "{}/testcases/ins_seg/pred".format(cur_dir)
pred_json = "{}/testcases/ins_seg/pred.json".format(cur_dir)
result = evaluate_ins_seg(
gt_base, pred_base, pred_json, DEFAULT_COCO_CONFIG
)
for key, val in result.items():
print(key, val)
overall_reference = {
"AP": 0.686056105610561,
"AP_50": 0.8968646864686468,
"AP_75": 0.6666666666666666,
"AP_small": 0.686056105610561,
"AR_max_1": 0.6583333333333333,
"AR_max_10": 0.7083333333333334,
"AR_max_100": 0.7083333333333334,
"AR_small": 0.7083333333333334,
}
for key in overall_reference:
self.assertAlmostEqual(result[key], overall_reference[key])
def create_test_file() -> None:
"""Creat mocking files for the InsSeg test case."""
cur_dir = os.path.dirname(os.path.abspath(__file__))
gt_base = "{}/testcases/ins_seg/gt".format(cur_dir)
dt_base = "{}/testcases/ins_seg/pred".format(cur_dir)
dt_json = "{}/testcases/ins_seg/pred.json".format(cur_dir)
if not os.path.isdir(gt_base):
os.makedirs(gt_base)
gt_mask = np.zeros((100, 100, 4), dtype=np.uint8)
gt_mask[:10, :10] = np.array([1, 0, 0, 1], dtype=np.uint8)
gt_mask[20:40, 10:20] = np.array([2, 0, 0, 2], dtype=np.uint8)
gt_mask[20:40, 20:30] = np.array([3, 0, 0, 3], dtype=np.uint8)
gt_mask[40:60, 10:30] = np.array([3, 0, 0, 4], dtype=np.uint8)
gt_mask[40:60, 30:40] = np.array([3, 0, 0, 5], dtype=np.uint8)
gt_mask[60:70, 50:60] = np.array([3, 0, 0, 6], dtype=np.uint8)
Image.fromarray(gt_mask).save(os.path.join(gt_base, "a.png"))
if not os.path.isdir(dt_base):
os.makedirs(dt_base)
dt_mask = np.zeros((100, 100, 4), dtype=np.uint8)
dt_mask[:10, :10] = np.array([1, 0, 0, 1], dtype=np.uint8)
dt_mask[20:40, 10:19] = np.array([2, 0, 0, 2], dtype=np.uint8)
dt_mask[20:40, 20:27] = np.array([3, 0, 0, 4], dtype=np.uint8)
dt_mask[40:60, 10:22] = np.array([3, 0, 0, 6], dtype=np.uint8)
dt_mask[40:60, 30:35] = np.array([3, 0, 0, 7], dtype=np.uint8)
dt_mask[60:70, 50:54] = np.array([3, 0, 0, 8], dtype=np.uint8)
Image.fromarray(dt_mask).save(os.path.join(dt_base, "a.png"))
if not os.path.isfile(dt_json):
scores = [
[1, 0.4],
[2, 0.9],
[3, 0.7],
[4, 0.8],
[6, 0.9],
[7, 0.9],
[8, 0.9],
[9, 0.9],
]
dt_pred = [
{
"name": "a.png",
"labels": [
{
"index": item[0],
"score": item[1],
}
for item in scores
],
}
]
with open(dt_json, "w") as fp:
json.dump(dt_pred, fp)
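    # Note: each 4-channel mask above appears to encode
    # (category_id, 0, 0, instance_index) per pixel, so every distinct value in
    # the fourth channel marks one instance.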
if __name__ == "__main__":
create_test_file()
unittest.main()
| [
"PIL.Image.fromarray",
"os.makedirs",
"os.path.join",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"os.path.isdir",
"unittest.main",
"os.path.abspath",
"json.dump"
] | [((3555, 3570), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3568, 3570), False, 'import unittest\n'), ((1444, 1469), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1459, 1469), False, 'import os\n'), ((1660, 1682), 'os.path.isdir', 'os.path.isdir', (['gt_base'], {}), '(gt_base)\n', (1673, 1682), False, 'import os\n'), ((1692, 1712), 'os.makedirs', 'os.makedirs', (['gt_base'], {}), '(gt_base)\n', (1703, 1712), False, 'import os\n'), ((1731, 1770), 'numpy.zeros', 'np.zeros', (['(100, 100, 4)'], {'dtype': 'np.uint8'}), '((100, 100, 4), dtype=np.uint8)\n', (1739, 1770), True, 'import numpy as np\n'), ((1799, 1837), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {'dtype': 'np.uint8'}), '([1, 0, 0, 1], dtype=np.uint8)\n', (1807, 1837), True, 'import numpy as np\n'), ((1870, 1908), 'numpy.array', 'np.array', (['[2, 0, 0, 2]'], {'dtype': 'np.uint8'}), '([2, 0, 0, 2], dtype=np.uint8)\n', (1878, 1908), True, 'import numpy as np\n'), ((1941, 1979), 'numpy.array', 'np.array', (['[3, 0, 0, 3]'], {'dtype': 'np.uint8'}), '([3, 0, 0, 3], dtype=np.uint8)\n', (1949, 1979), True, 'import numpy as np\n'), ((2012, 2050), 'numpy.array', 'np.array', (['[3, 0, 0, 4]'], {'dtype': 'np.uint8'}), '([3, 0, 0, 4], dtype=np.uint8)\n', (2020, 2050), True, 'import numpy as np\n'), ((2083, 2121), 'numpy.array', 'np.array', (['[3, 0, 0, 5]'], {'dtype': 'np.uint8'}), '([3, 0, 0, 5], dtype=np.uint8)\n', (2091, 2121), True, 'import numpy as np\n'), ((2154, 2192), 'numpy.array', 'np.array', (['[3, 0, 0, 6]'], {'dtype': 'np.uint8'}), '([3, 0, 0, 6], dtype=np.uint8)\n', (2162, 2192), True, 'import numpy as np\n'), ((2275, 2297), 'os.path.isdir', 'os.path.isdir', (['dt_base'], {}), '(dt_base)\n', (2288, 2297), False, 'import os\n'), ((2307, 2327), 'os.makedirs', 'os.makedirs', (['dt_base'], {}), '(dt_base)\n', (2318, 2327), False, 'import os\n'), ((2346, 2385), 'numpy.zeros', 'np.zeros', (['(100, 100, 4)'], {'dtype': 'np.uint8'}), '((100, 100, 4), dtype=np.uint8)\n', (2354, 2385), True, 'import numpy as np\n'), ((2414, 2452), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {'dtype': 'np.uint8'}), '([1, 0, 0, 1], dtype=np.uint8)\n', (2422, 2452), True, 'import numpy as np\n'), ((2485, 2523), 'numpy.array', 'np.array', (['[2, 0, 0, 2]'], {'dtype': 'np.uint8'}), '([2, 0, 0, 2], dtype=np.uint8)\n', (2493, 2523), True, 'import numpy as np\n'), ((2556, 2594), 'numpy.array', 'np.array', (['[3, 0, 0, 4]'], {'dtype': 'np.uint8'}), '([3, 0, 0, 4], dtype=np.uint8)\n', (2564, 2594), True, 'import numpy as np\n'), ((2627, 2665), 'numpy.array', 'np.array', (['[3, 0, 0, 6]'], {'dtype': 'np.uint8'}), '([3, 0, 0, 6], dtype=np.uint8)\n', (2635, 2665), True, 'import numpy as np\n'), ((2698, 2736), 'numpy.array', 'np.array', (['[3, 0, 0, 7]'], {'dtype': 'np.uint8'}), '([3, 0, 0, 7], dtype=np.uint8)\n', (2706, 2736), True, 'import numpy as np\n'), ((2769, 2807), 'numpy.array', 'np.array', (['[3, 0, 0, 8]'], {'dtype': 'np.uint8'}), '([3, 0, 0, 8], dtype=np.uint8)\n', (2777, 2807), True, 'import numpy as np\n'), ((2890, 2913), 'os.path.isfile', 'os.path.isfile', (['dt_json'], {}), '(dt_json)\n', (2904, 2913), False, 'import os\n'), ((437, 462), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (452, 462), False, 'import os\n'), ((2231, 2261), 'os.path.join', 'os.path.join', (['gt_base', '"""a.png"""'], {}), "(gt_base, 'a.png')\n", (2243, 2261), False, 'import os\n'), ((2846, 2876), 'os.path.join', 'os.path.join', (['dt_base', '"""a.png"""'], {}), "(dt_base, 'a.png')\n", (2858, 2876), False, 'import os\n'), ((3476, 3498), 'json.dump', 'json.dump', (['dt_pred', 'fp'], {}), '(dt_pred, fp)\n', (3485, 3498), False, 'import json\n'), ((2201, 2225), 'PIL.Image.fromarray', 'Image.fromarray', (['gt_mask'], {}), '(gt_mask)\n', (2216, 2225), False, 'from PIL import Image\n'), ((2816, 2840), 'PIL.Image.fromarray', 'Image.fromarray', (['dt_mask'], {}), '(dt_mask)\n', (2831, 2840), False, 'from PIL import Image\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functionality for finite element approximations.
This file is part of Fieldosophy, a toolkit for random fields.
Copyright (C) 2021 <NAME> <<EMAIL>>
This Source Code is subject to the terms of the BSD 3-Clause License.
If a copy of the license was not distributed with this file, you can obtain one at https://opensource.org/licenses/BSD-3-Clause.
"""
import numpy as np
import numpy.linalg as linalg
import scipy.sparse as sparse
import scipy.stats as stats
import scipy.special as special
import ctypes
import os
import abc
from ..misc import Cheb
from . import Cholesky
class FEM(abc.ABC):
"""
Class representing a FEM approximation of a stochastic differential equation.
:param mesh: A mesh object to base the FEM approximation on.
:param matMapsCalculate: Defines which mappings from inner products on simplices to system matrices that should be computed.
:param matMapsCalculateEdges: Defines which mappings from inner products on edge simplices to edge system matrices that should be computed.
:param libPath: A path to the dynamically linked library "libSPDEC.so" to access the low-level API of fieldosophy.
"""
# Declare pointer types
c_double_p = ctypes.POINTER(ctypes.c_double)
c_uint_p = ctypes.POINTER(ctypes.c_uint)
mesh = None
QChol = None
Pr = None
matMaps = ['M']
matMapsEdges = None
BCDirichlet = None
BCDirichletIndex = None
mu = None
tau = None
nu = None
sigma = None
kappaMin = None
def __init__(self, mesh = None, matMapsCalculate = ['M'], matMapsCalculateEdges = None, libPath = None):
if mesh is None:
return
# Create a mesh object
self.mesh = mesh
# Acquire maps from triangle function values to FEM matrices
self.matMaps = MatMaps( mesh.triangles, mesh.nodes, calculate=matMapsCalculate, libPath = libPath )
if matMapsCalculateEdges is not None:
boundary = self.mesh.getBoundary()
self.matMapsEdges = MatMaps( boundary["edges"], mesh.nodes, calculate=matMapsCalculateEdges, libPath = libPath )
return
@abc.abstractmethod
def copy(self):
"""
:return A deep copy of self.
"""
# Deep copies object
return
def copyParent(self, out):
"""
:param out: out becomes a deep copy of this object.
"""
out.mesh = self.mesh.copy()
out.matMaps = self.matMaps.copy()
if self.matMapsEdges is not None:
out.matMapsEdges = self.matMapsEdges.copy()
out.BCDirichlet = self.BCDirichlet.copy()
out.BCDirichletIndex = self.BCDirichletIndex.copy()
if self.QChol is not None:
out.QChol = self.QChol.copy()
if self.Pr is not None:
out.Pr = self.Pr.copy()
if self.mu is not None:
if np.isscalar(self.mu):
out.mu = self.mu
else:
out.mu = self.mu.copy()
if self.tau is not None:
if np.isscalar(self.tau):
out.tau = self.tau
else:
out.tau = self.tau.copy()
if self.sigma is not None:
if np.isscalar(self.sigma):
out.sigma = self.sigma
else:
out.sigma = self.sigma.copy()
out.nu = self.nu
out.kappaMin = self.kappaMin
return out
def updateSystem(self, MCoeff, tau, nu, mu = 0, BCoeff = None, GCoeff = None, sourceCoeff = None, BCRobin = None, BCDirichlet = None, factorize = True):
"""
Update system with new parameters.
:param MCoeff: Coefficients of constant term (simplex wise).
:param tau: Value of the tau-parameter. Tau can be either a scalar or a vector the same size as the number of nodes in the mesh.
:param nu: The smoothness parameter. Can be any positive real value.
:param mu: The mean value. Either a constant or a vector the same size as the number of nodes of the mesh. If set to None, the implicit mean is not computed (can save computations when applicable)
:param BCoeff: Coefficients of the first derivative term (simplex wise and with the number of elements as the dimensionality of the hyperplane that the manifold is embedded in).
:param GCoeff: Coefficients of the second derivative term (simplex wise and with the number of elements as the squared dimensionality of the hyperplane that the manifold is embedded in).
:param sourceCoeff: An optional deterministic term to the source of the differential equation (same dimensionality as the number of simplices).
:param BCRobin: Sets the Robin boundary conditions. Set to None if no Robin boundary conditions should be enforced.
Otherwise, an array where the number of rows equal either one or the number of simplices on the edge and with two columns.
The first column corresponds to the coefficient for the constant term and the second column to the coefficient for the first-derivative term.
:param BCDirichlet: Sets the Dirichlet boundary conditions. If None, no Dirichlet boundary conditions are enforced.
Otherwise, BCDirichlet is a vector the same size as the number of nodes in the mesh.
The value corresponds to the enforced constant value of that specific node unless the value is nan, which means that the given node is not subject to a Dirichlet condition.
:param factorize: Whether the Cholesky factorization should be performed now.
"""
# Set variance coefficient
self.tau = tau
# Set smoothness coefficient
self.nu = nu
# Set kappaMin
self.kappaMin = np.min(MCoeff)
d = self.mesh.topD
# Handle Dirichlet indexing
if self.BCDirichlet is None:
if BCDirichlet is None:
BCDirichlet = np.NaN * np.ones( (self.mesh.N) )
if BCDirichlet is not None:
self.BCDirichletIndex = ~np.isnan(BCDirichlet)
BCDirichlet = BCDirichlet[self.BCDirichletIndex]
self.BCDirichlet = BCDirichlet
if BCRobin is None:
BCRobin = np.zeros( (1,2) )
# build K
K = self.assembleK(MCoeff, BCoeff, GCoeff, BCRobin[:,1])
# Build C
C = MatMaps.mapTriVals2Mat( self.matMaps.M, 1, self.mesh.N )
C = np.asarray(C.sum(axis=1)).reshape(-1) # Mass lump
CInvSqrt = 1/np.sqrt(C) # get squared inverse
# Handle smoothness
beta = (self.nu + d/2)/2
m = 2
n = 1
if beta < 1:
m = 1
n = 2
# Loop until successful factorization
goOn = True
while goOn:
goOn = False
# Try to acquire a rational approximation
[Pl, Pr] = FEM.rationalApproximation( K, CInvSqrt, self.kappaMin, self.nu, d, m=m, n=n )
# Construct latent precision matrix
self.QChol = sparse.diags( CInvSqrt[~self.BCDirichletIndex], shape= (self.mesh.N - np.sum(self.BCDirichletIndex)) * np.array([1,1]) )
self.QChol = Pl[ np.ix_(~self.BCDirichletIndex, ~self.BCDirichletIndex) ] * self.QChol
# If try to factorize
if factorize:
try:
self.QChol = Cholesky.SKSparseCholesky( self.QChol.tocsc(), AAt = True )
except Cholesky.SparseCholeskyAbstract.PositiveDefiniteException as e:
if ( (m == 2) and (n == 1) ):
m = 1
n = 2
elif ( (m == 1) and (n == 2) ):
print("Precision matrix is badly conditioned! Applying rational approximation with constant denominator.")
m = 1
n = 0
else:
raise e
goOn = True
# Store Pr
self.Pr = Pr[ np.ix_(~self.BCDirichletIndex, ~self.BCDirichletIndex) ]
if self.mu is None:
if mu is None:
mu = 0
# If should compute implicit mean
if mu is not None:
self.mu = self.computeImplicitMean(mu, sourceCoeff, BCRobin[:,0], Pl, Pr)
return
def assembleK(self, MCoeff, BCoeff, GCoeff, BCRobinFunction):
"""
Assembles the system matrix 'K' of the FEM system.
:param MCoeff: The inner product over each simplex for each basis function sorted in a T x N array (where T are the number of simplices and N the number of nodes, i.e., basis functions). Here, representing the constant term in the differential operator where the inner product is between two basis functions multiplied with some constant that can be specific for the triangle.
:param BCoeff: Similar to MCoeff but here representing the inner product between a basis function and the gradient of another basis function. BCoeff is a list with D number of elements; one element for each dimension in the hyperplane in which the spatial domain is embedded.
:param GCoeff: Similar to BCoeff but here representing the inner product between the gradient of one basis function and the gradient of another basis function. Therefore being a list with DxD number of elements.
:param BCRobinFunction: A Robin boundary condition means that, on some part of the boundary, we know that n * grad X = a X + b. Hence, the boundary condition can be enforced by the inner product of a X with some basis function and b with some basis function. When constructing K, only the first part where X is multiplied with the basis function is used. This is the input of BCRobinFunction. BCRobinFunction has one value for each edge. The Robin boundary condition can be overridden on a part of (or all of) the boundary by setting Dirichlet condition on this part instead; since Dirichlet conditions have precedence over Neumann and Robin in Fieldosophy.
:return The sparse K matrix.
"""
K = MatMaps.mapTriVals2Mat( self.matMaps.M, MCoeff, self.mesh.N )
if BCoeff is not None:
for iter in range(len(self.matMaps.B)):
if BCoeff[iter] is not None:
K = K + MatMaps.mapTriVals2Mat( self.matMaps.B[iter], BCoeff[iter], self.mesh.N )
if GCoeff is not None:
for iter in range(len(self.matMaps.G)):
if GCoeff[iter] is not None:
K = K + MatMaps.mapTriVals2Mat( self.matMaps.G[iter], GCoeff[iter], self.mesh.N )
if self.matMapsEdges is not None:
if self.matMapsEdges.M is not None:
K = K - MatMaps.mapTriVals2Mat( self.matMapsEdges.M, BCRobinFunction, self.mesh.N )
return K
def computeImplicitMean(self, mu, sourceCoeff, BCRobinConstant, Pl, Pr):
"""
Assembles the right hand side of the FEM system and solve to get the mean.
The actual mean of the GRF depends not only on the explicitly given mean, but also on the source term and the source part of a Robin boundary condition.
This function computes this implicit mean.
:param mu: The explicit mean defined by the user.
:param sourceCoeff: The source term given by the user.
:param BCRobinConstant: The source term part of the Robin boundary condition.
:param Pl: The Pl matrix acquired from the rational approximation to fractional derivatives of the K matrix.
:param Pr: The Pr matrix acquired from the rational approximation to fractional derivatives of the K matrix.
:return The implicit mean.
"""
# If mean is just a scalar
if np.isscalar(mu):
mu = mu * np.ones( (self.mesh.N), dtype=np.float64 )
RHS = None
# If source is defined
if sourceCoeff is not None:
if RHS is None:
RHS = np.zeros( (self.mesh.N), dtype=np.float64 )
RHS = RHS + MatMaps.mapTriVals2Mat( self.matMaps.U, sourceCoeff, (self.mesh.N, 1) ).toarray().flatten()
# If Robin constant is defined
if self.matMapsEdges is not None:
if self.matMapsEdges.U is not None:
if RHS is None:
RHS = np.zeros( (self.mesh.N), dtype=np.float64 )
RHS = RHS + MatMaps.mapTriVals2Mat( self.matMapsEdges.U, \
BCRobinConstant * np.mean( self.tau[ self.mesh.boundary["edges"] ], axis=1 ), \
(self.mesh.N, 1) ).toarray().flatten()
# If Dirichlet is defined
if self.BCDirichlet.size > 0:
mu[self.BCDirichletIndex] = self.BCDirichlet
if np.any(self.BCDirichlet != 0):
# Cholesky factorize Pr
PrChol = Cholesky.SKSparseCholesky( Pr.tocsc(), AAt = False )
# Solve for Dirichlet conditions
sol = np.zeros((self.mesh.N))
sol[self.BCDirichletIndex] = ( self.BCDirichlet * self.tau[self.BCDirichletIndex] )
sol = PrChol.solve( sol )
# Multiply with Pl
sol = Pl * sol
if RHS is None:
RHS = np.zeros( (self.mesh.N), dtype=np.float64 )
RHS = RHS - sol
# If Right hand side is not zero
if RHS is not None:
# Cholesky factorization of K
PlChol = Cholesky.SKSparseCholesky( Pl[ np.ix_(~self.BCDirichletIndex, ~self.BCDirichletIndex) ].tocsc(), AAt = False )
# Get solution
sol = PlChol.solve( RHS[~self.BCDirichletIndex] )
# Solve to get implicit mean
mu[~self.BCDirichletIndex] = mu[~self.BCDirichletIndex] + sol / self.tau[~self.BCDirichletIndex]
return mu
def loglik(self, y, A, sigmaEps, QCond = None):
"""
Log-likelihood of model given observations
:param y: Observed data, a KxL array where L is the number of replicates and K is the number of joint observations.
:param A: An observation matrix relating the observations to the mesh.
:param sigmaEps: The independent centered observation noise given as a standard deviation. Can be different for different observation points.
:param QCond: It is possible to provide a conditional precision matrix (if already computed) such that the computation does not have to be performed again. If left as None the precision matrix will instead be computed as part of the operation.
:return The log-likelihood.
"""
# Check that system is defined
if self.QChol is None:
raise Exception( "System is not created" )
if not isinstance( self.QChol, Cholesky.SKSparseCholesky):
raise Exception("Precision matrix was not factorized!")
# Get number of observations in current realization
k = A.shape[0]
# Get number of realizations
if len(y.shape) == 1:
y = y.reshape((-1,1))
M = y.shape[1]
Atemp = sparse.diags( 1/self.tau[~self.BCDirichletIndex] ) * self.Pr
Atemp = A[:, ~self.BCDirichletIndex] * Atemp
QEps = sparse.diags( np.ones(k) / sigmaEps**2 )
# Compute log determinant of Q
logDetQ = self.QChol.getLogDet()
# Compute log determinant of error Q
logDetQEps = np.sum(np.log(QEps.diagonal()))
if QCond is None:
QCond = self.QChol.getMatrix()
# Build Q of conditional distribution
QCond = QCond + (Atemp.transpose() * QEps * Atemp)
# Cholesky factorize
QCond = Cholesky.SKSparseCholesky( QCond, AAt = False )
# Compute log determinant of conditional precision
logDetQCond = QCond.getLogDet()
# Compute log likelihood
l = 0.5 * ( logDetQEps + logDetQ - logDetQCond - k * np.log(2*np.pi) )
# Compute quadratic form
# # Update log likelihood
# l = l - 0.5/M * np.sum(y*y) / sigmaEps**2
# tempmu = self.QChol.permute(self.mu, toChol = True )
# tempmu = self.QChol.getL(upper = True ) * tempmu
# # Update log likelihood
# l = l - 0.5/M * M * np.sum( tempmu * tempmu )
# tempy = y - (A * self.mu).reshape((-1,1))
# tempy = tempy / sigmaEps**2
# tempy = A[:, ~self.BCDirichletIndex].transpose() * tempy
# tempy = QCond.solve( tempy )
# tempy = tempy + self.mu.reshape((-1,1))
# tempy = QCond.permute( tempy, toChol = True )
# tempy = QCond.getL(upper = True) * tempy
# # Update log likelihood
# l = l + 0.5/M * np.sum(tempy * tempy)
# tempy = y - (A * self.mu).reshape((-1,1))
# tempy2 = tempy / sigmaEps**2
# tempy2 = A[:, ~self.BCDirichletIndex].transpose() * tempy2
# tempy2 = QCond.solve( tempy2 )
# tempy2 = A[:, ~self.BCDirichletIndex] * tempy2
# tempy2 = (tempy - tempy2) / sigmaEps**2
# # Update log likelihood
# l = l - 0.5/M * np.sum(tempy*tempy2)
tempy = y - (A * self.mu ).reshape((-1,1))
tempy2 = QEps * tempy
# Update log likelihood
l = l - 0.5/M * np.sum(tempy * tempy2 )
tempy2 = Atemp.transpose() * tempy2
tempy2 = QCond.permute( tempy2, toChol=True )
tempy2 = QCond.solveL( tempy2, transpose = False )
# Update log likelihood
l = l + 0.5/M * np.sum(tempy2 ** 2)
return l
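# Illustrative usage sketch (commented out; `model`, `y` and `A` are assumed
# names, not part of the library): evaluate the log-likelihood of noisy
# observations, where A maps mesh nodes to observation locations.
#   l = model.loglik( y, A, sigmaEps = 0.01 )
#   # For replicated data, y of shape (K, L) reuses one factorization for all L columns.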
def cond(self, y, A, sigmaEps, QChol = None):
"""
Acquire conditional distribution given model and observations.
:param y: Observed data, a KxL array where L is the number of replicates and K is the number of joint observations.
:param A: An observation matrix relating the observations to the mesh.
:param sigmaEps: The independent centered observation noise given as a standard deviation. Can be different for different observation points.
:param QChol: It is possible to provide a conditional precision matrix (if already computed) such that the computation does not have to be performed again. If left as None, the precision matrix will instead be computed as part of the operation.
:return A new FEM object representing the conditioned random field.
"""
# Get number of observations in current realization
k = A.shape[0]
if isinstance(y, np.ndarray):
if len(y.shape) != 1:
if y.shape[1] == 1:
y = y.reshape((-1))
else:
raise Exception("Wrong size on observation vector")
# compensate for marginal variance
tauInv = sparse.diags( 1/self.tau[~self.BCDirichletIndex] )
# Multiply with Pr
Atemp = tauInv * self.Pr
# Multiply with observation matrix
Atemp = A[:, ~self.BCDirichletIndex] * Atemp
QEps = sparse.diags( np.ones((k)) / sigmaEps**2 )
# Copy current model
out = self.copy()
# If QChol should be computed
if QChol is None:
if isinstance( self.QChol, Cholesky.SKSparseCholesky):
out.QChol = self.QChol.getMatrix()
else:
out.QChol = self.QChol * self.QChol.transpose()
# Build Q of conditional distribution
out.QChol = out.QChol + (Atemp.transpose() * QEps * Atemp)
# Cholesky factorize
out.QChol = Cholesky.SKSparseCholesky( out.QChol, AAt = False )
else:
out.QChol = QChol
# Set sigma on the conditional model to None since its new marginal standard deviation is unknown
out.sigma = None
# Build conditional mean
if np.any( y ) or np.any( self.mu ):
mu = y - A * self.mu
mu = QEps * mu
mu = Atemp.transpose() * mu
out.mu[~self.BCDirichletIndex] = self.mu[~self.BCDirichletIndex] + \
tauInv * ( self.Pr * out.QChol.solve( mu ) )
out.mu[self.BCDirichletIndex] = self.mu[self.BCDirichletIndex]
return out
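# Illustrative usage sketch (commented out; `model`, `y` and `A` are assumed names):
# conditioning returns a new FEM object whose mean and precision include the data.
#   condModel = model.cond( y, A, sigmaEps = 0.01 )
#   condSamples = condModel.generateRandom( 100 )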
def condMean(self, y, A, sigmaEps, QChol = None):
"""
If the conditional distribution has already been computed, use this function to get one (or several) new conditional means.
:param y: Observed data, a KxL array where L is the number of replicates and K is the number of joint observations.
:param A: An observation matrix relating the observations to the mesh.
:param sigmaEps: The independent centered observation noise given as a standard deviation. Can be different for different observation points.
:param QChol: It is possible to provide a conditional precision matrix (if already computed) such that the computation does not have to be performed again. If left as None, the precision matrix will instead be computed as part of the operation.
:return A new FEM object representing the conditioned random field.
"""
if QChol is None:
QChol = self.QChol
# Get number of observations in current realization
k = A.shape[0]
# compensate for marginal variance
tauInv = sparse.diags( 1/self.tau[~self.BCDirichletIndex] )
# Multiply with Pr
Atemp = tauInv * self.Pr
# Multiply with observation matrix
Atemp = A[:, ~self.BCDirichletIndex] * Atemp
QEps = sparse.diags( np.ones((k)) / sigmaEps**2 )
mu = y - A * self.mu.reshape((-1,1))
mu = QEps * mu
mu = Atemp.transpose() * mu
out = self.mu.copy().reshape((-1,1))
out = np.repeat( out, mu.shape[1], axis = 1 )
out[~self.BCDirichletIndex, :] = out[~self.BCDirichletIndex, :] + \
tauInv * ( self.Pr * QChol.solve( mu ) )
return out
def condQChol(self, A, sigmaEps):
"""
Compute conditional Q Cholesky factor.
:param A: An observation matrix relating the observations to the mesh.
:param sigmaEps: The independent centered observation noise given as a standard deviation. Can be different for different observation points.
:return The Cholesky factorized matrix
"""
# Get number of observations in current realization
k = A.shape[0]
# compensate for marginal variance
tauInv = sparse.diags( 1/self.tau[~self.BCDirichletIndex] )
# Multiply with Pr
Atemp = tauInv * self.Pr
# Multiply with observation matrix
Atemp = A[:, ~self.BCDirichletIndex] * Atemp
QEps = sparse.diags( np.ones((k)) / sigmaEps**2 )
# If QChol should be computed
if isinstance( self.QChol, Cholesky.SKSparseCholesky):
QChol = self.QChol.getMatrix()
else:
QChol = self.QChol * self.QChol.transpose()
# Build Q of conditional distribution
QChol = QChol + (Atemp.transpose() * QEps * Atemp)
# Cholesky factorize
QChol = Cholesky.SKSparseCholesky( QChol, AAt = False )
return QChol
def generateRandom( self, n ):
"""
Generate realizations from model.
:param n: Number of realizations.
:return A N x n array, where N is the number of nodes in the mesh and n is the number of realizations.
"""
# Check that system is defined
if self.QChol is None:
raise Exception( "System is not created" )
if not isinstance( self.QChol, Cholesky.SKSparseCholesky):
self.QChol = Cholesky.SKSparseCholesky( self.QChol, AAt = True )
# Generate random
Z = np.zeros( (self.mesh.N, n) )
Z1 = stats.norm.rvs( size = np.sum(~self.BCDirichletIndex) * n ).reshape((-1,n))
Z1 = self.QChol.solveL( Z1, transpose = True )
Z1 = self.QChol.permute( Z1, toChol = False)
# Multiply solution with Pr
Z1 = self.Pr * Z1
# Assemble
Z[~self.BCDirichletIndex, :] = Z1
Z = Z / self.tau.reshape((self.mesh.N,1))
Z = Z + self.mu.reshape((self.mesh.N,1))
return Z
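# Illustrative usage sketch (commented out; `model` is an assumed, fully updated
# FEM object): unconditional simulation and a simple sanity check.
#   Z = model.generateRandom( 1000 )    # mesh.N x 1000 array of realizations
#   empiricalStd = Z.std( axis = 1 )    # compare against the intended marginal sigma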
def multiplyWithCovariance( self, matrix ):
"""
Multiply vector or matrix with the covariance function.
:param matrix: An N x n array for which N is the number of nodes in the mesh and n is the number of vectors to multiply.
:return An N x n array.
"""
# Check that system is defined
if self.QChol is None:
raise Exception( "System is not created" )
if not isinstance( self.QChol, Cholesky.SKSparseCholesky):
self.QChol = Cholesky.SKSparseCholesky( self.QChol, AAt = True )
# Check size
if matrix.shape[0] != self.mesh.N:
raise Exception( "Wrong size!" )
# Acquire tau inverse
tauInv = sparse.diags( 1/self.tau[~self.BCDirichletIndex] ).tocsc()
# If sparse matrix
if sparse.issparse(matrix):
matrix = tauInv * matrix.tocsc()[~self.BCDirichletIndex, :]
else:
matrix = tauInv * matrix[~self.BCDirichletIndex, :]
# Multiply solution with Pr transpose
matrix = self.Pr.transpose().tocsc() * matrix
# Preallocate output
out = np.zeros( (self.mesh.N, matrix.shape[1]) )
# Solve for output
solution = self.QChol.solve( matrix )
# If sparse
if sparse.issparse(solution):
# Make into array
solution = solution.toarray()
# Multiply with Pr transpose
solution = self.Pr * solution
# Multiply with tau inverse
out[~self.BCDirichletIndex, :] = tauInv * solution
return out
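# Illustrative usage sketch (commented out; `model` and `refNode` are assumptions):
# extract one column of the covariance matrix, i.e., the covariances between a
# reference node and all other nodes, without forming the dense covariance.
#   e = np.zeros( (model.mesh.N, 1) )
#   e[refNode, 0] = 1.0
#   covColumn = model.multiplyWithCovariance( e )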
def rationalApproximation( K, CInvSqrt, eigenNorm, nu, d, N = 20, m = 2, n = 1 ):
"""
Compute rational approximation as suggested in Bolin et al. 2019.
:param K: The system matrix of the FEM system.
:param CInvSqrt: A diagonal matrix representing the square root of the inverse of C (assuming "mass lumping" to acquire a diagonal matrix).
:param eigenNorm: The maximal value of the eigen values of the differential operator.
:param nu: The smoothness of the field, i.e., beta = nu/2 + d/4, where nu is the Hölder constant of realizations almost everywhere.
:param d: The dimensionality of the manifold (not generally the dimensionality of the hyperplane in which it is embedded in).
:param N: The degree of the Chebyshev polynomial used in the Clenshaw-Lord rational approximation.
:param m: The degree of the polynomial corresponding to Pl.
:param n: The degree of the polynomial corresponding to Pr.
:return The Pl and Pr matrices for the rational FEM approximation of the solution.
"""
# Check feasible nu
if nu <= 0:
raise Exception("Smoothness parameter is too small!")
# Compute beta
beta = (nu + d/2)/2
# Limit beta values
beta = np.min((beta, 3.25))
# Compute CInv
CInv = sparse.diags( CInvSqrt ** 2 )
# Compute C
C = sparse.diags( CInvSqrt ** (-2) ).tocsr()
# Compute identity
I = sparse.eye(K.shape[0])
# Get CInv * K and normalize by eigennorm
CiL = CInv * K / eigenNorm
# Preallocate
Pl = I.tocsc()
Pr = I.tocsc()
mBeta = np.floor(beta)
# If beta is an integer
if beta == mBeta:
# If beta is smaller or equal to maximum degree of Pl
if mBeta <= m:
for iter in range(int(mBeta)):
Pl = CiL * Pl
Pl = C * Pl
Pl = Pl * (eigenNorm ** mBeta)
return (Pl, Pr)
# mBeta = m-n
# mBeta = 0
domain = ( 10**( -5/2 ), 1 )
# Acquire rational polynomial approximation
b = np.polynomial.polynomial.Polynomial( np.array([1]) )
c = np.polynomial.polynomial.Polynomial( np.array([1]) )
if beta != mBeta:
f = lambda x : x ** (beta-mBeta)
c, b = Cheb.ClenshawLord( f, N, domain, n, m )
# runx = np.linspace(domain[0], domain[1], num=int(1e3))
# err =np.max( np.abs( c(runx)/b(runx) - f(runx) ) )
# Remove too small trailing zeros
b = b.trim( tol = 1e-6 )
c = c.trim( tol = 1e-6 )
# Acquire the roots of the polynomials
rc = c.roots().real
rb = b.roots().real
# Acquire leftover
leftover = (mBeta - (rb.size - rc.size) )
# Handle Pl
for iter in np.arange(0, rb.size ):
Pl = Pl * ( I - CiL * rb[iter] )
# Add leftover CiL
for iter in range( int( np.max((0,leftover)) ) ):
Pl = CiL * Pl
# Multiply with final C
Pl = C * Pl
# adjust for highest order coefficients
Pl = ( b.coef[-1] * (b.mapparms()[1] ** rb.size) ) * Pl
# Handle Pr
for iter in np.arange(0, rc.size ):
Pr = Pr * ( I - CiL * rc[iter] )
# Add leftover CiL
for iter in range(int( np.max((0,-leftover)) )):
Pr = CiL * Pr
# adjust for highest order coefficients
Pr = ( c.coef[-1] * (c.mapparms()[1] ** rc.size) ) * Pr
# Unnormalize Pl by eigenNorm
Pl = Pl * (eigenNorm ** beta)
return (Pl, Pr)
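def _demoClenshawLord():
    # Hedged sketch (helper name is an assumption, not part of the library API):
    # check the accuracy of the Clenshaw-Lord rational approximation that
    # rationalApproximation above relies on, for a scalar fractional power.
    beta, mBeta = 0.8, 0.0
    f = lambda x: x ** (beta - mBeta)
    domain = (10 ** (-5 / 2), 1)
    c, b = Cheb.ClenshawLord(f, 20, domain, 1, 2)  # same call pattern as in rationalApproximation
    x = np.linspace(domain[0], domain[1], num=1000)
    return np.max(np.abs(c(x) / b(x) - f(x)))  # maximal pointwise error on the domain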
class MatMaps:
"""
Class representing the mapping from values at simplices to values for and between nodes
"""
# Dimensionality of space
D = None
# Dimensionality of simplices
topD = None
# Number of simplices
NT = None
# Number of nodes
NN = None
# mass matrix < phi_i, phi_j >
M = None
# flow matrix < nabla phi_i, phi_j >
B = None
# stiffness matrix < nabla phi_i, nabla phi_j >
G = None
# right hand side < 1, phi_j >
U = None
# Declare pointer types
c_double_p = ctypes.POINTER(ctypes.c_double)
c_uint_p = ctypes.POINTER(ctypes.c_uint)
_libPath = os.path.join( os.path.dirname( __file__), "../libraries/libSPDEC.so" )
_libInstance = None
def __init__(self, simplices, nodes, calculate = ['M'], libPath = None):
'''
Maps point values for each simplex to stiffness, mass, and ... matrix
Computes inner products between two pairs of basis functions for each simplex, then assembles them.
So far only implemented for piecewise linear basis functions.
'''
if libPath is not None:
self._libPath = libPath
# Instantiate C library
self._libInstance = ctypes.CDLL(self._libPath)
# Declare mapTrivals2Mat function
self._libInstance.FEM_mapTrivals2Mat.restype = ctypes.c_int
self._libInstance.FEM_mapTrivals2Mat.argtypes = [ \
self.c_double_p, self.c_uint_p, self.c_uint_p, self.c_uint_p, \
ctypes.c_uint, ctypes.c_uint, \
self.c_double_p, ctypes.c_uint, ctypes.c_uint, \
self.c_uint_p, ctypes.c_uint, ctypes.c_uint \
]
self.D = nodes.shape[1]
self.NN = nodes.shape[0]
self.topD = simplices.shape[1]-1
self.NT = simplices.shape[0]
if "M" in calculate:
self.computeM( simplices, nodes )
if "B" in calculate:
self.computeB( simplices, nodes )
if "G" in calculate:
self.computeG( simplices, nodes )
if "U" in calculate:
self.computeU( simplices, nodes )
return
def copy(self):
"""
:return Deep copy of self.
"""
out = MatMaps( np.array([[]]), np.array([[]]), calculate = [], libPath = self._libPath )
out.D = self.D
out.topD = self.topD
out.NT = self.NT
out.NN = self.NN
out.M = self.M
out.B = self.B
out.G = self.G
out.U = self.U
return out
def computeM(self, simplices, nodes):
"""
Computes inner products between pairs of basis functions for each simplex in mesh.
:param simplices: The simplices of the mesh.
:param nodes: The nodes of the mesh.
:return The matrix mapping a vector of size the number of simplices to each combination of basis function pairs. The matrix will be very sparse and is stored in a special format defined in 'acquireSmallerMatrix'.
"""
# Preallocate
self.M = { \
"data" : np.NaN * np.zeros(( self.NT * ( (self.topD+1) ** 2 ) ), dtype=np.double), \
"row" : np.zeros(( self.NT * ( (self.topD+1) ** 2 ) ), dtype=np.uintc), \
"col" : np.zeros(( self.NT * ( (self.topD+1) ** 2 ) ), dtype=np.uintc) \
}
# Compute
self.M = self.computeGeneric( simplices, nodes, 0, self.M )
# Assemble sparse matrix
self.M = MatMaps.acquireSmallerMatrix( sparse.coo_matrix((self.M["data"], (self.M["row"], self.M["col"])), shape=(self.NN**2, self.NT)) )
return
def computeB(self, simplices, nodes):
"""
Computes inner products between any basis function and the gradient of any basis function for each simplex in mesh.
:param simplices: The simplices of the mesh.
:param nodes: The nodes of the mesh.
:return The matrix mapping a vector of size the number of simplices to each combination of basis function pairs. The matrix will be very sparse and is stored in a special format defined in 'acquireSmallerMatrix'. The output is a list with one element for each dimension of the hyperplane in which the manifold is embedded.
"""
self.B = [None] * self.D
for iter in range(self.D):
# Preallocate
self.B[iter] = { \
"data" : np.NaN * np.zeros(( self.NT * ( (self.topD+1) ** 2 ) ), dtype=np.double), \
"row" : np.zeros(( self.NT * ( (self.topD+1) ** 2 ) ), dtype=np.uintc), \
"col" : np.zeros(( self.NT * ( (self.topD+1) ** 2 ) ), dtype=np.uintc) \
}
# Compute
self.B[iter] = self.computeGeneric( simplices, nodes, 1+iter, self.B[iter] )
# Assemble sparse matrix
self.B[iter] = MatMaps.acquireSmallerMatrix( sparse.coo_matrix((self.B[iter]["data"], (self.B[iter]["row"], self.B[iter]["col"])), shape=(self.NN**2, self.NT)) )
return
def computeG(self, simplices, nodes):
"""
Computes inner products between the gradient of any basis function and the gradient of any other basis function for each simplex in mesh.
:param simplices: The simplices of the mesh.
:param nodes: The nodes of the mesh.
:return The matrix mapping a vector of size the number of simplices to each combination of basis function pairs. The matrix will be very sparse and is stored in a special format defined in 'acquireSmallerMatrix'. The output is a list with one element for each combination of dimension pairs of the hyperplane in which the manifold is embedded.
"""
self.G = [None] * (self.D**2)
for iter in range(self.D**2):
# Preallocate
self.G[iter] = { \
"data" : np.NaN * np.zeros(( self.NT * ( (self.topD+1) ** 2 ) ), dtype=np.double), \
"row" : np.zeros(( self.NT * ( (self.topD+1) ** 2 ) ), dtype=np.uintc), \
"col" : np.zeros(( self.NT * ( (self.topD+1) ** 2 ) ), dtype=np.uintc) \
}
# Compute
self.G[iter] = self.computeGeneric( simplices, nodes, 1+self.D + iter, self.G[iter] )
# Assemble sparse matrix
self.G[iter] = MatMaps.acquireSmallerMatrix( sparse.coo_matrix((self.G[iter]["data"], (self.G[iter]["row"], self.G[iter]["col"])), shape=(self.NN**2, self.NT)) )
return
def computeU(self, simplices, nodes):
"""
Computes inner products between any basis function and 1 for each simplex in mesh.
:param simplices: The simplices of the mesh.
:param nodes: The nodes of the mesh.
:return The matrix mapping a vector of size the number of simplices to the nodes of the mesh. The matrix will be very sparse and is stored in a special format defined in 'acquireSmallerMatrix'.
"""
# Preallocate
self.U = { \
"data" : np.NaN * np.zeros(( self.NT * ( (self.topD+1) ) ), dtype=np.double), \
"row" : np.zeros(( self.NT * ( (self.topD+1) ) ), dtype=np.uintc), \
"col" : np.zeros(( self.NT * ( (self.topD+1) ) ), dtype=np.uintc) \
}
# Compute
self.U = self.computeGeneric( simplices, nodes, 1 + self.D + self.D**2, self.U )
# Assemble sparse matrix
self.U = MatMaps.acquireSmallerMatrix( sparse.coo_matrix((self.U["data"], (self.U["row"], self.U["col"])), shape=(self.NN, self.NT)) )
return
def computeGeneric(self, simplices, nodes, matId, tempMat):
"""
Function for computing either M, B, G, or U matrices using the low-level library.
:param simplices: The simplices of the mesh.
:param nodes: The nodes of the mesh.
:param matId: Code informing which matrix to compute: 0 being the M matrix, 1 to D being the different dimensions of the B matrix, D+1 to D + D^2 being the different dimensions of the G matrix, and 1 + D + D^2 being the U matrix.
:param tempMat: A sparse matrix, typically preallocated to an appropriate size.
:return the corresponding sparse N^2 x T matrix, where N is the number of nodes and T the number of simplices.
"""
data_p = tempMat["data"].ctypes.data_as(self.c_double_p)
row_p = tempMat["row"].ctypes.data_as(self.c_uint_p)
col_p = tempMat["col"].ctypes.data_as(self.c_uint_p)
idx = ctypes.c_uint( np.uintc(0) )
nodes_p = nodes.ctypes.data_as( self.c_double_p )
simplices_p = simplices.ctypes.data_as( self.c_uint_p )
# Assemble
status = self._libInstance.FEM_mapTrivals2Mat( \
data_p, row_p, col_p, ctypes.byref( idx ), \
ctypes.c_uint( matId ), ctypes.c_uint( tempMat["data"].size ), \
nodes_p, ctypes.c_uint( self.NN ), ctypes.c_uint( self.D ), \
simplices_p, ctypes.c_uint( self.NT ), ctypes.c_uint( self.topD ) \
)
if status != 0:
raise Exception( "Uknown error occured! Error status: " + str(status) )
# Remove unused
tempMat["data"] = tempMat["data"][0:idx.value]
tempMat["row"] = tempMat["row"][0:idx.value]
tempMat["col"] = tempMat["col"][0:idx.value]
return tempMat
def acquireSmallerMatrix( COOMat ):
"""
Make matrix smaller by removing zero rows
:param COOMat: The large sparse matrix.
:return The compressed large sparse matrix. Stored as a smaller sparse matrix with all unnecessary rows removed, together with an indexing of which rows these are.
"""
# Get all unique rows
uniqueRows = np.unique( COOMat.row, return_index=True, return_inverse = True )
# Change row index to the new indexing
COOMat.row = uniqueRows[2]
# Resize matrix to remove unnecessary part
COOMat.resize( (uniqueRows[0].size, COOMat.shape[1]) )
# Store matrix and original row index
return { "CSRMatrix" : COOMat.tocsr(), "originalRow" : uniqueRows[0] }
def mapTriVals2Mat( matrix, vector, N ):
"""
Map values at triangles to a system matrix on the nodes.
:param matrix: A matrix acquired using computeGeneric or any of the wrapper of it, viz. computeM, computeB, computeG, and computeU.
:param vector: A vector of constants for each simplex.
:param N: The shape of the output matrix.
:return A N[0] x N[1] matrix to use for the system of linear equations in the finite element method.
"""
if np.isscalar(N):
N = N * np.array([1,1])
if np.isscalar(vector):
vector = vector * np.ones(matrix["CSRMatrix"].shape[1], dtype="float64" )
# Compute from simplex to basis
out = matrix["CSRMatrix"] * vector
if N[1] == 1:
# Create sparse matrix of output
out = sparse.coo_matrix( \
( out, \
( matrix["originalRow"].astype(np.uintc), \
np.zeros( (matrix["originalRow"].size) ).astype(np.uintc) ) ), \
shape = N )
else:
# Create sparse matrix of output
out = sparse.coo_matrix( \
( out, \
( np.floor( matrix["originalRow"] / N[0]).astype(np.uintc), \
(matrix["originalRow"] % N[0]).astype(np.uintc) ) ), \
shape = N )
return out.tocsr()
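def _demoMassLumping(matMaps):
    # Hedged sketch (helper name is an assumption): assemble the mass matrix from
    # precomputed inner products and mass-lump it by row sums, exactly as done in
    # FEM.updateSystem above. `matMaps` is assumed to be a MatMaps instance
    # computed with 'M' in its `calculate` list.
    C = MatMaps.mapTriVals2Mat(matMaps.M, 1, matMaps.NN)
    CLumped = np.asarray(C.sum(axis=1)).reshape(-1)  # diagonal of the lumped mass matrix
    return CLumped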
class abstractDeformed(FEM):
"""
Generic FEM child class for the deformed Matern models.
The childParams parameter completely decides the model.
"""
# parameters of the inherited model
childParams = None
# Dictionary of what matMaps parameters to calculate
matMapsCalculate = None
matMapsCalculateEdges = None
@abc.abstractmethod
def paramsFunction(self):
# Function for computing FEM params from child params
return
def __init__( self, mesh, childParams, nu, sigma, mu = 0, libPath = None, BCDirichlet = None, BCRobin = None, sourceCoeff = None, factorize = True ):
# Acquire necessary maps from mesh cells to system matrices
if sourceCoeff is not None:
self.matMapsCalculate.append('U')
# Acquire necessary maps from mesh edges to system matrices
if BCRobin is not None:
self.matMapsCalculateEdges = []
if np.any(BCRobin[:, 0] != 0):
self.matMapsCalculateEdges.append('U')
if np.any(BCRobin[:, 1] != 0):
self.matMapsCalculateEdges.append('M')
# Parent init
super(abstractDeformed, self).__init__(mesh, \
matMapsCalculate = self.matMapsCalculate, \
matMapsCalculateEdges = self.matMapsCalculateEdges, \
libPath = libPath\
)
# Update system
self.updateSystem( childParams = childParams, nu = nu, mu = mu, sigma = sigma, sourceCoeff = sourceCoeff, BCRobin = BCRobin, BCDirichlet = BCDirichlet, factorize = factorize )
return
def copy(self):
out = type(self)( None, None, None, None)
out = super(abstractDeformed, self).copyParent(out)
out.childParams = self.childParams
out.matMapsCalculate = self.matMapsCalculate
out.matMapsCalculateEdges = self.matMapsCalculateEdges
return out
def updateSystem( self, childParams, nu, sigma, mu = None, BCDirichlet = None, BCRobin = None, sourceCoeff = None, factorize = True ):
if self.mesh is None:
return
# Setup system
self.childParams = childParams
self.nu = nu
d = self.mesh.topD
alpha = nu + d / 2.0
tau = np.sqrt( special.gamma(nu) / ( special.gamma(alpha) * (4 * np.pi)**(d/2) ) ) / ( sigma )
if np.isscalar(tau):
tau = tau * np.ones((self.mesh.N))
MCoeff, BCoeff, GCoeff = self.paramsFunction( )
# Parent init
super(abstractDeformed, self).updateSystem( \
MCoeff = MCoeff, \
tau = tau, nu = nu, mu = mu, \
BCoeff = BCoeff, \
GCoeff = GCoeff, \
sourceCoeff = sourceCoeff, \
BCRobin = BCRobin, \
BCDirichlet = BCDirichlet, \
factorize = factorize \
)
return
class MaternFEM(abstractDeformed):
"""
Class representing the classical Matérn model.
"""
matMapsCalculate = ['M', 'G']
matMapsCalculateEdges = None
def paramsFunction( self ):
"""
Defines the standard Matérn SPDE model parameterized by a correlation range ('r').
:return The coefficients for the M, B, and G matrices.
"""
if self.childParams is None:
raise Exception("No r-parameter given")
r = self.childParams
if isinstance( r, dict):
r = r["r"]
d = self.mesh.embD
alpha = self.nu + d / 2
logGSqrt = - d * np.log( r/np.sqrt(8*self.nu) )
GInv = ( np.exp( - 2 / d * logGSqrt) * np.eye(d) ).flatten()
MCoeff = np.exp( 1/alpha * logGSqrt )
BCoeff = None
GCoeff = [None] * GInv.size
for iterGInv in range(GInv.size):
if GInv[iterGInv] != 0:
GCoeff[iterGInv] = MCoeff * GInv[iterGInv]
return (MCoeff, BCoeff, GCoeff)
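# Illustrative end-to-end sketch (commented out; the mesh construction depends on
# fieldosophy's mesh module and the parameter values are arbitrary assumptions):
#   fem = MaternFEM( mesh = mesh, childParams = {'r': 0.3}, nu = 1.5, sigma = 1.0 )
#   Z = fem.generateRandom( 10 )                  # 10 realizations at the mesh nodes
#   condFem = fem.cond( y, A, sigmaEps = 0.01 )   # condition on observations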
class anisotropicMaternFEM(abstractDeformed):
"""
Class representing the anisotropic Matérn model in two dimensions.
"""
matMapsCalculate = ['M', 'G']
matMapsCalculateEdges = None
def paramsFunction( self ):
"""
Generate FEM model for anisotropic Matérn model in two dimensions given an angle of the main direction ('angle') and two correlation ranges ('r').
:return The coefficients for the M, B, and G matrices.
"""
if not self.mesh.embD == 2:
raise Exception("Current class only defined for manifolds embedded in R^2!")
if self.childParams is None:
raise Exception("No parameters given")
if not isinstance( self.childParams, dict):
raise Exception("Parameters were not given in dictionary format")
alpha = self.nu + self.mesh.topD / 2
logGSqrt, GInv = orthVectorsToG( angleToVecs2D(self.childParams["angle"]).transpose(), self.childParams["r"] / np.sqrt(8*self.nu) )
# Set FEM parameters
MCoeff = np.exp( 1/alpha * logGSqrt )
BCoeff = None
GCoeff = [None] * (self.mesh.embD**2)
if GInv is not None:
for iterGInv in range(self.mesh.embD**2):
if GInv[iterGInv] != 0:
GCoeff[iterGInv] = MCoeff * GInv[iterGInv]
return (MCoeff, BCoeff, GCoeff)
class nonStatFEM(abstractDeformed):
"""
Class representing the general deformed Matérn model.
"""
matMapsCalculate = ['M', 'G']
matMapsCalculateEdges = None
def paramsFunction( self ):
"""
Function to map child parameters to FEM parameters.
In the member object 'childParams', either a function is provided under the name 'f' or it is assumed that the parameters 'logGSqrt' and 'GInv' are provided.
If a function was provided under the name 'f', this function should take the dictionary self.childParams as its argument.
It should output a tuple corresponding to logGSqrt and GInv.
logGSqrt is the logarithm of the squared determinant of G
GInv is the inverse of G
:return: a tuple corresponding to the coefficients of M, B, and G.
"""
if self.childParams is None:
raise Exception("No parameters given")
if not isinstance( self.childParams, dict):
raise Exception("Parameters were not given in dictionary format")
# Compute kappa and H
logGSqrt = None
GInv = None
if "f" in self.childParams:
logGSqrt, GInv = self.childParams["f"]( self.childParams )
else:
logGSqrt = self.childParams["logGSqrt"]
GInv = self.childParams["GInv"]
alpha = self.nu + self.mesh.topD / 2
# Set FEM parameters
MCoeff = np.exp( 1/alpha * logGSqrt )
BCoeff = None
GCoeff = [None] * (self.mesh.embD**2)
if GInv is not None:
for iterGInv in range(self.mesh.embD**2):
if np.any(GInv[iterGInv] != 0):
GCoeff[iterGInv] = MCoeff * GInv[iterGInv]
return (MCoeff, BCoeff, GCoeff)
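# Illustrative sketch (commented out; all names are assumptions): a nonStatFEM model
# where the metric is supplied through a callable 'f' in childParams, returning the
# (logGSqrt, GInv) pair described in paramsFunction above. Up to the sqrt(8*nu)
# scaling this mirrors what anisotropicMaternFEM does internally.
#   def metric(params):
#       return orthVectorsToG( angleToVecs2D( params["angle"] ).transpose(), params["r"] )
#   fem = nonStatFEM( mesh, childParams = {"f": metric, "angle": angles, "r": ranges},
#                     nu = 1.5, sigma = 1.0 )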
def angleToVecs2D( angle ):
"""
:param angle: An angle [radians] for the main direction (from x-axis).
:return A 2D-array which columns are two orthogonal unit vectors, the first pointing in the direction of angle
"""
if isinstance( angle, np.ndarray):
rotMat = np.stack( ( np.cos(angle), -np.sin(angle), np.sin(angle), np.cos(angle) ) )
if angle.size > 0:
rotMat = rotMat.transpose((1,0)).reshape( (angle.size, 2, 2) )
else:
rotMat = rotMat.reshape( (2, 2) )
else:
rotMat = np.array( [ [ np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)] ] )
vectors = np.matmul( rotMat, np.eye(2) )
return vectors
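def _demoAngleToVecs2D():
    # Hedged sketch (helper name is an assumption): for a 45-degree main direction
    # the columns of the result are orthogonal unit vectors, the first one along the angle.
    vecs = angleToVecs2D(np.pi / 4)
    assert np.allclose(vecs[:, 0], np.array([1.0, 1.0]) / np.sqrt(2))
    assert np.isclose(np.dot(vecs[:, 0], vecs[:, 1]), 0.0)
    return vecs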
def tangentVectorsOnSphere( points, northPole = np.array([0.0,0.0,1.0]) ):
"""
Acquire a basis for the tangent space at given points on the surface of the unit sphere.
:param points: N x 3 array of N points at which to acquire basis of tangent space.
:param northPole: 3 array of point corresponding to the north pole.
:return A N x 3 x 3 array. Each point has three orthogonal tangent vectors of unit length.
They are constructed such that the first vector points towards the 'northPole'.
The second vector is orthogonal to both the first vector and the vector from the origin to the point of interest.
The third vector is equal to the vector between the origin and the point of interest.
The last dimension represents the elements of the vectors. The next-to-last dimension indexes the vectors.
"""
vectors = np.zeros( (points.shape[0], 3,3) )
# Get third vector
vectors[:, 2, :] = points / np.linalg.norm(points, axis= 1).reshape((-1,1))
# Get second vector
vectors[:, 1, :] = np.cross( northPole.reshape( (1,3) ), vectors[:,2,:] )
# Get first vector
vectors[:, 0, :] = np.cross( vectors[:,2,:], vectors[:,1,:] )
# Normalize vectors
lengths = np.linalg.norm( vectors, axis=2 ).reshape((-1, 3))
inds = np.any( lengths == 0.0, axis=1 )
vectors[inds, :, : ] = np.nan
vectors[~inds, :, :] = vectors[~inds, :, :] / lengths[~inds, :].reshape( (-1,3,1) )
return vectors
def orthVectorsToG( vectors, scalers ):
"""
Acquire parameters to the SPDE given the deformation
:param vectors: An N x d x d array where the first dimension indexes the simplex. The second dimension indexes the vectors.
The third dimension holds the elements of the vectors.
The set of vectors is assumed to be orthogonal and normalized.
:param scalers: An N x d array of scalers in each of the vector directions.
:return the logarithm of the squared determinant of G, and the inverse of G. Here, G being the metric tensor.
"""
if (vectors.ndim == 2):
vectors = vectors.reshape((1,vectors.shape[0], vectors.shape[1]))
if (scalers.ndim == 1):
scalers = scalers.reshape((1,-1))
n = scalers.shape[0]
d = scalers.shape[1]
# Compute logarithm of G squared
logGSqrt = - np.sum( np.log(scalers), axis = 1 )
temp = scalers.reshape( (n,d,1) ) * vectors[:, :d, :]
GInv = np.ones( (n,vectors.shape[1],vectors.shape[1]) )
for iter in range(vectors.shape[1]):
temp2 = temp * temp[:, :, iter].reshape((n,d,1))
GInv[:, iter, :] = np.sum( temp2, axis=1 ).reshape(n, vectors.shape[1])
GInv = GInv.reshape( (n, vectors.shape[1]**2) ).transpose()
if n == 1:
GInv = GInv.flatten()
logGSqrt = logGSqrt[0]
return (logGSqrt, GInv)
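def _demoOrthVectorsToG():
    # Hedged sketch (helper name is an assumption): metric quantities for a single
    # 2D simplex with main direction 30 degrees and direction-wise scalings (2, 0.5),
    # mirroring how anisotropicMaternFEM.paramsFunction combines these functions.
    vectors = angleToVecs2D(np.pi / 6).transpose()
    logGSqrt, GInv = orthVectorsToG(vectors, np.array([2.0, 0.5]))
    return logGSqrt, GInv  # scalar and flattened 2 x 2 inverse metric, since n == 1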
| [
"numpy.sqrt",
"numpy.log",
"numpy.array",
"ctypes.CDLL",
"numpy.linalg.norm",
"numpy.sin",
"numpy.uintc",
"numpy.arange",
"numpy.mean",
"numpy.repeat",
"numpy.cross",
"numpy.isscalar",
"scipy.sparse.eye",
"ctypes.c_uint",
"numpy.ix_",
"numpy.max",
"numpy.exp",
"scipy.sparse.diags",... | [((1272, 1303), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_double'], {}), '(ctypes.c_double)\n', (1286, 1303), False, 'import ctypes\n'), ((1322, 1351), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint'], {}), '(ctypes.c_uint)\n', (1336, 1351), False, 'import ctypes\n'), ((31682, 31713), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_double'], {}), '(ctypes.c_double)\n', (31696, 31713), False, 'import ctypes\n'), ((31732, 31761), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_uint'], {}), '(ctypes.c_uint)\n', (31746, 31761), False, 'import ctypes\n'), ((51638, 51663), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (51646, 51663), True, 'import numpy as np\n'), ((52469, 52502), 'numpy.zeros', 'np.zeros', (['(points.shape[0], 3, 3)'], {}), '((points.shape[0], 3, 3))\n', (52477, 52502), True, 'import numpy as np\n'), ((52763, 52807), 'numpy.cross', 'np.cross', (['vectors[:, 2, :]', 'vectors[:, 1, :]'], {}), '(vectors[:, 2, :], vectors[:, 1, :])\n', (52771, 52807), True, 'import numpy as np\n'), ((52911, 52941), 'numpy.any', 'np.any', (['(lengths == 0.0)'], {'axis': '(1)'}), '(lengths == 0.0, axis=1)\n', (52917, 52941), True, 'import numpy as np\n'), ((54084, 54132), 'numpy.ones', 'np.ones', (['(n, vectors.shape[1], vectors.shape[1])'], {}), '((n, vectors.shape[1], vectors.shape[1]))\n', (54091, 54132), True, 'import numpy as np\n'), ((6019, 6033), 'numpy.min', 'np.min', (['MCoeff'], {}), '(MCoeff)\n', (6025, 6033), True, 'import numpy as np\n'), ((12401, 12416), 'numpy.isscalar', 'np.isscalar', (['mu'], {}), '(mu)\n', (12412, 12416), True, 'import numpy as np\n'), ((19740, 19790), 'scipy.sparse.diags', 'sparse.diags', (['(1 / self.tau[~self.BCDirichletIndex])'], {}), '(1 / self.tau[~self.BCDirichletIndex])\n', (19752, 19790), True, 'import scipy.sparse as sparse\n'), ((22316, 22366), 'scipy.sparse.diags', 'sparse.diags', (['(1 / self.tau[~self.BCDirichletIndex])'], {}), '(1 / self.tau[~self.BCDirichletIndex])\n', (22328, 22366), True, 'import scipy.sparse as sparse\n'), ((22767, 22802), 'numpy.repeat', 'np.repeat', (['out', 'mu.shape[1]'], {'axis': '(1)'}), '(out, mu.shape[1], axis=1)\n', (22776, 22802), True, 'import numpy as np\n'), ((23534, 23584), 'scipy.sparse.diags', 'sparse.diags', (['(1 / self.tau[~self.BCDirichletIndex])'], {}), '(1 / self.tau[~self.BCDirichletIndex])\n', (23546, 23584), True, 'import scipy.sparse as sparse\n'), ((24877, 24903), 'numpy.zeros', 'np.zeros', (['(self.mesh.N, n)'], {}), '((self.mesh.N, n))\n', (24885, 24903), True, 'import numpy as np\n'), ((26272, 26295), 'scipy.sparse.issparse', 'sparse.issparse', (['matrix'], {}), '(matrix)\n', (26287, 26295), True, 'import scipy.sparse as sparse\n'), ((26627, 26667), 'numpy.zeros', 'np.zeros', (['(self.mesh.N, matrix.shape[1])'], {}), '((self.mesh.N, matrix.shape[1]))\n', (26635, 26667), True, 'import numpy as np\n'), ((26790, 26815), 'scipy.sparse.issparse', 'sparse.issparse', (['solution'], {}), '(solution)\n', (26805, 26815), True, 'import scipy.sparse as sparse\n'), ((28487, 28507), 'numpy.min', 'np.min', (['(beta, 3.25)'], {}), '((beta, 3.25))\n', (28493, 28507), True, 'import numpy as np\n'), ((28555, 28582), 'scipy.sparse.diags', 'sparse.diags', (['(CInvSqrt ** 2)'], {}), '(CInvSqrt ** 2)\n', (28567, 28582), True, 'import scipy.sparse as sparse\n'), ((28697, 28719), 'scipy.sparse.eye', 'sparse.eye', (['K.shape[0]'], {}), '(K.shape[0])\n', (28707, 28719), True, 'import scipy.sparse as sparse\n'), ((28915, 28929), 'numpy.floor', 'np.floor', 
(['beta'], {}), '(beta)\n', (28923, 28929), True, 'import numpy as np\n'), ((30260, 30281), 'numpy.arange', 'np.arange', (['(0)', 'rb.size'], {}), '(0, rb.size)\n', (30269, 30281), True, 'import numpy as np\n'), ((30665, 30686), 'numpy.arange', 'np.arange', (['(0)', 'rc.size'], {}), '(0, rc.size)\n', (30674, 30686), True, 'import numpy as np\n'), ((31798, 31823), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (31813, 31823), False, 'import os\n'), ((32414, 32440), 'ctypes.CDLL', 'ctypes.CDLL', (['self._libPath'], {}), '(self._libPath)\n', (32425, 32440), False, 'import ctypes\n'), ((41410, 41471), 'numpy.unique', 'np.unique', (['COOMat.row'], {'return_index': '(True)', 'return_inverse': '(True)'}), '(COOMat.row, return_index=True, return_inverse=True)\n', (41419, 41471), True, 'import numpy as np\n'), ((42349, 42363), 'numpy.isscalar', 'np.isscalar', (['N'], {}), '(N)\n', (42360, 42363), True, 'import numpy as np\n'), ((42421, 42440), 'numpy.isscalar', 'np.isscalar', (['vector'], {}), '(vector)\n', (42432, 42440), True, 'import numpy as np\n'), ((45817, 45833), 'numpy.isscalar', 'np.isscalar', (['tau'], {}), '(tau)\n', (45828, 45833), True, 'import numpy as np\n'), ((47213, 47241), 'numpy.exp', 'np.exp', (['(1 / alpha * logGSqrt)'], {}), '(1 / alpha * logGSqrt)\n', (47219, 47241), True, 'import numpy as np\n'), ((48615, 48643), 'numpy.exp', 'np.exp', (['(1 / alpha * logGSqrt)'], {}), '(1 / alpha * logGSqrt)\n', (48621, 48643), True, 'import numpy as np\n'), ((50509, 50537), 'numpy.exp', 'np.exp', (['(1 / alpha * logGSqrt)'], {}), '(1 / alpha * logGSqrt)\n', (50515, 50537), True, 'import numpy as np\n'), ((51552, 51561), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (51558, 51561), True, 'import numpy as np\n'), ((3068, 3088), 'numpy.isscalar', 'np.isscalar', (['self.mu'], {}), '(self.mu)\n', (3079, 3088), True, 'import numpy as np\n'), ((3229, 3250), 'numpy.isscalar', 'np.isscalar', (['self.tau'], {}), '(self.tau)\n', (3240, 3250), True, 'import numpy as np\n'), ((3407, 3430), 'numpy.isscalar', 'np.isscalar', (['self.sigma'], {}), '(self.sigma)\n', (3418, 3430), True, 'import numpy as np\n'), ((6555, 6571), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (6563, 6571), True, 'import numpy as np\n'), ((6856, 6866), 'numpy.sqrt', 'np.sqrt', (['C'], {}), '(C)\n', (6863, 6866), True, 'import numpy as np\n'), ((8495, 8549), 'numpy.ix_', 'np.ix_', (['(~self.BCDirichletIndex)', '(~self.BCDirichletIndex)'], {}), '(~self.BCDirichletIndex, ~self.BCDirichletIndex)\n', (8501, 8549), True, 'import numpy as np\n'), ((13414, 13443), 'numpy.any', 'np.any', (['(self.BCDirichlet != 0)'], {}), '(self.BCDirichlet != 0)\n', (13420, 13443), True, 'import numpy as np\n'), ((15863, 15913), 'scipy.sparse.diags', 'sparse.diags', (['(1 / self.tau[~self.BCDirichletIndex])'], {}), '(1 / self.tau[~self.BCDirichletIndex])\n', (15875, 15913), True, 'import scipy.sparse as sparse\n'), ((20804, 20813), 'numpy.any', 'np.any', (['y'], {}), '(y)\n', (20810, 20813), True, 'import numpy as np\n'), ((20820, 20835), 'numpy.any', 'np.any', (['self.mu'], {}), '(self.mu)\n', (20826, 20835), True, 'import numpy as np\n'), ((29496, 29509), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (29504, 29509), True, 'import numpy as np\n'), ((29561, 29574), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (29569, 29574), True, 'import numpy as np\n'), ((33511, 33525), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (33519, 33525), True, 'import numpy as np\n'), ((33527, 33541), 
'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (33535, 33541), True, 'import numpy as np\n'), ((34482, 34538), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1) ** 2)'], {'dtype': 'np.uintc'}), '(self.NT * (self.topD + 1) ** 2, dtype=np.uintc)\n', (34490, 34538), True, 'import numpy as np\n'), ((34570, 34626), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1) ** 2)'], {'dtype': 'np.uintc'}), '(self.NT * (self.topD + 1) ** 2, dtype=np.uintc)\n', (34578, 34626), True, 'import numpy as np\n'), ((34829, 34932), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (["(self.M['data'], (self.M['row'], self.M['col']))"], {'shape': '(self.NN ** 2, self.NT)'}), "((self.M['data'], (self.M['row'], self.M['col'])), shape=(\n self.NN ** 2, self.NT))\n", (34846, 34932), True, 'import scipy.sparse as sparse\n'), ((38615, 38666), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1))'], {'dtype': 'np.uintc'}), '(self.NT * (self.topD + 1), dtype=np.uintc)\n', (38623, 38666), True, 'import numpy as np\n'), ((38698, 38749), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1))'], {'dtype': 'np.uintc'}), '(self.NT * (self.topD + 1), dtype=np.uintc)\n', (38706, 38749), True, 'import numpy as np\n'), ((38961, 39059), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (["(self.U['data'], (self.U['row'], self.U['col']))"], {'shape': '(self.NN, self.NT)'}), "((self.U['data'], (self.U['row'], self.U['col'])), shape=(\n self.NN, self.NT))\n", (38978, 39059), True, 'import scipy.sparse as sparse\n'), ((40060, 40071), 'numpy.uintc', 'np.uintc', (['(0)'], {}), '(0)\n', (40068, 40071), True, 'import numpy as np\n'), ((40330, 40347), 'ctypes.byref', 'ctypes.byref', (['idx'], {}), '(idx)\n', (40342, 40347), False, 'import ctypes\n'), ((40367, 40387), 'ctypes.c_uint', 'ctypes.c_uint', (['matId'], {}), '(matId)\n', (40380, 40387), False, 'import ctypes\n'), ((40391, 40426), 'ctypes.c_uint', 'ctypes.c_uint', (["tempMat['data'].size"], {}), "(tempMat['data'].size)\n", (40404, 40426), False, 'import ctypes\n'), ((40455, 40477), 'ctypes.c_uint', 'ctypes.c_uint', (['self.NN'], {}), '(self.NN)\n', (40468, 40477), False, 'import ctypes\n'), ((40481, 40502), 'ctypes.c_uint', 'ctypes.c_uint', (['self.D'], {}), '(self.D)\n', (40494, 40502), False, 'import ctypes\n'), ((40535, 40557), 'ctypes.c_uint', 'ctypes.c_uint', (['self.NT'], {}), '(self.NT)\n', (40548, 40557), False, 'import ctypes\n'), ((40561, 40585), 'ctypes.c_uint', 'ctypes.c_uint', (['self.topD'], {}), '(self.topD)\n', (40574, 40585), False, 'import ctypes\n'), ((44282, 44308), 'numpy.any', 'np.any', (['(BCRobin[:, 0] != 0)'], {}), '(BCRobin[:, 0] != 0)\n', (44288, 44308), True, 'import numpy as np\n'), ((44386, 44412), 'numpy.any', 'np.any', (['(BCRobin[:, 1] != 0)'], {}), '(BCRobin[:, 1] != 0)\n', (44392, 44412), True, 'import numpy as np\n'), ((52849, 52880), 'numpy.linalg.norm', 'np.linalg.norm', (['vectors'], {'axis': '(2)'}), '(vectors, axis=2)\n', (52863, 52880), True, 'import numpy as np\n'), ((53977, 53992), 'numpy.log', 'np.log', (['scalers'], {}), '(scalers)\n', (53983, 53992), True, 'import numpy as np\n'), ((6361, 6382), 'numpy.isnan', 'np.isnan', (['BCDirichlet'], {}), '(BCDirichlet)\n', (6369, 6382), True, 'import numpy as np\n'), ((12440, 12478), 'numpy.ones', 'np.ones', (['self.mesh.N'], {'dtype': 'np.float64'}), '(self.mesh.N, dtype=np.float64)\n', (12447, 12478), True, 'import numpy as np\n'), ((12628, 12667), 'numpy.zeros', 'np.zeros', (['self.mesh.N'], {'dtype': 'np.float64'}), '(self.mesh.N, dtype=np.float64)\n', (12636, 12667), 
True, 'import numpy as np\n'), ((13634, 13655), 'numpy.zeros', 'np.zeros', (['self.mesh.N'], {}), '(self.mesh.N)\n', (13642, 13655), True, 'import numpy as np\n'), ((16006, 16016), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (16013, 16016), True, 'import numpy as np\n'), ((18127, 18149), 'numpy.sum', 'np.sum', (['(tempy * tempy2)'], {}), '(tempy * tempy2)\n', (18133, 18149), True, 'import numpy as np\n'), ((18397, 18416), 'numpy.sum', 'np.sum', (['(tempy2 ** 2)'], {}), '(tempy2 ** 2)\n', (18403, 18416), True, 'import numpy as np\n'), ((19976, 19986), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (19983, 19986), True, 'import numpy as np\n'), ((22552, 22562), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (22559, 22562), True, 'import numpy as np\n'), ((23770, 23780), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (23777, 23780), True, 'import numpy as np\n'), ((26167, 26217), 'scipy.sparse.diags', 'sparse.diags', (['(1 / self.tau[~self.BCDirichletIndex])'], {}), '(1 / self.tau[~self.BCDirichletIndex])\n', (26179, 26217), True, 'import scipy.sparse as sparse\n'), ((28617, 28645), 'scipy.sparse.diags', 'sparse.diags', (['(CInvSqrt ** -2)'], {}), '(CInvSqrt ** -2)\n', (28629, 28645), True, 'import scipy.sparse as sparse\n'), ((30388, 30409), 'numpy.max', 'np.max', (['(0, leftover)'], {}), '((0, leftover))\n', (30394, 30409), True, 'import numpy as np\n'), ((30792, 30814), 'numpy.max', 'np.max', (['(0, -leftover)'], {}), '((0, -leftover))\n', (30798, 30814), True, 'import numpy as np\n'), ((34393, 34450), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1) ** 2)'], {'dtype': 'np.double'}), '(self.NT * (self.topD + 1) ** 2, dtype=np.double)\n', (34401, 34450), True, 'import numpy as np\n'), ((35880, 35936), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1) ** 2)'], {'dtype': 'np.uintc'}), '(self.NT * (self.topD + 1) ** 2, dtype=np.uintc)\n', (35888, 35936), True, 'import numpy as np\n'), ((35972, 36028), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1) ** 2)'], {'dtype': 'np.uintc'}), '(self.NT * (self.topD + 1) ** 2, dtype=np.uintc)\n', (35980, 36028), True, 'import numpy as np\n'), ((36274, 36395), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (["(self.B[iter]['data'], (self.B[iter]['row'], self.B[iter]['col']))"], {'shape': '(self.NN ** 2, self.NT)'}), "((self.B[iter]['data'], (self.B[iter]['row'], self.B[iter]\n ['col'])), shape=(self.NN ** 2, self.NT))\n", (36291, 36395), True, 'import scipy.sparse as sparse\n'), ((37394, 37450), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1) ** 2)'], {'dtype': 'np.uintc'}), '(self.NT * (self.topD + 1) ** 2, dtype=np.uintc)\n', (37402, 37450), True, 'import numpy as np\n'), ((37486, 37542), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1) ** 2)'], {'dtype': 'np.uintc'}), '(self.NT * (self.topD + 1) ** 2, dtype=np.uintc)\n', (37494, 37542), True, 'import numpy as np\n'), ((37785, 37906), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (["(self.G[iter]['data'], (self.G[iter]['row'], self.G[iter]['col']))"], {'shape': '(self.NN ** 2, self.NT)'}), "((self.G[iter]['data'], (self.G[iter]['row'], self.G[iter]\n ['col'])), shape=(self.NN ** 2, self.NT))\n", (37802, 37906), True, 'import scipy.sparse as sparse\n'), ((38531, 38583), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1))'], {'dtype': 'np.double'}), '(self.NT * (self.topD + 1), dtype=np.double)\n', (38539, 38583), True, 'import numpy as np\n'), ((42385, 42401), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (42393, 42401), True, 'import numpy as np\n'), ((42472, 42526), 'numpy.ones', 'np.ones', (["matrix['CSRMatrix'].shape[1]"], {'dtype': '"""float64"""'}), "(matrix['CSRMatrix'].shape[1], dtype='float64')\n", (42479, 42526), True, 'import numpy as np\n'), ((45859, 45879), 'numpy.ones', 'np.ones', (['self.mesh.N'], {}), '(self.mesh.N)\n', (45866, 45879), True, 'import numpy as np\n'), ((48539, 48559), 'numpy.sqrt', 'np.sqrt', (['(8 * self.nu)'], {}), '(8 * self.nu)\n', (48546, 48559), True, 'import numpy as np\n'), ((50708, 50735), 'numpy.any', 'np.any', (['(GInv[iterGInv] != 0)'], {}), '(GInv[iterGInv] != 0)\n', (50714, 50735), True, 'import numpy as np\n'), ((51175, 51188), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (51181, 51188), True, 'import numpy as np\n'), ((51206, 51219), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (51212, 51219), True, 'import numpy as np\n'), ((51221, 51234), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (51227, 51234), True, 'import numpy as np\n'), ((52565, 52595), 'numpy.linalg.norm', 'np.linalg.norm', (['points'], {'axis': '(1)'}), '(points, axis=1)\n', (52579, 52595), True, 'import numpy as np\n'), ((54258, 54279), 'numpy.sum', 'np.sum', (['temp2'], {'axis': '(1)'}), '(temp2, axis=1)\n', (54264, 54279), True, 'import numpy as np\n'), ((6255, 6275), 'numpy.ones', 'np.ones', (['self.mesh.N'], {}), '(self.mesh.N)\n', (6262, 6275), True, 'import numpy as np\n'), ((7595, 7649), 'numpy.ix_', 'np.ix_', (['(~self.BCDirichletIndex)', '(~self.BCDirichletIndex)'], {}), '(~self.BCDirichletIndex, ~self.BCDirichletIndex)\n', (7601, 7649), True, 'import numpy as np\n'), ((12984, 13023), 'numpy.zeros', 'np.zeros', (['self.mesh.N'], {'dtype': 'np.float64'}), '(self.mesh.N, dtype=np.float64)\n', (12992, 13023), True, 'import numpy as np\n'), ((13924, 13963), 'numpy.zeros', 'np.zeros', (['self.mesh.N'], {'dtype': 'np.float64'}), '(self.mesh.N, dtype=np.float64)\n', (13932, 13963), True, 'import numpy as np\n'), ((16739, 16756), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (16745, 16756), True, 'import numpy as np\n'), ((35787, 35844), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1) ** 2)'], {'dtype': 'np.double'}), '(self.NT * (self.topD + 1) ** 2, dtype=np.double)\n', (35795, 35844), True, 'import numpy as np\n'), ((37301, 37358), 'numpy.zeros', 'np.zeros', (['(self.NT * (self.topD + 1) ** 2)'], {'dtype': 'np.double'}), '(self.NT * (self.topD + 1) ** 2, dtype=np.double)\n', (37309, 37358), True, 'import numpy as np\n'), ((45717, 45734), 'scipy.special.gamma', 'special.gamma', (['nu'], {}), '(nu)\n', (45730, 45734), True, 'import scipy.special as special\n'), ((47088, 47108), 'numpy.sqrt', 'np.sqrt', (['(8 * self.nu)'], {}), '(8 * self.nu)\n', (47095, 47108), True, 'import numpy as np\n'), ((47126, 47151), 'numpy.exp', 'np.exp', (['(-2 / d * logGSqrt)'], {}), '(-2 / d * logGSqrt)\n', (47132, 47151), True, 'import numpy as np\n'), ((47156, 47165), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (47162, 47165), True, 'import numpy as np\n'), ((51191, 51204), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (51197, 51204), True, 'import numpy as np\n'), ((51442, 51455), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (51448, 51455), True, 'import numpy as np\n'), ((51475, 51488), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (51481, 51488), True, 'import numpy as np\n'), ((51490, 51503), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (51496, 51503), True, 'import numpy as np\n'), ((7532, 7548), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (7540, 7548), True, 'import numpy as np\n'), ((45739, 45759), 'scipy.special.gamma', 'special.gamma', (['alpha'], {}), '(alpha)\n', (45752, 45759), True, 'import scipy.special as special\n'), ((51458, 51471), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (51464, 51471), True, 'import numpy as np\n'), ((7499, 7528), 'numpy.sum', 'np.sum', (['self.BCDirichletIndex'], {}), '(self.BCDirichletIndex)\n', (7505, 7528), True, 'import numpy as np\n'), ((14164, 14218), 'numpy.ix_', 'np.ix_', (['(~self.BCDirichletIndex)', '(~self.BCDirichletIndex)'], {}), '(~self.BCDirichletIndex, ~self.BCDirichletIndex)\n', (14170, 14218), True, 'import numpy as np\n'), ((24946, 24976), 'numpy.sum', 'np.sum', (['(~self.BCDirichletIndex)'], {}), '(~self.BCDirichletIndex)\n', (24952, 24976), True, 'import numpy as np\n'), ((42836, 42872), 'numpy.zeros', 'np.zeros', (["matrix['originalRow'].size"], {}), "(matrix['originalRow'].size)\n", (42844, 42872), True, 'import numpy as np\n'), ((43070, 43108), 'numpy.floor', 'np.floor', (["(matrix['originalRow'] / N[0])"], {}), "(matrix['originalRow'] / N[0])\n", (43078, 43108), True, 'import numpy as np\n'), ((13144, 13198), 'numpy.mean', 'np.mean', (["self.tau[self.mesh.boundary['edges']]"], {'axis': '(1)'}), "(self.tau[self.mesh.boundary['edges']], axis=1)\n", (13151, 13198), True, 'import numpy as np\n')] |
from .Beamline import StructuredBeamline
import numpy as np
from scipy.constants import c
from re import findall
# lengths: mm
# quad strength: T/m
# bend angles: deg
# Need to handle elements that can have different number of parameters depending on mode
class BeamlineParser(object):
"""
Class that will parse a .lte file and return a StructuredBeamline object.
May be used to then construct an equivalent beamline in other input formats.
"""
def __init__(self, filename, beamline_name):
self.filename = filename
self.beamline_name = beamline_name.lower()
self.beamline = StructuredBeamline(beamline_name) # Holds StructuredBeamline object
self.beamline_string = ''
self.lines = {}
self.rpn_variables = {}
self.global_parameters = {}
self.comment_character = '!'
try:
with open(filename, 'r') as open_file:
self.lattice_definition = open_file.readlines()
except IOError:
print("File could not be read")
def __call__(self):
"""
Runs parser for set beamline and returns StructuredBeamline object
:return:
"""
self._sanitize_lattice_definition()
self.find_beamlines()
self.parse_set_beamline()
return self.beamline
def _sanitize_lattice_definition(self):
"""
        Performs a number of miscellaneous cleanup functions.
All run here to minimize number of times the lattice definition is cycled.
Performs:
Remove commented and empty lines from lte file
Concatenate lines ending in continuation character to their start
store rpn variables
"""
for linenumber in range(len(self.lattice_definition) - 1, -1, -1):
self.lattice_definition[linenumber] = self.lattice_definition[linenumber].lower()
# Remove lines starting with comment
try:
if self.lattice_definition[linenumber].lstrip()[0] == self.comment_character:
self.lattice_definition.pop(linenumber)
except IndexError:
self.lattice_definition.pop(linenumber)
continue
# Remove anything after a comment midline
if self.lattice_definition[linenumber].find(self.comment_character) > -1:
self.lattice_definition[linenumber] = \
self.lattice_definition[linenumber][:self.lattice_definition[linenumber].find(self.comment_character)]
# Need to include this if elegant
# try:
# if self.lattice_definition[linenumber].rstrip()[-1] == '&':
# self.lattice_definition[linenumber] = self.lattice_definition[linenumber].rstrip()[:-1] + \
# self.lattice_definition[linenumber + 1].strip()
# self.lattice_definition.pop(linenumber + 1)
# except IndexError:
# print()
# print("Line continuation concatenation may have failed.")
# raise
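# A minimal, standalone sketch of the '&' line-continuation merge that the
# commented-out block above describes (hedged: this helper and its name are
# illustrative additions, not part of the original parser):
def _join_continuations(lines, cont_char='&'):
    """Merge any line ending in `cont_char` with the line that follows it."""
    merged = []
    for line in lines:
        if merged and merged[-1].rstrip().endswith(cont_char):
            # drop the trailing continuation character, then splice
            merged[-1] = merged[-1].rstrip()[:-1] + line.strip()
        else:
            merged.append(line.rstrip('\n'))
    return merged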
class Trace3d(BeamlineParser):
elements = {
'drift': ['L'],
'quadrupole': ['K1', 'L', 'DX', 'DY', 'OF'],
'rfca': ['VOLT', 'PHASE', 'EGF', 'CHANGE_P0', 'H'],
'edge': ['BETA', 'R', 'G', 'K1', 'K2'],
'sbend': ['ANGLE', 'R', 'INDEX', 'VF']
}
classifiers = {
1: 'drift',
3: 'quadrupole',
8: 'sbend',
9: 'edge',
10: 'rfca'
}
conversions = {
# Multiply by to get BeamLine Units (mostly SI)
# length
'L': 1e-3,
# dipoles
'R': 1e-3,
'G': 1e-3,
'dt': np.pi / 180.,
'ANGLE': np.pi / 180.,
'BETA': np.pi / 180.,
# quadrupoles
'DX': 1e-3,
'DY': 1e-3,
# cavities
'VOLT': 1e6,
    }
    # Trace3d itself defines no element-specific handlers; TraceWin overrides
    # this. An empty dict keeps the KeyError fallback in parse_set_beamline
    # from raising AttributeError instead.
    handlers = {}
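    # Worked example of the conversions above: a drift with L = 150 (mm)
    # becomes 150 * 1e-3 = 0.15 m, and a bend ANGLE of 30 (deg) becomes
    # 30 * pi / 180 ~= 0.5236 rad.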
def __init__(self, filename, beamline_name):
super(Trace3d, self).__init__(filename, beamline_name)
self.comment_character = ';'
self.beamline_start_position = None
self.beamline_end_position = None
# def __call__(self, *args, **kwargs):
# self._sanitize_lattice_definition()
# for line in self.lattice_definition:
# if line.find('cmt') > -1:
# print(self.get_element_name(line), self.get_element_type(line))
def get_element_name(self, line):
name_def = 'cmt('
def_start = line.find(name_def)
def_end = line.find(')')
return line[def_start + len(name_def):def_end].strip()
def get_element_type(self, line):
def_start = line.find('nt')
def_char_start = line[def_start:].find('=')
        element_type = findall(r'\d+', line[def_start + def_char_start:])[0]
return int(element_type)
def get_element_parameters(self, line, type):
val_start = line.find('a(')
val_end = line[val_start:].find('=') + val_start
values = findall(r"[-+]?\d*\.\d+|\d+", line[val_end:])
names = self.elements[self.classifiers[type]]
param_dict = {}
for param_name, param_val in zip(names, values):
param_dict[param_name] = float(param_val)
if len(names) != len(values):
print("WARNING: Element {} {} had a parameter mismatch".format(self.get_element_name(line),
self.classifiers[self.get_element_type(line)]))
return param_dict
def _convert_units(self, element_parameters):
for param, conversion in self.conversions.items():
try:
element_parameters[param] = element_parameters[param] * conversion
except KeyError:
pass
def _standardize(self):
# This is an awkward catchall to fix non-standard parameter definitions
for ele in self.beamline.get_beamline_elements():
# Add arclengths to dipoles
if ele.type == 'sbend':
ele.parameters['l'] = np.abs(ele.parameters['r'] * ele.parameters['angle'])
def find_beamlines(self):
lines = []
for i, line in enumerate(self.lattice_definition):
if line.find('cmt') > -1:
lines.append(i)
self.beamline_start_position = np.min(lines)
self.beamline_end_position = np.max(lines)
def parse_set_beamline(self):
for line in self.lattice_definition[self.beamline_start_position:self.beamline_end_position]:
name = self.get_element_name(line)
type = self.get_element_type(line)
parameters = self.get_element_parameters(line, type)
self._convert_units(parameters)
try:
getattr(self, self.handlers[type])(name, parameters, self.beamline)
except KeyError:
self.beamline.add_element(str(name), self.classifiers[type], parameters)
self._standardize() # TODO: Change this to handler
def handle_bends(self):
bend_edges = []
seq = self.beamline.sequence
for i, ele in enumerate(seq):
if ele.type == 'sbend':
E1 = seq[i - 1].parameters['BETA']
E2 = seq[i + 1].parameters['BETA']
HGAP = seq[i - 1].parameters['G'] / 2.
FINT = seq[i - 1].parameters['K1']
self.beamline.edit_element(i, ['E1', 'E2', 'HGAP', 'FINT'], [E1, E2, HGAP, FINT])
bend_edges.extend([seq[i - 1], seq[i + 1]])
for edge in bend_edges:
seq.remove(edge)
def handle_cavities(self):
cavities = []
phases = []
seq = self.beamline.sequence
for i, ele in enumerate(seq):
if ele.type == 'rfca':
cavities.append(i)
phases.append(90. - ele.parameters['phase'] )
if True:
L = seq[i - 1].parameters['L'] + seq[i + 1].parameters['L']
self.beamline.edit_element(i - 1, ['L',], [0.0,])
self.beamline.edit_element(i + 1, ['L',], [0.0,])
self.beamline.edit_element(i, ['L', ], [L, ])
        # pair each cavity with its own phase instead of setting every cavity
        # from the full phase list
        for cav, phase in zip(cavities, phases):
            self.beamline.edit_element(cav, ['phase'], [phase])
class TraceWin(Trace3d):
elements = {
'drift': ['L', 'R', 'Ry', 'RX_SHIFT', 'RY_SHIFT'],
'quadrupole': ['L', 'K1', 'R', 'THETA', 'G3U3', 'G4U4', 'G5U5', 'G6U6', 'GFR'],
'sbend': ['ANGLE', 'R', 'INDEX', 'VF'],
'dtl_cell': ['L', 'LQ1', 'LQ2', 'GC', 'B1', 'B2', 'VOLT', 'PHASE', 'R', 'P', 'BETA_S', 'T_S', 'KTS', 'K2TS'],
'marker': [],
'rfca': ['MODE', 'N', 'BETA', 'VOLT', 'PHASE', 'R', 'P', 'KE0TI', 'KE0TO', 'DZI', 'DZO'],
'freq': ['FREQUENCY', ],
'field_map': ['TYPE', 'L', 'PHASE', 'R', 'KB', 'KE', 'KI', 'KA', 'FILE']
}
classifiers = {
'drift': 'drift',
'quad': 'quadrupole',
'dtl_cel': 'dtl_cell',
'ncells': 'rfca',
'set_sync_phase': 'marker',
'freq': 'freq',
'field_map': 'field_map' # This is really dependent on field_map settings
}
conversions = {
# Multiply by to get BeamLine Units (mostly SI)
# length
'L': 1e-3,
# quadrupoles
# cavities
'LQ1': 1e-3,
'LQ2': 1e-3,
'frequency': 1e6
}
def handle_dtl_cell(self, name, element, beamline):
q1_L = element['LQ1']
q2_L = element['LQ2']
cavity_L = element['L'] - q1_L - q2_L
q1_K1 = element['B1']
q2_K1 = element['B2']
cavity_PHASE = element['PHASE'] + 90.
beamline.add_element(name + '_q1', 'quadrupole', {'k1': q1_K1, 'l': q1_L})
try:
frequency = self.global_parameters['frequency']
except KeyError:
frequency = 1.0
beamline.add_element(name + 'cav', 'rfca',
{'l': cavity_L, 'volt': element['VOLT'],
'phase': cavity_PHASE, 'frequency': frequency})
beamline.add_element(name + '_q2', 'quadrupole', {'k1': q2_K1, 'l': q2_L})
def handle_ncells(self, name, element, beamline):
assert self.global_parameters['frequency'], "NCELL must have a frequency set to determine length"
wavelength = c / self.global_parameters['frequency']
length = element['BETA'] * wavelength * element['N']
cavity_PHASE = element['PHASE'] + 90.
cavity_VOLT = element['VOLT'] * length
cavity_FREQ = self.global_parameters['frequency']
beamline.add_element(name + 'cav', 'rfca',
{'l': length, 'volt': cavity_VOLT, 'phase': cavity_PHASE,
'frequency': cavity_FREQ})
def handle_freq(self, name, element, beamline):
self.global_parameters['frequency'] = element['frequency']
def handle_field_map(self, name, element, beamline):
# Very crude implementation of this element. We will assume just 1D RF field for now.
# We will assume the field map file has max/min of 1.0 / -1.0.
frequency = self.global_parameters['frequency']
volt = element['VOLT'] * 1e6 # field map files store E-field in MV/m
beamline.add_element(name + 'cav', 'rfca',
{'l': element['L'], 'volt': volt, 'phase': element['PHASE'],
'frequency': frequency})
handlers = {
'dtl_cel': 'handle_dtl_cell',
'freq': 'handle_freq',
'ncells': 'handle_ncells',
'field_map': 'handle_field_map'
}
def __init__(self, filename, beamline_name):
super(TraceWin, self).__init__(filename, beamline_name)
self.comment_character = ';'
self.beamline_start_position = None
self.beamline_end_position = None
self.name_count = 0 # TRACEWIN doesn't name elements
def get_element_parameters(self, line, type):
values = findall(r"[-+]?\d*\.\d+|\d+", line)
names = self.elements[self.classifiers[type]]
param_dict = {}
for param_name, param_val in zip(names, values):
param_dict[param_name] = float(param_val)
if len(names) != len(values):
print("WARNING: Element {} {} had a parameter mismatch".format(self.get_element_name(line),
self.classifiers[self.get_element_type(line)]))
return param_dict
def get_element_type(self, line):
element_name = line.split()[0]
return element_name.lower()
def get_element_name(self, line):
name = 'e' + str(self.name_count)
self.name_count += 1
return name
def _standardize(self):
pass
def find_beamlines(self):
# Manually set beamline file line positions for now
pass
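# Illustrative usage of the parsers above (hedged: the lattice file name and
# beamline name are hypothetical; `get_beamline_elements` is the accessor the
# code above already relies on):
#
#     parser = TraceWin('linac.dat', 'linac')
#     beamline = parser()  # sanitize, find_beamlines, parse_set_beamline
#     for ele in beamline.get_beamline_elements():
#         print(ele.type, ele.parameters)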
| [
"numpy.max",
"re.findall",
"numpy.abs",
"numpy.min"
] | [((4982, 5030), 're.findall', 'findall', (['"""[-+]?\\\\d*\\\\.\\\\d+|\\\\d+"""', 'line[val_end:]'], {}), "('[-+]?\\\\d*\\\\.\\\\d+|\\\\d+', line[val_end:])\n", (4989, 5030), False, 'from re import findall\n'), ((6325, 6338), 'numpy.min', 'np.min', (['lines'], {}), '(lines)\n', (6331, 6338), True, 'import numpy as np\n'), ((6376, 6389), 'numpy.max', 'np.max', (['lines'], {}), '(lines)\n', (6382, 6389), True, 'import numpy as np\n'), ((12018, 12056), 're.findall', 'findall', (['"""[-+]?\\\\d*\\\\.\\\\d+|\\\\d+"""', 'line'], {}), "('[-+]?\\\\d*\\\\.\\\\d+|\\\\d+', line)\n", (12025, 12056), False, 'from re import findall\n'), ((4733, 4783), 're.findall', 'findall', (['"""\\\\d+"""', 'line[def_start + def_char_start:]'], {}), "('\\\\d+', line[def_start + def_char_start:])\n", (4740, 4783), False, 'from re import findall\n'), ((6052, 6105), 'numpy.abs', 'np.abs', (["(ele.parameters['r'] * ele.parameters['angle'])"], {}), "(ele.parameters['r'] * ele.parameters['angle'])\n", (6058, 6105), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import math
import time
import sys
def input_coordinates(filename, showmap=False):
with open(filename, 'r') as fin:
X = []
Y = []
while True:
line = fin.readline()
if not line:
break
x, y = line.split(', ')
x, y = float(x), float(y)
X.append(x)
Y.append(y)
if showmap:
plt.scatter(X, Y)
return X, Y
def _coordinates_to_distance_table(coordinates):
distance_table = []
for x1, y1 in coordinates:
distance_list = []
for x2, y2 in coordinates:
distance_list.append(math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2))
distance_table.append(distance_list)
return distance_table
def calc_distance(path, X, Y):
distance = 0
i = 0
if isinstance(path, np.ndarray):
n_iter = path.size - 1
else:
n_iter = len(path) - 1
while i < n_iter:
present_idx = path[i]
next_idx = path[i + 1]
distance += math.sqrt((X[present_idx] - X[next_idx]) ** 2
+ (Y[present_idx] - Y[next_idx]) ** 2)
i += 1
    # close the tour between the last and first cities on the path
    distance += math.sqrt((X[path[0]] - X[path[-1]]) ** 2
                          + (Y[path[0]] - Y[path[-1]]) ** 2)
return distance
def _prob_exec(prob):
if np.random.rand() <= prob:
return True
else:
return False
def random_path(X, Y):
if len(X) != len(Y):
        sys.stderr.write('X and Y are not the same length')
n = len(X)
path = np.random.permutation(n)
return path
def _metropolis(path1, X, Y, T):
distance1 = calc_distance(path1, X, Y)
path2 = np.copy(path1)
n = path1.size
swap_cities_idx = np.random.randint(0, n, size=2)
path2[swap_cities_idx[0]], path2[swap_cities_idx[1]] = \
path2[swap_cities_idx[1]], path2[swap_cities_idx[0]]
distance2 = calc_distance(path2, X, Y)
if distance2 < distance1:
return path2, distance2
delta = distance2 - distance1
prob = math.exp(- delta / T)
if _prob_exec(prob):
return path2, distance2
else:
return path1, distance1
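# Worked example of the Metropolis rule above: a swap that lengthens the tour
# by delta = 2.0 is kept with probability exp(-delta / T); at T = 10 that is
# exp(-0.2) ~= 0.82, while at T = 0.5 it is exp(-4.0) ~= 0.018, so uphill
# moves die out as the temperature falls.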
def greedy_tsp(X, Y):
coordinates = list(zip(X, Y))
distance_table = _coordinates_to_distance_table(coordinates)
distance_table = np.array(distance_table)
num_of_cities = len(distance_table)
city = np.random.randint(0, num_of_cities)
path = np.array([], dtype='int8')
bin_path = np.ones([num_of_cities], dtype=bool)
falses = np.zeros([num_of_cities], dtype=bool)
for i in range(num_of_cities):
path = np.append(path, city)
bin_path[path[i]] = False
distance_list = distance_table[city]
        if not bin_path.any():
pass
else:
nearest_distance = np.min(distance_list[np.argwhere(bin_path)])
nearest_city = int(np.argwhere(distance_list == nearest_distance))
city = nearest_city
return path
def anneal(path, X, Y, n_iter=100000, pmelt=0.7, tgt=0.01, stagfactor=0.05,
procplt=False, color='dimgray', lw=2):
n_cities = len(path)
initial_distance = calc_distance(path, X, Y)
min_distance, max_distance = initial_distance, initial_distance
optimized_distances = []
distances = []
optimized_distances.append(initial_distance)
distances.append(initial_distance)
    for i in range(int(max(0.01 * n_cities, 2))):  # range() needs an int, not a float
path_, distance = _metropolis(path, X, Y, 10 ** 10)
if distance < min_distance:
min_distance = distance
if max_distance < distance:
max_distance = distance
range_ = (max_distance - min_distance) * pmelt
temp = tgt ** (1 / n_iter)
optimized_distance = initial_distance
optimized_step = 1
optimized_path = path
path_ = np.copy(path)
for i in range(1, n_iter):
sys.stdout.write('\r{} / {} processing...'.format(
i + 1, n_iter))
sys.stdout.flush()
T = range_ * (temp ** i)
path_, distance = _metropolis(path_, X, Y, T)
if distance < optimized_distance:
optimized_distance = distance
optimized_path = path_
optimized_step = i
optimized_distances.append(optimized_distance)
distances.append(distance)
# Reheat
if i - optimized_step == stagfactor * n_iter:
temp = temp ** (0.05 * i / n_iter)
if procplt:
plt.plot(distances, color='dimgray', lw=1)
plt.plot(optimized_distances, color='black', lw=2)
return optimized_path
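# The cooling above is geometric: with temp = tgt ** (1 / n_iter), the
# temperature at step i is T_i = range_ * tgt ** (i / n_iter), i.e. it decays
# from range_ at i = 0 down to range_ * tgt at i = n_iter. For range_ = 50 and
# tgt = 0.01 that is a smooth slide from 50 down to 0.5.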
def showmap(path, X, Y):
path_sorted_coords = [[X[city_num], Y[city_num]] for city_num in path]
path_sorted_coords.append(path_sorted_coords[0])
path_sorted_coords = np.asarray(path_sorted_coords).T
plt.xticks([])
plt.yticks([])
plt.plot(path_sorted_coords[0], path_sorted_coords[1], marker='o',
markersize=5, color='dimgray', lw=1)
if __name__ == '__main__':
# this random seed provides better result
np.random.seed(1000384)
X, Y = input_coordinates("prefs.out")
#init_path = greedy_tsp(X, Y)
init_path = random_path(X, Y)
plt.title('Annealing result')
plt.xlabel('steps')
plt.ylabel('Tour length (m)')
path = anneal(init_path, X, Y, n_iter=100000, procplt=True)
plt.show()
plt.subplot(121)
plt.title('Initial(random) Route')
showmap(init_path, X, Y)
plt.subplot(122)
plt.title('Optimized Route')
showmap(path, X, Y)
plt.show()
distance = calc_distance(path, X, Y)
print("distance: {}".format(distance))
with open('result', 'w') as fout:
fout.write('path\n')
for city in path:
fout.write(str(city) + '\n')
fout.write('')
| [
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"math.sqrt",
"numpy.array",
"math.exp",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.yticks",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"sys.stdout.flush",
"numpy.random.permutation",
"num... | [((1218, 1270), 'math.sqrt', 'math.sqrt', (['((X[0] - X[-1]) ** 2 + (Y[0] - Y[-1]) ** 2)'], {}), '((X[0] - X[-1]) ** 2 + (Y[0] - Y[-1]) ** 2)\n', (1227, 1270), False, 'import math\n'), ((1557, 1581), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (1578, 1581), True, 'import numpy as np\n'), ((1687, 1701), 'numpy.copy', 'np.copy', (['path1'], {}), '(path1)\n', (1694, 1701), True, 'import numpy as np\n'), ((1744, 1775), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n'], {'size': '(2)'}), '(0, n, size=2)\n', (1761, 1775), True, 'import numpy as np\n'), ((2055, 2075), 'math.exp', 'math.exp', (['(-delta / T)'], {}), '(-delta / T)\n', (2063, 2075), False, 'import math\n'), ((2319, 2343), 'numpy.array', 'np.array', (['distance_table'], {}), '(distance_table)\n', (2327, 2343), True, 'import numpy as np\n'), ((2395, 2430), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_of_cities'], {}), '(0, num_of_cities)\n', (2412, 2430), True, 'import numpy as np\n'), ((2442, 2468), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int8"""'}), "([], dtype='int8')\n", (2450, 2468), True, 'import numpy as np\n'), ((2484, 2520), 'numpy.ones', 'np.ones', (['[num_of_cities]'], {'dtype': 'bool'}), '([num_of_cities], dtype=bool)\n', (2491, 2520), True, 'import numpy as np\n'), ((2534, 2571), 'numpy.zeros', 'np.zeros', (['[num_of_cities]'], {'dtype': 'bool'}), '([num_of_cities], dtype=bool)\n', (2542, 2571), True, 'import numpy as np\n'), ((3836, 3849), 'numpy.copy', 'np.copy', (['path'], {}), '(path)\n', (3843, 3849), True, 'import numpy as np\n'), ((4830, 4844), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4840, 4844), True, 'import matplotlib.pyplot as plt\n'), ((4849, 4863), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4859, 4863), True, 'import matplotlib.pyplot as plt\n'), ((4868, 4975), 'matplotlib.pyplot.plot', 'plt.plot', (['path_sorted_coords[0]', 'path_sorted_coords[1]'], {'marker': '"""o"""', 'markersize': '(5)', 'color': '"""dimgray"""', 'lw': '(1)'}), "(path_sorted_coords[0], path_sorted_coords[1], marker='o',\n markersize=5, color='dimgray', lw=1)\n", (4876, 4975), True, 'import matplotlib.pyplot as plt\n'), ((5062, 5085), 'numpy.random.seed', 'np.random.seed', (['(1000384)'], {}), '(1000384)\n', (5076, 5085), True, 'import numpy as np\n'), ((5200, 5229), 'matplotlib.pyplot.title', 'plt.title', (['"""Annealing result"""'], {}), "('Annealing result')\n", (5209, 5229), True, 'import matplotlib.pyplot as plt\n'), ((5234, 5253), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (5244, 5253), True, 'import matplotlib.pyplot as plt\n'), ((5258, 5287), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Tour length (m)"""'], {}), "('Tour length (m)')\n", (5268, 5287), True, 'import matplotlib.pyplot as plt\n'), ((5361, 5371), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5369, 5371), True, 'import matplotlib.pyplot as plt\n'), ((5376, 5392), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (5387, 5392), True, 'import matplotlib.pyplot as plt\n'), ((5397, 5431), 'matplotlib.pyplot.title', 'plt.title', (['"""Initial(random) Route"""'], {}), "('Initial(random) Route')\n", (5406, 5431), True, 'import matplotlib.pyplot as plt\n'), ((5470, 5486), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (5481, 5486), True, 'import matplotlib.pyplot as plt\n'), ((5491, 5519), 'matplotlib.pyplot.title', 'plt.title', (['"""Optimized Route"""'], 
{}), "('Optimized Route')\n", (5500, 5519), True, 'import matplotlib.pyplot as plt\n'), ((5548, 5558), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5556, 5558), True, 'import matplotlib.pyplot as plt\n'), ((450, 467), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'Y'], {}), '(X, Y)\n', (461, 467), True, 'import matplotlib.pyplot as plt\n'), ((1072, 1161), 'math.sqrt', 'math.sqrt', (['((X[present_idx] - X[next_idx]) ** 2 + (Y[present_idx] - Y[next_idx]) ** 2)'], {}), '((X[present_idx] - X[next_idx]) ** 2 + (Y[present_idx] - Y[\n next_idx]) ** 2)\n', (1081, 1161), False, 'import math\n'), ((1349, 1365), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1363, 1365), True, 'import numpy as np\n'), ((1483, 1530), 'sys.stderr.write', 'sys.stderr.write', (['"""X and Y are not same length"""'], {}), "('X and Y are not same length')\n", (1499, 1530), False, 'import sys\n'), ((2623, 2644), 'numpy.append', 'np.append', (['path', 'city'], {}), '(path, city)\n', (2632, 2644), True, 'import numpy as np\n'), ((3993, 4011), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4009, 4011), False, 'import sys\n'), ((4485, 4527), 'matplotlib.pyplot.plot', 'plt.plot', (['distances'], {'color': '"""dimgray"""', 'lw': '(1)'}), "(distances, color='dimgray', lw=1)\n", (4493, 4527), True, 'import matplotlib.pyplot as plt\n'), ((4536, 4586), 'matplotlib.pyplot.plot', 'plt.plot', (['optimized_distances'], {'color': '"""black"""', 'lw': '(2)'}), "(optimized_distances, color='black', lw=2)\n", (4544, 4586), True, 'import matplotlib.pyplot as plt\n'), ((4793, 4823), 'numpy.asarray', 'np.asarray', (['path_sorted_coords'], {}), '(path_sorted_coords)\n', (4803, 4823), True, 'import numpy as np\n'), ((685, 727), 'math.sqrt', 'math.sqrt', (['((x1 - x2) ** 2 + (y1 - y2) ** 2)'], {}), '((x1 - x2) ** 2 + (y1 - y2) ** 2)\n', (694, 727), False, 'import math\n'), ((2900, 2946), 'numpy.argwhere', 'np.argwhere', (['(distance_list == nearest_distance)'], {}), '(distance_list == nearest_distance)\n', (2911, 2946), True, 'import numpy as np\n'), ((2845, 2866), 'numpy.argwhere', 'np.argwhere', (['bin_path'], {}), '(bin_path)\n', (2856, 2866), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import (
AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor)
from sklearn.linear_model import Lasso, OrthogonalMatchingPursuit
from rootcp import models
from sklearn.datasets import make_regression
from sklearn import datasets
import intervals
def oracleCP(X, y, model, alpha=0.1):
model.fit(X, y)
residual = model.conformity(y, model.predict(X))
q_alpha = np.quantile(residual, 1 - alpha)
mu = model.predict(X[-1, :])
lb = mu - q_alpha
ub = mu + q_alpha
return [lb, ub]
def splitCP(X, y, model, alpha=0.1):
X_train, X_test, Y_train, Y_test = train_test_split(X[:-1], y,
test_size=0.5)
model.fit(X_train, Y_train)
# Ranking on the calibration set
sorted_residual = np.sort(model.conformity(Y_test, model.predict(X_test)))
index = int((X.shape[0] / 2 + 1) * (1 - alpha))
    # double-check: this may need to be index - 1, since numpy arrays are 0-indexed
quantile = sorted_residual[index]
mu_ = model.predict(X[-1, :])
return [mu_ - quantile, mu_ + quantile]
def splitCPP(X, y, model, alpha=0.1):
scp = splitCP(X, y, model, alpha)
mu = 0.5 * (scp[0] + scp[1])
# y_mu = np.array(list(y) + [mu[0]])
y_mu = np.array(list(y) + [0])
model.fit(X, y_mu)
y_pred = model.predict(X[-1])
print("y_pred =", y_pred, "y_scp =", mu)
if scp[0] <= y_pred and y_pred <= scp[1]:
bound = max(scp[1] - y_pred, y_pred - scp[0])
print("ok", scp[1] - scp[0], bound)
return [y_pred - bound, y_pred + bound]
return scp
def ridgeCP(X, y, lmd, alpha=0.1):
n_samples, n_features = X.shape
H = X.T.dot(X) + lmd * np.eye(n_features)
C = np.eye(n_samples) - X.dot(np.linalg.solve(H, X.T))
A = C.dot(list(y) + [0])
B = C[:, -1]
negative_B = np.where(B < 0)[0]
A[negative_B] *= -1
B[negative_B] *= -1
S, U, V = [], [], []
for i in range(n_samples):
if B[i] != B[-1]:
tmp_u_i = (A[i] - A[-1]) / (B[-1] - B[i])
tmp_v_i = -(A[i] + A[-1]) / (B[-1] + B[i])
u_i, v_i = np.sort([tmp_u_i, tmp_v_i])
U += [u_i]
V += [v_i]
elif B[i] != 0:
tmp_uv = -0.5 * (A[i] + A[-1]) / B[i]
U += [tmp_uv]
V += [tmp_uv]
if B[-1] > B[i]:
S += [intervals.closed(U[i], V[i])]
elif B[-1] < B[i]:
intvl_u = intervals.openclosed(-np.inf, U[i])
intvl_v = intervals.closedopen(V[i], np.inf)
S += [intvl_u.union(intvl_v)]
elif B[-1] == B[i] and B[i] > 0 and A[-1] < A[i]:
S += [intervals.closedopen(U[i], np.inf)]
elif B[-1] == B[i] and B[i] > 0 and A[-1] > A[i]:
S += [intervals.openclosed(-np.inf, U[i])]
elif B[-1] == B[i] and B[i] == 0 and abs(A[-1]) <= abs(A[i]):
S += [intervals.open(-np.inf, np.inf)]
elif B[-1] == B[i] and B[i] == 0 and abs(A[-1]) > abs(A[i]):
S += [intervals.empty()]
elif B[-1] == B[i] and A[-1] == A[i]:
S += [intervals.open(-np.inf, np.inf)]
else:
print("boom !!!")
hat_y = np.sort([-np.inf] + U + V + [np.inf])
size = hat_y.shape[0]
conf_pred = intervals.empty()
p_values = np.zeros(size)
for i in range(size - 1):
n_pvalue_i = 0.
intvl_i = intervals.closed(hat_y[i], hat_y[i + 1])
for j in range(n_samples):
n_pvalue_i += intvl_i in S[j]
p_values[i] = n_pvalue_i / n_samples
if p_values[i] > alpha:
conf_pred = conf_pred.union(intvl_i)
return conf_pred, hat_y, p_values
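# In equation form, ridgeCP exploits that each sample's residual is linear in
# the candidate label z of the last point: r_i(z) = A_i + B_i * z, with
# A = C y~ and B the last column of C = I - X (X'X + lmd I)^-1 X'. Each S[i]
# is the set {z : |r_n(z)| <= |r_i(z)|} where the test point conforms no worse
# than sample i; its endpoints U[i], V[i] solve |A_n + B_n z| = |A_i + B_i z|.
# An interval between consecutive endpoints is kept when the fraction of
# samples whose S[i] contains it exceeds alpha. A tiny smoke test (hedged:
# synthetic data, purely illustrative):
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(30, 3)
#     y = X[:-1, 0] + 0.1 * rng.randn(29)  # last row of X is the test point
#     conf_set, grid, pvals = ridgeCP(X, y, lmd=0.1, alpha=0.1)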
def set_style():
# This sets reasonable defaults for font size for
# a figure that will go in a paper
sns.set_context("paper")
# Set the font to be serif, rather than sans
sns.set(font='serif', font_scale=1.5)
sns.set_palette('muted')
# Make the background white, and specify the
# specific font family
sns.set_style("whitegrid", {
"font.family": "serif",
"font.serif": ["Times", "Palatino", "serif"]
})
def load_data(dataset="boston"):
if dataset == "boston":
boston = datasets.load_boston()
X_ = boston.data
Y_ = boston.target
if dataset == "diabetes":
diabetes = datasets.load_diabetes()
X_ = diabetes.data
Y_ = diabetes.target
if dataset == "climate":
X_ = np.load("Xclimate.npy")
Y_ = np.load("yclimate.npy")
n_features = X_.shape[1]
groups = np.arange(n_features) // 7
groups = groups.astype(int)
n_groups = int(n_features / 7)
size_groups = 7 * np.ones(n_groups)
size_groups = size_groups.astype(int)
omega = np.ones(n_groups) # since all groups have the same size
g_start = np.cumsum(size_groups, dtype=np.intc) - size_groups[0]
if dataset == "housingcalifornia":
housing = datasets.fetch_california_housing()
X_, Y_ = housing.data, housing.target
if dataset == "friedman1":
X_, Y_ = datasets.make_friedman1(
n_samples=500, n_features=100, noise=1)
if dataset == "synthetic":
dense = 0.7
n_samples, n_features = (100, 1000)
X_, Y_ = make_regression(n_samples=n_samples, n_features=n_features,
# random_state=random_state,
n_informative=int(n_features * dense),
noise=1)
        # without normalization scipy.minimize fails to converge
X_ /= np.linalg.norm(X_, axis=0)
mask = np.sum(np.isnan(X_), axis=0) == 0
if np.any(mask):
X_ = X_[:, mask]
Y_ = (Y_ - Y_.mean()) / Y_.std()
return X_, Y_
def load_model(method="ridge", X=None, y=None):
# if random_state is None, the estimator is random itself (an additional
# randomness potentially independent of the data) and the very definition
# of conformal set is unclear
if method == "GradientBoosting":
model = GradientBoostingRegressor(warm_start=True, random_state=0)
if method == "MLP":
model = MLPRegressor(warm_start=False, random_state=0, max_iter=2000)
if method == "AdaBoost":
model = AdaBoostRegressor(random_state=0)
if method == "RandomForest":
# For randomForest I dont know yet if it is safe to use warm_start
model = RandomForestRegressor(warm_start=False, random_state=0)
if method == "OMP":
# Do not have a warm_start
tol_omp = 1e-3 * np.linalg.norm(y) ** 2
model = OrthogonalMatchingPursuit(tol=tol_omp, fit_intercept=False)
if method == "Lasso":
lmd = np.linalg.norm(X.T.dot(y), ord=np.inf) / 25
model = Lasso(alpha=lmd / X.shape[0], warm_start=False, max_iter=5000,
fit_intercept=False)
if method == "ridge":
lmd = 0.1
model = models.ridge(lmd=lmd)
return models.regressor(model=model)
| [
"sklearn.neural_network.MLPRegressor",
"intervals.openclosed",
"intervals.closedopen",
"sklearn.linear_model.Lasso",
"rootcp.models.ridge",
"sklearn.ensemble.AdaBoostRegressor",
"seaborn.set_style",
"intervals.closed",
"numpy.linalg.norm",
"numpy.arange",
"seaborn.set",
"sklearn.ensemble.Rando... | [((557, 589), 'numpy.quantile', 'np.quantile', (['residual', '(1 - alpha)'], {}), '(residual, 1 - alpha)\n', (568, 589), True, 'import numpy as np\n'), ((767, 809), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X[:-1]', 'y'], {'test_size': '(0.5)'}), '(X[:-1], y, test_size=0.5)\n', (783, 809), False, 'from sklearn.model_selection import train_test_split\n'), ((3345, 3382), 'numpy.sort', 'np.sort', (['([-np.inf] + U + V + [np.inf])'], {}), '([-np.inf] + U + V + [np.inf])\n', (3352, 3382), True, 'import numpy as np\n'), ((3425, 3442), 'intervals.empty', 'intervals.empty', ([], {}), '()\n', (3440, 3442), False, 'import intervals\n'), ((3458, 3472), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (3466, 3472), True, 'import numpy as np\n'), ((3949, 3973), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (3964, 3973), True, 'import seaborn as sns\n'), ((4027, 4064), 'seaborn.set', 'sns.set', ([], {'font': '"""serif"""', 'font_scale': '(1.5)'}), "(font='serif', font_scale=1.5)\n", (4034, 4064), True, 'import seaborn as sns\n'), ((4069, 4093), 'seaborn.set_palette', 'sns.set_palette', (['"""muted"""'], {}), "('muted')\n", (4084, 4093), True, 'import seaborn as sns\n'), ((4174, 4276), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'font.family': 'serif', 'font.serif': ['Times', 'Palatino', 'serif']}"], {}), "('whitegrid', {'font.family': 'serif', 'font.serif': ['Times',\n 'Palatino', 'serif']})\n", (4187, 4276), True, 'import seaborn as sns\n'), ((5770, 5796), 'numpy.linalg.norm', 'np.linalg.norm', (['X_'], {'axis': '(0)'}), '(X_, axis=0)\n', (5784, 5796), True, 'import numpy as np\n'), ((5849, 5861), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (5855, 5861), True, 'import numpy as np\n'), ((7147, 7176), 'rootcp.models.regressor', 'models.regressor', ([], {'model': 'model'}), '(model=model)\n', (7163, 7176), False, 'from rootcp import models\n'), ((1875, 1892), 'numpy.eye', 'np.eye', (['n_samples'], {}), '(n_samples)\n', (1881, 1892), True, 'import numpy as np\n'), ((1990, 2005), 'numpy.where', 'np.where', (['(B < 0)'], {}), '(B < 0)\n', (1998, 2005), True, 'import numpy as np\n'), ((3547, 3587), 'intervals.closed', 'intervals.closed', (['hat_y[i]', 'hat_y[i + 1]'], {}), '(hat_y[i], hat_y[i + 1])\n', (3563, 3587), False, 'import intervals\n'), ((4376, 4398), 'sklearn.datasets.load_boston', 'datasets.load_boston', ([], {}), '()\n', (4396, 4398), False, 'from sklearn import datasets\n'), ((4501, 4525), 'sklearn.datasets.load_diabetes', 'datasets.load_diabetes', ([], {}), '()\n', (4523, 4525), False, 'from sklearn import datasets\n'), ((4625, 4648), 'numpy.load', 'np.load', (['"""Xclimate.npy"""'], {}), "('Xclimate.npy')\n", (4632, 4648), True, 'import numpy as np\n'), ((4662, 4685), 'numpy.load', 'np.load', (['"""yclimate.npy"""'], {}), "('yclimate.npy')\n", (4669, 4685), True, 'import numpy as np\n'), ((4945, 4962), 'numpy.ones', 'np.ones', (['n_groups'], {}), '(n_groups)\n', (4952, 4962), True, 'import numpy as np\n'), ((5133, 5168), 'sklearn.datasets.fetch_california_housing', 'datasets.fetch_california_housing', ([], {}), '()\n', (5166, 5168), False, 'from sklearn import datasets\n'), ((5264, 5327), 'sklearn.datasets.make_friedman1', 'datasets.make_friedman1', ([], {'n_samples': '(500)', 'n_features': '(100)', 'noise': '(1)'}), '(n_samples=500, n_features=100, noise=1)\n', (5287, 5327), False, 'from sklearn import datasets\n'), ((6238, 6296), 'sklearn.ensemble.GradientBoostingRegressor', 
'GradientBoostingRegressor', ([], {'warm_start': '(True)', 'random_state': '(0)'}), '(warm_start=True, random_state=0)\n', (6263, 6296), False, 'from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor\n'), ((6338, 6399), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'warm_start': '(False)', 'random_state': '(0)', 'max_iter': '(2000)'}), '(warm_start=False, random_state=0, max_iter=2000)\n', (6350, 6399), False, 'from sklearn.neural_network import MLPRegressor\n'), ((6446, 6479), 'sklearn.ensemble.AdaBoostRegressor', 'AdaBoostRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (6463, 6479), False, 'from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor\n'), ((6605, 6660), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'warm_start': '(False)', 'random_state': '(0)'}), '(warm_start=False, random_state=0)\n', (6626, 6660), False, 'from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor\n'), ((6785, 6844), 'sklearn.linear_model.OrthogonalMatchingPursuit', 'OrthogonalMatchingPursuit', ([], {'tol': 'tol_omp', 'fit_intercept': '(False)'}), '(tol=tol_omp, fit_intercept=False)\n', (6810, 6844), False, 'from sklearn.linear_model import Lasso, OrthogonalMatchingPursuit\n'), ((6946, 7033), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': '(lmd / X.shape[0])', 'warm_start': '(False)', 'max_iter': '(5000)', 'fit_intercept': '(False)'}), '(alpha=lmd / X.shape[0], warm_start=False, max_iter=5000,\n fit_intercept=False)\n', (6951, 7033), False, 'from sklearn.linear_model import Lasso, OrthogonalMatchingPursuit\n'), ((7113, 7134), 'rootcp.models.ridge', 'models.ridge', ([], {'lmd': 'lmd'}), '(lmd=lmd)\n', (7125, 7134), False, 'from rootcp import models\n'), ((1848, 1866), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (1854, 1866), True, 'import numpy as np\n'), ((1901, 1924), 'numpy.linalg.solve', 'np.linalg.solve', (['H', 'X.T'], {}), '(H, X.T)\n', (1916, 1924), True, 'import numpy as np\n'), ((2273, 2300), 'numpy.sort', 'np.sort', (['[tmp_u_i, tmp_v_i]'], {}), '([tmp_u_i, tmp_v_i])\n', (2280, 2300), True, 'import numpy as np\n'), ((4737, 4758), 'numpy.arange', 'np.arange', (['n_features'], {}), '(n_features)\n', (4746, 4758), True, 'import numpy as np\n'), ((4865, 4882), 'numpy.ones', 'np.ones', (['n_groups'], {}), '(n_groups)\n', (4872, 4882), True, 'import numpy as np\n'), ((5020, 5057), 'numpy.cumsum', 'np.cumsum', (['size_groups'], {'dtype': 'np.intc'}), '(size_groups, dtype=np.intc)\n', (5029, 5057), True, 'import numpy as np\n'), ((5815, 5827), 'numpy.isnan', 'np.isnan', (['X_'], {}), '(X_)\n', (5823, 5827), True, 'import numpy as np\n'), ((2518, 2546), 'intervals.closed', 'intervals.closed', (['U[i]', 'V[i]'], {}), '(U[i], V[i])\n', (2534, 2546), False, 'import intervals\n'), ((2598, 2633), 'intervals.openclosed', 'intervals.openclosed', (['(-np.inf)', 'U[i]'], {}), '(-np.inf, U[i])\n', (2618, 2633), False, 'import intervals\n'), ((2656, 2690), 'intervals.closedopen', 'intervals.closedopen', (['V[i]', 'np.inf'], {}), '(V[i], np.inf)\n', (2676, 2690), False, 'import intervals\n'), ((6746, 6763), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (6760, 6763), True, 'import numpy as np\n'), ((2810, 2844), 'intervals.closedopen', 'intervals.closedopen', (['U[i]', 'np.inf'], {}), '(U[i], np.inf)\n', (2830, 2844), False, 'import intervals\n'), ((2923, 2958), 'intervals.openclosed', 
'intervals.openclosed', (['(-np.inf)', 'U[i]'], {}), '(-np.inf, U[i])\n', (2943, 2958), False, 'import intervals\n'), ((3049, 3080), 'intervals.open', 'intervals.open', (['(-np.inf)', 'np.inf'], {}), '(-np.inf, np.inf)\n', (3063, 3080), False, 'import intervals\n'), ((3170, 3187), 'intervals.empty', 'intervals.empty', ([], {}), '()\n', (3185, 3187), False, 'import intervals\n'), ((3254, 3285), 'intervals.open', 'intervals.open', (['(-np.inf)', 'np.inf'], {}), '(-np.inf, np.inf)\n', (3268, 3285), False, 'import intervals\n')] |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from collections import defaultdict
from gs_quant.api.gs.hedges import GsHedgeApi
import logging
from typing import Union, List
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
_logger = logging.getLogger(__name__)
class Hedge:
"""
A Marquee hedge.
"""
@staticmethod
def find_optimal_hedge(hedge_query: dict, hyperparams: dict, metric: str) -> \
Union[dict, float]:
"""
This function is designed to find the 'best' hedge from a list of hedges that are computed using a grid
search over all hyperparameters passed in - where 'best' is defined by the metric argument passed in and
whether we want to minimize or maximize this metric.
:param hedge_query: dict, hedge data that is sent to the Marquee API as input to the new performance hedger
:param hyperparams: dict, keys are hyperparameters (Concentration or Diversity) that map to lists of the values
to use for one of these hyperparameters when running the new performance hedger.
:param metric: str, the metric we want to optimize i.e. 'holdingError' or 'rSquared'
:return: dict, float, and tuple, the best hedge found using the algorithm, the value of the metric being
optimized, and the tuple of optimal hyperparameters
"""
hedge_results = {}
opt_map = Hedge.create_optimization_mappings()
optimization_type = opt_map[metric]
_logger.info(f'We are trying to {optimization_type} {metric} and will return the optimized hedge & '
f'metric value...')
hyperparam_grid = [(x, y) for x in hyperparams['Concentration'] for y in hyperparams['Diversity']]
for pair in hyperparam_grid:
hedge_params = hedge_query['parameters']
hedge_params.lasso_weight, hedge_params.ridge_weight = pair[0], pair[1]
hedge_query['parameters'] = hedge_params
results = GsHedgeApi.calculate_hedge(hedge_query)
_logger.info(f'Current Hedge is using the following values for Concentration/Diversity: {pair}')
curr_results, curr_pair = results['result']['hedge'], pair
hedge_results[curr_results[metric]] = (curr_results, curr_pair)
_logger.info(f'Current Hedge value for {metric}: {curr_results[metric]*100:.3}%')
optimized_metric = min(hedge_results.keys()) if optimization_type == 'minimize' else max(hedge_results.keys())
optimized_hedge, optimized_hyperparams = hedge_results[optimized_metric][0], hedge_results[optimized_metric][1]
return optimized_hedge, optimized_metric, optimized_hyperparams
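    # Illustrative call (hedged: the hedge_query payload is schematic; real
    # Marquee hedge requests carry many more fields):
    #
    #     hyperparams = {'Concentration': [0.1, 0.5], 'Diversity': [0.1, 0.5]}
    #     hedge, metric_val, pair = Hedge.find_optimal_hedge(
    #         hedge_query, hyperparams, metric='holdingError')
    #
    # A 2 x 2 grid issues four hedger calls and keeps the hedge with the
    # smallest holding error.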
@staticmethod
def create_optimization_mappings() -> dict:
"""
This function is designed to construct a mapping between metrics a user can choose to optimize when calling the
New Performance Hedger and the way the metric should be optimized.
:param none:
:return: dict, the dictionary containing a mapping between metrics to optimize and how they should be optimized
"""
opt_dict = {'rSquared': 'maximize', 'correlation': 'maximize', 'holdingError': 'minimize',
'trackingError': 'minimize', 'transactionCost': 'minimize', 'annualizedReturn': 'maximize'}
return opt_dict
@staticmethod
def construct_portfolio_weights_and_asset_numbers(results: dict) -> Union[dict, List]:
"""
Function used to retrieve the constructed portfolio from a performance hedge, sort it, then calculate the
weights for all assets and total number of assets and return these results.
:param results: dict, the results of the performance hedge request made to the Marquee API
:return: dict, list, list - the portfolio, portfolio weights, and number of assets
"""
portfolio = results["result"]["hedge"]["constituents"]
portfolio.sort(key=lambda x: x['weight'], reverse=True)
weights = [asset['weight'] for asset in portfolio]
asset_numbers = list(range(len(portfolio)))
return portfolio, weights, asset_numbers
@staticmethod
def plot_weights_against_number_of_assets(hedge_query: dict, hyperparams: dict, figsize: tuple = (18, 9)) -> \
matplotlib.figure.Figure:
"""
Function used to plot the effects that a particular hyperparameter (Concentration or Diversity) has
on the results of a new performance hedge done through the Marquee API.
:param hedge_query: dict, hedge data that is sent to the Marquee API as input to the new
performance hedger
:param hyperparams: dict, keys are hyperparameters (Concentration or Diversity) that map to lists of
the values to use for one of these hyperparameters when running
the new performance hedger. The hyperparameter not used will
have a 'None' value.
:param figsize: tuple, width and height of the plot in inches
:return: matplotlib.figure.Figure - the figure of the plot (the top level container for
all the plot elements)
"""
lines = []
hyperparam_to_plot = 'Concentration' if hyperparams['Concentration'] else 'Diversity'
f, ax = plt.subplots(figsize=figsize)
try:
for i in range(len(hyperparams[hyperparam_to_plot])):
hedge_params = hedge_query['parameters']
if hyperparam_to_plot == 'Concentration':
hedge_params.lasso_weight = hyperparams[hyperparam_to_plot][i]
else:
hedge_params.lasso_weight = 0
if hyperparam_to_plot == 'Diversity':
hedge_params.ridge_weight = hyperparams[hyperparam_to_plot][i]
else:
hedge_params.ridge_weight = 0
hedge_query['parameters'] = hedge_params
results = GsHedgeApi.calculate_hedge(hedge_query)
portfolio, weights, asset_numbers = Hedge.construct_portfolio_weights_and_asset_numbers(results)
x_ind = np.arange(len(asset_numbers))
bar = ax.bar(x_ind, weights, align='center', alpha=0.6)
lines.append(bar)
plt.legend(lines, [hyperparam_to_plot + ' Percentage: ' + str(i) for i in
hyperparams[hyperparam_to_plot]], prop={'size': 18})
plt.xlabel('Number of Assets', size=13)
plt.ylabel('Weights', size=13)
plt.title('Analyzing the effects of the ' + hyperparam_to_plot + ' hyperparameter on a hedge', size=22)
plt.show()
except Exception as err:
_logger.warning(f'Constructing portfolio, weights, or asset numbers failed with {err} ... \
returning empty plot.')
return f
@staticmethod
def asset_id_diffs(portfolio_asset_ids, thomson_reuters_asset_ids):
"""
Function designed to find the assets that are contained in the portfolio but that we don't have
Thomson Reuters data for.
:param portfolio_asset_ids: list, the list of MQIDs representing all of the assets that we are computing
rebalance costs for
:param thomson_reuters_asset_ids: list, the list of MQIDs representing all of the assets that we have Thomson
Reuters data for
:return: list, the assets that we don't have Thomson Reuters data for and should exclude in the transaction cost
calculations
"""
diffs = list(set(portfolio_asset_ids) - set(thomson_reuters_asset_ids))
return diffs
@staticmethod
def create_transaction_cost_data_structures(portfolio_asset_ids, portfolio_quantities, thomson_reuters_eod_data,
backtest_dates):
"""
Function designed to create the data structures necessary to compute transaction costs based on rebalancing a
portfolio of assets.
:param portfolio_asset_ids: list, the asset_ids for each asset in the underlying portfolio that we want to
compute transaction costs for
:param portfolio_quantities: list, the number of shares for each asset in the underlying portfolio that we want
to compute transaction costs for
:param thomson_reuters_eod_data: Dataset, the data used to fetch prices for assets - in this case from <NAME>
:param backtest_dates: list, the dates that the portfolio is held for (that we want to compute rebalance costs
for)
:return: Union[list, dict, ...], the data structures necessary for computing transaction (rebalance) costs
"""
thomson_reuters_asset_ids = [asset_id for asset_id in thomson_reuters_eod_data.get_data(
backtest_dates[-1], backtest_dates[-1], assetId=portfolio_asset_ids)['assetId']]
diffs = Hedge.asset_id_diffs(portfolio_asset_ids, thomson_reuters_asset_ids)
for diff in diffs:
portfolio_asset_ids.remove(diff)
# Map asset_id to quantity of shares from portfolio, while excluding assets that are found in the diffs since
# there is no Thomson Reuters data on them
id_quantity_map = {}
for idx, asset_id in enumerate(portfolio_asset_ids):
if asset_id not in diffs:
id_quantity_map[asset_id] = portfolio_quantities[idx]
# Map asset_id to list of prices of that asset over the transaction_cost_dates we want
        id_prices_map = defaultdict(list)
prices_df = pd.DataFrame()
for date in backtest_dates:
data = thomson_reuters_eod_data.get_data(date, date, assetId=portfolio_asset_ids)
prices_df = prices_df.append(data)
for asset_id in portfolio_asset_ids:
id_prices_map[asset_id] = list(prices_df.loc[prices_df['assetId'] == asset_id]['closePrice'])
# Create list representing notional of each day in transaction_cost_days and map asset_ids to notional of each
# asset on each day
id_to_notional_map = {}
notionals_assets = [abs(np.asarray(id_prices_map[asset_id]) * id_quantity_map[asset_id]) for asset_id in
portfolio_asset_ids]
# Mapping asset_id to notionals of each day of that asset_id
for idx, asset_id in enumerate(portfolio_asset_ids):
id_to_notional_map[asset_id] = list(notionals_assets[idx])
total_notionals_each_day = list(np.sum(notionals_assets, axis=0))
# Create map of asset_ids to weights of total portfolio on each day
id_to_weight_map = {}
for idx, asset_id in enumerate(portfolio_asset_ids):
id_to_weight_map[asset_id] = [i / j for i, j in
zip(id_to_notional_map[portfolio_asset_ids[idx]], total_notionals_each_day)]
return id_quantity_map, id_prices_map, id_to_notional_map, id_to_weight_map
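    # Worked example of the weight map: if an asset's notional on a given day
    # is 2,500 USD and the whole portfolio's notional that day is 50,000 USD,
    # its id_to_weight_map entry for that day is 2500 / 50000 = 0.05.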
@staticmethod
def t_cost(basis_points, notional_traded):
"""
Function designed to compute the transaction costs associated with trading a notional
amount of an asset to rebalance a portfolio.
:param basis_points: float, the number of basis points to use as an approximation when computing transaction
costs to trade each asset that is rebalanced and that will be converted to a
percentage (e.g. 20.43)
:param notional_traded: float, notional amount of the asset that is being traded to rebalance the portfolio
:return: float, the total transaction cost of trading a particular notional amount of an asset
"""
return (basis_points * 1e-4) * notional_traded
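    # Worked example: t_cost(20.43, 1_000_000) = 20.43 * 1e-4 * 1e6 = 2043.0,
    # i.e. trading 1mm USD of notional at 20.43 bps costs 2,043 USD.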
@staticmethod
def compute_notional_traded(notional_on_the_day, prev_weight, curr_weight):
"""
Function used to compute the notional amount (USD) of an asset that will be traded on a particular day, using
the weights of the asset from the previous day and the weights & notional from the current day.
:param notional_on_the_day: float, notional amount of the asset that the portfolio contains on the current day
:param prev_weight: float, the weighting of the corresponding asset (of the entire portfolio) on the previous
day
:param curr_weight: float, the weighting of the corresponding asset (of the entire portfolio) on the current day
:return: float, the net notional amount of the asset traded on the current day
"""
return sum([np.abs(curr_weight - prev_weight) * notional_on_the_day])
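    # Worked example: an asset holding 10,000 USD of notional today that moved
    # from a 40% weight yesterday to 35% today trades
    # |0.35 - 0.40| * 10000 = 500 USD to rebalance.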
@staticmethod
def compute_tcosts(basis_points, asset_weights, asset_notionals, backtest_dates, portfolio_asset_ids):
"""
Function to compute cumulative transaction costs associated with rebalancing a portfolio. In particular, for
each day on which we compute the cumulative the rebalancing costs (USD), the weights of the constituents of the
portfolio on the previous day are used to calculate how much of the notional amount of each asset is traded to
execute the rebalance.
:param basis_points: float, the number of basis points to use as an approximation when computing transaction
costs to trade each asset that is rebalanced and that will be converted to a
percentage (e.g. 20.43)
:param asset_weights: dict, the dictionary mapping of asset_ids (MQIDs) to a list of weights where each weight
represents the weighting of the corresponding asset (of the entire portfolio) on
each day in the backtest period
:param asset_notionals: dict, the dictionary mapping of asset_ids (MQIDs) to a list of floats where each float
represents the notional amount of the corresponding asset (of the entire
portfolio) on each day in the backtest period
:param backtest_dates: list, the dates that the portfolio is held for (that we want to compute rebalance costs
for)
:param portfolio_asset_ids: list, the list of MQIDs representing all of the assets that we are computing
rebalancing costs for
:return: pd.Series, the cumulative transaction costs associated with rebalancing the portfolio across the
backtest period
"""
tcosts_each_day = []
for idx, date in enumerate(backtest_dates):
tcost_today = 0
for asset_id in portfolio_asset_ids:
prev_weights = asset_weights[asset_id][0] if idx == 0 else asset_weights[asset_id][idx - 1]
notional_on_the_day, curr_weights = asset_notionals[asset_id][idx], asset_weights[asset_id][idx]
notional_to_trade = Hedge.compute_notional_traded(notional_on_the_day, prev_weights, curr_weights)
transaction_cost = Hedge.t_cost(basis_points, notional_to_trade)
tcost_today += transaction_cost
tcosts_each_day.append(abs(tcost_today))
cum_tcosts = pd.Series(np.cumsum(tcosts_each_day))
return cum_tcosts
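    # Tying the helpers together: a day on which 500 USD of one asset and
    # 1,500 USD of another turn over costs 20e-4 * (500 + 1500) = 4.0 USD at
    # 20 bps; compute_tcosts accumulates those daily totals into the
    # cumulative series returned above.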
@staticmethod
def plot_transaction_costs_of_rebalancing(cumulative_transaction_costs, backtest_dates, figsize=(10, 6)):
"""
Function used to plot the cumulative transaction costs associated with rebalancing a hedge. The plot axes
include the backtest dates on the x-axis and the cumulative transaction costs (USD) on the y-axis.
:param cumulative_transaction_costs: pd.Series, the cumulative transaction costs over the backtest period
:param backtest_dates: list, the dates that the portfolio is held for (that we want to compute rebalance costs
for)
:param figsize: tuple, width and height of the plot in inches
:return:
"""
indices = [x for x in range(len(backtest_dates))]
ax_costs = cumulative_transaction_costs.plot(figsize=figsize, linewidth=3)
ax_costs.legend(['Weights-Based Performance Hedger'])
plt.ylabel('Cumulative Costs (USD)', size=13)
plt.xlabel('Backtest Period', size=13)
plt.xticks(indices, backtest_dates, rotation='vertical', size=13)
plt.title('Cumulative Costs to Rebalance Weights-Based Performance Hedger', size=22)
plt.legend(labels=['Weights-Based Performance Hedger'], prop={'size': 18})
| [
"logging.getLogger",
"numpy.abs",
"gs_quant.api.gs.hedges.GsHedgeApi.calculate_hedge",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"numpy.sum",
"numpy.cumsum",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
... | [((784, 811), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (801, 811), False, 'import logging\n'), ((6011, 6040), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6023, 6040), True, 'import matplotlib.pyplot as plt\n'), ((10498, 10512), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10510, 10512), True, 'import pandas as pd\n'), ((17250, 17295), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Costs (USD)"""'], {'size': '(13)'}), "('Cumulative Costs (USD)', size=13)\n", (17260, 17295), True, 'import matplotlib.pyplot as plt\n'), ((17304, 17342), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Backtest Period"""'], {'size': '(13)'}), "('Backtest Period', size=13)\n", (17314, 17342), True, 'import matplotlib.pyplot as plt\n'), ((17351, 17416), 'matplotlib.pyplot.xticks', 'plt.xticks', (['indices', 'backtest_dates'], {'rotation': '"""vertical"""', 'size': '(13)'}), "(indices, backtest_dates, rotation='vertical', size=13)\n", (17361, 17416), True, 'import matplotlib.pyplot as plt\n'), ((17425, 17513), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative Costs to Rebalance Weights-Based Performance Hedger"""'], {'size': '(22)'}), "('Cumulative Costs to Rebalance Weights-Based Performance Hedger',\n size=22)\n", (17434, 17513), True, 'import matplotlib.pyplot as plt\n'), ((17518, 17592), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': "['Weights-Based Performance Hedger']", 'prop': "{'size': 18}"}), "(labels=['Weights-Based Performance Hedger'], prop={'size': 18})\n", (17528, 17592), True, 'import matplotlib.pyplot as plt\n'), ((2583, 2622), 'gs_quant.api.gs.hedges.GsHedgeApi.calculate_hedge', 'GsHedgeApi.calculate_hedge', (['hedge_query'], {}), '(hedge_query)\n', (2609, 2622), False, 'from gs_quant.api.gs.hedges import GsHedgeApi\n'), ((7177, 7216), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Assets"""'], {'size': '(13)'}), "('Number of Assets', size=13)\n", (7187, 7216), True, 'import matplotlib.pyplot as plt\n'), ((7229, 7259), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Weights"""'], {'size': '(13)'}), "('Weights', size=13)\n", (7239, 7259), True, 'import matplotlib.pyplot as plt\n'), ((7272, 7379), 'matplotlib.pyplot.title', 'plt.title', (["('Analyzing the effects of the ' + hyperparam_to_plot +\n ' hyperparameter on a hedge')"], {'size': '(22)'}), "('Analyzing the effects of the ' + hyperparam_to_plot +\n ' hyperparameter on a hedge', size=22)\n", (7281, 7379), True, 'import matplotlib.pyplot as plt\n'), ((7388, 7398), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7396, 7398), True, 'import matplotlib.pyplot as plt\n'), ((11424, 11456), 'numpy.sum', 'np.sum', (['notionals_assets'], {'axis': '(0)'}), '(notionals_assets, axis=0)\n', (11430, 11456), True, 'import numpy as np\n'), ((16248, 16274), 'numpy.cumsum', 'np.cumsum', (['tcosts_each_day'], {}), '(tcosts_each_day)\n', (16257, 16274), True, 'import numpy as np\n'), ((6682, 6721), 'gs_quant.api.gs.hedges.GsHedgeApi.calculate_hedge', 'GsHedgeApi.calculate_hedge', (['hedge_query'], {}), '(hedge_query)\n', (6708, 6721), False, 'from gs_quant.api.gs.hedges import GsHedgeApi\n'), ((11053, 11088), 'numpy.asarray', 'np.asarray', (['id_prices_map[asset_id]'], {}), '(id_prices_map[asset_id])\n', (11063, 11088), True, 'import numpy as np\n'), ((13541, 13574), 'numpy.abs', 'np.abs', (['(curr_weight - prev_weight)'], {}), '(curr_weight - prev_weight)\n', (13547, 13574), True, 'import numpy as np\n')] |
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
import os
import audio
import csv
from hparams import hparams
import traceback
def build_from_path(in_dir, out_dir, speakers, num_workers=1, tqdm=lambda x: x):
executor = ProcessPoolExecutor(max_workers=num_workers)
futures = []
index = 1
for speaker_id, speaker in enumerate(speakers):
os.makedirs(os.path.join(out_dir, speaker), exist_ok=True)
with open(os.path.join(in_dir, speaker, 'sentences_train.csv'), encoding='utf-8', errors='replace') as f:
            # quotechar=None requires quoting to be disabled explicitly,
            # otherwise csv.reader raises a TypeError
            for row in csv.reader(f, delimiter='|', escapechar=None,
                                   quotechar=None, quoting=csv.QUOTE_NONE):
wav_path = os.path.join(in_dir, speaker, row[0])
text = row[1]
futures.append(executor.submit(partial(_process_utterance, out_dir, index, speaker, speaker_id,
wav_path,
text)))
index += 1
return [future.result() for future in tqdm(futures)]
def _process_utterance(out_dir, index, speaker, speaker_id, wav_path, text):
# Load the audio to a numpy array:
try:
wav = audio.load_wav(wav_path)
if hparams.rescaling:
wav = wav / np.abs(wav).max() * hparams.rescaling_max
# Compute the linear-scale spectrogram from the wav:
spectrogram = audio.spectrogram(wav).astype(np.float32)
n_frames = spectrogram.shape[1]
# Compute a mel-scale spectrogram from the wav:
mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)
# Write the spectrograms to disk:
spectrogram_filename = '%s/er_m-spec-%05d.npy' % (speaker, index)
mel_filename = '%s/er_m-mel-%05d.npy' % (speaker, index)
np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
# Return a tuple describing this training example:
return spectrogram_filename, mel_filename, n_frames, text, speaker_id
except Exception:
print("Error with the following file:", wav_path)
print("Error:", traceback.format_exc())
print(text)
return "", "", 0, text, speaker_id
| [
"traceback.format_exc",
"numpy.abs",
"os.path.join",
"audio.melspectrogram",
"audio.load_wav",
"functools.partial",
"concurrent.futures.ProcessPoolExecutor",
"audio.spectrogram",
"csv.reader"
] | [((277, 321), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'num_workers'}), '(max_workers=num_workers)\n', (296, 321), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((1233, 1257), 'audio.load_wav', 'audio.load_wav', (['wav_path'], {}), '(wav_path)\n', (1247, 1257), False, 'import audio\n'), ((426, 456), 'os.path.join', 'os.path.join', (['out_dir', 'speaker'], {}), '(out_dir, speaker)\n', (438, 456), False, 'import os\n'), ((610, 671), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""|"""', 'escapechar': 'None', 'quotechar': 'None'}), "(f, delimiter='|', escapechar=None, quotechar=None)\n", (620, 671), False, 'import csv\n'), ((1847, 1890), 'os.path.join', 'os.path.join', (['out_dir', 'spectrogram_filename'], {}), '(out_dir, spectrogram_filename)\n', (1859, 1890), False, 'import os\n'), ((1943, 1978), 'os.path.join', 'os.path.join', (['out_dir', 'mel_filename'], {}), '(out_dir, mel_filename)\n', (1955, 1978), False, 'import os\n'), ((491, 543), 'os.path.join', 'os.path.join', (['in_dir', 'speaker', '"""sentences_train.csv"""'], {}), "(in_dir, speaker, 'sentences_train.csv')\n", (503, 543), False, 'import os\n'), ((700, 737), 'os.path.join', 'os.path.join', (['in_dir', 'speaker', 'row[0]'], {}), '(in_dir, speaker, row[0])\n', (712, 737), False, 'import os\n'), ((1439, 1461), 'audio.spectrogram', 'audio.spectrogram', (['wav'], {}), '(wav)\n', (1456, 1461), False, 'import audio\n'), ((1604, 1629), 'audio.melspectrogram', 'audio.melspectrogram', (['wav'], {}), '(wav)\n', (1624, 1629), False, 'import audio\n'), ((2261, 2283), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2281, 2283), False, 'import traceback\n'), ((815, 900), 'functools.partial', 'partial', (['_process_utterance', 'out_dir', 'index', 'speaker', 'speaker_id', 'wav_path', 'text'], {}), '(_process_utterance, out_dir, index, speaker, speaker_id, wav_path, text\n )\n', (822, 900), False, 'from functools import partial\n'), ((1313, 1324), 'numpy.abs', 'np.abs', (['wav'], {}), '(wav)\n', (1319, 1324), True, 'import numpy as np\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" transform .jpg picture to .bin format.
"""
import os
import argparse
import numpy as np
import tensorflow as tf
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
scale_path = '/mnt/data1/2021/hht/huawei/bsrn/temp/bin/scale'
# Convert image data to .bin files; images are resized to a fixed 480*320
def tobin(imgdir,bindir):
# images = []
for file in os.listdir(imgdir):
if file.endswith('.png'):
pic_path = imgdir + "/" + file
print("start to process %s" % pic_path)
image = tf.read_file(filename=pic_path)
image = tf.image.decode_png(image, channels=3,dtype=tf.uint8)
print("image's shape: ",image.shape)
image = tf.image.resize_images(image, size=(480,320))
# image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# image.set_shape([256, 256, 3])
# print(image.shape)
with tf.Session() as sess:
image_numpy = image.eval()
# print(image_numpy.shape)
# save the pic as .bin format for Ascend310 infer.
image_numpy = np.array(image_numpy.reshape(-1),dtype=np.float32)
image_numpy.tofile(bindir + "/" + file + ".data.bin")
scale = np.array(4)#,dtype=np.float32)
scale.tofile(scale_path + "/" + file + ".data.bin")
# images.append(image_numpy)
# images = np.array(images,dtype=np.uint8)
# print("----------------------------------------------------------")
# print("images type:", type(images), "shape: ", images.shape)
# images.tofile(bindir + "/" + "data.bin")
if __name__ == '__main__':
# argument definition
parser = argparse.ArgumentParser()
parser.add_argument("--data_input_dir", type=str, default='', help="Input dataset path.")
parser.add_argument("--data_truth_dir", type=str, default='', help="Input label dataset path.")
parser.add_argument("--bin_input_dir", type=str, default='', help="Output dataset path.")
parser.add_argument("--bin_truth_dir", type=str, default='', help="Output label dataset path.")
config = parser.parse_args()
# preparation
if os.path.exists(config.bin_input_dir + '/x2'):
pass
else:
os.makedirs(config.bin_input_dir + '/x2')
if os.path.exists(config.bin_input_dir + '/x3'):
pass
else:
os.makedirs(config.bin_input_dir + '/x3')
if os.path.exists(config.bin_input_dir + '/x4'):
pass
else:
os.makedirs(config.bin_input_dir + '/x4')
if os.path.exists(config.bin_truth_dir):
pass
else:
os.makedirs(config.bin_truth_dir)
# tobin(config.data_input_dir + '/x2', config.bin_input_dir + '/x2')
# tobin(config.data_input_dir + '/x3', config.bin_input_dir + '/x3')
# tobin(config.data_input_dir + '/x4', config.bin_input_dir + '/x4')
tobin(config.data_truth_dir, config.bin_truth_dir) | [
"os.path.exists",
"tensorflow.image.decode_png",
"os.listdir",
"tensorflow.image.resize_images",
"os.makedirs",
"argparse.ArgumentParser",
"tensorflow.Session",
"numpy.array",
"tensorflow.read_file"
] | [((1598, 1616), 'os.listdir', 'os.listdir', (['imgdir'], {}), '(imgdir)\n', (1608, 1616), False, 'import os\n'), ((2933, 2958), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2956, 2958), False, 'import argparse\n'), ((3405, 3449), 'os.path.exists', 'os.path.exists', (["(config.bin_input_dir + '/x2')"], {}), "(config.bin_input_dir + '/x2')\n", (3419, 3449), False, 'import os\n'), ((3531, 3575), 'os.path.exists', 'os.path.exists', (["(config.bin_input_dir + '/x3')"], {}), "(config.bin_input_dir + '/x3')\n", (3545, 3575), False, 'import os\n'), ((3657, 3701), 'os.path.exists', 'os.path.exists', (["(config.bin_input_dir + '/x4')"], {}), "(config.bin_input_dir + '/x4')\n", (3671, 3701), False, 'import os\n'), ((3783, 3819), 'os.path.exists', 'os.path.exists', (['config.bin_truth_dir'], {}), '(config.bin_truth_dir)\n', (3797, 3819), False, 'import os\n'), ((3482, 3523), 'os.makedirs', 'os.makedirs', (["(config.bin_input_dir + '/x2')"], {}), "(config.bin_input_dir + '/x2')\n", (3493, 3523), False, 'import os\n'), ((3608, 3649), 'os.makedirs', 'os.makedirs', (["(config.bin_input_dir + '/x3')"], {}), "(config.bin_input_dir + '/x3')\n", (3619, 3649), False, 'import os\n'), ((3734, 3775), 'os.makedirs', 'os.makedirs', (["(config.bin_input_dir + '/x4')"], {}), "(config.bin_input_dir + '/x4')\n", (3745, 3775), False, 'import os\n'), ((3852, 3885), 'os.makedirs', 'os.makedirs', (['config.bin_truth_dir'], {}), '(config.bin_truth_dir)\n', (3863, 3885), False, 'import os\n'), ((1768, 1799), 'tensorflow.read_file', 'tf.read_file', ([], {'filename': 'pic_path'}), '(filename=pic_path)\n', (1780, 1799), True, 'import tensorflow as tf\n'), ((1820, 1874), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['image'], {'channels': '(3)', 'dtype': 'tf.uint8'}), '(image, channels=3, dtype=tf.uint8)\n', (1839, 1874), True, 'import tensorflow as tf\n'), ((1943, 1989), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['image'], {'size': '(480, 320)'}), '(image, size=(480, 320))\n', (1965, 1989), True, 'import tensorflow as tf\n'), ((2495, 2506), 'numpy.array', 'np.array', (['(4)'], {}), '(4)\n', (2503, 2506), True, 'import numpy as np\n'), ((2161, 2173), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2171, 2173), True, 'import tensorflow as tf\n')] |
# calculate the bandwidth (bw) cost
import os
import pandas as pd
import numpy as np
import time
# store the matrix A
def store_bw_e2u(df_ct, store_path, n):
user_path = store_path + '/user_' + str(n)
if not os.path.exists(user_path):
os.makedirs(user_path)
df_ct.to_csv(user_path + '/cost_e.csv', index=False)
return
# generate the cost_e vector for a given set of cached tiles of a given user n
# calculate the DT sizes from the BT sizes and store them
# we use the derived eq. 2 from the paper.
def generate_cost_e(cached_tiles_m, bt_size_arr, n,
data_store_path, ena_store):
start_time = time.time()
all_dt_sizes = []
for t_ind, t in enumerate(cached_tiles_m):
# get sum of basic tiles
tot_bts_size = 0
l_l_m = int(t[0])
l_l_n = int(t[1])
u_r_m = int(t[2])
u_r_n = int(t[3])
for r in range(l_l_m, u_r_m):
for c in range(l_l_n, u_r_n):
bt_ind = r * 20 + c
tot_bts_size += bt_size_arr[bt_ind]
tot_bts_size = tot_bts_size / 1000000
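        # empirical quadratic fit (eq. 2): estimate the DT size in MB from the total BT size in MB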
dt_size_mb = 0.432 * tot_bts_size * tot_bts_size + 0.306 * tot_bts_size + 0.0025
all_dt_sizes.append(dt_size_mb)
stop_time = time.time()
# store the data
ct_col_name = np.repeat(['ct_'], len(all_dt_sizes))
cts = np.arange(len(all_dt_sizes)).astype(str)
ct_cols = np.core.defchararray.add(ct_col_name, cts)
df_cost_bw_e2u = pd.DataFrame(columns=ct_cols,
data=np.asarray(all_dt_sizes).reshape([1, -1]))
if ena_store:
store_bw_e2u(df_cost_bw_e2u, data_store_path, n)
return np.asarray(all_dt_sizes), stop_time - start_time
| [
"os.path.exists",
"os.makedirs",
"numpy.asarray",
"numpy.core.defchararray.add",
"time.time"
] | [((620, 631), 'time.time', 'time.time', ([], {}), '()\n', (629, 631), False, 'import time\n'), ((1225, 1236), 'time.time', 'time.time', ([], {}), '()\n', (1234, 1236), False, 'import time\n'), ((1380, 1422), 'numpy.core.defchararray.add', 'np.core.defchararray.add', (['ct_col_name', 'cts'], {}), '(ct_col_name, cts)\n', (1404, 1422), True, 'import numpy as np\n'), ((207, 232), 'os.path.exists', 'os.path.exists', (['user_path'], {}), '(user_path)\n', (221, 232), False, 'import os\n'), ((242, 264), 'os.makedirs', 'os.makedirs', (['user_path'], {}), '(user_path)\n', (253, 264), False, 'import os\n'), ((1645, 1669), 'numpy.asarray', 'np.asarray', (['all_dt_sizes'], {}), '(all_dt_sizes)\n', (1655, 1669), True, 'import numpy as np\n'), ((1514, 1538), 'numpy.asarray', 'np.asarray', (['all_dt_sizes'], {}), '(all_dt_sizes)\n', (1524, 1538), True, 'import numpy as np\n')] |
import cv2
import numpy as np
frameWidth = 640
frameHeight = 480
cap = cv2.VideoCapture(0)
cap.set(3, frameWidth)
cap.set(4, frameHeight)
cap.set(10, 150)
# Unfortunately, depending on your camera quality,
# you may need to adjust these values for different illuminations.
# To be used during the night:
myColors = [[0, 120, 109, 14, 255, 255],
[0, 129, 141, 179, 255, 255]]
# To be used during the evening:
# myColors = [[0, 109, 136, 88, 255, 255],
# [0, 83, 25, 18, 255, 255]]
def findcolor(img, myColors):
imgSHV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
for color in myColors:
lower = np.array(color[0:3])
upper = np.array(color[3:6])
mask = cv2.inRange(imgSHV, lower, upper)
getContours(mask)
def getContours(img):
contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
area = cv2.contourArea(cnt)
if area > 600:
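            # draw on imgResult, the frame copy created in the main loop below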
cv2.drawContours(imgResult, cnt, -1, (255, 0, 255), 3)
while True:
sucess, img = cap.read()
imgResult = img.copy()
findcolor(img, myColors)
cv2.imshow("Result", imgResult)
if cv2.waitKey(1) & 0xff == ord('q'):
break
| [
"cv2.drawContours",
"cv2.inRange",
"cv2.imshow",
"cv2.contourArea",
"numpy.array",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.findContours",
"cv2.waitKey"
] | [((72, 91), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (88, 91), False, 'import cv2\n'), ((542, 578), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (554, 578), False, 'import cv2\n'), ((805, 868), 'cv2.findContours', 'cv2.findContours', (['img', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (821, 868), False, 'import cv2\n'), ((1123, 1154), 'cv2.imshow', 'cv2.imshow', (['"""Result"""', 'imgResult'], {}), "('Result', imgResult)\n", (1133, 1154), False, 'import cv2\n'), ((622, 642), 'numpy.array', 'np.array', (['color[0:3]'], {}), '(color[0:3])\n', (630, 642), True, 'import numpy as np\n'), ((659, 679), 'numpy.array', 'np.array', (['color[3:6]'], {}), '(color[3:6])\n', (667, 679), True, 'import numpy as np\n'), ((695, 728), 'cv2.inRange', 'cv2.inRange', (['imgSHV', 'lower', 'upper'], {}), '(imgSHV, lower, upper)\n', (706, 728), False, 'import cv2\n'), ((909, 929), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (924, 929), False, 'import cv2\n'), ((965, 1019), 'cv2.drawContours', 'cv2.drawContours', (['imgResult', 'cnt', '(-1)', '(255, 0, 255)', '(3)'], {}), '(imgResult, cnt, -1, (255, 0, 255), 3)\n', (981, 1019), False, 'import cv2\n'), ((1162, 1176), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1173, 1176), False, 'import cv2\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
QDrift Class
"""
from typing import List, Union, cast
import numpy as np
from qiskit.opflow.evolutions.trotterizations.trotterization_base import TrotterizationBase
from qiskit.opflow.list_ops.composed_op import ComposedOp
from qiskit.opflow.list_ops.summed_op import SummedOp
from qiskit.opflow.operator_base import OperatorBase
from qiskit.opflow.primitive_ops.pauli_sum_op import PauliSumOp
from qiskit.opflow.primitive_ops.primitive_op import PrimitiveOp
from qiskit.utils import algorithm_globals
# pylint: disable=invalid-name
class QDrift(TrotterizationBase):
"""The QDrift Trotterization method, which selects each each term in the
Trotterization randomly, with a probability proportional to its weight. Based on the work
of <NAME> in https://arxiv.org/abs/1811.08017.
"""
def __init__(self, reps: int = 1) -> None:
r"""
Args:
reps: The number of times to repeat the Trotterization circuit.
"""
super().__init__(reps=reps)
def convert(self, operator: OperatorBase) -> OperatorBase:
if not isinstance(operator, (SummedOp, PauliSumOp)):
raise TypeError("Trotterization converters can only convert SummedOp or PauliSumOp.")
if not isinstance(operator.coeff, (float, int)):
raise TypeError(
"Trotterization converters can only convert operators with real coefficients."
)
operator_iter: Union[PauliSumOp, List[PrimitiveOp]]
if isinstance(operator, PauliSumOp):
operator_iter = operator
coeffs = operator.primitive.coeffs
coeff = operator.coeff
else:
operator_iter = cast(List[PrimitiveOp], operator.oplist)
coeffs = [op.coeff for op in operator_iter]
coeff = operator.coeff
# We artificially make the weights positive, TODO check approximation performance
weights = np.abs(coeffs)
lambd = np.sum(weights)
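        # number of terms to sample, per the QDrift bound (N ~ 2 * lambda^2 * t^2)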
N = 2 * (lambd**2) * (coeff**2)
factor = lambd * coeff / (N * self.reps)
# The protocol calls for the removal of the individual coefficients,
# and multiplication by a constant factor.
scaled_ops = [(op * (factor / op.coeff)).exp_i() for op in operator_iter]
sampled_ops = algorithm_globals.random.choice(
scaled_ops, size=(int(N * self.reps),), p=weights / lambd
)
return ComposedOp(sampled_ops).reduce()
| [
"typing.cast",
"numpy.abs",
"qiskit.opflow.list_ops.composed_op.ComposedOp",
"numpy.sum"
] | [((2419, 2433), 'numpy.abs', 'np.abs', (['coeffs'], {}), '(coeffs)\n', (2425, 2433), True, 'import numpy as np\n'), ((2450, 2465), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (2456, 2465), True, 'import numpy as np\n'), ((2178, 2218), 'typing.cast', 'cast', (['List[PrimitiveOp]', 'operator.oplist'], {}), '(List[PrimitiveOp], operator.oplist)\n', (2182, 2218), False, 'from typing import List, Union, cast\n'), ((2917, 2940), 'qiskit.opflow.list_ops.composed_op.ComposedOp', 'ComposedOp', (['sampled_ops'], {}), '(sampled_ops)\n', (2927, 2940), False, 'from qiskit.opflow.list_ops.composed_op import ComposedOp\n')] |
import sys
import os
import numpy as np
#this_name=sys.argv[0]
#var=sys.argv[1]
#print('argv[0] is ' + this_name + '; argv[1] is ' + var + '\n')
if len(sys.argv) > 1:
npy_txt_path=sys.argv[1] # it should contain files ./name.npy.txt
else:
print("should provide source directory name\n")
exit()
#npy_txt_path_nosplash=npy_txt_path.split("/")
if npy_txt_path[-1] == '/':
npy_path=npy_txt_path[:-1] + '_npy'
else:
npy_path=npy_txt_path + '_npy'
# then make an output directory
print("source directory is:" + npy_txt_path)
#npy_path=npy_txt_path + "_npy"
print("making output directory: " + npy_path)
if not os.path.exists(npy_path):
os.makedirs(npy_path)
npy_txt_files= os.listdir(npy_txt_path)
num_files = len(npy_txt_files)
print("Total file number:" + str(num_files))
for this_file in npy_txt_files:
if this_file.endswith(".txt"):
        with open(npy_txt_path + "/" + this_file) as f:
            feat_mat = np.loadtxt(f)
        output_this_file = this_file[:-4]  # strip the trailing ".txt" from xxx.npy.txt
np.save(npy_path + "/" + output_this_file, feat_mat)
print(npy_txt_path + ": done converting to npy format...")
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"numpy.loadtxt",
"numpy.save"
] | [((691, 715), 'os.listdir', 'os.listdir', (['npy_txt_path'], {}), '(npy_txt_path)\n', (701, 715), False, 'import os\n'), ((624, 648), 'os.path.exists', 'os.path.exists', (['npy_path'], {}), '(npy_path)\n', (638, 648), False, 'import os\n'), ((654, 675), 'os.makedirs', 'os.makedirs', (['npy_path'], {}), '(npy_path)\n', (665, 675), False, 'import os\n'), ((928, 941), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (938, 941), True, 'import numpy as np\n'), ((1071, 1123), 'numpy.save', 'np.save', (["(npy_path + '/' + output_this_file)", 'feat_mat'], {}), "(npy_path + '/' + output_this_file, feat_mat)\n", (1078, 1123), True, 'import numpy as np\n')] |
import numpy as np
def score1(q, doc):
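    # unnormalized dot-product similarity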
return np.dot(q, doc)
def score2(q, doc):
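    # cosine similarity: dot product scaled by both vectors' norms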
return np.dot(q, doc) / (np.linalg.norm(q) * np.linalg.norm(doc))
def retrieval(collection, query, func_score):
result = []
for i in range(len(collection)):
sim = func_score(query, collection[i])
        result.append((i + 1, sim))  # [(doc1, sc1), (doc2, sc2)]
    result.sort(key=lambda tup: tup[1])
return result
##################### main ##############################
collection = [
np.array([1.452,0,2.122,3.564,4.123,0,0,2.342,0,0,0,1.975,4.543,0,6.134,2.234]),
np.array([0,2.093,0,0,4.245,1.234,0,0,0,0,2.345,0,2.135,0,0,3.456])
]
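# query vector in the same 16-dimensional term space as the documents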
query = np.array([0,1.345,1.453,1.987,0,2.133,0,0,0,0,0,0,3.452,0,0,4.234])
### apply the score
result = retrieval(collection, query, score2)
print(result) | [
"numpy.array",
"numpy.dot",
"numpy.linalg.norm"
] | [((697, 784), 'numpy.array', 'np.array', (['[0, 1.345, 1.453, 1.987, 0, 2.133, 0, 0, 0, 0, 0, 0, 3.452, 0, 0, 4.234]'], {}), '([0, 1.345, 1.453, 1.987, 0, 2.133, 0, 0, 0, 0, 0, 0, 3.452, 0, 0, \n 4.234])\n', (705, 784), True, 'import numpy as np\n'), ((54, 68), 'numpy.dot', 'np.dot', (['q', 'doc'], {}), '(q, doc)\n', (60, 68), True, 'import numpy as np\n'), ((531, 629), 'numpy.array', 'np.array', (['[1.452, 0, 2.122, 3.564, 4.123, 0, 0, 2.342, 0, 0, 0, 1.975, 4.543, 0, \n 6.134, 2.234]'], {}), '([1.452, 0, 2.122, 3.564, 4.123, 0, 0, 2.342, 0, 0, 0, 1.975, 4.543,\n 0, 6.134, 2.234])\n', (539, 629), True, 'import numpy as np\n'), ((617, 704), 'numpy.array', 'np.array', (['[0, 2.093, 0, 0, 4.245, 1.234, 0, 0, 0, 0, 2.345, 0, 2.135, 0, 0, 3.456]'], {}), '([0, 2.093, 0, 0, 4.245, 1.234, 0, 0, 0, 0, 2.345, 0, 2.135, 0, 0, \n 3.456])\n', (625, 704), True, 'import numpy as np\n'), ((104, 118), 'numpy.dot', 'np.dot', (['q', 'doc'], {}), '(q, doc)\n', (110, 118), True, 'import numpy as np\n'), ((122, 139), 'numpy.linalg.norm', 'np.linalg.norm', (['q'], {}), '(q)\n', (136, 139), True, 'import numpy as np\n'), ((142, 161), 'numpy.linalg.norm', 'np.linalg.norm', (['doc'], {}), '(doc)\n', (156, 161), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import sys
import argparse
import numpy as np
import yaml
DESC = "Prints total number of parameters in model.npz"
S2S_SPECIAL_NODE = "special:model.yml"
def main():
args = parse_args()
print("Loading {}".format(args.model))
model = np.load(args.model)
count = 0
for key in model:
if key == S2S_SPECIAL_NODE:
continue
#print("{} : {}".format(key, model[key].size))
count += model[key].size
print("Total number of parameters: {}".format(count))
def parse_args():
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument("-m", "--model", help="model file", default="model.npz")
return parser.parse_args()
if __name__ == "__main__":
main()
| [
"numpy.load",
"argparse.ArgumentParser"
] | [((273, 292), 'numpy.load', 'np.load', (['args.model'], {}), '(args.model)\n', (280, 292), True, 'import numpy as np\n'), ((567, 608), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'DESC'}), '(description=DESC)\n', (590, 608), False, 'import argparse\n')] |
import numpy as np
from .utils.augmentations import letterbox
from .models.experimental import attempt_load
import cv2 as cv
from .utils.general import (check_img_size,
non_max_suppression, set_logging)
import argparse
import os
import sys
from pathlib import Path
import random
import torch
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
class ObjDetection:
def __init__(self, weights, imgsz=None) -> None:
if imgsz is None:
imgsz = [640, 640]
set_logging()
self.img_size = imgsz
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.half = self.device.type != 'cpu'
model = attempt_load(weights, map_location=self.device)
self.stride = max(int(model.stride.max()), 32) # model stride
self.names = model.module.names if hasattr(model, 'module') else model.names # get class names
model.float()
self.img_size = check_img_size(self.img_size, s=self.stride)
self.model = model
@property
def class_labels(self) -> list:
return self.names
def __call__(self, *args, **kwds) -> list:
return self.detect(*args, **kwds)
@torch.no_grad()
def detect(self, src, conf_thres=0.25, iou_thres=0.45, agnostic_nms=False, classes=None):
img = letterbox(src, self.img_size, stride=self.stride, auto=True)[0]
new_height, new_width = img.shape[:2]
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(self.device)
im = img.float()
im /= 255
if len(im.shape) == 3:
im = im[None]
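        # forward pass; raw predictions are filtered by non-max suppression below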
pred = self.model(im, augment=False)[0]
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms)
items = []
if pred[0] is not None and len(pred):
for p in pred[0]:
score = np.round(p[4].cpu().detach().numpy(), 2)
label = self.names[int(p[5])]
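                # rescale the box corners from the letterboxed network input back to the source frame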
xmin = int(p[0] * src.shape[1] / self.img_size[0])
ymin = int(p[1] * src.shape[0] / new_height)
xmax = int(p[2] * src.shape[1] / self.img_size[0])
ymax = int(p[3] * src.shape[0] / new_height)
item = {'label': label,
'bbox': [(xmin, ymin), (xmax, ymax)],
'score': score
}
items.append(item)
return items
def draw(self, src, objs):
object_colors = [[0, 0, 255]]
for obj in objs:
# print(obj)
label = obj['label']
score = obj['score']
[(xmin, ymin), (xmax, ymax)] = obj['bbox']
color = object_colors[self.class_labels.index(label)]
src = cv.rectangle(src, (xmin, ymin), (xmax, ymax), color, 2)
src = cv.putText(src, f'{label} ({str(score)})', (xmin, ymin),
cv.FONT_HERSHEY_SIMPLEX, 0.75, color, 1, cv.LINE_AA)
return src
if __name__ == "__main__":
cap = cv.VideoCapture('test_video.avi')
obj_model = ObjDetection("weights/best.pt", [480, 480])
test_pic_dir = '/home/michael/dataset/test_fall_pic'
count = 0
for img_path in os.listdir(test_pic_dir):
test_pic = os.path.join(test_pic_dir, img_path)
image = cv.imread(test_pic)
objs = obj_model(image, 0.70, 0.5)
saved_img = obj_model.draw(image.copy(), objs)
cv.imwrite(f'./runs/detect/exp5/frame{count}.png', saved_img)
print(f'frame{count}.png saved')
count += 1
| [
"cv2.rectangle",
"cv2.imwrite",
"os.listdir",
"pathlib.Path",
"pathlib.Path.cwd",
"os.path.join",
"torch.from_numpy",
"numpy.ascontiguousarray",
"torch.cuda.is_available",
"cv2.VideoCapture",
"torch.no_grad",
"cv2.imread"
] | [((1387, 1402), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1400, 1402), False, 'import torch\n'), ((3308, 3341), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""test_video.avi"""'], {}), "('test_video.avi')\n", (3323, 3341), True, 'import cv2 as cv\n'), ((3496, 3520), 'os.listdir', 'os.listdir', (['test_pic_dir'], {}), '(test_pic_dir)\n', (3506, 3520), False, 'import os\n'), ((329, 343), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (333, 343), False, 'from pathlib import Path\n'), ((517, 527), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (525, 527), False, 'from pathlib import Path\n'), ((1708, 1733), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (1728, 1733), True, 'import numpy as np\n'), ((3541, 3577), 'os.path.join', 'os.path.join', (['test_pic_dir', 'img_path'], {}), '(test_pic_dir, img_path)\n', (3553, 3577), False, 'import os\n'), ((3594, 3613), 'cv2.imread', 'cv.imread', (['test_pic'], {}), '(test_pic)\n', (3603, 3613), True, 'import cv2 as cv\n'), ((3720, 3781), 'cv2.imwrite', 'cv.imwrite', (['f"""./runs/detect/exp5/frame{count}.png"""', 'saved_img'], {}), "(f'./runs/detect/exp5/frame{count}.png', saved_img)\n", (3730, 3781), True, 'import cv2 as cv\n'), ((3037, 3092), 'cv2.rectangle', 'cv.rectangle', (['src', '(xmin, ymin)', '(xmax, ymax)', 'color', '(2)'], {}), '(src, (xmin, ymin), (xmax, ymax), color, 2)\n', (3049, 3092), True, 'import cv2 as cv\n'), ((773, 798), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (796, 798), False, 'import torch\n'), ((1748, 1769), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (1764, 1769), False, 'import torch\n')] |