code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# ---------------------------------------------------------#
# Utilities to handle dictionary
# ---------------------------------------------------------#
import copy
import numpy as np
def dict_np_to_dict_list(input_dict):
    """
    Convert a dict of numpy arrays into a dict of plain Python lists.

    A non-dict input is converted with ``.tolist()`` directly.
    """
    result = copy.copy(input_dict)
    if type(result) is not dict:
        return result.tolist()
    for key in list(result.keys()):
        result[key] = result[key].tolist()
    return result
def dict_list_to_dict_np(input_dict):
    """
    Convert a dict of lists into a dict of numpy arrays.

    A non-dict input is wrapped with ``np.array`` directly.
    """
    out = copy.copy(input_dict)
    if type(out) is not dict:
        return np.array(out)
    for key in list(out.keys()):
        out[key] = np.array(out[key])
    return out
def list_to_dict(names, arrs):
    """
    Match a list (or array) of values against a list of names.

    - list of matching length: one value per name
    - non-empty list of other length: every name maps to ``arrs[0]``
    - empty list with a length mismatch: IndexError
    - ndarray: mapped to the single name, or shared across all names
    - anything else: returned unchanged
    """
    # TODO: need detailed test
    if type(arrs) is list:
        if len(names) == len(arrs):
            return dict(zip(names, arrs))
        if len(arrs):
            return {name: arrs[0] for name in names}
        raise IndexError(f"names has a length of {len(names)} but arrs has a length of {len(arrs)}")
    if type(arrs) is np.ndarray and len(names) == 1:
        return {names[0]: arrs}
    if type(arrs) is np.ndarray and len(names) > 1:
        return {name: arrs for name in names}
    return arrs
def to_iterable(var):
    """
    Return `var` unchanged if it is iterable, otherwise wrap it in a list.

    Strings are deliberately treated as NOT iterable (wrapped in a list).
    """
    if type(var) is str:
        return [var]
    try:
        iter(var)
    except Exception:
        return [var]
    return var
| [
"numpy.array",
"copy.copy"
] | [((311, 332), 'copy.copy', 'copy.copy', (['input_dict'], {}), '(input_dict)\n', (320, 332), False, 'import copy\n'), ((672, 693), 'copy.copy', 'copy.copy', (['input_dict'], {}), '(input_dict)\n', (681, 693), False, 'import copy\n'), ((889, 909), 'numpy.array', 'np.array', (['input_dict'], {}), '(input_dict)\n', (897, 909), True, 'import numpy as np\n'), ((809, 835), 'numpy.array', 'np.array', (['input_dict[name]'], {}), '(input_dict[name])\n', (817, 835), True, 'import numpy as np\n')] |
import numpy as np
from a_nice_mc.objectives.bayes_logistic_regression import BayesianLogisticRegression
class Heart(BayesianLogisticRegression):
    """Bayesian logistic regression objective on the UCI Heart dataset.

    Features are standardized (zero mean, unit variance per column) before
    being handed to the base class.
    """

    def __init__(self, name='heart', batch_size=32):
        features = np.load('data/heart/data.npy')
        labels = np.load('data/heart/labels.npy')
        # Standardize every feature column before fitting.
        feat_mean = np.mean(features, axis=0)
        feat_std = np.std(features, axis=0)
        features = (features - feat_mean) / feat_std
        super(Heart, self).__init__(features, labels, batch_size=batch_size)
        self.name = name

    @staticmethod
    def mean():
        # Hard-coded per-weight reference values (name suggests posterior
        # mean of the regression weights — confirm against training run).
        return np.array([
            -0.13996868, 0.71390106, 0.69571619, 0.43944853, 0.36997702, -0.27319424,
            0.31730518, -0.49617367, 0.40516419, 0.4312388, 0.26531786, 1.10337417,
            0.70054367, -0.25684964
        ])

    @staticmethod
    def std():
        # Hard-coded per-weight reference values (name suggests posterior
        # standard deviation — confirm against training run).
        return np.array([
            0.22915648, 0.24545612, 0.20457998, 0.20270157, 0.21040644, 0.20094482,
            0.19749419, 0.24134014, 0.20230987, 0.25595334, 0.23709087, 0.24735325,
            0.20701178, 0.19771984
        ])
| [
"numpy.mean",
"numpy.array",
"numpy.load",
"numpy.std"
] | [((216, 246), 'numpy.load', 'np.load', (['"""data/heart/data.npy"""'], {}), "('data/heart/data.npy')\n", (223, 246), True, 'import numpy as np\n'), ((264, 296), 'numpy.load', 'np.load', (['"""data/heart/labels.npy"""'], {}), "('data/heart/labels.npy')\n", (271, 296), True, 'import numpy as np\n'), ((351, 372), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (358, 372), True, 'import numpy as np\n'), ((386, 406), 'numpy.std', 'np.std', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (392, 406), True, 'import numpy as np\n'), ((588, 778), 'numpy.array', 'np.array', (['[-0.13996868, 0.71390106, 0.69571619, 0.43944853, 0.36997702, -0.27319424, \n 0.31730518, -0.49617367, 0.40516419, 0.4312388, 0.26531786, 1.10337417,\n 0.70054367, -0.25684964]'], {}), '([-0.13996868, 0.71390106, 0.69571619, 0.43944853, 0.36997702, -\n 0.27319424, 0.31730518, -0.49617367, 0.40516419, 0.4312388, 0.26531786,\n 1.10337417, 0.70054367, -0.25684964])\n', (596, 778), True, 'import numpy as np\n'), ((872, 1059), 'numpy.array', 'np.array', (['[0.22915648, 0.24545612, 0.20457998, 0.20270157, 0.21040644, 0.20094482, \n 0.19749419, 0.24134014, 0.20230987, 0.25595334, 0.23709087, 0.24735325,\n 0.20701178, 0.19771984]'], {}), '([0.22915648, 0.24545612, 0.20457998, 0.20270157, 0.21040644, \n 0.20094482, 0.19749419, 0.24134014, 0.20230987, 0.25595334, 0.23709087,\n 0.24735325, 0.20701178, 0.19771984])\n', (880, 1059), True, 'import numpy as np\n')] |
'''
Demo: run the face model on a single image, predicting age, gender and
race for each detected face and drawing the results onto the image.
'''
import numpy as np
import cv2
import face_recognition
from face import Face
from utils import putText
from utils import preprocess_input

# Build the network in inference mode and load pretrained weights.
model = Face(train=False)
model.load_weights('./face_weights/face_weights.26-val_loss-3.85-val_age_loss-3.08-val_gender_loss-0.22-val_race_loss-0.55.utk.h5')
gender_labels = ['Male', 'Female']
race_labels = ['Whites', 'Blacks', 'Asian', 'Indian', 'Others']
# Age head outputs a distribution over ages 1..93; expectation is taken below.
#https://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Rothe_DEX_Deep_EXpectation_ICCV_2015_paper.pdf
age_labels = np.reshape(np.arange(1, 94), (93,1))
demo_image = cv2.imread('./demo_images/how-old-demo5.jpg')
image_h, image_w = demo_image.shape[0], demo_image.shape[1]
# Fraction of the image size added around each detected face box.
margin = 0.01
face_locations = face_recognition.face_locations(demo_image, model='hog')
if len(face_locations) > 0:
    face_batch = np.empty((len(face_locations), 200, 200, 3))
    # add face images into batch
    for i,rect in enumerate(face_locations):
        # crop with a margin, clamped to the image bounds
        top, bottom, left, right = rect[0], rect[2], rect[3], rect[1]
        top = max(int(top - image_h * margin), 0)
        left = max(int(left - image_w * margin), 0)
        bottom = min(int(bottom + image_h * margin), image_h - 1)
        right = min(int(right + image_w * margin), image_w - 1)
        face_img = demo_image[top:bottom, left:right, :]
        face_img = cv2.resize(face_img, (200, 200))
        face_batch[i, :, :, :] = face_img
    face_batch = preprocess_input(face_batch)
    # preds holds one array per head: [ages, genders, races]
    preds = model.predict(face_batch)
    preds_ages = preds[0]
    preds_genders = preds[1]
    preds_races = preds[2]
    # display on screen
    for rect, age, gender, race in zip(face_locations, preds_ages, preds_genders, preds_races):
        cv2.rectangle(demo_image, (rect[3], rect[0]), (rect[1], rect[2]), (255, 0, 0), 2)
        age = np.expand_dims(age, 0)
        # Expected age = age distribution dotted with ages 1..93 (DEX):
        # https://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Rothe_DEX_Deep_EXpectation_ICCV_2015_paper.pdf
        age_data = int(age.dot(age_labels).flatten())
        gender_index = np.argmax(gender)
        race_index = np.argmax(race)
        demo_image = putText(demo_image, 'gender: {0}'.format(gender_labels[gender_index]), (255, 0, 0), (rect[3], rect[0]-16), size=15)
        demo_image = putText(demo_image, 'race: {0}'.format(race_labels[race_index]), (255, 0, 0), (rect[3], rect[0]-32), size=15)
        demo_image = putText(demo_image, 'age: {0}'.format(age_data), (255, 0, 0), (rect[3], rect[0]-48), size=15)
cv2.imshow('image', demo_image)
if cv2.waitKey(0) & 0xff == ord("q"):
    cv2.destroyAllWindows()
| [
"cv2.rectangle",
"face_recognition.face_locations",
"numpy.arange",
"face.Face",
"numpy.argmax",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.expand_dims",
"cv2.resize",
"cv2.imread",
"utils.preprocess_input"
] | [((179, 196), 'face.Face', 'Face', ([], {'train': '(False)'}), '(train=False)\n', (183, 196), False, 'from face import Face\n'), ((622, 667), 'cv2.imread', 'cv2.imread', (['"""./demo_images/how-old-demo5.jpg"""'], {}), "('./demo_images/how-old-demo5.jpg')\n", (632, 667), False, 'import cv2\n'), ((760, 816), 'face_recognition.face_locations', 'face_recognition.face_locations', (['demo_image'], {'model': '"""hog"""'}), "(demo_image, model='hog')\n", (791, 816), False, 'import face_recognition\n'), ((581, 597), 'numpy.arange', 'np.arange', (['(1)', '(94)'], {}), '(1, 94)\n', (590, 597), True, 'import numpy as np\n'), ((1492, 1520), 'utils.preprocess_input', 'preprocess_input', (['face_batch'], {}), '(face_batch)\n', (1508, 1520), False, 'from utils import preprocess_input\n'), ((2547, 2578), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'demo_image'], {}), "('image', demo_image)\n", (2557, 2578), False, 'import cv2\n'), ((1395, 1427), 'cv2.resize', 'cv2.resize', (['face_img', '(200, 200)'], {}), '(face_img, (200, 200))\n', (1405, 1427), False, 'import cv2\n'), ((1771, 1857), 'cv2.rectangle', 'cv2.rectangle', (['demo_image', '(rect[3], rect[0])', '(rect[1], rect[2])', '(255, 0, 0)', '(2)'], {}), '(demo_image, (rect[3], rect[0]), (rect[1], rect[2]), (255, 0, \n 0), 2)\n', (1784, 1857), False, 'import cv2\n'), ((1867, 1889), 'numpy.expand_dims', 'np.expand_dims', (['age', '(0)'], {}), '(age, 0)\n', (1881, 1889), True, 'import numpy as np\n'), ((2104, 2121), 'numpy.argmax', 'np.argmax', (['gender'], {}), '(gender)\n', (2113, 2121), True, 'import numpy as np\n'), ((2143, 2158), 'numpy.argmax', 'np.argmax', (['race'], {}), '(race)\n', (2152, 2158), True, 'import numpy as np\n'), ((2629, 2652), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2650, 2652), False, 'import cv2\n'), ((2586, 2600), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2597, 2600), False, 'import cv2\n')] |
import pickle
import time
from typing import Callable, Optional
import numpy as np
from active_reward_learning.common.policy import FixedPolicy
from active_reward_learning.envs import (
HighwayDriving,
RewardModelMeanWrapper,
RewardModelSampleWrapper,
)
from .base import BaseSolver
def get_features_from_policy(env, policy):
    """Represent policies with average feature vector.

    This only makes sense for linear reward functions, but it is only used
    for the HighwayDriving environment.
    """
    assert isinstance(env.unwrapped, HighwayDriving)
    assert isinstance(policy, FixedPolicy)
    n_rollouts = 10
    feature_sum = np.zeros(env.Ndim_repr)
    for _ in range(n_rollouts):
        obs = env.reset()
        done = False
        while not done:
            action = policy.get_action(obs)
            obs, _, done, info = env.step(action)
            feature_sum += info["gp_repr"]
    # Average the accumulated per-step feature vectors over all rollouts.
    return feature_sum / n_rollouts
class LBFGSArgmaxSolver(BaseSolver):
    """Solver that returns the best policy among an LBFGS-optimized policy
    and a fixed set of candidate policies, scored under the current reward
    weights.

    Candidate policies are summarized once by their average feature vectors
    so their value under linear reward weights `w` is simply `features . w`.
    """

    def __init__(
        self,
        env,
        solver_policies_file=None,
        candidate_policies=None,
        debug=False,
    ):
        """Load candidate policies from a pickle file or take them directly.

        Args:
            env: a (possibly wrapped) HighwayDriving environment.
            solver_policies_file: path to a pickled list of policies;
                takes precedence over `candidate_policies`.
            candidate_policies: list of policies to compare against.
            debug: if True, print candidate values during `solve`.
        """
        assert isinstance(env.unwrapped, HighwayDriving)
        assert solver_policies_file is not None or candidate_policies is not None
        if solver_policies_file is not None:
            with open(solver_policies_file, "rb") as f:
                self.candidate_policies = pickle.load(f)
        elif candidate_policies is not None:
            self.candidate_policies = candidate_policies
        else:
            # FIX: was `return NotImplementedError()`, which makes __init__
            # return a non-None value and fail with a TypeError at call time.
            raise NotImplementedError()
        # Precompute the average feature vector of every candidate policy.
        self.features = []
        for policy in self.candidate_policies:
            features = get_features_from_policy(env, policy)
            self.features.append(features)
        assert len(self.candidate_policies) > 0
        assert len(self.candidate_policies) == len(self.features)
        self.debug = debug
        super().__init__(env)

    def solve(
        self,
        n_episodes: int = 1,
        rewards: Optional[np.ndarray] = None,
        logging_callback: Optional[Callable] = None,
    ):
        """Return the highest-value policy under the current reward estimate.

        When n_episodes > 0, an LBFGS-optimized policy is obtained from the
        environment and evaluated by rollout as the baseline; every candidate
        is then scored by its linear value and the argmax is kept.
        `rewards` and `logging_callback` are accepted for interface
        compatibility but are not used here.
        """
        t = time.time()
        # Pick the reward weight vector matching the env wrapper in use.
        if isinstance(self.env, RewardModelMeanWrapper):
            w = self.env.reward_model.gp_model.linear_predictive_mean
        elif isinstance(self.env, RewardModelSampleWrapper):
            w = self.env.theta
        else:
            w = self.env.reward_w
        if n_episodes > 0:
            best_policy = self.env.get_optimal_policy(w=w, restarts=n_episodes)
            best_value = best_policy.evaluate(self.env, N=10, rollout=True)
        else:
            best_policy = None
            best_value = -float("inf")
        if self.debug:
            print("best_value", best_value)
        for policy, features in zip(self.candidate_policies, self.features):
            value = np.dot(features, w)
            if self.debug:
                print("value", value)
            if value > best_value:
                best_policy, best_value = policy, value
                if self.debug:
                    print("update, best_value", best_value)
        self.policy = best_policy
        assert self.policy is not None
        self.solve_time += time.time() - t
        return self.policy
| [
"numpy.dot",
"numpy.zeros",
"pickle.load",
"time.time"
] | [((645, 668), 'numpy.zeros', 'np.zeros', (['env.Ndim_repr'], {}), '(env.Ndim_repr)\n', (653, 668), True, 'import numpy as np\n'), ((2100, 2111), 'time.time', 'time.time', ([], {}), '()\n', (2109, 2111), False, 'import time\n'), ((2814, 2833), 'numpy.dot', 'np.dot', (['features', 'w'], {}), '(features, w)\n', (2820, 2833), True, 'import numpy as np\n'), ((3182, 3193), 'time.time', 'time.time', ([], {}), '()\n', (3191, 3193), False, 'import time\n'), ((1399, 1413), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1410, 1413), False, 'import pickle\n')] |
"""
Copyright 2020 Universitat Politècnica de Catalunya
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import tensorflow as tf
import random
import networkx as nx
from datanetAPI import DatanetAPI
import argparse
from networkx.readwrite import json_graph
import json
import tarfile
import os
import sys
def generator(data_dir):
    """Yield one path/link hypergraph (networkx DiGraph) per sample found
    by DatanetAPI under `data_dir`."""
    api = DatanetAPI(data_dir)
    for sample in iter(api):
        topology = sample.get_topology_object().copy()
        traffic = sample.get_traffic_matrix()
        routing = sample.get_routing_matrix()
        performance = sample.get_performance_matrix()
        yield network_to_hypergraph(network_graph=topology,
                                      routing_matrix=routing,
                                      traffic_matrix=traffic,
                                      performance_matrix=performance)
def network_to_hypergraph(network_graph, routing_matrix, traffic_matrix, performance_matrix):
    """Build a bipartite path/link hypergraph from one network sample.

    Nodes:
        'p_<src>_<dst>': one per ordered src != dst pair, annotated with the
            first flow's average bandwidth ('traffic') and the pair's
            average delay ('delay').
        'l_<u>_<v>': one per physical link, annotated with its 'capacity'.
    Edges connect each path node with every link node on its route, in both
    directions. Path nodes that end up with no outgoing edges are removed.
    """
    G = (nx.DiGraph(network_graph)).copy()
    R = np.copy(routing_matrix)
    T = np.copy(traffic_matrix)
    P = np.copy(performance_matrix)
    D_G = nx.DiGraph()
    for src in range(G.number_of_nodes()):
        for dst in range(G.number_of_nodes()):
            if src != dst:
                D_G.add_node('p_{}_{}'.format(src, dst),
                             entity='path',
                             traffic=T[src, dst]['Flows'][0]['AvgBw'],
                             delay=P[src, dst]['AggInfo']['AvgDelay'])
                if G.has_edge(src, dst):
                    D_G.add_node('l_{}_{}'.format(src, dst),
                                 entity='link',
                                 capacity=int(G.edges[src, dst]['bandwidth']))
                # Walk consecutive hop pairs of the src->dst route and connect
                # the path node with every traversed link node (both ways).
                for h_1, h_2 in [R[src, dst][i:i + 2] for i in range(0, len(R[src, dst]) - 1)]:
                    D_G.add_edge('p_{}_{}'.format(src, dst), 'l_{}_{}'.format(h_1, h_2))
                    D_G.add_edge('l_{}_{}'.format(h_1, h_2), 'p_{}_{}'.format(src, dst))
    # Drop nodes whose route produced no outgoing edges (unconnected pairs).
    D_G.remove_nodes_from([node for node, out_degree in D_G.out_degree() if out_degree == 0])
    return D_G
def _save_chunk(data, tmp_dir, out_prefix, file_ctr):
    """Serialize `data` to JSON and pack it as <out_prefix>sample_<file_ctr>.tar.gz."""
    json_path = tmp_dir + 'data.json'
    with open(json_path, 'w') as json_file:
        json.dump(data, json_file)
    tar = tarfile.open(out_prefix + "sample_" + str(file_ctr) + ".tar.gz", "w:gz")
    tar.add(json_path, arcname="data.json")
    tar.close()
    os.remove(json_path)


def migrate_dataset(input_path, output_path, max_per_file, split):
    """Convert a DatanetAPI dataset into tar.gz chunks of JSON hypergraphs.

    Each chunk is a JSON list of node-link graphs and is randomly assigned
    to `output_path`/eval/ (with probability `split`) or train/.

    Args:
        input_path: dataset directory readable by DatanetAPI; the topology
            name is taken from the 4th path component.
        output_path: base output directory; must already contain the
            train/ and eval/ subdirectories.
        max_per_file: chunk size threshold (note: each chunk actually holds
            max_per_file + 1 samples — behavior preserved from the original).
        split: probability that a chunk goes to the eval split.
    """
    gen = generator(input_path)
    data = []
    file_ctr_train = 0
    file_ctr_eval = 0
    topology = input_path.split('/')[3]
    tmp_dir = output_path + "tmp/"
    os.system("rm -rf %s" % (tmp_dir))
    os.makedirs(tmp_dir)
    counter = 0
    while True:
        try:
            G = next(gen)
        # FIX: was a bare `except:` around the whole loop body, which also
        # swallowed real errors (I/O failures, bugs) as "end of data".
        except StopIteration:
            # Generator exhausted: flush whatever samples remain.
            if np.random.rand() < split:
                _save_chunk(data, tmp_dir, output_path + 'eval/' + topology, file_ctr_eval)
            else:
                _save_chunk(data, tmp_dir, output_path + 'train/' + topology, file_ctr_train)
            os.system("rm -rf %s" % (tmp_dir))
            break
        data.append(json_graph.node_link_data(G))
        if counter == max_per_file:
            # Chunk is full: write it to a randomly chosen split.
            if np.random.rand() < split:
                _save_chunk(data, tmp_dir, output_path + 'eval/' + topology, file_ctr_eval)
                file_ctr_eval += 1
            else:
                _save_chunk(data, tmp_dir, output_path + 'train/' + topology, file_ctr_train)
                file_ctr_train += 1
            data = []
            counter = 0
        else:
            counter += 1
if __name__ == "__main__":
    # python migrate.py -d ../../../nsfnetbw/ -o ./datansfnet/ -n 100 -s 0.8
    # Parse logs and get best model
    parser = argparse.ArgumentParser(description='Parse file and create plots')
    parser.add_argument('-d', help='Origin data directory', type=str, required=True, nargs='+')
    parser.add_argument('-o', help='Output data directory', type=str, required=True, nargs='+')
    parser.add_argument('-n', help='Number of samples per file', type=int, required=True, nargs='+')
    parser.add_argument('-s', help='Percentage split of files used for TRAINING. 1-percentage will be added to EVALUATION set.', type=float, required=True, nargs='+')
    args = parser.parse_args()
    origin_dir = args.d[0]
    output_dir = args.o[0]
    # args.s is the TRAIN fraction; migrate_dataset expects the EVAL fraction.
    split = 1-float(args.s[0])
    num_samples_file = args.n[0]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # NOTE(review): this removes the entire output_dir (not just the stale
    # subfolder) whenever eval/ or train/ already exists — confirm intended.
    if os.path.exists(output_dir+'/eval'):
        os.system("rm -rf %s" % (output_dir))
    if os.path.exists(output_dir+'/train'):
        os.system("rm -rf %s" % (output_dir))
    os.makedirs(output_dir+'/eval')
    os.makedirs(output_dir+'/train')
migrate_dataset(origin_dir, output_dir, num_samples_file, split) | [
"numpy.copy",
"os.path.exists",
"numpy.random.rand",
"os.makedirs",
"datanetAPI.DatanetAPI",
"argparse.ArgumentParser",
"networkx.DiGraph",
"networkx.readwrite.json_graph.node_link_data",
"os.system",
"json.dump",
"os.remove"
] | [((873, 893), 'datanetAPI.DatanetAPI', 'DatanetAPI', (['data_dir'], {}), '(data_dir)\n', (883, 893), False, 'from datanetAPI import DatanetAPI\n'), ((1498, 1521), 'numpy.copy', 'np.copy', (['routing_matrix'], {}), '(routing_matrix)\n', (1505, 1521), True, 'import numpy as np\n'), ((1530, 1553), 'numpy.copy', 'np.copy', (['traffic_matrix'], {}), '(traffic_matrix)\n', (1537, 1553), True, 'import numpy as np\n'), ((1562, 1589), 'numpy.copy', 'np.copy', (['performance_matrix'], {}), '(performance_matrix)\n', (1569, 1589), True, 'import numpy as np\n'), ((1601, 1613), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1611, 1613), True, 'import networkx as nx\n'), ((2828, 2860), 'os.system', 'os.system', (["('rm -rf %s' % tmp_dir)"], {}), "('rm -rf %s' % tmp_dir)\n", (2837, 2860), False, 'import os\n'), ((2867, 2887), 'os.makedirs', 'os.makedirs', (['tmp_dir'], {}), '(tmp_dir)\n', (2878, 2887), False, 'import os\n'), ((5442, 5508), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse file and create plots"""'}), "(description='Parse file and create plots')\n", (5465, 5508), False, 'import argparse\n'), ((6204, 6240), 'os.path.exists', 'os.path.exists', (["(output_dir + '/eval')"], {}), "(output_dir + '/eval')\n", (6218, 6240), False, 'import os\n'), ((6298, 6335), 'os.path.exists', 'os.path.exists', (["(output_dir + '/train')"], {}), "(output_dir + '/train')\n", (6312, 6335), False, 'import os\n'), ((6386, 6419), 'os.makedirs', 'os.makedirs', (["(output_dir + '/eval')"], {}), "(output_dir + '/eval')\n", (6397, 6419), False, 'import os\n'), ((6422, 6456), 'os.makedirs', 'os.makedirs', (["(output_dir + '/train')"], {}), "(output_dir + '/train')\n", (6433, 6456), False, 'import os\n'), ((6132, 6158), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (6146, 6158), False, 'import os\n'), ((6168, 6191), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (6179, 6191), False, 'import os\n'), 
((6248, 6283), 'os.system', 'os.system', (["('rm -rf %s' % output_dir)"], {}), "('rm -rf %s' % output_dir)\n", (6257, 6283), False, 'import os\n'), ((6343, 6378), 'os.system', 'os.system', (["('rm -rf %s' % output_dir)"], {}), "('rm -rf %s' % output_dir)\n", (6352, 6378), False, 'import os\n'), ((1456, 1481), 'networkx.DiGraph', 'nx.DiGraph', (['network_graph'], {}), '(network_graph)\n', (1466, 1481), True, 'import networkx as nx\n'), ((2987, 3015), 'networkx.readwrite.json_graph.node_link_data', 'json_graph.node_link_data', (['G'], {}), '(G)\n', (3012, 3015), False, 'from networkx.readwrite import json_graph\n'), ((3114, 3130), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3128, 3130), True, 'import numpy as np\n'), ((4345, 4361), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4359, 4361), True, 'import numpy as np\n'), ((5235, 5267), 'os.system', 'os.system', (["('rm -rf %s' % tmp_dir)"], {}), "('rm -rf %s' % tmp_dir)\n", (5244, 5267), False, 'import os\n'), ((3586, 3618), 'os.remove', 'os.remove', (["(tmp_dir + 'data.json')"], {}), "(tmp_dir + 'data.json')\n", (3595, 3618), False, 'import os\n'), ((4069, 4101), 'os.remove', 'os.remove', (["(tmp_dir + 'data.json')"], {}), "(tmp_dir + 'data.json')\n", (4078, 4101), False, 'import os\n'), ((4780, 4812), 'os.remove', 'os.remove', (["(tmp_dir + 'data.json')"], {}), "(tmp_dir + 'data.json')\n", (4789, 4812), False, 'import os\n'), ((5192, 5224), 'os.remove', 'os.remove', (["(tmp_dir + 'data.json')"], {}), "(tmp_dir + 'data.json')\n", (5201, 5224), False, 'import os\n'), ((3327, 3353), 'json.dump', 'json.dump', (['data', 'json_file'], {}), '(data, json_file)\n', (3336, 3353), False, 'import json\n'), ((3809, 3835), 'json.dump', 'json.dump', (['data', 'json_file'], {}), '(data, json_file)\n', (3818, 3835), False, 'import json\n'), ((4537, 4563), 'json.dump', 'json.dump', (['data', 'json_file'], {}), '(data, json_file)\n', (4546, 4563), False, 'import json\n'), ((4948, 4974), 'json.dump', 
'json.dump', (['data', 'json_file'], {}), '(data, json_file)\n', (4957, 4974), False, 'import json\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 14:43:48 2020
@author: arnou
"""
import os
import errno
import numpy as np
import librosa
import pandas as pd
import more_itertools
from sklearn.preprocessing import StandardScaler
def segment_audio(signal, fs, win_size, win_step):
    """Split `signal` into windows of `win_size` samples taken every
    `win_step` samples (trailing window is None-padded by more_itertools).

    Note: `fs` is accepted for interface compatibility but unused here.
    """
    print('segment')
    windows = more_itertools.windowed(signal, n=win_size, step=win_step)
    return list(windows)
def obtain_win_label_single(seg_label):
    """Collapse each windowed label array into a single 0/1 window label.

    A window is labelled 1 when at least half of its entries are positive;
    `None` entries are treated as 0.

    Args:
        seg_label: sequence of per-window label arrays (0/1 or None values).

    Returns:
        (n_windows, 1) numpy array of 0/1 window labels.
    """
    seg_win_label = np.zeros((len(seg_label), 1))
    for i_seg in range(len(seg_label)):
        win_label_value = np.asarray(seg_label[i_seg])
        # Treat missing labels as negative.
        win_label_value[win_label_value == None] = 0
        # FIX: removed leftover debug print of every window (the sibling
        # obtain_win_label has the same print commented out).
        # Majority vote: positive when >= 50% of the window is positive.
        if np.sum(win_label_value) / len(win_label_value) >= 0.5:
            seg_win_label[i_seg] = 1
    return seg_win_label
def obtain_win_label(seg_label):
    """Derive four 0/1 labels per window at increasing positive-fraction
    thresholds: any positive, 100%, >=75%, and >=50%.

    Returns an (n_windows, 4) array with columns
    [exist, strong, mid, weak].
    """
    print('windowed label array to lable value')
    n_win = len(seg_label)
    label_exist = np.zeros((n_win, 1))
    label_strong = np.zeros((n_win, 1))
    label_mid = np.zeros((n_win, 1))
    label_weak = np.zeros((n_win, 1))
    for idx in range(n_win):
        values = np.asarray(seg_label[idx])
        # Missing labels count as negative.
        values[values == None] = 0
        total = np.sum(values)
        fraction = total / len(values)
        if total > 0:
            label_exist[idx] = 1
        if fraction == 1:
            label_strong[idx] = 1
        if fraction >= 0.75:
            label_mid[idx] = 1
        if fraction >= 0.5:
            label_weak[idx] = 1
    return np.concatenate((label_exist, label_strong, label_mid, label_weak), axis=1)
def make_sure_path_exists(path):
    """Create `path` (including parents) if needed; the already-exists
    error is ignored, everything else is re-raised."""
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        raise
def signal_1d_to_2d(feat_1d, fs):
    """Convert each 1-D signal row into a flattened mel spectrogram.

    Args:
        feat_1d: 2-D array, one raw signal per row.
        fs: sampling rate in Hz (window is 20 ms, hop 10 ms).

    Returns:
        (feat_2d, row, col): list of flattened spectrograms plus the 2-D
        shape of a spectrogram (taken from the last processed row).
    """
    winsize, winstep = 0.02, 0.01  # second
    feat_2d = []
    n_sample, n_feat = feat_1d.shape
    for i_sample in range(n_sample):
        row_signal = feat_1d[i_sample,:]
        # Make the row contiguous for librosa.
        row_signal = np.asfortranarray(row_signal)
        S = librosa.feature.melspectrogram(y=row_signal, sr=fs, n_mels=120,
                                           hop_length=int(winstep*fs), n_fft=int(winsize*fs),
                                           fmax=fs/2)
        feat_2d.append(np.array(S).ravel())
        row, col = S.shape
    return feat_2d, row, col
def reshape_data_3d(feat, time_sequence, row, col):
    """Reshape each flat row of `feat` into a (time_sequence, row, col, 1)
    volume and stack them into one array."""
    volumes = [
        sample.reshape(time_sequence, row, col)[:, :, :, np.newaxis]
        for sample in feat
    ]
    return np.array(volumes)
def reshape_data(feat, row, col):
    """Reshape each flat row of `feat` into a (row, col, 1) image and stack
    them into one array."""
    images = [sample.reshape(row, col)[:, :, np.newaxis] for sample in feat]
    return np.array(images)
def reshape_data_block(feat):
    """Split each flat row into 7 equal blocks, reshape each block to
    (17, 59), and stack the blocks vertically into a (119, 59, 1) image.

    Requires each row to contain exactly 7 * 17 * 59 values.
    """
    images = []
    for sample in feat:
        blocks = [block.reshape(17, 59) for block in np.split(sample, 7)]
        images.append(np.vstack(blocks)[:, :, np.newaxis])
    return np.array(images)
def read_data(dataPath):
    """Load a headerless CSV: the last column is the label, the rest are
    features. Returns (feat, label) as numpy arrays."""
    values = pd.read_csv(dataPath, header=None).values
    return values[:, 0:-1], values[:, -1]
def normalize_data(feat):
    """Standardize `feat` column-wise (zero mean, unit variance) via sklearn."""
    res = StandardScaler().fit_transform(feat)
    return res
def fast_zscore(my_data):
    """Z-score each column of `my_data` independently.

    Returns a list of normalized columns (one 1-D array per column).
    """
    normalized_cols = []
    for col_idx in range(my_data.shape[1]):
        print(col_idx)
        column = my_data[:, col_idx]
        normalized_cols.append((column - column.mean()) / column.std())
    return normalized_cols
def fast_min_max(my_data):
    """Min-max scale each column of `my_data` to [0, 1].

    Returns a list of scaled columns (one 1-D array per column).
    """
    scaled_cols = []
    for col_idx in range(my_data.shape[1]):
        print(col_idx)
        column = my_data[:, col_idx]
        col_min, col_max = np.min(column), np.max(column)
        scaled_cols.append((column - col_min) / (col_max - col_min))
    return scaled_cols
| [
"pandas.read_csv",
"os.makedirs",
"numpy.asarray",
"numpy.min",
"numpy.max",
"numpy.asfortranarray",
"numpy.array",
"numpy.split",
"numpy.sum",
"sklearn.preprocessing.StandardScaler",
"numpy.vstack",
"numpy.concatenate",
"more_itertools.windowed"
] | [((1811, 1921), 'numpy.concatenate', 'np.concatenate', (['(seg_win_label_exist, seg_win_label_strong, seg_win_label_mid,\n seg_win_label_weak)'], {'axis': '(1)'}), '((seg_win_label_exist, seg_win_label_strong,\n seg_win_label_mid, seg_win_label_weak), axis=1)\n', (1825, 1921), True, 'import numpy as np\n'), ((3175, 3195), 'numpy.array', 'np.array', (['feat_final'], {}), '(feat_final)\n', (3183, 3195), True, 'import numpy as np\n'), ((3540, 3560), 'numpy.array', 'np.array', (['feat_final'], {}), '(feat_final)\n', (3548, 3560), True, 'import numpy as np\n'), ((4194, 4214), 'numpy.array', 'np.array', (['feat_final'], {}), '(feat_final)\n', (4202, 4214), True, 'import numpy as np\n'), ((4287, 4321), 'pandas.read_csv', 'pd.read_csv', (['dataPath'], {'header': 'None'}), '(dataPath, header=None)\n', (4298, 4321), True, 'import pandas as pd\n'), ((333, 391), 'more_itertools.windowed', 'more_itertools.windowed', (['signal'], {'n': 'win_size', 'step': 'win_step'}), '(signal, n=win_size, step=win_step)\n', (356, 391), False, 'import more_itertools\n'), ((575, 602), 'numpy.asarray', 'np.asarray', (['seg_label[iSeg]'], {}), '(seg_label[iSeg])\n', (585, 602), True, 'import numpy as np\n'), ((1212, 1239), 'numpy.asarray', 'np.asarray', (['seg_label[iSeg]'], {}), '(seg_label[iSeg])\n', (1222, 1239), True, 'import numpy as np\n'), ((2045, 2062), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2056, 2062), False, 'import os\n'), ((2458, 2487), 'numpy.asfortranarray', 'np.asfortranarray', (['row_signal'], {}), '(row_signal)\n', (2475, 2487), True, 'import numpy as np\n'), ((3842, 3866), 'numpy.split', 'np.split', (['temp_vector', '(7)'], {}), '(temp_vector, 7)\n', (3850, 3866), True, 'import numpy as np\n'), ((4087, 4107), 'numpy.vstack', 'np.vstack', (['mat_block'], {}), '(mat_block)\n', (4096, 4107), True, 'import numpy as np\n'), ((1359, 1382), 'numpy.sum', 'np.sum', (['win_label_value'], {}), '(win_label_value)\n', (1365, 1382), True, 'import numpy as np\n'), 
((4517, 4533), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4531, 4533), False, 'from sklearn.preprocessing import StandardScaler\n'), ((708, 731), 'numpy.sum', 'np.sum', (['win_label_value'], {}), '(win_label_value)\n', (714, 731), True, 'import numpy as np\n'), ((1441, 1464), 'numpy.sum', 'np.sum', (['win_label_value'], {}), '(win_label_value)\n', (1447, 1464), True, 'import numpy as np\n'), ((1548, 1571), 'numpy.sum', 'np.sum', (['win_label_value'], {}), '(win_label_value)\n', (1554, 1571), True, 'import numpy as np\n'), ((1655, 1678), 'numpy.sum', 'np.sum', (['win_label_value'], {}), '(win_label_value)\n', (1661, 1678), True, 'import numpy as np\n'), ((5099, 5115), 'numpy.min', 'np.min', (['data_col'], {}), '(data_col)\n', (5105, 5115), True, 'import numpy as np\n'), ((5120, 5136), 'numpy.max', 'np.max', (['data_col'], {}), '(data_col)\n', (5126, 5136), True, 'import numpy as np\n'), ((5139, 5155), 'numpy.min', 'np.min', (['data_col'], {}), '(data_col)\n', (5145, 5155), True, 'import numpy as np\n'), ((2726, 2737), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (2734, 2737), True, 'import numpy as np\n')] |
import logging
from d4rl.pointmaze import waypoint_controller
from d4rl.pointmaze import maze_model, maze_layouts
import numpy as np
import pickle
import gzip
import h5py
import argparse
import os
import tqdm
# Fixed (x, y) start and goal locations used for every generated trajectory.
START_POS = np.array([10., 24.])
TARGET_POS = np.array([18., 8.])
def reset_data():
    """Return an empty trajectory buffer: one fresh list per logged field."""
    keys = ('states', 'actions', 'images', 'terminals',
            'infos/goal', 'infos/qpos', 'infos/qvel')
    return {key: [] for key in keys}
def append_data(data, s, a, img, tgt, done, env_data):
    """Append one transition to the trajectory buffer `data`.

    Stores the state, action, rendered frame, done flag, goal, and copies
    of the simulator's flattened qpos/qvel.
    """
    entries = {
        'states': s,
        'actions': a,
        'images': img,
        'terminals': done,
        'infos/goal': tgt,
        'infos/qpos': env_data.qpos.ravel().copy(),
        'infos/qvel': env_data.qvel.ravel().copy(),
    }
    for key, value in entries.items():
        data[key].append(value)
def npify(data):
    """Convert every buffer in `data` to a numpy array in place
    (bool array for 'terminals', float32 for everything else)."""
    for key in data:
        target_dtype = np.bool_ if key == 'terminals' else np.float32
        data[key] = np.array(data[key], dtype=target_dtype)
def sample_env_and_controller(args):
    """Build a 40x40 maze environment (fixed seed 0) together with a
    waypoint controller for the same layout."""
    layout_str = maze_layouts.rand_layout(seed=0, size=40)
    env = maze_model.MazeEnv(layout_str, agent_centric_view=args.agent_centric)
    return env, waypoint_controller.WaypointController(layout_str)
def reset_env(env, agent_centric=False):
    """Reset `env`, set the fixed target, and start the agent at START_POS.

    Returns the observation after placing the agent.
    """
    env.reset()
    env.set_target(TARGET_POS)
    state = env.reset_to_location(START_POS)
    if agent_centric:
        # so that camera can catch up with agent
        for _ in range(100):
            env.render(mode='rgb_array')
    return state
def save_video(file_name, frames, fps=20, video_format='mp4'):
    """Write `frames` to `file_name` as a video via scikit-video/ffmpeg.

    Args:
        file_name: output path.
        frames: sequence of image arrays.
        fps: output frame rate.
        video_format: ffmpeg container format.
    """
    import skvideo.io
    skvideo.io.vwrite(
        file_name,
        frames,
        inputdict={
            '-r': str(int(fps)),
        },
        outputdict={
            '-f': video_format,
            '-pix_fmt': 'yuv420p',  # '-pix_fmt=yuv420p' needed for osx https://github.com/scikit-video/scikit-video/issues/74
        }
    )
def main():
    """Roll out a waypoint controller in a generated maze and save one demonstration.

    Steps the env until the controller reports done; trajectories shorter than
    ``--min_traj_len`` are discarded and a new env is sampled. The script exits
    after the first successfully saved demonstration.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--render', action='store_true', help='Render trajectories')
    parser.add_argument('--noisy', action='store_true', help='Noisy actions')
    parser.add_argument('--agent_centric', action='store_true', help='Whether agent-centric images are rendered.')
    parser.add_argument('--save_images', action='store_true', help='Whether rendered images are saved.')
    parser.add_argument('--data_dir', type=str, default='.', help='Base directory for dataset')
    parser.add_argument('--num_samples', type=int, default=int(2e5), help='Num samples to collect')
    parser.add_argument('--min_traj_len', type=int, default=int(20), help='Min number of samples per trajectory')
    parser.add_argument('--rand_maze_size', type=int, default=int(20), help='Size of generate maze')
    parser.add_argument('--batch_idx', type=int, default=int(-1), help='(Optional) Index of generated data batch')
    args = parser.parse_args()
    if args.agent_centric and not args.save_images:
        raise ValueError("Need to save images for agent-centric dataset")
    max_episode_steps = 1600 # if not args.save_images else 500
    env, controller = sample_env_and_controller(args)
    s = reset_env(env, agent_centric=args.agent_centric)
    data = reset_data()
    ts, cnt = 0, 0
    for tt in tqdm.tqdm(range(args.num_samples)):
        # observation layout assumed: s[0:2] position, s[2:4] velocity — TODO confirm
        position = s[0:2]
        velocity = s[2:4]
        try:
            act, done = controller.get_action(position, velocity, env._target)
        except ValueError:
            # failed to find valid path to goal
            print("FAIL")
            data = reset_data()
            env, controller = sample_env_and_controller(args)
            s = reset_env(env, agent_centric=args.agent_centric)
            ts = 0
            continue
        # NOTE(review): prints every action to stdout — likely debug leftover.
        print(act)
        if args.noisy:
            act = act + np.random.randn(*act.shape)*0.5
            act = np.clip(act, -1.0, 1.0)
        # force episode termination after the step cap
        if ts >= max_episode_steps:
            done = True
        append_data(data, s, act, env.render(mode='rgb_array'), #, camera_name='birdview'),
                    env._target, done, env.sim.data)
        ns, _, _, _ = env.step(act)
        ts += 1
        if done:
            # keep only sufficiently long demonstrations; exit after the first save
            if len(data['actions']) > args.min_traj_len:
                save_data(args, data, cnt)
                print("Saved Demonstration. Exiting...")
                exit(0)
            data = reset_data()
            env, controller = sample_env_and_controller(args)
            s = reset_env(env, agent_centric=args.agent_centric)
            ts = 0
        else:
            s = ns
            if args.render:
                env.render(mode='human')
def save_data(args, data, idx):
    """Write the rollout buffer *data* to ``rollout_{idx}.h5`` under ``args.data_dir``.

    Converts all lists to numpy arrays (via ``npify``), stores them in group
    ``traj0``, and adds a ``pad_mask`` marking the valid prefix of the sequence.
    """
    # save_video("seq_{}_ac.mp4".format(idx), data['images'])
    dir_name = ''
    if args.batch_idx >= 0:
        dir_name = os.path.join(dir_name, "batch_{}".format(args.batch_idx))
    os.makedirs(os.path.join(args.data_dir, dir_name), exist_ok=True)
    file_name = os.path.join(args.data_dir, dir_name, "rollout_{}.h5".format(idx))
    # save rollout to file
    f = h5py.File(file_name, "w")
    f.create_dataset("traj_per_file", data=1)
    # store trajectory info in traj0 group
    npify(data)
    traj_data = f.create_group("traj0")
    traj_data.create_dataset("states", data=data['states'])
    if args.save_images:
        traj_data.create_dataset("images", data=data['images'], dtype=np.uint8)
    else:
        # placeholder 2x2 RGB frames so downstream readers always find an 'images' key
        traj_data.create_dataset("images", data=np.zeros((data['states'].shape[0], 2, 2, 3), dtype=np.uint8))
    traj_data.create_dataset("actions", data=data['actions'])
    terminals = data['terminals']
    # guarantee at least one terminal so the pad mask below is well defined
    if np.sum(terminals) == 0:
        terminals[-1] = True
    # build pad-mask that indicates how long sequence is
    is_terminal_idxs = np.nonzero(terminals)[0]
    pad_mask = np.zeros((len(terminals),))
    pad_mask[:is_terminal_idxs[0]] = 1.
    traj_data.create_dataset("pad_mask", data=pad_mask)
    f.close()
# Script entry point.
if __name__ == "__main__":
    main()
| [
"numpy.clip",
"argparse.ArgumentParser",
"os.path.join",
"h5py.File",
"d4rl.pointmaze.maze_layouts.rand_layout",
"numpy.array",
"numpy.sum",
"numpy.zeros",
"numpy.nonzero",
"d4rl.pointmaze.maze_model.MazeEnv",
"numpy.random.randn",
"d4rl.pointmaze.waypoint_controller.WaypointController"
] | [((223, 245), 'numpy.array', 'np.array', (['[10.0, 24.0]'], {}), '([10.0, 24.0])\n', (231, 245), True, 'import numpy as np\n'), ((257, 278), 'numpy.array', 'np.array', (['[18.0, 8.0]'], {}), '([18.0, 8.0])\n', (265, 278), True, 'import numpy as np\n'), ((1091, 1132), 'd4rl.pointmaze.maze_layouts.rand_layout', 'maze_layouts.rand_layout', ([], {'seed': '(0)', 'size': '(40)'}), '(seed=0, size=40)\n', (1115, 1132), False, 'from d4rl.pointmaze import maze_model, maze_layouts\n'), ((1143, 1212), 'd4rl.pointmaze.maze_model.MazeEnv', 'maze_model.MazeEnv', (['layout_str'], {'agent_centric_view': 'args.agent_centric'}), '(layout_str, agent_centric_view=args.agent_centric)\n', (1161, 1212), False, 'from d4rl.pointmaze import maze_model, maze_layouts\n'), ((1230, 1280), 'd4rl.pointmaze.waypoint_controller.WaypointController', 'waypoint_controller.WaypointController', (['layout_str'], {}), '(layout_str)\n', (1268, 1280), False, 'from d4rl.pointmaze import waypoint_controller\n'), ((2012, 2037), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2035, 2037), False, 'import argparse\n'), ((5097, 5122), 'h5py.File', 'h5py.File', (['file_name', '"""w"""'], {}), "(file_name, 'w')\n", (5106, 5122), False, 'import h5py\n'), ((1004, 1034), 'numpy.array', 'np.array', (['data[k]'], {'dtype': 'dtype'}), '(data[k], dtype=dtype)\n', (1012, 1034), True, 'import numpy as np\n'), ((3935, 3958), 'numpy.clip', 'np.clip', (['act', '(-1.0)', '(1.0)'], {}), '(act, -1.0, 1.0)\n', (3942, 3958), True, 'import numpy as np\n'), ((4924, 4961), 'os.path.join', 'os.path.join', (['args.data_dir', 'dir_name'], {}), '(args.data_dir, dir_name)\n', (4936, 4961), False, 'import os\n'), ((5658, 5675), 'numpy.sum', 'np.sum', (['terminals'], {}), '(terminals)\n', (5664, 5675), True, 'import numpy as np\n'), ((5792, 5813), 'numpy.nonzero', 'np.nonzero', (['terminals'], {}), '(terminals)\n', (5802, 5813), True, 'import numpy as np\n'), ((5492, 5552), 'numpy.zeros', 'np.zeros', 
(["(data['states'].shape[0], 2, 2, 3)"], {'dtype': 'np.uint8'}), "((data['states'].shape[0], 2, 2, 3), dtype=np.uint8)\n", (5500, 5552), True, 'import numpy as np\n'), ((3888, 3915), 'numpy.random.randn', 'np.random.randn', (['*act.shape'], {}), '(*act.shape)\n', (3903, 3915), True, 'import numpy as np\n')] |
import numpy as np
from datetime import timedelta
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.colors as colors
from pyadlml.dataset import DEVICE
from pyadlml.dataset.stats.devices import duration_correlation, \
trigger_time_diff, device_tcorr, device_triggers_one_day, \
devices_trigger_count, devices_on_off_stats
from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time,\
heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, \
_num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, \
_num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2
from pyadlml.dataset.devices import _is_dev_rep2, device_rep1_2_rep2
from pyadlml.util import get_sequential_color, get_secondary_color, get_primary_color, get_diverging_color
def hist_trigger_time_diff(df_devs=None, x=None, n_bins=50, figsize=(10, 6), color=None, file_path=None):
    """
    Plot a histogram of the differences between succeeding device triggers.
    Parameters
    ----------
    df_devs : pd.DataFrame, optional
        Recorded devices from a dataset. Fore more information refer to the
        :ref:`user guide<device_dataframe>`.
    x : ndarray, optional
        Array of time deltas used to plot the histogram
    n_bins : int, default=50
        the number of bins for the histogram.
    color : str, optional
        sets the color of the plot. When not set, the primary theming color is used.
        Learn more about theming in the :ref:`user guide <theming>`
    figsize : (float, float), default: None
        width, height in inches. If not provided, the figsize is inferred by automatically.
    file_path : str, optional
        If set, saves the plot under the given file path and return *None* instead
        of returning the figure.
    Examples
    --------
    >>> from pyadlml.plot import plot_trigger_time_dev
    >>> plot_trigger_time_dev(data.df_devs)
    .. image:: ../_static/images/plots/dev_hist_trigger_td.png
       :height: 300px
       :width: 500 px
       :scale: 100 %
       :alt: alternate text
       :align: center
    Returns
    -------
    res : fig or None
        Either a figure if file_path is not specified or nothing.
    """
    # need either the raw device dataframe or a precomputed delta array
    assert not (df_devs is None and x is None)
    title = 'Time difference between succeeding device'
    ylabel = 'count'
    ax2label = 'cummulative percentage'
    ax1label = 'timedeltas count '
    xlabel = 'log seconds'
    color = (get_primary_color() if color is None else color)
    color2 = get_secondary_color()
    if x is None:
        X = trigger_time_diff(df_devs.copy())
    else:
        X = x
    # equally sized bins in log-space spanning the smallest to largest delta
    bins = np.logspace(min(np.log10(X)), max(np.log10(X)), n_bins)
    # histogram counts plus the cumulative fraction for the secondary axis
    hist, _ = np.histogram(X, bins=bins)
    cum_percentage = hist.cumsum()/hist.sum()
    cum_percentage = np.concatenate(([0], cum_percentage))  # let the array start with 0
    # plots
    fig, ax = plt.subplots(figsize=figsize)
    plt.xscale('log')
    ax.hist(X, bins=bins, label=ax1label, color=color)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    # second y-axis carrying the cumulative-percentage line
    ax2 = ax.twinx()
    ax2.plot(bins, cum_percentage, 'r', label=ax2label, color=color2)
    ax2.set_ylabel('%')
    ax2.set_xscale('log')
    # secondary x-axis rendering raw seconds as human readable times (1s, 1m, ...)
    ax_top = ax.secondary_xaxis('top', functions=(lambda x: x, lambda x: x))
    ax_top.xaxis.set_major_formatter(
        ticker.FuncFormatter(func_formatter_seconds2time))
    # plot single legend for multiple axis
    h1, l1 = ax.get_legend_handles_labels()
    h2, l2 = ax2.get_legend_handles_labels()
    ax.legend(h1+h2, l1+l2, loc='center right')
    plt.title(title, y=1.08)
    if file_path is not None:
        savefig(fig, file_path)
        return
    else:
        return fig
def boxplot_on_duration(df_devs, lst_devs=None, order='mean', figsize=None, file_path=None):
    """
    generates a boxplot for all devices.
    Parameters
    ----------
    df_devs : pd.DataFrame, optional
        recorded devices from a dataset. Fore more information refer to the
        :ref:`user guide<device_dataframe>`.
    lst_devs : lst of str, optional
        A list of devices that are included in the statistic. The list can be a
        subset of the recorded activities or contain activities that are not recorded.
    figsize : (float, float), default: None
        width, height in inches. If not provided, the figsize is inferred by automatically.
    order : {'mean', 'alphabetically', 'room'}, default='mean'
        determines the order in which the devices are listed.
    file_path : str, optional
        If set, saves the plot under the given file path and return *None* instead
        of returning the figure.
    Examples
    --------
    >>> from pyadlml.plots import plot_device_bp_on_duration
    >>> plot_device_bp_on_duration(data.df_devs)
    .. image:: ../_static/images/plots/dev_bp_dur.png
       :height: 300px
       :width: 500 px
       :scale: 90 %
       :alt: alternate text
       :align: center
    Returns
    -------
    res : fig or None
        Either a figure if file_path is not specified or nothing.
    """
    # NOTE(review): the documented `order` parameter is never used below; the
    # devices are always sorted reverse-alphabetically — confirm and implement.
    title = 'Devices on-duration'
    xlabel = 'log seconds'
    xlabel_top = 'time'
    from pyadlml.dataset.stats.devices import devices_td_on
    df_devs = devices_td_on(df_devs)
    # select data for each device
    devices = list(df_devs[DEVICE].unique())
    devices.sort(reverse=True)
    dat = []
    for device in devices:
        df_device = df_devs[df_devs[DEVICE] == device]
        tmp = df_device['td'].dt.total_seconds()
        dat.append(tmp)
    if lst_devs is not None:
        # devices that were requested but never triggered get an empty box
        nan_devs = list(set(lst_devs).difference(set(list(devices))))
        nan_devs.sort(reverse=True)
        for dev in nan_devs:
            dat.append([])
        devices = devices + nan_devs
    num_dev = len(devices)
    figsize = (_num_boxes_2_figsize(num_dev) if figsize is None else figsize)
    # plotting
    fig, ax = plt.subplots(figsize=figsize)
    ax.boxplot(dat, vert=False)
    ax.set_title(title)
    ax.set_yticklabels(devices, ha='right')
    ax.set_xlabel(xlabel)
    ax.set_xscale('log')
    # create secondary axis with time format 1s, 1m, 1d
    ax_top = ax.secondary_xaxis('top', functions=(lambda x: x, lambda x: x))
    ax_top.set_xlabel(xlabel_top)
    ax_top.xaxis.set_major_formatter(
        ticker.FuncFormatter(func_formatter_seconds2time))
    # save or return figure
    if file_path is not None:
        savefig(fig, file_path)
        return
    else:
        return fig
def heatmap_trigger_one_day(df_devs=None, lst_devs=None, df_tod=None, t_res='1h',
                            figsize=None, cmap=None, file_path=None):
    """
    Plots the heatmap for one day where all the device triggers are shown
    Parameters
    ----------
    df_devs : pd.DataFrame, optional
        recorded devices from a dataset. Fore more information refer to the
        :ref:`user guide<device_dataframe>`.
    lst_devs : lst of str, optional
        A list of devices that are included in the statistic. The list can be a
        subset of the recorded activities or contain activities that are not recorded.
    df_tod : pd.DataFrame
        A precomputed table of triggers per time-of-day bin. If *df_tod* is given,
        parameters *df_devs* and *lst_devs* are ignored.
    t_res : str of {'[1-12]h', default='1h'
        the resolution, time_bins in hours. The number are
    figsize : (float, float), default: None
        width, height in inches. If not provided, the figsize is inferred by automatically.
    cmap : str or Colormap, optional
        The Colormap instance or registered colormap name used to map scalar
        data to colors. This parameter is ignored for RGB(A) data.
        Defaults 'viridis'.
    file_path : str, optional
        If set, saves the plot under the given file path and return *None* instead
        of returning the figure.
    Examples
    --------
    >>> from pyadlml.plots import plot_device_hm_time_trigger
    >>> plot_device_hm_time_trigger(data.df_devs, t_res='1h')
    .. image:: ../_static/images/plots/dev_hm_trigger_one_day.png
       :height: 300px
       :width: 500 px
       :scale: 100 %
       :alt: alternate text
       :align: center
    Returns
    -------
    res : fig or None
        Either a figure if file_path is not specified or nothing.
    """
    assert not (df_devs is None and df_tod is None)
    title = "Device triggers cummulative over one day"
    xlabel = 'time'
    df = (device_triggers_one_day(df_devs.copy(), lst_devs=lst_devs, t_res=t_res) if df_tod is None else df_tod)
    num_dev = len(list(df.columns))
    figsize = (_num_items_2_heatmap_one_day_figsize(num_dev) if figsize is None else figsize)
    cmap = (get_sequential_color() if cmap is None else cmap)
    # rows = devices, columns = time-of-day bins
    x_labels = list(df.index)
    y_labels = df.columns
    dat = df.values.T
    # begin plotting
    fig, ax = plt.subplots(figsize=figsize)
    im, cbar = heatmap(dat, y_labels, x_labels, ax=ax, cmap=cmap, cbarlabel='counts')
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    # format the x-axis
    # NOTE(review): the `if True:` guard below is dead code; `k` is captured
    # from the enclosing scope after this definition (late binding).
    def func(x,p):
        if True:
            if int(x/k) < 10:
                return '0{}:00'.format(int(x/k)+1)
            else:
                return '{}:00'.format(int(x/k)+1)
    # calculate the tick positions
    a,b = ax.get_xlim()
    k = (b-a)/24
    tcks_pos = np.arange(0,23)*k + (-0.5 + k)
    x_locator = ticker.FixedLocator(tcks_pos)
    ax.xaxis.set_major_formatter(ticker.FuncFormatter(func))
    ax.xaxis.set_major_locator(x_locator)
    ax.set_aspect(aspect='auto')
    if file_path is not None:
        savefig(fig, file_path)
        return
    else:
        return fig
def heatmap_trigger_time(df_devs=None, lst_devs=None, df_tcorr=None, t_window='5s', figsize=None,
                         z_scale="linear", cmap=None, numbers=None, file_path=None):
    """
    Plots a device-by-device heatmap of trigger counts within a sliding time window.
    Parameters
    ----------
    df_devs : pd.DataFrame, optional
        recorded devices from a dataset. Fore more information refer to the
        :ref:`user guide<device_dataframe>`.
    lst_devs : lst of str, optional
        A list of devices that are included in the statistic. The list can be a
        subset of the recorded activities or contain activities that are not recorded.
    df_tcorr : pd.DataFrame
        A precomputed correlation table. If the *df_tcorr* parameter is given, parameters
        *df_devs* and *lst_devs* are ignored. The correlation table can be computed
        in :ref:`stats <stats_devs_tcorr>`.
    t_window : str, default='5s'
        the size of the sliding time window.
    figsize : (float, float), default: None
        width, height in inches. If not provided, the figsize is inferred by automatically.
    z_scale : {"log", "linear"}, default: None
        The axis scale type to apply.
    numbers : bool, default: True
        Whether to display numbers inside the heatmaps fields or not.
    cmap : str or Colormap, optional
        The Colormap instance or registered colormap name used to map scalar
        data to colors. This parameter is ignored for RGB(A) data.
        Defaults 'viridis'.
    file_path : str, optional
        If set, saves the plot under the given file path and return *None* instead
        of returning the figure.
    Examples
    --------
    >>> from pyadlml.plots import plot_device_hm_time_trigger
    >>> plot_device_hm_time_trigger(data.df_devs, t_res='1h')
    .. image:: ../_static/images/plots/dev_hm_trigger_one_day.png
       :height: 300px
       :width: 500 px
       :scale: 100 %
       :alt: alternate text
       :align: center
    Returns
    -------
    res : fig or None
        Either a figure if file_path is not specified or nothing.
    """
    assert not (df_devs is None and df_tcorr is None)
    title = "Triggercount with sliding window of " + t_window
    color = 'trigger count'  # NOTE(review): unused local — confirm and remove
    cbarlabel = 'counts'
    if df_tcorr is None:
        df = device_tcorr(df_devs, lst_devs=lst_devs, t_window=t_window)
    else:
        df = df_tcorr
    # get the list of cross tabulations per t_window
    vals = df.astype(int).values.T
    devs = list(df.index)
    num_dev = len(devs)
    figsize = (_num_items_2_heatmap_square_figsize_ver2(num_dev) if figsize is None else figsize)
    cmap = (get_sequential_color() if cmap is None else cmap)
    fig, ax = plt.subplots(figsize=figsize)
    log = True if z_scale == 'log' else False
    valfmt = "{x:.0f}"
    im, cbar = heatmap_square(vals, devs, devs, ax=ax, cmap=cmap,
                  cbarlabel=cbarlabel, log=log)#, cbar_kw=cbar_kw)
    # show numbers for small sizes
    if numbers is None:
        if num_dev < 20:
            texts = annotate_heatmap(im, textcolors=("white", "black"), log=log, valfmt=valfmt)
    elif numbers:
        texts = annotate_heatmap(im, textcolors=("white", "black"), log=log, valfmt=valfmt)
    ax.set_title(title)
    fig.tight_layout()
    if file_path is not None:
        savefig(fig, file_path)
        return
    else:
        return fig
def heatmap_cross_correlation(df_devs=None, lst_devs=None, df_dur_corr=None, figsize=None,
                              numbers=None, file_path=None):
    """
    Plots the cross correlation between the device signals
    Parameters
    ----------
    df_devs : pd.DataFrame, optional
        recorded devices from a dataset. Fore more information refer to the
        :ref:`user guide<device_dataframe>`.
    lst_devs : lst of str, optional
        A list of devices that are included in the statistic. The list can be a
        subset of the recorded activities or contain activities that are not recorded.
    df_dur_corr : pd.DataFrame
        A precomputed duration-correlation table. If the *df_dur_corr* parameter
        is given, parameters *df_devs* and *lst_devs* are ignored.
    figsize : (float, float), default: None
        width, height in inches. If not provided, the figsize is inferred by automatically.
    numbers : bool, default: True
        Whether to display numbers inside the heatmaps fields or not.
    file_path : str, optional
        If set, saves the plot under the given file path and return *None* instead
        of returning the figure.
    Examples
    --------
    >>> from pyadlml.plot import plot_dev_hm_similarity
    >>> plot_dev_hm_similarity(data.df_devs)
    .. image:: ../_static/images/plots/dev_hm_dur_cor.png
       :height: 400px
       :width: 500 px
       :scale: 90 %
       :alt: alternate text
       :align: center
    Returns
    -------
    res : fig or None
        Either a figure if file_path is not specified or nothing.
    """
    assert not (df_devs is None and df_dur_corr is None)
    title = 'Devices cross-correlation'
    cmap = 'RdBu'
    cbarlabel = 'similarity'
    if df_dur_corr is None:
        ct = duration_correlation(df_devs, lst_devs=lst_devs)
    else:
        ct = df_dur_corr
    # NOTE(review): missing values are mapped to +inf before plotting — confirm
    # this is the intended sentinel for the colormap.
    ct = ct.replace(pd.NA, np.inf)
    vals = ct.values.T
    devs = list(ct.index)
    num_dev = len(devs)
    figsize = (_num_items_2_heatmap_square_figsize_ver2(num_dev) if figsize is None else figsize)
    fig, ax = plt.subplots(figsize=figsize)
    im, cbar = heatmap_square(vals, devs, devs, ax=ax, cmap=cmap, cbarlabel=cbarlabel,
                        vmin=-1, vmax=1)
    # annotate cells automatically for small matrices; precision shrinks with size
    if numbers is None:
        if num_dev < 15:
            valfmt = "{x:.2f}"
            texts = annotate_heatmap(im, textcolors=("black", "white"),
                                     threshold=0.5, valfmt=valfmt)
        elif num_dev < 30:
            valfmt = "{x:.1f}"
            texts = annotate_heatmap(im, textcolors=("black", "white"),
                                     threshold=0.5, valfmt=valfmt)
    if numbers:
        texts = annotate_heatmap(im, textcolors=("black", "white"),
                                 threshold=0.5, valfmt="{x:.2f}")
    ax.set_title(title)
    fig.tight_layout()
    if file_path is not None:
        savefig(fig, file_path)
        return
    else:
        return fig
def hist_on_off(df_devs=None, lst_devs=None, df_on_off=None, figsize=None,
                color=None, color_sec=None, order='frac_on', file_path=None):
    """
    Plot bars the on/off fraction of all devices
    Parameters
    ----------
    df_devs : pd.DataFrame, optional
        recorded devices from a dataset. Fore more information refer to the
        :ref:`user guide<device_dataframe>`.
    lst_devs : lst of str, optional
        A list of devices that are included in the statistic. The list can be a
        subset of the recorded activities or contain activities that are not recorded.
    df_on_off : pd.DataFrame
        A precomputed on/off statistic. If the *df_on_off* parameter is given,
        parameters *df_devs* and *lst_devs* are ignored.
    figsize : (float, float), default: None
        width, height in inches. If not provided, the figsize is inferred by automatically.
    color : str, optional
        sets the primary color of the plot. When not set, the primary theming color is used.
        Learn more about theming in the :ref:`user guide <theming>`
    color_sec : str, optional
        sets the secondary color of the plot. When not set, the secondary theming color is used.
        Learn more about theming in the :ref:`user guide <theming>`
    order : {'frac_on', 'name', 'area'}, default='frac_on'
        determines the order in which the devices are listed.
    file_path : str, optional
        If set, saves the plot under the given file path and return *None* instead
        of returning the figure.
    Examples
    --------
    >>> from pyadlml.plot import plot_device_on_off
    >>> plot_device_on_off(data.df_devs)
    .. image:: ../_static/images/plots/dev_on_off.png
       :height: 300px
       :width: 500 px
       :scale: 100 %
       :alt: alternate text
       :align: center
    Returns
    -------
    res : fig or None
        Either a figure if file_path is not specified or nothing.
    """
    assert not (df_devs is None and df_on_off is None)
    assert order in ['frac_on', 'name', 'area']
    title = 'Devices fraction on/off'
    xlabel ='Percentage in binary states'
    ylabel = 'Devices'
    on_label = 'on'
    off_label = 'off'
    color = (get_primary_color() if color is None else color)
    color2 = (get_secondary_color()if color_sec is None else color_sec)
    if df_on_off is None:
        df = devices_on_off_stats(df_devs, lst_devs=lst_devs)
    else:
        df = df_on_off
    num_dev = len(df)
    figsize = (_num_bars_2_figsize(num_dev) if figsize is None else figsize)
    if order == 'frac_on':
        df = df.sort_values(by='frac_on', axis=0)
    elif order == 'name':
        df = df.sort_values(by=DEVICE, axis=0)
    else:
        # order == 'area' is accepted by the assert above but not implemented yet
        raise NotImplementedError('room order will be implemented in the future')
    dev_lst = list(df[DEVICE])
    # Figure Size
    fig, ax = plt.subplots(figsize=figsize)
    if lst_devs is not None:
        # invisible zero-width bars reserve a row for never-triggered devices
        df['tmp'] = 0
        plt.barh(df[DEVICE], df['tmp'].values, alpha=0.0)
        plt.barh(df[DEVICE], df['frac_off'].values, label=off_label, color=color)
        plt.barh(df[DEVICE], df['frac_on'].values, left=df['frac_off'], label=on_label, color=color2)
    else:
        plt.barh(dev_lst, df['frac_off'].values, label=off_label, color=color)
        # careful: notice "bottom" parameter became "left"
        plt.barh(dev_lst, df['frac_on'].values, left=df['frac_off'], label=on_label, color=color2)
    # we also need to switch the labels
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # set the text centers to the middle for the greater fraction
    widths = df['frac_off'].apply(lambda x: x if x >= 0.5 else 1-x)
    xcenters = df['frac_off'].apply(lambda x: x/2 if x >= 0.5 else (1-x)/2 + x)
    first_number_left = True
    for y, c, w in zip(range(len(xcenters)), xcenters, widths):
        # if the topmost bar's label sits on the right, move the legend there too
        if y == len(xcenters)-1 and c < 0.5:
            first_number_left = False
        if c > 0.5:
            text_color='black'
        else:
            text_color='white'
        ax.text(c, y, '{:.4f}'.format(w), ha='center', va='center', color=text_color)
    if first_number_left:
        ax.legend(ncol=2, bbox_to_anchor=(0, 1),
                  loc='upper left', fontsize='small')
    else:
        ax.legend(ncol=2, bbox_to_anchor=(1,1),
                  loc='upper right', fontsize='small')
    # Remove axes splines
    for s in ['top', 'right']:
        ax.spines[s].set_visible(False)
    if file_path is not None:
        savefig(fig, file_path)
        return
    else:
        return fig
def hist_counts(df_devs=None, lst_devs=None, df_tc=None, figsize=None,
                y_scale='linear', color=None, order='count', file_path=None):
    """
    bar chart displaying how often activities are occurring
    Parameters
    ----------
    df_devs : pd.DataFrame, optional
        recorded devices from a dataset. Fore more information refer to the
        :ref:`user guide<device_dataframe>`.
    lst_devs : lst of str, optional
        A list of devices that are included in the statistic. The list can be a
        subset of the recorded activities or contain activities that are not recorded.
    df_tc : pd.DataFrame
        A precomputed trigger-count table. If the *df_tc* parameter is given,
        parameters *df_devs* and *lst_devs* are ignored.
    y_scale : {"log", "linear"}, default: None
        The axis scale type to apply.
    figsize : (float, float), default: None
        width, height in inches. If not provided, the figsize is inferred by automatically.
    color : str, optional
        sets the primary color of the plot. When not set, the primary theming color is used.
        Learn more about theming in the :ref:`user guide <theming>`
    order : {'count', 'alphabetic', 'room'}, default='count'
        determines the order in which the devices are listed.
    file_path : str, optional
        If set, saves the plot under the given file path and return *None* instead
        of returning the figure.
    Examples
    --------
    >>> from pyadlml.plots import plot_device_bar_count
    >>> plot_device_bar_count(data.df_devs)
    .. image:: ../_static/images/plots/dev_bar_trigger.png
       :height: 300px
       :width: 500 px
       :scale: 90 %
       :alt: alternate text
       :align: center
    Returns
    -------
    res : fig or None
        Either a figure if file_path is not specified or nothing.
    """
    assert not (df_devs is None and df_tc is None)
    assert y_scale in ['log', 'linear']
    assert order in ['alphabetic', 'count', 'room']
    title = 'Device triggers'
    x_label = 'count'
    df_col = 'trigger_count'
    df = (devices_trigger_count(df_devs.copy(), lst_devs=lst_devs) if df_tc is None else df_tc)
    num_dev = len(df)
    figsize = (_num_bars_2_figsize(num_dev) if figsize is None else figsize)
    color = (get_primary_color() if color is None else color)
    if order == 'alphabetic':
        df = df.sort_values(by=[DEVICE], ascending=True)
    elif order == 'count':
        df = df.sort_values(by=[df_col])
    else:
        # fix: `raise NotImplemented(...)` raised a TypeError because the
        # NotImplemented singleton is not an exception class.
        raise NotImplementedError('the room order is going to be implemented')
    # plot
    fig, ax = plt.subplots(figsize=figsize)
    plt.title(title)
    plt.xlabel(x_label)
    ax.barh(df[DEVICE], df[df_col], color=color)
    if y_scale == 'log':
        ax.set_xscale('log')
    if file_path is not None:
        savefig(fig, file_path)
        return
    else:
        return fig
"numpy.log10",
"pyadlml.dataset.plot.util._num_boxes_2_figsize",
"matplotlib.pyplot.ylabel",
"pyadlml.util.get_sequential_color",
"pyadlml.dataset.plot.util._num_items_2_heatmap_one_day_figsize",
"pyadlml.dataset.plot.util._num_bars_2_figsize",
"numpy.arange",
"numpy.histogram",
"pyadlml.dataset.sta... | [((2650, 2671), 'pyadlml.util.get_secondary_color', 'get_secondary_color', ([], {}), '()\n', (2669, 2671), False, 'from pyadlml.util import get_sequential_color, get_secondary_color, get_primary_color, get_diverging_color\n'), ((2917, 2943), 'numpy.histogram', 'np.histogram', (['X'], {'bins': 'bins'}), '(X, bins=bins)\n', (2929, 2943), True, 'import numpy as np\n'), ((3011, 3048), 'numpy.concatenate', 'np.concatenate', (['([0], cum_percentage)'], {}), '(([0], cum_percentage))\n', (3025, 3048), True, 'import numpy as np\n'), ((3104, 3133), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3116, 3133), True, 'import matplotlib.pyplot as plt\n'), ((3138, 3155), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (3148, 3155), True, 'import matplotlib.pyplot as plt\n'), ((3807, 3831), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'y': '(1.08)'}), '(title, y=1.08)\n', (3816, 3831), True, 'import matplotlib.pyplot as plt\n'), ((5473, 5495), 'pyadlml.dataset.stats.devices.devices_td_on', 'devices_td_on', (['df_devs'], {}), '(df_devs)\n', (5486, 5495), False, 'from pyadlml.dataset.stats.devices import devices_td_on\n'), ((6140, 6169), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6152, 6169), True, 'import matplotlib.pyplot as plt\n'), ((9192, 9221), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (9204, 9221), True, 'import matplotlib.pyplot as plt\n'), ((9237, 9307), 'pyadlml.dataset.plot.util.heatmap', 'heatmap', (['dat', 'y_labels', 'x_labels'], {'ax': 'ax', 'cmap': 'cmap', 'cbarlabel': '"""counts"""'}), "(dat, y_labels, x_labels, ax=ax, cmap=cmap, cbarlabel='counts')\n", (9244, 9307), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, 
_num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((9721, 9750), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['tcks_pos'], {}), '(tcks_pos)\n', (9740, 9750), True, 'import matplotlib.ticker as ticker\n'), ((12694, 12723), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (12706, 12723), True, 'import matplotlib.pyplot as plt\n'), ((12825, 12910), 'pyadlml.dataset.plot.util.heatmap_square', 'heatmap_square', (['vals', 'devs', 'devs'], {'ax': 'ax', 'cmap': 'cmap', 'cbarlabel': 'cbarlabel', 'log': 'log'}), '(vals, devs, devs, ax=ax, cmap=cmap, cbarlabel=cbarlabel, log=log\n )\n', (12839, 12910), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((15565, 15594), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (15577, 15594), True, 'import matplotlib.pyplot as plt\n'), ((15610, 15702), 'pyadlml.dataset.plot.util.heatmap_square', 'heatmap_square', (['vals', 'devs', 'devs'], {'ax': 'ax', 'cmap': 'cmap', 'cbarlabel': 'cbarlabel', 'vmin': '(-1)', 'vmax': '(1)'}), '(vals, devs, devs, ax=ax, cmap=cmap, cbarlabel=cbarlabel,\n vmin=-1, vmax=1)\n', (15624, 15702), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((19395, 19424), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (19407, 19424), True, 'import matplotlib.pyplot as plt\n'), ((20012, 20028), 
'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (20021, 20028), True, 'import matplotlib.pyplot as plt\n'), ((20033, 20051), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (20043, 20051), True, 'import matplotlib.pyplot as plt\n'), ((20058, 20076), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (20068, 20076), True, 'import matplotlib.pyplot as plt\n'), ((23805, 23834), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (23817, 23834), True, 'import matplotlib.pyplot as plt\n'), ((23839, 23855), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (23848, 23855), True, 'import matplotlib.pyplot as plt\n'), ((23860, 23879), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (23870, 23879), True, 'import matplotlib.pyplot as plt\n'), ((2588, 2607), 'pyadlml.util.get_primary_color', 'get_primary_color', ([], {}), '()\n', (2605, 2607), False, 'from pyadlml.util import get_sequential_color, get_secondary_color, get_primary_color, get_diverging_color\n'), ((3562, 3611), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['func_formatter_seconds2time'], {}), '(func_formatter_seconds2time)\n', (3582, 3611), True, 'import matplotlib.ticker as ticker\n'), ((3875, 3898), 'pyadlml.dataset.plot.util.savefig', 'savefig', (['fig', 'file_path'], {}), '(fig, file_path)\n', (3882, 3898), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((6047, 6076), 'pyadlml.dataset.plot.util._num_boxes_2_figsize', '_num_boxes_2_figsize', (['num_dev'], {}), '(num_dev)\n', (6067, 6076), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, 
annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((6535, 6584), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['func_formatter_seconds2time'], {}), '(func_formatter_seconds2time)\n', (6555, 6584), True, 'import matplotlib.ticker as ticker\n'), ((6653, 6676), 'pyadlml.dataset.plot.util.savefig', 'savefig', (['fig', 'file_path'], {}), '(fig, file_path)\n', (6660, 6676), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((8932, 8977), 'pyadlml.dataset.plot.util._num_items_2_heatmap_one_day_figsize', '_num_items_2_heatmap_one_day_figsize', (['num_dev'], {}), '(num_dev)\n', (8968, 8977), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((9023, 9045), 'pyadlml.util.get_sequential_color', 'get_sequential_color', ([], {}), '()\n', (9043, 9045), False, 'from pyadlml.util import get_sequential_color, get_secondary_color, get_primary_color, get_diverging_color\n'), ((9784, 9810), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['func'], {}), '(func)\n', (9804, 9810), True, 'import matplotlib.ticker as ticker\n'), ((9926, 9949), 'pyadlml.dataset.plot.util.savefig', 'savefig', (['fig', 'file_path'], {}), '(fig, file_path)\n', (9933, 9949), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, 
_num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((12287, 12346), 'pyadlml.dataset.stats.devices.device_tcorr', 'device_tcorr', (['df_devs'], {'lst_devs': 'lst_devs', 't_window': 't_window'}), '(df_devs, lst_devs=lst_devs, t_window=t_window)\n', (12299, 12346), False, 'from pyadlml.dataset.stats.devices import duration_correlation, trigger_time_diff, device_tcorr, device_triggers_one_day, devices_trigger_count, devices_on_off_stats\n'), ((12534, 12583), 'pyadlml.dataset.plot.util._num_items_2_heatmap_square_figsize_ver2', '_num_items_2_heatmap_square_figsize_ver2', (['num_dev'], {}), '(num_dev)\n', (12574, 12583), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((12629, 12651), 'pyadlml.util.get_sequential_color', 'get_sequential_color', ([], {}), '()\n', (12649, 12651), False, 'from pyadlml.util import get_sequential_color, get_secondary_color, get_primary_color, get_diverging_color\n'), ((13332, 13355), 'pyadlml.dataset.plot.util.savefig', 'savefig', (['fig', 'file_path'], {}), '(fig, file_path)\n', (13339, 13355), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((15260, 15308), 'pyadlml.dataset.stats.devices.duration_correlation', 'duration_correlation', (['df_devs'], {'lst_devs': 'lst_devs'}), '(df_devs, lst_devs=lst_devs)\n', (15280, 15308), False, 'from pyadlml.dataset.stats.devices import duration_correlation, trigger_time_diff, device_tcorr, device_triggers_one_day, devices_trigger_count, devices_on_off_stats\n'), ((15468, 
15517), 'pyadlml.dataset.plot.util._num_items_2_heatmap_square_figsize_ver2', '_num_items_2_heatmap_square_figsize_ver2', (['num_dev'], {}), '(num_dev)\n', (15508, 15517), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((16156, 16245), 'pyadlml.dataset.plot.util.annotate_heatmap', 'annotate_heatmap', (['im'], {'textcolors': "('black', 'white')", 'threshold': '(0.5)', 'valfmt': '"""{x:.2f}"""'}), "(im, textcolors=('black', 'white'), threshold=0.5, valfmt=\n '{x:.2f}')\n", (16172, 16245), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((16356, 16379), 'pyadlml.dataset.plot.util.savefig', 'savefig', (['fig', 'file_path'], {}), '(fig, file_path)\n', (16363, 16379), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((18744, 18763), 'pyadlml.util.get_primary_color', 'get_primary_color', ([], {}), '()\n', (18761, 18763), False, 'from pyadlml.util import get_sequential_color, get_secondary_color, get_primary_color, get_diverging_color\n'), ((18807, 18828), 'pyadlml.util.get_secondary_color', 'get_secondary_color', ([], {}), '()\n', (18826, 18828), False, 'from pyadlml.util import get_sequential_color, get_secondary_color, get_primary_color, get_diverging_color\n'), ((18905, 18953), 'pyadlml.dataset.stats.devices.devices_on_off_stats', 'devices_on_off_stats', 
(['df_devs'], {'lst_devs': 'lst_devs'}), '(df_devs, lst_devs=lst_devs)\n', (18925, 18953), False, 'from pyadlml.dataset.stats.devices import duration_correlation, trigger_time_diff, device_tcorr, device_triggers_one_day, devices_trigger_count, devices_on_off_stats\n'), ((19025, 19053), 'pyadlml.dataset.plot.util._num_bars_2_figsize', '_num_bars_2_figsize', (['num_dev'], {}), '(num_dev)\n', (19044, 19053), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((19484, 19533), 'matplotlib.pyplot.barh', 'plt.barh', (['df[DEVICE]', "df['tmp'].values"], {'alpha': '(0.0)'}), "(df[DEVICE], df['tmp'].values, alpha=0.0)\n", (19492, 19533), True, 'import matplotlib.pyplot as plt\n'), ((19542, 19615), 'matplotlib.pyplot.barh', 'plt.barh', (['df[DEVICE]', "df['frac_off'].values"], {'label': 'off_label', 'color': 'color'}), "(df[DEVICE], df['frac_off'].values, label=off_label, color=color)\n", (19550, 19615), True, 'import matplotlib.pyplot as plt\n'), ((19624, 19722), 'matplotlib.pyplot.barh', 'plt.barh', (['df[DEVICE]', "df['frac_on'].values"], {'left': "df['frac_off']", 'label': 'on_label', 'color': 'color2'}), "(df[DEVICE], df['frac_on'].values, left=df['frac_off'], label=\n on_label, color=color2)\n", (19632, 19722), True, 'import matplotlib.pyplot as plt\n'), ((19737, 19807), 'matplotlib.pyplot.barh', 'plt.barh', (['dev_lst', "df['frac_off'].values"], {'label': 'off_label', 'color': 'color'}), "(dev_lst, df['frac_off'].values, label=off_label, color=color)\n", (19745, 19807), True, 'import matplotlib.pyplot as plt\n'), ((19875, 19969), 'matplotlib.pyplot.barh', 'plt.barh', (['dev_lst', "df['frac_on'].values"], {'left': "df['frac_off']", 'label': 'on_label', 'color': 'color2'}), "(dev_lst, df['frac_on'].values, left=df['frac_off'], 
label=on_label,\n color=color2)\n", (19883, 19969), True, 'import matplotlib.pyplot as plt\n'), ((21029, 21052), 'pyadlml.dataset.plot.util.savefig', 'savefig', (['fig', 'file_path'], {}), '(fig, file_path)\n', (21036, 21052), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((23415, 23443), 'pyadlml.dataset.plot.util._num_bars_2_figsize', '_num_bars_2_figsize', (['num_dev'], {}), '(num_dev)\n', (23434, 23443), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((23490, 23509), 'pyadlml.util.get_primary_color', 'get_primary_color', ([], {}), '()\n', (23507, 23509), False, 'from pyadlml.util import get_sequential_color, get_secondary_color, get_primary_color, get_diverging_color\n'), ((24023, 24046), 'pyadlml.dataset.plot.util.savefig', 'savefig', (['fig', 'file_path'], {}), '(fig, file_path)\n', (24030, 24046), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((2831, 2842), 'numpy.log10', 'np.log10', (['X'], {}), '(X)\n', (2839, 2842), True, 'import numpy as np\n'), ((2849, 2860), 'numpy.log10', 'np.log10', (['X'], {}), '(X)\n', (2857, 2860), True, 'import numpy as np\n'), ((9669, 9685), 'numpy.arange', 'np.arange', (['(0)', '(23)'], {}), '(0, 23)\n', (9678, 9685), True, 'import numpy as np\n'), ((13059, 13134), 
'pyadlml.dataset.plot.util.annotate_heatmap', 'annotate_heatmap', (['im'], {'textcolors': "('white', 'black')", 'log': 'log', 'valfmt': 'valfmt'}), "(im, textcolors=('white', 'black'), log=log, valfmt=valfmt)\n", (13075, 13134), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((13169, 13244), 'pyadlml.dataset.plot.util.annotate_heatmap', 'annotate_heatmap', (['im'], {'textcolors': "('white', 'black')", 'log': 'log', 'valfmt': 'valfmt'}), "(im, textcolors=('white', 'black'), log=log, valfmt=valfmt)\n", (13185, 13244), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((15822, 15908), 'pyadlml.dataset.plot.util.annotate_heatmap', 'annotate_heatmap', (['im'], {'textcolors': "('black', 'white')", 'threshold': '(0.5)', 'valfmt': 'valfmt'}), "(im, textcolors=('black', 'white'), threshold=0.5, valfmt=\n valfmt)\n", (15838, 15908), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n'), ((16012, 16098), 'pyadlml.dataset.plot.util.annotate_heatmap', 'annotate_heatmap', (['im'], {'textcolors': "('black', 'white')", 'threshold': '(0.5)', 'valfmt': 'valfmt'}), "(im, textcolors=('black', 'white'), threshold=0.5, valfmt=\n valfmt)\n", (16028, 16098), False, 'from pyadlml.dataset.plot.util import heatmap_square, func_formatter_seconds2time, heatmap, annotate_heatmap, 
savefig, _num_bars_2_figsize, _num_items_2_heatmap_square_figsize, _num_boxes_2_figsize, _num_items_2_heatmap_one_day_figsize, _num_items_2_heatmap_square_figsize_ver2\n')] |
from multi_fits_cubes.cloud import Cloud
import numpy as np
from spectral_cube import SpectralCube
from collections import OrderedDict
import astropy.units as u
from astropy.wcs import WCS
kps = u.km / u.s
class FluxRMS(Cloud):
    """Estimate the flux RMS of a cloud by sliding a signal-sized window
    over the signal-free channels of each (spatially masked) big cube.
    """
    def __init__(self, signal_v_range, *args, **kwargs):
        """
        :param signal_v_range: (vlo, vhi) velocity interval (with units) that
            contains the cloud signal; RMS windows are slid outside it.
        Remaining positional/keyword arguments are forwarded to ``Cloud``.
        """
        super().__init__(*args, **kwargs)
        master_line = list(self.cubes.keys())[0]
        self.masked_cube = self.cubes[master_line]
        self.mask2d = self.mask_cube_obj.get_mask_map2d()
        # Cut every big cube to its own full velocity range and apply the 2D
        # spatial mask so only the cloud's footprint contributes to the flux.
        self.cutted_big_cubes = OrderedDict()
        for line, cube in self.big_cubes.items():
            vlo, vhi = cube.spectral_extrema
            cutted_cube = self.mask_cube_obj.cut_from_a_big_cube_v_range(cube, vlo, vhi, with_vel_unit=kps,
                                                                        with_value_unit=u.K)
            self.cutted_big_cubes[line] = cutted_cube.with_mask(self.mask2d)
        self.signal_vlo, self.signal_vhi = signal_v_range
        # Average number of masked spectral channels per masked pixel; this is
        # the window depth used by _rms_in_one_box.
        # BUG FIX: the original referenced the module-level name ``cloud`` here
        # instead of ``self``, which raised a NameError at construction time.
        n_vox = np.sum(~np.isnan(self.mask_cube_obj.mask3d.filled_data[:].value))
        n_pix = np.sum(self.mask_cube_obj.get_mask_map2d())
        self.avg_n_channel = int(round(n_vox / n_pix))
    def sliding_rms_values(self, line, n_channel_step_size):
        """Compute the integrated flux in every signal-free window of ``line``.

        :param line: key into ``self.cutted_big_cubes``
        :param n_channel_step_size: stride (in channels) between window starts
        :return: (start_channel_indices, flux_arr) where ``flux_arr`` is an
            astropy ``Quantity`` with one flux value per window
        """
        cube = self.cutted_big_cubes[line]
        n_tot_channels = cube.shape[0]
        v_axis = cube.spectral_axis.to(kps).value
        # Locate the channels bracketing the signal interval.  atol=0.07
        # tolerates velocity-grid rounding when matching the requested bounds.
        t = np.where(np.isclose(v_axis, self.signal_vlo.value, atol=0.07))[0]
        self.signal_channel_lo = t[0]
        t = np.where(np.isclose(v_axis, self.signal_vhi.value, atol=0.07))[0]
        self.signal_channel_hi = t[0]
        self.signal_channel_num = self.signal_channel_hi - self.signal_channel_lo + 1
        # Window start positions: everything below the signal interval and
        # everything above it, leaving room for one full window of
        # avg_n_channel channels at each start.
        part1_upper_bound = self.signal_channel_lo
        part2_lower_bound = self.signal_channel_hi + 1
        # ``np.int`` was removed from NumPy (1.24+); the builtin ``int`` is the
        # correct dtype here.
        part1 = np.arange(0, part1_upper_bound - self.avg_n_channel, n_channel_step_size, dtype=int)
        part2 = np.arange(part2_lower_bound, n_tot_channels - self.avg_n_channel, n_channel_step_size, dtype=int)
        start_channel_indices = np.hstack([part1, part2])
        flux_arr = u.Quantity([self._rms_in_one_box(cube, i) for i in start_channel_indices])
        return start_channel_indices, flux_arr
    def _rms_in_one_box(self, cube, start_channel_index):
        """Integrated flux (summed moment-0) of one window of
        ``self.avg_n_channel`` channels starting at ``start_channel_index``."""
        i = start_channel_index
        j = i + self.avg_n_channel
        box = cube[i: j, :, :]
        box.allow_huge_operations = True
        m0 = box.moment0()
        flux = np.nansum(m0.value) * m0.unit
        return flux
class FluxRMSold(Cloud):
    """Older flux-RMS estimator working on a single line.

    A window as deep as the signal's channel count is slid over the
    signal-free channels of the masked big cube; the flux in each window
    samples the flux-RMS distribution.
    """
    def __init__(self, line=None, signal_v_range=None, *args, **kwargs):
        """
        :param line: which line's cube to use; may be None only when the
            Cloud holds exactly one line.
        :param signal_v_range: (vlo, vhi) of the signal; defaults to the
            masked cube's full spectral range.
        Remaining arguments are forwarded to ``Cloud``.
        """
        super().__init__(*args, **kwargs)
        if line is None:
            assert len(self.cubes) == 1, "Arguments `line` can be None iff exactly 1 line in the Cloud object."
            line = list(self.cubes.keys())[0]
        self.masked_cube = self.cubes[line]
        self.mask2d = self.mask_cube_obj.get_mask_map2d()
        self.big_cube = self.big_cubes[line]
        if signal_v_range is None:
            # No explicit signal range: treat the whole masked cube as signal.
            self.signal_vlo, self.signal_vhi = self.masked_cube.spectral_extrema
            self.signal_channel_num = self.masked_cube.shape[0]
        else:
            self.signal_vlo, self.signal_vhi = signal_v_range
            # Convert the velocity width into a channel count via the big
            # cube's channel spacing.
            v_resolution = abs(self.big_cube.spectral_axis[0] - self.big_cube.spectral_axis[1])
            self.signal_channel_num = int(((self.signal_vhi - self.signal_vlo) / v_resolution).value)
        # By default the RMS is computed over the big cube's full v range.
        self.set_full_v_range(*self.big_cube.spectral_extrema)
        self.unit = self.big_cube.unit
        v_axis = self.big_cube.spectral_axis
        self.channel_width = abs(v_axis[1] - v_axis[0])
    def set_full_v_range(self, vlo, vhi):
        """
        Set the velocity range over which the RMS is calculated (NOT the
        signal range).  If you want to use the full velocity range of the
        big cube, ignore this method: the constructor already does that.

        Also caches the masked data array and records where the signal
        interval falls inside this range (``signal_channel_lo``/``_hi``,
        None when a bound lies outside the range).
        :param vlo:
        :param vhi:
        :return: the masked cube cut to [vlo, vhi]
        """
        self.masked_cube_with_larger_v_range = \
            self.mask_cube_obj.cut_from_a_big_cube_v_range(
                big_cube=self.big_cube,
                vlo=vlo,
                vhi=vhi
            ).with_mask(self.mask2d)
        v_axis = self.masked_cube_with_larger_v_range.spectral_axis.value
        # Find the channel matching the lower signal bound (if inside range).
        t = np.where(np.isclose(v_axis, self.signal_vlo.value))[0]
        if len(t) == 0:
            self.signal_channel_lo = None
        else:
            assert len(t) == 1
            self.signal_channel_lo = t[0]
        # Same for the upper signal bound; stored exclusive (hence the +1).
        t = np.where(np.isclose(v_axis, self.signal_vhi.value))[0]
        if len(t) == 0:
            self.signal_channel_hi = None
        else:
            assert len(t) == 1
            self.signal_channel_hi = t[0] + 1
        # Cache the raw (NaN-filled outside mask) data for fast window sums.
        self.masked_cube_with_larger_v_range_data = self.masked_cube_with_larger_v_range.filled_data[:].value
        return self.masked_cube_with_larger_v_range
    def sliding_rms_values(self, n_channel_step_size, n_channel_extra_clip, unit=u.K * u.km / u.s):
        """
        Calculate the sliding flux rms values.
        :param n_channel_step_size: stride (in channels) between windows
        :param n_channel_extra_clip: extra channels excluded on each side
            of the signal interval
        :param unit: unit of the returned flux values
        :return: (start_channel_indices, flux Quantity array)
        """
        # Bounds of None mean the signal bound fell outside the RMS range;
        # clamp to the cube edges.
        if self.signal_channel_lo is None:
            self.signal_channel_lo = 0
        if self.signal_channel_hi is None:
            self.signal_channel_hi = self.big_cube.shape[0]
        part1_upper_bound = self.signal_channel_lo - n_channel_extra_clip
        part2_lower_bound = self.signal_channel_hi + n_channel_extra_clip
        # Window starts below and above the (clipped) signal interval,
        # each leaving room for a full signal-sized window.
        part1 = np.arange(0, part1_upper_bound - self.signal_channel_num, n_channel_step_size)
        part2 = np.arange(part2_lower_bound,
                          self.masked_cube_with_larger_v_range.shape[0] - self.signal_channel_num,
                          n_channel_step_size)
        # print('part1', part1)
        # print('part2', part2)
        start_channel_indices = np.hstack([part1, part2])
        flux_arr = np.array([self._rms_in_one_box(i) for i in start_channel_indices])
        # Sum(K) * channel width -> integrated flux in the requested unit.
        return start_channel_indices, (flux_arr * self.unit * self.channel_width).to(unit)
    def channel_index_to_velocity(self, channel_indices, with_unit=u.km / u.s):
        """Map channel indices of the big cube to velocities in ``with_unit``."""
        return self.big_cube.spectral_axis[channel_indices].to(with_unit)
    def _rms_in_one_box(self, start_channel_index):
        """NaN-ignoring sum over one window of ``signal_channel_num`` channels
        (unitless; the caller attaches units)."""
        i = start_channel_index
        j = i + self.signal_channel_num
        box = self.masked_cube_with_larger_v_range_data[i: j, :, :]
        flux = np.nansum(box)
        return flux
    @staticmethod
    def make_fake_square_cloud(big_cube: SpectralCube, cloud_vlo, cloud_vhi):
        """
        Assume that
        there is a square cloud with a specific vrange,
        we have a datacube with the same spatial coverage (but larger velocity coverage)
        and our goal is to estimate the flux rms level in the datacube.
        :param big_cube:
        :param cloud_vlo:
        :param cloud_vhi:
        :return: FluxRMS object
        """
        bvlo, bvhi = big_cube.spectral_extrema
        v_resolution = abs(big_cube.spectral_axis[0] - big_cube.spectral_axis[1])
        if cloud_vlo >= bvhi or cloud_vhi <= bvlo:
            # Requested range lies entirely outside the big cube: build a
            # synthetic all-ones cube of the right depth and relocate its
            # spectral axis (CRPIX3/CRVAL3) to the requested range.
            cloud_number_of_channels = int((abs(cloud_vhi - cloud_vlo) / v_resolution).value)
            template = big_cube.subcube(zlo=0, zhi=cloud_number_of_channels)
            data = np.ones(template.shape)
            header = template.header.copy()
            header['CRPIX3'] = 0
            header['CRVAL3'] = cloud_vlo.to(big_cube.spectral_axis.unit).value
            cloud_cube = SpectralCube(data=data, wcs=WCS(header))
        else:
            # Range overlaps the big cube: simply cut it out.
            cloud_cube = big_cube.subcube(zlo=cloud_vlo, zhi=cloud_vhi)
        print(cloud_cube.spectral_extrema)
        cloud = FluxRMS(name='fakesquarecloud',
                        mask_cube=cloud_cube,
                        big_cubes=OrderedDict({'line': big_cube}),
                        signal_v_range=(cloud_vlo, cloud_vhi))
        return cloud
if __name__ == '__main__':
    # Manual test: plot flux-RMS histograms for a few real clouds.
    from multi_fits_cubes.cloud import CloudManager
    from matplotlib import pyplot as plt
    # C18O big cube; attach a brightness unit (K) to the raw data.
    cube = SpectralCube.read('../test_data/bigs/M195_L2.fits')
    cube.allow_huge_operations = True
    cube = cube * u.K
    big_cubes = OrderedDict({'C18O': cube})
    cm = CloudManager('../test_data/masks', big_cubes=big_cubes, cls=FluxRMSold)
    # Overlay the flux-RMS distributions of three clouds.
    for idx in [1246, 1254, 7474]:
        cloud = cm.load_cloud(idx)
        start_channels, flux_rms_values = cloud.sliding_rms_values(3, 5)
        plt.hist(flux_rms_values, bins=25, alpha=0.5, label='Cloud' + str(idx))
    plt.legend()
    plt.show()
    print(flux_rms_values)
    # The block below is an older fake-square-cloud experiment, disabled.
    '''
    big_cube = SpectralCube.read("../test_data/noise/Noise_1_100_195_0_L.fits")
    big_cube.allow_huge_operations = True
    big_cube = big_cube * u.K
    bvlo, bvhi = big_cube.spectral_extrema
    cloud_v_width = 10 * u.km / u.s
    cloud = FluxRMS.make_fake_square_cloud(big_cube=big_cube,
                                           cloud_vlo=bvlo - cloud_v_width,
                                           cloud_vhi=bvlo)
    print(cloud['line'].spectral_extrema)
    start_channels, flux_rms_values = cloud.sliding_rms_values(4, 0)
    plt.hist(flux_rms_values)
    plt.show()
    plt.plot(start_channels, flux_rms_values)
    plt.show()
    v = cloud.channel_index_to_velocity(start_channels, with_unit=u.km / u.s)
    plt.plot(v, flux_rms_values)
    plt.show()
    print()
    cloud_v_width = 20 * u.km / u.s
    cloud = FluxRMS.make_fake_square_cloud(big_cube=big_cube,
                                           cloud_vlo=bvlo - cloud_v_width,
                                           cloud_vhi=bvlo)
    print(cloud['line'].spectral_extrema)
    start_channels, flux_rms_values = cloud.sliding_rms_values(4, 0)
    plt.hist(flux_rms_values)
    plt.show()
    plt.plot(start_channels, flux_rms_values)
    plt.show()
    v = cloud.channel_index_to_velocity(start_channels, with_unit=u.km / u.s)
    plt.plot(v, flux_rms_values)
    plt.show()
    '''
| [
"spectral_cube.SpectralCube.read",
"collections.OrderedDict",
"numpy.isclose",
"numpy.ones",
"numpy.arange",
"numpy.hstack",
"numpy.nansum",
"numpy.isnan",
"multi_fits_cubes.cloud.CloudManager",
"astropy.wcs.WCS",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((9182, 9233), 'spectral_cube.SpectralCube.read', 'SpectralCube.read', (['"""../test_data/bigs/M195_L2.fits"""'], {}), "('../test_data/bigs/M195_L2.fits')\n", (9199, 9233), False, 'from spectral_cube import SpectralCube\n'), ((9313, 9340), 'collections.OrderedDict', 'OrderedDict', (["{'C18O': cube}"], {}), "({'C18O': cube})\n", (9324, 9340), False, 'from collections import OrderedDict\n'), ((9352, 9423), 'multi_fits_cubes.cloud.CloudManager', 'CloudManager', (['"""../test_data/masks"""'], {'big_cubes': 'big_cubes', 'cls': 'FluxRMSold'}), "('../test_data/masks', big_cubes=big_cubes, cls=FluxRMSold)\n", (9364, 9423), False, 'from multi_fits_cubes.cloud import CloudManager\n'), ((9656, 9668), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9666, 9668), True, 'from matplotlib import pyplot as plt\n'), ((9674, 9684), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9682, 9684), True, 'from matplotlib import pyplot as plt\n'), ((538, 551), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (549, 551), False, 'from collections import OrderedDict\n'), ((2666, 2757), 'numpy.arange', 'np.arange', (['(0)', '(part1_upper_bound - self.avg_n_channel)', 'n_channel_step_size'], {'dtype': 'np.int'}), '(0, part1_upper_bound - self.avg_n_channel, n_channel_step_size,\n dtype=np.int)\n', (2675, 2757), True, 'import numpy as np\n'), ((2771, 2875), 'numpy.arange', 'np.arange', (['part2_lower_bound', '(n_tot_channels - self.avg_n_channel)', 'n_channel_step_size'], {'dtype': 'np.int'}), '(part2_lower_bound, n_tot_channels - self.avg_n_channel,\n n_channel_step_size, dtype=np.int)\n', (2780, 2875), True, 'import numpy as np\n'), ((2971, 2996), 'numpy.hstack', 'np.hstack', (['[part1, part2]'], {}), '([part1, part2])\n', (2980, 2996), True, 'import numpy as np\n'), ((6552, 6630), 'numpy.arange', 'np.arange', (['(0)', '(part1_upper_bound - self.signal_channel_num)', 'n_channel_step_size'], {}), '(0, part1_upper_bound - self.signal_channel_num, 
n_channel_step_size)\n', (6561, 6630), True, 'import numpy as np\n'), ((6648, 6774), 'numpy.arange', 'np.arange', (['part2_lower_bound', '(self.masked_cube_with_larger_v_range.shape[0] - self.signal_channel_num)', 'n_channel_step_size'], {}), '(part2_lower_bound, self.masked_cube_with_larger_v_range.shape[0] -\n self.signal_channel_num, n_channel_step_size)\n', (6657, 6774), True, 'import numpy as np\n'), ((6924, 6949), 'numpy.hstack', 'np.hstack', (['[part1, part2]'], {}), '([part1, part2])\n', (6933, 6949), True, 'import numpy as np\n'), ((7505, 7519), 'numpy.nansum', 'np.nansum', (['box'], {}), '(box)\n', (7514, 7519), True, 'import numpy as np\n'), ((3394, 3413), 'numpy.nansum', 'np.nansum', (['m0.value'], {}), '(m0.value)\n', (3403, 3413), True, 'import numpy as np\n'), ((8397, 8420), 'numpy.ones', 'np.ones', (['template.shape'], {}), '(template.shape)\n', (8404, 8420), True, 'import numpy as np\n'), ((1019, 1076), 'numpy.isnan', 'np.isnan', (['cloud.mask_cube_obj.mask3d.filled_data[:].value'], {}), '(cloud.mask_cube_obj.mask3d.filled_data[:].value)\n', (1027, 1076), True, 'import numpy as np\n'), ((2002, 2054), 'numpy.isclose', 'np.isclose', (['v_axis', 'self.signal_vlo.value'], {'atol': '(0.07)'}), '(v_axis, self.signal_vlo.value, atol=0.07)\n', (2012, 2054), True, 'import numpy as np\n'), ((2238, 2290), 'numpy.isclose', 'np.isclose', (['v_axis', 'self.signal_vhi.value'], {'atol': '(0.07)'}), '(v_axis, self.signal_vhi.value, atol=0.07)\n', (2248, 2290), True, 'import numpy as np\n'), ((5319, 5360), 'numpy.isclose', 'np.isclose', (['v_axis', 'self.signal_vlo.value'], {}), '(v_axis, self.signal_vlo.value)\n', (5329, 5360), True, 'import numpy as np\n'), ((5547, 5588), 'numpy.isclose', 'np.isclose', (['v_axis', 'self.signal_vhi.value'], {}), '(v_axis, self.signal_vhi.value)\n', (5557, 5588), True, 'import numpy as np\n'), ((8912, 8943), 'collections.OrderedDict', 'OrderedDict', (["{'line': big_cube}"], {}), "({'line': big_cube})\n", (8923, 8943), False, 'from 
collections import OrderedDict\n'), ((8634, 8645), 'astropy.wcs.WCS', 'WCS', (['header'], {}), '(header)\n', (8637, 8645), False, 'from astropy.wcs import WCS\n')] |
from pyids.model_selection import CoordinateAscent
from pyids.algorithms.ids import IDS
from pyids.algorithms import mine_CARs, mine_IDS_ruleset
from pyarc.qcba.data_structures import QuantitativeDataFrame
import pandas as pd
import numpy as np
# Load the Iris data set, wrap it for quantitative (interval-based) rule
# matching, and mine the top-20 class association rules that IDS will use.
df_iris = pd.read_csv("../../../data/iris0.csv")
quant_df = QuantitativeDataFrame(df_iris)
cars = mine_CARs(df_iris, 20)
def is_solution_interpretable(metrics):
    """Decide whether an IDS solution counts as interpretable.

    ``metrics`` is the dict returned by
    ``IDS.score_interpretability_metrics``; it is printed as a side effect.
    Each metric is checked against a fixed threshold (same evaluation order
    and short-circuiting as a single ``and`` chain).
    """
    print(metrics)
    if metrics["fraction_overlap"] > 0.5:
        return False
    # NOTE(review): a true fraction can never exceed 1.0, so this check can
    # never pass as written — confirm whether ``>= 1.0`` was intended.
    if metrics["fraction_classes"] <= 1.0:
        return False
    if metrics["fraction_uncovered"] > 0.5:
        return False
    if metrics["average_rule_width"] >= 8:
        return False
    return metrics["ruleset_length"] <= 10
def solution_interpretability_distance(metrics):
    """Distance of ``metrics`` from the interpretable region.

    For every interpretability metric, measure how far it violates its
    threshold (0 when satisfied) and return the sum of the violations, so a
    fully interpretable solution scores 0.

    :param metrics: dict produced by ``IDS.score_interpretability_metrics``
    :return: non-negative numpy scalar
    """
    distance_vector = np.array([
        max(metrics["fraction_overlap"] - 0.5, 0),
        max(1 - metrics["fraction_classes"], 0),
        max(metrics["fraction_uncovered"] - 0.5, 0),
        max(metrics["average_rule_width"] - 8, 0),
        max(metrics["ruleset_length"] - 10, 0)
    ])
    # BUG FIX: the original had an unreachable
    # ``return np.linalg.norm(distance_vector)`` after this return; the dead
    # line was removed without changing behavior (the sum was always used).
    return np.sum(distance_vector)
def fmax(lambda_dict):
    """Objective for coordinate ascent: AUC of an SLS-trained IDS model.

    ``lambda_dict`` maps lambda names to weights; its values (in insertion
    order) form the IDS lambda array.  The lambdas and the resulting AUC are
    printed as a side effect.
    """
    print(lambda_dict)
    classifier = IDS(algorithm="SLS")
    lambda_array = list(lambda_dict.values())
    classifier.fit(class_association_rules=cars, quant_dataframe=quant_df, lambda_array=lambda_array)
    metrics = classifier.score_interpretability_metrics(quant_df)
    # Interpretability-penalty variant, kept disabled as in the original:
    # if not is_solution_interpretable(metrics):
    #     distance = -solution_interpretability_distance(metrics)
    #     print(distance)
    #     return -distance
    score = classifier.score_auc(quant_df)
    print(score)
    return score
# Search ranges: each of the seven lambdas l1..l7 may vary over [1, 1000].
lambda_ranges = {f"l{i}": (1, 1000) for i in range(1, 8)}

coord_asc = CoordinateAscent(func=fmax,
                 func_args_ranges=lambda_ranges,
                 ternary_search_precision=50,
                 max_iterations=3)
coord_asc.fit()

# Persist the optimization trace for later inspection.
pd.DataFrame(coord_asc.procedure_data).to_csv("output_data/coordinate_ascent_run_AUConly.csv")
| [
"pandas.read_csv",
"pyarc.qcba.data_structures.QuantitativeDataFrame",
"numpy.sum",
"pyids.algorithms.ids.IDS",
"numpy.linalg.norm",
"pandas.DataFrame",
"pyids.algorithms.mine_CARs"
] | [((258, 296), 'pandas.read_csv', 'pd.read_csv', (['"""../../../data/iris0.csv"""'], {}), "('../../../data/iris0.csv')\n", (269, 296), True, 'import pandas as pd\n'), ((308, 338), 'pyarc.qcba.data_structures.QuantitativeDataFrame', 'QuantitativeDataFrame', (['df_iris'], {}), '(df_iris)\n', (329, 338), False, 'from pyarc.qcba.data_structures import QuantitativeDataFrame\n'), ((346, 368), 'pyids.algorithms.mine_CARs', 'mine_CARs', (['df_iris', '(20)'], {}), '(df_iris, 20)\n', (355, 368), False, 'from pyids.algorithms import mine_CARs, mine_IDS_ruleset\n'), ((1902, 1940), 'pandas.DataFrame', 'pd.DataFrame', (['coord_asc.procedure_data'], {}), '(coord_asc.procedure_data)\n', (1914, 1940), True, 'import pandas as pd\n'), ((1029, 1052), 'numpy.sum', 'np.sum', (['distance_vector'], {}), '(distance_vector)\n', (1035, 1052), True, 'import numpy as np\n'), ((1064, 1095), 'numpy.linalg.norm', 'np.linalg.norm', (['distance_vector'], {}), '(distance_vector)\n', (1078, 1095), True, 'import numpy as np\n'), ((1154, 1174), 'pyids.algorithms.ids.IDS', 'IDS', ([], {'algorithm': '"""SLS"""'}), "(algorithm='SLS')\n", (1157, 1174), False, 'from pyids.algorithms.ids import IDS\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 8 20:41:14 2018
@author: <NAME>
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers.core import Dense, Dropout, Activation, Flatten
from tensorflow.python.keras.layers.convolutional import Conv2D, MaxPooling2D
import os
import cv2
# Containers filled while scanning the image directories.
train_images, train_labels = [], []
test_images, test_labels = [], []
labels = []              # one entry per training sub-directory (class name)
test_image_name = []     # test file names, kept for the final report
num_classes = 0          # incremented once per training sub-directory

# Training hyper-parameters.
batch_size = 10
epochs = 10

# Data locations, relative to the current working directory.
TRAINING_IMAGES_DIR = os.getcwd() + "/train/"
TEST_IMAGES_DIR = os.getcwd() + "/test/"

# Target image size in pixels.
img_rows = 32
img_cols = 32
# ---------------------------------------------------------------------------
# Load the training set: one sub-directory per class under TRAINING_IMAGES_DIR.
# Each image is resized to img_rows x img_cols and converted to grayscale;
# the class index (0, 1, ...) is recorded as its label.
# (Removed the unused local ``image_size`` and dead commented-out code.)
# ---------------------------------------------------------------------------
for label_name in os.listdir(TRAINING_IMAGES_DIR):
    print(label_name)
    labels.append(label_name)
    TRAINING_IMAGES_SUB_DIR = TRAINING_IMAGES_DIR + label_name
    for train_image in os.listdir(TRAINING_IMAGES_SUB_DIR):
        filename = os.path.join(TRAINING_IMAGES_SUB_DIR, train_image)
        image = cv2.imread(filename)  # read image using OpenCV (BGR)
        print(filename)
        # Resize to the network input size, then convert to grayscale.
        image = cv2.resize(image, (img_rows, img_cols), 0, 0, cv2.INTER_LINEAR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        train_images.append(image)
        train_labels.append(num_classes)
    num_classes = num_classes + 1

train_images = np.array(train_images, dtype=np.uint8)
# ---------------------------------------------------------------------------
# Load the test set: a flat directory of images.  True labels are unknown, so
# a placeholder label 0 is stored; the file name is kept for reporting.
# (Removed the unused locals ``image_size`` and ``num_channels`` and dead
# commented-out code.)
# ---------------------------------------------------------------------------
for test_image in os.listdir(TEST_IMAGES_DIR):
    filename = os.path.join(TEST_IMAGES_DIR, test_image)
    image = cv2.imread(filename)  # read image using OpenCV (BGR)
    # Resize and grayscale exactly as done for the training images.
    image = cv2.resize(image, (img_rows, img_cols), 0, 0, cv2.INTER_LINEAR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    test_images.append(image)
    test_labels.append(0)
    test_image_name.append(test_image)

test_images = np.array(test_images, dtype=np.uint8)
x_train = train_images
x_test = test_images

# Keras backends differ in where the channel axis lives; pick the input
# shape accordingly and reshape the grayscale images to match.
if K.image_data_format() == 'channels_first':
    input_shape = (1, img_rows, img_cols)
else:
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.reshape((x_train.shape[0],) + input_shape)
x_test = x_test.reshape((x_test.shape[0],) + input_shape)

# Scale pixel values from [0, 255] to [0, 1] as float32.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

y_train = train_labels
y_test = test_labels

print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# CNN: three convolutional layers (with one pooling stage and dropout),
# followed by a dense classifier head with softmax over the classes found
# in the training directory.
model = Sequential([
    Conv2D(6, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(128, (3, 3), activation='relu'),
    Dropout(0.25),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
# Labels are integer class indices, hence the sparse cross-entropy loss.
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
#model.load_weights('my_model')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
model.save_weights('./my_model')
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test loss:', test_loss)
print('Test accuracy:', test_acc)
predictions = model.predict(x_test)
i = 0
for a in test_labels:
print(test_image_name[i])
j = 0
for b in labels:
print(labels[j] + " " + "{0:.2f}".format(predictions[i,j]*100) + "% confidence")
j = j+1
print("--------------------------")
i = i+1
#model.save("model.pb")
| [
"tensorflow.train.AdamOptimizer",
"tensorflow.python.keras.layers.convolutional.MaxPooling2D",
"os.listdir",
"tensorflow.python.keras.layers.core.Dense",
"os.path.join",
"os.getcwd",
"numpy.array",
"tensorflow.python.keras.layers.convolutional.Conv2D",
"tensorflow.python.keras.layers.core.Flatten",
... | [((768, 799), 'os.listdir', 'os.listdir', (['TRAINING_IMAGES_DIR'], {}), '(TRAINING_IMAGES_DIR)\n', (778, 799), False, 'import os\n'), ((1502, 1540), 'numpy.array', 'np.array', (['train_images'], {'dtype': 'np.uint8'}), '(train_images, dtype=np.uint8)\n', (1510, 1540), True, 'import numpy as np\n'), ((1755, 1782), 'os.listdir', 'os.listdir', (['TEST_IMAGES_DIR'], {}), '(TEST_IMAGES_DIR)\n', (1765, 1782), False, 'import os\n'), ((2326, 2363), 'numpy.array', 'np.array', (['test_images'], {'dtype': 'np.uint8'}), '(test_images, dtype=np.uint8)\n', (2334, 2363), True, 'import numpy as np\n'), ((3594, 3606), 'tensorflow.python.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3604, 3606), False, 'from tensorflow.python.keras.models import Sequential\n'), ((644, 655), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (653, 655), False, 'import os\n'), ((689, 700), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (698, 700), False, 'import os\n'), ((914, 949), 'os.listdir', 'os.listdir', (['TRAINING_IMAGES_SUB_DIR'], {}), '(TRAINING_IMAGES_SUB_DIR)\n', (924, 949), False, 'import os\n'), ((1812, 1853), 'os.path.join', 'os.path.join', (['TEST_IMAGES_DIR', 'test_image'], {}), '(TEST_IMAGES_DIR, test_image)\n', (1824, 1853), False, 'import os\n'), ((1932, 1952), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (1942, 1952), False, 'import cv2\n'), ((2087, 2150), 'cv2.resize', 'cv2.resize', (['image', '(img_rows, img_cols)', '(0)', '(0)', 'cv2.INTER_LINEAR'], {}), '(image, (img_rows, img_cols), 0, 0, cv2.INTER_LINEAR)\n', (2097, 2150), False, 'import cv2\n'), ((2163, 2202), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2175, 2202), False, 'import cv2\n'), ((2602, 2623), 'tensorflow.python.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (2621, 2623), True, 'from tensorflow.python.keras import backend as K\n'), ((3618, 3691), 
'tensorflow.python.keras.layers.convolutional.Conv2D', 'Conv2D', (['(6)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(6, kernel_size=(3, 3), activation='relu', input_shape=input_shape)\n", (3624, 3691), False, 'from tensorflow.python.keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((3740, 3777), 'tensorflow.python.keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (3746, 3777), False, 'from tensorflow.python.keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((3790, 3820), 'tensorflow.python.keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3802, 3820), False, 'from tensorflow.python.keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((3833, 3871), 'tensorflow.python.keras.layers.convolutional.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (3839, 3871), False, 'from tensorflow.python.keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((3884, 3897), 'tensorflow.python.keras.layers.core.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (3891, 3897), False, 'from tensorflow.python.keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((3910, 3919), 'tensorflow.python.keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (3917, 3919), False, 'from tensorflow.python.keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((3932, 3961), 'tensorflow.python.keras.layers.core.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (3937, 3961), False, 'from tensorflow.python.keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((3974, 3986), 'tensorflow.python.keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3981, 3986), False, 'from tensorflow.python.keras.layers.core import Dense, Dropout, 
Activation, Flatten\n'), ((3999, 4039), 'tensorflow.python.keras.layers.core.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (4004, 4039), False, 'from tensorflow.python.keras.layers.core import Dense, Dropout, Activation, Flatten\n'), ((1004, 1054), 'os.path.join', 'os.path.join', (['TRAINING_IMAGES_SUB_DIR', 'train_image'], {}), '(TRAINING_IMAGES_SUB_DIR, train_image)\n', (1016, 1054), False, 'import os\n'), ((1071, 1091), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (1081, 1091), False, 'import cv2\n'), ((1250, 1313), 'cv2.resize', 'cv2.resize', (['image', '(img_rows, img_cols)', '(0)', '(0)', 'cv2.INTER_LINEAR'], {}), '(image, (img_rows, img_cols), 0, 0, cv2.INTER_LINEAR)\n', (1260, 1313), False, 'import cv2\n'), ((1329, 1368), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1341, 1368), False, 'import cv2\n'), ((4114, 4138), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (4136, 4138), True, 'import tensorflow as tf\n')] |
"""
===============================================
Plot with Respect to Different Reference Frames
===============================================
In this example, we will demonstrate how to use the TransformManager.
We will add several transforms to the manager and plot all frames in
two reference frames ('world' and 'A').
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from pytransform3d.plot_utils import make_3d_axis
from pytransform3d.transformations import random_transform
from pytransform3d.transform_manager import TransformManager
random_state = np.random.RandomState(0)
A2world = random_transform(random_state)
B2world = random_transform(random_state)
A2C = random_transform(random_state)
D2B = random_transform(random_state)
tm = TransformManager()
tm.add_transform("A", "world", A2world)
tm.add_transform("B", "world", B2world)
tm.add_transform("A", "C", A2C)
tm.add_transform("D", "B", D2B)
plt.figure(figsize=(10, 5))
ax = make_3d_axis(3, 121)
ax = tm.plot_frames_in("world", ax=ax, alpha=0.6)
ax.view_init(30, 20)
ax = make_3d_axis(3, 122)
ax = tm.plot_frames_in("A", ax=ax, alpha=0.6)
ax.view_init(30, 20)
plt.show()
| [
"pytransform3d.transformations.random_transform",
"pytransform3d.transform_manager.TransformManager",
"matplotlib.pyplot.figure",
"pytransform3d.plot_utils.make_3d_axis",
"numpy.random.RandomState",
"matplotlib.pyplot.show"
] | [((587, 611), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (608, 611), True, 'import numpy as np\n'), ((622, 652), 'pytransform3d.transformations.random_transform', 'random_transform', (['random_state'], {}), '(random_state)\n', (638, 652), False, 'from pytransform3d.transformations import random_transform\n'), ((663, 693), 'pytransform3d.transformations.random_transform', 'random_transform', (['random_state'], {}), '(random_state)\n', (679, 693), False, 'from pytransform3d.transformations import random_transform\n'), ((700, 730), 'pytransform3d.transformations.random_transform', 'random_transform', (['random_state'], {}), '(random_state)\n', (716, 730), False, 'from pytransform3d.transformations import random_transform\n'), ((737, 767), 'pytransform3d.transformations.random_transform', 'random_transform', (['random_state'], {}), '(random_state)\n', (753, 767), False, 'from pytransform3d.transformations import random_transform\n'), ((774, 792), 'pytransform3d.transform_manager.TransformManager', 'TransformManager', ([], {}), '()\n', (790, 792), False, 'from pytransform3d.transform_manager import TransformManager\n'), ((938, 965), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (948, 965), True, 'import matplotlib.pyplot as plt\n'), ((972, 992), 'pytransform3d.plot_utils.make_3d_axis', 'make_3d_axis', (['(3)', '(121)'], {}), '(3, 121)\n', (984, 992), False, 'from pytransform3d.plot_utils import make_3d_axis\n'), ((1070, 1090), 'pytransform3d.plot_utils.make_3d_axis', 'make_3d_axis', (['(3)', '(122)'], {}), '(3, 122)\n', (1082, 1090), False, 'from pytransform3d.plot_utils import make_3d_axis\n'), ((1159, 1169), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1167, 1169), True, 'import matplotlib.pyplot as plt\n')] |
import torch
import torch.nn as nn
import numpy as np
np.random.seed(0)
from model.generate_anchor import generate_anchors
from model.bbox_transform import clip_boxes
from model.ellipse_transform import ellipse_transform_inv, ellipse2box
from nms.cpu_nms import cpu_nms
from nms.gpu_nms import gpu_nms
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = ((ws >= min_size) & (hs >= min_size)).nonzero().view(-1)
return keep
class EllipseProposalLayer(nn.Module):
def __init__(self, cfg):
super(EllipseProposalLayer, self).__init__()
self._cfg = dict(cfg)
self._preprocess()
def _preprocess(self):
# pre-computing stuff for making anchor later
self._im_info = (self._cfg['MAX_SIZE'], self._cfg['MAX_SIZE'])
base_anchors = generate_anchors(
base_size=self._cfg['RPN_FEAT_STRIDE'],
ratios=[1],
scales=np.array(self._cfg['ANCHOR_SCALES'], dtype=np.float32))
num_anchors = base_anchors.shape[0]
feat_stride = self._cfg['RPN_FEAT_STRIDE']
feat_width = self._cfg['MAX_SIZE'] // self._cfg['RPN_FEAT_STRIDE']
feat_height = feat_width
shift_x = np.arange(0, feat_width) * feat_stride
shift_y = np.arange(0, feat_height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(),
shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
anchors = base_anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
self._feat_height = feat_height
self._feat_width = feat_width
self._anchors = torch.from_numpy(anchors).float()
def cuda(self, device=None):
self._anchors = self._anchors.cuda(device)
return self._apply(lambda t: t.cuda(device))
def forward(self, out_cls, out_ellipse):
"""
out_cls: (feat_height, feat_width, anchors, 2) FloatVariable
out_ellipse: (feat_height, feat_width, anchors, 5) FloatVariable
"""
scores = nn.functional.softmax(
out_cls, dim=3)[..., 1].contiguous().data.view(-1, 1)
ellipse_deltas = out_ellipse.data.view(-1, 5)
# 1. Generate proposals from ellipse deltas and shifted anchors
# Convert anchors into proposals via ellipse transformations
# Convert ellipse into bbox proposals
ellipses = ellipse_transform_inv(self._anchors, ellipse_deltas)
boxes = ellipse2box(ellipses, self._cfg['ELLIPSE_PAD'])
# 2. clip predicted boxes to image
boxes = clip_boxes(boxes, self._im_info[:2])
# 3. remove predicted boxes with either height or width < threshold
# (NOTICE: convert min_size to input image scale stored in im_info[2])
keep = _filter_boxes(boxes, self._cfg['TEST.RPN_MIN_SIZE'])
boxes = boxes[keep, :]
ellipses = ellipses[keep, :]
scores = scores[keep]
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
_, order = torch.sort(scores.view(-1), dim=0, descending=True)
if self._cfg['TEST.RPN_PRE_NMS_TOP_N'] > 0:
order = order[:self._cfg['TEST.RPN_PRE_NMS_TOP_N']]
boxes = boxes[order, :]
ellipses = ellipses[order, :]
scores = scores[order]
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
if self._cfg['USE_GPU_NMS']:
nms = gpu_nms
else:
nms = cpu_nms
dets = np.hstack((boxes.cpu().numpy(), scores.cpu().numpy()))
keep = nms(dets, self._cfg['TEST.RPN_NMS_THRESH'])
keep = torch.from_numpy(np.array(keep)).type_as(scores).long()
if self._cfg['TEST.RPN_POST_NMS_TOP_N'] > 0:
keep = keep[:self._cfg['TEST.RPN_POST_NMS_TOP_N']]
boxes = boxes[keep, :]
ellipses = ellipses[keep, :]
scores = scores[keep].view(-1)
return (boxes, ellipses, scores)
| [
"model.ellipse_transform.ellipse_transform_inv",
"torch.from_numpy",
"model.ellipse_transform.ellipse2box",
"model.bbox_transform.clip_boxes",
"numpy.array",
"numpy.random.seed",
"numpy.meshgrid",
"torch.nn.functional.softmax",
"numpy.arange"
] | [((56, 73), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (70, 73), True, 'import numpy as np\n'), ((1441, 1470), 'numpy.meshgrid', 'np.meshgrid', (['shift_x', 'shift_y'], {}), '(shift_x, shift_y)\n', (1452, 1470), True, 'import numpy as np\n'), ((2831, 2883), 'model.ellipse_transform.ellipse_transform_inv', 'ellipse_transform_inv', (['self._anchors', 'ellipse_deltas'], {}), '(self._anchors, ellipse_deltas)\n', (2852, 2883), False, 'from model.ellipse_transform import ellipse_transform_inv, ellipse2box\n'), ((2900, 2947), 'model.ellipse_transform.ellipse2box', 'ellipse2box', (['ellipses', "self._cfg['ELLIPSE_PAD']"], {}), "(ellipses, self._cfg['ELLIPSE_PAD'])\n", (2911, 2947), False, 'from model.ellipse_transform import ellipse_transform_inv, ellipse2box\n'), ((3008, 3044), 'model.bbox_transform.clip_boxes', 'clip_boxes', (['boxes', 'self._im_info[:2]'], {}), '(boxes, self._im_info[:2])\n', (3018, 3044), False, 'from model.bbox_transform import clip_boxes\n'), ((1317, 1341), 'numpy.arange', 'np.arange', (['(0)', 'feat_width'], {}), '(0, feat_width)\n', (1326, 1341), True, 'import numpy as np\n'), ((1374, 1399), 'numpy.arange', 'np.arange', (['(0)', 'feat_height'], {}), '(0, feat_height)\n', (1383, 1399), True, 'import numpy as np\n'), ((1039, 1093), 'numpy.array', 'np.array', (["self._cfg['ANCHOR_SCALES']"], {'dtype': 'np.float32'}), "(self._cfg['ANCHOR_SCALES'], dtype=np.float32)\n", (1047, 1093), True, 'import numpy as np\n'), ((2080, 2105), 'torch.from_numpy', 'torch.from_numpy', (['anchors'], {}), '(anchors)\n', (2096, 2105), False, 'import torch\n'), ((4188, 4202), 'numpy.array', 'np.array', (['keep'], {}), '(keep)\n', (4196, 4202), True, 'import numpy as np\n'), ((2481, 2518), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['out_cls'], {'dim': '(3)'}), '(out_cls, dim=3)\n', (2502, 2518), True, 'import torch.nn as nn\n')] |
from typing import Union, Optional, Any, Dict
import numpy as np
from l5kit.geometry import transform_points
import torch
__all__ = [
'traj_stat', 'classify_traj', 'comp_val', 'filter_traj'
]
def trajectory_stat(
history_positions: np.array,
target_positions: np.array,
centroid: np.array,
world_to_image: np.array,
) -> Any:
history_pixels = transform_points(history_positions + centroid, world_to_image)
history_pixels -= history_pixels[0]
history_y_change = history_pixels[np.argmax(np.abs(history_pixels[:, 1])), 1]
history_x_change = history_pixels[np.argmax(np.abs(history_pixels[:, 0])), 0]
target_pixels = transform_points(target_positions + centroid, world_to_image)
target_pixels -= target_pixels[0]
target_y_change = target_pixels[np.argmax(np.abs(target_pixels[:, 1])), 1]
target_x_change = target_pixels[np.argmax(np.abs(target_pixels[:, 0])), 0]
hist_diff = np.linalg.norm(np.diff(history_positions, axis=0), axis=1)
history_speed = hist_diff.sum() / history_positions.shape[0]
history_acceleration = (hist_diff[-1] - hist_diff[0]) / hist_diff.shape[0]
target_diff = np.linalg.norm(np.diff(target_positions, axis=0), axis=1)
target_speed = target_diff.sum() / target_positions.shape[0]
target_acceleration = (target_diff[-1] - target_diff[0]) / target_diff.shape[0]
total_acceleration = (target_diff[-1] - hist_diff[0]) / (target_diff.shape[0] + hist_diff.shape[0])
return ('history_y_change', history_y_change), ('history_x_change', history_x_change), \
('target_y_change', target_y_change), ('target_x_change', target_x_change), \
('history_speed', history_speed), ('history_acceleration', history_acceleration), \
('target_speed', target_speed), ('target_acceleration', target_acceleration), \
('total_acceleration', total_acceleration)
def traj_stat(traj: dict, predicted_targets=None) -> Any:
targets = predicted_targets if predicted_targets is not None else traj['target_positions']
return trajectory_stat(traj['history_positions'], targets,
traj['centroid'], traj['world_to_image'])
def classify_traj(
hist_y_change: np.array,
tar_y_change: np.array,
speed_change: np.array,
turn_thresh: Optional[float] = 3.,
speed_thresh: Optional[float] = 0.5,
prefix: Optional[Any] = '',
matrix: Optional[bool] = False
) -> Union[tuple, str]:
if np.abs(tar_y_change) > turn_thresh:
target = 'D' if tar_y_change < 0. else 'U'
else:
target = 'N'
if np.abs(hist_y_change) > turn_thresh:
history = 'U' if hist_y_change < 0. else 'D'
else:
history = 'N'
if np.abs(speed_change) > speed_thresh:
speed = 'D' if speed_change < 0. else 'U'
else:
speed = 'N'
if matrix:
conv = lambda x: 1 if x == 'N' else 0 if x == 'U' else 2
return conv(history), conv(target), conv(speed)
return f'{prefix}{history}{target}{speed}'
def comp_val(hist_change, tar_change, speed_change, traj_cls: str):
if traj_cls[1] == 'N':
return abs(hist_change), abs(speed_change)
elif traj_cls[0] == 'N':
return abs(tar_change), abs(speed_change)
return abs(tar_change) + abs(hist_change), abs(speed_change)
def filter_traj(traj: dict, static_hist_thresh: Optional[float] = 1.):
value = traj['target_availabilities'].sum()
if value != traj['target_availabilities'].shape[0]:
return 'target', value
value = traj['history_availabilities'].sum()
if value != traj['history_availabilities'].shape[0]:
return 'history', value
value = np.linalg.norm(np.diff(traj['history_positions'], axis=0), axis=1).sum()
if static_hist_thresh and value < static_hist_thresh:
return 'static', value # filter scenes with static history
return False
| [
"numpy.abs",
"numpy.diff",
"l5kit.geometry.transform_points"
] | [((386, 448), 'l5kit.geometry.transform_points', 'transform_points', (['(history_positions + centroid)', 'world_to_image'], {}), '(history_positions + centroid, world_to_image)\n', (402, 448), False, 'from l5kit.geometry import transform_points\n'), ((674, 735), 'l5kit.geometry.transform_points', 'transform_points', (['(target_positions + centroid)', 'world_to_image'], {}), '(target_positions + centroid, world_to_image)\n', (690, 735), False, 'from l5kit.geometry import transform_points\n'), ((964, 998), 'numpy.diff', 'np.diff', (['history_positions'], {'axis': '(0)'}), '(history_positions, axis=0)\n', (971, 998), True, 'import numpy as np\n'), ((1186, 1219), 'numpy.diff', 'np.diff', (['target_positions'], {'axis': '(0)'}), '(target_positions, axis=0)\n', (1193, 1219), True, 'import numpy as np\n'), ((2505, 2525), 'numpy.abs', 'np.abs', (['tar_y_change'], {}), '(tar_y_change)\n', (2511, 2525), True, 'import numpy as np\n'), ((2630, 2651), 'numpy.abs', 'np.abs', (['hist_y_change'], {}), '(hist_y_change)\n', (2636, 2651), True, 'import numpy as np\n'), ((2760, 2780), 'numpy.abs', 'np.abs', (['speed_change'], {}), '(speed_change)\n', (2766, 2780), True, 'import numpy as np\n'), ((537, 565), 'numpy.abs', 'np.abs', (['history_pixels[:, 1]'], {}), '(history_pixels[:, 1])\n', (543, 565), True, 'import numpy as np\n'), ((619, 647), 'numpy.abs', 'np.abs', (['history_pixels[:, 0]'], {}), '(history_pixels[:, 0])\n', (625, 647), True, 'import numpy as np\n'), ((820, 847), 'numpy.abs', 'np.abs', (['target_pixels[:, 1]'], {}), '(target_pixels[:, 1])\n', (826, 847), True, 'import numpy as np\n'), ((899, 926), 'numpy.abs', 'np.abs', (['target_pixels[:, 0]'], {}), '(target_pixels[:, 0])\n', (905, 926), True, 'import numpy as np\n'), ((3725, 3767), 'numpy.diff', 'np.diff', (["traj['history_positions']"], {'axis': '(0)'}), "(traj['history_positions'], axis=0)\n", (3732, 3767), True, 'import numpy as np\n')] |
import os
import torch
import scipy
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import StandardScaler
from imblearn.under_sampling import RandomUnderSampler
from VDP_Layers import VDP_FullyConnected, VDP_Relu, VDP_Softmax
from torch.utils.data import DataLoader, Dataset
from sklearn.metrics import confusion_matrix, roc_auc_score, average_precision_score, balanced_accuracy_score
os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
class VDPNet(torch.nn.Module):
def __init__(self, layer_1, layer_2, layer_3):
super(VDPNet, self).__init__()
self.fullyCon1 = VDP_FullyConnected(12, layer_1, input_flag=True)
self.fullyCon2 = VDP_FullyConnected(layer_1, layer_2, input_flag=False)
self.fullyCon3 = VDP_FullyConnected(layer_2, layer_3, input_flag=False)
self.fullyCon4 = VDP_FullyConnected(layer_3, 1, input_flag=False)
self.relu = VDP_Relu() # Actually SELU
self.bn1 = torch.nn.BatchNorm1d(layer_1)
self.bn2 = torch.nn.BatchNorm1d(layer_2)
self.bn3 = torch.nn.BatchNorm1d(layer_3)
self.bn4 = torch.nn.BatchNorm1d(1)
self.register_buffer("thing", torch.tensor(1e-3).repeat([self.fullyCon3.out_features]))
def forward(self, x):
# flat_x = torch.flatten(x_input, start_dim=1)
mu, sigma = self.fullyCon1.forward(x)
mu = self.bn1(mu)
mu, sigma = self.relu(mu, sigma)
mu, sigma = self.fullyCon2.forward(mu, sigma)
mu = self.bn2(mu)
mu, sigma = self.relu(mu, sigma)
mu, sigma = self.fullyCon3(mu, sigma)
mu = self.bn3(mu)
mu, sigma = self.relu(mu, sigma)
mu, sigma = self.fullyCon4(mu, sigma)
mu = self.bn4(mu)
return mu, sigma
def nll_gaussian(self, y_pred_mean, y_pred_sd, y_test):
thing = torch.tensor(1e-3)
# dense_label = torch.argmax(y_test, dim=1)
criterion = torch.nn.BCEWithLogitsLoss(reduction='none')
y_pred_sd_inv = torch.inverse(
y_pred_sd + torch.diag(thing.repeat([self.fullyCon4.out_features])).to(y_pred_sd.device))
mu_ = criterion(y_pred_mean, y_test)
mu_sigma = torch.bmm(mu_.unsqueeze(1), y_pred_sd_inv)
ms = 0.5 * mu_sigma + 0.5 * torch.log(torch.det(y_pred_sd +
torch.diag(thing.repeat([self.fullyCon4.out_features])).to(y_pred_sd.device))).unsqueeze(1)
ms = ms.mean()
return ms
def batch_loss(self, output_mean, output_sigma, label):
output_sigma_clamp = torch.clamp(output_sigma, 1e-10, 1e+10)
tau = 0.002
log_likelihood = self.nll_gaussian(output_mean, output_sigma_clamp, label)
loss_value = log_likelihood + tau * (self.fullyCon1.kl_loss_term() + self.fullyCon2.kl_loss_term() +
self.fullyCon3.kl_loss_term() + self.fullyCon4.kl_loss_term())
return loss_value
class data_loader(Dataset):
def __init__(self, X, y):
self.X = torch.from_numpy(X).float().to('cuda:0')
self.y = torch.from_numpy(y).float().to('cuda:0')
def __len__(self):
return len(self.X)
def __getitem__(self, index):
target = self.y[index]
data_val = self.X[index, :]
return data_val, target
def load_data():
cd = os.getcwd()
x_eicu = pd.read_csv(cd+'/../data/x_eicu.csv')
y_eicu = pd.read_csv(cd+'/../data/y_eicu.csv')
mimic = pd.read_csv(cd + '/../data/mimic.csv')
assert np.all(x_eicu['patientunitstayid'].to_numpy() == y_eicu['patientunitstayid'].to_numpy())
feature_list = ['lactate', 'oobventday1', 'eyes', 'motor', 'verbal', 'albumin_x',
'age', 'creatinine_x', 'BUN', 'PT - INR', 'WBC x 1000', 'meanbp']
feature_list_mimic = ['Lactate', 'firstdayvent', 'gcseyes', 'gcsmotor', 'gcsverbal', 'Albumin',
'Age', 'Creatinine', 'BUN', 'INR', 'WBC', 'MAP']
x_eicu = x_eicu[feature_list].to_numpy()
y_eicu = y_eicu['actualicumortality'].to_numpy()
x_mimic = mimic[feature_list_mimic].to_numpy()
y_mimic = mimic['Mortality'].to_numpy()
x = np.vstack((x_eicu, x_mimic))
y = np.hstack((y_eicu, y_mimic))
shuffler = np.random.permutation(len(x))
return x[shuffler], y[shuffler]
# return x_eicu, y_eicu
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, h
def main():
x, y = load_data()
kfold = StratifiedKFold(n_splits=10)
logits_all = []
labels_all = []
accuracy = []
precision = []
sensitivity = []
specificity = []
roc_auc = []
prc_auc = []
balanced_acc = []
counter = 1
for train_index, test_index in kfold.split(x, y):
x_train, y_train = x[train_index], y[train_index]
x_test, y_test = x[test_index], y[test_index]
imputer = IterativeImputer()
scaler = StandardScaler()
x_train = scaler.fit_transform(imputer.fit_transform(x_train))
x_test = scaler.transform(imputer.transform(x_test))
x_train, y_train = RandomUnderSampler().fit_resample(x_train, y_train)
trainset = data_loader(x_train, y_train)
testset = data_loader(x_test, y_test)
trainloader = DataLoader(trainset, batch_size=1000, shuffle=True)
testloader = DataLoader(testset, batch_size=1000, shuffle=True)
model = VDPNet(31, 93, 94)
no_epochs = 18
optimizer = torch.optim.SGD(model.parameters(), lr=0.002219, weight_decay=0.006427, momentum=0.7589, nesterov=True)
model.train()
model.to('cuda:0')
for epoch in range(no_epochs):
for itr, (x_train, y_train) in enumerate(trainloader):
optimizer.zero_grad()
mu, sigma = model.forward(x_train)
loss = model.batch_loss(mu, sigma, y_train.view(-1, 1))
loss.backward()
optimizer.step()
model.eval()
mu, sigma = model.forward(torch.from_numpy(x_test).float().to('cuda:0'))
logits = torch.sigmoid(mu).detach().cpu().numpy()
logits_all.append(logits.reshape(-1))
labels_all.append(y_test)
print('Iter {}/10 done'.format(counter))
counter += 1
for i in range(len(logits_all)):
tn, fp, fn, tp = confusion_matrix(labels_all[i], np.round(logits_all[i])).ravel()
accuracy.append((tp + tn) / (tp + tn + fp + fn))
precision.append(tp / (tp + fp))
sensitivity.append(tp / (tp + fn))
specificity.append(tn / (tn + fp))
roc_auc.append(roc_auc_score(labels_all[i], logits_all[i]))
prc_auc.append(average_precision_score(labels_all[i], logits_all[i]))
balanced_acc.append(balanced_accuracy_score(labels_all[i], np.round(logits_all[i])))
mean, confidence_interval = mean_confidence_interval(accuracy)
print('Accuracy Mean and confidence interval: {:4f}, {:4f}'.format(mean, confidence_interval))
mean, confidence_interval = mean_confidence_interval(precision)
print('Precision Mean and confidence interval: {:4f}, {:4f}'.format(mean, confidence_interval))
mean, confidence_interval = mean_confidence_interval(sensitivity)
print('Sensitivity Mean and confidence interval: {:4f}, {:4f}'.format(mean, confidence_interval))
mean, confidence_interval = mean_confidence_interval(specificity)
print('Specificity Mean and confidence interval: {:4f}, {:4f}'.format(mean, confidence_interval))
mean, confidence_interval = mean_confidence_interval(roc_auc)
print('ROC_AUC Mean and confidence interval: {:4f}, {:4f}'.format(mean, confidence_interval))
mean, confidence_interval = mean_confidence_interval(prc_auc)
print('PRC_AUC Mean and confidence interval: {:4f}, {:4f}'.format(mean, confidence_interval))
mean, confidence_interval = mean_confidence_interval(balanced_acc)
print('Balanced Accuracy Mean and confidence interval: {:4f}, {:4f}'.format(mean, confidence_interval))
if __name__ == '__main__':
main()
| [
"pandas.read_csv",
"numpy.hstack",
"torch.from_numpy",
"sklearn.metrics.roc_auc_score",
"sklearn.model_selection.StratifiedKFold",
"torch.nn.BatchNorm1d",
"numpy.array",
"scipy.stats.sem",
"imblearn.under_sampling.RandomUnderSampler",
"numpy.mean",
"VDP_Layers.VDP_FullyConnected",
"numpy.vstac... | [((3522, 3533), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3531, 3533), False, 'import os\n'), ((3547, 3586), 'pandas.read_csv', 'pd.read_csv', (["(cd + '/../data/x_eicu.csv')"], {}), "(cd + '/../data/x_eicu.csv')\n", (3558, 3586), True, 'import pandas as pd\n'), ((3598, 3637), 'pandas.read_csv', 'pd.read_csv', (["(cd + '/../data/y_eicu.csv')"], {}), "(cd + '/../data/y_eicu.csv')\n", (3609, 3637), True, 'import pandas as pd\n'), ((3648, 3686), 'pandas.read_csv', 'pd.read_csv', (["(cd + '/../data/mimic.csv')"], {}), "(cd + '/../data/mimic.csv')\n", (3659, 3686), True, 'import pandas as pd\n'), ((4335, 4363), 'numpy.vstack', 'np.vstack', (['(x_eicu, x_mimic)'], {}), '((x_eicu, x_mimic))\n', (4344, 4363), True, 'import numpy as np\n'), ((4372, 4400), 'numpy.hstack', 'np.hstack', (['(y_eicu, y_mimic)'], {}), '((y_eicu, y_mimic))\n', (4381, 4400), True, 'import numpy as np\n'), ((4776, 4804), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (4791, 4804), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((795, 843), 'VDP_Layers.VDP_FullyConnected', 'VDP_FullyConnected', (['(12)', 'layer_1'], {'input_flag': '(True)'}), '(12, layer_1, input_flag=True)\n', (813, 843), False, 'from VDP_Layers import VDP_FullyConnected, VDP_Relu, VDP_Softmax\n'), ((869, 923), 'VDP_Layers.VDP_FullyConnected', 'VDP_FullyConnected', (['layer_1', 'layer_2'], {'input_flag': '(False)'}), '(layer_1, layer_2, input_flag=False)\n', (887, 923), False, 'from VDP_Layers import VDP_FullyConnected, VDP_Relu, VDP_Softmax\n'), ((949, 1003), 'VDP_Layers.VDP_FullyConnected', 'VDP_FullyConnected', (['layer_2', 'layer_3'], {'input_flag': '(False)'}), '(layer_2, layer_3, input_flag=False)\n', (967, 1003), False, 'from VDP_Layers import VDP_FullyConnected, VDP_Relu, VDP_Softmax\n'), ((1029, 1077), 'VDP_Layers.VDP_FullyConnected', 'VDP_FullyConnected', (['layer_3', '(1)'], {'input_flag': '(False)'}), '(layer_3, 1, 
input_flag=False)\n', (1047, 1077), False, 'from VDP_Layers import VDP_FullyConnected, VDP_Relu, VDP_Softmax\n'), ((1098, 1108), 'VDP_Layers.VDP_Relu', 'VDP_Relu', ([], {}), '()\n', (1106, 1108), False, 'from VDP_Layers import VDP_FullyConnected, VDP_Relu, VDP_Softmax\n'), ((1145, 1174), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['layer_1'], {}), '(layer_1)\n', (1165, 1174), False, 'import torch\n'), ((1194, 1223), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['layer_2'], {}), '(layer_2)\n', (1214, 1223), False, 'import torch\n'), ((1243, 1272), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['layer_3'], {}), '(layer_3)\n', (1263, 1272), False, 'import torch\n'), ((1292, 1315), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(1)'], {}), '(1)\n', (1312, 1315), False, 'import torch\n'), ((2016, 2035), 'torch.tensor', 'torch.tensor', (['(0.001)'], {}), '(0.001)\n', (2028, 2035), False, 'import torch\n'), ((2107, 2151), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (2133, 2151), False, 'import torch\n'), ((2747, 2794), 'torch.clamp', 'torch.clamp', (['output_sigma', '(1e-10)', '(10000000000.0)'], {}), '(output_sigma, 1e-10, 10000000000.0)\n', (2758, 2794), False, 'import torch\n'), ((4579, 4593), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4587, 4593), True, 'import numpy as np\n'), ((4621, 4631), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (4628, 4631), True, 'import numpy as np\n'), ((4633, 4651), 'scipy.stats.sem', 'scipy.stats.sem', (['a'], {}), '(a)\n', (4648, 4651), False, 'import scipy\n'), ((4665, 4713), 'scipy.stats.t.ppf', 'scipy.stats.t.ppf', (['((1 + confidence) / 2.0)', '(n - 1)'], {}), '((1 + confidence) / 2.0, n - 1)\n', (4682, 4713), False, 'import scipy\n'), ((5180, 5198), 'sklearn.impute.IterativeImputer', 'IterativeImputer', ([], {}), '()\n', (5196, 5198), False, 'from sklearn.impute import IterativeImputer\n'), ((5216, 5232), 
'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5230, 5232), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5561, 5612), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': '(1000)', 'shuffle': '(True)'}), '(trainset, batch_size=1000, shuffle=True)\n', (5571, 5612), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((5634, 5684), 'torch.utils.data.DataLoader', 'DataLoader', (['testset'], {'batch_size': '(1000)', 'shuffle': '(True)'}), '(testset, batch_size=1000, shuffle=True)\n', (5644, 5684), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((6892, 6935), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels_all[i]', 'logits_all[i]'], {}), '(labels_all[i], logits_all[i])\n', (6905, 6935), False, 'from sklearn.metrics import confusion_matrix, roc_auc_score, average_precision_score, balanced_accuracy_score\n'), ((6960, 7013), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['labels_all[i]', 'logits_all[i]'], {}), '(labels_all[i], logits_all[i])\n', (6983, 7013), False, 'from sklearn.metrics import confusion_matrix, roc_auc_score, average_precision_score, balanced_accuracy_score\n'), ((5392, 5412), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {}), '()\n', (5410, 5412), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((7082, 7105), 'numpy.round', 'np.round', (['logits_all[i]'], {}), '(logits_all[i])\n', (7090, 7105), True, 'import numpy as np\n'), ((1355, 1374), 'torch.tensor', 'torch.tensor', (['(0.001)'], {}), '(0.001)\n', (1367, 1374), False, 'import torch\n'), ((6652, 6675), 'numpy.round', 'np.round', (['logits_all[i]'], {}), '(logits_all[i])\n', (6660, 6675), True, 'import numpy as np\n'), ((3210, 3229), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (3226, 3229), False, 'import torch\n'), ((3268, 3287), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (3284, 3287), 
False, 'import torch\n'), ((6303, 6327), 'torch.from_numpy', 'torch.from_numpy', (['x_test'], {}), '(x_test)\n', (6319, 6327), False, 'import torch\n'), ((6367, 6384), 'torch.sigmoid', 'torch.sigmoid', (['mu'], {}), '(mu)\n', (6380, 6384), False, 'import torch\n')] |
import numpy as np
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import ignore_warnings
from imodels.rule_set.rule_fit import RuleFitRegressor
from imodels.util.transforms import FriedScale
## Testing FriedScale():
def test_fried_scale():
    """Train FriedScale on a matrix whose second column is an indicator feature."""
    sample = np.zeros([100, 2])
    sample[0:5, 0] = -100
    sample[5:10, 0] = 100
    sample[10:55, 0] = 1
    # Winsorised version of the first column at trim=0.1: note, will not be
    # scaled because it is already an indicator function, as per FP004.
    sample[5:55, 1] = 1
    scaler = FriedScale()  # trim_quantile=0.1)
    scaler.train(sample)
    # Reference check, currently disabled:
    # np.testing.assert_array_equal(scaler.scale(sample),
    #                               np.hstack([sample[:, 1].reshape([-1, 1]) * 0.4 / np.std(sample[:, 1]),
    #                                          sample[:, 1].reshape([-1, 1])]))
@ignore_warnings(category=ConvergenceWarning)
def test_integration():
    """Fit RuleFitRegressor under three configurations and check its predictions."""
    features = np.array([[1, 99, 43, 34],
                         [1, 76, 22, 10],
                         [0, 83, 11, 0],
                         [0, 99, 74, 33],
                         [0, 53, 40, 34]])
    targets = np.array([1, 0, 1, 1, 0])
    # Config 1: many estimators, no linear terms.
    model = RuleFitRegressor(exp_rand_tree_size=False, n_estimators=500,
                             random_state=1, include_linear=False,
                             max_rules=None, alpha=0.1)
    model.fit(features, targets)
    print(len(model._get_rules()))
    assert np.allclose(model.predict(features),
                       np.array([0.83333333, 0.25, 0.83333333, 0.83333333, 0.25]),
                       atol=1.0e-04)
    # Config 2: few estimators, tighter regularisation.
    model = RuleFitRegressor(exp_rand_tree_size=False, n_estimators=5,
                             random_state=0, max_rules=None, alpha=0.01)
    model.fit(features, targets)
    assert np.allclose(model.predict(features),
                       np.array([0.89630491, 0.15375469, 0.89624531, 1.05000033, 0.00369476]))
    # Config 3: custom tree generator; only checks that fitting succeeds.
    model = RuleFitRegressor(exp_rand_tree_size=False, n_estimators=5, random_state=0,
                             max_rules=None, alpha=0.01,
                             tree_generator=RandomForestClassifier())
    model.fit(features, targets)
# assert np.allclose(rfr.predict(X), expected) | [
"imodels.rule_set.rule_fit.RuleFitRegressor",
"imodels.util.transforms.FriedScale",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"numpy.zeros",
"sklearn.utils._testing.ignore_warnings"
] | [((1041, 1085), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'ConvergenceWarning'}), '(category=ConvergenceWarning)\n', (1056, 1085), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((368, 386), 'numpy.zeros', 'np.zeros', (['[100, 2]'], {}), '([100, 2])\n', (376, 386), True, 'import numpy as np\n'), ((659, 671), 'imodels.util.transforms.FriedScale', 'FriedScale', ([], {}), '()\n', (669, 671), False, 'from imodels.util.transforms import FriedScale\n'), ((1118, 1216), 'numpy.array', 'np.array', (['[[1, 99, 43, 34], [1, 76, 22, 10], [0, 83, 11, 0], [0, 99, 74, 33], [0, 53,\n 40, 34]]'], {}), '([[1, 99, 43, 34], [1, 76, 22, 10], [0, 83, 11, 0], [0, 99, 74, 33],\n [0, 53, 40, 34]])\n', (1126, 1216), True, 'import numpy as np\n'), ((1293, 1318), 'numpy.array', 'np.array', (['[1, 0, 1, 1, 0]'], {}), '([1, 0, 1, 1, 0])\n', (1301, 1318), True, 'import numpy as np\n'), ((1330, 1459), 'imodels.rule_set.rule_fit.RuleFitRegressor', 'RuleFitRegressor', ([], {'exp_rand_tree_size': '(False)', 'n_estimators': '(500)', 'random_state': '(1)', 'include_linear': '(False)', 'max_rules': 'None', 'alpha': '(0.1)'}), '(exp_rand_tree_size=False, n_estimators=500, random_state=1,\n include_linear=False, max_rules=None, alpha=0.1)\n', (1346, 1459), False, 'from imodels.rule_set.rule_fit import RuleFitRegressor\n'), ((1549, 1607), 'numpy.array', 'np.array', (['[0.83333333, 0.25, 0.83333333, 0.83333333, 0.25]'], {}), '([0.83333333, 0.25, 0.83333333, 0.83333333, 0.25])\n', (1557, 1607), True, 'import numpy as np\n'), ((1682, 1788), 'imodels.rule_set.rule_fit.RuleFitRegressor', 'RuleFitRegressor', ([], {'exp_rand_tree_size': '(False)', 'n_estimators': '(5)', 'random_state': '(0)', 'max_rules': 'None', 'alpha': '(0.01)'}), '(exp_rand_tree_size=False, n_estimators=5, random_state=0,\n max_rules=None, alpha=0.01)\n', (1698, 1788), False, 'from imodels.rule_set.rule_fit import RuleFitRegressor\n'), ((1818, 1888), 'numpy.array', 'np.array', 
(['[0.89630491, 0.15375469, 0.89624531, 1.05000033, 0.00369476]'], {}), '([0.89630491, 0.15375469, 0.89624531, 1.05000033, 0.00369476])\n', (1826, 1888), True, 'import numpy as np\n'), ((2095, 2119), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (2117, 2119), False, 'from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\n')] |
"""
name: euler.py
goal: numeric solve of differential equations
author: Dr <NAME> antoine-sebastien
date: 28/03/2022
"""
from math import exp, pow
import numpy as np
import matplotlib.pyplot as pt
from interpolation.lagrange import Lagrange
class Euler:
    """Interactively solve an ODE x'(t) = func(t, x) with the explicit Euler method.

    The interval [a, b] and the initial value are read from stdin, the
    solution is computed with a fixed step ``pas`` and plotted with
    matplotlib; a Lagrange interpolation of the samples is also printed.
    """
    def __init__(self):
        # Interval bounds are filled in by _getValues() from user input.
        self.a = None
        self.b = None
        self.pas = 0.1  # fixed integration step
        self._getValues()
        result = self._dev(self.a, self.b, self.initial, self.pas)
        # NOTE(review): `vals` uses arange(a, b + pas, pas) while _dev uses
        # arange(a, b, pas) plus the initial point — float rounding can make
        # the two lengths disagree, hence the debug print below.
        vals = np.arange(self.a, self.b + self.pas, self.pas)
        print(len(vals), len(result))
        # print(vals)
        print(Lagrange("runge-kutta-2.py").funcLagrange(vals, result, len(result) - 1))
        # Lagrange("runge-kutta-2.py").showC(vals, result)
        pt.scatter(vals, result, label='Courbe Obtenue')
        # pt.plot(vals, [-pow(x, 2)+x+2 for x in vals], label='Courbe')
        pt.legend()
        pt.show()
    def _getValues(self):
        """Prompt the user (messages in French) for the interval [a, b] and the initial value.

        Recurses until a valid interval (a < b) and numeric inputs are given.
        """
        print("Entrez les valeurs des intervalles [a,b]: ")
        try:
            self.a = float(input("a:"))
            self.b = float(input("b: "))
            if self.a >= self.b:
                print("votre intervalle n'est pas valide")
                self._getValues()
            self.initial = float(input("Valeur initial X0: "))
        except ValueError:
            print("Données incorrecte")
            self._getValues()
    def func(self, X, Y) -> float:
        """Right-hand side f(X, Y) of the ODE being integrated (Y is unused here)."""
        # return -0.3 * Y + 2 * exp(X)
        return -2 * X + 1
    def _dev(self, a, b, X0, pas):
        """Explicit Euler integration of ``func`` on [a, b] with step ``pas``.

        Returns the list of successive approximations, starting with X0.
        """
        f = X0
        values = list()
        values.append(f)
        val = np.arange(a, b, pas)
        for i in val:
            f = values[-1] + pas * self.func(i, f)
            values.append(f)
        return values
# Importing or running this module immediately launches the interactive solver.
Euler()
| [
"interpolation.lagrange.Lagrange",
"numpy.arange",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((473, 519), 'numpy.arange', 'np.arange', (['self.a', '(self.b + self.pas)', 'self.pas'], {}), '(self.a, self.b + self.pas, self.pas)\n', (482, 519), True, 'import numpy as np\n'), ((735, 783), 'matplotlib.pyplot.scatter', 'pt.scatter', (['vals', 'result'], {'label': '"""Courbe Obtenue"""'}), "(vals, result, label='Courbe Obtenue')\n", (745, 783), True, 'import matplotlib.pyplot as pt\n'), ((864, 875), 'matplotlib.pyplot.legend', 'pt.legend', ([], {}), '()\n', (873, 875), True, 'import matplotlib.pyplot as pt\n'), ((884, 893), 'matplotlib.pyplot.show', 'pt.show', ([], {}), '()\n', (891, 893), True, 'import matplotlib.pyplot as pt\n'), ((1574, 1594), 'numpy.arange', 'np.arange', (['a', 'b', 'pas'], {}), '(a, b, pas)\n', (1583, 1594), True, 'import numpy as np\n'), ((594, 622), 'interpolation.lagrange.Lagrange', 'Lagrange', (['"""runge-kutta-2.py"""'], {}), "('runge-kutta-2.py')\n", (602, 622), False, 'from interpolation.lagrange import Lagrange\n')] |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from data_factory import DataFactory
from frequent_directions_aistats_ridge_regression import FDRidge
from random_projections_aistats_ridge_regression import RPRidge
from sklearn.linear_model import Ridge,LinearRegression
from plot_config import fd_params, rfd_params, gauss_single_params, sjlt_single_params, gauss_ihs_params, sjlt_ihs_params
import numpy as np
from math import floor
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.kernel_approximation import RBFSampler
import matplotlib.pyplot as plt
import pandas as pd
from utils import get_errors
from timeit import default_timer as timer
# def get_errors(arr,x):
# e = [np.linalg.norm(arr[:,i] - x)/np.linalg.norm(x) for i in range(arr.shape[1])]
# e.insert(0,1)
# return e
def real_experiment(data_name,gamma_reg,):
    """Run the sketched ridge-regression comparison on one real dataset.

    Loads ``data_name`` (optionally expanding it with random Fourier
    features), solves the ridge problem exactly as a reference, then runs
    FD, RFD, single Gaussian/SJLT sketches and iterated (ihs) Gaussian/SJLT
    sketches, recording per-iteration relative errors and wall-clock times.
    Saves an error-vs-iteration / error-vs-time figure plus a build/iterate
    timing bar chart to hard-coded file paths.

    Parameters
    ----------
    data_name : str
        One of 'CoverType', 'w8a', 'CaliforniaHousing'; anything else
        falls through to the YearPredictions loader.
    gamma_reg : float
        Ridge regularisation parameter.
    """
    gamma = gamma_reg
    n = 20000
    ds = DataFactory(n=n)
    if data_name == 'CoverType':
        X,y = ds.fetch_forest_cover()
        rff_features = True
    elif data_name == 'w8a':
        X,y = ds.fetch_w8a()
        rff_features = True
    elif data_name == 'CaliforniaHousing':
        feature_size = 18000
        _X, _y = fetch_california_housing(return_X_y=True)
        X_train, y = _X, _y
        #X_train, X_test, y, y_test = train_test_split(_X, _y, test_size=0.1, random_state=42)
        rbf_feature = RBFSampler(gamma=0.005, random_state=100,n_components=feature_size)
        X = rbf_feature.fit_transform(X_train)
        rff_features = False
    else:
        X,y = ds.fetch_year_predictions()
        rff_features = True
    # Whether to fit fourier features
    if rff_features:
        X, y = X[:n], y[:n]
        X = ds.feature_expansion(X,n_extra_features=1024)
    d = X.shape[1]
    # Optimal solution
    print('#'*60)
    print('Solving exactly: Data shape: ', X.shape)
    solve_start = timer()
    H = X.T@X + gamma*np.eye(d)
    x_opt = np.linalg.solve(H,X.T@y)
    solve_time = timer() - solve_start
    print('Solving exactly: ', solve_time)
    # Iterate the FD regression
    iterations = 10
    m = int(2**9)
    # alpha splits the sketch budget between sketch size and buffer.
    alpha = 1.0
    fd_sk_dim = int(alpha*m)
    fd_buffer =int((2-alpha)*m)
    print('#'*40)
    print('#'*10, '\t FREQUENT DIRECTIONS \t', '#'*10)
    # fdr = FDRidge(fd_dim=fd_sk_dim,gamma=gamma,batch_size=fd_buffer) # nb this was 2m so expect to incresae slightly
    fdr = FDRidge(fd_dim=fd_sk_dim,gamma=gamma,batch_size=fd_buffer) # nb this was 2m so expect to incresae slightly
    _, all_x,fd_measured = fdr.fast_iterate(X,y,iterations)
    print('#'*10, '\t FREQUENT DIRECTIONS \t ', fd_measured['sketch time'], '#'*10)
    print('#'*10, '\t ROBUST FREQUENT DIRECTIONS \t', '#'*10)
    rfdr = FDRidge(fd_dim=fd_sk_dim,fd_mode='RFD',gamma=gamma,batch_size=fd_buffer)
    _, rfd_all_x, rfd_measured = rfdr.fast_iterate(X,y,iterations)
    print('#'*10, '\t ROBUST FREQUENT DIRECTIONS \t ', rfd_measured['sketch time'], '#'*10)
    print('#'*10, '\t GAUSS SINGLE \t', '#'*10)
    gauss_single = RPRidge(rp_dim=m,rp_mode='Gaussian',gamma=gamma)
    _, gauss_single_all_x, gauss_single_measured = gauss_single.iterate_single_timing(X,y)
    print('#'*10, '\t SJLT SINGLE \t', '#'*10)
    sjlt_single = RPRidge(rp_dim=m,rp_mode='SJLT',gamma=gamma)
    _, sjlt_single_all_x, sjlt_single_measured = sjlt_single.iterate_single_timing(X,y)
    print('#'*10, '\t GAUSS IHS \t', '#'*10)
    ihs_gauss = RPRidge(rp_dim=m,rp_mode='Gaussian',gamma=gamma)
    _, ihs_gauss_all_x, ihs_gauss_measured= ihs_gauss.iterate_multiple_timing(X,y)
    print('#'*10, '\t SJLT IHS \t', '#'*10)
    ihs_sjlt = RPRidge(rp_dim=m,rp_mode='SJLT',gamma=gamma)
    _, ihs_sjlt_all_x, ihs_sjlt_measured = ihs_sjlt.iterate_multiple_timing(X,y)
    # Measurement arrays
    fd_errors = np.zeros(iterations)
    rfd_errors = np.zeros_like(fd_errors,dtype=float)
    gauss_single_errors = np.zeros_like(fd_errors)
    sjlt_single_errors = np.zeros_like(fd_errors)
    gauss_ihs_errors = np.zeros_like(fd_errors)
    sjlt_ihs_errors = np.zeros_like(fd_errors)
    # Relative error ||x_t - x*|| / ||x*|| per method, per iteration.
    # NOTE(review): assumes each *_all_x array has shape (d, iterations) — confirm.
    for it in range(iterations):
        err = np.linalg.norm(all_x[:,it] - x_opt)/np.linalg.norm(x_opt)
        rfd_err = np.linalg.norm(rfd_all_x[:,it] - x_opt)/np.linalg.norm(x_opt)
        gauss_err = np.linalg.norm(gauss_single_all_x[:,it] - x_opt)/np.linalg.norm(x_opt)
        sjlt_err = np.linalg.norm(sjlt_single_all_x[:,it] - x_opt)/np.linalg.norm(x_opt)
        ihs_gauss_err = np.linalg.norm(ihs_gauss_all_x[:,it] - x_opt)/np.linalg.norm(x_opt)
        ihs_sjlt_err = np.linalg.norm(ihs_sjlt_all_x[:,it] - x_opt)/np.linalg.norm(x_opt)
        print(f'Iteration {it}\tFD:{err:.5E}\tRFD:{rfd_err:.5E},\tRP:{gauss_err:.5E},\tIHS:{ihs_gauss_err:.5E}')
        fd_errors[it] = err
        rfd_errors[it] = rfd_err
        gauss_single_errors[it] = gauss_err
        sjlt_single_errors[it] = sjlt_err
        gauss_ihs_errors[it] = ihs_gauss_err
        sjlt_ihs_errors[it] = ihs_sjlt_err
    print('#'*10, '\t PLOTTING \t', '#'*10)
    # ! THIS IS THE AISTATS PLOT SIZE fig, axes = plt.subplots(nrows=2,figsize=(5,2.5),dpi=100)
    fig, axes = plt.subplots(nrows=2,dpi=100)
    ax, ax_time = axes[0], axes[1]
    # ! Error vs Iterations plot
    ax.plot(1+np.arange(iterations), fd_errors,label='FD', **fd_params)
    ax.plot(1+np.arange(iterations), rfd_errors,label='RFD', **rfd_params)
    ax.plot(1+np.arange(iterations), gauss_single_errors,label='Gaussian', **gauss_single_params)
    ax.plot(1+np.arange(iterations), sjlt_single_errors,label='SJLT',**sjlt_single_params)
    ax.plot(1+np.arange(iterations), gauss_ihs_errors,label='ihs:Gauss',**gauss_ihs_params)
    ax.plot(1+np.arange(iterations), sjlt_ihs_errors,label='ihs:SJLT',**sjlt_ihs_params)
    # Only one of the per-gamma figures carries the legend.
    if gamma == 100.:
        ax.legend(ncol=2,loc='best')
    ax.set_yscale('log')
    #ax.set_xlabel('Iterations')
    #ax.set_ylabel(r'$\|\mathbf{x}^t - \mathbf{x}^*\|_2 / \| \mathbf{x}^*\|_2$')
    #ax.set_ylabel('Error') # Use this if latex is not present
    # ! Error vs time plot
    ax_time.plot(fd_measured['all_times'], get_errors(all_x,x_opt), **fd_params)
    ax_time.plot(rfd_measured['all_times'], get_errors(rfd_all_x,x_opt), **rfd_params)
    ax_time.plot(gauss_single_measured['all_times'], get_errors(gauss_single_all_x,x_opt), **gauss_single_params)
    ax_time.plot(sjlt_single_measured['all_times'], get_errors(sjlt_single_all_x,x_opt), **sjlt_single_params)
    ax_time.plot(ihs_gauss_measured['all_times'], get_errors(ihs_gauss_all_x,x_opt), **gauss_ihs_params)
    ax_time.plot(ihs_sjlt_measured['all_times'], get_errors(ihs_sjlt_all_x,x_opt), **sjlt_ihs_params)
    ax_time.set_yscale('log',base=10)
    ax_time.set_xscale('log',base=10)
    ax_time.legend(title=f'Exact:{solve_time:.3f}s')
    #ax.set_xscale('log',base=10)
    #ax_time.set_ylim(1E-16, 1E1)
    # ! Saving the plots
    fname = '/home/dickens/code/FrequentDirectionsRidgeRegression/sandbox/figures/efficient-iterative-'+data_name+str(int(gamma))+'.png'
    # ! commenting this line as it is the save format for the paper
    # fig.savefig(fname,dpi=150,bbox_inches='tight',pad_inches=None)
    fig.savefig(fname,dpi=200,bbox_inches='tight',pad_inches=None)
    #plt.show()
    # ! Separate the sketch time plots
    # * First build the dataframes
    build_dict = {
        'FD' : fd_measured['sketch time'],
        'RFD' : rfd_measured['sketch time'],
        'Gauss' : gauss_single_measured['sketch time'],
        'SJLT' : sjlt_single_measured['sketch time'],
        'ihs:Gauss' : ihs_gauss_measured['sketch time'],
        'ihs:SJLT' : ihs_sjlt_measured['sketch time']
    }
    # Mean per-iteration cost; ihs methods amortise the sketch over all iterations.
    mean_iter_time_single = lambda a : np.mean(a['all_times'][1:] - a['sketch time'])
    mean_iter_time_multi = lambda a : np.mean(a['all_times'][1:] - a['sketch time']/iterations)
    iteration_dict = {
        'FD' : mean_iter_time_single(fd_measured),
        'RFD' : mean_iter_time_single(rfd_measured),
        'Gauss' : mean_iter_time_single(gauss_single_measured),
        'SJLT' : mean_iter_time_single(sjlt_single_measured),
        'ihs:Gauss' : mean_iter_time_multi(ihs_gauss_measured),
        'ihs:SJLT' : mean_iter_time_multi(ihs_sjlt_measured)
    }
    print(build_dict)
    print(iteration_dict)
    # * Do the plotting
    bar_cols = [x['color'] for x in [fd_params, rfd_params, gauss_single_params, sjlt_single_params, gauss_ihs_params, sjlt_ihs_params]]
    timing_fig, timing_axes = plt.subplots(ncols=2,dpi=150)
    timing_fname = '/home/dickens/code/FrequentDirectionsRidgeRegression/sandbox/figures/efficient-iterative-'+data_name+str(int(gamma))+'separate-time-cost.png'
    build_ax, iter_ax = timing_axes
    #
    build_ax.barh(list(build_dict.keys()), list(build_dict.values()),color=bar_cols)
    iter_ax.barh(list(iteration_dict.keys()), list(iteration_dict.values()),color=bar_cols)
    build_ax.set_xlabel('Build Time (seconds)')
    iter_ax.set_xlabel('Iteration Time (seconds)')
    iter_ax.set_xlim(0,iteration_dict['ihs:Gauss']+1)
    #iter_ax.set_xscale('symlog')
    _ihs_sjlt_time = iteration_dict['ihs:SJLT']
    _title = f'ihs:sjlt:{_ihs_sjlt_time:.3f}'
    iter_ax.legend(title=_title)
    timing_fig.savefig(timing_fname,dpi=200,pad_inches=None)
def main():
    """Run the real-data experiment for every (dataset, gamma) combination."""
    # Other available names: 'CoverType', 'YearPredictions', 'w8a'
    dataset_names = ['CaliforniaHousing']
    regularisation_values = [100.]
    for name in dataset_names:
        for gamma in regularisation_values:
            real_experiment(name, gamma)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.mean",
"random_projections_aistats_ridge_regression.RPRidge",
"numpy.linalg.solve",
"numpy.eye",
"timeit.default_timer",
"sklearn.kernel_approximation.RBFSampler",
"data_factory.DataFactory",
"frequent_directions_aistats_ridge_regression.FDRidge",
"numpy.zeros",
"sklearn.datasets.fetch_cali... | [((1690, 1706), 'data_factory.DataFactory', 'DataFactory', ([], {'n': 'n'}), '(n=n)\n', (1701, 1706), False, 'from data_factory import DataFactory\n'), ((2672, 2679), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2677, 2679), True, 'from timeit import default_timer as timer\n'), ((2724, 2751), 'numpy.linalg.solve', 'np.linalg.solve', (['H', '(X.T @ y)'], {}), '(H, X.T @ y)\n', (2739, 2751), True, 'import numpy as np\n'), ((3184, 3244), 'frequent_directions_aistats_ridge_regression.FDRidge', 'FDRidge', ([], {'fd_dim': 'fd_sk_dim', 'gamma': 'gamma', 'batch_size': 'fd_buffer'}), '(fd_dim=fd_sk_dim, gamma=gamma, batch_size=fd_buffer)\n', (3191, 3244), False, 'from frequent_directions_aistats_ridge_regression import FDRidge\n'), ((3510, 3585), 'frequent_directions_aistats_ridge_regression.FDRidge', 'FDRidge', ([], {'fd_dim': 'fd_sk_dim', 'fd_mode': '"""RFD"""', 'gamma': 'gamma', 'batch_size': 'fd_buffer'}), "(fd_dim=fd_sk_dim, fd_mode='RFD', gamma=gamma, batch_size=fd_buffer)\n", (3517, 3585), False, 'from frequent_directions_aistats_ridge_regression import FDRidge\n'), ((3821, 3871), 'random_projections_aistats_ridge_regression.RPRidge', 'RPRidge', ([], {'rp_dim': 'm', 'rp_mode': '"""Gaussian"""', 'gamma': 'gamma'}), "(rp_dim=m, rp_mode='Gaussian', gamma=gamma)\n", (3828, 3871), False, 'from random_projections_aistats_ridge_regression import RPRidge\n'), ((4027, 4073), 'random_projections_aistats_ridge_regression.RPRidge', 'RPRidge', ([], {'rp_dim': 'm', 'rp_mode': '"""SJLT"""', 'gamma': 'gamma'}), "(rp_dim=m, rp_mode='SJLT', gamma=gamma)\n", (4034, 4073), False, 'from random_projections_aistats_ridge_regression import RPRidge\n'), ((4222, 4272), 'random_projections_aistats_ridge_regression.RPRidge', 'RPRidge', ([], {'rp_dim': 'm', 'rp_mode': '"""Gaussian"""', 'gamma': 'gamma'}), "(rp_dim=m, rp_mode='Gaussian', gamma=gamma)\n", (4229, 4272), False, 'from random_projections_aistats_ridge_regression import RPRidge\n'), ((4414, 
4460), 'random_projections_aistats_ridge_regression.RPRidge', 'RPRidge', ([], {'rp_dim': 'm', 'rp_mode': '"""SJLT"""', 'gamma': 'gamma'}), "(rp_dim=m, rp_mode='SJLT', gamma=gamma)\n", (4421, 4460), False, 'from random_projections_aistats_ridge_regression import RPRidge\n'), ((4582, 4602), 'numpy.zeros', 'np.zeros', (['iterations'], {}), '(iterations)\n', (4590, 4602), True, 'import numpy as np\n'), ((4620, 4657), 'numpy.zeros_like', 'np.zeros_like', (['fd_errors'], {'dtype': 'float'}), '(fd_errors, dtype=float)\n', (4633, 4657), True, 'import numpy as np\n'), ((4683, 4707), 'numpy.zeros_like', 'np.zeros_like', (['fd_errors'], {}), '(fd_errors)\n', (4696, 4707), True, 'import numpy as np\n'), ((4733, 4757), 'numpy.zeros_like', 'np.zeros_like', (['fd_errors'], {}), '(fd_errors)\n', (4746, 4757), True, 'import numpy as np\n'), ((4781, 4805), 'numpy.zeros_like', 'np.zeros_like', (['fd_errors'], {}), '(fd_errors)\n', (4794, 4805), True, 'import numpy as np\n'), ((4828, 4852), 'numpy.zeros_like', 'np.zeros_like', (['fd_errors'], {}), '(fd_errors)\n', (4841, 4852), True, 'import numpy as np\n'), ((5906, 5936), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'dpi': '(100)'}), '(nrows=2, dpi=100)\n', (5918, 5936), True, 'import matplotlib.pyplot as plt\n'), ((9271, 9301), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'dpi': '(150)'}), '(ncols=2, dpi=150)\n', (9283, 9301), True, 'import matplotlib.pyplot as plt\n'), ((2766, 2773), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2771, 2773), True, 'from timeit import default_timer as timer\n'), ((6860, 6884), 'utils.get_errors', 'get_errors', (['all_x', 'x_opt'], {}), '(all_x, x_opt)\n', (6870, 6884), False, 'from utils import get_errors\n'), ((6942, 6970), 'utils.get_errors', 'get_errors', (['rfd_all_x', 'x_opt'], {}), '(rfd_all_x, x_opt)\n', (6952, 6970), False, 'from utils import get_errors\n'), ((7038, 7075), 'utils.get_errors', 'get_errors', (['gauss_single_all_x', 'x_opt'], 
{}), '(gauss_single_all_x, x_opt)\n', (7048, 7075), False, 'from utils import get_errors\n'), ((7151, 7187), 'utils.get_errors', 'get_errors', (['sjlt_single_all_x', 'x_opt'], {}), '(sjlt_single_all_x, x_opt)\n', (7161, 7187), False, 'from utils import get_errors\n'), ((7260, 7294), 'utils.get_errors', 'get_errors', (['ihs_gauss_all_x', 'x_opt'], {}), '(ihs_gauss_all_x, x_opt)\n', (7270, 7294), False, 'from utils import get_errors\n'), ((7364, 7397), 'utils.get_errors', 'get_errors', (['ihs_sjlt_all_x', 'x_opt'], {}), '(ihs_sjlt_all_x, x_opt)\n', (7374, 7397), False, 'from utils import get_errors\n'), ((8471, 8517), 'numpy.mean', 'np.mean', (["(a['all_times'][1:] - a['sketch time'])"], {}), "(a['all_times'][1:] - a['sketch time'])\n", (8478, 8517), True, 'import numpy as np\n'), ((8556, 8615), 'numpy.mean', 'np.mean', (["(a['all_times'][1:] - a['sketch time'] / iterations)"], {}), "(a['all_times'][1:] - a['sketch time'] / iterations)\n", (8563, 8615), True, 'import numpy as np\n'), ((2702, 2711), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (2708, 2711), True, 'import numpy as np\n'), ((4901, 4937), 'numpy.linalg.norm', 'np.linalg.norm', (['(all_x[:, it] - x_opt)'], {}), '(all_x[:, it] - x_opt)\n', (4915, 4937), True, 'import numpy as np\n'), ((4937, 4958), 'numpy.linalg.norm', 'np.linalg.norm', (['x_opt'], {}), '(x_opt)\n', (4951, 4958), True, 'import numpy as np\n'), ((4977, 5017), 'numpy.linalg.norm', 'np.linalg.norm', (['(rfd_all_x[:, it] - x_opt)'], {}), '(rfd_all_x[:, it] - x_opt)\n', (4991, 5017), True, 'import numpy as np\n'), ((5017, 5038), 'numpy.linalg.norm', 'np.linalg.norm', (['x_opt'], {}), '(x_opt)\n', (5031, 5038), True, 'import numpy as np\n'), ((5059, 5108), 'numpy.linalg.norm', 'np.linalg.norm', (['(gauss_single_all_x[:, it] - x_opt)'], {}), '(gauss_single_all_x[:, it] - x_opt)\n', (5073, 5108), True, 'import numpy as np\n'), ((5108, 5129), 'numpy.linalg.norm', 'np.linalg.norm', (['x_opt'], {}), '(x_opt)\n', (5122, 5129), True, 'import numpy as 
np\n'), ((5149, 5197), 'numpy.linalg.norm', 'np.linalg.norm', (['(sjlt_single_all_x[:, it] - x_opt)'], {}), '(sjlt_single_all_x[:, it] - x_opt)\n', (5163, 5197), True, 'import numpy as np\n'), ((5197, 5218), 'numpy.linalg.norm', 'np.linalg.norm', (['x_opt'], {}), '(x_opt)\n', (5211, 5218), True, 'import numpy as np\n'), ((5243, 5289), 'numpy.linalg.norm', 'np.linalg.norm', (['(ihs_gauss_all_x[:, it] - x_opt)'], {}), '(ihs_gauss_all_x[:, it] - x_opt)\n', (5257, 5289), True, 'import numpy as np\n'), ((5289, 5310), 'numpy.linalg.norm', 'np.linalg.norm', (['x_opt'], {}), '(x_opt)\n', (5303, 5310), True, 'import numpy as np\n'), ((5334, 5379), 'numpy.linalg.norm', 'np.linalg.norm', (['(ihs_sjlt_all_x[:, it] - x_opt)'], {}), '(ihs_sjlt_all_x[:, it] - x_opt)\n', (5348, 5379), True, 'import numpy as np\n'), ((5379, 5400), 'numpy.linalg.norm', 'np.linalg.norm', (['x_opt'], {}), '(x_opt)\n', (5393, 5400), True, 'import numpy as np\n'), ((6019, 6040), 'numpy.arange', 'np.arange', (['iterations'], {}), '(iterations)\n', (6028, 6040), True, 'import numpy as np\n'), ((6091, 6112), 'numpy.arange', 'np.arange', (['iterations'], {}), '(iterations)\n', (6100, 6112), True, 'import numpy as np\n'), ((6166, 6187), 'numpy.arange', 'np.arange', (['iterations'], {}), '(iterations)\n', (6175, 6187), True, 'import numpy as np\n'), ((6264, 6285), 'numpy.arange', 'np.arange', (['iterations'], {}), '(iterations)\n', (6273, 6285), True, 'import numpy as np\n'), ((6355, 6376), 'numpy.arange', 'np.arange', (['iterations'], {}), '(iterations)\n', (6364, 6376), True, 'import numpy as np\n'), ((6447, 6468), 'numpy.arange', 'np.arange', (['iterations'], {}), '(iterations)\n', (6456, 6468), True, 'import numpy as np\n'), ((1981, 2022), 'sklearn.datasets.fetch_california_housing', 'fetch_california_housing', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (2005, 2022), False, 'from sklearn.datasets import fetch_california_housing\n'), ((2168, 2236), 'sklearn.kernel_approximation.RBFSampler', 
'RBFSampler', ([], {'gamma': '(0.005)', 'random_state': '(100)', 'n_components': 'feature_size'}), '(gamma=0.005, random_state=100, n_components=feature_size)\n', (2178, 2236), False, 'from sklearn.kernel_approximation import RBFSampler\n')] |
import hashlib
from colorama import Fore, Style
from Crypto.Cipher import AES
from elftools.elf.elffile import ELFFile
from .compression import lz77_decompress, lzma_compress
from .exception import (
InvalidStockRomError,
MissingSymbolError,
NotEnoughSpaceError,
ParsingError,
)
from .patch import FirmwarePatchMixin
from .utils import round_down_word, round_up_word
def _val_to_color(val):
if 0x9010_0000 > val >= 0x9000_0000:
return Fore.YELLOW
elif 0x0804_0000 > val >= 0x0800_0000:
return Fore.MAGENTA
else:
return ""
class Lookup(dict):
    """A dict of int -> int addresses with a colorized hex ``repr`` for debugging."""
    def __repr__(self):
        # Render each mapping as "0xKKKKKKKK: 0xVVVVVVVV," with each side
        # colored by the memory region it falls in (see _val_to_color).
        substrs = []
        substrs.append("{")
        for k, v in sorted(self.items()):
            k_color = _val_to_color(k)
            v_color = _val_to_color(v)
            substrs.append(
                f"    {k_color}0x{k:08X}{Style.RESET_ALL}: "
                f"{v_color}0x{v:08X}{Style.RESET_ALL},"
            )
        substrs.append("}")
        return "\n".join(substrs)
class Firmware(FirmwarePatchMixin, bytearray):
    """A mutable in-memory firmware image with bounds-checked indexing.

    Subclasses override the FLASH_* constants (and optionally ``_verify``)
    for the specific image they model.
    """
    # Memory-map constants; FLASH_BASE/FLASH_LEN are overridden by subclasses.
    RAM_BASE = 0x02000000
    RAM_LEN = 0x00020000
    FLASH_BASE = 0x0000_0000
    FLASH_LEN = 0
    def __init__(self, firmware=None):
        # Load the image from the ``firmware`` path if given; otherwise
        # start from an all-zero buffer of FLASH_LEN bytes.
        if firmware:
            with open(firmware, "rb") as f:
                firmware_data = f.read()
            super().__init__(firmware_data)
        else:
            super().__init__(self.FLASH_LEN)
        self._lookup = Lookup()
        self._verify()
    def _verify(self):
        # Subclass hook: raise if the loaded image is not the expected ROM.
        pass
    def __getitem__(self, key):
        """Properly raises index error if trying to access oob regions."""
        # bytearray silently clamps out-of-range slices; probe the endpoints
        # explicitly so a bad slice fails loudly instead.
        if isinstance(key, slice):
            if key.start is not None:
                try:
                    self[key.start]
                except IndexError:
                    raise IndexError(
                        f"Index {key.start} ({hex(key.start)}) out of range"
                    ) from None
            if key.stop is not None:
                try:
                    self[key.stop - 1]
                except IndexError:
                    raise IndexError(
                        f"Index {key.stop - 1} ({hex(key.stop - 1)}) out of range"
                    ) from None
        return super().__getitem__(key)
    def __setitem__(self, key, new_val):
        """Properly raises index error if trying to access oob regions."""
        # Same endpoint probing as __getitem__, but reported as a
        # NotEnoughSpaceError since a write past the end means the patch
        # does not fit in the image.
        if isinstance(key, slice):
            if key.start is not None:
                try:
                    self[key.start]
                except IndexError:
                    raise NotEnoughSpaceError(
                        f"Starting index {key.start} ({hex(key.start)}) exceeds "
                        f"firmware length {len(self)} ({hex(len(self))})"
                    ) from None
            if key.stop is not None:
                try:
                    self[key.stop - 1]
                except IndexError:
                    raise NotEnoughSpaceError(
                        f"Ending index {key.stop - 1} ({hex(key.stop - 1)}) exceeds "
                        f"firmware length {len(self)} ({hex(len(self))})"
                    ) from None
        return super().__setitem__(key, new_val)
    def __str__(self):
        # NOTE(review): relies on a ``__name__`` attribute being available
        # on the instance/class — plain instances don't have one; verify
        # subclasses provide it.
        return self.__name__
    @staticmethod
    def hash(data):
        # SHA-1 hex digest; used for image identification, not security.
        return hashlib.sha1(data).hexdigest()
    def int(self, offset: int, size=4):
        # Read a little-endian unsigned integer of ``size`` bytes at ``offset``.
        # (The builtin ``int`` is still visible inside the method body.)
        return int.from_bytes(self[offset : offset + size], "little")
    def set_range(self, start: int, end: int, val: bytes):
        # Fill [start, end) with the single-byte pattern ``val``;
        # returns the number of bytes written.
        self[start:end] = val * (end - start)
        return end - start
    def clear_range(self, start: int, end: int):
        # Zero out [start, end); returns the number of bytes cleared.
        return self.set_range(start, end, val=b"\x00")
    def show(self, wrap=1024, show=True):
        """Display an occupancy bitmap of the image (nonzero bytes), ``wrap`` bytes per row."""
        # Imported locally so the class is usable without matplotlib installed.
        import matplotlib.pyplot as plt
        import matplotlib.ticker as ticker
        import numpy as np
        def to_hex(x, pos):
            # X tick labels: byte offset within a row.
            return f"0x{int(x):06X}"
        def to_hex_wrap(x, pos):
            # Y tick labels: absolute offset of the row start.
            return f"0x{int(x)*wrap:06X}"
        n_bytes = len(self)
        # NOTE(review): reshape requires n_bytes to be a multiple of ``wrap``.
        rows = int(np.ceil(n_bytes / wrap))
        occupied = np.array(self) != 0
        plt.imshow(occupied.reshape(rows, wrap))
        plt.title(str(self))
        axes = plt.gca()
        axes.get_xaxis().set_major_locator(ticker.MultipleLocator(128))
        axes.get_xaxis().set_major_formatter(ticker.FuncFormatter(to_hex))
        axes.get_yaxis().set_major_locator(ticker.MultipleLocator(32))
        axes.get_yaxis().set_major_formatter(ticker.FuncFormatter(to_hex_wrap))
        if show:
            plt.show()
class RWData:
    """
    Manages the firmware's rwdata-initialisation table: each entry points to a
    decompression routine plus (src, len, dst) for one compressed data blob.
    On load, the lz77 blobs are decompressed and their flash regions cleared;
    on write-back, the blobs are re-compressed with lzma and the table rebuilt.

    Assumptions:
        1. Only compressed rwdata is after this table
        2. We are only modifying the lz_decompress stuff.
    """
    # THIS HAS TO AGREE WITH THE LINKER
    MAX_TABLE_ELEMENTS = 5
    def __init__(self, firmware, table_start, table_len):
        # We want to be able to extend the table.
        self.firmware = firmware
        self.table_start = table_start
        # Memoises compressed sizes per blob so compressed_len is cheap.
        self.__compressed_len_memo = {}
        self.datas, self.dsts = [], []
        # Each table entry is 4 little-endian words: fn, src, len, dst.
        for i in range(table_start, table_start + table_len - 4, 16):
            # First thing is pointer to executable, need to always replace this
            # to our lzma
            rel_offset_to_fn = firmware.int(i)
            # Sign-extend the 32-bit relative offset.
            if rel_offset_to_fn > 0x8000_0000:
                rel_offset_to_fn -= 0x1_0000_0000
            # fn_addr = i + rel_offset_to_fn
            # assert fn_addr == 0x18005 # lz_decompress function
            i += 4
            # src is stored relative to its own table slot.
            data_addr = i + firmware.int(i)
            i += 4
            # NOTE(review): stored length appears to carry a flag in bit 0,
            # hence the >> 1 — confirm against the stock loader.
            data_len = firmware.int(i) >> 1
            i += 4
            data_dst = firmware.int(i)
            i += 4
            data = lz77_decompress(firmware[data_addr : data_addr + data_len])
            print(f"    lz77 decompressed data {data_len} -> {len(data)}")
            # Reclaim the space held by the original compressed blob.
            firmware.clear_range(data_addr, data_addr + data_len)
            self.append(data, data_dst)
        # Final word is a relative pointer to the function run after the table.
        last_element_offset = table_start + table_len - 4
        self.last_fn = firmware.int(last_element_offset)
        if self.last_fn > 0x8000_0000:
            self.last_fn -= 0x1_0000_0000
        self.last_fn += last_element_offset
        # Mark this area as reserved; there's nothing special about 0x77, its
        # just not 0x00
        firmware.set_range(
            table_start, table_start + 16 * self.MAX_TABLE_ELEMENTS + 4, b"\x77"
        )
    def __getitem__(self, k):
        # Index into the decompressed data blobs.
        return self.datas[k]
    @property
    def table_end(self):
        # 4 words per entry, plus the trailing (bss-init fn, last fn) words.
        return self.table_start + 4 * 4 * len(self.datas) + 4 + 4
    def append(self, data, dst):
        """Add a new element to the table"""
        if len(self.datas) >= self.MAX_TABLE_ELEMENTS:
            raise NotEnoughSpaceError(
                f"MAX_TABLE_ELEMENTS value {self.MAX_TABLE_ELEMENTS} exceeded"
            )
        self.datas.append(data)
        self.dsts.append(dst)
        assert len(self.datas) == len(self.dsts)
    @property
    def compressed_len(self):
        # Total lzma-compressed size of all blobs; individual sizes are
        # memoised since lzma_compress is expensive.
        compressed_len = 0
        for data in self.datas:
            data = bytes(data)
            if data not in self.__compressed_len_memo:
                compressed_data = lzma_compress(bytes(data))
                self.__compressed_len_memo[data] = len(compressed_data)
            compressed_len += self.__compressed_len_memo[data]
        return compressed_len
    def write_table_and_data(self, end_of_table_reference, data_offset=None):
        """Compress every blob, write it into the firmware, and rebuild the table.

        Returns the total number of compressed bytes written.

        Parameters
        ----------
        end_of_table_reference : int
            Offset of the loader's pointer to the end of the table,
            updated to point just past the rebuilt table.
        data_offset : int
            Where to write the compressed data
        """
        # Write Compressed Data
        data_addrs, data_lens = [], []
        if data_offset is None:
            index = self.table_end
        else:
            index = data_offset
        total_len = 0
        for data in self.datas:
            compressed_data = lzma_compress(bytes(data))
            print(
                f"    compressed {len(data)}->{len(compressed_data)} bytes "
                f"(saves {len(data)-len(compressed_data)}). "
                f"Writing to 0x{index:05X}"
            )
            self.firmware[index : index + len(compressed_data)] = compressed_data
            data_addrs.append(index)
            data_lens.append(len(compressed_data))
            index += len(compressed_data)
            total_len += len(compressed_data)
        # Write Table
        index = self.table_start
        assert len(data_addrs) == len(data_lens) == len(self.dsts)
        for data_addr, data_len, data_dst in zip(data_addrs, data_lens, self.dsts):
            # Word 1: relative pointer to our lzma inflate routine.
            self.firmware.relative(index, "rwdata_inflate")
            index += 4
            # Assumes that the data will be after the table.
            rel_addr = data_addr - index
            # Encode negative offsets as unsigned 32-bit.
            if rel_addr < 0:
                rel_addr += 0x1_0000_0000
            self.firmware.replace(index, rel_addr, size=4)
            index += 4
            self.firmware.replace(index, data_len, size=4)
            index += 4
            self.firmware.replace(index, data_dst, size=4)
            index += 4
        # Trailing words: bss init routine, then the original final function.
        self.firmware.relative(index, "bss_rwdata_init")
        index += 4
        self.firmware.relative(index, self.last_fn, size=4)
        index += 4
        assert index == self.table_end
        # Update the pointer to the end of table in the loader
        self.firmware.relative(end_of_table_reference, index, size=4)
        print(self)
        return total_len
    def __str__(self):
        """Returns the **written** table.
        Doesn't show unstaged changes.
        """
        substrs = []
        substrs.append("")
        substrs.append("RWData Table")
        substrs.append("------------")
        # One row of 4 words per entry, read back from the firmware image.
        for addr in range(self.table_start, self.table_end - 4 - 4, 16):
            substrs.append(
                f"0x{addr:08X}: "
                f"0x{self.firmware.int(addr + 0):08X} "
                f"0x{self.firmware.int(addr + 4):08X} "
                f"0x{self.firmware.int(addr + 8):08X} "
                f"0x{self.firmware.int(addr + 12):08X} "
            )
        # The two trailing function-pointer words.
        addr = self.table_end - 8
        substrs.append(f"0x{addr:08X}: 0x{self.firmware.int(addr + 0):08X}")
        addr = self.table_end - 4
        substrs.append(f"0x{addr:08X}: 0x{self.firmware.int(addr + 0):08X}")
        substrs.append("")
        return "\n".join(substrs)
class IntFirmware(Firmware):
    """Internal (on-chip) flash image paired with its ELF symbol table."""

    FLASH_BASE = 0x08000000
    FLASH_LEN = 0x00020000

    # Subclasses describe their rwdata copy-table here (None/0 => no table).
    RWDATA_OFFSET = None
    RWDATA_LEN = 0
    RWDATA_ITCM_IDX = None
    RWDATA_DTCM_IDX = None

    def __init__(self, firmware, elf):
        super().__init__(firmware)
        # The file object must outlive the ELFFile, which reads lazily.
        self._elf_f = open(elf, "rb")
        self.elf = ELFFile(self._elf_f)
        self.symtab = self.elf.get_section_by_name(".symtab")
        self.rwdata = (
            None
            if self.RWDATA_OFFSET is None
            else RWData(self, self.RWDATA_OFFSET, self.RWDATA_LEN)
        )

    def _verify(self):
        """Raise InvalidStockRomError unless the image matches the stock SHA1."""
        if hashlib.sha1(self).hexdigest() != self.STOCK_ROM_SHA1_HASH:
            raise InvalidStockRomError

    def address(self, symbol_name, sub_base=False):
        """Resolve a symbol to its address, optionally relative to FLASH_BASE.

        Raises
        ------
        MissingSymbolError
            If the symbol is absent or resolves to address 0x0.
        """
        matches = self.symtab.get_symbol_by_name(symbol_name)
        if not matches:
            raise MissingSymbolError(f'Cannot find symbol "{symbol_name}"')
        address = matches[0]["st_value"]
        if not address:
            raise MissingSymbolError(f"{symbol_name} has address 0x0")
        print(f"    found {symbol_name} at 0x{address:08X}")
        if sub_base:
            address -= self.FLASH_BASE
        return address

    @property
    def empty_offset(self):
        """Detect a series of 0x00 to figure out the end of the internal firmware.

        Returns
        -------
        int
            Offset into firmware where empty region begins.
        """
        search_start = (
            self.STOCK_ROM_END if self.rwdata is None else self.rwdata.table_end
        )
        blank = b"\x00" * 256
        for offset in range(search_start, self.FLASH_LEN, 0x10):
            if self[offset : offset + 256] == blank:
                return offset
        raise ParsingError("Couldn't find end of internal code.")

    @property
    def key(self):
        # 16 key bytes embedded at KEY_OFFSET (used by ExtFirmware.crypt).
        return self[self.KEY_OFFSET : self.KEY_OFFSET + 16]

    @property
    def nonce(self):
        # 8 nonce bytes embedded at NONCE_OFFSET (used by ExtFirmware.crypt).
        return self[self.NONCE_OFFSET : self.NONCE_OFFSET + 8]
def _nonce_to_iv(nonce):
# need to convert nonce to 2
assert len(nonce) == 8
nonce = nonce[::-1]
# The lower 28bits (counter) will be updated in `crypt` method
return nonce + b"\x00\x00" + b"\x71\x23" + b"\x20\x00" + b"\x00\x00"
class ExtFirmware(Firmware):
    # External flash image; the ENC_* range is encrypted in the stock ROM.
    FLASH_BASE = 0x9000_0000
    FLASH_LEN = 0x0010_0000

    ENC_START = 0
    ENC_END = 0

    def crypt(self, key, nonce):
        """Decrypts if encrypted; encrypts if in plain text.

        Implements AES-CTR by hand on top of ECB: for each 16-byte block a
        counter block is encrypted and XORed into the data, so applying the
        method twice restores the original bytes.
        """
        key = bytes(key[::-1])
        iv = bytearray(_nonce_to_iv(nonce))

        aes = AES.new(key, AES.MODE_ECB)
        # 128 // 8 == 16-byte AES block stride.
        for offset in range(self.ENC_START, self.ENC_END, 128 // 8):
            counter_block = iv.copy()

            # Counter is derived from the absolute flash address of the block.
            counter = (self.FLASH_BASE + offset) >> 4
            # Pack the 28-bit counter into the low nibble of byte 12 plus
            # bytes 13-15 (big-endian), preserving the IV's high nibble.
            counter_block[12] = ((counter >> 24) & 0x0F) | (counter_block[12] & 0xF0)
            counter_block[13] = (counter >> 16) & 0xFF
            counter_block[14] = (counter >> 8) & 0xFF
            counter_block[15] = (counter >> 0) & 0xFF

            cipher_block = aes.encrypt(bytes(counter_block))
            # NOTE: the keystream is applied in reverse byte order.
            for i, cipher_byte in enumerate(reversed(cipher_block)):
                self[offset + i] ^= cipher_byte
class Device:
    """Ties together internal flash, external flash and a compressed scratch
    region for one hardware target.

    Subclasses register themselves in ``Device.registry`` by name and are
    expected to provide ``Int``, ``Ext`` and ``FreeMemory`` class attributes
    (firmware container classes) plus a ``patch`` implementation.
    """

    # name -> Device subclass, populated by __init_subclass__.
    registry = {}

    def __init_subclass__(cls, name, **kwargs):
        super().__init_subclass__(**kwargs)
        cls.name = name
        cls.registry[name] = cls

    def __init__(self, internal_bin, internal_elf, external_bin):
        self.internal = self.Int(internal_bin, internal_elf)
        self.external = self.Ext(external_bin)
        self.compressed_memory = self.FreeMemory()
        # Link all lookup tables to a single device instance
        self.lookup = Lookup()
        self.internal._lookup = self.lookup
        self.external._lookup = self.lookup
        self.compressed_memory._lookup = self.lookup
        # Running write positions into each region.
        self.ext_offset = 0
        self.int_pos = 0
        self.compressed_memory_pos = 0

    def _move_copy(
        self, dst, dst_offset: int, src, src_offset: int, size: int, delete: bool
    ) -> int:
        """Copy ``size`` bytes src->dst, record every address in the lookup
        table, and optionally erase the source range."""
        dst[dst_offset : dst_offset + size] = src[src_offset : src_offset + size]
        if delete:
            src.clear_range(src_offset, src_offset + size)
        for i in range(size):
            self.lookup[src.FLASH_BASE + src_offset + i] = (
                dst.FLASH_BASE + dst_offset + i
            )
        return size

    def _move(self, dst, dst_offset: int, src, src_offset: int, size: int) -> int:
        # Move = copy + erase source.
        return self._move_copy(dst, dst_offset, src, src_offset, size, True)

    def _copy(self, dst, dst_offset: int, src, src_offset: int, size: int) -> int:
        # Copy leaves the source bytes intact.
        return self._move_copy(dst, dst_offset, src, src_offset, size, False)

    # Convenience methods for move and copy
    def _move_ext_to_int(self, ext_offset: int, int_offset: int, size: int) -> int:
        return self._move(self.internal, int_offset, self.external, ext_offset, size)

    def _copy_ext_to_int(self, ext_offset: int, int_offset: int, size: int) -> int:
        return self._copy(self.internal, int_offset, self.external, ext_offset, size)

    def _move_to_compressed_memory(
        self, ext_offset: int, compressed_memory_offset: int, size: int
    ) -> int:
        return self._move(
            self.compressed_memory,
            compressed_memory_offset,
            self.external,
            ext_offset,
            size,
        )

    def crypt(self):
        """En/decrypt the external flash with the key/nonce from internal flash."""
        self.external.crypt(self.internal.key, self.internal.nonce)

    def show(self, show=True):
        """Plot the internal (and, if present, external) firmware contents."""
        import matplotlib.pyplot as plt

        if len(self.external):
            plt.subplot(2, 1, 1)
            self.internal.show(show=False)
            plt.subplot(2, 1, 2)
            self.external.show(show=False)
        else:
            self.internal.show(show=False)
        if show:
            plt.show()

    def compressed_memory_compressed_len(self, add_index=0):
        """Compressed size of the first ``compressed_memory_pos + add_index``
        bytes of compressed_memory (memoized per content)."""
        index = self.compressed_memory_pos + add_index
        if not index:
            return 0
        data = bytes(self.compressed_memory[:index])
        if data in self.compressed_memory_compressed_len.memo:
            return self.compressed_memory_compressed_len.memo[data]
        compressed_data = lzma_compress(data)
        self.compressed_memory_compressed_len.memo[data] = len(compressed_data)
        return len(compressed_data)

    # NOTE: memo is attached to the function object, so it is shared across
    # all Device instances; entries are keyed by content, which keeps it safe.
    compressed_memory_compressed_len.memo = {}

    @property
    def compressed_memory_free_space(self):
        return len(self.compressed_memory) - self.compressed_memory_pos

    @property
    def int_free_space(self):
        # Free internal flash after accounting for the (compressed) scratch
        # region and any pending rwdata payloads.
        out = (
            len(self.internal) - self.int_pos - self.compressed_memory_compressed_len()
        )
        if self.internal.rwdata is not None:
            out -= self.internal.rwdata.compressed_len
        return out

    def rwdata_lookup(self, lower, size):
        """Rewrite DTCM rwdata words that point into [lower, lower+size) of
        external flash to their relocated addresses."""
        lower += self.external.FLASH_BASE
        upper = lower + size
        for i in range(0, len(self.internal.rwdata[self.internal.RWDATA_DTCM_IDX]), 4):
            val = int.from_bytes(
                self.internal.rwdata[self.internal.RWDATA_DTCM_IDX][i : i + 4], "little"
            )
            if lower <= val < upper:
                new_val = self.lookup[val]
                print(f"    updating rwdata 0x{val:08X} -> 0x{new_val:08X}")
                self.internal.rwdata[self.internal.RWDATA_DTCM_IDX][
                    i : i + 4
                ] = new_val.to_bytes(4, "little")

    def rwdata_erase(self, lower, size):
        """
        Erasing no longer used references makes it compress better.
        """
        # 0x9000_0000 mirrors the external flash base address.
        lower += 0x9000_0000
        upper = lower + size
        for i in range(0, len(self.internal.rwdata[self.internal.RWDATA_DTCM_IDX]), 4):
            val = int.from_bytes(
                self.internal.rwdata[self.internal.RWDATA_DTCM_IDX][i : i + 4], "little"
            )
            if lower <= val < upper:
                self.internal.rwdata[self.internal.RWDATA_DTCM_IDX][
                    i : i + 4
                ] = b"\x00\x00\x00\x00"

    def move_to_int(self, ext, size, reference):
        """Move data (external offset, or raw bytes) into internal flash.

        Raises NotEnoughSpaceError when it does not fit.
        """
        if self.int_free_space < size:
            raise NotEnoughSpaceError
        new_loc = self.int_pos
        if isinstance(ext, (bytes, bytearray)):
            self.internal[self.int_pos : self.int_pos + size] = ext
        else:
            self._move_ext_to_int(ext, self.int_pos, size=size)
        # NOTE(review): hex(ext) would raise TypeError when ext is raw bytes;
        # this print appears to assume the int-offset path — verify callers.
        print(f"    move_ext_to_int {hex(ext)} -> {hex(self.int_pos)}")
        self.int_pos += round_up_word(size)
        if reference is not None:
            self.internal.lookup(reference)
        return new_loc

    def move_ext_external(self, ext, size, reference):
        """Explicitly just moves ext->ext data"""
        if isinstance(ext, (bytes, bytearray)):
            self.external[self.ext_offset : self.ext_offset + size] = ext
        else:
            self.external.move(ext, self.ext_offset, size=size)
        if reference is not None:
            self.internal.lookup(reference)
        # NOTE(review): ext + self.ext_offset assumes ext is an int offset;
        # the bytes branch above would make this raise — confirm intent.
        new_loc = ext + self.ext_offset
        return new_loc

    def move_ext(self, ext, size, reference):
        """Attempt to relocate in priority order:
        1. Internal
        2. External
        This is the primary moving function for data that is already compressed
        or is incompressible.
        """
        try:
            new_loc = self.move_to_int(ext, size, reference)
            if isinstance(ext, int):
                self.ext_offset -= round_down_word(size)
            return new_loc
        except NotEnoughSpaceError:
            print(
                f"    {Fore.RED}Not Enough Internal space. Using external flash{Style.RESET_ALL}"
            )
            return self.move_ext_external(ext, size, reference)

    def move_to_compressed_memory(self, ext, size, reference):
        """Attempt to relocate in priority order:
        1. compressed_memory
        2. Internal
        3. External
        This is the primary moving method for any compressible data.
        """
        current_len = self.compressed_memory_compressed_len()
        try:
            self.compressed_memory[
                self.compressed_memory_pos : self.compressed_memory_pos + size
            ] = self.external[ext : ext + size]
        except NotEnoughSpaceError:
            print(
                f"    {Fore.RED}compressed_memory full. Attempting to put in internal{Style.RESET_ALL}"
            )
            return self.move_ext(ext, size, reference)
        new_len = self.compressed_memory_compressed_len(size)
        diff = new_len - current_len
        # How many raw bytes each extra compressed byte buys us.
        compression_ratio = size / diff
        print(
            f"    {Fore.YELLOW}compression_ratio: {compression_ratio}{Style.RESET_ALL}"
        )
        if diff > self.int_free_space:
            print(
                f"    {Fore.RED}not putting into free memory due not enough free "
                f"internal storage for compressed data.{Style.RESET_ALL}"
            )
            self.compressed_memory.clear_range(
                self.compressed_memory_pos, self.compressed_memory_pos + size
            )
            return self.move_ext_external(ext, size, reference)
        elif compression_ratio < self.args.compression_ratio:
            # Revert putting this data into compressed_memory due to poor space_savings
            # NOTE(review): self.args is not set in __init__; presumably
            # assigned by the CLI driver before patching — confirm.
            print(
                f"    {Fore.RED}not putting in free memory due to poor compression.{Style.RESET_ALL}"
            )
            self.compressed_memory.clear_range(
                self.compressed_memory_pos, self.compressed_memory_pos + size
            )
            return self.move_ext(ext, size, reference)
        # Even though the data is already moved, this builds the reference lookup
        self._move_to_compressed_memory(ext, self.compressed_memory_pos, size=size)
        print(
            f"    move_to_compressed_memory {hex(ext)} -> {hex(self.compressed_memory_pos)}"
        )
        if reference is not None:
            self.internal.lookup(reference)
        new_loc = self.compressed_memory_pos
        self.compressed_memory_pos += round_up_word(size)
        self.ext_offset -= round_down_word(size)
        return new_loc

    def __call__(self):
        # Start writing at the first empty internal region, then patch.
        self.int_pos = self.internal.empty_offset
        return self.patch()

    def patch(self):
        """Device specific argument parsing and patching routine.
        Called from __call__; not to be called otherwise.
        """
        raise NotImplementedError
| [
"numpy.ceil",
"matplotlib.ticker.FuncFormatter",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.gca",
"elftools.elf.elffile.ELFFile",
"numpy.array",
"Crypto.Cipher.AES.new",
"hashlib.sha1",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((4179, 4188), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4186, 4188), True, 'import matplotlib.pyplot as plt\n'), ((10643, 10663), 'elftools.elf.elffile.ELFFile', 'ELFFile', (['self._elf_f'], {}), '(self._elf_f)\n', (10650, 10663), False, 'from elftools.elf.elffile import ELFFile\n'), ((12962, 12988), 'Crypto.Cipher.AES.new', 'AES.new', (['key', 'AES.MODE_ECB'], {}), '(key, AES.MODE_ECB)\n', (12969, 12988), False, 'from Crypto.Cipher import AES\n'), ((4022, 4045), 'numpy.ceil', 'np.ceil', (['(n_bytes / wrap)'], {}), '(n_bytes / wrap)\n', (4029, 4045), True, 'import numpy as np\n'), ((4066, 4080), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (4074, 4080), True, 'import numpy as np\n'), ((4232, 4259), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(128)'], {}), '(128)\n', (4254, 4259), True, 'import matplotlib.ticker as ticker\n'), ((4306, 4334), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['to_hex'], {}), '(to_hex)\n', (4326, 4334), True, 'import matplotlib.ticker as ticker\n'), ((4379, 4405), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(32)'], {}), '(32)\n', (4401, 4405), True, 'import matplotlib.ticker as ticker\n'), ((4452, 4485), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['to_hex_wrap'], {}), '(to_hex_wrap)\n', (4472, 4485), True, 'import matplotlib.ticker as ticker\n'), ((4516, 4526), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4524, 4526), True, 'import matplotlib.pyplot as plt\n'), ((15987, 16007), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (15998, 16007), True, 'import matplotlib.pyplot as plt\n'), ((16063, 16083), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (16074, 16083), True, 'import matplotlib.pyplot as plt\n'), ((16213, 16223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16221, 16223), True, 'import matplotlib.pyplot as plt\n'), ((3299, 
3317), 'hashlib.sha1', 'hashlib.sha1', (['data'], {}), '(data)\n', (3311, 3317), False, 'import hashlib\n'), ((10922, 10940), 'hashlib.sha1', 'hashlib.sha1', (['self'], {}), '(self)\n', (10934, 10940), False, 'import hashlib\n')] |
from metrics import compute_precision_and_recall, compute_confusion_matrix
from metrics import compute_f1_measure, compute_accuracy
from data import load_data, train_test_split
import numpy as np
import copy
from visualize import plot_decision_regions
try:
    import matplotlib
    import matplotlib.pyplot as plt
except Exception:
    # Headless environment: pyplot failed to initialise a display backend,
    # so fall back to the non-interactive Agg backend.
    # (A bare `except:` here would also swallow KeyboardInterrupt/SystemExit.)
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
#####################################################################################################
def transform_data(features):
    """Map each 2-D sample to its Euclidean distance from the origin.

    This turns radially-separable data (e.g. concentric circles) into a
    single feature on which a linear classifier can separate the classes.

    Parameters
    ----------
    features : np.ndarray, shape (n_samples, 2)

    Returns
    -------
    np.ndarray, shape (n_samples, 1)
        Column vector of distances, one row per input sample.
    """
    # use euclidean distance
    origin = np.array((0, 0))
    distances = np.linalg.norm(features - origin, axis=1)
    # reshape(-1, 1) keeps one row per sample; np.vstack on a 1-D array
    # would produce shape (1, n_samples), collapsing all samples into one.
    return distances.reshape(-1, 1)
class Perceptron():
    """Classic perceptron for binary classification with (0, 1) labels.

    Training relabels targets to (-1, 1), starts from an all-ones weight
    vector (the first component acts as the bias via a prepended ones
    column), and applies the mistake-driven update rule until the weights
    move less than 0.01 between passes or ``max_iterations`` passes run.
    """

    def __init__(self, max_iterations=200):
        self.max_iterations = max_iterations
        self.weights = None
        self.bias = None

    def classification_function(self, x):
        """Sign function: 1 where x > 0, else -1."""
        return np.where(x > 0, 1, -1)

    def fit(self, features, targets):
        """Learn a weight vector separating ``features`` into two classes."""
        self.ones = np.ones([features.shape[0], 1])
        features = np.hstack((self.ones, features))
        # Internally the perceptron works with (-1, 1) labels.
        targets = np.where(targets == 0, -1, targets)
        self.weights = np.ones(features.shape[1])

        passes = 0
        delta = 1
        while passes < self.max_iterations and delta >= 0.01:
            passes += 1
            previous = self.weights.copy()
            for row, label in zip(features, targets):
                signed_margin = self.classification_function(
                    np.dot(self.weights, row) * label
                )
                if signed_margin < 0:
                    # Misclassified sample: pull the hyperplane toward it.
                    self.weights = self.weights + row * label
            delta = np.linalg.norm(previous - self.weights)
        print(f"iterations: {passes}")

    def predict(self, features):
        """Return (0, 1) class predictions for ``features``."""
        self.ones = np.ones([features.shape[0], 1])
        features = np.hstack((self.ones, features))
        raw = self.classification_function(np.dot(features, self.weights))
        # Map internal (-1, 1) labels back to (0, 1).
        return np.where(raw == -1, 0, raw)
#####################################################################################################
# --- Perceptron experiment -------------------------------------------------
# Load the full dataset, train a perceptron, and report test metrics.
# fraction=1.0 means train and test on all data (no held-out split).
fraction = 1.0
data_path = 'data/transform_me.csv'
features, targets, attribute_names = load_data(data_path)
# features = transform_data(features)
train_features, train_targets, test_features, test_targets = train_test_split(features, targets, fraction)
perceptron = Perceptron()
perceptron.fit(train_features, train_targets)
predictions = perceptron.predict(test_features)
confusion_matrix = compute_confusion_matrix(test_targets, predictions)
accuracy = compute_accuracy(test_targets, predictions)
precision, recall = compute_precision_and_recall(test_targets, predictions)
f1_measure = compute_f1_measure(test_targets, predictions)
print(f"accuracy of perceptron: {accuracy}")
# print(f"precision: {precision}")
# print(f"recall: {recall}")
# print(f"f1 measure: {f1_measure}")
# fraction = 1.0
# data_path = 'data/transform_me.csv'
# features, targets, attribute_names = load_data(data_path)
# train_features, train_targets, test_features, test_targets = train_test_split(features, targets, fraction)
# Re-fit on the same split and plot the learned decision regions.
perceptron = Perceptron()
perceptron.fit(train_features, train_targets)
plot_decision_regions(test_features, test_targets, perceptron, 'Perceptron Decision Regions for Transform Me')
plt.show()
# plt.savefig("perceptron_plots/Perceptron_Decision_Regions_for_Transform_Me.png")
####################################################################################################################
class Node():
    """Single node of a decision tree.

    Internal nodes record the attribute they split on (name + index in the
    full attribute list) and hold their children in ``branches``; leaves
    have no branches and carry the predicted label in ``value``.
    """

    def __init__(self, value=None, attribute_name="root", attribute_index=None, branches=None):
        # A fresh list per node avoids the shared-mutable-default pitfall.
        if branches is None:
            branches = []
        self.branches = branches
        self.attribute_name = attribute_name
        self.attribute_index = attribute_index
        self.value = value
class DecisionTree():
    """ID3-style decision tree over binary attributes, with a median-split
    fallback for continuous-valued columns.

    ``attribute_names`` gives the column names of the feature matrix; the
    fitted tree is stored in ``self.tree`` as a linked structure of Node
    objects.
    """

    def __init__(self, attribute_names):
        self.attribute_names = attribute_names
        self.tree = None

    def _check_input(self, features):
        # Guard: the feature matrix must have one column per attribute name.
        if features.shape[1] != len(self.attribute_names):
            raise ValueError(
                "Number of features and number of attribute names must match!"
            )

    def fit_recursion(self, features, targets, curr_node, attributes):
        """ Function to fit data to branch or leaf and discern left or right children
        """
        # Base cases: empty partition, pure partition, or attributes exhausted
        # (majority vote).  NOTE(review): the empty-partition leaf keeps
        # value=None, which predictor would return as a prediction — confirm
        # this path never triggers on the course datasets.
        if features.size == 0 and targets.size == 0:
            return Node(attribute_name='leaf')
        elif np.count_nonzero(targets == 1) == len(targets):
            return Node(attribute_name='leaf', value=1)
        elif np.count_nonzero(targets == 0) == len(targets):
            return Node(attribute_name='leaf', value=0)
        elif len(attributes) == 0:
            if np.count_nonzero(targets == 1) > np.count_nonzero(targets == 0):
                return Node(attribute_name='leaf', value=1)
            else:
                return Node(attribute_name='leaf', value=0)
        else:
            # Append targets as the last column so row masks keep features
            # and labels aligned.
            values = np.hstack((features, np.vstack(targets)))
            # Pick the remaining attribute with the highest information gain.
            gain = {}
            for i in range(len(attributes)):
                gain[attributes[i]] = information_gain(features, i ,targets)
            curr_node.attribute_name = max(gain, key=gain.get)
            # attribute_index is relative to the FULL attribute list (used at
            # predict time); curr_idx indexes the shrinking local matrix.
            curr_node.attribute_index = self.attribute_names.index(curr_node.attribute_name)
            curr_idx = attributes.index(curr_node.attribute_name)
            removed = attributes.copy()
            removed.remove(curr_node.attribute_name)
            # S(A < m) and S(A >= m)
            # Values outside [0, 1] imply a continuous column: split at the
            # median.  Otherwise split the binary column on 0 vs 1.
            if np.any(values[values[:, curr_idx] > 1]) or np.any(values[values[:, curr_idx] < 0]):
                median = np.median(values[:, curr_idx])
                curr_node.value = median
                left_values = values[values[:,curr_idx] < median]
                left_feats = left_values[:,:-1]
                left_feats = np.delete(left_feats, curr_idx, 1)
                left_tgts = left_values[:, -1]
            else:
                curr_node.value = 1
                left_values = values[values[:, curr_idx] == 0]
                left_feats = left_values[:,:-1]
                left_feats = np.delete(left_feats, curr_idx, 1)
                left_tgts = left_values[:, -1]
            left = self.fit_recursion(left_feats, left_tgts, Node(), removed)
            # Same test repeated for the right (>= median, or == 1) partition.
            if np.any(values[values[:, curr_idx] > 1]) or np.any(values[values[:, curr_idx] < 0]):
                median = np.median(values[:, curr_idx])
                curr_node.value = median
                right_values = values[values[:,curr_idx] >= median]
                right_feats = right_values[:,:-1]
                right_feats = np.delete(right_feats, curr_idx, 1)
                right_tgts = right_values[:, -1]
            else:
                curr_node.value = 1
                right_values = values[values[:, curr_idx] == 1]
                right_feats = right_values[:,:-1]
                right_feats = np.delete(right_feats, curr_idx, 1)
                right_tgts = right_values[:, -1]
            right = self.fit_recursion(right_feats, right_tgts, Node(), removed)
            curr_node.branches.append(left)
            curr_node.branches.append(right)
            return curr_node

    def fit(self, features, targets):
        """Build the tree from scratch over all attributes."""
        self._check_input(features)
        self.tree = self.fit_recursion(features, targets, Node(), self.attribute_names)

    def predictor(self, point, curr_node):
        """Walk the tree for a single sample and return the leaf value."""
        if curr_node.branches == []:
            return curr_node.value
        else:
            # branches[0] holds the "< value" side, branches[1] the rest.
            if (point[curr_node.attribute_index] < curr_node.value):
                return self.predictor(point, curr_node.branches[0])
            else:
                return self.predictor(point, curr_node.branches[1])

    def predict(self, features):
        """Return one predicted label per row of ``features``."""
        self._check_input(features)
        predictions = np.zeros((features.shape[0]))
        iter = 0
        for j in features:
            prediction = self.predictor(j, self.tree)
            predictions[iter] = prediction
            iter += 1
        return predictions

    def _visualize_helper(self, tree, level):
        """
        Helper function for visualize a decision tree at a given level of recursion.
        """
        tab_level = " " * level
        val = tree.value if tree.value is not None else 0
        print("%d: %s%s == %f" % (level, tab_level, tree.attribute_name, val))

    def visualize(self, branch=None, level=0):
        """Print the tree depth-first, indenting one space per level."""
        if not branch:
            branch = self.tree
        self._visualize_helper(branch, level)
        for branch in branch.branches:
            self.visualize(branch, level+1)
def information_gain(features, attribute_index, targets):
    """Information gain from splitting ``targets`` on one feature column.

    Computes H(targets) - sum_v P(v) * H(targets | column == v), where v
    ranges over the distinct values of ``features[:, attribute_index]``.
    """
    column = features[:, attribute_index]
    parent_labels = np.unique(targets)

    # Entropy of the full target distribution.
    parent_entropy = 0.0
    for label in parent_labels:
        p = np.count_nonzero(targets == label) / targets.size
        parent_entropy -= p * np.log2(p)

    # Weighted entropy of the targets inside each child partition.
    child_entropy = 0.0
    for value in np.unique(column):
        mask = column == value
        weight = np.count_nonzero(mask) / column.size
        subset = targets[mask]
        subset_entropy = 0.0
        for label in parent_labels:
            p = np.count_nonzero(subset == label) / subset.size
            if p:  # 0 * log2(0) is defined as 0
                subset_entropy -= p * np.log2(p)
        child_entropy += weight * subset_entropy

    return parent_entropy - child_entropy
#################################################################################################################################
# --- Decision-tree experiment ----------------------------------------------
# Train a decision tree on the blobs dataset, print the tree, and report
# accuracy on the (identical, since fraction=1.0) test split.
fraction = 1.0
data_path = 'data/blobs.csv'
features, targets, attribute_names = load_data(data_path)
train_features, train_targets, test_features, test_targets = train_test_split(features, targets, fraction)
decision_tree = DecisionTree(attribute_names)
decision_tree.fit(train_features, train_targets)
decision_tree.visualize()
predictions = decision_tree.predict(test_features)
confusion_matrix = compute_confusion_matrix(test_targets, predictions)
accuracy = compute_accuracy(test_targets, predictions)
precision, recall = compute_precision_and_recall(test_targets, predictions)
f1_measure = compute_f1_measure(test_targets, predictions)
print(f"accuracy: {accuracy}")
# fraction = 1.0
# data_path = 'data/transform_me.csv'
# features, targets, attribute_names = load_data(data_path)
# train_features, train_targets, test_features, test_targets = train_test_split(features, targets, fraction)
# decision_tree = DecisionTree(attribute_names)
# decision_tree.fit(train_features, train_targets)
# plot_decision_regions(test_features, test_targets, decision_tree, 'Decision Tree Decision Regions for Transform Me')
# # plt.show()
# plt.savefig("decision_tree_plots/Decision_Tree_Decision_Regions_for_Transform_Me.png")
"numpy.hstack",
"numpy.count_nonzero",
"numpy.array",
"numpy.linalg.norm",
"copy.deepcopy",
"metrics.compute_accuracy",
"data.load_data",
"numpy.where",
"numpy.delete",
"numpy.dot",
"numpy.vstack",
"visualize.plot_decision_regions",
"numpy.ones",
"matplotlib.use",
"data.train_test_split"... | [((2648, 2668), 'data.load_data', 'load_data', (['data_path'], {}), '(data_path)\n', (2657, 2668), False, 'from data import load_data, train_test_split\n'), ((2768, 2813), 'data.train_test_split', 'train_test_split', (['features', 'targets', 'fraction'], {}), '(features, targets, fraction)\n', (2784, 2813), False, 'from data import load_data, train_test_split\n'), ((2954, 3005), 'metrics.compute_confusion_matrix', 'compute_confusion_matrix', (['test_targets', 'predictions'], {}), '(test_targets, predictions)\n', (2978, 3005), False, 'from metrics import compute_precision_and_recall, compute_confusion_matrix\n'), ((3017, 3060), 'metrics.compute_accuracy', 'compute_accuracy', (['test_targets', 'predictions'], {}), '(test_targets, predictions)\n', (3033, 3060), False, 'from metrics import compute_f1_measure, compute_accuracy\n'), ((3081, 3136), 'metrics.compute_precision_and_recall', 'compute_precision_and_recall', (['test_targets', 'predictions'], {}), '(test_targets, predictions)\n', (3109, 3136), False, 'from metrics import compute_precision_and_recall, compute_confusion_matrix\n'), ((3150, 3195), 'metrics.compute_f1_measure', 'compute_f1_measure', (['test_targets', 'predictions'], {}), '(test_targets, predictions)\n', (3168, 3195), False, 'from metrics import compute_f1_measure, compute_accuracy\n'), ((3642, 3756), 'visualize.plot_decision_regions', 'plot_decision_regions', (['test_features', 'test_targets', 'perceptron', '"""Perceptron Decision Regions for Transform Me"""'], {}), "(test_features, test_targets, perceptron,\n 'Perceptron Decision Regions for Transform Me')\n", (3663, 3756), False, 'from visualize import plot_decision_regions\n'), ((3753, 3763), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3761, 3763), True, 'import matplotlib.pyplot as plt\n'), ((10696, 10716), 'data.load_data', 'load_data', (['data_path'], {}), '(data_path)\n', (10705, 10716), False, 'from data import load_data, train_test_split\n'), 
((10778, 10823), 'data.train_test_split', 'train_test_split', (['features', 'targets', 'fraction'], {}), '(features, targets, fraction)\n', (10794, 10823), False, 'from data import load_data, train_test_split\n'), ((11017, 11068), 'metrics.compute_confusion_matrix', 'compute_confusion_matrix', (['test_targets', 'predictions'], {}), '(test_targets, predictions)\n', (11041, 11068), False, 'from metrics import compute_precision_and_recall, compute_confusion_matrix\n'), ((11080, 11123), 'metrics.compute_accuracy', 'compute_accuracy', (['test_targets', 'predictions'], {}), '(test_targets, predictions)\n', (11096, 11123), False, 'from metrics import compute_f1_measure, compute_accuracy\n'), ((11144, 11199), 'metrics.compute_precision_and_recall', 'compute_precision_and_recall', (['test_targets', 'predictions'], {}), '(test_targets, predictions)\n', (11172, 11199), False, 'from metrics import compute_precision_and_recall, compute_confusion_matrix\n'), ((11213, 11258), 'metrics.compute_f1_measure', 'compute_f1_measure', (['test_targets', 'predictions'], {}), '(test_targets, predictions)\n', (11231, 11258), False, 'from metrics import compute_f1_measure, compute_accuracy\n'), ((590, 606), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (598, 606), True, 'import numpy as np\n'), ((633, 674), 'numpy.linalg.norm', 'np.linalg.norm', (['(features - origin)'], {'axis': '(1)'}), '(features - origin, axis=1)\n', (647, 674), True, 'import numpy as np\n'), ((702, 733), 'numpy.vstack', 'np.vstack', (['transformed_features'], {}), '(transformed_features)\n', (711, 733), True, 'import numpy as np\n'), ((9312, 9335), 'numpy.unique', 'np.unique', (['child_column'], {}), '(child_column)\n', (9321, 9335), True, 'import numpy as np\n'), ((9356, 9374), 'numpy.unique', 'np.unique', (['targets'], {}), '(targets)\n', (9365, 9374), True, 'import numpy as np\n'), ((350, 371), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (364, 371), False, 'import matplotlib\n'), 
((1005, 1027), 'numpy.where', 'np.where', (['(x > 0)', '(1)', '(-1)'], {}), '(x > 0, 1, -1)\n', (1013, 1027), True, 'import numpy as np\n'), ((1094, 1125), 'numpy.ones', 'np.ones', (['[features.shape[0], 1]'], {}), '([features.shape[0], 1])\n', (1101, 1125), True, 'import numpy as np\n'), ((1145, 1177), 'numpy.hstack', 'np.hstack', (['(self.ones, features)'], {}), '((self.ones, features))\n', (1154, 1177), True, 'import numpy as np\n'), ((1246, 1281), 'numpy.where', 'np.where', (['(targets == 0)', '(-1)', 'targets'], {}), '(targets == 0, -1, targets)\n', (1254, 1281), True, 'import numpy as np\n'), ((1354, 1373), 'numpy.ones', 'np.ones', (['n_features'], {}), '(n_features)\n', (1361, 1373), True, 'import numpy as np\n'), ((2085, 2116), 'numpy.ones', 'np.ones', (['[features.shape[0], 1]'], {}), '([features.shape[0], 1])\n', (2092, 2116), True, 'import numpy as np\n'), ((2136, 2168), 'numpy.hstack', 'np.hstack', (['(self.ones, features)'], {}), '((self.ones, features))\n', (2145, 2168), True, 'import numpy as np\n'), ((2193, 2223), 'numpy.dot', 'np.dot', (['features', 'self.weights'], {}), '(features, self.weights)\n', (2199, 2223), True, 'import numpy as np\n'), ((2372, 2415), 'numpy.where', 'np.where', (['(predictions == -1)', '(0)', 'predictions'], {}), '(predictions == -1, 0, predictions)\n', (2380, 2415), True, 'import numpy as np\n'), ((8378, 8405), 'numpy.zeros', 'np.zeros', (['features.shape[0]'], {}), '(features.shape[0])\n', (8386, 8405), True, 'import numpy as np\n'), ((9621, 9655), 'numpy.argwhere', 'np.argwhere', (['(child_column == child)'], {}), '(child_column == child)\n', (9632, 9655), True, 'import numpy as np\n'), ((1525, 1552), 'copy.deepcopy', 'copy.deepcopy', (['self.weights'], {}), '(self.weights)\n', (1538, 1552), False, 'import copy\n'), ((1934, 1972), 'numpy.linalg.norm', 'np.linalg.norm', (['(compare - self.weights)'], {}), '(compare - self.weights)\n', (1948, 1972), True, 'import numpy as np\n'), ((9509, 9548), 'numpy.count_nonzero', 
'np.count_nonzero', (['(child_column == child)'], {}), '(child_column == child)\n', (9525, 9548), True, 'import numpy as np\n'), ((10297, 10338), 'numpy.count_nonzero', 'np.count_nonzero', (['(targets == parent_label)'], {}), '(targets == parent_label)\n', (10313, 10338), True, 'import numpy as np\n'), ((10385, 10400), 'numpy.log2', 'np.log2', (['repeat'], {}), '(repeat)\n', (10392, 10400), True, 'import numpy as np\n'), ((4924, 4954), 'numpy.count_nonzero', 'np.count_nonzero', (['(targets == 1)'], {}), '(targets == 1)\n', (4940, 4954), True, 'import numpy as np\n'), ((9920, 9966), 'numpy.count_nonzero', 'np.count_nonzero', (['(child_parent == parent_label)'], {}), '(child_parent == parent_label)\n', (9936, 9966), True, 'import numpy as np\n'), ((9967, 10006), 'numpy.count_nonzero', 'np.count_nonzero', (['(child_column == child)'], {}), '(child_column == child)\n', (9983, 10006), True, 'import numpy as np\n'), ((1638, 1665), 'numpy.dot', 'np.dot', (['self.weights', 'value'], {}), '(self.weights, value)\n', (1644, 1665), True, 'import numpy as np\n'), ((5042, 5072), 'numpy.count_nonzero', 'np.count_nonzero', (['(targets == 0)'], {}), '(targets == 0)\n', (5058, 5072), True, 'import numpy as np\n'), ((10168, 10183), 'numpy.log2', 'np.log2', (['repeat'], {}), '(repeat)\n', (10175, 10183), True, 'import numpy as np\n'), ((5197, 5227), 'numpy.count_nonzero', 'np.count_nonzero', (['(targets == 1)'], {}), '(targets == 1)\n', (5213, 5227), True, 'import numpy as np\n'), ((5230, 5260), 'numpy.count_nonzero', 'np.count_nonzero', (['(targets == 0)'], {}), '(targets == 0)\n', (5246, 5260), True, 'import numpy as np\n'), ((6020, 6059), 'numpy.any', 'np.any', (['values[values[:, curr_idx] > 1]'], {}), '(values[values[:, curr_idx] > 1])\n', (6026, 6059), True, 'import numpy as np\n'), ((6063, 6102), 'numpy.any', 'np.any', (['values[values[:, curr_idx] < 0]'], {}), '(values[values[:, curr_idx] < 0])\n', (6069, 6102), True, 'import numpy as np\n'), ((6129, 6159), 'numpy.median', 
'np.median', (['values[:, curr_idx]'], {}), '(values[:, curr_idx])\n', (6138, 6159), True, 'import numpy as np\n'), ((6344, 6378), 'numpy.delete', 'np.delete', (['left_feats', 'curr_idx', '(1)'], {}), '(left_feats, curr_idx, 1)\n', (6353, 6378), True, 'import numpy as np\n'), ((6621, 6655), 'numpy.delete', 'np.delete', (['left_feats', 'curr_idx', '(1)'], {}), '(left_feats, curr_idx, 1)\n', (6630, 6655), True, 'import numpy as np\n'), ((6827, 6866), 'numpy.any', 'np.any', (['values[values[:, curr_idx] > 1]'], {}), '(values[values[:, curr_idx] > 1])\n', (6833, 6866), True, 'import numpy as np\n'), ((6870, 6909), 'numpy.any', 'np.any', (['values[values[:, curr_idx] < 0]'], {}), '(values[values[:, curr_idx] < 0])\n', (6876, 6909), True, 'import numpy as np\n'), ((6936, 6966), 'numpy.median', 'np.median', (['values[:, curr_idx]'], {}), '(values[:, curr_idx])\n', (6945, 6966), True, 'import numpy as np\n'), ((7156, 7191), 'numpy.delete', 'np.delete', (['right_feats', 'curr_idx', '(1)'], {}), '(right_feats, curr_idx, 1)\n', (7165, 7191), True, 'import numpy as np\n'), ((7439, 7474), 'numpy.delete', 'np.delete', (['right_feats', 'curr_idx', '(1)'], {}), '(right_feats, curr_idx, 1)\n', (7448, 7474), True, 'import numpy as np\n'), ((5459, 5477), 'numpy.vstack', 'np.vstack', (['targets'], {}), '(targets)\n', (5468, 5477), True, 'import numpy as np\n')] |
#! /usr/bin/env python
"""
This script produces the stacks for emission line luminosity limited samples.
"""
import sys
import os
from os.path import join
import glob
import numpy as n
import SpectraStackingEBOSS as sse
import astropy.io.fits as fits
# Output directory for the stacked spectra and the input target list.
spec_dir = join(os.environ['HOME'],"SDSS/stacks")
# ASCII list of eBOSS ELG spectra with 0.2 < z < 1.5.
specList = join(spec_dir, "eboss-elg_0.2_z_1.5.asc")
# Output matrix file name derived from the input list name (strip ".asc").
outfile = join(spec_dir, os.path.basename(specList)[:-4]+".specMatrix")
stack=sse.SpectraStackingEBOSS(specList, outfile)
def getSpectra(path_to_spectrum):
	"""Load one SDSS spectrum file and return its wavelength and flux arrays,
	restricted to the 3740-9604 Angstrom window."""
	hdulist = fits.open(path_to_spectrum)
	spectrum = hdulist[1].data
	wavelength = 10 ** spectrum['loglam']
	flux = spectrum['flux']
	hdulist.close()
	in_band = (wavelength > 3740) & (wavelength < 9604)
	return wavelength[in_band], flux[in_band]
# Process the target list in chunks of 4096 spectra; each chunk writes its own
# flux matrix, wavelength grid, per-spectrum shape stats, and target sub-list.
for IDX_j in n.arange(0, len(stack.plates), 4096):
	IDX_min=IDX_j
	IDX_max=IDX_j+4096
	# Zero-padded "start-end" tag used in all output file names for this chunk.
	IDX_str=str(IDX_min).zfill(6)+'-'+str(IDX_max).zfill(6)
	samp_plates, samp_mjds, samp_fiberids, samp_redshifts = stack.plates[IDX_min:IDX_max], stack.mjds[IDX_min:IDX_max], stack.fiberids[IDX_min:IDX_max], stack.redshifts[IDX_min:IDX_max]
	# One row per target; rows stay zero when the spectrum is missing or not 4096 px long.
	FLUXES = n.zeros((samp_plates.shape[0], 4096))
	data = []
	bad_ids = []
	for jj, (plate, mjd, fiber, redshift) in enumerate(zip( samp_plates, samp_mjds, samp_fiberids, samp_redshifts )):
		path_to_spectrum = sse.get_path_to_spectrum_v5_11_0(plate, mjd, fiber)
		if os.path.isfile(path_to_spectrum):
			wl,fl=getSpectra(path_to_spectrum)
			data.append([fl.shape[0], wl.min(), wl.max()])
			# Only spectra with the full 4096-pixel grid are stored.
			if fl.shape[0]==4096:
				FLUXES[jj]=fl
				wavelength=wl
	# NOTE(review): `wavelength` is only bound if at least one spectrum in the
	# chunk had exactly 4096 pixels; otherwise the next line raises NameError
	# (or silently reuses the previous chunk's grid) — confirm inputs guarantee this.
	n.savetxt(stack.out_file+'.'+IDX_str+'.dat', FLUXES)
	n.savetxt(stack.out_file+'.wavelength.'+IDX_str+'.dat', wavelength)
	n.savetxt(stack.out_file+'.shapes.'+IDX_str+'.dat', n.array(data) )
	n.savetxt(stack.out_file+'.list.'+IDX_str+'.dat', n.array([samp_plates, samp_mjds, samp_fiberids, samp_redshifts]) )
| [
"SpectraStackingEBOSS.get_path_to_spectrum_v5_11_0",
"os.path.join",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"SpectraStackingEBOSS.SpectraStackingEBOSS",
"os.path.basename",
"numpy.savetxt",
"astropy.io.fits.open"
] | [((265, 304), 'os.path.join', 'join', (["os.environ['HOME']", '"""SDSS/stacks"""'], {}), "(os.environ['HOME'], 'SDSS/stacks')\n", (269, 304), False, 'from os.path import join\n'), ((315, 356), 'os.path.join', 'join', (['spec_dir', '"""eboss-elg_0.2_z_1.5.asc"""'], {}), "(spec_dir, 'eboss-elg_0.2_z_1.5.asc')\n", (319, 356), False, 'from os.path import join\n'), ((436, 479), 'SpectraStackingEBOSS.SpectraStackingEBOSS', 'sse.SpectraStackingEBOSS', (['specList', 'outfile'], {}), '(specList, outfile)\n', (460, 479), True, 'import SpectraStackingEBOSS as sse\n'), ((526, 553), 'astropy.io.fits.open', 'fits.open', (['path_to_spectrum'], {}), '(path_to_spectrum)\n', (535, 553), True, 'import astropy.io.fits as fits\n'), ((1034, 1071), 'numpy.zeros', 'n.zeros', (['(samp_plates.shape[0], 4096)'], {}), '((samp_plates.shape[0], 4096))\n', (1041, 1071), True, 'import numpy as n\n'), ((1475, 1533), 'numpy.savetxt', 'n.savetxt', (["(stack.out_file + '.' + IDX_str + '.dat')", 'FLUXES'], {}), "(stack.out_file + '.' + IDX_str + '.dat', FLUXES)\n", (1484, 1533), True, 'import numpy as n\n'), ((1529, 1602), 'numpy.savetxt', 'n.savetxt', (["(stack.out_file + '.wavelength.' + IDX_str + '.dat')", 'wavelength'], {}), "(stack.out_file + '.wavelength.' 
+ IDX_str + '.dat', wavelength)\n", (1538, 1602), True, 'import numpy as n\n'), ((1233, 1284), 'SpectraStackingEBOSS.get_path_to_spectrum_v5_11_0', 'sse.get_path_to_spectrum_v5_11_0', (['plate', 'mjd', 'fiber'], {}), '(plate, mjd, fiber)\n', (1265, 1284), True, 'import SpectraStackingEBOSS as sse\n'), ((1290, 1322), 'os.path.isfile', 'os.path.isfile', (['path_to_spectrum'], {}), '(path_to_spectrum)\n', (1304, 1322), False, 'import os\n'), ((1650, 1663), 'numpy.array', 'n.array', (['data'], {}), '(data)\n', (1657, 1663), True, 'import numpy as n\n'), ((1717, 1781), 'numpy.array', 'n.array', (['[samp_plates, samp_mjds, samp_fiberids, samp_redshifts]'], {}), '([samp_plates, samp_mjds, samp_fiberids, samp_redshifts])\n', (1724, 1781), True, 'import numpy as n\n'), ((383, 409), 'os.path.basename', 'os.path.basename', (['specList'], {}), '(specList)\n', (399, 409), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 17:38:08 2019
@author: Nate
The Generalized Metropolis algorithm removed the step-size error by an additional acceptance/rejection step,
which adds substantial overhead. To improve on the firstorder Langevin algorithm, can you devise a second-order
Langevin algorithm to reduce the step-size error dependence to (∆t)2?
Repeat problem 2 of HW10 using this second order Langevin algorithm.
"""
import numpy as np
import matplotlib.pyplot as plt
import pdb
##############################################################
##############################################################
#defining constants
#number of trials
# Number of Langevin steps per time-step value.
N = int(3e4)
# Variational parameter of the helium trial wavefunction (scalar).
alpha = 1.6875
# Time-step sizes to scan; the energy bias should shrink as delta_t -> 0.
time_step = [0.09,0.08,0.07,0.06,0.05,0.04,0.03,0.02,0.01]
# Initial positions of the two electrons.
r0 = np.array([2,2,2])
r1 = np.array([3,2,2])
# Mean energy per time step, filled by the main loop.
energy = []
# Standard deviations (currently unused; see the commented block in the main loop).
std_dev = []
##############################################################
##############################################################
#defining functions
def get_delta_r(rmax):
    """Return a random 3-vector with each component uniform in [-rmax/2, rmax/2)."""
    components = [rmax * (np.random.uniform() - 0.5) for _ in range(3)]
    return np.array(components)
#evaluate ratio
def evaluate_ratio(r, delta_r, r2, delta_r2):
    """Metropolis acceptance test for displacing both electrons.

    Computes the probability ratio P = B/A between the proposed configuration
    (r + delta_r, r2 + delta_r2) and the current one (r, r2) under the
    exp(-2*alpha*|r|) trial density, accepts the move with probability
    min(1, P), and increments the global acceptance counter ``count``.

    Returns True when the move is accepted, False otherwise.
    """
    global count
    # (P = B/A)
    # BUG FIX: `alpha` is a module-level scalar (alpha = 1.6875), so the old
    # `alpha[i]` raised TypeError the moment this function was called.
    A = np.exp(-2*alpha*np.linalg.norm(r))*np.exp(-2*alpha*np.linalg.norm(r2))
    B = np.exp(-2*alpha*np.linalg.norm(r+delta_r))*np.exp(-2*alpha*np.linalg.norm(r2+delta_r2))
    ratio = B/A
    if ratio > np.random.uniform():
        count += 1
        return True
    return False
def generate_point(r, delta_r, r2, delta_r2):
    """Return the next Metropolis sample: the displaced electron pair when
    evaluate_ratio accepts the move, otherwise the unchanged pair."""
    accepted = evaluate_ratio(r, delta_r, r2, delta_r2)
    if not accepted:
        return (r, r2)
    return (r + delta_r, r2 + delta_r2)
##############################################################
##############################################################
#Main Loop
# Second-order Langevin walk for helium: for each time step, evolve both
# electrons for N steps and accumulate the local energy.
for i in range(len(time_step)):
    #generating points
    r = np.array([r0])
    r2 = np.array([r1])
    count = 0
    en = 0
    for j in range(N):
        #evaluate v_x1, v_y1 etc
        current_r = r[-1]
        current_r2 = r2[-1]
        # Gaussian noise vectors for the stochastic part of the update.
        r_normal = np.array([np.random.randn(),np.random.randn(),np.random.randn()])
        r2_normal = np.array([np.random.randn(),np.random.randn(),np.random.randn()])
        # Drift term -alpha*r/|r| (from the trial wavefunction) plus sqrt(dt)*noise.
        r_to_add = current_r - alpha*current_r/np.linalg.norm(current_r) + np.sqrt(time_step[i])*r_normal
        r2_to_add = current_r2 - alpha*current_r2/np.linalg.norm(current_r2) + np.sqrt(time_step[i])*r2_normal
        #record energy
        r = np.append(r, [r_to_add], axis = 0)
        r2 = np.append(r2, [r2_to_add], axis = 0)
        r_mag = np.linalg.norm([r[j][0], r[j][1], r[j][2]])
        r2_mag = np.linalg.norm([r2[j][0], r2[j][1], r2[j][2]])
        # Electron-electron distance; tiny offset guards against division by zero.
        r12 = np.sqrt((r[j][0]-r2[j][0])**2 + (r[j][1]-r2[j][1])**2 + (r[j][2]-r2[j][2])**2)+.0000001
        # Single-electron local energies plus the 1/r12 repulsion below.
        E1 = -(1/2.0)*alpha**2+ alpha/r_mag-2/r_mag
        E2 = -(1/2.0)*alpha**2+ alpha/r2_mag-2/r2_mag
        en += E1 + E2 + (1.0/r12)
    #print('accepted: ',count, 'rejected: ',N-count)
    #print('Acceptance Ratio: ', count/N)
    # Mean local energy for this time step.
    energy.append(en/N)
    print('Finished run: ', time_step[i])
    '''
    to_sum = 0
    for j in range(N):
        to_sum += (-(alpha[i]**2)/2 + alpha[i]/np.linalg.norm(r[j]) - 1/np.linalg.norm(r[j])-energy[i])**2
    std_dev.append(np.sqrt(to_sum)/np.sqrt(N))
    '''
print(energy)
##############################################################
##############################################################
#Plotting
# Plot mean energy versus time step; the horizontal line at -2.845 is the
# reference variational energy for this alpha, so the curves should converge
# to it as delta_t -> 0.
fig1, axes1 = plt.subplots()
axes1.plot(time_step, energy)
axes1.plot(np.linspace(0, .09, num=50), [-2.845 for i in range(50)])
axes1.set_ylabel('Energy')
axes1.set_xlabel('Time Step Sizes $\Delta t$')
axes1.set_title("Energy vs $\Delta t$", va='bottom')
plt.show()
| [
"numpy.sqrt",
"numpy.linalg.norm",
"numpy.append",
"numpy.array",
"numpy.linspace",
"numpy.random.uniform",
"numpy.random.randn",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((770, 789), 'numpy.array', 'np.array', (['[2, 2, 2]'], {}), '([2, 2, 2])\n', (778, 789), True, 'import numpy as np\n'), ((793, 812), 'numpy.array', 'np.array', (['[3, 2, 2]'], {}), '([3, 2, 2])\n', (801, 812), True, 'import numpy as np\n'), ((3593, 3607), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3605, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3836, 3846), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3844, 3846), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1947), 'numpy.array', 'np.array', (['[r0]'], {}), '([r0])\n', (1941, 1947), True, 'import numpy as np\n'), ((1957, 1971), 'numpy.array', 'np.array', (['[r1]'], {}), '([r1])\n', (1965, 1971), True, 'import numpy as np\n'), ((3649, 3677), 'numpy.linspace', 'np.linspace', (['(0)', '(0.09)'], {'num': '(50)'}), '(0, 0.09, num=50)\n', (3660, 3677), True, 'import numpy as np\n'), ((1463, 1482), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1480, 1482), True, 'import numpy as np\n'), ((2556, 2588), 'numpy.append', 'np.append', (['r', '[r_to_add]'], {'axis': '(0)'}), '(r, [r_to_add], axis=0)\n', (2565, 2588), True, 'import numpy as np\n'), ((2604, 2638), 'numpy.append', 'np.append', (['r2', '[r2_to_add]'], {'axis': '(0)'}), '(r2, [r2_to_add], axis=0)\n', (2613, 2638), True, 'import numpy as np\n'), ((2666, 2709), 'numpy.linalg.norm', 'np.linalg.norm', (['[r[j][0], r[j][1], r[j][2]]'], {}), '([r[j][0], r[j][1], r[j][2]])\n', (2680, 2709), True, 'import numpy as np\n'), ((2727, 2773), 'numpy.linalg.norm', 'np.linalg.norm', (['[r2[j][0], r2[j][1], r2[j][2]]'], {}), '([r2[j][0], r2[j][1], r2[j][2]])\n', (2741, 2773), True, 'import numpy as np\n'), ((2788, 2882), 'numpy.sqrt', 'np.sqrt', (['((r[j][0] - r2[j][0]) ** 2 + (r[j][1] - r2[j][1]) ** 2 + (r[j][2] - r2[j][2\n ]) ** 2)'], {}), '((r[j][0] - r2[j][0]) ** 2 + (r[j][1] - r2[j][1]) ** 2 + (r[j][2] -\n r2[j][2]) ** 2)\n', (2795, 2882), True, 'import numpy as np\n'), ((1267, 1284), 
'numpy.linalg.norm', 'np.linalg.norm', (['r'], {}), '(r)\n', (1281, 1284), True, 'import numpy as np\n'), ((1305, 1323), 'numpy.linalg.norm', 'np.linalg.norm', (['r2'], {}), '(r2)\n', (1319, 1323), True, 'import numpy as np\n'), ((1352, 1379), 'numpy.linalg.norm', 'np.linalg.norm', (['(r + delta_r)'], {}), '(r + delta_r)\n', (1366, 1379), True, 'import numpy as np\n'), ((1398, 1427), 'numpy.linalg.norm', 'np.linalg.norm', (['(r2 + delta_r2)'], {}), '(r2 + delta_r2)\n', (1412, 1427), True, 'import numpy as np\n'), ((2142, 2159), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2157, 2159), True, 'import numpy as np\n'), ((2160, 2177), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2175, 2177), True, 'import numpy as np\n'), ((2178, 2195), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2193, 2195), True, 'import numpy as np\n'), ((2228, 2245), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2243, 2245), True, 'import numpy as np\n'), ((2246, 2263), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2261, 2263), True, 'import numpy as np\n'), ((2264, 2281), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2279, 2281), True, 'import numpy as np\n'), ((2377, 2398), 'numpy.sqrt', 'np.sqrt', (['time_step[i]'], {}), '(time_step[i])\n', (2384, 2398), True, 'import numpy as np\n'), ((2487, 2508), 'numpy.sqrt', 'np.sqrt', (['time_step[i]'], {}), '(time_step[i])\n', (2494, 2508), True, 'import numpy as np\n'), ((1040, 1059), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1057, 1059), True, 'import numpy as np\n'), ((1070, 1089), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1087, 1089), True, 'import numpy as np\n'), ((1100, 1119), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1117, 1119), True, 'import numpy as np\n'), ((2349, 2374), 'numpy.linalg.norm', 'np.linalg.norm', (['current_r'], {}), '(current_r)\n', (2363, 2374), True, 'import numpy as np\n'), 
((2458, 2484), 'numpy.linalg.norm', 'np.linalg.norm', (['current_r2'], {}), '(current_r2)\n', (2472, 2484), True, 'import numpy as np\n')] |
"""
Replicate figure from paper
===========================
"""
# Authors: <NAME> <<EMAIL>>
#
# License: MIT
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import LogLocator
import mne
from mne.datasets import sample
from qndiag import qndiag, ajd_pham, gradient, transform_set
# Fixed seed so the synthetic experiments are reproducible.
rng = np.random.RandomState(0)
# Matplotlib styling used for the paper figure (LaTeX text, small tick labels).
fontsize = 5
params = {
    'axes.titlesize': 10,
    'axes.labelsize': 10,
    'font.size': 7,
    'legend.fontsize': 8,
    'xtick.labelsize': fontsize,
    'ytick.labelsize': fontsize,
    'text.usetex': True,
    'ytick.major.pad': '0',
    'ytick.minor.pad': '0'}
plt.rcParams.update(params)
def loss(D):
    """Joint-diagonalisation objective.

    For a stack D of shape (n, p, p), averages over the n matrices the
    quantity (sum of log diagonal entries - log|det|), divided by 2.
    Vanishes exactly when every matrix in D is diagonal.
    """
    n_matrices = D.shape[0]
    total = 0.0
    for Di in D:
        log_diag_sum = np.sum(np.log(np.diagonal(Di)))
        _, log_det = np.linalg.slogdet(Di)
        total += log_diag_sum - log_det
    return total / (2 * n_matrices)
# n matrices of size p x p per experiment.
n, p = 100, 40
f, axes = plt.subplots(2, 3, figsize=(7, 3.04), sharex='col')
expe_str = ['(a)', '(b)', '(c)']
axes = axes.T
# Three experiments: exact joint diagonalization (sigma=0), noisy (sigma=0.1),
# and real MEG covariances; each is run with both qndiag and Pham's algorithm.
for j, (sigma, axe) in enumerate(zip([0., 0.1, 0], axes)):
    if j != 2:  # Synthetic data
        # Generate diagonal matrices
        D = rng.uniform(size=(n, p))
        # Generate a random mixing matrix
        A = rng.randn(p, p)
        C = np.zeros((n, p, p))
        # Generate the dataset
        for i in range(n):
            R = rng.randn(p, p)
            C[i] = np.dot(A, D[i, :, None] * A.T) + sigma ** 2 * R.dot(R.T)
    else:  # Real data
        data_path = sample.data_path()
        raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
        raw = mne.io.read_raw_fif(raw_fname, preload=True)
        X = raw.get_data()
        # Reduce dimension of X by PCA:
        U, D, V = np.linalg.svd(X, full_matrices=False)
        X = V[-p:, :]
        # Covariance of each of the n time segments.
        C = np.array([np.dot(x, x.T) for x in np.split(X, n, axis=1)])
    for algo in [qndiag, ajd_pham]:
        _, infos = algo(C, return_B_list=True)
        # For Pham, compute metrics after the algorithm is run
        B_list = infos['B_list']
        infos['gradient_list'] =\
            [np.linalg.norm(gradient(transform_set(B, C))) for B in B_list]
        infos['loss_list'] =\
            [loss(transform_set(B, C)) for B in B_list]
        # Top row: objective value; bottom row: gradient norm, both vs. time.
        for i, (to_plot, name, ax) in enumerate(
                zip(['loss_list', 'gradient_list'],
                    ['Objective function',
                     'Gradient norm'],
                    axe)):
            ax.loglog(infos['t_list'], infos[to_plot], linewidth=2)
            if i == 1 and j == 1:
                ax.set_xlabel('Time (sec.)')
            if j == 0:
                ax.set_ylabel(name)
            if i == 1:
                art = ax.annotate(expe_str[j], (0, 0), (50, -30),
                                  xycoords='axes fraction',
                                  textcoords='offset points', va='top')
            ax.grid(True)
            ax.yaxis.set_major_locator(LogLocator(numticks=4, subs=(1.,)))
            ax.minorticks_off()
lgd = plt.figlegend(ax.lines, ['Quasi-Newton (proposed)', 'Pham 01'],
                    loc=(0.32, .9), ncol=2, labelspacing=0.)
plt.savefig('expe.pdf', bbox_extra_artists=(art, lgd), bbox_inches='tight')
| [
"numpy.diagonal",
"matplotlib.pyplot.savefig",
"matplotlib.ticker.LogLocator",
"mne.io.read_raw_fif",
"qndiag.transform_set",
"matplotlib.pyplot.figlegend",
"matplotlib.pyplot.rcParams.update",
"numpy.zeros",
"mne.datasets.sample.data_path",
"numpy.linalg.slogdet",
"numpy.dot",
"numpy.split",
... | [((317, 341), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (338, 341), True, 'import numpy as np\n'), ((630, 657), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (649, 657), True, 'import matplotlib.pyplot as plt\n'), ((885, 936), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(7, 3.04)', 'sharex': '"""col"""'}), "(2, 3, figsize=(7, 3.04), sharex='col')\n", (897, 936), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3185), 'matplotlib.pyplot.figlegend', 'plt.figlegend', (['ax.lines', "['Quasi-Newton (proposed)', 'Pham 01']"], {'loc': '(0.32, 0.9)', 'ncol': '(2)', 'labelspacing': '(0.0)'}), "(ax.lines, ['Quasi-Newton (proposed)', 'Pham 01'], loc=(0.32, \n 0.9), ncol=2, labelspacing=0.0)\n", (3087, 3185), True, 'import matplotlib.pyplot as plt\n'), ((3199, 3274), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""expe.pdf"""'], {'bbox_extra_artists': '(art, lgd)', 'bbox_inches': '"""tight"""'}), "('expe.pdf', bbox_extra_artists=(art, lgd), bbox_inches='tight')\n", (3210, 3274), True, 'import matplotlib.pyplot as plt\n'), ((1232, 1251), 'numpy.zeros', 'np.zeros', (['(n, p, p)'], {}), '((n, p, p))\n', (1240, 1251), True, 'import numpy as np\n'), ((1461, 1479), 'mne.datasets.sample.data_path', 'sample.data_path', ([], {}), '()\n', (1477, 1479), False, 'from mne.datasets import sample\n'), ((1572, 1616), 'mne.io.read_raw_fif', 'mne.io.read_raw_fif', (['raw_fname'], {'preload': '(True)'}), '(raw_fname, preload=True)\n', (1591, 1616), False, 'import mne\n'), ((1702, 1739), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {'full_matrices': '(False)'}), '(X, full_matrices=False)\n', (1715, 1739), True, 'import numpy as np\n'), ((803, 824), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['Di'], {}), '(Di)\n', (820, 824), True, 'import numpy as np\n'), ((1361, 1391), 'numpy.dot', 'np.dot', (['A', '(D[i, :, None] * A.T)'], {}), '(A, D[i, :, None] * A.T)\n', (1367, 
1391), True, 'import numpy as np\n'), ((1784, 1798), 'numpy.dot', 'np.dot', (['x', 'x.T'], {}), '(x, x.T)\n', (1790, 1798), True, 'import numpy as np\n'), ((2171, 2190), 'qndiag.transform_set', 'transform_set', (['B', 'C'], {}), '(B, C)\n', (2184, 2190), False, 'from qndiag import qndiag, ajd_pham, gradient, transform_set\n'), ((2999, 3034), 'matplotlib.ticker.LogLocator', 'LogLocator', ([], {'numticks': '(4)', 'subs': '(1.0,)'}), '(numticks=4, subs=(1.0,))\n', (3009, 3034), False, 'from matplotlib.ticker import LogLocator\n'), ((783, 798), 'numpy.diagonal', 'np.diagonal', (['Di'], {}), '(Di)\n', (794, 798), True, 'import numpy as np\n'), ((1808, 1830), 'numpy.split', 'np.split', (['X', 'n'], {'axis': '(1)'}), '(X, n, axis=1)\n', (1816, 1830), True, 'import numpy as np\n'), ((2084, 2103), 'qndiag.transform_set', 'transform_set', (['B', 'C'], {}), '(B, C)\n', (2097, 2103), False, 'from qndiag import qndiag, ajd_pham, gradient, transform_set\n')] |
from .image_io import ImageIO
from .hyperparameters import params
import numpy as np
import cv2
# Shared frame-grab/display helper used by process_frame below.
io = ImageIO()
# round and cast tuple to have int values
def round_tuple(x):
    """Round the first two components of *x* and return them as a pair of ints."""
    first, second = x[0], x[1]
    return (int(round(first)), int(round(second)))
# takes in a tracker object, find the location of the pen, and calls tracker.setPosition
# with the new coordinates if a pen is detected
def process_frame(tracker):
    """Grab one frame, locate the pen by HSV thresholding, and update *tracker*.

    Pipeline: convert BGR->HSV, threshold with the configured pen colour range,
    clean the mask with a morphological opening, then take the largest
    connected component (excluding the background) as the pen tip.  When a pen
    is detected, tracker.setPosition() is called with its centroid.

    Returns the result of io.show_frame(), which the caller uses as a
    keep-running flag.
    """
    image_in = io.get_frame()
    image_hsv = cv2.cvtColor(image_in, cv2.COLOR_BGR2HSV)
    image_thresh = cv2.inRange(image_hsv, params.thresh_pen_min, params.thresh_pen_max)
    # Opening removes small speckle noise from the binary mask.
    image_thresh = cv2.morphologyEx(
        image_thresh, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8)
    )
    image_thresh_overlayed = image_in.copy()
    image_thresh_overlayed[image_thresh == 255, :] = (0, 0, 255)
    # io.show_frame('thresholded', image_thresh_overlayed)
    count, labeled, stats, centroids = cv2.connectedComponentsWithStats(
        image_thresh, 4, cv2.CV_32S
    )
    if count > 1:
        # Label 0 is the background, so search component areas from label 1 on.
        largest_label = np.argmax(stats[1:, cv2.CC_STAT_AREA]) + 1
        pen_center = centroids[largest_label]
        image_thresh_overlayed[labeled == largest_label, :] = (255, 100, 0)
        cv2.drawMarker(
            image_thresh_overlayed,
            round_tuple(pen_center),
            (100, 255, 0),
            cv2.MARKER_CROSS,
            10,
            2,
        )
        tracker.setPosition(pen_center)
    # Idiom fix: was `not tracker.tabletPoly is None`.
    if tracker.tabletPoly is not None:
        cv2.polylines(
            image_thresh_overlayed, tracker.tabletPoly, True, (255, 255, 255), 2
        )
    return io.show_frame("output", image_thresh_overlayed, wait=True, tracker=tracker)
# starts an infinite loop that constantly tracks and updates the pen location
def tracking_loop(tracker):
    """Process camera frames forever, stopping when process_frame returns falsy."""
    keep_going = True
    while keep_going:
        keep_going = process_frame(tracker)
| [
"numpy.ones",
"cv2.polylines",
"cv2.inRange",
"numpy.argmax",
"cv2.connectedComponentsWithStats",
"cv2.cvtColor"
] | [((434, 475), 'cv2.cvtColor', 'cv2.cvtColor', (['image_in', 'cv2.COLOR_BGR2HSV'], {}), '(image_in, cv2.COLOR_BGR2HSV)\n', (446, 475), False, 'import cv2\n'), ((495, 563), 'cv2.inRange', 'cv2.inRange', (['image_hsv', 'params.thresh_pen_min', 'params.thresh_pen_max'], {}), '(image_hsv, params.thresh_pen_min, params.thresh_pen_max)\n', (506, 563), False, 'import cv2\n'), ((881, 942), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['image_thresh', '(4)', 'cv2.CV_32S'], {}), '(image_thresh, 4, cv2.CV_32S)\n', (913, 942), False, 'import cv2\n'), ((639, 664), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (646, 664), True, 'import numpy as np\n'), ((1448, 1536), 'cv2.polylines', 'cv2.polylines', (['image_thresh_overlayed', 'tracker.tabletPoly', '(True)', '(255, 255, 255)', '(2)'], {}), '(image_thresh_overlayed, tracker.tabletPoly, True, (255, 255, \n 255), 2)\n', (1461, 1536), False, 'import cv2\n'), ((999, 1037), 'numpy.argmax', 'np.argmax', (['stats[1:, cv2.CC_STAT_AREA]'], {}), '(stats[1:, cv2.CC_STAT_AREA])\n', (1008, 1037), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from os.path import join
from os import getcwd
# Clicked points (feature pairs) and their class labels, filled by the
# mouse/key event handlers below.
X = []
y = []
# Class selected by the most recent digit key press (1-9).
current_class = 1
# One marker colour per selectable class.
colors = [
    "red",
    "blue",
    "green",
    "black",
    "brown",
    "magenta",
    "cyan",
    "orange",
    "teal",
]
# Unit-square canvas the user clicks on to create the toy dataset.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.set_title("Press keys 1-9 to select class")
def mouse_listener(event):
    """Record a clicked point under the currently selected class and draw it."""
    point = [event.xdata, event.ydata]
    X.append(point)
    y.append(current_class)
    marker_color = colors[current_class - 1]
    plt.plot(event.xdata, event.ydata, color=marker_color, marker="o")
    fig.canvas.draw()
def key_listener(event):
    """Switch the active class when a digit key 1-9 is pressed.

    Non-digit keys (arrows, letters, modifier names) and key=None are
    ignored; the previous bare int(event.key) raised ValueError/TypeError
    on any such key press and killed the event handler.
    """
    global current_class
    key = event.key
    if key is not None and key.isdecimal() and 1 <= int(key) <= 9:
        current_class = int(key)
# Wire up the interactive handlers and block until the window is closed.
fig.canvas.mpl_connect('button_press_event', mouse_listener)
fig.canvas.mpl_connect('key_press_event', key_listener)
plt.show()
# Convert collected clicks to arrays; labels are shifted to be 0-indexed.
X = np.array(X).reshape(-1, 2)
y = np.array(y).astype(np.int64).reshape(-1) - 1
# Ask where to store the dataset, falling back to sensible defaults.
ds_name = input("Name of dataset (default: 'toy'):") or "toy"
ds_loc = input("Store dataset to (absolute path) (default: current dir):") or getcwd()
np.savetxt(join(ds_loc, ds_name+"_X.csv"), X, delimiter=",")
np.savetxt(join(ds_loc, ds_name+"_y.csv"), y, delimiter=",")
| [
"matplotlib.pyplot.plot",
"os.path.join",
"os.getcwd",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((269, 281), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (279, 281), True, 'import matplotlib.pyplot as plt\n'), ((855, 865), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (863, 865), True, 'import matplotlib.pyplot as plt\n'), ((497, 576), 'matplotlib.pyplot.plot', 'plt.plot', (['event.xdata', 'event.ydata'], {'color': 'colors[current_class - 1]', 'marker': '"""o"""'}), "(event.xdata, event.ydata, color=colors[current_class - 1], marker='o')\n", (505, 576), True, 'import matplotlib.pyplot as plt\n'), ((1089, 1097), 'os.getcwd', 'getcwd', ([], {}), '()\n', (1095, 1097), False, 'from os import getcwd\n'), ((1110, 1142), 'os.path.join', 'join', (['ds_loc', "(ds_name + '_X.csv')"], {}), "(ds_loc, ds_name + '_X.csv')\n", (1114, 1142), False, 'from os.path import join\n'), ((1171, 1203), 'os.path.join', 'join', (['ds_loc', "(ds_name + '_y.csv')"], {}), "(ds_loc, ds_name + '_y.csv')\n", (1175, 1203), False, 'from os.path import join\n'), ((871, 882), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (879, 882), True, 'import numpy as np\n'), ((902, 913), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (910, 913), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@File: database.py
@Description: This is a module for different database operations and
to provide a fast lookup.
This application,
1. Open/Close a SQLite database connection
2. Create a new SQLite database
3. Create a new SQLite table
4. Insert records into SQLite table
5. Create a new index on SQLite table for efficient lookup
6. Drop an index
7. Retrieve records from SQLite table
for a provided condition
8. Find out the total number of records in the database
9. Find out the table schema
10. Save/Load database to/from disk
11. Perform fast lookup on database
@Author: <NAME>
@EMail: <EMAIL>
@Created_on: 04/05/2017
@License Copyright [2017] [Chetan Borse]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
@python_version: 3.5
===============================================================================
"""
import os
import math
import time
import logging
from functools import partial
from multiprocessing import Pool
from multiprocessing import Lock
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
import sqlite3
from Configuration import config
# Set logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(name)s [%(levelname)s] %(message)s',)
log = logging.getLogger("Database")

# Global variables
# Database/table/index names and output paths all come from the project config.
PATENT_EMBEDDING_DATABASE = config.PATENT_EMBEDDING_DATABASE
PATENT_EMBEDDING_TABLE = config.PATENT_EMBEDDING_TABLE
PRIMARY_KEY = config.PRIMARY_KEY
FIELDS = config.FIELDS
PATENT_EMBEDDING_INDEX = config.PATENT_EMBEDDING_INDEX
PATENT_CLUSTERING_PATH = config.PATENT_CLUSTERING_PATH
PATENT_MATRIX = config.PATENT_MATRIX
LABELS = config.LABELS
CLASSES = config.CLASSES
DISABLE_PATENT_CATEGORIES = config.DISABLE_PATENT_CATEGORIES

# Lock for synchronized access
# Shared by Database._execute_query and FileHandler.write across processes.
LOCK = Lock()
class DatabaseError(Exception):
    """Raised for any failure inside the Database wrapper (connect, query, dump)."""
    pass
class FileHandlerError(Exception):
    """Raised when FileHandler.write fails to persist records to disk."""
    pass
class Database(object):
    """
    This is a class for different database operations.
    This class,
    1. Open/Close a SQLite database connection
    2. Create a new SQLite database
    3. Create a new SQLite table
    4. Insert records into SQLite table
    5. Create a new index on SQLite table for efficient lookup
    6. Drop an index
    7. Retrieve records from SQLite table
        for a provided condition
    8. Find out the total number of records in the database
    9. Find out the table schema
    10. Save/Load database to/from disk
    """

    def __init__(self, verbose=False):
        # Connection/cursor are created lazily in connect().
        self.connection = None
        self.cursor = None
        # When True, retrieved records and stats are logged.
        self.verbose = verbose

    def connect(self,
                database=PATENT_EMBEDDING_DATABASE,
                in_memory=True,
                load_from=None):
        """
        Connect to a SQLite database.

        database:  file path used when in_memory is False.
        in_memory: open a transient ':memory:' database instead of a file.
        load_from: optional SQL dump file (as written by close(save_to=...))
                   whose statements are replayed into the fresh connection.
        """
        try:
            if in_memory:
                self.connection = sqlite3.connect(':memory:')
            else:
                self.connection = sqlite3.connect(database)

            self.cursor = self.connection.cursor()

            if load_from is not None:
                with open(load_from, "r") as f:
                    self.cursor.executescript(f.read())
                self.connection.commit()
        except IOError as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except sqlite3.OperationalError as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except sqlite3.Error as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except Exception as e:
            raise DatabaseError("Database application failed with: %s" % e)

    def create_table(self,
                     table=PATENT_EMBEDDING_TABLE,
                     primary_column=PRIMARY_KEY,
                     other_columns=FIELDS):
        """
        Create a new SQLite table.

        primary_column is a (name, type) pair; other_columns is a list of
        (name, type) pairs added one by one via ALTER TABLE.
        """
        try:
            self.cursor.execute('CREATE TABLE {tn} ({f} {t} NOT NULL PRIMARY KEY)' \
                                .format(tn=table,
                                        f=primary_column[0], t=primary_column[1]))

            # `type` shadows the builtin here; kept as-is for compatibility.
            for column, type in other_columns:
                self.cursor.execute("ALTER TABLE {tn} ADD COLUMN '{f}' {t}" \
                                    .format(tn=table, f=column, t=type))
        except sqlite3.OperationalError as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except sqlite3.Error as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except Exception as e:
            raise DatabaseError("Database application failed with: %s" % e)

        self.connection.commit()

    def insert(self,
               table=PATENT_EMBEDDING_TABLE,
               record=[("PatentName", None),
                       ("DocumentEmbedding", None),
                       ("PatentCategory", "UNKNOWN")]):
        """
        Insert records into SQLite table.

        record is a list of (column, value) pairs.
        NOTE(review): values are single-quoted and spliced directly into the
        SQL string; a value containing a quote breaks the statement, and this
        is unsafe for untrusted input — parameterized queries ("?")
        would be the robust alternative. Also note the mutable default
        argument for `record` (shared across calls if ever mutated).
        """
        query = "INSERT OR IGNORE INTO {tn} ({f}) VALUES ({v})"

        columns = map(lambda x: x[0], record)
        values = map(lambda x: '\''+str(x[1])+'\'', record)

        columns = ", ".join(columns)
        values = ", ".join(values)

        query = query.format(tn=table, f=columns, v=values)
        self._execute_query(query)

        self.connection.commit()

    def create_index(self,
                     index=PATENT_EMBEDDING_INDEX,
                     table=PATENT_EMBEDDING_TABLE,
                     index_by_column=PRIMARY_KEY[0]):
        """
        Create a new index on SQLite table for efficient lookup.
        """
        query = 'CREATE UNIQUE INDEX {i} ON {tn} ({f})'.format(i=index,
                                                               tn=table,
                                                               f=index_by_column)
        self._execute_query(query)
        self.connection.commit()

    def drop_index(self, index):
        """
        Drop an index from a SQLite table.
        """
        query = 'DROP INDEX {i}'.format(i=index)
        self._execute_query(query)
        self.connection.commit()

    def get(self,
            table=PATENT_EMBEDDING_TABLE,
            index=PATENT_EMBEDDING_INDEX,
            required_columns=["*"],
            condition=""):
        """
        Retrieve records from SQLite table for a provided condition.

        Forces SQLite to use the given index (INDEXED BY) and returns the
        matching rows as a list of tuples.
        """
        query = "SELECT {f} FROM {tn} INDEXED BY {i} WHERE {c}"
        query = query.format(f=", ".join(required_columns),
                             tn=table,
                             i=index,
                             c=condition)
        self._execute_query(query)

        records = []
        while True:
            # NOTE(review): fetchmany(True) — True coerces to 1, so this
            # fetches one row per call; presumably intentional, but a larger
            # batch size would reduce round trips.
            partial_records = self.cursor.fetchmany(True)
            if not partial_records:
                break
            for record in partial_records:
                if self.verbose:
                    log.debug("%r", record)
                records.append(record)

        return records

    def get_total_records(self, table):
        """
        Returns the total number of records in the database.
        """
        query = 'SELECT COUNT(*) FROM {}'.format(table)
        self._execute_query(query)
        total_records = self.cursor.fetchall()
        if self.verbose:
            log.info('Total records: {}'.format(total_records[0][0]))
        return total_records[0][0]

    def get_table_schema(self, table):
        """
        Returns the table schema.
        """
        query = 'PRAGMA TABLE_INFO({})'.format(table)
        self._execute_query(query)
        table_schema = self.cursor.fetchall()
        if self.verbose:
            log.info("ID, Name, Type, Not_Null, Default_Value, Primary_Key")
            for column in table_schema:
                log.info(column)
        return table_schema

    def close(self, save_to=None):
        """
        Close connection to the database.

        When save_to is given, the whole database is first dumped as SQL
        statements to that file (usable later via connect(load_from=...)).
        """
        try:
            if self.connection:
                if save_to is not None:
                    if not os.path.exists(save_to.rsplit(os.sep, 1)[0]):
                        # NOTE(review): PathNotFoundError is not defined in
                        # this module; raising it triggers NameError, which
                        # the `except Exception` below converts into a
                        # DatabaseError — confirm this is the intended path.
                        raise PathNotFoundError("Path does not exist: %s"
                                                % save_to.rsplit(os.sep, 1)[0])
                    with open(save_to, 'w') as f:
                        for line in self.connection.iterdump():
                            f.write('%s\n' % line)
                self.connection.close()
        except IOError as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except sqlite3.OperationalError as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except sqlite3.Error as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except Exception as e:
            raise DatabaseError("Database application failed with: %s" % e)

    def _execute_query(self, query):
        """
        Execute SQLite query.

        Serialized with the module-wide LOCK; every sqlite3 failure mode is
        re-raised as DatabaseError.
        """
        try:
            with LOCK:
                self.cursor.execute(query)
        except sqlite3.ProgrammingError as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except sqlite3.IntegrityError as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except sqlite3.OperationalError as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except sqlite3.Error as e:
            raise DatabaseError("Database application failed with: %s" % e)
        except Exception as e:
            raise DatabaseError("Database application failed with: %s" % e)
class FileHandler(object):
    """Synchronised writer for records pulled out of the database."""

    @staticmethod
    def write(records, filename, mode):
        """Write *records* to *filename* under the module-wide LOCK.

        The stream is flushed and fsync'd so the data reaches disk before
        the lock is released; any IOError is re-raised as FileHandlerError.
        """
        with LOCK:
            try:
                with open(filename, mode) as out:
                    out.write(records)
                    out.flush()
                    os.fsync(out.fileno())
            except IOError:
                raise FileHandlerError("FileHandler failed: %s" % filename)
def Lookup(database,
           table=PATENT_EMBEDDING_TABLE,
           index=PATENT_EMBEDDING_INDEX,
           required_columns=["*"],
           search_on=PRIMARY_KEY[0],
           save=True,
           patents=list()):
    """
    Perform lookup on database.

    Builds a `search_on IN (...)` condition from the patent ids, fetches the
    matching rows via the given index, and (optionally) appends them to the
    clustering output files through SaveRecords.
    NOTE(review): ids are single-quoted and interpolated into the SQL string;
    ids containing quotes break the query (same caveat as Database.insert).
    """
    condition = "{s} IN ({i})"
    # condition = "{s} IN ({i}) ORDER BY FIELD ({o})"

    patents = map(lambda x: '\''+str(x)+'\'', patents)
    patents = ",".join(patents)

    condition = condition.format(s=search_on, i=patents)
    # condition = condition.format(s=search_on, i=patents, o=patents)

    records = database.get(table, index, required_columns, condition)

    if save:
        SaveRecords(records)

    return records
def FastLookup(database,
               table=PATENT_EMBEDDING_TABLE,
               index=PATENT_EMBEDDING_INDEX,
               required_columns=["*"],
               search_on=PRIMARY_KEY[0],
               patents=(),
               total_processes=1,
               save=True,
               path=os.getcwd(),
               return_from=False):
    """
    Parallel version of Lookup: split ``patents`` into chunks and run one
    Lookup per worker process.

    Args:
        database: Database wrapper passed through to Lookup.
        table, index, required_columns, search_on, save: see Lookup.
        patents (sequence): patent identifiers to look up.  Default changed
            from ``list()`` to an immutable empty tuple (same semantics,
            no shared-mutable-default pitfall).
        total_processes (int): number of worker processes.
        path (str): kept for interface compatibility; not used by the body.
        return_from (bool): when True, return the per-chunk results.

    Returns:
        list | None: list of per-chunk Lookup results if ``return_from``.
    """
    # One chunk per process; guard against a zero-sized chunk when there
    # are more processes than patents.
    chunk_size = math.ceil(float(len(patents)) / total_processes)
    if chunk_size == 0:
        chunk_size = 1
    with Pool(processes=total_processes) as pool:
        f = partial(Lookup,
                    database, table, index, required_columns, search_on, save)
        result = pool.map(f, GetChunks(patents, size=chunk_size))
    if return_from:
        return result
def GetChunks(data, size=None):
    """
    Split ``data`` into consecutive chunks of at most ``size`` elements.

    Args:
        data (sequence): sliceable sequence to partition.
        size (int | None): chunk length; ``None`` means one chunk holding
            the whole sequence.

    Returns:
        list: list of slices of ``data`` in order.
    """
    if size is None:  # BUG FIX: was `size == None`
        size = len(data)
    if size <= 0:
        # Mirrors the original behaviour for empty input and avoids the
        # original's infinite loop when size == 0 with non-empty data.
        return []
    return [data[i:i + size] for i in range(0, len(data), size)]
def SaveRecords(records):
    """
    Persist looked-up rows to the clustering artefacts (label list,
    embedding matrix and, optionally, class list) via FileHandler.

    Each record is expected to be a sequence of
    (patent_name, document_embedding[, patent_category]); falsy fields
    are skipped.
    """
    names = "\n".join(r[0] for r in records if r[0])
    embeddings = "\n".join(r[1] for r in records if r[1])
    if not DISABLE_PATENT_CATEGORIES:
        categories = "\n".join(r[2] for r in records if r[2])
    # Only write when the clustering output directory already exists.
    if os.path.exists(PATENT_CLUSTERING_PATH):
        if names:
            FileHandler.write(names + "\n", LABELS, "a")
        if embeddings:
            FileHandler.write(embeddings.encode() + b"\n",
                              PATENT_MATRIX,
                              "ab")
        if not DISABLE_PATENT_CATEGORIES and categories:
            FileHandler.write(categories + "\n", CLASSES, "a")
if __name__ == '__main__':
    # Smoke test, phase 1 — Database write operations: populate an
    # in-memory table with zero-vector embeddings, then persist it.
    db = Database(verbose=True)
    db.connect(in_memory=True)
    db.create_table(table=PATENT_EMBEDDING_TABLE,
                    primary_column=PRIMARY_KEY,
                    other_columns=FIELDS)
    total_records = 1000
    dimension = 500
    for i in range(total_records):
        # Embeddings are stored as a space-separated string of floats.
        default_embedding = np.zeros((dimension,), dtype=np.float32)
        document_embedding = " ".join(map(str, default_embedding))
        record = [("PatentName", str(i)),
                  ("DocumentEmbedding", document_embedding),
                  ("PatentCategory", "UNKNOWN")]
        db.insert(table=PATENT_EMBEDDING_TABLE, record=record)
    db.create_index(index=PATENT_EMBEDDING_INDEX,
                    table=PATENT_EMBEDDING_TABLE,
                    index_by_column=PRIMARY_KEY[0])
    db.get_total_records(PATENT_EMBEDDING_TABLE)
    db.get_table_schema(PATENT_EMBEDDING_TABLE)
    db.close(save_to=PATENT_EMBEDDING_DATABASE)
    # Phase 2 — Database read operations: reload the saved database and
    # time a lookup of 50 patents.
    db = Database(verbose=True)
    db.connect(in_memory=True, load_from=PATENT_EMBEDDING_DATABASE)
    total_patents = 50
    patents = [str(i+5) for i in range(total_patents)]
    dimension = 500
    # Header line "<count> <dim>" — presumably a word2vec-style matrix
    # header; TODO confirm against the consumer of PATENT_MATRIX.
    try:
        FileHandler.write((b"%d %d\n" % (total_patents, dimension)),
                          PATENT_MATRIX,
                          "ab")
    except IOError as e:
        raise FileHandlerError()
    start_time = time.time()
    Lookup(db,
           table=PATENT_EMBEDDING_TABLE,
           index=PATENT_EMBEDDING_INDEX,
           search_on=PRIMARY_KEY[0],
           save=True,
           patents=patents)
    # FastLookup(db, patents=patents, total_processes=4, save=True)
    end_time = time.time()
    print(end_time-start_time)
    db.close()
| [
"logging.basicConfig",
"os.path.exists",
"logging.getLogger",
"sqlite3.connect",
"os.getcwd",
"numpy.zeros",
"functools.partial",
"multiprocessing.Pool",
"multiprocessing.Lock",
"time.time"
] | [((2143, 2246), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(name)s [%(levelname)s] %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(name)s [%(levelname)s] %(message)s')\n", (2162, 2246), False, 'import logging\n'), ((2269, 2298), 'logging.getLogger', 'logging.getLogger', (['"""Database"""'], {}), "('Database')\n", (2286, 2298), False, 'import logging\n'), ((2789, 2795), 'multiprocessing.Lock', 'Lock', ([], {}), '()\n', (2793, 2795), False, 'from multiprocessing import Lock\n'), ((12365, 12376), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12374, 12376), False, 'import os\n'), ((13822, 13860), 'os.path.exists', 'os.path.exists', (['PATENT_CLUSTERING_PATH'], {}), '(PATENT_CLUSTERING_PATH)\n', (13836, 13860), False, 'import os\n'), ((15725, 15736), 'time.time', 'time.time', ([], {}), '()\n', (15734, 15736), False, 'import time\n'), ((16004, 16015), 'time.time', 'time.time', ([], {}), '()\n', (16013, 16015), False, 'import time\n'), ((12589, 12620), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'total_processes'}), '(processes=total_processes)\n', (12593, 12620), False, 'from multiprocessing import Pool\n'), ((12642, 12716), 'functools.partial', 'partial', (['Lookup', 'database', 'table', 'index', 'required_columns', 'search_on', 'save'], {}), '(Lookup, database, table, index, required_columns, search_on, save)\n', (12649, 12716), False, 'from functools import partial\n'), ((14640, 14680), 'numpy.zeros', 'np.zeros', (['(dimension,)'], {'dtype': 'np.float32'}), '((dimension,), dtype=np.float32)\n', (14648, 14680), True, 'import numpy as np\n'), ((3894, 3921), 'sqlite3.connect', 'sqlite3.connect', (['""":memory:"""'], {}), "(':memory:')\n", (3909, 3921), False, 'import sqlite3\n'), ((3974, 3999), 'sqlite3.connect', 'sqlite3.connect', (['database'], {}), '(database)\n', (3989, 3999), False, 'import sqlite3\n')] |
# This file implements the upHRP algorithm, the MinVar portfolio and the
# InvLambda portfolio.
# Code for classical HRP is based on <NAME>. (2018). Advances in Financial
# Machine Learning. Wiley. The code has been modified to create uplifted
# portfolio strategies based on FRM adjacency matrices, and adapted so it
# can be used with Python 3 and the data set.
# <NAME>
# @date: 20201010
#[0] Import library
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#[0]Upload input data, Financial Institutions of the 6 Emerging Markets and adjacency matrix
FIs_prices = pd.read_excel("Financial Institutions Price Series.xlsx")
FRM_EM_Adjacency_matrix= pd.read_csv("adj_matix_20200630_050.csv")
print(FIs_prices)
# In[1]:
# Load modules
import os
path = os.getcwd() # Set Working directory here
# Import modules for Datastructuring and calc.
import pandas as pd
import numpy as np
from scipy import stats
import warnings
from tqdm import tqdm
# Modules for RHP algorithm
import matplotlib.pyplot as mpl
import scipy.cluster.hierarchy as sch
# Modules for the network plot
import networkx as nx
from networkx.convert_matrix import from_numpy_matrix
# Modules for Markowitz optimization
import cvxopt as opt
import cvxopt.solvers as optsolvers
warnings.filterwarnings("ignore") # suppress warnings in clustering
# In[2]:
# define functions for HRP and IVP
def getIVP(cov, **kargs):
    """Return inverse-variance portfolio weights for covariance ``cov``.

    Weights are proportional to 1/variance and normalised to sum to one.
    """
    variances = np.diag(cov)
    raw = 1.0 / variances
    return raw / raw.sum()
def getClusterVar(cov, cItems):
    """Variance of the inverse-variance-weighted sub-portfolio ``cItems``.

    Args:
        cov (pd.DataFrame): full covariance matrix.
        cItems (list): labels of the cluster members.

    Returns:
        float: w' Sigma w for the cluster slice with IVP weights w.
    """
    sub = cov.loc[cItems, cItems]      # cluster slice of the covariance
    w = getIVP(sub).reshape(-1, 1)     # column vector of IVP weights
    return np.dot(np.dot(w.T, sub), w)[0, 0]
def getQuasiDiag(link):
    """
    Return the original item order implied by a scipy linkage matrix.

    Recursively replaces every cluster id in the sort order by its two
    children until only original (leaf) item ids remain, yielding the
    quasi-diagonalised ordering used by HRP.

    Args:
        link (ndarray): (n-1, 4) linkage matrix from
            scipy.cluster.hierarchy.linkage.

    Returns:
        list: leaf indices in quasi-diagonal order.
    """
    link = link.astype(int)
    # Start from the two children of the root cluster.
    sortIx = pd.Series([link[-1, 0], link[-1, 1]])
    numItems = link[-1, 3]  # number of original items
    while sortIx.max() >= numItems:
        sortIx.index = range(0, sortIx.shape[0] * 2, 2)  # make space
        df0 = sortIx[sortIx >= numItems]  # find clusters
        i = df0.index
        j = df0.values - numItems
        sortIx[i] = link[j, 0]  # item 1
        df0 = pd.Series(link[j, 1], index=i + 1)
        # BUG FIX: Series.append was removed in pandas 2.0; use pd.concat.
        sortIx = pd.concat([sortIx, df0])  # item 2
        sortIx = sortIx.sort_index()  # re-sort
        sortIx.index = range(sortIx.shape[0])  # re-index
    return sortIx.tolist()
def getRecBipart(cov,sortIx):
    """
    Recursive bisection step of HRP: allocate capital top-down.

    Starting from all items in one cluster, repeatedly split every
    cluster in half and shift weight towards the half with the lower
    inverse-variance cluster risk.

    Args:
        cov (pd.DataFrame): covariance matrix of returns.
        sortIx (list): quasi-diagonal item ordering from getQuasiDiag.

    Returns:
        pd.Series: portfolio weights (summing to 1) indexed by item label.
    """
    # Compute HRP alloc
    w=pd.Series(1,index=sortIx)
    cItems=[sortIx] # initialize all items in one cluster
    while len(cItems)>0:
        cItems=[i[j:k] for i in cItems for j,k in ((0,len(i)//2),(len(i)//2,\
            len(i))) if len(i)>1] # bi-section
        for i in range(0,len(cItems),2): # parse in pairs
            cItems0=cItems[i] # cluster 1
            cItems1=cItems[i+1] # cluster 2
            cVar0=getClusterVar(cov,cItems0)
            cVar1=getClusterVar(cov,cItems1)
            # The lower-risk half receives the larger share of the weight.
            alpha=1-cVar0/(cVar0+cVar1)
            w[cItems0]*=alpha # weight 1
            w[cItems1]*=1-alpha # weight 2
    return w
def correlDist(corr):
    """Map a correlation matrix to a proper distance metric.

    d[i, j] = sqrt((1 - rho[i, j]) / 2), so 0 <= d <= 1 with d = 0 for
    perfect correlation and d = 1 for perfect anti-correlation.
    """
    return ((1 - corr) / 2.0) ** 0.5
def plotCorrMatrix(path, corr, labels=None):
    """Save a heatmap of ``corr`` to ``path`` with tick labels ``labels``."""
    if labels is None:
        labels = []
    mpl.pcolor(corr)
    mpl.colorbar()
    # One tick per row/column, centred on the cells.
    ticks = np.arange(.5, corr.shape[0] + .5)
    mpl.yticks(ticks, labels)
    mpl.xticks(ticks, labels)
    mpl.savefig(path, dpi=300, transparent=True)
    mpl.clf()
    mpl.close()  # reset pylab
    return
# In[3]:
# define function for MinVar portfolio
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def min_var_portfolio(cov_mat, allow_short=False):
    """
    Compute the global minimum-variance portfolio weights.

    Solves  min_w  w' Sigma w  subject to  sum(w) = 1  (and w >= 0 when
    shorting is disallowed) as a quadratic program via cvxopt.

    Note: the variance is not invariant with respect to leverage, so a
    non-trivial market-neutral minimum-variance portfolio cannot be
    constructed — with decreasing leverage the variance approaches zero,
    i.e. the market-neutral minimum-variance portfolio is not invested.

    Parameters
    ----------
    cov_mat : pandas.DataFrame
        Covariance matrix of asset returns.
    allow_short : bool, optional
        If False (default) a long-only portfolio is constructed; if True
        negative weights are permitted.

    Returns
    -------
    pandas.Series
        Optimal asset weights indexed like ``cov_mat``.
    """
    if not isinstance(cov_mat, pd.DataFrame):
        raise ValueError("Covariance matrix is not a DataFrame")
    num_assets = len(cov_mat)
    # Objective: (1/2) x' P x + q' x  with P = Sigma and q = 0.
    P = opt.matrix(cov_mat.values)
    q = opt.matrix(0.0, (num_assets, 1))
    # Inequality constraints G x <= h encode x >= 0 for the long-only case.
    if allow_short:
        G, h = None, None
    else:
        G = opt.matrix(-np.identity(num_assets))
        h = opt.matrix(0.0, (num_assets, 1))
    # Equality constraint A x = b encodes full investment: sum(x) = 1.
    A = opt.matrix(1.0, (1, num_assets))
    b = opt.matrix(1.0)
    optsolvers.options['show_progress'] = False
    sol = optsolvers.qp(P, q, G, h, A, b)
    if sol['status'] != 'optimal':
        warnings.warn("Convergence problem")
    return pd.Series(sol['x'], index=cov_mat.index)
# In[4]:
# Define functions for network graphs
#Function to plot Network plots
def plotNetwork(path, corr):
    """
    Save a circular network plot of the adjacency/correlation matrix.

    Args:
        path (str): output image path.
        corr (pd.DataFrame): adjacency matrix; its index provides the
            node labels.
    """
    # BUG FIX: the original body overwrote the ``corr`` parameter with the
    # global ``Corr_mat``; use the argument that the caller passed in.
    adj_matrix = corr
    constits_latest = corr.index
    # Remove self-loops: zero out entries numerically equal to 1.
    adj_matrix = np.where((adj_matrix <= 1.000001) & (adj_matrix >= 0.99999), 0, adj_matrix)
    # Build an undirected graph and relabel nodes with constituent names.
    # NOTE(review): from_numpy_matrix was removed in networkx 3.0; this
    # keeps the import the file already uses.
    graph = from_numpy_matrix(adj_matrix, parallel_edges=False, create_using=nx.Graph())
    graph = nx.relabel.relabel_nodes(
        graph, dict(zip(range(len(constits_latest)), constits_latest)))
    pos_og = nx.circular_layout(graph, scale=2)
    pos = nx.circular_layout(graph, scale=1.7)
    for p in pos:  # push label positions outwards so they clear the nodes
        if pos[p][1] > 1:
            pos[p][1] += 0.15
        if pos[p][1] < -1:
            pos[p][1] -= 0.15
        elif pos[p][0] < 0:
            pos[p][0] -= 0.3
        else:
            pos[p][0] += 0.3
    # Renamed from ``plt`` to avoid shadowing the pyplot module name.
    fig = mpl.figure(figsize=(5, 5))
    nx.draw(graph, pos_og, with_labels=False)
    nx.draw_networkx_labels(graph, pos)
    fig.savefig(path, dpi=300, transparent=True)
    mpl.clf()
    mpl.close()
    return
## In[5]:
# Loading and structuring the financial-institution price data set.
FIs_prices = FIs_prices[(~FIs_prices.isnull()).all(axis=1)] # Deleting empty rows
FIs_prices = FIs_prices.rename(columns = {"date":"Date"})
# NOTE(review): DataFrame.replace(method="ffill") is deprecated in recent
# pandas; confirm the installed version still supports it.
FIs_prices = FIs_prices.replace(to_replace = 0, method = "ffill")
Price_data_univ=FIs_prices
Price_data_univ = Price_data_univ.set_index("Date") # define Date as index
# Calculating returns
Return_data_univ = Price_data_univ.pct_change() #calculate daily returns
# Drop the first row (NaN returns from pct_change).
Return_data_univ = Return_data_univ.drop(Return_data_univ.index[range(0,1)])
Cov_mat1 = Return_data_univ.cov() # Covariance matrix of the return matrix
Corr_mat1=Return_data_univ.corr() # Correlation matrix of the return matrix
# The FRM adjacency matrix doubles as both "correlation" and "covariance"
# input for the uplifted HRP variant.
FRM_EM_Adjacency_matrix = FRM_EM_Adjacency_matrix.rename(columns = {"date":""})
FRM_EM_Adjacency_matrix = FRM_EM_Adjacency_matrix.set_index("") # define Date as index
Corr_mat=FRM_EM_Adjacency_matrix
Cov_mat=FRM_EM_Adjacency_matrix
# In[6]:
# Heatmap and network analysis of corr. matrix
# Plotting Correlation matrix heatmap
plotCorrMatrix(path+"/Adj_matrix_Heatmap_FIs_unsorted",Corr_mat)
# network plot of correlation matrix
plotNetwork(path+"/Corr_Network_FIs_unsorted.png", Corr_mat)
# Sort correlation matrix
dist=correlDist(Corr_mat)
link=sch.linkage(dist,'single')
sortIx=getQuasiDiag(link)
sortIx=Corr_mat.index[sortIx].tolist() # recover labels
Corr_sorted=Corr_mat.loc[sortIx,sortIx] # reorder
# Plot sorted correlation matrix
plotCorrMatrix(path+"/Adj_matrix_Heatmap_FIs_sorted",Corr_sorted)
# Plot dendogram of the constituents
#2) Cluster Data
mpl.figure(num=None, figsize=(20, 10), dpi=300, facecolor='w', edgecolor='k')
dn = sch.dendrogram(link, labels = dist.columns)
mpl.savefig(path+"/Dendrogram_FIs.png", transparent = True, dpi = 300)
mpl.clf();mpl.close() # reset pylab
# NOTE(review): this prints the function object itself — looks like a
# leftover debug statement.
print(plotNetwork)
# In[7]:
#Function to calculate the HRP portfolio weights
def HRPportf(cov, corr):
    """Full HRP pipeline: cluster on ``corr``, then allocate over ``cov``.

    Returns a pd.Series of portfolio weights indexed by asset label.
    """
    # 1) Hierarchically cluster the correlation-based distance matrix.
    distance = correlDist(corr)
    linkage = sch.linkage(distance, 'single')
    order = getQuasiDiag(linkage)
    order = corr.index[order].tolist()  # map positions back to labels
    # 2) Recursive bisection allocates capital top-down along the tree.
    return getRecBipart(cov, order)
# In[8]:
# Compute the weights for the Markowitz MinVar and the HRP portfolio and the
# IVP portfolio
# NOTE(review): labels come from HRPportf(Cov_mat1, Corr_mat1) while the
# weights come from HRPportf(Cov_mat, Corr_mat) — confirm this pairing of
# return-based labels with adjacency-based weights is intentional.
w_HRP=np.array([HRPportf(Cov_mat1,Corr_mat1).index,HRPportf(Cov_mat,Corr_mat).round(3)])
w_HRP=pd.DataFrame(np.transpose(w_HRP))
w_HRP.columns = ["Asset","Weights HRP"]
w_MinVar= np.array([min_var_portfolio(Cov_mat1).index,min_var_portfolio(Cov_mat1).round(3)])
w_MinVar=pd.DataFrame(np.transpose(w_MinVar))
w_MinVar.columns = ["Asset","Weights MinVar"]
w_IVP= np.array([Cov_mat1.index, getIVP(Cov_mat).round(3)])
w_IVP=pd.DataFrame(np.transpose(w_IVP))
w_IVP.columns = ["Asset","Weights IVP"]
# Inner-join the three weight tables on the asset label.
Weights = pd.merge(w_MinVar,w_IVP,\
                   on="Asset", how = "inner")
Weights = pd.merge(Weights,w_HRP,\
                   on="Asset", how = "inner")
print(Weights.to_latex(index=True)) # Latex table output
| [
"pandas.read_csv",
"matplotlib.pyplot.pcolor",
"networkx.draw_networkx_labels",
"pandas.read_excel",
"numpy.arange",
"numpy.where",
"matplotlib.pyplot.close",
"numpy.dot",
"scipy.cluster.hierarchy.linkage",
"cvxopt.matrix",
"warnings.warn",
"numpy.identity",
"matplotlib.pyplot.savefig",
"p... | [((591, 648), 'pandas.read_excel', 'pd.read_excel', (['"""Financial Institutions Price Series.xlsx"""'], {}), "('Financial Institutions Price Series.xlsx')\n", (604, 648), True, 'import pandas as pd\n'), ((674, 715), 'pandas.read_csv', 'pd.read_csv', (['"""adj_matix_20200630_050.csv"""'], {}), "('adj_matix_20200630_050.csv')\n", (685, 715), True, 'import pandas as pd\n'), ((778, 789), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (787, 789), False, 'import os\n'), ((1271, 1304), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1294, 1304), False, 'import warnings\n'), ((8835, 8862), 'scipy.cluster.hierarchy.linkage', 'sch.linkage', (['dist', '"""single"""'], {}), "(dist, 'single')\n", (8846, 8862), True, 'import scipy.cluster.hierarchy as sch\n'), ((9151, 9228), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {'num': 'None', 'figsize': '(20, 10)', 'dpi': '(300)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(20, 10), dpi=300, facecolor='w', edgecolor='k')\n", (9161, 9228), True, 'import matplotlib.pyplot as mpl\n'), ((9238, 9279), 'scipy.cluster.hierarchy.dendrogram', 'sch.dendrogram', (['link'], {'labels': 'dist.columns'}), '(link, labels=dist.columns)\n', (9252, 9279), True, 'import scipy.cluster.hierarchy as sch\n'), ((9282, 9350), 'matplotlib.pyplot.savefig', 'mpl.savefig', (["(path + '/Dendrogram_FIs.png')"], {'transparent': '(True)', 'dpi': '(300)'}), "(path + '/Dendrogram_FIs.png', transparent=True, dpi=300)\n", (9293, 9350), True, 'import matplotlib.pyplot as mpl\n'), ((9353, 9362), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (9360, 9362), True, 'import matplotlib.pyplot as mpl\n'), ((9363, 9374), 'matplotlib.pyplot.close', 'mpl.close', ([], {}), '()\n', (9372, 9374), True, 'import matplotlib.pyplot as mpl\n'), ((10395, 10445), 'pandas.merge', 'pd.merge', (['w_MinVar', 'w_IVP'], {'on': '"""Asset"""', 'how': '"""inner"""'}), "(w_MinVar, w_IVP, on='Asset', how='inner')\n", 
(10403, 10445), True, 'import pandas as pd\n'), ((10477, 10526), 'pandas.merge', 'pd.merge', (['Weights', 'w_HRP'], {'on': '"""Asset"""', 'how': '"""inner"""'}), "(Weights, w_HRP, on='Asset', how='inner')\n", (10485, 10526), True, 'import pandas as pd\n'), ((1826, 1863), 'pandas.Series', 'pd.Series', (['[link[-1, 0], link[-1, 1]]'], {}), '([link[-1, 0], link[-1, 1]])\n', (1835, 1863), True, 'import pandas as pd\n'), ((2418, 2444), 'pandas.Series', 'pd.Series', (['(1)'], {'index': 'sortIx'}), '(1, index=sortIx)\n', (2427, 2444), True, 'import pandas as pd\n'), ((3340, 3356), 'matplotlib.pyplot.pcolor', 'mpl.pcolor', (['corr'], {}), '(corr)\n', (3350, 3356), True, 'import matplotlib.pyplot as mpl\n'), ((3361, 3375), 'matplotlib.pyplot.colorbar', 'mpl.colorbar', ([], {}), '()\n', (3373, 3375), True, 'import matplotlib.pyplot as mpl\n'), ((3488, 3532), 'matplotlib.pyplot.savefig', 'mpl.savefig', (['path'], {'dpi': '(300)', 'transparent': '(True)'}), '(path, dpi=300, transparent=True)\n', (3499, 3532), True, 'import matplotlib.pyplot as mpl\n'), ((3536, 3545), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (3543, 3545), True, 'import matplotlib.pyplot as mpl\n'), ((3546, 3557), 'matplotlib.pyplot.close', 'mpl.close', ([], {}), '()\n', (3555, 3557), True, 'import matplotlib.pyplot as mpl\n'), ((5677, 5703), 'cvxopt.matrix', 'opt.matrix', (['cov_mat.values'], {}), '(cov_mat.values)\n', (5687, 5703), True, 'import cvxopt as opt\n'), ((5712, 5735), 'cvxopt.matrix', 'opt.matrix', (['(0.0)', '(n, 1)'], {}), '(0.0, (n, 1))\n', (5722, 5735), True, 'import cvxopt as opt\n'), ((5957, 5980), 'cvxopt.matrix', 'opt.matrix', (['(1.0)', '(1, n)'], {}), '(1.0, (1, n))\n', (5967, 5980), True, 'import cvxopt as opt\n'), ((5989, 6004), 'cvxopt.matrix', 'opt.matrix', (['(1.0)'], {}), '(1.0)\n', (5999, 6004), True, 'import cvxopt as opt\n'), ((6076, 6107), 'cvxopt.solvers.qp', 'optsolvers.qp', (['P', 'q', 'G', 'h', 'A', 'b'], {}), '(P, q, G, h, A, b)\n', (6089, 6107), True, 'import 
cvxopt.solvers as optsolvers\n'), ((6261, 6301), 'pandas.Series', 'pd.Series', (["sol['x']"], {'index': 'cov_mat.index'}), "(sol['x'], index=cov_mat.index)\n", (6270, 6301), True, 'import pandas as pd\n'), ((6647, 6722), 'numpy.where', 'np.where', (['((adj_matrix <= 1.000001) & (adj_matrix >= 0.99999))', '(0)', 'adj_matrix'], {}), '((adj_matrix <= 1.000001) & (adj_matrix >= 0.99999), 0, adj_matrix)\n', (6655, 6722), True, 'import numpy as np\n'), ((7041, 7075), 'networkx.circular_layout', 'nx.circular_layout', (['graph'], {'scale': '(2)'}), '(graph, scale=2)\n', (7059, 7075), True, 'import networkx as nx\n'), ((7086, 7122), 'networkx.circular_layout', 'nx.circular_layout', (['graph'], {'scale': '(1.7)'}), '(graph, scale=1.7)\n', (7104, 7122), True, 'import networkx as nx\n'), ((7385, 7411), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (7395, 7411), True, 'import matplotlib.pyplot as mpl\n'), ((7418, 7459), 'networkx.draw', 'nx.draw', (['graph', 'pos_og'], {'with_labels': '(False)'}), '(graph, pos_og, with_labels=False)\n', (7425, 7459), True, 'import networkx as nx\n'), ((7465, 7500), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['graph', 'pos'], {}), '(graph, pos)\n', (7488, 7500), True, 'import networkx as nx\n'), ((7511, 7555), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'dpi': '(300)', 'transparent': '(True)'}), '(path, dpi=300, transparent=True)\n', (7522, 7555), True, 'import matplotlib.pyplot as plt\n'), ((7559, 7568), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (7566, 7568), True, 'import matplotlib.pyplot as mpl\n'), ((7569, 7580), 'matplotlib.pyplot.close', 'mpl.close', ([], {}), '()\n', (7578, 7580), True, 'import matplotlib.pyplot as mpl\n'), ((9562, 9589), 'scipy.cluster.hierarchy.linkage', 'sch.linkage', (['dist', '"""single"""'], {}), "(dist, 'single')\n", (9573, 9589), True, 'import scipy.cluster.hierarchy as sch\n'), ((9996, 10015), 'numpy.transpose', 
'np.transpose', (['w_HRP'], {}), '(w_HRP)\n', (10008, 10015), True, 'import numpy as np\n'), ((10173, 10195), 'numpy.transpose', 'np.transpose', (['w_MinVar'], {}), '(w_MinVar)\n', (10185, 10195), True, 'import numpy as np\n'), ((10323, 10342), 'numpy.transpose', 'np.transpose', (['w_IVP'], {}), '(w_IVP)\n', (10335, 10342), True, 'import numpy as np\n'), ((1467, 1479), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (1474, 1479), True, 'import numpy as np\n'), ((2154, 2188), 'pandas.Series', 'pd.Series', (['link[j, 1]'], {'index': '(i + 1)'}), '(link[j, 1], index=i + 1)\n', (2163, 2188), True, 'import pandas as pd\n'), ((3391, 3426), 'numpy.arange', 'np.arange', (['(0.5)', '(corr.shape[0] + 0.5)'], {}), '(0.5, corr.shape[0] + 0.5)\n', (3400, 3426), True, 'import numpy as np\n'), ((3445, 3480), 'numpy.arange', 'np.arange', (['(0.5)', '(corr.shape[0] + 0.5)'], {}), '(0.5, corr.shape[0] + 0.5)\n', (3454, 3480), True, 'import numpy as np\n'), ((5846, 5869), 'cvxopt.matrix', 'opt.matrix', (['(0.0)', '(n, 1)'], {}), '(0.0, (n, 1))\n', (5856, 5869), True, 'import cvxopt as opt\n'), ((6160, 6196), 'warnings.warn', 'warnings.warn', (['"""Convergence problem"""'], {}), "('Convergence problem')\n", (6173, 6196), False, 'import warnings\n'), ((1681, 1699), 'numpy.dot', 'np.dot', (['w_.T', 'cov_'], {}), '(w_.T, cov_)\n', (1687, 1699), True, 'import numpy as np\n'), ((6887, 6897), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (6895, 6897), True, 'import networkx as nx\n'), ((5819, 5833), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (5830, 5833), True, 'import numpy as np\n')] |
"""
Eigenvalue demonstration
The revolving red hand is the input vector and the blue hand is the linearly
transformed vector.
Four times every revolution the two hands are parallel (or anti-parallel), twice
to each eigenvector of the matrix A. The ratio of lengths, blue hand over
red hand, is the corresponding eigenvalue. The eigenvalue will be negative
if the hands are anti-parallel.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from math import pi, sin, cos
# Demo matrix whose eigen-decomposition is visualised.
A = np.array([
    [1, 2],
    [3, 3]
])
# Eigenvalues e and (column) eigenvectors x of A.
e, x = np.linalg.eig(A)
print(e)
print(x)
print(f"λ1 = {e[0]:.3f}, x1 = {np.real(x[:,0].flatten())}")
print(f"λ2 = {e[1]:.3f}, x2 = {np.real(x[:,1].flatten())}")
# Axis limit: the largest |eigenvalue| bounds the transformed hand length.
s = np.max(np.abs(e))
fig, ax = plt.subplots()
plt.axis([-s, s, -s, s])
plt.grid(True)
plt.title('Eigenvector demonstration')
plt.xlabel('x');
plt.ylabel('y');
plt.axis('equal')
plt.xlim(-s, s)
plt.ylim(-s, s)
ax.set_aspect('equal')
l1, = plt.plot([0, 0], [0, 0], color='r', linewidth=1.5) # input vector
l2, = plt.plot([0, 0], [0, 0], color='b', linewidth=1.5) # transformed vector
plt.legend(['$x$', r'${\bf A} x$'])
def animate(theta):
    """Update both hands for input angle ``theta`` (radians).

    The red hand is the unit input vector x = (cos t, sin t); the blue
    hand is its image A @ x under the linear map.
    """
    unit = np.r_[cos(theta), sin(theta)]
    image = A @ unit
    l1.set_xdata([0, unit[0]])
    l1.set_ydata([0, unit[1]])
    l2.set_xdata([0, image[0]])
    l2.set_ydata([0, image[1]])
    return l1, l2
# Sweep theta over one full revolution: 400 frames, 20 ms apart, looping.
myAnimation = animation.FuncAnimation(fig, animate, frames=np.linspace(0, 2 * pi, 400), blit=True, interval=20, repeat=True)
plt.show(block=True)
"numpy.abs",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"numpy.linalg.eig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"math.sin",
"math.cos",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlim",
"matplotlib.p... | [((525, 551), 'numpy.array', 'np.array', (['[[1, 2], [3, 3]]'], {}), '([[1, 2], [3, 3]])\n', (533, 551), True, 'import numpy as np\n'), ((583, 599), 'numpy.linalg.eig', 'np.linalg.eig', (['A'], {}), '(A)\n', (596, 599), True, 'import numpy as np\n'), ((773, 787), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (785, 787), True, 'import matplotlib.pyplot as plt\n'), ((788, 812), 'matplotlib.pyplot.axis', 'plt.axis', (['[-s, s, -s, s]'], {}), '([-s, s, -s, s])\n', (796, 812), True, 'import matplotlib.pyplot as plt\n'), ((813, 827), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (821, 827), True, 'import matplotlib.pyplot as plt\n'), ((828, 866), 'matplotlib.pyplot.title', 'plt.title', (['"""Eigenvector demonstration"""'], {}), "('Eigenvector demonstration')\n", (837, 866), True, 'import matplotlib.pyplot as plt\n'), ((867, 882), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (877, 882), True, 'import matplotlib.pyplot as plt\n'), ((884, 899), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (894, 899), True, 'import matplotlib.pyplot as plt\n'), ((901, 918), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (909, 918), True, 'import matplotlib.pyplot as plt\n'), ((919, 934), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-s)', 's'], {}), '(-s, s)\n', (927, 934), True, 'import matplotlib.pyplot as plt\n'), ((935, 950), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-s)', 's'], {}), '(-s, s)\n', (943, 950), True, 'import matplotlib.pyplot as plt\n'), ((981, 1031), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[0, 0]'], {'color': '"""r"""', 'linewidth': '(1.5)'}), "([0, 0], [0, 0], color='r', linewidth=1.5)\n", (989, 1031), True, 'import matplotlib.pyplot as plt\n'), ((1054, 1104), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[0, 0]'], {'color': '"""b"""', 'linewidth': '(1.5)'}), "([0, 0], [0, 0], color='b', linewidth=1.5)\n", (1062, 1104), 
True, 'import matplotlib.pyplot as plt\n'), ((1128, 1163), 'matplotlib.pyplot.legend', 'plt.legend', (["['$x$', '${\\\\bf A} x$']"], {}), "(['$x$', '${\\\\bf A} x$'])\n", (1138, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1517, 1537), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (1525, 1537), True, 'import matplotlib.pyplot as plt\n'), ((751, 760), 'numpy.abs', 'np.abs', (['e'], {}), '(e)\n', (757, 760), True, 'import numpy as np\n'), ((1446, 1473), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * pi)', '(400)'], {}), '(0, 2 * pi, 400)\n', (1457, 1473), True, 'import numpy as np\n'), ((1204, 1214), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1207, 1214), False, 'from math import pi, sin, cos\n'), ((1216, 1226), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1219, 1226), False, 'from math import pi, sin, cos\n')] |
import os
import subprocess
import urllib.request
import numpy as np
from dezero import as_variable
from dezero import Variable
def _dot_var(v, verbose=False):
dot_var = '{} [label="{}", color=orange, style=filled]\n'
name = '' if v.name is None else v.name
if verbose and v.data is not None:
if v.name is not None:
name += ': '
name += str(v.shape) + ' ' + str(v.dtype)
return dot_var.format(id(v), name)
def _dot_func(f):
dot_func = '{} [label="{}", color=lightblue, style=filled, shape=box]\n'
txt = dot_func.format(id(f), f.__class__.__name__)
dot_edge = '{} -> {}\n'
for x in f.inputs:
txt += dot_edge.format(id(x), id(f))
for y in f.outputs:
txt += dot_edge.format(id(f), id(y()))
return txt
def get_dot_graph(output, verbose=True):
    """Build the complete DOT source for the graph ending at ``output``.

    Walks the computational graph backwards from ``output`` through each
    variable's ``creator`` function, emitting every node and edge once.
    """
    pieces = [_dot_var(output, verbose)]
    pending = []
    visited = set()

    def enqueue(fn):
        # Each creator function is processed exactly once.
        if fn not in visited:
            pending.append(fn)
            visited.add(fn)

    enqueue(output.creator)
    while pending:
        fn = pending.pop()
        pieces.append(_dot_func(fn))
        for var in fn.inputs:
            pieces.append(_dot_var(var, verbose))
            if var.creator is not None:
                enqueue(var.creator)
    return 'digraph g {\n' + ''.join(pieces) + '}'
def plot_dot_graph(output, verbose=True, to_file='graph.png'):
    """Render the computational graph of ``output`` to an image file.

    Writes the DOT source to ``~/.dezero/tmp_graph.dot`` and invokes the
    Graphviz ``dot`` command (which must be on PATH).

    Args:
        output (Variable): terminal variable of the graph.
        verbose (bool): include shape/dtype in variable labels.
        to_file (str): output image path; its extension selects the
            Graphviz output format (e.g. ``png``, ``pdf``).
    """
    dot_graph = get_dot_graph(output, verbose)
    # Save the DOT source under ~/.dezero (created on demand; the original
    # exists/mkdir pair was racy and also left a debug print behind).
    tmp_dir = os.path.join(os.path.expanduser('~'), '.dezero')
    os.makedirs(tmp_dir, exist_ok=True)
    graph_path = os.path.join(tmp_dir, 'tmp_graph.dot')
    with open(graph_path, 'w') as f:
        f.write(dot_graph)
    # Call the dot command; a list argv avoids shell quoting issues with
    # spaces in paths (the original used a shell=True format string).
    extension = os.path.splitext(to_file)[1][1:]
    subprocess.run(['dot', graph_path, '-T', extension, '-o', to_file])
def sum_to(x, shape):
    """Sum elements of ``x`` over axes so the result has ``shape``.

    This is the reverse of NumPy broadcasting: leading extra dimensions
    and dimensions broadcast from size 1 are summed away.

    :param ndarray x: Input array
    :param shape: target shape (broadcast-compatible with ``x.shape``)
    :return: Output array of the shape
    :rtype: ndarray
    """
    target_ndim = len(shape)
    lead = x.ndim - target_ndim
    lead_axes = tuple(range(lead))
    # Axes that were broadcast from size 1 in the target shape.
    bcast_axes = tuple(lead + i for i, dim in enumerate(shape) if dim == 1)
    y = x.sum(lead_axes + bcast_axes, keepdims=True)
    if lead > 0:
        y = y.squeeze(lead_axes)
    return y
def reshape_sum_backward(gy, x_shape, axis, keepdims):
    """Reshape the gradient of a sum so it broadcasts back over ``x_shape``.

    After ``x.sum(axis, keepdims=False)`` the reduced axes are gone from
    ``gy``; re-insert them with length 1 so that ``gy`` can be broadcast
    against the original input during backprop.

    Args:
        gy (ndarray): gradient of the summed output.
        x_shape (tuple): shape of the original input ``x``.
        axis (None | int | tuple of int): axis argument given to ``sum``.
        keepdims (bool): keepdims argument given to ``sum``.

    Returns:
        ndarray: ``gy`` reshaped for broadcasting.
    """
    ndim = len(x_shape)
    if axis is None:
        tupled_axis = None
    elif not isinstance(axis, tuple):
        # BUG FIX: the original checked hasattr(axis, 'len'), which is
        # False for tuples too, so a tuple axis was wrapped again and
        # later crashed on the (tuple >= 0) comparison.
        tupled_axis = (axis,)
    else:
        tupled_axis = axis
    if not (ndim == 0 or tupled_axis is None or keepdims):
        # Normalise negative axes, then re-insert length-1 dimensions.
        actual_axis = [a if a >= 0 else a + ndim for a in tupled_axis]
        shape = list(gy.shape)
        for a in sorted(actual_axis):
            shape.insert(a, 1)
    else:
        shape = gy.shape
    return gy.reshape(shape)
def logsumexp(x, axis=1):
    """Numerically stable log(sum(exp(x))) along ``axis`` (keepdims).

    Subtracts the per-slice maximum before exponentiating so that large
    values cannot overflow.
    """
    peak = x.max(axis=axis, keepdims=True)
    shifted = np.exp(x - peak)
    total = np.log(shifted.sum(axis=axis, keepdims=True))
    return peak + total
# =============================================================================
# download function
# =============================================================================
def show_progress(block_num, block_size, total_size):
    """urlretrieve reporthook: draw a 30-char progress bar on one line."""
    downloaded = block_num * block_size
    percent = min(downloaded / total_size * 100, 100.0)
    filled = min(int(downloaded / total_size * 30), 30)
    bar = "#" * filled + "." * (30 - filled)
    # \r rewinds to the start of the line so the bar updates in place.
    print("\r[{}] {:.2f}%".format(bar, percent), end='')
# Shared download cache directory (~/.dezero), created lazily by get_file.
cache_dir = os.path.join(os.path.expanduser('~'), '.dezero')
def get_file(url, file_name=None):
    """Download a file from the `url` if it is not in the cache.
    The file at the `url` is downloaded to the `~/.dezero`.
    Args:
        url (str): URL of the file.
        file_name (str): Name of the file. If `None` is specified the original
            file name is used.
    Returns:
        str: Absolute path to the saved file.
    """
    if file_name is None:
        # Default to the last path component of the URL.
        file_name = url[url.rfind('/') + 1:]
    file_path = os.path.join(cache_dir, file_name)
    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)
    if os.path.exists(file_path):
        # Cache hit: skip the download entirely.
        return file_path
    print("Downloading: " + file_name)
    try:
        urllib.request.urlretrieve(url, file_path, show_progress)
    except (Exception, KeyboardInterrupt) as e:
        # Remove a partially downloaded file before propagating the error.
        if os.path.exists(file_path):
            os.remove(file_path)
        raise
    print(" Done")
    return file_path
# =============================================================================
# others
# =============================================================================
def get_deconv_outsize(size, k, s, p):
    """Output length of a transposed convolution (inverse of conv)."""
    return (size - 1) * s - 2 * p + k
def get_conv_outsize(input_size, kernel_size, stride, pad):
    """Output length of a convolution with the given stride and padding."""
    padded = input_size + 2 * pad
    return (padded - kernel_size) // stride + 1
def pair(x):
    """Normalise a scalar or 2-tuple hyperparameter to a 2-tuple.

    Args:
        x (int | tuple): a single size applied to both dimensions, or an
            explicit (height, width) pair.

    Returns:
        tuple: a 2-element tuple.

    Raises:
        ValueError: if ``x`` is neither an int nor a tuple (now with a
            message; the original raised a bare ValueError).
    """
    if isinstance(x, int):
        return (x, x)
    elif isinstance(x, tuple):
        assert len(x) == 2
        return x
    else:
        raise ValueError('cannot interpret {} as a pair'.format(x))
| [
"os.path.exists",
"numpy.log",
"subprocess.run",
"os.path.join",
"os.path.splitext",
"numpy.exp",
"os.mkdir",
"os.path.expanduser",
"os.remove"
] | [((1608, 1646), 'os.path.join', 'os.path.join', (['tmp_dir', '"""tmp_graph.dot"""'], {}), "(tmp_dir, 'tmp_graph.dot')\n", (1620, 1646), False, 'import os\n'), ((1859, 1890), 'subprocess.run', 'subprocess.run', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (1873, 1890), False, 'import subprocess\n'), ((3095, 3111), 'numpy.exp', 'np.exp', (['y'], {'out': 'y'}), '(y, out=y)\n', (3101, 3111), True, 'import numpy as np\n'), ((3156, 3172), 'numpy.log', 'np.log', (['s'], {'out': 's'}), '(s, out=s)\n', (3162, 3172), True, 'import numpy as np\n'), ((3768, 3791), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (3786, 3791), False, 'import os\n'), ((4276, 4310), 'os.path.join', 'os.path.join', (['cache_dir', 'file_name'], {}), '(cache_dir, file_name)\n', (4288, 4310), False, 'import os\n'), ((4386, 4411), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (4400, 4411), False, 'import os\n'), ((1474, 1497), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1492, 1497), False, 'import os\n'), ((1540, 1563), 'os.path.exists', 'os.path.exists', (['tmp_dir'], {}), '(tmp_dir)\n', (1554, 1563), False, 'import os\n'), ((1573, 1590), 'os.mkdir', 'os.mkdir', (['tmp_dir'], {}), '(tmp_dir)\n', (1581, 1590), False, 'import os\n'), ((4323, 4348), 'os.path.exists', 'os.path.exists', (['cache_dir'], {}), '(cache_dir)\n', (4337, 4348), False, 'import os\n'), ((4358, 4377), 'os.mkdir', 'os.mkdir', (['cache_dir'], {}), '(cache_dir)\n', (4366, 4377), False, 'import os\n'), ((1752, 1777), 'os.path.splitext', 'os.path.splitext', (['to_file'], {}), '(to_file)\n', (1768, 1777), False, 'import os\n'), ((4612, 4637), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (4626, 4637), False, 'import os\n'), ((4651, 4671), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (4660, 4671), False, 'import os\n')] |
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
from sklearn.manifold import TSNE
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_data(path, label_name):
    """Load a CSV file and pull out one column as the label vector.

    :param path: path to the CSV file.
    :param label_name: name of the label column.
    :return: (DataFrame, ndarray) — the full frame (the label column is NOT
        dropped) and the label column as a numpy array.
    """
    frame = pd.read_csv(path)
    return frame, frame[label_name].to_numpy()
def pretreatment(data):
    """Impute missing values with the per-column mean.

    :param data: DataFrame (or 2-D array) that may contain NaNs.
    :return: numpy array with every NaN replaced by its column mean.
    """
    # np.nan instead of np.NAN: the upper-case alias was deprecated and
    # removed in NumPy 2.0.
    imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
    # fit_transform replaces the redundant fit()-then-transform() pair;
    # the copy keeps the caller's data untouched, as before.
    return imputer.fit_transform(data.copy())
def plot_emmbedding(data):
    """Min-max scale every column of *data* into [0, 1].

    Note: numpy axis=0 reduces over rows, i.e. per-column min/max.
    """
    col_min = np.min(data, 0)
    col_max = np.max(data, 0)
    return (data - col_min) / (col_max - col_min)
def tsne(data, n_components, labels=None):
    """Embed *data* into 2 or 3 dimensions with t-SNE and scatter-plot it.

    :param data: 2-D array of samples to embed.
    :param n_components: target dimensionality; must be 2 or 3.
    :param labels: optional per-sample values used to color the points.
        Defaults to the module-level ``label`` (set in ``__main__``) for
        backward compatibility with existing callers.
    """
    if labels is None:
        labels = label  # preserves the original global-variable behavior
    if n_components not in (2, 3):
        print("The value of n_components can only be 2 or 3")
    else:
        # NOTE(review): passing init="pca" to TSNE made all points collapse
        # into a single blob for this data, so the default init is kept.
        # Local renamed from `tsne` to avoid shadowing this function's name.
        model = TSNE(n_components)
        aim_data = plot_emmbedding(model.fit_transform(data))
        if n_components == 2:
            plt.figure()
            plt.subplot(111)
            plt.scatter(aim_data[:, 0], aim_data[:, 1], c=labels)
        else:
            fig = plt.figure()
            plt.subplot(111)
            ax = Axes3D(fig)
            ax.view_init(0, 45)  # (elevation, azimuth)
            # Coloring by label separates the classes visually.
            ax.scatter(aim_data[:, 0], aim_data[:, 1], aim_data[:, 2], c=labels)
    plt.show()
if __name__ == "__main__":
# 1-64列为公司财务数据;65列名为class,为1/0代表在预测期破产/未破产的公司
data, label = get_data("./csv_result-1year.csv", "class")
data_tmp = pretreatment(data)
tsne(data_tmp, 3)
tsne(data_tmp, 2)
| [
"pandas.read_csv",
"sklearn.manifold.TSNE",
"numpy.max",
"matplotlib.pyplot.figure",
"sklearn.impute.SimpleImputer",
"matplotlib.pyplot.scatter",
"numpy.min",
"matplotlib.pyplot.subplot",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.show"
] | [((252, 269), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (263, 269), True, 'import pandas as pd\n'), ((474, 527), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.NAN', 'strategy': '"""mean"""'}), "(missing_values=np.NAN, strategy='mean')\n", (487, 527), False, 'from sklearn.impute import SimpleImputer\n'), ((1594, 1604), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1602, 1604), True, 'import matplotlib.pyplot as plt\n'), ((695, 710), 'numpy.min', 'np.min', (['data', '(0)'], {}), '(data, 0)\n', (701, 710), True, 'import numpy as np\n'), ((712, 727), 'numpy.max', 'np.max', (['data', '(0)'], {}), '(data, 0)\n', (718, 727), True, 'import numpy as np\n'), ((912, 930), 'sklearn.manifold.TSNE', 'TSNE', (['n_components'], {}), '(n_components)\n', (916, 930), False, 'from sklearn.manifold import TSNE\n'), ((1030, 1042), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1040, 1042), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1067), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1062, 1067), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1128), 'matplotlib.pyplot.scatter', 'plt.scatter', (['aim_data[:, 0]', 'aim_data[:, 1]'], {'c': 'label'}), '(aim_data[:, 0], aim_data[:, 1], c=label)\n', (1087, 1128), True, 'import matplotlib.pyplot as plt\n'), ((1172, 1190), 'sklearn.manifold.TSNE', 'TSNE', (['n_components'], {}), '(n_components)\n', (1176, 1190), False, 'from sklearn.manifold import TSNE\n'), ((1296, 1308), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1306, 1308), True, 'import matplotlib.pyplot as plt\n'), ((1317, 1333), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1328, 1333), True, 'import matplotlib.pyplot as plt\n'), ((1347, 1358), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (1353, 1358), False, 'from mpl_toolkits.mplot3d import Axes3D\n')] |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from extensions.ops.elementwise import *
from mo.front.extractor import FrontExtractorOp
from mo.front.common.replacement import FrontReplacementOp
from mo.graph.graph import Graph, Node
from mo.ops.eltwise_n import EltwiseNAdd, EltwiseNMax
from mo.ops.power import AttributedPower
from extensions.ops.activation_ops import *
from mo.ops.const import Const
class AddFrontExtractor(FrontExtractorOp):
    """Maps a framework 'Add' node onto the elementwise Add operation."""
    op = 'Add'
    enabled = True
    @classmethod
    def extract(cls, node: Node) -> bool:
        # Populate the node attributes for elementwise addition.
        Add.update_node_stat(node)
        return cls.enabled
class SubFrontExtractor(FrontExtractorOp):
    """Maps a framework 'Sub' node onto the elementwise Sub operation."""
    op = 'Sub'
    enabled = True
    @classmethod
    def extract(cls, node: Node) -> bool:
        # Populate the node attributes for elementwise subtraction.
        Sub.update_node_stat(node)
        return cls.enabled
class MulFrontExtractor(FrontExtractorOp):
    """Maps a framework 'Mul' node onto the elementwise Mul operation."""
    op = 'Mul'
    enabled = True
    @classmethod
    def extract(cls, node: Node) -> bool:
        # Populate the node attributes for elementwise multiplication.
        Mul.update_node_stat(node)
        return cls.enabled
class DivFrontExtractor(FrontExtractorOp):
    """Maps a framework 'Div' node onto the elementwise Div operation."""
    op = 'Div'
    enabled = True
    @classmethod
    def extract(cls, node: Node) -> bool:
        # Populate the node attributes for elementwise division.
        Div.update_node_stat(node)
        return cls.enabled
class AbsFrontExtractor(FrontExtractorOp):
    """Maps a framework 'Abs' node onto the elementwise Abs operation."""
    op = 'Abs'
    enabled = True
    @classmethod
    def extract(cls, node: Node) -> bool:
        # Populate the node attributes for elementwise absolute value.
        Abs.update_node_stat(node)
        return cls.enabled
class PowFrontExtractor(FrontExtractorOp):
    """Maps a framework 'Pow' node onto AttributedPower with a fixed exponent."""
    op = 'Pow'
    enabled = True
    @classmethod
    def extract(cls, node: Node) -> bool:
        attrs = {
            # The exponent is read from the traced module attached to the node.
            'power': node.module.exponent,
        }
        AttributedPower.update_node_stat(node, attrs)
        return cls.enabled
# log2(x) = ln(x) / ln(2)
class Log2Replacement(FrontReplacementOp):
    """Expresses 'Log2' as natural log divided by the constant ln(2)."""
    op = 'Log2'
    enabled = True
    def replace_op(self, graph: Graph, node: Node):
        # Natural logarithm of the original input.
        log = Log(graph, dict(name=node.name + '/log')).create_node([node.in_node(0)])
        # Constant ln(2) used as the divisor.
        scale = Const(graph, {'value': np.log(2)}).create_node()
        div = Div(graph, dict(name=node.name + '/scale')).create_node([log, scale])
        # Return the id of the node that replaces the original op's output.
        return [div.id]
class LessFrontExtractor(FrontExtractorOp):
    """Maps a framework 'Less' node onto the elementwise Less comparison."""
    op = 'Less'
    enabled = True
    @classmethod
    def extract(cls, node: Node) -> bool:
        # Populate the node attributes for elementwise less-than.
        Less.update_node_stat(node)
        return cls.enabled
class ZerosLike(FrontExtractorOp):
    """Maps 'ZerosLike' onto AttributedPower with scale 0 (output is all zeros)."""
    op = 'ZerosLike'
    enabled = True
    @classmethod
    def extract(cls, node: Node) -> bool:
        # scale=0 multiplies the input by zero, reproducing zeros_like.
        AttributedPower.update_node_stat(node, {'scale': 0})
        return cls.enabled
class SoftPlusOp(FrontExtractorOp):
    """Maps a framework 'SoftPlus' node onto the SoftPlus activation op."""
    op = 'SoftPlus'
    enabled = True
    @classmethod
    def extract(cls, node: Node) -> bool:
        # Populate the node attributes for the SoftPlus activation.
        SoftPlus.update_node_stat(node)
        return cls.enabled
| [
"numpy.log",
"mo.ops.power.AttributedPower.update_node_stat"
] | [((2136, 2181), 'mo.ops.power.AttributedPower.update_node_stat', 'AttributedPower.update_node_stat', (['node', 'attrs'], {}), '(node, attrs)\n', (2168, 2181), False, 'from mo.ops.power import AttributedPower\n'), ((2955, 3007), 'mo.ops.power.AttributedPower.update_node_stat', 'AttributedPower.update_node_stat', (['node', "{'scale': 0}"], {}), "(node, {'scale': 0})\n", (2987, 3007), False, 'from mo.ops.power import AttributedPower\n'), ((2494, 2503), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2500, 2503), True, 'import numpy as np\n')] |
import os
import json
import random
import numpy as np
import tensorflow as tf
import src.core.constants as constants
import src.retina_net.anchor_generator.box_utils as box_utils
import src.retina_net.datasets.dataset_utils as dataset_utils
from src.core.abstract_classes.dataset_handler import DatasetHandler
from src.retina_net.anchor_generator.fpn_anchor_generator import FpnAnchorGenerator
class BddDatasetHandler(DatasetHandler):
    """tf.data input pipeline for BDD-style 2D detection data.

    Loads images and JSON box labels from disk and, for every sample, builds
    the FPN anchor grid plus — during training/validation — the per-anchor
    classification and box-regression targets.
    """
    def __init__(self, config, train_val_test):
        """Initialize directories and load the sample list.

        :param config: configuration dictionary
        :param train_val_test (string): 'train', 'val', or 'test'
        """
        super().__init__(config)
        # Per-channel image means used for input normalization.
        self._MEANS_DICT = constants.MEANS_DICT
        # Load sub-configs
        self.anchor_gen_config = config['anchor_generator']
        self.training_data_config = config['bdd']['training_data_config']
        # Resolve dataset paths
        paths_config = config['bdd']['paths_config']
        self.dataset_dir = os.path.expanduser(paths_config['dataset_dir'])
        data_set_size = paths_config['100k_or_10k']
        if train_val_test == 'train':
            self.data_split_dir = train_val_test
            self.label_file_name = 'train.json'
            # Allows training on a random fraction of the split.
            self.frac_training_data = self.training_data_config['frac_training_data']
        else:
            # Both 'val' and 'test' read the validation split, in full.
            self.data_split_dir = 'val'
            self.label_file_name = 'val.json'
            self.frac_training_data = 1.0
        self.im_dir = os.path.join(
            self.dataset_dir, 'images', data_set_size, self.data_split_dir)
        self.gt_label_dir = os.path.join(
            self.dataset_dir, 'labels')
        # Make sure dataset directories exist
        dataset_utils.check_data_dirs([self.im_dir, self.gt_label_dir])
        # Build the (possibly subsampled and shuffled) list of sample ids.
        self._load_sample_ids()
        self.epoch_size = len(self.sample_ids)
        # All annotations for the split, kept in memory as a list of dicts.
        self.labels = json.load(open(os.path.join(
            self.gt_label_dir, self.label_file_name), 'r'))
        # Placeholder for the tf.data.Dataset built by create_dataset().
        self.dataset = None
        # In test mode, no anchor targets are generated (no ground truth).
        self.is_testing = (train_val_test == 'test')
    def _load_sample_ids(self):
        """
        Loads sample ids (image file names) to read the dataset; for the
        training split the list is shuffled and optionally subsampled.
        """
        sample_ids = os.listdir(self.im_dir)
        # Random shuffle here is much more computationally efficient than randomly shuffling a dataset iterator.
        if self.frac_training_data != 1.0 and self.data_split_dir == 'train':
            # Keep only a random fraction of the training images.
            percent_samples = int(len(sample_ids) * self.frac_training_data)
            inds = np.random.choice(
                len(sample_ids), percent_samples, replace=False)
            self.sample_ids = [sample_ids[ind] for ind in inds]
        elif self.data_split_dir == 'train':
            random.shuffle(sample_ids)
            self.sample_ids = sample_ids
        else:
            # Validation/test: keep directory order unchanged.
            self.sample_ids = sample_ids
        # Full image paths corresponding one-to-one to self.sample_ids.
        self.im_paths = [
            self.im_dir +
            '/' +
            sample for sample in self.sample_ids]
    def set_sample_id(self, sample_index):
        # Restrict the handler to a single sample (e.g. for debugging or
        # single-image inference).
        self.im_paths = [self.im_paths[sample_index]]
        self.sample_ids = [self.sample_ids[sample_index]]
    def create_dataset(self):
        """
        Create dataset using tf.dataset API
        :return: dataset : dataset object
        """
        # Set data path lists
        im_paths = self.im_paths
        sample_ids = self.sample_ids
        # Pair every image path with its sample id.
        dataset = tf.data.Dataset.from_tensor_slices((im_paths, sample_ids))
        # Map each pair to a full sample dictionary, in parallel.
        self.dataset = dataset.map(
            self.create_sample_dict,
            num_parallel_calls=10)
        return self.dataset
    def create_sample_dict(
            self,
            im_path,
            sample_id):
        """
        Creates sample dictionary for a single sample
        :param im_path: left image path
        :param sample_id: ground truth sample id
        :return: sample_dict: Sample dictionary filled with input tensors
        """
        with tf.name_scope('input_data'):
            # Read and decode the image, then normalize it.
            image_as_string = tf.io.read_file(im_path)
            image = tf.image.decode_jpeg(image_as_string, channels=3)
            image = tf.cast(image, tf.float32)
            # self.im_normalization is presumably set by the DatasetHandler
            # base class — confirm against its __init__.
            image_norm = dataset_utils.mean_image_subtraction(
                image, self._MEANS_DICT[self.im_normalization])
            # Flip channels to BGR since pretrained weights use this
            # configuration.
            channels = tf.unstack(image_norm, axis=-1)
            image_norm = tf.stack(
                [channels[2], channels[1], channels[0]], axis=-1)
            # Labels are read in eager Python code via py_function.
            boxes_class_gt, boxes_2d_gt, no_gt = tf.py_function(
                self._read_labels, [sample_id], [
                    tf.float32, tf.float32, tf.bool])
            # Assemble the sample dictionary.
            sample_dict = dict()
            sample_dict.update({constants.IMAGE_NORMALIZED_KEY: image_norm})
            sample_dict.update(
                {constants.ORIGINAL_IM_SIZE_KEY: tf.shape(image)})
            # Create prior anchors and, unless testing, anchor targets.
            generator = FpnAnchorGenerator(self.anchor_gen_config)
            boxes_2d_gt_vuhw = box_utils.vuvu_to_vuhw(boxes_2d_gt)
            anchors_list = []
            anchors_class_target_list = []
            anchors_box_target_list = []
            anchors_positive_mask_list = []
            anchors_negative_mask_list = []
            for layer_number in self.anchor_gen_config['layers']:
                # Anchors for one FPN pyramid level.
                anchors = generator.generate_anchors(
                    tf.shape(image_norm), layer_number)
                anchors_list.append(anchors)
                if not self.is_testing:
                    # Match anchors to ground truth by IoU and derive the
                    # classification/regression targets for this level.
                    anchor_corners = box_utils.vuhw_to_vuvu(anchors)
                    ious = box_utils.bbox_iou_vuvu(anchor_corners, boxes_2d_gt)
                    positive_anchor_mask, negative_anchor_mask, max_ious = generator.positive_negative_batching(
                        ious, self.anchor_gen_config['min_positive_iou'],
                        self.anchor_gen_config['max_negative_iou'])
                    anchors_positive_mask_list.append(positive_anchor_mask)
                    anchors_negative_mask_list.append(negative_anchor_mask)
                    anchor_box_targets, anchor_class_targets = generator.generate_anchor_targets(
                        anchors, boxes_2d_gt_vuhw, boxes_class_gt, max_ious,
                        positive_anchor_mask)
                    anchors_box_target_list.append(anchor_box_targets)
                    anchors_class_target_list.append(anchor_class_targets)
            # Sample dict is stacked from p3 --> p7, this is essential to
            # memorize for stacking the predictions later on
            sample_dict.update(
                {constants.ANCHORS_KEY: tf.concat(anchors_list, axis=0)})
            if not self.is_testing:
                sample_dict.update({constants.ANCHORS_BOX_TARGETS_KEY: tf.concat(
                    anchors_box_target_list, axis=0),
                    constants.ANCHORS_CLASS_TARGETS_KEY: tf.concat(
                        anchors_class_target_list, axis=0),
                    constants.POSITIVE_ANCHORS_MASK_KEY: tf.concat(
                        anchors_positive_mask_list, axis=0),
                    constants.NEGATIVE_ANCHOR_MASK_KEY: tf.concat(
                        anchors_negative_mask_list, axis=0)})
        return sample_dict
    def _read_labels(self, sample_id):
        """
        Reads ground truth labels and parses them into one hot class representation and groundtruth 2D bounding box.
        """
        sample_id = sample_id.numpy()
        # Collect this frame's annotations for the configured categories.
        no_gt = False
        categories = self.training_data_config['categories']
        boxes_class_gt = []
        sample_id = sample_id.decode("utf-8")
        frame_labels = [label for label in self.labels if
                        label['name'] == sample_id and label[
                            'category'] in categories]
        # BDD 'bbox' is presumably [x1, y1, x2, y2]; indices are swapped here
        # to the [v1, u1, v2, u2] order box_utils expects — TODO confirm.
        boxes_2d_gt = np.array([[label['bbox'][1],
                                  label['bbox'][0],
                                  label['bbox'][3],
                                  label['bbox'][2]] for label in frame_labels])
        categories_gt = [label['category'] for label in frame_labels]
        if boxes_2d_gt.size == 0:
            # No ground truth: emit one dummy box with an all-zero one-hot
            # vector and flag the sample with no_gt.
            cat_one_hot = [0 for e in range(len(categories) + 1)]
            boxes_2d_gt = np.array([0.0, 0.0, 1.0, 1.0])
            boxes_class_gt.append(cat_one_hot)
            no_gt = True
        else:
            for elem in categories_gt:
                # One-hot vector with one extra slot (len(categories) + 1).
                cat_one_hot = [0 for e in range(len(categories) + 1)]
                cat_idx = categories.index(elem.lower())
                cat_one_hot[cat_idx] = 1
                boxes_class_gt.append(cat_one_hot)
        # one-hot representation dependent on config file
        if len(boxes_2d_gt.shape) == 1:
            boxes_2d_gt = np.expand_dims(boxes_2d_gt, axis=0)
        return [np.array(boxes_class_gt).astype(np.float32),
                np.array(boxes_2d_gt).astype(np.float32),
                no_gt]
| [
"tensorflow.unstack",
"tensorflow.shape",
"src.retina_net.anchor_generator.box_utils.vuhw_to_vuvu",
"tensorflow.io.read_file",
"src.retina_net.anchor_generator.box_utils.bbox_iou_vuvu",
"numpy.array",
"tensorflow.cast",
"src.retina_net.anchor_generator.box_utils.vuvu_to_vuhw",
"os.listdir",
"tenso... | [((1064, 1111), 'os.path.expanduser', 'os.path.expanduser', (["paths_config['dataset_dir']"], {}), "(paths_config['dataset_dir'])\n", (1082, 1111), False, 'import os\n'), ((1551, 1627), 'os.path.join', 'os.path.join', (['self.dataset_dir', '"""images"""', 'data_set_size', 'self.data_split_dir'], {}), "(self.dataset_dir, 'images', data_set_size, self.data_split_dir)\n", (1563, 1627), False, 'import os\n'), ((1669, 1709), 'os.path.join', 'os.path.join', (['self.dataset_dir', '"""labels"""'], {}), "(self.dataset_dir, 'labels')\n", (1681, 1709), False, 'import os\n'), ((1778, 1841), 'src.retina_net.datasets.dataset_utils.check_data_dirs', 'dataset_utils.check_data_dirs', (['[self.im_dir, self.gt_label_dir]'], {}), '([self.im_dir, self.gt_label_dir])\n', (1807, 1841), True, 'import src.retina_net.datasets.dataset_utils as dataset_utils\n'), ((2354, 2377), 'os.listdir', 'os.listdir', (['self.im_dir'], {}), '(self.im_dir)\n', (2364, 2377), False, 'import os\n'), ((3621, 3679), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(im_paths, sample_ids)'], {}), '((im_paths, sample_ids))\n', (3655, 3679), True, 'import tensorflow as tf\n'), ((5274, 5316), 'src.retina_net.anchor_generator.fpn_anchor_generator.FpnAnchorGenerator', 'FpnAnchorGenerator', (['self.anchor_gen_config'], {}), '(self.anchor_gen_config)\n', (5292, 5316), False, 'from src.retina_net.anchor_generator.fpn_anchor_generator import FpnAnchorGenerator\n'), ((5344, 5379), 'src.retina_net.anchor_generator.box_utils.vuvu_to_vuhw', 'box_utils.vuvu_to_vuhw', (['boxes_2d_gt'], {}), '(boxes_2d_gt)\n', (5366, 5379), True, 'import src.retina_net.anchor_generator.box_utils as box_utils\n'), ((8052, 8167), 'numpy.array', 'np.array', (["[[label['bbox'][1], label['bbox'][0], label['bbox'][3], label['bbox'][2]] for\n label in frame_labels]"], {}), "([[label['bbox'][1], label['bbox'][0], label['bbox'][3], label[\n 'bbox'][2]] for label in frame_labels])\n", (8060, 8167), True, 
'import numpy as np\n'), ((4201, 4228), 'tensorflow.name_scope', 'tf.name_scope', (['"""input_data"""'], {}), "('input_data')\n", (4214, 4228), True, 'import tensorflow as tf\n'), ((4285, 4309), 'tensorflow.io.read_file', 'tf.io.read_file', (['im_path'], {}), '(im_path)\n', (4300, 4309), True, 'import tensorflow as tf\n'), ((4330, 4379), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_as_string'], {'channels': '(3)'}), '(image_as_string, channels=3)\n', (4350, 4379), True, 'import tensorflow as tf\n'), ((4400, 4426), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (4407, 4426), True, 'import tensorflow as tf\n'), ((4453, 4542), 'src.retina_net.datasets.dataset_utils.mean_image_subtraction', 'dataset_utils.mean_image_subtraction', (['image', 'self._MEANS_DICT[self.im_normalization]'], {}), '(image, self._MEANS_DICT[self.\n im_normalization])\n', (4489, 4542), True, 'import src.retina_net.datasets.dataset_utils as dataset_utils\n'), ((4677, 4708), 'tensorflow.unstack', 'tf.unstack', (['image_norm'], {'axis': '(-1)'}), '(image_norm, axis=-1)\n', (4687, 4708), True, 'import tensorflow as tf\n'), ((4734, 4792), 'tensorflow.stack', 'tf.stack', (['[channels[2], channels[1], channels[0]]'], {'axis': '(-1)'}), '([channels[2], channels[1], channels[0]], axis=-1)\n', (4742, 4792), True, 'import tensorflow as tf\n'), ((4860, 4946), 'tensorflow.py_function', 'tf.py_function', (['self._read_labels', '[sample_id]', '[tf.float32, tf.float32, tf.bool]'], {}), '(self._read_labels, [sample_id], [tf.float32, tf.float32, tf.\n bool])\n', (4874, 4946), True, 'import tensorflow as tf\n'), ((8460, 8490), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (8468, 8490), True, 'import numpy as np\n'), ((8962, 8997), 'numpy.expand_dims', 'np.expand_dims', (['boxes_2d_gt'], {'axis': '(0)'}), '(boxes_2d_gt, axis=0)\n', (8976, 8997), True, 'import numpy as np\n'), ((1984, 2037), 'os.path.join', 
'os.path.join', (['self.gt_label_dir', 'self.label_file_name'], {}), '(self.gt_label_dir, self.label_file_name)\n', (1996, 2037), False, 'import os\n'), ((2870, 2896), 'random.shuffle', 'random.shuffle', (['sample_ids'], {}), '(sample_ids)\n', (2884, 2896), False, 'import random\n'), ((5185, 5200), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (5193, 5200), True, 'import tensorflow as tf\n'), ((5692, 5712), 'tensorflow.shape', 'tf.shape', (['image_norm'], {}), '(image_norm)\n', (5700, 5712), True, 'import tensorflow as tf\n'), ((5839, 5870), 'src.retina_net.anchor_generator.box_utils.vuhw_to_vuvu', 'box_utils.vuhw_to_vuvu', (['anchors'], {}), '(anchors)\n', (5861, 5870), True, 'import src.retina_net.anchor_generator.box_utils as box_utils\n'), ((5894, 5946), 'src.retina_net.anchor_generator.box_utils.bbox_iou_vuvu', 'box_utils.bbox_iou_vuvu', (['anchor_corners', 'boxes_2d_gt'], {}), '(anchor_corners, boxes_2d_gt)\n', (5917, 5946), True, 'import src.retina_net.anchor_generator.box_utils as box_utils\n'), ((6876, 6907), 'tensorflow.concat', 'tf.concat', (['anchors_list'], {'axis': '(0)'}), '(anchors_list, axis=0)\n', (6885, 6907), True, 'import tensorflow as tf\n'), ((7009, 7051), 'tensorflow.concat', 'tf.concat', (['anchors_box_target_list'], {'axis': '(0)'}), '(anchors_box_target_list, axis=0)\n', (7018, 7051), True, 'import tensorflow as tf\n'), ((7123, 7167), 'tensorflow.concat', 'tf.concat', (['anchors_class_target_list'], {'axis': '(0)'}), '(anchors_class_target_list, axis=0)\n', (7132, 7167), True, 'import tensorflow as tf\n'), ((7239, 7284), 'tensorflow.concat', 'tf.concat', (['anchors_positive_mask_list'], {'axis': '(0)'}), '(anchors_positive_mask_list, axis=0)\n', (7248, 7284), True, 'import tensorflow as tf\n'), ((7355, 7400), 'tensorflow.concat', 'tf.concat', (['anchors_negative_mask_list'], {'axis': '(0)'}), '(anchors_negative_mask_list, axis=0)\n', (7364, 7400), True, 'import tensorflow as tf\n'), ((9014, 9038), 'numpy.array', 
'np.array', (['boxes_class_gt'], {}), '(boxes_class_gt)\n', (9022, 9038), True, 'import numpy as np\n'), ((9075, 9096), 'numpy.array', 'np.array', (['boxes_2d_gt'], {}), '(boxes_2d_gt)\n', (9083, 9096), True, 'import numpy as np\n')] |
import argparse
import cv2
import numpy as np
import torch
from torch.autograd import Function
from torchvision import models
import torch.nn as nn
from model import *
import matplotlib.pyplot as plt
class FeatureExtractor():
    """Runs a sequential feature stack layer by layer, keeping the
    activations of the requested layers and hooking them so that their
    gradients are recorded during the backward pass."""
    def __init__(self, model, target_layers):
        self.model = model
        self.target_layers = target_layers
        self.gradients = []
    def save_gradient(self, grad):
        # Invoked by autograd for every hooked activation.
        self.gradients.append(grad)
    def __call__(self, x):
        # Reset recorded gradients for this forward pass.
        self.gradients = []
        captured = []
        for layer_name, layer in self.model._modules.items():
            x = layer(x)
            if layer_name in self.target_layers:
                x.register_hook(self.save_gradient)
                captured.append(x)
        return captured, x
class ModelOutputs():
    """Performs one forward pass through the model and exposes:
    1. the final network output,
    2. the activations of the targeted intermediate layers,
    3. (via get_gradients) their gradients after a backward pass."""
    def __init__(self, model, target_layers):
        self.model = model
        self.feature_extractor = FeatureExtractor(self.model.features, target_layers)
    def get_gradients(self):
        # Gradients recorded by the hooks inside FeatureExtractor.
        return self.feature_extractor.gradients
    def __call__(self, x):
        target_activations, features = self.feature_extractor(x)
        # Flatten spatial dimensions before the fully-connected classifier.
        flattened = features.view(features.size(0), -1)
        logits = self.model.classifier(flattened)
        return target_activations, logits
def preprocess_image(img):
    """Turn an HxWx3 float image (BGR, values in [0, 1]) into a normalized
    1x3xHxW torch tensor with gradients enabled.

    Channels are reversed to RGB and standardized with the ImageNet
    per-channel means and standard deviations.
    """
    channel_means = [0.485, 0.456, 0.406]
    channel_stds = [0.229, 0.224, 0.225]
    # Reverse the channel order on a copy so the caller's array is untouched.
    normalized = img.copy()[:, :, ::-1]
    for channel, (mean, std) in enumerate(zip(channel_means, channel_stds)):
        normalized[:, :, channel] = (normalized[:, :, channel] - mean) / std
    # HWC -> CHW, contiguous for torch.from_numpy.
    chw = np.ascontiguousarray(np.transpose(normalized, (2, 0, 1)))
    tensor = torch.from_numpy(chw)
    tensor.unsqueeze_(0)
    return tensor.requires_grad_(True)
def show_cam_on_image(img, mask):
    """Overlay a Grad-CAM mask on the input image, display it and save it.

    :param img: float image with values in [0, 1], shape HxWx3.
    :param mask: activation map, presumably in [0, 1] — same spatial size.

    Side effects: opens a matplotlib window and writes
    'People_class_GC_1.png' into the working directory.
    """
    jet = np.float32(cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)) / 255
    overlay = jet + np.float32(img)
    overlay = overlay / np.max(overlay)
    print("Cam Shape", overlay.shape)
    fig = plt.figure()
    overlay = cv2.cvtColor(overlay, cv2.COLOR_RGB2BGR)
    plt.axis("off")
    plt.imshow(overlay)
    plt.show()
    fig.savefig("People_class_GC_1.png", transparent=True)
class GradCam:
    """Computes a Grad-CAM heatmap for one image and one class index.

    The heatmap is the ReLU of the gradient-weighted sum of the target
    layer's feature maps, resized to 224x224 and min-max normalized.
    """
    def __init__(self, model, target_layer_names, use_cuda):
        self.model = model
        self.model.eval()
        self.cuda = use_cuda
        if self.cuda:
            self.model = model.cuda()
        # Wraps the model so intermediate activations/gradients are captured.
        self.extractor = ModelOutputs(self.model, target_layer_names)
    def forward(self, input):
        return self.model(input)
    def __call__(self, input, index=None):
        # Forward pass, collecting the targeted activations on the way.
        if self.cuda:
            features, output = self.extractor(input.cuda())
        else:
            features, output = self.extractor(input)
        # Default to the highest-scoring class.
        if index == None:
            index = np.argmax(output.cpu().data.numpy())
        # One-hot selector so backward() propagates only the chosen class score.
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        one_hot = torch.from_numpy(one_hot).requires_grad_(True)
        if self.cuda:
            one_hot = torch.sum(one_hot.cuda() * output)
        else:
            one_hot = torch.sum(one_hot * output)
        self.model.features.zero_grad()
        self.model.classifier.zero_grad()
        one_hot.backward(retain_graph=True)
        # Gradient of the class score w.r.t. the last targeted layer.
        grads_val = self.extractor.get_gradients()[-1].cpu().data.numpy()
        target = features[-1]
        target = target.cpu().data.numpy()[0, :]
        # Channel weights: gradients averaged over the spatial dimensions.
        weights = np.mean(grads_val, axis=(2, 3))[0, :]
        cam = np.zeros(target.shape[1:], dtype=np.float32)
        # Weighted sum of feature maps.
        for i, w in enumerate(weights):
            cam += w * target[i, :, :]
        # Keep only positive influence, resize to the input size, normalize to [0, 1].
        cam = np.maximum(cam, 0)
        cam = cv2.resize(cam, (224, 224))
        cam = cam - np.min(cam)
        cam = cam / np.max(cam)
        return cam
class GuidedBackpropReLU(Function):
    """Autograd Function implementing guided-backprop ReLU: the forward pass
    is a normal ReLU, while the backward pass zeroes gradients both where
    the input was non-positive and where the incoming gradient is
    non-positive."""
    @staticmethod
    def forward(self, input):
        positive_mask = (input > 0).type_as(input)
        # addcmul(zeros, input, mask) == input * mask, i.e. ReLU(input).
        output = torch.addcmul(torch.zeros(input.size()).type_as(input), input, positive_mask)
        self.save_for_backward(input, output)
        return output
    @staticmethod
    def backward(self, grad_output):
        input, output = self.saved_tensors
        grad_input = None
        # Mask 1: positions where the forward input was positive.
        positive_mask_1 = (input > 0).type_as(grad_output)
        # Mask 2: positions where the incoming gradient is positive.
        positive_mask_2 = (grad_output > 0).type_as(grad_output)
        # grad_input = grad_output * mask1 * mask2.
        grad_input = torch.addcmul(torch.zeros(input.size()).type_as(input),
                                   torch.addcmul(torch.zeros(input.size()).type_as(input), grad_output,
                                                 positive_mask_1), positive_mask_2)
        return grad_input
class GuidedBackpropReLUModel:
    """Computes the guided-backprop saliency of one class score w.r.t. the
    input image, by swapping every ReLU in model.features for
    GuidedBackpropReLU before back-propagating."""
    def __init__(self, model, use_cuda):
        self.model = model
        self.model.eval()
        self.cuda = use_cuda
        if self.cuda:
            self.model = model.cuda()
        # replace ReLU with GuidedBackpropReLU
        # NOTE: mutates the passed-in model's feature modules in place.
        for idx, module in self.model.features._modules.items():
            if module.__class__.__name__ == 'ReLU':
                self.model.features._modules[idx] = GuidedBackpropReLU.apply
    def forward(self, input):
        return self.model(input)
    def __call__(self, input, index=None):
        # `input` must have requires_grad=True so input.grad is populated.
        if self.cuda:
            output = self.forward(input.cuda())
        else:
            output = self.forward(input)
        # Default to the highest-scoring class.
        if index == None:
            index = np.argmax(output.cpu().data.numpy())
        # One-hot selector so backward() propagates only the chosen class score.
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        one_hot = torch.from_numpy(one_hot).requires_grad_(True)
        if self.cuda:
            one_hot = torch.sum(one_hot.cuda() * output)
        else:
            one_hot = torch.sum(one_hot * output)
        # self.model.features.zero_grad()
        # self.model.classifier.zero_grad()
        one_hot.backward(retain_graph=True)
        # The saliency is the gradient that reached the input image.
        output = input.grad.cpu().data.numpy()
        output = output[0, :, :, :]
        return output
def get_args():
    """Parse the command-line flags for the Grad-CAM script.

    :return: argparse.Namespace with ``use_cuda`` (bool, forced off when no
        CUDA device is available) and ``image_path`` (str).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--use-cuda', action='store_true', default=False,
                            help='Use NVIDIA GPU acceleration')
    arg_parser.add_argument('--image-path', type=str, default='./examples/both.png',
                            help='Input image path')
    args = arg_parser.parse_args()
    # Only honour --use-cuda when a CUDA device is actually present.
    args.use_cuda = args.use_cuda and torch.cuda.is_available()
    print("Using GPU for acceleration" if args.use_cuda
          else "Using CPU for computation")
    return args
def deprocess_image(img):
    """Map a gradient image onto displayable uint8 pixels.

    Standardizes to zero mean / unit-ish std, squashes into roughly
    [0.4, 0.6], clips to [0, 1] and scales to 0-255. Same recipe as
    https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65
    """
    standardized = (img - np.mean(img)) / (np.std(img) + 1e-5)
    squashed = standardized * 0.1 + 0.5
    return np.uint8(np.clip(squashed, 0, 1) * 255)
if __name__ == '__main__':
    """ python grad_cam.py <path_to_image>
    1. Loads an image with opencv.
    2. Preprocesses it for VGG19 and converts to a pytorch variable.
    3. Makes a forward pass to find the category index with the highest score,
    and computes intermediate activations.
    Makes the visualization. """
    args = get_args()
    # A fine-tuned VGG16 with a 20-way head replaces the stock model.
    my_model = models.vgg16(pretrained=True)
    people_model = models.vgg16(pretrained=True)
    people_model.classifier[6] = nn.Linear(4096, 20)
    people_model.load_state_dict(torch.load('checkpoints/best_cifar.pt'))
    my_model = people_model
    """
    second_model = CSRNet()
    second_model.load_state_dict(torch.load('checkpoints/shaghai_tech_a_best.pth'))
    my_model.features = second_model.frontend
    MaxPoolLayer = nn.MaxPool2d(kernel_size=2, stride=2,padding=0 , dilation=1, ceil_mode=False)
    my_model.features.add_module("pool mod" , MaxPoolLayer)
    my_model.features.add_module("pool mod 2", MaxPoolLayer)
    """
    #print(second_model)
    print(my_model)
    # Layer "21" of VGG16's feature stack is the targeted conv activation.
    grad_cam = GradCam(model=my_model,target_layer_names=["21"], use_cuda=args.use_cuda)
    img = cv2.imread(args.image_path, 1)
    img = np.float32(cv2.resize(img, (224, 224))) / 255
    input = preprocess_image(img)
    # If None, returns the map for the highest scoring category.
    # Otherwise, targets the requested index.
    # Index 14 is presumably the "person" class of the 20-way head
    # (output filenames say "People_class") — confirm against training labels.
    target_index = 14
    mask = grad_cam(input, target_index)
    show_cam_on_image(img, mask)
    # Guided backprop saliency for the same class, saved alongside the CAM.
    gb_model = GuidedBackpropReLUModel(model=my_model, use_cuda=args.use_cuda)
    gb = gb_model(input, index=target_index)
    gb = gb.transpose((1, 2, 0))
    gb = deprocess_image(gb)
    #plt.title('Guided Back Propagation')
    fig = plt.figure()
    plt.axis("off")
    plt.imshow(gb)
    plt.show()
    fig.savefig("People_class_BR_1.png",transparent=True)
| [
"numpy.clip",
"numpy.uint8",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.sum",
"matplotlib.pyplot.imshow",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.axis",
"numpy.maximum",
"cv2.cvtColor",
"numpy.std",
"cv2.resize",
"numpy.transpose... | [((1983, 2017), 'torch.from_numpy', 'torch.from_numpy', (['preprocessed_img'], {}), '(preprocessed_img)\n', (1999, 2017), False, 'import torch\n'), ((2377, 2389), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2387, 2389), True, 'import matplotlib.pyplot as plt\n'), ((2401, 2437), 'cv2.cvtColor', 'cv2.cvtColor', (['cam', 'cv2.COLOR_RGB2BGR'], {}), '(cam, cv2.COLOR_RGB2BGR)\n', (2413, 2437), False, 'import cv2\n'), ((2443, 2458), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2451, 2458), True, 'import matplotlib.pyplot as plt\n'), ((2463, 2478), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cam'], {}), '(cam)\n', (2473, 2478), True, 'import matplotlib.pyplot as plt\n'), ((2483, 2493), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2491, 2493), True, 'import matplotlib.pyplot as plt\n'), ((6331, 6356), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6354, 6356), False, 'import argparse\n'), ((7082, 7100), 'numpy.clip', 'np.clip', (['img', '(0)', '(1)'], {}), '(img, 0, 1)\n', (7089, 7100), True, 'import numpy as np\n'), ((7112, 7131), 'numpy.uint8', 'np.uint8', (['(img * 255)'], {}), '(img * 255)\n', (7120, 7131), True, 'import numpy as np\n'), ((7500, 7529), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (7512, 7529), False, 'from torchvision import models\n'), ((7550, 7579), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (7562, 7579), False, 'from torchvision import models\n'), ((7613, 7632), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(20)'], {}), '(4096, 20)\n', (7622, 7632), True, 'import torch.nn as nn\n'), ((8277, 8307), 'cv2.imread', 'cv2.imread', (['args.image_path', '(1)'], {}), '(args.image_path, 1)\n', (8287, 8307), False, 'import cv2\n'), ((8848, 8860), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8858, 8860), True, 'import matplotlib.pyplot 
as plt\n'), ((8865, 8880), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8873, 8880), True, 'import matplotlib.pyplot as plt\n'), ((8885, 8899), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gb'], {}), '(gb)\n', (8895, 8899), True, 'import matplotlib.pyplot as plt\n'), ((8904, 8914), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8912, 8914), True, 'import matplotlib.pyplot as plt\n'), ((1917, 1958), 'numpy.transpose', 'np.transpose', (['preprocessed_img', '(2, 0, 1)'], {}), '(preprocessed_img, (2, 0, 1))\n', (1929, 1958), True, 'import numpy as np\n'), ((2188, 2208), 'numpy.uint8', 'np.uint8', (['(255 * mask)'], {}), '(255 * mask)\n', (2196, 2208), True, 'import numpy as np\n'), ((2242, 2261), 'numpy.float32', 'np.float32', (['heatmap'], {}), '(heatmap)\n', (2252, 2261), True, 'import numpy as np\n'), ((2288, 2303), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (2298, 2303), True, 'import numpy as np\n'), ((2320, 2331), 'numpy.max', 'np.max', (['cam'], {}), '(cam)\n', (2326, 2331), True, 'import numpy as np\n'), ((3846, 3890), 'numpy.zeros', 'np.zeros', (['target.shape[1:]'], {'dtype': 'np.float32'}), '(target.shape[1:], dtype=np.float32)\n', (3854, 3890), True, 'import numpy as np\n'), ((3986, 4004), 'numpy.maximum', 'np.maximum', (['cam', '(0)'], {}), '(cam, 0)\n', (3996, 4004), True, 'import numpy as np\n'), ((4019, 4046), 'cv2.resize', 'cv2.resize', (['cam', '(224, 224)'], {}), '(cam, (224, 224))\n', (4029, 4046), False, 'import cv2\n'), ((6690, 6715), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6713, 6715), False, 'import torch\n'), ((6982, 6994), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (6989, 6994), True, 'import numpy as np\n'), ((7666, 7705), 'torch.load', 'torch.load', (['"""checkpoints/best_cifar.pt"""'], {}), "('checkpoints/best_cifar.pt')\n", (7676, 7705), False, 'import torch\n'), ((3465, 3492), 'torch.sum', 'torch.sum', (['(one_hot * output)'], {}), '(one_hot * 
output)\n', (3474, 3492), False, 'import torch\n'), ((3794, 3825), 'numpy.mean', 'np.mean', (['grads_val'], {'axis': '(2, 3)'}), '(grads_val, axis=(2, 3))\n', (3801, 3825), True, 'import numpy as np\n'), ((4067, 4078), 'numpy.min', 'np.min', (['cam'], {}), '(cam)\n', (4073, 4078), True, 'import numpy as np\n'), ((4099, 4110), 'numpy.max', 'np.max', (['cam'], {}), '(cam)\n', (4105, 4110), True, 'import numpy as np\n'), ((6033, 6060), 'torch.sum', 'torch.sum', (['(one_hot * output)'], {}), '(one_hot * output)\n', (6042, 6060), False, 'import torch\n'), ((7012, 7023), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (7018, 7023), True, 'import numpy as np\n'), ((8329, 8356), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (8339, 8356), False, 'import cv2\n'), ((3303, 3328), 'torch.from_numpy', 'torch.from_numpy', (['one_hot'], {}), '(one_hot)\n', (3319, 3328), False, 'import torch\n'), ((5871, 5896), 'torch.from_numpy', 'torch.from_numpy', (['one_hot'], {}), '(one_hot)\n', (5887, 5896), False, 'import torch\n')] |
# date: 2021.03.29
# author: <NAME> (<EMAIL>)
# Modified by <NAME> (<EMAIL>)
import os
import json
import openml as oml
import cv2
import numpy as np
from PIL import ImageFont, ImageDraw, Image
from markdown import markdown
#####################################################################
'''
*** Function: write a proto file with a given recognized ID in OpenML
*** Input: dataID from OpenML, name and location for the output file
*** Output: filename.proto (default: "model.proto")
*** "license-1.0.0.json"
*** "description.txt"
*** "authors.txt"
'''
#####################################################################
def write_proto(dataID, file_name='model.proto', output_folder='',
                license_filename='license-1.0.0.json',
                description_filename='description.txt',
                author_filename='authors.txt',
                input_icon='openml-databroker.png',
                icon_filename='icon.png'
                ):
    """Generate a proto3 schema plus metadata side files for one OpenML dataset.

    Downloads the dataset identified by ``dataID``, writes a ``.proto`` file
    describing its columns as a ``Features`` message plus a ``get_next_row``
    service, and emits the licence JSON, stamped icon, HTML description and
    authors file alongside it.

    Args:
        dataID: numeric OpenML dataset id.
        file_name: name of the generated ``.proto`` file.
        output_folder: directory the ``.proto`` file is written into.
        license_filename: output name for the licence JSON.
        description_filename: output name for the HTML description file.
        author_filename: output name for the authors file.
        input_icon: template image the dataset name is stamped onto.
        icon_filename: output name for the stamped icon.
    """
    output_file = os.path.join(output_folder, file_name)
    try:
        dataset = oml.datasets.get_dataset(dataID)
        df = dataset.get_data()[0]
    except Exception:
        # Bug fix: the original only printed here and then fell through,
        # crashing later with a NameError on 'dataset'.  Bail out instead.
        print(f'No data with ID {dataID}')
        return
    with open(output_file, 'w') as f:
        type_ser = df.dtypes
        ncols = len(type_ser)
        nrows = len(df.values)
        # Info about the OpenML file in a commented header section
        f.write('// This file was generated with the Protobuf generator tool.\n')
        f.write(f'// Dataset ID : {dataID}\n')
        f.write(f'// Dataset Name : {dataset.name};\n')
        f.write(f'// Dataset URL : {dataset.url}\n')
        f.write(f'// Num. Columns : {ncols}\n')
        f.write(f'// Num. Rows : {nrows}\n')
        f.write(f'// Target Feature: {dataset.default_target_attribute}\n\n')
        # Write actual protobuf file contents
        f.write('syntax = "proto3";\n\n')
        # Empty request message used by the service below
        f.write(f'message Empty {{\n}}\n\n')
        # Features message: one numbered field per dataset column
        f.write('message Features {\n')
        types = [str(m) for m in type_ser]
        for field_num, (col_type, col_name) in enumerate(zip(types, type_ser.index), start=1):
            text = map_type(col_type)
            varname = map_varname(col_name)
            f.write(f'\t{text:8} {varname:30} = {field_num};\n')
        f.write('}\n\n')
        # The get_next_row service
        f.write('service get_next_row {\n')
        f.write('\t rpc get_next_row(Empty) returns(Features);\n')
        f.write('}\n\n')
        # Side files: licence JSON, stamped icon, description and authors.
        _write_license(dataset, filename=license_filename)
        _create_icon(dataset, input_icon=input_icon, icon_filename=icon_filename)
        _write_description_authors(dataset, description_file=description_filename,
                                   author_file=author_filename)
    print(f'Done writing {output_file} file for OpenML dataset nr. {dataID}')
#####################################################################
'''
*** Function: map the given type, following the protobuf docs
*** ( as in https://developers.google.com/protocol-buffers/docs/proto3 )
*** Input: the vtype as it comes from the OpenML dataset
*** Output: the type to use in a .proto file
'''
#####################################################################
def map_type(vtype):
    """Translate a pandas/OpenML dtype name into its proto3 scalar type.

    Unrecognised dtype names are passed through unchanged so the caller
    can still emit them verbatim into the .proto file.
    """
    dtype_to_proto = {
        'category': 'string',
        'float64': 'double',
        'float32': 'float',
        'uint8': 'uint32',
    }
    return dtype_to_proto.get(vtype, vtype)
#####################################################################
'''
*** Function: replaces protobuf-problematic characters for
*** variable names with underscores.
*** These changes were required after running on all 83 files
*** of interesting and finding some problematic files like:
*** 470, 1063, 1067, 1068, 40677, 40678, 40710
*** Output: variable name with underscores instead of problem chars
'''
#####################################################################
def map_varname(origvarname):
    """Return a protobuf-safe field name derived from *origvarname*.

    Column names that are plain integers become ``V_<int>`` (a protobuf
    field name cannot be just a number); otherwise every character known
    to upset protobuf is swapped for an underscore and the result is
    capitalised.  Needed for datasets such as 470, 1063, 40677, 40682.
    """
    try:
        # Purely numeric column names (e.g. OpenML dataset 40682).
        return "V_" + str(int(origvarname))
    except Exception:
        # Replace all protobuf-problematic characters in a single pass.
        bad_to_underscore = str.maketrans(dict.fromkeys("-%()&;#/\\.", "_"))
        return origvarname.translate(bad_to_underscore).capitalize()
#####################################################################
'''
*** Function: provide the IDs of the OpenML datasets of interest
*** Output: array of the IDs of OpenML datasets of interest
'''
#####################################################################
def get_file_Nums_of_interest():
    """Return the 82 OpenML dataset ids this project targets, ascending.

    These are the subset of the original 155 "files of interest" that are
    neither very large (<= 10K rows) nor very wide (<= 35 columns), that
    download without problems, and that contain no object-typed columns.
    (Excluded examples: #1110 fails to download; #40704 has two class
    columns; #149/#150/#1596 exceed 100K rows; #313/#1116/#40670 exceed
    100 columns; #38/#374/#40945/#41496 contain object-dtype columns.)
    """
    dataset_ids = (
        8, 9, 10, 13, 15, 24, 29, 31, 36, 37, 41, 43,
        50, 54, 59, 61, 62, 187, 307, 329, 333, 334, 335, 375,
        451, 466, 469, 470, 481, 566, 1063, 1067, 1068, 1115, 1121, 1413,
        1462, 1464, 1467, 1480, 1489, 1497, 1500, 1504, 1510, 1547, 1552, 1553,
        1554, 4538, 23380, 40646, 40647, 40648, 40649, 40650, 40663, 40664,
        40669, 40671, 40677, 40678, 40680, 40681, 40682, 40686, 40687, 40690,
        40691, 40693, 40700, 40701, 40702, 40706, 40707, 40708, 40710, 40711,
        40713, 40975, 40983, 40984,
    )
    return list(dataset_ids)
def _write_license(dataset, filename="license-1.0.0.json"):
license_txt = {
"$schema": "https://raw.githubusercontent.com/acumos/license-manager/master/license-manager-client-library/src/main/resources/schema/1.0.0/license-profile.json",
"keyword": "Apache-2.0",
"licenseName": "Apache License 2.0",
"intro": dataset.citation,
"copyright": {
"year": int(dataset.upload_date[:4]) if dataset.upload_date else 2021,
"company": "OpenML",
"suffix": "All Rights Reserved"
},
"softwareType": "Public",
"companyName": "OpenML",
"contact": {
"name": dataset.creator,
"URL": dataset.original_data_url,
"email": "https://openml.org/"
},
"rtuRequired": 'false'
}
try:
with open(filename, 'w') as f:
json.dump(license_txt, f, indent = 4)
except:
print('Could not write the licence file. Please check the path!')
def _create_icon(dataset, input_icon, icon_filename):
    """Render the dataset name onto the template icon and save the result.

    The template is loaded with OpenCV, the caption drawn through PIL
    (roughly centred horizontally, assuming ~12px per character on a
    ~290px wide template), and the composite written back out with OpenCV.
    """
    caption = dataset.name
    # Approximate horizontal centring of the caption on the template.
    x_offset = int((290 - len(caption) * 12) / 2)
    anchor = (x_offset, 0)
    caption_font = ImageFont.truetype("arial.ttf", 25)
    canvas = Image.fromarray(cv2.imread(input_icon))
    ImageDraw.Draw(canvas).text(anchor, caption, font=caption_font, fill=(0, 0, 0))
    cv2.imwrite(icon_filename, np.array(canvas))
def _write_description_authors(dataset, description_file, author_file):
try:
description = markdown(dataset.description)
except: description = ''
description = 'https://openml.org <br>' + description
description = description.replace('\n', '<br>')
with open (description_file, 'w') as f:
f.write(description)
with open(author_file, 'w') as f:
f.write(f'{dataset.creator}')
| [
"cv2.imwrite",
"markdown.markdown",
"os.path.join",
"PIL.ImageFont.truetype",
"numpy.array",
"PIL.ImageDraw.Draw",
"openml.datasets.get_dataset",
"cv2.imread",
"json.dump"
] | [((1011, 1049), 'os.path.join', 'os.path.join', (['output_folder', 'file_name'], {}), '(output_folder, file_name)\n', (1023, 1049), False, 'import os\n'), ((9489, 9524), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', '(25)'], {}), "('arial.ttf', 25)\n", (9507, 9524), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((9591, 9614), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_pil'], {}), '(img_pil)\n', (9605, 9614), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((9696, 9713), 'numpy.array', 'np.array', (['img_pil'], {}), '(img_pil)\n', (9704, 9713), True, 'import numpy as np\n'), ((9718, 9749), 'cv2.imwrite', 'cv2.imwrite', (['icon_filename', 'img'], {}), '(icon_filename, img)\n', (9729, 9749), False, 'import cv2\n'), ((1077, 1109), 'openml.datasets.get_dataset', 'oml.datasets.get_dataset', (['dataID'], {}), '(dataID)\n', (1101, 1109), True, 'import openml as oml\n'), ((9556, 9578), 'cv2.imread', 'cv2.imread', (['input_icon'], {}), '(input_icon)\n', (9566, 9578), False, 'import cv2\n'), ((9855, 9884), 'markdown.markdown', 'markdown', (['dataset.description'], {}), '(dataset.description)\n', (9863, 9884), False, 'from markdown import markdown\n'), ((9201, 9236), 'json.dump', 'json.dump', (['license_txt', 'f'], {'indent': '(4)'}), '(license_txt, f, indent=4)\n', (9210, 9236), False, 'import json\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 13:43:57 2019
@author: <NAME>
"""
import torch
from torch import optim, nn
import torch.nn.functional as F
from torchvision import datasets, transforms
import helper
import time
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data.sampler import SubsetRandomSampler
# Convert PIL images to [0, 1] tensors; no extra normalisation is applied.
transform = transforms.Compose([transforms.ToTensor()])
# FashionMNIST training split, downloaded on first run into ./Fashion_MNIST.
trainset = datasets.FashionMNIST('Fashion_MNIST', train=True, download=True, transform=transform)
# Hold out 20% of the (shuffled) training indices for validation.
valid_size = 0.2
num_train = len(trainset)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(valid_size*num_train)
# First `split` shuffled indices become validation, the rest training.
train_idx, valid_idx = indices[split: ], indices[ :split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# Samplers already randomise order, so DataLoader shuffle stays at default.
train_loader = torch.utils.data.DataLoader(trainset, batch_size=64, sampler=train_sampler)
valid_loader = torch.utils.data.DataLoader(trainset, batch_size=64, sampler=valid_sampler)
# Separate test split used only for the final evaluation below.
testset = datasets.FashionMNIST('Fashion_MNIST', train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
class Network(nn.Module):
    """Fully connected classifier for flattened 28x28 FashionMNIST images.

    Three hidden blocks of Linear -> BatchNorm -> ReLU -> Dropout(0.2)
    (784 -> 356 -> 124 -> 64) followed by a linear head emitting
    log-probabilities over the 10 classes.
    """
    def __init__(self):
        super().__init__()
        # Hidden stack; each linear layer is paired with its batch norm.
        self.fc1, self.batchnorm1 = nn.Linear(784, 356), nn.BatchNorm1d(356)
        self.fc2, self.batchnorm2 = nn.Linear(356, 124), nn.BatchNorm1d(124)
        self.fc3, self.batchnorm3 = nn.Linear(124, 64), nn.BatchNorm1d(64)
        # Output head and the dropout module shared by all hidden blocks.
        self.fc4 = nn.Linear(64, 10)
        self.dropout = nn.Dropout(0.2)
    def forward(self, x):
        # Flatten whatever spatial layout arrives into 784-long vectors.
        h = x.view(-1, 784)
        for linear, norm in ((self.fc1, self.batchnorm1),
                             (self.fc2, self.batchnorm2),
                             (self.fc3, self.batchnorm3)):
            h = self.dropout(F.relu(norm(linear(h))))
        return F.log_softmax(self.fc4(h), dim=1)
# Build the model and move all parameters to the GPU (training assumes CUDA).
model = Network()
model.cuda()
# NLLLoss pairs with the log_softmax output produced by the network.
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
start = time.time()
epochs = 30
train_losses=[]
valid_losses=[]
# Best (lowest) validation loss seen so far, used for checkpointing.
min_validation_loss = np.Inf
for e in range(epochs):
    running_loss = 0
    model.train()  # enable dropout and batch-norm batch statistics
    for images, labels in train_loader:
        images = images.cuda()
        labels = labels.cuda()
        output = model(images)
        loss = criterion(output, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    valid_loss = 0
    validation_accuracy = 0
    model.eval()  # inference behaviour for validation
    with torch.no_grad():
        for images, labels in valid_loader:
            images = images.cuda()
            labels = labels.cuda()
            output = model(images)
            # NOTE(review): accumulates a CUDA tensor (no .item()), so
            # valid_losses below stores tensors -- confirm before plotting.
            valid_loss += criterion(output, labels)
            # Convert log-probabilities back to probabilities.
            ps = torch.exp(output)
            top_p, top_class = ps.topk(1, dim=1)
            equals = top_class==labels.view(*top_class.shape)
            validation_accuracy += torch.mean(equals.type(torch.FloatTensor))
    # Average the accumulated per-batch losses.
    valid_loss /= len(valid_loader)
    running_loss = running_loss/len(train_loader)
    valid_losses.append(valid_loss)
    train_losses.append(running_loss)
    print("Epoch: {}/{} ".format(e+1, epochs),
          "Training Loss: {:.3f} ".format(running_loss),
          "Validation Loss: {:.3f} ".format(valid_loss),
          "Validation Accuracy: {:.3f}".format(validation_accuracy/len(valid_loader)))
    # Checkpoint whenever the validation loss improves.
    if valid_loss < min_validation_loss:
        print('Validation loss decreased {:.4f}--->{:.4f} saving model'.format(min_validation_loss, valid_loss))
        min_validation_loss = valid_loss
        torch.save(model.state_dict(), 'FasionMNIST.pt')
        print()
print("Total time to train {}".format(time.time()-start))
# model.cpu()
# images, labels = next(iter(test_loader))
# output = model(images[0])
# helper.view_classify(images[0], torch.exp(output), version='Fashion')
# Plot the training vs. validation loss curves.
plt.plot(train_losses, label='training loss')
plt.plot(valid_losses, label='validation loss')
plt.legend(frameon=False)
# Restore the best checkpoint before the final test evaluation.
model.load_state_dict(torch.load('FasionMNIST.pt'))
# ---- Final evaluation on the held-out test set ----
test_loss = 0.0
# Per-class tallies of correct predictions and sample counts (classes 0-9).
class_correct = [0.0] * 10
class_total = [0.0] * 10
model.eval()  # prep model for evaluation (disable dropout, BN running stats)
with torch.no_grad():
    for data, target in test_loader:
        data = data.cuda()
        target = target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # accumulate the mean batch loss
        loss = criterion(output, target)
        test_loss += loss.item()
        # predicted class = arg-max over the log-probabilities
        _, pred = torch.max(output, 1)
        # per-sample correctness flags
        # NOTE: np.squeeze would give a 0-d tensor for a batch of size 1,
        # breaking correct[i]; batches here always have >= 2 samples.
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        # tally correctness per true class
        for i in range(len(target)):
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1
# average the loss over batches
test_loss = test_loss/len(test_loader)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            str(i), 100 * class_correct[i] / class_total[i],
            class_correct[i], class_total[i]))
    else:
        # Bug fix: 'classes' was never defined (NameError); label by index.
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total))) | [
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.nn.Dropout",
"torchvision.datasets.FashionMNIST",
"torch.load",
"matplotlib.pyplot.plot",
"torch.max",
"torch.exp",
"torch.nn.BatchNorm1d",
"numpy.sum",
"torch.nn.NLLLoss",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.no_grad... | [((427, 518), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', (['"""Fashion_MNIST"""'], {'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "('Fashion_MNIST', train=True, download=True, transform\n =transform)\n", (448, 518), False, 'from torchvision import datasets, transforms\n'), ((596, 622), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (613, 622), True, 'import numpy as np\n'), ((736, 766), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_idx'], {}), '(train_idx)\n', (755, 766), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((784, 814), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['valid_idx'], {}), '(valid_idx)\n', (803, 814), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((833, 908), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(64)', 'sampler': 'train_sampler'}), '(trainset, batch_size=64, sampler=train_sampler)\n', (860, 908), False, 'import torch\n'), ((925, 1000), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(64)', 'sampler': 'valid_sampler'}), '(trainset, batch_size=64, sampler=valid_sampler)\n', (952, 1000), False, 'import torch\n'), ((1014, 1105), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', (['"""Fashion_MNIST"""'], {'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "('Fashion_MNIST', train=False, download=True,\n transform=transform)\n", (1035, 1105), False, 'from torchvision import datasets, transforms\n'), ((1117, 1182), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(64)', 'shuffle': '(True)'}), '(testset, batch_size=64, shuffle=True)\n', (1144, 1182), False, 'import torch\n'), ((1971, 1983), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (1981, 1983), False, 'from torch import 
optim, nn\n'), ((2049, 2060), 'time.time', 'time.time', ([], {}), '()\n', (2058, 2060), False, 'import time\n'), ((4056, 4101), 'matplotlib.pyplot.plot', 'plt.plot', (['train_losses'], {'label': '"""training loss"""'}), "(train_losses, label='training loss')\n", (4064, 4101), True, 'import matplotlib.pyplot as plt\n'), ((4103, 4150), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_losses'], {'label': '"""validation loss"""'}), "(valid_losses, label='validation loss')\n", (4111, 4150), True, 'import matplotlib.pyplot as plt\n'), ((4152, 4177), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (4162, 4177), True, 'import matplotlib.pyplot as plt\n'), ((4203, 4231), 'torch.load', 'torch.load', (['"""FasionMNIST.pt"""'], {}), "('FasionMNIST.pt')\n", (4213, 4231), False, 'import torch\n'), ((4390, 4405), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4403, 4405), False, 'import torch\n'), ((388, 409), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (407, 409), False, 'from torchvision import datasets, transforms\n'), ((1287, 1306), 'torch.nn.Linear', 'nn.Linear', (['(784)', '(356)'], {}), '(784, 356)\n', (1296, 1306), False, 'from torch import optim, nn\n'), ((1334, 1353), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(356)'], {}), '(356)\n', (1348, 1353), False, 'from torch import optim, nn\n'), ((1374, 1393), 'torch.nn.Linear', 'nn.Linear', (['(356)', '(124)'], {}), '(356, 124)\n', (1383, 1393), False, 'from torch import optim, nn\n'), ((1421, 1440), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(124)'], {}), '(124)\n', (1435, 1440), False, 'from torch import optim, nn\n'), ((1461, 1479), 'torch.nn.Linear', 'nn.Linear', (['(124)', '(64)'], {}), '(124, 64)\n', (1470, 1479), False, 'from torch import optim, nn\n'), ((1507, 1525), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (1521, 1525), False, 'from torch import optim, nn\n'), ((1546, 1563), 'torch.nn.Linear', 'nn.Linear', 
(['(64)', '(10)'], {}), '(64, 10)\n', (1555, 1563), False, 'from torch import optim, nn\n'), ((1588, 1603), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (1598, 1603), False, 'from torch import optim, nn\n'), ((2621, 2636), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2634, 2636), False, 'import torch\n'), ((4826, 4846), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (4835, 4846), False, 'import torch\n'), ((2890, 2907), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (2899, 2907), False, 'import torch\n'), ((3867, 3878), 'time.time', 'time.time', ([], {}), '()\n', (3876, 3878), False, 'import time\n'), ((5757, 5778), 'numpy.sum', 'np.sum', (['class_correct'], {}), '(class_correct)\n', (5763, 5778), True, 'import numpy as np\n'), ((5780, 5799), 'numpy.sum', 'np.sum', (['class_total'], {}), '(class_total)\n', (5786, 5799), True, 'import numpy as np\n'), ((5731, 5750), 'numpy.sum', 'np.sum', (['class_total'], {}), '(class_total)\n', (5737, 5750), True, 'import numpy as np\n'), ((5493, 5517), 'numpy.sum', 'np.sum', (['class_correct[i]'], {}), '(class_correct[i])\n', (5499, 5517), True, 'import numpy as np\n'), ((5519, 5541), 'numpy.sum', 'np.sum', (['class_total[i]'], {}), '(class_total[i])\n', (5525, 5541), True, 'import numpy as np\n'), ((5707, 5728), 'numpy.sum', 'np.sum', (['class_correct'], {}), '(class_correct)\n', (5713, 5728), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# Side length (in pixels) of the square MNIST-style input images.
IMAGE_SIZE = 28
# Flattened input dimensionality fed to the VAE (28 * 28 = 784).
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
class VAE(object):
    """Variational auto-encoder (TensorFlow 1.x graph-mode implementation).

    The whole computation graph -- encoder Q(z|X), reparameterised latent
    sample z, decoder P(X|z), the ELBO-based cost and an Adam train op --
    is built once in the constructor.  Graph tensors live on private
    (name-mangled) attributes; callers interact through partial_fit /
    transform / generate / reconstruct, each taking an external tf.Session.
    """
    def __init__(self, network_architecture, batch_size, learn_rate, transfer_func=tf.nn.relu, train_multiple=False):
        """Store hyper-parameters and assemble the full VAE graph.

        Args:
            network_architecture: dict with keys n_input, n_hidden_1,
                n_hidden_2, n_z (see __init_weights).
            batch_size: fixed batch size -- the latent noise tensor is shaped
                (batch_size, n_z), so fed batches must match it exactly.
            learn_rate: Adam learning rate.
            transfer_func: activation for the hidden layers of both networks.
            train_multiple: selects the alternative cost used when training
                multiple VAE models (see __create_vae_loss).
        """
        self.__net_arch = network_architecture
        self.__lr = learn_rate
        self.__bs = batch_size
        self.__tran_func = transfer_func
        # Flag for whether cost function associated with training multiple VAE models
        self.__train_multiple = train_multiple
        # Graph input
        self.__x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS])
        # Boolean tensor to signify whether input should be discriminated against
        self.__discr = tf.placeholder(tf.bool, [None])
        # Create VAE
        self.__create_autoencoder()
        # Compute loss terms for VAE
        self.__create_vae_loss()
        # Define loss function for the VAE
        self.__create_loss_optimiser()
    def __create_autoencoder(self):
        """Wire encoder -> reparameterisation trick -> decoder."""
        # Initialize autoencode network weights and biases
        network_weights = self.__init_weights(**self.__net_arch)
        # Use recognition network to determine mean and (log) variance of Gaussian distribution in latent space
        self.__z_mean, self.__z_log_sigma_sq = self.__encoder(network_weights['W_q'], network_weights['B_q'])
        # Randomly draw one sample z from the latent normal distribution - assumed to generate the data
        n_z = self.__net_arch['n_z']
        # Epsilon is a random normal tensor - represents noise
        eps = tf.random_normal((self.__bs, n_z), mean=0., stddev=1.0, dtype=tf.float32)
        # z = mu + sigma*epsilon  (the reparameterisation trick keeps z differentiable)
        self.__z = tf.add(self.__z_mean, tf.multiply(tf.sqrt(tf.exp(self.__z_log_sigma_sq)), eps))
        # Use generator to determine mean of Bernoulli distribution of reconstructed input
        self.__x_reconstr_logits, self.__x_reconstr_mean = self.__decoder(network_weights['W_p'], network_weights['B_p'])
    def __init_weights(self, n_input, n_hidden_1, n_hidden_2, n_z):
        """Create all trainable variables: Xavier weights, zero biases.

        Returns a dict with encoder weights/biases ('W_q'/'B_q') and
        decoder weights/biases ('W_p'/'B_p').
        """
        all_weights = dict()
        w_init = tf.contrib.layers.xavier_initializer(uniform=False)
        b_init = tf.constant_initializer(0.)
        all_weights['W_q'] = {
            'h1': tf.Variable(w_init(shape=[n_input, n_hidden_1])),
            'h2': tf.Variable(w_init(shape=[n_hidden_1, n_hidden_2])),
            'out_mean': tf.Variable(w_init(shape=[n_hidden_2, n_z])),
            'out_log_sigma': tf.Variable(w_init(shape=[n_hidden_2, n_z]))}
        all_weights['B_q'] = {
            'b1': tf.Variable(b_init(shape=[n_hidden_1])),
            'b2': tf.Variable(b_init(shape=[n_hidden_2])),
            'out_mean': tf.Variable(b_init(shape=[n_z])),
            'out_log_sigma': tf.Variable(b_init(shape=[n_z]))}
        # Decoder mirrors the encoder: n_z -> n_hidden_2 -> n_hidden_1 -> n_input.
        all_weights['W_p'] = {
            'h1': tf.Variable(w_init(shape=[n_z, n_hidden_2])),
            'h2': tf.Variable(w_init(shape=[n_hidden_2, n_hidden_1])),
            'out_mean': tf.Variable(w_init(shape=[n_hidden_1, n_input])),
            'out_log_sigma': tf.Variable(w_init(shape=[n_hidden_1, n_input]))}
        all_weights['B_p'] = {
            'b1': tf.Variable(b_init(shape=[n_hidden_2])),
            'b2': tf.Variable(b_init(shape=[n_hidden_1])),
            'out_mean': tf.Variable(b_init(shape=[n_input])),
            'out_log_sigma': tf.Variable(b_init(shape=[n_input]))}
        return all_weights
    # Generate probabilistic encoder (recognition network), which maps inputs onto a normal distribution in latent space
    # Encoder network turns the input samples x into two parameters in a latent space: z_mean & z_log_sigma_sq
    """ Q(z|X) """
    def __encoder(self, weights, biases):
        """Two hidden layers, then linear heads for z mean / log-variance."""
        # The transformation is parametrized and can be learned
        h_layer_1 = self.__tran_func(tf.add(tf.matmul(self.__x, weights['h1']), biases['b1']))
        h_layer_2 = self.__tran_func(tf.add(tf.matmul(h_layer_1, weights['h2']), biases['b2']))
        z_mean = tf.add(tf.matmul(h_layer_2, weights['out_mean']), biases['out_mean'])
        z_log_sigma_sq = tf.add(tf.matmul(h_layer_2, weights['out_log_sigma']), biases['out_log_sigma'])
        return (z_mean, z_log_sigma_sq)
    # Generate probabilistic decoder (generator network), which maps points in latent space onto a Bernoulli distribution in data space
    # Decoder network maps the latent space points back to the original input data
    """ P(X|z) """
    def __decoder(self, weights, biases, reuse=False):
        """Two hidden layers, then a linear head producing pixel logits.

        Returns both the raw logits (for the numerically stable sigmoid
        cross-entropy loss) and the sigmoid mean (for reconstruction).
        """
        # The transformation is parametrized and can be learned
        h_layer_1 = self.__tran_func(tf.add(tf.matmul(self.__z, weights['h1']), biases['b1']))
        h_layer_2 = self.__tran_func(tf.add(tf.matmul(h_layer_1, weights['h2']), biases['b2']))
        x_reconstr_logits = tf.add(tf.matmul(h_layer_2, weights['out_mean']), biases['out_mean'])
        x_reconstr_mean = tf.nn.sigmoid(x_reconstr_logits)
        return x_reconstr_logits, x_reconstr_mean
    # Define VAE Loss as sum of reconstruction term and KL Divergence regularisation term
    def __create_vae_loss(self):
        ''' 1.) The reconstruction loss (the negative log probability of the input under the reconstructed Bernoulli distribution
                induced by the decoder in the data space). This can be interpreted as the number of "nats" required
                to reconstruct the input when the activation in latent space is given. Adding 1e-10 to avoid evaluation of log(0.0).
        '''
        # Prone to numerical instability
        # reconstr_loss = self.__x * tf.log(1e-10 + self.__x_reconstr_mean) + (1 - self.__x) * tf.log(1e-10 + 1 - self.__x_reconstr_mean)
        # reconstr_loss = -tf.reduce_sum(reconstr_loss, 1)
        # Numerically stable form: cross-entropy computed from raw logits.
        reconstr_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.__x_reconstr_logits, labels=self.__x)
        reconstr_loss = tf.reduce_sum(reconstr_loss, axis=1)
        self.__m_reconstr_loss = tf.reduce_mean(reconstr_loss)
        ''' 2.) The latent loss, which is defined as the KL divergence between the distribution in latent space induced
                by the encoder on the data and some prior. Acts as a regulariser and can be interpreted as the number of "nats"
                required for transmitting the latent space distribution given the prior.
                Fitting the variational objective is equivalent to optimising a lower bound on the log marginal likelihood,
                given that we know KL-divergence is non-negative --> Termed "ELBO" or "Evidence Lower Bound"
        '''
        latent_loss = 1 + self.__z_log_sigma_sq - tf.square(self.__z_mean) - tf.exp(self.__z_log_sigma_sq)
        latent_loss = -0.5 * tf.reduce_sum(latent_loss, axis=1)
        self.__m_latent_loss = tf.reduce_mean(latent_loss)
        # NOTE(review): in the train_multiple branch the reconstruction term
        # is excluded from the optimised cost and non-discriminated samples
        # use the reciprocal KL -- presumably intentional for joint training
        # of several models; confirm with the training procedure.
        if self.__train_multiple:
            self.__cost = tf.where(self.__discr, latent_loss, tf.reciprocal(latent_loss))
        else:
            self.__cost = tf.add(reconstr_loss, latent_loss)
        # Average over batch
        self.__batch_cost = tf.reduce_mean(self.__cost)
    def __create_loss_optimiser(self):
        """Create the Adam training op minimising the mean batch cost."""
        self.__train_op = tf.train.AdamOptimizer(learning_rate=self.__lr).minimize(self.__batch_cost)
        # Extract trainable variables and gradients
        # grads, tvars = zip(*optimiser.compute_gradients(self.__batch_cost))
        # Use gradient clipping to avoid 'exploding' gradients
        # grads, _ = tf.clip_by_global_norm(grads, 1.)
        # self.__train_op = optimiser.apply_gradients(zip(grads, tvars))
    # Train model on mini-batch of input data & return the cost
    def partial_fit(self, sess, X, discr=None):
        """Run one optimisation step on mini-batch X.

        Returns (batch cost, mean reconstruction loss, mean latent loss).
        When discr is None, every sample is treated as discriminated.
        """
        if discr is None:
            discr = [True] * self.__bs
        opt, cost, recon_loss, latent_loss = sess.run((self.__train_op, self.__batch_cost, self.__m_reconstr_loss, self.__m_latent_loss),
                                                      feed_dict={self.__x: X, self.__discr: discr})
        return cost, recon_loss, latent_loss
    # Transform data by mapping it into the latent space
    # Note: This maps to mean of distribution, alternatively could sample from Gaussian distribution
    def transform(self, sess, X):
        """Encode X to the mean of its latent-space distribution."""
        return sess.run(self.__z_mean, feed_dict={self.__x: X})
    # Generate data by sampling from latent space
    # If z_mu is not None, data for this point in latent space is generated
    # Otherwise, z_mu is drawn from prior in latent space
    def generate(self, sess, z_mu=None):
        """Decode a latent point (or a sample from the prior) into data space."""
        if z_mu is None:
            z_mu = np.random.normal(size=self.__net_arch['n_z'])
        return sess.run(self.__x_reconstr_mean, feed_dict={self.__z: z_mu})
    # Use VAE to reconstruct given data
    def reconstruct(self, sess, X):
        """Encode then decode X, returning the reconstruction mean."""
        return sess.run(self.__x_reconstr_mean, feed_dict={self.__x: X})
| [
"numpy.random.normal",
"tensorflow.train.AdamOptimizer",
"tensorflow.random_normal",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.add",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.nn.sigmoid",
"tensorflow.constant_initializer",
"tensorflow.matmul",
"tensorflow.s... | [((656, 704), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, IMAGE_PIXELS]'], {}), '(tf.float32, [None, IMAGE_PIXELS])\n', (670, 704), True, 'import tensorflow as tf\n'), ((807, 838), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool', '[None]'], {}), '(tf.bool, [None])\n', (821, 838), True, 'import tensorflow as tf\n'), ((1602, 1676), 'tensorflow.random_normal', 'tf.random_normal', (['(self.__bs, n_z)'], {'mean': '(0.0)', 'stddev': '(1.0)', 'dtype': 'tf.float32'}), '((self.__bs, n_z), mean=0.0, stddev=1.0, dtype=tf.float32)\n', (1618, 1676), True, 'import tensorflow as tf\n'), ((2123, 2174), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(False)'}), '(uniform=False)\n', (2159, 2174), True, 'import tensorflow as tf\n'), ((2188, 2216), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2211, 2216), True, 'import tensorflow as tf\n'), ((4819, 4851), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x_reconstr_logits'], {}), '(x_reconstr_logits)\n', (4832, 4851), True, 'import tensorflow as tf\n'), ((5645, 5738), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'self.__x_reconstr_logits', 'labels': 'self.__x'}), '(logits=self.__x_reconstr_logits,\n labels=self.__x)\n', (5684, 5738), True, 'import tensorflow as tf\n'), ((5755, 5791), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['reconstr_loss'], {'axis': '(1)'}), '(reconstr_loss, axis=1)\n', (5768, 5791), True, 'import tensorflow as tf\n'), ((5821, 5850), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['reconstr_loss'], {}), '(reconstr_loss)\n', (5835, 5850), True, 'import tensorflow as tf\n'), ((6632, 6659), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['latent_loss'], {}), '(latent_loss)\n', (6646, 6659), True, 'import tensorflow as tf\n'), ((6906, 6933), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.__cost'], {}), 
'(self.__cost)\n', (6920, 6933), True, 'import tensorflow as tf\n'), ((3971, 4012), 'tensorflow.matmul', 'tf.matmul', (['h_layer_2', "weights['out_mean']"], {}), "(h_layer_2, weights['out_mean'])\n", (3980, 4012), True, 'import tensorflow as tf\n'), ((4062, 4108), 'tensorflow.matmul', 'tf.matmul', (['h_layer_2', "weights['out_log_sigma']"], {}), "(h_layer_2, weights['out_log_sigma'])\n", (4071, 4108), True, 'import tensorflow as tf\n'), ((4734, 4775), 'tensorflow.matmul', 'tf.matmul', (['h_layer_2', "weights['out_mean']"], {}), "(h_layer_2, weights['out_mean'])\n", (4743, 4775), True, 'import tensorflow as tf\n'), ((6515, 6544), 'tensorflow.exp', 'tf.exp', (['self.__z_log_sigma_sq'], {}), '(self.__z_log_sigma_sq)\n', (6521, 6544), True, 'import tensorflow as tf\n'), ((6570, 6604), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['latent_loss'], {'axis': '(1)'}), '(latent_loss, axis=1)\n', (6583, 6604), True, 'import tensorflow as tf\n'), ((6817, 6851), 'tensorflow.add', 'tf.add', (['reconstr_loss', 'latent_loss'], {}), '(reconstr_loss, latent_loss)\n', (6823, 6851), True, 'import tensorflow as tf\n'), ((8339, 8384), 'numpy.random.normal', 'np.random.normal', ([], {'size': "self.__net_arch['n_z']"}), "(size=self.__net_arch['n_z'])\n", (8355, 8384), True, 'import numpy as np\n'), ((3803, 3837), 'tensorflow.matmul', 'tf.matmul', (['self.__x', "weights['h1']"], {}), "(self.__x, weights['h1'])\n", (3812, 3837), True, 'import tensorflow as tf\n'), ((3894, 3929), 'tensorflow.matmul', 'tf.matmul', (['h_layer_1', "weights['h2']"], {}), "(h_layer_1, weights['h2'])\n", (3903, 3929), True, 'import tensorflow as tf\n'), ((4558, 4592), 'tensorflow.matmul', 'tf.matmul', (['self.__z', "weights['h1']"], {}), "(self.__z, weights['h1'])\n", (4567, 4592), True, 'import tensorflow as tf\n'), ((4649, 4684), 'tensorflow.matmul', 'tf.matmul', (['h_layer_1', "weights['h2']"], {}), "(h_layer_1, weights['h2'])\n", (4658, 4684), True, 'import tensorflow as tf\n'), ((6488, 6512), 
'tensorflow.square', 'tf.square', (['self.__z_mean'], {}), '(self.__z_mean)\n', (6497, 6512), True, 'import tensorflow as tf\n'), ((6757, 6783), 'tensorflow.reciprocal', 'tf.reciprocal', (['latent_loss'], {}), '(latent_loss)\n', (6770, 6783), True, 'import tensorflow as tf\n'), ((6998, 7045), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.__lr'}), '(learning_rate=self.__lr)\n', (7020, 7045), True, 'import tensorflow as tf\n'), ((1762, 1791), 'tensorflow.exp', 'tf.exp', (['self.__z_log_sigma_sq'], {}), '(self.__z_log_sigma_sq)\n', (1768, 1791), True, 'import tensorflow as tf\n')] |
# Copyright 2019 <NAME>
#
# This file is part of RfPy.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Functions to calculate thickness (H) and Vp/Vs ratio (R) of the crust based on
moveout times of direct Ps and reverberated Pps and Pss phases.
The stacks are obtained from the median weighted by the phase of individual signals.
"""
import numpy as np
from obspy.core import Stream, Trace, AttribDict
from scipy.signal import hilbert
from scipy import stats
import sys
from matplotlib import pyplot as plt
class HkStack(object):
    """
    A HkStack object contains attributes and methods to stack radial
    receiver functions along moveout curves for point measurements
    of the depth to Moho (H) and P-to-S velocity ratio (k) beneath
    a seismic stations. The object is initialized with at least one
    :class:`~obspy.core.Stream` containing observed (or synthetised)
    radial receiver functions. The methods available can produce linear
    weighted stacks or product stacks, and can be used in the presence
    of flat or dipping Moho (with known strike and dip).

    Note
    ----
    The object is initialized with the ``rfV1`` field only, and
    other attributes are added to the object as the analysis proceeds.
    A second ``rfV2`` can be included, which is typically a copy of ``rfV1``
    filtered at different corner frequencies and is used to stack along the
    Pps and Pss moveout curves.

    Parameters
    ----------
    rfV1 : :class:`~obspy.core.Stream`
        Stream object containing the radial-component receiver function
        seismograms
    rfV2 : :class:`~obspy.core.Stream`
        Stream object containing the radial-component receiver function
        seismograms (typically filtered at lower frequencies)
    strike : float
        Strike angle of dipping Moho (has to be known or estimated a priori)
    dip : float
        Dip angle of Moho (has to be known or estimated a priori)
    vp : float
        Mean P-wave velocity of the crust (km/s)

    Other Parameters
    ----------------
    kbound : list
        List of 2 floats that determine the range of Vp/Vs values to search
    dk : float
        Spacing between adjacent Vp/Vs search values
    hbound : list
        List of 2 floats that determine the range of Moho depth values to search
    dh : float
        Spacing between adjacent Moho depth search values
    weights : list
        List of 3 floats that determine the relative weights of the individual
        phase stacks to be used in the final stack. The third weight is negative
        since Pss amplitudes are opposite to those of the Ps and Pps phases.
    phases : list
        List of 3 strings ('ps', 'pps', 'pss') corresponding to the thre phases
        of interest (`do not modify this attribute`)
    """

    def __init__(self, rfV1, rfV2=None, strike=None, dip=None, vp=6.0):

        # Load example data if initializing empty object
        if rfV1 == 'demo' or rfV1 == 'Demo':
            print("Uploading demo data - station NY.MMPY")
            import os
            import pickle
            file = open(os.path.join(
                os.path.dirname(__file__),
                "examples/data", "demo_streams.pkl"), 'rb')
            rfV1 = pickle.load(file)
            file.close()

        # fftshift if the time axis starts at negative lags
        if rfV1[0].stats.taxis[0] < 0.:
            nn = rfV1[0].stats.npts
            for tr in rfV1:
                tr.data = np.fft.fftshift(tr.data)[0:int(nn/2)]
                # Bug fix: shift the *time axis*, not the already-shifted
                # data (the original assigned taxis from tr.data)
                tr.stats.taxis = np.fft.fftshift(tr.stats.taxis)[0:int(nn/2)]
            try:
                for tr in rfV2:
                    tr.data = np.fft.fftshift(tr.data)[0:int(nn/2)]
                    tr.stats.taxis = np.fft.fftshift(
                        tr.stats.taxis)[0:int(nn/2)]
            except Exception:
                # rfV2 is optional - skip silently if absent/not iterable
                pass

        self.rfV1 = rfV1
        self.rfV2 = rfV2
        self.strike = strike
        self.dip = dip
        self.vp = vp
        self.kbound = [1.56, 2.1]
        self.dk = 0.02
        self.hbound = [20., 50.]
        self.dh = 0.5
        self.weights = [0.5, 2., -1.]
        self.phases = ['ps', 'pps', 'pss']

    def stack(self, vp=None):
        """
        Method to calculate Hk stacks from radial receiver functions.
        The stacks are calculated using phase-weighted stacking for
        individual phases and take the median of the weighted stack
        to avoid bias by outliers.

        Note
        ----
        If two streams are available as attributes, the method will assume
        that the second stream should be used for stacking along the Pps
        and Pss move out curves (e.g., if the second stream contains
        lower frequency signals). Furthermore, If the ``vp`` argument is
        not specified, the method will use the
        value set during initialization (``vp=6.0`` km/s)

        Parameters
        ----------
        vp : float
            Mean crust P-wave velocity (km/s).

        Attributes
        ----------
        pws : :class:`~numpy.ndarray`
            Array of phase stacks, where the outer dimension corresponds
            to the phase index (shape ``nH, nk, nph``)
        sig : :class:`~numpy.ndarray`
            Variance of phase stacks, where the outer dimension corresponds
            to the phase index (shape ``nH, nk, nph``)
        """
        # Mean crustal P-wave velocity
        if not vp:
            try:
                vp = self.rfV1[0].stats.vp
            except Exception:
                vp = self.vp

        # Station name
        sta = self.rfV1[0].stats.station

        # Initialize arrays based on bounds
        H = np.arange(self.hbound[0], self.hbound[1] + self.dh, self.dh)
        k = np.arange(self.kbound[0], self.kbound[1] + self.dk, self.dk)

        # Initialize arrays
        amp = np.zeros(len(self.rfV1))
        sig = np.zeros((len(H), len(k), len(self.phases)))
        pws = np.zeros((len(H), len(k), len(self.phases)))

        for ih in _progressbar(range(len(H)), 'Computing: ', 15):
            for ik, kk in enumerate(k):
                for ip, ph in enumerate(self.phases):
                    # Reset the phase-coherence accumulator for each grid
                    # cell (the original accumulated across cells, biasing
                    # the weights). np.complex() was removed in NumPy 1.24;
                    # a plain 0j literal is equivalent.
                    weight = 0j
                    for i in range(len(self.rfV1)):
                        if self.rfV2 and (ph == 'pps' or ph == 'pss'):
                            rfV = self.rfV2[i].copy()
                        else:
                            rfV = self.rfV1[i].copy()
                        # Calculate move out for each phase and get
                        # median value, weighted by instantaneous phase (pws)
                        tt = _dtime_(rfV, H[ih], kk, vp, ph)
                        trace = _timeshift_(rfV, tt)
                        thilb = hilbert(trace)
                        tphase = np.arctan2(thilb.imag, thilb.real)
                        weight += np.exp(1j*tphase[0])
                        amp[i] = trace[0]

                    weight = abs(weight/len(self.rfV1))**4
                    sig[ih, ik, ip] = np.var(amp)*np.real(weight)
                    pws[ih, ik, ip] = np.median(amp)*np.real(weight)

        self.pws = pws
        self.sig = sig

    def stack_dip(self, vp=None, strike=None, dip=None):
        """
        Method to calculate Hk stacks from radial receiver functions
        using known stike and dip angles of the Moho.
        The stacks are calculated using phase-weighted stacking for
        individual phases and take the median of the weighted stack
        to avoid bias by outliers.

        Note
        ----
        If two streams are available as attributes, the method will assume
        that the second stream should be used for stacking along the Pps
        and Pss move out curves (e.g., if the second stream contains
        lower frequency signals). Furthermore,
        If the arguments are not specified, the method will use the
        values set during initialization (``vp=6.0`` km/s,
        ``strike=0.``, ``dip=0.``)

        Parameters
        ----------
        vp : float
            Mean crust P-wave velocity (km/s).
        strike : float
            Strike angle of dipping Moho (has to be known or estimated a priori)
        dip : float
            Dip angle of Moho (has to be known or estimated a priori)

        Attributes
        ----------
        pws : :class:`~numpy.ndarray`
            Array of phase stacks, where the outer dimension corresponds
            to the phase index (shape ``nH, nk, nph``)
        sig : :class:`~numpy.ndarray`
            Variance of phase stacks, where the outer dimension corresponds
            to the phase index (shape ``nH, nk, nph``)
        """
        if not strike:
            strike = self.strike
        else:
            self.strike = strike
        if not dip:
            dip = self.dip
        else:
            self.dip = dip

        # P-wave velocity
        if not vp:
            try:
                vp = self.rfV1[0].stats.vp
            except Exception:
                vp = 6.0

        sta = self.rfV1[0].stats.station

        # Initialize arrays based on bounds
        H = np.arange(self.hbound[0], self.hbound[1] + self.dh, self.dh)
        k = np.arange(self.kbound[0], self.kbound[1] + self.dk, self.dk)

        # Initialize arrays
        amp = np.zeros(len(self.rfV1))
        sig = np.zeros((len(H), len(k), len(self.phases)))
        pws = np.zeros((len(H), len(k), len(self.phases)))

        for ih in _progressbar(range(len(H)), 'Computing: ', 15):
            for ik, kk in enumerate(k):
                for ip, ph in enumerate(self.phases):
                    # Reset the accumulator per grid cell (see stack());
                    # 0j replaces the removed np.complex()
                    weight = 0j
                    for i in range(len(self.rfV1)):
                        if self.rfV2 and (ph == 'pps' or ph == 'pss'):
                            rfV = self.rfV2[i].copy()
                        else:
                            rfV = self.rfV1[i].copy()
                        # Calculate move out for each phase and get
                        # median value, weighted by instantaneous phase (pws)
                        tt = _dtime_dip_(
                            rfV, H[ih], kk, vp, ph, self.strike, self.dip)
                        trace = _timeshift_(rfV, tt)
                        thilb = hilbert(trace)
                        tphase = np.arctan2(thilb.imag, thilb.real)
                        weight += np.exp(1j*tphase[0])
                        amp[i] = trace[0]

                    # np.float() was removed in NumPy 1.24; the builtin
                    # division already yields a float
                    weight = abs(weight/len(self.rfV1))**4
                    sig[ih, ik, ip] = np.var(amp)*np.real(weight)
                    pws[ih, ik, ip] = np.median(amp)*np.real(weight)

        self.pws = pws
        self.sig = sig

    def average(self, typ='sum', q=0.05, err_method='amp'):
        """
        Method to combine the phase-weighted stacks to produce a final
        stack, from which to estimate the H and k parameters and their
        associated errors.

        Parameters
        ----------
        typ : str
            How the phase-weigthed stacks should be combined to produce
            a final stack. Available options are: weighted sum (``typ=sum``)
            or product (``typ=product``).
        q : float
            Confidence level for the error estimate
        err_method : str
            How errors should be estimated. Options are ``err_method='amp'``
            to estimate errors from amplitude, or ``err_method='stats'`` to
            use a statistical F test from the residuals.
        """
        # Initialize arrays based on bounds
        H = np.arange(self.hbound[0], self.hbound[1] + self.dh, self.dh)
        k = np.arange(self.kbound[0], self.kbound[1] + self.dk, self.dk)

        # Multiply pws by weights
        ps = self.pws[:, :, 0]*self.weights[0]
        try:
            pps = self.pws[:, :, 1]*self.weights[1]
        except Exception:
            pps = None
        try:
            pss = self.pws[:, :, 2]*self.weights[2]
        except Exception:
            pss = None

        # Get stacks
        if typ == 'sum':
            stack = (ps + pps + pss)
        elif typ == 'product':
            # Zero out negative values
            ps[ps < 0] = 0.
            if self.weights[1] != 0.:
                pps[pps < 0] = 0.
            else:
                pps = 1.
            if self.weights[2] != 0.:
                pss[pss < 0] = 0.
            else:
                pss = 1.
            stack = ps*pps*pss
        else:
            raise(Exception("'typ' must be either 'sum' or 'product'"))

        self.typ = typ

        # Find maximum within stacks
        ind = np.where(stack == stack.max())
        self.h0 = H[ind[0]][0]
        self.k0 = k[ind[1]][0]
        self.stack = stack

        try:
            # Bug fix: forward the requested confidence level and error
            # method (the original silently ignored both arguments)
            self.error(q=q, err_method=err_method)
        except Exception:
            self.err_k0 = 0.
            self.err_h0 = 0.

    def error(self, q=0.05, err_method='amp'):
        """
        Method to determine the error on H and k estimates.

        From Walsh, JGR, 2013

        Parameters
        ----------
        q : float
            Confidence level for the error estimate
        err_method : str
            How errors should be estimated. Options are ``err_method='amp'``
            to estimate errors from amplitude, or ``err_method='stats'`` to
            use a statistical F test from the residuals.
        """
        # Initialize arrays based on bounds
        H = np.arange(self.hbound[0], self.hbound[1] + self.dh, self.dh)
        k = np.arange(self.kbound[0], self.kbound[1] + self.dk, self.dk)

        msf = self.stack/self.stack.max()

        # Method 1 - based on stats
        if err_method == 'stats':

            # Get degrees of freedom
            dof = _dof(self._residuals())
            if dof < 3:
                dof = 3
                # Typo fix: the warning previously read "accurate"
                print(
                    "Degrees of freedom < 3. Fixing to DOF = 3, which may " +
                    "result in inaccurate errors")
            n_par = 2
            msf = 1. - msf

            # Error contour from the F distribution
            vmin = msf.min()
            vmax = msf.max()
            self.err_contour = vmin*(1. + n_par/(dof - n_par) *
                                     stats.f.ppf(1. - q, n_par, dof - n_par))
            err = np.where(msf < self.err_contour)

        # Method 2 - based on amplitude
        elif err_method == 'amp':
            self.err_contour = 0.5
            err = np.where(msf > self.err_contour)

        else:
            raise(Exception("'err_method' must be either 'stats' or 'amp'"))

        self.err_method = err_method

        # Estimate uncertainty (q confidence interval)
        self.err_k0 = max(0.25*(k[max(err[1])] - k[min(err[1])]), self.dk)
        self.err_h0 = max(0.25*(H[max(err[0])] - H[min(err[0])]), self.dh)

    def plot(self, save=False, title=None, form='png'):
        """
        Method to plot H-K stacks. By default all 4 panels
        are plotted: The ``ps``, ``pps`` and ``pss`` stacks, and the
        final (averaged) stack. Error contours are also plotted,
        as well as the position of the maximum stack values.

        Parameters
        ----------
        save : bool
            Whether or not to save the Figure
        title : str
            Title of plot
        """
        # Initialize arrays based on bounds
        H = np.arange(self.hbound[0], self.hbound[1] + self.dh, self.dh)
        k = np.arange(self.kbound[0], self.kbound[1] + self.dk, self.dk)

        # Extent of plots
        extent = (H.min(), H.max(), k.min(), k.max())

        # Extract phase stacks
        ps = self.pws[:, :, 0]
        pps = self.pws[:, :, 1]
        pss = self.pws[:, :, 2]

        if self.typ == 'product':
            # Zero out negative values
            ps[ps < 0] = 0.
            try:
                pps[pps < 0] = 0.
            except Exception:
                pass
            try:
                pss[pss < 0] = 0.
            except Exception:
                pass

        # Set up figure
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(
            2, 2, sharex=True, sharey=True)

        cmap = 'RdBu_r'

        # First subplot: Ps
        vmax = np.abs(max(ps.max(), ps.min(), key=abs))
        im = ax1.imshow(np.rot90(ps), cmap=cmap,
                        extent=extent, vmin=-vmax, vmax=vmax, aspect='auto')
        ax1.set_ylabel('Vp/Vs')
        ax1.set_title('Ps - weight: {0:.1f}'.format(
            self.weights[0]), fontsize=10)

        # Second subplot: Pps
        vmax = np.abs(max(pps.max(), pps.min(), key=abs))
        im = ax2.imshow(np.rot90(pps), cmap=cmap,
                        extent=extent, vmin=-vmax, vmax=vmax, aspect='auto')
        ax2.set_title('Pps - weight: {0:.1f}'.format(
            self.weights[1]), fontsize=10)

        # Third subplot: Pss
        vmax = np.abs(max(pss.max(), pss.min(), key=abs))
        im = ax3.imshow(np.rot90(pss), cmap=cmap,
                        extent=extent, vmin=-vmax, vmax=vmax, aspect='auto')
        ax3.set_title('Pss - weight: {0:.1f}'.format(
            self.weights[2]), fontsize=10)
        ax3.set_ylabel('Vp/Vs')
        ax3.set_xlabel('Thickness (km)')

        # Fourth subplot: Average
        vmax = np.abs(max(self.stack.max(), self.stack.min(), key=abs))
        im = ax4.imshow(np.rot90(self.stack), cmap=cmap,
                        extent=extent, vmin=-vmax, vmax=vmax, aspect='auto')
        ax4.set_title('Stack')
        ax4.set_xlabel('Thickness (km)')

        # Get confidence intervals
        # (the deprecated 'hold' keyword was removed from Axes.contour in
        # Matplotlib 2.0/3.0 and would raise a TypeError)
        if hasattr(self, 'err_contour'):
            if self.err_method == 'stats':
                ax4.contour(
                    np.rot90(1.-self.stack/self.stack.max()),
                    (self.err_contour,),
                    colors='yellow', linewidths=1, origin='upper',
                    extent=extent)
            elif self.err_method == 'amp':
                ax4.contour(
                    np.rot90(self.stack/self.stack.max()),
                    (self.err_contour,),
                    colors='yellow', linewidths=1, origin='upper',
                    extent=extent)

        # Add star showing best fit
        try:
            ax4.scatter(self.h0, self.k0, 60, marker='*', color='white')
        except Exception:
            print("'h0' and 'k0' are not available")

        if title:
            plt.suptitle(title)
        else:
            plt.suptitle('H-k stacks, station: ' + self.rfV1[0].stats.station)

        if save:
            # Skip the title segment of the file name when no title was
            # given (the original crashed concatenating None)
            tstr = (title + '.') if title else ''
            plt.savefig('HK_PLOTS/hk.' + self.rfV1[0].stats.station +
                        '.' + tstr + self.typ + '.' + form, format=form)
        else:
            plt.show()

        plt.close()

    def save(self, file):
        """
        Saves HkStack object to file

        Parameters
        ----------
        file : str
            File name for HkStack object
        """
        import pickle
        # Context manager guarantees the file handle is closed
        with open(file, 'wb') as output:
            pickle.dump(self, output)

    def _residuals(self):
        """
        Internal method to obtain residuals between observed and predicted
        receiver functions given the Moho depth and Vp/Vs obtained from
        the Hk stack.
        """
        from telewavesim import utils

        # Simple 1-layer model over half-space
        model = utils.Model(
            [self.h0, 0.],
            [2800., 3300.],
            [self.vp, 8.0],
            [self.vp/self.k0, 4.5],
            ['iso', 'iso'])

        # Parameters for run
        slow = [tr.stats.slow for tr in self.rfV1]
        npts = self.rfV1[0].stats.npts
        dt = self.rfV1[0].stats.delta

        trR = Stream()

        for sl in slow:
            trxyz = utils.run_plane(model, sl, npts, dt)
            tfs = utils.tf_from_xyz(
                trxyz, pvh=True, vp=self.vp, vs=self.vp/self.k0)
            tfs[0].data = np.fft.fftshift(tfs[0].data)
            trR.append(tfs[0])

        trR.filter('bandpass', freqmin=0.05, freqmax=0.5, corners=2,
                   zerophase=True)

        # Get stream of residuals
        res = trR.copy()
        for i in range(len(res)):
            res[i].data = self.rfV1[i].data - trR[i].data

        return res
def _dof(st):
    """
    Estimate the number of degrees of freedom used to compute the
    confidence region of the misfit function.

    From Walsh, JGR, 2013
    """
    dof_values = []
    for trace in st:
        # One-sided amplitude spectrum (DC through Nyquist)
        npts = len(trace.data)
        spec = np.abs(np.fft.fft(trace.data))[:npts//2 + 1]

        # Second- and fourth-moment spectral energies, with the DC and
        # Nyquist bins given half / one-third weight respectively
        energy2 = np.sum(spec**2) - (spec[0]**2 + spec[-1]**2)/2.
        energy4 = (1./3.)*(spec[0]**4 + spec[-1]**4)
        for amp in spec[1:-1]:
            energy4 += (4./3.)*amp**4

        dof_values.append(int(4.*energy2**2/energy4 - 2.))

    # The most conservative (smallest) estimate across all traces
    return min(dof_values)
def _dtime_(trace, z, r, vp, ph):
    """
    Calculate the travel-time delay of a scattered phase for a flat Moho.

    Parameters
    ----------
    trace : object
        Trace-like object exposing ``stats.slow`` (horizontal slowness)
    z : float
        Interface (Moho) depth
    r : float
        Vp/Vs ratio
    vp : float
        Mean crustal P-wave velocity
    ph : str
        Phase name: 'ps', 'pps' or 'pss'

    Returns
    -------
    float
        Predicted delay time of the scattered phase.

    Raises
    ------
    ValueError
        If ``ph`` is not one of 'ps', 'pps', 'pss' (the original fell
        through and raised an UnboundLocalError on ``tt``).
    """
    # Horizontal slowness
    slow = trace.stats.slow

    # Vertical slownesses: c1 for the S leg, c2 for the P leg
    c1 = np.sqrt((r/vp)**2 - slow**2)
    c2 = np.sqrt((1./vp)**2 - slow**2)

    if ph == 'ps':
        return z*(c1 - c2)
    elif ph == 'pps':
        return z*(c1 + c2)
    elif ph == 'pss':
        return 2.*z*c1
    raise ValueError("'ph' must be one of 'ps', 'pps' or 'pss'")
def _dtime_dip_(trace, z, r, vp, ph, strike, dip):
    """
    Method to calculate travel time for different scattered phases
    using strike and dip angles

    Parameters: trace is a Trace-like object exposing ``stats.slow``
    (horizontal slowness) and ``stats.baz`` (back-azimuth, degrees);
    z is interface depth, r the Vp/Vs ratio, vp the crustal P velocity,
    ph one of 'ps'/'pps'/'pss', strike/dip the Moho orientation in degrees.
    Returns the predicted delay time of the scattered phase.
    """

    # Initialize some parameters
    n = np.zeros(3)      # unit normal to the dipping interface
    pinc = np.zeros(3)   # slowness vector of the incident wave
    ai = 8.1             # presumably mantle P velocity (km/s) - TODO confirm
    br = vp/r            # crustal S velocity

    # Get vector normal to dipping interface (angles converted to radians)
    dip = dip*np.pi/180.
    strike = strike*np.pi/180.
    n[0] = np.sin(strike)*np.sin(dip)
    n[1] = -np.cos(strike)*np.sin(dip)
    n[2] = np.cos(dip)

    # Horizontal slowness
    slow = trace.stats.slow

    # Back-azimuth
    baz = trace.stats.baz

    # Assemble constants of incident wave
    c1 = n[2]
    # Propagation azimuth is opposite the back-azimuth
    theta = baz*np.pi/180.+np.pi
    pinc[0] = slow*np.cos(theta)
    pinc[1] = slow*np.sin(theta)
    # Vertical component is negative: wave travels upward
    pinc[2] = -np.sqrt(1./(ai*ai) - slow*slow)

    # Calculate scalar product n * pinc
    ndotpi = n[0]*pinc[0] + n[1]*pinc[1] + n[2]*pinc[2]

    # Incident normal slowness
    c2 = 1./(ai*ai) - ndotpi**2

    if ph == 'ps':
        a = vp*vp
        tt = z*(c1*(np.sqrt(r*r/a - c2) - np.sqrt(1./a - c2)))

    elif ph == 'pps':
        # Slowness of the free-surface-reflected P leg (sign-flipped z)
        pref = pinc - ndotpi*n - np.sqrt(1./(vp*vp)-c2)*n
        pref[2] = -pref[2]
        ndotpr = n[0]*pref[0]+n[1]*pref[1]+n[2]*pref[2]
        c4 = 1./(vp*vp) - ndotpr**2
        # NOTE(review): c3 is computed but never used
        c3 = 2.*pref[2] - 2.*n[2]*ndotpr
        a = vp*vp
        tt = z*(c1*(np.sqrt(r*r/a - c4) + np.sqrt(1./a - c4)))

    elif ph == 'pss':
        # NOTE(review): this assignment is dead - tt is overwritten below
        tt = 2.*z*c1
        pref = pinc - ndotpi*n - np.sqrt(1./(vp*vp) - c2)*n
        # Vertical slowness of the S leg after free-surface conversion
        pref[2] = np.sqrt(1./(br*br) - pref[0]*pref[0] - pref[1]*pref[1])
        ndotpr = n[0]*pref[0] + n[1]*pref[1] + n[2]*pref[2]
        c6 = 1./(br*br) - ndotpr**2
        a = vp*vp
        tt = z*(2.*c1*np.sqrt(r*r/a - c6))

    # NOTE(review): an unknown ph falls through and raises
    # UnboundLocalError on tt
    return tt
def _timeshift_(trace, tt):
    """
    Shift a trace in time by ``tt`` seconds using the Fourier shift theorem.

    Parameters
    ----------
    trace : object
        Trace-like object exposing ``data``, ``stats.npts`` and
        ``stats.delta``
    tt : float
        Time shift (seconds); positive ``tt`` advances the signal

    Returns
    -------
    :class:`~numpy.ndarray`
        Real part of the inverse FFT of the phase-shifted spectrum.
    """
    # Define frequencies
    nt = trace.stats.npts
    dt = trace.stats.delta
    freq = np.fft.fftfreq(nt, d=dt)

    # Apply the phase ramp to the whole spectrum at once (vectorized
    # replacement for the original per-frequency Python loop - same
    # result, much faster)
    ftrace = np.fft.fft(trace.data)*np.exp(2.*np.pi*1j*freq*tt)

    # Back Fourier transform, discarding the numerical imaginary residue
    return np.real(np.fft.ifft(ftrace))
def _progressbar(it, prefix="", size=60, file=sys.stdout):
    """
    Wrap an iterable and display a text progress bar while looping.
    """
    total = len(it)

    def render(done):
        # Number of filled cells in the bar
        filled = int(size*done/total)
        file.write("%s[%s%s] %i/%i\r" %
                   (prefix, "#"*filled, "."*(size-filled), done, total))
        file.flush()

    render(0)
    for done, element in enumerate(it, start=1):
        yield element
        render(done)
    file.write("\n")
    file.flush()
| [
"numpy.sqrt",
"obspy.core.Stream",
"numpy.arctan2",
"numpy.rot90",
"numpy.sin",
"telewavesim.utils.run_plane",
"numpy.arange",
"numpy.where",
"numpy.fft.fft",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.real",
"matplotlib.pyplot.savefig",
"telewavesim.utils.tf_from_xyz",
"pickle.load"... | [((22635, 22669), 'numpy.sqrt', 'np.sqrt', (['((r / vp) ** 2 - slow ** 2)'], {}), '((r / vp) ** 2 - slow ** 2)\n', (22642, 22669), True, 'import numpy as np\n'), ((22673, 22709), 'numpy.sqrt', 'np.sqrt', (['((1.0 / vp) ** 2 - slow ** 2)'], {}), '((1.0 / vp) ** 2 - slow ** 2)\n', (22680, 22709), True, 'import numpy as np\n'), ((23064, 23075), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (23072, 23075), True, 'import numpy as np\n'), ((23087, 23098), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (23095, 23098), True, 'import numpy as np\n'), ((23316, 23327), 'numpy.cos', 'np.cos', (['dip'], {}), '(dip)\n', (23322, 23327), True, 'import numpy as np\n'), ((24756, 24780), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['nt'], {'d': 'dt'}), '(nt, d=dt)\n', (24770, 24780), True, 'import numpy as np\n'), ((24819, 24841), 'numpy.fft.fft', 'np.fft.fft', (['trace.data'], {}), '(trace.data)\n', (24829, 24841), True, 'import numpy as np\n'), ((6638, 6698), 'numpy.arange', 'np.arange', (['self.hbound[0]', '(self.hbound[1] + self.dh)', 'self.dh'], {}), '(self.hbound[0], self.hbound[1] + self.dh, self.dh)\n', (6647, 6698), True, 'import numpy as np\n'), ((6711, 6771), 'numpy.arange', 'np.arange', (['self.kbound[0]', '(self.kbound[1] + self.dk)', 'self.dk'], {}), '(self.kbound[0], self.kbound[1] + self.dk, self.dk)\n', (6720, 6771), True, 'import numpy as np\n'), ((6818, 6833), 'numpy.complex', 'np.complex', (['(0.0)'], {}), '(0.0)\n', (6828, 6833), True, 'import numpy as np\n'), ((10490, 10550), 'numpy.arange', 'np.arange', (['self.hbound[0]', '(self.hbound[1] + self.dh)', 'self.dh'], {}), '(self.hbound[0], self.hbound[1] + self.dh, self.dh)\n', (10499, 10550), True, 'import numpy as np\n'), ((10563, 10623), 'numpy.arange', 'np.arange', (['self.kbound[0]', '(self.kbound[1] + self.dk)', 'self.dk'], {}), '(self.kbound[0], self.kbound[1] + self.dk, self.dk)\n', (10572, 10623), True, 'import numpy as np\n'), ((10670, 10685), 'numpy.complex', 'np.complex', 
(['(0.0)'], {}), '(0.0)\n', (10680, 10685), True, 'import numpy as np\n'), ((12914, 12974), 'numpy.arange', 'np.arange', (['self.hbound[0]', '(self.hbound[1] + self.dh)', 'self.dh'], {}), '(self.hbound[0], self.hbound[1] + self.dh, self.dh)\n', (12923, 12974), True, 'import numpy as np\n'), ((12987, 13047), 'numpy.arange', 'np.arange', (['self.kbound[0]', '(self.kbound[1] + self.dk)', 'self.dk'], {}), '(self.kbound[0], self.kbound[1] + self.dk, self.dk)\n', (12996, 13047), True, 'import numpy as np\n'), ((14743, 14803), 'numpy.arange', 'np.arange', (['self.hbound[0]', '(self.hbound[1] + self.dh)', 'self.dh'], {}), '(self.hbound[0], self.hbound[1] + self.dh, self.dh)\n', (14752, 14803), True, 'import numpy as np\n'), ((14816, 14876), 'numpy.arange', 'np.arange', (['self.kbound[0]', '(self.kbound[1] + self.dk)', 'self.dk'], {}), '(self.kbound[0], self.kbound[1] + self.dk, self.dk)\n', (14825, 14876), True, 'import numpy as np\n'), ((16843, 16903), 'numpy.arange', 'np.arange', (['self.hbound[0]', '(self.hbound[1] + self.dh)', 'self.dh'], {}), '(self.hbound[0], self.hbound[1] + self.dh, self.dh)\n', (16852, 16903), True, 'import numpy as np\n'), ((16916, 16976), 'numpy.arange', 'np.arange', (['self.kbound[0]', '(self.kbound[1] + self.dk)', 'self.dk'], {}), '(self.kbound[0], self.kbound[1] + self.dk, self.dk)\n', (16925, 16976), True, 'import numpy as np\n'), ((17536, 17580), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, sharex=True, sharey=True)\n', (17548, 17580), True, 'from matplotlib import pyplot as plt\n'), ((20336, 20347), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20345, 20347), True, 'from matplotlib import pyplot as plt\n'), ((20629, 20654), 'pickle.dump', 'pickle.dump', (['self', 'output'], {}), '(self, output)\n', (20640, 20654), False, 'import pickle\n'), ((21001, 21108), 'telewavesim.utils.Model', 'utils.Model', (['[self.h0, 0.0]', '[2800.0, 3300.0]', '[self.vp, 8.0]', 
'[self.vp / self.k0, 4.5]', "['iso', 'iso']"], {}), "([self.h0, 0.0], [2800.0, 3300.0], [self.vp, 8.0], [self.vp /\n self.k0, 4.5], ['iso', 'iso'])\n", (21012, 21108), False, 'from telewavesim import utils\n'), ((21334, 21342), 'obspy.core.Stream', 'Stream', ([], {}), '()\n', (21340, 21342), False, 'from obspy.core import Stream, Trace, AttribDict\n'), ((22171, 22185), 'numpy.sum', 'np.sum', (['(F ** 2)'], {}), '(F ** 2)\n', (22177, 22185), True, 'import numpy as np\n'), ((23239, 23253), 'numpy.sin', 'np.sin', (['strike'], {}), '(strike)\n', (23245, 23253), True, 'import numpy as np\n'), ((23254, 23265), 'numpy.sin', 'np.sin', (['dip'], {}), '(dip)\n', (23260, 23265), True, 'import numpy as np\n'), ((23293, 23304), 'numpy.sin', 'np.sin', (['dip'], {}), '(dip)\n', (23299, 23304), True, 'import numpy as np\n'), ((23537, 23550), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (23543, 23550), True, 'import numpy as np\n'), ((23570, 23583), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (23576, 23583), True, 'import numpy as np\n'), ((23599, 23637), 'numpy.sqrt', 'np.sqrt', (['(1.0 / (ai * ai) - slow * slow)'], {}), '(1.0 / (ai * ai) - slow * slow)\n', (23606, 23637), True, 'import numpy as np\n'), ((24998, 25017), 'numpy.fft.ifft', 'np.fft.ifft', (['ftrace'], {}), '(ftrace)\n', (25009, 25017), True, 'import numpy as np\n'), ((4253, 4270), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (4264, 4270), False, 'import pickle\n'), ((15770, 15802), 'numpy.where', 'np.where', (['(msf < self.err_contour)'], {}), '(msf < self.err_contour)\n', (15778, 15802), True, 'import numpy as np\n'), ((17728, 17740), 'numpy.rot90', 'np.rot90', (['ps'], {}), '(ps)\n', (17736, 17740), True, 'import numpy as np\n'), ((18071, 18084), 'numpy.rot90', 'np.rot90', (['pps'], {}), '(pps)\n', (18079, 18084), True, 'import numpy as np\n'), ((18383, 18396), 'numpy.rot90', 'np.rot90', (['pss'], {}), '(pss)\n', (18391, 18396), True, 'import numpy as np\n'), ((18787, 18807), 
'numpy.rot90', 'np.rot90', (['self.stack'], {}), '(self.stack)\n', (18795, 18807), True, 'import numpy as np\n'), ((20017, 20036), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (20029, 20036), True, 'from matplotlib import pyplot as plt\n'), ((20063, 20129), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('H-k stacks, station: ' + self.rfV1[0].stats.station)"], {}), "('H-k stacks, station: ' + self.rfV1[0].stats.station)\n", (20075, 20129), True, 'from matplotlib import pyplot as plt\n'), ((20160, 20277), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('HK_PLOTS/hk.' + self.rfV1[0].stats.station + '.' + title + '.' + self.typ +\n '.' + form)"], {'format': 'form'}), "('HK_PLOTS/hk.' + self.rfV1[0].stats.station + '.' + title + '.' +\n self.typ + '.' + form, format=form)\n", (20171, 20277), True, 'from matplotlib import pyplot as plt\n'), ((20316, 20326), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20324, 20326), True, 'from matplotlib import pyplot as plt\n'), ((21388, 21424), 'telewavesim.utils.run_plane', 'utils.run_plane', (['model', 'sl', 'npts', 'dt'], {}), '(model, sl, npts, dt)\n', (21403, 21424), False, 'from telewavesim import utils\n'), ((21443, 21511), 'telewavesim.utils.tf_from_xyz', 'utils.tf_from_xyz', (['trxyz'], {'pvh': '(True)', 'vp': 'self.vp', 'vs': '(self.vp / self.k0)'}), '(trxyz, pvh=True, vp=self.vp, vs=self.vp / self.k0)\n', (21460, 21511), False, 'from telewavesim import utils\n'), ((21553, 21581), 'numpy.fft.fftshift', 'np.fft.fftshift', (['tfs[0].data'], {}), '(tfs[0].data)\n', (21568, 21581), True, 'import numpy as np\n'), ((23278, 23292), 'numpy.cos', 'np.cos', (['strike'], {}), '(strike)\n', (23284, 23292), True, 'import numpy as np\n'), ((24916, 24957), 'numpy.exp', 'np.exp', (['(2.0 * np.pi * 1.0j * freq[i] * tt)'], {}), '(2.0 * np.pi * 1.0j * freq[i] * tt)\n', (24922, 24957), True, 'import numpy as np\n'), ((15932, 15964), 'numpy.where', 'np.where', (['(msf > self.err_contour)'], {}), 
'(msf > self.err_contour)\n', (15940, 15964), True, 'import numpy as np\n'), ((22111, 22130), 'numpy.fft.fft', 'np.fft.fft', (['tr.data'], {}), '(tr.data)\n', (22121, 22130), True, 'import numpy as np\n'), ((24336, 24400), 'numpy.sqrt', 'np.sqrt', (['(1.0 / (br * br) - pref[0] * pref[0] - pref[1] * pref[1])'], {}), '(1.0 / (br * br) - pref[0] * pref[0] - pref[1] * pref[1])\n', (24343, 24400), True, 'import numpy as np\n'), ((4147, 4172), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4162, 4172), False, 'import os\n'), ((4487, 4511), 'numpy.fft.fftshift', 'np.fft.fftshift', (['tr.data'], {}), '(tr.data)\n', (4502, 4511), True, 'import numpy as np\n'), ((4558, 4582), 'numpy.fft.fftshift', 'np.fft.fftshift', (['tr.data'], {}), '(tr.data)\n', (4573, 4582), True, 'import numpy as np\n'), ((23849, 23872), 'numpy.sqrt', 'np.sqrt', (['(r * r / a - c2)'], {}), '(r * r / a - c2)\n', (23856, 23872), True, 'import numpy as np\n'), ((23871, 23892), 'numpy.sqrt', 'np.sqrt', (['(1.0 / a - c2)'], {}), '(1.0 / a - c2)\n', (23878, 23892), True, 'import numpy as np\n'), ((23948, 23977), 'numpy.sqrt', 'np.sqrt', (['(1.0 / (vp * vp) - c2)'], {}), '(1.0 / (vp * vp) - c2)\n', (23955, 23977), True, 'import numpy as np\n'), ((4675, 4699), 'numpy.fft.fftshift', 'np.fft.fftshift', (['tr.data'], {}), '(tr.data)\n', (4690, 4699), True, 'import numpy as np\n'), ((4750, 4774), 'numpy.fft.fftshift', 'np.fft.fftshift', (['tr.data'], {}), '(tr.data)\n', (4765, 4774), True, 'import numpy as np\n'), ((7706, 7720), 'scipy.signal.hilbert', 'hilbert', (['trace'], {}), '(trace)\n', (7713, 7720), False, 'from scipy.signal import hilbert\n'), ((7754, 7788), 'numpy.arctan2', 'np.arctan2', (['thilb.imag', 'thilb.real'], {}), '(thilb.imag, thilb.real)\n', (7764, 7788), True, 'import numpy as np\n'), ((7823, 7847), 'numpy.exp', 'np.exp', (['(1.0j * tphase[0])'], {}), '(1.0j * tphase[0])\n', (7829, 7847), True, 'import numpy as np\n'), ((8396, 8407), 'numpy.var', 'np.var', (['amp'], 
{}), '(amp)\n', (8402, 8407), True, 'import numpy as np\n'), ((8408, 8423), 'numpy.real', 'np.real', (['weight'], {}), '(weight)\n', (8415, 8423), True, 'import numpy as np\n'), ((8462, 8476), 'numpy.median', 'np.median', (['amp'], {}), '(amp)\n', (8471, 8476), True, 'import numpy as np\n'), ((8477, 8492), 'numpy.real', 'np.real', (['weight'], {}), '(weight)\n', (8484, 8492), True, 'import numpy as np\n'), ((11614, 11628), 'scipy.signal.hilbert', 'hilbert', (['trace'], {}), '(trace)\n', (11621, 11628), False, 'from scipy.signal import hilbert\n'), ((11662, 11696), 'numpy.arctan2', 'np.arctan2', (['thilb.imag', 'thilb.real'], {}), '(thilb.imag, thilb.real)\n', (11672, 11696), True, 'import numpy as np\n'), ((11731, 11755), 'numpy.exp', 'np.exp', (['(1.0j * tphase[0])'], {}), '(1.0j * tphase[0])\n', (11737, 11755), True, 'import numpy as np\n'), ((11902, 11913), 'numpy.var', 'np.var', (['amp'], {}), '(amp)\n', (11908, 11913), True, 'import numpy as np\n'), ((11914, 11929), 'numpy.real', 'np.real', (['weight'], {}), '(weight)\n', (11921, 11929), True, 'import numpy as np\n'), ((11968, 11982), 'numpy.median', 'np.median', (['amp'], {}), '(amp)\n', (11977, 11982), True, 'import numpy as np\n'), ((11983, 11998), 'numpy.real', 'np.real', (['weight'], {}), '(weight)\n', (11990, 11998), True, 'import numpy as np\n'), ((15532, 15572), 'scipy.stats.f.ppf', 'stats.f.ppf', (['(1.0 - q)', 'n_par', '(dof - n_par)'], {}), '(1.0 - q, n_par, dof - n_par)\n', (15543, 15572), False, 'from scipy import stats\n'), ((24171, 24194), 'numpy.sqrt', 'np.sqrt', (['(r * r / a - c4)'], {}), '(r * r / a - c4)\n', (24178, 24194), True, 'import numpy as np\n'), ((24193, 24214), 'numpy.sqrt', 'np.sqrt', (['(1.0 / a - c4)'], {}), '(1.0 / a - c4)\n', (24200, 24214), True, 'import numpy as np\n'), ((24291, 24320), 'numpy.sqrt', 'np.sqrt', (['(1.0 / (vp * vp) - c2)'], {}), '(1.0 / (vp * vp) - c2)\n', (24298, 24320), True, 'import numpy as np\n'), ((24528, 24551), 'numpy.sqrt', 'np.sqrt', (['(r * r / a 
- c6)'], {}), '(r * r / a - c6)\n', (24535, 24551), True, 'import numpy as np\n')] |
import numpy as np
from zest import zest
from plaster.run.sim_v2.c_dytsim.dytsim_v2 import c_dytsim_v2
from plaster.tools.c_common.c_common_tools import (
CYCLE_TYPE_EDMAN,
CYCLE_TYPE_PRE,
CycleKindType,
PCBType,
DytType,
)
from plaster.tools.zlog.zlog import spy
def zest_c_dytsim():
"""
Must keep track of the recall and never return nul-dyts in the dytpeps.
"""
pcbs = np.array(
[[0, 0, 0], [1, np.nan, np.nan], [1, 0, 0.5], [1, 0, 0.5],], dtype=PCBType
)
def _sim(p_bleach=0.0):
return c_dytsim_v2(
pcbs,
n_samples=100,
n_channels=1,
n_labels=1,
cycles=np.array(
[CYCLE_TYPE_PRE, CYCLE_TYPE_EDMAN, CYCLE_TYPE_EDMAN, CYCLE_TYPE_EDMAN],
dtype=CycleKindType,
),
p_bleach=p_bleach,
p_detach=0.0,
p_edman_fail=0.0,
allow_edman_cterm=False,
)
def no_error_modes():
dytmat, dytpeps, recalls = _sim()
def it_generates_full_sampling():
assert np.sum(dytpeps[:, 2]) == 100
def it_adds_nul_record_to_dytpeps():
assert np.all(dytpeps[0, :] == 0)
def it_sorts_by_count():
assert dytpeps[1, 2] >= dytpeps[2, 2] >= dytpeps[3, 2]
def it_generates_correct_indices():
assert dytmat.dtype == DytType
assert dytmat.tolist() == [
[0, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 1],
[2, 2, 1, 1],
]
assert dytpeps[:, 1].tolist() == [0, 1, 1, 1]
assert set(dytpeps[:, 0].tolist()) == set([0, 1, 2, 3])
@zest.retry(n_tries=2)
def it_handles_duds():
"""
Approx:
1/4 of results will have neither dye (that is recall will be 75%)
2/4 of results will have one or the other dye
1/4 of results will have both dyes
"""
assert recalls[0] == 0
assert -0.1 < recalls[1] - 0.75 < 0.1
# Of the remaining about 1/3 should be each of the other dyts
assert -10 < dytpeps[1, 2] - 33 < 15
assert -10 < dytpeps[2, 2] - 33 < 15
assert -10 < dytpeps[3, 2] - 33 < 15
def it_does_not_return_nul_dyts():
assert np.all(dytpeps[1:, 0] > 0)
zest()
# TODO
# def with_bleaching():
# dytmat, dytpeps, recalls = _sim(p_bleach=0.95)
#
# # Most bleach so most of the counts should go to a dyt of [1, 0, 0, 0] or [2, 0, 0, 0]
# # TODO
# assert dytmat[1, :].tolist() == [1, 0, 0, 0]
# assert dytpeps[1, 0] ==
# assert dytpeps[1, 2] > 60
#
# def it_bleaches():
# raise NotImplementedError
#
# zest()
#
# def with_bleaching():
# def it_handles_cterm():
# # Both variations
# raise NotImplementedError
#
# def it_bleaches():
# raise NotImplementedError
#
# def it_detaches():
# raise NotImplementedError
#
# def it_edmans():
# raise NotImplementedError
#
#
# zest()
#
zest()
| [
"zest.zest",
"zest.zest.retry",
"numpy.array",
"numpy.sum",
"numpy.all"
] | [((411, 499), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, np.nan, np.nan], [1, 0, 0.5], [1, 0, 0.5]]'], {'dtype': 'PCBType'}), '([[0, 0, 0], [1, np.nan, np.nan], [1, 0, 0.5], [1, 0, 0.5]], dtype=\n PCBType)\n', (419, 499), True, 'import numpy as np\n'), ((3266, 3272), 'zest.zest', 'zest', ([], {}), '()\n', (3270, 3272), False, 'from zest import zest\n'), ((1717, 1738), 'zest.zest.retry', 'zest.retry', ([], {'n_tries': '(2)'}), '(n_tries=2)\n', (1727, 1738), False, 'from zest import zest\n'), ((2417, 2423), 'zest.zest', 'zest', ([], {}), '()\n', (2421, 2423), False, 'from zest import zest\n'), ((1190, 1216), 'numpy.all', 'np.all', (['(dytpeps[0, :] == 0)'], {}), '(dytpeps[0, :] == 0)\n', (1196, 1216), True, 'import numpy as np\n'), ((2381, 2407), 'numpy.all', 'np.all', (['(dytpeps[1:, 0] > 0)'], {}), '(dytpeps[1:, 0] > 0)\n', (2387, 2407), True, 'import numpy as np\n'), ((681, 786), 'numpy.array', 'np.array', (['[CYCLE_TYPE_PRE, CYCLE_TYPE_EDMAN, CYCLE_TYPE_EDMAN, CYCLE_TYPE_EDMAN]'], {'dtype': 'CycleKindType'}), '([CYCLE_TYPE_PRE, CYCLE_TYPE_EDMAN, CYCLE_TYPE_EDMAN,\n CYCLE_TYPE_EDMAN], dtype=CycleKindType)\n', (689, 786), True, 'import numpy as np\n'), ((1096, 1117), 'numpy.sum', 'np.sum', (['dytpeps[:, 2]'], {}), '(dytpeps[:, 2])\n', (1102, 1117), True, 'import numpy as np\n')] |
"""
Authors: <NAME>, <NAME>.
Copyright:
Copyright (c) 2020 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from enum import Enum
import numpy as np
import Util, Type
import AST.AST as AST
# TODO - check if this can be cleaned up
class Op:
Op = Enum("Op", "+ - * / << >> & | ^ ~ ! && || < <= > >= == != max .* ./")
Op.print = lambda self, writer: writer.printf("%s", self.name)
Op.op_list = lambda op_str: list(
map(lambda x: Op.Op[x], op_str.split())
) # op_str:str
Op.IsPrefixOp = lambda self: True if (self.name == "max") else False
Op.IsPostfixOp = lambda self: False
class Expr:
pass
class IntExpr(Expr):
pass
class BoolExpr(Expr):
pass
class Int(IntExpr):
@staticmethod
def negMax():
return DataType.getNegMax()
@staticmethod
def max():
return DataType.getMax()
def __init__(self, n: int, wordLen: int = None):
if not (wordLen):
wordLen = Util.Config.wordLength
self.n = DataType.getInt(n, wordLen)
def updateName(self, expr_mapping):
if isinstance(self.n, np.int8):
return self.__class__(self.n, 8)
elif isinstance(self.n, np.int16):
return self.__class__(self.n, 16)
elif isinstance(self.n, np.int32):
return self.__class__(self.n, 32)
elif isinstance(self.n, np.int64):
return self.__class__(self.n, 64)
else:
assert False
return self.__class__(self.n)
class Var(IntExpr):
def __init__(self, idf: str, idx: list = [], inputVar=False):
self.idf = idf
self.idx = idx
self.inputVar = inputVar
def updateName(self, expr_mapping):
idx_new = list(map(lambda e: e.updateName(expr_mapping), self.idx))
if self.idf not in expr_mapping:
return self.__class__(self.idf, idx_new, self.inputVar)
else:
to_e = expr_mapping[self.idf]
if isinstance(to_e, Var):
return self.__class__(
to_e.idf, to_e.idx + idx_new, to_e.inputVar and self.inputVar
)
elif isinstance(to_e, Int):
return to_e
else:
assert False
class Bool(BoolExpr):
def __init__(self, b: bool):
self.b = b
def updateName(self, expr_mapping):
return self.__class__(self.b)
class IntUop(IntExpr):
def __init__(self, op: Op.Op, e: IntExpr):
assert op in Op.Op.op_list("- ~")
self.op = op
self.e = e
def updateName(self, expr_mapping):
return self.__class__(self.op, self.e.updateName(expr_mapping))
class Exp(IntExpr):
def __init__(self, e: IntExpr):
self.e = e
def updateName(self, expr_mapping):
return self.__class__(self.e.updateName(expr_mapping))
class TypeCast(IntExpr):
def __init__(self, type, expr: Expr):
self.type = type
self.expr = expr
def updateName(self, expr_mapping):
return self.__class__(self.type, self.expr.updateName(expr_mapping))
class IntBop(IntExpr):
def __init__(self, e1: IntExpr, op: Op.Op, e2: IntExpr):
assert op in Op.Op.op_list("+ - * / << >> & | ^ ==")
self.e1 = e1
self.op = op
self.e2 = e2
def updateName(self, expr_mapping):
return self.__class__(
self.e1.updateName(expr_mapping), self.op, self.e2.updateName(expr_mapping)
)
class BoolUop(BoolExpr):
def __init__(self, op: Op.Op, e: BoolExpr):
assert op in Op.Op.op_list("") # !
self.op = op
self.e = e
def updateName(self, expr_mapping):
return self.__class__(self.op, self.e.updateName(expr_mapping))
class BoolBop(BoolExpr):
def __init__(self, e1: BoolExpr, op: Op.Op, e2: BoolExpr):
assert op in Op.Op.op_list("&& ||") # || ^
self.e1 = e1
self.op = op
self.e2 = e2
def updateName(self, expr_mapping):
return self.__class__(
self.e1.updateName(expr_mapping), self.op, self.e2.updateName(expr_mapping)
)
class BoolCop(BoolExpr):
def __init__(self, e1: IntExpr, op: Op.Op, e2: IntExpr):
assert op in Op.Op.op_list("< <= > >= == !=") # >= <= !=
self.e1 = e1
self.op = op
self.e2 = e2
def updateName(self, expr_mapping):
return self.__class__(
self.e1.updateName(expr_mapping), self.op, self.e2.updateName(expr_mapping)
)
class CExpr(Expr):
def __init__(self, cond: BoolExpr, et: Expr, ef: Expr):
self.cond = cond
self.et = et
self.ef = ef
def updateName(self, expr_mapping):
return self.__class__(
self.cond.updateName(expr_mapping),
self.et.updateName(expr_mapping),
self.ef.updateName(expr_mapping),
)
class Cmd:
pass
class CmdList:
pass
class Assn(Cmd):
def __init__(self, var: Var, e: Expr):
self.var = var
self.e = e
def updateName(self, expr_mapping):
return self.__class__(
self.var.updateName(expr_mapping), self.e.updateName(expr_mapping)
)
class If(Cmd):
def __init__(self, cond: Expr, trueCmds: CmdList, falseCmds: CmdList = []):
self.cond = cond
self.trueCmds = trueCmds
self.falseCmds = falseCmds
def updateName(self, expr_mapping):
trueCmdsNew = list(map(lambda cmd: cmd.updateName(expr_mapping), self.trueCmds))
falseCmdsNew = list(
map(lambda cmd: cmd.updateName(expr_mapping), self.falseCmds)
)
return self.__class__(
self.cond.updateName(expr_mapping), trueCmdsNew, falseCmdsNew
)
class For(Cmd):
"""
The terminationCond keyword arg should either consist of ending integer for the loop (keyword - endInt)
or the actual condition (keyword - endCond).
"""
__endIntArgStr = "endInt"
__endCondArgStr = "endCond"
def __init__(self, var: Var, st: int, cmd_l: CmdList, fac=0, **terminationCond):
self.var = var
self.st = DataType.getInt(st)
self.cmd_l = cmd_l
self.factor = fac
self.endInt = None
self.endCond = None
if self.__endIntArgStr in terminationCond:
self.endInt = terminationCond[self.__endIntArgStr]
elif self.__endCondArgStr in terminationCond:
self.endCond = terminationCond[self.__endCondArgStr]
else:
assert False
def updateName(self, expr_mapping):
cmd_l_new = list(map(lambda cmd: cmd.updateName(expr_mapping), self.cmd_l))
if self.endCond:
return For(
self.var,
self.st,
cmd_l_new,
self.factor,
endCond=self.cond.updateName(expr_mapping),
)
else:
assert self.endInt is not None
return For(self.var, self.st, cmd_l_new, self.factor, endInt=self.endInt)
class While(Cmd):
def __init__(self, expr: BoolExpr, cmds: CmdList):
self.expr = expr
self.cmds = cmds
def updateName(self, expr_mapping):
cmds_new = list(map(lambda cmd: cmd.updateName(expr_mapping), self.cmds))
return While(self.expr.updateName(expr_mapping), cmds_new)
class Comment(Cmd):
def __init__(self, msg):
self.msg = msg
def updateName(self, expr_mapping):
return self.__class__(self.msg)
class Pragmas(Cmd):
def __init__(self, msg, vital=0):
self.msg = msg
self.vital = vital
def updateName(self, expr_mapping):
return self.__class__(self.msg, self.vital)
class Prog:
def __init__(self, cmd_l: CmdList, resource=0):
self.cmd_l = cmd_l
self.resource = resource
def updateName(self, expr_mapping):
cmd_l_new = list(map(lambda cmd: cmd.updateName(expr_mapping), self.cmd_l))
return self.__class__(cmd_l_new, self.resource)
class Memset(Cmd):
# if dim==1 then single for-loop memset, else memset for 'dim'
def __init__(self, e: Var, len: int, dim=1, lens=[]):
self.e = e
self.len = len
self.dim = dim
self.lens = lens
def updateName(self, expr_mapping):
return self.__class__(self.e.updateName(expr_mapping), self.len)
class Print(Cmd):
def __init__(self, expr: Expr):
self.expr = expr
def updateName(self, expr_mapping):
return self.__class__(self.expr.updateName(expr_mapping))
class PrintAsFloat(Cmd):
def __init__(self, expr: Expr, expnt: int):
self.expr = expr
self.expnt = expnt
def updateName(self, expr_mapping):
return self.__class__(self.expr.updateName(expr_mapping), self.expnt)
class FuncCall(Cmd):
def __init__(self, name, argList):
self.name = name
self.argList = argList
def updateName(self, expr_mapping):
argList_new = dict(
map(
lambda cmd: (cmd[0].updateName(expr_mapping), cmd[1]),
self.argList.items(),
)
)
return self.__class__(self.name, argList_new)
class Input(Cmd):
def __init__(
self,
expr: Expr,
shape: list,
dataType: str,
isSecret=True,
inputByParty=AST.Party.SERVER,
):
self.expr = expr
self.shape = shape
self.dataType = dataType
self.isSecret = isSecret
self.inputByParty = inputByParty
def updateName(self, expr_mapping):
return self.__class__(
self.expr.updateName(expr_mapping),
self.shape,
self.dataType,
self.isSecret,
self.inputByParty,
)
class Output(Cmd):
def __init__(self, expr: Expr, outputToParty: AST.Party):
self.expr = expr
self.outputToParty = outputToParty
def updateName(self, expr_mapping):
return self.__class__(
self.expr.updateName(expr_mapping),
self.outputToParty,
)
class Decl(Cmd):
def __init__(
self,
varIdf: str,
typeExpr: Type.Type,
bitlen: int = -1,
isSecret: bool = True,
value: list = None,
):
self.varIdf = varIdf
self.typeExpr = typeExpr
self.bitlen = Util.Config.wordLength if bitlen == -1 else bitlen
self.isSecret = isSecret
if value:
assert isinstance(value, list)
self.value = value
def updateName(self, expr_mapping):
return self.__class__(
self.varIdf, self.typeExpr, self.bitlen, self.isSecret, self.value
)
class DataType:
intType = {Util.Target.EzPC: {32: np.int32, 64: np.int64}}
intStr = {Util.Target.EzPC: "int"}
floatStr = "float"
@staticmethod
def getInt(x: int, wordLen: int = None):
if not (wordLen):
wordLen = Util.Config.wordLength
target = Util.Config.target
return DataType.intType[target][wordLen](x)
@staticmethod
def getIntClass():
target = Util.Config.target
wordLen = Util.Config.wordLength
return DataType.intType[target][wordLen]
@staticmethod
def getIntStr():
target = Util.Config.target
potentialPrefix = DataType.intStr[target]
if target == Util.Target.EzPC:
potentialPrefix = potentialPrefix + str(Util.Config.wordLength)
return potentialPrefix
@staticmethod
def getIntStrForBitlen(bitlen):
target = Util.Config.target
potentialPrefix = DataType.intStr[target]
if target == Util.Target.EzPC:
potentialPrefix = potentialPrefix + str(bitlen)
return potentialPrefix
@staticmethod
def getFloatStr():
return DataType.floatStr
@staticmethod
def getNegMax():
intClass = DataType.getIntClass()
return intClass(np.iinfo(intClass).min)
@staticmethod
def getMax():
intClass = DataType.getIntClass()
return intClass(np.iinfo(intClass).max)
| [
"numpy.iinfo",
"enum.Enum"
] | [((1250, 1319), 'enum.Enum', 'Enum', (['"""Op"""', '"""+ - * / << >> & | ^ ~ ! && || < <= > >= == != max .* ./"""'], {}), "('Op', '+ - * / << >> & | ^ ~ ! && || < <= > >= == != max .* ./')\n", (1254, 1319), False, 'from enum import Enum\n'), ((12924, 12942), 'numpy.iinfo', 'np.iinfo', (['intClass'], {}), '(intClass)\n', (12932, 12942), True, 'import numpy as np\n'), ((13051, 13069), 'numpy.iinfo', 'np.iinfo', (['intClass'], {}), '(intClass)\n', (13059, 13069), True, 'import numpy as np\n')] |
#
# BSD 3-Clause License
#
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import pytest
import numpy as np
import skdim
import matplotlib.pyplot as plt
from inspect import getmembers, isclass
from sklearn.utils.estimator_checks import check_estimator
@pytest.fixture
def data():
X = np.zeros((100, 10))
X[:, :5] = skdim.datasets.hyperBall(n=100, d=5, radius=1, random_state=0)
return X
# test all estimators pass check_estimator
estimators = [o[1] for o in getmembers(skdim.id) if isclass(o[1])]
@pytest.mark.parametrize("Estimator", estimators)
def test_all_estimators(Estimator):
return check_estimator(Estimator())
# test default and non-default parameters
def test_ess_params(data):
x = skdim.id.ESS().fit(data)
x = skdim.id.ESS(ver="b").fit(data)
x = skdim.id.ESS(d=2).fit(data)
x = skdim.id.ESS().fit_once(data)
### additionally test all common LocalEstimator base functions
x = skdim.id.ESS().fit(data).transform()
x = skdim.id.ESS().fit(data).transform_pw()
x = skdim.id.ESS().fit_transform(data)
x = skdim.id.ESS().fit_transform_pw(data)
x = skdim.id.ESS().fit_transform_pw(data, smooth=True)
def test_fisher_params(data, monkeypatch):
monkeypatch.setattr(plt, "show", lambda: None)
x = skdim.id.FisherS().fit(data)
x = skdim.id.FisherS(conditional_number=2).fit(data)
x = skdim.id.FisherS(produce_plots=True).fit(data)
x = skdim.id.FisherS(project_on_sphere=False).fit(data)
x = skdim.id.FisherS(verbose=True).fit(data)
x = skdim.id.FisherS(limit_maxdim=True).fit(data)
x = skdim.id.FisherS().fit(data).point_inseparability_to_pointID()
### additionally test all common GlobalEstimator base functions
x = skdim.id.FisherS().fit(data).transform()
x = skdim.id.FisherS().fit_pw(data, n_neighbors=50).transform_pw()
x = skdim.id.FisherS().fit_transform(data)
x = skdim.id.FisherS().fit_transform_pw(data, n_neighbors=50)
def test_mind_ml_params(data):
x = skdim.id.MiND_ML()
x = skdim.id.MiND_ML(ver="MLi")
x = skdim.id.MiND_ML(ver="ML1")
x = skdim.id.MiND_ML(D=5)
x = skdim.id.MiND_ML(k=5)
def test_mom_params(data):
x = skdim.id.MOM()
def test_lpca_params(data):
x = skdim.id.lPCA().fit(data)
x = skdim.id.lPCA(ver="Fan").fit(data)
x = skdim.id.lPCA(ver="ratio").fit(data)
x = skdim.id.lPCA(ver="maxgap").fit(data)
x = skdim.id.lPCA(ver="Kaiser").fit(data)
x = skdim.id.lPCA(ver="broken_stick").fit(data)
x = skdim.id.lPCA(ver="participation_ratio").fit(data)
def test_tle_params(data):
x = skdim.id.TLE().fit(data)
x = skdim.id.TLE(epsilon=0.01).fit(data)
def test_corrint_params(data):
x = skdim.id.CorrInt().fit(data)
x = skdim.id.CorrInt(k1=5, k2=15).fit(data)
def test_danco_params(data):
x = skdim.id.DANCo().fit(data)
x = skdim.id.DANCo(fractal=False).fit(data)
x = skdim.id.DANCo(D=5).fit(data)
x = skdim.id.DANCo(k=5).fit(data)
x = skdim.id.DANCo(ver="MIND_MLk").fit(data)
x = skdim.id.DANCo(ver="MIND_MLi").fit(data)
def test_knn_params(data):
x = skdim.id.KNN().fit(data)
x = skdim.id.KNN(k=5).fit(data)
x = skdim.id.KNN(ps=np.arange(30, 32)).fit(data)
x = skdim.id.KNN(M=2).fit(data)
x = skdim.id.KNN(gamma=3).fit(data)
def test_mada_params(data):
x = skdim.id.MADA().fit(data)
def test_mle_params(data):
x = skdim.id.MLE().fit(data)
x = skdim.id.MLE(n=20, sigma=0.1, dnoise="dnoiseGaussH").fit(data)
x = skdim.id.MLE(unbiased=True).fit(data)
x = skdim.id.MLE(K=10, neighborhood_based=False).fit(data)
def test_twonn_params(data):
# to trigger the "n_features>25 condition"
test_high_dim = np.zeros((len(data), 30))
test_high_dim[:, : data.shape[1]] = data
x = skdim.id.TwoNN().fit(test_high_dim)
x = skdim.id.TwoNN(discard_fraction=0.05).fit(data)
def test_aspointwise(data):
x = skdim.asPointwise(data, skdim.id.TwoNN(), n_neighbors=50)
x = skdim.asPointwise(data, skdim.id.TwoNN(), n_neighbors=50, n_jobs=2)
assert len(x) == len(data)
def test_datasets():
skdim.datasets.hyperBall(100, 2)
skdim.datasets.hyperSphere(100, 2)
skdim.datasets.hyperTwinPeaks(100, 2)
skdim.datasets.lineDiskBall(100)
skdim.datasets.swissRoll3Sph(100, 100)
skdim.datasets.BenchmarkManifolds(noise_type="uniform").generate(n=123)
skdim.datasets.BenchmarkManifolds(noise_type="normal").generate(
name="M5b_Helix2d", n=456, dim=3, d=2
)
def test_fisher_separability_graph(monkeypatch):
monkeypatch.setattr(plt, "show", lambda: None)
import numpy as np
from sklearn.decomposition import PCA
from scipy.stats.mstats import winsorize
ball1 = skdim.datasets.hyperBall(n=1000, d=3, radius=0.5, center=[0, 0, 0]).T
ball2 = skdim.datasets.hyperBall(
n=1000, d=6, radius=0.5, center=[1, 0, 0, 0, 0, 0]
).T
_2balls = np.zeros((6, 2000))
_2balls[:3, :1000] = ball1
_2balls[:, 1000:2000] = ball2
X = _2balls.T
u = PCA().fit_transform(X)
fishers = skdim.id.FisherS(conditional_number=10000).fit(X)
ns = fishers.point_inseparability_to_pointID()[0]
edges, weights = fishers.getSeparabilityGraph()
nsw = winsorize(ns, limits=(0.01, 0.01), inclusive=(True, True))
plt.figure(figsize=(10, 5))
plt.scatter(u[:, 0], u[:, 1], c=nsw)
fishers.plotSeparabilityGraph(u[:, 0], u[:, 1], edges, alpha=0.2)
plt.colorbar()
plt.axis("equal")
plt.title("PCA on original data")
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.show()
| [
"skdim.id.CorrInt",
"matplotlib.pyplot.ylabel",
"skdim.id.MOM",
"scipy.stats.mstats.winsorize",
"numpy.arange",
"inspect.getmembers",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"skdim.id.DANCo",
"skdim.id.TwoNN",
"matplotlib.pyplot.scatter",
"skdim.datasets.swissRoll3Sph",
"mat... | [((2011, 2059), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Estimator"""', 'estimators'], {}), "('Estimator', estimators)\n", (2034, 2059), False, 'import pytest\n'), ((1785, 1804), 'numpy.zeros', 'np.zeros', (['(100, 10)'], {}), '((100, 10))\n', (1793, 1804), True, 'import numpy as np\n'), ((1820, 1882), 'skdim.datasets.hyperBall', 'skdim.datasets.hyperBall', ([], {'n': '(100)', 'd': '(5)', 'radius': '(1)', 'random_state': '(0)'}), '(n=100, d=5, radius=1, random_state=0)\n', (1844, 1882), False, 'import skdim\n'), ((3485, 3503), 'skdim.id.MiND_ML', 'skdim.id.MiND_ML', ([], {}), '()\n', (3501, 3503), False, 'import skdim\n'), ((3512, 3539), 'skdim.id.MiND_ML', 'skdim.id.MiND_ML', ([], {'ver': '"""MLi"""'}), "(ver='MLi')\n", (3528, 3539), False, 'import skdim\n'), ((3548, 3575), 'skdim.id.MiND_ML', 'skdim.id.MiND_ML', ([], {'ver': '"""ML1"""'}), "(ver='ML1')\n", (3564, 3575), False, 'import skdim\n'), ((3584, 3605), 'skdim.id.MiND_ML', 'skdim.id.MiND_ML', ([], {'D': '(5)'}), '(D=5)\n', (3600, 3605), False, 'import skdim\n'), ((3614, 3635), 'skdim.id.MiND_ML', 'skdim.id.MiND_ML', ([], {'k': '(5)'}), '(k=5)\n', (3630, 3635), False, 'import skdim\n'), ((3673, 3687), 'skdim.id.MOM', 'skdim.id.MOM', ([], {}), '()\n', (3685, 3687), False, 'import skdim\n'), ((5588, 5620), 'skdim.datasets.hyperBall', 'skdim.datasets.hyperBall', (['(100)', '(2)'], {}), '(100, 2)\n', (5612, 5620), False, 'import skdim\n'), ((5625, 5659), 'skdim.datasets.hyperSphere', 'skdim.datasets.hyperSphere', (['(100)', '(2)'], {}), '(100, 2)\n', (5651, 5659), False, 'import skdim\n'), ((5664, 5701), 'skdim.datasets.hyperTwinPeaks', 'skdim.datasets.hyperTwinPeaks', (['(100)', '(2)'], {}), '(100, 2)\n', (5693, 5701), False, 'import skdim\n'), ((5706, 5738), 'skdim.datasets.lineDiskBall', 'skdim.datasets.lineDiskBall', (['(100)'], {}), '(100)\n', (5733, 5738), False, 'import skdim\n'), ((5743, 5781), 'skdim.datasets.swissRoll3Sph', 'skdim.datasets.swissRoll3Sph', (['(100)', 
'(100)'], {}), '(100, 100)\n', (5771, 5781), False, 'import skdim\n'), ((6394, 6413), 'numpy.zeros', 'np.zeros', (['(6, 2000)'], {}), '((6, 2000))\n', (6402, 6413), True, 'import numpy as np\n'), ((6710, 6768), 'scipy.stats.mstats.winsorize', 'winsorize', (['ns'], {'limits': '(0.01, 0.01)', 'inclusive': '(True, True)'}), '(ns, limits=(0.01, 0.01), inclusive=(True, True))\n', (6719, 6768), False, 'from scipy.stats.mstats import winsorize\n'), ((6773, 6800), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (6783, 6800), True, 'import matplotlib.pyplot as plt\n'), ((6805, 6841), 'matplotlib.pyplot.scatter', 'plt.scatter', (['u[:, 0]', 'u[:, 1]'], {'c': 'nsw'}), '(u[:, 0], u[:, 1], c=nsw)\n', (6816, 6841), True, 'import matplotlib.pyplot as plt\n'), ((6916, 6930), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6928, 6930), True, 'import matplotlib.pyplot as plt\n'), ((6935, 6952), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (6943, 6952), True, 'import matplotlib.pyplot as plt\n'), ((6957, 6990), 'matplotlib.pyplot.title', 'plt.title', (['"""PCA on original data"""'], {}), "('PCA on original data')\n", (6966, 6990), True, 'import matplotlib.pyplot as plt\n'), ((6995, 7012), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""PC1"""'], {}), "('PC1')\n", (7005, 7012), True, 'import matplotlib.pyplot as plt\n'), ((7017, 7034), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PC2"""'], {}), "('PC2')\n", (7027, 7034), True, 'import matplotlib.pyplot as plt\n'), ((7039, 7049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7047, 7049), True, 'import matplotlib.pyplot as plt\n'), ((1969, 1989), 'inspect.getmembers', 'getmembers', (['skdim.id'], {}), '(skdim.id)\n', (1979, 1989), False, 'from inspect import getmembers, isclass\n'), ((1993, 2006), 'inspect.isclass', 'isclass', (['o[1]'], {}), '(o[1])\n', (2000, 2006), False, 'from inspect import getmembers, isclass\n'), ((5420, 
5436), 'skdim.id.TwoNN', 'skdim.id.TwoNN', ([], {}), '()\n', (5434, 5436), False, 'import skdim\n'), ((5486, 5502), 'skdim.id.TwoNN', 'skdim.id.TwoNN', ([], {}), '()\n', (5500, 5502), False, 'import skdim\n'), ((6204, 6271), 'skdim.datasets.hyperBall', 'skdim.datasets.hyperBall', ([], {'n': '(1000)', 'd': '(3)', 'radius': '(0.5)', 'center': '[0, 0, 0]'}), '(n=1000, d=3, radius=0.5, center=[0, 0, 0])\n', (6228, 6271), False, 'import skdim\n'), ((6286, 6362), 'skdim.datasets.hyperBall', 'skdim.datasets.hyperBall', ([], {'n': '(1000)', 'd': '(6)', 'radius': '(0.5)', 'center': '[1, 0, 0, 0, 0, 0]'}), '(n=1000, d=6, radius=0.5, center=[1, 0, 0, 0, 0, 0])\n', (6310, 6362), False, 'import skdim\n'), ((2215, 2229), 'skdim.id.ESS', 'skdim.id.ESS', ([], {}), '()\n', (2227, 2229), False, 'import skdim\n'), ((2248, 2269), 'skdim.id.ESS', 'skdim.id.ESS', ([], {'ver': '"""b"""'}), "(ver='b')\n", (2260, 2269), False, 'import skdim\n'), ((2288, 2305), 'skdim.id.ESS', 'skdim.id.ESS', ([], {'d': '(2)'}), '(d=2)\n', (2300, 2305), False, 'import skdim\n'), ((2324, 2338), 'skdim.id.ESS', 'skdim.id.ESS', ([], {}), '()\n', (2336, 2338), False, 'import skdim\n'), ((2523, 2537), 'skdim.id.ESS', 'skdim.id.ESS', ([], {}), '()\n', (2535, 2537), False, 'import skdim\n'), ((2566, 2580), 'skdim.id.ESS', 'skdim.id.ESS', ([], {}), '()\n', (2578, 2580), False, 'import skdim\n'), ((2612, 2626), 'skdim.id.ESS', 'skdim.id.ESS', ([], {}), '()\n', (2624, 2626), False, 'import skdim\n'), ((2767, 2785), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {}), '()\n', (2783, 2785), False, 'import skdim\n'), ((2804, 2842), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {'conditional_number': '(2)'}), '(conditional_number=2)\n', (2820, 2842), False, 'import skdim\n'), ((2861, 2897), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {'produce_plots': '(True)'}), '(produce_plots=True)\n', (2877, 2897), False, 'import skdim\n'), ((2916, 2957), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {'project_on_sphere': '(False)'}), 
'(project_on_sphere=False)\n', (2932, 2957), False, 'import skdim\n'), ((2976, 3006), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {'verbose': '(True)'}), '(verbose=True)\n', (2992, 3006), False, 'import skdim\n'), ((3025, 3060), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {'limit_maxdim': '(True)'}), '(limit_maxdim=True)\n', (3041, 3060), False, 'import skdim\n'), ((3339, 3357), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {}), '()\n', (3355, 3357), False, 'import skdim\n'), ((3386, 3404), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {}), '()\n', (3402, 3404), False, 'import skdim\n'), ((3726, 3741), 'skdim.id.lPCA', 'skdim.id.lPCA', ([], {}), '()\n', (3739, 3741), False, 'import skdim\n'), ((3760, 3784), 'skdim.id.lPCA', 'skdim.id.lPCA', ([], {'ver': '"""Fan"""'}), "(ver='Fan')\n", (3773, 3784), False, 'import skdim\n'), ((3803, 3829), 'skdim.id.lPCA', 'skdim.id.lPCA', ([], {'ver': '"""ratio"""'}), "(ver='ratio')\n", (3816, 3829), False, 'import skdim\n'), ((3848, 3875), 'skdim.id.lPCA', 'skdim.id.lPCA', ([], {'ver': '"""maxgap"""'}), "(ver='maxgap')\n", (3861, 3875), False, 'import skdim\n'), ((3894, 3921), 'skdim.id.lPCA', 'skdim.id.lPCA', ([], {'ver': '"""Kaiser"""'}), "(ver='Kaiser')\n", (3907, 3921), False, 'import skdim\n'), ((3940, 3973), 'skdim.id.lPCA', 'skdim.id.lPCA', ([], {'ver': '"""broken_stick"""'}), "(ver='broken_stick')\n", (3953, 3973), False, 'import skdim\n'), ((3992, 4032), 'skdim.id.lPCA', 'skdim.id.lPCA', ([], {'ver': '"""participation_ratio"""'}), "(ver='participation_ratio')\n", (4005, 4032), False, 'import skdim\n'), ((4080, 4094), 'skdim.id.TLE', 'skdim.id.TLE', ([], {}), '()\n', (4092, 4094), False, 'import skdim\n'), ((4113, 4139), 'skdim.id.TLE', 'skdim.id.TLE', ([], {'epsilon': '(0.01)'}), '(epsilon=0.01)\n', (4125, 4139), False, 'import skdim\n'), ((4191, 4209), 'skdim.id.CorrInt', 'skdim.id.CorrInt', ([], {}), '()\n', (4207, 4209), False, 'import skdim\n'), ((4228, 4257), 'skdim.id.CorrInt', 'skdim.id.CorrInt', ([], {'k1': '(5)', 
'k2': '(15)'}), '(k1=5, k2=15)\n', (4244, 4257), False, 'import skdim\n'), ((4307, 4323), 'skdim.id.DANCo', 'skdim.id.DANCo', ([], {}), '()\n', (4321, 4323), False, 'import skdim\n'), ((4342, 4371), 'skdim.id.DANCo', 'skdim.id.DANCo', ([], {'fractal': '(False)'}), '(fractal=False)\n', (4356, 4371), False, 'import skdim\n'), ((4390, 4409), 'skdim.id.DANCo', 'skdim.id.DANCo', ([], {'D': '(5)'}), '(D=5)\n', (4404, 4409), False, 'import skdim\n'), ((4428, 4447), 'skdim.id.DANCo', 'skdim.id.DANCo', ([], {'k': '(5)'}), '(k=5)\n', (4442, 4447), False, 'import skdim\n'), ((4466, 4496), 'skdim.id.DANCo', 'skdim.id.DANCo', ([], {'ver': '"""MIND_MLk"""'}), "(ver='MIND_MLk')\n", (4480, 4496), False, 'import skdim\n'), ((4515, 4545), 'skdim.id.DANCo', 'skdim.id.DANCo', ([], {'ver': '"""MIND_MLi"""'}), "(ver='MIND_MLi')\n", (4529, 4545), False, 'import skdim\n'), ((4593, 4607), 'skdim.id.KNN', 'skdim.id.KNN', ([], {}), '()\n', (4605, 4607), False, 'import skdim\n'), ((4626, 4643), 'skdim.id.KNN', 'skdim.id.KNN', ([], {'k': '(5)'}), '(k=5)\n', (4638, 4643), False, 'import skdim\n'), ((4715, 4732), 'skdim.id.KNN', 'skdim.id.KNN', ([], {'M': '(2)'}), '(M=2)\n', (4727, 4732), False, 'import skdim\n'), ((4751, 4772), 'skdim.id.KNN', 'skdim.id.KNN', ([], {'gamma': '(3)'}), '(gamma=3)\n', (4763, 4772), False, 'import skdim\n'), ((4821, 4836), 'skdim.id.MADA', 'skdim.id.MADA', ([], {}), '()\n', (4834, 4836), False, 'import skdim\n'), ((4884, 4898), 'skdim.id.MLE', 'skdim.id.MLE', ([], {}), '()\n', (4896, 4898), False, 'import skdim\n'), ((4917, 4969), 'skdim.id.MLE', 'skdim.id.MLE', ([], {'n': '(20)', 'sigma': '(0.1)', 'dnoise': '"""dnoiseGaussH"""'}), "(n=20, sigma=0.1, dnoise='dnoiseGaussH')\n", (4929, 4969), False, 'import skdim\n'), ((4988, 5015), 'skdim.id.MLE', 'skdim.id.MLE', ([], {'unbiased': '(True)'}), '(unbiased=True)\n', (5000, 5015), False, 'import skdim\n'), ((5034, 5078), 'skdim.id.MLE', 'skdim.id.MLE', ([], {'K': '(10)', 'neighborhood_based': '(False)'}), '(K=10, 
neighborhood_based=False)\n', (5046, 5078), False, 'import skdim\n'), ((5266, 5282), 'skdim.id.TwoNN', 'skdim.id.TwoNN', ([], {}), '()\n', (5280, 5282), False, 'import skdim\n'), ((5310, 5347), 'skdim.id.TwoNN', 'skdim.id.TwoNN', ([], {'discard_fraction': '(0.05)'}), '(discard_fraction=0.05)\n', (5324, 5347), False, 'import skdim\n'), ((5786, 5841), 'skdim.datasets.BenchmarkManifolds', 'skdim.datasets.BenchmarkManifolds', ([], {'noise_type': '"""uniform"""'}), "(noise_type='uniform')\n", (5819, 5841), False, 'import skdim\n'), ((5862, 5916), 'skdim.datasets.BenchmarkManifolds', 'skdim.datasets.BenchmarkManifolds', ([], {'noise_type': '"""normal"""'}), "(noise_type='normal')\n", (5895, 5916), False, 'import skdim\n'), ((6506, 6511), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (6509, 6511), False, 'from sklearn.decomposition import PCA\n'), ((6543, 6585), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {'conditional_number': '(10000)'}), '(conditional_number=10000)\n', (6559, 6585), False, 'import skdim\n'), ((2430, 2444), 'skdim.id.ESS', 'skdim.id.ESS', ([], {}), '()\n', (2442, 2444), False, 'import skdim\n'), ((2475, 2489), 'skdim.id.ESS', 'skdim.id.ESS', ([], {}), '()\n', (2487, 2489), False, 'import skdim\n'), ((3079, 3097), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {}), '()\n', (3095, 3097), False, 'import skdim\n'), ((3219, 3237), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {}), '()\n', (3235, 3237), False, 'import skdim\n'), ((3268, 3286), 'skdim.id.FisherS', 'skdim.id.FisherS', ([], {}), '()\n', (3284, 3286), False, 'import skdim\n'), ((4678, 4695), 'numpy.arange', 'np.arange', (['(30)', '(32)'], {}), '(30, 32)\n', (4687, 4695), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Functions for generating QC images and an HTML report for outputs from the
ANTs longitudinal cortical thickness pipeline
"""
import argparse
import os
import pathlib
import re
import jinja2
import nibabel as nib
from niworkflows.viz.utils import (compose_view, plot_segs,
plot_registration, cuts_from_bbox)
import numpy as np
def make_brain(*, anatomical, mask, out_file):
    """
    Render an svg of the brain `mask` contour drawn over `anatomical`.

    Parameters
    ----------
    anatomical : str
        Path to anatomical T1w image (*with skull*)
    mask : str
        Path to brain mask file
    out_file : str
        Path to where svg will be saved

    Returns
    -------
    out_file : str
        Where svg was saved
    """
    out_file = out_file if out_file.endswith('.svg') else out_file + '.svg'
    mask_svg = plot_segs(image_nii=anatomical,
                         seg_niis=[mask],
                         bbox_nii=mask,
                         out_file='reports.svg',
                         masked=False,
                         compress='auto')
    compose_view(mask_svg, fg_svgs=None, out_file=out_file)
    return out_file
def make_segmentation(*, anatomical, segmentation, mask, out_file):
    """
    Render an svg of `segmentation` contours drawn over `anatomical`.

    Parameters
    ----------
    anatomical : str
        Path to anatomical T1w image (*without skull*)
    segmentation : str
        Path to segmentation file with tissue types (1-6)
    mask : str
        Path to brain mask file
    out_file : str
        Path to where svg will be saved

    Returns
    -------
    out_file : str
        Where svg was saved
    """
    if not out_file.endswith('.svg'):
        out_file = f'{out_file}.svg'
    # split the multi-label segmentation into one file per tissue type
    seg_files = segmentation_to_files(segmentation)
    seg_svg = plot_segs(image_nii=anatomical,
                        seg_niis=[mask] + seg_files,
                        bbox_nii=mask,
                        out_file='reports.svg',
                        masked=False,
                        compress='auto')
    compose_view(seg_svg, fg_svgs=None, out_file=out_file)
    # clean up the temporary per-tissue files
    for tmp in seg_files:
        os.remove(tmp)
    return out_file
def make_registration(*, moving, fixed, mask, out_file):
    """
    Render an svg flicker view of the registration between `moving` and `fixed`.

    Parameters
    ----------
    moving : str
        Path to file that was registered to `fixed`
    fixed : str
        Path to file that `moving` was registered to
    mask : str
        Path to brain mask file
    out_file : str
        Path to where svg will be saved

    Returns
    -------
    out_file : str
        Where svg was saved
    """
    if not out_file.endswith('.svg'):
        out_file = out_file + '.svg'
    # derive the cut coordinates from the brain mask bounding box
    cut_coords = cuts_from_bbox(nib.load(mask), cuts=7)
    fixed_svg = plot_registration(nib.load(fixed),
                                   'fixed-image',
                                   estimate_brightness=True,
                                   cuts=cut_coords,
                                   label='fixed')
    moving_svg = plot_registration(nib.load(moving),
                                    'moving-image',
                                    estimate_brightness=True,
                                    cuts=cut_coords,
                                    label='moving')
    compose_view(fixed_svg, moving_svg, out_file=out_file)
    return out_file
def segmentation_to_files(segmentation, types=(2, 4)):
    """
    Converts single `segmentation` into multiple files

    Output of ANTs pipeline is a single segmentation file with 6 tissue types
    (1: CSF, 2: Cortical GM, 3: WM, 4: Deep GM, 5: Brainstem, 6: Cerebellum).
    `plot_segs` requires a *list* of files, so this splits up the input file
    into individual files corresponding separately to each tissue type.

    Parameters
    ----------
    segmentation : str
        Path to segmentation file with tissue types (1-6)
    types : iterable of int, optional
        Which tissue types to extract. Default: (2, 4)

    Returns
    -------
    out_files : list of str
        Paths of the generated single-tissue segmentation images

    Raises
    ------
    ValueError
        If `types` contains a value outside [1-6]
    """
    # validate everything up-front, before touching the filesystem
    invalid = [lab for lab in types if lab not in range(1, 7)]
    if invalid:
        raise ValueError('`types` must only include numbers [1-6]')
    img = nib.load(segmentation)
    # `get_data()` was deprecated and removed from nibabel; read the array
    # through `dataobj` instead
    labels = np.asanyarray(img.dataobj)
    out_files = []
    for lab in types:
        out_fname = segmentation.replace('.nii.gz', f'_seg{lab}.nii.gz')
        # keep only the voxels belonging to this tissue type
        seg = np.zeros_like(labels)
        seg[labels == lab] = lab
        img.__class__(seg, img.affine, img.header).to_filename(out_fname)
        out_files += [out_fname]
    return out_files
def make_sst(sst_dir, temp_dir, out_dir):
    """
    Generate QC svgs for the single-subject template (SST).

    Parameters
    ----------
    sst_dir : pathlib.Path
    temp_dir : pathlib.Path
    out_dir : pathlib.Path

    Returns
    -------
    (seg, reg) : tuple of str
        Paths to the generated segmentation and registration svgs
    """
    seg = make_segmentation(
        anatomical=(sst_dir / 'T_template0.nii.gz').as_posix(),
        segmentation=(sst_dir / 'T_templateBrainSegmentation.nii.gz').as_posix(),
        mask=(sst_dir / 'T_templateBrainExtractionMask.nii.gz').as_posix(),
        out_file=(out_dir / 'sst_seg.svg').as_posix())
    reg = make_registration(
        moving=(sst_dir / 'T_templateBrainNormalizedToTemplate.nii.gz').as_posix(),
        fixed=(temp_dir / 'template_brain.nii.gz').as_posix(),
        mask=(temp_dir / 'template_brain_mask.nii.gz').as_posix(),
        out_file=(out_dir / 'sst_reg.svg').as_posix())
    return seg, reg
def make_visit(visit_dir, sst_dir, out_dir):
    """
    Generate QC svgs for a single visit / timepoint.

    visit_dir : pathlib.Path
    sst_dir : pathlib.Path
    out_dir : pathlib.Path
    """
    # directory name minus its final "_..." suffix identifies the scan
    stem = '_'.join(visit_dir.name.split('_')[:-1])
    session = re.search(r'ses-(\d+)', stem).group()
    seg = make_segmentation(
        anatomical=(visit_dir / '..' / 'coreg' / f'{stem}.nii.gz').as_posix(),
        segmentation=(visit_dir / f'{stem}BrainSegmentation.nii.gz').as_posix(),
        mask=(visit_dir / f'{stem}BrainExtractionMask.nii.gz').as_posix(),
        out_file=(out_dir / f'{session}_seg.svg').as_posix())
    reg = make_registration(
        moving=(visit_dir / f'{stem}BrainNormalizedToTemplate.nii.gz').as_posix(),
        fixed=(sst_dir / 'T_templateBrainExtractionBrain.nii.gz').as_posix(),
        mask=(sst_dir / 'T_templateBrainExtractionMask.nii.gz').as_posix(),
        out_file=(out_dir / f'{session}_reg.svg').as_posix())
    return seg, reg
def prep_for_jinja(images):
    """
    Pair each svg path in `images` with its file contents for jinja rendering.

    Parameters
    ----------
    images : list-of-str

    Returns
    -------
    outputs : list-of-tuple
        (path, contents) pairs, one per input image, in input order
    """
    def _slurp(path):
        # read the whole file as text
        with open(path, 'r') as handle:
            return handle.read()
    return [(path, _slurp(path)) for path in images]
def main():
    """
    Generates reports from outputs of ANTs pipeline

    Parses command-line arguments, builds QC svgs for the single-subject
    template and each visit, then renders them into a single HTML report
    (one file per subject) using the jinja template at /opt/report.tpl.
    """
    parser = argparse.ArgumentParser(description='Create visual reports')
    parser.add_argument('-s', '--subj_dir', dest='subj_dir',
                        required=True,
                        type=pathlib.Path,
                        help='Subject output directory')
    parser.add_argument('-t', '--temp_dir', dest='temp_dir',
                        required=True,
                        type=pathlib.Path,
                        help='Template directory used by ANTs')
    parser.add_argument('-o', '--out_dir', dest='out_dir',
                        required=False,
                        default=argparse.SUPPRESS,
                        type=pathlib.Path,
                        help='Where report should be saved')
    options = vars(parser.parse_args())
    # subject label is taken from the directory name itself
    sub, subj_dir = options['subj_dir'].name, options['subj_dir']
    temp_dir = options['temp_dir'].resolve()
    fig_dir = subj_dir.resolve() / 'figures'
    # SUPPRESS above means 'out_dir' is absent unless explicitly provided,
    # so default to the subject directory's parent
    out_dir = options.get('out_dir', subj_dir.parent).resolve() / 'reports'
    fig_dir.mkdir(parents=True, exist_ok=True)
    # first let's make the SST brain mask, segmentation, registration to MNI
    sst_dir = (subj_dir / f'{sub}_CTSingleSubjectTemplate').resolve()
    sst_seg, sst_reg = make_sst(sst_dir, temp_dir, fig_dir)
    images = [sst_seg, sst_reg]
    # now let's make the individual visits
    for v in sorted([f for f in subj_dir.glob(f'{sub}*T1w_*') if f.is_dir()]):
        v_seg, v_reg = make_visit(v.resolve(), sst_dir, fig_dir)
        images.extend([v_seg, v_reg])
    # prepare the images to be put into jinja template
    images = prep_for_jinja(images)
    # finally, grab the ANTS command to add to the report
    antsfp = subj_dir / f'{sub}_antscommand.txt'
    if antsfp.exists():
        with open(antsfp, 'r') as src:
            antscmd = src.read()
        # do a little formatting to get the command to print with line breaks
        # (insert an HTML <br> before each flag, then restore the path
        # separators that the splitting consumed)
        add = '<br> {0}'
        for to_split in ['-d', '-e', '-p', '-f', '-g', '-a', '-o']:
            antscmd = add.format(to_split).join(antscmd.split(f' {to_split}'))
        add = add.format('/')
        antscmd = ('z' + add).join(antscmd.split('z /'))
        antscmd = ('_CT' + add).join(antscmd.split('_CT /'))
    else:
        antscmd = ''
    # render the html with jinja
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='/opt'),
                             trim_blocks=True,
                             lstrip_blocks=True)
    report_tpl = env.get_template('report.tpl')
    report_render = report_tpl.render(images=images, antscmd=[antscmd])
    out_dir.mkdir(parents=True, exist_ok=True)
    # one report per subject
    with open(out_dir / f'{sub}.html', 'w') as fp:
        fp.write(report_render)
if __name__ == '__main__':
    main()
| [
"re.search",
"argparse.ArgumentParser",
"nibabel.load",
"jinja2.FileSystemLoader",
"niworkflows.viz.utils.plot_segs",
"numpy.zeros_like",
"os.remove"
] | [((4075, 4097), 'nibabel.load', 'nib.load', (['segmentation'], {}), '(segmentation)\n', (4083, 4097), True, 'import nibabel as nib\n'), ((7073, 7133), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create visual reports"""'}), "(description='Create visual reports')\n", (7096, 7133), False, 'import argparse\n'), ((895, 1018), 'niworkflows.viz.utils.plot_segs', 'plot_segs', ([], {'image_nii': 'anatomical', 'seg_niis': '[mask]', 'bbox_nii': 'mask', 'out_file': '"""reports.svg"""', 'masked': '(False)', 'compress': '"""auto"""'}), "(image_nii=anatomical, seg_niis=[mask], bbox_nii=mask, out_file=\n 'reports.svg', masked=False, compress='auto')\n", (904, 1018), False, 'from niworkflows.viz.utils import compose_view, plot_segs, plot_registration, cuts_from_bbox\n'), ((1850, 1979), 'niworkflows.viz.utils.plot_segs', 'plot_segs', ([], {'image_nii': 'anatomical', 'seg_niis': '([mask] + segs)', 'bbox_nii': 'mask', 'out_file': '"""reports.svg"""', 'masked': '(False)', 'compress': '"""auto"""'}), "(image_nii=anatomical, seg_niis=[mask] + segs, bbox_nii=mask,\n out_file='reports.svg', masked=False, compress='auto')\n", (1859, 1979), False, 'from niworkflows.viz.utils import compose_view, plot_segs, plot_registration, cuts_from_bbox\n'), ((2153, 2169), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (2162, 2169), False, 'import os\n'), ((2777, 2791), 'nibabel.load', 'nib.load', (['mask'], {}), '(mask)\n', (2785, 2791), True, 'import nibabel as nib\n'), ((4255, 4276), 'numpy.zeros_like', 'np.zeros_like', (['labels'], {}), '(labels)\n', (4268, 4276), True, 'import numpy as np\n'), ((2846, 2861), 'nibabel.load', 'nib.load', (['fixed'], {}), '(fixed)\n', (2854, 2861), True, 'import nibabel as nib\n'), ((3061, 3077), 'nibabel.load', 'nib.load', (['moving'], {}), '(moving)\n', (3069, 3077), True, 'import nibabel as nib\n'), ((5675, 5704), 're.search', 're.search', (['"""ses-(\\\\d+)"""', 'base'], {}), "('ses-(\\\\d+)', base)\n", (5684, 5704), 
False, 'import re\n'), ((9400, 9442), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', ([], {'searchpath': '"""/opt"""'}), "(searchpath='/opt')\n", (9423, 9442), False, 'import jinja2\n')] |
import numpy as np
from scipy.stats import chi2
def chi_square(Ns, Ni):
    r"""Return the p-value of the $\chi^2$ age-homogeneity test.

    Tests whether the per-grain spontaneous/induced track counts are
    consistent with a single population (Galbraith's chi-squared statistic).

    Parameters
    ----------
    Ns : sequence of int
        Spontaneous track counts, one entry per grain.
    Ni : sequence of int
        Induced track counts, one entry per grain.

    Returns
    -------
    float
        p-value of the test with ``len(Ns) - 1`` degrees of freedom.
    """
    Ns = np.asarray(Ns, dtype=float)
    Ni = np.asarray(Ni, dtype=float)
    ns_total = Ns.sum()
    ni_total = Ni.sum()
    # Galbraith's chi-squared statistic for count homogeneity
    X2 = np.sum((Ns * ni_total - Ni * ns_total) ** 2 / (Ns + Ni))
    X2 /= ns_total * ni_total
    # sf(x) == 1 - cdf(x), but numerically accurate in the upper tail
    return chi2.sf(X2, len(Ns) - 1)
def calculate_central_age(Ns, Ni, zeta, seZeta, rhod, rhod_err, sigma=0.15):
    """Compute the central fission-track age (Galbraith's central age).

    Iteratively re-weights the per-grain spontaneous fractions to estimate
    the mean ratio `theta` and the age dispersion `sigma`, then converts
    `theta` into an age using the zeta calibration.

    Returns a dict with keys "Central" (age), "se" (standard error) and
    "sigma" (age dispersion).
    """
    spontaneous = np.array(Ns)
    induced = np.array(Ni)
    LAMBDA = 1.55125e-4
    G = 0.5
    totals = spontaneous + induced       # m_j: total tracks per grain
    ratios = spontaneous / totals        # p_j: spontaneous fraction per grain
    theta = np.sum(spontaneous) / np.sum(totals)
    # fixed-point iteration for theta and the dispersion sigma
    for _ in range(30):
        weights = totals / (theta * (1 - theta) + (totals - 1) * theta**2 * (1 - theta)**2 * sigma**2)
        sigma = sigma * np.sqrt(np.sum(weights**2 * (ratios - theta)**2) / np.sum(weights))
        theta = np.sum(weights * ratios) / np.sum(weights)
    t = (1.0 / LAMBDA) * np.log(1.0 + G * LAMBDA * zeta * rhod * theta / (1.0 - theta))
    se = t * (1 / (theta**2 * (1.0 - theta)**2 * np.sum(weights)) + (rhod_err / rhod)**2 + (seZeta / zeta)**2)**0.5
    return {"Central": t, "se": se, "sigma": sigma}
def calculate_pooled_age(Ns, Ni, zeta, seZeta, rhod, rhod_err):
    """Compute the pooled fission-track age from the summed spontaneous and
    induced track counts.

    Returns a dict with keys "Pooled Age" and "se" (standard error).
    """
    total_ns = np.sum(Ns)
    total_ni = np.sum(Ni)
    LAMBDA = 1.55125e-4
    G = 0.5
    # zeta-calibration age equation applied to the pooled counts
    age = 1.0 / LAMBDA * np.log(1.0 + G * LAMBDA * zeta * rhod * total_ns / total_ni)
    se = age * (1.0 / total_ns + 1.0 / total_ni + (rhod_err / rhod)**2 + (seZeta / zeta)**2)**0.5
    return {"Pooled Age": age, "se": se}
def calculate_ages(Ns, Ni, zeta, seZeta, rhod, rhod_err):
    """Compute a single-grain fission-track age and standard error for every
    grain (element-wise over the count arrays).

    Returns a dict with keys "Age(s)" and "se(s)" (numpy arrays).
    """
    spontaneous = np.array(Ns)
    induced = np.array(Ni)
    LAMBDA = 1.55125e-4
    G = 0.5
    # zeta-calibration age equation, vectorized over grains
    ages = 1.0 / LAMBDA * np.log(1.0 + G * LAMBDA * zeta * rhod * spontaneous / induced)
    errors = ages * (1.0 / spontaneous + 1.0 / induced + (rhod_err / rhod)**2 + (seZeta / zeta)**2)**0.5
    return {"Age(s)": ages, "se(s)": errors}
| [
"numpy.log",
"numpy.array",
"scipy.stats.chi2",
"numpy.sum"
] | [((406, 422), 'scipy.stats.chi2', 'chi2', (['(length - 1)'], {}), '(length - 1)\n', (410, 422), False, 'from scipy.stats import chi2\n'), ((584, 596), 'numpy.array', 'np.array', (['Ns'], {}), '(Ns)\n', (592, 596), True, 'import numpy as np\n'), ((606, 618), 'numpy.array', 'np.array', (['Ni'], {}), '(Ni)\n', (614, 618), True, 'import numpy as np\n'), ((1304, 1314), 'numpy.sum', 'np.sum', (['Ns'], {}), '(Ns)\n', (1310, 1314), True, 'import numpy as np\n'), ((1324, 1334), 'numpy.sum', 'np.sum', (['Ni'], {}), '(Ni)\n', (1330, 1334), True, 'import numpy as np\n'), ((1637, 1649), 'numpy.array', 'np.array', (['Ns'], {}), '(Ns)\n', (1645, 1649), True, 'import numpy as np\n'), ((1659, 1671), 'numpy.array', 'np.array', (['Ni'], {}), '(Ni)\n', (1667, 1671), True, 'import numpy as np\n'), ((719, 729), 'numpy.sum', 'np.sum', (['Ns'], {}), '(Ns)\n', (725, 729), True, 'import numpy as np\n'), ((732, 741), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (738, 741), True, 'import numpy as np\n'), ((1000, 1062), 'numpy.log', 'np.log', (['(1.0 + G * LAMBDA * zeta * rhod * theta / (1.0 - theta))'], {}), '(1.0 + G * LAMBDA * zeta * rhod * theta / (1.0 - theta))\n', (1006, 1062), True, 'import numpy as np\n'), ((1395, 1443), 'numpy.log', 'np.log', (['(1.0 + G * LAMBDA * zeta * rhod * Ns / Ni)'], {}), '(1.0 + G * LAMBDA * zeta * rhod * Ns / Ni)\n', (1401, 1443), True, 'import numpy as np\n'), ((1751, 1799), 'numpy.log', 'np.log', (['(1.0 + G * LAMBDA * zeta * rhod * Ns / Ni)'], {}), '(1.0 + G * LAMBDA * zeta * rhod * Ns / Ni)\n', (1757, 1799), True, 'import numpy as np\n'), ((948, 961), 'numpy.sum', 'np.sum', (['(w * p)'], {}), '(w * p)\n', (954, 961), True, 'import numpy as np\n'), ((964, 973), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (970, 973), True, 'import numpy as np\n'), ((889, 922), 'numpy.sum', 'np.sum', (['(w ** 2 * (p - theta) ** 2)'], {}), '(w ** 2 * (p - theta) ** 2)\n', (895, 922), True, 'import numpy as np\n'), ((921, 930), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', 
(927, 930), True, 'import numpy as np\n'), ((1115, 1124), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (1121, 1124), True, 'import numpy as np\n')] |
import os
from tqdm import tqdm, trange
from torch.utils.data import Dataset
import random
from io import open
import pickle
from pathlib import Path
import multiprocessing
from multiprocessing import Pool
import time
import torch
import logging
logger = logging.getLogger(__name__)
import numpy as np
import shutil
# --- SQuAD dataset locations (relative to the working directory) ---
_CURPATH = Path.cwd()
_TMPDIR = _CURPATH / "squad_data"
_TRAINDIR = _TMPDIR / "squad_train"
_TESTDIR = _TMPDIR / "squad_test"
_TESTFILE = "dev-v2.0.json"
_DATADIR = _CURPATH / "squad_data"
_TRAINFILE = "train-v2.0.json"
# download URLs for the SQuAD v2.0 train/dev splits
_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/" + _TRAINFILE
_DEV_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/" + _TESTFILE
_MODELS = _CURPATH / "models"
# wiki train
# --- WikiText-103 (raw) dataset locations and download URL ---
_WIKIFOLD = "wikitext-103-raw"
_WIKIFOLDER = _CURPATH / _WIKIFOLD
_TRAINDIRWIKI = _WIKIFOLDER / "wiki_train"
_DEVDIRWIKI = _WIKIFOLDER / "wiki_dev"
_WIKIZIPFILE = "wikitext-103-raw-v1.zip"
_WIKIURL = "https://s3.amazonaws.com/research.metamind.io/wikitext/" + _WIKIZIPFILE
# token-file names inside the extracted archive
_WIKITRAINTOKENS = "wiki.train"
_WIKIDEVTOKENS = "wiki.valid"
def unzip(filename, targetdir):
    """Extract every member of the zip archive `filename` into `targetdir`."""
    import zipfile
    archive = zipfile.ZipFile(filename, "r")
    with archive as handle:
        handle.extractall(targetdir)
def prepare_wikitext(zipfile, wikifolder, traindirwiki, devdirwiki, wikitraintokens, wikidevtokens, wikifold):
    """Unpack the WikiText archive and move the train/valid token files into
    their own directories, creating those directories if needed."""
    print("unzipping wikifiles")
    unzip(wikifolder / zipfile, wikifolder)
    for directory in (traindirwiki, devdirwiki):
        if not os.path.exists(directory):
            os.makedirs(directory)
    extracted = wikifolder / wikifold
    os.rename(extracted / wikitraintokens, traindirwiki / wikitraintokens)
    os.rename(extracted / wikidevtokens, devdirwiki / wikidevtokens)
    print("finished moving wikifolder")
def chunk(l, n):
    """Yield `n` equally sized consecutive slices of sequence `l`.

    Parameters
    ----------
    l : sequence
        Sequence whose length must be an exact multiple of `n`.
    n : int
        Number of slices to produce.

    Yields
    ------
    sequence
        Slices of length ``len(l) // n``.

    Raises
    ------
    ValueError
        If ``len(l)`` is not evenly divisible by `n` (raised on first
        iteration, since this is a generator).
    """
    # integer arithmetic avoids float rounding; an explicit raise (unlike
    # the previous `assert`) survives `python -O`
    if len(l) % n != 0:
        raise ValueError(f"cannot split {len(l)} items into {n} equal chunks")
    length = len(l) // n
    for i in range(n):
        yield l[i * length: (i + 1) * length]
def batchify(data, bsz):
    """Split `data` into `bsz` equally sized contiguous batches.

    Trailing elements that do not fit evenly are dropped.  Unlike the
    previous implementation this does NOT mutate the caller's list
    (the old version `del`eted the remainder in place).

    Parameters
    ----------
    data : list
    bsz : int
        Number of batches.

    Returns
    -------
    list of list
        `bsz` lists, each of length ``len(data) // bsz``.
    """
    # Work out how cleanly we can divide the dataset into bsz parts.
    size = len(data) // bsz
    # Evenly divide the data across the bsz batches; slicing copies, so the
    # input list is left untouched.
    return [data[i * size:(i + 1) * size] for i in range(bsz)]
def main():
    """Entry point: build the wikitext training dataset once (smoke test)."""
    from tokenization import BertTokenizer
    bert_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
    dataset = WikitextTrain(_TRAINDIRWIKI, bert_tokenizer, seq_len=128, rebuild=True)
class WikitextTrain(Dataset):
    """Token-stream dataset over a WikiText-style corpus for masked-LM training.

    On construction the whole corpus is tokenized into one flat list of token
    ids (optionally cached to / loaded from ``build_docs.p`` via pickle), then
    split into ``batch_size`` parallel streams with ``batchify``.
    ``get_batch`` serves consecutive (optionally variable-length) windows
    from those streams.
    """
    def __init__(self, corpus_path, tokenizer, seq_len, encoding="utf-8", corpus_lines=None, rebuild=True, short_factor = 1, batch_size = 2, variable_seq=True):
        self.vocab = tokenizer.vocab
        self.tokenizer = tokenizer
        # raise the tokenizer's length cap so whole lines can be tokenized at once
        self.tokenizer.max_len = 10000
        self.seq_len = seq_len
        self.corpus_lines = corpus_lines # number of non-empty lines in input corpus
        self.corpus_path = corpus_path
        self.encoding = encoding
        self.pos = 0 # to avoid random sentence from same doc
        self.batch_size = batch_size
        self.variable_seq = variable_seq
        # for loading samples directly from file
        self.sample_counter = 0  # used to keep track of full epochs on file
        self.line_buffer = None  # keep second sentence of a pair in memory and use as first sentence in next pair
        # for loading samples in memory
        self.current_random_doc = 0
        self.num_docs = 0
        self.short_factor = short_factor
        if rebuild:
            self.docs = []
            skipcounter = 0
            # drop any stale cache before re-tokenizing
            if os.path.exists(self.corpus_path / "build_docs.p"):
                os.remove(self.corpus_path / "build_docs.p")
            for files in os.listdir(self.corpus_path):
                with open(self.corpus_path / files , "r", encoding=encoding) as f:
                    # first pass just counts lines so tqdm can show progress
                    print("Calculating length of document")
                    corpus_lines = sum(1 for line in f)
                    print(corpus_lines)
                    counter = 0
                    f.seek(0)
                    for line in tqdm(f, desc="Tokenization", total=corpus_lines):
                        words = line.split()
                        # undo the WikiText escaping of punctuation ("@-@" etc.)
                        for i ,word in enumerate(words):
                            if word == "@-@":
                                words[i] = "-"
                            if word == "@.@":
                                words[i] = "."
                            if word == "@,@":
                                words[i] = ","
                        words = " ".join(words)
                        split_tokens = self.tokenizer.tokenize(words)
                        tokens = self.tokenizer.convert_tokens_to_ids(split_tokens)
                        self.docs.extend(tokens)
                        counter += 1
                        # debug output for the first ~100 lines of each file
                        if counter < 100:
                            print(split_tokens)
                            print(tokens)
            # self.docs = torch.LongTensor(self.docs)
            print("Tokenization of full corpus done")
            print(f"Full number of tokens: {len(self.docs)}")
            pickle.dump(self.docs, open(self.corpus_path / "build_docs.p", "wb"))
            print("Saved Dataset with Pickle")
        else:
            self.docs = pickle.load( open(self.corpus_path / "build_docs.p", "rb"))
            print("Loaded Dataset with Pickle")
        # split the flat id stream into `batch_size` parallel streams
        self.docs = batchify(self.docs, batch_size)
        # self.length = self.docs.size(1)
        self.length = len(self.docs[0])
    # def batchify(data, bsz):
    #     # Work out how cleanly we can divide the dataset into bsz parts.
    #     nbatch = data.size(0) // bsz
    #     # Trim off any extra elements that wouldn't cleanly fit (remainders).
    #     data = data.narrow(0, 0, nbatch * bsz)
    #     # Evenly divide the data across the bsz batches.
    #     data = data.view(bsz, -1).t().contiguous()
    #     return data
    def get_batch(self):
        """Return ``([], (input_ids, input_mask, segment_ids, lm_label_ids))``
        for the next window of every stream, advancing the read position."""
        i = self.pos
        # Prevent excessively small or negative sequence lengths
        if self.variable_seq:
            # sample a window length around seq_len (sigma = 20%), floored at half
            seq_len = max(int(self.seq_len *0.5), int(np.random.normal(self.seq_len, self.seq_len *0.2)))
            prob = random.random()
            # occasionally (3%) halve the window to expose short sequences
            if prob > 0.97: seq_len = seq_len // 2
            # leave room for the [CLS]/[SEP] tokens added downstream
            seq_len = min(self.seq_len - 2, seq_len)
        else:
            seq_len = self.seq_len - 2
        data = [ doc[i:i+seq_len] for doc in self.docs]
        self.pos += seq_len
        cur_features = convert_example_to_features(data, self.sample_counter, self.seq_len, self.tokenizer)
        self.sample_counter += 1
        cur_tensors = (cur_features.input_ids,
                       cur_features.input_mask,
                       cur_features.segment_ids,
                       cur_features.lm_label_ids,
                       )
        return [], cur_tensors
    def __len__(self):
        # last line of doc won't be used, because there's no "nextSentence". Additionally, we start counting at 0.
        return len(self.docs[0])
class InputExample(object):
    """A single training/test example for the language model."""
    def __init__(self, guid, tokens_a, tokens_b=None, is_next=None, lm_labels=None):
        """Store one masked-LM example.

        Args:
            guid: Unique id for the example.
            tokens_a: First sequence.  For single-sequence tasks only this
                sequence must be specified.
            tokens_b: Optional second sequence, for sequence-pair tasks.
            is_next: Optional next-sentence label (train/dev only).
            lm_labels: Masked-word labels for the language model.
        """
        self.guid = guid
        self.lm_labels = lm_labels # masked words for language model
        self.is_next = is_next # nextSentence
        self.tokens_b = tokens_b
        self.tokens_a = tokens_a
class InputFeatures(object):
    """A single set of features of data."""
    def __init__(self, input_ids, input_mask, segment_ids, lm_label_ids):
        """Keep references to the id / mask / segment / label values."""
        self.lm_label_ids = lm_label_ids
        self.segment_ids = segment_ids
        self.input_mask = input_mask
        self.input_ids = input_ids
def convert_example_to_features(batch, cur_time, max_seq_length, tokenizer):
    """
    Convert a batch of token-id windows into padded, model-ready tensors.

    Each window is masked via ``random_word``, wrapped in [CLS]/[SEP]
    vocabulary ids, and zero-padded to ``max_seq_length``.

    :param batch: list of list of int, one token-id window per stream.
    :param cur_time: int, running sample counter; the first 5 calls are logged.
    :param max_seq_length: int, maximum length of sequence.
    :param tokenizer: Tokenizer; its vocab supplies the [CLS]/[SEP] ids.
    :return: InputFeatures holding (batch, max_seq_length) long tensors for
        input ids, attention mask, segment ids and LM labels.
    """
    # allocate one (batch, max_seq_length) long tensor per output field
    batch_size = len(batch)
    input_ids_tensor = torch.zeros([batch_size, max_seq_length], dtype= torch.long)
    input_mask_tensor = torch.zeros([batch_size, max_seq_length], dtype= torch.long)
    segment_ids_tensor = torch.zeros([batch_size, max_seq_length], dtype= torch.long)
    lm_label_ids_tensor = torch.zeros([batch_size, max_seq_length], dtype= torch.long)
    for b, example in enumerate(batch):
        masked_tokens, labels = random_word(example, tokenizer)
        # -1 at the [CLS]/[SEP] positions so they are ignored by the loss
        lm_label_ids = ([-1] + labels + [-1])
        tokens = []
        segment_ids = []
        tokens.append(tokenizer.vocab["[CLS]"])
        segment_ids.append(0)
        for token in masked_tokens:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append(tokenizer.vocab["[SEP]"])
        segment_ids.append(0)
        input_ids = tokens
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            lm_label_ids.append(-1)
        # print("input, segment, lmlabel")
        # print(len(input_ids))
        # print(len(segment_ids))
        # print(len(lm_label_ids))
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(lm_label_ids) == max_seq_length
        if cur_time < 5:
            logger.info("*** Example ***")
            logger.info("cur_time: %s" % (cur_time))
            logger.info("tokens: %s" % " ".join(
                [str(x) for x in tokens]))
            input_ids_decoded = tokenizer.convert_ids_to_tokens(input_ids)
            logger.info("input_ids decoded: %s" % " ".join([str(x) for x in input_ids_decoded]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("LM label: %s " % (lm_label_ids))
        # NOTE(review): assigning a FloatTensor row into a long tensor casts
        # the values back to int64; torch.tensor(..., dtype=torch.long) would
        # be more direct -- confirm before changing
        input_ids_tensor[b] = torch.FloatTensor(input_ids)
        input_mask_tensor[b] = torch.FloatTensor(input_mask)
        segment_ids_tensor[b] = torch.FloatTensor(segment_ids)
        lm_label_ids_tensor[b] = torch.FloatTensor(lm_label_ids)
    features = InputFeatures(input_ids=input_ids_tensor,
                             input_mask=input_mask_tensor,
                             segment_ids=segment_ids_tensor,
                             lm_label_ids=lm_label_ids_tensor,
                             )
    return features
def random_word(tokens, tokenizer):
    """
    Mask ~15% of the token ids for the masked-LM task.

    Note: unlike the original BERT reference code this operates on token
    *ids* (ints), not strings, and every selected position is replaced by
    the [MASK] id (there is no 10%-random / 10%-keep split).

    :param tokens: list of int, token ids of one sequence (modified in place).
    :param tokenizer: Tokenizer; only its ``vocab["[MASK]"]`` entry is used.
    :return: (list of int, list of int), the masked token ids and the
        per-position labels (the original id where masked, -1 elsewhere).
    """
    output_label = []
    for i, token in enumerate(tokens):
        # mask token with 15% probability
        # (the old `prob /= 0.15` rescaling and the try/except KeyError
        # around list.append were dead code and have been removed)
        if random.random() < 0.15:
            tokens[i] = tokenizer.vocab["[MASK]"]
            # predict the original id at this position
            output_label.append(token)
        else:
            # -1 positions will be ignored by the loss function later
            output_label.append(-1)
    return tokens, output_label
if __name__ == "__main__":
main() | [
"logging.getLogger",
"os.path.exists",
"numpy.random.normal",
"os.listdir",
"zipfile.ZipFile",
"os.makedirs",
"pathlib.Path.cwd",
"os.rename",
"tqdm.tqdm",
"torch.FloatTensor",
"io.open",
"tokenization.BertTokenizer.from_pretrained",
"random.random",
"torch.zeros",
"os.remove"
] | [((255, 282), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (272, 282), False, 'import logging\n'), ((329, 339), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (337, 339), False, 'from pathlib import Path\n'), ((1551, 1637), 'os.rename', 'os.rename', (['(wikifolder / wikifold / wikitraintokens)', '(traindirwiki / wikitraintokens)'], {}), '(wikifolder / wikifold / wikitraintokens, traindirwiki /\n wikitraintokens)\n', (1560, 1637), False, 'import os\n'), ((1638, 1714), 'os.rename', 'os.rename', (['(wikifolder / wikifold / wikidevtokens)', '(devdirwiki / wikidevtokens)'], {}), '(wikifolder / wikifold / wikidevtokens, devdirwiki / wikidevtokens)\n', (1647, 1714), False, 'import os\n'), ((2412, 2482), 'tokenization.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {'do_lower_case': '(True)'}), "('bert-base-uncased', do_lower_case=True)\n", (2441, 2482), False, 'from tokenization import BertTokenizer\n'), ((9224, 9283), 'torch.zeros', 'torch.zeros', (['[batch_size, max_seq_length]'], {'dtype': 'torch.long'}), '([batch_size, max_seq_length], dtype=torch.long)\n', (9235, 9283), False, 'import torch\n'), ((9309, 9368), 'torch.zeros', 'torch.zeros', (['[batch_size, max_seq_length]'], {'dtype': 'torch.long'}), '([batch_size, max_seq_length], dtype=torch.long)\n', (9320, 9368), False, 'import torch\n'), ((9395, 9454), 'torch.zeros', 'torch.zeros', (['[batch_size, max_seq_length]'], {'dtype': 'torch.long'}), '([batch_size, max_seq_length], dtype=torch.long)\n', (9406, 9454), False, 'import torch\n'), ((9482, 9541), 'torch.zeros', 'torch.zeros', (['[batch_size, max_seq_length]'], {'dtype': 'torch.long'}), '([batch_size, max_seq_length], dtype=torch.long)\n', (9493, 9541), False, 'import torch\n'), ((1132, 1162), 'zipfile.ZipFile', 'zipfile.ZipFile', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1147, 1162), False, 'import zipfile\n'), ((1412, 1440), 'os.path.exists', 'os.path.exists', 
(['traindirwiki'], {}), '(traindirwiki)\n', (1426, 1440), False, 'import os\n'), ((1450, 1475), 'os.makedirs', 'os.makedirs', (['traindirwiki'], {}), '(traindirwiki)\n', (1461, 1475), False, 'import os\n'), ((1487, 1513), 'os.path.exists', 'os.path.exists', (['devdirwiki'], {}), '(devdirwiki)\n', (1501, 1513), False, 'import os\n'), ((1523, 1546), 'os.makedirs', 'os.makedirs', (['devdirwiki'], {}), '(devdirwiki)\n', (1534, 1546), False, 'import os\n'), ((11510, 11538), 'torch.FloatTensor', 'torch.FloatTensor', (['input_ids'], {}), '(input_ids)\n', (11527, 11538), False, 'import torch\n'), ((11570, 11599), 'torch.FloatTensor', 'torch.FloatTensor', (['input_mask'], {}), '(input_mask)\n', (11587, 11599), False, 'import torch\n'), ((11632, 11662), 'torch.FloatTensor', 'torch.FloatTensor', (['segment_ids'], {}), '(segment_ids)\n', (11649, 11662), False, 'import torch\n'), ((11696, 11727), 'torch.FloatTensor', 'torch.FloatTensor', (['lm_label_ids'], {}), '(lm_label_ids)\n', (11713, 11727), False, 'import torch\n'), ((12534, 12549), 'random.random', 'random.random', ([], {}), '()\n', (12547, 12549), False, 'import random\n'), ((3676, 3725), 'os.path.exists', 'os.path.exists', (["(self.corpus_path / 'build_docs.p')"], {}), "(self.corpus_path / 'build_docs.p')\n", (3690, 3725), False, 'import os\n'), ((3813, 3841), 'os.listdir', 'os.listdir', (['self.corpus_path'], {}), '(self.corpus_path)\n', (3823, 3841), False, 'import os\n'), ((6364, 6379), 'random.random', 'random.random', ([], {}), '()\n', (6377, 6379), False, 'import random\n'), ((3743, 3787), 'os.remove', 'os.remove', (["(self.corpus_path / 'build_docs.p')"], {}), "(self.corpus_path / 'build_docs.p')\n", (3752, 3787), False, 'import os\n'), ((5283, 5328), 'io.open', 'open', (["(self.corpus_path / 'build_docs.p')", '"""wb"""'], {}), "(self.corpus_path / 'build_docs.p', 'wb')\n", (5287, 5328), False, 'from io import open\n'), ((5437, 5482), 'io.open', 'open', (["(self.corpus_path / 'build_docs.p')", '"""rb"""'], {}), 
"(self.corpus_path / 'build_docs.p', 'rb')\n", (5441, 5482), False, 'from io import open\n'), ((3864, 3918), 'io.open', 'open', (['(self.corpus_path / files)', '"""r"""'], {'encoding': 'encoding'}), "(self.corpus_path / files, 'r', encoding=encoding)\n", (3868, 3918), False, 'from io import open\n'), ((4199, 4247), 'tqdm.tqdm', 'tqdm', (['f'], {'desc': '"""Tokenization"""', 'total': 'corpus_lines'}), "(f, desc='Tokenization', total=corpus_lines)\n", (4203, 4247), False, 'from tqdm import tqdm, trange\n'), ((6293, 6343), 'numpy.random.normal', 'np.random.normal', (['self.seq_len', '(self.seq_len * 0.2)'], {}), '(self.seq_len, self.seq_len * 0.2)\n', (6309, 6343), True, 'import numpy as np\n')] |
import numpy as np
from game.confs import Block_Type, Confs
def get_calc_state(
    block_type: Block_Type,
    tmpx: int,
    tmpy: int,
    tmprotate: int,
    value_to_set: int = Confs.init_value.value,
):
    """
    Build an empty padded board and stamp the given tetromino onto it.

    The board is padded with one extra row on top, two extra rows at the
    bottom and two extra columns on each side of the playfield, so that
    collision checks near the walls become simple array lookups.

    Args:
        block_type: tetromino shape to draw.
        tmpx: hypothetical X coordinate (playfield units, before padding).
        tmpy: hypothetical Y coordinate (playfield units, before padding).
        tmprotate: hypothetical rotation in degrees (0 / 90 / 180 / 270).
        value_to_set: value written into every cell the piece occupies.

    Returns:
        np.ndarray: the padded board with the piece drawn onto it.
    """
    # Translate playfield coordinates into the padded board's coordinates.
    px, py = tmpx + 2, tmpy + 1
    board = np.zeros(
        (1 + Confs.row_count.value + 2, 2 + Confs.col_count.value + 2), np.ubyte
    )
    if block_type == Block_Type.I:  # straight bar
        if tmprotate == 0:
            board[py, px - 1 : px + 3] = value_to_set
        elif tmprotate == 90:
            board[py - 1 : py + 3, px] = value_to_set
    elif block_type == Block_Type.O:  # square; rotation is irrelevant
        board[py : py + 2, px : px + 2] = value_to_set
    elif block_type == Block_Type.T:  # T piece
        if tmprotate == 0:
            board[py, px - 1 : px + 2] = value_to_set
            board[py - 1, px] = value_to_set
        elif tmprotate == 180:
            board[py, px - 1 : px + 2] = value_to_set
            board[py + 1, px] = value_to_set
        elif tmprotate == 90:
            board[py - 1 : py + 2, px] = value_to_set
            board[py, px - 1] = value_to_set
        elif tmprotate == 270:
            board[py - 1 : py + 2, px] = value_to_set
            board[py, px + 1] = value_to_set
    elif block_type == Block_Type.Z:  # Z piece
        if tmprotate == 0:
            board[py - 1, px - 1 : px + 1] = value_to_set
            board[py, px : px + 2] = value_to_set
        elif tmprotate == 90:
            board[py : py + 2, px - 1] = value_to_set
            board[py - 1 : py + 1, px] = value_to_set
    elif block_type == Block_Type.S:  # mirrored Z piece
        if tmprotate == 0:
            board[py - 1, px : px + 2] = value_to_set
            board[py, px - 1 : px + 1] = value_to_set
        elif tmprotate == 90:
            board[py - 1 : py + 1, px - 1] = value_to_set
            board[py : py + 2, px] = value_to_set
    elif block_type == Block_Type.L:  # L piece
        if tmprotate == 0:
            board[py, px - 1 : px + 2] = value_to_set
            board[py - 1, px - 1] = value_to_set
        elif tmprotate == 180:
            board[py - 1, px - 1 : px + 2] = value_to_set
            board[py, px + 1] = value_to_set
        elif tmprotate == 90:
            board[py - 1 : py + 2, px] = value_to_set
            board[py + 1, px - 1] = value_to_set
        elif tmprotate == 270:
            board[py - 1 : py + 2, px - 1] = value_to_set
            board[py - 1, px] = value_to_set
    elif block_type == Block_Type.J:  # mirrored L piece
        if tmprotate == 0:
            board[py, px - 1 : px + 2] = value_to_set
            board[py - 1, px + 1] = value_to_set
        elif tmprotate == 180:
            board[py - 1, px - 1 : px + 2] = value_to_set
            board[py, px - 1] = value_to_set
        elif tmprotate == 90:
            board[py - 1 : py + 2, px] = value_to_set
            board[py - 1, px - 1] = value_to_set
        elif tmprotate == 270:
            board[py - 1 : py + 2, px - 1] = value_to_set
            board[py + 1, px] = value_to_set
    return board
| [
"numpy.zeros"
] | [((627, 714), 'numpy.zeros', 'np.zeros', (['(1 + Confs.row_count.value + 2, 2 + Confs.col_count.value + 2)', 'np.ubyte'], {}), '((1 + Confs.row_count.value + 2, 2 + Confs.col_count.value + 2), np\n .ubyte)\n', (635, 714), True, 'import numpy as np\n')] |
#!/usr/bin/python3
from src.AccuracyCalculator import *
import tensorflow as tf
import time
import numpy as np
import settings.DataSettings as dataSettings
# Hand-crafted per-frame network outputs: each inner pair reads
# (no-fight probability, fight probability).  The "maxCount" noted above each
# fixture is the longest run of consecutive frames whose fight probability is
# at least 0.5 — presumably the quantity the calculator thresholds on (the
# expected thresholds in the checks below are consistent with that reading).
# maxCount = 8
netPrediction_1 = np.array([[[0.9, 0.1], [0.7, 0.3], [0.6, 0.4], [0.4, 0.6], [0.3, 0.7],
                            [0.2, 0.8], [0.1, 0.9], [0.2, 0.8], [0.1, 0.9], [0.2, 0.8], [0.3, 0.7]]])
label_1 = np.array([[dataSettings.FIGHT_LABEL] * netPrediction_1.shape[0]])
# maxCount = 4
netPrediction_2 = np.array([[[0.9, 0.1], [0.7, 0.3], [0.3, 0.7], [0.4, 0.6], [0.3, 0.7],
                            [0.9, 0.1], [0.9, 0.1], [0.2, 0.8], [0.1, 0.9], [0.2, 0.8], [0.3, 0.7]]])
label_2 = np.array([[dataSettings.FIGHT_LABEL] * netPrediction_2.shape[0]])
# maxCount = 2
netPrediction_3 = np.array([[[0.9, 0.1], [0.3, 0.7], [0.9, 0.1], [0.4, 0.6], [0.8, 0.2],
                            [0.1, 0.9], [0.9, 0.1], [0.2, 0.8], [0.1, 0.9]]])
label_3 = np.array([[dataSettings.NO_FIGHT_LABEL] * netPrediction_3.shape[0]])
# maxCount = 6
netPrediction_4 = np.array([[[0.9, 0.1], [0.3, 0.7], [0.9, 0.1], [0.4, 0.6], [0.3, 0.7],
                            [0.1, 0.9], [0.1, 0.9], [0.2, 0.8], [0.1, 0.9], [0.7, 0.3]]])
label_4 = np.array([[dataSettings.NO_FIGHT_LABEL] * netPrediction_4.shape[0]])
def Check_CalculateAccuracy():
    """Feed a known mix of TP/FN/TN/FP videos and verify accuracy, precision
    and recall at threshold 5 against hand-computed answers."""
    print("Check_CalculateAccuracy()")
    calculator = VideosAccuracyCalculator()

    numberOfTP, numberOfFN, numberOfTN, numberOfFP = 5, 2, 3, 3
    # (prediction template, matching label template, number of copies to append)
    for predTemplate, labelTemplate, count in (
            (netPrediction_1, label_1, numberOfTP),   # true positives
            (netPrediction_2, label_2, numberOfFN),   # false negatives
            (netPrediction_3, label_3, numberOfTN),   # true negatives
            (netPrediction_4, label_4, numberOfFP)):  # false positives
        calculator.AppendNetPredictions(np.tile(predTemplate, [count, 1, 1]),
                                        np.tile(labelTemplate, [count, 1, 1]))

    # Hand-computed expectations from the confusion-matrix counts above.
    totalVideos = numberOfTP + numberOfFN + numberOfTN + numberOfFP
    answerOfAccuracy = float(numberOfTP + numberOfTN) / totalVideos
    answerOfPrecision = float(numberOfTP) / (numberOfTP + numberOfFP)
    answerOfRecall = float(numberOfTP) / (numberOfTP + numberOfFN)

    accuracy, precision, recall = calculator.CalculateAccuracyAtGivenThreshold(threshold_=5)
    print("\t (accuracy, precision, recall) = (", accuracy, ", ", precision, ", ", recall, ")")

    if abs(accuracy - answerOfAccuracy) >= 1e-5:
        raise ValueError(f"\t Accuracy (={accuracy}); However, answer = {answerOfAccuracy}")
    if abs(precision - answerOfPrecision) >= 1e-5:
        raise ValueError(f"\t Precision (={precision}); However, answer = {answerOfPrecision}")
    if abs(recall - answerOfRecall) >= 1e-5:
        raise ValueError(f"\t Recall (={recall}); However, answer = {answerOfRecall}")

    calculator.Reset()
    print("\t check passed.")
def Check_CalculateBestAccuracyAndThreshold():
    """Append each fixture video once and verify the calculator finds the
    hand-computed best threshold/accuracy pair."""
    print("Check_CalculateBestAccuracyAndThreshold()")
    summaryWriter = tf.summary.FileWriter("src/unit_test/accuracy")
    calculator = VideosAccuracyCalculator()
    for prediction, label in ((netPrediction_1, label_1),
                              (netPrediction_2, label_2),
                              (netPrediction_3, label_3),
                              (netPrediction_4, label_4)):
        calculator.AppendNetPredictions(prediction, label)

    bestThreshold, bestAccuracy = calculator.CalculateBestAccuracyAndThreshold(tf_summaryWriter_=summaryWriter, currentEpoch_=1)
    print("\t (bestThreshold, bestAccuracy) = (", bestThreshold, ", ", bestAccuracy, ")")

    # Expected optimum for the four fixtures above.
    answerOfThreshold = 3
    answerOfAccuracy = 0.75
    if bestThreshold != answerOfThreshold:
        raise ValueError(f"\t bestThreshold(={bestThreshold}); However, answer = {answerOfThreshold}")
    if abs(bestAccuracy - answerOfAccuracy) > 1e-5:
        raise ValueError(f"\t bestAccuracy(={bestAccuracy}); However, answer = {answerOfAccuracy}")

    calculator.Reset()
    print("\t check passed.")
def Check_ProcessingTime():
    """Smoke-check the processing time on a realistically sized random batch
    (400 videos x 40 frames) and print the append/calculate wall times."""
    print("Check_ProcessingTime()")
    summaryWriter = tf.summary.FileWriter("src/unit_test/accuracy")
    accuracyCalculator = VideosAccuracyCalculator()
    # Random per-frame softmax pairs (no-fight, fight) that sum to 1.
    predictionOfAllVideos = np.zeros([400, 40, 2])
    labelOfAllVideos = np.zeros([400, 40, 2])
    for i in range(400): # Test set has 400 videos
        for j in range(40): # Videos has 40 frames in average.
            fightProbility = np.random.rand()
            predictionOfAllVideos[i, j, :] = [1 - fightProbility, fightProbility]
        # Coin-flip a single label for the whole video and tile it over all frames.
        isFightLabel = np.random.rand() >= 0.5
        if isFightLabel:
            labelOfAllVideos[i, :, :] = np.tile(dataSettings.FIGHT_LABEL, [40, 1])
        else:
            labelOfAllVideos[i, :, :] = np.tile(dataSettings.NO_FIGHT_LABEL, [40, 1])
    # Time the append step (despite the message, this is a single total, not an average).
    startAppendTime = time.time()
    accuracyCalculator.AppendNetPredictions(predictionOfAllVideos, labelOfAllVideos)
    endAppendTime = time.time()
    print("\t Averaged AppendTime: ", endAppendTime - startAppendTime)
    # Time the threshold sweep.
    startCalculateTime = time.time()
    bestThreshold, bestAccuracy = accuracyCalculator.CalculateBestAccuracyAndThreshold(tf_summaryWriter_=summaryWriter, currentEpoch_=2)
    endCalculateTime = time.time()
    print("\t\t (bestThreshold, bestAccuracy) = (", bestThreshold, ", ", bestAccuracy, ")")
    print("\t Calculate Best Accuracy time: ", endCalculateTime - startCalculateTime)
    accuracyCalculator.Reset()
    print("\t check passed.")
if __name__ == "__main__":
    # Run each self-check framed by blank lines; the loop reproduces the
    # original console layout exactly (blank, check, blank, blank, ...).
    for check in (Check_CalculateAccuracy,
                  Check_CalculateBestAccuracyAndThreshold,
                  Check_ProcessingTime):
        print()
        check()
        print()
| [
"numpy.tile",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"tensorflow.summary.FileWriter",
"time.time"
] | [((190, 338), 'numpy.array', 'np.array', (['[[[0.9, 0.1], [0.7, 0.3], [0.6, 0.4], [0.4, 0.6], [0.3, 0.7], [0.2, 0.8], [\n 0.1, 0.9], [0.2, 0.8], [0.1, 0.9], [0.2, 0.8], [0.3, 0.7]]]'], {}), '([[[0.9, 0.1], [0.7, 0.3], [0.6, 0.4], [0.4, 0.6], [0.3, 0.7], [0.2,\n 0.8], [0.1, 0.9], [0.2, 0.8], [0.1, 0.9], [0.2, 0.8], [0.3, 0.7]]])\n', (198, 338), True, 'import numpy as np\n'), ((357, 422), 'numpy.array', 'np.array', (['[[dataSettings.FIGHT_LABEL] * netPrediction_1.shape[0]]'], {}), '([[dataSettings.FIGHT_LABEL] * netPrediction_1.shape[0]])\n', (365, 422), True, 'import numpy as np\n'), ((461, 609), 'numpy.array', 'np.array', (['[[[0.9, 0.1], [0.7, 0.3], [0.3, 0.7], [0.4, 0.6], [0.3, 0.7], [0.9, 0.1], [\n 0.9, 0.1], [0.2, 0.8], [0.1, 0.9], [0.2, 0.8], [0.3, 0.7]]]'], {}), '([[[0.9, 0.1], [0.7, 0.3], [0.3, 0.7], [0.4, 0.6], [0.3, 0.7], [0.9,\n 0.1], [0.9, 0.1], [0.2, 0.8], [0.1, 0.9], [0.2, 0.8], [0.3, 0.7]]])\n', (469, 609), True, 'import numpy as np\n'), ((628, 693), 'numpy.array', 'np.array', (['[[dataSettings.FIGHT_LABEL] * netPrediction_2.shape[0]]'], {}), '([[dataSettings.FIGHT_LABEL] * netPrediction_2.shape[0]])\n', (636, 693), True, 'import numpy as np\n'), ((732, 856), 'numpy.array', 'np.array', (['[[[0.9, 0.1], [0.3, 0.7], [0.9, 0.1], [0.4, 0.6], [0.8, 0.2], [0.1, 0.9], [\n 0.9, 0.1], [0.2, 0.8], [0.1, 0.9]]]'], {}), '([[[0.9, 0.1], [0.3, 0.7], [0.9, 0.1], [0.4, 0.6], [0.8, 0.2], [0.1,\n 0.9], [0.9, 0.1], [0.2, 0.8], [0.1, 0.9]]])\n', (740, 856), True, 'import numpy as np\n'), ((875, 943), 'numpy.array', 'np.array', (['[[dataSettings.NO_FIGHT_LABEL] * netPrediction_3.shape[0]]'], {}), '([[dataSettings.NO_FIGHT_LABEL] * netPrediction_3.shape[0]])\n', (883, 943), True, 'import numpy as np\n'), ((982, 1118), 'numpy.array', 'np.array', (['[[[0.9, 0.1], [0.3, 0.7], [0.9, 0.1], [0.4, 0.6], [0.3, 0.7], [0.1, 0.9], [\n 0.1, 0.9], [0.2, 0.8], [0.1, 0.9], [0.7, 0.3]]]'], {}), '([[[0.9, 0.1], [0.3, 0.7], [0.9, 0.1], [0.4, 0.6], [0.3, 0.7], [0.1,\n 0.9], [0.1, 0.9], 
[0.2, 0.8], [0.1, 0.9], [0.7, 0.3]]])\n', (990, 1118), True, 'import numpy as np\n'), ((1137, 1205), 'numpy.array', 'np.array', (['[[dataSettings.NO_FIGHT_LABEL] * netPrediction_4.shape[0]]'], {}), '([[dataSettings.NO_FIGHT_LABEL] * netPrediction_4.shape[0]])\n', (1145, 1205), True, 'import numpy as np\n'), ((1380, 1424), 'numpy.tile', 'np.tile', (['netPrediction_1', '[numberOfTP, 1, 1]'], {}), '(netPrediction_1, [numberOfTP, 1, 1])\n', (1387, 1424), True, 'import numpy as np\n'), ((1447, 1483), 'numpy.tile', 'np.tile', (['label_1', '[numberOfTP, 1, 1]'], {}), '(label_1, [numberOfTP, 1, 1])\n', (1454, 1483), True, 'import numpy as np\n'), ((1623, 1667), 'numpy.tile', 'np.tile', (['netPrediction_2', '[numberOfFN, 1, 1]'], {}), '(netPrediction_2, [numberOfFN, 1, 1])\n', (1630, 1667), True, 'import numpy as np\n'), ((1691, 1727), 'numpy.tile', 'np.tile', (['label_2', '[numberOfFN, 1, 1]'], {}), '(label_2, [numberOfFN, 1, 1])\n', (1698, 1727), True, 'import numpy as np\n'), ((1868, 1912), 'numpy.tile', 'np.tile', (['netPrediction_3', '[numberOfTN, 1, 1]'], {}), '(netPrediction_3, [numberOfTN, 1, 1])\n', (1875, 1912), True, 'import numpy as np\n'), ((1935, 1971), 'numpy.tile', 'np.tile', (['label_3', '[numberOfTN, 1, 1]'], {}), '(label_3, [numberOfTN, 1, 1])\n', (1942, 1971), True, 'import numpy as np\n'), ((2111, 2155), 'numpy.tile', 'np.tile', (['netPrediction_4', '[numberOfFP, 1, 1]'], {}), '(netPrediction_4, [numberOfFP, 1, 1])\n', (2118, 2155), True, 'import numpy as np\n'), ((2179, 2215), 'numpy.tile', 'np.tile', (['label_4', '[numberOfFP, 1, 1]'], {}), '(label_4, [numberOfFP, 1, 1])\n', (2186, 2215), True, 'import numpy as np\n'), ((3340, 3387), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""src/unit_test/accuracy"""'], {}), "('src/unit_test/accuracy')\n", (3361, 3387), True, 'import tensorflow as tf\n'), ((4421, 4468), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""src/unit_test/accuracy"""'], {}), 
"('src/unit_test/accuracy')\n", (4442, 4468), True, 'import tensorflow as tf\n'), ((4546, 4568), 'numpy.zeros', 'np.zeros', (['[400, 40, 2]'], {}), '([400, 40, 2])\n', (4554, 4568), True, 'import numpy as np\n'), ((4589, 4611), 'numpy.zeros', 'np.zeros', (['[400, 40, 2]'], {}), '([400, 40, 2])\n', (4597, 4611), True, 'import numpy as np\n'), ((5074, 5085), 'time.time', 'time.time', ([], {}), '()\n', (5083, 5085), False, 'import time\n'), ((5185, 5196), 'time.time', 'time.time', ([], {}), '()\n', (5194, 5196), False, 'import time\n'), ((5289, 5300), 'time.time', 'time.time', ([], {}), '()\n', (5298, 5300), False, 'import time\n'), ((5455, 5466), 'time.time', 'time.time', ([], {}), '()\n', (5464, 5466), False, 'import time\n'), ((4740, 4756), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4754, 4756), True, 'import numpy as np\n'), ((4849, 4865), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4863, 4865), True, 'import numpy as np\n'), ((4923, 4965), 'numpy.tile', 'np.tile', (['dataSettings.FIGHT_LABEL', '[40, 1]'], {}), '(dataSettings.FIGHT_LABEL, [40, 1])\n', (4930, 4965), True, 'import numpy as np\n'), ((5005, 5050), 'numpy.tile', 'np.tile', (['dataSettings.NO_FIGHT_LABEL', '[40, 1]'], {}), '(dataSettings.NO_FIGHT_LABEL, [40, 1])\n', (5012, 5050), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
class grid_map:
    """Simple rectangular grid-world: an H x W x 3 map image used for
    visualisation plus an H x W per-cell reward matrix."""

    def __init__(self, map_matrix=None, reward_matrix=None, start_index=(2, 2), goal_index=(16, 16), reward_bound=-5, reward_collision=-0.1):
        """
        Args:
            map_matrix: H x W x 3 image; only used by the drawing helpers.
            reward_matrix: H x W lookup of per-cell rewards.
            start_index: (row, col) of the start cell.
            goal_index: (row, col) of the goal cell (reaching it ends an episode).
            reward_bound: penalty returned when an action would leave the grid.
            reward_collision: reward value that marks obstacle cells in
                reward_matrix; kept for the heuristic-reward extension in step().
        """
        self.map_matrix = map_matrix
        self.state_space = map_matrix.shape[0:2]
        # (row, col) deltas: down, up, right, left.
        self.action_space = [ (1, 0), (-1, 0), (0, 1), (0, -1) ]
        self.reward_matrix = reward_matrix
        self.reward_bound = reward_bound
        self.reward_collision = reward_collision
        self.start_index = start_index
        self.goal_index = goal_index

    def step(self, cur_state_index, action_index, state_trans_prob=1):
        """
        Execute one (possibly stochastic) transition.

        With probability `state_trans_prob` the intended action is executed;
        otherwise one of the other three actions is taken uniformly at random.

        Returns:
            (next_state, reward, action_prob, done) where `action_prob` is the
            probability of the action that was actually executed.
        """
        others = (1 - state_trans_prob) / 3
        action_prob_list = [others, others, others, others]
        action_prob_list[action_index] = state_trans_prob
        real_action_index = np.random.choice([0, 1, 2, 3], p=action_prob_list)
        action_prob = action_prob_list[real_action_index]
        # BUGFIX: execute the *sampled* action.  Previously the intended
        # action_index was used here, which made the stochastic sampling a
        # no-op (with state_trans_prob=1 the behaviour is unchanged).
        action = self.action_space[real_action_index]
        done = False
        next_x = cur_state_index[0] + action[0]
        next_y = cur_state_index[1] + action[1]
        # Clamp to the grid and penalise attempts to step outside.
        if next_x > self.state_space[0] - 1:
            next_x = self.state_space[0] - 1
            reward = self.reward_bound
            done = True
        elif next_x < 0:
            next_x = 0
            reward = self.reward_bound
            done = True
        elif next_y > self.state_space[1] - 1:
            next_y = self.state_space[1] - 1
            reward = self.reward_bound
            done = True
        elif next_y < 0:
            next_y = 0
            reward = self.reward_bound
            done = True
        else:
            reward = self.reward_matrix[next_x, next_y]
            ## Extra Credit
            # ----------------------------------------------------------------
            # You can add the heuristic reward here, such as the dwa reward, astar reward, or distance-to-goal reward learned from previous lectures to achieve a regular policy as you expected (such as keeping moving away from the obstacle.). In addition, if the self.reward_matrix[next_x, next_y] == reward_collision, there should the obstacle grid index.
            pass
            # example: reward = self.reward_matrix[next_x, next_y] + heuristic_reward
            # ----------------------------------------------------------------
        # NOTE(review): this clears the boundary-hit `done`, so hitting a wall
        # penalises but does not terminate the episode — preserved as-is;
        # confirm this matches the intended episode semantics.
        done = False
        if next_x == self.goal_index[0] and next_y == self.goal_index[1]:
            done = True
        next_state = (next_x, next_y)
        return next_state, reward, action_prob, done

    def set_path(self, index):
        """Paint the visited cell white on the visualisation map."""
        self.map_matrix[index[0], index[1], :] = 255

    def show_map(self):
        """Blocking display of the current map."""
        plt.imshow(self.map_matrix)
        plt.show()

    def draw_map(self, time=0.01):
        """Non-blocking display, pausing `time` seconds so animations refresh."""
        plt.imshow(self.map_matrix)
        plt.pause(time)
"numpy.random.choice",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show"
] | [((866, 916), 'numpy.random.choice', 'np.random.choice', (['[0, 1, 2, 3]'], {'p': 'action_prob_list'}), '([0, 1, 2, 3], p=action_prob_list)\n', (882, 916), True, 'import numpy as np\n'), ((2760, 2787), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.map_matrix'], {}), '(self.map_matrix)\n', (2770, 2787), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2806), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2804, 2806), True, 'import matplotlib.pyplot as plt\n'), ((2851, 2878), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.map_matrix'], {}), '(self.map_matrix)\n', (2861, 2878), True, 'import matplotlib.pyplot as plt\n'), ((2887, 2902), 'matplotlib.pyplot.pause', 'plt.pause', (['time'], {}), '(time)\n', (2896, 2902), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# coding: utf-8
import os
from PIL import Image
import zipfile
import numpy as np
import sys
sys.path.append('../src')
from make_info_dict import make_info_dict
def get_img_fpath_list(info_dict):
    """Return the archive sub-paths of all entries flagged as images."""
    # Boolean-mask the filename array with the parallel is_image flags.
    return info_dict['filename'][info_dict['is_image']]
def get_random_sample_of_img_names(info_dict, num_images = 25):
    """Draw `num_images` distinct image sub-paths uniformly at random."""
    candidates = info_dict['filename'][info_dict['is_image']]
    # replace=False: sample without repetition (raises if num_images exceeds
    # the number of available images).
    return np.random.choice(candidates, num_images, replace=False)
def calculate_thumbsize(num_images_in_zip, collage_size=(750,750), min_thumb_len=50):
    """
    Pick a square thumbnail size for a collage.

    The thumbnail edge is what would be needed to show every image, but never
    below `min_thumb_len` so thumbnails stay recognisable.

    Args:
        num_images_in_zip: total number of images available.
        collage_size: (width, height) of the collage in pixels.
        min_thumb_len: minimum thumbnail edge length in pixels.

    Returns:
        ((edge, edge), n): the thumbnail size and how many thumbnails of that
        size fit per collage side.
    """
    # Thumbnails per side needed to fit every image (square grid).
    num_per_side_all = np.ceil(np.sqrt(num_images_in_zip))
    # Edge length if all images were shown, clamped to the readable minimum.
    # (Removed the unused `max_thumb_len` local from the original.)
    thumb_len = max(min_thumb_len, collage_size[0] / num_per_side_all)
    thumbsize = (thumb_len, thumb_len)
    # How many thumbnails of the chosen size actually fit per side.
    num_per_side_actual = int(np.ceil(collage_size[0]/thumbsize[0]))
    return thumbsize, num_per_side_actual
def create_collage(zipfile_obj,
                   img_names,
                   imgs_per_side=15,
                   thumb_size=(50,50),
                   collage_size=(750,750)):
    """
    Paste thumbnails of the named archive members onto one collage image.

    Images are laid out row by row on an `imgs_per_side` x `imgs_per_side`
    grid; pasting stops when `img_names` is exhausted.

    Args:
        zipfile_obj: open zipfile.ZipFile to read the images from.
        img_names: archive member names to paste, in grid order.
        imgs_per_side: grid dimension (cells per row/column).
        thumb_size: (w, h) of each thumbnail cell in pixels.
        collage_size: (w, h) of the output image in pixels.

    Returns:
        PIL.Image: the assembled RGBA collage.
    """
    num_imgs = len(img_names)
    collage = Image.new("RGBA", collage_size)
    ctr = 0
    y_offset = 0
    for _row in range(imgs_per_side):
        x_offset = 0
        for _col in range(imgs_per_side):
            if ctr >= num_imgs:
                # BUGFIX: stop before indexing past the end.  The old code
                # only broke out of the inner loop, so with fewer names than
                # grid cells the next row raised IndexError on img_names[ctr].
                return collage
            img_name = img_names[ctr]
            with zipfile_obj.open(img_name, mode='r') as img_file:
                img = Image.open(img_file)
                img.thumbnail(size=thumb_size)  # in-place, preserves aspect ratio
                collage.paste(img, (x_offset, y_offset))
            x_offset += thumb_size[0]
            ctr += 1
        y_offset += thumb_size[1]
    return collage
def main(zipfile_obj, info_dict, output_dir):
    """Sample a grid of images from the archive, build a collage, and save it
    as <output_dir>/collage.png."""
    img_subpaths = get_img_fpath_list(info_dict)
    num_images_in_zip = len(img_subpaths)
    # Layout tuning: readable thumbnails on an 800x800 canvas.
    min_thumb_len=200
    collage_size=(800,800)
    thumbsize, imgs_per_side = calculate_thumbsize(num_images_in_zip,
                                                   collage_size=collage_size,
                                                   min_thumb_len=min_thumb_len)
    # Show exactly one full grid worth of images.
    # NOTE(review): if the grid holds more cells than the archive has images,
    # the no-replacement sampling below will raise — confirm callers guarantee
    # enough images.
    num_images_to_show = int(imgs_per_side**2)
    percentage = (num_images_to_show/num_images_in_zip )
    print('showing {:.2%} of images'.format(percentage))
    img_names = get_random_sample_of_img_names(info_dict, num_images=num_images_to_show)
    collage = create_collage(zipfile_obj,
                             img_names,
                             thumb_size=thumbsize,
                             collage_size=collage_size,
                             imgs_per_side=imgs_per_side)
    output_fpath = os.path.join(output_dir, 'collage.png')
    collage.save(output_fpath)
    print(f'saved collage of sample images to {output_fpath}')
    return
if __name__ == '__main__':
    zip_fpath = "../data/raw/graphische_sammlung_sample.zip"
    # BUGFIX: main() expects a directory (it joins 'collage.png' itself); the
    # old value '../data/processed/collage.png' produced a nested path.
    output_dir = '../data/processed'
    # BUGFIX: ZipFile was opened with undefined name `fpath` (NameError);
    # the intended variable is `zip_fpath`.
    zipfile_obj = zipfile.ZipFile(zip_fpath, "r")
    info_dict = make_info_dict(zipfile_obj)
    main(zipfile_obj, info_dict, output_dir)
| [
"numpy.ceil",
"PIL.Image.open",
"numpy.sqrt",
"zipfile.ZipFile",
"numpy.random.choice",
"PIL.Image.new",
"os.path.join",
"sys.path.append",
"make_info_dict.make_info_dict"
] | [((116, 141), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (131, 141), False, 'import sys\n'), ((463, 520), 'numpy.random.choice', 'np.random.choice', (['img_subpaths', 'num_images'], {'replace': '(False)'}), '(img_subpaths, num_images, replace=False)\n', (479, 520), True, 'import numpy as np\n'), ((1331, 1362), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'collage_size'], {}), "('RGBA', collage_size)\n", (1340, 1362), False, 'from PIL import Image\n'), ((2832, 2871), 'os.path.join', 'os.path.join', (['output_dir', '"""collage.png"""'], {}), "(output_dir, 'collage.png')\n", (2844, 2871), False, 'import os\n'), ((3137, 3164), 'zipfile.ZipFile', 'zipfile.ZipFile', (['fpath', '"""r"""'], {}), "(fpath, 'r')\n", (3152, 3164), False, 'import zipfile\n'), ((3182, 3209), 'make_info_dict.make_info_dict', 'make_info_dict', (['zipfile_obj'], {}), '(zipfile_obj)\n', (3196, 3209), False, 'from make_info_dict import make_info_dict\n'), ((766, 792), 'numpy.sqrt', 'np.sqrt', (['num_images_in_zip'], {}), '(num_images_in_zip)\n', (773, 792), True, 'import numpy as np\n'), ((970, 1009), 'numpy.ceil', 'np.ceil', (['(collage_size[0] / thumbsize[0])'], {}), '(collage_size[0] / thumbsize[0])\n', (977, 1009), True, 'import numpy as np\n'), ((1568, 1588), 'PIL.Image.open', 'Image.open', (['img_file'], {}), '(img_file)\n', (1578, 1588), False, 'from PIL import Image\n')] |
import numpy as np
from sklearn.cluster import DBSCAN, KMeans
AVG_EARTH_RADIUS = 6371
def _haversine_array(lat1, lng1, lat2, lng2):
lat = lat2 - lat1
lng = lng2 - lng1
d = np.sin(lat * 0.5) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(lng * 0.5) ** 2
h = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(d))
return h
def dummy_manhattan_distance(row):
    """Pseudo-'Manhattan' trip length in km for a (lat1, lng1, lat2, lng2) row,
    built from two haversine components (degrees in, km out)."""
    lat1, lng1, lat2, lng2 = map(np.radians, row)
    # First leg: longitude change at the source latitude; second term is the
    # direct great-circle distance between the two endpoints (kept as in the
    # reference implementation).
    leg_along_lat = _haversine_array(lat1, lng1, lat1, lng2)
    direct = _haversine_array(lat1, lng1, lat2, lng2)
    return leg_along_lat + direct
def bearing_array(row):
    """Initial great-circle bearing, in degrees, of the trip described by
    `row` = (lat1, lng1, lat2, lng2) in degrees."""
    src_lat, src_lng, dst_lat, dst_lng = row
    delta_lng = np.radians(dst_lng - src_lng)
    src_lat, dst_lat = np.radians(src_lat), np.radians(dst_lat)
    east = np.sin(delta_lng) * np.cos(dst_lat)
    north = np.cos(src_lat) * np.sin(dst_lat) - np.sin(src_lat) * np.cos(dst_lat) * np.cos(delta_lng)
    return np.degrees(np.arctan2(east, north))
def center_lat_feat(row):
    """Latitude of the midpoint of a (lat1, lng1, lat2, lng2) row."""
    lat1, _, lat2, _ = row
    return (lat1 + lat2) / 2
def center_lng_feat(row):
    """Longitude of the midpoint of a (lat1, lng1, lat2, lng2) row."""
    _, lng1, _, lng2 = row
    return (lng1 + lng2) / 2
def coords_clusters_dbscan(coords):
    """Density-cluster all pickup and dropoff points jointly with DBSCAN and
    return (source labels, destination labels), one label per input row
    (-1 marks noise)."""
    src_pts = coords[['sourceLatitude', 'sourceLongitude']].values
    dst_pts = coords[['destinationLatitude', 'destinationLongitude']].values
    # Stack sources on top of destinations so both share one clustering.
    labels = DBSCAN(eps=0.01, min_samples=5, leaf_size=30).fit_predict(np.vstack((src_pts, dst_pts)))
    n = len(coords)
    return labels[:n], labels[n:]
def coords_clusters_kmeans(coords, n_clusters):
    """Fit k-means on all pickup and dropoff points jointly and return
    (source cluster ids, destination cluster ids), one per input row."""
    src_pts = coords[['sourceLatitude', 'sourceLongitude']].values
    dst_pts = coords[['destinationLatitude', 'destinationLongitude']].values
    # Fit on the pooled endpoints (fixed seed for reproducibility), then
    # assign each endpoint set separately.
    model = KMeans(n_clusters=n_clusters, random_state=42).fit(np.vstack((src_pts, dst_pts)))
    src_cluster = model.predict(coords[['sourceLatitude', 'sourceLongitude']])
    dest_cluster = model.predict(coords[['destinationLatitude', 'destinationLongitude']])
    return src_cluster, dest_cluster
def coord_features(data, features, do_clusters=True):
    """Derive coordinate-based features from `data` and write them into the
    `features` frame in place (distance, bearing, midpoint, and optionally
    cluster ids for source and destination)."""
    coords = data[['sourceLatitude', 'sourceLongitude', 'destinationLatitude', 'destinationLongitude']]
    # Row-wise scalar features.
    features['dmd'] = coords.apply(dummy_manhattan_distance, axis=1)
    features['bearing_array'] = coords.apply(bearing_array, axis=1)
    features['center_lat'] = coords.apply(center_lat_feat, axis=1)
    features['center_lng'] = coords.apply(center_lng_feat, axis=1)
    if do_clusters:
        # Clustering is comparatively expensive, hence the opt-out flag.
        features['cluster_src_db'], features['cluster_dest_db'] = coords_clusters_dbscan(coords)
        features['cluster_src_km'], features['cluster_dest_km'] = coords_clusters_kmeans(coords, n_clusters=120)
return features | [
"numpy.radians",
"sklearn.cluster.KMeans",
"numpy.sqrt",
"numpy.arctan2",
"numpy.cos",
"numpy.vstack",
"numpy.sin",
"sklearn.cluster.DBSCAN"
] | [((613, 636), 'numpy.radians', 'np.radians', (['(lng2 - lng1)'], {}), '(lng2 - lng1)\n', (623, 636), True, 'import numpy as np\n'), ((1178, 1312), 'numpy.vstack', 'np.vstack', (["(coords[['sourceLatitude', 'sourceLongitude']].values, coords[[\n 'destinationLatitude', 'destinationLongitude']].values)"], {}), "((coords[['sourceLatitude', 'sourceLongitude']].values, coords[[\n 'destinationLatitude', 'destinationLongitude']].values))\n", (1187, 1312), True, 'import numpy as np\n'), ((1614, 1748), 'numpy.vstack', 'np.vstack', (["(coords[['sourceLatitude', 'sourceLongitude']].values, coords[[\n 'destinationLatitude', 'destinationLongitude']].values)"], {}), "((coords[['sourceLatitude', 'sourceLongitude']].values, coords[[\n 'destinationLatitude', 'destinationLongitude']].values))\n", (1623, 1748), True, 'import numpy as np\n'), ((716, 737), 'numpy.sin', 'np.sin', (['lng_delta_rad'], {}), '(lng_delta_rad)\n', (722, 737), True, 'import numpy as np\n'), ((740, 752), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (746, 752), True, 'import numpy as np\n'), ((865, 881), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (875, 881), True, 'import numpy as np\n'), ((187, 204), 'numpy.sin', 'np.sin', (['(lat * 0.5)'], {}), '(lat * 0.5)\n', (193, 204), True, 'import numpy as np\n'), ((306, 316), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (313, 316), True, 'import numpy as np\n'), ((761, 773), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (767, 773), True, 'import numpy as np\n'), ((776, 788), 'numpy.sin', 'np.sin', (['lat2'], {}), '(lat2)\n', (782, 788), True, 'import numpy as np\n'), ((821, 842), 'numpy.cos', 'np.cos', (['lng_delta_rad'], {}), '(lng_delta_rad)\n', (827, 842), True, 'import numpy as np\n'), ((1353, 1398), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.01)', 'min_samples': '(5)', 'leaf_size': '(30)'}), '(eps=0.01, min_samples=5, leaf_size=30)\n', (1359, 1398), False, 'from sklearn.cluster import DBSCAN, KMeans\n'), ((1786, 1832), 
'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(42)'}), '(n_clusters=n_clusters, random_state=42)\n', (1792, 1832), False, 'from sklearn.cluster import DBSCAN, KMeans\n'), ((212, 224), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (218, 224), True, 'import numpy as np\n'), ((227, 239), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (233, 239), True, 'import numpy as np\n'), ((242, 259), 'numpy.sin', 'np.sin', (['(lng * 0.5)'], {}), '(lng * 0.5)\n', (248, 259), True, 'import numpy as np\n'), ((791, 803), 'numpy.sin', 'np.sin', (['lat1'], {}), '(lat1)\n', (797, 803), True, 'import numpy as np\n'), ((806, 818), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (812, 818), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
import numpy as np
import math
from scipy.spatial import KDTree
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
POSE_LOG_PERIOD = 100  # log the pose once every this many pose callbacks (see loginfo_pose_xy)
SMALL_VELOCITY = 1.  # target speeds below this are clamped to 0 when decelerating; presumably m/s — confirm
MAX_DECEL = 0.5  # deceleration shaping v = sqrt(2*a*d) stopping profile; presumably m/s^2 — confirm
class WaypointUpdater(object):
    """ROS node that publishes the next LOOKAHEAD_WPS waypoints ahead of the
    car, slowing the trajectory down when a red-light stop line is ahead."""

    def __init__(self):
        """Set up subscriptions/publisher, initialise state, and start the
        publishing loop (this call blocks until shutdown via loop())."""
        self.node_name = 'waypoint_updater'
        rospy.init_node(self.node_name)
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        # TODO: Add a subscriber for /obstacle_waypoint
        self.final_waypoints_pub = rospy.Publisher('/final_waypoints', Lane, queue_size=1)
        self.server_rate = rospy.get_param('/server_rate', 50.) # 50 Hz by default
        rospy.loginfo('server_rate={0}'.format(self.server_rate))
        # Latest pose and base-waypoint state filled in by the callbacks.
        self.pose = None
        self.waypoints = None
        self.wp_tree = None
        self.pose_counter = 0
        self.tl_stopline_idx = -1  # -1 means "no red light ahead"
        rospy.loginfo('Starting {}'.format(self.node_name))
        self.loop()

    def loop(self):
        """Main publishing loop: at server_rate Hz, find the waypoint closest
        ahead of the car and publish the lane from there."""
        rate = rospy.Rate(self.server_rate)
        while not rospy.is_shutdown():
            if self.wp_tree and self.pose:
                #self.loginfo_pose_xy()
                closest_idx = self.find_closest()
                lane = self.generate_lane(closest_idx)
                self.publish_final_waypoints(lane)
            rate.sleep()

    def loginfo_pose_xy(self):
        """Debug helper: log the current (x, y) pose every POSE_LOG_PERIOD
        pose callbacks (currently disabled in loop())."""
        if self.pose_counter % POSE_LOG_PERIOD == 0:
            p = self.pose.pose.position
            t = self.pose.header.stamp
            rospy.loginfo('t={}: ({:.3f}, {:.3f})'.format(t, p.x, p.y))
            self.pose_counter = 0

    def find_closest(self):
        """Return the index of the closest base waypoint *ahead* of the car.

        The KD-tree gives the nearest waypoint; the dot product with the
        previous-segment direction tells whether it lies behind the car, in
        which case the next index is taken (wrapping around the track).
        """
        p = self.pose.pose.position
        xy = np.array([p.x, p.y])
        closest_idx = self.wp_tree.query(xy, 1)[1]
        target = self.waypoints_xy[closest_idx]
        prev = self.waypoints_xy[closest_idx - 1]
        val = np.dot(target - prev, xy - target)
        if val > 0:
            # Nearest waypoint is behind the car: advance one index.
            closest_idx = (closest_idx + 1) % len(self.waypoints_xy)
        return closest_idx

    def publish_final_waypoints(self, lane):
        """Publish the prepared Lane message on /final_waypoints."""
        self.final_waypoints_pub.publish(lane)

    def generate_lane(self, closest_idx):
        """Build the Lane to publish: the next LOOKAHEAD_WPS base waypoints,
        replaced by a decelerating profile when a stop line is within range."""
        lane = Lane()
        lane.header = self.waypoints.header
        waypoints_normal = self.waypoints.waypoints[closest_idx:closest_idx+LOOKAHEAD_WPS]
        farthest_idx = closest_idx + LOOKAHEAD_WPS
        if self.tl_stopline_idx == -1 or self.tl_stopline_idx >= farthest_idx:
            # No red light within the published horizon: pass through as-is.
            lane.waypoints = waypoints_normal
        else:
            lane.waypoints = self.decelerate_waypoints(waypoints_normal, closest_idx)
        return lane

    def decelerate_waypoints(self, waypoints_normal, closest_idx):
        """Return a copy of the waypoints whose target speeds ramp down as
        v = sqrt(2 * MAX_DECEL * distance_to_stop), never exceeding the
        original speed, and clamped to 0 below SMALL_VELOCITY."""
        waypoints_decel = []
        for i, wp in enumerate(waypoints_normal):
            new_wp = Waypoint()
            new_wp.pose = wp.pose
            # Stop two waypoints short of the stop line so the car's nose
            # does not cross it.
            stop_idx = max(self.tl_stopline_idx - closest_idx - 2, 0)
            dist = self.distance(waypoints_normal, i, stop_idx)
            vel = math.sqrt(2 * MAX_DECEL * dist)
            if vel < SMALL_VELOCITY:
                vel = 0.
            vx = wp.twist.twist.linear.x
            new_wp.twist.twist.linear.x = min(vel, vx)
            waypoints_decel.append(new_wp)
        return waypoints_decel

    def pose_cb(self, msg):
        """Store the latest vehicle pose (PoseStamped)."""
        self.pose = msg
        self.pose_counter += 1

    def waypoints_cb(self, waypoints):
        """Cache the base waypoints and build a KD-tree over their (x, y)
        positions for fast nearest-neighbour queries."""
        self.waypoints = waypoints
        positions = (wp.pose.pose.position for wp in self.waypoints.waypoints)
        self.waypoints_xy = np.array([[p.x, p.y] for p in positions])
        self.wp_tree = KDTree(self.waypoints_xy)
        rospy.loginfo("Got {} base waypoints".format(len(self.waypoints_xy)))

    def traffic_cb(self, msg):
        """Store the stop-line waypoint index of the nearest red light
        (-1 when there is none)."""
        self.tl_stopline_idx = msg.data

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def get_waypoint_velocity(self, waypoint):
        """Return a waypoint's target forward speed."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target forward speed of waypoints[waypoint] in place."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Cumulative 3-D path length along waypoints from index wp1 to wp2
        (sum of consecutive segment lengths)."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
if __name__ == '__main__':
    try:
        # Constructing the node blocks in its internal publishing loop.
        WaypointUpdater()
    except rospy.ROSInterruptException:
        # Raised by rospy when ROS is shut down while the node is running.
        rospy.logerr('Could not start waypoint updater node.')
| [
"rospy.logerr",
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_param",
"scipy.spatial.KDTree",
"math.sqrt",
"numpy.array",
"numpy.dot",
"rospy.Rate",
"styx_msgs.msg.Waypoint",
"rospy.Publisher",
"styx_msgs.msg.Lane"
] | [((1106, 1137), 'rospy.init_node', 'rospy.init_node', (['self.node_name'], {}), '(self.node_name)\n', (1121, 1137), False, 'import rospy\n'), ((1147, 1207), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (1163, 1207), False, 'import rospy\n'), ((1216, 1276), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (1232, 1276), False, 'import rospy\n'), ((1285, 1346), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/traffic_waypoint"""', 'Int32', 'self.traffic_cb'], {}), "('/traffic_waypoint', Int32, self.traffic_cb)\n", (1301, 1346), False, 'import rospy\n'), ((1441, 1496), 'rospy.Publisher', 'rospy.Publisher', (['"""/final_waypoints"""', 'Lane'], {'queue_size': '(1)'}), "('/final_waypoints', Lane, queue_size=1)\n", (1456, 1496), False, 'import rospy\n'), ((1525, 1562), 'rospy.get_param', 'rospy.get_param', (['"""/server_rate"""', '(50.0)'], {}), "('/server_rate', 50.0)\n", (1540, 1562), False, 'import rospy\n'), ((1913, 1941), 'rospy.Rate', 'rospy.Rate', (['self.server_rate'], {}), '(self.server_rate)\n', (1923, 1941), False, 'import rospy\n'), ((2637, 2657), 'numpy.array', 'np.array', (['[p.x, p.y]'], {}), '([p.x, p.y])\n', (2645, 2657), True, 'import numpy as np\n'), ((2824, 2858), 'numpy.dot', 'np.dot', (['(target - prev)', '(xy - target)'], {}), '(target - prev, xy - target)\n', (2830, 2858), True, 'import numpy as np\n'), ((3129, 3135), 'styx_msgs.msg.Lane', 'Lane', ([], {}), '()\n', (3133, 3135), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((4513, 4554), 'numpy.array', 'np.array', (['[[p.x, p.y] for p in positions]'], {}), '([[p.x, p.y] for p in positions])\n', (4521, 4554), True, 'import numpy as np\n'), ((4578, 4603), 'scipy.spatial.KDTree', 'KDTree', (['self.waypoints_xy'], {}), '(self.waypoints_xy)\n', (4584, 4603), False, 'from scipy.spatial 
import KDTree\n'), ((1960, 1979), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1977, 1979), False, 'import rospy\n'), ((3765, 3775), 'styx_msgs.msg.Waypoint', 'Waypoint', ([], {}), '()\n', (3773, 3775), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((3964, 3995), 'math.sqrt', 'math.sqrt', (['(2 * MAX_DECEL * dist)'], {}), '(2 * MAX_DECEL * dist)\n', (3973, 3995), False, 'import math\n'), ((5196, 5261), 'math.sqrt', 'math.sqrt', (['((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)'], {}), '((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n', (5205, 5261), False, 'import math\n'), ((5530, 5584), 'rospy.logerr', 'rospy.logerr', (['"""Could not start waypoint updater node."""'], {}), "('Could not start waypoint updater node.')\n", (5542, 5584), False, 'import rospy\n')] |
import numpy as np
from agents import Agent
from buffers import BasicBuffer
from networks import DeepQNetwork
class DeepQAgent(Agent):
    """Deep Q-learning agent with experience replay.

    Transitions are pushed into a replay buffer; once the buffer holds more
    than ``replay_buffer_minreplay`` transitions, each ``train`` call performs
    one minibatch update of the Q-network (``experience_replay``).
    """

    def __init__(self, env, policy, network=None, buffer=None, gamma=0.99,
                 alpha=0.001, batch_size=32, replay_buffer_size=2000,
                 replay_buffer_minreplay=500):
        """
        :param env: environment instance, used to size the Q-network.
        :param policy: exploration policy exposing ``pi(q_fn, s)``.
        :param network: optional factory ``network(env, trainable=True)``;
            when omitted a ``DeepQNetwork`` with learning rate ``alpha`` is built.
        :param buffer: optional replay buffer; defaults to ``BasicBuffer``.
        :param gamma: discount factor.
        :param alpha: learning rate for the default network.
        :param batch_size: minibatch size used in ``experience_replay``.
        :param replay_buffer_size: maximum number of stored transitions.
        :param replay_buffer_minreplay: buffer size required before training starts.
        """
        super().__init__(env, policy, gamma=gamma)
        self.batch_size = batch_size
        self.memory = BasicBuffer(replay_buffer_size) if buffer is None else buffer
        # Bug fix: the fallback previously referenced the undefined name
        # ``QNetwork``; the class imported at the top of the file is
        # ``DeepQNetwork``, so the old code raised NameError when network=None.
        self.Q = network(env, trainable=True) if network else DeepQNetwork(env, trainable=True, learning_rate=alpha)
        self.replay_buffer_minreplay = replay_buffer_minreplay
        self.steps, self.episodes = 0, 0

    def pi(self, s):
        """Return the policy's action for state ``s`` given current Q-values."""
        return self.policy.pi(lambda s: { a: q for a, q in enumerate(zip(*self.Q(s))) }, s)

    def train(self, s, a, r, sp, done=False):
        """Record the transition ``(s, a, r, sp, done)`` and, once the buffer
        is warm, run one experience-replay update."""
        self.memory.push(s, a, r, sp, done)  # save current observation
        if len(self.memory) > self.replay_buffer_minreplay:
            self.experience_replay()  # do the actual training step
        # ``done`` is a bool; adding it increments the episode counter at episode end.
        self.steps, self.episodes = self.steps + 1, self.episodes + done

    def experience_replay(self):
        """
        Perform the actual deep-Q learning step.

        The actual learning is handled by calling self.Q.fit(s, target)
        where s is all states sampled from the replay buffer and target is
        the desired value of self.Q(s). target must therefore be of size
        Batch x Actions; fit minimizes |Q(s) - target|^2. Most entries of
        target equal self.Q(s) so only the taken actions contribute to the
        loss; their entries are set to the Q-learning target
        r + gamma * max_a' Q(sp, a') (no bootstrap on terminal states).
        """
        # Sample numpy arrays of leading dimension [batch_size] from the buffer.
        s, a, r, sp, done = self.memory.sample(self.batch_size)
        target, _ = self.Q(s)
        Q_sp, _ = self.Q(sp)
        # Overwrite only the entries for the actions actually taken.
        target[np.arange(self.batch_size), a] = r.squeeze() + self.gamma * np.max(Q_sp, axis=1) * (1 - done)
        self.Q.fit(s, target)

    def __str__(self):
        return f"DQN_{self.gamma}"
"numpy.max",
"numpy.arange",
"buffers.BasicBuffer"
] | [((404, 435), 'buffers.BasicBuffer', 'BasicBuffer', (['replay_buffer_size'], {}), '(replay_buffer_size)\n', (415, 435), False, 'from buffers import BasicBuffer\n'), ((2074, 2100), 'numpy.arange', 'np.arange', (['self.batch_size'], {}), '(self.batch_size)\n', (2083, 2100), True, 'import numpy as np\n'), ((2134, 2154), 'numpy.max', 'np.max', (['Q_sp'], {'axis': '(1)'}), '(Q_sp, axis=1)\n', (2140, 2154), True, 'import numpy as np\n')] |
"""
CanICA
"""
# Author: <NAME>, <NAME>,
# License: BSD 3 clause
from operator import itemgetter
import numpy as np
from scipy.stats import scoreatpercentile
from sklearn.decomposition import fastica
from sklearn.externals.joblib import Memory, delayed, Parallel
from sklearn.utils import check_random_state
from .multi_pca import MultiPCA
class CanICA(MultiPCA):
    """Perform Canonical Independent Component Analysis.
    Parameters
    ----------
    mask: Niimg-like object or MultiNiftiMasker instance, optional
        Mask to be used on data. If an instance of masker is passed,
        then its mask will be used. If no mask is given,
        it will be computed automatically by a MultiNiftiMasker with default
        parameters.
    n_components: int
        Number of components to extract
    smoothing_fwhm: float, optional
        If smoothing_fwhm is not None, it gives the size in millimeters of the
        spatial smoothing to apply to the signal.
    do_cca: boolean, optional
        Indicate if a Canonical Correlation Analysis must be run after the
        PCA.
    standardize: boolean, optional
        If standardize is True, the time-series are centered and normed:
        their variance is put to 1 in the time dimension.
    threshold: None, 'auto' or float
        If None, no thresholding is applied. If 'auto',
        then we apply a thresholding that will keep the n_voxels,
        more intense voxels across all the maps, n_voxels being the number
        of voxels in a brain volume. A float value indicates the
        ratio of voxels to keep (2. means that the maps will together
        have 2 x n_voxels non-zero voxels ). The float value
        must be bounded by [0. and n_components].
    n_init: int, optional
        The number of times the fastICA algorithm is restarted
    random_state: int or RandomState
        Pseudo number generator state used for random sampling.
    target_affine: 3x3 or 4x4 matrix, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.
    target_shape: 3-tuple of integers, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.
    low_pass: None or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details
    high_pass: None or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details
    t_r: float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details
    memory: instance of joblib.Memory or string
        Used to cache the masking process.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.
    memory_level: integer, optional
        Rough estimator of the amount of memory used by caching. Higher value
        means more memory for caching.
    n_jobs: integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs', -2 'all CPUs but one', and so on.
    verbose: integer, optional
        Indicate the level of verbosity. By default, nothing is printed
    References
    ----------
    * <NAME> et al. "A group model for stable multi-subject ICA on
      fMRI datasets", NeuroImage Vol 51 (2010), p. 288-299
    * <NAME> et al. "ICA-based sparse features recovery from fMRI
      datasets", IEEE ISBI 2010, p. 1177
    """

    # NOTE(review): ``memory=Memory(cachedir=None)`` is a mutable-looking
    # default, but a cachedir=None Memory is a no-op cache and is never
    # mutated here, so sharing one instance across calls is safe.
    def __init__(self, mask=None, n_components=20, smoothing_fwhm=6,
                 do_cca=True,
                 threshold='auto',
                 n_init=10,
                 random_state=None,
                 standardize=True, detrend=True,
                 low_pass=None, high_pass=None, t_r=None,
                 target_affine=None, target_shape=None,
                 mask_strategy='epi', mask_args=None,
                 memory=Memory(cachedir=None), memory_level=0,
                 n_jobs=1, verbose=0
                 ):
        super(CanICA, self).__init__(
            n_components=n_components,
            do_cca=do_cca,
            random_state=random_state,
            # feature_compression=feature_compression,
            mask=mask, smoothing_fwhm=smoothing_fwhm,
            standardize=standardize, detrend=detrend,
            low_pass=low_pass, high_pass=high_pass, t_r=t_r,
            target_affine=target_affine, target_shape=target_shape,
            mask_strategy=mask_strategy, mask_args=mask_args,
            memory=memory, memory_level=memory_level,
            n_jobs=n_jobs, verbose=verbose)
        # A float threshold is a voxel *ratio* per the class docstring, so it
        # cannot exceed the number of components; fail fast at construction.
        if isinstance(threshold, float) and threshold > n_components:
            raise ValueError("Threshold must not be higher than number "
                             "of maps. "
                             "Number of maps is %s and you provided "
                             "threshold=%s" %
                             (str(n_components), str(threshold)))
        self.threshold = threshold
        self.n_init = n_init

    def _unmix_components(self):
        """Core function of CanICA that rotates ``components_`` to maximize
        independence.

        Runs fastICA ``n_init`` times with different seeds (in parallel),
        keeps the run whose maps are sparsest, optionally thresholds the
        maps, and flips signs so each component's peak is positive.
        """
        random_state = check_random_state(self.random_state)
        # One independent integer seed per fastICA restart.
        seeds = random_state.randint(np.iinfo(np.int32).max, size=self.n_init)
        # fastICA calls are memory-cached (self._cache) and run in parallel.
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(self._cache(fastica, func_memory_level=2))
            (self.components_.T, whiten=True, fun='cube',
             random_state=seed)
            for seed in seeds)
        # result[2] is the sources matrix; transpose to maps x voxels.
        ica_maps_gen_ = (result[2].T for result in results)
        # Pair each run's maps with a sparsity score (max over components of
        # the l1 norm) and keep the sparsest run.
        ica_maps_and_sparsities = ((ica_map,
                                    np.sum(np.abs(ica_map), axis=1).max())
                                   for ica_map in ica_maps_gen_)
        ica_maps, _ = min(ica_maps_and_sparsities, key=itemgetter(-1))
        # Thresholding
        ratio = None
        if isinstance(self.threshold, float):
            ratio = self.threshold
        elif self.threshold == 'auto':
            ratio = 1.
        elif self.threshold is not None:
            raise ValueError("Threshold must be None, "
                             "'auto' or float. You provided %s." %
                             str(self.threshold))
        if ratio is not None:
            abs_ica_maps = np.abs(ica_maps)
            # Zero out all but the top ``ratio`` * n_voxels values across maps.
            threshold = scoreatpercentile(
                abs_ica_maps,
                100. - (100. / len(ica_maps)) * ratio)
            ica_maps[abs_ica_maps < threshold] = 0.
        self.components_ = ica_maps
        # flip signs in each component so that peak is +ve
        for component in self.components_:
            if component.max() < -component.min():
                component *= -1

    # Overriding MultiPCA._raw_fit overrides MultiPCA.fit behavior
    def _raw_fit(self, data):
        """Helper function that directly process unmasked data.
        Useful when called by another estimator that has already
        unmasked data.
        Parameters
        ----------
        data: ndarray or memmap
            Unmasked data to process
        """
        # PCA/CCA first (populates self.components_), then ICA rotation.
        MultiPCA._raw_fit(self, data)
        self._unmix_components()
        return self
| [
"numpy.abs",
"sklearn.utils.check_random_state",
"numpy.iinfo",
"sklearn.externals.joblib.Parallel",
"sklearn.externals.joblib.Memory",
"operator.itemgetter"
] | [((4013, 4034), 'sklearn.externals.joblib.Memory', 'Memory', ([], {'cachedir': 'None'}), '(cachedir=None)\n', (4019, 4034), False, 'from sklearn.externals.joblib import Memory, delayed, Parallel\n'), ((5288, 5325), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (5306, 5325), False, 'from sklearn.utils import check_random_state\n'), ((5424, 5474), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'verbose': 'self.verbose'}), '(n_jobs=self.n_jobs, verbose=self.verbose)\n', (5432, 5474), False, 'from sklearn.externals.joblib import Memory, delayed, Parallel\n'), ((6436, 6452), 'numpy.abs', 'np.abs', (['ica_maps'], {}), '(ica_maps)\n', (6442, 6452), True, 'import numpy as np\n'), ((5364, 5382), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (5372, 5382), True, 'import numpy as np\n'), ((5961, 5975), 'operator.itemgetter', 'itemgetter', (['(-1)'], {}), '(-1)\n', (5971, 5975), False, 'from operator import itemgetter\n'), ((5809, 5824), 'numpy.abs', 'np.abs', (['ica_map'], {}), '(ica_map)\n', (5815, 5824), True, 'import numpy as np\n')] |
import click
import numpy as np
import pandas as pd
import seaborn as sns
import nibabel as nib
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans, SpectralClustering, DBSCAN
from sklearn.metrics import silhouette_score
from ..due import due, Doi
from ..meta.cbma.kernel import ALEKernel, MKDAKernel, KDAKernel, Peaks2MapsKernel
from ..dataset.extract import convert_sleuth_to_database
from ..utils import get_template
#from nimare.dataset.extract import convert_sleuth_to_database
#from nimare.meta.cbma.kernel import ALEKernel, MKDAKernel, KDAKernel, Peaks2MapsKernel
#from nimare.due import due, Doi
@click.command(name='metacluster', short_help='clusters experiments based on similarity'
                                              'of activation patterns, to investigate '
                                              'heterogeneity across a meta-analytic dataset',
               help='Method for investigating recurrent patterns of activation accross a '
                    'meta-analytic dataset, thus identifying trends across a collection of '
                    'experiments.')
@click.argument('database', required=True, type=click.Path(exists=True, readable=True))
@click.option('--output_dir', required=True, type=click.Path(exists=True), help='Directory into which clustering results will be written.')
@click.option('--output_prefix', default='metacluster', type=str, help='Common prefix for output clustering results.')
@click.option('--kernel', default='ALEKernel', type=click.Choice(['ALEKernel', 'MKDAKernel', 'KDAKernel', 'Peaks2MapsKernel']), help='Kernel estimator, for coordinate-based metaclustering.')
@click.option('--coord/--img', required=True, default=False, help='Is input data image- or coordinate-based?')
@click.option('--algorithm', '-a', default='kmeans', type=click.Choice(['kmeans', 'dbscan', 'spectral']), help='Clustering algorithm to be used, from sklearn.cluster.')
@click.option('--clust_range', nargs=2, type=int, help='Select a range for k over which clustering solutions will be evaluated (e.g., 2 10 will evaluate solutions with k = 2 clusters to k = 10 clusters).')
@due.dcite(Doi('10.1016/j.neuroimage.2015.06.044'),
           description='Introduces meta-analytic clustering analysis; hierarchically clusering face paradigms.')
@due.dcite(Doi('10.1162/netn_a_00050'),
           description='Performs the specific meta-analytic clustering approach included here.')
def meta_cluster_workflow(database, output_dir=None, output_prefix=None,
                          kernel='ALEKernel', coord=True, algorithm='kmeans',
                          clust_range=(2, 10)):
    """Cluster experiments of a meta-analytic dataset by the similarity of
    their (modeled) activation patterns.

    For every k in ``clust_range`` the chosen algorithm is fit, cluster
    labels, average silhouette scores and the variation of information
    between consecutive solutions are written to ``output_dir``, together
    with a summary figure.
    """

    def VI(X, Y):
        """Variation of information between two clusterings, each given as a
        list of clusters (lists of member indices)."""
        from math import log
        # from https://gist.github.com/jwcarr/626cbc80e0006b526688
        n = float(sum(len(x) for x in X))
        sigma = 0.0
        for x in X:
            p = len(x) / n
            for y in Y:
                q = len(y) / n
                r = len(set(x) & set(y)) / n
                if r > 0.0:
                    sigma += r * (log(r / p, 2) + log(r / q, 2))
        return abs(sigma)

    # Bug fix: the template was previously bound to ``template_file`` while
    # the kernel constructors below referenced the undefined ``template_img``.
    template_img = get_template(space='mni152_1mm', mask=None)
    if database.endswith('.json'):
        # The JSON branch was never implemented (it referenced undefined
        # names); fail with an explicit message instead of a NameError.
        raise click.BadParameter('JSON databases are not supported by metacluster yet.',
                                 param_hint='database')
    elif database.endswith('.txt'):
        db = convert_sleuth_to_database(database)
        dset = db.get_dataset(target='mni152_2mm')
    else:
        raise click.BadParameter('You\'ve provided a database that metacluster can\'t read. :(', param_hint='database')
    if coord:
        # Dispatch table replaces the if/elif chain over kernel names.
        kernel_classes = {'ALEKernel': ALEKernel, 'MKDAKernel': MKDAKernel,
                          'KDAKernel': KDAKernel, 'Peaks2MapsKernel': Peaks2MapsKernel}
        kern = kernel_classes[kernel](dset.coordinates, template_img)
        imgs = kern.transform(dset.ids)
    else:
        # Image-based input previously fell through and crashed on an
        # undefined ``imgs``; make the limitation explicit.
        raise click.BadParameter('Image-based metaclustering is not implemented yet; use --coord.',
                                 param_hint='--coord/--img')
    # Flatten each modeled-activation image into a feature vector.
    imgs_arr = [np.ravel(img.get_data(), order='C') for img in imgs]
    labels = pd.DataFrame(index=dset.ids)
    k = np.arange(clust_range[0], clust_range[1] + 1)
    for i in k:
        if algorithm == 'kmeans':
            clustering = KMeans(i, init='k-means++', precompute_distances='auto')
        if algorithm == 'spectral':
            clustering = SpectralClustering(n_clusters=i, eigen_solver=None, random_state=None, n_init=300, gamma=1.0, affinity='rbf', n_neighbors=10, eigen_tol=0.0, assign_labels='discretize', degree=3, coef0=1, kernel_params=None)
        if algorithm == 'dbscan':
            # Bug fix: the builtin ``min`` was shadowed with a float;
            # DBSCAN's min_samples must be a (positive) integer.
            min_samples = max(1, len(dset.ids) // (i - 1))
            clustering = DBSCAN(eps=0.1, min_samples=min_samples, metric='euclidean', metric_params=None, algorithm='auto', leaf_size=30, p=None)
        labels[i] = clustering.fit_predict(imgs_arr)
    labels.to_csv('{0}/{1}_labels.csv'.format(output_dir, output_prefix))
    # Average silhouette score per solution (higher = better separation).
    silhouette_scores = {i: silhouette_score(imgs_arr, labels[i], metric='correlation',
                                             random_state=None)
                         for i in k}
    silhouettes = pd.Series(silhouette_scores, name='Average Silhouette Scores')
    # Index lists of the members of each cluster, per solution.
    clusters_idx = {}
    for i in k:
        clusters_idx['Solution {0}'.format(i)] = [list(np.where(labels[i] == j)[0])
                                                  for j in range(i)]
    # Variation of information between consecutive solutions (k vs k+1).
    variation_of_information = {}
    for i in k[:-1]:
        var_info = VI(clusters_idx['Solution {0}'.format(i)],
                      clusters_idx['Solution {0}'.format(i + 1)])
        variation_of_information[i + 1] = var_info
    vi = pd.Series(variation_of_information, name='Variation of Information')
    metrics = pd.concat([vi, silhouettes], axis=1)
    metrics.to_csv('{0}/{1}_metrics.csv'.format(output_dir, output_prefix))
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
    g = sns.lineplot(metrics.index, metrics['Average Silhouette Scores'], ax=ax[0])
    g.set_title('Silhouette Scores')
    g = sns.lineplot(metrics.index, metrics['Variation of Information'], ax=ax[1])
    g.set_title('Variation of Information')
    fig.savefig('{0}/{1}_metrics.png'.format(output_dir, output_prefix), dpi=300)
| [
"pandas.Series",
"click.Choice",
"sklearn.cluster.KMeans",
"sklearn.cluster.SpectralClustering",
"sklearn.cluster.DBSCAN",
"click.option",
"numpy.where",
"math.log",
"seaborn.lineplot",
"click.Path",
"pandas.DataFrame",
"click.BadParameter",
"click.command",
"pandas.concat",
"matplotlib.... | [((624, 974), 'click.command', 'click.command', ([], {'name': '"""metacluster"""', 'short_help': '"""clusters experiments based on similarityof activation patterns, to investigate heterogeneity across a meta-analytic dataset"""', 'help': '"""Method for investigating recurrent patterns of activation accross a meta-analytic dataset, thus identifying trends across a collection of experiments."""'}), "(name='metacluster', short_help=\n 'clusters experiments based on similarityof activation patterns, to investigate heterogeneity across a meta-analytic dataset'\n , help=\n 'Method for investigating recurrent patterns of activation accross a meta-analytic dataset, thus identifying trends across a collection of experiments.'\n )\n", (637, 974), False, 'import click\n'), ((1331, 1453), 'click.option', 'click.option', (['"""--output_prefix"""'], {'default': '"""metacluster"""', 'type': 'str', 'help': '"""Common prefix for output clustering results."""'}), "('--output_prefix', default='metacluster', type=str, help=\n 'Common prefix for output clustering results.')\n", (1343, 1453), False, 'import click\n'), ((1641, 1755), 'click.option', 'click.option', (['"""--coord/--img"""'], {'required': '(True)', 'default': '(False)', 'help': '"""Is input data image- or coordinate-based?"""'}), "('--coord/--img', required=True, default=False, help=\n 'Is input data image- or coordinate-based?')\n", (1653, 1755), False, 'import click\n'), ((1921, 2135), 'click.option', 'click.option', (['"""--clust_range"""'], {'nargs': '(2)', 'type': 'int', 'help': '"""Select a range for k over which clustering solutions will be evaluated (e.g., 2 10 will evaluate solutions with k = 2 clusters to k = 10 clusters)."""'}), "('--clust_range', nargs=2, type=int, help=\n 'Select a range for k over which clustering solutions will be evaluated (e.g., 2 10 will evaluate solutions with k = 2 clusters to k = 10 clusters).'\n )\n", (1933, 2135), False, 'import click\n'), ((4146, 4174), 
'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dset.ids'}), '(index=dset.ids)\n', (4158, 4174), True, 'import pandas as pd\n'), ((4183, 4228), 'numpy.arange', 'np.arange', (['clust_range[0]', '(clust_range[1] + 1)'], {}), '(clust_range[0], clust_range[1] + 1)\n', (4192, 4228), True, 'import numpy as np\n'), ((5189, 5251), 'pandas.Series', 'pd.Series', (['silhouette_scores'], {'name': '"""Average Silhouette Scores"""'}), "(silhouette_scores, name='Average Silhouette Scores')\n", (5198, 5251), True, 'import pandas as pd\n'), ((5708, 5776), 'pandas.Series', 'pd.Series', (['variation_of_infofmation'], {'name': '"""Variation of Information"""'}), "(variation_of_infofmation, name='Variation of Information')\n", (5717, 5776), True, 'import pandas as pd\n'), ((5792, 5828), 'pandas.concat', 'pd.concat', (['[vi, silhouettes]'], {'axis': '(1)'}), '([vi, silhouettes], axis=1)\n', (5801, 5828), True, 'import pandas as pd\n'), ((5919, 5966), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(10, 5)'}), '(nrows=1, ncols=2, figsize=(10, 5))\n', (5931, 5966), True, 'import matplotlib.pyplot as plt\n'), ((5974, 6049), 'seaborn.lineplot', 'sns.lineplot', (['metrics.index', "metrics['Average Silhouette Scores']"], {'ax': 'ax[0]'}), "(metrics.index, metrics['Average Silhouette Scores'], ax=ax[0])\n", (5986, 6049), True, 'import seaborn as sns\n'), ((6095, 6169), 'seaborn.lineplot', 'sns.lineplot', (['metrics.index', "metrics['Variation of Information']"], {'ax': 'ax[1]'}), "(metrics.index, metrics['Variation of Information'], ax=ax[1])\n", (6107, 6169), True, 'import seaborn as sns\n'), ((5050, 5128), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['imgs_arr', 'labels[i]'], {'metric': '"""correlation"""', 'random_state': 'None'}), "(imgs_arr, labels[i], metric='correlation', random_state=None)\n", (5066, 5128), False, 'from sklearn.metrics import silhouette_score\n'), ((1150, 1188), 'click.Path', 'click.Path', ([], {'exists': 
'(True)', 'readable': '(True)'}), '(exists=True, readable=True)\n', (1160, 1188), False, 'import click\n'), ((1240, 1263), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1250, 1263), False, 'import click\n'), ((1501, 1575), 'click.Choice', 'click.Choice', (["['ALEKernel', 'MKDAKernel', 'KDAKernel', 'Peaks2MapsKernel']"], {}), "(['ALEKernel', 'MKDAKernel', 'KDAKernel', 'Peaks2MapsKernel'])\n", (1513, 1575), False, 'import click\n'), ((1809, 1855), 'click.Choice', 'click.Choice', (["['kmeans', 'dbscan', 'spectral']"], {}), "(['kmeans', 'dbscan', 'spectral'])\n", (1821, 1855), False, 'import click\n'), ((3427, 3535), 'click.BadParameter', 'click.BadParameter', (['"""You\'ve provided a database that metacluster can\'t read. :("""'], {'param_hint': '"""database"""'}), '("You\'ve provided a database that metacluster can\'t read. :("\n , param_hint=\'database\')\n', (3445, 3535), False, 'import click\n'), ((4306, 4362), 'sklearn.cluster.KMeans', 'KMeans', (['i'], {'init': '"""k-means++"""', 'precompute_distances': '"""auto"""'}), "(i, init='k-means++', precompute_distances='auto')\n", (4312, 4362), False, 'from sklearn.cluster import KMeans, SpectralClustering, DBSCAN\n'), ((4424, 4639), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'n_clusters': 'i', 'eigen_solver': 'None', 'random_state': 'None', 'n_init': '(300)', 'gamma': '(1.0)', 'affinity': '"""rbf"""', 'n_neighbors': '(10)', 'eigen_tol': '(0.0)', 'assign_labels': '"""discretize"""', 'degree': '(3)', 'coef0': '(1)', 'kernel_params': 'None'}), "(n_clusters=i, eigen_solver=None, random_state=None,\n n_init=300, gamma=1.0, affinity='rbf', n_neighbors=10, eigen_tol=0.0,\n assign_labels='discretize', degree=3, coef0=1, kernel_params=None)\n", (4442, 4639), False, 'from sklearn.cluster import KMeans, SpectralClustering, DBSCAN\n'), ((4729, 4845), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.1)', 'min_samples': 'min', 'metric': '"""euclidean"""', 'metric_params': 
'None', 'algorithm': '"""auto"""', 'leaf_size': '(30)', 'p': 'None'}), "(eps=0.1, min_samples=min, metric='euclidean', metric_params=None,\n algorithm='auto', leaf_size=30, p=None)\n", (4735, 4845), False, 'from sklearn.cluster import KMeans, SpectralClustering, DBSCAN\n'), ((5373, 5397), 'numpy.where', 'np.where', (['(labels[i] == j)'], {}), '(labels[i] == j)\n', (5381, 5397), True, 'import numpy as np\n'), ((2960, 2973), 'math.log', 'log', (['(r / p)', '(2)'], {}), '(r / p, 2)\n', (2963, 2973), False, 'from math import log\n'), ((2976, 2989), 'math.log', 'log', (['(r / q)', '(2)'], {}), '(r / q, 2)\n', (2979, 2989), False, 'from math import log\n')] |
from __future__ import print_function, division
import os
import numpy as np
from keras.layers import BatchNormalization, Activation
from keras.layers import Input, Dense, Flatten, Dropout
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D
from keras.models import Sequential, Model
from keras.models import load_model
from keras.optimizers import Adam
from sklearn.metrics import hamming_loss
from utils import mkdirs
from glob import glob
import random
import nrrd
from scipy.ndimage import zoom
IMAGE_DIR = './32_cube/images'
MODEL_DIR = './32_cube/saved_model/AutoEncoder_patch'
''' Codes adapted from https://github.com/Fdevmsy/3D_shape_inpainting.
Credit goes to the original authors
'''
class EncoderDecoder():
    """3D convolutional encoder-decoder that predicts skull implants from
    defective skull volumes.

    The network operates on one-channel 128^3 patches; full 512x512x128
    volumes are tiled into 16 patches by ``make_patch`` and reassembled in
    ``evaluate``. Adapted from a GAN-based inpainting codebase; only the
    generator branch is kept (the discriminator was removed).
    """

    def __init__(self):
        # Patch geometry: one-channel 128^3 cubes for both input and target.
        self.vol_rows = 128
        self.vol_cols = 128
        self.vol_height = 128
        self.mask_height = 128
        self.mask_width = 128
        self.mask_length = 128
        self.channels = 1
        self.num_classes = 2
        self.vol_shape = (self.vol_rows, self.vol_cols, self.vol_height, self.channels)
        self.missing_shape = (self.mask_height, self.mask_width, self.mask_length, self.channels)
        self.input_dir = "../defective_skull_train"
        self.gt_imp_dir = "../gt_implants_train"
        optimizer = Adam(0.0002, 0.5)
        # Resume from a saved checkpoint when one exists; otherwise build a
        # fresh network. (Bug fix: this was a bare ``except:``, which also
        # swallowed KeyboardInterrupt/SystemExit.)
        try:
            self.generator = load_model(os.path.join(MODEL_DIR, 'encoderdecoder_patch.h5'))
            print("Loaded checkpoints")
        except Exception:
            self.generator = self.build_generator()
            print("No checkpoints found")
        # End-to-end trainable model: defective patch in -> implant patch out.
        masked_vol = Input(shape=self.vol_shape)
        gen_missing = self.generator(masked_vol)
        self.combined = Model(masked_vol, gen_missing)
        self.combined.compile(loss='mse',
                              optimizer=optimizer)

    def build_generator(self):
        """Build and return the (uncompiled) encoder-decoder network."""
        model = Sequential()
        # Encoder: strided 3D convolutions downsample 128^3 -> 8^3.
        model.add(Conv3D(32, kernel_size=5, strides=2, input_shape=self.vol_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv3D(64, kernel_size=5, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv3D(128, kernel_size=5, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv3D(512, kernel_size=1, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.5))
        # Decoder: upsampling + transposed convolutions back to 128^3.
        model.add(UpSampling3D())
        model.add(Deconv3D(256, kernel_size=5, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Deconv3D(128, kernel_size=5, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling3D())
        model.add(Deconv3D(64, kernel_size=5, padding="same"))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling3D())
        model.add(Deconv3D(self.channels, kernel_size=5, padding="same"))
        model.add(Activation('tanh'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling3D())
        model.add(Deconv3D(self.channels, kernel_size=5, padding="same"))
        model.add(Activation('tanh'))
        model.summary()
        masked_vol = Input(shape=self.vol_shape)
        gen_missing = model(masked_vol)
        return Model(masked_vol, gen_missing)

    def train(self, epochs, batch_size=16, sample_interval=50):
        """Train on pre-extracted patch arrays.

        ``ipt_patch.npy`` / ``gt_imp_patch.npy`` are expected to hold arrays
        of shape (n_volumes, n_patches, 128, 128, 128, 1). Each epoch picks
        one random volume and trains on each of its patches in turn.
        ``batch_size`` is kept for interface compatibility (the patch files
        fix the actual batch layout); the model is checkpointed every
        ``sample_interval`` epochs.
        """
        print('loading data...')
        ipt = np.load('ipt_patch.npy')
        gt = np.load('gt_imp_patch.npy')
        print('loading data complete...')
        for epoch in range(epochs):
            # Pick one random training volume (was hard-coded to 85 volumes).
            idx = random.randrange(ipt.shape[0])
            masked_vols = ipt[idx]
            missing_parts = gt[idx]
            print('masked_vols:', masked_vols.shape)
            print('missing_parts:', missing_parts.shape)
            # Train on every patch of the chosen volume (was hard-coded to 16).
            for i in range(masked_vols.shape[0]):
                g_loss = self.combined.train_on_batch(masked_vols[i], missing_parts[i])
                print(g_loss)
            print('epochs:', epoch)
            # Periodically checkpoint the generator.
            if epoch % sample_interval == 0:
                self.save_model()

    def make_patch(self, label):
        """Tile a (512, 512, D) volume into a 4x4 grid of (1, 128, 128, D, 1)
        patches; returns an array of shape (16, 1, 128, 128, D, 1)."""
        label_list = []
        for x in range(4):
            for y in range(4):
                temp_label = np.expand_dims(
                    np.expand_dims(label[x * 128:(x + 1) * 128, y * 128:(y + 1) * 128, :], axis=0),
                    axis=4)
                label_list.append(temp_label)
        return np.array(label_list)

    def evaluate(self, testdir, test_results_dir):
        """Predict implants for every ``*.nrrd`` volume in ``testdir`` and
        write binarized reconstructions into ``test_results_dir``."""
        print('evaluating the model...')
        test_list = glob('{}/*.nrrd'.format(testdir))
        for i in range(len(test_list)):
            data, h = nrrd.read(test_list[i])
            # Keep only the top 128 axial slices (the network's input depth).
            data = data[:, :, data.shape[2] - 128:data.shape[2]]
            datap = self.make_patch(data)
            reconstructed = np.zeros(shape=(512, 512, 128))
            patch_idx = 0
            for x in range(4):
                for y in range(4):
                    gen_missing = self.generator.predict(datap[patch_idx])
                    # Binarize the prediction at 0.5 (was ``(x > .5) + 1 - 1``).
                    gen_missing = (gen_missing > 0.5).astype(np.int_)
                    reconstructed[x * 128:(x + 1) * 128, y * 128:(y + 1) * 128, :] = gen_missing[0, :, :, :, 0]
                    patch_idx = patch_idx + 1
            filename = test_results_dir + test_list[i][-10:-5] + '.nrrd'
            nrrd.write(filename, reconstructed, h)

    def save_model(self):
        """Save the generator as ``encoderdecoder_patch.h5`` under MODEL_DIR."""
        def save(model, model_name):
            model_path = os.path.join(MODEL_DIR, "%s.h5" % model_name)
            model.save(model_path)
        save(self.generator, "encoderdecoder_patch")
if __name__ == '__main__':
    # Paths for the evaluation step (currently disabled below).
    defective_test_dir = "../defective_skull_test"
    results_dir = "../results/"
    # Build the model (loads a checkpoint when available) and train it.
    autoencoder = EncoderDecoder()
    autoencoder.train(epochs=3000, batch_size=4, sample_interval=200)
    # autoencoder.evaluate(defective_test_dir, results_dir)
| [
"keras.layers.convolutional.UpSampling3D",
"nrrd.read",
"numpy.array",
"keras.layers.Activation",
"keras.models.Model",
"keras.layers.advanced_activations.LeakyReLU",
"keras.optimizers.Adam",
"numpy.ones",
"random.randrange",
"keras.layers.convolutional.Deconv3D",
"keras.models.Sequential",
"k... | [((1422, 1439), 'keras.optimizers.Adam', 'Adam', (['(0.0002)', '(0.5)'], {}), '(0.0002, 0.5)\n', (1426, 1439), False, 'from keras.optimizers import Adam\n'), ((2189, 2216), 'keras.layers.Input', 'Input', ([], {'shape': 'self.vol_shape'}), '(shape=self.vol_shape)\n', (2194, 2216), False, 'from keras.layers import Input, Dense, Flatten, Dropout\n'), ((2715, 2745), 'keras.models.Model', 'Model', (['masked_vol', 'gen_missing'], {}), '(masked_vol, gen_missing)\n', (2720, 2745), False, 'from keras.models import Sequential, Model\n'), ((3022, 3034), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3032, 3034), False, 'from keras.models import Sequential, Model\n'), ((4705, 4732), 'keras.layers.Input', 'Input', ([], {'shape': 'self.vol_shape'}), '(shape=self.vol_shape)\n', (4710, 4732), False, 'from keras.layers import Input, Dense, Flatten, Dropout\n'), ((4792, 4822), 'keras.models.Model', 'Model', (['masked_vol', 'gen_missing'], {}), '(masked_vol, gen_missing)\n', (4797, 4822), False, 'from keras.models import Sequential, Model\n'), ((4988, 5012), 'numpy.ones', 'np.ones', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (4995, 5012), True, 'import numpy as np\n'), ((5029, 5054), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (5037, 5054), True, 'import numpy as np\n'), ((5146, 5170), 'numpy.load', 'np.load', (['"""ipt_patch.npy"""'], {}), "('ipt_patch.npy')\n", (5153, 5170), True, 'import numpy as np\n'), ((5183, 5210), 'numpy.load', 'np.load', (['"""gt_imp_patch.npy"""'], {}), "('gt_imp_patch.npy')\n", (5190, 5210), True, 'import numpy as np\n'), ((6618, 6638), 'numpy.array', 'np.array', (['label_list'], {}), '(label_list)\n', (6626, 6638), True, 'import numpy as np\n'), ((3075, 3160), 'keras.layers.convolutional.Conv3D', 'Conv3D', (['(32)'], {'kernel_size': '(5)', 'strides': '(2)', 'input_shape': 'self.vol_shape', 'padding': '"""same"""'}), "(32, kernel_size=5, strides=2, input_shape=self.vol_shape, padding='same'\n 
)\n", (3081, 3160), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((3176, 3196), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3185, 3196), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3217, 3249), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3235, 3249), False, 'from keras.layers import BatchNormalization, Activation\n'), ((3270, 3322), 'keras.layers.convolutional.Conv3D', 'Conv3D', (['(64)'], {'kernel_size': '(5)', 'strides': '(2)', 'padding': '"""same"""'}), "(64, kernel_size=5, strides=2, padding='same')\n", (3276, 3322), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((3343, 3363), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3352, 3363), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3384, 3416), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3402, 3416), False, 'from keras.layers import BatchNormalization, Activation\n'), ((3437, 3490), 'keras.layers.convolutional.Conv3D', 'Conv3D', (['(128)'], {'kernel_size': '(5)', 'strides': '(2)', 'padding': '"""same"""'}), "(128, kernel_size=5, strides=2, padding='same')\n", (3443, 3490), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((3511, 3531), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3520, 3531), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3552, 3584), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3570, 3584), False, 'from keras.layers import BatchNormalization, Activation\n'), ((3605, 3658), 'keras.layers.convolutional.Conv3D', 'Conv3D', (['(512)'], {'kernel_size': '(1)', 
'strides': '(2)', 'padding': '"""same"""'}), "(512, kernel_size=1, strides=2, padding='same')\n", (3611, 3658), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((3679, 3699), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3688, 3699), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3720, 3732), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3727, 3732), False, 'from keras.layers import Input, Dense, Flatten, Dropout\n'), ((3774, 3788), 'keras.layers.convolutional.UpSampling3D', 'UpSampling3D', ([], {}), '()\n', (3786, 3788), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((3809, 3853), 'keras.layers.convolutional.Deconv3D', 'Deconv3D', (['(256)'], {'kernel_size': '(5)', 'padding': '"""same"""'}), "(256, kernel_size=5, padding='same')\n", (3817, 3853), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((3874, 3892), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3884, 3892), False, 'from keras.layers import BatchNormalization, Activation\n'), ((3913, 3945), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3931, 3945), False, 'from keras.layers import BatchNormalization, Activation\n'), ((3966, 4010), 'keras.layers.convolutional.Deconv3D', 'Deconv3D', (['(128)'], {'kernel_size': '(5)', 'padding': '"""same"""'}), "(128, kernel_size=5, padding='same')\n", (3974, 4010), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((4031, 4049), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4041, 4049), False, 'from keras.layers import BatchNormalization, Activation\n'), ((4070, 4102), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (4088, 4102), False, 'from keras.layers 
import BatchNormalization, Activation\n'), ((4125, 4139), 'keras.layers.convolutional.UpSampling3D', 'UpSampling3D', ([], {}), '()\n', (4137, 4139), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((4160, 4203), 'keras.layers.convolutional.Deconv3D', 'Deconv3D', (['(64)'], {'kernel_size': '(5)', 'padding': '"""same"""'}), "(64, kernel_size=5, padding='same')\n", (4168, 4203), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((4224, 4242), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4234, 4242), False, 'from keras.layers import BatchNormalization, Activation\n'), ((4263, 4295), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (4281, 4295), False, 'from keras.layers import BatchNormalization, Activation\n'), ((4318, 4332), 'keras.layers.convolutional.UpSampling3D', 'UpSampling3D', ([], {}), '()\n', (4330, 4332), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((4353, 4407), 'keras.layers.convolutional.Deconv3D', 'Deconv3D', (['self.channels'], {'kernel_size': '(5)', 'padding': '"""same"""'}), "(self.channels, kernel_size=5, padding='same')\n", (4361, 4407), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((4428, 4446), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (4438, 4446), False, 'from keras.layers import BatchNormalization, Activation\n'), ((4467, 4499), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (4485, 4499), False, 'from keras.layers import BatchNormalization, Activation\n'), ((4522, 4536), 'keras.layers.convolutional.UpSampling3D', 'UpSampling3D', ([], {}), '()\n', (4534, 4536), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((4557, 4611), 'keras.layers.convolutional.Deconv3D', 'Deconv3D', 
(['self.channels'], {'kernel_size': '(5)', 'padding': '"""same"""'}), "(self.channels, kernel_size=5, padding='same')\n", (4565, 4611), False, 'from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D\n'), ((4632, 4650), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (4642, 4650), False, 'from keras.layers import BatchNormalization, Activation\n'), ((5312, 5338), 'random.randrange', 'random.randrange', (['(0)', '(85)', '(1)'], {}), '(0, 85, 1)\n', (5328, 5338), False, 'import random\n'), ((6852, 6875), 'nrrd.read', 'nrrd.read', (['test_list[i]'], {}), '(test_list[i])\n', (6861, 6875), False, 'import nrrd\n'), ((7006, 7037), 'numpy.zeros', 'np.zeros', ([], {'shape': '(512, 512, 128)'}), '(shape=(512, 512, 128))\n', (7014, 7037), True, 'import numpy as np\n'), ((7534, 7572), 'nrrd.write', 'nrrd.write', (['filename', 'reconstructed', 'h'], {}), '(filename, reconstructed, h)\n', (7544, 7572), False, 'import nrrd\n'), ((7666, 7711), 'os.path.join', 'os.path.join', (['MODEL_DIR', "('%s.h5' % model_name)"], {}), "(MODEL_DIR, '%s.h5' % model_name)\n", (7678, 7711), False, 'import os\n'), ((1588, 1638), 'os.path.join', 'os.path.join', (['MODEL_DIR', '"""encoderdecoder_patch.h5"""'], {}), "(MODEL_DIR, 'encoderdecoder_patch.h5')\n", (1600, 1638), False, 'import os\n'), ((6483, 6561), 'numpy.expand_dims', 'np.expand_dims', (['label[x * 128:(x + 1) * 128, y * 128:(y + 1) * 128, :]'], {'axis': '(0)'}), '(label[x * 128:(x + 1) * 128, y * 128:(y + 1) * 128, :], axis=0)\n', (6497, 6561), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Monte Carlo check of the variance of the sample-mean and sample-covariance
estimators against analytical formulas and the Wishart distribution.

Created on Tue Oct 3 19:01:13 2017

@author: sebalander
"""
# %%
import numpy as np

# %%
N = int(5e1) # number of data, degrees of freedom
M = int(1e5) # number of realisations
P = 2 # size of matrices

mu = np.array([7, 10]) # mean of data
c = np.array([[5, 3], [3, 7]]) # true covariance matrix

# generate data: M independent realisations of N points each
x = np.random.multivariate_normal(mu, c, (N, M))

# estimate mu and c for every realisation
muest = np.mean(x, axis=0)
dif = x - muest
cest = dif.reshape((N,M,P,1)) * dif.reshape((N,M,1,P))
cest = np.sum(cest, axis=0) / (N - 1)

# mean and variance of the estimators taken across all realisations
muExp = np.mean(muest, axis=0)
difmu = muest - muExp
muVar = np.sum(difmu.reshape((M,P,1)) * difmu.reshape((M,1,P)), axis=0) / (M - 1)

cExp = np.mean(cest, axis=0)
difc = cest - cExp
difcKron = np.array([np.kron(cc,cc) for cc in difc])
cVarKron = np.sum(difcKron / (M - 1), axis=0)

difc = difc.reshape((M,P*P,1))
cVarAux = np.reshape(difc * difc.transpose((0,2,1)), (M,P,P,P,P))
cVar = np.sum(cVarAux / (M - 1), axis=0) # NUMERICAL VARIANCE

# analytical counterparts
expMu = mu
VarMu = c / N
expC = c
# no transpose needed because c is symmetric
cShaped = c.reshape((P,P,1,1))
VarCnn = cShaped * cShaped.transpose((3,2,1,0))
nn = (2 * N - 1) / (N - 1)**2
VarC = VarCnn * nn ## THEORETICAL VARIANCE ACCORDING TO SEBA
cVarnn = cVar / nn

print('media de media numerico: \n', muExp,
      '\n\n media de media analitico \n', expMu)
print('\n\n varianza de media numerico: \n', muVar,
      '\n\n varianza de media analitico \n', VarMu)
print('\n\n media de varianza numerico: \n', cExp,
      '\n\n media de varianza analitico \n', expC)
print('\n\n varianza de varianza numerico (sin normalizar):\n', cVarnn,
      '\n\n varianza de varianza analitico (sin normalizar)\n', VarCnn)

cKron = np.kron(c,c)
# bare expressions below are for interactive (cell-by-cell) inspection
cVarnn
VarCnn
cKron.reshape((P,P,P,P))
np.reshape(c,(P*P,1), order='C') * np.reshape(c,(1, P*P), order='C')
cKron
cVarKron / nn
'''
no entiendo donde esta el problema
'''
# (string above kept verbatim; translation: "I don't understand where the
# problem is")
#reldif = np.abs((cVar - VarC) / VarC)
#reldif > 1e-1
# %% recompute the covariances, but with 4x4 shapes
# numerical
cVar2 = np.sum(difc * difc.transpose((0,2,1)) / (M - 1), axis=0)
# theoretical
VarC2 = c.reshape((-1,1)) * c.reshape((1,-1)) * (2 * N - 1) / (N - 1)**2
# %% try the Wishart distribution
# probability of the estimated covariance
import scipy.stats as sts
import scipy.special as spe

# N degrees of freedom
# inv(c) is precision matrix
wishRV = sts.wishart(N, c)
wpdf = wishRV.pdf

wishPDFofC = wpdf(c * (N-1)) # the most probable scatter matrix
eC = - np.log(wishPDFofC)

wpdf(wishRV.rvs())
[wpdf(m) for m in cest]
def wishConst(P, N):
    """Normalisation constant of the Wishart pdf.

    Returns 1 / (Gamma_P(N/2) * 2**(N*P/2)), where Gamma_P is the
    P-dimensional multivariate gamma function.
    """
    # multivariate gamma: pi**(P*(P-1)/4) * prod_i Gamma((N - i) / 2)
    gamma_terms = spe.gamma((N - np.arange(P)) / 2)
    multi_gamma = np.pi**(P * (P - 1) / 4) * np.prod(gamma_terms)
    return 1 / (multi_gamma * 2**(N * P / 2))
# constants used to evaluate the wishart pdf by hand
wishartConstant = wishConst(P, N)
expW = (N - P - 1) / 2 # exponent of det(W) in the density
expV = N / 2 # exponent of det(V) in the density
def wishartPDF(W, V):
    """Wishart density of scatter matrix W given scale matrix V.

    Relies on the module-level constants ``wishartConstant``
    (normalisation) and ``expW``/``expV`` (determinant exponents),
    following the Wikipedia parameterisation.
    """
    trace_term = np.trace(np.linalg.inv(V).dot(W))
    detW = np.linalg.det(W)
    detV = np.linalg.det(V)
    density = wishartConstant * detW**expW * np.exp(-trace_term / 2)
    return density / detV**expV
# compare the hand-written pdf against scipy's on the same matrices
wishartPDF(cest[4] * (N - 1), c)
wpdf(cest[4] * (N - 1))

[wishartPDF(mat * (N - 1), c) for mat in cest[:10]]
[wpdf(mat * (N - 1)) for mat in cest[:10]]
wpdf(cest[:10].transpose((1,2,0)) * (N - 1))
'''
las dos dan igual asi que la libreria de scipy esta evaluando la pdf de la
matriz como corresponde.
ok, tengo una manera de medir que tan bien dan las covarianzas
'''
# (string above kept verbatim; translation: both agree, so scipy evaluates
# the matrix pdf as expected -- this gives a way to score the covariances)
wishartPDFs = wpdf(cest.transpose((1,2,0)) * (N - 1))

# %% how to turn this into a metric
# number of "degrees of freedom" of a covariance matrix
# calculate error without including dims
# dimsErroConst = - np.log(np.pi * 2) - np.log(wishPDFofC)
dims = - np.log(wishPDFofC) / np.log(np.pi * 2) # 8.277 # P * (P + 1) / 2
## choose the degrees of freedom so that c has zero (minimum) error
#dims = (4 * P
#        - 2 * np.log(wishartConstant)
#        + (P+1) * np.log(np.linalg.det(c))) / np.log(np.pi * 2)
norLog = dims * np.log(np.pi * 2) # normalisation constant of the gaussian
def E2(gaussPDF):
    """Squared-error proxy: negative log-density minus the module-level
    Gaussian normalisation constant ``norLog``."""
    neg_log_density = -np.log(gaussPDF)
    return neg_log_density - norLog
e2 = E2(wishartPDFs)

import matplotlib.pyplot as plt
plt.hist(e2, 100)

er = np.sqrt(e2)
# BUG FIX: the original printed np.min(E), but no variable `E` exists
# anywhere in this script (it raised NameError); the intended quantity
# is the minimum of the per-realisation errors `e2`.
print(np.min(e2), np.sqrt(E2(wishPDFofC)))

# %% covariance matrix
# https://www.statlect.com/probability-distributions/wishart-distribution
cKron = np.kron(c,c)
PvecA = np.array([[1,0,0,0],
                  [0,0,1,0],
                  [0,1,0,0],
                  [0,0,0,1]]) # since c is symmetric, vec(c)=vec(c')
## test PvecA
#A = np.array([[1,2],[3,4]])
#PvecA.dot(np.reshape(A,-1))
#np.reshape(A.T,-1)
const = (PvecA + np.eye(P*P)) / N
Var = const.dot(cKron)

print('teorico de wishart\n', Var)
print('numerico\n', cVar)
print('teorico seba\n', VarC)

# NOTE(review): `Var` is printed again below under the "wishart2" label;
# a distinct second estimate may have been intended -- confirm.
print('\n\nteorico de wishart2\n', Var)
print('numerico2\n', cVar2)
print('teorico seba2\n', VarC2)

# %%
'''
quedo demostrado que la expected variance segun lateoria de wishart es la que
coincide con las simulaciones (se puede rastrear la diferencia con lo propuesto
por mi pero no vale la pena).
ahora ver como sacar una distancia mahalanobis
'''
# (string above kept verbatim; translation: the Wishart expected variance
# matches the simulations; next, derive a mahalanobis distance)
import scipy.linalg as ln

PvecA = np.array([[1,0,0,0],
                  [0,0,1,0],
                  [0,1,0,0],
                  [0,0,0,1]]) # since c is symmetric, vec(c)=vec(c')
const = (PvecA + np.eye(P*P))
def varVar(c, N):
    """Theoretical covariance of the entries of an estimated covariance
    matrix (vectorised form), using the module-level matrix ``const``.
    Valid for 2x2 ``c`` only.
    """
    kron_cc = np.kron(c, c)
    return const.dot(kron_cc) / N
# pick two of the estimated matrices to compare
i = 1234
j = 100
ci = cest[i]
cj = cest[j]
def varMahal(c1, n, c2, rank=False):
    """Mahalanobis distance between two 2x2 covariance matrices.

    ``c1`` is the reference (order matters): its theoretical entry
    covariance, from ``varVar``, defines the metric.  With ``rank=True``
    the chi^2(3) cumulative probability of that distance is returned as
    well.
    """
    keep = [0, 1, 3]  # drop the redundant off-diagonal row/column
    reduced = varVar(c1, n)[keep].T[keep].T
    precision = np.linalg.inv(reduced)
    # unique entries (upper triangle) of each symmetric matrix
    diff = c1[[0, 0, 1], [0, 1, 1]] - c2[[0, 0, 1], [0, 1, 1]]
    dist = diff.dot(precision).dot(diff)
    if not rank:
        return dist
    return dist, sts.chi2.cdf(dist, 3)
# drop one redundant component of the vectorised covariance # and weight it
# mul = np.array([[1],[2],[1]])
ind = [0,1,3]
ciVar = varVar(ci, N)[ind] # * mul
cjVar = varVar(cj, N)[ind] # * mul
ciVar = ciVar.T[ind].T
cjVar = cjVar.T[ind].T
ciFlat = np.reshape(ci, -1)[ind]
cjFlat = np.reshape(cj, -1)[ind]
cFlatDif = ciFlat - cjFlat

A = varVar(ci, N)
ln.svd(A) # inspect the singular values of the full (rank-deficient) 4x4
ciPres = np.linalg.inv(ciVar)
cjPres = np.linalg.inv(cjVar)
dMahi = cFlatDif.dot(ciPres).dot(cFlatDif)
dMahj = cFlatDif.dot(cjPres).dot(cFlatDif)
varMahal(ci, N, cj)
varMahal(cj, N, ci)
from dev import bayesLib as bl
bl.varMahal(ci, N, cj, rank=True)
# guess: the way to combine both covariances is to add the squared distances:
dM = np.sqrt(dMahi + dMahj)
# fine, because in practice only one of the matrices carries MC variance;
# the other one is theoretical. at most, its error would have to be
# bounded via a Taylor expansion.
# %% test the mahalanobis distances of the expectations wrt the estimates
difMU = muest - mu # the variance of this must be VarMu
difC = cest - c # the variance of this must be Var
difC2 = difC[:,[0,0,1],[0,1,1]]
presMU = ln.inv(VarMu)
Var2 = Var[[0,1,3]].T[[0,1,3]].T # drop the irrelevant dimensions
presC = ln.inv(Var2)
mahMU = difMU.reshape((-1,2,1)) * presMU.reshape((1,2,2)) * difMU.reshape((-1,1,2))
mahMU = mahMU.sum(axis=(1,2))
mahC = difC2.reshape((-1,3,1)) * presC.reshape((1,3,3)) * difC2.reshape((-1,1,3))
mahC = mahC.sum(axis=(1,2))
rankings = sts.chi2.cdf(mahC,3)
# plot and compare against chi squared
plt.figure()
# NOTE(review): `normed=True` is the legacy matplotlib histogram argument
# (removed in matplotlib >= 3.1); `density=True` is its replacement.
nMU, binsMU, patchesMU = plt.hist(mahMU, 1000, normed=True)
chi2MU = sts.chi2.pdf(binsMU,2)
plt.plot(binsMU, chi2MU, label='chi2, df=2')
plt.figure()
nC, binsC, patchesC = plt.hist(mahC, 1000, normed=True)
chi2C = sts.chi2.pdf(binsC,3)
plt.plot(binsC, chi2C, label='chi2, df=3')
'''
como sigue la pdf chi cuadrado parece que esta todo bien, asi que la distancia
de mahalanobis es un buen indicador de la distancia.
falta poner loq ue vendría a ser el sigma segun los grados de libertad. para
cada distancia de mahalanobis pouedo calcular en que elipse esta, o sea indicar
"el volumen de la elipse" donde esta, mientras mas chico mejor.
'''
# (string above kept verbatim; translation: the chi-squared fit looks right,
# so the mahalanobis distance is a good indicator; for each distance one can
# report the "ellipse volume" it falls in -- the smaller the better)
muEllVol = sts.chi2.cdf(mahMU, 2)
cEllVol = sts.chi2.cdf(mahC, 3)
plt.figure()
plt.plot(mahMU, muEllVol, '.', label='mu')
plt.plot(mahC, cEllVol, '.', label='C')
plt.xlabel('distancia mahalanobis al cuadrado')
plt.ylabel('acumulado de probabiliad')
"numpy.prod",
"numpy.trace",
"matplotlib.pyplot.hist",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.array",
"numpy.arange",
"numpy.mean",
"scipy.stats.chi2.cdf",
"numpy.reshape",
"matplotlib.pyplot.xlabel",
"scipy.stats.wishart",
"matplotlib.pyplot.plot",
"numpy.exp",
... | [((265, 282), 'numpy.array', 'np.array', (['[7, 10]'], {}), '([7, 10])\n', (273, 282), True, 'import numpy as np\n'), ((303, 329), 'numpy.array', 'np.array', (['[[5, 3], [3, 7]]'], {}), '([[5, 3], [3, 7]])\n', (311, 329), True, 'import numpy as np\n'), ((351, 395), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'c', '(N, M)'], {}), '(mu, c, (N, M))\n', (380, 395), True, 'import numpy as np\n'), ((444, 462), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (451, 462), True, 'import numpy as np\n'), ((638, 660), 'numpy.mean', 'np.mean', (['muest'], {'axis': '(0)'}), '(muest, axis=0)\n', (645, 660), True, 'import numpy as np\n'), ((773, 794), 'numpy.mean', 'np.mean', (['cest'], {'axis': '(0)'}), '(cest, axis=0)\n', (780, 794), True, 'import numpy as np\n'), ((880, 914), 'numpy.sum', 'np.sum', (['(difcKron / (M - 1))'], {'axis': '(0)'}), '(difcKron / (M - 1), axis=0)\n', (886, 914), True, 'import numpy as np\n'), ((1021, 1054), 'numpy.sum', 'np.sum', (['(cVarAux / (M - 1))'], {'axis': '(0)'}), '(cVarAux / (M - 1), axis=0)\n', (1027, 1054), True, 'import numpy as np\n'), ((1830, 1843), 'numpy.kron', 'np.kron', (['c', 'c'], {}), '(c, c)\n', (1837, 1843), True, 'import numpy as np\n'), ((2465, 2482), 'scipy.stats.wishart', 'sts.wishart', (['N', 'c'], {}), '(N, c)\n', (2476, 2482), True, 'import scipy.stats as sts\n'), ((4389, 4406), 'matplotlib.pyplot.hist', 'plt.hist', (['e2', '(100)'], {}), '(e2, 100)\n', (4397, 4406), True, 'import matplotlib.pyplot as plt\n'), ((4412, 4423), 'numpy.sqrt', 'np.sqrt', (['e2'], {}), '(e2)\n', (4419, 4423), True, 'import numpy as np\n'), ((4572, 4585), 'numpy.kron', 'np.kron', (['c', 'c'], {}), '(c, c)\n', (4579, 4585), True, 'import numpy as np\n'), ((4595, 4661), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\n', (4603, 4661), True, 'import numpy as np\n'), ((5375, 5441), 
'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\n', (5383, 5441), True, 'import numpy as np\n'), ((6863, 6872), 'scipy.linalg.svd', 'ln.svd', (['A'], {}), '(A)\n', (6869, 6872), True, 'import scipy.linalg as ln\n'), ((6884, 6904), 'numpy.linalg.inv', 'np.linalg.inv', (['ciVar'], {}), '(ciVar)\n', (6897, 6904), True, 'import numpy as np\n'), ((6914, 6934), 'numpy.linalg.inv', 'np.linalg.inv', (['cjVar'], {}), '(cjVar)\n', (6927, 6934), True, 'import numpy as np\n'), ((7095, 7128), 'dev.bayesLib.varMahal', 'bl.varMahal', (['ci', 'N', 'cj'], {'rank': '(True)'}), '(ci, N, cj, rank=True)\n', (7106, 7128), True, 'from dev import bayesLib as bl\n'), ((7226, 7248), 'numpy.sqrt', 'np.sqrt', (['(dMahi + dMahj)'], {}), '(dMahi + dMahj)\n', (7233, 7248), True, 'import numpy as np\n'), ((7678, 7691), 'scipy.linalg.inv', 'ln.inv', (['VarMu'], {}), '(VarMu)\n', (7684, 7691), True, 'import scipy.linalg as ln\n'), ((7769, 7781), 'scipy.linalg.inv', 'ln.inv', (['Var2'], {}), '(Var2)\n', (7775, 7781), True, 'import scipy.linalg as ln\n'), ((8020, 8041), 'scipy.stats.chi2.cdf', 'sts.chi2.cdf', (['mahC', '(3)'], {}), '(mahC, 3)\n', (8032, 8041), True, 'import scipy.stats as sts\n'), ((8079, 8091), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8089, 8091), True, 'import matplotlib.pyplot as plt\n'), ((8117, 8151), 'matplotlib.pyplot.hist', 'plt.hist', (['mahMU', '(1000)'], {'normed': '(True)'}), '(mahMU, 1000, normed=True)\n', (8125, 8151), True, 'import matplotlib.pyplot as plt\n'), ((8161, 8184), 'scipy.stats.chi2.pdf', 'sts.chi2.pdf', (['binsMU', '(2)'], {}), '(binsMU, 2)\n', (8173, 8184), True, 'import scipy.stats as sts\n'), ((8184, 8228), 'matplotlib.pyplot.plot', 'plt.plot', (['binsMU', 'chi2MU'], {'label': '"""chi2, df=2"""'}), "(binsMU, chi2MU, label='chi2, df=2')\n", (8192, 8228), True, 'import matplotlib.pyplot as plt\n'), ((8230, 8242), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8240, 8242), True, 'import matplotlib.pyplot as plt\n'), ((8265, 8298), 'matplotlib.pyplot.hist', 'plt.hist', (['mahC', '(1000)'], {'normed': '(True)'}), '(mahC, 1000, normed=True)\n', (8273, 8298), True, 'import matplotlib.pyplot as plt\n'), ((8307, 8329), 'scipy.stats.chi2.pdf', 'sts.chi2.pdf', (['binsC', '(3)'], {}), '(binsC, 3)\n', (8319, 8329), True, 'import scipy.stats as sts\n'), ((8329, 8371), 'matplotlib.pyplot.plot', 'plt.plot', (['binsC', 'chi2C'], {'label': '"""chi2, df=3"""'}), "(binsC, chi2C, label='chi2, df=3')\n", (8337, 8371), True, 'import matplotlib.pyplot as plt\n'), ((8747, 8769), 'scipy.stats.chi2.cdf', 'sts.chi2.cdf', (['mahMU', '(2)'], {}), '(mahMU, 2)\n', (8759, 8769), True, 'import scipy.stats as sts\n'), ((8780, 8801), 'scipy.stats.chi2.cdf', 'sts.chi2.cdf', (['mahC', '(3)'], {}), '(mahC, 3)\n', (8792, 8801), True, 'import scipy.stats as sts\n'), ((8803, 8815), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8813, 8815), True, 'import matplotlib.pyplot as plt\n'), ((8816, 8858), 'matplotlib.pyplot.plot', 'plt.plot', (['mahMU', 'muEllVol', '"""."""'], {'label': '"""mu"""'}), "(mahMU, muEllVol, '.', label='mu')\n", (8824, 8858), True, 'import matplotlib.pyplot as plt\n'), ((8859, 8898), 'matplotlib.pyplot.plot', 'plt.plot', (['mahC', 'cEllVol', '"""."""'], {'label': '"""C"""'}), "(mahC, cEllVol, '.', label='C')\n", (8867, 8898), True, 'import matplotlib.pyplot as plt\n'), ((8899, 8946), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""distancia mahalanobis al cuadrado"""'], {}), "('distancia mahalanobis al cuadrado')\n", (8909, 8946), True, 'import matplotlib.pyplot as plt\n'), ((8947, 8985), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""acumulado de probabiliad"""'], {}), "('acumulado de probabiliad')\n", (8957, 8985), True, 'import matplotlib.pyplot as plt\n'), ((541, 561), 'numpy.sum', 'np.sum', (['cest'], {'axis': '(0)'}), '(cest, axis=0)\n', (547, 561), True, 
'import numpy as np\n'), ((1886, 1922), 'numpy.reshape', 'np.reshape', (['c', '(P * P, 1)'], {'order': '"""C"""'}), "(c, (P * P, 1), order='C')\n", (1896, 1922), True, 'import numpy as np\n'), ((1921, 1957), 'numpy.reshape', 'np.reshape', (['c', '(1, P * P)'], {'order': '"""C"""'}), "(c, (1, P * P), order='C')\n", (1931, 1957), True, 'import numpy as np\n'), ((2580, 2598), 'numpy.log', 'np.log', (['wishPDFofC'], {}), '(wishPDFofC)\n', (2586, 2598), True, 'import numpy as np\n'), ((3118, 3134), 'numpy.linalg.det', 'np.linalg.det', (['V'], {}), '(V)\n', (3131, 3134), True, 'import numpy as np\n'), ((3146, 3162), 'numpy.linalg.det', 'np.linalg.det', (['W'], {}), '(W)\n', (3159, 3162), True, 'import numpy as np\n'), ((3978, 3995), 'numpy.log', 'np.log', (['(np.pi * 2)'], {}), '(np.pi * 2)\n', (3984, 3995), True, 'import numpy as np\n'), ((4231, 4248), 'numpy.log', 'np.log', (['(np.pi * 2)'], {}), '(np.pi * 2)\n', (4237, 4248), True, 'import numpy as np\n'), ((4430, 4439), 'numpy.min', 'np.min', (['E'], {}), '(E)\n', (4436, 4439), True, 'import numpy as np\n'), ((5539, 5552), 'numpy.eye', 'np.eye', (['(P * P)'], {}), '(P * P)\n', (5545, 5552), True, 'import numpy as np\n'), ((5617, 5630), 'numpy.kron', 'np.kron', (['c', 'c'], {}), '(c, c)\n', (5624, 5630), True, 'import numpy as np\n'), ((6190, 6210), 'numpy.linalg.inv', 'np.linalg.inv', (['c1Var'], {}), '(c1Var)\n', (6203, 6210), True, 'import numpy as np\n'), ((6759, 6777), 'numpy.reshape', 'np.reshape', (['ci', '(-1)'], {}), '(ci, -1)\n', (6769, 6777), True, 'import numpy as np\n'), ((6792, 6810), 'numpy.reshape', 'np.reshape', (['cj', '(-1)'], {}), '(cj, -1)\n', (6802, 6810), True, 'import numpy as np\n'), ((836, 851), 'numpy.kron', 'np.kron', (['cc', 'cc'], {}), '(cc, cc)\n', (843, 851), True, 'import numpy as np\n'), ((2823, 2836), 'numpy.prod', 'np.prod', (['aux2'], {}), '(aux2)\n', (2830, 2836), True, 'import numpy as np\n'), ((3957, 3975), 'numpy.log', 'np.log', (['wishPDFofC'], {}), '(wishPDFofC)\n', (3963, 
3975), True, 'import numpy as np\n'), ((4855, 4868), 'numpy.eye', 'np.eye', (['(P * P)'], {}), '(P * P)\n', (4861, 4868), True, 'import numpy as np\n'), ((6427, 6451), 'scipy.stats.chi2.cdf', 'sts.chi2.cdf', (['mahDist', '(3)'], {}), '(mahDist, 3)\n', (6439, 6451), True, 'import scipy.stats as sts\n'), ((3179, 3195), 'numpy.linalg.inv', 'np.linalg.inv', (['V'], {}), '(V)\n', (3192, 3195), True, 'import numpy as np\n'), ((3221, 3239), 'numpy.trace', 'np.trace', (['exponent'], {}), '(exponent)\n', (3229, 3239), True, 'import numpy as np\n'), ((3286, 3302), 'numpy.exp', 'np.exp', (['exponent'], {}), '(exponent)\n', (3292, 3302), True, 'import numpy as np\n'), ((4309, 4325), 'numpy.log', 'np.log', (['gaussPDF'], {}), '(gaussPDF)\n', (4315, 4325), True, 'import numpy as np\n'), ((2768, 2780), 'numpy.arange', 'np.arange', (['P'], {}), '(P)\n', (2777, 2780), True, 'import numpy as np\n')] |
import os
from glob import glob
from pathlib import Path
from typing import Callable, Dict, List
import numpy as np
from PIL import Image
from saticl.datasets.base import DatasetBase
class AgriVisionDataset(DatasetBase):
    """Agriculture-Vision semantic segmentation dataset.

    Pairs RGB (and optionally NIR) field tiles with single-channel
    ground-truth masks.  Expected layout under the dataset root:
    ``<path>/<subset>/images/rgb/*.jpg``, ``.../images/nir/*.jpg`` and
    ``<path>/<subset>/gt/*.png``, with matching basenames per sample.
    """

    # class index -> human-readable category name
    _categories = {
        0: "background",
        1: "double_plant",
        2: "drydown",
        3: "endrow",
        4: "nutrient_deficiency",
        5: "planter_skip",
        6: "water",
        7: "waterway",
        8: "weed_cluster",
        9: "storm_damage",  # not evaluated in the actual challenge, discarded
        255: "ignored",  # ignored areas around the fields
    }
    # class index -> RGB color used for visualisation
    _palette = {
        0: (0, 0, 0),  # background
        1: (23, 190, 207),  # double_plant
        2: (32, 119, 180),  # drydown
        3: (148, 103, 189),  # endrow
        4: (43, 160, 44),  # nutrient_deficiency
        5: (127, 127, 127),  # planter_skip
        6: (214, 39, 40),  # water
        7: (140, 86, 75),  # waterway
        8: (255, 127, 14),  # weed cluster
        255: (0, 0, 0)
    }

    def __init__(self,
                 path: Path,
                 subset: str = "train",
                 transform: Callable = None,
                 channels: int = 3,
                 ignore_index: int = 255):
        """Index the image/mask files and validate their pairing.

        :param path: dataset root containing the subset folders
        :param subset: which split to load ("train", "val", ...)
        :param transform: callable applied to image/mask pairs
            (assumed albumentations-style: takes ``image=``/``mask=``
            kwargs and returns a dict -- TODO confirm against callers)
        :param channels: 3 for RGB only, 4 to also stack NIR
        :param ignore_index: label value excluded from loss/metrics
        """
        assert channels in (3, 4), f"Channel count not supported: {channels}"
        self.subset = subset
        self.transform = transform
        self.channels = channels
        self._ignore_index = ignore_index
        self.rgb_files = sorted(glob(str(path / subset / "images" / "rgb" / "*.jpg")))
        self.nir_files = sorted(glob(str(path / subset / "images" / "nir" / "*.jpg")))
        assert len(self.rgb_files) > 0, "No files found!"
        assert len(self.rgb_files) == len(self.nir_files), \
            f"Length mismatch: RGB: {len(self.rgb_files)} - NIR: {len(self.nir_files)}"
        # sorted globs should pair one-to-one; verify by basename
        self.image_names = list()
        for rgb, nir in zip(self.rgb_files, self.nir_files):
            rgb_name = os.path.basename(rgb).replace(".jpg", "")
            nir_name = os.path.basename(nir).replace(".jpg", "")
            assert rgb_name == nir_name, f"ID mismatch - RGB: {rgb_name}, NIR: {nir_name}"
            self.image_names.append(rgb_name)
        self.mask_files = sorted(glob(str(path / subset / "gt" / "*.png")))
        assert len(self.rgb_files) == len(self.mask_files), \
            f"Length mismatch: RGB: {len(self.rgb_files)} - GT: {len(self.mask_files)}"
        for rgb, gt in zip(self.rgb_files, self.mask_files):
            rgb_name = os.path.basename(rgb).replace(".jpg", "")
            gt_name = os.path.basename(gt).replace(".png", "")
            assert rgb_name == gt_name, f"ID mismatch - RGB: {rgb_name}, NIR: {gt_name}"

    def __len__(self):
        """Number of image/mask pairs in this subset."""
        return len(self.rgb_files)

    def __getitem__(self, index):
        """Load one sample as an (image, label) pair of numpy arrays.

        The image is RGB, or RGB+NIR stacked depth-wise when
        ``channels == 4``; label value 9 ("storm_damage", not evaluated
        in the challenge) is remapped to 0 (background).
        """
        # read RGB, if also NIR is required, stack it at the bottom
        image = np.array(Image.open(self.rgb_files[index]))
        if self.channels == 4:
            nir = np.array(Image.open(self.nir_files[index]))
            image = np.dstack((image, nir))
        # read the mask, remove any 'storm damage' and put background instead
        label = np.array(Image.open(self.mask_files[index]))
        label[label == 9] = 0
        # if self.size != label.size:
        #     label = F.resize(label, self.size, torchvision.transforms.InterpolationMode.NEAREST)
        #     label = np.array(label)
        if self.transform is not None:
            pair = self.transform(image=image, mask=label)
            image = pair.get("image")
            label = pair.get("mask").astype(np.uint64)
        return image, label

    def add_mask(self, mask: List[bool], stage: str = None) -> None:
        """Keep only the samples whose boolean flag is True.

        The mask is applied in parallel to RGB, ground-truth and NIR
        lists; optionally renames the subset to ``stage``.
        """
        assert len(mask) == len(self.rgb_files), \
            f"Mask is the wrong size! Expected {len(self.rgb_files)}, got {len(mask)}"
        self.rgb_files = [x for include, x in zip(mask, self.rgb_files) if include]
        self.mask_files = [x for include, x in zip(mask, self.mask_files) if include]
        if self.nir_files:
            self.nir_files = [x for include, x in zip(mask, self.nir_files) if include]
        if stage:
            self.subset = stage

    def name(self) -> str:
        """Dataset identifier."""
        return "agrivision"

    def stage(self) -> str:
        """Current split name (may differ from the ctor subset after add_mask)."""
        return self.subset

    def categories(self) -> Dict[int, str]:
        """Mapping class index -> category name."""
        return self._categories

    def palette(self) -> Dict[int, tuple]:
        """Mapping class index -> RGB visualisation color."""
        return self._palette

    def ignore_index(self) -> int:
        """Label value to ignore in loss/metrics."""
        return self._ignore_index

    def has_background(self) -> bool:
        """Class 0 is a genuine background class for this dataset."""
        return True
| [
"numpy.dstack",
"PIL.Image.open",
"os.path.basename"
] | [((2919, 2952), 'PIL.Image.open', 'Image.open', (['self.rgb_files[index]'], {}), '(self.rgb_files[index])\n', (2929, 2952), False, 'from PIL import Image\n'), ((3067, 3090), 'numpy.dstack', 'np.dstack', (['(image, nir)'], {}), '((image, nir))\n', (3076, 3090), True, 'import numpy as np\n'), ((3194, 3228), 'PIL.Image.open', 'Image.open', (['self.mask_files[index]'], {}), '(self.mask_files[index])\n', (3204, 3228), False, 'from PIL import Image\n'), ((3012, 3045), 'PIL.Image.open', 'Image.open', (['self.nir_files[index]'], {}), '(self.nir_files[index])\n', (3022, 3045), False, 'from PIL import Image\n'), ((1983, 2004), 'os.path.basename', 'os.path.basename', (['rgb'], {}), '(rgb)\n', (1999, 2004), False, 'import os\n'), ((2048, 2069), 'os.path.basename', 'os.path.basename', (['nir'], {}), '(nir)\n', (2064, 2069), False, 'import os\n'), ((2538, 2559), 'os.path.basename', 'os.path.basename', (['rgb'], {}), '(rgb)\n', (2554, 2559), False, 'import os\n'), ((2602, 2622), 'os.path.basename', 'os.path.basename', (['gt'], {}), '(gt)\n', (2618, 2622), False, 'import os\n')] |
import numpy as np
class siftDescriptor:
    """A single SIFT keypoint: image coordinates plus its normalized descriptor."""

    def __init__(self, x, y, descriptor):
        self.x = x
        self.y = y
        self.descriptor = self.normalizeSIFT(descriptor)

    def normalizeSIFT(self, descriptor):
        """Return the descriptor as a float array, scaled to unit length
        whenever its Euclidean norm exceeds 1.0.

        BUG FIX: the original built ``np.array(descriptor)`` without a
        dtype, so integer input produced an integer array and the
        in-place ``/= float(norm)`` raised a TypeError (numpy forbids
        true-division output into an int array under 'same_kind'
        casting).  Forcing a float dtype makes normalization safe for
        any numeric input.
        """
        descriptor = np.array(descriptor, dtype=np.float64)
        norm = np.linalg.norm(descriptor)
        if norm > 1.0:
            descriptor /= norm
        return descriptor
class imageDescriptors:
    """Container for all SIFT descriptors extracted from one image,
    together with its class label and pixel dimensions."""

    def __init__(self, descriptors, label, width, height):
        # plain data holder: store every argument as-is
        self.descriptors, self.label = descriptors, label
        self.width, self.height = width, height
| [
"numpy.array",
"numpy.linalg.norm"
] | [((243, 263), 'numpy.array', 'np.array', (['descriptor'], {}), '(descriptor)\n', (251, 263), True, 'import numpy as np\n'), ((279, 305), 'numpy.linalg.norm', 'np.linalg.norm', (['descriptor'], {}), '(descriptor)\n', (293, 305), True, 'import numpy as np\n')] |
#!/usr/bin/python3

# Python program to locate verses in Quran images using a
# verse template image (template matching).
# Author : <NAME>

import cv2
import numpy as np

# minimum normalized cross-correlation score accepted as a match
THRESHOLD = 0.75

# Read the main image (source)
img_rgb = cv2.imread('./source/0006.jpg')

# Convert it to grayscale (template matching works on a single channel)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)

# Read the template (flag 0 = load as grayscale)
template = cv2.imread('./template/verse_template.jpg',0)

# Store width and height of template in w and h
w, h = template.shape[::-1]

# Perform match operations.
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)

# Store the coordinates of matched area in a numpy array
loc = np.where( res >= THRESHOLD)

# Draw a rectangle around the matched region.
# NOTE: neighbouring positions above the threshold yield several
# overlapping rectangles per marker (no non-maximum suppression).
points = zip(*loc[::-1])
for pt in points:
    print(pt)
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 17, 255), 2)

# Show the final image with the matched area.
cv2.imshow('Detected verses',img_rgb)
cv2.waitKey()
"cv2.rectangle",
"numpy.where",
"cv2.imshow",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.matchTemplate",
"cv2.imread"
] | [((231, 262), 'cv2.imread', 'cv2.imread', (['"""./source/0006.jpg"""'], {}), "('./source/0006.jpg')\n", (241, 262), False, 'import cv2\n'), ((304, 345), 'cv2.cvtColor', 'cv2.cvtColor', (['img_rgb', 'cv2.COLOR_BGR2GRAY'], {}), '(img_rgb, cv2.COLOR_BGR2GRAY)\n', (316, 345), False, 'import cv2\n'), ((382, 428), 'cv2.imread', 'cv2.imread', (['"""./template/verse_template.jpg"""', '(0)'], {}), "('./template/verse_template.jpg', 0)\n", (392, 428), False, 'import cv2\n'), ((548, 607), 'cv2.matchTemplate', 'cv2.matchTemplate', (['img_gray', 'template', 'cv2.TM_CCOEFF_NORMED'], {}), '(img_gray, template, cv2.TM_CCOEFF_NORMED)\n', (565, 607), False, 'import cv2\n'), ((674, 700), 'numpy.where', 'np.where', (['(res >= THRESHOLD)'], {}), '(res >= THRESHOLD)\n', (682, 700), True, 'import numpy as np\n'), ((932, 970), 'cv2.imshow', 'cv2.imshow', (['"""Detected verses"""', 'img_rgb'], {}), "('Detected verses', img_rgb)\n", (942, 970), False, 'import cv2\n'), ((971, 984), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (982, 984), False, 'import cv2\n'), ((813, 880), 'cv2.rectangle', 'cv2.rectangle', (['img_rgb', 'pt', '(pt[0] + w, pt[1] + h)', '(0, 17, 255)', '(2)'], {}), '(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 17, 255), 2)\n', (826, 880), False, 'import cv2\n')] |
#
# Copyright 2020 BBC Research & Development
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from models.base import BaseModel
import numpy as np
import tensorflow as tf
class DistortionModel(BaseModel):
    def __init__(self, model_name, session, epochs, batch_size, loss_type, width, height, levels):
        """Thin constructor: forwards every hyper-parameter to BaseModel unchanged."""
        super().__init__(model_name, session, epochs, batch_size, loss_type, width, height, levels)
    def _set_placeholders(self):
        """Create the TF1 graph placeholders.

        ``_input`` stacks two frames channel-wise (hence channels * 2);
        ``_label`` and ``_output`` each hold a single frame of shape
        (batch, height, width, channels).
        """
        self._input = tf.placeholder(tf.float32, [None, self._height, self._width, self._channels * 2], name='input')
        self._label = tf.placeholder(tf.float32, [None, self._height, self._width, self._channels], name='label')
        self._output = tf.placeholder(tf.float32, [None, self._height, self._width, self._channels], name='output')
def cnn(self):
kernel = tf.get_variable(
'w_1', [3, 3, self._channels * 2, 64], tf.float32, initializer=tf.contrib.layers.xavier_initializer(False))
bias = tf.get_variable('b_1', [64], tf.float32, initializer=tf.constant_initializer(0.0))
conv_i = self.__class__._prelu(
tf.nn.bias_add(
tf.nn.conv2d(input=self._input, filter=kernel, strides=[1, 1, 1, 1], padding='SAME'), bias), 'PReLU_1')
kernel_2 = tf.get_variable(
'w_2', [3, 3, 64, 64], tf.float32, initializer=tf.contrib.layers.xavier_initializer(False))
bias_2 = tf.get_variable('b_2', [64], tf.float32, initializer=tf.constant_initializer(0.0))
conv_i = self.__class__._prelu(
tf.nn.bias_add(tf.nn.conv2d(input=conv_i, filter=kernel_2, strides=[1, 1, 1, 1], padding='SAME'), bias_2),
'PReLU_2')
conv_j = tf.nn.max_pool(value=conv_i, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
kernel_3 = tf.get_variable(
'w_3', [3, 3, 64, 64], tf.float32, initializer=tf.contrib.layers.xavier_initializer(False))
bias_3 = tf.get_variable('b_3', [64], tf.float32, initializer=tf.constant_initializer(0.0))
conv_j = self.__class__._prelu(
tf.nn.bias_add(tf.nn.conv2d(input=conv_j, filter=kernel_3, strides=[1, 1, 1, 1], padding='SAME'), bias_3),
'PReLU_3')
kernel_4 = tf.get_variable(
'w_4', [3, 3, 64, 64], tf.float32, initializer=tf.contrib.layers.xavier_initializer(False))
bias_4 = tf.get_variable('b_4', [64], tf.float32, initializer=tf.constant_initializer(0.0))
conv_j = self.__class__._prelu(
tf.nn.bias_add(tf.nn.conv2d(input=conv_j, filter=kernel_4, strides=[1, 1, 1, 1], padding='SAME'), bias_4),
'PReLU_4')
conv_k = tf.nn.max_pool(value=conv_j, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
kernel_5 = tf.get_variable(
'w_5', [3, 3, 64, 64], tf.float32, initializer=tf.contrib.layers.xavier_initializer(False))
bias_5 = tf.get_variable('b_5', [64], tf.float32, initializer=tf.constant_initializer(0.0))
conv_k = self.__class__._prelu(
tf.nn.bias_add(tf.nn.conv2d(input=conv_k, filter=kernel_5, strides=[1, 1, 1, 1], padding='SAME'), bias_5),
'PReLU_5')
conv_k = tf.image.resize_images(conv_k, [int(self._height / 2), int(self._width / 2)])
kernel_6 = tf.get_variable(
'w_6', [3, 3, 64, 64], tf.float32, initializer=tf.contrib.layers.xavier_initializer(False))
bias_6 = tf.get_variable('b_6', [64], tf.float32, initializer=tf.constant_initializer(0.0))
conv_k = self.__class__._prelu(
tf.nn.bias_add(tf.nn.conv2d(input=conv_k, filter=kernel_6, strides=[1, 1, 1, 1], padding='SAME'), bias_6),
'PReLU_6')
kernel_7 = tf.get_variable(
'w_7', [3, 3, 64, 64], tf.float32, initializer=tf.contrib.layers.xavier_initializer(False))
bias_7 = tf.get_variable('b_7', [64], tf.float32, initializer=tf.constant_initializer(0.0))
conv_k = self.__class__._prelu(
tf.nn.bias_add(tf.nn.conv2d(input=conv_k, filter=kernel_7, strides=[1, 1, 1, 1], padding='SAME'), bias_7),
'PReLU_7')
conv_j = tf.concat(values=[conv_j, conv_k], axis=3)
conv_j = tf.image.resize_images(conv_j, [self._height, self._width])
kernel_8 = tf.get_variable(
'w_8', [3, 3, 128, 64], tf.float32, initializer=tf.contrib.layers.xavier_initializer(False))
bias_8 = tf.get_variable('b_8', [64], tf.float32, initializer=tf.constant_initializer(0.0))
conv_j = self.__class__._prelu(
tf.nn.bias_add(tf.nn.conv2d(input=conv_j, filter=kernel_8, strides=[1, 1, 1, 1], padding='SAME'), bias_8),
'PReLU_8')
kernel_9 = tf.get_variable(
'w_9', [3, 3, 64, 64], tf.float32, initializer=tf.contrib.layers.xavier_initializer(False))
bias_9 = tf.get_variable('b_9', [64], tf.float32, initializer=tf.constant_initializer(0.0))
conv_j = self.__class__._prelu(
tf.nn.bias_add(tf.nn.conv2d(input=conv_j, filter=kernel_9, strides=[1, 1, 1, 1], padding='SAME'), bias_9),
'PReLU_9')
conv_i = tf.concat(values=[conv_i, conv_j], axis=3)
kernel_10 = tf.get_variable(
'w_10', [5, 5, 128, self._channels], tf.float32, initializer=tf.contrib.layers.xavier_initializer(False))
bias_10 = tf.get_variable('b_10', [self._channels], tf.float32, initializer=tf.constant_initializer(0.0))
conv_i = tf.nn.bias_add(
tf.nn.conv2d(input=conv_i, filter=kernel_10, strides=[1, 1, 1, 1], padding='SAME'), bias_10)
return conv_i
def _shuffle_dataset(self, dataset_, train_batch_order, batch):
input_t = []
label_t = []
for i in range(self._batch_size):
input_t.append(dataset_['input'][train_batch_order[i + batch * self._batch_size], :, :, :])
label_t.append(dataset_['label'][train_batch_order[i + batch * self._batch_size], :, :, :])
feed_dict = {self._input: np.array(input_t), self._label: np.array(label_t, dtype='float')}
return feed_dict
| [
"tensorflow.nn.conv2d",
"tensorflow.nn.max_pool",
"tensorflow.image.resize_images",
"tensorflow.placeholder",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.concat",
"numpy.array",
"tensorflow.constant_initializer"
] | [((965, 1064), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self._height, self._width, self._channels * 2]'], {'name': '"""input"""'}), "(tf.float32, [None, self._height, self._width, self._channels *\n 2], name='input')\n", (979, 1064), True, 'import tensorflow as tf\n'), ((1083, 1179), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self._height, self._width, self._channels]'], {'name': '"""label"""'}), "(tf.float32, [None, self._height, self._width, self._channels\n ], name='label')\n", (1097, 1179), True, 'import tensorflow as tf\n'), ((1198, 1295), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self._height, self._width, self._channels]'], {'name': '"""output"""'}), "(tf.float32, [None, self._height, self._width, self._channels\n ], name='output')\n", (1212, 1295), True, 'import tensorflow as tf\n'), ((2192, 2283), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', ([], {'value': 'conv_i', 'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""VALID"""'}), "(value=conv_i, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='VALID')\n", (2206, 2283), True, 'import tensorflow as tf\n'), ((3144, 3235), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', ([], {'value': 'conv_j', 'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""VALID"""'}), "(value=conv_j, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='VALID')\n", (3158, 3235), True, 'import tensorflow as tf\n'), ((4615, 4657), 'tensorflow.concat', 'tf.concat', ([], {'values': '[conv_j, conv_k]', 'axis': '(3)'}), '(values=[conv_j, conv_k], axis=3)\n', (4624, 4657), True, 'import tensorflow as tf\n'), ((4676, 4735), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['conv_j', '[self._height, self._width]'], {}), '(conv_j, [self._height, self._width])\n', (4698, 4735), True, 'import tensorflow as tf\n'), ((5601, 5643), 'tensorflow.concat', 'tf.concat', ([], {'values': '[conv_i, conv_j]', 'axis': '(3)'}), 
'(values=[conv_i, conv_j], axis=3)\n', (5610, 5643), True, 'import tensorflow as tf\n'), ((5959, 6046), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'conv_i', 'filter': 'kernel_10', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=conv_i, filter=kernel_10, strides=[1, 1, 1, 1], padding=\n 'SAME')\n", (5971, 6046), True, 'import tensorflow as tf\n'), ((6471, 6488), 'numpy.array', 'np.array', (['input_t'], {}), '(input_t)\n', (6479, 6488), True, 'import numpy as np\n'), ((6503, 6535), 'numpy.array', 'np.array', (['label_t'], {'dtype': '"""float"""'}), "(label_t, dtype='float')\n", (6511, 6535), True, 'import numpy as np\n'), ((1420, 1463), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', (['(False)'], {}), '(False)\n', (1456, 1463), True, 'import tensorflow as tf\n'), ((1533, 1561), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (1556, 1561), True, 'import tensorflow as tf\n'), ((1647, 1735), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'self._input', 'filter': 'kernel', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=self._input, filter=kernel, strides=[1, 1, 1, 1],\n padding='SAME')\n", (1659, 1735), True, 'import tensorflow as tf\n'), ((1847, 1890), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', (['(False)'], {}), '(False)\n', (1883, 1890), True, 'import tensorflow as tf\n'), ((1962, 1990), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (1985, 1990), True, 'import tensorflow as tf\n'), ((2059, 2145), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'conv_i', 'filter': 'kernel_2', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=conv_i, filter=kernel_2, strides=[1, 1, 1, 1], padding=\n 'SAME')\n", (2071, 2145), True, 'import tensorflow as tf\n'), ((2376, 2419), 'tensorflow.contrib.layers.xavier_initializer', 
'tf.contrib.layers.xavier_initializer', (['(False)'], {}), '(False)\n', (2412, 2419), True, 'import tensorflow as tf\n'), ((2491, 2519), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2514, 2519), True, 'import tensorflow as tf\n'), ((2588, 2674), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'conv_j', 'filter': 'kernel_3', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=conv_j, filter=kernel_3, strides=[1, 1, 1, 1], padding=\n 'SAME')\n", (2600, 2674), True, 'import tensorflow as tf\n'), ((2799, 2842), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', (['(False)'], {}), '(False)\n', (2835, 2842), True, 'import tensorflow as tf\n'), ((2914, 2942), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2937, 2942), True, 'import tensorflow as tf\n'), ((3011, 3097), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'conv_j', 'filter': 'kernel_4', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=conv_j, filter=kernel_4, strides=[1, 1, 1, 1], padding=\n 'SAME')\n", (3023, 3097), True, 'import tensorflow as tf\n'), ((3328, 3371), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', (['(False)'], {}), '(False)\n', (3364, 3371), True, 'import tensorflow as tf\n'), ((3443, 3471), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3466, 3471), True, 'import tensorflow as tf\n'), ((3540, 3626), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'conv_k', 'filter': 'kernel_5', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=conv_k, filter=kernel_5, strides=[1, 1, 1, 1], padding=\n 'SAME')\n", (3552, 3626), True, 'import tensorflow as tf\n'), ((3847, 3890), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', (['(False)'], {}), '(False)\n', (3883, 3890), True, 'import tensorflow as tf\n'), 
((3962, 3990), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3985, 3990), True, 'import tensorflow as tf\n'), ((4059, 4145), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'conv_k', 'filter': 'kernel_6', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=conv_k, filter=kernel_6, strides=[1, 1, 1, 1], padding=\n 'SAME')\n", (4071, 4145), True, 'import tensorflow as tf\n'), ((4270, 4313), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', (['(False)'], {}), '(False)\n', (4306, 4313), True, 'import tensorflow as tf\n'), ((4385, 4413), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (4408, 4413), True, 'import tensorflow as tf\n'), ((4482, 4568), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'conv_k', 'filter': 'kernel_7', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=conv_k, filter=kernel_7, strides=[1, 1, 1, 1], padding=\n 'SAME')\n", (4494, 4568), True, 'import tensorflow as tf\n'), ((4833, 4876), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', (['(False)'], {}), '(False)\n', (4869, 4876), True, 'import tensorflow as tf\n'), ((4948, 4976), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (4971, 4976), True, 'import tensorflow as tf\n'), ((5045, 5131), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'conv_j', 'filter': 'kernel_8', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=conv_j, filter=kernel_8, strides=[1, 1, 1, 1], padding=\n 'SAME')\n", (5057, 5131), True, 'import tensorflow as tf\n'), ((5256, 5299), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', (['(False)'], {}), '(False)\n', (5292, 5299), True, 'import tensorflow as tf\n'), ((5371, 5399), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (5394, 5399), True, 
'import tensorflow as tf\n'), ((5468, 5554), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'conv_j', 'filter': 'kernel_9', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=conv_j, filter=kernel_9, strides=[1, 1, 1, 1], padding=\n 'SAME')\n", (5480, 5554), True, 'import tensorflow as tf\n'), ((5755, 5798), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', (['(False)'], {}), '(False)\n', (5791, 5798), True, 'import tensorflow as tf\n'), ((5884, 5912), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (5907, 5912), True, 'import tensorflow as tf\n')] |
import math
import typing
from itertools import product
import numpy as np
from .matrix import Matrix, MatrixOperator
# Amplitude vectors for the four recognised single-qbit symbols:
# computational basis |0>, |1> and the Hadamard basis
# |+> = (|0> + |1>)/sqrt(2), |-> = (|0> - |1>)/sqrt(2).
QBIT_MATRICES = {
    "0": [1.0, 0.0],
    "1": [0.0, 1.0],
    "+": [1 / math.sqrt(2.0), 1 / math.sqrt(2.0)],
    "-": [1 / math.sqrt(2.0), -1 / math.sqrt(2.0)],
}
# Tolerance used when verifying that squared amplitudes sum to 1.
EPSILON = 0.00001
class States:
    """Class for handling operations on qbit states."""

    @staticmethod
    def decode_state(qbit_representation: str) -> np.ndarray:
        """
        Convert string representation of qbit (ex. |01>, |+>, |->, |001>, |+++>) to matrix form.

        :param qbit_representation: string started with | and ended with >
            containing any number of 0, 1, + and -, optionally prefixed with "-"
            for the negated state (e.g. "-|1>")
        :return: matrix containing float values of qbit('s). Size of matrix is determined by
            length of symbols. It will always contain pow(2, number of symbols) entries
        :raises ValueError: when qbit_representation does not have at least 1 character
            between the braket signs, e.g. "|>" or "-|>"
        :raises RuntimeError: when possibilities matrix does not sum to 1
        """
        negative = qbit_representation.startswith("-")
        # Strip the optional leading "-", the "|" and the trailing ">".
        symbols = qbit_representation[2:-1] if negative else qbit_representation[1:-1]
        # Validate after stripping: checking the raw length let "-|>" slip
        # through and crash with IndexError further down.
        if not symbols:
            raise ValueError("Qbit string representation has to have at least 1 character e.g. |1>")
        # Build the joint state as the Kronecker product of single-qbit vectors.
        current_matrix = Matrix(QBIT_MATRICES[symbols[0]])
        for qbit in symbols[1:]:
            current_matrix = MatrixOperator.kronecker_product(current_matrix, Matrix(QBIT_MATRICES[qbit]))
        if negative:
            current_matrix = Matrix(np.negative(current_matrix.value))
        # Squared amplitudes must sum to 1 (within EPSILON) for a valid state.
        if 1 - np.sum(np.square(current_matrix.value)) > EPSILON:
            raise RuntimeError("Possibilities matrix does not sum to 1")
        return current_matrix.value

    @staticmethod
    def encode_state(matrix_representation: np.ndarray) -> typing.Optional[str]:
        """Convert matrix representation of qbit to string form.

        :param matrix_representation: single dimensional matrix with one column and amount of rows being power of two
        :return: string representation of qbit, if possible to describe without precision losses otherwise return None
        :raises ValueError: when no matching braket representation was found
        :raises RuntimeError: when more than one braket representation was found
        """
        braket_length = int(math.log2(matrix_representation.size))
        # Enumerate every candidate braket (and its negation) of the right length.
        possible_braket_representations = [
            "|" + "".join(s) + ">" for s in product(QBIT_MATRICES.keys(), repeat=braket_length)
        ] + ["-|" + "".join(s) + ">" for s in product(QBIT_MATRICES.keys(), repeat=braket_length)]
        matches_found = []
        for braket in possible_braket_representations:
            if np.allclose(States.decode_state(braket), matrix_representation):
                matches_found.append(braket)
        if not matches_found:
            raise ValueError("No braket representation was found")
        if len(matches_found) > 1:
            raise RuntimeError("More than one braket representation was found")
        return matches_found[0]
| [
"numpy.negative",
"math.sqrt",
"math.log2",
"numpy.square"
] | [((195, 209), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (204, 209), False, 'import math\n'), ((215, 229), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (224, 229), False, 'import math\n'), ((246, 260), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (255, 260), False, 'import math\n'), ((267, 281), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (276, 281), False, 'import math\n'), ((2633, 2670), 'math.log2', 'math.log2', (['matrix_representation.size'], {}), '(matrix_representation.size)\n', (2642, 2670), False, 'import math\n'), ((1821, 1854), 'numpy.negative', 'np.negative', (['current_matrix.value'], {}), '(current_matrix.value)\n', (1832, 1854), True, 'import numpy as np\n'), ((1879, 1910), 'numpy.square', 'np.square', (['current_matrix.value'], {}), '(current_matrix.value)\n', (1888, 1910), True, 'import numpy as np\n')] |
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from typing import Optional, Tuple, Dict, Any
import numpy as np
from collections import defaultdict
from mdlearn.utils import PathLike
from mdlearn.nn.utils import Trainer
class LSTM(nn.Module):
    """Recurrent model that forecasts the next feature vector of a
    time series from a window of preceding feature vectors."""

    def __init__(
        self,
        input_size: int,
        hidden_size: Optional[int] = None,
        num_layers: int = 1,
        bias: bool = True,
        dropout: float = 0.0,
        bidirectional: bool = False,
    ):
        """
        Parameters
        ----------
        input_size: int
            The number of expected features in the input :obj:`x`.
        hidden_size: Optional[int], default=None
            The number of features in the hidden state h. Defaults to
            :obj:`input_size` so the dynamics can be propagated.
        num_layers: int, default=1
            Number of stacked recurrent layers.
        bias: bool, default=True
            If False, the LSTM layers do not use bias weights b_ih and b_hh.
        dropout: float, default=0.0
            If non-zero, applies dropout on the outputs of each LSTM layer
            except the last, with the given probability.
        bidirectional: bool, default=False
            If True, becomes a bidirectional LSTM.
        """
        super().__init__()
        self.num_layers = num_layers
        hidden_size = input_size if hidden_size is None else hidden_size
        self.lstm = nn.LSTM(
            input_size,
            hidden_size,
            num_layers,
            bias,
            batch_first=True,
            dropout=dropout,
            bidirectional=bidirectional,
        )
        # Project the final hidden state back onto the input feature space.
        self.head = nn.Linear(hidden_size, input_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Predict the next feature vector for each sequence in the batch.

        Parameters
        ----------
        x : torch.Tensor
            Tensor of shape (B, N, D): B sequences of N time steps with
            D feature dimensions each.

        Returns
        -------
        torch.Tensor
            Predicted tensor of shape (B, input_size), computed from a
            final LSTM hidden state passed through the linear head.
        """
        _, (hidden_states, _) = self.lstm(x)  # output, (h_n, c_n)
        # hidden_states stacks num_layers * num_directions final states;
        # index num_layers - 1 is the last layer when unidirectional.
        # NOTE(review): for bidirectional LSTMs this index does not select
        # the last layer -- confirm the intended behaviour.
        final_state = hidden_states[self.num_layers - 1]
        return self.head(final_state)

    def mse_loss(
        self, y_true: torch.Tensor, y_pred: torch.Tensor, reduction: str = "mean"
    ) -> torch.Tensor:
        """Mean squared error between :obj:`y_true` and :obj:`y_pred`.

        Parameters
        ----------
        y_true : torch.Tensor
            The true data.
        y_pred : torch.Tensor
            The prediction.
        reduction : str, default="mean"
            Reduction strategy forwarded to :obj:`F.mse_loss`.

        Returns
        -------
        torch.Tensor
            The MSE loss between :obj:`y_true` and :obj:`y_pred`.
        """
        return F.mse_loss(y_true, y_pred, reduction=reduction)
class LSTMTrainer(Trainer):
    """Trainer class to fit an LSTM model to a time series of feature vectors."""

    # TODO: Add example usage in documentation.

    def __init__(
        self,
        input_size: int,
        hidden_size: Optional[int] = None,
        num_layers: int = 1,
        bias: bool = True,
        dropout: float = 0.0,
        bidirectional: bool = False,
        window_size: int = 10,
        horizon: int = 1,
        seed: int = 42,
        in_gpu_memory: bool = False,
        num_data_workers: int = 0,
        prefetch_factor: int = 2,
        split_pct: float = 0.8,
        split_method: str = "partition",
        batch_size: int = 128,
        shuffle: bool = True,
        device: str = "cpu",
        optimizer_name: str = "RMSprop",
        optimizer_hparams: Optional[Dict[str, Any]] = None,
        scheduler_name: Optional[str] = None,
        scheduler_hparams: Optional[Dict[str, Any]] = None,
        epochs: int = 100,
        verbose: bool = False,
        clip_grad_max_norm: float = 10.0,
        checkpoint_log_every: int = 10,
        plot_log_every: int = 10,
        plot_n_samples: int = 10000,
        plot_method: Optional[str] = "TSNE",
        train_subsample_pct: float = 1.0,
        valid_subsample_pct: float = 1.0,
        use_wandb: bool = False,
    ):
        """
        Parameters
        ----------
        input_size: int
            The number of expected features in the input x.
        hidden_size: Optional[int], default=None
            The number of features in the hidden state h. By default, the
            :obj:`hidden_size` will be equal to the :obj:`input_size` in
            order to propogate the dynamics.
        num_layers: int, default=1
            Number of recurrent layers. E.g., setting num_layers=2 would mean
            stacking two LSTMs together to form a stacked LSTM, with the second
            LSTM taking in outputs of the first LSTM and computing the final
            results.
        bias: bool, default=True
            If False, then the layer does not use bias weights b_ih and b_hh.
            Default: True
        dropout: float, default=0.0
            If non-zero, introduces a Dropout layer on the outputs of each
            LSTM layer except the last layer, with dropout probability equal
            to dropout.
        bidirectional: bool, default=False
            If True, becomes a bidirectional LSTM.
        window_size : int, default=10
            Number of timesteps considered for prediction.
        horizon : int, default=1
            How many time steps to predict ahead.
        seed : int, default=42
            Random seed for torch, numpy, and random module.
        in_gpu_memory : bool, default=False
            If True, will pre-load the entire :obj:`data` array to GPU memory.
        num_data_workers : int, default=0
            How many subprocesses to use for data loading. 0 means that
            the data will be loaded in the main process.
        prefetch_factor : int, by default=2
            Number of samples loaded in advance by each worker. 2 means there will be a
            total of 2 * num_workers samples prefetched across all workers.
        split_pct : float, default=0.8
            Proportion of data set to use for training. The rest goes to validation.
        split_method : str, default="partition"
            Method to split the data. For random split use "random", for a simple
            partition, use "partition".
        batch_size : int, default=128
            Mini-batch size for training.
        shuffle : bool, default=True
            Whether to shuffle training data or not.
        device : str, default="cpu"
            Specify training hardware either :obj:`cpu` or :obj:`cuda` for GPU devices.
        optimizer_name : str, default="RMSprop"
            Name of the PyTorch optimizer to use. Matches PyTorch optimizer class name.
        optimizer_hparams : Optional[Dict[str, Any]], default=None
            Dictionary of hyperparameters to pass to the chosen PyTorch optimizer.
            Defaults to :obj:`{"lr": 0.001, "weight_decay": 0.00001}`.
        scheduler_name : Optional[str], default=None
            Name of the PyTorch learning rate scheduler to use.
            Matches PyTorch optimizer class name.
        scheduler_hparams : Optional[Dict[str, Any]], default=None
            Dictionary of hyperparameters to pass to the chosen PyTorch learning
            rate scheduler. Defaults to an empty dict.
        epochs : int, default=100
            Number of epochs to train for.
        verbose : bool, default=False
            If True, will print training and validation loss at each epoch.
        clip_grad_max_norm : float, default=10.0
            Max norm of the gradients for gradient clipping for more information
            see: :obj:`torch.nn.utils.clip_grad_norm_` documentation.
        checkpoint_log_every : int, default=10
            Epoch interval to log a checkpoint file containing the model
            weights, optimizer, and scheduler parameters.
        plot_log_every : int, default=10
            Epoch interval to log a visualization plot of the latent space.
        plot_n_samples : int, default=10000
            Number of validation samples to use for plotting.
        plot_method : Optional[str], default="TSNE"
            The method for visualizing the latent space or if visualization
            should not be run, set :obj:`plot_method=None`. If using :obj:`"TSNE"`,
            it will attempt to use the RAPIDS.ai GPU implementation and
            will fallback to the sklearn CPU implementation if RAPIDS.ai
            is unavailable.
        train_subsample_pct : float, default=1.0
            Percentage of training data to use during hyperparameter sweeps.
        valid_subsample_pct : float, default=1.0
            Percentage of validation data to use during hyperparameter sweeps.
        use_wandb : bool, default=False
            If True, will log results to wandb.

        Raises
        ------
        ValueError
            :obj:`split_pct` should be between 0 and 1.
        ValueError
            :obj:`train_subsample_pct` should be between 0 and 1.
        ValueError
            :obj:`valid_subsample_pct` should be between 0 and 1.
        ValueError
            Specified :obj:`device` as :obj:`cuda`, but it is unavailable.
        """
        super().__init__(
            seed,
            in_gpu_memory,
            num_data_workers,
            prefetch_factor,
            split_pct,
            split_method,
            batch_size,
            shuffle,
            device,
            epochs,
            verbose,
            clip_grad_max_norm,
            checkpoint_log_every,
            plot_log_every,
            plot_n_samples,
            plot_method,
            train_subsample_pct,
            valid_subsample_pct,
            use_wandb,
        )

        # Resolve default hyperparameter dicts here rather than in the
        # signature, so that no mutable dict instance is shared across calls.
        if optimizer_hparams is None:
            optimizer_hparams = {"lr": 0.001, "weight_decay": 0.00001}
        if scheduler_hparams is None:
            scheduler_hparams = {}

        self.window_size = window_size
        self.horizon = horizon
        self.optimizer_name = optimizer_name
        self.optimizer_hparams = optimizer_hparams
        self.scheduler_name = scheduler_name
        self.scheduler_hparams = scheduler_hparams
        # Populated by fit(); initialized here so predict() can be called
        # straight from a checkpoint without a prior fit().
        self.scalar_dset_names = []

        from mdlearn.utils import get_torch_optimizer, get_torch_scheduler

        # Set random seeds
        self._set_seed()

        self.model = LSTM(
            input_size, hidden_size, num_layers, bias, dropout, bidirectional
        ).to(self.device)

        if self.use_wandb:
            import wandb

            wandb.watch(self.model)

        # Setup optimizer
        self.optimizer = get_torch_optimizer(
            self.optimizer_name, self.optimizer_hparams, self.model.parameters()
        )

        # Setup learning rate scheduler
        self.scheduler = get_torch_scheduler(
            self.scheduler_name, self.scheduler_hparams, self.optimizer
        )

        # Log the train and validation loss each epoch
        self.loss_curve_ = {"train": [], "validation": []}

    def fit(
        self,
        X: np.ndarray,
        scalars: Optional[Dict[str, np.ndarray]] = None,
        output_path: PathLike = "./",
        checkpoint: Optional[PathLike] = None,
    ):
        """Trains the LSTM on the input data :obj:`X`.

        Parameters
        ----------
        X : np.ndarray
            Input features vectors of shape (N, D) where N is the number
            of data examples, and D is the dimension of the feature vector.
        scalars : Optional[Dict[str, np.ndarray]], default=None
            Dictionary of scalar arrays. For instance, the root mean squared
            deviation (RMSD) for each feature vector can be passed via
            :obj:`{"rmsd": np.array(...)}`. The dimension of each scalar array
            should match the number of input feature vectors N.
        output_path : PathLike, default="./"
            Path to write training results to. Makes an :obj:`output_path/checkpoints`
            folder to save model checkpoint files, and :obj:`output_path/plots` folder
            to store latent space visualizations.
        checkpoint : Optional[PathLike], default=None
            Path to a specific model checkpoint file to restore training.

        Raises
        ------
        ValueError
            If :obj:`X` does not have two dimensions. For scalar time series, please
            reshape to (N, 1).
        TypeError
            If :obj:`scalars` is not type dict. A common error is to pass
            :obj:`output_path` as the second argument.
        NotImplementedError
            If using a learning rate scheduler other than :obj:`ReduceLROnPlateau`,
            a step function will need to be implemented.
        """
        if len(X.shape) != 2:
            raise ValueError(f"X should be of dimension (N, D), got {X.shape}.")
        # None is the mutable-default-safe spelling of "no scalars".
        if scalars is None:
            scalars = {}
        if not isinstance(scalars, dict):
            raise TypeError(
                "scalars should be of type dict. A common error"
                " is to pass output_path as the second argument."
            )

        from mdlearn.utils import log_checkpoint, log_latent_visualization
        from mdlearn.data.utils import train_valid_split
        from mdlearn.data.datasets.feature_vector import TimeFeatureVectorDataset

        if self.use_wandb:
            import wandb

        exist_ok = (checkpoint is not None) or self.use_wandb
        output_path, checkpoint_path, plot_path = self._make_output_dir(
            output_path, exist_ok
        )

        # Set available number of cores
        self._set_num_threads()

        # Load training and validation data
        dataset = TimeFeatureVectorDataset(
            X,
            scalars,
            in_gpu_memory=self.in_gpu_memory,
            window_size=self.window_size,
            horizon=self.horizon,
        )
        train_loader, valid_loader = train_valid_split(
            dataset,
            self.split_pct,
            self.split_method,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_data_workers,
            prefetch_factor=self.prefetch_factor,
            persistent_workers=self.persistent_workers,
            drop_last=True,
            pin_memory=not self.in_gpu_memory,
        )

        self.scalar_dset_names = list(scalars.keys())

        # Optionally resume training from a checkpoint
        start_epoch = self._resume_training(checkpoint)

        # Start training
        for epoch in range(start_epoch, self.epochs + 1):
            # Training
            self.model.train()
            avg_train_loss = self._train(train_loader)

            if self.verbose:
                print(
                    "====> Epoch: {} Train:\tAvg loss: {:.4f}".format(
                        epoch, avg_train_loss
                    )
                )

            # Validation
            self.model.eval()
            with torch.no_grad():
                avg_valid_loss, z, paints = self._validate(valid_loader)

            if self.verbose:
                print(
                    "====> Epoch: {} Valid:\tAvg loss: {:.4f}\n".format(
                        epoch, avg_valid_loss
                    )
                )

            # Step the learning rate scheduler
            self.step_scheduler(epoch, avg_train_loss, avg_valid_loss)

            # Log a model checkpoint file
            if epoch % self.checkpoint_log_every == 0:
                log_checkpoint(
                    checkpoint_path / f"checkpoint-epoch-{epoch}.pt",
                    epoch,
                    self.model,
                    {"optimizer": self.optimizer},
                    self.scheduler,
                )

            if self.use_wandb:
                metrics = {"train_loss": avg_train_loss, "valid_loss": avg_valid_loss}

            # Log a visualization of the latent space
            if (self.plot_method is not None) and (epoch % self.plot_log_every == 0):
                htmls = log_latent_visualization(
                    z,
                    paints,
                    plot_path,
                    epoch,
                    self.plot_n_samples,
                    self.plot_method,
                )
                if self.use_wandb:
                    # Optionally, log visualizations to wandb
                    for name, html in htmls.items():
                        metrics[name] = wandb.Html(html, inject=False)  # noqa

            if self.use_wandb:
                wandb.log(metrics)  # noqa

            # Save the losses
            self.loss_curve_["train"].append(avg_train_loss)
            self.loss_curve_["validation"].append(avg_valid_loss)

    def predict(
        self,
        X: np.ndarray,
        inference_batch_size: int = 512,
        checkpoint: Optional[PathLike] = None,
    ) -> Tuple[np.ndarray, float]:
        """Predict using the LSTM.

        Parameters
        ----------
        X : np.ndarray
            The input data to predict on.
        inference_batch_size : int, default=512
            The batch size for inference.
        checkpoint : Optional[PathLike], default=None
            Path to a specific model checkpoint file.

        Returns
        -------
        Tuple[np.ndarray, float]
            The predictions and the average MSE loss.
        """
        from mdlearn.data.datasets.feature_vector import TimeFeatureVectorDataset

        dataset = TimeFeatureVectorDataset(
            X,
            in_gpu_memory=self.in_gpu_memory,
            window_size=self.window_size,
            horizon=self.horizon,
        )
        data_loader = DataLoader(
            dataset,
            batch_size=inference_batch_size,
            shuffle=False,
            num_workers=self.num_data_workers,
            prefetch_factor=self.prefetch_factor,
            persistent_workers=self.persistent_workers,
            drop_last=False,
            pin_memory=not self.in_gpu_memory,
        )
        if checkpoint is not None:
            self._load_checkpoint(checkpoint)

        # Make copy of class state incase of failure during inference
        tmp = self.scalar_dset_names.copy()
        self.model.eval()
        with torch.no_grad():
            try:
                # Set to empty list to avoid storage of paint scalars
                # that are not convenient to pass to the predict function.
                self.scalar_dset_names = []
                avg_loss, preds, _ = self._validate(data_loader)
                # Restore class state
                self.scalar_dset_names = tmp
                return preds, avg_loss
            except Exception as e:
                # Restore class state incase of failure
                self.scalar_dset_names = tmp
                raise e

    def _train(self, train_loader) -> float:
        """Run one training epoch and return the average loss."""
        avg_loss = 0.0
        for i, batch in enumerate(train_loader):

            if i / len(train_loader) > self.train_subsample_pct:
                break  # Early stop for sweeps

            x = batch["X"].to(self.device, non_blocking=True)
            y = batch["y"].to(self.device, non_blocking=True)

            # Forward pass
            y_pred = self.model(x)
            loss = self.model.mse_loss(y, y_pred)

            # Backward pass
            self.optimizer.zero_grad()
            loss.backward()
            _ = torch.nn.utils.clip_grad_norm_(
                self.model.parameters(), self.clip_grad_max_norm
            )
            self.optimizer.step()

            # Collect loss
            avg_loss += loss.item()

        avg_loss /= len(train_loader)

        return avg_loss

    def _validate(
        self, valid_loader
    ) -> Tuple[float, np.ndarray, Dict[str, np.ndarray]]:
        """Run one validation pass; return (avg loss, predictions, paint scalars)."""
        paints = defaultdict(list)
        preds = []
        avg_loss = 0.0
        for i, batch in enumerate(valid_loader):

            if i / len(valid_loader) > self.valid_subsample_pct:
                break  # Early stop for sweeps

            x = batch["X"].to(self.device, non_blocking=True)
            y = batch["y"].to(self.device, non_blocking=True)

            # Forward pass
            y_pred = self.model(x)
            loss = self.model.mse_loss(y, y_pred)

            # Collect loss
            avg_loss += loss.item()

            # Collect latent vectors for visualization
            preds.append(y_pred.cpu().numpy())
            for name in self.scalar_dset_names:
                paints[name].append(batch[name].cpu().numpy())

        avg_loss /= len(valid_loader)

        # Group latent vectors and paints
        preds = np.concatenate(preds)
        paints = {name: np.concatenate(scalar) for name, scalar in paints.items()}

        return avg_loss, preds, paints
| [
"torch.nn.functional.mse_loss",
"mdlearn.utils.log_checkpoint",
"wandb.log",
"torch.nn.LSTM",
"mdlearn.utils.get_torch_scheduler",
"mdlearn.data.datasets.feature_vector.TimeFeatureVectorDataset",
"mdlearn.data.utils.train_valid_split",
"mdlearn.utils.log_latent_visualization",
"wandb.watch",
"coll... | [((1910, 2028), 'torch.nn.LSTM', 'nn.LSTM', (['input_size', 'hidden_size', 'num_layers', 'bias'], {'batch_first': '(True)', 'dropout': 'dropout', 'bidirectional': 'bidirectional'}), '(input_size, hidden_size, num_layers, bias, batch_first=True,\n dropout=dropout, bidirectional=bidirectional)\n', (1917, 2028), False, 'from torch import nn\n'), ((2252, 2286), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'input_size'], {}), '(hidden_size, input_size)\n', (2261, 2286), False, 'from torch import nn\n'), ((3449, 3496), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_true', 'y_pred'], {'reduction': 'reduction'}), '(y_true, y_pred, reduction=reduction)\n', (3459, 3496), True, 'from torch.nn import functional as F\n'), ((11205, 11290), 'mdlearn.utils.get_torch_scheduler', 'get_torch_scheduler', (['self.scheduler_name', 'self.scheduler_hparams', 'self.optimizer'], {}), '(self.scheduler_name, self.scheduler_hparams, self.optimizer\n )\n', (11224, 11290), False, 'from mdlearn.utils import get_torch_optimizer, get_torch_scheduler\n'), ((14023, 14149), 'mdlearn.data.datasets.feature_vector.TimeFeatureVectorDataset', 'TimeFeatureVectorDataset', (['X', 'scalars'], {'in_gpu_memory': 'self.in_gpu_memory', 'window_size': 'self.window_size', 'horizon': 'self.horizon'}), '(X, scalars, in_gpu_memory=self.in_gpu_memory,\n window_size=self.window_size, horizon=self.horizon)\n', (14047, 14149), False, 'from mdlearn.data.datasets.feature_vector import TimeFeatureVectorDataset\n'), ((14254, 14552), 'mdlearn.data.utils.train_valid_split', 'train_valid_split', (['dataset', 'self.split_pct', 'self.split_method'], {'batch_size': 'self.batch_size', 'shuffle': 'self.shuffle', 'num_workers': 'self.num_data_workers', 'prefetch_factor': 'self.prefetch_factor', 'persistent_workers': 'self.persistent_workers', 'drop_last': '(True)', 'pin_memory': '(not self.in_gpu_memory)'}), '(dataset, self.split_pct, self.split_method, batch_size=\n self.batch_size, shuffle=self.shuffle, 
num_workers=self.\n num_data_workers, prefetch_factor=self.prefetch_factor,\n persistent_workers=self.persistent_workers, drop_last=True, pin_memory=\n not self.in_gpu_memory)\n', (14271, 14552), False, 'from mdlearn.data.utils import train_valid_split\n'), ((17818, 17936), 'mdlearn.data.datasets.feature_vector.TimeFeatureVectorDataset', 'TimeFeatureVectorDataset', (['X'], {'in_gpu_memory': 'self.in_gpu_memory', 'window_size': 'self.window_size', 'horizon': 'self.horizon'}), '(X, in_gpu_memory=self.in_gpu_memory, window_size=\n self.window_size, horizon=self.horizon)\n', (17842, 17936), False, 'from mdlearn.data.datasets.feature_vector import TimeFeatureVectorDataset\n'), ((18013, 18262), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'inference_batch_size', 'shuffle': '(False)', 'num_workers': 'self.num_data_workers', 'prefetch_factor': 'self.prefetch_factor', 'persistent_workers': 'self.persistent_workers', 'drop_last': '(False)', 'pin_memory': '(not self.in_gpu_memory)'}), '(dataset, batch_size=inference_batch_size, shuffle=False,\n num_workers=self.num_data_workers, prefetch_factor=self.prefetch_factor,\n persistent_workers=self.persistent_workers, drop_last=False, pin_memory\n =not self.in_gpu_memory)\n', (18023, 18262), False, 'from torch.utils.data import DataLoader\n'), ((20139, 20156), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (20150, 20156), False, 'from collections import defaultdict\n'), ((20974, 20995), 'numpy.concatenate', 'np.concatenate', (['preds'], {}), '(preds)\n', (20988, 20995), True, 'import numpy as np\n'), ((10951, 10974), 'wandb.watch', 'wandb.watch', (['self.model'], {}), '(self.model)\n', (10962, 10974), False, 'import wandb\n'), ((18593, 18608), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18606, 18608), False, 'import torch\n'), ((21020, 21042), 'numpy.concatenate', 'np.concatenate', (['scalar'], {}), '(scalar)\n', (21034, 21042), True, 'import numpy as np\n'), ((15307, 
15322), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15320, 15322), False, 'import torch\n'), ((15842, 15976), 'mdlearn.utils.log_checkpoint', 'log_checkpoint', (["(checkpoint_path / f'checkpoint-epoch-{epoch}.pt')", 'epoch', 'self.model', "{'optimizer': self.optimizer}", 'self.scheduler'], {}), "(checkpoint_path / f'checkpoint-epoch-{epoch}.pt', epoch,\n self.model, {'optimizer': self.optimizer}, self.scheduler)\n", (15856, 15976), False, 'from mdlearn.utils import log_checkpoint, log_latent_visualization\n'), ((16376, 16472), 'mdlearn.utils.log_latent_visualization', 'log_latent_visualization', (['z', 'paints', 'plot_path', 'epoch', 'self.plot_n_samples', 'self.plot_method'], {}), '(z, paints, plot_path, epoch, self.plot_n_samples,\n self.plot_method)\n', (16400, 16472), False, 'from mdlearn.utils import log_checkpoint, log_latent_visualization\n'), ((16885, 16903), 'wandb.log', 'wandb.log', (['metrics'], {}), '(metrics)\n', (16894, 16903), False, 'import wandb\n'), ((16798, 16828), 'wandb.Html', 'wandb.Html', (['html'], {'inject': '(False)'}), '(html, inject=False)\n', (16808, 16828), False, 'import wandb\n')] |
import csv
import math
import os
import sys
from itertools import islice
from os import path
import numpy as np
import tensorflow as tf
from PIL import Image
# Number of TFRecord shards each split is written into.
TRAINING_SHARDS = 60
VALIDATION_SHARDS = 30
# Sub-directory names (under the scratch dir) for the generated shards.
TRAIN_DIRECTORY = 'train'
VALIDATION_DIRECTORY = 'validation'
# Maps the FER2013 "Usage" column values to the FER+ output folders
# for training, validation and test.
folder_names = {'Training': 'FER2013Train',
                'PublicTest': 'FER2013Valid',
                'PrivateTest': 'FER2013Test'}
def _process_data(emotion_raw, mode):
'''
Based on https://arxiv.org/abs/1608.01041, we process the data differently depend on the training mode:
Majority: return the emotion that has the majority vote, or unknown if the count is too little.
Probability or Crossentropty: convert the count into probability distribution.abs
Multi-target: treat all emotion with 30% or more votes as equal.
'''
size = len(emotion_raw)
emotion_unknown = [0.0] * size
emotion_unknown[-2] = 1.0
# remove emotions with a single vote (outlier removal)
for i in range(size):
if emotion_raw[i] < 1.0 + sys.float_info.epsilon:
emotion_raw[i] = 0.0
sum_list = sum(emotion_raw)
emotion = [0.0] * size
if mode == 'majority':
# find the peak value of the emo_raw list
maxval = max(emotion_raw)
if maxval > 0.5 * sum_list:
emotion[np.argmax(emotion_raw)] = maxval
else:
emotion = emotion_unknown # force setting as unknown
elif (mode == 'probability') or (mode == 'crossentropy'):
sum_part = 0
count = 0
valid_emotion = True
while sum_part < 0.75 * sum_list and count < 3 and valid_emotion:
maxval = max(emotion_raw)
for i in range(size):
if emotion_raw[i] == maxval:
emotion[i] = maxval
emotion_raw[i] = 0
sum_part += emotion[i]
count += 1
if i >= 8: # unknown or non-face share same number of max votes
valid_emotion = False
if sum(
emotion) > maxval: # there have been other emotions ahead of unknown or non-face
emotion[i] = 0
count -= 1
break
if sum(
emotion) <= 0.5 * sum_list or count > 3: # less than 50% of the votes are integrated, or there are too many emotions, we'd better discard this example
emotion = emotion_unknown # force setting as unknown
elif mode == 'multi_target':
threshold = 0.3
for i in range(size):
if emotion_raw[i] >= threshold * sum_list:
emotion[i] = emotion_raw[i]
if sum(
emotion) <= 0.5 * sum_list: # less than 50% of the votes are integrated, we discard this example
emotion = emotion_unknown # set as unknown
return [float(i) / sum(emotion) for i in emotion]
def _check_or_create_dir(directory):
    """Create *directory* (and parents) unless it already exists."""
    if tf.gfile.Exists(directory):
        return
    tf.gfile.MakeDirs(directory)
def _int64_feature(value):
    """Wrap a scalar or list of ints as an int64 Feature for an Example proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_feature(value):
    """Wrap a single bytes value as a bytes Feature for an Example proto."""
    byte_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=byte_list)
def _convert_to_example(filename, image_buffer, label, synset, height, width):
    """Build an Example proto for a single image.

    Args:
      filename: string, path to an image file, e.g., '/path/to/example.JPG'
      image_buffer: string, JPEG encoding of RGB image
      label: integer, identifier for the ground truth for the network
      synset: string label blob stored alongside the image (here: the
        comma-joined FER+ vote counts passed by the caller)
      height: integer, image height in pixels
      width: integer, image width in pixels
    Returns:
      Example proto
    """
    feature_map = {
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/colorspace': _bytes_feature(b'RGB'),
        'image/channels': _int64_feature(3),
        'image/class/label': _int64_feature(label),
        'image/class/synset': _bytes_feature(bytes(synset, 'ascii')),
        'image/format': _bytes_feature(b'JPEG'),
        'image/filename': _bytes_feature(
            bytes(os.path.basename(filename), 'ascii')),
        'image/encoded': _bytes_feature(image_buffer),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature_map))
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
return filename.endswith('png')
def _process_image(filename, coder):
    """Process a single image file.

    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
      height: integer, image height in pixels.
      width: integer, image width in pixels.
    """
    # Read the raw image bytes.
    with tf.gfile.FastGFile(filename, 'rb') as image_file:
        image_data = image_file.read()
    # PNG sources are transcoded to JPEG first.
    if _is_png(filename):
        tf.logging.info('Converting PNG to JPEG for %s' % filename)
        image_data = coder.png_to_jpeg(image_data)
    # Decode so we can sanity-check the dimensions and channel count.
    decoded = coder.decode_jpeg(image_data)
    assert len(decoded.shape) == 3
    assert decoded.shape[2] == 3
    height, width = decoded.shape[0], decoded.shape[1]
    return image_data, height, width
def _process_image_files_batch(coder, output_file, filenames, synsets, labels):
    """Processes and saves list of images as TFRecords.

    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      output_file: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      synsets: list of strings; each string is the comma-joined FER+ vote
        counts for the matching image
      labels: map of string to integer; id for all synset labels
    """
    # Use the writer as a context manager so the record file is closed (and
    # flushed) even if image processing raises part-way through; the previous
    # version leaked the writer on any exception.
    with tf.python_io.TFRecordWriter(output_file) as writer:
        for filename, synset in zip(filenames, synsets):
            image_buffer, height, width = _process_image(filename, coder)
            label_list = _process_data(
                list(int(x) for x in synset.split(',')), 'majority')
            # Labels are 1-based: argmax indices 8/9 map to 9/10 below.
            label = np.argmax(label_list) + 1
            if label > len(labels):
                # Skip unknown(9) or no-face(10).
                continue
            example = _convert_to_example(filename, image_buffer, label,
                                          synset, height, width)
            writer.write(example.SerializeToString())
def _process_dataset(filenames, synsets, labels, output_directory, prefix,
                     num_shards):
    """Processes and saves list of images as TFRecords.

    Args:
      filenames: list of strings; each string is a path to an image file
      synsets: list of strings; per-image label blobs aligned with filenames
      labels: map of string to integer; id for all synset labels
      output_directory: path where output files should be created
      prefix: string; prefix for each file
      num_shards: number of chunks to split the filenames into
    Returns:
      files: list of tf-record filepaths created from processing the dataset.
    """
    _check_or_create_dir(output_directory)
    coder = ImageCoder()
    shard_size = int(math.ceil(len(filenames) / num_shards))
    files = []
    for shard in range(num_shards):
        lo, hi = shard * shard_size, (shard + 1) * shard_size
        output_file = os.path.join(
            output_directory, '%s-%.5d-of-%.5d' % (prefix, shard, num_shards))
        _process_image_files_batch(coder, output_file, filenames[lo:hi],
                                   synsets[lo:hi], labels)
        tf.logging.info('Finished writing file: %s' % output_file)
        files.append(output_file)
    return files
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities."""

    def __init__(self):
        # One shared Session executes all of the coding ops built below.
        self._sess = tf.Session()

        # Graph: PNG bytes -> decoded image -> JPEG bytes.
        self._png_data = tf.placeholder(dtype=tf.string)
        decoded_png = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(decoded_png, format='rgb',
                                                  quality=100)

        # Graph: CMYK JPEG bytes -> RGB JPEG bytes.
        self._cmyk_data = tf.placeholder(dtype=tf.string)
        decoded_cmyk = tf.image.decode_jpeg(self._cmyk_data, channels=0)
        self._cmyk_to_rgb = tf.image.encode_jpeg(decoded_cmyk, format='rgb',
                                                  quality=100)

        # Graph: JPEG bytes -> decoded 3-channel image tensor.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data,
                                                  channels=3)

    def png_to_jpeg(self, image_data):
        """Re-encode PNG bytes as JPEG bytes."""
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})

    def cmyk_to_rgb(self, image_data):
        """Re-encode a CMYK JPEG as an RGB JPEG."""
        return self._sess.run(self._cmyk_to_rgb,
                              feed_dict={self._cmyk_data: image_data})

    def decode_jpeg(self, image_data):
        """Decode JPEG bytes into an HxWx3 image array."""
        decoded = self._sess.run(self._decode_jpeg,
                                 feed_dict={self._decode_jpeg_data: image_data})
        assert len(decoded.shape) == 3
        assert decoded.shape[2] == 3
        return decoded
class DataConverter(object):
    """Converts the raw FER/FER+ CSV data into image folders and TFRecords.

    `preprocess_fer` materializes the 48x48 pixel blobs from the original
    FER2013 csv as PNG files, bucketed into the FER+ train/validation/test
    folders.  `to_tf_records` then walks those folders and serializes the
    images (with their FER+ vote-count labels) into sharded TFRecord files.
    """
    def __init__(self):
        pass

    def str_to_image(self, image_blob):
        ''' Convert a space-separated pixel string blob to a PIL image object. '''
        image_string = image_blob.split(' ')
        # FER2013 images are 48x48 single-channel, stored as 2304 pixel values.
        image_data = np.asarray(image_string, dtype=np.uint8).reshape(48, 48)
        return Image.fromarray(image_data)

    def preprocess_fer(self, base_folder, ferplus_path, fer_path):
        """Generate the FER+ PNG images from the raw FER2013 csv.

        base_folder: destination root; one sub-folder per split is created.
        ferplus_path: fer2013new.csv -- column 1 holds the target file name
            (empty when the image was dropped from FER+).
        fer_path: the original fer2013.csv -- column 1 is the pixel blob,
            column 2 the FER2013 "Usage" split.
        """
        print("Start generating ferplus images.")
        # Create one output folder per split (FER2013Train/Valid/Test).
        for key, value in folder_names.items():
            folder_path = os.path.join(base_folder, value)
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
        # Read all FER+ rows up front; rows are assumed to align 1:1 with
        # the FER csv iterated below -- TODO confirm for other csv versions.
        ferplus_entries = []
        with open(ferplus_path, 'r') as csvfile:
            ferplus_rows = csv.reader(csvfile, delimiter=',')
            # islice(..., 1, None) skips the csv header row.
            for row in islice(ferplus_rows, 1, None):
                ferplus_entries.append(row)
        index = 0
        with open(fer_path, 'r') as csvfile:
            fer_rows = csv.reader(csvfile, delimiter=',')
            for row in islice(fer_rows, 1, None):
                ferplus_row = ferplus_entries[index]
                file_name = ferplus_row[1].strip()
                # An empty file name means the image was discarded in FER+.
                if len(file_name) > 0:
                    image = self.str_to_image(row[1])
                    image_path = os.path.join(base_folder, folder_names[row[2]],
                                              file_name)
                    image.save(image_path, compress_level=0)
                index += 1
        print("Done...")

    def to_tf_records(self, raw_data_dir, local_scratch_dir,
                      train_names, validation_names, ferplus_path):
        """Serialize the FER+ image folders into sharded TFRecord files.

        raw_data_dir: root containing the FER+ split folders (e.g. 'fer').
        local_scratch_dir: output root for the TFRecord shards.
        train_names / validation_names: sets of folder names to treat as
            the training / validation splits.
        ferplus_path: fer2013new.csv, used to recover the vote counts
            ("synsets") for each file name.
        Returns (train_record_paths, validation_record_paths).
        """
        # file name -> comma-joined FER+ vote counts ("synset" blob).
        ferplus_entries = {}
        with open(ferplus_path, 'r') as csvfile:
            ferplus_rows = csv.reader(csvfile, delimiter=',')
            for row in islice(ferplus_rows, 1, None):
                k = row[1]
                v = ','.join(row[2:])
                ferplus_entries[k] = v
        # Analyze pics.
        train_files = []
        validation_files = []
        train_synsets = []
        validation_synsets = []
        for root, dirs, files in os.walk(raw_data_dir):
            # Only leaf directories (no sub-dirs) hold images.
            if len(dirs) != 0:
                continue
            root_parts = root.split('/')
            # NOTE(review): assumes POSIX separators and that raw_data_dir is
            # a single path component (e.g. 'fer'), so leaf dirs split into
            # exactly two parts -- verify before reusing on other layouts.
            assert len(root_parts) == 2
            bucket_name = root_parts[1]
            if bucket_name in train_names:
                for file_name in files:
                    if file_name.endswith('png'):
                        train_files.append(path.join(root, file_name))
                        train_synsets.append(ferplus_entries[file_name])
            if bucket_name in validation_names:
                for file_name in files:
                    if file_name.endswith('png'):
                        validation_files.append(path.join(root, file_name))
                        validation_synsets.append(ferplus_entries[file_name])
        # Create unique ids for all synsets
        labels = {
            'neutral': 0,
            'happiness': 1,
            'surprise': 2,
            'sadness': 3,
            'anger': 4,
            'disgust': 5,
            'fear': 6,
            'contempt': 7
        }
        # Create tf_record data
        train_records = _process_dataset(
            train_files, train_synsets, labels,
            os.path.join(local_scratch_dir, TRAIN_DIRECTORY),
            TRAIN_DIRECTORY, TRAINING_SHARDS)
        validation_records = _process_dataset(
            validation_files, validation_synsets, labels,
            os.path.join(local_scratch_dir, VALIDATION_DIRECTORY),
            VALIDATION_DIRECTORY, VALIDATION_SHARDS)
        return train_records, validation_records
if __name__ == '__main__':
    converter = DataConverter()
    # One-time step: regenerate the FER+ PNG images from the raw CSVs.
    # converter.preprocess_fer('fer', 'fer/fer2013new.csv',
    #                          'fer/fer2013/fer2013.csv')
    train_records, validation_records = converter.to_tf_records(
        'fer', 'fer_dataset', {'FER2013Train'}, {'FER2013Valid'},
        'fer/fer2013new.csv'
    )
    print(train_records)
    print(validation_records)
| [
"tensorflow.gfile.FastGFile",
"tensorflow.train.Int64List",
"tensorflow.gfile.MakeDirs",
"os.walk",
"os.path.exists",
"tensorflow.gfile.Exists",
"tensorflow.Session",
"tensorflow.placeholder",
"numpy.asarray",
"tensorflow.python_io.TFRecordWriter",
"csv.reader",
"numpy.argmax",
"tensorflow.t... | [((6702, 6742), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_file'], {}), '(output_file)\n', (6729, 6742), True, 'import tensorflow as tf\n'), ((3134, 3160), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['directory'], {}), '(directory)\n', (3149, 3160), True, 'import tensorflow as tf\n'), ((3170, 3198), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['directory'], {}), '(directory)\n', (3187, 3198), True, 'import tensorflow as tf\n'), ((5639, 5673), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (5657, 5673), True, 'import tensorflow as tf\n'), ((5801, 5860), 'tensorflow.logging.info', 'tf.logging.info', (["('Converting PNG to JPEG for %s' % filename)"], {}), "('Converting PNG to JPEG for %s' % filename)\n", (5816, 5860), True, 'import tensorflow as tf\n'), ((8377, 8456), 'os.path.join', 'os.path.join', (['output_directory', "('%s-%.5d-of-%.5d' % (prefix, shard, num_shards))"], {}), "(output_directory, '%s-%.5d-of-%.5d' % (prefix, shard, num_shards))\n", (8389, 8456), False, 'import os\n'), ((8604, 8662), 'tensorflow.logging.info', 'tf.logging.info', (["('Finished writing file: %s' % output_file)"], {}), "('Finished writing file: %s' % output_file)\n", (8619, 8662), True, 'import tensorflow as tf\n'), ((8925, 8937), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8935, 8937), True, 'import tensorflow as tf\n'), ((9027, 9058), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (9041, 9058), True, 'import tensorflow as tf\n'), ((9075, 9122), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['self._png_data'], {'channels': '(3)'}), '(self._png_data, channels=3)\n', (9094, 9122), True, 'import tensorflow as tf\n'), ((9151, 9205), 'tensorflow.image.encode_jpeg', 'tf.image.encode_jpeg', (['image'], {'format': '"""rgb"""', 'quality': '(100)'}), "(image, format='rgb', quality=100)\n", (9171, 9205), 
True, 'import tensorflow as tf\n'), ((9360, 9391), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (9374, 9391), True, 'import tensorflow as tf\n'), ((9408, 9457), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['self._cmyk_data'], {'channels': '(0)'}), '(self._cmyk_data, channels=0)\n', (9428, 9457), True, 'import tensorflow as tf\n'), ((9486, 9540), 'tensorflow.image.encode_jpeg', 'tf.image.encode_jpeg', (['image'], {'format': '"""rgb"""', 'quality': '(100)'}), "(image, format='rgb', quality=100)\n", (9506, 9540), True, 'import tensorflow as tf\n'), ((9683, 9714), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (9697, 9714), True, 'import tensorflow as tf\n'), ((9743, 9799), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['self._decode_jpeg_data'], {'channels': '(3)'}), '(self._decode_jpeg_data, channels=3)\n', (9763, 9799), True, 'import tensorflow as tf\n'), ((10735, 10762), 'PIL.Image.fromarray', 'Image.fromarray', (['image_data'], {}), '(image_data)\n', (10750, 10762), False, 'from PIL import Image\n'), ((12539, 12560), 'os.walk', 'os.walk', (['raw_data_dir'], {}), '(raw_data_dir)\n', (12546, 12560), False, 'import os\n'), ((3394, 3425), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'value'}), '(value=value)\n', (3412, 3425), True, 'import tensorflow as tf\n'), ((3562, 3595), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (3580, 3595), True, 'import tensorflow as tf\n'), ((7007, 7028), 'numpy.argmax', 'np.argmax', (['label_list'], {}), '(label_list)\n', (7016, 7028), True, 'import numpy as np\n'), ((10956, 10988), 'os.path.join', 'os.path.join', (['base_folder', 'value'], {}), '(base_folder, value)\n', (10968, 10988), False, 'import os\n'), ((11184, 11218), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (11194, 11218), 
False, 'import csv\n'), ((11242, 11271), 'itertools.islice', 'islice', (['ferplus_rows', '(1)', 'None'], {}), '(ferplus_rows, 1, None)\n', (11248, 11271), False, 'from itertools import islice\n'), ((11404, 11438), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (11414, 11438), False, 'import csv\n'), ((11462, 11487), 'itertools.islice', 'islice', (['fer_rows', '(1)', 'None'], {}), '(fer_rows, 1, None)\n', (11468, 11487), False, 'from itertools import islice\n'), ((12173, 12207), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (12183, 12207), False, 'import csv\n'), ((12231, 12260), 'itertools.islice', 'islice', (['ferplus_rows', '(1)', 'None'], {}), '(ferplus_rows, 1, None)\n', (12237, 12260), False, 'from itertools import islice\n'), ((13723, 13771), 'os.path.join', 'os.path.join', (['local_scratch_dir', 'TRAIN_DIRECTORY'], {}), '(local_scratch_dir, TRAIN_DIRECTORY)\n', (13735, 13771), False, 'import os\n'), ((13936, 13989), 'os.path.join', 'os.path.join', (['local_scratch_dir', 'VALIDATION_DIRECTORY'], {}), '(local_scratch_dir, VALIDATION_DIRECTORY)\n', (13948, 13989), False, 'import os\n'), ((1374, 1396), 'numpy.argmax', 'np.argmax', (['emotion_raw'], {}), '(emotion_raw)\n', (1383, 1396), True, 'import numpy as np\n'), ((10663, 10703), 'numpy.asarray', 'np.asarray', (['image_string'], {'dtype': 'np.uint8'}), '(image_string, dtype=np.uint8)\n', (10673, 10703), True, 'import numpy as np\n'), ((11008, 11035), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (11022, 11035), False, 'import os\n'), ((11053, 11077), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (11064, 11077), False, 'import os\n'), ((11719, 11777), 'os.path.join', 'os.path.join', (['base_folder', 'folder_names[row[2]]', 'file_name'], {}), '(base_folder, folder_names[row[2]], file_name)\n', (11731, 11777), False, 'import os\n'), ((12915, 12941), 
'os.path.join', 'path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (12924, 12941), False, 'from os import path\n'), ((13202, 13228), 'os.path.join', 'path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (13211, 13228), False, 'from os import path\n'), ((4722, 4748), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (4738, 4748), False, 'import os\n')] |
import Examples.study.paretto_front as front
import Examples.metadata_manager_results as results_manager
import Source.genetic_algorithm.fitting_functions as fit_fun
from Source.system_evaluator_utils import pretty_print
import Source.io_util as io
import statistics as stats
import numpy as np
import os
import sys
def print_metrics_model(id, r):
    """Pretty-print accuracy/size/throughput metrics for one evaluated model.

    `r` maps classifier names to result records exposing `accuracy`,
    `params`, `instances` and `time` attributes; entries whose name
    contains 'trigger' are skipped in the per-classifier breakdowns.
    """
    non_trigger = [c for c in r if 'trigger' not in c]
    print()
    print(id)
    print("\t Test accuracy: %f" % r['system'].accuracy)
    for classifier in non_trigger:
        print("\t\t Test accuracy %s: %f" % (classifier, r[classifier].accuracy))
    print("\t Model parameters: %f" % (r['system'].params/1e6))
    for classifier in non_trigger:
        print("\t\t Model parameters %s: %f * 1e6" % (classifier, r[classifier].params/1e6))
    print("\t Instances processed: %d" % (r['system'].instances))
    for classifier in non_trigger:
        print("\t\tInstances processed %s: %d" % (classifier, r[classifier].instances))
    print("\t Dataset Evaluation time: %f s" % (r['system'].time))
if __name__ == "__main__":
    experiment = 'genetic_algorithm_multinode'
    # Query used to locate the G.A. runs of interest in the results metadata.
    query_params = {
        "dataset": "sota_models_fashion-mnist-32-dev_validation",
        'iterations': 200,
        'a': [
            0.7,
            0.15,
            0.15,
        ],
        'offspring': 50,
    }

    num = 1  # how many of the most recent matching runs to analyze
    phase = "test"

    # 1) G.A. Chain ensembles
    GA_results_metadata_file = os.path.join(os.environ['FCM'],
                                            'Examples',
                                            'compute',
                                            experiment,
                                            'results',
                                            'metadata.json')

    # Get evaluation results from query
    GA_res_loc = results_manager.get_results_by_params(GA_results_metadata_file, query_params)
    GA_res_loc = GA_res_loc[-num:]
    GA_res_loc = [os.path.join(path, 'results_ensembles.pkl') for path in GA_res_loc]

    # 2) Single Models: keep results with fewer than 3 components (plain models)
    models = dict([(k, r) for k, r in io.read_pickle(GA_res_loc[0]).items() if len(r.val if phase == "val" else r.test) < 3])
    models_front = front.get_front_time_accuracy(models, phase=phase)
    sorted_models_front = front.sort_results_by_time(models_front, phase=phase)
    # Most accurate single network = last entry of the time-sorted front.
    accurate_NN_result = models[sorted_models_front[-1][0]].test if phase == "test" \
        else models[sorted_models_front[-1][0]].val

    acc = accurate_NN_result['system'].accuracy
    time = accurate_NN_result['system'].time
    params = accurate_NN_result['system'].params
    print_metrics_model(sorted_models_front[-1][0], accurate_NN_result)

    from Source.genetic_algorithm.fitting_functions import make_limits_dict, update_limit_dict
    limit = make_limits_dict()
    update_limit_dict(limit, models, phase=phase)

    # 3) Speedup and Parameter decrease
    speedup = []
    param_incrase = []
    acc_increase = []
    for res_loc in GA_res_loc:
        GA_chains = io.read_pickle(res_loc)
        list_chain_res = list(GA_chains.values())
        list_chain_keys = list(GA_chains.keys())
        # Negate: fitness values are maximized, argsort below picks the best first.
        list_fit_vals = np.array(fit_fun.f2_time_param_penalization(list_chain_res, [0.7, 0.15, 0.15], limit, phase))*-1
        # Renamed from `sorted`, which shadowed the builtin of the same name.
        rank = np.argsort(list_fit_vals)
        fittest_model_id = list_chain_keys[rank[0]]
        fittest_model_result = GA_chains[fittest_model_id].val if phase == "val" else GA_chains[fittest_model_id].test
        print_metrics_model(fittest_model_id, fittest_model_result)

        # Store improvements relative to the most accurate single network
        speedup.append(time/fittest_model_result['system'].time)
        param_incrase.append(fittest_model_result['system'].params/params)
        acc_increase.append(fittest_model_result['system'].accuracy-acc)

    if len(GA_res_loc) > 0:
        print("\nImprovements:")
        print("\tAvg speedup:", stats.mean(speedup))
        print("\tAvg param incrase:", stats.mean(param_incrase))
        print("\tAvg acc increase:", stats.mean(acc_increase))
    else:
        print("No data available")
| [
"statistics.mean",
"Source.genetic_algorithm.fitting_functions.make_limits_dict",
"Examples.study.paretto_front.get_front_time_accuracy",
"Source.genetic_algorithm.fitting_functions.update_limit_dict",
"Source.genetic_algorithm.fitting_functions.f2_time_param_penalization",
"Examples.metadata_manager_resu... | [((1497, 1595), 'os.path.join', 'os.path.join', (["os.environ['FCM']", '"""Examples"""', '"""compute"""', 'experiment', '"""results"""', '"""metadata.json"""'], {}), "(os.environ['FCM'], 'Examples', 'compute', experiment,\n 'results', 'metadata.json')\n", (1509, 1595), False, 'import os\n'), ((1870, 1947), 'Examples.metadata_manager_results.get_results_by_params', 'results_manager.get_results_by_params', (['GA_results_metadata_file', 'query_params'], {}), '(GA_results_metadata_file, query_params)\n', (1907, 1947), True, 'import Examples.metadata_manager_results as results_manager\n'), ((2238, 2288), 'Examples.study.paretto_front.get_front_time_accuracy', 'front.get_front_time_accuracy', (['models'], {'phase': 'phase'}), '(models, phase=phase)\n', (2267, 2288), True, 'import Examples.study.paretto_front as front\n'), ((2315, 2368), 'Examples.study.paretto_front.sort_results_by_time', 'front.sort_results_by_time', (['models_front'], {'phase': 'phase'}), '(models_front, phase=phase)\n', (2341, 2368), True, 'import Examples.study.paretto_front as front\n'), ((2846, 2864), 'Source.genetic_algorithm.fitting_functions.make_limits_dict', 'make_limits_dict', ([], {}), '()\n', (2862, 2864), False, 'from Source.genetic_algorithm.fitting_functions import make_limits_dict, update_limit_dict\n'), ((2869, 2914), 'Source.genetic_algorithm.fitting_functions.update_limit_dict', 'update_limit_dict', (['limit', 'models'], {'phase': 'phase'}), '(limit, models, phase=phase)\n', (2886, 2914), False, 'from Source.genetic_algorithm.fitting_functions import make_limits_dict, update_limit_dict\n'), ((2001, 2044), 'os.path.join', 'os.path.join', (['path', '"""results_ensembles.pkl"""'], {}), "(path, 'results_ensembles.pkl')\n", (2013, 2044), False, 'import os\n'), ((3070, 3093), 'Source.io_util.read_pickle', 'io.read_pickle', (['res_loc'], {}), '(res_loc)\n', (3084, 3093), True, 'import Source.io_util as io\n'), ((3332, 3357), 'numpy.argsort', 
'np.argsort', (['list_fit_vals'], {}), '(list_fit_vals)\n', (3342, 3357), True, 'import numpy as np\n'), ((3936, 3955), 'statistics.mean', 'stats.mean', (['speedup'], {}), '(speedup)\n', (3946, 3955), True, 'import statistics as stats\n'), ((3995, 4020), 'statistics.mean', 'stats.mean', (['param_incrase'], {}), '(param_incrase)\n', (4005, 4020), True, 'import statistics as stats\n'), ((4059, 4083), 'statistics.mean', 'stats.mean', (['acc_increase'], {}), '(acc_increase)\n', (4069, 4083), True, 'import statistics as stats\n'), ((3226, 3313), 'Source.genetic_algorithm.fitting_functions.f2_time_param_penalization', 'fit_fun.f2_time_param_penalization', (['list_chain_res', '[0.7, 0.15, 0.15]', 'limit', 'phase'], {}), '(list_chain_res, [0.7, 0.15, 0.15], limit,\n phase)\n', (3260, 3313), True, 'import Source.genetic_algorithm.fitting_functions as fit_fun\n'), ((2131, 2160), 'Source.io_util.read_pickle', 'io.read_pickle', (['GA_res_loc[0]'], {}), '(GA_res_loc[0])\n', (2145, 2160), True, 'import Source.io_util as io\n')] |
import numpy as np
import pandas as pd
import os
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
from tensorflow.python.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications.vgg16 import preprocess_input
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import Adam
from PIL import Image
import warnings
# Silence noisy framework deprecation warnings for this demo script.
warnings.filterwarnings('ignore')
from tensorflow.python.keras.models import load_model

# Load the trained tumor-classification network.
model = load_model('/root/glioAI/glioai/models/tumor_prediction.h5')

# Route to any of the labeled images that the model hasn't seen before.
img_path = ('/root/glioAI/data/tumortest/8 no.jpg')

# Resize to the network's 224x224 input and add a batch dimension.
img = tf.keras.preprocessing.image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
img_data = preprocess_input(x)

# make prediction
rs = model.predict(img_data)
print(rs)

# rs[0][0] is treated as the "no tumor" score by the threshold below.
# NOTE(review): the class-index meaning is assumed from this threshold --
# confirm against the class ordering used when the model was trained.
if rs[0][0] >= 0.9:
    prediction = 'This image is NOT tumorous.'
else:
    # `else` instead of the original `elif rs[0][0] < 0.9`: that branch was
    # always true when reached, and a NaN score would have left `prediction`
    # unbound (NameError at the print below).
    prediction = 'Warning! This image IS tumorous.'

print(prediction)
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.python.keras.preprocessing.image.img_to_array",
"tensorflow.python.keras.applications.vgg16.preprocess_input",
"numpy.expand_dims",
"tensorflow.python.keras.models.load_model",
"warnings.filterwarnings"
] | [((596, 629), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (619, 629), False, 'import warnings\n'), ((692, 752), 'tensorflow.python.keras.models.load_model', 'load_model', (['"""/root/glioAI/glioai/models/tumor_prediction.h5"""'], {}), "('/root/glioAI/glioai/models/tumor_prediction.h5')\n", (702, 752), False, 'from tensorflow.python.keras.models import load_model\n'), ((890, 961), 'tensorflow.keras.preprocessing.image.load_img', 'tf.keras.preprocessing.image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (927, 961), True, 'import tensorflow as tf\n'), ((965, 988), 'tensorflow.python.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (983, 988), False, 'from tensorflow.python.keras.preprocessing import image\n'), ((993, 1018), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1007, 1018), True, 'import numpy as np\n'), ((1029, 1048), 'tensorflow.python.keras.applications.vgg16.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (1045, 1048), False, 'from tensorflow.python.keras.applications.vgg16 import preprocess_input\n')] |
import cv2
import yaml
import numpy as np
def load_yaml(load_path):
    """Read a YAML file and return the parsed content.

    NOTE(review): yaml.Loader can instantiate arbitrary Python objects;
    only feed trusted files to this function (yaml.safe_load would be the
    safer choice for untrusted input).
    """
    with open(load_path, 'r') as stream:
        content = yaml.load(stream, Loader=yaml.Loader)
    return content
def pad_input_image(img, max_steps):
    """Pad the image at the bottom/right so both dimensions are multiples
    of `max_steps`.

    The padding is filled with the mean colour of the image.  Returns the
    padded image and (orig_h, orig_w, pad_h, pad_w) so the padding can be
    undone later.
    """
    height, width, _ = img.shape
    # amount needed to reach the next multiple of max_steps (0 if aligned)
    pad_h = (max_steps - height % max_steps) % max_steps
    pad_w = (max_steps - width % max_steps) % max_steps
    fill_color = np.mean(img, axis=(0, 1)).astype(np.uint8)
    padded = cv2.copyMakeBorder(img, 0, pad_h, 0, pad_w,
                                cv2.BORDER_CONSTANT, value=fill_color.tolist())
    return padded, (height, width, pad_h, pad_w)
def recover_pad_output(outputs, pad_params):
    """Rescale the first 14 columns (7 x/y point pairs) of `outputs` from
    padded-image coordinates back to the original image.

    `outputs` is modified in place and also returned.
    """
    img_h, img_w, img_pad_h, img_pad_w = pad_params
    scale = [(img_pad_w + img_w) / img_w, (img_pad_h + img_h) / img_h]
    points = np.reshape(outputs[:, :14], [-1, 7, 2]) * scale
    outputs[:, :14] = np.reshape(points, [-1, 14])
    return outputs
"numpy.mean",
"numpy.reshape",
"yaml.load"
] | [((1075, 1107), 'numpy.reshape', 'np.reshape', (['recover_xy', '[-1, 14]'], {}), '(recover_xy, [-1, 14])\n', (1085, 1107), True, 'import numpy as np\n'), ((147, 179), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.Loader'}), '(f, Loader=yaml.Loader)\n', (156, 179), False, 'import yaml\n'), ((942, 981), 'numpy.reshape', 'np.reshape', (['outputs[:, :14]', '[-1, 7, 2]'], {}), '(outputs[:, :14], [-1, 7, 2])\n', (952, 981), True, 'import numpy as np\n'), ((521, 546), 'numpy.mean', 'np.mean', (['img'], {'axis': '(0, 1)'}), '(img, axis=(0, 1))\n', (528, 546), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
This module holds functions processing the results
of an oemof.solph optimisation model, that are used by methods of the classes
`q100opt.scenario_tools.DistrictScenario` and
`q100opt.scenario_tools.ParetoFront`.
Please use this module with care. It is work in progress!
Contact: <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import logging
import numpy as np
import oemof.solph as solph
import pandas as pd
from oemof.solph import views
def analyse_emissions(results):
    """Run the flow-attribute analysis for the emission factors.

    Parameters
    ----------
    results : dict
        Results of oemof.solph Energysystem.

    Returns
    -------
    dict : Detailed emission analysis with the keys 'sum' and 'sequences'.
    """
    return analyse_flow_attribute(results, keyword='emission_factor')
def analyse_costs(results):
    """
    Performs a cost analysis.

    Parameters
    ----------
    results : dict
        Results of oemof.solph Energysystem.

    Returns
    -------
    dict : Table with detailed cost summary,
        containing 3 keys: 'capex', 'opex' and 'all'.
    """
    costs = {
        'capex': analyse_capex(results),
        'opex': analyse_flow_attribute(results, keyword='variable_costs'),
    }
    capex = pd.concat({'capex': costs['capex']}, names=['cost_type'])
    opex = pd.concat({'opex': costs['opex']['sum']}, names=['cost_type'])
    # renamed from `all`, which shadowed the builtin of the same name
    total_costs = pd.concat([capex, opex])
    costs.update({'all': total_costs})
    return costs
def analyse_capex(results):
    """
    Analysis and Summary of the investment costs of the EnergySystem.

    Parameters
    ----------
    results : q100opt.DistrictScenario.results
        The results Dictionary of the District Scenario class
        (a dictionary containing the processed oemof.solph.results
        with the key 'main' and the oemof.solph parameters
        with the key 'param'.)

    Returns
    -------
    pd.DataFrame :
        The table contains both the parameter and result value
        of the Investment objects.

        - Columns: 'ep_costs', 'offset', 'invest_value' and 'costs'
        - Index:
            - First level: 'converter' or 'storage'
            - Second level: label of the corresponding oemof.solph component
    """
    # energy converter units (flows leaving a Transformer or Source)
    df_converter = get_invest_converter_table(results)
    df_converter['category'] = 'converter'
    # energy storage units
    df_storages = get_invest_storage_table(results)
    df_storages['category'] = 'storage'
    df_result = pd.concat([df_converter, df_storages])
    df_result.index = pd.MultiIndex.from_frame(
        df_result[['category', 'label']])
    # FIX: drop by column names; the old `drop(df_result[['category',
    # 'label']], axis=1)` passed a DataFrame as labels and only worked by
    # accident (iterating a frame yields its column names).
    df_result.drop(columns=['category', 'label'], inplace=True)
    return df_result
def get_invest_converter(results):
    """Return the result keys of investment flows of converter units.

    Only flows leaving a solph.Transformer or a solph.Source are
    considered.
    """
    converter_keys = []
    for flow_key in results.keys():
        if not hasattr(results[flow_key]['scalars'], 'invest'):
            continue
        if isinstance(flow_key[0], (solph.Transformer, solph.Source)):
            converter_keys.append(flow_key)
    return converter_keys
def get_invest_storages(results):
    """Return the result keys of storage investments.

    Only the investment of the solph.components.GenericStorage itself is
    considered (key[1] is None), not an investment in its in-/outflow.
    """
    storage_keys = []
    for key, res in results.items():
        if key[1] is not None:
            continue
        if not hasattr(res['scalars'], 'invest'):
            continue
        if isinstance(key[0], solph.components.GenericStorage):
            storage_keys.append(key)
    return storage_keys
def get_invest_converter_table(results):
    """Summarise the investment flows of energy converter units.

    Converter units are all flows coming from a solph.Transformer or a
    solph.Source.

    Parameters
    ----------
    results : q100opt.DistrictScenario.results
        Dictionary with the processed oemof.solph results under 'main'
        and the oemof.solph parameters under 'param'.

    Returns
    -------
    pd.DataFrame : Parameter and result values of the Investment objects
        with the columns 'label', 'ep_costs', 'offset', 'invest_value'
        and 'costs'.
    """
    converter_keys = get_invest_converter(results['main'])
    return get_invest_table(results, converter_keys)
def get_invest_storage_table(results):
    """Summarise the investments of all GenericStorage units.

    Parameters
    ----------
    results : q100opt.DistrictScenario.results
        Dictionary with the processed oemof.solph results under 'main'
        and the oemof.solph parameters under 'param'.

    Returns
    -------
    pd.DataFrame : Parameter and result values of the Investment objects
        with the columns 'label', 'ep_costs', 'offset', 'invest_value'
        and 'costs'.
    """
    storage_keys = get_invest_storages(results['main'])
    return get_invest_table(results, storage_keys)
def get_invest_table(results, keys):
    """Collect investment parameters and results for a list of result keys.

    Parameters
    ----------
    results : dict
        oemof.solph results dictionary with the sub-dicts 'main' and
        'param'.
    keys : list
        Keys of flows and nodes; the label of key[0] is used as 'label'.

    Returns
    -------
    pd.DataFrame : Columns 'label', 'ep_costs', 'offset', 'invest_value'
        and 'costs', where costs = invest * ep_costs plus the offset if
        an investment was made (sign of the invest value).
    """
    table = pd.DataFrame({
        'label': [k[0].label for k in keys],
        'ep_costs': [results['param'][k]['scalars']['investment_ep_costs']
                     for k in keys],
        'offset': [results['param'][k]['scalars']['investment_offset']
                   for k in keys],
        'invest_value': [results['main'][k]['scalars']['invest']
                         for k in keys],
    })
    table['costs'] = (table['invest_value'] * table['ep_costs']
                      + table['offset'] * np.sign(table['invest_value']))
    return table
def analyse_flow_attribute(des_results, keyword='variable_costs'):
    """
    Analysis and Summary of flow attribute keyword of the EnergySystem.

    Parameters
    ----------
    des_results : q100opt.DistrictScenario.results
        The results Dictionary of the District Scenario class
        (a dictionary containing the processed oemof.solph.results
        with the key 'main' and the oemof.solph parameters
        with the key 'param'.)
    keyword : str
        Keyword for that values are analyzed,
        e.g. variable_costs or emission_factor.

    Returns
    -------
    dict : All relevant data for the given attribute.
        Keys of dictionary: 'sum' and 'sequences'.
    """
    param = des_results['param']
    results = des_results['main']
    # all flows whose `keyword` attribute is non-zero (scalar or series)
    var_cost_flows = get_attr_flows(des_results, key=keyword)
    # empty frame carrying the time index of the optimisation results
    df = pd.DataFrame(index=next(iter(results.values()))['sequences'].index)
    len_index = len(df)
    # define columns of result dataframe
    if keyword == 'variable_costs':
        key_product = 'costs'
    elif keyword == 'emission_factor':
        key_product = 'emissions'
    else:
        key_product = 'product'
    for flow in var_cost_flows:
        # classify the flow by the solph node type it is attached to
        if isinstance(flow[0], solph.Source):
            category = 'source'
            label = flow[0].label
        elif isinstance(flow[0], solph.Transformer):
            category = 'converter'
            label = flow[0].label
        elif isinstance(flow[1], solph.Sink):
            category = 'sink'
            label = flow[1].label
        else:
            label = flow[0].label + '-' + flow[1].label
            category = 'unknown'
            logging.warning(
                "Flow/Node category of {} not specified!".format(label)
            )
        # 1) attribute value: a scalar (broadcast over the index) or a
        #    time series truncated to the result index length
        if keyword in param[flow]['scalars'].keys():
            df[(category, label, keyword)] = param[flow]['scalars'][keyword]
        else:
            df[(category, label, keyword)] = \
                param[flow]['sequences'][keyword].values[:len_index]
        # 2) get flow results
        df[(category, label, 'flow')] = results[flow]["sequences"].values
        # 3) calc a * b
        df[(category, label, key_product)] = \
            df[(category, label, keyword)] * df[(category, label, 'flow')]
    df.columns = pd.MultiIndex.from_tuples(
        list(df.columns), names=('category', 'label', 'value')
    )
    df.sort_index(axis=1, inplace=True)
    # column sums of the flow and product columns, reshaped so each
    # (category, label) row carries its 'flow' and product value
    df_sum = df.iloc[:, df.columns.isin(['flow', key_product], level=2)].sum()
    df_summary = df_sum.unstack(level=2)
    # flow-weighted average attribute value (total product / total flow)
    df_summary['var_' + key_product + '_av_flow'] = \
        df_summary[key_product] / df_summary['flow']
    # plain (unweighted) time average of the attribute parameter itself
    df_mean = \
        df.iloc[:, df.columns.get_level_values(2) == keyword].mean().unstack(
            level=2).rename(columns={
                keyword: 'var_' + key_product + '_av_param'})
    df_summary = df_summary.join(df_mean)
    return {'sum': df_summary,
            'sequences': df}
def get_attr_flows(results, key='variable_costs'):
    """Return all flow keys whose attribute `key` is non-zero.

    The attribute may be given either as a scalar or as a sequence; flows
    with a non-zero scalar come first, followed by flows whose sequence
    sums to a non-zero value.

    Parameters
    ----------
    results : dict
        Results dictionary of the oemof.solph optimisation including the
        parameters under the key 'param'.
    key : str

    Returns
    -------
    list : Flow keys with a non-zero attribute value.
    """
    param = results['param']
    flows_with_attr = []
    for flow_key, flow_param in param.items():
        scalars = flow_param['scalars']
        if key in scalars.keys() and abs(scalars[key]) > 0:
            flows_with_attr.append(flow_key)
    for flow_key, flow_param in param.items():
        sequences = flow_param['sequences']
        if key in sequences.keys() and abs(sequences[key].sum()) > 0:
            flows_with_attr.append(flow_key)
    return flows_with_attr
def get_attr_flow_results(des_results, key='variable_costs'):
    """
    Return the parameter and flow results for all flows of an EnergySystem
    for a given attribute, which is not zero.

    Parameters
    ----------
    des_results : dict
        Results of district energy system. Must have the keys: 'main', 'param'.
    key : str
        Flow attribute.

    Returns
    -------
    pd.DataFrame : Multiindex DataFrame.
        - Index : Timeindex of oemof.solph.EnergySystem.
        - First column index level: <from>-<to>, where from an to are the
          labels of the Nodes.
        - Second column index level:
            - attribute parameter
            - resulting flow value
            - product of parameter and flow column
    """
    attr_flows = get_attr_flows(des_results, key=key)
    # BUGFIX: use the lower-case keys ('param'/'main') like the rest of
    # this module; 'Param'/'Main' raised a KeyError on the very dicts the
    # get_attr_flows() call above just accessed successfully.
    param = des_results['param']
    results = des_results['main']
    df = pd.DataFrame(index=next(iter(results.values()))['sequences'].index)
    len_index = len(df)
    # name of the parameter*flow column depends on the analysed attribute
    # (hoisted out of the loop, it is loop-invariant)
    if key == 'variable_costs':
        key_product = 'costs'
    elif key == 'emission_factor':
        key_product = 'emissions'
    else:
        key_product = 'product'
    for flow in attr_flows:
        label = flow[0].label + '-' + flow[1].label
        # 1) parameter value: scalar (broadcast) or truncated time series
        if key in param[flow]['scalars'].keys():
            df[(label, key)] = param[flow]['scalars'][key]
        else:
            df[(label, key)] = param[flow]['sequences'][key].values[:len_index]
        # 2) get flow results
        df[(label, 'flow')] = results[flow]["sequences"].values
        # 3) calc a * b
        df[(label, key_product)] = df[(label, key)] * df[(label, 'flow')]
    df.columns = pd.MultiIndex.from_tuples(
        list(df.columns), names=('from-to', 'value')
    )
    return df
def get_all_sequences(results):
    """Collect all flow sequences of the EnergySystem in one MultiIndex frame.

    The flows are grouped by the solph node class they are attached to
    ('sink', 'source', 'transformer', 'storage_flow'); afterwards the
    storage-internal sequences (keys with key[1] is None) are appended.

    Returns a DataFrame with the column levels ('type', 'from', 'to').
    """
    d_node_types = {
        'sink': solph.Sink,
        'source': solph.Source,
        'transformer': solph.Transformer,
        'storage_flow': solph.GenericStorage,
    }
    l_df = []
    for typ, solph_class in d_node_types.items():
        # all real flows (key[1] is a node) touching this node class
        group = {
            k: v["sequences"]
            for k, v in results.items()
            if k[1] is not None
            if isinstance(k[0], solph_class) or isinstance(k[1], solph_class)
        }
        df = views.convert_to_multiindex(group)
        df_mi = df.columns.to_frame()
        df_mi.reset_index(drop=True, inplace=True)
        # replace the node objects by their labels and tag the group name
        df_mi['from'] = [x.label for x in df_mi['from']]
        df_mi['to'] = [x.label for x in df_mi['to']]
        df_mi['type'] = typ
        df.columns = pd.MultiIndex.from_frame(df_mi[['type', 'from', 'to']])
        l_df.append(df)
    df_results = pd.concat(l_df, axis=1)
    # add storage content with extra type=storage_content
    # NOTE(review): with k[1] is None the second isinstance check can
    # never be True; only the k[0] check selects storages — confirm
    # whether that was intended.
    group = {
        k: v["sequences"]
        for k, v in results.items()
        if k[1] is None
        if isinstance(k[0], solph.GenericStorage) or isinstance(
            k[1], solph.GenericStorage)
    }
    df = views.convert_to_multiindex(group)
    df_mi = df.columns.to_frame()
    df_mi.reset_index(drop=True, inplace=True)
    df_mi['from'] = [x.label for x in df_mi['from']]
    # NOTE(review): relies on convert_to_multiindex already providing
    # 'type' and 'to' columns for storage-internal sequences — confirm
    # against the oemof.solph views implementation.
    df.columns = pd.MultiIndex.from_frame(df_mi[['type', 'from', 'to']])
    df_results = pd.concat([df_results, df], axis=1)
    return df_results
def get_boundary_flows(results):
    """Collect the flow results of all sinks and sources.

    Parameters
    ----------
    results : dict
        Results of the oemof.solph.Energysystem (results['main']).

    Returns
    -------
    dict : Two keys:
        - 'sequences': pandas.DataFrame with one column per boundary
          flow; column labels are tuples ('source', <label>) or
          ('sink', <label>).
        - 'sum': pandas.Series with the column sums of 'sequences'.
    """
    source_labels = get_label_sources(results)
    sink_labels = get_label_sinks(results)
    # sources
    source_frames = [solph.views.node(results, lab)['sequences']
                     for lab in source_labels]
    df_sources = pd.concat(source_frames, axis=1, join='inner')
    df_sources.columns = [('source', lab) for lab in source_labels]
    # sinks
    sink_frames = [solph.views.node(results, lab)['sequences']
                   for lab in sink_labels]
    df_sinks = pd.concat(sink_frames, axis=1, join='inner')
    df_sinks.columns = [('sink', lab) for lab in sink_labels]
    df_seq = pd.concat([df_sources, df_sinks], axis=1)
    return {'sum': df_seq.sum(),
            'sequences': df_seq}
def get_trafo_flow(results, label_bus):
    """Collect all solph.Transformer flows into a given bus.

    Parameters
    ----------
    results : dict
        Results of the oemof.solph.Energysystem (results['main']).
    label_bus : str
        Label of the target bus.

    Returns
    -------
    dict : Two keys:
        - 'sequences': pandas.DataFrame, one column per transformer flow
          with tuple labels ('converter', <transformer label>).
        - 'sum': pandas.Series with the column sums of 'sequences'.
    """
    trafo_keys = []
    for key in results.keys():
        if key[1] is None:
            continue
        if isinstance(key[0], solph.Transformer) and key[1].label == label_bus:
            trafo_keys.append(key)
    series_list = [results[k]['sequences']['flow'] for k in trafo_keys]
    df_seq = pd.concat(series_list, axis=1, join='inner')
    df_seq.columns = [('converter', k[0].label) for k in trafo_keys]
    return {'sum': df_seq.sum(),
            'sequences': df_seq}
def analyse_bus(results, bus_label):
    """Return the 'flow' sequences (and their sums) of all flows at a bus."""
    sequences = solph.views.node(results, bus_label)["sequences"]
    sequences.columns = pd.MultiIndex.from_tuples(sequences.columns)
    # keep only the 'flow' columns, then flatten the column index back to
    # the (from, to) tuples
    sequences = sequences.loc[:, pd.IndexSlice[:, "flow"]]
    sequences.columns = sequences.columns.get_level_values(0)
    return {'sum': sequences.sum(),
            'sequences': sequences}
def get_sum_flow(results, label):
    """Return the sum of the (single) flow of the node with the given label."""
    # .iloc[0] takes the first column sum positionally; the previous
    # `[0]` on a Series is label-based and its positional fallback is
    # deprecated/removed in pandas >= 2.0.
    return solph.views.node(results, label)["sequences"].sum().iloc[0]
def get_label_sources(results):
    """Return the labels of all solph.Source nodes found in the results."""
    labels = []
    for key in results.keys():
        if isinstance(key[0], solph.Source):
            labels.append(key[0].label)
    return labels
def get_label_sinks(results):
    """Return the labels of all solph.Sink nodes found in the results."""
    labels = []
    for key in results.keys():
        if isinstance(key[1], solph.Sink):
            labels.append(key[1].label)
    return labels
| [
"pandas.MultiIndex.from_frame",
"oemof.solph.views.convert_to_multiindex",
"oemof.solph.views.node",
"numpy.sign",
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"pandas.concat"
] | [((1311, 1368), 'pandas.concat', 'pd.concat', (["{'capex': costs['capex']}"], {'names': "['cost_type']"}), "({'capex': costs['capex']}, names=['cost_type'])\n", (1320, 1368), True, 'import pandas as pd\n'), ((1380, 1442), 'pandas.concat', 'pd.concat', (["{'opex': costs['opex']['sum']}"], {'names': "['cost_type']"}), "({'opex': costs['opex']['sum']}, names=['cost_type'])\n", (1389, 1442), True, 'import pandas as pd\n'), ((1453, 1477), 'pandas.concat', 'pd.concat', (['[capex, opex]'], {}), '([capex, opex])\n', (1462, 1477), True, 'import pandas as pd\n'), ((2818, 2856), 'pandas.concat', 'pd.concat', (['[df_converter, df_storages]'], {}), '([df_converter, df_storages])\n', (2827, 2856), True, 'import pandas as pd\n'), ((2880, 2938), 'pandas.MultiIndex.from_frame', 'pd.MultiIndex.from_frame', (["df_result[['category', 'label']]"], {}), "(df_result[['category', 'label']])\n", (2904, 2938), True, 'import pandas as pd\n'), ((6454, 6502), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'invest_lab', 'columns': "['label']"}), "(data=invest_lab, columns=['label'])\n", (6466, 6502), True, 'import pandas as pd\n'), ((13547, 13570), 'pandas.concat', 'pd.concat', (['l_df'], {'axis': '(1)'}), '(l_df, axis=1)\n', (13556, 13570), True, 'import pandas as pd\n'), ((13850, 13884), 'oemof.solph.views.convert_to_multiindex', 'views.convert_to_multiindex', (['group'], {}), '(group)\n', (13877, 13884), False, 'from oemof.solph import views\n'), ((14036, 14091), 'pandas.MultiIndex.from_frame', 'pd.MultiIndex.from_frame', (["df_mi[['type', 'from', 'to']]"], {}), "(df_mi[['type', 'from', 'to']])\n", (14060, 14091), True, 'import pandas as pd\n'), ((14110, 14145), 'pandas.concat', 'pd.concat', (['[df_results, df]'], {'axis': '(1)'}), '([df_results, df], axis=1)\n', (14119, 14145), True, 'import pandas as pd\n'), ((15060, 15105), 'pandas.concat', 'pd.concat', (['data_sources'], {'axis': '(1)', 'join': '"""inner"""'}), "(data_sources, axis=1, join='inner')\n", (15069, 15105), True, 
'import pandas as pd\n'), ((15339, 15382), 'pandas.concat', 'pd.concat', (['data_sinks'], {'axis': '(1)', 'join': '"""inner"""'}), "(data_sinks, axis=1, join='inner')\n", (15348, 15382), True, 'import pandas as pd\n'), ((15433, 15474), 'pandas.concat', 'pd.concat', (['[df_sources, df_sinks]'], {'axis': '(1)'}), '([df_sources, df_sinks], axis=1)\n', (15442, 15474), True, 'import pandas as pd\n'), ((16505, 16545), 'pandas.concat', 'pd.concat', (['l_table'], {'axis': '(1)', 'join': '"""inner"""'}), "(l_table, axis=1, join='inner')\n", (16514, 16545), True, 'import pandas as pd\n'), ((16833, 16874), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['df_seq.columns'], {}), '(df_seq.columns)\n', (16858, 16874), True, 'import pandas as pd\n'), ((13165, 13199), 'oemof.solph.views.convert_to_multiindex', 'views.convert_to_multiindex', (['group'], {}), '(group)\n', (13192, 13199), False, 'from oemof.solph import views\n'), ((13448, 13503), 'pandas.MultiIndex.from_frame', 'pd.MultiIndex.from_frame', (["df_mi[['type', 'from', 'to']]"], {}), "(df_mi[['type', 'from', 'to']])\n", (13472, 13503), True, 'import pandas as pd\n'), ((16762, 16798), 'oemof.solph.views.node', 'solph.views.node', (['results', 'bus_label'], {}), '(results, bus_label)\n', (16778, 16798), True, 'import oemof.solph as solph\n'), ((6910, 6937), 'numpy.sign', 'np.sign', (["df['invest_value']"], {}), "(df['invest_value'])\n", (6917, 6937), True, 'import numpy as np\n'), ((14898, 14928), 'oemof.solph.views.node', 'solph.views.node', (['results', 'lab'], {}), '(results, lab)\n', (14914, 14928), True, 'import oemof.solph as solph\n'), ((15187, 15217), 'oemof.solph.views.node', 'solph.views.node', (['results', 'lab'], {}), '(results, lab)\n', (15203, 15217), True, 'import oemof.solph as solph\n'), ((17170, 17202), 'oemof.solph.views.node', 'solph.views.node', (['results', 'label'], {}), '(results, label)\n', (17186, 17202), True, 'import oemof.solph as solph\n')] |
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
# Column layout of the whitespace-separated log files read by `compare`.
# NOTE(review): inferred from usage below — each *_INDEX marks the first
# of three x/y/z columns and TIME_INDEX is the epoch column; confirm
# against the log file format specification.
POS_INDEX = 5
VEL_INDEX = 8
ATT_INDEX = 11
TIME_INDEX = 2
def _plot_triplet(fig_no, time, data, unit, title, filename, is_save, save_path):
    """Plot the x/y/z components of one residual array in its own figure."""
    plt.figure(fig_no)
    for i, axis_name in enumerate(("x", "y", "z")):
        plt.subplot(3, 1, i + 1)
        plt.plot(time, data[..., i])
        plt.ylabel("%s(%s)" % (axis_name, unit))
    plt.title(title)
    plt.grid()
    if is_save:
        plt.savefig(os.path.join(save_path, filename))


def plot_residual(time, pos, vel, att, is_save, save_path):
    """Plot position, velocity and attitude residuals in three figures.

    Parameters
    ----------
    time : array-like of epoch values (shared x axis)
    pos, vel, att : (N, 3) residual arrays (x/y/z columns)
    is_save : bool -- save each figure as a .jpg into save_path
    save_path : str -- output directory for the saved figures
    """
    _plot_triplet(1, time, pos, "m", "pos residual (m)",
                  "pos_residual.jpg", is_save, save_path)
    _plot_triplet(2, time, vel, "m/s", "vel residual (m/s)",
                  "vel_residual.jpg", is_save, save_path)
    # BUGFIX: the attitude plot previously reused plt.figure(2), which
    # overwrote the velocity plots; it also labelled its axes "m/s" and
    # titled itself "vel residual (deg)".
    _plot_triplet(3, time, att, "deg", "att residual (deg)",
                  "att_residual.jpg", is_save, save_path)
    plt.show()
def compare(result_file,
            truth_file,
            start_time=0,
            end_time=86400,
            is_save_picture=False,
            save_path="./"):
    """Compare a navigation result file against a reference trajectory.

    Both files are read with np.loadtxt; the column layout is given by the
    module constants POS_INDEX, VEL_INDEX, ATT_INDEX and TIME_INDEX.
    Epochs of the two files are matched within 55 ms, residuals
    (truth - result) are computed, per-axis statistics are calculated and
    the residuals are plotted via plot_residual().

    Parameters
    ----------
    result_file : str -- path of the result file to evaluate
    truth_file : str -- path of the reference ("truth") file
    start_time, end_time : int -- evaluation window on the time column
    is_save_picture : bool -- save the residual figures to save_path
    save_path : str -- output directory for saved figures

    Returns
    -------
    dict : keys 'pos', 'vel', 'att', each a (3, 3) array whose rows are
        the per-axis mean, standard deviation and RMS of the residuals.
    """
    result_data = np.loadtxt(result_file)
    truth_data = np.loadtxt(truth_file)
    # BUGFIX: `and` on numpy arrays raises ValueError; element-wise masks
    # need `&` with parenthesised comparisons.
    data_index = ((result_data[..., TIME_INDEX] > start_time)
                  & (result_data[..., TIME_INDEX] < end_time))
    refer_index = ((truth_data[..., TIME_INDEX] > start_time)
                   & (truth_data[..., TIME_INDEX] < end_time))
    data_time = result_data[data_index, TIME_INDEX]
    pos_data = result_data[data_index, POS_INDEX:POS_INDEX + 3]
    vel_data = result_data[data_index, VEL_INDEX:VEL_INDEX + 3]
    att_data = result_data[data_index, ATT_INDEX:ATT_INDEX + 3]
    ref_time = truth_data[refer_index, TIME_INDEX]
    ref_pos_data = truth_data[refer_index, POS_INDEX:POS_INDEX + 3]
    ref_vel_data = truth_data[refer_index, VEL_INDEX:VEL_INDEX + 3]
    ref_att_data = truth_data[refer_index, ATT_INDEX:ATT_INDEX + 3]
    # BUGFIX: np.nan is a float constant, not a constructor; pre-allocate
    # NaN-filled arrays with np.full instead of calling np.nan(shape).
    residual_pos = np.full(pos_data.shape, np.nan)
    residual_vel = np.full(vel_data.shape, np.nan)
    residual_att = np.full(att_data.shape, np.nan)
    residual_time = np.full(ref_time.shape, np.nan)
    ref_i = 0
    data_i = 0
    residual_i = 0
    # merge-style walk over the two (sorted) time series
    while data_i < np.size(data_time) and ref_i < np.size(ref_time):
        if np.abs(ref_time[ref_i] - data_time[data_i]) < 5.5e-2:
            residual_pos[residual_i] = ref_pos_data[ref_i] - pos_data[data_i]
            residual_vel[residual_i] = ref_vel_data[ref_i] - vel_data[data_i]
            residual_att[residual_i] = ref_att_data[ref_i] - att_data[data_i]
            residual_time[residual_i] = ref_time[ref_i]
            # wrap the heading residual into (-180, 180] degrees
            if residual_att[residual_i, 2] > 180:
                residual_att[residual_i, 2] -= 360
            if residual_att[residual_i, 2] < -180:
                residual_att[residual_i, 2] += 360
            ref_i += 1
            data_i += 1
            residual_i += 1
        elif ref_time[ref_i] - data_time[data_i] > 0:
            data_i += 1
        else:
            ref_i += 1
    # BUGFIX: masking with ~np.isnan flattened the 2-D residual arrays;
    # simply keep the rows that were actually matched.
    residual_pos = residual_pos[:residual_i]
    residual_vel = residual_vel[:residual_i]
    residual_att = residual_att[:residual_i]
    residual_time = residual_time[:residual_i]

    def _stats(residual):
        """Per-axis mean (row 0), std (row 1) and RMS (row 2)."""
        table = np.zeros([3, 3])
        table[0] = np.mean(residual, axis=0)
        table[1] = np.std(residual, axis=0)
        table[2] = np.sqrt(table[0] ** 2 + table[1] ** 2)
        return table

    stats = {
        'pos': _stats(residual_pos),
        'vel': _stats(residual_vel),
        'att': _stats(residual_att),
    }
    plot_residual(residual_time, residual_pos, residual_vel, residual_att,
                  is_save_picture, save_path)
    return stats
def main():
    """Dispatch `compare` according to the number of CLI arguments."""
    argc = len(sys.argv)
    print("length of argv is %d" % argc)
    if argc < 3:
        print("参数不足")
        return
    if argc == 3:
        compare(sys.argv[1], sys.argv[2])
    if argc == 5:
        compare(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv[4]))
    if argc == 7:
        compare(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv[4]),
                bool(int(sys.argv[5])), sys.argv[6])


if __name__ == "__main__":
    main()
| [
"numpy.mean",
"numpy.abs",
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.size",
"matplotlib.pyplot.plot",
"numpy.nan",
"os.path.join",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.isnan",
"numpy.std",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplo... | [((197, 210), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (207, 210), True, 'import matplotlib.pyplot as plt\n'), ((215, 235), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (226, 235), True, 'import matplotlib.pyplot as plt\n'), ((240, 267), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'pos[..., 0]'], {}), '(time, pos[..., 0])\n', (248, 267), True, 'import matplotlib.pyplot as plt\n'), ((272, 290), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x(m)"""'], {}), "('x(m)')\n", (282, 290), True, 'import matplotlib.pyplot as plt\n'), ((295, 315), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (306, 315), True, 'import matplotlib.pyplot as plt\n'), ((320, 347), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'pos[..., 1]'], {}), '(time, pos[..., 1])\n', (328, 347), True, 'import matplotlib.pyplot as plt\n'), ((352, 370), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y(m)"""'], {}), "('y(m)')\n", (362, 370), True, 'import matplotlib.pyplot as plt\n'), ((375, 395), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (386, 395), True, 'import matplotlib.pyplot as plt\n'), ((400, 427), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'pos[..., 2]'], {}), '(time, pos[..., 2])\n', (408, 427), True, 'import matplotlib.pyplot as plt\n'), ((432, 450), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z(m)"""'], {}), "('z(m)')\n", (442, 450), True, 'import matplotlib.pyplot as plt\n'), ((455, 484), 'matplotlib.pyplot.title', 'plt.title', (['"""pos residual (m)"""'], {}), "('pos residual (m)')\n", (464, 484), True, 'import matplotlib.pyplot as plt\n'), ((489, 499), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (497, 499), True, 'import matplotlib.pyplot as plt\n'), ((588, 601), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (598, 601), True, 'import matplotlib.pyplot as plt\n'), ((606, 626), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (617, 626), True, 'import matplotlib.pyplot as plt\n'), ((631, 658), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'vel[..., 0]'], {}), '(time, vel[..., 0])\n', (639, 658), True, 'import matplotlib.pyplot as plt\n'), ((663, 683), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x(m/s)"""'], {}), "('x(m/s)')\n", (673, 683), True, 'import matplotlib.pyplot as plt\n'), ((688, 708), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (699, 708), True, 'import matplotlib.pyplot as plt\n'), ((713, 740), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'vel[..., 1]'], {}), '(time, vel[..., 1])\n', (721, 740), True, 'import matplotlib.pyplot as plt\n'), ((745, 765), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y(m/s)"""'], {}), "('y(m/s)')\n", (755, 765), True, 'import matplotlib.pyplot as plt\n'), ((770, 790), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (781, 790), True, 'import matplotlib.pyplot as plt\n'), ((795, 822), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'vel[..., 2]'], {}), '(time, vel[..., 2])\n', (803, 822), True, 'import matplotlib.pyplot as plt\n'), ((827, 847), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z(m/s)"""'], {}), "('z(m/s)')\n", (837, 847), True, 'import matplotlib.pyplot as plt\n'), ((852, 883), 'matplotlib.pyplot.title', 'plt.title', (['"""vel residual (m/s)"""'], {}), "('vel residual (m/s)')\n", (861, 883), True, 'import matplotlib.pyplot as plt\n'), ((888, 898), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (896, 898), True, 'import matplotlib.pyplot as plt\n'), ((987, 1000), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (997, 1000), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1025), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (1016, 1025), True, 'import matplotlib.pyplot as plt\n'), 
((1030, 1057), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'att[..., 0]'], {}), '(time, att[..., 0])\n', (1038, 1057), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1082), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x(m/s)"""'], {}), "('x(m/s)')\n", (1072, 1082), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1107), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (1098, 1107), True, 'import matplotlib.pyplot as plt\n'), ((1112, 1139), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'att[..., 1]'], {}), '(time, att[..., 1])\n', (1120, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1144, 1164), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y(m/s)"""'], {}), "('y(m/s)')\n", (1154, 1164), True, 'import matplotlib.pyplot as plt\n'), ((1169, 1189), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (1180, 1189), True, 'import matplotlib.pyplot as plt\n'), ((1194, 1221), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'att[..., 2]'], {}), '(time, att[..., 2])\n', (1202, 1221), True, 'import matplotlib.pyplot as plt\n'), ((1226, 1246), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z(m/s)"""'], {}), "('z(m/s)')\n", (1236, 1246), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1282), 'matplotlib.pyplot.title', 'plt.title', (['"""vel residual (deg)"""'], {}), "('vel residual (deg)')\n", (1260, 1282), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1297), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1295, 1297), True, 'import matplotlib.pyplot as plt\n'), ((1385, 1395), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1393, 1395), True, 'import matplotlib.pyplot as plt\n'), ((1583, 1606), 'numpy.loadtxt', 'np.loadtxt', (['result_file'], {}), '(result_file)\n', (1593, 1606), True, 'import numpy as np\n'), ((1624, 1646), 'numpy.loadtxt', 'np.loadtxt', (['truth_file'], {}), '(truth_file)\n', (1634, 1646), True, 'import numpy as np\n'), ((2438, 
2460), 'numpy.nan', 'np.nan', (['pos_data.shape'], {}), '(pos_data.shape)\n', (2444, 2460), True, 'import numpy as np\n'), ((2480, 2502), 'numpy.nan', 'np.nan', (['vel_data.shape'], {}), '(vel_data.shape)\n', (2486, 2502), True, 'import numpy as np\n'), ((2522, 2544), 'numpy.nan', 'np.nan', (['att_data.shape'], {}), '(att_data.shape)\n', (2528, 2544), True, 'import numpy as np\n'), ((2565, 2587), 'numpy.nan', 'np.nan', (['ref_time.shape'], {}), '(ref_time.shape)\n', (2571, 2587), True, 'import numpy as np\n'), ((3736, 3752), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (3744, 3752), True, 'import numpy as np\n'), ((3768, 3784), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (3776, 3784), True, 'import numpy as np\n'), ((3800, 3816), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (3808, 3816), True, 'import numpy as np\n'), ((3841, 3862), 'numpy.mean', 'np.mean', (['residual_pos'], {}), '(residual_pos)\n', (3848, 3862), True, 'import numpy as np\n'), ((3886, 3907), 'numpy.mean', 'np.mean', (['residual_vel'], {}), '(residual_vel)\n', (3893, 3907), True, 'import numpy as np\n'), ((3931, 3952), 'numpy.mean', 'np.mean', (['residual_att'], {}), '(residual_att)\n', (3938, 3952), True, 'import numpy as np\n'), ((3977, 3997), 'numpy.std', 'np.std', (['residual_pos'], {}), '(residual_pos)\n', (3983, 3997), True, 'import numpy as np\n'), ((4021, 4041), 'numpy.std', 'np.std', (['residual_vel'], {}), '(residual_vel)\n', (4027, 4041), True, 'import numpy as np\n'), ((4065, 4085), 'numpy.std', 'np.std', (['residual_att'], {}), '(residual_att)\n', (4071, 4085), True, 'import numpy as np\n'), ((4110, 4196), 'numpy.sqrt', 'np.sqrt', (['(pos_mean[0, ...] * pos_mean[0, ...] + pos_mean[1, ...] * pos_mean[1, ...])'], {}), '(pos_mean[0, ...] * pos_mean[0, ...] + pos_mean[1, ...] * pos_mean[1,\n ...])\n', (4117, 4196), True, 'import numpy as np\n'), ((4247, 4333), 'numpy.sqrt', 'np.sqrt', (['(vel_mean[0, ...] * vel_mean[0, ...] + vel_mean[1, ...] 
* vel_mean[1, ...])'], {}), '(vel_mean[0, ...] * vel_mean[0, ...] + vel_mean[1, ...] * vel_mean[1,\n ...])\n', (4254, 4333), True, 'import numpy as np\n'), ((4384, 4470), 'numpy.sqrt', 'np.sqrt', (['(att_mean[0, ...] * att_mean[0, ...] + att_mean[1, ...] * att_mean[1, ...])'], {}), '(att_mean[0, ...] * att_mean[0, ...] + att_mean[1, ...] * att_mean[1,\n ...])\n', (4391, 4470), True, 'import numpy as np\n'), ((538, 581), 'os.path.join', 'os.path.join', (['save_path', '"""pos_residual.jpg"""'], {}), "(save_path, 'pos_residual.jpg')\n", (550, 581), False, 'import os\n'), ((937, 980), 'os.path.join', 'os.path.join', (['save_path', '"""vel_residual.jpg"""'], {}), "(save_path, 'vel_residual.jpg')\n", (949, 980), False, 'import os\n'), ((1336, 1379), 'os.path.join', 'os.path.join', (['save_path', '"""att_residual.jpg"""'], {}), "(save_path, 'att_residual.jpg')\n", (1348, 1379), False, 'import os\n'), ((2608, 2626), 'numpy.size', 'np.size', (['data_time'], {}), '(data_time)\n', (2615, 2626), True, 'import numpy as np\n'), ((2639, 2656), 'numpy.size', 'np.size', (['ref_time'], {}), '(ref_time)\n', (2646, 2656), True, 'import numpy as np\n'), ((2671, 2714), 'numpy.abs', 'np.abs', (['(ref_time[ref_i] - data_time[data_i])'], {}), '(ref_time[ref_i] - data_time[data_i])\n', (2677, 2714), True, 'import numpy as np\n'), ((3582, 3604), 'numpy.isnan', 'np.isnan', (['residual_pos'], {}), '(residual_pos)\n', (3590, 3604), True, 'import numpy as np\n'), ((3639, 3661), 'numpy.isnan', 'np.isnan', (['residual_vel'], {}), '(residual_vel)\n', (3647, 3661), True, 'import numpy as np\n'), ((3696, 3718), 'numpy.isnan', 'np.isnan', (['residual_att'], {}), '(residual_att)\n', (3704, 3718), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 13 07:55:15 2017
@author: <NAME>
Script to do a full (MPI) data analysis run using the cluster module
Description of necessary events:
1) Find all cluster IDs
2) Write out cluster sizes and cluster IDs at each time step
3) Plot Mass-averaged cluster size of contact, aligned, and optical clusters
both separately and in the same plot, including standard deviation over runs
and save raw mu2 data
4) Compute linear and nonlinear Smoluchowski fits & plot for contact, optical,
and aligned clusters
5) Compute, plot, and save data for the correlation integral of the final
snapshot
"""
from __future__ import absolute_import, division, print_function
from mpi4py import MPI
from time import time
from shutil import move
from os import remove
import clustering as cl
import gsd.hoomd
import os.path as op
import numpy as np
save_path = SSS
data_path=save_path
runs = range(5)
ttotal = 399
tstart = 0
ts = np.arange(tstart,ttotal)
ats = 17
molno = 10648
molnolabel = 10000
AAdlabel = AAA
SCdlabel = SCSCSC
BBdlabel = BBB
idMiss = 10
idPartner = 11
idNotMiss = 4
idNotPartner = 5
fbase = 'mols'+str(molnolabel)+'_' + str(AAdlabel)+'-'\
+str(SCdlabel)+'-'+str(BBdlabel)+'_run'
start = time()
for i in runs:
fname = op.join(data_path,fbase + str(i+1) + '.gsd')
foutname = op.join(data_path,'temp.gsd')
cl.fixMisplacedArom(fname,foutname,idMiss,idPartner,idNotMiss,idNotPartner,molno,ats,ts)
remove(fname)
move(foutname,fname)
end = time()
print("Time to rewrite files with missing aromatics: ",end-start)
| [
"clustering.fixMisplacedArom",
"shutil.move",
"os.path.join",
"time.time",
"numpy.arange",
"os.remove"
] | [((955, 980), 'numpy.arange', 'np.arange', (['tstart', 'ttotal'], {}), '(tstart, ttotal)\n', (964, 980), True, 'import numpy as np\n'), ((1241, 1247), 'time.time', 'time', ([], {}), '()\n', (1245, 1247), False, 'from time import time\n'), ((1511, 1517), 'time.time', 'time', ([], {}), '()\n', (1515, 1517), False, 'from time import time\n'), ((1339, 1369), 'os.path.join', 'op.join', (['data_path', '"""temp.gsd"""'], {}), "(data_path, 'temp.gsd')\n", (1346, 1369), True, 'import os.path as op\n'), ((1373, 1473), 'clustering.fixMisplacedArom', 'cl.fixMisplacedArom', (['fname', 'foutname', 'idMiss', 'idPartner', 'idNotMiss', 'idNotPartner', 'molno', 'ats', 'ts'], {}), '(fname, foutname, idMiss, idPartner, idNotMiss,\n idNotPartner, molno, ats, ts)\n', (1392, 1473), True, 'import clustering as cl\n'), ((1466, 1479), 'os.remove', 'remove', (['fname'], {}), '(fname)\n', (1472, 1479), False, 'from os import remove\n'), ((1484, 1505), 'shutil.move', 'move', (['foutname', 'fname'], {}), '(foutname, fname)\n', (1488, 1505), False, 'from shutil import move\n')] |
import numpy as np
from tqdm import tqdm
def generate_sample_data(filename, item_count=1000, basket_count=100000, seed=123):
print("Creating data set of {} baskets with {} unique items".format(basket_count, item_count))
np.random.seed(seed)
# Create item indices and probability of being selected in the first pass
items = np.arange(item_count)
item_selection_prob = np.random.exponential(1, item_count).clip(0, 2)
item_selection_prob /= np.sum(item_selection_prob)
# Create some associations
item_assoc_prob = np.random.exponential(0.15, item_count).clip(0, 1)
associated_to = {}
for i, item in enumerate(items):
sample_count = np.random.choice([1, 2, 3], 1, p=[.7, .2, .1])
associated_to[item] = frozenset(np.random.choice(items, sample_count, replace=False))
file1 = open(filename, "w")
for _ in tqdm(range(basket_count)):
item_count = np.random.lognormal(1.75, 0.4, 1).astype(int).clip(1)
basket = set(np.random.choice(items, item_count, replace=False, p=item_selection_prob))
basket_associated = set()
for item in basket:
if np.random.uniform(0,1) < item_assoc_prob[item]:
basket_associated.update(associated_to[item])
basket.update(basket_associated)
file1.write(" ".join(str(item) for item in basket)+"\n" )
file1.close()
pass
if __name__ == '__main__':
generate_sample_data("example_dataset.dat", 1000, 100000) | [
"numpy.random.lognormal",
"numpy.random.choice",
"numpy.random.exponential",
"numpy.sum",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.arange"
] | [((231, 251), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (245, 251), True, 'import numpy as np\n'), ((343, 364), 'numpy.arange', 'np.arange', (['item_count'], {}), '(item_count)\n', (352, 364), True, 'import numpy as np\n'), ((466, 493), 'numpy.sum', 'np.sum', (['item_selection_prob'], {}), '(item_selection_prob)\n', (472, 493), True, 'import numpy as np\n'), ((683, 732), 'numpy.random.choice', 'np.random.choice', (['[1, 2, 3]', '(1)'], {'p': '[0.7, 0.2, 0.1]'}), '([1, 2, 3], 1, p=[0.7, 0.2, 0.1])\n', (699, 732), True, 'import numpy as np\n'), ((391, 427), 'numpy.random.exponential', 'np.random.exponential', (['(1)', 'item_count'], {}), '(1, item_count)\n', (412, 427), True, 'import numpy as np\n'), ((548, 587), 'numpy.random.exponential', 'np.random.exponential', (['(0.15)', 'item_count'], {}), '(0.15, item_count)\n', (569, 587), True, 'import numpy as np\n'), ((770, 822), 'numpy.random.choice', 'np.random.choice', (['items', 'sample_count'], {'replace': '(False)'}), '(items, sample_count, replace=False)\n', (786, 822), True, 'import numpy as np\n'), ((995, 1068), 'numpy.random.choice', 'np.random.choice', (['items', 'item_count'], {'replace': '(False)', 'p': 'item_selection_prob'}), '(items, item_count, replace=False, p=item_selection_prob)\n', (1011, 1068), True, 'import numpy as np\n'), ((1147, 1170), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1164, 1170), True, 'import numpy as np\n'), ((919, 952), 'numpy.random.lognormal', 'np.random.lognormal', (['(1.75)', '(0.4)', '(1)'], {}), '(1.75, 0.4, 1)\n', (938, 952), True, 'import numpy as np\n')] |
""" The signals module provides classes to build buy/sell signals
Notes
------
All strategies should inherit from BaseSignal, and provide a request_historical
method. For details of this method see docstring of base/BaseSignal or the
request_historical method in ZeroCrossBuyUpSellDown in this module.
"""
#from abc import ABC,abstractmethod
import copy
import numpy as np
import pandas as pd
from .base import BaseSignal
class ZeroCrossBuyUpSellDown(BaseSignal):
""" Signal that checks for indicator crossing zero
This signal goves a buy signal for positive gradient crossing, and sell for
a negative gradient crossing
"""
def __init__(self,indicator,filter,extra_param=0.0):
""" Signal initialised with an indicator and a filter, and other params
Args:
- indicator: a models.indicator object to collect indicator for
signal to base its decisions on
- filter: a models.filter object for ticker selection
- extra_param: an extra parameter of this signal
"""
self.extra_param=extra_param
super().__init__(indicator,filter)
def request_historical(self,stocks_df,signal_name='signal'):
""" use historical data to get a dictionary of signals
Args:
- stocks_df: pandas dataframe of tickers over time
- signal_name: a name to give this signal as output column
Returns:
- signal_dict: a dictionary with keys being the tickers that the
signal considers (selected by this signal's filter),
and values being dataframes, indexed by times at which
signals are seen, and a column named by argument
'signal_name', with +/-1 for a buy/sell signal.
"""
if not isinstance(signal_name,str):
raise TypeError("singal_name must be a string")
if not isinstance(stocks_df,pd.DataFrame):
raise TypeError("singal_name must be a string")
if self.filter is not None:
stock_df = self.filter.apply_in(stocks_df) # new df, not overwritten
else:
stock_df = stocks_df
indi = self.indicator.get_indicator(stock_df)
signal_dict = dict()
in_to_out_dict = self.filter.output_map()
# loop over tickers
for c in stock_df.columns.to_list():
indi_comp_0 = indi[c].values*indi[c].shift().values # <zero at cross
indi_comp_0[0] = 1.0 # instead of NaN - make >0 - supresses warnings
indi_comp_g = np.sign(indi[c].values-indi[c].shift().values) #grad - up or down
cross_inds = np.where(indi_comp_0<0.0) # indices of crossing
# for this ticker, dataframe of all crossing times, and whether indicator
# was growing or falling
mydf = pd.DataFrame(index=stock_df.iloc[cross_inds].index,
data={signal_name:indi_comp_g[cross_inds]})
# append dataframe into dict of signals
for tick_out in in_to_out_dict[c]:
signal_dict[tick_out] = mydf
return signal_dict
| [
"numpy.where",
"pandas.DataFrame"
] | [((2714, 2741), 'numpy.where', 'np.where', (['(indi_comp_0 < 0.0)'], {}), '(indi_comp_0 < 0.0)\n', (2722, 2741), True, 'import numpy as np\n'), ((2904, 3004), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'stock_df.iloc[cross_inds].index', 'data': '{signal_name: indi_comp_g[cross_inds]}'}), '(index=stock_df.iloc[cross_inds].index, data={signal_name:\n indi_comp_g[cross_inds]})\n', (2916, 3004), True, 'import pandas as pd\n')] |
import numpy as np
from rlkit.smm.smm_policy import hard_smm_point,trained_smm_point
class SMMSampler(object):
"""
A sampler that does not serialization for sampling. Instead, it just uses
the current policy and environment as-is.
WARNING: This will affect the environment! So
```
sampler = InPlacePathSampler(env, ...)
sampler.obtain_samples # this has side-effects: env will change!
```
"""
def __init__(self, env, max_path_length,agent,load_SMM,use_history,SMM_path,num_skills):
self.env = env
if load_SMM:
self.policy = trained_smm_point(use_history,SMM_path,num_skills)
else:
self.policy = hard_smm_point()
self.agent = agent
self.max_path_length = max_path_length
def start_worker(self):
pass
def shutdown_worker(self):
pass
def obtain_samples(self, deterministic=False, max_samples=np.inf, max_trajs=np.inf, accum_context=True, resample=1):
"""
Obtains samples in the environment until either we reach either max_samples transitions or
num_traj trajectories.
The resample argument specifies how often (in trajectories) the agent will resample it's context.
"""
assert max_samples < np.inf or max_trajs < np.inf, "either max_samples or max_trajs must be finite"
#policy = MakeDeterministic(self.policy) if deterministic else self.policy
paths = []
n_steps_total = 0
n_trajs = 0
while n_steps_total < max_samples and n_trajs < max_trajs:
path = self.rollout(
self.env, self.policy, max_path_length=self.max_path_length, accum_context=accum_context,agent_target=self.agent)
# save the latent context that generated this trajectory
path['context'] = None
paths.append(path)
n_steps_total += len(path['observations'])
n_trajs += 1
# don't we also want the option to resample z ever transition?
#if n_trajs % resample == 0:
# policy.sample_z()
return paths, n_steps_total
def rollout(self,env, agent, max_path_length=np.inf, accum_context=False, resample_z=False, animated=False,agent_target=None):
"""
The following value for the following keys will be a 2D array, with the
first dimension corresponding to the time dimension.
- observations
- actions
- rewards
- next_observations
- terminals
The next two elements will be lists of dictionaries, with the index into
the list being the index into the time
- agent_infos
- env_infos
:param env:
:param agent:
:param max_path_length:
:param animated:
:param accum_context: if True, accumulate the collected context
:param agent_target: update context while evaluation and testing
:return:
"""
observations = []
actions = []
rewards = []
terminals = []
agent_infos = []
env_infos = []
o = env.reset()
next_o = None
path_length = 0
#if animated:
# env.render()
while path_length < max_path_length:
a, agent_info = agent.get_action(o)
next_o, r, d, env_info = env.step(a)
# update the agent's current context
if accum_context:
agent_target.update_context([o, a, r, next_o, d, env_info])
observations.append(o)
rewards.append(r)
terminals.append(d)
actions.append(a)
agent_infos.append(agent_info)
env_infos.append(env_info)
path_length += 1
#if d:
# break
o = next_o
#if animated:
# env.render()
actions = np.array(actions)
if len(actions.shape) == 1:
actions = np.expand_dims(actions, 1)
observations = np.array(observations)
if len(observations.shape) == 1:
observations = np.expand_dims(observations, 1)
next_o = np.array([next_o])
next_observations = np.vstack(
(
observations[1:, :],
np.expand_dims(next_o, 0)
)
)
return dict(
observations=observations,
actions=actions,
rewards=np.array(rewards).reshape(-1, 1),
next_observations=next_observations,
terminals=np.array(terminals).reshape(-1, 1),
agent_infos=agent_infos,
env_infos=env_infos,
)
| [
"rlkit.smm.smm_policy.hard_smm_point",
"numpy.array",
"rlkit.smm.smm_policy.trained_smm_point",
"numpy.expand_dims"
] | [((3913, 3930), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (3921, 3930), True, 'import numpy as np\n'), ((4039, 4061), 'numpy.array', 'np.array', (['observations'], {}), '(observations)\n', (4047, 4061), True, 'import numpy as np\n'), ((597, 649), 'rlkit.smm.smm_policy.trained_smm_point', 'trained_smm_point', (['use_history', 'SMM_path', 'num_skills'], {}), '(use_history, SMM_path, num_skills)\n', (614, 649), False, 'from rlkit.smm.smm_policy import hard_smm_point, trained_smm_point\n'), ((688, 704), 'rlkit.smm.smm_policy.hard_smm_point', 'hard_smm_point', ([], {}), '()\n', (702, 704), False, 'from rlkit.smm.smm_policy import hard_smm_point, trained_smm_point\n'), ((3989, 4015), 'numpy.expand_dims', 'np.expand_dims', (['actions', '(1)'], {}), '(actions, 1)\n', (4003, 4015), True, 'import numpy as np\n'), ((4130, 4161), 'numpy.expand_dims', 'np.expand_dims', (['observations', '(1)'], {}), '(observations, 1)\n', (4144, 4161), True, 'import numpy as np\n'), ((4183, 4201), 'numpy.array', 'np.array', (['[next_o]'], {}), '([next_o])\n', (4191, 4201), True, 'import numpy as np\n'), ((4308, 4333), 'numpy.expand_dims', 'np.expand_dims', (['next_o', '(0)'], {}), '(next_o, 0)\n', (4322, 4333), True, 'import numpy as np\n'), ((4467, 4484), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (4475, 4484), True, 'import numpy as np\n'), ((4572, 4591), 'numpy.array', 'np.array', (['terminals'], {}), '(terminals)\n', (4580, 4591), True, 'import numpy as np\n')] |
from lantz.drivers.ni.daqmx import DigitalOutputTask, DigitalOutputChannel
import numpy as np
class DigitalSwitch(object):
def __init__(self, ch='/dev1/port0/line0'):
super().__init__()
self.task = DigitalOutputTask()
output_channel = DigitalOutputChannel(ch)
self.task.add_channel(output_channel)
clock_config = {
'source': 'OnboardClock',
'rate': 10000,
'sample_mode': 'finite',
'samples_per_channel': 100,
}
self.task.configure_timing_sample_clock = clock_config
self._state = False
return
def __del__(self):
self.task.clear()
return
@property
def state(self):
return self._state
@state.setter
def state(self, _state):
if _state:
state_pts = np.ones(100)
else:
state_pts = np.zeros(100)
with self.task as task:
self.task.write(state_pts)
self._state = _state
return
| [
"lantz.drivers.ni.daqmx.DigitalOutputTask",
"numpy.zeros",
"numpy.ones",
"lantz.drivers.ni.daqmx.DigitalOutputChannel"
] | [((220, 239), 'lantz.drivers.ni.daqmx.DigitalOutputTask', 'DigitalOutputTask', ([], {}), '()\n', (237, 239), False, 'from lantz.drivers.ni.daqmx import DigitalOutputTask, DigitalOutputChannel\n'), ((265, 289), 'lantz.drivers.ni.daqmx.DigitalOutputChannel', 'DigitalOutputChannel', (['ch'], {}), '(ch)\n', (285, 289), False, 'from lantz.drivers.ni.daqmx import DigitalOutputTask, DigitalOutputChannel\n'), ((838, 850), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (845, 850), True, 'import numpy as np\n'), ((889, 902), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (897, 902), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Processing
==========
This module contains low level processing routines
author: <NAME>
email: <EMAIL>
"""
from __future__ import absolute_import
from __future__ import division
from collections.abc import Container
import copy
import math
import numpy as np
from scipy import ndimage as ndi
from scipy.ndimage import label, binary_closing, binary_dilation
from skimage.color import rgb2gray
from skimage.measure import regionprops
from skimage.morphology import disk, skeletonize as skeletonize_skimage
from skimage.transform import probabilistic_hough_line
from skimage.util import pad, crop as crop_skimage
from ..models.utils import Line, Point
from ..models.segments import Rect, Panel, Figure, FigureRoleEnum
def convert_greyscale(img):
"""
Wrapper around skimage `rgb2gray` used for backward compatilibity
:param np.ndarray img: input image
:return np.ndarrat: image in grayscale
"""
return rgb2gray(img)
def crop(img, left=None, right=None, top=None, bottom=None):
"""
Crop image.
Automatically limits the crop if bounds are outside the image.
:param numpy.ndarray img: Input image.
:param int left: Left crop.
:param int right: Right crop.
:param int top: Top crop.
:param int bottom: Bottom crop.
:return: Cropped image.
:rtype: numpy.ndarray
"""
height, width = img.shape[:2]
left = max(0, left if left else 0)
right = min(width, right if right else width)
top = max(0, top if top else 0)
bottom = min(height, bottom if bottom else width)
out_img = img[top: bottom, left: right]
return {'img': out_img, 'rectangle': Rect(left, right, top, bottom)}
def crop_rect(img, rect_boundary):
"""
A convenience crop function that crops an image given boundaries as a Rect object
:param np.ndarray img: input image
:param Rect rect_boundary: object containing boundaries of the crop
:return: cropped image
:rtype: np.ndarray
"""
left, right = rect_boundary.left, rect_boundary.right
top, bottom = rect_boundary.top, rect_boundary.bottom
return crop(img, left, right, top, bottom)
def binary_close(fig, size=5):
""" Joins unconnected pixel by dilation and erosion"""
selem = disk(size)
img = pad(fig.img, size, mode='constant')
img = binary_closing(img, selem)
img = crop_skimage(img, size)
return Figure(img, raw_img=fig.raw_img)
def binary_floodfill(fig):
""" Converts all pixels inside closed contour to 1"""
fig.img = ndi.binary_fill_holes(fig.img)
return fig
def pixel_ratio(fig, diag):
""" Calculates the ratio of 'on' pixels to bounding box area for binary figure
:param fig : Input binary Figure
:param diag : Area to calculate pixel ratio
:return ratio: Float detailing ('on' pixels / bounding box area)
"""
cropped_img = crop_rect(fig.img, diag)
cropped_img = cropped_img['img']
ones = np.count_nonzero(cropped_img)
all_pixels = np.size(cropped_img)
ratio = ones / all_pixels
return ratio
def get_bounding_box(fig):
""" Gets the bounding box of each segment
:param fig: Input Figure
:returns panels: List of _panel objects
"""
panels = []
regions = regionprops(fig.img)
for region in regions:
y1, x1, y2, x2 = region.bbox
panels.append(Panel(x1, x2, y1, y2, region.label - 1))# Sets tags to start from 0
return set(panels)
def binary_tag(fig):
""" Tag connected regions with pixel value of 1
:param fig: Input Figure
:returns fig: Connected Figure
"""
fig = copy.deepcopy(fig)
fig.img, no_tagged = ndi.label(fig.img)
return fig
def label_and_get_ccs(fig):
"""
Convenience function that tags ccs in an image and creates their Panels
:param Figure fig: Input Figure
:return set: set of Panels of connected components
"""
labelled = binary_tag(fig)
return get_bounding_box(labelled)
def erase_elements(fig, elements):
"""
Erase elements from an image on a pixel-wise basis. if no `pixels` attribute, the function erases the whole
region inside the bounding box. Automatically assigns roles to ccs in the new figure based on the original.
:param Figure fig: Figure object containing binarized image
:param iterable of panels elements: list of elements to erase from image
:return: copy of the Figure object with elements removed
"""
temp_fig = copy.deepcopy(fig)
try:
flattened = temp_fig.img.flatten()
for element in elements:
np.put(flattened, [pixel.row * temp_fig.img.shape[1] + pixel.col for pixel in element.pixels], 0)
img_no_elements = flattened.reshape(temp_fig.img.shape[0], temp_fig.img.shape[1])
temp_fig.img = img_no_elements
except AttributeError:
for element in elements:
temp_fig.img[element.top:element.bottom+1, element.left:element.right+1] = 0
new_fig = Figure(temp_fig.img, fig.raw_img)
if hasattr(fig, 'kernel_sizes'):
new_fig.kernel_sizes = fig.kernel_sizes
for cc1 in new_fig.connected_components:
for cc2 in fig.connected_components:
if cc1 == cc2:
cc1.role = cc2.role # Copy roles of ccs
return new_fig
def dilate_fragments(fig, kernel_size):
"""
Applies binary dilation to `fig.img` using a disk-shaped structuring element of size ''kernel_sizes''.
:param Figure fig: Processed figure
:param int kernel_size: size of the structuring element
:return Figure: new Figure object
"""
selem = disk(kernel_size)
return Figure(binary_dilation(fig.img, selem), raw_img=fig.raw_img)
def is_slope_consistent(lines):
"""
Checks if the slope of multiple lines is the same or similar. Useful when multiple lines found when searching for
arrows
:param [((x1,y1), (x2,y2))] lines: iterable of pairs of coordinates
:return: True if slope is similar amongst the lines, False otherwise
"""
if not all(isinstance(line, Line) for line in lines):
pairs = [[Point(*coords) for coords in pair] for pair in lines]
lines = [Line(pair) for pair in pairs]
if all(abs(line.slope) > 10 for line in lines): # very high/low slope == inf
return True
if all([line.slope == np.inf or line.slope == -np.inf for line in lines]):
return True
slopes = [line.slope for line in lines if abs(line.slope) != np.inf]
if any([line.slope == np.inf or line.slope == -np.inf for line in lines]):
slopes = [line.slope for line in lines if abs(line.slope) != np.inf]
avg_slope = np.mean(slopes)
std_slope = np.std(slopes)
abs_tol = 0.15
rel_tol = 0.15
tol = abs_tol if abs(avg_slope < 1) else rel_tol * avg_slope
if std_slope > abs(tol):
return False
return True
def approximate_line(point_1, point_2):
"""
Implementation of a Bresenham's algorithm. Approximates a straight line between ``point_1`` and ``point_2`` with
pixels. Output is a list representing pixels forming a straight line path from ``point_1`` to ``point_2``
"""
slope = Line([point_1, point_2]).slope # Create Line just to get slope between two points
if not isinstance(point_1, Point) and not isinstance(point_2, Point):
point_1 = Point(row=point_1[1], col=point_1[0])
point_2 = Point(row=point_2[1], col=point_2[0])
if slope is np.inf:
ordered_points = sorted([point_1, point_2], key=lambda point: point.row)
return Line([Point(row=row, col=point_1.col) for row in range(ordered_points[0].row, ordered_points[1].row)])
elif abs(slope) >= 1:
ordered_points = sorted([point_1, point_2], key=lambda point: point.row)
return bresenham_line_y_dominant(*ordered_points, slope)
elif abs(slope) < 1:
ordered_points = sorted([point_1, point_2], key=lambda point: point.col)
return bresenham_line_x_dominant(*ordered_points, slope)
def bresenham_line_x_dominant(point_1, point_2, slope):
"""
bresenham algorithm implementation when change in x is larger than change in y
:param Point point_1: one endpoint of a line
:param Point point_2: other endpoint of a line
:param float slope: pre-calculated slope of the line
:return: Line formed between the two points
"""
y1 = point_1.row
y2 = point_2.row
deltay = y2 - y1
domain = range(point_1.col, point_2.col+1)
deltaerr = abs(slope)
error = 0
y = point_1.row
line = []
for x in domain:
line.append((x, y))
error += deltaerr
if error >= 0.5:
deltay_sign = int(math.copysign(1, deltay))
y += deltay_sign
error -= 1
pixels = [Point(row=y, col=x) for x, y in line]
return Line(pixels=pixels)
def bresenham_line_y_dominant(point_1, point_2, slope):
"""bresenham algorithm implementation when change in y is larger than change in x
:param Point point_1: one endpoint of a line
:param Point point_2: other endpoint of a line
:param float slope: pre-calculated slope of the line
:return: Line formed between the two points
"""
x1 = point_1.col
x2 = point_2.col
deltax = x2-x1
domain = range(point_1.row, point_2.row + 1)
deltaerr = abs(1/slope)
error = 0
x = point_1.col
line = []
for y in domain:
line.append((x, y))
error += deltaerr
if error >= 0.5:
deltax_sign = int(math.copysign(1, deltax))
x += deltax_sign
error -= 1
pixels = [Point(row=y, col=x) for x, y in line]
return Line(pixels=pixels)
def remove_small_fully_contained(connected_components):
"""
Remove smaller connected components if their bounding boxes are fully enclosed within larger connected components
:param iterable connected_components: set of all connected components
:return: a smaller set of ccs without the enclosed ccs
"""
enclosed_ccs = [small_cc for small_cc in connected_components if any(large_cc.contains(small_cc) for large_cc
in remove_connected_component(small_cc, connected_components))]
# print(enclosed_ccs)
refined_ccs = connected_components.difference(set(enclosed_ccs))
return refined_ccs
def merge_rect(rect1, rect2):
""" Merges rectangle with another, such that the bounding box enclose both
:param Rect rect1: A rectangle
:param Rect rect2: Another rectangle
:return: Merged rectangle
"""
left = min(rect1.left, rect2.left)
right = max(rect1.right, rect2.right)
top = min(rect1.top, rect2.top)
bottom = max(rect1.bottom, rect2.bottom)
return Rect(left=left, right=right, top=top, bottom=bottom)
def remove_connected_component(cc, connected_components):
"""
Attempt to remove connected component and return the smaller set
:param Panel cc: connected component to remove
:param iterable connected_components: set of all connected components
:return: smaller set of connected components
"""
if not isinstance(connected_components, set):
connected_components = set(copy.deepcopy(connected_components))
connected_components.remove(cc)
return connected_components
def isolate_patches(fig, to_isolate):
"""
Creates an empty np.ndarray of shape `fig.img.shape` and populates it with pixels from `to_isolate`
:param Figure|Crop fig: Figure object with binarized image
:param iterable of Panels to_isolate: a set or a list of connected components to isolate
:return: np.ndarray of shape `fig.img.shape` populated with only the isolated components
"""
isolated = np.zeros(shape=fig.img.shape)
for connected_component in to_isolate:
top = connected_component.top
bottom = connected_component.bottom
left = connected_component.left
right = connected_component.right
isolated[top:bottom, left:right] = fig.img[top:bottom, left:right]
fig = Figure(img=isolated, raw_img=fig.raw_img, )
return fig
def postprocessing_close_merge(fig, to_close):
"""
Isolate a set of connected components and close them using a small kernel.
Find new, larger connected components. Used for dense images, where appropriate
closing cannot be performed initially.
:param Figure fig: Figure object with binarized image
:param iterable of Panels to_close: a set or list of connected components to close
:return: A smaller set of larger connected components
"""
isolated = isolate_patches(fig, to_close)
closed = binary_close(isolated, size=5)
labelled = binary_tag(closed)
panels = get_bounding_box(labelled)
return panels
def preprocessing_remove_long_lines(fig):
"""
Remove long line separators from an image to improve image closing algorithm
:param Figure fig: Figure with a binarized img attribute
:return: Figure without separators
"""
fig = copy.deepcopy(fig)
threshold = int(fig.diagonal//2)
print(threshold)
long_lines = probabilistic_hough_line(fig.img, threshold=threshold) # Output is two endpoints per line
labelled_img, _ = label(fig.img)
long_lines_list = []
for line in long_lines:
points = [Point(row=y, col=x) for x, y in line]
p1 = points[0]
line_label = labelled_img[p1.row, p1.col]
line_pixels = np.nonzero(labelled_img == line_label)
line_pixels = list(zip(*line_pixels))
long_lines_list.append(Line(pixels=line_pixels))
return erase_elements(fig, long_lines_list)
def intersect_rectangles(rect1, rect2):
"""
Forms a new Rect object in the space shared by the two rectangles. Similar to intersection operation in set theory.
:param Rect rect1: any Rect object
:param Rect rect2: any Rect object
:return: Rect formed by taking intersection of the two initial rectangles
"""
left = max(rect1.left, rect2.left)
right = min(rect1.right, rect2.right)
top = max(rect1.top, rect2.top)
bottom = min(rect1.bottom, rect2.bottom)
return Rect(left, right, top, bottom)
def clean_output(text):
    """Return *text* with every newline character removed."""
    return ''.join(text.split('\n'))
def flatten_list(data):
    """
    Flattens multi-level iterables into a list of elements
    :param [[..]] data: multi-level iterable data structure to flatten
    :return: flattened list of all elements
    """
    if len(data) == 0:
        return data
    head, rest = data[:1], data[1:]
    if isinstance(head[0], Container):
        # Nested container: flatten it before the remainder of the sequence.
        return flatten_list(head[0]) + flatten_list(rest)
    return head + flatten_list(rest)
def normalize_image(img):
    """
    Normalise image values in place to fit the range between 0 and 1, so the
    image can be further processed. Useful e.g. after a blurring operation.
    :param np.ndarray img: analysed image; must have a float dtype (in-place
        ``/=`` on an int array raises)
    :return: np.ndarray - the same array with values scaled into [0, 1]
    """
    min_val = np.min(img)
    max_val = np.max(img)
    img -= min_val
    # Guard against division by zero: a constant image normalises to all zeros
    # instead of producing NaNs.
    if max_val != min_val:
        img /= (max_val - min_val)
    return img
def standardize(data):
    """
    Standardizes data column-wise to mean 0 and standard deviation of 1.

    Always operates on a float copy so the caller's array is never mutated.
    (The original only copied non-float inputs, silently mutating float64
    arrays in place — an inconsistent side effect.)
    :param np.ndarray data: array of data
    :return np.ndarray: standardized data array
    """
    data = data.astype('float', copy=True)
    data -= np.mean(data, axis=0)
    data /= np.std(data, axis=0)
    return data
def find_minima_between_peaks(data, peaks):
    """
    Find the deepest minimum between each adjacent pair of entries in
    ``peaks``, where ``data`` is a 2D array describing a kernel density
    estimate (minima are searched in its second row). Used to cut ``data``
    into segments so that the samples used to build the estimate can be
    assigned to specific peaks.
    :param np.ndarray data: analysed data
    :param [int, int...] peaks: indices of peaks in ``data``
    :return: list containing the indices of the local minima
    """
    return [start + np.argmin(data[1, start:end])
            for start, end in zip(peaks, peaks[1:])]
def is_a_single_line(fig, panel, line_length):
    """
    Decide whether a connected component is a single straight line by checking
    slope consistency of Hough-detected segments within the isolated patch.
    :return: bool
    """
    patch = isolate_patches(fig, [panel]).img
    detected = probabilistic_hough_line(patch, line_length=line_length)
    # No segments found means this cannot be a line.
    return bool(detected) and is_slope_consistent(detected)
def skeletonize(fig):
    """
    Convenience wrapper around skimage.morphology.skeletonize operating on
    Figure objects.
    :param fig: analysed figure object
    :return: figure object with a skeletonised image
    """
    return Figure(skeletonize_skimage(fig.img), raw_img=fig.raw_img)
def skeletonize_area_ratio(fig, panel):
    """Calculates the ratio of skeletonized image pixels to total number of pixels.
    :param fig: Input figure
    :param panel: Original _panel object
    :return: float - ratio of skeletonized pixels to total area (see pixel_ratio)
    """
    return pixel_ratio(skeletonize(fig), panel)
def mark_tiny_ccs(fig):
    """Marks all tiny connected components.

    A component counts as tiny when its area falls below the 4th percentile of
    all component areas; only components with no role assigned yet are marked.
    :param Figure fig: Analysed figure
    """
    areas = [cc.area for cc in fig.connected_components]
    if not areas:
        return
    # Compute the threshold once instead of once per component (the original
    # re-evaluated np.percentile inside a side-effecting list comprehension).
    tiny_threshold = np.percentile(areas, 4)
    for cc in fig.connected_components:
        if cc.role is None and cc.area < tiny_threshold:
            cc.role = FigureRoleEnum.TINY
| [
"numpy.count_nonzero",
"scipy.ndimage.binary_dilation",
"copy.deepcopy",
"numpy.mean",
"scipy.ndimage.label",
"numpy.max",
"math.copysign",
"skimage.util.crop",
"numpy.min",
"skimage.transform.probabilistic_hough_line",
"numpy.argmin",
"skimage.color.rgb2gray",
"skimage.measure.regionprops",... | [((958, 971), 'skimage.color.rgb2gray', 'rgb2gray', (['img'], {}), '(img)\n', (966, 971), False, 'from skimage.color import rgb2gray\n'), ((2265, 2275), 'skimage.morphology.disk', 'disk', (['size'], {}), '(size)\n', (2269, 2275), False, 'from skimage.morphology import disk, skeletonize as skeletonize_skimage\n'), ((2287, 2322), 'skimage.util.pad', 'pad', (['fig.img', 'size'], {'mode': '"""constant"""'}), "(fig.img, size, mode='constant')\n", (2290, 2322), False, 'from skimage.util import pad, crop as crop_skimage\n'), ((2333, 2359), 'scipy.ndimage.binary_closing', 'binary_closing', (['img', 'selem'], {}), '(img, selem)\n', (2347, 2359), False, 'from scipy.ndimage import label, binary_closing, binary_dilation\n'), ((2370, 2393), 'skimage.util.crop', 'crop_skimage', (['img', 'size'], {}), '(img, size)\n', (2382, 2393), True, 'from skimage.util import pad, crop as crop_skimage\n'), ((2539, 2569), 'scipy.ndimage.binary_fill_holes', 'ndi.binary_fill_holes', (['fig.img'], {}), '(fig.img)\n', (2560, 2569), True, 'from scipy import ndimage as ndi\n'), ((2953, 2982), 'numpy.count_nonzero', 'np.count_nonzero', (['cropped_img'], {}), '(cropped_img)\n', (2969, 2982), True, 'import numpy as np\n'), ((3000, 3020), 'numpy.size', 'np.size', (['cropped_img'], {}), '(cropped_img)\n', (3007, 3020), True, 'import numpy as np\n'), ((3255, 3275), 'skimage.measure.regionprops', 'regionprops', (['fig.img'], {}), '(fig.img)\n', (3266, 3275), False, 'from skimage.measure import regionprops\n'), ((3611, 3629), 'copy.deepcopy', 'copy.deepcopy', (['fig'], {}), '(fig)\n', (3624, 3629), False, 'import copy\n'), ((3655, 3673), 'scipy.ndimage.label', 'ndi.label', (['fig.img'], {}), '(fig.img)\n', (3664, 3673), True, 'from scipy import ndimage as ndi\n'), ((4465, 4483), 'copy.deepcopy', 'copy.deepcopy', (['fig'], {}), '(fig)\n', (4478, 4483), False, 'import copy\n'), ((5603, 5620), 'skimage.morphology.disk', 'disk', (['kernel_size'], {}), '(kernel_size)\n', 
(5607, 5620), False, 'from skimage.morphology import disk, skeletonize as skeletonize_skimage\n'), ((6642, 6657), 'numpy.mean', 'np.mean', (['slopes'], {}), '(slopes)\n', (6649, 6657), True, 'import numpy as np\n'), ((6674, 6688), 'numpy.std', 'np.std', (['slopes'], {}), '(slopes)\n', (6680, 6688), True, 'import numpy as np\n'), ((11702, 11731), 'numpy.zeros', 'np.zeros', ([], {'shape': 'fig.img.shape'}), '(shape=fig.img.shape)\n', (11710, 11731), True, 'import numpy as np\n'), ((12992, 13010), 'copy.deepcopy', 'copy.deepcopy', (['fig'], {}), '(fig)\n', (13005, 13010), False, 'import copy\n'), ((13086, 13140), 'skimage.transform.probabilistic_hough_line', 'probabilistic_hough_line', (['fig.img'], {'threshold': 'threshold'}), '(fig.img, threshold=threshold)\n', (13110, 13140), False, 'from skimage.transform import probabilistic_hough_line\n'), ((13199, 13213), 'scipy.ndimage.label', 'label', (['fig.img'], {}), '(fig.img)\n', (13204, 13213), False, 'from scipy.ndimage import label, binary_closing, binary_dilation\n'), ((15002, 15013), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (15008, 15013), True, 'import numpy as np\n'), ((15028, 15039), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (15034, 15039), True, 'import numpy as np\n'), ((15382, 15403), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (15389, 15403), True, 'import numpy as np\n'), ((15422, 15442), 'numpy.std', 'np.std', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (15428, 15442), True, 'import numpy as np\n'), ((16862, 16890), 'skimage.morphology.skeletonize', 'skeletonize_skimage', (['fig.img'], {}), '(fig.img)\n', (16881, 16890), True, 'from skimage.morphology import disk, skeletonize as skeletonize_skimage\n'), ((5640, 5671), 'scipy.ndimage.binary_dilation', 'binary_dilation', (['fig.img', 'selem'], {}), '(fig.img, selem)\n', (5655, 5671), False, 'from scipy.ndimage import label, binary_closing, binary_dilation\n'), ((13418, 13456), 'numpy.nonzero', 'np.nonzero', 
(['(labelled_img == line_label)'], {}), '(labelled_img == line_label)\n', (13428, 13456), True, 'import numpy as np\n'), ((4582, 4685), 'numpy.put', 'np.put', (['flattened', '[(pixel.row * temp_fig.img.shape[1] + pixel.col) for pixel in element.pixels]', '(0)'], {}), '(flattened, [(pixel.row * temp_fig.img.shape[1] + pixel.col) for\n pixel in element.pixels], 0)\n', (4588, 4685), True, 'import numpy as np\n'), ((11173, 11208), 'copy.deepcopy', 'copy.deepcopy', (['connected_components'], {}), '(connected_components)\n', (11186, 11208), False, 'import copy\n'), ((16142, 16171), 'numpy.argmin', 'np.argmin', (['data[1, start:end]'], {}), '(data[1, start:end])\n', (16151, 16171), True, 'import numpy as np\n'), ((8675, 8699), 'math.copysign', 'math.copysign', (['(1)', 'deltay'], {}), '(1, deltay)\n', (8688, 8699), False, 'import math\n'), ((9513, 9537), 'math.copysign', 'math.copysign', (['(1)', 'deltax'], {}), '(1, deltax)\n', (9526, 9537), False, 'import math\n'), ((17505, 17567), 'numpy.percentile', 'np.percentile', (['[cc.area for cc in fig.connected_components]', '(4)'], {}), '([cc.area for cc in fig.connected_components], 4)\n', (17518, 17567), True, 'import numpy as np\n')] |
# Copyright (c) 2021, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from math import ceil, log2, pow
import os
import numpy as np
import tiledb
from lxml import etree
from PIL import Image
from copy import copy
import palettable.colorbrewer.sequential as palettes
from .dzi_adapter_interface import DZIAdapterInterface
from .errors import InvalidAttribute, InvalidColorPalette, InvalidTileAddress
from .. import settings
class TileDBDZIAdapter(DZIAdapterInterface):
    """DZI (Deep Zoom Image) adapter backed by a TileDB dense array.

    Translates DZI ``level/row/column`` tile requests into slices of the
    underlying TileDB dataset and renders them as colour-mapped PIL images.
    """

    def __init__(self, tiledb_file, tiledb_repo):
        super(TileDBDZIAdapter, self).__init__()
        # Full path of the TileDB array backing this adapter.
        self.tiledb_resource = os.path.join(tiledb_repo, tiledb_file)
        self.logger.debug('TileDB adapter initialized')

    def _get_meta_attributes(self, keys):
        """Return a dict of the requested dataset metadata entries.

        Keys that cannot be loaded are logged and silently omitted from the
        result (callers must tolerate missing entries).
        """
        with tiledb.open(self.tiledb_resource) as A:
            attributes = {}
            for k in keys:
                try:
                    attributes[k] = A.meta[k]
                except Exception:  # narrowed from a bare except
                    self.logger.error('Error when loading attribute %s' % k)
            return attributes

    def _get_meta_attribute(self, key):
        """Return a single metadata entry, or None (with a logged error) if it
        cannot be loaded."""
        with tiledb.open(self.tiledb_resource) as A:
            try:
                return A.meta[key]
            except Exception:  # narrowed from a bare except
                self.logger.error('Error when loading attribute %s' % key)

    def _get_dataset_shape(self):
        """Return the shape of the TileDB array."""
        with tiledb.open(self.tiledb_resource) as A:
            return A.shape

    def _get_schema(self):
        """Load and return the TileDB array schema."""
        return tiledb.ArraySchema.load(self.tiledb_resource)

    def _check_attribute(self, attribute):
        """Return True if the dataset schema declares ``attribute``."""
        schema = self._get_schema()
        return schema.has_attr(attribute)

    def _get_attribute_by_index(self, attribute_index):
        """Return the name of the schema attribute at ``attribute_index``.

        :raises IndexError: if the index is out of range
        """
        schema = self._get_schema()
        if attribute_index >= 0 and attribute_index < schema.nattr:
            return schema.attr(attribute_index).name
        else:
            raise IndexError('Schema has no attribute for index %d' % attribute_index)

    def _resolve_attribute(self, attribute_label):
        """Map an optional attribute label onto a validated attribute name.

        ``None`` selects the first schema attribute; an unknown label raises
        InvalidAttribute. (Shared by get_dzi_description and get_tile, which
        previously duplicated this logic.)
        """
        if attribute_label is None:
            return self._get_attribute_by_index(0)
        if self._check_attribute(attribute_label):
            return attribute_label
        raise InvalidAttribute('Dataset has no attribute %s' % attribute_label)

    def _get_dzi_tile_coordinates(self, row, column, tile_size, level):
        """Return the pixel bounding box of a DZI tile at the given level,
        clamped to the level's image dimensions."""
        level_dimensions = self._get_dzi_level_dimensions(level)
        self.logger.debug(f'### DZI DIMENSIONS FOR LEVEL {level}: {level_dimensions}')
        x_min = row*tile_size
        y_min = column*tile_size
        x_max = x_min+tile_size
        y_max = y_min+tile_size
        return {
            'x_min': x_min,
            'x_max': min(x_max, level_dimensions['width']),
            'y_min': y_min,
            'y_max': min(y_max, level_dimensions['height'])
        }

    def _get_dzi_level(self, shape):
        """Return the DZI pyramid level for an image of the given (w, h) shape:
        ceil(log2(max dimension))."""
        return int(ceil(log2(max(*shape))))

    def _get_dzi_max_level(self):
        """Return the deepest DZI level for the dataset's original image size."""
        original_dimensions = self._get_meta_attributes(['original_width', 'original_height'])
        return self._get_dzi_level((original_dimensions['original_width'],
                                    original_dimensions['original_height']))

    def _get_dzi_level_dimensions(self, level):
        """Return the image width/height (in pixels) at a given DZI level.

        Each level halves the previous one, so the scale factor is a power
        of two relative to the deepest level.
        """
        original_dimensions = self._get_meta_attributes(['original_width', 'original_height'])
        max_dzi_level = self._get_dzi_max_level()
        scale_factor = pow(2, max_dzi_level - level)
        return {
            'width': original_dimensions['original_width'] // scale_factor,
            'height': original_dimensions['original_height'] // scale_factor
        }

    def _get_dataset_dzi_dimensions(self, attribute):
        """Return the original image dimensions used to describe the DZI.

        NOTE(review): the per-attribute sampling-level/tile-size metadata is
        fetched here but not returned — presumably kept for its logging side
        effect when missing; verify before trimming.
        """
        attrs = self._get_meta_attributes([
            'original_width', 'original_height',
            '{0}.dzi_sampling_level'.format(attribute),
            '{0}.tile_size'.format(attribute)
        ])
        return {
            'width': attrs['original_width'],
            'height': attrs['original_height']
        }

    def _get_zoom_scale_factor(self, dzi_zoom_level, dataset_attribute):
        """Return the power-of-two factor mapping DZI coordinates at
        ``dzi_zoom_level`` onto the dataset's native sampling level."""
        tiledb_zoom_level = self._get_meta_attribute('{0}.dzi_sampling_level'.format(dataset_attribute))
        return pow(2, (tiledb_zoom_level-dzi_zoom_level))

    def _get_dataset_tile_coordinates(self, dzi_coordinates, zoom_scale_factor):
        """Scale DZI pixel coordinates into dataset pixel coordinates."""
        return {k:(v*zoom_scale_factor) for (k, v) in dzi_coordinates.items()}

    def _get_dataset_tiles(self, coordinates, dataset_attribute):
        """Convert dataset pixel coordinates into a (row, col) range of the
        dataset's internal tiles covering that region."""
        dataset_tile_size = self._get_meta_attribute('{0}.tile_size'.format(dataset_attribute))
        col_min = int(coordinates['x_min']/dataset_tile_size)
        row_min = int(coordinates['y_min']/dataset_tile_size)
        col_max = ceil(coordinates['x_max']/dataset_tile_size)
        row_max = ceil(coordinates['y_max']/dataset_tile_size)
        return {
            'col_min': col_min,
            'col_max': col_max,
            'row_min': row_min,
            'row_max': row_max
        }

    def _slice_by_attribute(self, attribute, level, column, row, dzi_tile_size):
        """Load the dataset slice backing one DZI tile.

        Values are divided by 100 (stored as percentages). Returns the slice
        (zero-padded to the expected extent) and the zoom scale factor.
        """
        dzi_coordinates = self._get_dzi_tile_coordinates(row, column, dzi_tile_size, level)
        zoom_scale_factor = self._get_zoom_scale_factor(level, attribute)
        dataset_tiles = self._get_dataset_tiles(
            self._get_dataset_tile_coordinates(dzi_coordinates, zoom_scale_factor),
            attribute
        )
        with tiledb.open(self.tiledb_resource) as A:
            q = A.query(attrs=(attribute,))
            try:
                data = q[dataset_tiles['row_min']:dataset_tiles['row_max'],
                         dataset_tiles['col_min']:dataset_tiles['col_max']][attribute]/100.
                # NOTE(review): this is a lexicographic tuple comparison, not an
                # element-wise one — confirm it behaves as intended for tiles
                # short in only one dimension.
                if data.shape < (dataset_tiles['row_max'] - dataset_tiles['row_min'],
                                 dataset_tiles['col_max'] - dataset_tiles['col_min']):
                    self.logger.debug(f'### DATA SHAPE IS {data.shape}')
                    width = dataset_tiles['col_max'] - dataset_tiles['col_min']
                    height = dataset_tiles['row_max'] - dataset_tiles['row_min']
                    # Zero-pad edge tiles up to the full (rows, cols) extent.
                    data = np.pad(
                        data,
                        [
                            (0, height-data.shape[0]),
                            (0, width-data.shape[1])
                        ],
                        'constant', constant_values=[0]
                    )
            except tiledb.TileDBError as tbe:
                self.logger.error(tbe)
                # Fall back to an all-zero tile. Shape is (rows, cols) to match
                # the layout of a successfully loaded slice (the original code
                # had these two dimensions transposed).
                empty_tile = np.zeros(
                    (
                        dataset_tiles['row_max']-dataset_tiles['row_min'],
                        dataset_tiles['col_max']-dataset_tiles['col_min']
                    )
                )
                return empty_tile, zoom_scale_factor
            return data, zoom_scale_factor

    def _apply_palette(self, data_slice, palette):
        """Map a [0, 1] probability slice onto RGB colours from a palettable
        colorbrewer palette.

        :raises InvalidColorPalette: if ``palette`` is not a palettable name
        """
        try:
            p_obj = getattr(palettes, palette)
        except AttributeError:
            raise InvalidColorPalette('%s is not a valid color palette' % palette)
        p_colors = copy(p_obj.colors)
        p_colors.insert(0, [255, 255, 255])  # TODO: check if actually necessary
        norm_slice = np.asarray(np.uint8(data_slice*len(p_colors))).reshape(-1)
        # extend the p_colors array to avoid an issue related to probabilities with a value of 1.0
        p_colors.append(p_colors[-1])
        colored_slice = [p_colors[int(y)] for y in norm_slice]
        return np.array(colored_slice).reshape(*data_slice.shape, 3)

    def _tile_to_img(self, tile, mode='RGB'):
        """Convert an RGB numpy tile into a PIL image."""
        img = Image.fromarray(np.uint8(tile), mode)
        return img

    def _get_expected_tile_size(self, dzi_tile_size, zoom_scale_factor, dataset_tile_size):
        """Return how many dataset tiles (per axis) one DZI tile spans, at least 1."""
        return max(int((dzi_tile_size*zoom_scale_factor)/dataset_tile_size), 1)

    def _slice_to_tile(self, data_slice, tile_size, zoom_scale_factor, dataset_tile_size, palette):
        """Render a dataset slice as a colour-mapped PIL image resized to the
        requested DZI tile size (BOX resampling keeps blocky probability cells)."""
        expected_tile_size = self._get_expected_tile_size(tile_size, zoom_scale_factor, dataset_tile_size)
        tile = self._apply_palette(data_slice, palette)
        tile = self._tile_to_img(tile)
        return tile.resize(
            (
                int(tile_size*(tile.width/expected_tile_size)),
                int(tile_size*(tile.height/expected_tile_size))
            ), Image.BOX)

    def get_dzi_description(self, tile_size=None, attribute_label=None):
        """Return the DZI XML descriptor (as bytes) for the dataset.

        :param tile_size: DZI tile size; defaults to settings.DEEPZOOM_TILE_SIZE
        :param attribute_label: dataset attribute; defaults to the first one
        :raises InvalidAttribute: if ``attribute_label`` is unknown
        """
        attribute = self._resolve_attribute(attribute_label)
        dset_dims = self._get_dataset_dzi_dimensions(attribute)
        tile_size = tile_size if tile_size is not None else settings.DEEPZOOM_TILE_SIZE
        dzi_root = etree.Element(
            'Image',
            attrib={
                'Format': 'png',
                'Overlap': '0',  # no overlap when rendering array datasets
                'TileSize': str(tile_size)
            },
            nsmap={None: 'http://schemas.microsoft.com/deepzoom/2008'}
        )
        etree.SubElement(dzi_root, 'Size',
                         attrib={
                             'Height': str(dset_dims['height']),
                             'Width': str(dset_dims['width'])
                         })
        return etree.tostring(dzi_root)

    def get_tile(self, level, row, column, palette, attribute_label=None, tile_size=None):
        """Render one DZI tile as a colour-mapped PIL image.

        :param level: DZI zoom level
        :param row: tile row index
        :param column: tile column index
        :param palette: palettable colorbrewer palette name
        :param attribute_label: dataset attribute; defaults to the first one
        :param tile_size: DZI tile size; defaults to settings.DEEPZOOM_TILE_SIZE
        :raises InvalidAttribute: if ``attribute_label`` is unknown
        """
        self.logger.debug('Loading tile')
        tile_size = tile_size if tile_size is not None else settings.DEEPZOOM_TILE_SIZE
        self.logger.debug('Setting tile size to %dpx', tile_size)
        attribute = self._resolve_attribute(attribute_label)
        self.logger.debug('Slicing by attribute %s', attribute)
        tile_slice, zoom_scale_factor = self._slice_by_attribute(attribute, int(level), int(row), int(column), tile_size)
        return self._slice_to_tile(tile_slice, tile_size, zoom_scale_factor,
                                   self._get_meta_attribute('{0}.tile_size'.format(attribute)),
                                   palette)
| [
"numpy.uint8",
"math.ceil",
"math.pow",
"os.path.join",
"tiledb.ArraySchema.load",
"numpy.array",
"numpy.pad",
"tiledb.open",
"numpy.zeros",
"copy.copy",
"lxml.etree.tostring"
] | [((1630, 1668), 'os.path.join', 'os.path.join', (['tiledb_repo', 'tiledb_file'], {}), '(tiledb_repo, tiledb_file)\n', (1642, 1668), False, 'import os\n'), ((2469, 2514), 'tiledb.ArraySchema.load', 'tiledb.ArraySchema.load', (['self.tiledb_resource'], {}), '(self.tiledb_resource)\n', (2492, 2514), False, 'import tiledb\n'), ((4155, 4184), 'math.pow', 'pow', (['(2)', '(max_dzi_level - level)'], {}), '(2, max_dzi_level - level)\n', (4158, 4184), False, 'from math import ceil, log2, pow\n'), ((5005, 5047), 'math.pow', 'pow', (['(2)', '(tiledb_zoom_level - dzi_zoom_level)'], {}), '(2, tiledb_zoom_level - dzi_zoom_level)\n', (5008, 5047), False, 'from math import ceil, log2, pow\n'), ((5514, 5560), 'math.ceil', 'ceil', (["(coordinates['x_max'] / dataset_tile_size)"], {}), "(coordinates['x_max'] / dataset_tile_size)\n", (5518, 5560), False, 'from math import ceil, log2, pow\n'), ((5577, 5623), 'math.ceil', 'ceil', (["(coordinates['y_max'] / dataset_tile_size)"], {}), "(coordinates['y_max'] / dataset_tile_size)\n", (5581, 5623), False, 'from math import ceil, log2, pow\n'), ((8066, 8084), 'copy.copy', 'copy', (['p_obj.colors'], {}), '(p_obj.colors)\n', (8070, 8084), False, 'from copy import copy\n'), ((10547, 10571), 'lxml.etree.tostring', 'etree.tostring', (['dzi_root'], {}), '(dzi_root)\n', (10561, 10571), False, 'from lxml import etree\n'), ((1781, 1814), 'tiledb.open', 'tiledb.open', (['self.tiledb_resource'], {}), '(self.tiledb_resource)\n', (1792, 1814), False, 'import tiledb\n'), ((2124, 2157), 'tiledb.open', 'tiledb.open', (['self.tiledb_resource'], {}), '(self.tiledb_resource)\n', (2135, 2157), False, 'import tiledb\n'), ((2359, 2392), 'tiledb.open', 'tiledb.open', (['self.tiledb_resource'], {}), '(self.tiledb_resource)\n', (2370, 2392), False, 'import tiledb\n'), ((6351, 6384), 'tiledb.open', 'tiledb.open', (['self.tiledb_resource'], {}), '(self.tiledb_resource)\n', (6362, 6384), False, 'import tiledb\n'), ((8581, 8595), 'numpy.uint8', 'np.uint8', (['tile'], 
{}), '(tile)\n', (8589, 8595), True, 'import numpy as np\n'), ((8455, 8478), 'numpy.array', 'np.array', (['colored_slice'], {}), '(colored_slice)\n', (8463, 8478), True, 'import numpy as np\n'), ((7122, 7230), 'numpy.pad', 'np.pad', (['data', '[(0, height - data.shape[0]), (0, width - data.shape[1])]', '"""constant"""'], {'constant_values': '[0]'}), "(data, [(0, height - data.shape[0]), (0, width - data.shape[1])],\n 'constant', constant_values=[0])\n", (7128, 7230), True, 'import numpy as np\n'), ((7513, 7634), 'numpy.zeros', 'np.zeros', (["(dataset_tiles['col_max'] - dataset_tiles['col_min'], dataset_tiles[\n 'row_max'] - dataset_tiles['row_min'])"], {}), "((dataset_tiles['col_max'] - dataset_tiles['col_min'], \n dataset_tiles['row_max'] - dataset_tiles['row_min']))\n", (7521, 7634), True, 'import numpy as np\n')] |
import argparse
import json
import numpy as np
import matplotlib
# Select a non-interactive backend before pyplot is imported so the script
# also runs on headless machines (no display server required).
matplotlib.use('Agg')
import matplotlib.pyplot as plt

# Canonical NLI label ordering; not referenced in the code visible here —
# presumably kept for downstream plotting/analysis. TODO confirm before removing.
label2idx = {'contradiction': 0, 'entailment': 1, 'neutral': 2}
def _read_predictions(path):
    """Read a CSV of ``index,prediction`` rows (first header line skipped)
    into an {index: prediction} dict."""
    preds = {}
    with open(path, 'r') as f:
        next(f)  # Take off first line (header)
        for line in f:
            parts = line.strip().split(',')
            preds[int(parts[0])] = parts[1]
    return preds


def _read_labels(path):
    """Read one gold label per line into a numpy array of strings."""
    with open(path, 'r') as f:
        return np.array([line.strip() for line in f])


def main():
    """Compare a bias-only model's predictions against a main model's.

    Reports accuracy on the full evaluation set and on the 'hard' subset
    (examples the biased model gets wrong) and 'easy' subset (examples the
    biased model gets right).
    """
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--biased_preds")
    parser.add_argument("--model_preds")
    parser.add_argument("--lbl_file")
    parser.add_argument("--combine_nonentailments", action='store_true')
    args = parser.parse_args()

    # The three input files share the same CSV shape, so parsing is factored
    # into helpers instead of three near-identical inline loops.
    biased_preds = _read_predictions(args.biased_preds)
    model_preds = _read_predictions(args.model_preds)
    labels = _read_labels(args.lbl_file)

    biased_keys = list(sorted(biased_preds.keys()))
    model_keys = list(sorted(model_preds.keys()))
    print(len(biased_keys), len(model_keys), len(labels))
    print(biased_keys == model_keys)
    keys = biased_keys

    biased_preds = np.array([biased_preds[k] for k in keys])
    model_preds = np.array([model_preds[k] for k in keys])

    # 'hard' = biased model wrong; 'easy' = biased model right.
    hard_mask = biased_preds != labels
    easy_mask = biased_preds == labels
    labels_hard = labels[hard_mask]
    biased_preds_hard = biased_preds[hard_mask]
    model_preds_hard = model_preds[hard_mask]
    labels_easy = labels[easy_mask]
    biased_preds_easy = biased_preds[easy_mask]
    model_preds_easy = model_preds[easy_mask]

    biased_acc = (biased_preds == labels).mean()
    model_acc = (model_preds == labels).mean()
    biased_hard_acc = (biased_preds_hard == labels_hard).mean()
    model_hard_acc = (model_preds_hard == labels_hard).mean()
    biased_easy_acc = (biased_preds_easy == labels_easy).mean()
    model_easy_acc = (model_preds_easy == labels_easy).mean()

    print(f'Full: Biased acc = {biased_acc}, Model acc = {model_acc}')
    print(f'Hard: Biased acc = {biased_hard_acc}, Model acc = {model_hard_acc}')
    print(f'Easy: Biased acc = {biased_easy_acc}, Model acc = {model_easy_acc}')


if __name__ == '__main__':
    main()
| [
"matplotlib.use",
"numpy.array",
"argparse.ArgumentParser"
] | [((66, 87), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (80, 87), False, 'import matplotlib\n'), ((211, 236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (234, 236), False, 'import argparse\n'), ((1573, 1614), 'numpy.array', 'np.array', (['[biased_preds[k] for k in keys]'], {}), '([biased_preds[k] for k in keys])\n', (1581, 1614), True, 'import numpy as np\n'), ((1633, 1673), 'numpy.array', 'np.array', (['[model_preds[k] for k in keys]'], {}), '([model_preds[k] for k in keys])\n', (1641, 1673), True, 'import numpy as np\n'), ((1313, 1329), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1321, 1329), True, 'import numpy as np\n')] |
import pandas as pd
from janitor.utils import skiperror
import numpy as np
import pytest
@pytest.mark.functions
def test_skiperror():
    """skiperror should swallow per-element failures, optionally echoing the
    offending input back or substituting a fixed replacement value."""
    frame = pd.DataFrame({"x": [1, 2, 3, "a"], "y": [1, 2, 3, "b"]})

    def add_one(value):
        return value + 1

    # Without the wrapper, the string element makes apply raise.
    with pytest.raises(Exception):
        frame["x"].apply(add_one)

    wrapped = frame["x"].apply(skiperror(add_one))
    assert (wrapped.values[:-1] == np.array([2, 3, 4])).all() and np.isnan(
        wrapped.values[-1]
    )

    echoed = frame["x"].apply(skiperror(add_one, return_x=True))
    assert (echoed.values == np.array([2, 3, 4, "a"], dtype=object)).all()

    replaced = frame["x"].apply(skiperror(add_one, return_x=False, return_val=5))
    assert (replaced.values == np.array([2, 3, 4, 5])).all()
| [
"janitor.utils.skiperror",
"numpy.array",
"numpy.isnan",
"pytest.raises",
"pandas.DataFrame"
] | [((146, 202), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [1, 2, 3, 'a'], 'y': [1, 2, 3, 'b']}"], {}), "({'x': [1, 2, 3, 'a'], 'y': [1, 2, 3, 'b']})\n", (158, 202), True, 'import pandas as pd\n'), ((301, 325), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (314, 325), False, 'import pytest\n'), ((383, 398), 'janitor.utils.skiperror', 'skiperror', (['func'], {}), '(func)\n', (392, 398), False, 'from janitor.utils import skiperror\n'), ((465, 492), 'numpy.isnan', 'np.isnan', (['result.values[-1]'], {}), '(result.values[-1])\n', (473, 492), True, 'import numpy as np\n'), ((535, 565), 'janitor.utils.skiperror', 'skiperror', (['func'], {'return_x': '(True)'}), '(func, return_x=True)\n', (544, 565), False, 'from janitor.utils import skiperror\n'), ((670, 715), 'janitor.utils.skiperror', 'skiperror', (['func'], {'return_x': '(False)', 'return_val': '(5)'}), '(func, return_x=False, return_val=5)\n', (679, 715), False, 'from janitor.utils import skiperror\n'), ((596, 634), 'numpy.array', 'np.array', (["[2, 3, 4, 'a']"], {'dtype': 'object'}), "([2, 3, 4, 'a'], dtype=object)\n", (604, 634), True, 'import numpy as np\n'), ((746, 768), 'numpy.array', 'np.array', (['[2, 3, 4, 5]'], {}), '([2, 3, 4, 5])\n', (754, 768), True, 'import numpy as np\n'), ((434, 453), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (442, 453), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 11:25:03 2019
@author: lealp
"""
import pandas as pd
# Widen pandas' console display limits so large frames print without
# truncation during interactive inspection.
pd.set_option('display.width', 50000)
pd.set_option('display.max_rows', 50000)
pd.set_option('display.max_columns', 5000)
import numpy as np
import xarray as xr
from extreme_events.extreme_classifier import Extreme_Classifier as EEC
def parse_extremes(x, distribution_type='Positive', b=False):
    """Flag the extreme events in a series.

    Non-finite entries (NaN, +/-inf) are zeroed before fitting the
    extreme-event classifier; entries in the classifier's top category are
    flagged 1, everything else 0.

    :param x: array-like of values (e.g. one time series per grid cell)
    :param str distribution_type: forwarded to Extreme_Classifier
    :param b: forwarded to Extreme_Classifier.predict — TODO confirm meaning
    :return: 0/1 flag array, or the original ``x`` when the guard below fires
    """
    y = np.where(np.abs(x)==np.inf, 0, x)
    y = np.where(np.isnan(y), 0, y)
    # NOTE(review): ``np.all(y) == 0`` is True whenever ANY element of y is
    # zero, not only when all of them are — was ``np.all(y == 0)`` intended?
    # Verify before changing; behavior preserved here.
    if np.all(y) == 0:
        return x
    else:
        EE = EEC(distribution_type=distribution_type, verbose=False)
        EE.fit(y)
        Classified = EE.predict(y, b)
        # The last (highest) category marks the extreme events.
        return np.where(Classified.codes == Classified.categories[-1], 1, 0)
def xarray_parse_extremes(ds, dim=None, dask='allowed', new_dim_name=None, kwargs=None):
    """Apply ``parse_extremes`` along ``dim`` of a DataArray via xr.apply_ufunc.

    :param ds: xarray DataArray to classify
    :param dim: list of core dimension names consumed per call (default ``['time']``)
    :param str dask: dask handling mode forwarded to xr.apply_ufunc
    :param new_dim_name: list with the output core dimension name (default ``['classes']``)
    :param dict kwargs: keyword arguments forwarded to parse_extremes
        (default ``{'b': False, 'distribution_type': 'Positive'}``)
    :return: computed DataArray of extreme-event flags
    """
    # Mutable containers must not be used as default argument values (they
    # would be shared across calls), so the defaults are filled in here.
    if dim is None:
        dim = ['time']
    if new_dim_name is None:
        new_dim_name = ['classes']
    if kwargs is None:
        kwargs = {'b': False, 'distribution_type': 'Positive'}
    filtered = xr.apply_ufunc(parse_extremes,
                            ds,
                            dask=dask,
                            vectorize=True,
                            input_core_dims=[dim],
                            output_core_dims=[new_dim_name],
                            kwargs=kwargs,
                            output_dtypes=[float],
                            join='outer',
                            dataset_fill_value=np.nan,
                            ).compute()
    return filtered
if '__main__' == __name__:
    # Demo: classify extremes in the xarray 'rasm' tutorial dataset
    # (downloads the data on first run) and plot the result.
    from datetime import date, timedelta
    import matplotlib.pyplot as plt

    def get_offset(x, tim_start=(1,1,1)):
        # Convert a "days since tim_start" value into a calendar date.
        days = x # This may work for floats in general, but using integers
                 # is more precise (e.g. days = int(9465.0))
        start = date(*tim_start)      # This is the "days since" part
        delta = timedelta(days)       # Create a time delta object from the number of days
        offset = start + delta        # Add the specified number of days to the start date
        return offset

    # 'rasm' times are stored as raw day counts, so decoding is done manually.
    ds = xr.tutorial.open_dataset('rasm', decode_times=False).load()

    def parse_datetime(time):
        # Map raw day counts to pandas datetimes via get_offset.
        return pd.to_datetime([str(get_offset(x)) for x in time])

    ds.coords['time'] = parse_datetime(ds.coords['time'].values)
    # Single chunk along time: apply_ufunc needs the whole series per cell.
    ds = ds.chunk({'time': -1}).persist()
    ds = xr.decode_cf(ds)

    ds_monthly_anual = xarray_parse_extremes(ds['Tair'] , ['time'], 
                             dask='allowed')

    facet = ds_monthly_anual.plot.contour(x='x', y='y', col='classes', col_wrap=5)
    plt.show()
"numpy.abs",
"numpy.where",
"extreme_events.extreme_classifier.Extreme_Classifier",
"pandas.set_option",
"datetime.timedelta",
"numpy.isnan",
"datetime.date",
"numpy.all",
"xarray.apply_ufunc",
"xarray.tutorial.open_dataset",
"xarray.decode_cf",
"matplotlib.pyplot.show"
] | [((105, 142), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(50000)'], {}), "('display.width', 50000)\n", (118, 142), True, 'import pandas as pd\n'), ((143, 183), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(50000)'], {}), "('display.max_rows', 50000)\n", (156, 183), True, 'import pandas as pd\n'), ((184, 226), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(5000)'], {}), "('display.max_columns', 5000)\n", (197, 226), True, 'import pandas as pd\n'), ((2654, 2670), 'xarray.decode_cf', 'xr.decode_cf', (['ds'], {}), '(ds)\n', (2666, 2670), True, 'import xarray as xr\n'), ((2912, 2922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2920, 2922), True, 'import matplotlib.pyplot as plt\n'), ((477, 488), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (485, 488), True, 'import numpy as np\n'), ((508, 517), 'numpy.all', 'np.all', (['y'], {}), '(y)\n', (514, 517), True, 'import numpy as np\n'), ((587, 642), 'extreme_events.extreme_classifier.Extreme_Classifier', 'EEC', ([], {'distribution_type': 'distribution_type', 'verbose': '(False)'}), '(distribution_type=distribution_type, verbose=False)\n', (590, 642), True, 'from extreme_events.extreme_classifier import Extreme_Classifier as EEC\n'), ((750, 811), 'numpy.where', 'np.where', (['(Classified.codes == Classified.categories[-1])', '(1)', '(0)'], {}), '(Classified.codes == Classified.categories[-1], 1, 0)\n', (758, 811), True, 'import numpy as np\n'), ((2045, 2061), 'datetime.date', 'date', (['*tim_start'], {}), '(*tim_start)\n', (2049, 2061), False, 'from datetime import date, timedelta\n'), ((2124, 2139), 'datetime.timedelta', 'timedelta', (['days'], {}), '(days)\n', (2133, 2139), False, 'from datetime import date, timedelta\n'), ((428, 437), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (434, 437), True, 'import numpy as np\n'), ((976, 1180), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['parse_extremes', 'ds'], {'dask': 'dask', 'vectorize': 
'(True)', 'input_core_dims': '[dim]', 'output_core_dims': '[new_dim_name]', 'kwargs': 'kwargs', 'output_dtypes': '[float]', 'join': '"""outer"""', 'dataset_fill_value': 'np.nan'}), "(parse_extremes, ds, dask=dask, vectorize=True,\n input_core_dims=[dim], output_core_dims=[new_dim_name], kwargs=kwargs,\n output_dtypes=[float], join='outer', dataset_fill_value=np.nan)\n", (990, 1180), True, 'import xarray as xr\n'), ((2357, 2409), 'xarray.tutorial.open_dataset', 'xr.tutorial.open_dataset', (['"""rasm"""'], {'decode_times': '(False)'}), "('rasm', decode_times=False)\n", (2381, 2409), True, 'import xarray as xr\n')] |
#import plotly.graph_objects as go
import plotly.graph_objs as go
import numpy as np
N = 75000
N=1000
fig = go.Figure()
fig.add_trace(
go.Scatter(
x = np.random.randn(N),
y = np.random.randn(N),
mode = 'markers',
marker = dict(
line = dict(
width = 1,
color = 'DarkSlateGrey')
)
)
)
#fig.update_layout(title_text = 'SVG')
fig.update_layout()
fig.show()
| [
"plotly.graph_objs.Figure",
"numpy.random.randn"
] | [((110, 121), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (119, 121), True, 'import plotly.graph_objs as go\n'), ((165, 183), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (180, 183), True, 'import numpy as np\n'), ((197, 215), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (212, 215), True, 'import numpy as np\n')] |
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
from collections import namedtuple
import numpy as np
import popart
import pytest
import re
# importing test_session and test_util requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
from test_session import PopartTestSession
import test_util as tu
@tu.requires_ipu_model
def test_disabled_virtual_graphs():
    """
    Verify that building a pipelined session without virtual graphs
    (virtualGraphMode == Off) raises a popart exception.
    """
    builder, op0_out, op1_out, op2_out, op3_out, anchor_map = get_simple_linear_model(
    )

    options = popart.SessionOptions()
    options.enablePipelining = True
    options.virtualGraphMode = popart.VirtualGraphMode.Off

    # Session construction itself must throw: pipelining needs >1 pipeline stage.
    with pytest.raises(popart.popart_exception) as err:
        popart.InferenceSession(fnModel=builder.getModelProto(),
                                dataFlow=popart.DataFlow(10, anchor_map),
                                userOptions=options,
                                deviceInfo=tu.create_test_device())
    assert err.value.args[0].startswith("Pipelining requires more than")
@tu.requires_ipu_model
def test_one_ipu():
    """
    Check that an error is thrown when pipelining is requested but the
    whole model is placed on a single IPU (pipelining needs stages on
    more than one virtual graph).
    """
    builder = popart.Builder()
    shape_d = [10]
    # Fix: removed unused local `shape_l` (no label tensor is created here).
    d0 = builder.addInputTensor(popart.TensorInfo("FLOAT", shape_d))
    d1 = builder.addInputTensor(popart.TensorInfo("FLOAT", shape_d))
    op0_out = builder.aiOnnx.sin([d0], "s0")
    op1_out = builder.aiOnnx.exp([d1], "r0")
    op2_out = builder.aiOnnx.mul([op0_out, op1_out], "m0")
    builder.addOutputTensor(op2_out)

    opts = popart.SessionOptions()
    opts.enablePipelining = True
    opts.virtualGraphMode = popart.VirtualGraphMode.Manual  # i.e. use 1 ipu

    # Two pipeline stages, but every op is mapped to virtual graph 0,
    # so only a single IPU is in use.
    builder.pipelineStage(op0_out, 0)
    builder.virtualGraph(op0_out, 0)
    builder.pipelineStage(op1_out, 0)
    builder.virtualGraph(op1_out, 0)
    builder.pipelineStage(op2_out, 1)
    builder.virtualGraph(op2_out, 0)

    with pytest.raises(popart.popart_exception) as e_info:
        session = popart.InferenceSession(fnModel=builder.getModelProto(),
                                          dataFlow=popart.DataFlow(
                                              10, [op2_out, "loss"]),
                                          userOptions=opts,
                                          deviceInfo=tu.create_test_device())
        session.prepareDevice()
    assert e_info.value.args[0].startswith("Pipelining requires more than")
@tu.requires_ipu_model
def test_enabled_recomputation():
    """
    In this test we check that NO error is thrown when doing pipelining
    if recomputation is enabled
    """
    builder, op0_out, op1_out, op2_out, op3_out, anchor_map = get_simple_linear_model(
    )
    opts = popart.SessionOptions()
    opts.enablePipelining = True
    opts.virtualGraphMode = popart.VirtualGraphMode.Manual
    opts.autoRecomputation = popart.RecomputationType.Standard
    # Two pipeline stages: op0 on IPU 0, the remaining ops on IPU 1.
    builder.virtualGraph(op0_out, 0)
    builder.virtualGraph(op1_out, 1)
    builder.virtualGraph(op2_out, 1)
    builder.virtualGraph(op3_out, 1)
    # Constructing the session is the whole test: it must not raise.
    session = popart.InferenceSession(fnModel=builder.getModelProto(),
                                      dataFlow=popart.DataFlow(10, anchor_map),
                                      userOptions=opts,
                                      deviceInfo=tu.create_test_device(
                                          numIpus=2, tilesPerIPU=20))
@tu.requires_ipu_model
def test_stream_tensors_to_multiple_ipus():
    """
    Streaming an input to Ops on multiple IPUs throws an error
    09/07/2019 Since D12445 this test no longer raises an exception. By
    default, stream tensors are now replicated by streaming to a single
    IPU, then copied across to the other IPUs where they are needed.
    Leaving this test in to verify that this remains the case
    """
    # streamInputToOp1AndOp2=True makes input d0 feed ops on both IPU 0
    # (op0) and IPU 1 (op1): a stream tensor needed on two IPUs.
    builder, op0_out, op1_out, op2_out, op3_out, anchor_map = get_simple_linear_model(
        streamInputToOp1AndOp2=True)
    opts = popart.SessionOptions()
    opts.enablePipelining = True
    opts.virtualGraphMode = popart.VirtualGraphMode.Manual
    builder.virtualGraph(op0_out, 0)
    builder.virtualGraph(op1_out, 1)
    builder.virtualGraph(op2_out, 1)
    builder.virtualGraph(op3_out, 1)
    # Constructing the session must succeed (no exception raised).
    session = popart.InferenceSession(fnModel=builder.getModelProto(),
                                      dataFlow=popart.DataFlow(10, anchor_map),
                                      userOptions=opts,
                                      deviceInfo=tu.create_test_device(
                                          numIpus=2, tilesPerIPU=20))
@tu.requires_ipu_model
def test_sharding_multi_source():
    """
    Branched sharding does not merge IPU Copies with pipelining
    e.g. Op0 -> Op2
                 ^
         Op1 ----'
    where the vGraph split is IPU0 : {Op0}, IPU1 : {Op1}, IPU2 : {Op2}
    """
    builder = popart.Builder()
    shape_d = [10]
    shape_l = []
    d0 = builder.addInputTensor(popart.TensorInfo("FLOAT", shape_d))
    d1 = builder.addInputTensor(popart.TensorInfo("FLOAT", shape_d))
    l0 = builder.addInputTensor(popart.TensorInfo("INT32", shape_l))
    # Two independent branches (sin, exp) merge at the mul op.
    op0_out = builder.aiOnnx.sin([d0], "s0")
    op1_out = builder.aiOnnx.exp([d1], "r0")
    op2_out = builder.aiOnnx.mul([op0_out, op1_out], "m0")
    nll = builder.aiGraphcore.nllloss([op2_out, l0])
    opts = popart.SessionOptions()
    opts.enablePipelining = True
    opts.virtualGraphMode = popart.VirtualGraphMode.Manual
    # One branch per virtual graph; the merge point lives on a third IPU.
    builder.virtualGraph(op0_out, 0)
    builder.virtualGraph(op1_out, 1)
    builder.virtualGraph(op2_out, 2)
    builder.virtualGraph(nll, 2)
    # Constructing the session must succeed (no exception raised).
    session = popart.InferenceSession(fnModel=builder.getModelProto(),
                                      dataFlow=popart.DataFlow(10, [op2_out]),
                                      userOptions=opts,
                                      deviceInfo=tu.create_test_device(
                                          numIpus=3, tilesPerIPU=20))
@tu.requires_ipu_model
def test_inference_min_batches():
    """
    Pipelined inference needs batchesPerStep to be at least the number of
    pipeline stages, so the pipeline can fill and flush. The minimum must
    work; one batch fewer must throw.
    """
    minBatches = 3  # == numIpus == numPipelineStages

    common_args = dict(doSharding=True,
                       doPipelining=True,
                       doTraining=False,
                       doDevicex=False)

    # Exactly the minimum number of batches: must succeed.
    get_model_anchors(batchesPerStep=minBatches, **common_args)

    # One batch fewer than the pipeline depth: must raise.
    with pytest.raises(popart.popart_exception) as e_info:
        get_model_anchors(batchesPerStep=minBatches - 1, **common_args)
    assert e_info.value.args[0].startswith(
        "For pipelining, depth (batchesPerStep) must")
@tu.requires_ipu_model
def test_training_min_batches():
    """
    Pipelined training needs batchesPerStep to be at least the number of
    pipeline stages (forward + backward), so the pipeline can fill and
    flush. The minimum must work; one batch fewer must throw.
    """
    minBatches = 5  # == 2 * (numIpus-1) + 1 == numPipelineStages

    common_args = dict(doSharding=True,
                       doPipelining=True,
                       doTraining=True,
                       doDevicex=False)

    # Exactly the minimum number of batches: must succeed.
    get_model_anchors(batchesPerStep=minBatches, **common_args)

    # One batch fewer than the pipeline depth: must raise.
    with pytest.raises(popart.popart_exception) as e_info:
        get_model_anchors(batchesPerStep=minBatches - 1, **common_args)
    assert e_info.value.args[0].startswith(
        "For pipelining, depth (batchesPerStep) must")
# (builder-type-name, numpy-dtype) pair used to parametrize the tests below
# over the element type of the streamed input tensor. `builder_type` is the
# popart TensorInfo type string; `np_type` is the matching host-side dtype.
_DataType = namedtuple('_DataType', ['builder_type', 'np_type'])
_INT8 = _DataType('INT8', np.int8)
_UINT8 = _DataType('UINT8', np.uint8)
@tu.requires_ipu_model
@pytest.mark.parametrize("inputType", [_INT8, _UINT8, None])
def test_output_matches_train(inputType):
    """
    In this test we check that the anchors of equivalent non-sharded, sharded
    and non-pipelined, and sharded and pipelined models are equal when doing
    training. We expect only the first output and weight update to be the same
    as non-pipelined models
    """
    bps = 8
    singleIpu_anchors = get_model_anchors(doSharding=False,
                                          doPipelining=False,
                                          batchesPerStep=bps,
                                          doTraining=True,
                                          inputType=inputType)
    multiIpu_anchors = get_model_anchors(doSharding=True,
                                         doPipelining=False,
                                         batchesPerStep=bps,
                                         doTraining=True,
                                         inputType=inputType)
    pipelined_anchors = get_model_anchors(doSharding=True,
                                          doPipelining=True,
                                          batchesPerStep=bps,
                                          doTraining=True,
                                          inputType=inputType)
    # TODO, depends on T9630, add a case with grad accumulation. All tensor
    # outputs should be exactly the same when doing pipelined vs non-pipelined
    # when grad accumulation is turned on
    # Sharding without pipelining must not change any anchor values.
    for (tId1, t1), (tId2, t2) in zip(singleIpu_anchors.items(),
                                      multiIpu_anchors.items()):
        assert np.allclose(t1, t2)
    # Expect only the anchors from the first batch to be equal. After that, the
    # continuous gradient accumulation option causes model parameters to diverge
    for (tId1, t1), (tId2, t2) in zip(singleIpu_anchors.items(),
                                      pipelined_anchors.items()):
        for i in range(np.shape(t1)[0]):
            print("singleIpu , batch: ", i, tId1, np.sum(t1[i]))
            print("pipelinedIpu, batch: ", i, tId2, np.sum(t2[i]))
        assert np.allclose(t1[0], t2[0])
@tu.requires_ipu_model
@pytest.mark.parametrize("inputType", [_INT8, _UINT8, None])
def test_acts_match_restored_acts(inputType):
    """
    In this test we check that the stashed tensors and their equivalent
    Restored tensors have the same values for all batches. This confirms
    that the schedule of restoring and streaming anchors is correct
    How do we know they're not both wrong? Take this example where the
    streamed input is stashed. Check that it matches the raw data input
    that is fed to the StepIO
    """
    bps = 8
    pipelined_anchors = get_model_anchors(doSharding=True,
                                          doPipelining=True,
                                          batchesPerStep=bps,
                                          doTraining=True,
                                          anchorRestoredTensors=True,
                                          returnRawInput=True,
                                          inputType=inputType)
    # Debug aid: dump a per-batch checksum of every anchored tensor.
    for (tId, t) in pipelined_anchors.items():
        for i in range(np.shape(t)[0]):
            print("batch: ", i, tId, np.sum(t[i]))
    # Can't seem to make the cast op produce a tensor with id "input", so we
    # have to do this instead.
    input_name = "Cast:0" if inputType is not None else "input"
    # Restored activation must equal the stashed (anchored) activation ...
    assert np.allclose(
        pipelined_anchors[popart.reservedRestoredPrefix() + "Exp:0"],
        pipelined_anchors["Exp:0"])
    # ... and the restored input must equal the anchored input ...
    assert np.allclose(
        pipelined_anchors[popart.reservedRestoredPrefix() + input_name],
        pipelined_anchors[input_name])
    # ... which in turn must equal the raw host data fed via the StepIO.
    assert np.allclose(pipelined_anchors["input_raw"],
                       pipelined_anchors[input_name])
@tu.requires_ipu_model
@pytest.mark.parametrize("inputType", [_INT8, _UINT8, None])
def test_output_matches_infer(inputType):
    """
    Check that the anchors of an unsharded model, a sharded-but-unpipelined
    model, and a sharded-and-pipelined model all agree during inference.
    """
    bps = 8

    def run(sharded, pipelined):
        # All three variants differ only in sharding/pipelining flags.
        return get_model_anchors(doSharding=sharded,
                                 doPipelining=pipelined,
                                 batchesPerStep=bps,
                                 doTraining=False,
                                 inputType=inputType)

    singleIpu_anchors = run(False, False)
    multiIpu_anchors = run(True, False)
    pipelined_anchors = run(True, True)

    def compare(label_a, anchors_a, label_b, anchors_b):
        # Print per-batch checksums for debugging, then assert equality.
        for (tId1, t1), (tId2, t2) in zip(anchors_a.items(),
                                          anchors_b.items()):
            for i in range(np.shape(t1)[0]):
                print(label_a, i, tId1, np.sum(t1[i]))
                print(label_b, i, tId2, np.sum(t2[i]))
            assert np.allclose(t1, t2)

    compare("singleIpu, batch: ", singleIpu_anchors,
            "multiIpu , batch: ", multiIpu_anchors)
    compare("singleIpu , batch: ", singleIpu_anchors,
            "pipelinedIpu, batch: ", pipelined_anchors)
# Model
# <--- ipu0 ----> <--------- ipu1 ---> <------------ ipu2 ------------>
#
# d0 --|-- Sin --|-- Exp --|
# |-- Conv --|-- Reshape --|-- Softmax --> out
# w0 --|
def get_model_anchors(doSharding,
                      doPipelining,
                      batchesPerStep,
                      doTraining,
                      doProfiling=False,
                      doDevicex=True,
                      anchorRestoredTensors=False,
                      returnRawInput=False,
                      inputType=None):
    """
    Build and run the sin/exp/conv/softmax reference model (see diagram
    above) and return its anchors after a single session.run().

    Args:
        doSharding: split the model over 3 IPUs (else everything on 1 IPU).
        doPipelining: enable pipelined execution.
        batchesPerStep: number of batches executed per session.run().
        doTraining: build a TrainingSession (else an InferenceSession).
        doProfiling: save a report via gcprofile after running.
        doDevicex: if False, stop after session construction and return None.
        anchorRestoredTensors: also anchor the pipeline-restored tensors.
        returnRawInput: add the raw host input under anchor key "input_raw".
        inputType: optional _DataType; when given the input is streamed in
            that type and cast to FLOAT on-device.

    Returns:
        dict of anchor name -> numpy array, or None if doDevicex is False.
    """
    # Fixed seed so all variants (sharded/pipelined/...) see identical data.
    np.random.seed(seed=1)
    builder = popart.Builder()
    batchSize = 2
    shape_d0 = [batchSize, 2, 4, 4]
    shape_l0 = [batchSize]
    if inputType is not None:
        d0 = builder.addInputTensor(
            popart.TensorInfo(inputType.builder_type, shape_d0))
    else:
        d0 = builder.addInputTensor(popart.TensorInfo("FLOAT", shape_d0))
    data_w0 = np.ones(shape=[2, 2, 3, 3]).astype(np.float32)
    w0 = builder.addInitializedInputTensor(data_w0)
    l0 = builder.addInputTensor(popart.TensorInfo("INT32", shape_l0))
    # Non-float inputs are cast to FLOAT before entering the model proper.
    if inputType is not None:
        d0_cast = builder.aiOnnx.cast([d0], "FLOAT")
    else:
        d0_cast = d0
    s0 = builder.aiOnnx.sin([d0_cast], "s0")
    e0 = builder.aiOnnx.exp([s0], "e0")
    c0 = builder.aiOnnx.conv([e0, w0],
                             dilations=[1, 1],
                             pads=[1, 1, 1, 1],
                             strides=[1, 1],
                             debugContext="c0")
    r0 = builder.reshape_const(builder.aiOnnx, [c0], [batchSize, 32])
    out = builder.aiOnnx.softmax([r0], axis=1, debugContext="sfm")
    nll = builder.aiGraphcore.nllloss([out, l0])
    art = popart.AnchorReturnType("All")
    anchor_map = {nll: art, w0: art, e0: art}
    if doTraining is True:
        anchor_map[popart.reservedGradientPrefix() + d0_cast] = art
        if doPipelining is True and anchorRestoredTensors is True:
            # Also anchor the tensors restored from the pipeline stashes so
            # tests can compare them against the stashed originals.
            anchor_map[popart.reservedRestoredPrefix() + e0] = art
            anchor_map[d0_cast] = art
            anchor_map[popart.reservedRestoredPrefix() + d0_cast] = art
    opts = popart.SessionOptions()
    opts.reportOptions = {"showExecutionSteps": "true"}
    opts.enablePipelining = doPipelining
    if doSharding is False:
        numIPUs = 1
    else:
        # Shard over 3 IPUs: {cast, sin} -> IPU0, {exp, conv} -> IPU1,
        # {reshape, softmax, nll} -> IPU2 (matches the diagram above).
        opts.virtualGraphMode = popart.VirtualGraphMode.Manual
        numIPUs = 3
        if inputType is not None:
            builder.virtualGraph(d0_cast, 0)
        builder.virtualGraph(s0, 0)
        builder.virtualGraph(e0, 1)
        builder.virtualGraph(c0, 1)
        builder.virtualGraph(r0, 2)
        builder.virtualGraph(out, 2)
        builder.virtualGraph(nll, 2)
    if doTraining is True:
        session = popart.TrainingSession(
            fnModel=builder.getModelProto(),
            dataFlow=popart.DataFlow(batchesPerStep, anchor_map),
            loss=nll,
            optimizer=popart.ConstSGD(0.01),
            userOptions=opts,
            deviceInfo=tu.create_test_device(numIpus=numIPUs, tilesPerIPU=20))
    else:
        session = popart.InferenceSession(
            fnModel=builder.getModelProto(),
            dataFlow=popart.DataFlow(batchesPerStep, anchor_map),
            userOptions=opts,
            deviceInfo=tu.create_test_device(numIpus=numIPUs, tilesPerIPU=20))
    if doDevicex is False:
        return None
    anchors = session.initAnchorArrays()
    session.prepareDevice()
    # Host-side data gains a leading batchesPerStep dimension when bps > 1.
    if batchesPerStep > 1:
        shape_d0.insert(0, batchesPerStep)
        shape_l0.insert(0, batchesPerStep)
    d0_host_type = inputType.np_type if inputType is not None else np.float32
    data = np.random.uniform(low=-10.0, high=10.0,
                             size=shape_d0).astype(d0_host_type)
    # Labels must index into the softmax output (32 classes per sample).
    classes = np.prod(shape_d0) / (batchSize * batchesPerStep)
    label = np.random.randint(low=0, high=classes,
                              size=shape_l0).astype(np.int32)
    inputs = {d0: data, l0: label}
    stepio = popart.PyStepIO(inputs, anchors)
    session.weightsFromHost()
    session.run(stepio)
    if doProfiling is True:
        from gcprofile import save_popart_report
        save_popart_report(session)
    if returnRawInput is True:
        anchors["input_raw"] = data
    return anchors
def get_simple_linear_model(streamInputToOp1AndOp2=False):
    """
    Build a small linear chain sin -> mul -> exp -> exp and return the
    builder, the four op outputs and an anchor map for the final output.

    When streamInputToOp1AndOp2 is True the first input tensor feeds both
    the sin op and the mul op (so one stream tensor is consumed by two
    ops); otherwise the mul op consumes the second input tensor.
    """
    builder = popart.Builder()
    shape_d = [10]
    d0 = builder.addInputTensor(popart.TensorInfo("FLOAT", shape_d))
    d1 = builder.addInputTensor(popart.TensorInfo("FLOAT", shape_d))

    op0_out = builder.aiOnnx.sin([d0], "s0")
    mul_rhs = d0 if streamInputToOp1AndOp2 is True else d1
    op1_out = builder.aiOnnx.mul([op0_out, mul_rhs])
    op2_out = builder.aiOnnx.exp([op1_out], "e0")
    op3_out = builder.aiOnnx.exp([op2_out], "e1")
    builder.addOutputTensor(op3_out)

    anchor_map = {op3_out: popart.AnchorReturnType("All")}
    return builder, op0_out, op1_out, op2_out, op3_out, anchor_map
@tu.requires_ipu_model
def test_pipeline_stage_errors():
    """
    Check the error messages for two invalid pipeline-stage annotations:
    (1) a single pipeline stage spanning two virtual graphs, and
    (2) only a subset of ops having a pipeline stage set.
    """
    dummy_data = np.zeros(2, dtype=np.float32)
    bps = 2
    # init_builder closes over these lists; each sub-case below rebinds
    # them before calling session.prepare().
    vgraph_ids = []
    ps_ids = []
    def init_builder(builder):
        d0 = builder.addInputTensor(dummy_data, 'data0')
        d1 = builder.addInputTensor(dummy_data, 'data1')
        d2 = builder.addInputTensor(dummy_data, 'data2')
        s0 = builder.aiOnnx.sin([d0], "s0")
        m0 = builder.aiOnnx.mul([s0, d0])
        e0 = builder.aiOnnx.exp([m0])
        e1 = builder.aiOnnx.exp([e0])
        loss = builder.aiGraphcore.identityloss([e1])
        builder.addOutputTensor(loss)
        # A `None` id means "leave this op's annotation unset".
        print(f'Setting virtual graphs to {vgraph_ids}')
        for tid, vgid in zip((s0, m0, e0, e1, loss), vgraph_ids):
            if vgid is not None:
                builder.virtualGraph(tid, vgid)
        print(f'Setting pipeline stages to {ps_ids}')
        for tid, psid in zip((s0, m0, e0, e1, loss), ps_ids):
            if psid is not None:
                builder.pipelineStage(tid, psid)
        return [loss]
    session = PopartTestSession()
    session.options.virtualGraphMode = popart.VirtualGraphMode.Manual
    session.options.enablePipelining = True
    session.device = 'ipu_model'
    session.numIPUs = 2
    session.batchesPerStep = bps
    # test a pipeline stage appearing on multiple virtual graphs
    vgraph_ids = [0, 0, 0, 1, 1]
    ps_ids = [0, 0, 1, 1, 1]
    with pytest.raises(popart.popart_exception) as e_info:
        session.prepare(init_builder)
    emsg = e_info.value.args[0]
    assert re.match('Ops .* have the same pipeline stage 1,.*',
                    emsg) is not None
    # test not all ops having a pipeline stage set
    vgraph_ids = [0, 0, 1, 1, 1]
    ps_ids = [0, 0, None, 1, 1]
    with pytest.raises(popart.popart_exception) as e_info:
        session.prepare(init_builder)
    emsg = e_info.value.args[0]
    assert emsg.startswith('Only some ops have had their pipeline stage set.')
@tu.requires_ipu_model
def test_pipeline_stages_backwards_through_ipus():
    """
    Check a pipelined model still computes the correct result when the
    pipeline stages run 'backwards' through the IPUs: stage 0 on virtual
    graph 1 and stage 1 on virtual graph 0.
    """
    dummy_data = np.array([0.5, 1.0], dtype=np.float32)
    bps = 2
    # Fix: removed unused locals `vgraph_ids` and `ps_ids` (copied over
    # from test_pipeline_stage_errors but never read here).

    def init_builder(builder):
        d0 = builder.addInputTensor(dummy_data, 'data0')
        s0 = builder.aiOnnx.sin([d0], "s0")
        m0 = builder.aiOnnx.mul([s0, d0])
        e0 = builder.aiOnnx.exp([m0])
        e1 = builder.aiOnnx.exp([e0], 'output')
        loss = builder.aiGraphcore.identityloss([e1])
        builder.addOutputTensor(loss)

        stage0 = [s0, m0]
        stage1 = [e0, e1, loss]

        # Deliberately reversed: stage 0 runs on IPU 1, stage 1 on IPU 0.
        stage0_vgraph = 1
        stage1_vgraph = 0

        for tid in stage0:
            builder.virtualGraph(tid, stage0_vgraph)
            builder.pipelineStage(tid, 0)

        for tid in stage1:
            builder.virtualGraph(tid, stage1_vgraph)
            builder.pipelineStage(tid, 1)

        return [e1]

    def ref():
        # numpy reference for one micro batch: exp(exp(sin(d0) * d0)).
        d0 = dummy_data
        s0 = np.sin(d0)
        m0 = s0 * d0
        e0 = np.exp(m0)
        e1 = np.exp(e0)
        return e1

    session = PopartTestSession()
    session.options.virtualGraphMode = popart.VirtualGraphMode.Manual
    session.options.enablePipelining = True
    session.device = 'ipu_model'
    session.numIPUs = 2
    session.batchesPerStep = bps

    # test a pipeline stage appearing on multiple virtual graphs
    session.prepare(init_builder)
    pipelineAnchors = session.run()
    assert len(pipelineAnchors) == 1
    pipelineAnchors = [v for k, v in pipelineAnchors.items()]
    pipelineAnchor = pipelineAnchors[0]
    print(pipelineAnchor)
    print(ref())
    assert np.allclose(pipelineAnchor[0], ref())
@tu.requires_ipu_model
def test_multiple_stages_per_virtual_graph_inference():
    """
    Run an inference model whose three pipeline stages are spread over two
    virtual graphs (stages 0 and 2 share vGraph 0) and compare the result
    against a numpy reference.
    """
    bps = 4
    dummy_data = np.random.rand(2, 2).astype(np.float32)
    data = np.random.rand(bps, 2, 2).astype(np.float32)
    weights = np.random.rand(2, 2).astype(np.float32)
    # Fix: removed unused locals `vgraph_ids` and `ps_ids` (nothing read them).

    def init_builder(builder):
        # `dummy_data` only fixes shape/dtype; real data is fed at run().
        d0 = builder.addInputTensor(dummy_data, 'data0')
        w0 = builder.addInitializedInputTensor(weights)
        mm0 = builder.aiOnnx.matmul([d0, w0], "mm0")
        s0 = builder.aiOnnx.sin([mm0])
        mm1 = builder.aiOnnx.matmul([s0, w0], "mm1")
        loss = builder.aiGraphcore.identityloss([mm1])
        builder.addOutputTensor(loss)

        # Stages 0 and 2 both live on virtual graph 0; stage 1 on vGraph 1.
        builder.pipelineStage(mm0, 0)
        builder.pipelineStage(s0, 1)
        builder.pipelineStage(mm1, 2)
        builder.pipelineStage(loss, 2)

        builder.virtualGraph(mm0, 0)
        builder.virtualGraph(s0, 1)
        builder.virtualGraph(mm1, 0)
        builder.virtualGraph(loss, 0)

        return [mm1]

    def ref():
        # numpy reference for the full step (batch dim broadcasts through matmul).
        mm0 = np.matmul(data, weights)
        s0 = np.sin(mm0)
        mm1 = np.matmul(s0, weights)
        return mm1

    session = PopartTestSession()
    session.options.virtualGraphMode = popart.VirtualGraphMode.Manual
    session.options.enablePipelining = True
    session.device = 'ipu_model'
    session.numIPUs = 2
    session.batchesPerStep = bps

    # test a pipeline stage appearing on multiple virtual graphs
    session.prepare(init_builder)
    sessionAnchors = session.run({'data0': data})
    assert len(sessionAnchors) == 1
    sessionAnchors = [v for k, v in sessionAnchors.items()][0]
    print(sessionAnchors)
    print()

    refAnchors = ref()
    print(refAnchors)
    assert np.allclose(sessionAnchors, refAnchors)
# run the same model with and without revisiting ipus and compare the resultant weights.
@tu.requires_ipu_model
@pytest.mark.parametrize("inputType", [_INT8, _UINT8, None])
def test_multiple_stages_per_virtual_graph_training(inputType):
    """
    Train the same model with and without pipelining (multiple pipeline
    stages on one virtual graph) and check the final result and the
    updated weights are identical.
    """
    accumulation_factor = 5
    micro_batches_per_step = 5
    bps = micro_batches_per_step // accumulation_factor
    data_type = inputType.np_type if inputType is not None else np.float32
    dummy_data = np.random.rand(2, 2).astype(data_type)
    data = np.random.rand(accumulation_factor, 2, 2).astype(data_type)
    weight_data = np.random.rand(2, 2).astype(np.float32)
    def run_test(set_pipeline_stages):
        # Filled with the post-training weight values via readWeights below.
        weights = {}
        def init_builder(builder):
            d0 = builder.addInputTensor(dummy_data, 'data0')
            w0 = builder.addInitializedInputTensor(weight_data)
            weights[w0] = np.empty(shape=weight_data.shape,
                                     dtype=weight_data.dtype)
            if inputType is not None:
                d0_float = builder.aiOnnx.cast([d0], "FLOAT")
                t0 = builder.aiOnnx.matmul([d0_float, w0])
            else:
                t0 = builder.aiOnnx.matmul([d0, w0])
            t1 = builder.aiOnnx.sin([t0])
            t2 = builder.aiOnnx.matmul([t1, w0])
            loss = builder.aiGraphcore.identityloss([t2])
            builder.addOutputTensor(loss)
            if set_pipeline_stages:
                # Stages 0 and 2 share virtual graph 0; stage 1 is on vGraph 1.
                if inputType is not None:
                    builder.pipelineStage(d0_float, 0)
                builder.pipelineStage(t0, 0)
                builder.pipelineStage(t1, 1)
                builder.pipelineStage(t2, 2)
                builder.pipelineStage(loss, 2)
                if inputType is not None:
                    builder.virtualGraph(d0_float, 0)
                builder.virtualGraph(t0, 0)
                builder.virtualGraph(t1, 1)
                builder.virtualGraph(t2, 0)
                builder.virtualGraph(loss, 0)
            return [loss]
        session = PopartTestSession()
        session.mode = 'train'
        session.options.enablePipelining = set_pipeline_stages
        session.device = 'ipu_model'
        if set_pipeline_stages:
            session.numIPUs = 2
            session.options.virtualGraphMode = popart.VirtualGraphMode.Manual
        session.batchesPerStep = bps
        session.options.enableGradientAccumulation = True
        session.options.accumulationFactor = accumulation_factor
        # test a pipeline stage appearing on multiple virtual graphs
        session.prepare(init_builder)
        sessionAnchors = session.run({'data0': data})
        assert len(sessionAnchors) == 1
        sessionAnchor = [v for k, v in sessionAnchors.items()][0]
        # Read the trained weights back from the device.
        session._session.weightsToHost()
        weightsIo = popart.PyWeightsIO(weights)
        session._session.readWeights(weightsIo)
        assert len(weights) == 1
        weights = [v for k, v in weights.items()]
        return weights[0], sessionAnchor
    w0, r0 = run_test(False)
    w1, r1 = run_test(True)
    print("Single Ipu with gradient accumulation:")
    print("  Result:")
    print(f"    {r0}")
    print("  Weights:")
    print(f"    {w0}")
    print()
    print("Pipelining with multiple stages per ipu:")
    print("  Result:")
    print(f"    {r1}")
    print("  Weights:")
    print(f"    {w1}")
    assert np.allclose(r0, r1)
    assert np.allclose(w0, w1)
# run the same model with and without recomputation and check the updated weights
@tu.requires_ipu_model
@pytest.mark.parametrize("inputType", [_INT8, _UINT8, None])
def test_recomputation(inputType):
    """
    Run the same pipelined training model with and without auto
    recomputation and check the updated weights are identical.
    """
    accumulationFactor = 3
    microBatchesPerStep = 3
    bps = microBatchesPerStep // accumulationFactor
    # Integer input types use a scale factor of 1; float inputs use 0.1.
    data_type, f = (inputType.np_type,
                    1) if inputType is not None else (np.float32, 0.1)
    dummy_data = np.zeros((2, 2)).astype(data_type)
    data = np.array([i for i in range(accumulationFactor * 2 * 2)
                     ]).astype(data_type) * f
    data = np.reshape(data, (accumulationFactor, 2, 2))
    weight_data = np.array([i for i in range(2 * 2)]).astype(np.float32) * 0.25
    weight_data = np.reshape(weight_data, (2, 2))
    def run_test(enable_recomputation):
        # Filled with the post-training weight values via readWeights below.
        weights = {}
        def init_builder(builder):
            d0 = builder.addInputTensor(dummy_data, 'data0')
            w0 = builder.addInitializedInputTensor(weight_data)
            weights[w0] = np.empty(shape=weight_data.shape,
                                     dtype=weight_data.dtype)
            if inputType is not None:
                d0_float = builder.aiOnnx.cast([d0], "FLOAT")
                t0 = builder.aiOnnx.mul([d0_float, w0])
            else:
                t0 = builder.aiOnnx.mul([d0, w0])
            t1 = builder.aiOnnx.sigmoid([t0])
            t2 = builder.aiGraphcore.scale([t1], 2.0)
            loss = builder.aiGraphcore.identityloss([t2])
            # Everything on vGraph 0 except the loss, giving two pipeline stages.
            if inputType is not None:
                builder.virtualGraph(d0_float, 0)
            for t in (t0, t1, t2):
                builder.virtualGraph(t, 0)
            builder.virtualGraph(loss, 1)
            return [loss]
        session = PopartTestSession()
        session.device = 'ipu_model'
        session.numIPUs = 2
        session.mode = 'train'
        session.options.virtualGraphMode = popart.VirtualGraphMode.Manual
        session.options.enablePipelining = True
        if enable_recomputation:
            session.options.autoRecomputation = popart.RecomputationType.Standard
        session.options.accumulationFactor = accumulationFactor
        session.options.enableGradientAccumulation = True
        session.prepare(init_builder)
        anchors = session.run({'data0': data})
        # return the weights
        session._session.weightsToHost()
        weightsIo = popart.PyWeightsIO(weights)
        session._session.readWeights(weightsIo)
        assert len(weights) == 1
        weights = [v for k, v in weights.items()]
        return weights[0]
    w0 = run_test(False)
    w1 = run_test(True)
    print(w0)
    print()
    print(w1)
    print()
    diff = w0 - w1
    print(diff)
    # Recomputation must not change the numerics at all (exact equality).
    assert np.array_equal(w0, w1)
# Test that pipeline IpuCopyOpx handles internal aliases correctly. Expectation
# that the ConatOp output contains such internal aliases and the pipelined
# program compiles successfully.
def test_internal_alias_ipucopy():
    """
    Compile a pipelined model that copies a concat output (which contains
    internal aliases, as both concat inputs are the same tensor) between
    IPUs; prepareDevice() and run() must succeed.
    """
    builder = popart.Builder()
    with builder.virtualGraph(0), builder.pipelineStage(0):
        model_input = builder.addInputTensor(
            popart.TensorInfo("FLOAT", [1, 2, 1]))
        # Concatenating a tensor with itself produces internally-aliased output.
        concat = builder.aiOnnx.concat([model_input, model_input], axis=1)
    with builder.virtualGraph(1), builder.pipelineStage(1):
        # The concat output is IPU-copied from vGraph 0 to vGraph 1 here.
        result = builder.aiOnnx.add([concat, concat])
    opts = popart.SessionOptions()
    opts.enablePipelining = True
    opts.virtualGraphMode = popart.VirtualGraphMode.Manual
    session = popart.InferenceSession(
        fnModel=builder.getModelProto(),
        dataFlow=popart.DataFlow(2, {result: popart.AnchorReturnType("All")}),
        deviceInfo=tu.create_test_device(numIpus=2),
        userOptions=opts)
    session.prepareDevice()
    feed_dict = {model_input: np.zeros([2, 2, 1], dtype=np.float32)}
    stepio = popart.PyStepIO(feed_dict, session.initAnchorArrays())
    session.run(stepio)
@tu.requires_ipu_model
def test_bad_auto_staging():
    """
    With virtual graph placement 0 -> 1 -> 0, automatic pipeline-stage
    assignment would have a tensor consumed in an earlier pipeline stage
    than it is produced; check that this raises a clear error.
    """
    bps = 4
    dummy_data = np.random.rand(2, 2).astype(np.float32)
    # `data` (and `ref`) are kept for the commented-out checks below, to be
    # re-enabled when auto pipeline stage assignment is improved.
    data = np.random.rand(bps, 2, 2).astype(np.float32)
    # Fix: removed unused locals `vgraph_ids` and `ps_ids` (nothing read them).

    def init_builder(builder):
        d0 = builder.addInputTensor(dummy_data, 'data0')
        t0 = builder.aiOnnx.sin([d0])
        t1 = builder.aiOnnx.sin([t0])
        t2 = builder.aiOnnx.sin([t1])
        loss = builder.aiGraphcore.identityloss([t2])
        builder.addOutputTensor(loss)

        # t1 is placed on vGraph 1 but its consumer t2 is back on vGraph 0:
        # auto staging cannot order these stages correctly.
        builder.virtualGraph(t0, 0)
        builder.virtualGraph(t1, 1)
        builder.virtualGraph(t2, 0)
        builder.virtualGraph(loss, 0)

        return [loss]

    def ref(d0):
        t0 = np.sin(d0)
        t1 = np.sin(t0)
        t2 = np.sin(t1)
        return t2

    session = PopartTestSession()
    session.options.virtualGraphMode = popart.VirtualGraphMode.Manual
    session.options.enablePipelining = True
    session.device = 'ipu_model'
    session.numIPUs = 2
    session.batchesPerStep = bps

    with pytest.raises(popart.popart_exception) as e_info:
        session.prepare(init_builder)
    assert e_info.value.args[0].startswith(
        'Tensor Sin:0/1 is consumed in an earlier pipeline stage than it is produced'
    )

    # The below lines should be uncommented when auto pipeline stage is improved.
    # assert len(sessionAnchors) == 1
    # result = [v for k, v in sessionAnchors.items()][0]

    # for i in range(bps):
    #     refResult = ref(data[i])
    #     print(f'Batch {i}: {result[i]}')
    #     print(f'Ref result: {refResult}')
    #     print()
    #     assert np.allclose(result[i], refResult)
| [
"numpy.prod",
"popart.AnchorReturnType",
"numpy.random.rand",
"numpy.array",
"numpy.sin",
"popart.PyStepIO",
"popart.PyWeightsIO",
"numpy.reshape",
"popart.reservedRestoredPrefix",
"pathlib.Path",
"gcprofile.save_popart_report",
"numpy.exp",
"numpy.matmul",
"numpy.random.seed",
"numpy.em... | [((7899, 7951), 'collections.namedtuple', 'namedtuple', (['"""_DataType"""', "['builder_type', 'np_type']"], {}), "('_DataType', ['builder_type', 'np_type'])\n", (7909, 7951), False, 'from collections import namedtuple\n'), ((8051, 8110), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inputType"""', '[_INT8, _UINT8, None]'], {}), "('inputType', [_INT8, _UINT8, None])\n", (8074, 8110), False, 'import pytest\n'), ((10251, 10310), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inputType"""', '[_INT8, _UINT8, None]'], {}), "('inputType', [_INT8, _UINT8, None])\n", (10274, 10310), False, 'import pytest\n'), ((11923, 11982), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inputType"""', '[_INT8, _UINT8, None]'], {}), "('inputType', [_INT8, _UINT8, None])\n", (11946, 11982), False, 'import pytest\n'), ((24360, 24419), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inputType"""', '[_INT8, _UINT8, None]'], {}), "('inputType', [_INT8, _UINT8, None])\n", (24383, 24419), False, 'import pytest\n'), ((27778, 27837), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inputType"""', '[_INT8, _UINT8, None]'], {}), "('inputType', [_INT8, _UINT8, None])\n", (27801, 27837), False, 'import pytest\n'), ((694, 717), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (715, 717), False, 'import popart\n'), ((1447, 1463), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (1461, 1463), False, 'import popart\n'), ((1836, 1859), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (1857, 1859), False, 'import popart\n'), ((2998, 3021), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (3019, 3021), False, 'import popart\n'), ((4236, 4259), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (4257, 4259), False, 'import popart\n'), ((5133, 5149), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (5147, 5149), False, 'import popart\n'), ((5608, 
5631), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (5629, 5631), False, 'import popart\n'), ((11799, 11873), 'numpy.allclose', 'np.allclose', (["pipelined_anchors['input_raw']", 'pipelined_anchors[input_name]'], {}), "(pipelined_anchors['input_raw'], pipelined_anchors[input_name])\n", (11810, 11873), True, 'import numpy as np\n'), ((14389, 14411), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(1)'}), '(seed=1)\n', (14403, 14411), True, 'import numpy as np\n'), ((14427, 14443), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (14441, 14443), False, 'import popart\n'), ((15549, 15579), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (15572, 15579), False, 'import popart\n'), ((15978, 16001), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (15999, 16001), False, 'import popart\n'), ((17819, 17851), 'popart.PyStepIO', 'popart.PyStepIO', (['inputs', 'anchors'], {}), '(inputs, anchors)\n', (17834, 17851), False, 'import popart\n'), ((18185, 18201), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (18199, 18201), False, 'import popart\n'), ((18707, 18737), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (18730, 18737), False, 'import popart\n'), ((18914, 18943), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (18922, 18943), True, 'import numpy as np\n'), ((19894, 19913), 'test_session.PopartTestSession', 'PopartTestSession', ([], {}), '()\n', (19911, 19913), False, 'from test_session import PopartTestSession\n'), ((20897, 20935), 'numpy.array', 'np.array', (['[0.5, 1.0]'], {'dtype': 'np.float32'}), '([0.5, 1.0], dtype=np.float32)\n', (20905, 20935), True, 'import numpy as np\n'), ((21884, 21903), 'test_session.PopartTestSession', 'PopartTestSession', ([], {}), '()\n', (21901, 21903), False, 'from test_session import PopartTestSession\n'), ((23635, 23654), 'test_session.PopartTestSession', 
'PopartTestSession', ([], {}), '()\n', (23652, 23654), False, 'from test_session import PopartTestSession\n'), ((24205, 24244), 'numpy.allclose', 'np.allclose', (['sessionAnchors', 'refAnchors'], {}), '(sessionAnchors, refAnchors)\n', (24216, 24244), True, 'import numpy as np\n'), ((27619, 27638), 'numpy.allclose', 'np.allclose', (['r0', 'r1'], {}), '(r0, r1)\n', (27630, 27638), True, 'import numpy as np\n'), ((27650, 27669), 'numpy.allclose', 'np.allclose', (['w0', 'w1'], {}), '(w0, w1)\n', (27661, 27669), True, 'import numpy as np\n'), ((28266, 28310), 'numpy.reshape', 'np.reshape', (['data', '(accumulationFactor, 2, 2)'], {}), '(data, (accumulationFactor, 2, 2))\n', (28276, 28310), True, 'import numpy as np\n'), ((28410, 28441), 'numpy.reshape', 'np.reshape', (['weight_data', '(2, 2)'], {}), '(weight_data, (2, 2))\n', (28420, 28441), True, 'import numpy as np\n'), ((30414, 30436), 'numpy.array_equal', 'np.array_equal', (['w0', 'w1'], {}), '(w0, w1)\n', (30428, 30436), True, 'import numpy as np\n'), ((30676, 30692), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (30690, 30692), False, 'import popart\n'), ((31053, 31076), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (31074, 31076), False, 'import popart\n'), ((32405, 32424), 'test_session.PopartTestSession', 'PopartTestSession', ([], {}), '()\n', (32422, 32424), False, 'from test_session import PopartTestSession\n'), ((817, 855), 'pytest.raises', 'pytest.raises', (['popart.popart_exception'], {}), '(popart.popart_exception)\n', (830, 855), False, 'import pytest\n'), ((1533, 1568), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', 'shape_d'], {}), "('FLOAT', shape_d)\n", (1550, 1568), False, 'import popart\n'), ((1602, 1637), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', 'shape_d'], {}), "('FLOAT', shape_d)\n", (1619, 1637), False, 'import popart\n'), ((2205, 2243), 'pytest.raises', 'pytest.raises', (['popart.popart_exception'], {}), '(popart.popart_exception)\n', 
(2218, 2243), False, 'import pytest\n'), ((5218, 5253), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', 'shape_d'], {}), "('FLOAT', shape_d)\n", (5235, 5253), False, 'import popart\n'), ((5287, 5322), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', 'shape_d'], {}), "('FLOAT', shape_d)\n", (5304, 5322), False, 'import popart\n'), ((5356, 5391), 'popart.TensorInfo', 'popart.TensorInfo', (['"""INT32"""', 'shape_l'], {}), "('INT32', shape_l)\n", (5373, 5391), False, 'import popart\n'), ((6667, 6705), 'pytest.raises', 'pytest.raises', (['popart.popart_exception'], {}), '(popart.popart_exception)\n', (6680, 6705), False, 'import pytest\n'), ((7505, 7543), 'pytest.raises', 'pytest.raises', (['popart.popart_exception'], {}), '(popart.popart_exception)\n', (7518, 7543), False, 'import pytest\n'), ((9696, 9715), 'numpy.allclose', 'np.allclose', (['t1', 't2'], {}), '(t1, t2)\n', (9707, 9715), True, 'import numpy as np\n'), ((10199, 10224), 'numpy.allclose', 'np.allclose', (['t1[0]', 't2[0]'], {}), '(t1[0], t2[0])\n', (10210, 10224), True, 'import numpy as np\n'), ((13450, 13469), 'numpy.allclose', 'np.allclose', (['t1', 't2'], {}), '(t1, t2)\n', (13461, 13469), True, 'import numpy as np\n'), ((13792, 13811), 'numpy.allclose', 'np.allclose', (['t1', 't2'], {}), '(t1, t2)\n', (13803, 13811), True, 'import numpy as np\n'), ((14886, 14922), 'popart.TensorInfo', 'popart.TensorInfo', (['"""INT32"""', 'shape_l0'], {}), "('INT32', shape_l0)\n", (14903, 14922), False, 'import popart\n'), ((17608, 17625), 'numpy.prod', 'np.prod', (['shape_d0'], {}), '(shape_d0)\n', (17615, 17625), True, 'import numpy as np\n'), ((17994, 18021), 'gcprofile.save_popart_report', 'save_popart_report', (['session'], {}), '(session)\n', (18012, 18021), False, 'from gcprofile import save_popart_report\n'), ((18254, 18289), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', 'shape_d'], {}), "('FLOAT', shape_d)\n", (18271, 18289), False, 'import popart\n'), ((18323, 18358), 
'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', 'shape_d'], {}), "('FLOAT', shape_d)\n", (18340, 18358), False, 'import popart\n'), ((20255, 20293), 'pytest.raises', 'pytest.raises', (['popart.popart_exception'], {}), '(popart.popart_exception)\n', (20268, 20293), False, 'import pytest\n'), ((20387, 20445), 're.match', 're.match', (['"""Ops .* have the same pipeline stage 1,.*"""', 'emsg'], {}), "('Ops .* have the same pipeline stage 1,.*', emsg)\n", (20395, 20445), False, 'import re\n'), ((20604, 20642), 'pytest.raises', 'pytest.raises', (['popart.popart_exception'], {}), '(popart.popart_exception)\n', (20617, 20642), False, 'import pytest\n'), ((21771, 21781), 'numpy.sin', 'np.sin', (['d0'], {}), '(d0)\n', (21777, 21781), True, 'import numpy as np\n'), ((21816, 21826), 'numpy.exp', 'np.exp', (['m0'], {}), '(m0)\n', (21822, 21826), True, 'import numpy as np\n'), ((21840, 21850), 'numpy.exp', 'np.exp', (['e0'], {}), '(e0)\n', (21846, 21850), True, 'import numpy as np\n'), ((23514, 23538), 'numpy.matmul', 'np.matmul', (['data', 'weights'], {}), '(data, weights)\n', (23523, 23538), True, 'import numpy as np\n'), ((23552, 23563), 'numpy.sin', 'np.sin', (['mm0'], {}), '(mm0)\n', (23558, 23563), True, 'import numpy as np\n'), ((23578, 23600), 'numpy.matmul', 'np.matmul', (['s0', 'weights'], {}), '(s0, weights)\n', (23587, 23600), True, 'import numpy as np\n'), ((26260, 26279), 'test_session.PopartTestSession', 'PopartTestSession', ([], {}), '()\n', (26277, 26279), False, 'from test_session import PopartTestSession\n'), ((27044, 27071), 'popart.PyWeightsIO', 'popart.PyWeightsIO', (['weights'], {}), '(weights)\n', (27062, 27071), False, 'import popart\n'), ((29425, 29444), 'test_session.PopartTestSession', 'PopartTestSession', ([], {}), '()\n', (29442, 29444), False, 'from test_session import PopartTestSession\n'), ((30078, 30105), 'popart.PyWeightsIO', 'popart.PyWeightsIO', (['weights'], {}), '(weights)\n', (30096, 30105), False, 'import popart\n'), ((31468, 
31505), 'numpy.zeros', 'np.zeros', (['[2, 2, 1]'], {'dtype': 'np.float32'}), '([2, 2, 1], dtype=np.float32)\n', (31476, 31505), True, 'import numpy as np\n'), ((32313, 32323), 'numpy.sin', 'np.sin', (['d0'], {}), '(d0)\n', (32319, 32323), True, 'import numpy as np\n'), ((32337, 32347), 'numpy.sin', 'np.sin', (['t0'], {}), '(t0)\n', (32343, 32347), True, 'import numpy as np\n'), ((32361, 32371), 'numpy.sin', 'np.sin', (['t1'], {}), '(t1)\n', (32367, 32371), True, 'import numpy as np\n'), ((32704, 32742), 'pytest.raises', 'pytest.raises', (['popart.popart_exception'], {}), '(popart.popart_exception)\n', (32717, 32742), False, 'import pytest\n'), ((3445, 3476), 'popart.DataFlow', 'popart.DataFlow', (['(10)', 'anchor_map'], {}), '(10, anchor_map)\n', (3460, 3476), False, 'import popart\n'), ((3583, 3631), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(2)', 'tilesPerIPU': '(20)'}), '(numIpus=2, tilesPerIPU=20)\n', (3604, 3631), True, 'import test_util as tu\n'), ((4620, 4651), 'popart.DataFlow', 'popart.DataFlow', (['(10)', 'anchor_map'], {}), '(10, anchor_map)\n', (4635, 4651), False, 'import popart\n'), ((4758, 4806), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(2)', 'tilesPerIPU': '(20)'}), '(numIpus=2, tilesPerIPU=20)\n', (4779, 4806), True, 'import test_util as tu\n'), ((5988, 6018), 'popart.DataFlow', 'popart.DataFlow', (['(10)', '[op2_out]'], {}), '(10, [op2_out])\n', (6003, 6018), False, 'import popart\n'), ((6125, 6173), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(3)', 'tilesPerIPU': '(20)'}), '(numIpus=3, tilesPerIPU=20)\n', (6146, 6173), True, 'import test_util as tu\n'), ((14604, 14655), 'popart.TensorInfo', 'popart.TensorInfo', (['inputType.builder_type', 'shape_d0'], {}), '(inputType.builder_type, shape_d0)\n', (14621, 14655), False, 'import popart\n'), ((14703, 14739), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', 'shape_d0'], {}), "('FLOAT', 
shape_d0)\n", (14720, 14739), False, 'import popart\n'), ((14755, 14782), 'numpy.ones', 'np.ones', ([], {'shape': '[2, 2, 3, 3]'}), '(shape=[2, 2, 3, 3])\n', (14762, 14782), True, 'import numpy as np\n'), ((17489, 17543), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': 'shape_d0'}), '(low=-10.0, high=10.0, size=shape_d0)\n', (17506, 17543), True, 'import numpy as np\n'), ((17669, 17722), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'classes', 'size': 'shape_l0'}), '(low=0, high=classes, size=shape_l0)\n', (17686, 17722), True, 'import numpy as np\n'), ((22588, 22608), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (22602, 22608), True, 'import numpy as np\n'), ((22639, 22664), 'numpy.random.rand', 'np.random.rand', (['bps', '(2)', '(2)'], {}), '(bps, 2, 2)\n', (22653, 22664), True, 'import numpy as np\n'), ((22698, 22718), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (22712, 22718), True, 'import numpy as np\n'), ((24691, 24711), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (24705, 24711), True, 'import numpy as np\n'), ((24741, 24782), 'numpy.random.rand', 'np.random.rand', (['accumulation_factor', '(2)', '(2)'], {}), '(accumulation_factor, 2, 2)\n', (24755, 24782), True, 'import numpy as np\n'), ((24819, 24839), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (24833, 24839), True, 'import numpy as np\n'), ((25107, 25165), 'numpy.empty', 'np.empty', ([], {'shape': 'weight_data.shape', 'dtype': 'weight_data.dtype'}), '(shape=weight_data.shape, dtype=weight_data.dtype)\n', (25115, 25165), True, 'import numpy as np\n'), ((28108, 28124), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (28116, 28124), True, 'import numpy as np\n'), ((28691, 28749), 'numpy.empty', 'np.empty', ([], {'shape': 'weight_data.shape', 'dtype': 'weight_data.dtype'}), '(shape=weight_data.shape, 
dtype=weight_data.dtype)\n', (28699, 28749), True, 'import numpy as np\n'), ((30812, 30849), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT"""', '[1, 2, 1]'], {}), "('FLOAT', [1, 2, 1])\n", (30829, 30849), False, 'import popart\n'), ((31348, 31380), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': '(2)'}), '(numIpus=2)\n', (31369, 31380), True, 'import test_util as tu\n'), ((31682, 31702), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (31696, 31702), True, 'import numpy as np\n'), ((31733, 31758), 'numpy.random.rand', 'np.random.rand', (['bps', '(2)', '(2)'], {}), '(bps, 2, 2)\n', (31747, 31758), True, 'import numpy as np\n'), ((993, 1024), 'popart.DataFlow', 'popart.DataFlow', (['(10)', 'anchor_map'], {}), '(10, anchor_map)\n', (1008, 1024), False, 'import popart\n'), ((1186, 1209), 'test_util.create_test_device', 'tu.create_test_device', ([], {}), '()\n', (1207, 1209), True, 'import test_util as tu\n'), ((2381, 2419), 'popart.DataFlow', 'popart.DataFlow', (['(10)', "[op2_out, 'loss']"], {}), "(10, [op2_out, 'loss'])\n", (2396, 2419), False, 'import popart\n'), ((2581, 2604), 'test_util.create_test_device', 'tu.create_test_device', ([], {}), '()\n', (2602, 2604), True, 'import test_util as tu\n'), ((10032, 10044), 'numpy.shape', 'np.shape', (['t1'], {}), '(t1)\n', (10040, 10044), True, 'import numpy as np\n'), ((10102, 10115), 'numpy.sum', 'np.sum', (['t1[i]'], {}), '(t1[i])\n', (10108, 10115), True, 'import numpy as np\n'), ((10169, 10182), 'numpy.sum', 'np.sum', (['t2[i]'], {}), '(t2[i])\n', (10175, 10182), True, 'import numpy as np\n'), ((11280, 11291), 'numpy.shape', 'np.shape', (['t'], {}), '(t)\n', (11288, 11291), True, 'import numpy as np\n'), ((11334, 11346), 'numpy.sum', 'np.sum', (['t[i]'], {}), '(t[i])\n', (11340, 11346), True, 'import numpy as np\n'), ((11572, 11603), 'popart.reservedRestoredPrefix', 'popart.reservedRestoredPrefix', ([], {}), '()\n', (11601, 11603), False, 'import popart\n'), 
((11702, 11733), 'popart.reservedRestoredPrefix', 'popart.reservedRestoredPrefix', ([], {}), '()\n', (11731, 11733), False, 'import popart\n'), ((13289, 13301), 'numpy.shape', 'np.shape', (['t1'], {}), '(t1)\n', (13297, 13301), True, 'import numpy as np\n'), ((13356, 13369), 'numpy.sum', 'np.sum', (['t1[i]'], {}), '(t1[i])\n', (13362, 13369), True, 'import numpy as np\n'), ((13420, 13433), 'numpy.sum', 'np.sum', (['t2[i]'], {}), '(t2[i])\n', (13426, 13433), True, 'import numpy as np\n'), ((13625, 13637), 'numpy.shape', 'np.shape', (['t1'], {}), '(t1)\n', (13633, 13637), True, 'import numpy as np\n'), ((13695, 13708), 'numpy.sum', 'np.sum', (['t1[i]'], {}), '(t1[i])\n', (13701, 13708), True, 'import numpy as np\n'), ((13762, 13775), 'numpy.sum', 'np.sum', (['t2[i]'], {}), '(t2[i])\n', (13768, 13775), True, 'import numpy as np\n'), ((15673, 15704), 'popart.reservedGradientPrefix', 'popart.reservedGradientPrefix', ([], {}), '()\n', (15702, 15704), False, 'import popart\n'), ((16674, 16717), 'popart.DataFlow', 'popart.DataFlow', (['batchesPerStep', 'anchor_map'], {}), '(batchesPerStep, anchor_map)\n', (16689, 16717), False, 'import popart\n'), ((16763, 16784), 'popart.ConstSGD', 'popart.ConstSGD', (['(0.01)'], {}), '(0.01)\n', (16778, 16784), False, 'import popart\n'), ((16839, 16893), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': 'numIPUs', 'tilesPerIPU': '(20)'}), '(numIpus=numIPUs, tilesPerIPU=20)\n', (16860, 16893), True, 'import test_util as tu\n'), ((17014, 17057), 'popart.DataFlow', 'popart.DataFlow', (['batchesPerStep', 'anchor_map'], {}), '(batchesPerStep, anchor_map)\n', (17029, 17057), False, 'import popart\n'), ((17112, 17166), 'test_util.create_test_device', 'tu.create_test_device', ([], {'numIpus': 'numIPUs', 'tilesPerIPU': '(20)'}), '(numIpus=numIPUs, tilesPerIPU=20)\n', (17133, 17166), True, 'import test_util as tu\n'), ((15812, 15843), 'popart.reservedRestoredPrefix', 'popart.reservedRestoredPrefix', ([], {}), '()\n', 
(15841, 15843), False, 'import popart\n'), ((15917, 15948), 'popart.reservedRestoredPrefix', 'popart.reservedRestoredPrefix', ([], {}), '()\n', (15946, 15948), False, 'import popart\n'), ((31295, 31325), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (31318, 31325), False, 'import popart\n'), ((273, 287), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (277, 287), False, 'from pathlib import Path\n')] |
import math
import numpy as np
from math import e
def P0(x):
    """Model P0: saturating exponential, f(x) = 7816.273180 * (1 - e^(-0.0011752387 x))."""
    return 7816.273180 * (1.0 - e ** (-0.0011752387 * x))
def P1(x):
    """Model P1: pure exponential growth, f(x) = 1.617067 * e^(1.153090 x)."""
    return 1.617067 * e ** (1.153090 * x)
def P2(x):
    """Model P2: logarithmic, f(x) = 6.550502 + 18.313747 * ln(x)."""
    log_term = np.log(x)
    return 18.313747 * log_term + 6.550502
def P3(x):
    """Model P3: exponential saturation in 1/x, f(x) = 104.912790 * e^(-3.483633 / x)."""
    return 104.912790 * e ** (-3.483633 / x)
# Read the 5 sample points: all x values first, then the observed f(x) values.
valores_x = [float(input()) for _ in range(5)]
valores_f = [float(input()) for _ in range(5)]

# Mean of the observations (uses len() instead of a hard-coded 5).
f_med = sum(valores_f) / len(valores_f)

# For each candidate model P, accumulate
#   ss_res = sum((P(x_i) - f_i)^2)      squared residuals against the data
#   ss_mod = sum((P(x_i) - f_med)^2)    spread of predictions around the mean
# and score the fit with the coefficient R^2 = 1 - ss_res / (ss_res + ss_mod).
modelos = [('P0', P0), ('P1', P1), ('P2', P2), ('P3', P3)]
resultados = []
for nome, modelo in modelos:
    ss_res = sum((modelo(x) - f) ** 2 for x, f in zip(valores_x, valores_f))
    ss_mod = sum((modelo(x) - f_med) ** 2 for x in valores_x)
    resultados.append((1 - ss_res / (ss_res + ss_mod), nome))

# Report the models from best to worst fit. Sorting (R^2, name) pairs keeps
# each score attached to its own model; the original matched sorted floats
# back against the per-model values with ==, which prints the wrong label
# (the same one twice) whenever two models tie.
for r2, nome in sorted(resultados, key=lambda par: par[0], reverse=True):
    print(f'{nome} R^2 = {r2:.6f}')
| [
"math.pow",
"numpy.log"
] | [((157, 181), 'math.pow', 'math.pow', (['e', '(1.15309 * x)'], {}), '(e, 1.15309 * x)\n', (165, 181), False, 'import math\n'), ((275, 301), 'math.pow', 'math.pow', (['e', '(-3.483633 / x)'], {}), '(e, -3.483633 / x)\n', (283, 301), False, 'import math\n'), ((90, 120), 'math.pow', 'math.pow', (['e', '(-0.0011752387 * x)'], {}), '(e, -0.0011752387 * x)\n', (98, 120), False, 'import math\n'), ((229, 238), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (235, 238), True, 'import numpy as np\n')] |
import os
import os.path as osp
import cv2
import json
import math
import pickle
import numpy as np
import xml.etree.ElementTree as ET
import torch
import torch.utils.data as data
import pycocotools.coco as coco
import cv2
import numpy as np
import mmcv
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
from utils.image import get_border, get_affine_transform, affine_transform, color_aug
from utils.image import draw_umich_gaussian, gaussian_radius
import pdb
@DATASETS.register_module()
class DotaDataset(XMLDataset):
    """XML-style dataset for the DOTA aerial-imagery benchmark.

    Each object is annotated as an 8-coordinate polygon
    (x0, y0, ..., x3, y3) under a ``<label>`` class tag;
    :meth:`get_ann_info` converts the polygon to the 5-parameter rotated-box
    form (cx, cy, w, h, theta) consumed by the detector.
    """

    CLASSES = ('plane', 'baseball-diamond', 'bridge', 'ground-track-field',
               'small-vehicle', 'large-vehicle', 'ship',
               'tennis-court', 'basketball-court',
               'storage-tank', 'soccer-ball-field',
               'roundabout', 'harbor',
               'swimming-pool', 'helicopter', 'container-crane')

    def __init__(self, **kwargs):
        super(DotaDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Load annotation from XML style ann_file.

        Args:
            ann_file (str): Path of a file listing one image id per line.

        Returns:
            list[dict]: Per-image info dicts with keys ``id``, ``filename``,
                ``width`` and ``height``.
        """
        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = f'JPEGImages/{img_id}.png'
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = 0
            height = 0
            if size is not None:
                width = int(size.find('width').text)
                height = int(size.find('height').text)
            else:
                # The <size> element is missing: fall back to reading the
                # image itself. The original called PIL's ``Image.open``
                # here, but PIL is never imported in this module, so that
                # branch raised NameError; use cv2, which is imported.
                img_path = osp.join(self.img_prefix, 'JPEGImages',
                                    '{}.png'.format(img_id))
                img = cv2.imread(img_path)
                height, width = img.shape[:2]
            data_infos.append(
                dict(id=img_id, filename=filename, width=width, height=height))

        return data_infos

    def get_ann_info(self, idx):
        """Get annotation from XML file by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation with keys ``bboxes`` — an (n, 5) float32 array
                of rotated boxes (cx, cy, w, h, theta) — ``labels`` (n,)
                int64, plus the (always empty here) ignore counterparts.
        """
        img_id = self.data_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
        tree = ET.parse(xml_path)
        root = tree.getroot()
        bboxes = []
        labels = []
        bboxes_ignore = []
        labels_ignore = []
        for obj in root.findall('object'):
            name = obj.find('label').text
            if name not in self.CLASSES:
                continue
            label = self.cat2label[name]
            # Parsed for XML validation only; difficult/ignore filtering is
            # deliberately disabled for this dataset.
            difficult = int(obj.find('difficult').text)
            bnd_box = obj.find('bndbox')
            # Coordinates may be float type
            bbox = [
                int(float(bnd_box.find('x0').text)),
                int(float(bnd_box.find('y0').text)),
                int(float(bnd_box.find('x1').text)),
                int(float(bnd_box.find('y1').text)),
                int(float(bnd_box.find('x2').text)),
                int(float(bnd_box.find('y2').text)),
                int(float(bnd_box.find('x3').text)),
                int(float(bnd_box.find('y3').text)),
            ]
            bboxes.append(bbox)
            labels.append(label)
        if not bboxes:
            bboxes = np.zeros((0, 5))
            labels = np.zeros((0, ))
        else:
            bboxes = np.array(bboxes, ndmin=2) - 1
            labels = np.array(labels)
        if not bboxes_ignore:
            bboxes_ignore = np.zeros((0, 5))
            labels_ignore = np.zeros((0, ))
        else:
            bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
            labels_ignore = np.array(labels_ignore)

        # Convert each 8-coordinate polygon to (cx, cy, w, h, theta).
        n_bboxes = []
        for i in range(bboxes.shape[0]):
            bbox = bboxes[i, :]
            # Centre: mean of the four polygon corners.
            cx = (bbox[0] + bbox[2] + bbox[4] + bbox[6]) / 4
            cy = (bbox[1] + bbox[3] + bbox[5] + bbox[7]) / 4
            # Side lengths from consecutive corners; keep w >= h and take
            # the angle along the longer side. The 1e-3 term guards against
            # division by zero for axis-aligned edges.
            w = math.sqrt(math.pow((bbox[0] - bbox[2]), 2) + math.pow((bbox[1] - bbox[3]), 2))
            h = math.sqrt(math.pow((bbox[2] - bbox[4]), 2) + math.pow((bbox[3] - bbox[5]), 2))
            if w < h:
                w, h = h, w
                theta = math.atan((bbox[5] - bbox[3]) / (bbox[4] - bbox[2] + 1e-3))
            else:
                theta = math.atan((bbox[3] - bbox[1]) / (bbox[2] - bbox[0] + 1e-3))
            n_bboxes.append([cx, cy, w, h, theta])

        ann = dict(
            # reshape(-1, 5) preserves the (0, 5) shape when the image has
            # no objects; ``np.array([])`` alone would collapse to shape
            # (0,), which downstream evaluation code cannot index.
            bboxes=np.array(n_bboxes, dtype=np.float32).reshape(-1, 5),
            labels=np.array(labels).astype(np.int64),
            bboxes_ignore=bboxes_ignore.astype(np.float32),
            labels_ignore=labels_ignore.astype(np.int64))
        return ann

    def get_cat_ids(self, idx):
        """Get category ids in XML file by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        cat_ids = []
        img_id = self.data_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
        tree = ET.parse(xml_path)
        root = tree.getroot()
        for obj in root.findall('object'):
            # The class name lives in the <label> tag in these XMLs (see
            # get_ann_info/_filter_imgs); the original read <name>, which
            # made every image appear to contain no known categories.
            name = obj.find('label').text
            if name not in self.CLASSES:
                continue
            cat_ids.append(self.cat2label[name])
        return cat_ids

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without annotation."""
        valid_inds = []
        for i, img_info in enumerate(self.data_infos):
            if min(img_info['width'], img_info['height']) < min_size:
                continue
            if self.filter_empty_gt:
                # Keep the image only if its XML has at least one object
                # whose <label> is a known class.
                img_id = img_info['id']
                xml_path = osp.join(self.img_prefix, 'Annotations',
                                    f'{img_id}.xml')
                tree = ET.parse(xml_path)
                root = tree.getroot()
                for obj in root.findall('object'):
                    name = obj.find('label').text
                    if name in self.CLASSES:
                        valid_inds.append(i)
                        break
            else:
                valid_inds.append(i)
        return valid_inds

    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate in VOC protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'mAP', 'recall'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. It must be a float
                when evaluating mAP, and can be a list when evaluating recall.
                Default: 0.5.
            scale_ranges (list[tuple], optional): Scale ranges for evaluating
                mAP. If not specified, all bounding boxes would be included in
                evaluation. Default: None.

        Returns:
            dict[str, float]: AP/recall metrics.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = {}
        if metric == 'mAP':
            assert isinstance(iou_thr, float)
            # Forward the caller's scale_ranges (the original hard-coded
            # None here, silently ignoring the documented parameter; the
            # default of None keeps the previous behaviour).
            mean_ap, _ = eval_map(
                results,
                annotations,
                scale_ranges=scale_ranges,
                iou_thr=iou_thr,
                dataset='dota',
                logger=logger,
                nproc=10)
            eval_results['mAP'] = mean_ap
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            if isinstance(iou_thr, float):
                iou_thr = [iou_thr]
            recalls = eval_recalls(
                gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thr):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
| [
"xml.etree.ElementTree.parse",
"math.pow",
"os.path.join",
"numpy.array",
"mmcv.list_from_file",
"numpy.zeros",
"mmdet.core.eval_map",
"mmdet.core.eval_recalls",
"math.atan"
] | [((1310, 1339), 'mmcv.list_from_file', 'mmcv.list_from_file', (['ann_file'], {}), '(ann_file)\n', (1329, 1339), False, 'import mmcv\n'), ((2485, 2542), 'os.path.join', 'osp.join', (['self.img_prefix', '"""Annotations"""', 'f"""{img_id}.xml"""'], {}), "(self.img_prefix, 'Annotations', f'{img_id}.xml')\n", (2493, 2542), True, 'import os.path as osp\n'), ((2558, 2576), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (2566, 2576), True, 'import xml.etree.ElementTree as ET\n'), ((5779, 5836), 'os.path.join', 'osp.join', (['self.img_prefix', '"""Annotations"""', 'f"""{img_id}.xml"""'], {}), "(self.img_prefix, 'Annotations', f'{img_id}.xml')\n", (5787, 5836), True, 'import os.path as osp\n'), ((5852, 5870), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (5860, 5870), True, 'import xml.etree.ElementTree as ET\n'), ((1444, 1501), 'os.path.join', 'osp.join', (['self.img_prefix', '"""Annotations"""', 'f"""{img_id}.xml"""'], {}), "(self.img_prefix, 'Annotations', f'{img_id}.xml')\n", (1452, 1501), True, 'import os.path as osp\n'), ((1553, 1571), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (1561, 1571), True, 'import xml.etree.ElementTree as ET\n'), ((4074, 4090), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {}), '((0, 5))\n', (4082, 4090), True, 'import numpy as np\n'), ((4112, 4126), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (4120, 4126), True, 'import numpy as np\n'), ((4214, 4230), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (4222, 4230), True, 'import numpy as np\n'), ((4289, 4305), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {}), '((0, 5))\n', (4297, 4305), True, 'import numpy as np\n'), ((4334, 4348), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (4342, 4348), True, 'import numpy as np\n'), ((4457, 4480), 'numpy.array', 'np.array', (['labels_ignore'], {}), '(labels_ignore)\n', (4465, 4480), True, 'import numpy as np\n'), ((8715, 8827), 
'mmdet.core.eval_map', 'eval_map', (['results', 'annotations'], {'scale_ranges': 'None', 'iou_thr': 'iou_thr', 'dataset': '"""dota"""', 'logger': 'logger', 'nproc': '(10)'}), "(results, annotations, scale_ranges=None, iou_thr=iou_thr, dataset=\n 'dota', logger=logger, nproc=10)\n", (8723, 8827), False, 'from mmdet.core import eval_map, eval_recalls\n'), ((4163, 4188), 'numpy.array', 'np.array', (['bboxes'], {'ndmin': '(2)'}), '(bboxes, ndmin=2)\n', (4171, 4188), True, 'import numpy as np\n'), ((4392, 4424), 'numpy.array', 'np.array', (['bboxes_ignore'], {'ndmin': '(2)'}), '(bboxes_ignore, ndmin=2)\n', (4400, 4424), True, 'import numpy as np\n'), ((4974, 5034), 'math.atan', 'math.atan', (['((bbox[5] - bbox[3]) / (bbox[4] - bbox[2] + 0.001))'], {}), '((bbox[5] - bbox[3]) / (bbox[4] - bbox[2] + 0.001))\n', (4983, 5034), False, 'import math\n'), ((5076, 5136), 'math.atan', 'math.atan', (['((bbox[3] - bbox[1]) / (bbox[2] - bbox[0] + 0.001))'], {}), '((bbox[3] - bbox[1]) / (bbox[2] - bbox[0] + 0.001))\n', (5085, 5136), False, 'import math\n'), ((6531, 6588), 'os.path.join', 'osp.join', (['self.img_prefix', '"""Annotations"""', 'f"""{img_id}.xml"""'], {}), "(self.img_prefix, 'Annotations', f'{img_id}.xml')\n", (6539, 6588), True, 'import os.path as osp\n'), ((6648, 6666), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (6656, 6666), True, 'import xml.etree.ElementTree as ET\n'), ((9175, 9246), 'mmdet.core.eval_recalls', 'eval_recalls', (['gt_bboxes', 'results', 'proposal_nums', 'iou_thr'], {'logger': 'logger'}), '(gt_bboxes, results, proposal_nums, iou_thr, logger=logger)\n', (9187, 9246), False, 'from mmdet.core import eval_map, eval_recalls\n'), ((4735, 4765), 'math.pow', 'math.pow', (['(bbox[0] - bbox[2])', '(2)'], {}), '(bbox[0] - bbox[2], 2)\n', (4743, 4765), False, 'import math\n'), ((4770, 4800), 'math.pow', 'math.pow', (['(bbox[1] - bbox[3])', '(2)'], {}), '(bbox[1] - bbox[3], 2)\n', (4778, 4800), False, 'import math\n'), ((4830, 
4860), 'math.pow', 'math.pow', (['(bbox[2] - bbox[4])', '(2)'], {}), '(bbox[2] - bbox[4], 2)\n', (4838, 4860), False, 'import math\n'), ((4865, 4895), 'math.pow', 'math.pow', (['(bbox[3] - bbox[5])', '(2)'], {}), '(bbox[3] - bbox[5], 2)\n', (4873, 4895), False, 'import math\n'), ((5227, 5245), 'numpy.array', 'np.array', (['n_bboxes'], {}), '(n_bboxes)\n', (5235, 5245), True, 'import numpy as np\n'), ((5285, 5301), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5293, 5301), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as pl
from scipy.optimize import root
from compute import make_analysis
from tqdm import tqdm
import matplotlib.ticker as mtick
_1_minus_Ru = np.linspace(0,1,21)
Rv_crit = np.zeros((len(_1_minus_Ru),3))
pl.figure()
def get_root(Ru, dir='00_lower'):
    """Find the vaccinated-group reduction at which R crosses 1.

    Runs ``make_analysis`` for scenario directory ``dir`` with the given
    unvaccinated reduction ``Ru`` and root-finds the value whose resulting
    reproduction number equals one.
    """
    def residual(x):
        return make_analysis([dir], Ru, x, verbose=False)[0] - 1

    return root(residual, x0=1).x
# Main figure: critical vaccinated-reduction curves for the three scenarios.
fig, ax = pl.subplots(1,1,figsize=(3.6,3.3))

vec = []
# Fill Rv_crit: for every unvaccinated reduction (1 - Ru), root-find the
# critical vaccinated reduction in each of the three scenario directories.
for i, _1mRu in tqdm(enumerate(_1_minus_Ru)):
    Rv_crit[i,0] = get_root(1-_1mRu,'00_lower')
    Rv_crit[i,1] = get_root(1-_1mRu,'01_upper')
    Rv_crit[i,2] = get_root(1-_1mRu,'06_super_low')

# Fit a straight line to each critical curve and report its slope m and -1/m.
for i in range(3):
    p = np.polyfit(_1_minus_Ru, 1-Rv_crit[:,i], 1)
    print("m =", p[0], "-1/m =", -1/p[0])
    ys = np.polyval(p,[0,1])
    # NOTE(review): np.diff(ys) is a length-1 array, so this mixes a scalar
    # with an array in one np.array call; recent NumPy versions reject such
    # ragged constructions — confirm the pinned NumPy accepts it.
    vec.append(
            np.array([
                1,
                np.diff(ys),
            ])
        )
# Transform each fitted direction with [[0,-1],[1,0]] (a quarter-turn
# rotation matrix), normalise to sum 1, and print both forms per scenario.
for i in range(3):
    vec[i] = np.array([[0,-1],[1,0]]).dot(vec[i])
    vec[i] /= vec[i].sum()
    print(['00_lower','01_upper','06_super_low'][i], vec[i])
    print(['00_lower','01_upper','06_super_low'][i], vec[i][0]/vec[i][1])

# Plot the three critical curves and shade the exponential-growth region
# beneath each one.
ax.plot(_1_minus_Ru,1-Rv_crit[:,2],color='k',ls=':',label='low')
ax.fill_between(_1_minus_Ru,np.zeros_like(Rv_crit[:,2]), 1-Rv_crit[:,2],color='k',alpha=0.1)

ax.plot(_1_minus_Ru,1-Rv_crit[:,0],color='k',ls='--',label='medium')
ax.fill_between(_1_minus_Ru,np.zeros_like(Rv_crit[:,0]), 1-Rv_crit[:,0],color='k',alpha=0.1)

ax.plot(_1_minus_Ru,1-Rv_crit[:,1],color='k',ls='-',label='high')
ax.fill_between(_1_minus_Ru,np.zeros_like(Rv_crit[:,1]), 1-Rv_crit[:,1],color='k',alpha=0.1)

ax.set_xlim([0,.5])
ax.set_ylim([0,.5])

# Annotate the two regimes separated by the critical curves.
ax.text(0.03,0.03,'exponential\ngrowth',transform=ax.transAxes,va='bottom',ha='left')
ax.text(0.7,0.6,'epidemic\ncontrol',transform=ax.transAxes,va='bottom',ha='center')

ax.set_xlabel('NPI transmissibility reduction\nunvaccinated')
ax.set_ylabel('NPI transmissibility reduction\nvaccinated')
ax.legend(loc='lower right')
# Display both axes as percentages.
ax.yaxis.set_major_formatter(mtick.PercentFormatter(1))
ax.xaxis.set_major_formatter(mtick.PercentFormatter(1))

fig.tight_layout()
fig.savefig('critical_reduction.pdf')
fig.savefig('critical_reduction.png',dpi=300)

pl.show()
| [
"compute.make_analysis",
"numpy.polyfit",
"matplotlib.ticker.PercentFormatter",
"numpy.diff",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.polyval",
"scipy.optimize.root",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((193, 214), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(21)'], {}), '(0, 1, 21)\n', (204, 214), True, 'import numpy as np\n'), ((257, 268), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (266, 268), True, 'import matplotlib.pyplot as pl\n'), ((423, 460), 'matplotlib.pyplot.subplots', 'pl.subplots', (['(1)', '(1)'], {'figsize': '(3.6, 3.3)'}), '(1, 1, figsize=(3.6, 3.3))\n', (434, 460), True, 'import matplotlib.pyplot as pl\n'), ((2190, 2199), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2197, 2199), True, 'import matplotlib.pyplot as pl\n'), ((378, 393), 'scipy.optimize.root', 'root', (['fun'], {'x0': '(1)'}), '(fun, x0=1)\n', (382, 393), False, 'from scipy.optimize import root\n'), ((690, 735), 'numpy.polyfit', 'np.polyfit', (['_1_minus_Ru', '(1 - Rv_crit[:, i])', '(1)'], {}), '(_1_minus_Ru, 1 - Rv_crit[:, i], 1)\n', (700, 735), True, 'import numpy as np\n'), ((784, 805), 'numpy.polyval', 'np.polyval', (['p', '[0, 1]'], {}), '(p, [0, 1])\n', (794, 805), True, 'import numpy as np\n'), ((1222, 1250), 'numpy.zeros_like', 'np.zeros_like', (['Rv_crit[:, 2]'], {}), '(Rv_crit[:, 2])\n', (1235, 1250), True, 'import numpy as np\n'), ((1384, 1412), 'numpy.zeros_like', 'np.zeros_like', (['Rv_crit[:, 0]'], {}), '(Rv_crit[:, 0])\n', (1397, 1412), True, 'import numpy as np\n'), ((1543, 1571), 'numpy.zeros_like', 'np.zeros_like', (['Rv_crit[:, 1]'], {}), '(Rv_crit[:, 1])\n', (1556, 1571), True, 'import numpy as np\n'), ((2001, 2026), 'matplotlib.ticker.PercentFormatter', 'mtick.PercentFormatter', (['(1)'], {}), '(1)\n', (2023, 2026), True, 'import matplotlib.ticker as mtick\n'), ((2057, 2082), 'matplotlib.ticker.PercentFormatter', 'mtick.PercentFormatter', (['(1)'], {}), '(1)\n', (2079, 2082), True, 'import matplotlib.ticker as mtick\n'), ((928, 955), 'numpy.array', 'np.array', (['[[0, -1], [1, 0]]'], {}), '([[0, -1], [1, 0]])\n', (936, 955), True, 'import numpy as np\n'), ((323, 365), 'compute.make_analysis', 'make_analysis', (['[dir]', 'Ru', 
'x'], {'verbose': '(False)'}), '([dir], Ru, x, verbose=False)\n', (336, 365), False, 'from compute import make_analysis\n'), ((866, 877), 'numpy.diff', 'np.diff', (['ys'], {}), '(ys)\n', (873, 877), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 14:19:19 2019
@author: Effie
"""
import os
import cv2
import random
import numpy as np
def RandomRotate(gray):
    """Rotate an image by a random multiple of 90 degrees.

    Uses ``np.rot90`` so the rotation is exact (no interpolation) and
    shape-correct for non-square images. The previous ``cv2.warpAffine``
    version had two defects: the output size was passed as (h, w) instead
    of the required (width, height) order, and ``random.randint(0, 4)``
    drew angles 0 and 360 — the same rotation — making the identity twice
    as likely as the other orientations.
    """
    k = random.randint(0, 3)  # counter-clockwise quarter turns, equally likely
    # np.rot90 returns a view; copy so callers get an independent,
    # contiguous array.
    return np.rot90(gray, k).copy()
def RandomScale(gray):
    """Resize the image to a random percentage (5-95%) of its original size.

    Uses area interpolation, which is the recommended mode for shrinking.
    """
    pct = random.randint(5, 95)
    new_size = (int(gray.shape[1] * pct / 100),  # width
                int(gray.shape[0] * pct / 100))  # height
    return cv2.resize(gray, new_size, interpolation=cv2.INTER_AREA)
def RandomCrop(gray):
    """Crop a randomly sized region anchored at the top-left corner.

    The kept region spans between half and all of each dimension.
    """
    h, w = gray.shape[:2]
    new_h = h // 2 + random.randint(0, h // 2)
    new_w = w // 2 + random.randint(0, w // 2)
    return gray[:new_h, :new_w]
def RandomNoise(gray):
noise=np.random.normal(0,2,(gray.shape))
gray=gray+noise
return gray
location='/home/jiahui/Desktop/Zhiwu/'
Igs=[];lb=[];lbi=[];
cate=os.listdir(location)
for i in cate: #Obtain the list of folders : seg_train or seg_pred or seg_test
path=location+i;
L=os.listdir(path)
for j in L:
imgpath=path+r'/'+j; #Create the path for pircture j access
gray=cv2.imread(imgpath,1) #read the images
grayRotate=RandomRotate(gray)
grayCrop=RandomCrop(gray)
grayScale=RandomScale(gray)
grayNoise=RandomNoise(gray)
cv2.imwrite(location+i+'/'+j+'d.jpeg',grayNoise)
cv2.imwrite(location+i+'/'+j+'c.jpeg',grayCrop)
cv2.imwrite(location+i+'/'+j+'b.jpeg',grayScale)
cv2.imwrite(location+i+'/'+j+'a.jpeg',grayRotate)
| [
"numpy.random.normal",
"cv2.imwrite",
"os.listdir",
"cv2.warpAffine",
"cv2.getRotationMatrix2D",
"cv2.resize",
"cv2.imread",
"random.randint"
] | [((1013, 1033), 'os.listdir', 'os.listdir', (['location'], {}), '(location)\n', (1023, 1033), False, 'import os\n'), ((277, 322), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'angle', 'scale'], {}), '(center, angle, scale)\n', (300, 322), False, 'import cv2\n'), ((331, 362), 'cv2.warpAffine', 'cv2.warpAffine', (['gray', 'M', '(h, w)'], {}), '(gray, M, (h, w))\n', (345, 362), False, 'import cv2\n'), ((422, 443), 'random.randint', 'random.randint', (['(5)', '(95)'], {}), '(5, 95)\n', (436, 443), False, 'import random\n'), ((574, 625), 'cv2.resize', 'cv2.resize', (['gray', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(gray, dim, interpolation=cv2.INTER_AREA)\n', (584, 625), False, 'import cv2\n'), ((868, 902), 'numpy.random.normal', 'np.random.normal', (['(0)', '(2)', 'gray.shape'], {}), '(0, 2, gray.shape)\n', (884, 902), True, 'import numpy as np\n'), ((1144, 1160), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1154, 1160), False, 'import os\n'), ((235, 255), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (249, 255), False, 'import random\n'), ((1267, 1289), 'cv2.imread', 'cv2.imread', (['imgpath', '(1)'], {}), '(imgpath, 1)\n', (1277, 1289), False, 'import cv2\n'), ((1478, 1535), 'cv2.imwrite', 'cv2.imwrite', (["(location + i + '/' + j + 'd.jpeg')", 'grayNoise'], {}), "(location + i + '/' + j + 'd.jpeg', grayNoise)\n", (1489, 1535), False, 'import cv2\n'), ((1539, 1595), 'cv2.imwrite', 'cv2.imwrite', (["(location + i + '/' + j + 'c.jpeg')", 'grayCrop'], {}), "(location + i + '/' + j + 'c.jpeg', grayCrop)\n", (1550, 1595), False, 'import cv2\n'), ((1599, 1656), 'cv2.imwrite', 'cv2.imwrite', (["(location + i + '/' + j + 'b.jpeg')", 'grayScale'], {}), "(location + i + '/' + j + 'b.jpeg', grayScale)\n", (1610, 1656), False, 'import cv2\n'), ((1660, 1718), 'cv2.imwrite', 'cv2.imwrite', (["(location + i + '/' + j + 'a.jpeg')", 'grayRotate'], {}), "(location + i + '/' + j + 'a.jpeg', grayRotate)\n", (1671, 
1718), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 9 00:35:02 2021
@author: loaner
"""
import numpy as np
from matplotlib import pyplot as plt
import os
from scipy.interpolate import make_interp_spline
Re_tau1 = [125, 180, 250, 550]
sparese = [0.02, 0.05, 0.1]
dummy_idx1 = 200
path = "raw_results/"
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 14
plt.rc('font', size=BIGGER_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=MEDIUM_SIZE) # fontsize of the figure title
err_U_sps = []
err_uv_sps = []
#for sprse in sparese:
for Re_tau in Re_tau1:
dummy_idx1 = 200
#dummy_idx2 = 70
"""
Get DNS data and Spline fitting
"""
str1 = 'DNS_data/Couette_Retau'+np.str(Re_tau)+'.dat'
data = np.loadtxt(str1)
y_h, y_plus, U_plus, uv_plus = data[:,0], data[:,1], data[:,2], data[:,9]
new_Re_tau = y_plus[-1]/2
spl_U = make_interp_spline(y_plus, U_plus)
spl_uv = make_interp_spline(y_plus, uv_plus)
idx = np.where(y_plus <new_Re_tau+0.01 )
plt.semilogx (y_plus[idx], U_plus[idx]*2/np.max(U_plus) , 'k--', label = r"$U_{dns}$")
plt.semilogx (y_plus[idx].reshape((-1,1)), uv_plus[idx] , 'b--', label = r"$uv_{dns}$")
#for Re_tau in Re_tau1:
for sprse in sparese:
#dummy_idx2 += 2
dummy_idx1 += 2
data_sparse = np.loadtxt('raw/Channel_Re_tau ='+np.str(Re_tau)+'_coeff-aux-pts='+np.str(dummy_idx1)+'_alpha_.txt')
yp_sps, U_sps, uv_sps = data_sparse[:,0], data_sparse[:, 1], data_sparse[:,2]
err_U_sps_loc = np.mean(np.absolute(spl_U(yp_sps) - U_sps) / np.absolute(spl_U(yp_sps) + 1e-5) )
err_uv_sps_loc = np.mean(np.absolute(spl_uv(yp_sps) - uv_sps))
err_U_sps.append(err_U_sps_loc*100)
err_uv_sps.append(err_uv_sps_loc*100)
plt.semilogx (yp_sps.reshape((-1,1)), 2*U_sps.reshape(-1)/np.max(U_plus), label = r"$U_{nn}$; data(%):"+np.str(sprse*100))
#plt.semilogx (y_plus, U_plus/np.max(U_plus) , 'k--', label = r"$U_{dns}$")
#plt.semilogx (yp_sps.reshape((-1,1)), U_sps.reshape(-1)/np.max(U_plus), 'r', label = r"$U_{nn}$")
plt.semilogx (yp_sps.reshape((-1,1)), uv_sps.reshape(-1), label = r"$uv_{nn}$; data(%):"+np.str(sprse*100))
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.xlabel(r"$y^+$")
plt.ylabel("values")
plt.title(r"Couette : Non Fickian low, $Re_{\tau}$ = "+np.str(Re_tau))
plt.tight_layout()
plt.savefig('pics/spase_nf_couette_Re_'+np.str(Re_tau)+'.png', dpi=300)
plt.show()
#plt.close(fig)
sparese = np.array(sparese)*100
plt.plot (sparese, err_U_sps[:3], label = r"$Re_{\tau}$ :" +np.str(Re_tau1[0]))
plt.plot (sparese, err_U_sps[3:6], label = r"$Re_{\tau}$ :" +np.str(Re_tau1[1]))
plt.plot (sparese, err_U_sps[6:9], label = r"$Re_{\tau}$ :" +np.str(Re_tau1[2]))
plt.plot (sparese, err_U_sps[9:], label = r"$Re_{\tau}$ :" +np.str(Re_tau1[3]))
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.xlabel(r"% of data")
plt.ylabel(" U : Error (%)")
plt.title(r"Couette : Non Fickian law, error in velocity")
plt.tight_layout()
plt.savefig('pics/cou_nf_u_err.png', dpi=300)
plt.show()
plt.plot (sparese, err_uv_sps[:3], label = r"$Re_{\tau}$ :" +np.str(Re_tau1[0]))
plt.plot (sparese, err_uv_sps[3:6], label = r"$Re_{\tau}$ :" +np.str(Re_tau1[1]))
plt.plot (sparese, err_uv_sps[6:9], label = r"$Re_{\tau}$ :" +np.str(Re_tau1[2]))
plt.plot (sparese, err_uv_sps[9:], label = r"$Re_{\tau}$ :" +np.str(Re_tau1[3]))
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.xlabel(r"% of data")
plt.ylabel(" uv : Error (%)")
plt.title(r"Couette : Non Fickian law, error in Reynolds Stress")
plt.tight_layout()
plt.savefig('pics/cou_nf_uv_err.png', dpi=300)
plt.show() | [
"numpy.str",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.tight_layout",
"scipy.interpolate.make_interp_spline",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplotl... | [((383, 415), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'BIGGER_SIZE'}), "('font', size=BIGGER_SIZE)\n", (389, 415), True, 'from matplotlib import pyplot as plt\n'), ((456, 493), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'MEDIUM_SIZE'}), "('axes', titlesize=MEDIUM_SIZE)\n", (462, 493), True, 'from matplotlib import pyplot as plt\n'), ((528, 565), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'MEDIUM_SIZE'}), "('axes', labelsize=MEDIUM_SIZE)\n", (534, 565), True, 'from matplotlib import pyplot as plt\n'), ((603, 640), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'SMALL_SIZE'}), "('xtick', labelsize=SMALL_SIZE)\n", (609, 640), True, 'from matplotlib import pyplot as plt\n'), ((675, 712), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'SMALL_SIZE'}), "('ytick', labelsize=SMALL_SIZE)\n", (681, 712), True, 'from matplotlib import pyplot as plt\n'), ((747, 785), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'MEDIUM_SIZE'}), "('legend', fontsize=MEDIUM_SIZE)\n", (753, 785), True, 'from matplotlib import pyplot as plt\n'), ((808, 847), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'MEDIUM_SIZE'}), "('figure', titlesize=MEDIUM_SIZE)\n", (814, 847), True, 'from matplotlib import pyplot as plt\n'), ((3575, 3648), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.0)\n", (3585, 3648), True, 'from matplotlib import pyplot as plt\n'), ((3649, 3672), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""% of data"""'], {}), "('% of data')\n", (3659, 3672), True, 'from matplotlib import pyplot as plt\n'), ((3675, 3703), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""" U : Error (%)"""'], {}), "(' U : Error (%)')\n", (3685, 3703), True, 'from matplotlib import pyplot as plt\n'), ((3705, 
3762), 'matplotlib.pyplot.title', 'plt.title', (['"""Couette : Non Fickian law, error in velocity"""'], {}), "('Couette : Non Fickian law, error in velocity')\n", (3714, 3762), True, 'from matplotlib import pyplot as plt\n'), ((3765, 3783), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3781, 3783), True, 'from matplotlib import pyplot as plt\n'), ((3785, 3830), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pics/cou_nf_u_err.png"""'], {'dpi': '(300)'}), "('pics/cou_nf_u_err.png', dpi=300)\n", (3796, 3830), True, 'from matplotlib import pyplot as plt\n'), ((3832, 3842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3840, 3842), True, 'from matplotlib import pyplot as plt\n'), ((4195, 4268), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.0)\n", (4205, 4268), True, 'from matplotlib import pyplot as plt\n'), ((4269, 4292), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""% of data"""'], {}), "('% of data')\n", (4279, 4292), True, 'from matplotlib import pyplot as plt\n'), ((4295, 4324), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""" uv : Error (%)"""'], {}), "(' uv : Error (%)')\n", (4305, 4324), True, 'from matplotlib import pyplot as plt\n'), ((4326, 4390), 'matplotlib.pyplot.title', 'plt.title', (['"""Couette : Non Fickian law, error in Reynolds Stress"""'], {}), "('Couette : Non Fickian law, error in Reynolds Stress')\n", (4335, 4390), True, 'from matplotlib import pyplot as plt\n'), ((4393, 4411), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4409, 4411), True, 'from matplotlib import pyplot as plt\n'), ((4413, 4459), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pics/cou_nf_uv_err.png"""'], {'dpi': '(300)'}), "('pics/cou_nf_uv_err.png', dpi=300)\n", (4424, 4459), True, 'from matplotlib import pyplot as plt\n'), ((4461, 4471), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4469, 4471), True, 'from matplotlib import pyplot as plt\n'), ((1173, 1189), 'numpy.loadtxt', 'np.loadtxt', (['str1'], {}), '(str1)\n', (1183, 1189), True, 'import numpy as np\n'), ((1337, 1371), 'scipy.interpolate.make_interp_spline', 'make_interp_spline', (['y_plus', 'U_plus'], {}), '(y_plus, U_plus)\n', (1355, 1371), False, 'from scipy.interpolate import make_interp_spline\n'), ((1386, 1421), 'scipy.interpolate.make_interp_spline', 'make_interp_spline', (['y_plus', 'uv_plus'], {}), '(y_plus, uv_plus)\n', (1404, 1421), False, 'from scipy.interpolate import make_interp_spline\n'), ((1439, 1475), 'numpy.where', 'np.where', (['(y_plus < new_Re_tau + 0.01)'], {}), '(y_plus < new_Re_tau + 0.01)\n', (1447, 1475), True, 'import numpy as np\n'), ((3045, 3063), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3061, 3063), True, 'from matplotlib import pyplot as plt\n'), ((3152, 3162), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3160, 3162), True, 'from matplotlib import pyplot as plt\n'), ((3205, 3222), 'numpy.array', 'np.array', (['sparese'], {}), '(sparese)\n', (3213, 3222), True, 'import numpy as np\n'), ((2821, 2894), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.0)\n", (2831, 2894), True, 'from matplotlib import pyplot as plt\n'), ((2903, 2922), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$y^+$"""'], {}), "('$y^+$')\n", (2913, 2922), True, 'from matplotlib import pyplot as plt\n'), ((2933, 2953), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""values"""'], {}), "('values')\n", (2943, 2953), True, 'from matplotlib import pyplot as plt\n'), ((1139, 1153), 'numpy.str', 'np.str', (['Re_tau'], {}), '(Re_tau)\n', (1145, 1153), True, 'import numpy as np\n'), ((1532, 1546), 'numpy.max', 'np.max', (['U_plus'], {}), '(U_plus)\n', 
(1538, 1546), True, 'import numpy as np\n'), ((3292, 3310), 'numpy.str', 'np.str', (['Re_tau1[0]'], {}), '(Re_tau1[0])\n', (3298, 3310), True, 'import numpy as np\n'), ((3374, 3392), 'numpy.str', 'np.str', (['Re_tau1[1]'], {}), '(Re_tau1[1])\n', (3380, 3392), True, 'import numpy as np\n'), ((3456, 3474), 'numpy.str', 'np.str', (['Re_tau1[2]'], {}), '(Re_tau1[2])\n', (3462, 3474), True, 'import numpy as np\n'), ((3537, 3555), 'numpy.str', 'np.str', (['Re_tau1[3]'], {}), '(Re_tau1[3])\n', (3543, 3555), True, 'import numpy as np\n'), ((3909, 3927), 'numpy.str', 'np.str', (['Re_tau1[0]'], {}), '(Re_tau1[0])\n', (3915, 3927), True, 'import numpy as np\n'), ((3992, 4010), 'numpy.str', 'np.str', (['Re_tau1[1]'], {}), '(Re_tau1[1])\n', (3998, 4010), True, 'import numpy as np\n'), ((4075, 4093), 'numpy.str', 'np.str', (['Re_tau1[2]'], {}), '(Re_tau1[2])\n', (4081, 4093), True, 'import numpy as np\n'), ((4157, 4175), 'numpy.str', 'np.str', (['Re_tau1[3]'], {}), '(Re_tau1[3])\n', (4163, 4175), True, 'import numpy as np\n'), ((2400, 2414), 'numpy.max', 'np.max', (['U_plus'], {}), '(U_plus)\n', (2406, 2414), True, 'import numpy as np\n'), ((3018, 3032), 'numpy.str', 'np.str', (['Re_tau'], {}), '(Re_tau)\n', (3024, 3032), True, 'import numpy as np\n'), ((3109, 3123), 'numpy.str', 'np.str', (['Re_tau'], {}), '(Re_tau)\n', (3115, 3123), True, 'import numpy as np\n'), ((1910, 1928), 'numpy.str', 'np.str', (['dummy_idx1'], {}), '(dummy_idx1)\n', (1916, 1928), True, 'import numpy as np\n'), ((2446, 2465), 'numpy.str', 'np.str', (['(sprse * 100)'], {}), '(sprse * 100)\n', (2452, 2465), True, 'import numpy as np\n'), ((2768, 2787), 'numpy.str', 'np.str', (['(sprse * 100)'], {}), '(sprse * 100)\n', (2774, 2787), True, 'import numpy as np\n'), ((1877, 1891), 'numpy.str', 'np.str', (['Re_tau'], {}), '(Re_tau)\n', (1883, 1891), True, 'import numpy as np\n')] |
import json
import numpy as np
from nose import with_setup
from pybbn.generator.bbngenerator import generate_singly_bbn, convert_for_exact_inference
from pybbn.graph.dag import Dag, BbnUtil, Bbn
from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.node import Node
def setup():
"""
Setup.
:return: None.
"""
np.random.seed(37)
def teardown():
"""
Teardown.
:return: None.
"""
pass
@with_setup(setup, teardown)
def test_dag_creation():
"""
Tests DAG creation.
:return: None.
"""
n0 = Node(0)
n1 = Node(1)
n2 = Node(2)
e0 = Edge(n0, n1, EdgeType.DIRECTED)
e1 = Edge(n1, n2, EdgeType.DIRECTED)
e2 = Edge(n2, n0, EdgeType.DIRECTED)
g = Dag()
g.add_node(n0)
g.add_node(n1)
g.add_edge(e0)
g.add_edge(e1)
g.add_edge(e2)
print(g)
assert len(g.get_nodes()) == 3
assert len(g.get_edges()) == 2
assert len(list(g.get_neighbors(0))) == 1
assert len(list(g.get_neighbors(1))) == 2
assert len(list(g.get_neighbors(2))) == 1
assert 1 in g.get_neighbors(0)
assert 0 in g.get_neighbors(1)
assert 2 in g.get_neighbors(1)
assert 1 in g.get_neighbors(2)
assert g.edge_exists(0, 1) == 1
assert g.edge_exists(1, 2) == 1
assert g.edge_exists(0, 2) == 0
assert len(g.get_parents(0)) == 0
assert len(g.get_parents(1)) == 1
assert len(g.get_parents(2)) == 1
assert 0 in g.get_parents(1)
assert 1 in g.get_parents(2)
assert len(g.get_children(0)) == 1
assert len(g.get_children(1)) == 1
assert len(g.get_children(2)) == 0
assert 1 in g.get_children(0)
assert 2 in g.get_children(1)
@with_setup(setup, teardown)
def test_csv_serde():
"""
Tests CSV serde.
:return: None.
"""
try:
lhs = BbnUtil.get_huang_graph()
Bbn.to_csv(lhs, 'huang.csv')
rhs = Bbn.from_csv('huang.csv')
assert len(lhs.get_nodes()) == len(rhs.get_nodes())
assert len(lhs.get_edges()) == len(rhs.get_edges())
lhs_nodes = set([str(node) for node in lhs.get_nodes()])
rhs_nodes = set([str(node) for node in rhs.get_nodes()])
for n in lhs_nodes:
assert n in rhs_nodes
lhs_edges = set([str(edge) for edge in lhs.get_edges()])
rhs_edges = set([str(edge) for edge in rhs.get_edges()])
for e in lhs_edges:
assert e in rhs_edges
except:
assert False
finally:
import os
try:
os.remove('huang.csv')
except:
pass
@with_setup(setup, teardown)
def test_to_dict():
"""
Tests creating serializable dictionary representation.
:return: None.
"""
bbn = BbnUtil.get_huang_graph()
d = Bbn.to_dict(bbn)
j = json.dumps(d, sort_keys=True, indent=2)
e = """{
"edges": [
{
"ch": 1,
"pa": 0
},
{
"ch": 2,
"pa": 0
},
{
"ch": 3,
"pa": 1
},
{
"ch": 4,
"pa": 2
},
{
"ch": 5,
"pa": 3
},
{
"ch": 5,
"pa": 4
},
{
"ch": 6,
"pa": 2
},
{
"ch": 7,
"pa": 4
},
{
"ch": 7,
"pa": 6
}
],
"nodes": {
"0": {
"probs": [
0.5,
0.5
],
"variable": {
"id": 0,
"name": "a",
"values": [
"on",
"off"
]
}
},
"1": {
"probs": [
0.5,
0.5,
0.4,
0.6
],
"variable": {
"id": 1,
"name": "b",
"values": [
"on",
"off"
]
}
},
"2": {
"probs": [
0.7,
0.3,
0.2,
0.8
],
"variable": {
"id": 2,
"name": "c",
"values": [
"on",
"off"
]
}
},
"3": {
"probs": [
0.9,
0.1,
0.5,
0.5
],
"variable": {
"id": 3,
"name": "d",
"values": [
"on",
"off"
]
}
},
"4": {
"probs": [
0.3,
0.7,
0.6,
0.4
],
"variable": {
"id": 4,
"name": "e",
"values": [
"on",
"off"
]
}
},
"5": {
"probs": [
0.01,
0.99,
0.01,
0.99,
0.01,
0.99,
0.99,
0.01
],
"variable": {
"id": 5,
"name": "f",
"values": [
"on",
"off"
]
}
},
"6": {
"probs": [
0.8,
0.2,
0.1,
0.9
],
"variable": {
"id": 6,
"name": "g",
"values": [
"on",
"off"
]
}
},
"7": {
"probs": [
0.05,
0.95,
0.95,
0.05,
0.95,
0.05,
0.95,
0.05
],
"variable": {
"id": 7,
"name": "h",
"values": [
"on",
"off"
]
}
}
}
}"""
assert len(j) == len(e)
assert j == e
@with_setup(setup, teardown)
def test_generated_serde():
"""
Tests serde of generated BBN.
:return: Nonde.
"""
g, p = generate_singly_bbn(100, max_iter=10)
e_bbn = convert_for_exact_inference(g, p)
d = Bbn.to_dict(e_bbn)
s = json.dumps(d, sort_keys=True, indent=2)
d = json.loads(s)
o_bbn = Bbn.from_dict(d)
assert len(e_bbn.get_nodes()) == len(o_bbn.get_nodes())
assert len(e_bbn.get_edges()) == len(o_bbn.get_edges())
@with_setup(setup, teardown)
def test_from_dict():
"""
Tests creating BBN from dictionary (deserialized from JSON).
:return: None.
"""
e_bbn = BbnUtil.get_huang_graph()
o_bbn = Bbn.from_dict(Bbn.to_dict(e_bbn))
assert len(e_bbn.get_nodes()) == len(o_bbn.get_nodes())
assert len(e_bbn.get_edges()) == len(o_bbn.get_edges())
| [
"json.loads",
"pybbn.graph.dag.Bbn.to_csv",
"pybbn.graph.dag.Dag",
"pybbn.graph.dag.Bbn.to_dict",
"pybbn.graph.dag.BbnUtil.get_huang_graph",
"json.dumps",
"pybbn.graph.node.Node",
"pybbn.graph.dag.Bbn.from_dict",
"nose.with_setup",
"pybbn.graph.edge.Edge",
"numpy.random.seed",
"pybbn.generator... | [((438, 465), 'nose.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (448, 465), False, 'from nose import with_setup\n'), ((1680, 1707), 'nose.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (1690, 1707), False, 'from nose import with_setup\n'), ((2569, 2596), 'nose.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (2579, 2596), False, 'from nose import with_setup\n'), ((5177, 5204), 'nose.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (5187, 5204), False, 'from nose import with_setup\n'), ((5648, 5675), 'nose.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (5658, 5675), False, 'from nose import with_setup\n'), ((340, 358), 'numpy.random.seed', 'np.random.seed', (['(37)'], {}), '(37)\n', (354, 358), True, 'import numpy as np\n'), ((559, 566), 'pybbn.graph.node.Node', 'Node', (['(0)'], {}), '(0)\n', (563, 566), False, 'from pybbn.graph.node import Node\n'), ((576, 583), 'pybbn.graph.node.Node', 'Node', (['(1)'], {}), '(1)\n', (580, 583), False, 'from pybbn.graph.node import Node\n'), ((593, 600), 'pybbn.graph.node.Node', 'Node', (['(2)'], {}), '(2)\n', (597, 600), False, 'from pybbn.graph.node import Node\n'), ((610, 641), 'pybbn.graph.edge.Edge', 'Edge', (['n0', 'n1', 'EdgeType.DIRECTED'], {}), '(n0, n1, EdgeType.DIRECTED)\n', (614, 641), False, 'from pybbn.graph.edge import Edge, EdgeType\n'), ((651, 682), 'pybbn.graph.edge.Edge', 'Edge', (['n1', 'n2', 'EdgeType.DIRECTED'], {}), '(n1, n2, EdgeType.DIRECTED)\n', (655, 682), False, 'from pybbn.graph.edge import Edge, EdgeType\n'), ((692, 723), 'pybbn.graph.edge.Edge', 'Edge', (['n2', 'n0', 'EdgeType.DIRECTED'], {}), '(n2, n0, EdgeType.DIRECTED)\n', (696, 723), False, 'from pybbn.graph.edge import Edge, EdgeType\n'), ((733, 738), 'pybbn.graph.dag.Dag', 'Dag', ([], {}), '()\n', (736, 738), False, 'from pybbn.graph.dag import Dag, BbnUtil, 
Bbn\n'), ((2721, 2746), 'pybbn.graph.dag.BbnUtil.get_huang_graph', 'BbnUtil.get_huang_graph', ([], {}), '()\n', (2744, 2746), False, 'from pybbn.graph.dag import Dag, BbnUtil, Bbn\n'), ((2755, 2771), 'pybbn.graph.dag.Bbn.to_dict', 'Bbn.to_dict', (['bbn'], {}), '(bbn)\n', (2766, 2771), False, 'from pybbn.graph.dag import Dag, BbnUtil, Bbn\n'), ((2780, 2819), 'json.dumps', 'json.dumps', (['d'], {'sort_keys': '(True)', 'indent': '(2)'}), '(d, sort_keys=True, indent=2)\n', (2790, 2819), False, 'import json\n'), ((5314, 5351), 'pybbn.generator.bbngenerator.generate_singly_bbn', 'generate_singly_bbn', (['(100)'], {'max_iter': '(10)'}), '(100, max_iter=10)\n', (5333, 5351), False, 'from pybbn.generator.bbngenerator import generate_singly_bbn, convert_for_exact_inference\n'), ((5364, 5397), 'pybbn.generator.bbngenerator.convert_for_exact_inference', 'convert_for_exact_inference', (['g', 'p'], {}), '(g, p)\n', (5391, 5397), False, 'from pybbn.generator.bbngenerator import generate_singly_bbn, convert_for_exact_inference\n'), ((5406, 5424), 'pybbn.graph.dag.Bbn.to_dict', 'Bbn.to_dict', (['e_bbn'], {}), '(e_bbn)\n', (5417, 5424), False, 'from pybbn.graph.dag import Dag, BbnUtil, Bbn\n'), ((5433, 5472), 'json.dumps', 'json.dumps', (['d'], {'sort_keys': '(True)', 'indent': '(2)'}), '(d, sort_keys=True, indent=2)\n', (5443, 5472), False, 'import json\n'), ((5481, 5494), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (5491, 5494), False, 'import json\n'), ((5507, 5523), 'pybbn.graph.dag.Bbn.from_dict', 'Bbn.from_dict', (['d'], {}), '(d)\n', (5520, 5523), False, 'from pybbn.graph.dag import Dag, BbnUtil, Bbn\n'), ((5810, 5835), 'pybbn.graph.dag.BbnUtil.get_huang_graph', 'BbnUtil.get_huang_graph', ([], {}), '()\n', (5833, 5835), False, 'from pybbn.graph.dag import Dag, BbnUtil, Bbn\n'), ((1809, 1834), 'pybbn.graph.dag.BbnUtil.get_huang_graph', 'BbnUtil.get_huang_graph', ([], {}), '()\n', (1832, 1834), False, 'from pybbn.graph.dag import Dag, BbnUtil, Bbn\n'), ((1843, 1871), 
'pybbn.graph.dag.Bbn.to_csv', 'Bbn.to_csv', (['lhs', '"""huang.csv"""'], {}), "(lhs, 'huang.csv')\n", (1853, 1871), False, 'from pybbn.graph.dag import Dag, BbnUtil, Bbn\n'), ((1887, 1912), 'pybbn.graph.dag.Bbn.from_csv', 'Bbn.from_csv', (['"""huang.csv"""'], {}), "('huang.csv')\n", (1899, 1912), False, 'from pybbn.graph.dag import Dag, BbnUtil, Bbn\n'), ((5862, 5880), 'pybbn.graph.dag.Bbn.to_dict', 'Bbn.to_dict', (['e_bbn'], {}), '(e_bbn)\n', (5873, 5880), False, 'from pybbn.graph.dag import Dag, BbnUtil, Bbn\n'), ((2510, 2532), 'os.remove', 'os.remove', (['"""huang.csv"""'], {}), "('huang.csv')\n", (2519, 2532), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
import queue
from queueing import task
from queueing import server
class Simulator(object):
"""
Class representation of a simulator.
Attributes
----------
total_time : int
The total time the simulation runs for.
total_tasks : int
The number of tasks in the simulation.
arrival_rate : int
The average arrival rate of the tasks.
service_rate : int
The average service rate of the tasks.
server_count : int
The number of servers in the simulation.
tasks : queue.Queue
The queue of task objects in the simulation.
tasks_waiting : queue.Queue
The queue of arrived task objects in the simulation waiting for a free server.
servers : server.Server[]
All server objects in the simulation.
arrival_times : int[]
All arrival times in the simulation, sorted.
service_times : int[]
All service times in the simulation, unsorted.
"""
def __init__(self, total_time, arrival_rate, service_rate, server_count):
"""
Class constructor.
Parameters
----------
total_time : int
The total time the simulation runs for.
arrival_rate : int
The average arrival rate of the tasks.
service_rate : int
The average service rate of the tasks.
server_count : int
The number of servers in the simulation.
"""
self.total_time = total_time * 60 * 60
self.total_tasks = total_time * arrival_rate
self.arrival_rate = arrival_rate
self.service_rate = service_rate
self.server_count = server_count
self.tasks = queue.Queue()
self.tasks_waiting = queue.Queue()
self.servers = []
self.arrival_times = []
self.service_times = []
def prepare(self):
"""
Prepares the simulation.
"""
# Create servers
for i in range(self.server_count):
self.servers.append(server.Server())
# Create arrival times and service times
for i in range(self.total_tasks):
self.arrival_times.append(int(np.random.exponential(1 / self.arrival_rate) * 60 * 60 * 2))
self.service_times.append(int(np.random.exponential(1 / self.service_rate) * 60 * 60))
self.arrival_times.sort()
# Create tasks
for i in range(self.total_tasks):
arrival_time = self.arrival_times[i]
service_time = self.service_times[i]
self.tasks.put(task.Task(arrival_time, service_time))
def run(self):
"""
Runs the simulation.
"""
for current_time in range(self.total_time):
# Check for arriving tasks
for arrival_time in self.arrival_times:
if current_time == arrival_time:
# Task arrived, add to waiting tasks
current_task = self.tasks.get_nowait()
current_task.arrival_time = arrival_time
self.tasks_waiting.put(current_task)
# Check for free servers
for current_server in self.servers:
if not current_server.is_busy:
try:
# Server is not busy, get next waiting task
current_task = self.tasks_waiting.get_nowait()
current_task.start(current_time)
current_server.add_task(current_task)
except queue.Empty:
pass
# Update all servers
current_server.update(current_time)
def analyze(self):
"""
Analyzes the simulation.
"""
total_finished_tasks = 0
for current_server in self.servers:
total_finished_tasks += current_server.task_count
print("Total tasks: " + str(self.total_tasks))
print("Tasks finished: " + str(total_finished_tasks))
def main():
# Quantity of incoming tasks per hour
arrival_rate = 40
# Quantity of tasks per hour a server can handle
service_rate = 10
# Total simulation time in hours
total_time = 10
# Total number of servers in the simulation
server_count = 4
# Create and prepare simulation
simulator = Simulator(total_time, arrival_rate, service_rate, server_count)
simulator.prepare()
# Run simulation
simulator.run()
# Analyze the simulation
simulator.analyze()
if __name__ == "__main__":
main()
| [
"queue.Queue",
"queueing.task.Task",
"numpy.random.exponential",
"queueing.server.Server"
] | [((1743, 1756), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1754, 1756), False, 'import queue\n'), ((1786, 1799), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1797, 1799), False, 'import queue\n'), ((2074, 2089), 'queueing.server.Server', 'server.Server', ([], {}), '()\n', (2087, 2089), False, 'from queueing import server\n'), ((2612, 2649), 'queueing.task.Task', 'task.Task', (['arrival_time', 'service_time'], {}), '(arrival_time, service_time)\n', (2621, 2649), False, 'from queueing import task\n'), ((2328, 2372), 'numpy.random.exponential', 'np.random.exponential', (['(1 / self.service_rate)'], {}), '(1 / self.service_rate)\n', (2349, 2372), True, 'import numpy as np\n'), ((2225, 2269), 'numpy.random.exponential', 'np.random.exponential', (['(1 / self.arrival_rate)'], {}), '(1 / self.arrival_rate)\n', (2246, 2269), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
## @package inversetoon.core.clean_normal
#
# inversetoon.core.clean_normal utility package.
# @author tody
# @date 2015/10/03
import numpy as np
import cv2
import matplotlib.pyplot as plt
from inversetoon.datasets.normal import dataNames, loadData, saveData
from inversetoon.cv.normal import normalizeImage
from inversetoon.np.norm import normVectors
from inversetoon.cv.image import to32F, to8U
def cleanNormal(N_32F, A_8U):
# N_32F = cv2.bilateralFilter(N_32F, 0, 0.1, 5)
h, w = N_32F.shape[:2]
plt.subplot(1, 2, 1)
plt.gray()
plt.imshow(normVectors(N_32F.reshape(-1, 3)).reshape(h, w))
plt.subplot(1, 2, 2)
plt.gray()
A_32F = to32F(A_8U)
A_32F = cv2.GaussianBlur(A_32F, (0, 0), 3.0)
A_32F = np.clip(10.0 * (A_32F - 0.5) + 0.5, 0.0, 1.0)
A_32F = cv2.GaussianBlur(A_32F, (0, 0), 3.0)
N_fix = A_32F > 0.9
N_bg = A_32F < 0.25
A_32F = np.clip(10.0 * (A_32F - 0.5) + 0.5, 0.0, 1.0)
A_8U = to8U(A_32F)
# plt.imshow(A_8U)
# plt.show()
N_32F_blur = cv2.GaussianBlur(N_32F, (0, 0), 3.0)
for i in xrange(10):
N_32F_blur = cv2.GaussianBlur(N_32F_blur, (0, 0), 3.0)
N_32F_blur[N_fix, :] = N_32F[N_fix, :]
N_32F = N_32F_blur
# N_32F[N_bg, 2] = 0.0
N_32F_normalized = normalizeImage(N_32F)
#A_8U = np.uint8(np.clip(1000.0 * N_32F_normalized[:, :, 2], 0.0, 255.0))
# A_8U = cv2.bilateralFilter(A_8U, 0, 70, 5)
return N_32F_normalized, A_8U
def cleanNormalBatch():
target = "original"
for data_name in dataNames(target="original"):
N_32F, A_8U = loadData(data_name, target)
N_32F, A_8U = cleanNormal(N_32F, A_8U)
saveData(data_name, N_32F, A_8U, target="normal")
if __name__ == '__main__':
cleanNormalBatch()
| [
"numpy.clip",
"inversetoon.cv.image.to32F",
"matplotlib.pyplot.gray",
"inversetoon.cv.normal.normalizeImage",
"inversetoon.datasets.normal.dataNames",
"inversetoon.datasets.normal.loadData",
"inversetoon.datasets.normal.saveData",
"cv2.GaussianBlur",
"matplotlib.pyplot.subplot",
"inversetoon.cv.im... | [((553, 573), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (564, 573), True, 'import matplotlib.pyplot as plt\n'), ((578, 588), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (586, 588), True, 'import matplotlib.pyplot as plt\n'), ((658, 678), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (669, 678), True, 'import matplotlib.pyplot as plt\n'), ((683, 693), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (691, 693), True, 'import matplotlib.pyplot as plt\n'), ((706, 717), 'inversetoon.cv.image.to32F', 'to32F', (['A_8U'], {}), '(A_8U)\n', (711, 717), False, 'from inversetoon.cv.image import to32F, to8U\n'), ((730, 766), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['A_32F', '(0, 0)', '(3.0)'], {}), '(A_32F, (0, 0), 3.0)\n', (746, 766), False, 'import cv2\n'), ((779, 824), 'numpy.clip', 'np.clip', (['(10.0 * (A_32F - 0.5) + 0.5)', '(0.0)', '(1.0)'], {}), '(10.0 * (A_32F - 0.5) + 0.5, 0.0, 1.0)\n', (786, 824), True, 'import numpy as np\n'), ((837, 873), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['A_32F', '(0, 0)', '(3.0)'], {}), '(A_32F, (0, 0), 3.0)\n', (853, 873), False, 'import cv2\n'), ((934, 979), 'numpy.clip', 'np.clip', (['(10.0 * (A_32F - 0.5) + 0.5)', '(0.0)', '(1.0)'], {}), '(10.0 * (A_32F - 0.5) + 0.5, 0.0, 1.0)\n', (941, 979), True, 'import numpy as np\n'), ((991, 1002), 'inversetoon.cv.image.to8U', 'to8U', (['A_32F'], {}), '(A_32F)\n', (995, 1002), False, 'from inversetoon.cv.image import to32F, to8U\n'), ((1061, 1097), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['N_32F', '(0, 0)', '(3.0)'], {}), '(N_32F, (0, 0), 3.0)\n', (1077, 1097), False, 'import cv2\n'), ((1307, 1328), 'inversetoon.cv.normal.normalizeImage', 'normalizeImage', (['N_32F'], {}), '(N_32F)\n', (1321, 1328), False, 'from inversetoon.cv.normal import normalizeImage\n'), ((1563, 1591), 'inversetoon.datasets.normal.dataNames', 'dataNames', ([], {'target': '"""original"""'}), 
"(target='original')\n", (1572, 1591), False, 'from inversetoon.datasets.normal import dataNames, loadData, saveData\n'), ((1144, 1185), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['N_32F_blur', '(0, 0)', '(3.0)'], {}), '(N_32F_blur, (0, 0), 3.0)\n', (1160, 1185), False, 'import cv2\n'), ((1615, 1642), 'inversetoon.datasets.normal.loadData', 'loadData', (['data_name', 'target'], {}), '(data_name, target)\n', (1623, 1642), False, 'from inversetoon.datasets.normal import dataNames, loadData, saveData\n'), ((1699, 1748), 'inversetoon.datasets.normal.saveData', 'saveData', (['data_name', 'N_32F', 'A_8U'], {'target': '"""normal"""'}), "(data_name, N_32F, A_8U, target='normal')\n", (1707, 1748), False, 'from inversetoon.datasets.normal import dataNames, loadData, saveData\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import numpy
import copy
from dcase_util.containers import ObjectContainer
class ProbabilityEncoder(ObjectContainer):
    # Post-processing for class-probability matrices: collapsing over time,
    # sliding-window smoothing, binarization, and max-probability label picking.
    def __init__(self, label_list=None, **kwargs):
        """Constructor
        Parameters
        ----------
        label_list : list of str
            Label list
        """
        super(ProbabilityEncoder, self).__init__(**kwargs)
        # Default label list; max_selection falls back to this when no
        # per-call label list is supplied.
        self.label_list = label_list
    def collapse_probabilities(self, probabilities, operator='sum', time_axis=1):
        """Collapse probabilities along time_axis
        Parameters
        ----------
        probabilities : numpy.ndarray
            Probabilities to be collapsed
        operator : str ('sum', 'prod', 'mean')
            Operator to be used
            Default value 'sum'
        time_axis : int
            time axis
            Default value 1
        Raises
        ------
        AssertionError
            Unknown operator
        Returns
        -------
        numpy.ndarray
            collapsed probabilities
        """
        if operator not in ['sum', 'prod', 'mean']:
            message = '{name}: Unknown operator [{operator}].'.format(
                name=self.__class__.__name__,
                operator=operator
            )
            self.logger.exception(message)
            raise AssertionError(message)
        # Get data_axis: the class axis is whichever axis is NOT time.
        # NOTE(review): only time_axis values 0 and 1 are supported; any other
        # value leaves current_array unassigned below — confirm callers.
        if time_axis == 0:
            data_axis = 1
        else:
            data_axis = 0
        # Initialize array to store results (-inf marks "not yet written").
        accumulated = numpy.ones(probabilities.shape[data_axis]) * -numpy.inf
        # Loop along data_axis: one collapsed value per class.
        for class_id in range(0, probabilities.shape[data_axis]):
            # Get current array: the full time series for this class.
            if time_axis == 0:
                current_array = probabilities[:, class_id]
            elif time_axis == 1:
                current_array = probabilities[class_id, :]
            # Collapse array with given operator
            if operator == 'sum':
                accumulated[class_id] = numpy.sum(current_array)
            elif operator == 'prod':
                accumulated[class_id] = numpy.prod(current_array)
            elif operator == 'mean':
                accumulated[class_id] = numpy.mean(current_array)
        return accumulated
    def collapse_probabilities_windowed(self, probabilities, window_length, operator='sliding_sum', time_axis=1):
        """Collapse probabilities with a sliding window. Window hop size is one.
        Parameters
        ----------
        probabilities : numpy.ndarray
            Probabilities to be collapsed
        window_length : int
            Window length in analysis frame amount.
        operator : str ('sliding_sum', 'sliding_mean', 'sliding_median')
            Operator to be used
            Default value 'sliding_sum'
        time_axis : int
            time axis
            Default value 1
        Raises
        ------
        AssertionError
            Unknown operator
        Returns
        -------
        numpy.ndarray
            collapsed probabilities
        """
        if operator not in ['sliding_sum', 'sliding_mean', 'sliding_median']:
            message = '{name}: Unknown operator [{operator}].'.format(
                name=self.__class__.__name__,
                operator=operator
            )
            self.logger.exception(message)
            raise AssertionError(message)
        # Get data_axis: the class axis is whichever axis is NOT time.
        if time_axis == 0:
            data_axis = 1
        else:
            data_axis = 0
        # Lets keep the system causal and use look-back while smoothing (accumulating) likelihoods
        output_probabilities = copy.deepcopy(probabilities)
        # Loop along data_axis: smooth each class time series independently.
        for class_id in range(0, probabilities.shape[data_axis]):
            # Get current array: the full time series for this class.
            if time_axis == 0:
                current_array = probabilities[:, class_id]
            elif time_axis == 1:
                current_array = probabilities[class_id, :]
            # Loop windows: each window is [stop_id - window_length, stop_id).
            for stop_id in range(0, probabilities.shape[time_axis]):
                start_id = stop_id - window_length
                if start_id < 0:
                    start_id = 0
                if start_id != stop_id:
                    if operator == 'sliding_sum':
                        current_result = numpy.sum(current_array[start_id:stop_id])
                    elif operator == 'sliding_mean':
                        current_result = numpy.mean(current_array[start_id:stop_id])
                    elif operator == 'sliding_median':
                        current_result = numpy.median(current_array[start_id:stop_id])
                else:
                    # Degenerate window (start == stop): pass the raw value through.
                    current_result = current_array[start_id]
                # NOTE(review): the smoothed value is written back at start_id
                # (the window START, not stop_id), so the output lags the window
                # end by window_length frames — confirm this is intended.
                if time_axis == 0:
                    output_probabilities[start_id, class_id] = current_result
                elif time_axis == 1:
                    output_probabilities[class_id, start_id] = current_result
        return output_probabilities
    def binarization(self, probabilities, binarization_type='global_threshold', threshold=0.5, time_axis=1):
        """Binarization
        Parameters
        ----------
        probabilities : numpy.ndarray
            Probabilities to be binarized
        binarization_type : str ('global_threshold', 'class_threshold', 'frame_max')
        threshold : float
            Binarization threshold, value of the threshold are replaced with 1 and under with 0.
            Default value 0.5
        time_axis : int
            Axis index for the frames
            Default value 1
        Raises
        ------
        AssertionError:
            Unknown binarization_type
        Returns
        -------
        numpy.ndarray
            Binarized data
        """
        if binarization_type not in ['global_threshold', 'class_threshold', 'frame_max']:
            message = '{name}: Unknown frame_binarization type [{type}].'.format(
                name=self.__class__.__name__,
                type=binarization_type
            )
            self.logger.exception(message)
            raise AssertionError(message)
        # Get data_axis: the class axis is whichever axis is NOT time.
        if time_axis == 0:
            data_axis = 1
        else:
            data_axis = 0
        if binarization_type == 'global_threshold':
            # One scalar threshold applied to every cell.
            return numpy.array(probabilities >= threshold, dtype=int)
        # NOTE(review): 'class_threshold' requires threshold to be a list
        # (one threshold per class); a non-list threshold falls through every
        # branch and this method implicitly returns None.
        elif binarization_type == 'class_threshold' and isinstance(threshold, list):
            data = []
            for class_id, class_threshold in enumerate(threshold):
                if data_axis == 0:
                    data.append(numpy.array(probabilities[class_id, :] >= class_threshold, dtype=int))
                elif data_axis == 1:
                    data.append(numpy.array(probabilities[:, class_id] >= class_threshold, dtype=int))
            if data_axis == 0:
                return numpy.vstack(data)
            elif data_axis == 1:
                return numpy.vstack(data).T
        elif binarization_type == 'frame_max':
            # Mark, per frame, the class(es) holding the frame maximum.
            if data_axis == 0:
                return numpy.array((probabilities / numpy.max(probabilities, axis=0)) == 1, dtype=int)
            elif data_axis == 1:
                return numpy.array((probabilities.T / numpy.max(probabilities, axis=1)).T == 1, dtype=int)
    def max_selection(self, probabilities, label_list=None):
        """Selection based on maximum probability
        Parameters
        ----------
        probabilities : numpy.ndarray
            Probabilities
        label_list : list of str
            Label list
            Default value None
        Returns
        -------
        numpy.ndarray
        """
        if label_list is None:
            label_list = self.label_list
        # Index of the most probable class over the flattened input.
        class_id = numpy.argmax(probabilities)
        # Guard against an argmax index outside the label list (e.g. when the
        # probability array is longer than the label list).
        if class_id < len(label_list):
            return label_list[class_id]
        else:
            return None
| [
"numpy.prod",
"numpy.mean",
"numpy.median",
"numpy.ones",
"numpy.argmax",
"numpy.max",
"numpy.array",
"numpy.sum",
"numpy.vstack",
"copy.deepcopy"
] | [((3755, 3783), 'copy.deepcopy', 'copy.deepcopy', (['probabilities'], {}), '(probabilities)\n', (3768, 3783), False, 'import copy\n'), ((7885, 7912), 'numpy.argmax', 'numpy.argmax', (['probabilities'], {}), '(probabilities)\n', (7897, 7912), False, 'import numpy\n'), ((1618, 1660), 'numpy.ones', 'numpy.ones', (['probabilities.shape[data_axis]'], {}), '(probabilities.shape[data_axis])\n', (1628, 1660), False, 'import numpy\n'), ((6441, 6491), 'numpy.array', 'numpy.array', (['(probabilities >= threshold)'], {'dtype': 'int'}), '(probabilities >= threshold, dtype=int)\n', (6452, 6491), False, 'import numpy\n'), ((2111, 2135), 'numpy.sum', 'numpy.sum', (['current_array'], {}), '(current_array)\n', (2120, 2135), False, 'import numpy\n'), ((2214, 2239), 'numpy.prod', 'numpy.prod', (['current_array'], {}), '(current_array)\n', (2224, 2239), False, 'import numpy\n'), ((7001, 7019), 'numpy.vstack', 'numpy.vstack', (['data'], {}), '(data)\n', (7013, 7019), False, 'import numpy\n'), ((2318, 2343), 'numpy.mean', 'numpy.mean', (['current_array'], {}), '(current_array)\n', (2328, 2343), False, 'import numpy\n'), ((4445, 4487), 'numpy.sum', 'numpy.sum', (['current_array[start_id:stop_id]'], {}), '(current_array[start_id:stop_id])\n', (4454, 4487), False, 'import numpy\n'), ((4583, 4626), 'numpy.mean', 'numpy.mean', (['current_array[start_id:stop_id]'], {}), '(current_array[start_id:stop_id])\n', (4593, 4626), False, 'import numpy\n'), ((6734, 6803), 'numpy.array', 'numpy.array', (['(probabilities[class_id, :] >= class_threshold)'], {'dtype': 'int'}), '(probabilities[class_id, :] >= class_threshold, dtype=int)\n', (6745, 6803), False, 'import numpy\n'), ((7077, 7095), 'numpy.vstack', 'numpy.vstack', (['data'], {}), '(data)\n', (7089, 7095), False, 'import numpy\n'), ((4724, 4769), 'numpy.median', 'numpy.median', (['current_array[start_id:stop_id]'], {}), '(current_array[start_id:stop_id])\n', (4736, 4769), False, 'import numpy\n'), ((6875, 6944), 'numpy.array', 'numpy.array', 
(['(probabilities[:, class_id] >= class_threshold)'], {'dtype': 'int'}), '(probabilities[:, class_id] >= class_threshold, dtype=int)\n', (6886, 6944), False, 'import numpy\n'), ((7229, 7261), 'numpy.max', 'numpy.max', (['probabilities'], {'axis': '(0)'}), '(probabilities, axis=0)\n', (7238, 7261), False, 'import numpy\n'), ((7368, 7400), 'numpy.max', 'numpy.max', (['probabilities'], {'axis': '(1)'}), '(probabilities, axis=1)\n', (7377, 7400), False, 'import numpy\n')] |
import numpy.random as rng
def create_random_sets(random_set_options):
    '''
    Returns a list of parameter sets, in the form of dicts that you can unpack
    and pass into the run_model function.

    For every key whose value in ``random_set_options`` is None, a random
    value is drawn; otherwise the supplied fixed value is used as-is.

    Note: numpy.random.randint(low, high) samples low <= x < high — the
    upper bound is EXCLUSIVE — so the integer bounds below are one past the
    largest desired value.  (The previous code used randint(0, 1) for sex,
    which always returned 0, and randint(0, 9) for RACE, which never
    returned 9.)
    '''
    def _fixed_or(key, sampler):
        # Use the caller-supplied value when given; otherwise draw a random one.
        value = random_set_options[key]
        return sampler() if value is None else value

    parameter_sets = []
    while len(parameter_sets) < random_set_options['Number of Random Sets']:
        current_set = {}
        # sex in {0, 1}
        current_set['sex'] = _fixed_or('sex', lambda: rng.randint(0, 2))
        # age in 30..80 inclusive
        current_set['age'] = _fixed_or('age', lambda: rng.randint(30, 81))
        # RACE score in 0..9 inclusive
        current_set['RACE'] = _fixed_or('RACE', lambda: rng.randint(0, 10))
        current_set['time_since_symptoms'] = _fixed_or(
            'time_since_symptoms', lambda: rng.uniform(10, 100))
        # time_to_primary must be drawn before time_to_comprehensive and
        # transfer_time, because both of those draws depend on it.
        current_set['time_to_primary'] = _fixed_or(
            'time_to_primary', lambda: rng.uniform(10, 60))
        current_set['time_to_comprehensive'] = _fixed_or(
            'time_to_comprehensive',
            lambda: rng.uniform(current_set['time_to_primary'], 120))
        current_set['transfer_time'] = _fixed_or(
            'transfer_time',
            lambda: rng.uniform(
                current_set['time_to_comprehensive'] -
                current_set['time_to_primary'],
                current_set['time_to_comprehensive'] +
                current_set['time_to_primary']))
        parameter_sets.append(current_set)
    print('Random sets have been generated')
    return parameter_sets
"numpy.random.randint",
"numpy.random.uniform"
] | [((457, 474), 'numpy.random.randint', 'rng.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (468, 474), True, 'import numpy.random as rng\n'), ((627, 646), 'numpy.random.randint', 'rng.randint', (['(30)', '(80)'], {}), '(30, 80)\n', (638, 646), True, 'import numpy.random as rng\n'), ((801, 818), 'numpy.random.randint', 'rng.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (812, 818), True, 'import numpy.random as rng\n'), ((1005, 1025), 'numpy.random.uniform', 'rng.uniform', (['(10)', '(100)'], {}), '(10, 100)\n', (1016, 1025), True, 'import numpy.random as rng\n'), ((1318, 1337), 'numpy.random.uniform', 'rng.uniform', (['(10)', '(60)'], {}), '(10, 60)\n', (1329, 1337), True, 'import numpy.random as rng\n'), ((1567, 1615), 'numpy.random.uniform', 'rng.uniform', (["current_set['time_to_primary']", '(120)'], {}), "(current_set['time_to_primary'], 120)\n", (1578, 1615), True, 'import numpy.random as rng\n'), ((1858, 2021), 'numpy.random.uniform', 'rng.uniform', (["(current_set['time_to_comprehensive'] - current_set['time_to_primary'])", "(current_set['time_to_comprehensive'] + current_set['time_to_primary'])"], {}), "(current_set['time_to_comprehensive'] - current_set[\n 'time_to_primary'], current_set['time_to_comprehensive'] + current_set[\n 'time_to_primary'])\n", (1869, 2021), True, 'import numpy.random as rng\n')] |
# -*- coding: utf-8 -*-
"""
Plot of the Datasaurus Dozen
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
plt.style.use("ggplot")
plt.rcParams["mathtext.fontset"] = 'cm'
# Column 0 of the TSV holds the dataset name; columns 1 and 2 hold x and y.
labels = np.genfromtxt("../data/DatasaurusDozen.tsv", delimiter="\t",
                      usecols=(0,), skip_header=1, dtype=str)
X = np.loadtxt("../data/DatasaurusDozen.tsv", delimiter="\t",
               usecols=(1,), skiprows=1)
Y = np.loadtxt("../data/DatasaurusDozen.tsv", delimiter="\t",
               usecols=(2,), skiprows=1)
list_labels = ['wide_lines', 'star', 'h_lines', 'high_lines', 'v_lines',
               'circle', 'bullseye', 'slant_up', 'slant_down', 'x_shape',
               'dots', 'away', 'dino']
#%% Plot of the dozen (all datasets except the dino itself)
ticks = np.linspace(0, 100, 5)
plt.figure(figsize=(10, 6))
for idx, name in enumerate(list_labels[:-1]):
    in_set = labels == name
    plt.subplot(3, 4, idx + 1)
    plt.plot(X[in_set], Y[in_set], 'ok', markersize=3)
    plt.axis("image")
    plt.axis([0, 100, 0, 100])
    # Tick labels only along the outer edge of the 3x4 grid.
    if idx >= 8:
        plt.xticks(ticks)
        plt.xlabel(r"$x$")
    else:
        plt.xticks(ticks, [])
    if idx % 4 == 0:
        plt.yticks(ticks)
        plt.ylabel(r"$y$")
    else:
        plt.yticks(ticks, [])
plt.tight_layout()
plt.savefig("datasaurus-dozen.svg", bbox_inches="tight")
#%% Plot of the datasaurus on its own
plt.figure(figsize=(2.5, 1.5))
plt.plot(X[labels == 'dino'], Y[labels == 'dino'], 'ok',
         markersize=3)
plt.axis("image")
plt.axis([0, 100, 0, 100])
plt.xticks(ticks)
plt.yticks(ticks)
plt.xlabel(r"$x$")
plt.ylabel(r"$y$")
plt.savefig("datasaurus.svg", bbox_inches="tight")
plt.show() | [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"numpy.loadtxt",
... | [((130, 153), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (143, 153), True, 'import matplotlib.pyplot as plt\n'), ((202, 306), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../data/DatasaurusDozen.tsv"""'], {'delimiter': '"""\t"""', 'usecols': '(0,)', 'skip_header': '(1)', 'dtype': 'str'}), "('../data/DatasaurusDozen.tsv', delimiter='\\t', usecols=(0,),\n skip_header=1, dtype=str)\n", (215, 306), True, 'import numpy as np\n'), ((322, 409), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/DatasaurusDozen.tsv"""'], {'delimiter': '"""\t"""', 'usecols': '(1,)', 'skiprows': '(1)'}), "('../data/DatasaurusDozen.tsv', delimiter='\\t', usecols=(1,),\n skiprows=1)\n", (332, 409), True, 'import numpy as np\n'), ((425, 512), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/DatasaurusDozen.tsv"""'], {'delimiter': '"""\t"""', 'usecols': '(2,)', 'skiprows': '(1)'}), "('../data/DatasaurusDozen.tsv', delimiter='\\t', usecols=(2,),\n skiprows=1)\n", (435, 512), True, 'import numpy as np\n'), ((733, 760), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (743, 760), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1280), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1278, 1280), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1337), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""datasaurus-dozen.svg"""'], {'bbox_inches': '"""tight"""'}), "('datasaurus-dozen.svg', bbox_inches='tight')\n", (1292, 1337), True, 'import matplotlib.pyplot as plt\n'), ((1366, 1396), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.5, 1.5)'}), '(figsize=(2.5, 1.5))\n', (1376, 1396), True, 'import matplotlib.pyplot as plt\n'), ((1397, 1467), 'matplotlib.pyplot.plot', 'plt.plot', (["X[labels == 'dino']", "Y[labels == 'dino']", '"""ok"""'], {'markersize': '(3)'}), "(X[labels == 'dino'], Y[labels == 'dino'], 'ok', markersize=3)\n", (1405, 1467), True, 'import matplotlib.pyplot as 
plt\n'), ((1481, 1498), 'matplotlib.pyplot.axis', 'plt.axis', (['"""image"""'], {}), "('image')\n", (1489, 1498), True, 'import matplotlib.pyplot as plt\n'), ((1499, 1525), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 100, 0, 100]'], {}), '([0, 100, 0, 100])\n', (1507, 1525), True, 'import matplotlib.pyplot as plt\n'), ((1596, 1613), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (1606, 1613), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1632), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (1625, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1634, 1684), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""datasaurus.svg"""'], {'bbox_inches': '"""tight"""'}), "('datasaurus.svg', bbox_inches='tight')\n", (1645, 1684), True, 'import matplotlib.pyplot as plt\n'), ((1685, 1695), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1693, 1695), True, 'import matplotlib.pyplot as plt\n'), ((810, 834), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(k + 1)'], {}), '(3, 4, k + 1)\n', (821, 834), True, 'import matplotlib.pyplot as plt\n'), ((839, 907), 'matplotlib.pyplot.plot', 'plt.plot', (['X[labels == label]', 'Y[labels == label]', '"""ok"""'], {'markersize': '(3)'}), "(X[labels == label], Y[labels == label], 'ok', markersize=3)\n", (847, 907), True, 'import matplotlib.pyplot as plt\n'), ((924, 941), 'matplotlib.pyplot.axis', 'plt.axis', (['"""image"""'], {}), "('image')\n", (932, 941), True, 'import matplotlib.pyplot as plt\n'), ((946, 972), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 100, 0, 100]'], {}), '([0, 100, 0, 100])\n', (954, 972), True, 'import matplotlib.pyplot as plt\n'), ((1537, 1559), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(5)'], {}), '(0, 100, 5)\n', (1548, 1559), True, 'import numpy as np\n'), ((1572, 1594), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(5)'], {}), '(0, 100, 5)\n', (1583, 1594), True, 'import numpy as np\n'), ((1039, 1056), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (1049, 1056), True, 'import matplotlib.pyplot as plt\n'), ((1185, 1202), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (1195, 1202), True, 'import matplotlib.pyplot as plt\n'), ((1007, 1029), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(5)'], {}), '(0, 100, 5)\n', (1018, 1029), True, 'import numpy as np\n'), ((1087, 1109), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(5)'], {}), '(0, 100, 5)\n', (1098, 1109), True, 'import numpy as np\n'), ((1153, 1175), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(5)'], {}), '(0, 100, 5)\n', (1164, 1175), True, 'import numpy as np\n'), ((1233, 1255), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(5)'], {}), '(0, 100, 5)\n', (1244, 1255), True, 'import numpy as np\n')] |
import numpy as np
from flipper25d.config import *
from flipper25d.util import *
from flipper25d.cspace.cspace import *
from flipper25d.vis.vis import rot_vector
import pdb
from scipy.misc import imsave
import sys
import os
'''
we use the left side S2 as the pivot
right side S2 can be uniquely defined with negative_y direction on robo coordinate system
get neighbour: get_neighbours
get constrained point: right_S2
get cost: cost_fun
DFS path search: get_path_store and retrieve_path
t <- (p_, yaw, pitch, roll, alpha)
neighbour is (t, t_r)
q is (neighbour, cost, store_id)
'''
global touched
def right_S2(p_, yaw, pitch, roll):
    """Return the right-side S2 point for the pose whose left-side S2 point
    is ``p_`` with orientation (yaw, pitch, roll).

    ``p_`` is (row, col, height); rows/cols are in map pixels, height in
    world units.
    """
    # Offset of the right S2 in the robot frame, as a column vector.
    offset = np.array([0., WIDTH, 0.]).reshape((3, 1))
    # Rotations: about world z (yaw), about the yawed axis (pitch), and
    # about the robot's long axis (roll).
    rot_yaw = rot_vector([0., 0., 1.], -yaw)
    rot_pitch = rot_vector([np.sin(yaw), np.cos(yaw), 0.], -pitch)
    roll_axis = [np.cos(yaw) * np.cos(pitch),
                 -np.sin(yaw) * np.cos(pitch),
                 np.sin(pitch)]
    rot_roll = rot_vector(roll_axis, -roll)
    rotated = np.dot(rot_roll, np.dot(rot_pitch, np.dot(rot_yaw, offset)))
    # Convert the world-frame offset back to (row, col, height).
    return [p_[0] - int(rotated[0, 0] / PIXEL_RESO),
            p_[1] - int(rotated[1, 0] / PIXEL_RESO),
            rotated[2, 0] + p_[2]]
def have_same_pitch(pitchs1, pitchs2):
    """Return a pitch value shared by both pitch specifications, or None.

    Each argument is one of:
      * []               -- no feasible pitch,
      * [pitch]          -- a single feasible pitch, or
      * [upper, lower]   -- a feasible range, upper bound first.

    Returns
    -------
    float or None
        A pitch contained in both specifications, or None when they do
        not intersect.
    """
    if len(pitchs1) == 0 or len(pitchs2) == 0:
        # At least one side has no feasible pitch at all.
        return None
    elif len(pitchs1) == 1 and len(pitchs2) == 1:
        # Two single values: they must match exactly.
        if pitchs1[0] == pitchs2[0]:
            return pitchs1[0]
        return None
    elif len(pitchs1) == 1 or len(pitchs2) == 1:
        # A single value against an [upper, lower] range: the single value
        # must fall inside the range.
        if len(pitchs1) == 1:
            single, bounds = pitchs1[0], pitchs2
        else:
            single, bounds = pitchs2[0], pitchs1
        if bounds[1] <= single <= bounds[0]:
            return single
        return None
    else:
        # Two [upper, lower] ranges.
        if pitchs1[1] > pitchs2[0] or pitchs1[0] < pitchs2[1]:
            # Disjoint ranges -- no common pitch.
            return None
        # Smallest pitch contained in BOTH ranges is the larger of the two
        # lower bounds.  (The previous min() could return a value below one
        # of the ranges, i.e. outside the intersection.)
        return max(pitchs1[1], pitchs2[1])
def cost_fun(t_ori, neiblr, p_tgt_):
    """Cost of stepping to the neighbour pair ``neiblr`` = (left, right).

    Only the neighbour's position, yaw and pitch are used: the cost is the
    sum of squared height errors between the terrain map and the robot
    body, sampled at the axle midpoint and at ten points along the long
    axis.  ``t_ori`` and ``p_tgt_`` are accepted for interface
    compatibility but currently unused.
    """
    neibl, neibr = neiblr
    yaw, pitch = neibl[1], neibl[2]
    # Position triple of the midpoint between the two S2 configurations.
    midpoint = S2_to_middle(neibl, neibr)[0]
    total = (exp_map[midpoint[0], midpoint[1]] - midpoint[2]) ** 2
    # Sample ten evenly spaced points along the robot's long axis.
    for step in range(10):
        sample = get_point(midpoint, yaw=yaw, pitch=pitch,
                           line_len=LEN_S1S2 / 10 * (step + 1))
        total += (exp_map[sample[0], sample[1]] - sample[2]) ** 2
    return total
def check_point_on_ground(p_):
    """True when point ``p_`` = (row, col, height) lies on the terrain
    surface, within a HEIGHT_EPSILON tolerance above it."""
    terrain_height = exp_map[p_[0], p_[1]]
    return terrain_height + HEIGHT_EPSILON > p_[2]
#def get_neighbours(p_,yaw,pitch,roll,alpha):
def get_neighbours(t_l,t_r,p_tgt_):
    '''
    Enumerate candidate next configurations from the pose (t_l, t_r).

    here the naming rule:
    p_ is for the current point
    ano_p_ is for the neighbour point
    p_/ano_p_ r is for the right side
    p_tgt_ is used to compute the cost
    note: following find_valid_neighbour_points.jpeg to find neighbour

    Returns a list of [left_config, right_config] pairs where exactly one
    side is filled in and the other is None (the caller derives the
    missing side from the roll constraint).
    '''
    global touched
    p_l_,yaw,pitch,roll,_ = t_l
    p_r_ = t_r[0]
    # Front (S1) points of both tracks, LEN_S1S2 ahead along the body axis.
    p_l_S1 = get_point(p_l_, yaw=yaw, pitch=pitch, line_len=LEN_S1S2)
    p_r_S1 = get_point(p_r_, yaw=yaw, pitch=pitch, line_len=LEN_S1S2)
    # Geometric centre of the four track contact points.
    center = [(p_l_[0]+p_r_[0]+p_l_S1[0]+p_r_S1[0])/4,\
            (p_l_[1]+p_r_[1]+p_l_S1[1]+p_r_S1[1])/4,\
            (p_l_[2]+p_r_[2]+p_l_S1[2]+p_r_S1[2])/4]
    # Body x axis and body-up axis after applying yaw/pitch, then roll.
    norm_robo_x = (np.cos(yaw)*np.cos(pitch),-np.sin(yaw)*np.cos(pitch),np.sin(pitch))
    norm_z = np.array((-np.cos(yaw)*np.sin(pitch),np.sin(yaw)*np.sin(pitch),np.cos(pitch)))
    R_roll = rot_vector(norm_robo_x, roll)
    rolled_norm_z = np.dot(R_roll,norm_z.reshape((3,1)))[:,0]
    # World-frame vector from the centre to the left S2 point.
    v_o_p_l_ = [-(p_l_[0] - center[0])*PIXEL_RESO, -(p_l_[1] - center[1])*PIXEL_RESO,p_l_[2] - center[2]]
    #check if four points on the ground
    four_point_on_ground_tag = False
    if check_point_on_ground(p_l_) and check_point_on_ground(p_r_) and check_point_on_ground(p_l_S1) and check_point_on_ground(p_r_S1):
        four_point_on_ground_tag = True
    #p_r_ = right_S2(p_, yaw, pitch, roll)
    neighbours = []
    #costs = []
    neib_id = 0
    for y in YAWS:
        turn_left = None
        if y != yaw:
            turn_left = True
            continue#now only allows for go straight
        # NOTE: because every y != yaw is skipped above, turn_left is always
        # None past this point; the turning branches below (and the dh
        # assignment they rely on) are currently unreachable.
        if turn_left is None:
            # Step GO_STRAIGHT_ITS pixels forward along the current yaw.
            dr,dc = int(-np.cos(yaw)*GO_STRAIGHT_ITS), int(np.sin(yaw)*GO_STRAIGHT_ITS)
        else:
            if not four_point_on_ground_tag:
                continue
            else:
                R_y = rot_vector(rolled_norm_z, yaw - y)
            '''
            if turn_left:
                dr,dc = int(-np.cos(yaw)*WIDTH/PIXEL_RESO), int(np.sin(yaw)*WIDTH/PIXEL_RESO)
            else:
                dr,dc = int(-np.cos(yaw)*WIDTH/PIXEL_RESO), int(np.sin(yaw)*WIDTH/PIXEL_RESO)
            '''
            # Rotate the centre->left-S2 vector by the yaw change.
            roted_v = np.dot(R_y, np.array(v_o_p_l_).reshape((3,1)))[:,0]
            dr, dc, dh = -(roted_v[0]/PIXEL_RESO), -(roted_v[1]/PIXEL_RESO), roted_v[2]
        # Try several candidate heights above the terrain.
        for h_it in range(4):
            #use both left and right S2 as pivot
            for on_left_Pivot in [True,False]:
                if turn_left is not None:
                    # Unreachable with the current straight-only policy.
                    if h_it > 0 or not four_point_on_ground_tag:
                        continue
                    ano_p_ = [int(center[0]+dr),int(center[1]+dc),center[2]+dh]
                else:
                    # Candidate point: step forward from the pivot S2 and
                    # lift h_it * H_ITS above the terrain there.
                    d_h = h_it * H_ITS
                    if on_left_Pivot:
                        ano_p_ = [p_l_[0]+dr,p_l_[1]+dc,exp_map[p_l_[0]+dr,p_l_[1]+dc]+d_h]
                        p_ = p_l_
                    else:
                        ano_p_ = [p_r_[0]+dr,p_r_[1]+dc,exp_map[p_r_[0]+dr,p_r_[1]+dc]+d_h]
                        p_ = p_r_
                pitchs = get_pitch(ano_p_, yaw)
                #t = [p_, y, pitch, 0.,0.]
                for pt in pitchs:
                    # check pitch against the configured limits
                    if pt < PITCH_LB or pt > PITCH_UB:
                        continue
                    ano_t = [ano_p_, y, pt, 0., 0.]
                    #cost = cost_fun(t,ano_t,p_tgt_, is_left_side=y<=yaw)#+cost_fun(t_r,ano_t_r,p_tgt_)
                    if turn_left is not None:
                        neighbours.append([ano_t,None])
                    else:
                        # Record which side the pivot was on; the other
                        # side stays None for the caller to fill in.
                        if on_left_Pivot:
                            neighbours.append([ano_t, None])
                        else:
                            neighbours.append([None, ano_t])
                    #costs.append(cost)
    return neighbours#, costs
def middle_to_S2(t_ori):
    """Split a robot-centre configuration into left and right S2 configurations.

    ``t_ori`` is (position, yaw, pitch, roll, alpha).  Note that alpha is
    reset to 0. in both returned configurations.
    """
    p_, yaw, pitch, roll, _ = t_ori
    # Rotation stack: yaw about world z, pitch about the yawed axis, roll
    # about the robot's long axis.
    rot_yaw = rot_vector([0., 0., 1.], -yaw)
    rot_pitch = rot_vector([np.sin(yaw), np.cos(yaw), 0.], -pitch)
    rot_roll = rot_vector([np.cos(yaw) * np.cos(pitch),
                           -np.sin(yaw) * np.cos(pitch),
                           np.sin(pitch)], -roll)

    def _shifted(offset):
        # Rotate a robot-frame offset into the world frame and apply it
        # to the centre point, converting back to (row, col, height).
        vec = np.dot(rot_roll,
                     np.dot(rot_pitch,
                            np.dot(rot_yaw, np.array(offset).reshape((3, 1)))))
        return [p_[0] - int(vec[0, 0] / PIXEL_RESO),
                p_[1] - int(vec[1, 0] / PIXEL_RESO),
                vec[2, 0] + p_[2]]

    left = _shifted([0., WIDTH / 2, 0.])
    right = _shifted([0., -WIDTH / 2, 0.])
    return [left, yaw, pitch, roll, 0.], [right, yaw, pitch, roll, 0.]
def S2_to_middle(neibl, neibr):
    """Merge left/right S2 configurations into the robot-centre configuration.

    Row and column are averaged with integer truncation; height is the
    exact mean.  Yaw/pitch/roll/alpha are taken from the left configuration.
    """
    left_p, right_p = neibl[0], neibr[0]
    row = int((left_p[0] + right_p[0]) / 2)
    col = int((left_p[1] + right_p[1]) / 2)
    height = (left_p[2] + right_p[2]) / 2
    return [[row, col, height], neibl[1], neibl[2], neibl[3], neibl[4]]
def reach_target(middle, p_tgt_):
    """True when the robot centre is within 10 map rows of the target.

    NOTE(review): only the row coordinate (index 0) is compared; the
    column is ignored, so the "target" effectively is a horizontal line —
    confirm this is intended.
    """
    row_gap = middle[0] - p_tgt_[0]
    return row_gap ** 2 <= 100
def get_path_store(t_ori, p_tgt_):
    '''
    from ORIGIN to TARGET on arena map.
    Note: here when we compute the path, only x,y,z,yaw,pitch will be considered.
    input: t_ori, p_tgt_
    Detail
    1. given xyz(middle),yaw,pitch,roll
    2. find a next step xyz,yaw,pitch, then check if roll valid
    Note: the yaw is fixed, p_tgt_ is only used to check reach target

    Although written as a DFS skeleton, Q is rebuilt with the single
    cheapest neighbour each iteration, so this is a greedy best-first
    walk with no backtracking.
    '''
    global touched
    t_ori_l, t_ori_r = middle_to_S2(t_ori)
    #p_ori_r = right_S2(t_ori[0],t_ori[1],t_ori[2],t_ori[3])
    '''
    if t_ori_r is None:#it should be origin
        Q = [[(t_ori,(p_ori_r,t_ori[1],t_ori[2],t_ori[3],t_ori[4])),0]]#in Q, it will remember its store_id
    else:
    '''
    Q = [(t_ori_l,t_ori_r)]
    store = [Q[0]]#in store, it will remember its ancester's store_id
    reach_target_tag = False
    before_last = 0
    while len(Q)>0:
        q = Q.pop()
        t_ = q
        print(t_[0],'\t\t\t',t_[1])
        t_l, t_r= t_
        t_middle = S2_to_middle(t_l,t_r)
        neighbours= get_neighbours(t_l,t_r,p_tgt_)
        #ids = np.argsort(costs)#cost from small to large
        if len(neighbours) == 0:
            # Dead end: with the greedy single-candidate Q this terminates
            # the search and returns the partial path.
            print('no neighbour')
            continue
        nbso = []
        costs = []
        for i in range(len(neighbours)):
            # Each neighbour has exactly one side filled; derive the other
            # side from the roll constraint, skipping infeasible rolls.
            neibl,neibr = neighbours[i]
            if neibl is None:
                roll, _ = get_roll(neibr[0],neibr[1],neibr[2],False)
                if roll is None:
                    continue
                # Left S2 sits WIDTH to the left, tilted by the roll.
                ano_p_ = get_point(neibr[0], yaw=neibr[1]-np.pi/2, pitch=roll, line_len=WIDTH)
                neibl = [ano_p_, neibr[1],neibr[2], roll, 0.]
                neibr[3] = roll
            else:
                roll, _ = get_roll(neibl[0],neibl[1],neibl[2],True)
                if roll is None:
                    continue
                # Right S2 sits WIDTH to the right, tilted by the roll.
                ano_p_ = get_point(neibl[0], yaw=neibl[1]+np.pi/2, pitch=-roll, line_len=WIDTH)
                neibr = [ano_p_, neibl[1],neibl[2], roll, 0.]
                neibl[3] = roll
            #cost = cost_fun(t_middle, S2_to_middle(neibl,neibr),p_tgt_)
            cost = cost_fun(t_middle, (neibl,neibr),p_tgt_)
            #print(cost, neibl,neibr)
            nbso.append([neibl,neibr])
            costs.append(cost)
        # NOTE(review): if every neighbour was rejected above, nbso is empty
        # and nbs[-1] below raises IndexError — confirm this cannot happen.
        ids = np.argsort(costs)
        # nbs is sorted cost-descending, so nbs[-1] is the cheapest.
        nbs = [nbso[i] for i in ids[::-1]]
        middle = int((nbs[-1][0][0][0]+nbs[-1][1][0][0])/2),int((nbs[-1][0][0][1]+nbs[-1][1][0][1])/2)
        #check if the smallest cost is target
        Q = [nbs[-1]]
        store.append(nbs[-1])
        if reach_target(middle, p_tgt_):
            # NOTE(review): the final step is appended twice (once above,
            # once here) — the returned path duplicates its last entry.
            store.append(nbs[-1])
            return store
    return store
def retrieve_path(store):
    """Return the path encoded by ``store``.

    The greedy search in get_path_store keeps exactly one candidate per
    step, so the store already is the path and is returned unchanged.
    """
    return store
if __name__ == '__main__':
    # Start pose: (position, yaw, pitch, roll, flipper angle alpha).
    t_ori = (ORIGIN_, 0., 0., 0., DEFAULT_ALPHA)
    entire_path = []
    p_tgt_ = TARGET_#[1512,800]
    print('###### From ', t_ori, ' to ', p_tgt_, '#################')
    # Greedy configuration-space search, then unpack the stored path.
    store = get_path_store(t_ori, p_tgt_)
    entire_path = retrieve_path(store)
    import scipy.io
    # Save both the raw configuration path and the expanded parameter path.
    scipy.io.savemat('exp/conf_path/'+ARENA_NAME+'config_path.mat',{'path':np.array(entire_path)})
    expanded_entire_path = [expand_param(p) for p in entire_path]
    scipy.io.savemat('exp/conf_path/'+ARENA_NAME+'expanded_config_path.mat',{'path':np.array(expanded_entire_path)})
| [
"numpy.argsort",
"numpy.array",
"numpy.cos",
"numpy.sin",
"flipper25d.vis.vis.rot_vector"
] | [((721, 754), 'flipper25d.vis.vis.rot_vector', 'rot_vector', (['[0.0, 0.0, 1.0]', '(-yaw)'], {}), '([0.0, 0.0, 1.0], -yaw)\n', (731, 754), False, 'from flipper25d.vis.vis import rot_vector\n'), ((4184, 4213), 'flipper25d.vis.vis.rot_vector', 'rot_vector', (['norm_robo_x', 'roll'], {}), '(norm_robo_x, roll)\n', (4194, 4213), False, 'from flipper25d.vis.vis import rot_vector\n'), ((7299, 7332), 'flipper25d.vis.vis.rot_vector', 'rot_vector', (['[0.0, 0.0, 1.0]', '(-yaw)'], {}), '([0.0, 0.0, 1.0], -yaw)\n', (7309, 7332), False, 'from flipper25d.vis.vis import rot_vector\n'), ((4064, 4077), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (4070, 4077), True, 'import numpy as np\n'), ((10608, 10625), 'numpy.argsort', 'np.argsort', (['costs'], {}), '(costs)\n', (10618, 10625), True, 'import numpy as np\n'), ((773, 784), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (779, 784), True, 'import numpy as np\n'), ((785, 796), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (791, 796), True, 'import numpy as np\n'), ((886, 899), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (892, 899), True, 'import numpy as np\n'), ((4011, 4022), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (4017, 4022), True, 'import numpy as np\n'), ((4023, 4036), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (4029, 4036), True, 'import numpy as np\n'), ((4050, 4063), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (4056, 4063), True, 'import numpy as np\n'), ((4155, 4168), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (4161, 4168), True, 'import numpy as np\n'), ((7351, 7362), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (7357, 7362), True, 'import numpy as np\n'), ((7363, 7374), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (7369, 7374), True, 'import numpy as np\n'), ((7464, 7477), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (7470, 7477), True, 'import numpy as np\n'), ((11479, 11500), 'numpy.array', 'np.array', (['entire_path'], {}), 
'(entire_path)\n', (11487, 11500), True, 'import numpy as np\n'), ((11653, 11683), 'numpy.array', 'np.array', (['expanded_entire_path'], {}), '(expanded_entire_path)\n', (11661, 11683), True, 'import numpy as np\n'), ((833, 844), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (839, 844), True, 'import numpy as np\n'), ((845, 858), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (851, 858), True, 'import numpy as np\n'), ((872, 885), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (878, 885), True, 'import numpy as np\n'), ((4038, 4049), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (4044, 4049), True, 'import numpy as np\n'), ((4115, 4128), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (4121, 4128), True, 'import numpy as np\n'), ((4129, 4140), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (4135, 4140), True, 'import numpy as np\n'), ((4141, 4154), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (4147, 4154), True, 'import numpy as np\n'), ((5124, 5158), 'flipper25d.vis.vis.rot_vector', 'rot_vector', (['rolled_norm_z', '(yaw - y)'], {}), '(rolled_norm_z, yaw - y)\n', (5134, 5158), False, 'from flipper25d.vis.vis import rot_vector\n'), ((7411, 7422), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (7417, 7422), True, 'import numpy as np\n'), ((7423, 7436), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (7429, 7436), True, 'import numpy as np\n'), ((7450, 7463), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (7456, 7463), True, 'import numpy as np\n'), ((860, 871), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (866, 871), True, 'import numpy as np\n'), ((4103, 4114), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (4109, 4114), True, 'import numpy as np\n'), ((7438, 7449), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (7444, 7449), True, 'import numpy as np\n'), ((962, 978), 'numpy.array', 'np.array', (['v_oRS2'], {}), '(v_oRS2)\n', (970, 978), True, 'import numpy as np\n'), ((4971, 4982), 'numpy.sin', 'np.sin', 
(['yaw'], {}), '(yaw)\n', (4977, 4982), True, 'import numpy as np\n'), ((7540, 7556), 'numpy.array', 'np.array', (['v_oLS2'], {}), '(v_oLS2)\n', (7548, 7556), True, 'import numpy as np\n'), ((7629, 7645), 'numpy.array', 'np.array', (['v_oRS2'], {}), '(v_oRS2)\n', (7637, 7645), True, 'import numpy as np\n'), ((4937, 4948), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (4943, 4948), True, 'import numpy as np\n'), ((5486, 5504), 'numpy.array', 'np.array', (['v_o_p_l_'], {}), '(v_o_p_l_)\n', (5494, 5504), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding=utf-8
from __future__ import print_function, absolute_import
import json
import os
import argparse
from os.path import join
import numpy
import pyfits
import glob
def get_stats(sim_dir):
    """Summarise difference images produced by a BDA pipeline simulation.

    Reads the simulation JSON config and the compression ratio from
    *sim_dir*, then writes one comma-separated stats line (sweep
    parameters plus the RMS/min/max of the image) next to every
    'diff_*.fits' file. Returns None; errors are reported on stdout.
    """
    # Load the config file (the first *.json found in the directory).
    config_files = glob.glob(join(sim_dir, '*.json'))
    if not config_files:
        # Previously an uncaught IndexError; report and bail out instead.
        print("Error: No JSON config file found in '%s'." % sim_dir)
        return
    try:
        with open(config_files[0]) as config_fh:
            config = json.load(config_fh)
    except ValueError as e:
        # json.load() raises ValueError (JSONDecodeError) on malformed input.
        # BUG FIX: 'e.message' does not exist on Python 3 exceptions.
        print('Error: Failed to parse JSON config file.')
        print(str(e))
        return
    # Get configuration values.
    tau = config['corrupt']['tau_s']
    hurst_amp = config['corrupt']['amplitude']['hurst']
    adev_amp = config['corrupt']['amplitude']['allan_dev']
    hurst_phase = config['corrupt']['phase']['hurst']
    adev_phase = config['corrupt']['phase']['allan_dev']
    num_times = config['sim']['observation']['num_times']
    max_fact = config['baseline_average']['max_fact']
    fov_radius_deg = config['baseline_average']['fov_radius_deg']
    max_average_time_s = config['baseline_average']['max_average_time_s']
    compression_ratio = numpy.loadtxt(join(sim_dir, 'compression.txt'))
    # Get stats for each difference image.
    for diff_name in glob.glob(join(sim_dir, 'diff_*.fits')):
        image = pyfits.getdata(diff_name)
        rms_diff = numpy.sqrt(numpy.mean(numpy.square(image)))
        min_diff = numpy.min(image)
        max_diff = numpy.max(image)
        # Stats file sits next to the FITS image, same base name.
        with open(os.path.splitext(diff_name)[0] + '.txt', 'w') as f:
            f.write('%.4f, %.2f, %.1f, %.3f, '
                    '%i, %.1f, %.1f, %.1e, %.1f, %.1e, '
                    '%.6f, %.6f, %.6f\n' % (
                        max_fact, fov_radius_deg, max_average_time_s, compression_ratio,
                        num_times, tau, hurst_amp, adev_amp, hurst_phase, adev_phase,
                        rms_diff, min_diff, max_diff))
if __name__ == '__main__':
    # Command-line entry point: parse the simulation directory and run stats.
    parser = argparse.ArgumentParser(description='Get stats for BDA pipeline.',
                                     epilog='')
    parser.add_argument('sim_dir', type=str, nargs='?', help='Simulation dir')
    args = parser.parse_args()
    if args.sim_dir is None:
        parser.print_usage()
        print('%s: error, too few arguments.' % os.path.basename(__file__))
        exit(1)
    if not os.path.isdir(args.sim_dir):
        print("Error: Simulation dir '%s' not found!" % args.sim_dir)
        # BUG FIX: previously fell through and called get_stats() on the
        # missing directory anyway.
        exit(1)
    get_stats(args.sim_dir)
| [
"argparse.ArgumentParser",
"os.path.join",
"os.path.splitext",
"numpy.max",
"pyfits.getdata",
"numpy.square",
"os.path.isdir",
"os.path.basename",
"numpy.min"
] | [((1961, 2038), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get stats for BDA pipeline."""', 'epilog': '""""""'}), "(description='Get stats for BDA pipeline.', epilog='')\n", (1984, 2038), False, 'import argparse\n'), ((277, 300), 'os.path.join', 'join', (['sim_dir', '"""*.json"""'], {}), "(sim_dir, '*.json')\n", (281, 300), False, 'from os.path import join\n'), ((1145, 1177), 'os.path.join', 'join', (['sim_dir', '"""compression.txt"""'], {}), "(sim_dir, 'compression.txt')\n", (1149, 1177), False, 'from os.path import join\n'), ((1224, 1252), 'os.path.join', 'join', (['sim_dir', '"""diff_*.fits"""'], {}), "(sim_dir, 'diff_*.fits')\n", (1228, 1252), False, 'from os.path import join\n'), ((1341, 1366), 'pyfits.getdata', 'pyfits.getdata', (['diff_name'], {}), '(diff_name)\n', (1355, 1366), False, 'import pyfits\n'), ((1449, 1465), 'numpy.min', 'numpy.min', (['image'], {}), '(image)\n', (1458, 1465), False, 'import numpy\n'), ((1485, 1501), 'numpy.max', 'numpy.max', (['image'], {}), '(image)\n', (1494, 1501), False, 'import numpy\n'), ((2347, 2374), 'os.path.isdir', 'os.path.isdir', (['args.sim_dir'], {}), '(args.sim_dir)\n', (2360, 2374), False, 'import os\n'), ((1408, 1427), 'numpy.square', 'numpy.square', (['image'], {}), '(image)\n', (1420, 1427), False, 'import numpy\n'), ((2292, 2318), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (2308, 2318), False, 'import os\n'), ((1520, 1547), 'os.path.splitext', 'os.path.splitext', (['diff_name'], {}), '(diff_name)\n', (1536, 1547), False, 'import os\n')] |
import streamlit as st
# To make things easier later, we're also importing numpy and pandas for
# working with sample data.
import numpy as np
import pandas as pd
from scipy.integrate import *
import scipy.optimize
import matplotlib.pyplot as plt
from functools import partial
import os, sys
# --- Streamlit sidebar: user-tunable simulation inputs ---
st.sidebar.markdown("## Parameters used in the simulation")
st.sidebar.markdown("Enter your own custom values to run the model")
# Drive current density amplitude, entered in units of 10^10 A/m^2.
je = float(st.sidebar.text_input('Current density j_e [10^10 A/m^2]', 10))
periSampl = 1000 # number of time steps used to discretise one AC period
class Parameters:
    """Simulation constants; most values come from the Streamlit sidebar widgets."""
    gamma = 2.2128e5  # gyromagnetic ratio
    alpha = float(st.sidebar.text_input('Gilbert damping constant', 1))
    K1 = float(st.sidebar.text_input('Anisotropy constant K_1 [J/m^3]', 1.5 * 9100))
    Js = float(st.sidebar.text_input('Saturation magnetization Js [T]', 0.65))
    RAHE = float(st.sidebar.text_input('Anomalous Hall effect coefficient', 0.65))
    # NOTE(review): the label says [nm] but the default value is already in
    # metres ((0.6+1.2+1.1)*1e-9) -- confirm which unit users should enter.
    d = float(st.sidebar.text_input('FM layer thickness [nm]', (0.6+1.2+1.1) * 1e-9))
    frequency = float(st.sidebar.text_input('AC frequency [Hz]', 0.1e9))
    currentd = je * 1e10  # drive current density [A/m^2]
    hbar = 1.054571e-34  # reduced Planck constant [J*s]
    e = 1.602176634e-19  # elementary charge [C]
    mu0 = 4 * 3.1415927 * 1e-7  # vacuum permeability [T*m/A]
    easy_axis = np.array([0,0,1])  # out-of-plane uniaxial anisotropy axis
    p_axis = np.array([0,-1,0])  # spin-polarization axis for current along x
    etadamp = float(st.sidebar.text_input('Damping like torque term coefficient', 0.084))
    etafield = float(st.sidebar.text_input('Field like torque term', 0.008)) # etafield/etadamp=eta
    eta = etafield/etadamp  # field-like to damping-like torque ratio
    hext = np.array([1.0 * K1/Js,0,0])  # initial external field along +x [A/m]
def lockin(sig, t, f, ph):
    """Software lock-in detection of the second-harmonic (2f) component.

    Multiplies ``sig`` by a cosine reference at twice the drive frequency
    ``f`` with phase ``ph`` (degrees) and returns twice the mean of the
    product, i.e. the in-phase 2f amplitude.
    """
    phase_rad = ph / 180.0 * np.pi
    reference = np.cos(2 * 2 * np.pi * f * t + phase_rad)
    demodulated = np.multiply(sig, reference)
    return np.mean(demodulated) * 2
def fft(sig, t, f):
    """Extract the second-harmonic (2f) Fourier coefficient of ``sig``.

    Runs a real FFT of the signal sampled at times ``t``, locates the bin
    nearest to 2*f, searches a few neighbouring bins for the actual
    spectral peak, and returns the normalised real part of that bin.
    """
    sample_dt = np.mean(np.diff(t))
    n_samples = len(t)
    spectrum = np.fft.rfft(sig)
    magnitudes = np.abs(spectrum)
    freqs = np.array(np.fft.rfftfreq(n_samples, d=sample_dt))
    # Half-width (in bins) of the peak-search window around the nominal 2f bin.
    stride = max(int(2 * f * 0.1 * sample_dt), 2)
    peak = np.argmin(np.abs(freqs - 2 * f))
    best_mag = 0
    best_off = 0
    for offset in range(-stride, stride + 1):
        if magnitudes[peak + offset] > best_mag:
            best_mag = magnitudes[peak + offset]
            best_off = offset
    peak = peak + best_off
    return 2. / n_samples * spectrum.real[peak]
def fields(t, m, p):
    """Return (Hk, Hd) at time ``t``.

    Hk is the uniaxial anisotropy field 2*K1/Js and Hd the damping-like
    spin-orbit-torque field amplitude for the DC current amplitude.
    """
    anisotropy_field = 2 * p.K1 / p.Js
    damping_field = p.etadamp * p.currentd * p.hbar / (2 * p.e * p.Js * p.d)
    return (anisotropy_field, damping_field)
def f(t, m, p):
    """LLG right-hand side dm/dt for magnetization ``m`` at time ``t``.

    Side effect: appends [t, mx, my, mz, Hx, Hy, Hz] (H = current-induced
    field) to ``p.result`` on every evaluation.
    """
    # Instantaneous AC current density and the resulting SOT field prefactor.
    j_ac = p.currentd * np.cos(2 * 3.1415927 * p.frequency * t)
    prefactor = j_ac * p.hbar/(2 * p.e * p.Js * p.d)
    # Effective field: external + uniaxial anisotropy contribution.
    h_ani = 2 * p.K1/p.Js * p.easy_axis * np.dot(p.easy_axis, m)
    h_eff = p.hext + h_ani
    # Current-induced field: damping-like + field-like spin-orbit torques.
    h_sot = prefactor * (p.etadamp * np.cross(p.p_axis, m) + p.etafield * p.p_axis)
    H = -h_sot
    mxh = np.cross(m, h_eff - h_sot)
    mxmxh = np.cross(m, mxh)
    inv = 1 + p.alpha**2
    rhs = - p.gamma/inv * mxh - p.gamma * p.alpha/inv * mxmxh
    p.result.append([t, m[0], m[1], m[2], H[0], H[1], H[2]])
    return [rhs]
def calc_equilibrium(m0_, t0_, t1_, dt_, paramters_):
    """Integrate the LLG equation from ``t0_`` to ``t1_`` with output step ``dt_``.

    Uses the stiff 'vode'/BDF integrator on the module-level RHS ``f``.
    Returns (final_time, magList, Hs, testSignal) where magList is a 4xN
    array of [t, mx, my, mz] samples, Hs holds the (Hk, Hd) fields of the
    last step, and testSignal is kept (empty) for API compatibility.
    Assumes the loop runs at least once so Hs is defined.
    """
    solver = ode(f).set_integrator('vode', method='bdf', atol=1e-14, nsteps=500000)
    solver.set_initial_value(m0_, t0_).set_f_params(paramters_).set_jac_params(2.0)
    # Equally spaced output samples: Hayashi et al. (2014) suggest splitting
    # one period into >=200 steps for accurate Hall-voltage harmonics.
    times, mx_list, my_list, mz_list = [], [], [], []
    testSignal = []
    while solver.successful() and solver.t < t1_:
        mag = solver.integrate(solver.t + dt_)
        times.append(solver.t)
        mx_list.append(mag[0])
        my_list.append(mag[1])
        mz_list.append(mag[2])
        # Current-induced fields of this step; the last one is returned.
        Hs = fields(solver.t, mag, paramters_)
    magList = np.array([times, mx_list, my_list, mz_list])
    return (solver.t, magList, Hs, testSignal)
def calc_w1andw2(m0_,t0_,t1_,dt_,paramters_):
    """Run one LLG integration and extract the harmonic Hall responses.

    Integrates over [t0_, t1_] (the callers pass four AC periods),
    discards the first period as transient, and computes the first (R1w)
    and second (R2w) harmonic Fourier components of the Hall voltage,
    plus lock-in (lR2w, nR2w) and FFT (fR2w) estimates of the second
    harmonic.

    Returns a 13-tuple:
    (R1w, R2w, time, Hx, Hy, Hz, mx, my, mz, Hs, nR2w, lR2w, fR2w).
    """
    paramters_.result = []
    t1,magList, Hs, testSignal = calc_equilibrium(m0_,t0_,t1_,dt_,paramters_)
    npresults = np.array(paramters_.result)
    time = np.array( magList[0] )
    sinwt = np.sin( 2 * 3.1415927 * paramters_.frequency * time)
    cos2wt = np.cos( 2 * 2 * 3.1415927 * paramters_.frequency * time)
    current = paramters_.currentd * np.cos(2 * 3.1415927 * paramters_.frequency * time)
    # time steps array creation (dt[0] duplicates the first difference so
    # that len(dt) == len(time))
    z=0
    dt=[]
    dt.append(time[1]-time[0])
    for i in time:
        if z>0:
            dt.append(time[z]-time[z-1])
        z=z+1
    dt=np.array(dt)
    #Computing the voltage from R_{AHE}
    # (2e-6 * 6e-9) is the conductor cross-section area [m^2].
    voltage = current * magList[3] * paramters_.RAHE * (2e-6 * 6e-9)
    # Drop the first period (the transient) before the harmonic analysis.
    voltage = voltage[periSampl:]
    current = current[periSampl:]
    time = time[periSampl:]
    sinwt = sinwt[periSampl:]
    cos2wt = cos2wt[periSampl:]
    dt = dt[periSampl:]
    #nR2w = np.sum(voltage/paramters_.currentd * cos2wt * dt)*(2/time[-1])
    # Fourier coefficients over the remaining 3/4 of the simulated time.
    R1w = np.sum(voltage * sinwt * dt)*(2 / (time[-1]*(3/4)) )
    R2w = np.sum(voltage * cos2wt * dt)*(2 / (time[-1]*(3/4)) )
    #R2w = np.sum(testSignal[periSampl:] * cos2wt * dt)*(2 / (time[-1]*(3/4)) )
    #R1w = np.dot( voltage * dt,sinwt )/( np.dot(sinwt * dt,sinwt) * paramters_.currentd)
    #nR2w = np.dot( voltage * dt,cos2wt )/( np.dot(cos2wt * dt, cos2wt) * paramters_.currentd)
    # Independent estimates of the 2nd harmonic for cross-checking.
    fR2w = fft( voltage, magList[0][periSampl:], paramters_.frequency)
    lR2w = lockin( voltage, magList[0][periSampl:], paramters_.frequency, 0)
    #nR2w = np.fft.fft(magList[3], 2)/2
    nR2w = lockin( voltage/paramters_.currentd, magList[0][periSampl:], paramters_.frequency, 90)
    #Checking the magnetization time evolution at each external field value:
    #plt.plot(time, magList[1], label = 'mx')
    #plt.plot(time, magList[2], label = 'my')
    #plt.plot(time, magList[3][periSampl:], label = 'mz tree periods')
    #plt.plot(magList[0], magList[3], label = 'mz_full period')
    #plt.title("H_x = " + str(paramters_.hext[0]*paramters_.mu0) + "[T]" )
    #plt.legend()
    #plt.show()
    #plt.plot(time, mzlowfield(time, paramters_), label = 'test')
    #plt.plot(time, np.full(time.shape, sum(magList[1]) / len(magList[1]) ), label = 'mx')
    #plt.plot(time, np.full(time.shape, sum(magList[2]) / len(magList[2]) ), label = 'my')
    #plt.plot(time, np.full(time.shape, sum(magList[3]) / len(magList[3]) ), label = 'mz')
    #plt.plot(time, testSignal, label = 'cos(X)')
    #plt.plot(time, voltage, label = 'cos(X)')
    #Checking the current-induced fields time evolution at each external field value:
    #plt.plot(time, npresults[:,4], label = 'Hx')
    #plt.plot(time, npresults[:,5], label = 'Hy')
    #plt.plot(time, npresults[:,6], label = 'Hz')
    #plt.legend()
    #plt.show()
    #Final value of the current-induced field
    #H_eff = print(npresults[-1,4],npresults[-1,5],npresults[-1,6])
    #return(R1w,R2w,npresults[-1,4],npresults[-1,5],npresults[-1,6],npresults[-1,1],npresults[-1,2],npresults[-1,3], Hs, nR2w, lR2w, fR2w)
    return(R1w,R2w,
           magList[0], # ZZZ re-write function to save memory (duplicated time array)
           npresults[:,4],npresults[:,5],npresults[:,6],
           magList[1], magList[2], magList[3],
           Hs, nR2w, lR2w, fR2w)
# --------- Sweep bookkeeping ---------
paramters = Parameters()
n = 21  # number of field (or angle) steps in the sweep
phirange = np.linspace(-np.pi/2, np.pi*3/2, num=n)  # in-plane angles for the rotational sweep
# Per-sweep-point accumulators, filled by the sweep loops below.
signalw = []
signal2w = []
nsignal2w = []
lsignal2w = []
fsignal2w = []
timeEvol = []
Hx,Hy,Hz = [[],[],[]]
Mx,My,Mz = [[],[],[]]
m_eqx, m_eqy, m_eqz = [[],[],[]]
aheList, amrList = [[],[]]
fieldrangeT =[]
phirangeRad=[]
orgdensity = paramters.currentd  # remember the original current density (sign is flipped later)
# Select which sweep to run.
longitudinalSweep = True
rotationalSweep = False
if longitudinalSweep:
    # Sweep the external field along x from -0.1 T to +0.1 T (stored in A/m).
    name = "_HSweep"
    fieldrange = np.linspace(-0.1/paramters.mu0, 0.1/paramters.mu0, num = n )
    for i in fieldrange:
        paramters.currentd = orgdensity
        paramters.hext = np.array([i,0,0])
        initm=[0,0,1]
        initm=np.array(initm)/np.linalg.norm(initm)
        R1w,R2w, t,hx,hy,hz, mx,my,mz, Hs, nR2w, lR2w, fR2w = calc_w1andw2(m0_=initm,
                                                               t0_=0,
                                                               t1_=4/paramters.frequency,
                                                               dt_=1/(periSampl * paramters.frequency),
                                                               paramters_=paramters)
        #Storing each current-induced field and magnetization state for each ext field value
        timeEvol.append(t)
        Hx.append(hx)
        Hy.append(hy)
        Hz.append(hz)
        Mx.append(mx)
        My.append(my)
        Mz.append(mz)
        # Equilibrium magnetization = last sample of the integration.
        m_eqx.append(mx[-1])
        m_eqy.append(my[-1])
        m_eqz.append(mz[-1])
        fieldrangeT.append(i * paramters.mu0)
        signalw.append(R1w)
        signal2w.append(R2w)
        nsignal2w.append(nR2w)
        lsignal2w.append(lR2w)
        fsignal2w.append(fR2w)
        phirangeRad.append(0)
        #AHE & AMR
        # Repeat the run with reversed current to extract the AHE contrast.
        paramters.currentd = -paramters.currentd
        it1,imagList, iHs, itestSignal = calc_equilibrium(m0_=initm,t0_=0,t1_=4/paramters.frequency,dt_=1/(periSampl * paramters.frequency), paramters_=paramters)
        aheList.append(mz[-1]-imagList[3][-1])
        amrList.append(mx[-1]*mx[-1])
        #Live prompt
        #print(i, R1w, R2w, '\tHk,Hd', round(Hs[0]), round(Hs[1]), mx[-1], my[-1], mz[-1])
if rotationalSweep:
    # NOTE(review): this branch is disabled above (rotationalSweep = False).
    # calc_w1andw2 returns a 13-tuple but only 10 values are unpacked below,
    # so enabling it as-is would raise a ValueError -- fix before enabling.
    name = "_HconsRotat"
    fieldrange = np.linspace(0, 0.8/paramters.mu0, num= int((n-1)/10) )
    for h in fieldrange:
        ipMagnitude = 0.05/paramters.mu0 # 0.05/paramters.mu0 # in Tesla
        for i in phirange:
            paramters.currentd = orgdensity
            # Rotate a fixed in-plane field by angle i, with a constant Hz = h.
            paramters.hext = np.array([ np.cos(i) * ipMagnitude , np.sin(i) * ipMagnitude , h])
            initm=[0,0,-1]
            initm=np.array(initm)/np.linalg.norm(initm)
            R1w,R2w,hx,hy,hz,mx,my,mz, Hs, nR2w = calc_w1andw2(m0_=initm,t0_=0,t1_=1/paramters.frequency,dt_=1/(periSampl * paramters.frequency), paramters_=paramters)
            #Storing each current-induced field and magnetization state for each ext field value
            Hx.append(hx)
            Hy.append(hy)
            Hz.append(hz)
            Mx.append(mx)
            My.append(my)
            Mz.append(mz)
            phirangeRad.append(i*180/np.pi)
            fieldrangeT.append(h)
            signalw.append(R1w)
            signal2w.append(R2w)
            nsignal2w.append(nR2w)
            #Live prompt
            print( h, R1w, R2w, 'Pi:'+str(i%(2*np.pi)), '\tHk,Hd', round(Hs[0]), round(Hs[1]), mx, my, mz)
def savedata(p, sig, fieldrangeT, name):
    """Write sweep results to a tab-separated .dat file.

    Column layout per data row (Delta = current-induced fields,
    ' = equilibrium values):
    current | H_ext | R2w | DHx | DHy | DHz | mx' | my' | mz' | Rw | phi[rad]
    followed by a two-line footer with Hk, Hdamp, eta, thickness, frequency.

    NOTE: still reads the module-level sweep lists (Hx, Hy, Hz, Mx, My, Mz,
    signalw, phirangeRad, Hs); only the R2w column comes from ``sig``.
    """
    with open( "v2o_" + str(name) + "_j" + str(p.currentd/1e10) + "e10.dat", "w") as f:
        # BUG FIX: the loop previously iterated the global ``signal2w``,
        # shadowing (and ignoring) the ``sig`` argument; iterate the argument.
        for i, value in enumerate(sig):
            f.write( str(p.currentd) + "\t" + str(fieldrangeT[i]) + "\t" + str(value) + "\t"
                     + str(Hx[i]) + "\t" + str(Hy[i]) + "\t" + str(Hz[i]) +'\t'
                     + str(Mx[i]) + "\t" + str(My[i]) + "\t" + str(Mz[i]) + '\t' + str(signalw[i]) + "\t" + str(phirangeRad[i])
                     + "\n")
        f.write("Hk\tHdamp\teta(f/d)\t t\t freq\n")
        f.write( str(Hs[0]) + '\t' + str(Hs[1]) + "\t" + str(p.etafield/p.etadamp) + "\t" + str(p.d)
                 + '\t' + str(p.frequency) + '\n')
        # Redundant f.close() removed: the with-statement closes the file.
def graph(x, y, xlab, ylab, pltlabel, plthead):
    """Create a single-curve matplotlib figure with labels and return it."""
    figure, axes = plt.subplots()
    plt.plot(x, y, label=pltlabel)
    axes.set(xlabel=xlab, ylabel=ylab)
    plt.title(plthead)
    plt.legend()
    return figure
def graphm(t, mx, my, mz, xlab, ylab, plthead):
    """Plot the three magnetization components versus time; return the figure."""
    figure, axes = plt.subplots()
    # Same plotting order and labels as the original x/y/z curves.
    for component, tag in ((mx, r'$x$'), (my, r'$y$'), (mz, r'$z$')):
        plt.plot(t, component, label=tag)
    axes.set(xlabel=xlab, ylabel=ylab)
    plt.title(plthead)
    plt.legend()
    return figure
st.title('Magnetization dynamics for FM/HM interfaces, a single-spin model')
st.header('Online LLG integrator')
st.caption("<NAME>, <NAME>, <NAME>, <NAME>")
st.caption("Physics of Functional Materials")
st.caption("University of Vienna")
st.write('The following page describes the details to consider to efficiently simulate a FM/HM interface. This model is based on the Landau-Lifshitz-Gilbert equation, and the equation is integrated using _scipy_ python libraries. Hence, the magnetization dynamics is computed with this model, which also contains routines to calculate the first and second harmonics of the Anomalous Hall Voltage (from AH Effect). This interactve tool is designed to allow quick computations and detailed understanding of the considerations made to simulate such FM/HM interfaces. ')
st.write('The parameters used in the computation for the live plot results can be freely manipulated using the left sidebar (_available clicking in the arrowhead on the top left of this web app_). Feel free to perform computations with the desired values. ')
st.subheader('Theoretical description')
st.write('The system described by the model is a typical FM/HM interface. In our specific case, a Hall cross with a thin ferromagnetic layer displaying an out of plane magnetization (fig. 1). ')
st.image("https://journals.aps.org/prb/article/10.1103/PhysRevB.89.144425/figures/1/medium",
caption = "*Fig. 1* Hall bar structure. Adapted from Phys. Rev. B 89, 144425 (2014)",
width = 400 )
#($\eta_\text{DL}$ and $\eta_\text{FL}$)
st.write(r'The LLG equation employed in the model is in explicit form and takes the Slonczewsky spin-orbit-torque coefficients as input. It goes as follows:')
st.latex(r''' \frac{\partial \vec{m}}{\partial t} = -
\frac{\gamma}{1+\alpha^2} (\vec{m} \times \vec{H}_{\text{eff}}) -
\frac{\gamma \alpha}{1+\alpha^2} \:\vec{m} \times (\vec{m} \times \vec{H}_{\text{eff}})''')
st.write(r'Where $m$ represents the mgnetization unit vector, $\alpha$ the Gilbert damping constant, $\gamma$ the gyromagnetic ratio, and $\vec{H}_{\text{eff}}$ is the effective magnetic field. The effective magnetic field contains contributions of the applied external field, the effective anisotropy field, and the current induced fields via spin orbit torque effects. It reads as follows:')
st.latex(r''' \vec{ H }_{\text{eff}} =
\vec{ H }_{\text{ext}} + \vec{ H }_{\text{k}} +
\vec{ H }^{\text{SOT}}_{\text{FL}} +
\vec{ H }^{\text{SOT}}_{\text{DL}} \\ \:\\ \:\\
\vec{ H }_{\text{k}} = \frac{2\vec{K}_1}{Js} \\ \:\\
\vec{ H }^{\text{SOT}}_{\text{FL}} = \eta_\text{FL} \frac{ j_e \hbar }{ 2 e t \mu_0 M_s }\:\vec{m} \times (\vec{m} \times \vec{p}) \\ \:\\
\vec{ H }^{\text{SOT}}_{\text{DL}} = \eta_\text{DL} \frac{ j_e \hbar }{ 2 e t \mu_0 M_s }\:(\vec{m} \times \vec{p})
''')
st.write(r"The $\vec{p}$ vector represents the spin polarization of electrons. For a current flowing along the x direction, the vector is $(0,-1,0)$. As the here simulated system presents out of plane magnetization along the +z axis, the $\vec{K}_1$ anisotropy constant is represented by $(0,0,K_1)$")
st.write("Therefore, this simplified model just describes out-of-plane systems with negligible Planar Hall Effect, compared to the Anomalous Hall Effect. It will get improved soon.")
st.caption("Performing the integration")
st.write("In order to accurately compute the first and second harmonic components of the Anomalous Hall Voltage, the period is, at least, split in 1000 equidistand time steps. This will ensure an accurate description of the time variation of the voltage induced by the AC current. Additionaly, it will improve the computation of the numerical Fourier integrals for getting the harmonic responses.")
st.write("Under AC, the voltage is made up by the following harmonics:")
st.latex(r''' V_{xy}(t) = V^{xy}_0 + V^{xy}_\omega\sin(\omega t) + V^{xy}_{2\omega}\cos(2\omega t) + ...''')
st.write("Those harmonic components can be isolated by applying the Fourier series coefficient integral definition, integrating over one full period.")
st.latex(r'''
V^{xy}_{\omega}=\frac{2}{T}\int_{T} V(t)\sin(\omega t)\text{dt} \\ \: \\
V^{xy}_{2\omega}=\frac{2}{T}\int_{T} V(t)\cos(2\omega t)\text{dt}
''')
st.write(r"As the system starts fully pointing in the z direction, it is important to simulate the electric current with a cosine wave $J_x=j_e \cos(\omega t)$. ")
# Interactive section: show the magnetization relaxation for one field value.
if st.checkbox("Show relaxation of magnetization", True):
    selected_field = st.select_slider('Slide the bar to check the trajectories for an specific field value [A/m]',
                                      options = fieldrange.tolist())
    st.write("Field value equivalent to", str( round(selected_field*paramters.mu0, 3) ), "[T]")
    # Map the chosen field back to its sweep index.
    s_index = fieldrange.tolist().index(selected_field)
    figtraj = graphm(timeEvol[s_index], Mx[s_index], My[s_index], Mz[s_index],
                     "time [ns]", r'$m_i$',
                     "Evolution at " + str( round(selected_field*paramters.mu0, 3) ) + "[T]")
    st.pyplot(figtraj)
st.write(r"As can be noted in the magnetization dynamics for a given external field value, the system quickly gets its magnetization direction according to the applied AC current. However, if we just employ a single period for the time integration, the result of the Fourier integral may differ from the actual coefficient, as the first time steps do not have a pure wave behavior.")
st.caption("Computing the harmonics")
st.write(r"Therefore, in order to accurately compute the integral, each time integration of the LLG equation, for each $H_{\text{ext,x}}$ value, is performed over 4 complete periods $t_f=4/f$. Then, for computing the Fourier integral, the initial period of the time integration of the LLG equation is ommited from the computation. Furthermore, to improve the accuracy of the calculated harmonic component of the voltage, the remaining three periods are integrated and the normalization factor of the Fourier integral is adjusted accordingly. Finally, the integral is numerically approximated by the following sum:")
st.latex(r'''
V^{xy}_{ \omega} \approx \frac{2}{t_f(3/4)} \sum^{4000}_{i=1000} ({J_x}_i {m_z}_i R_{ \text{AHE} }) \sin(\omega t_i) (\Delta t)_i \\ \: \\
V^{xy}_{2\omega} \approx \frac{2}{t_f(3/4)} \sum^{4000}_{i=1000} ({J_x}_i {m_z}_i R_{ \text{AHE} }) \cos(2\omega t_i) (\Delta t)_i
''')
st.write(r'Where $i$ represents an index of the elements of the lists containing the values of each step of the simulation (_Note that one period has been split into 1000 equidistant steps_). Inside the simulation the voltage is computed as $V^{xy}(t)=J_x(t) m_z(t) R_{AHE} \sigma$, where $\sigma$ is the cross section area of the conducting element. In our case $\sigma=(2 \mu m \times 6 \text{nm})$ ')
st.write("Lastly, the resulting transfer curves using the Fourier series integral definition are: ")
figv2w = graph(fieldrangeT, signal2w, r'$\mu_0 H_x$ (T)', r'$V_{2w} [V]$ ', "V2w", "Second harmonic voltage" )
figv1w = graph(fieldrangeT, signalw, r'$\mu_0 H_x$ (T)', r'$V_{w} [V]$ ', "Vw", "First harmonic voltage" )
figamr = graph(fieldrangeT, amrList, r'$\mu_0 H_x$ (T)', r'$m_x^2$', r'$m_x^2$','AMR effect')
figahe = graph(fieldrangeT, aheList, r'$\mu_0 H_x$ (T)', r'$m_{z,+j_e}-m_{z,-j_e}$', r'$m_{z,+j_e}-m_{z,ij_e}$','AHE effect')
figmag = graphm(fieldrangeT, m_eqx, m_eqy, m_eqz, r'$\mu_0 H_x$ (T)', r'$m_i$', "Equilibrium direction of m") #index denotes field sweep step
##plt.plot(fieldrangeT, lsignal2w, label = 'lock in r2w')
##plt.plot(fieldrangeT, fsignal2w, label = 'fft r2w')
##plt.plot(fieldrangeT, H,'r')
##ax.set(xlabel=r'$\phi$ [grad]',ylabel = r'$m_{i}$ ')
st.pyplot(figv1w)
st.pyplot(figv2w)
st.write('If we just take in consideration the magnetization components to describe the AMR and AHE effects, the transfer curves are:')
st.pyplot(figahe)
st.pyplot(figamr)
st.write("It is important to highligh that by inducing an AC there is no an exact static point for equilibrium magnetization. However, when the system reaches equilibrium with respect to the AC current, the magnetization direction of the last time step of each period may be regarded as equilibrium magnetization (check ref. [X] Phys. Rev. B 89, 144425 (2014))")
st.pyplot(figmag)
#Pending code sections
#if st.checkbox("Show fields evolution", False):
# figfields = graphm(timeEvol[s_index], Hx[s_index], Hy[s_index], Hz[s_index],
# "time [ns]", r'$m_i$',
# "Current induced fields at H_ext:" + str( round(selected_field*paramters.mu0, 3) ) + "[T]")
#
# st.pyplot(figfields)
| [
"streamlit.caption",
"streamlit.image",
"streamlit.sidebar.text_input",
"numpy.array",
"streamlit.latex",
"numpy.linalg.norm",
"numpy.sin",
"streamlit.header",
"streamlit.title",
"numpy.multiply",
"numpy.cross",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.fft.rfft",
"streamlit.sidebar... | [((293, 352), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""## Parameters used in the simulation"""'], {}), "('## Parameters used in the simulation')\n", (312, 352), True, 'import streamlit as st\n'), ((353, 421), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""Enter your own custom values to run the model"""'], {}), "('Enter your own custom values to run the model')\n", (372, 421), True, 'import streamlit as st\n'), ((7948, 7993), 'numpy.linspace', 'np.linspace', (['(-np.pi / 2)', '(np.pi * 3 / 2)'], {'num': 'n'}), '(-np.pi / 2, np.pi * 3 / 2, num=n)\n', (7959, 7993), True, 'import numpy as np\n'), ((12815, 12891), 'streamlit.title', 'st.title', (['"""Magnetization dynamics for FM/HM interfaces, a single-spin model"""'], {}), "('Magnetization dynamics for FM/HM interfaces, a single-spin model')\n", (12823, 12891), True, 'import streamlit as st\n'), ((12892, 12926), 'streamlit.header', 'st.header', (['"""Online LLG integrator"""'], {}), "('Online LLG integrator')\n", (12901, 12926), True, 'import streamlit as st\n'), ((12927, 12971), 'streamlit.caption', 'st.caption', (['"""<NAME>, <NAME>, <NAME>, <NAME>"""'], {}), "('<NAME>, <NAME>, <NAME>, <NAME>')\n", (12937, 12971), True, 'import streamlit as st\n'), ((12972, 13017), 'streamlit.caption', 'st.caption', (['"""Physics of Functional Materials"""'], {}), "('Physics of Functional Materials')\n", (12982, 13017), True, 'import streamlit as st\n'), ((13018, 13052), 'streamlit.caption', 'st.caption', (['"""University of Vienna"""'], {}), "('University of Vienna')\n", (13028, 13052), True, 'import streamlit as st\n'), ((13054, 13631), 'streamlit.write', 'st.write', (['"""The following page describes the details to consider to efficiently simulate a FM/HM interface. This model is based on the Landau-Lifshitz-Gilbert equation, and the equation is integrated using _scipy_ python libraries. 
Hence, the magnetization dynamics is computed with this model, which also contains routines to calculate the first and second harmonics of the Anomalous Hall Voltage (from AH Effect). This interactve tool is designed to allow quick computations and detailed understanding of the considerations made to simulate such FM/HM interfaces. """'], {}), "(\n 'The following page describes the details to consider to efficiently simulate a FM/HM interface. This model is based on the Landau-Lifshitz-Gilbert equation, and the equation is integrated using _scipy_ python libraries. Hence, the magnetization dynamics is computed with this model, which also contains routines to calculate the first and second harmonics of the Anomalous Hall Voltage (from AH Effect). This interactve tool is designed to allow quick computations and detailed understanding of the considerations made to simulate such FM/HM interfaces. '\n )\n", (13062, 13631), True, 'import streamlit as st\n'), ((13622, 13890), 'streamlit.write', 'st.write', (['"""The parameters used in the computation for the live plot results can be freely manipulated using the left sidebar (_available clicking in the arrowhead on the top left of this web app_). Feel free to perform computations with the desired values. """'], {}), "(\n 'The parameters used in the computation for the live plot results can be freely manipulated using the left sidebar (_available clicking in the arrowhead on the top left of this web app_). Feel free to perform computations with the desired values. '\n )\n", (13630, 13890), True, 'import streamlit as st\n'), ((13882, 13921), 'streamlit.subheader', 'st.subheader', (['"""Theoretical description"""'], {}), "('Theoretical description')\n", (13894, 13921), True, 'import streamlit as st\n'), ((13923, 14128), 'streamlit.write', 'st.write', (['"""The system described by the model is a typical FM/HM interface. 
In our specific case, a Hall cross with a thin ferromagnetic layer displaying an out of plane magnetization (fig. 1). """'], {}), "(\n 'The system described by the model is a typical FM/HM interface. In our specific case, a Hall cross with a thin ferromagnetic layer displaying an out of plane magnetization (fig. 1). '\n )\n", (13931, 14128), True, 'import streamlit as st\n'), ((14119, 14325), 'streamlit.image', 'st.image', (['"""https://journals.aps.org/prb/article/10.1103/PhysRevB.89.144425/figures/1/medium"""'], {'caption': '"""*Fig. 1* Hall bar structure. Adapted from Phys. Rev. B 89, 144425 (2014)"""', 'width': '(400)'}), "(\n 'https://journals.aps.org/prb/article/10.1103/PhysRevB.89.144425/figures/1/medium'\n , caption=\n '*Fig. 1* Hall bar structure. Adapted from Phys. Rev. B 89, 144425 (2014)',\n width=400)\n", (14127, 14325), True, 'import streamlit as st\n'), ((14371, 14538), 'streamlit.write', 'st.write', (['"""The LLG equation employed in the model is in explicit form and takes the Slonczewsky spin-orbit-torque coefficients as input. It goes as follows:"""'], {}), "(\n 'The LLG equation employed in the model is in explicit form and takes the Slonczewsky spin-orbit-torque coefficients as input. 
It goes as follows:'\n )\n", (14379, 14538), True, 'import streamlit as st\n'), ((14530, 14779), 'streamlit.latex', 'st.latex', (['""" \\\\frac{\\\\partial \\\\vec{m}}{\\\\partial t} = -\n \\\\frac{\\\\gamma}{1+\\\\alpha^2} (\\\\vec{m} \\\\times \\\\vec{H}_{\\\\text{eff}}) - \n \\\\frac{\\\\gamma \\\\alpha}{1+\\\\alpha^2} \\\\:\\\\vec{m} \\\\times (\\\\vec{m} \\\\times \\\\vec{H}_{\\\\text{eff}})"""'], {}), '(\n """ \\\\frac{\\\\partial \\\\vec{m}}{\\\\partial t} = -\n \\\\frac{\\\\gamma}{1+\\\\alpha^2} (\\\\vec{m} \\\\times \\\\vec{H}_{\\\\text{eff}}) - \n \\\\frac{\\\\gamma \\\\alpha}{1+\\\\alpha^2} \\\\:\\\\vec{m} \\\\times (\\\\vec{m} \\\\times \\\\vec{H}_{\\\\text{eff}})"""\n )\n', (14538, 14779), True, 'import streamlit as st\n'), ((14749, 15155), 'streamlit.write', 'st.write', (['"""Where $m$ represents the mgnetization unit vector, $\\\\alpha$ the Gilbert damping constant, $\\\\gamma$ the gyromagnetic ratio, and $\\\\vec{H}_{\\\\text{eff}}$ is the effective magnetic field. The effective magnetic field contains contributions of the applied external field, the effective anisotropy field, and the current induced fields via spin orbit torque effects. It reads as follows:"""'], {}), "(\n 'Where $m$ represents the mgnetization unit vector, $\\\\alpha$ the Gilbert damping constant, $\\\\gamma$ the gyromagnetic ratio, and $\\\\vec{H}_{\\\\text{eff}}$ is the effective magnetic field. The effective magnetic field contains contributions of the applied external field, the effective anisotropy field, and the current induced fields via spin orbit torque effects. 
It reads as follows:'\n )\n", (14757, 15155), True, 'import streamlit as st\n'), ((15143, 15703), 'streamlit.latex', 'st.latex', (['""" \\\\vec{ H }_{\\\\text{eff}} =\n\\\\vec{ H }_{\\\\text{ext}} + \\\\vec{ H }_{\\\\text{k}} + \n\\\\vec{ H }^{\\\\text{SOT}}_{\\\\text{FL}} + \n\\\\vec{ H }^{\\\\text{SOT}}_{\\\\text{DL}} \\\\\\\\ \\\\:\\\\\\\\ \\\\:\\\\\\\\\n\\\\vec{ H }_{\\\\text{k}} = \\\\frac{2\\\\vec{K}_1}{Js} \\\\\\\\ \\\\:\\\\\\\\\n\\\\vec{ H }^{\\\\text{SOT}}_{\\\\text{FL}} = \\\\eta_\\\\text{FL} \\\\frac{ j_e \\\\hbar }{ 2 e t \\\\mu_0 M_s }\\\\:\\\\vec{m} \\\\times (\\\\vec{m} \\\\times \\\\vec{p}) \\\\\\\\ \\\\:\\\\\\\\\n\\\\vec{ H }^{\\\\text{SOT}}_{\\\\text{DL}} = \\\\eta_\\\\text{DL} \\\\frac{ j_e \\\\hbar }{ 2 e t \\\\mu_0 M_s }\\\\:(\\\\vec{m} \\\\times \\\\vec{p})\n"""'], {}), '(\n """ \\\\vec{ H }_{\\\\text{eff}} =\n\\\\vec{ H }_{\\\\text{ext}} + \\\\vec{ H }_{\\\\text{k}} + \n\\\\vec{ H }^{\\\\text{SOT}}_{\\\\text{FL}} + \n\\\\vec{ H }^{\\\\text{SOT}}_{\\\\text{DL}} \\\\\\\\ \\\\:\\\\\\\\ \\\\:\\\\\\\\\n\\\\vec{ H }_{\\\\text{k}} = \\\\frac{2\\\\vec{K}_1}{Js} \\\\\\\\ \\\\:\\\\\\\\\n\\\\vec{ H }^{\\\\text{SOT}}_{\\\\text{FL}} = \\\\eta_\\\\text{FL} \\\\frac{ j_e \\\\hbar }{ 2 e t \\\\mu_0 M_s }\\\\:\\\\vec{m} \\\\times (\\\\vec{m} \\\\times \\\\vec{p}) \\\\\\\\ \\\\:\\\\\\\\\n\\\\vec{ H }^{\\\\text{SOT}}_{\\\\text{DL}} = \\\\eta_\\\\text{DL} \\\\frac{ j_e \\\\hbar }{ 2 e t \\\\mu_0 M_s }\\\\:(\\\\vec{m} \\\\times \\\\vec{p})\n"""\n )\n', (15151, 15703), True, 'import streamlit as st\n'), ((15637, 15949), 'streamlit.write', 'st.write', (['"""The $\\\\vec{p}$ vector represents the spin polarization of electrons. For a current flowing along the x direction, the vector is $(0,-1,0)$. As the here simulated system presents out of plane magnetization along the +z axis, the $\\\\vec{K}_1$ anisotropy constant is represented by $(0,0,K_1)$"""'], {}), "(\n 'The $\\\\vec{p}$ vector represents the spin polarization of electrons. 
For a current flowing along the x direction, the vector is $(0,-1,0)$. As the here simulated system presents out of plane magnetization along the +z axis, the $\\\\vec{K}_1$ anisotropy constant is represented by $(0,0,K_1)$'\n )\n", (15645, 15949), True, 'import streamlit as st\n'), ((15939, 16131), 'streamlit.write', 'st.write', (['"""Therefore, this simplified model just describes out-of-plane systems with negligible Planar Hall Effect, compared to the Anomalous Hall Effect. It will get improved soon."""'], {}), "(\n 'Therefore, this simplified model just describes out-of-plane systems with negligible Planar Hall Effect, compared to the Anomalous Hall Effect. It will get improved soon.'\n )\n", (15947, 16131), True, 'import streamlit as st\n'), ((16123, 16163), 'streamlit.caption', 'st.caption', (['"""Performing the integration"""'], {}), "('Performing the integration')\n", (16133, 16163), True, 'import streamlit as st\n'), ((16165, 16573), 'streamlit.write', 'st.write', (['"""In order to accurately compute the first and second harmonic components of the Anomalous Hall Voltage, the period is, at least, split in 1000 equidistand time steps. This will ensure an accurate description of the time variation of the voltage induced by the AC current. Additionaly, it will improve the computation of the numerical Fourier integrals for getting the harmonic responses."""'], {}), "(\n 'In order to accurately compute the first and second harmonic components of the Anomalous Hall Voltage, the period is, at least, split in 1000 equidistand time steps. This will ensure an accurate description of the time variation of the voltage induced by the AC current. 
Additionaly, it will improve the computation of the numerical Fourier integrals for getting the harmonic responses.'\n )\n", (16173, 16573), True, 'import streamlit as st\n'), ((16564, 16636), 'streamlit.write', 'st.write', (['"""Under AC, the voltage is made up by the following harmonics:"""'], {}), "('Under AC, the voltage is made up by the following harmonics:')\n", (16572, 16636), True, 'import streamlit as st\n'), ((16637, 16756), 'streamlit.latex', 'st.latex', (['""" V_{xy}(t) = V^{xy}_0 + V^{xy}_\\\\omega\\\\sin(\\\\omega t) + V^{xy}_{2\\\\omega}\\\\cos(2\\\\omega t) + ..."""'], {}), "(\n ' V_{xy}(t) = V^{xy}_0 + V^{xy}_\\\\omega\\\\sin(\\\\omega t) + V^{xy}_{2\\\\omega}\\\\cos(2\\\\omega t) + ...'\n )\n", (16645, 16756), True, 'import streamlit as st\n'), ((16746, 16907), 'streamlit.write', 'st.write', (['"""Those harmonic components can be isolated by applying the Fourier series coefficient integral definition, integrating over one full period."""'], {}), "(\n 'Those harmonic components can be isolated by applying the Fourier series coefficient integral definition, integrating over one full period.'\n )\n", (16754, 16907), True, 'import streamlit as st\n'), ((16898, 17092), 'streamlit.latex', 'st.latex', (['""" \n V^{xy}_{\\\\omega}=\\\\frac{2}{T}\\\\int_{T} V(t)\\\\sin(\\\\omega t)\\\\text{dt} \\\\\\\\ \\\\: \\\\\\\\\n V^{xy}_{2\\\\omega}=\\\\frac{2}{T}\\\\int_{T} V(t)\\\\cos(2\\\\omega t)\\\\text{dt} \n """'], {}), '(\n """ \n V^{xy}_{\\\\omega}=\\\\frac{2}{T}\\\\int_{T} V(t)\\\\sin(\\\\omega t)\\\\text{dt} \\\\\\\\ \\\\: \\\\\\\\\n V^{xy}_{2\\\\omega}=\\\\frac{2}{T}\\\\int_{T} V(t)\\\\cos(2\\\\omega t)\\\\text{dt} \n """\n )\n', (16906, 17092), True, 'import streamlit as st\n'), ((17067, 17241), 'streamlit.write', 'st.write', (['"""As the system starts fully pointing in the z direction, it is important to simulate the electric current with a cosine wave $J_x=j_e \\\\cos(\\\\omega t)$. 
"""'], {}), "(\n 'As the system starts fully pointing in the z direction, it is important to simulate the electric current with a cosine wave $J_x=j_e \\\\cos(\\\\omega t)$. '\n )\n", (17075, 17241), True, 'import streamlit as st\n'), ((17235, 17288), 'streamlit.checkbox', 'st.checkbox', (['"""Show relaxation of magnetization"""', '(True)'], {}), "('Show relaxation of magnetization', True)\n", (17246, 17288), True, 'import streamlit as st\n'), ((17856, 18248), 'streamlit.write', 'st.write', (['"""As can be noted in the magnetization dynamics for a given external field value, the system quickly gets its magnetization direction according to the applied AC current. However, if we just employ a single period for the time integration, the result of the Fourier integral may differ from the actual coefficient, as the first time steps do not have a pure wave behavior."""'], {}), "(\n 'As can be noted in the magnetization dynamics for a given external field value, the system quickly gets its magnetization direction according to the applied AC current. However, if we just employ a single period for the time integration, the result of the Fourier integral may differ from the actual coefficient, as the first time steps do not have a pure wave behavior.'\n )\n", (17864, 18248), True, 'import streamlit as st\n'), ((18242, 18279), 'streamlit.caption', 'st.caption', (['"""Computing the harmonics"""'], {}), "('Computing the harmonics')\n", (18252, 18279), True, 'import streamlit as st\n'), ((18281, 18906), 'streamlit.write', 'st.write', (['"""Therefore, in order to accurately compute the integral, each time integration of the LLG equation, for each $H_{\\\\text{ext,x}}$ value, is performed over 4 complete periods $t_f=4/f$. Then, for computing the Fourier integral, the initial period of the time integration of the LLG equation is ommited from the computation. 
Furthermore, to improve the accuracy of the calculated harmonic component of the voltage, the remaining three periods are integrated and the normalization factor of the Fourier integral is adjusted accordingly. Finally, the integral is numerically approximated by the following sum:"""'], {}), "(\n 'Therefore, in order to accurately compute the integral, each time integration of the LLG equation, for each $H_{\\\\text{ext,x}}$ value, is performed over 4 complete periods $t_f=4/f$. Then, for computing the Fourier integral, the initial period of the time integration of the LLG equation is ommited from the computation. Furthermore, to improve the accuracy of the calculated harmonic component of the voltage, the remaining three periods are integrated and the normalization factor of the Fourier integral is adjusted accordingly. Finally, the integral is numerically approximated by the following sum:'\n )\n", (18289, 18906), True, 'import streamlit as st\n'), ((18897, 19216), 'streamlit.latex', 'st.latex', (['""" \nV^{xy}_{ \\\\omega} \\\\approx \\\\frac{2}{t_f(3/4)} \\\\sum^{4000}_{i=1000} ({J_x}_i {m_z}_i R_{ \\\\text{AHE} }) \\\\sin(\\\\omega t_i) (\\\\Delta t)_i \\\\\\\\ \\\\: \\\\\\\\\nV^{xy}_{2\\\\omega} \\\\approx \\\\frac{2}{t_f(3/4)} \\\\sum^{4000}_{i=1000} ({J_x}_i {m_z}_i R_{ \\\\text{AHE} }) \\\\cos(2\\\\omega t_i) (\\\\Delta t)_i\n"""'], {}), '(\n """ \nV^{xy}_{ \\\\omega} \\\\approx \\\\frac{2}{t_f(3/4)} \\\\sum^{4000}_{i=1000} ({J_x}_i {m_z}_i R_{ \\\\text{AHE} }) \\\\sin(\\\\omega t_i) (\\\\Delta t)_i \\\\\\\\ \\\\: \\\\\\\\\nV^{xy}_{2\\\\omega} \\\\approx \\\\frac{2}{t_f(3/4)} \\\\sum^{4000}_{i=1000} ({J_x}_i {m_z}_i R_{ \\\\text{AHE} }) \\\\cos(2\\\\omega t_i) (\\\\Delta t)_i\n"""\n )\n', (18905, 19216), True, 'import streamlit as st\n'), ((19187, 19605), 'streamlit.write', 'st.write', (['"""Where $i$ represents an index of the elements of the lists containing the values of each step of the simulation (_Note that one period has been split into 1000 
equidistant steps_). Inside the simulation the voltage is computed as $V^{xy}(t)=J_x(t) m_z(t) R_{AHE} \\\\sigma$, where $\\\\sigma$ is the cross section area of the conducting element. In our case $\\\\sigma=(2 \\\\mu m \\\\times 6 \\\\text{nm})$ """'], {}), "(\n 'Where $i$ represents an index of the elements of the lists containing the values of each step of the simulation (_Note that one period has been split into 1000 equidistant steps_). Inside the simulation the voltage is computed as $V^{xy}(t)=J_x(t) m_z(t) R_{AHE} \\\\sigma$, where $\\\\sigma$ is the cross section area of the conducting element. In our case $\\\\sigma=(2 \\\\mu m \\\\times 6 \\\\text{nm})$ '\n )\n", (19195, 19605), True, 'import streamlit as st\n'), ((19592, 19702), 'streamlit.write', 'st.write', (['"""Lastly, the resulting transfer curves using the Fourier series integral definition are: """'], {}), "(\n 'Lastly, the resulting transfer curves using the Fourier series integral definition are: '\n )\n", (19600, 19702), True, 'import streamlit as st\n'), ((20477, 20494), 'streamlit.pyplot', 'st.pyplot', (['figv1w'], {}), '(figv1w)\n', (20486, 20494), True, 'import streamlit as st\n'), ((20495, 20512), 'streamlit.pyplot', 'st.pyplot', (['figv2w'], {}), '(figv2w)\n', (20504, 20512), True, 'import streamlit as st\n'), ((20515, 20660), 'streamlit.write', 'st.write', (['"""If we just take in consideration the magnetization components to describe the AMR and AHE effects, the transfer curves are:"""'], {}), "(\n 'If we just take in consideration the magnetization components to describe the AMR and AHE effects, the transfer curves are:'\n )\n", (20523, 20660), True, 'import streamlit as st\n'), ((20652, 20669), 'streamlit.pyplot', 'st.pyplot', (['figahe'], {}), '(figahe)\n', (20661, 20669), True, 'import streamlit as st\n'), ((20670, 20687), 'streamlit.pyplot', 'st.pyplot', (['figamr'], {}), '(figamr)\n', (20679, 20687), True, 'import streamlit as st\n'), ((20689, 21061), 'streamlit.write', 
'st.write', (['"""It is important to highligh that by inducing an AC there is no an exact static point for equilibrium magnetization. However, when the system reaches equilibrium with respect to the AC current, the magnetization direction of the last time step of each period may be regarded as equilibrium magnetization (check ref. [X] Phys. Rev. B 89, 144425 (2014))"""'], {}), "(\n 'It is important to highligh that by inducing an AC there is no an exact static point for equilibrium magnetization. However, when the system reaches equilibrium with respect to the AC current, the magnetization direction of the last time step of each period may be regarded as equilibrium magnetization (check ref. [X] Phys. Rev. B 89, 144425 (2014))'\n )\n", (20697, 21061), True, 'import streamlit as st\n'), ((21053, 21070), 'streamlit.pyplot', 'st.pyplot', (['figmag'], {}), '(figmag)\n', (21062, 21070), True, 'import streamlit as st\n'), ((434, 496), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Current density j_e [10^10 A/m^2]"""', '(10)'], {}), "('Current density j_e [10^10 A/m^2]', 10)\n", (455, 496), True, 'import streamlit as st\n'), ((1207, 1226), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1215, 1226), True, 'import numpy as np\n'), ((1238, 1258), 'numpy.array', 'np.array', (['[0, -1, 0]'], {}), '([0, -1, 0])\n', (1246, 1258), True, 'import numpy as np\n'), ((1515, 1546), 'numpy.array', 'np.array', (['[1.0 * K1 / Js, 0, 0]'], {}), '([1.0 * K1 / Js, 0, 0])\n', (1523, 1546), True, 'import numpy as np\n'), ((1585, 1635), 'numpy.cos', 'np.cos', (['(2 * 2 * np.pi * f * t + ph / 180.0 * np.pi)'], {}), '(2 * 2 * np.pi * f * t + ph / 180.0 * np.pi)\n', (1591, 1635), True, 'import numpy as np\n'), ((1685, 1706), 'numpy.multiply', 'np.multiply', (['sig', 'ref'], {}), '(sig, ref)\n', (1696, 1706), True, 'import numpy as np\n'), ((1853, 1869), 'numpy.fft.rfft', 'np.fft.rfft', (['sig'], {}), '(sig)\n', (1864, 1869), True, 'import numpy as np\n'), 
((1885, 1897), 'numpy.abs', 'np.abs', (['yfft'], {}), '(yfft)\n', (1891, 1897), True, 'import numpy as np\n'), ((2921, 2937), 'numpy.cross', 'np.cross', (['m', 'mxh'], {}), '(m, mxh)\n', (2929, 2937), True, 'import numpy as np\n'), ((4218, 4235), 'numpy.array', 'np.array', (['magList'], {}), '(magList)\n', (4226, 4235), True, 'import numpy as np\n'), ((4500, 4527), 'numpy.array', 'np.array', (['paramters_.result'], {}), '(paramters_.result)\n', (4508, 4527), True, 'import numpy as np\n'), ((4552, 4572), 'numpy.array', 'np.array', (['magList[0]'], {}), '(magList[0])\n', (4560, 4572), True, 'import numpy as np\n'), ((4599, 4650), 'numpy.sin', 'np.sin', (['(2 * 3.1415927 * paramters_.frequency * time)'], {}), '(2 * 3.1415927 * paramters_.frequency * time)\n', (4605, 4650), True, 'import numpy as np\n'), ((4680, 4735), 'numpy.cos', 'np.cos', (['(2 * 2 * 3.1415927 * paramters_.frequency * time)'], {}), '(2 * 2 * 3.1415927 * paramters_.frequency * time)\n', (4686, 4735), True, 'import numpy as np\n'), ((5014, 5026), 'numpy.array', 'np.array', (['dt'], {}), '(dt)\n', (5022, 5026), True, 'import numpy as np\n'), ((8372, 8433), 'numpy.linspace', 'np.linspace', (['(-0.1 / paramters.mu0)', '(0.1 / paramters.mu0)'], {'num': 'n'}), '(-0.1 / paramters.mu0, 0.1 / paramters.mu0, num=n)\n', (8383, 8433), True, 'import numpy as np\n'), ((12397, 12411), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12409, 12411), True, 'import matplotlib.pyplot as plt\n'), ((12415, 12445), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 'pltlabel'}), '(x, y, label=pltlabel)\n', (12423, 12445), True, 'import matplotlib.pyplot as plt\n'), ((12491, 12509), 'matplotlib.pyplot.title', 'plt.title', (['plthead'], {}), '(plthead)\n', (12500, 12509), True, 'import matplotlib.pyplot as plt\n'), ((12513, 12525), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12523, 12525), True, 'import matplotlib.pyplot as plt\n'), ((12602, 12616), 'matplotlib.pyplot.subplots', 
'plt.subplots', ([], {}), '()\n', (12614, 12616), True, 'import matplotlib.pyplot as plt\n'), ((12620, 12648), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'mx'], {'label': '"""$x$"""'}), "(t, mx, label='$x$')\n", (12628, 12648), True, 'import matplotlib.pyplot as plt\n'), ((12655, 12683), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'my'], {'label': '"""$y$"""'}), "(t, my, label='$y$')\n", (12663, 12683), True, 'import matplotlib.pyplot as plt\n'), ((12690, 12718), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'mz'], {'label': '"""$z$"""'}), "(t, mz, label='$z$')\n", (12698, 12718), True, 'import matplotlib.pyplot as plt\n'), ((12765, 12783), 'matplotlib.pyplot.title', 'plt.title', (['plthead'], {}), '(plthead)\n', (12774, 12783), True, 'import matplotlib.pyplot as plt\n'), ((12787, 12799), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12797, 12799), True, 'import matplotlib.pyplot as plt\n'), ((17836, 17854), 'streamlit.pyplot', 'st.pyplot', (['figtraj'], {}), '(figtraj)\n', (17845, 17854), True, 'import streamlit as st\n'), ((581, 633), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Gilbert damping constant"""', '(1)'], {}), "('Gilbert damping constant', 1)\n", (602, 633), True, 'import streamlit as st\n'), ((658, 726), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Anisotropy constant K_1 [J/m^3]"""', '(1.5 * 9100)'], {}), "('Anisotropy constant K_1 [J/m^3]', 1.5 * 9100)\n", (679, 726), True, 'import streamlit as st\n'), ((754, 816), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Saturation magnetization Js [T]"""', '(0.65)'], {}), "('Saturation magnetization Js [T]', 0.65)\n", (775, 816), True, 'import streamlit as st\n'), ((841, 905), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Anomalous Hall effect coefficient"""', '(0.65)'], {}), "('Anomalous Hall effect coefficient', 0.65)\n", (862, 905), True, 'import streamlit as st\n'), ((931, 1006), 'streamlit.sidebar.text_input', 
'st.sidebar.text_input', (['"""FM layer thickness [nm]"""', '((0.6 + 1.2 + 1.1) * 1e-09)'], {}), "('FM layer thickness [nm]', (0.6 + 1.2 + 1.1) * 1e-09)\n", (952, 1006), True, 'import streamlit as st\n'), ((1033, 1088), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""AC frequency [Hz]"""', '(100000000.0)'], {}), "('AC frequency [Hz]', 100000000.0)\n", (1054, 1088), True, 'import streamlit as st\n'), ((1280, 1348), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Damping like torque term coefficient"""', '(0.084)'], {}), "('Damping like torque term coefficient', 0.084)\n", (1301, 1348), True, 'import streamlit as st\n'), ((1377, 1431), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Field like torque term"""', '(0.008)'], {}), "('Field like torque term', 0.008)\n", (1398, 1431), True, 'import streamlit as st\n'), ((1815, 1825), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (1822, 1825), True, 'import numpy as np\n'), ((1923, 1954), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['N'], {'d': 'sample_dt'}), '(N, d=sample_dt)\n', (1938, 1954), True, 'import numpy as np\n'), ((2020, 2040), 'numpy.abs', 'np.abs', (['(xfft - 2 * f)'], {}), '(xfft - 2 * f)\n', (2026, 2040), True, 'import numpy as np\n'), ((2471, 2510), 'numpy.cos', 'np.cos', (['(2 * 3.1415927 * p.frequency * t)'], {}), '(2 * 3.1415927 * p.frequency * t)\n', (2477, 2510), True, 'import numpy as np\n'), ((2613, 2635), 'numpy.dot', 'np.dot', (['p.easy_axis', 'm'], {}), '(p.easy_axis, m)\n', (2619, 2635), True, 'import numpy as np\n'), ((4783, 4834), 'numpy.cos', 'np.cos', (['(2 * 3.1415927 * paramters_.frequency * time)'], {}), '(2 * 3.1415927 * paramters_.frequency * time)\n', (4789, 4834), True, 'import numpy as np\n'), ((5495, 5523), 'numpy.sum', 'np.sum', (['(voltage * sinwt * dt)'], {}), '(voltage * sinwt * dt)\n', (5501, 5523), True, 'import numpy as np\n'), ((5571, 5600), 'numpy.sum', 'np.sum', (['(voltage * cos2wt * dt)'], {}), '(voltage * cos2wt * dt)\n', 
(5577, 5600), True, 'import numpy as np\n'), ((8530, 8549), 'numpy.array', 'np.array', (['[i, 0, 0]'], {}), '([i, 0, 0])\n', (8538, 8549), True, 'import numpy as np\n'), ((8584, 8599), 'numpy.array', 'np.array', (['initm'], {}), '(initm)\n', (8592, 8599), True, 'import numpy as np\n'), ((8600, 8621), 'numpy.linalg.norm', 'np.linalg.norm', (['initm'], {}), '(initm)\n', (8614, 8621), True, 'import numpy as np\n'), ((2715, 2736), 'numpy.cross', 'np.cross', (['p.p_axis', 'm'], {}), '(p.p_axis, m)\n', (2723, 2736), True, 'import numpy as np\n'), ((10579, 10594), 'numpy.array', 'np.array', (['initm'], {}), '(initm)\n', (10587, 10594), True, 'import numpy as np\n'), ((10595, 10616), 'numpy.linalg.norm', 'np.linalg.norm', (['initm'], {}), '(initm)\n', (10609, 10616), True, 'import numpy as np\n'), ((2827, 2848), 'numpy.cross', 'np.cross', (['p.p_axis', 'm'], {}), '(p.p_axis, m)\n', (2835, 2848), True, 'import numpy as np\n'), ((10477, 10486), 'numpy.cos', 'np.cos', (['i'], {}), '(i)\n', (10483, 10486), True, 'import numpy as np\n'), ((10503, 10512), 'numpy.sin', 'np.sin', (['i'], {}), '(i)\n', (10509, 10512), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import requests as rq
import time
import datetime
import sys
def creq(r):  # check request
    """Abort the script when an API request did not return HTTP 200.

    Prints a short message and exits with a non-zero status so shells and
    callers can detect the failure (the original exited with status 0).
    """
    if r.status_code != 200:
        print('Connection failed...')
        sys.exit(1)
def convert(unix):
    """Convert a unix timestamp to a fractional local-time year (e.g. 2020.41).

    The year is taken from ``datetime.fromtimestamp`` instead of parsing the
    text of ``time.ctime`` (which is locale-dependent and breaks if the
    formatted string changes shape).
    """
    year = datetime.datetime.fromtimestamp(unix).year
    year_start = time.mktime(datetime.datetime(year, 1, 1).timetuple())
    # 31536000 s = 365 days; same scale as the original (leap years are
    # slightly compressed, which only shifts points by < 0.3% on the axis).
    return (unix - year_start) / 31536000 + year
def get_handles():
    """Prompt for one or more Codeforces handles, whitespace-separated."""
    raw = input('Enter handle : ')
    return raw.split()
# Codeforces API root; requests below are built relative to this.
url = 'https://codeforces.com/api/'
# NOTE(review): fetches the full contest list at import time — a network call
# that can be slow or fail before main() ever runs. Consider moving into main().
contests = rq.get(url + 'contest.list?gym=false')
creq(contests)
current_date = datetime.date.today()
plt.style.use(['default', 'seaborn-darkgrid'])
max_year = current_date.year  # upper bound for the plot's x-axis
last_cordinates = []  # last [year, rating] point per plotted handle (name sic)
def process(handle, index, min_year):
    """Fetch one user's rating history, plot it, and return the earliest
    contest year seen so far (used to set the shared x-axis range).

    Appends the curve's final [year, rating] point to the module-level
    ``last_cordinates`` list so ``main`` can place the handle label.
    """
    response = rq.get(url + 'user.rating?handle=' + handle)
    creq(response)
    # Map contest id -> start time (seconds) for every known contest.
    start_times = {c['id']: c['startTimeSeconds'] for c in contests.json()['result']}
    history = response.json()['result']
    ratings = []
    years = []
    for change in history:
        ratings.append(change['newRating'])
        years.append(convert(start_times[change['contestId']]))
    first_year = int(time.ctime(start_times[history[0]['contestId']]).split()[4])
    min_year = min(min_year, first_year)
    # Extend the curve flat to just past the current year so labels fit.
    years.append(max_year + 1)
    ratings.append(ratings[-1])
    last_cordinates.append([years[-1], ratings[-1]])
    plt.plot(np.array(years), np.array(ratings),
             linewidth=1, marker='.', markersize=6, label=handle)
    return min_year
def main():
    """Read handles from stdin, plot each rating history, and label the
    right-hand end of every curve with its handle."""
    handles = get_handles()
    plt.figure(figsize=(10, 5), dpi=100)
    earliest = max_year
    for idx, handle in enumerate(handles):
        earliest = min(earliest, process(handle, idx, earliest))
    plt.xlabel('Time', fontsize=20)
    plt.ylabel('Rating', fontsize=20)
    plt.xticks(np.arange(earliest, max_year + 2, 1))
    plt.legend()
    # Place each handle just to the right of its curve's final point.
    for idx, handle in enumerate(handles):
        x_end, y_end = last_cordinates[idx]
        plt.text(x_end + 1 / 7, y_end, handle)
    plt.show()
main() | [
"datetime.datetime",
"matplotlib.pyplot.text",
"time.ctime",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"requests.get",
"numpy.array",
"matplotlib.pyplot.figure",
"sys.exit",
"datetime.date.today",
"matpl... | [((557, 595), 'requests.get', 'rq.get', (["(url + 'contest.list?gym=false')"], {}), "(url + 'contest.list?gym=false')\n", (563, 595), True, 'import requests as rq\n'), ((626, 647), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (645, 647), False, 'import datetime\n'), ((650, 696), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['default', 'seaborn-darkgrid']"], {}), "(['default', 'seaborn-darkgrid'])\n", (663, 696), True, 'import matplotlib.pyplot as plt\n'), ((303, 332), 'datetime.datetime', 'datetime.datetime', (['year', '(1)', '(1)'], {}), '(year, 1, 1)\n', (320, 332), False, 'import datetime\n'), ((795, 839), 'requests.get', 'rq.get', (["(url + 'user.rating?handle=' + handle)"], {}), "(url + 'user.rating?handle=' + handle)\n", (801, 839), True, 'import requests as rq\n'), ((1345, 1359), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1353, 1359), True, 'import numpy as np\n'), ((1368, 1379), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (1376, 1379), True, 'import numpy as np\n'), ((1386, 1456), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'data'], {'linewidth': '(1)', 'marker': '"""."""', 'markersize': '(6)', 'label': 'handle'}), "(t, data, linewidth=1, marker='.', markersize=6, label=handle)\n", (1394, 1456), True, 'import matplotlib.pyplot as plt\n'), ((1523, 1559), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)', 'dpi': '(100)'}), '(figsize=(10, 5), dpi=100)\n', (1533, 1559), True, 'import matplotlib.pyplot as plt\n'), ((1701, 1732), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(20)'}), "('Time', fontsize=20)\n", (1711, 1732), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1770), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rating"""'], {'fontsize': '(20)'}), "('Rating', fontsize=20)\n", (1747, 1770), True, 'import matplotlib.pyplot as plt\n'), ((1826, 1838), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1836, 1838), True, 'import 
matplotlib.pyplot as plt\n'), ((1956, 1966), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1964, 1966), True, 'import matplotlib.pyplot as plt\n'), ((218, 228), 'sys.exit', 'sys.exit', ([], {}), '()\n', (226, 228), False, 'import sys\n'), ((1786, 1822), 'numpy.arange', 'np.arange', (['min_year', '(max_year + 2)', '(1)'], {}), '(min_year, max_year + 2, 1)\n', (1795, 1822), True, 'import numpy as np\n'), ((1881, 1955), 'matplotlib.pyplot.text', 'plt.text', (['(last_cordinates[i][0] + 1 / 7)', 'last_cordinates[i][1]', 'handles[i]'], {}), '(last_cordinates[i][0] + 1 / 7, last_cordinates[i][1], handles[i])\n', (1889, 1955), True, 'import matplotlib.pyplot as plt\n'), ((265, 281), 'time.ctime', 'time.ctime', (['unix'], {}), '(unix)\n', (275, 281), False, 'import time\n')] |
import re
import numpy as np
from ..AbstractFitness import AbstractFitness
class Fitness(AbstractFitness):
    """Fitness for evolving a regex that extracts a known numeric value.

    An individual is a sequence of genes that ``to_regex`` translates into
    regex fragments. Fitness combines a per-gene score (how many genes,
    consumed right-to-left, keep the expected match alive) with a
    whole-individual score (fewer total matches = more specific regex),
    plus a small bonus that encourages growth when fitness plateaus.
    """

    to_regex = None        # translator from genes to regex fragments
    expected_match = None  # numeric value the regex must capture
    static_ending = None   # fixed regex suffix appended to every candidate

    def __init__(self, to_regex, static_ending, expected_match, text):
        self.to_regex = to_regex
        self.static_ending = static_ending
        self.expected_match = expected_match
        self.text = text
        # NOTE(review): the original called bare `super()` here, which is a
        # no-op — AbstractFitness.__init__ was never invoked. Removed the dead
        # call; confirm whether the parent initializer should actually run.

    # Shared defaults; shadowed by instance attributes once `evaluate` runs.
    previous_length = -1
    previous_fitness = -1

    def evaluate_genes(self, individual, display_logging=False):
        """Score genes right-to-left; return reward normalized to [0, 1].

        A gene whose cumulative regex still finds ``expected_match`` earns a
        positional reward (right-most genes are worth more); a wrong gene is
        degraded to a match-anything placeholder before continuing.
        """
        regex = ''
        fitness = 0
        n_genes = len(individual)
        reversed_genes = np.flip(self.to_regex.transform_to_array(individual))
        for i, gene_regex in enumerate(reversed_genes):
            candidate = gene_regex + regex
            pattern = re.compile(candidate + self.static_ending, re.IGNORECASE)
            matches = pattern.findall(self.text)
            converted = [float(m) for m in matches]
            if matches and self.expected_match in converted:
                fitness += (1 - i / n_genes) / n_genes
            else:
                # Wrong gene: replace it with a wildcard so later (left-ward)
                # genes are still evaluated against a workable suffix.
                # Raw string fixes the invalid `\s` escape of the original.
                candidate = r'(?:.|\s)' + regex
            regex = candidate
        # Maximum attainable reward if every gene had matched.
        perfect_score = sum((1 - i / n_genes) / n_genes for i in range(n_genes))
        return fitness / perfect_score

    def evaluate_individual(self, individual, display_logging=False):
        """Reward a whole-individual match; fewer total matches is better."""
        regex = self.to_regex.transform(individual) + self.static_ending
        pattern = re.compile(regex, re.IGNORECASE)
        fitness = 0
        converted = [float(m) for m in pattern.findall(self.text)]
        if converted and self.expected_match in converted:
            # Punish overly-general regexes that match many values.
            fitness += 1 / len(converted)
        return fitness

    def evaluate(self, individual, display_logging=False):
        """Combine gene- and individual-level scores, scaled to [0, 1].

        On a fitness plateau, individuals that grew (or kept their size)
        receive a +1 nudge so evolution prefers growth over shrinking.
        """
        new_fitness = 0.0
        new_fitness += self.evaluate_genes(individual, display_logging)
        new_fitness += self.evaluate_individual(individual, display_logging)
        new_length = len(individual)
        previous_fitness = self.previous_fitness
        self.previous_fitness = new_fitness
        if previous_fitness == new_fitness:
            if self.previous_length <= new_length:
                new_fitness += 1  # encourage growth over shrinking
        self.previous_length = new_length
        return new_fitness / 3
class Mutator():
gene_factory = None
def __init__(self, gene_factory):
self.gene_factory = gene_factory
def gene_mutator(self, gene, display_logging = False):
precentage = np.random.rand()
if precentage < .08:
new_gene = self.gene_factory.create()
gene = new_gene
return gene
def individual_height_mutator(self, individual, display_logging = False):
precentage = np.random.rand()
if precentage < .10:
gene = self.gene_factory.create()
individual = [gene] + individual # grow to the left,
length = len(individual)
if precentage > .90 and length > 0:
individual = individual[1:] # remove from the left,
return individual | [
"numpy.random.rand",
"re.compile"
] | [((1802, 1834), 're.compile', 're.compile', (['regex', 're.IGNORECASE'], {}), '(regex, re.IGNORECASE)\n', (1812, 1834), False, 'import re\n'), ((3145, 3161), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3159, 3161), True, 'import numpy as np\n'), ((3390, 3406), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3404, 3406), True, 'import numpy as np\n'), ((892, 950), 're.compile', 're.compile', (['(temp_regex + self.static_ending)', 're.IGNORECASE'], {}), '(temp_regex + self.static_ending, re.IGNORECASE)\n', (902, 950), False, 'import re\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 11:55:11 2021
@author: Qian.Cao
Generate a series of Voronoi Rod phantoms
"""
import sys
sys.path.append('../') # use bonebox from source without having to install/build
from bonebox.phantoms.TrabeculaeVoronoi import *
import numpy as np
import matplotlib.pyplot as plt
# import mcubes
plt.ion()
print('Running example for TrabeculaeVoronoi')
def makePhantom(dilationRadius, Nseeds, edgesRetainFraction, randState):
    """Generate a dilated Voronoi-rod phantom.

    Parameters
    ----------
    dilationRadius : float
        Radius (voxels) used to thicken the edge skeleton into rods.
    Nseeds : int
        Number of Voronoi seed points along each axis.
    edgesRetainFraction : float
        Fraction of Voronoi edges randomly kept as rods.
    randState : int
        Seed for repeatable perturbation and edge selection.

    Returns
    -------
    (volumeDilated, bvtv, edgeVerticesRetain) :
        float volume array, bone-volume fraction, and the retained
        edge vertex coordinates.
    """
    # Volume extent in XYZ (mm), seed counts, perturbation radius.
    Sxyz, Nxyz = (10, 10, 10), (Nseeds, Nseeds, Nseeds)
    Rxyz = 1.

    # Parameters for rasterizing the phantom volume.
    volumeSizeVoxels = (200, 200, 200)
    voxelSize = np.array(Sxyz) / np.array(volumeSizeVoxels)

    # Seed points -> Voronoi tessellation -> unique edges/faces.
    points = makeSeedPointsCartesian(Sxyz, Nxyz)
    ppoints = perturbSeedPointsCartesianUniformXYZ(points, Rxyz, randState=randState)
    vor, ind = applyVoronoi(ppoints, Sxyz)
    uniqueEdges, uniqueFaces = findUniqueEdgesAndFaces(vor, ind)

    # NOTE(review): the original also computed edge cosines and face metrics
    # (areas, centroids, normals) plus a random face subset, none of which
    # contributed to the returned phantom — dropped as dead code.

    # Keep a random subset of the edges; these become the rods.
    uniqueEdgesRetain, _ = filterEdgesRandomUniform(uniqueEdges,
                                                    edgesRetainFraction,
                                                    randState=randState)

    # Rasterize the retained skeleton and thicken it into rods.
    volume = makeSkeletonVolumeEdges(vor.vertices, uniqueEdgesRetain, voxelSize, volumeSizeVoxels)
    volumeDilated = dilateVolumeSphereUniform(volume, dilationRadius).astype(float)

    # Bone-volume fraction of the dilated phantom.
    bvtv = np.sum(volumeDilated > 0) / volume.size

    edgeVerticesRetain = getEdgeVertices(vor.vertices, uniqueEdgesRetain)

    return volumeDilated, bvtv, edgeVerticesRetain
# Visualize all edges
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# for ii in range(edgeVertices.shape[0]):
# ax.plot(edgeVertices[ii,:,0],edgeVertices[ii,:,1],edgeVertices[ii,:,2],'b-')
# volumeSmoothed = mcubes.smooth(volumeDilated)
rhoBone = 2e-3 # g/mm3 — bone tissue density for converting voxel counts to mass
voxelSize = (0.05, 0.05, 0.05) # mm — voxel pitch of the phantom volume
pixelSize = (0.05, 0.05) # mm — detector pixel pitch of the projection image
radiusTBS = 5 # pixels — NOTE(review): unused in this chunk
def computeROIProjection(roiBone, projectionAxis):
    """Project a bone ROI along one axis into an areal-density image.

    Multiplies the per-pixel bone voxel count by voxel volume and bone
    density, then divides by pixel area (units: g/mm^2). Uses the
    module-level ``voxelSize``, ``pixelSize`` and ``rhoBone`` constants.
    """
    voxelVolume = np.prod(np.array(voxelSize))
    pixelArea = np.prod(np.array(pixelSize))
    boneCounts = np.sum(roiBone, axis=projectionAxis).T
    return voxelVolume * rhoBone * boneCounts / pixelArea
#%%
# Projection/TBS Settings
# --- Single-phantom demo: build one phantom, project it, display BMD map ---
dilationRadius = 3.4  # rod dilation radius (voxels)
randState = 1  # RNG seed for repeatability
Nseeds = 12  # Voronoi seeds per axis
edgesRetainFraction = 0.5  # fraction of Voronoi edges kept as rods
volume, bvtv, edgeVerticesRetain = makePhantom(dilationRadius, Nseeds, edgesRetainFraction, randState)
projection = computeROIProjection(volume, 0)  # project along axis 0 (x)
plt.figure()
plt.imshow(projection, cmap="gray")
plt.axis("off")
plt.title("BMD/BvTv: "+"{0:.3g}".format(bvtv))
plt.clim(0,0.012)
plt.colorbar()
#%% Visualize all edges
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Draw each retained edge as a 3-D line segment.
for ii in range(edgeVerticesRetain.shape[0]):
    ax.plot(edgeVerticesRetain[ii,:,0],edgeVerticesRetain[ii,:,1],
            edgeVerticesRetain[ii,:,2],'b-')
#%% Look at impact to bvtv
# --- Parameter sweep: dilation radius x seed count -> bone-volume fraction ---
radii = np.linspace(1,3,10)
ns = range(5,18)
edgesRetainFraction = 0.5
bvtvs = np.zeros((len(radii),len(ns)))
for rr in range(len(radii)):
    for nn in range(len(ns)):
        print(str(rr) + " " + str(nn))
        volume, bvtv, edgeVerticesRetain = makePhantom(radii[rr], ns[nn], edgesRetainFraction, randState)
        bvtvs[rr,nn] = bvtv
#%%
# NOTE(review): this overwrites the sweep results just computed above with a
# cached copy from a hard-coded local path — confirm that is intentional.
bvtvs = np.load("C:\\Users\\Qian.Cao\\tmp\\bvtvs.npy")
plt.imshow(bvtvs)
plt.colorbar()
plt.axis("off") | [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.clim",
"matplotlib.pyplot.colorbar",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.sum",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.axis",
"numpy.load",
"sys.path.append"
] | [((143, 165), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (158, 165), False, 'import sys\n'), ((342, 351), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (349, 351), True, 'import matplotlib.pyplot as plt\n'), ((3470, 3482), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3480, 3482), True, 'import matplotlib.pyplot as plt\n'), ((3483, 3518), 'matplotlib.pyplot.imshow', 'plt.imshow', (['projection'], {'cmap': '"""gray"""'}), "(projection, cmap='gray')\n", (3493, 3518), True, 'import matplotlib.pyplot as plt\n'), ((3519, 3534), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3527, 3534), True, 'import matplotlib.pyplot as plt\n'), ((3582, 3600), 'matplotlib.pyplot.clim', 'plt.clim', (['(0)', '(0.012)'], {}), '(0, 0.012)\n', (3590, 3600), True, 'import matplotlib.pyplot as plt\n'), ((3600, 3614), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3612, 3614), True, 'import matplotlib.pyplot as plt\n'), ((3647, 3659), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3657, 3659), True, 'import matplotlib.pyplot as plt\n'), ((3902, 3923), 'numpy.linspace', 'np.linspace', (['(1)', '(3)', '(10)'], {}), '(1, 3, 10)\n', (3913, 3923), True, 'import numpy as np\n'), ((4261, 4307), 'numpy.load', 'np.load', (['"""C:\\\\Users\\\\Qian.Cao\\\\tmp\\\\bvtvs.npy"""'], {}), "('C:\\\\Users\\\\Qian.Cao\\\\tmp\\\\bvtvs.npy')\n", (4268, 4307), True, 'import numpy as np\n'), ((4309, 4326), 'matplotlib.pyplot.imshow', 'plt.imshow', (['bvtvs'], {}), '(bvtvs)\n', (4319, 4326), True, 'import matplotlib.pyplot as plt\n'), ((4327, 4341), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4339, 4341), True, 'import matplotlib.pyplot as plt\n'), ((4342, 4357), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4350, 4357), True, 'import matplotlib.pyplot as plt\n'), ((888, 902), 'numpy.array', 'np.array', (['Sxyz'], {}), '(Sxyz)\n', (896, 902), True, 'import 
numpy as np\n'), ((905, 931), 'numpy.array', 'np.array', (['volumeSizeVoxels'], {}), '(volumeSizeVoxels)\n', (913, 931), True, 'import numpy as np\n'), ((2408, 2433), 'numpy.sum', 'np.sum', (['(volumeDilated > 0)'], {}), '(volumeDilated > 0)\n', (2414, 2433), True, 'import numpy as np\n'), ((3167, 3186), 'numpy.array', 'np.array', (['pixelSize'], {}), '(pixelSize)\n', (3175, 3186), True, 'import numpy as np\n'), ((3108, 3144), 'numpy.sum', 'np.sum', (['roiBone'], {'axis': 'projectionAxis'}), '(roiBone, axis=projectionAxis)\n', (3114, 3144), True, 'import numpy as np\n'), ((3075, 3094), 'numpy.array', 'np.array', (['voxelSize'], {}), '(voxelSize)\n', (3083, 3094), True, 'import numpy as np\n')] |
from typing import Tuple
from gym import spaces
import numpy as np
from omegaconf import DictConfig
import torch
import torch.nn as nn
from rlcycle.common.abstract.action_selector import ActionSelector
from rlcycle.common.utils.common_utils import np2tensor
class DQNActionSelector(ActionSelector):
"""DQN arg-max action selector"""
def __init__(self, use_cuda: bool):
ActionSelector.__init__(self, use_cuda)
def __call__(self, policy: nn.Module, state: np.ndarray) -> Tuple[np.ndarray, ...]:
if state.ndim == 1:
state = state.reshape(1, -1)
state = np2tensor(state, self.use_cuda).unsqueeze(0)
with torch.no_grad():
qvals = policy.forward(state)
qvals = qvals.cpu().detach().numpy()
action = np.argmax(qvals)
return action
class QRActionSelector(ActionSelector):
"""Action selector for Quantile Q-value representations"""
def __init__(self, use_cuda: bool):
ActionSelector.__init__(self, use_cuda)
def __call__(self, policy: nn.Module, state: np.ndarray) -> Tuple[np.ndarray, ...]:
if state.ndim == 1:
state = state.reshape(1, -1)
state = np2tensor(state, self.use_cuda).unsqueeze(0)
with torch.no_grad():
qvals = policy.forward(state).mean(dim=2)
qvals = qvals.cpu().numpy()
action = np.argmax(qvals)
return action
class CategoricalActionSelector(ActionSelector):
"""Action selector for categorical Q-value presentations"""
def __init__(self, use_cuda: bool):
ActionSelector.__init__(self, use_cuda)
def __call__(self, policy: nn.Module, state: np.ndarray) -> Tuple[np.ndarray, ...]:
state = np2tensor(state, self.use_cuda).unsqueeze(0)
with torch.no_grad():
dist = policy.forward(state)
weights = dist * policy.support
qvals = weights.sum(dim=2).cpu().numpy()
action = np.argmax(qvals)
return action
class EpsGreedy(ActionSelector):
"""ActionSelector wrapper for epsilon greedy policy
Attributes:
action_selector (ActionSelector): action selector to wrap
action_space (???): gym environment action space
eps (float): epsilon value for epsilon greedy
eps_final (float): minimum epsilon value to reach
eps_decay (float): decay rate for epsilon
"""
def __init__(
self,
action_selector: ActionSelector,
action_space: spaces.Discrete,
hyper_params: DictConfig,
):
ActionSelector.__init__(self, action_selector.use_cuda)
self.action_selector = action_selector
self.action_space = action_space
self.eps = hyper_params.eps
self.eps_final = hyper_params.eps_final
self.eps_decay = (
self.eps - self.eps_final
) / hyper_params.max_exploration_frame
def __call__(self, policy: nn.Module, state: np.ndarray) -> np.ndarray:
"""Return exploration action if eps > random.uniform(0,1)"""
if self.eps > np.random.random() and self.exploration:
return self.action_space.sample()
return self.action_selector(policy, state)
def decay_epsilon(self):
"""Decay epsilon as learning progresses"""
eps = self.eps - self.eps_decay
self.eps = max(eps, self.eps_final)
| [
"rlcycle.common.abstract.action_selector.ActionSelector.__init__",
"numpy.random.random",
"rlcycle.common.utils.common_utils.np2tensor",
"numpy.argmax",
"torch.no_grad"
] | [((390, 429), 'rlcycle.common.abstract.action_selector.ActionSelector.__init__', 'ActionSelector.__init__', (['self', 'use_cuda'], {}), '(self, use_cuda)\n', (413, 429), False, 'from rlcycle.common.abstract.action_selector import ActionSelector\n'), ((787, 803), 'numpy.argmax', 'np.argmax', (['qvals'], {}), '(qvals)\n', (796, 803), True, 'import numpy as np\n'), ((980, 1019), 'rlcycle.common.abstract.action_selector.ActionSelector.__init__', 'ActionSelector.__init__', (['self', 'use_cuda'], {}), '(self, use_cuda)\n', (1003, 1019), False, 'from rlcycle.common.abstract.action_selector import ActionSelector\n'), ((1380, 1396), 'numpy.argmax', 'np.argmax', (['qvals'], {}), '(qvals)\n', (1389, 1396), True, 'import numpy as np\n'), ((1583, 1622), 'rlcycle.common.abstract.action_selector.ActionSelector.__init__', 'ActionSelector.__init__', (['self', 'use_cuda'], {}), '(self, use_cuda)\n', (1606, 1622), False, 'from rlcycle.common.abstract.action_selector import ActionSelector\n'), ((1958, 1974), 'numpy.argmax', 'np.argmax', (['qvals'], {}), '(qvals)\n', (1967, 1974), True, 'import numpy as np\n'), ((2562, 2617), 'rlcycle.common.abstract.action_selector.ActionSelector.__init__', 'ActionSelector.__init__', (['self', 'action_selector.use_cuda'], {}), '(self, action_selector.use_cuda)\n', (2585, 2617), False, 'from rlcycle.common.abstract.action_selector import ActionSelector\n'), ((662, 677), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (675, 677), False, 'import torch\n'), ((1252, 1267), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1265, 1267), False, 'import torch\n'), ((1786, 1801), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1799, 1801), False, 'import torch\n'), ((604, 635), 'rlcycle.common.utils.common_utils.np2tensor', 'np2tensor', (['state', 'self.use_cuda'], {}), '(state, self.use_cuda)\n', (613, 635), False, 'from rlcycle.common.utils.common_utils import np2tensor\n'), ((1194, 1225), 'rlcycle.common.utils.common_utils.np2tensor', 
'np2tensor', (['state', 'self.use_cuda'], {}), '(state, self.use_cuda)\n', (1203, 1225), False, 'from rlcycle.common.utils.common_utils import np2tensor\n'), ((1728, 1759), 'rlcycle.common.utils.common_utils.np2tensor', 'np2tensor', (['state', 'self.use_cuda'], {}), '(state, self.use_cuda)\n', (1737, 1759), False, 'from rlcycle.common.utils.common_utils import np2tensor\n'), ((3070, 3088), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3086, 3088), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy
import matplotlib
matplotlib.use("Agg")
import pylab
import seaborn
seaborn.set(context="paper", style="white", palette="deep")
rate=numpy.loadtxt("rate.csv", delimiter=",")
time=rate[:,0]
time_max=int(numpy.ceil(numpy.max(time)))
rate=rate[:,1:]
xlen=50
ylen=50
centerX=numpy.zeros([xlen, ylen])
centerY=numpy.zeros([xlen, ylen])
for i in range(xlen):
for j in range(ylen):
centerX[i,j]=float(i)
centerY[i,j]=float(j)
centerX=centerX.reshape(xlen*ylen)
centerY=centerY.reshape(xlen*ylen)
pylab.close()
pylab.figure(figsize=(3,3))
for i in range(time_max):
rate_cut=rate[(i<=time)*(time<i+1),:]
cmassX=[]
cmassY=[]
for r in rate_cut:
if numpy.max(r)>=0.01:
rsum=numpy.sum(r)
cmassX.append(centerX@r/rsum)
cmassY.append(centerY@r/rsum)
pylab.plot(cmassX,cmassY,color="black")
pylab.plot(xlen/2,ylen/2,"o",color="blue")
pylab.xlim([-1, xlen])
pylab.ylim([-1, ylen])
ax=pylab.gca()
ax.invert_yaxis()
pylab.tight_layout()
pylab.savefig("activity_trajectory.pdf")
| [
"seaborn.set",
"pylab.ylim",
"pylab.tight_layout",
"matplotlib.use",
"pylab.plot",
"pylab.savefig",
"pylab.close",
"pylab.figure",
"numpy.max",
"numpy.zeros",
"numpy.sum",
"pylab.xlim",
"numpy.loadtxt",
"pylab.gca"
] | [((55, 76), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (69, 76), False, 'import matplotlib\n'), ((105, 164), 'seaborn.set', 'seaborn.set', ([], {'context': '"""paper"""', 'style': '"""white"""', 'palette': '"""deep"""'}), "(context='paper', style='white', palette='deep')\n", (116, 164), False, 'import seaborn\n'), ((171, 211), 'numpy.loadtxt', 'numpy.loadtxt', (['"""rate.csv"""'], {'delimiter': '""","""'}), "('rate.csv', delimiter=',')\n", (184, 211), False, 'import numpy\n'), ((310, 335), 'numpy.zeros', 'numpy.zeros', (['[xlen, ylen]'], {}), '([xlen, ylen])\n', (321, 335), False, 'import numpy\n'), ((344, 369), 'numpy.zeros', 'numpy.zeros', (['[xlen, ylen]'], {}), '([xlen, ylen])\n', (355, 369), False, 'import numpy\n'), ((558, 571), 'pylab.close', 'pylab.close', ([], {}), '()\n', (569, 571), False, 'import pylab\n'), ((572, 600), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (584, 600), False, 'import pylab\n'), ((876, 925), 'pylab.plot', 'pylab.plot', (['(xlen / 2)', '(ylen / 2)', '"""o"""'], {'color': '"""blue"""'}), "(xlen / 2, ylen / 2, 'o', color='blue')\n", (886, 925), False, 'import pylab\n'), ((919, 941), 'pylab.xlim', 'pylab.xlim', (['[-1, xlen]'], {}), '([-1, xlen])\n', (929, 941), False, 'import pylab\n'), ((942, 964), 'pylab.ylim', 'pylab.ylim', (['[-1, ylen]'], {}), '([-1, ylen])\n', (952, 964), False, 'import pylab\n'), ((968, 979), 'pylab.gca', 'pylab.gca', ([], {}), '()\n', (977, 979), False, 'import pylab\n'), ((998, 1018), 'pylab.tight_layout', 'pylab.tight_layout', ([], {}), '()\n', (1016, 1018), False, 'import pylab\n'), ((1019, 1059), 'pylab.savefig', 'pylab.savefig', (['"""activity_trajectory.pdf"""'], {}), "('activity_trajectory.pdf')\n", (1032, 1059), False, 'import pylab\n'), ((836, 877), 'pylab.plot', 'pylab.plot', (['cmassX', 'cmassY'], {'color': '"""black"""'}), "(cmassX, cmassY, color='black')\n", (846, 877), False, 'import pylab\n'), ((251, 266), 'numpy.max', 
'numpy.max', (['time'], {}), '(time)\n', (260, 266), False, 'import numpy\n'), ((718, 730), 'numpy.max', 'numpy.max', (['r'], {}), '(r)\n', (727, 730), False, 'import numpy\n'), ((749, 761), 'numpy.sum', 'numpy.sum', (['r'], {}), '(r)\n', (758, 761), False, 'import numpy\n')] |
#!/usr/bin/env python3
import torch
from tqdm import tqdm
import numpy as np
try:
import tinycudann as tcnn
except:
pass
# This script stress-tests the GPU memory arena of tiny-cuda-nn with randomly sized allocations and helped
# find a bug in its interval arithmetic in the past.
class TcnnFCBlock(tcnn.Network):
def __init__(
self, in_features, out_features,
num_hidden_layers, hidden_features,
activation:str='ReLU', last_activation:str='None',
seed=42):
assert hidden_features in [16, 32, 64, 128], "hidden_features can only be 16, 32, 64, or 128."
super().__init__(in_features, out_features, network_config={
"otype": "FullyFusedMLP", # Component type.
"activation": activation, # Activation of hidden layers.
"output_activation": last_activation, # Activation of the output layer.
"n_neurons": hidden_features, # Neurons in each hidden layer. # May only be 16, 32, 64, or 128.
"n_hidden_layers": num_hidden_layers, # Number of hidden layers.
"feedback_alignment": False # Use feedback alignment # [Lillicrap et al. 2016].
}, seed=seed)
def forward(self, x: torch.Tensor):
prefix = x.shape[:-1]
return super().forward(x.flatten(0,-2)).unflatten(0, prefix)
device = torch.device('cuda:0')
mlp = TcnnFCBlock(3, 256, 8, 128)
for _ in range(10000):
for n, p in mlp.named_parameters():
p.grad = None
_x = np.random.randint(200, 1000, 1)[0]
x = torch.rand([_x,1000,3], dtype=torch.float, device=device) # random setting
#x = torch.rand([torch.randint(200,800,[1]).item(),100,3], dtype=torch.float, device=device) # setting 2
y = mlp.forward(x)
y.mean().backward()
| [
"numpy.random.randint",
"torch.rand",
"torch.device"
] | [((1258, 1280), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1270, 1280), False, 'import torch\n'), ((1438, 1497), 'torch.rand', 'torch.rand', (['[_x, 1000, 3]'], {'dtype': 'torch.float', 'device': 'device'}), '([_x, 1000, 3], dtype=torch.float, device=device)\n', (1448, 1497), False, 'import torch\n'), ((1398, 1429), 'numpy.random.randint', 'np.random.randint', (['(200)', '(1000)', '(1)'], {}), '(200, 1000, 1)\n', (1415, 1429), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Wrappers for openai gym environments, strongly inspired of
openai/baselines/blob/master/baselines/common/atari_wrappers.py
"""
import cv2
import gym
import numpy as np
import collections
class ScaledFrom0To1Wrapper(gym.ObservationWrapper):
def observation(self, obs):
return np.array(obs).astype(np.float32) / 255.0
class Res84x84x1Wrapper(gym.ObservationWrapper):
"""Change input resolution from (210, 160, 3) to (84, 84, 1)
Convert to grayscale with ponderation: r*0.299, g*0.587, b*0.114
"""
def __init__(self, env=None, skip=4):
super(Res84x84x1Wrapper, self).__init__(env)
def observation(self, obs):
frame = cv2.resize(
obs, (110, 84), interpolation=cv2.INTER_AREA).astype(
np.float32)
frame = frame[:, 13:110-13]
frame = frame[:, :, 0] * 0.299 + frame[:, :, 1] * 0.587 + \
frame[:, :, 2] * 0.114
assert np.prod(frame.shape) == 84*84*1
return frame
class StackLast4Wrapper(gym.ObservationWrapper):
"""Stack last 4 frames as input"""
def __init__(self, env):
super(StackLast4Wrapper, self).__init__(env)
def reset(self):
self.buffer = np.zeros(
(84, 84, 4), dtype=np.float32)
return self.observation(self.env.reset())
def observation(self, observation):
self.buffer[:, :, :-1] = self.buffer[:, :, 1:]
self.buffer[:, :, -1] = observation
return self.buffer
class Skip4FramesAndReturnMaxFrom2FramesWrapper(gym.Wrapper):
def __init__(self, env):
"""Repeat action for 4 frames, get max from last 2 to avoid blink"""
super(Skip4FramesAndReturnMaxFrom2FramesWrapper, self).__init__(env)
self.frames_buffer = collections.deque(maxlen=2)
def step(self, action):
sum_reward = 0.0
for _ in range(4):
obs, reward, done, info = self.env.step(action)
self.frames_buffer.append(obs)
sum_reward += reward
if done:
break
max_frame = np.max(np.stack(self.frames_buffer), axis=0)
return max_frame, sum_reward, done, info
def reset(self):
self.frames_buffer.clear()
obs = self.env.reset()
self.frames_buffer.append(obs)
return obs
| [
"numpy.prod",
"collections.deque",
"numpy.stack",
"numpy.zeros",
"numpy.array",
"cv2.resize"
] | [((1248, 1287), 'numpy.zeros', 'np.zeros', (['(84, 84, 4)'], {'dtype': 'np.float32'}), '((84, 84, 4), dtype=np.float32)\n', (1256, 1287), True, 'import numpy as np\n'), ((1793, 1820), 'collections.deque', 'collections.deque', ([], {'maxlen': '(2)'}), '(maxlen=2)\n', (1810, 1820), False, 'import collections\n'), ((980, 1000), 'numpy.prod', 'np.prod', (['frame.shape'], {}), '(frame.shape)\n', (987, 1000), True, 'import numpy as np\n'), ((2108, 2136), 'numpy.stack', 'np.stack', (['self.frames_buffer'], {}), '(self.frames_buffer)\n', (2116, 2136), True, 'import numpy as np\n'), ((720, 776), 'cv2.resize', 'cv2.resize', (['obs', '(110, 84)'], {'interpolation': 'cv2.INTER_AREA'}), '(obs, (110, 84), interpolation=cv2.INTER_AREA)\n', (730, 776), False, 'import cv2\n'), ((342, 355), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (350, 355), True, 'import numpy as np\n')] |
"""
A module for building and performing inference with cluster graphs
"""
# Standard imports
import collections
# Third-party imports
import IPython
import graphviz
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import networkx as nx
import numpy as np
from tqdm.auto import tqdm
# Local imports
from veroku._cg_helpers._cluster import Cluster
import veroku._cg_helpers._animation as cg_animation
from veroku.factors._factor_utils import get_subset_evidence
# TODO: Optimise _pass_message.
# TODO: Improve sepsets selection for less loopiness.
# TODO: Optimisation: messages from clusters that did not receive any new messages in the previous round, do not need
# new messages calculated.
# pylint: disable=protected-access
# Default matplotlib figure size ([width, height] in inches) used by the plotting methods below.
DEFAULT_FIG_SIZE = [15, 5]
def _sort_almost_sorted(almost_sorted_deque, key):
"""
Sort a deque like that where only the first element is potentially unsorted and should probably be last and the rest
of the deque is sorted in descending order.
:param collections.deque almost_sorted_deque: The deque of size n, where the first n-1 elements are definitely
sorted (in descending order) and where the last element is also probably in the correct place, but needs to be
checked
:param callable key: The key (function) to use for sorting.
:return: The sorted (given that the conditions are met) deque.
:rtype: collections.deque
"""
if key(almost_sorted_deque[0]) < key(almost_sorted_deque[1]):
almost_sorted_deque.append(almost_sorted_deque.popleft())
if key(almost_sorted_deque[-1]) <= key(almost_sorted_deque[-2]):
return almost_sorted_deque
almost_sorted_deque = collections.deque(sorted(almost_sorted_deque, key=key, reverse=True))
return almost_sorted_deque
def _evidence_reduce_factors(factors, evidence):
    """
    Reduce each factor with the subset of the evidence that falls within its scope.

    Each factor is reduced only by the evidence variables it actually contains; factors with no
    observed variables are simply copied.

    :param factors: The factors to reduce with the (relevant) evidence.
    :type factors: Factor list
    :param dict evidence: The evidence (i.e {'a':1.0, 'b':2.0})
    :return: The reduced factors.
    :rtype factors: Factor list
    """
    reduced_factors = []
    for original_factor in factors:
        factor = original_factor
        if evidence is not None:
            evidence_vrs, evidence_values = get_subset_evidence(
                all_evidence_dict=evidence, subset_vars=factor.var_names
            )
            if len(evidence_vrs) > 0:
                factor = factor.reduce(evidence_vrs, evidence_values)
        # Always append a copy so callers cannot mutate the inputs through the result.
        reduced_factors.append(factor.copy())
    return reduced_factors
def _absorb_subset_factors(factors):
"""
Absorb any factors that has a scope that is a subset of another factor into such a factor.
:param factors: The list of factors to check for subset factors.
:type factors: Factor list
:return: The (potentially reduced) list of factors.
:rtype: Factor list
"""
# TODO: Simplify this, if possible.
factors_absorbtion_dict = {i: [] for i in range(len(factors))}
final_graph_cluster_factors = []
# factors: possibly smaller list of factors after factors which have a scope that is a subset of another factor have
# been absorbed by the larger one.
factor_processed_mask = [0] * len(factors)
for i, factor_i in enumerate(factors):
if not factor_processed_mask[i]:
factor_product = factor_i.copy()
for j, factor_j in enumerate(factors):
if (i != j) and (not factor_processed_mask[j]):
if set(factor_j.var_names) < set(factor_product.var_names):
factor_product = factor_product.multiply(factor_j)
factors_absorbtion_dict[i].append(j)
factor_processed_mask[j] = 1
factor_processed_mask[i] = 1
if factor_processed_mask[i]:
final_graph_cluster_factors.append(factor_product)
for i, factor_i in enumerate(factors): # add remaining factors
if not factor_processed_mask[i]:
factor_processed_mask[i] = 1
final_graph_cluster_factors.append(factor_i)
assert all(
factor_processed_mask
), "Error: Some factors where not included during variable subset processing."
return final_graph_cluster_factors
class ClusterGraph:
    """
    A class for building and performing inference with cluster graphs.
    """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, factors, evidence=None, special_evidence=None, disable_tqdm=False):
        """
        Construct a Cluster graph from a list of factors.
        :param factors: The factors to construct the graph from
        :type factors: factor list
        :param dict evidence: evidence dictionary (mapping variable names to values) that should be used to reduce
            factors before building the cluster graph. Example: {'a': 2, 'b':1}
        :param dict special_evidence: evidence dictionary (mapping variable names to values) that should be used in the
            calculation of messages, and not to reduce factors. This allows factor approximations - such as the
            non-linear Gaussian to be iteratively refined. Example: {'a': 2, 'b':1}
        :param bool disable_tqdm: Disable the tqdm progress bars used in graph construction and processing.
        """
        # The `verbose` and `debug` attributes below are set as plain attributes (not parameters)
        # and can be toggled on an instance after construction.
        # TODO: see if evidence and special_evidence can be replaced by a single variable.
        self.num_messages_passed = 0
        self.disable_tqdm = disable_tqdm
        self.verbose = False
        self.debug = False
        self.sync_message_passing_max_distances = []
        if special_evidence is None:
            special_evidence = dict()
        self.special_evidence = special_evidence
        all_evidence_vars = set(self.special_evidence.keys())
        if evidence is not None:
            evidence_vars = set(evidence.keys())
            all_evidence_vars = all_evidence_vars.union(evidence_vars)
        all_factors_copy = _evidence_reduce_factors(factors, evidence)
        final_graph_cluster_factors = _absorb_subset_factors(all_factors_copy)
        clusters = [
            Cluster(factor, cluster_name_prefix=f"c{i}#")
            for i, factor in enumerate(final_graph_cluster_factors)
        ]
        self._set_non_rip_sepsets_dict(clusters, all_evidence_vars)
        self._clusters = clusters
        # Add special evidence to factors
        for cluster in self._clusters:
            cluster_special_evidence_vars, cluster_special_evidence_values = get_subset_evidence(
                self.special_evidence, cluster.var_names
            )
            cluster_special_evidence = dict(
                zip(cluster_special_evidence_vars, cluster_special_evidence_values)
            )
            cluster.add_special_evidence(cluster_special_evidence)
        self.graph_message_paths = collections.deque([])
        self._build_graph()
        # TODO: consolidate these two, if possible
        self.message_passing_animation_frames = []
        self.passed_messages = []

    def _set_non_rip_sepsets_dict(self, clusters, all_evidence_vars):
        """
        Calculate the preliminary sepsets dict before the RIP property is enforced.
        :param clusters: The clusters for which the sepsets should be calculated.
        :param all_evidence_vars: The variables for which there is observed evidence.
        """
        # Sepsets exclude observed variables - evidence variables carry no information to share.
        self._non_rip_sepsets = {}
        for i in tqdm(range(len(clusters)), disable=self.disable_tqdm):
            vars_i = clusters[i].var_names
            for j in range(i + 1, len(clusters)):
                vars_j = clusters[j].var_names
                sepset = set(vars_j).intersection(set(vars_i)) - all_evidence_vars
                self._non_rip_sepsets[(i, j)] = sepset
                self._non_rip_sepsets[(j, i)] = sepset

    def _build_graph(self):
        """
        Add the cluster sepsets, graphviz graph and animation graph (for message_passing visualisation).
        """
        # Check for non-unique cluster_ids (This should never be the case)
        cluster_ids = [cluster.cluster_id for cluster in self._clusters]
        if len(set(cluster_ids)) != len(cluster_ids):
            raise ValueError(f"Non-unique cluster ids: {cluster_ids}")
        self._conditional_print("Info: Building graph.")
        self._graph = graphviz.Graph(format="png")
        rip_sepsets_dict = self._get_running_intersection_sepsets()
        # TODO: see why this is necessary, remove if not
        for i in tqdm(range(len(self._clusters)), disable=self.disable_tqdm):
            self._clusters[i].remove_all_neighbours()
        self._conditional_print(f"Debug: number of clusters: {len(self._clusters)}")
        for i in tqdm(range(len(self._clusters)), disable=self.disable_tqdm):
            node_i_name = self._clusters[i]._cluster_id
            self._graph.node(
                name=node_i_name, label=node_i_name, style="filled", fillcolor="white", color="black"
            )
            for j in range(i + 1, len(self._clusters)):
                if (i, j) in rip_sepsets_dict:
                    sepset = rip_sepsets_dict[(i, j)]
                    assert len(sepset) > 0, "Error: empty sepset"
                    self._clusters[i].add_neighbour(self._clusters[j], sepset=sepset)
                    self._clusters[j].add_neighbour(self._clusters[i], sepset=sepset)
                    # One message path per direction along each edge.
                    gmp_ij = _GraphMessagePath(self._clusters[i], self._clusters[j])
                    gmp_ji = _GraphMessagePath(self._clusters[j], self._clusters[i])
                    self.graph_message_paths.append(gmp_ij)
                    self.graph_message_paths.append(gmp_ji)
                    self._clusters[i].add_outward_message_path(gmp_ij)
                    self._clusters[j].add_outward_message_path(gmp_ji)
                    # Graph animation
                    node_j_name = self._clusters[j]._cluster_id
                    sepset_node_label = ",".join(sepset)
                    sepset_node_name = cg_animation.make_sepset_node_name(node_i_name, node_j_name)
                    self._graph.node(name=sepset_node_name, label=sepset_node_label, shape="rectangle")
                    self._graph.edge(node_i_name, sepset_node_name, color="black", penwidth="2.0")
                    self._graph.edge(sepset_node_name, node_j_name, color="black", penwidth="2.0")
        self._conditional_print(f"num graph message paths: {len(self.graph_message_paths)}")

    def _conditional_print(self, message):
        """
        Print message if verbose is True.
        :param message: The message to print.
        """
        if self.verbose:
            print(message)

    def plot_next_messages_info_gain(self, legend_on=False, figsize=None):
        """
        Plot the information gained by a receiving new messages over subsequent iterations for all message paths in the
        graph.
        :param bool legend_on: Whether or not to show the message paths (specified by connected cluster pairs) in the
            plot legend.
        :param list figsize: The matplotlib figure size.
        """
        if figsize is None:
            figsize = DEFAULT_FIG_SIZE
        plt.figure(figsize=figsize)
        all_paths_information_gains_with_iters = [
            gmp.information_gains_with_iters for gmp in self.graph_message_paths
        ]
        for paths_information_gains_with_iters in all_paths_information_gains_with_iters:
            plt.plot(paths_information_gains_with_iters)
        plt.title("Information Gain of Messages along Graph Message Paths")
        plt.xlabel("iteration")
        plt.ylabel("D_KL(prev_msg||msg)")
        if legend_on:
            legend = [
                f"{gmp.sender_cluster.cluster_id}->{gmp.receiver_cluster.cluster_id}"
                for gmp in self.graph_message_paths
            ]
            plt.legend(legend)

    def plot_message_convergence(self, log=False, figsize=None):
        """
        Plot the KL-divergence between the messages and their previous instances to indicate the message passing
        convergence.
        :param bool log: If True, plot the log of the KL-divergence.
        :param list figsize: The matplotlib [width, height] of the figure.
        """
        if figsize is None:
            figsize = DEFAULT_FIG_SIZE
        mp_max_dists = self.sync_message_passing_max_distances
        if log:
            mp_max_dists = np.log(mp_max_dists)
        # here we tile and flatten to prevent the plot omission of values with inf on either side.
        mp_max_dists = np.tile(mp_max_dists, [2, 1]).flatten(order="F")
        num_iterations = len(mp_max_dists)
        iterations = np.array(list(range(num_iterations))) / 2  # divide by 2 to correct for tile and flatten
        non_inf_max_distances = [d for d in mp_max_dists if d != np.inf]
        max_non_inf = max(non_inf_max_distances)
        # Infinite divergences are replaced by a finite sentinel so they remain visible on the plot.
        new_inf_value = max_non_inf * 1.5
        max_distances_replaces_infs = np.array([v if v != np.inf else new_inf_value for v in mp_max_dists])
        inf_values = np.ma.masked_where(
            max_distances_replaces_infs != new_inf_value, max_distances_replaces_infs
        )
        plt.figure(figsize=figsize)
        plt.plot(iterations, max_distances_replaces_infs)
        plt.plot(iterations, inf_values, c="r", linewidth=2)
        if len(non_inf_max_distances) != len(mp_max_dists):
            custom_lines = [Line2D([0], [0], color="r", lw=4)]
            plt.legend(custom_lines, ["infinity"])
        plt.title("Message Passing Convergence")
        plt.xlabel("iteration")
        plt.ylabel("log max D_KL(prev_msg||msg)")
        plt.show()

    def _get_unique_vars(self):
        """
        Get the set of variables in the graph.
        :return: The variables
        :rtype: list
        """
        all_vars = []
        for cluster in self._clusters:
            all_vars += cluster.var_names
        unique_vars = list(set(all_vars))
        return unique_vars

    def _get_vars_min_spanning_trees(self):
        """
        Get the minimum spanning trees of all the variables in the graph.
        """
        all_vars = self._get_unique_vars()
        var_graphs = {var: nx.Graph() for var in all_vars}
        num_clusters = len(self._clusters)
        for i in range(num_clusters):
            for j in range(i + 1, num_clusters):
                sepset = self._non_rip_sepsets[(i, j)]
                for var in sepset:
                    var_graphs[var].add_edge(i, j, weight=1)
        var_spanning_trees = dict()
        for var in all_vars:
            var_graph = var_graphs[var]
            var_spanning_trees[var] = nx.minimum_spanning_tree(var_graph)
        return var_spanning_trees

    def _get_running_intersection_sepsets(self):
        """
        Get a set of sepsets for the graph, such that the graph, with these sepsets satisfies the running intersection
        property.
        """
        edge_sepset_dict = {}
        unique_vars = self._get_unique_vars()
        min_span_trees = self._get_vars_min_spanning_trees()
        self._conditional_print("Info: Getting unique variable spanning trees.")
        for i in tqdm(range(len(unique_vars)), disable=self.disable_tqdm):
            var = unique_vars[i]
            min_span_tree = min_span_trees[var]
            for edge in min_span_tree.edges():
                if edge in edge_sepset_dict:
                    edge_sepset_dict[edge].append(var)
                else:
                    edge_sepset_dict[edge] = [var]
        return edge_sepset_dict

    def show(self):
        """
        Show the cluster graph.
        """
        self._graph.render("/tmp/test.gv", view=False)
        image = IPython.core.display.Image("/tmp/test.gv.png")
        IPython.core.display.display(image)

    def save_graph_image(self, filename):
        """
        Save image of the graph.
        :param filename: The filename of the file.
        """
        # NOTE(review): graphviz.Source(...) only constructs an object here; rendering to disk
        # typically requires a .render() call - verify that this actually writes the file.
        graphviz.Source(self._graph, filename=filename, format="png")

    def get_marginal(self, vrs):
        """
        Search the graph for a specific variable and get that variables marginal (posterior marginal if process_graph
        has been run previously).
        :param vrs: The variable names to get the marginal over (must all lie within one cluster's scope).
        :return: The marginal
        :rtype: Factor child
        """
        for cluster in self._clusters:
            if set(vrs) <= set(cluster.var_names):
                factor = cluster.factor
                evidence_vrs, evidence_values = get_subset_evidence(self.special_evidence, factor.var_names)
                if len(evidence_vrs) > 0:
                    factor = factor.reduce(evidence_vrs, evidence_values)
                marginal = factor.marginalize(vrs, keep=True)
                return marginal
        raise ValueError(f"No cluster with variables containing {vrs}")

    def get_posterior_joint(self):
        """
        Get the posterior joint distribution. This function is only intended to be used as a research / debugging tool
        for small networks.
        """
        # TODO: add functionality for efficiently getting a posterior marginal over any subset of variables and replace
        #  the get_marginal function above.
        cluster_product = self._clusters[0]._factor.joint_distribution
        for cluster in self._clusters[1:]:
            cluster_product = cluster_product.multiply(cluster._factor.joint_distribution)
        last_passed_message_factors = self._last_passed_message_factors
        if len(last_passed_message_factors) == 0:
            assert self.num_messages_passed == 0
            joint = cluster_product
        else:
            # Divide out the last round of messages to avoid double counting shared information.
            message_product = last_passed_message_factors[0]
            for message_factor in last_passed_message_factors[1:]:
                message_product = message_product.multiply(message_factor)
            joint = cluster_product.cancel(message_product)
        return joint

    def process_graph(self, tol=1e-3, max_iter=50, make_animation_gif=False):
        """
        Perform synchronous message passing until convergence (or maximum iterations).
        :param tol: The minimum tolerance value for the KL divergence D_KL(previous_message || next_message) that needs
            to be reached (for all messages) before stopping message passing (before max_iter is reached).
        :param max_iter: The maximum number of iterations of message passing. The maximum number of messages that can be
            passed is max_iter * n, where n is the number of message paths (2x the number of edges) in the graph.
        :param bool make_animation_gif: Whether or not to create an animation of the message passing process. Note:
            This can cause slow processing and high memory consumption for large graphs and is therefore recommended to
            be used only with very small (<50 cluster) graphs.
        """
        self.sync_message_passing_max_distances = []
        if len(self._clusters) == 1:
            # The Cluster Graph contains only single cluster. Message passing not possible or necessary.
            if self.special_evidence:
                evidence_vrs = list(self.special_evidence.keys())
                evidence_values = list(self.special_evidence.values())
                self._clusters[0]._factor = self._clusters[0]._factor.reduce(
                    vrs=evidence_vrs, values=evidence_values
                )
            return
        # TODO: see if the definition of max_iter can be improved
        key_func = lambda x: x.next_information_gain
        max_message_passes = max_iter * len(self.graph_message_paths)
        # Keep the message paths sorted by descending information gain so that the most
        # informative message is always passed next (greedy residual scheduling).
        self.graph_message_paths = collections.deque(
            sorted(self.graph_message_paths, key=key_func, reverse=True)
        )
        for _ in tqdm(range(max_message_passes), disable=self.disable_tqdm):
            sender_cluster_id = self.graph_message_paths[0].sender_cluster.cluster_id
            receiver_cluster_id = self.graph_message_paths[0].receiver_cluster.cluster_id
            if self.debug:
                self.passed_messages.append(self.graph_message_paths[0].next_message.copy())
            self.graph_message_paths[0].pass_next_message()
            self.num_messages_passed += 1
            self.graph_message_paths = collections.deque(
                sorted(self.graph_message_paths, key=key_func, reverse=True)
            )
            # TODO: test this:
            # self.graph_message_paths = _sort_almost_sorted(self.graph_message_paths, key=key_func)
            max_next_information_gain = self.graph_message_paths[0].next_information_gain
            self.sync_message_passing_max_distances.append(max_next_information_gain)
            if max_next_information_gain <= tol:
                return
            if make_animation_gif:
                cg_animation.add_message_pass_animation_frames(
                    graph=self._graph,
                    frames=self.message_passing_animation_frames,
                    node_a_name=sender_cluster_id,
                    node_b_name=receiver_cluster_id,
                )

    @property
    def _last_passed_message_factors(self):
        """
        The factors of the messages passed in the last iteration of message passing.
        """
        return [gmp.previously_sent_message.factor for gmp in self.graph_message_paths]

    def make_message_passing_animation_gif(self, filename="graph_animation.gif"):
        """
        Make message passing animation and save a GIF of the animation to file. Note that this function will only work
        if the make_animation_gif variable was set to True when the process_graph method was called.
        :param str filename: The name of the file.
        """
        # Fix: the filename parameter was previously ignored (the save path was a hard-coded literal).
        self.message_passing_animation_frames[0].save(
            fp=f"./{filename}",
            format="GIF",
            append_images=self.message_passing_animation_frames[1:],
            save_all=True,
            duration=400,
            loop=0,
        )
class _GraphMessagePath:
"""
A specific path (direction along an edge) in a graph along which a message can be passed.
"""
def __init__(self, sender_cluster, receiver_cluster):
"""
The initializer.
:param Cluster sender_cluster: The cluster that defines the starting point of the path.
:param Cluster receiver_cluster: The cluster that defines the end point of the path.
"""
self.sender_cluster = sender_cluster
self.receiver_cluster = receiver_cluster
self.previously_sent_message = None
self.next_message = self.sender_cluster.make_message(self.receiver_cluster.cluster_id)
self.next_information_gain = None
self.information_gains_with_iters = []
self.update_next_information_gain()
def update_next_information_gain(self):
"""
Calculate the information gain that will be achieved when passing the next message.
"""
if self.previously_sent_message is None:
self.next_information_gain = self.next_message.distance_from_vacuous()
else:
# "In the context of machine learning, KL(P||Q) is often called the information gain achieved if Q is
# used instead of P." - wikipedia
# We typically want to know which new message (Q) will result in the largest information gain if it replaces
# the message (P)
# message: previous_message (P)
# factor: next message (Q)
# P.kl_divergence(Q)
self.next_information_gain = self.previously_sent_message.kl_divergence(self.next_message)
self.information_gains_with_iters.append(self.next_information_gain)
def recompute_next_message(self):
"""
Recompute the next message.
"""
new_next_message = self.sender_cluster.make_message(self.receiver_cluster.cluster_id)
self.next_message = new_next_message.copy()
self.update_next_information_gain()
def pass_next_message(self):
"""
Pass the next message along this path.
"""
self.receiver_cluster.receive_message(self.next_message)
self.previously_sent_message = self.next_message.copy()
self.next_information_gain = 0.0
self.information_gains_with_iters.append(self.next_information_gain)
for gmp in self.receiver_cluster._outward_message_paths:
gmp.recompute_next_message()
| [
"IPython.core.display.display",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.array",
"graphviz.Graph",
"matplotlib.lines.Line2D",
"veroku._cg_helpers._cluster.Cluster",
"collections.deque",
"networkx.minimum_spanning_tree",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.ma.mas... | [((6956, 6977), 'collections.deque', 'collections.deque', (['[]'], {}), '([])\n', (6973, 6977), False, 'import collections\n'), ((8442, 8470), 'graphviz.Graph', 'graphviz.Graph', ([], {'format': '"""png"""'}), "(format='png')\n", (8456, 8470), False, 'import graphviz\n'), ((11304, 11331), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (11314, 11331), True, 'import matplotlib.pyplot as plt\n'), ((11629, 11696), 'matplotlib.pyplot.title', 'plt.title', (['"""Information Gain of Messages along Graph Message Paths"""'], {}), "('Information Gain of Messages along Graph Message Paths')\n", (11638, 11696), True, 'import matplotlib.pyplot as plt\n'), ((11705, 11728), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration"""'], {}), "('iteration')\n", (11715, 11728), True, 'import matplotlib.pyplot as plt\n'), ((11737, 11770), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""D_KL(prev_msg||msg)"""'], {}), "('D_KL(prev_msg||msg)')\n", (11747, 11770), True, 'import matplotlib.pyplot as plt\n'), ((13093, 13164), 'numpy.array', 'np.array', (['[(v if v != np.inf else new_inf_value) for v in mp_max_dists]'], {}), '([(v if v != np.inf else new_inf_value) for v in mp_max_dists])\n', (13101, 13164), True, 'import numpy as np\n'), ((13184, 13281), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(max_distances_replaces_infs != new_inf_value)', 'max_distances_replaces_infs'], {}), '(max_distances_replaces_infs != new_inf_value,\n max_distances_replaces_infs)\n', (13202, 13281), True, 'import numpy as np\n'), ((13308, 13335), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (13318, 13335), True, 'import matplotlib.pyplot as plt\n'), ((13344, 13393), 'matplotlib.pyplot.plot', 'plt.plot', (['iterations', 'max_distances_replaces_infs'], {}), '(iterations, max_distances_replaces_infs)\n', (13352, 13393), True, 'import matplotlib.pyplot as plt\n'), ((13402, 13454), 
'matplotlib.pyplot.plot', 'plt.plot', (['iterations', 'inf_values'], {'c': '"""r"""', 'linewidth': '(2)'}), "(iterations, inf_values, c='r', linewidth=2)\n", (13410, 13454), True, 'import matplotlib.pyplot as plt\n'), ((13637, 13677), 'matplotlib.pyplot.title', 'plt.title', (['"""Message Passing Convergence"""'], {}), "('Message Passing Convergence')\n", (13646, 13677), True, 'import matplotlib.pyplot as plt\n'), ((13686, 13709), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration"""'], {}), "('iteration')\n", (13696, 13709), True, 'import matplotlib.pyplot as plt\n'), ((13718, 13759), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log max D_KL(prev_msg||msg)"""'], {}), "('log max D_KL(prev_msg||msg)')\n", (13728, 13759), True, 'import matplotlib.pyplot as plt\n'), ((13768, 13778), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13776, 13778), True, 'import matplotlib.pyplot as plt\n'), ((15832, 15878), 'IPython.core.display.Image', 'IPython.core.display.Image', (['"""/tmp/test.gv.png"""'], {}), "('/tmp/test.gv.png')\n", (15858, 15878), False, 'import IPython\n'), ((15887, 15922), 'IPython.core.display.display', 'IPython.core.display.display', (['image'], {}), '(image)\n', (15915, 15922), False, 'import IPython\n'), ((16083, 16144), 'graphviz.Source', 'graphviz.Source', (['self._graph'], {'filename': 'filename', 'format': '"""png"""'}), "(self._graph, filename=filename, format='png')\n", (16098, 16144), False, 'import graphviz\n'), ((2253, 2330), 'veroku.factors._factor_utils.get_subset_evidence', 'get_subset_evidence', ([], {'all_evidence_dict': 'evidence', 'subset_vars': 'factor.var_names'}), '(all_evidence_dict=evidence, subset_vars=factor.var_names)\n', (2272, 2330), False, 'from veroku.factors._factor_utils import get_subset_evidence\n'), ((6232, 6277), 'veroku._cg_helpers._cluster.Cluster', 'Cluster', (['factor'], {'cluster_name_prefix': 'f"""c{i}#"""'}), "(factor, cluster_name_prefix=f'c{i}#')\n", (6239, 6277), False, 'from 
veroku._cg_helpers._cluster import Cluster\n'), ((6618, 6679), 'veroku.factors._factor_utils.get_subset_evidence', 'get_subset_evidence', (['self.special_evidence', 'cluster.var_names'], {}), '(self.special_evidence, cluster.var_names)\n', (6637, 6679), False, 'from veroku.factors._factor_utils import get_subset_evidence\n'), ((11576, 11620), 'matplotlib.pyplot.plot', 'plt.plot', (['paths_information_gains_with_iters'], {}), '(paths_information_gains_with_iters)\n', (11584, 11620), True, 'import matplotlib.pyplot as plt\n'), ((11980, 11998), 'matplotlib.pyplot.legend', 'plt.legend', (['legend'], {}), '(legend)\n', (11990, 11998), True, 'import matplotlib.pyplot as plt\n'), ((12545, 12565), 'numpy.log', 'np.log', (['mp_max_dists'], {}), '(mp_max_dists)\n', (12551, 12565), True, 'import numpy as np\n'), ((13590, 13628), 'matplotlib.pyplot.legend', 'plt.legend', (['custom_lines', "['infinity']"], {}), "(custom_lines, ['infinity'])\n", (13600, 13628), True, 'import matplotlib.pyplot as plt\n'), ((14321, 14331), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (14329, 14331), True, 'import networkx as nx\n'), ((14777, 14812), 'networkx.minimum_spanning_tree', 'nx.minimum_spanning_tree', (['var_graph'], {}), '(var_graph)\n', (14801, 14812), True, 'import networkx as nx\n'), ((12688, 12717), 'numpy.tile', 'np.tile', (['mp_max_dists', '[2, 1]'], {}), '(mp_max_dists, [2, 1])\n', (12695, 12717), True, 'import numpy as np\n'), ((13543, 13576), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""r"""', 'lw': '(4)'}), "([0], [0], color='r', lw=4)\n", (13549, 13576), False, 'from matplotlib.lines import Line2D\n'), ((16593, 16653), 'veroku.factors._factor_utils.get_subset_evidence', 'get_subset_evidence', (['self.special_evidence', 'factor.var_names'], {}), '(self.special_evidence, factor.var_names)\n', (16612, 16653), False, 'from veroku.factors._factor_utils import get_subset_evidence\n'), ((20907, 21091), 
'veroku._cg_helpers._animation.add_message_pass_animation_frames', 'cg_animation.add_message_pass_animation_frames', ([], {'graph': 'self._graph', 'frames': 'self.message_passing_animation_frames', 'node_a_name': 'sender_cluster_id', 'node_b_name': 'receiver_cluster_id'}), '(graph=self._graph, frames=\n self.message_passing_animation_frames, node_a_name=sender_cluster_id,\n node_b_name=receiver_cluster_id)\n', (20953, 21091), True, 'import veroku._cg_helpers._animation as cg_animation\n'), ((10124, 10184), 'veroku._cg_helpers._animation.make_sepset_node_name', 'cg_animation.make_sepset_node_name', (['node_i_name', 'node_j_name'], {}), '(node_i_name, node_j_name)\n', (10158, 10184), True, 'import veroku._cg_helpers._animation as cg_animation\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.