Dataset schema:
- code: string (31 to 1.05M characters) — the raw source file
- apis: list — the fully qualified APIs called in the file
- extract_api: string (97 to 1.91M characters) — per-call extraction records with character offsets into `code`
import Agent
import aux_functions
from collections import deque
import pickle
import numpy as np
import torch
import torch.optim as optim


def init_algo(data_path, history_power_td=60000, weather_dim=6):
    agents = deque(maxlen=4)
    policy = Agent.Policy(state_size=weather_dim)
    optimizer = optim.Adam(policy.parameters(), lr=1e-2)
    history_power = deque(maxlen=history_power_td)
    replay_buffer = deque(maxlen=50)
    rewards = []
    data_path = data_path
    return agents, policy, optimizer, history_power, replay_buffer, rewards, data_path


def run_new_power(new_power_data):
    # Uses the module-level state created by init_algo() below.
    new_power_value = aux_functions.preprocess_power_data(new_power_data)
    history_power.append(new_power_value)
    for agent in agents:
        agent.update_power_list(new_power_value)


def run_new_weather(new_weather_data):
    new_weather_array = aux_functions.preprocess_weather_data(new_weather_data)
    new_weather_input = np.mean(new_weather_array, axis=0)
    policy.eval()
    with torch.no_grad():
        input = torch.Tensor(new_weather_input)
        output = policy(input)
    policy.train()
    if len(agents) < 4:
        agents.append(Agent.Agent())
    elif len(agents) == 4:
        # Retire the oldest agent: it acts, collects its reward and learns.
        oldest_agent = agents.popleft()
        agents.append(Agent.Agent())
        oldest_agent.action, oldest_agent.log_prob = oldest_agent.act(policy)
        oldest_agent.reward = oldest_agent.get_reward(
            oldest_agent.action,
            history_power_mean=sum(history_power) / len(history_power))
        oldest_agent.learn(oldest_agent.reward, oldest_agent.log_prob, optimizer)
        replay_buffer.append(agents[-1])
        for agent in replay_buffer:
            agent.learn(agent.reward, agent.log_prob, optimizer)
        torch.save(policy.state_dict(), data_path + '/policy.pt')
        with open(data_path + '/rewards.pickle', 'wb') as f:
            pickle.dump(rewards, f)
    newest_agent = agents[-1]
    newest_agent.save_weather_data(new_weather_input)
    newest_agent.act(policy)
    return output


# The global variables get initialized. "rewards" can be saved on disk.
# All other variables have to be kept in memory.
# NOTE: init_algo() requires a data_path argument, and NEW_POWER_STATUS,
# NEW_WEATHER_STATUS, new_power_data and new_weather_data must be supplied
# by the surrounding application; this bottom section is a usage sketch.
agents, policy, optimizer, history_power, replay_buffer, rewards, data_path = init_algo()

if NEW_POWER_STATUS:  # As often as possible
    run_new_power(new_power_data)

if NEW_WEATHER_STATUS:  # Every 30min
    proposed_action = run_new_weather(new_weather_data)
[ "numpy.mean", "collections.deque", "pickle.dump", "aux_functions.preprocess_weather_data", "aux_functions.preprocess_power_data", "torch.Tensor", "torch.no_grad", "Agent.Policy", "Agent.Agent" ]
[((226, 241), 'collections.deque', 'deque', ([], {'maxlen': '(4)'}), '(maxlen=4)\n', (231, 241), False, 'from collections import deque\n'), ((257, 293), 'Agent.Policy', 'Agent.Policy', ([], {'state_size': 'weather_dim'}), '(state_size=weather_dim)\n', (269, 293), False, 'import Agent\n'), ((383, 413), 'collections.deque', 'deque', ([], {'maxlen': 'history_power_td'}), '(maxlen=history_power_td)\n', (388, 413), False, 'from collections import deque\n'), ((441, 457), 'collections.deque', 'deque', ([], {'maxlen': '(50)'}), '(maxlen=50)\n', (446, 457), False, 'from collections import deque\n'), ((681, 732), 'aux_functions.preprocess_power_data', 'aux_functions.preprocess_power_data', (['new_power_data'], {}), '(new_power_data)\n', (716, 732), False, 'import aux_functions\n'), ((934, 989), 'aux_functions.preprocess_weather_data', 'aux_functions.preprocess_weather_data', (['new_weather_data'], {}), '(new_weather_data)\n', (971, 989), False, 'import aux_functions\n'), ((1015, 1049), 'numpy.mean', 'np.mean', (['new_weather_array'], {'axis': '(0)'}), '(new_weather_array, axis=0)\n', (1022, 1049), True, 'import numpy as np\n'), ((1087, 1102), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1100, 1102), False, 'import torch\n'), ((1121, 1152), 'torch.Tensor', 'torch.Tensor', (['new_weather_input'], {}), '(new_weather_input)\n', (1133, 1152), False, 'import torch\n'), ((1960, 1983), 'pickle.dump', 'pickle.dump', (['rewards', 'f'], {}), '(rewards, f)\n', (1971, 1983), False, 'import pickle\n'), ((1261, 1274), 'Agent.Agent', 'Agent.Agent', ([], {}), '()\n', (1272, 1274), False, 'import Agent\n'), ((1368, 1381), 'Agent.Agent', 'Agent.Agent', ([], {}), '()\n', (1379, 1381), False, 'import Agent\n')]
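The `extract_api` cell above is itself a Python literal: a list of tuples pairing a character span inside the `code` cell with the resolved API. A minimal parsing sketch — the field meanings are inferred from the rows in this file, not from any documentation:

```python
import ast

def parse_extract_api(cell: str) -> list:
    """Parse one extract_api cell into structured records.

    Each tuple appears to hold: (call_span, qualified_name, local_name,
    (args, kwargs), call_suffix, callee_name_span, uses_alias, import_stmt),
    where uses_alias seems to be True when the call goes through an import
    alias (e.g. 'np' for 'numpy'). These meanings are inferences.
    """
    return ast.literal_eval(cell)

# Hypothetical usage on the cell above:
# rows = parse_extract_api(extract_api_cell)
# span, qualified, local, sig, suffix, name_span, aliased, imp = rows[0]
# assert qualified == 'collections.deque'
```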
""" Used to generate a sample from an MFGP sample. -- <EMAIL> """ # pylint: disable=import-error # pylint: disable=no-member # pylint: disable=invalid-name # pylint: disable=relative-import # pylint: disable=too-many-locals # pylint: disable=no-name-in-module # pylint: disable=superfluous-parens import numpy as np from scipy.interpolate import RectBivariateSpline # Local imports from gp.kernel import SEKernel import mf_func import mf_gp from utils.ancillary_utils import plot_2d_function num_per_dim = 50 spline_degree = 3 def gen_mfgp_sample_as_mfof(mfgp, fidel_cost_func, random_seed): """ Generates an mfgp sample as a mfof. """ if mfgp.fidel_dim != 1 or mfgp.domain_dim != 1: raise NotImplementedError('Only implemented 1 dimensional fidel/domain so far!') # Get/set the random state. st0 = np.random.get_state() np.random.seed(random_seed) # Set some attributes up fidel_bounds = [[0, 1]] * mfgp.fidel_dim domain_bounds = [[0, 1]] * mfgp.domain_dim opt_fidel = np.array([1]) # This part of the code relies on dim_z = dim_x = 1 # Create a grid for interpolation dim_grid = np.linspace(0, 1, num_per_dim) ZZ, XX = np.meshgrid(dim_grid, dim_grid) grid_pts = np.concatenate((ZZ.reshape(-1, 1), XX.reshape(-1, 1)), axis=1) grid_samples = mfgp.draw_samples(1, grid_pts).ravel() grid_samples_as_grid = grid_samples.reshape((num_per_dim, num_per_dim)) rbs = RectBivariateSpline(dim_grid, dim_grid, grid_samples_as_grid, kx=spline_degree, ky=spline_degree) g = lambda z, x: rbs.ev(x, z) # compute optimum point opt_search_grid_size = 1000 opt_search_dom_grid = np.linspace(0, 1, opt_search_grid_size).reshape(-1, 1) opt_search_fidel_m = np.repeat(opt_fidel.reshape(-1, 1), opt_search_grid_size, axis=0) opt_fidel_grid_vals = g(opt_search_fidel_m, opt_search_dom_grid) opt_idx = opt_fidel_grid_vals.argmax() opt_pt = np.array(opt_search_dom_grid[opt_idx]) opt_val = opt_fidel_grid_vals[opt_idx] mfof = mf_func.MFOptFunction(g, fidel_cost_func, fidel_bounds, domain_bounds, opt_fidel, vectorised=True, opt_pt=opt_pt, opt_val=opt_val) mfof.mfgp = mfgp # before returning restate the np random state np.random.set_state(st0) return mfof def gen_mfgp_sample_as_noisy_mfof(mfgp, fidel_cost_func, random_seed, noise_var): """ Generates an mfgp sample as a noisy mfof. """ mfof = gen_mfgp_sample_as_mfof(mfgp, fidel_cost_func, random_seed) return mf_func.get_noisy_mfof_from_mfof(mfof, noise_var) def gen_simple_mfgp_as_mfof(fidel_bw=0.8, random_seed=512): """ Gets a simple mfgp wrapped into an mfof. """ # Create a GP kernel_scale = 2 fidel_kernel = SEKernel(1, 1, [fidel_bw]) domain_kernel = SEKernel(1, 1, [0.08]) noise_var = 0.1 dummy_ZZ = np.zeros((0, 1)) dummy_XX = np.zeros((0, 1)) dummy_YY = np.zeros((0)) mean_func = lambda x: np.zeros((len(x))) mfgp = mf_gp.get_mfgp_from_fidel_domain(dummy_ZZ, dummy_XX, dummy_YY, kernel_scale, fidel_kernel, domain_kernel, mean_func, noise_var, build_posterior=True) # Get an mfof object fidel_cost_func = lambda z: 0.2 + 6 * z ** 2 return gen_mfgp_sample_as_mfof(mfgp, fidel_cost_func, random_seed) def visualise_mfof(mfof): """ Visualises the mfof object. """ plot_func = mfof.eval_multiple _, ax, plt = plot_2d_function(plot_func, np.array([mfof.fidel_bounds[0], mfof.domain_bounds[0]]), x_label='fidel', y_label='domain') ax.scatter(mfof.opt_fidel, mfof.opt_pt, mfof.opt_val, c='r', s=100) plt.show() def main(): """ Main function. """ print(np.random.random()) mfof = gen_simple_mfgp_as_mfof() visualise_mfof(mfof) print(np.random.random()) if __name__ == '__main__': main()
[ "numpy.random.get_state", "numpy.random.set_state", "scipy.interpolate.RectBivariateSpline", "mf_func.get_noisy_mfof_from_mfof", "numpy.random.random", "gp.kernel.SEKernel", "numpy.array", "numpy.linspace", "numpy.zeros", "mf_func.MFOptFunction", "numpy.random.seed", "mf_gp.get_mfgp_from_fidel...
[((821, 842), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (840, 842), True, 'import numpy as np\n'), ((845, 872), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (859, 872), True, 'import numpy as np\n'), ((1003, 1016), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1011, 1016), True, 'import numpy as np\n'), ((1121, 1151), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_per_dim'], {}), '(0, 1, num_per_dim)\n', (1132, 1151), True, 'import numpy as np\n'), ((1163, 1194), 'numpy.meshgrid', 'np.meshgrid', (['dim_grid', 'dim_grid'], {}), '(dim_grid, dim_grid)\n', (1174, 1194), True, 'import numpy as np\n'), ((1410, 1512), 'scipy.interpolate.RectBivariateSpline', 'RectBivariateSpline', (['dim_grid', 'dim_grid', 'grid_samples_as_grid'], {'kx': 'spline_degree', 'ky': 'spline_degree'}), '(dim_grid, dim_grid, grid_samples_as_grid, kx=\n spline_degree, ky=spline_degree)\n', (1429, 1512), False, 'from scipy.interpolate import RectBivariateSpline\n'), ((1912, 1950), 'numpy.array', 'np.array', (['opt_search_dom_grid[opt_idx]'], {}), '(opt_search_dom_grid[opt_idx])\n', (1920, 1950), True, 'import numpy as np\n'), ((2002, 2136), 'mf_func.MFOptFunction', 'mf_func.MFOptFunction', (['g', 'fidel_cost_func', 'fidel_bounds', 'domain_bounds', 'opt_fidel'], {'vectorised': '(True)', 'opt_pt': 'opt_pt', 'opt_val': 'opt_val'}), '(g, fidel_cost_func, fidel_bounds, domain_bounds,\n opt_fidel, vectorised=True, opt_pt=opt_pt, opt_val=opt_val)\n', (2023, 2136), False, 'import mf_func\n'), ((2234, 2258), 'numpy.random.set_state', 'np.random.set_state', (['st0'], {}), '(st0)\n', (2253, 2258), True, 'import numpy as np\n'), ((2487, 2536), 'mf_func.get_noisy_mfof_from_mfof', 'mf_func.get_noisy_mfof_from_mfof', (['mfof', 'noise_var'], {}), '(mfof, noise_var)\n', (2519, 2536), False, 'import mf_func\n'), ((2702, 2728), 'gp.kernel.SEKernel', 'SEKernel', (['(1)', '(1)', '[fidel_bw]'], {}), '(1, 1, [fidel_bw])\n', (2710, 2728), False, 'from gp.kernel import SEKernel\n'), ((2747, 2769), 'gp.kernel.SEKernel', 'SEKernel', (['(1)', '(1)', '[0.08]'], {}), '(1, 1, [0.08])\n', (2755, 2769), False, 'from gp.kernel import SEKernel\n'), ((2801, 2817), 'numpy.zeros', 'np.zeros', (['(0, 1)'], {}), '((0, 1))\n', (2809, 2817), True, 'import numpy as np\n'), ((2831, 2847), 'numpy.zeros', 'np.zeros', (['(0, 1)'], {}), '((0, 1))\n', (2839, 2847), True, 'import numpy as np\n'), ((2861, 2872), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (2869, 2872), True, 'import numpy as np\n'), ((2927, 3080), 'mf_gp.get_mfgp_from_fidel_domain', 'mf_gp.get_mfgp_from_fidel_domain', (['dummy_ZZ', 'dummy_XX', 'dummy_YY', 'kernel_scale', 'fidel_kernel', 'domain_kernel', 'mean_func', 'noise_var'], {'build_posterior': '(True)'}), '(dummy_ZZ, dummy_XX, dummy_YY, kernel_scale,\n fidel_kernel, domain_kernel, mean_func, noise_var, build_posterior=True)\n', (2959, 3080), False, 'import mf_gp\n'), ((3400, 3455), 'numpy.array', 'np.array', (['[mfof.fidel_bounds[0], mfof.domain_bounds[0]]'], {}), '([mfof.fidel_bounds[0], mfof.domain_bounds[0]])\n', (3408, 3455), True, 'import numpy as np\n'), ((3653, 3671), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3669, 3671), True, 'import numpy as np\n'), ((3739, 3757), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3755, 3757), True, 'import numpy as np\n'), ((1649, 1688), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'opt_search_grid_size'], {}), '(0, 1, opt_search_grid_size)\n', (1660, 1688), True, 'import numpy as np\n')]
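The core trick in `gen_mfgp_sample_as_mfof` above is to draw the GP sample only on a coarse grid and then stand in a cheap `RectBivariateSpline` for it, so the returned function can be evaluated anywhere. A self-contained sketch of that interpolation step, with random values in place of a real GP draw:

```python
import numpy as np
from scipy.interpolate import RectBivariateSpline

rng = np.random.default_rng(0)
grid = np.linspace(0, 1, 50)
# stand-in for mfgp.draw_samples(...) reshaped onto the grid
values_on_grid = rng.standard_normal((50, 50))
rbs = RectBivariateSpline(grid, grid, values_on_grid, kx=3, ky=3)
g = lambda z, x: rbs.ev(x, z)  # note the (x, z) argument swap, as in the module above

# the surrogate can now be evaluated at arbitrary fidelity/domain points
print(g(np.array([0.5]), np.array([0.25])))
```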
#! /usr/bin/env python3
from mvnc import mvncapi as mvnc
import numpy, cv2
import sys, os
import cPickle as pickle  # NOTE: cPickle is Python 2 only, despite the python3 shebang; use "import pickle" on Python 3
import fd


def getPaddingSize(img):
    h, w, _ = img.shape
    top, bottom, left, right = (0, 0, 0, 0)
    if w < int(h / 3 * 4):
        tmp = int(h / 3 * 4) - w
        left = tmp // 2
        right = tmp - left
    elif h < int(w / 4 * 3):
        tmp = int(w / 4 * 3) - h
        top = tmp // 2
        bottom = tmp - top
    else:
        pass
    return top, bottom, left, right


def run_inference(image_to_classify, args):
    scaled_image = cv2.resize(image_to_classify, (640, 480))
    image = scaled_image
    top, bottom, left, right = getPaddingSize(image)
    image = cv2.copyMakeBorder(image, top, bottom, left, right,
                               cv2.BORDER_CONSTANT, value=[0, 0, 0])
    frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    face_image = fd.detect_face(frame)
    if len(face_image) == 0 or numpy.any(face_image[0]) == None:
        return None
    resized_image = preprocess_image(face_image[0])

    # ***************************************************************
    # Send the image to the NCS
    # ***************************************************************
    devices = mvnc.EnumerateDevices()
    device = mvnc.Device(devices[0])
    device.OpenDevice()
    graph_file_name = args.facenetGraph
    with open(graph_file_name, mode='rb') as f:
        graph_in_memory = f.read()
    facenet_graph = device.AllocateGraph(graph_in_memory)
    facenet_graph.LoadTensor(resized_image.astype(numpy.float16), None)

    # ***************************************************************
    # Get the result from the NCS
    # ***************************************************************
    output, userobj = facenet_graph.GetResult()
    device.CloseDevice()
    return output


# whiten an image
def whiten_image(source_image):
    source_mean = numpy.mean(source_image)
    source_standard_deviation = numpy.std(source_image)
    std_adjusted = numpy.maximum(source_standard_deviation, 1.0 / numpy.sqrt(source_image.size))
    whitened_image = numpy.multiply(numpy.subtract(source_image, source_mean), 1 / std_adjusted)
    return whitened_image


def preprocess_image(src):
    NETWORK_WIDTH = 160
    NETWORK_HEIGHT = 160
    preprocessed_image = cv2.resize(src, (NETWORK_WIDTH, NETWORK_HEIGHT))
    # convert to RGB
    preprocessed_image = cv2.cvtColor(preprocessed_image, cv2.COLOR_BGR2RGB)
    # whiten
    preprocessed_image = whiten_image(preprocessed_image)
    # return the preprocessed image
    return preprocessed_image


def face_diff(face1_output, face2_output):
    if (len(face1_output) != len(face2_output)):
        print('length mismatch in face_match')
        return False
    total_diff = 0
    for output_index in range(0, len(face1_output)):
        this_diff = numpy.square(face1_output[output_index] - face2_output[output_index])
        total_diff += this_diff
    return total_diff


def run_image(inference_output, test_output, threshold):
    ranking = []
    for directory in inference_output:
        for valid_image in inference_output[directory]:
            diff = face_diff(valid_image, test_output)
            if diff >= threshold:
                ranking.append([diff, "None"])
            else:
                ranking.append([diff, directory])
    ranking.sort()
    # vote among the 5 closest matches
    result = {}
    for idx in range(5):
        if ranking[idx][1] in result:
            result[ranking[idx][1]] += 1
        else:
            result[ranking[idx][1]] = 1
    ans = "None"
    count = 0
    for d in result:
        if result[d] > count:
            count = result[d]
            ans = d
    count = 0
    if ans == "None" and len(result) != 1:
        for d in result:
            if d != "None":
                if result[d] > count:
                    count = result[d]
                    ans = d
    return ans


def train(args):
    valid_data_directory_list = os.listdir(args.trainData)
    inference_output = {}
    for d in valid_data_directory_list:
        dir_name = args.trainData + "/" + d
        valid_image_filename_list = [dir_name + "/" + i for i in os.listdir(dir_name)
                                     if i.endswith(".jpg")]
        done = 0
        for valid_image_filename in valid_image_filename_list:
            validated_image = cv2.imread(valid_image_filename)
            valid_output = run_inference(validated_image, args)
            if numpy.any(valid_output) == None:
                if (args.verbose):
                    print("No face detected in " + valid_image_filename + " in dir: " + dir_name)
                continue
            if d in inference_output:
                inference_output[d].append(valid_output)
            else:
                inference_output[d] = [valid_output]
            done += 1
            if done == 10:
                break
    with open(args.trainModel, 'wb') as mod:
        pickle.dump(inference_output, mod)


# main entry point for program. we'll call main() to do what needs to be done.
# if __name__ == "__main__":
#     sys.exit(train())
[ "numpy.mean", "os.listdir", "cPickle.dump", "numpy.sqrt", "fd.detect_face", "cv2.copyMakeBorder", "numpy.subtract", "mvnc.mvncapi.Device", "numpy.square", "numpy.any", "cv2.cvtColor", "numpy.std", "cv2.resize", "cv2.imread", "mvnc.mvncapi.EnumerateDevices" ]
[((565, 606), 'cv2.resize', 'cv2.resize', (['image_to_classify', '(640, 480)'], {}), '(image_to_classify, (640, 480))\n', (575, 606), False, 'import numpy, cv2\n'), ((699, 792), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT'], {'value': '[0, 0, 0]'}), '(image, top, bottom, left, right, cv2.BORDER_CONSTANT,\n value=[0, 0, 0])\n', (717, 792), False, 'import numpy, cv2\n'), ((799, 837), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (811, 837), False, 'import numpy, cv2\n'), ((856, 877), 'fd.detect_face', 'fd.detect_face', (['frame'], {}), '(frame)\n', (870, 877), False, 'import fd\n'), ((1210, 1233), 'mvnc.mvncapi.EnumerateDevices', 'mvnc.EnumerateDevices', ([], {}), '()\n', (1231, 1233), True, 'from mvnc import mvncapi as mvnc\n'), ((1247, 1270), 'mvnc.mvncapi.Device', 'mvnc.Device', (['devices[0]'], {}), '(devices[0])\n', (1258, 1270), True, 'from mvnc import mvncapi as mvnc\n'), ((1885, 1909), 'numpy.mean', 'numpy.mean', (['source_image'], {}), '(source_image)\n', (1895, 1909), False, 'import numpy, cv2\n'), ((1942, 1965), 'numpy.std', 'numpy.std', (['source_image'], {}), '(source_image)\n', (1951, 1965), False, 'import numpy, cv2\n'), ((2288, 2336), 'cv2.resize', 'cv2.resize', (['src', '(NETWORK_WIDTH, NETWORK_HEIGHT)'], {}), '(src, (NETWORK_WIDTH, NETWORK_HEIGHT))\n', (2298, 2336), False, 'import numpy, cv2\n'), ((2383, 2434), 'cv2.cvtColor', 'cv2.cvtColor', (['preprocessed_image', 'cv2.COLOR_BGR2RGB'], {}), '(preprocessed_image, cv2.COLOR_BGR2RGB)\n', (2395, 2434), False, 'import numpy, cv2\n'), ((3944, 3970), 'os.listdir', 'os.listdir', (['args.trainData'], {}), '(args.trainData)\n', (3954, 3970), False, 'import sys, os\n'), ((2099, 2140), 'numpy.subtract', 'numpy.subtract', (['source_image', 'source_mean'], {}), '(source_image, source_mean)\n', (2113, 2140), False, 'import numpy, cv2\n'), ((2825, 2894), 'numpy.square', 'numpy.square', (['(face1_output[output_index] - face2_output[output_index])'], {}), '(face1_output[output_index] - face2_output[output_index])\n', (2837, 2894), False, 'import numpy, cv2\n'), ((4913, 4947), 'cPickle.dump', 'pickle.dump', (['inference_output', 'mod'], {}), '(inference_output, mod)\n', (4924, 4947), True, 'import cPickle as pickle\n'), ((909, 933), 'numpy.any', 'numpy.any', (['face_image[0]'], {}), '(face_image[0])\n', (918, 933), False, 'import numpy, cv2\n'), ((2032, 2061), 'numpy.sqrt', 'numpy.sqrt', (['source_image.size'], {}), '(source_image.size)\n', (2042, 2061), False, 'import numpy, cv2\n'), ((4315, 4347), 'cv2.imread', 'cv2.imread', (['valid_image_filename'], {}), '(valid_image_filename)\n', (4325, 4347), False, 'import numpy, cv2\n'), ((4160, 4180), 'os.listdir', 'os.listdir', (['dir_name'], {}), '(dir_name)\n', (4170, 4180), False, 'import sys, os\n'), ((4427, 4450), 'numpy.any', 'numpy.any', (['valid_output'], {}), '(valid_output)\n', (4436, 4450), False, 'import numpy, cv2\n')]
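`face_diff` in the sample above is a squared Euclidean distance computed element by element; the same comparison of one probe embedding against a whole gallery can be done in one vectorized call. A sketch — the gallery and probe here are stand-in random arrays, not outputs of the NCS pipeline:

```python
import numpy as np

def face_diffs(gallery: np.ndarray, probe: np.ndarray) -> np.ndarray:
    """Squared L2 distance from one probe embedding to each gallery row."""
    return np.sum(np.square(gallery - probe), axis=1)

gallery = np.random.rand(10, 128)  # 10 stored embeddings
probe = np.random.rand(128)        # one query embedding
diffs = face_diffs(gallery, probe)
print(diffs.argsort()[:5])         # indices of the 5 closest faces, as run_image ranks them
```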
'''
Created on Sep 24, 2016

@author: Wajih-PC
'''
import numpy as np
from scipy.special import erfinv


def sigmrnd(input):
    # Declaring variables as np float type to avoid Overflow warnings
    minusone = np.float(-1.0)
    plusone = np.float(1.0)
    sigmVals = np.true_divide(plusone, np.add(plusone, np.exp(np.multiply(minusone, input))))
    samples = np.random.uniform(0, 1, input.shape)
    samples = np.where(sigmVals > samples, 1, 0)
    return samples
[ "numpy.where", "numpy.multiply", "numpy.float", "numpy.random.uniform" ]
[((220, 234), 'numpy.float', 'np.float', (['(-1.0)'], {}), '(-1.0)\n', (228, 234), True, 'import numpy as np\n'), ((251, 264), 'numpy.float', 'np.float', (['(1.0)'], {}), '(1.0)\n', (259, 264), True, 'import numpy as np\n'), ((372, 408), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'input.shape'], {}), '(0, 1, input.shape)\n', (389, 408), True, 'import numpy as np\n'), ((421, 455), 'numpy.where', 'np.where', (['(sigmVals > samples)', '(1)', '(0)'], {}), '(sigmVals > samples, 1, 0)\n', (429, 455), True, 'import numpy as np\n'), ((326, 354), 'numpy.multiply', 'np.multiply', (['minusone', 'input'], {}), '(minusone, input)\n', (337, 354), True, 'import numpy as np\n')]
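`sigmrnd` above samples a Bernoulli variable per element with probability given by the logistic sigmoid of the input; the chain of `np.true_divide`/`np.add`/`np.multiply` is just `1 / (1 + exp(-x))` spelled out. A quick check of the equivalence (plain floats here, since `np.float` was removed in NumPy 1.24):

```python
import numpy as np

x = np.linspace(-4, 4, 9)
sigm = 1.0 / (1.0 + np.exp(-x))        # same probabilities sigmrnd computes
u = np.random.uniform(0, 1, x.shape)
samples = np.where(sigm > u, 1, 0)      # 1 with probability sigm(x)
print(sigm.round(3))
print(samples)
```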
#!/usr/bin/env python
#----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('extern', parent_package, top_path)
    config.make_config_py()
    return config
[ "numpy.distutils.misc_util.Configuration" ]
[((488, 537), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""extern"""', 'parent_package', 'top_path'], {}), "('extern', parent_package, top_path)\n", (501, 537), False, 'from numpy.distutils.misc_util import Configuration\n')]
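A `configuration` function of this shape is normally consumed by `numpy.distutils.core.setup` at the package root; a minimal sketch of that idiom (note that `numpy.distutils` is deprecated and unavailable on Python 3.12+):

```python
# Hypothetical build entry point consuming the configuration() above.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
```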
# Copyright (c) 2021 <NAME>. All rights reserved. # This code is licensed under Apache 2.0 with Commons Clause license (see LICENSE.md for details) """Base class for working with records. vectorbt works with two different representations of data: matrices and records. A matrix, in this context, is just an array of one-dimensional arrays, each corresponding to a separate feature. The matrix itself holds only one kind of information (one attribute). For example, one can create a matrix for entry signals, with columns being different strategy configurations. But what if the matrix is huge and sparse? What if there is more information we would like to represent by each element? Creating multiple matrices would be a waste of memory. Records make possible representing complex, sparse information in a dense format. They are just an array of one-dimensional arrays of fixed schema. You can imagine records being a DataFrame, where each row represents a record and each column represents a specific attribute. ```plaintext a b 0 1.0 5.0 attr1 = 1 2.0 NaN 2 NaN 7.0 3 4.0 8.0 a b 0 9.0 13.0 attr2 = 1 10.0 NaN 2 NaN 15.0 3 12.0 16.0 | v id col idx attr1 attr2 0 0 0 0 1 9 1 1 0 1 2 10 2 2 0 3 4 12 3 3 1 0 5 13 4 4 1 1 7 15 5 5 1 3 8 16 ``` Another advantage of records is that they are not constrained by size. Multiple records can map to a single element in a matrix. For example, one can define multiple orders at the same time step, which is impossible to represent in a matrix form without using complex data types. Consider the following example: ```pycon >>> import numpy as np >>> import pandas as pd >>> from numba import njit >>> from collections import namedtuple >>> import vectorbt as vbt >>> example_dt = np.dtype([ ... ('id', np.int_), ... ('col', np.int_), ... ('idx', np.int_), ... ('some_field', np.float_) ... ]) >>> records_arr = np.array([ ... (0, 0, 0, 10.), ... (1, 0, 1, 11.), ... (2, 0, 2, 12.), ... (3, 1, 0, 13.), ... (4, 1, 1, 14.), ... (5, 1, 2, 15.), ... (6, 2, 0, 16.), ... (7, 2, 1, 17.), ... (8, 2, 2, 18.) ... ], dtype=example_dt) >>> wrapper = vbt.ArrayWrapper(index=['x', 'y', 'z'], ... columns=['a', 'b', 'c'], ndim=2, freq='1 day') >>> records = vbt.Records(wrapper, records_arr) ``` ## Printing There are two ways to print records: * Raw dataframe that preserves field names and data types: ```pycon >>> records.records id col idx some_field 0 0 0 0 10.0 1 1 0 1 11.0 2 2 0 2 12.0 3 3 1 0 13.0 4 4 1 1 14.0 5 5 1 2 15.0 6 6 2 0 16.0 7 7 2 1 17.0 8 8 2 2 18.0 ``` * Readable dataframe that takes into consideration `Records.field_config`: ```pycon >>> records.records_readable Id Column Timestamp some_field 0 0 a x 10.0 1 1 a y 11.0 2 2 a z 12.0 3 3 b x 13.0 4 4 b y 14.0 5 5 b z 15.0 6 6 c x 16.0 7 7 c y 17.0 8 8 c z 18.0 ``` ## Mapping `Records` are just [structured arrays](https://numpy.org/doc/stable/user/basics.rec.html) with a bunch of methods and properties for processing them. Their main feature is to map the records array and to reduce it by column (similar to the MapReduce paradigm). The main advantage is that it all happens without conversion to the matrix form and wasting memory resources. `Records` can be mapped to `vectorbt.records.mapped_array.MappedArray` in several ways: * Use `Records.map_field` to map a record field: ```pycon >>> records.map_field('some_field') <vectorbt.records.mapped_array.MappedArray at 0x7ff49bd31a58> >>> records.map_field('some_field').values array([10., 11., 12., 13., 14., 15., 16., 17., 18.]) ``` * Use `Records.map` to map records using a custom function. 
```pycon >>> @njit ... def power_map_nb(record, pow): ... return record.some_field ** pow >>> records.map(power_map_nb, 2) <vectorbt.records.mapped_array.MappedArray at 0x7ff49c990cf8> >>> records.map(power_map_nb, 2).values array([100., 121., 144., 169., 196., 225., 256., 289., 324.]) ``` * Use `Records.map_array` to convert an array to `vectorbt.records.mapped_array.MappedArray`. ```pycon >>> records.map_array(records_arr['some_field'] ** 2) <vectorbt.records.mapped_array.MappedArray object at 0x7fe9bccf2978> >>> records.map_array(records_arr['some_field'] ** 2).values array([100., 121., 144., 169., 196., 225., 256., 289., 324.]) ``` * Use `Records.apply` to apply a function on each column/group: ```pycon >>> @njit ... def cumsum_apply_nb(records): ... return np.cumsum(records.some_field) >>> records.apply(cumsum_apply_nb) <vectorbt.records.mapped_array.MappedArray at 0x7ff49c990cf8> >>> records.apply(cumsum_apply_nb).values array([10., 21., 33., 13., 27., 42., 16., 33., 51.]) >>> group_by = np.array(['first', 'first', 'second']) >>> records.apply(cumsum_apply_nb, group_by=group_by, apply_per_group=True).values array([10., 21., 33., 46., 60., 75., 16., 33., 51.]) ``` Notice how cumsum resets at each column in the first example and at each group in the second example. ## Filtering Use `Records.apply_mask` to filter elements per column/group: ```pycon >>> mask = [True, False, True, False, True, False, True, False, True] >>> filtered_records = records.apply_mask(mask) >>> filtered_records.count() a 2 b 1 c 2 dtype: int64 >>> filtered_records.values['id'] array([0, 2, 4, 6, 8]) ``` ## Grouping One of the key features of `Records` is that you can perform reducing operations on a group of columns as if they were a single column. Groups can be specified by `group_by`, which can be anything from positions or names of column levels, to a NumPy array with actual groups. There are multiple ways of define grouping: * When creating `Records`, pass `group_by` to `vectorbt.base.array_wrapper.ArrayWrapper`: ```pycon >>> group_by = np.array(['first', 'first', 'second']) >>> grouped_wrapper = wrapper.replace(group_by=group_by) >>> grouped_records = vbt.Records(grouped_wrapper, records_arr) >>> grouped_records.map_field('some_field').mean() first 12.5 second 17.0 dtype: float64 ``` * Regroup an existing `Records`: ```pycon >>> records.regroup(group_by).map_field('some_field').mean() first 12.5 second 17.0 dtype: float64 ``` * Pass `group_by` directly to the mapping method: ```pycon >>> records.map_field('some_field', group_by=group_by).mean() first 12.5 second 17.0 dtype: float64 ``` * Pass `group_by` directly to the reducing method: ```pycon >>> records.map_field('some_field').mean(group_by=group_by) a 11.0 b 14.0 c 17.0 dtype: float64 ``` !!! note Grouping applies only to reducing operations, there is no change to the arrays. ## Indexing Like any other class subclassing `vectorbt.base.array_wrapper.Wrapping`, we can do pandas indexing on a `Records` instance, which forwards indexing operation to each object with columns: ```pycon >>> records['a'].records id col idx some_field 0 0 0 0 10.0 1 1 0 1 11.0 2 2 0 2 12.0 >>> grouped_records['first'].records id col idx some_field 0 0 0 0 10.0 1 1 0 1 11.0 2 2 0 2 12.0 3 3 1 0 13.0 4 4 1 1 14.0 5 5 1 2 15.0 ``` !!! note Changing index (time axis) is not supported. The object should be treated as a Series rather than a DataFrame; for example, use `some_field.iloc[0]` instead of `some_field.iloc[:, 0]`. 
Indexing behavior depends solely upon `vectorbt.base.array_wrapper.ArrayWrapper`. For example, if `group_select` is enabled indexing will be performed on groups, otherwise on single columns. ## Caching `Records` supports caching. If a method or a property requires heavy computation, it's wrapped with `vectorbt.utils.decorators.cached_method` and `vectorbt.utils.decorators.cached_property` respectively. Caching can be disabled globally via `caching` in `vectorbt._settings.settings`. !!! note Because of caching, class is meant to be immutable and all properties are read-only. To change any attribute, use the `copy` method and pass the attribute as keyword argument. ## Saving and loading Like any other class subclassing `vectorbt.utils.config.Pickleable`, we can save a `Records` instance to the disk with `Records.save` and load it with `Records.load`. ## Stats !!! hint See `vectorbt.generic.stats_builder.StatsBuilderMixin.stats` and `Records.metrics`. ```pycon >>> records.stats(column='a') Start x End z Period 3 days 00:00:00 Total Records 3 Name: a, dtype: object ``` `Records.stats` also supports (re-)grouping: ```pycon >>> grouped_records.stats(column='first') Start x End z Period 3 days 00:00:00 Total Records 6 Name: first, dtype: object ``` ## Plots !!! hint See `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots` and `Records.subplots`. This class is too generic to have any subplots, but feel free to add custom subplots to your subclass. ## Extending `Records` class can be extended by subclassing. In case some of our fields have the same meaning but different naming (such as the base field `idx`) or other properties, we can override `field_config` using `vectorbt.records.decorators.override_field_config`. It will look for configs of all base classes and merge our config on top of them. This preserves any base class property that is not explicitly listed in our config. ```pycon >>> from vectorbt.records.decorators import override_field_config >>> my_dt = np.dtype([ ... ('my_id', np.int_), ... ('my_col', np.int_), ... ('my_idx', np.int_) ... ]) >>> my_fields_config = dict( ... dtype=my_dt, ... settings=dict( ... id=dict(name='my_id'), ... col=dict(name='my_col'), ... idx=dict(name='my_idx') ... ) ... ) >>> @override_field_config(my_fields_config) ... class MyRecords(vbt.Records): ... pass >>> records_arr = np.array([ ... (0, 0, 0), ... (1, 0, 1), ... (2, 1, 0), ... (3, 1, 1) ... ], dtype=my_dt) >>> wrapper = vbt.ArrayWrapper(index=['x', 'y'], ... columns=['a', 'b'], ndim=2, freq='1 day') >>> my_records = MyRecords(wrapper, records_arr) >>> my_records.id_arr array([0, 1, 2, 3]) >>> my_records.col_arr array([0, 0, 1, 1]) >>> my_records.idx_arr array([0, 1, 0, 1]) ``` Alternatively, we can override the `_field_config` class attribute. ```pycon >>> @override_field_config ... class MyRecords(vbt.Records): ... _field_config = dict( ... dtype=my_dt, ... settings=dict( ... id=dict(name='my_id'), ... idx=dict(name='my_idx'), ... col=dict(name='my_col') ... ) ... ) ``` !!! note Don't forget to decorate the class with `@override_field_config` to inherit configs from base classes. You can stop inheritance by not decorating or passing `merge_configs=False` to the decorator. 
""" import inspect import string import numpy as np import pandas as pd from vectorbt import _typing as tp from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping from vectorbt.base.reshape_fns import to_1d_array from vectorbt.generic.plots_builder import PlotsBuilderMixin from vectorbt.generic.stats_builder import StatsBuilderMixin from vectorbt.records import nb from vectorbt.records.col_mapper import ColumnMapper from vectorbt.records.mapped_array import MappedArray from vectorbt.utils import checks from vectorbt.utils.attr_ import get_dict_attr from vectorbt.utils.config import merge_dicts, Config, Configured from vectorbt.utils.decorators import cached_method __pdoc__ = {} RecordsT = tp.TypeVar("RecordsT", bound="Records") IndexingMetaT = tp.Tuple[ArrayWrapper, tp.RecordArray, tp.MaybeArray, tp.Array1d] class MetaFields(type): """Meta class that exposes a read-only class property `MetaFields.field_config`.""" @property def field_config(cls) -> Config: """Field config.""" return cls._field_config class RecordsWithFields(metaclass=MetaFields): """Class exposes a read-only class property `RecordsWithFields.field_config`.""" @property def field_config(self) -> Config: """Field config of `${cls_name}`. ```json ${field_config} ``` """ return self._field_config class MetaRecords(type(StatsBuilderMixin), type(PlotsBuilderMixin), type(RecordsWithFields)): pass class Records(Wrapping, StatsBuilderMixin, PlotsBuilderMixin, RecordsWithFields, metaclass=MetaRecords): """Wraps the actual records array (such as trades) and exposes methods for mapping it to some array of values (such as PnL of each trade). Args: wrapper (ArrayWrapper): Array wrapper. See `vectorbt.base.array_wrapper.ArrayWrapper`. records_arr (array_like): A structured NumPy array of records. Must have the fields `id` (record index) and `col` (column index). col_mapper (ColumnMapper): Column mapper if already known. !!! note It depends on `records_arr`, so make sure to invalidate `col_mapper` upon creating a `Records` instance with a modified `records_arr`. `Records.replace` does it automatically. **kwargs: Custom keyword arguments passed to the config. Useful if any subclass wants to extend the config. """ _field_config: tp.ClassVar[Config] = Config( dict( dtype=None, settings=dict( id=dict( name='id', title='Id' ), col=dict( name='col', title='Column', mapping='columns' ), idx=dict( name='idx', title='Timestamp', mapping='index' ) ) ), readonly=True, as_attrs=False ) @property def field_config(self) -> Config: """Field config of `${cls_name}`. 
```json ${field_config} ``` """ return self._field_config def __init__(self, wrapper: ArrayWrapper, records_arr: tp.RecordArray, col_mapper: tp.Optional[ColumnMapper] = None, **kwargs) -> None: Wrapping.__init__( self, wrapper, records_arr=records_arr, col_mapper=col_mapper, **kwargs ) StatsBuilderMixin.__init__(self) # Check fields records_arr = np.asarray(records_arr) checks.assert_not_none(records_arr.dtype.fields) field_names = { dct.get('name', field_name) for field_name, dct in self.field_config.get('settings', {}).items() } dtype = self.field_config.get('dtype', None) if dtype is not None: for field in dtype.names: if field not in records_arr.dtype.names: if field not in field_names: raise TypeError(f"Field '{field}' from {dtype} cannot be found in records or config") self._records_arr = records_arr if col_mapper is None: col_mapper = ColumnMapper(wrapper, self.col_arr) self._col_mapper = col_mapper def replace(self: RecordsT, **kwargs) -> RecordsT: """See `vectorbt.utils.config.Configured.replace`. Also, makes sure that `Records.col_mapper` is not passed to the new instance.""" if self.config.get('col_mapper', None) is not None: if 'wrapper' in kwargs: if self.wrapper is not kwargs.get('wrapper'): kwargs['col_mapper'] = None if 'records_arr' in kwargs: if self.records_arr is not kwargs.get('records_arr'): kwargs['col_mapper'] = None return Configured.replace(self, **kwargs) def get_by_col_idxs(self, col_idxs: tp.Array1d) -> tp.RecordArray: """Get records corresponding to column indices. Returns new records array.""" if self.col_mapper.is_sorted(): new_records_arr = nb.record_col_range_select_nb( self.values, self.col_mapper.col_range, to_1d_array(col_idxs)) # faster else: new_records_arr = nb.record_col_map_select_nb( self.values, self.col_mapper.col_map, to_1d_array(col_idxs)) return new_records_arr def indexing_func_meta(self, pd_indexing_func: tp.PandasIndexingFunc, **kwargs) -> IndexingMetaT: """Perform indexing on `Records` and return metadata.""" new_wrapper, _, group_idxs, col_idxs = \ self.wrapper.indexing_func_meta(pd_indexing_func, column_only_select=True, **kwargs) new_records_arr = self.get_by_col_idxs(col_idxs) return new_wrapper, new_records_arr, group_idxs, col_idxs def indexing_func(self: RecordsT, pd_indexing_func: tp.PandasIndexingFunc, **kwargs) -> RecordsT: """Perform indexing on `Records`.""" new_wrapper, new_records_arr, _, _ = self.indexing_func_meta(pd_indexing_func, **kwargs) return self.replace( wrapper=new_wrapper, records_arr=new_records_arr ) @property def records_arr(self) -> tp.RecordArray: """Records array.""" return self._records_arr @property def values(self) -> tp.RecordArray: """Records array.""" return self.records_arr def __len__(self) -> int: return len(self.values) @property def records(self) -> tp.Frame: """Records.""" return pd.DataFrame.from_records(self.values) @property def recarray(self) -> tp.RecArray: return self.values.view(np.recarray) @property def col_mapper(self) -> ColumnMapper: """Column mapper. 
See `vectorbt.records.col_mapper.ColumnMapper`.""" return self._col_mapper @property def records_readable(self) -> tp.Frame: """Records in readable format.""" df = self.records.copy() field_settings = self.field_config.get('settings', {}) for col_name in df.columns: if col_name in field_settings: dct = field_settings[col_name] if dct.get('ignore', False): df = df.drop(columns=col_name) continue field_name = dct.get('name', col_name) if 'title' in dct: title = dct['title'] new_columns = dict() new_columns[field_name] = title df.rename(columns=new_columns, inplace=True) else: title = field_name if 'mapping' in dct: if isinstance(dct['mapping'], str) and dct['mapping'] == 'index': df[title] = self.get_map_field_to_index(col_name) else: df[title] = self.get_apply_mapping_arr(col_name) return df def get_field_setting(self, field: str, setting: str, default: tp.Any = None) -> tp.Any: """Resolve any setting of the field. Uses `Records.field_config`.""" return self.field_config.get('settings', {}).get(field, {}).get(setting, default) def get_field_name(self, field: str) -> str: """Resolve the name of the field. Uses `Records.field_config`..""" return self.get_field_setting(field, 'name', field) def get_field_title(self, field: str) -> str: """Resolve the title of the field. Uses `Records.field_config`.""" return self.get_field_setting(field, 'title', field) def get_field_mapping(self, field: str) -> tp.Optional[tp.MappingLike]: """Resolve the mapping of the field. Uses `Records.field_config`.""" return self.get_field_setting(field, 'mapping', None) def get_field_arr(self, field: str) -> tp.Array1d: """Resolve the array of the field. Uses `Records.field_config`.""" return self.values[self.get_field_name(field)] def get_map_field(self, field: str, **kwargs) -> MappedArray: """Resolve the mapped array of the field. Uses `Records.field_config`.""" return self.map_field(self.get_field_name(field), mapping=self.get_field_mapping(field), **kwargs) def get_apply_mapping_arr(self, field: str, **kwargs) -> tp.Array1d: """Resolve the mapped array on the field, with mapping applied. Uses `Records.field_config`.""" return self.get_map_field(field, **kwargs).apply_mapping().values def get_map_field_to_index(self, field: str, **kwargs) -> tp.Index: """Resolve the mapped array on the field, with index applied. Uses `Records.field_config`.""" return self.get_map_field(field, **kwargs).to_index() @property def id_arr(self) -> tp.Array1d: """Get id array.""" return self.values[self.get_field_name('id')] @property def col_arr(self) -> tp.Array1d: """Get column array.""" return self.values[self.get_field_name('col')] @property def idx_arr(self) -> tp.Optional[tp.Array1d]: """Get index array.""" idx_field_name = self.get_field_name('idx') if idx_field_name is None: return None return self.values[idx_field_name] @cached_method def is_sorted(self, incl_id: bool = False) -> bool: """Check whether records are sorted.""" if incl_id: return nb.is_col_idx_sorted_nb(self.col_arr, self.id_arr) return nb.is_col_sorted_nb(self.col_arr) def sort(self: RecordsT, incl_id: bool = False, group_by: tp.GroupByLike = None, **kwargs) -> RecordsT: """Sort records by columns (primary) and ids (secondary, optional). !!! note Sorting is expensive. A better approach is to append records already in the correct order.""" if self.is_sorted(incl_id=incl_id): return self.replace(**kwargs).regroup(group_by) if incl_id: ind = np.lexsort((self.id_arr, self.col_arr)) # expensive! 
else: ind = np.argsort(self.col_arr) return self.replace(records_arr=self.values[ind], **kwargs).regroup(group_by) def apply_mask(self: RecordsT, mask: tp.Array1d, group_by: tp.GroupByLike = None, **kwargs) -> RecordsT: """Return a new class instance, filtered by mask.""" mask_indices = np.flatnonzero(mask) return self.replace( records_arr=np.take(self.values, mask_indices), **kwargs ).regroup(group_by) def map_array(self, a: tp.ArrayLike, idx_arr: tp.Optional[tp.ArrayLike] = None, mapping: tp.Optional[tp.MappingLike] = None, group_by: tp.GroupByLike = None, **kwargs) -> MappedArray: """Convert array to mapped array. The length of the array should match that of the records.""" if not isinstance(a, np.ndarray): a = np.asarray(a) checks.assert_shape_equal(a, self.values) if idx_arr is None: idx_arr = self.idx_arr return MappedArray( self.wrapper, a, self.col_arr, id_arr=self.id_arr, idx_arr=idx_arr, mapping=mapping, col_mapper=self.col_mapper, **kwargs ).regroup(group_by) def map_field(self, field: str, **kwargs) -> MappedArray: """Convert field to mapped array. `**kwargs` are passed to `Records.map_array`.""" mapped_arr = self.values[field] return self.map_array(mapped_arr, **kwargs) def map(self, map_func_nb: tp.RecordMapFunc, *args, dtype: tp.Optional[tp.DTypeLike] = None, **kwargs) -> MappedArray: """Map each record to a scalar value. Returns mapped array. See `vectorbt.records.nb.map_records_nb`. `**kwargs` are passed to `Records.map_array`.""" checks.assert_numba_func(map_func_nb) mapped_arr = nb.map_records_nb(self.values, map_func_nb, *args) mapped_arr = np.asarray(mapped_arr, dtype=dtype) return self.map_array(mapped_arr, **kwargs) def apply(self, apply_func_nb: tp.RecordApplyFunc, *args, group_by: tp.GroupByLike = None, apply_per_group: bool = False, dtype: tp.Optional[tp.DTypeLike] = None, **kwargs) -> MappedArray: """Apply function on records per column/group. Returns mapped array. Applies per group if `apply_per_group` is True. See `vectorbt.records.nb.apply_on_records_nb`. `**kwargs` are passed to `Records.map_array`.""" checks.assert_numba_func(apply_func_nb) if apply_per_group: col_map = self.col_mapper.get_col_map(group_by=group_by) else: col_map = self.col_mapper.get_col_map(group_by=False) mapped_arr = nb.apply_on_records_nb(self.values, col_map, apply_func_nb, *args) mapped_arr = np.asarray(mapped_arr, dtype=dtype) return self.map_array(mapped_arr, group_by=group_by, **kwargs) @cached_method def count(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries: """Return count by column.""" wrap_kwargs = merge_dicts(dict(name_or_index='count'), wrap_kwargs) return self.wrapper.wrap_reduced( self.col_mapper.get_col_map(group_by=group_by)[1], group_by=group_by, **wrap_kwargs) # ############# Stats ############# # @property def stats_defaults(self) -> tp.Kwargs: """Defaults for `Records.stats`. 
Merges `vectorbt.generic.stats_builder.StatsBuilderMixin.stats_defaults` and `records.stats` from `vectorbt._settings.settings`.""" from vectorbt._settings import settings records_stats_cfg = settings['records']['stats'] return merge_dicts( StatsBuilderMixin.stats_defaults.__get__(self), records_stats_cfg ) _metrics: tp.ClassVar[Config] = Config( dict( start=dict( title='Start', calc_func=lambda self: self.wrapper.index[0], agg_func=None, tags='wrapper' ), end=dict( title='End', calc_func=lambda self: self.wrapper.index[-1], agg_func=None, tags='wrapper' ), period=dict( title='Period', calc_func=lambda self: len(self.wrapper.index), apply_to_timedelta=True, agg_func=None, tags='wrapper' ), count=dict( title='Count', calc_func='count', tags='records' ) ), copy_kwargs=dict(copy_mode='deep') ) @property def metrics(self) -> Config: return self._metrics # ############# Plotting ############# # @property def plots_defaults(self) -> tp.Kwargs: """Defaults for `Records.plots`. Merges `vectorbt.generic.plots_builder.PlotsBuilderMixin.plots_defaults` and `records.plots` from `vectorbt._settings.settings`.""" from vectorbt._settings import settings records_plots_cfg = settings['records']['plots'] return merge_dicts( PlotsBuilderMixin.plots_defaults.__get__(self), records_plots_cfg ) @property def subplots(self) -> Config: return self._subplots # ############# Docs ############# # @classmethod def build_field_config_doc(cls, source_cls: tp.Optional[type] = None) -> str: """Build field config documentation.""" if source_cls is None: source_cls = Records return string.Template( inspect.cleandoc(get_dict_attr(source_cls, 'field_config').__doc__) ).substitute( {'field_config': cls.field_config.to_doc(), 'cls_name': cls.__name__} ) @classmethod def override_field_config_doc(cls, __pdoc__: dict, source_cls: tp.Optional[type] = None) -> None: """Call this method on each subclass that overrides `field_config`.""" __pdoc__[cls.__name__ + '.field_config'] = cls.build_field_config_doc(source_cls=source_cls) Records.override_field_config_doc(__pdoc__) Records.override_metrics_doc(__pdoc__) Records.override_subplots_doc(__pdoc__)
[ "vectorbt.generic.stats_builder.StatsBuilderMixin.stats_defaults.__get__", "numpy.argsort", "vectorbt.utils.attr_.get_dict_attr", "vectorbt.base.reshape_fns.to_1d_array", "numpy.flatnonzero", "numpy.asarray", "numpy.take", "vectorbt.records.col_mapper.ColumnMapper", "vectorbt.records.nb.is_col_sorte...
[((12280, 12319), 'vectorbt._typing.TypeVar', 'tp.TypeVar', (['"""RecordsT"""'], {'bound': '"""Records"""'}), "('RecordsT', bound='Records')\n", (12290, 12319), True, 'from vectorbt import _typing as tp\n'), ((15048, 15143), 'vectorbt.base.array_wrapper.Wrapping.__init__', 'Wrapping.__init__', (['self', 'wrapper'], {'records_arr': 'records_arr', 'col_mapper': 'col_mapper'}), '(self, wrapper, records_arr=records_arr, col_mapper=\n col_mapper, **kwargs)\n', (15065, 15143), False, 'from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping\n'), ((15217, 15249), 'vectorbt.generic.stats_builder.StatsBuilderMixin.__init__', 'StatsBuilderMixin.__init__', (['self'], {}), '(self)\n', (15243, 15249), False, 'from vectorbt.generic.stats_builder import StatsBuilderMixin\n'), ((15296, 15319), 'numpy.asarray', 'np.asarray', (['records_arr'], {}), '(records_arr)\n', (15306, 15319), True, 'import numpy as np\n'), ((15328, 15376), 'vectorbt.utils.checks.assert_not_none', 'checks.assert_not_none', (['records_arr.dtype.fields'], {}), '(records_arr.dtype.fields)\n', (15350, 15376), False, 'from vectorbt.utils import checks\n'), ((16624, 16658), 'vectorbt.utils.config.Configured.replace', 'Configured.replace', (['self'], {}), '(self, **kwargs)\n', (16642, 16658), False, 'from vectorbt.utils.config import merge_dicts, Config, Configured\n'), ((18380, 18418), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['self.values'], {}), '(self.values)\n', (18405, 18418), True, 'import pandas as pd\n'), ((22336, 22369), 'vectorbt.records.nb.is_col_sorted_nb', 'nb.is_col_sorted_nb', (['self.col_arr'], {}), '(self.col_arr)\n', (22355, 22369), False, 'from vectorbt.records import nb\n'), ((23212, 23232), 'numpy.flatnonzero', 'np.flatnonzero', (['mask'], {}), '(mask)\n', (23226, 23232), True, 'import numpy as np\n'), ((23843, 23884), 'vectorbt.utils.checks.assert_shape_equal', 'checks.assert_shape_equal', (['a', 'self.values'], {}), '(a, self.values)\n', (23868, 23884), False, 'from vectorbt.utils import checks\n'), ((24822, 24859), 'vectorbt.utils.checks.assert_numba_func', 'checks.assert_numba_func', (['map_func_nb'], {}), '(map_func_nb)\n', (24846, 24859), False, 'from vectorbt.utils import checks\n'), ((24881, 24931), 'vectorbt.records.nb.map_records_nb', 'nb.map_records_nb', (['self.values', 'map_func_nb', '*args'], {}), '(self.values, map_func_nb, *args)\n', (24898, 24931), False, 'from vectorbt.records import nb\n'), ((24953, 24988), 'numpy.asarray', 'np.asarray', (['mapped_arr'], {'dtype': 'dtype'}), '(mapped_arr, dtype=dtype)\n', (24963, 24988), True, 'import numpy as np\n'), ((25561, 25600), 'vectorbt.utils.checks.assert_numba_func', 'checks.assert_numba_func', (['apply_func_nb'], {}), '(apply_func_nb)\n', (25585, 25600), False, 'from vectorbt.utils import checks\n'), ((25799, 25865), 'vectorbt.records.nb.apply_on_records_nb', 'nb.apply_on_records_nb', (['self.values', 'col_map', 'apply_func_nb', '*args'], {}), '(self.values, col_map, apply_func_nb, *args)\n', (25821, 25865), False, 'from vectorbt.records import nb\n'), ((25887, 25922), 'numpy.asarray', 'np.asarray', (['mapped_arr'], {'dtype': 'dtype'}), '(mapped_arr, dtype=dtype)\n', (25897, 25922), True, 'import numpy as np\n'), ((15966, 16001), 'vectorbt.records.col_mapper.ColumnMapper', 'ColumnMapper', (['wrapper', 'self.col_arr'], {}), '(wrapper, self.col_arr)\n', (15978, 16001), False, 'from vectorbt.records.col_mapper import ColumnMapper\n'), ((22270, 22320), 'vectorbt.records.nb.is_col_idx_sorted_nb', 'nb.is_col_idx_sorted_nb', 
(['self.col_arr', 'self.id_arr'], {}), '(self.col_arr, self.id_arr)\n', (22293, 22320), False, 'from vectorbt.records import nb\n'), ((22821, 22860), 'numpy.lexsort', 'np.lexsort', (['(self.id_arr, self.col_arr)'], {}), '((self.id_arr, self.col_arr))\n', (22831, 22860), True, 'import numpy as np\n'), ((22907, 22931), 'numpy.argsort', 'np.argsort', (['self.col_arr'], {}), '(self.col_arr)\n', (22917, 22931), True, 'import numpy as np\n'), ((23821, 23834), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (23831, 23834), True, 'import numpy as np\n'), ((26823, 26869), 'vectorbt.generic.stats_builder.StatsBuilderMixin.stats_defaults.__get__', 'StatsBuilderMixin.stats_defaults.__get__', (['self'], {}), '(self)\n', (26863, 26869), False, 'from vectorbt.generic.stats_builder import StatsBuilderMixin\n'), ((28306, 28352), 'vectorbt.generic.plots_builder.PlotsBuilderMixin.plots_defaults.__get__', 'PlotsBuilderMixin.plots_defaults.__get__', (['self'], {}), '(self)\n', (28346, 28352), False, 'from vectorbt.generic.plots_builder import PlotsBuilderMixin\n'), ((16983, 17004), 'vectorbt.base.reshape_fns.to_1d_array', 'to_1d_array', (['col_idxs'], {}), '(col_idxs)\n', (16994, 17004), False, 'from vectorbt.base.reshape_fns import to_1d_array\n'), ((17143, 17164), 'vectorbt.base.reshape_fns.to_1d_array', 'to_1d_array', (['col_idxs'], {}), '(col_idxs)\n', (17154, 17164), False, 'from vectorbt.base.reshape_fns import to_1d_array\n'), ((23963, 24102), 'vectorbt.records.mapped_array.MappedArray', 'MappedArray', (['self.wrapper', 'a', 'self.col_arr'], {'id_arr': 'self.id_arr', 'idx_arr': 'idx_arr', 'mapping': 'mapping', 'col_mapper': 'self.col_mapper'}), '(self.wrapper, a, self.col_arr, id_arr=self.id_arr, idx_arr=\n idx_arr, mapping=mapping, col_mapper=self.col_mapper, **kwargs)\n', (23974, 24102), False, 'from vectorbt.records.mapped_array import MappedArray\n'), ((23286, 23320), 'numpy.take', 'np.take', (['self.values', 'mask_indices'], {}), '(self.values, mask_indices)\n', (23293, 23320), True, 'import numpy as np\n'), ((28788, 28829), 'vectorbt.utils.attr_.get_dict_attr', 'get_dict_attr', (['source_cls', '"""field_config"""'], {}), "(source_cls, 'field_config')\n", (28801, 28829), False, 'from vectorbt.utils.attr_ import get_dict_attr\n')]
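The records-to-matrix relationship described in the module docstring above can be made concrete with plain NumPy: scattering the `some_field` values back through the `col`/`idx` fields reconstructs the dense matrix. A minimal sketch using the docstring's own example records (`np.float_` assumes NumPy < 2.0, matching the docstring):

```python
import numpy as np

example_dt = np.dtype([('id', np.int_), ('col', np.int_),
                       ('idx', np.int_), ('some_field', np.float_)])
records_arr = np.array([
    (0, 0, 0, 10.), (1, 0, 1, 11.), (2, 0, 2, 12.),
    (3, 1, 0, 13.), (4, 1, 1, 14.), (5, 1, 2, 15.),
    (6, 2, 0, 16.), (7, 2, 1, 17.), (8, 2, 2, 18.),
], dtype=example_dt)

dense = np.full((3, 3), np.nan)  # rows = index, columns = columns
dense[records_arr['idx'], records_arr['col']] = records_arr['some_field']
print(dense)  # missing elements stay NaN, which is the sparsity records avoid storing
```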
import numpy as np
import json

# with open("pattern.json", "r") as fh:
#     patterns = json.load(fh)


class Pat_Match(object):
    def __init__(self, config, label_to_id, filt=None):
        self.config = config
        self.label_to_id = label_to_id
        self.patterns = config.patterns
        if filt is not None:
            patterns = [pattern for pattern in self.patterns
                        if label_to_id[pattern[0]] not in filt]
            self.patterns = patterns

    def match(self, tokens):
        config = self.config
        num_pat = len(self.patterns)
        num_text = len(tokens)
        res = np.zeros([num_text, num_pat])
        pred = np.zeros([num_text, config.num_class])
        for i, pattern in enumerate(self.patterns):
            rel, pat = pattern
            rel = self.label_to_id[rel]
            for j, token in enumerate(tokens):
                text = " ".join(token)
                if pat in text:
                    # print(text)
                    # print(pat)
                    res[j, i] += 1
                    pred[j, rel] += 1
        none_zero = (np.amax(pred, axis=1) > 0).astype(np.int32)
        pred = np.argmax(pred, axis=1)
        pred = pred * none_zero
        return res, pred
[ "numpy.zeros", "numpy.amax", "numpy.argmax" ]
[((603, 632), 'numpy.zeros', 'np.zeros', (['[num_text, num_pat]'], {}), '([num_text, num_pat])\n', (611, 632), True, 'import numpy as np\n'), ((648, 686), 'numpy.zeros', 'np.zeros', (['[num_text, config.num_class]'], {}), '([num_text, config.num_class])\n', (656, 686), True, 'import numpy as np\n'), ((1159, 1182), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (1168, 1182), True, 'import numpy as np\n'), ((1100, 1121), 'numpy.amax', 'np.amax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (1107, 1121), True, 'import numpy as np\n')]
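A minimal usage sketch for `Pat_Match` above: the `config` object only needs `patterns` (a list of `(relation, pattern_string)` pairs) and `num_class`, so a namedtuple stands in here — the real config class is not shown in the sample:

```python
from collections import namedtuple

Config = namedtuple('Config', ['patterns', 'num_class'])  # hypothetical stand-in
config = Config(patterns=[('born_in', 'was born in'),
                         ('works_for', 'is employed by')],
                num_class=3)
label_to_id = {'born_in': 1, 'works_for': 2}

matcher = Pat_Match(config, label_to_id)
tokens = [['Alice', 'was', 'born', 'in', 'Paris'],
          ['Bob', 'likes', 'tea']]
res, pred = matcher.match(tokens)
print(pred)  # -> [1 0]: sentence 0 matches 'born_in', sentence 1 matches nothing
```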
import numpy as np
from PIL import Image
from loader_base import LoaderBase
import os


class LoaderNumpy(LoaderBase):
    def __init__(self, root, data_transform=[], target_transform=[]):
        super(LoaderNumpy, self).__init__(root, data_transform, target_transform)

    def index_dataset(self, dir):
        # get all .npy files (RGB-D frames with depth packed into two channels)
        files = [x for x in os.listdir(dir) if ".npy" in x]
        return files

    def from_index(self, index):
        file = self.indexes[index]
        # load a sample
        frame = np.load(os.path.join(self.root, file))
        depth = self.numpy_uint8_to_int16(frame[:, :, 3:])
        rgb = frame[:, :, 0:3]
        # convert to float32 (usually necessary for deep learning models)
        rgb = rgb.astype(np.float32)
        depth = depth.astype(np.float32)
        # placeholder target for this demo code
        target = 1
        return (rgb, depth), [target]

    @staticmethod
    def numpy_uint8_to_int16(depth8):
        # reassemble int16 depth from two uint8 channels: high byte, then low byte
        x, y, c = depth8.shape
        out = np.ndarray((x, y), dtype=np.int16)
        out[:, :] = depth8[:, :, 0]
        out = np.left_shift(out, 8)
        out[:, :] += depth8[:, :, 1]
        return out
[ "os.path.join", "os.listdir", "numpy.left_shift", "numpy.ndarray" ]
[((1027, 1061), 'numpy.ndarray', 'np.ndarray', (['(x, y)'], {'dtype': 'np.int16'}), '((x, y), dtype=np.int16)\n', (1037, 1061), True, 'import numpy as np\n'), ((1112, 1133), 'numpy.left_shift', 'np.left_shift', (['out', '(8)'], {}), '(out, 8)\n', (1125, 1133), True, 'import numpy as np\n'), ((548, 577), 'os.path.join', 'os.path.join', (['self.root', 'file'], {}), '(self.root, file)\n', (560, 577), False, 'import os\n'), ((378, 393), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (388, 393), False, 'import os\n')]
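`numpy_uint8_to_int16` above reassembles a 16-bit depth value whose bytes were stored in two uint8 channels. A round-trip sketch of the packing convention it assumes (high byte in channel 0, low byte in channel 1; values kept small enough that the int16 shift does not overflow):

```python
import numpy as np

depth16 = np.array([[1000, 2047], [0, 513]], dtype=np.int16)
# pack into two uint8 channels: high byte first, low byte second
packed = np.stack([(depth16 >> 8).astype(np.uint8),
                   (depth16 & 0xFF).astype(np.uint8)], axis=-1)

recovered = LoaderNumpy.numpy_uint8_to_int16(packed)
print(np.array_equal(recovered, depth16))  # True
```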
from .quantizer import quantize
from .io_helper import write_quantized_output
from numpy import floor
import os
from jinja2 import Environment, FileSystemLoader
from shutil import copy
from .caf_verilog_base import CafVerilogBase


class CpxMultiply(CafVerilogBase):

    def __init__(self, x, y, x_i_bits=12, x_q_bits=0, y_i_bits=12, y_q_bits=0, output_dir='.'):
        """
        :param x:
        :param y:
        :param x_i_bits: Bit length for real values
        :param x_q_bits: Bit length for imaginary values
        :param y_i_bits: Bit length for real values
        :param y_q_bits: Bit length for imaginary values
        :param output_dir: Directory to place modules and test files
        """
        self.x = x
        self.y = y
        self.cpx_input_length_check()
        self.x_i_bits = x_i_bits
        self.x_q_bits = x_q_bits if x_q_bits else self.x_i_bits
        self.y_i_bits = y_i_bits
        self.y_q_bits = y_q_bits if y_q_bits else self.y_i_bits
        self.x_quant = quantize(self.x, self.x_i_bits, self.x_q_bits)
        self.y_quant = quantize(self.y, self.y_i_bits, self.y_q_bits)
        self.output_dir = output_dir
        self.tb_filename = 'cpx_multiply_tb.v'
        self.test_value_filename = 'cpx_multiply_input_values.txt'
        self.test_output_filename = 'cpx_multiply_output_values.txt'
        copy(self.module_path(), self.output_dir)

    def gen_tb(self):
        """
        Generate a test bench using quantized values.

        :return:
        """
        write_quantized_output(self.output_dir, self.test_value_filename,
                               self.x_quant, self.y_quant)
        self.write_cpx_multiply_tb_module()

    def gen_quantized_output(self):
        """
        Perform the multiplication and then quantize to the closest representation
        of what the verilog module should produce for the given bit length.

        :return:
        """
        # x, y: (x + yi)(u + vi) = (xu - yv) + (xv + yu)i
        xu = floor(self.x_quant.real) * floor(self.y_quant.real)
        yv = floor(self.x_quant.imag) * floor(self.y_quant.imag)
        xv = floor(self.x_quant.real) * floor(self.y_quant.imag)
        yu = floor(self.x_quant.imag) * floor(self.y_quant.real)
        i_sub = xu - yv
        y_add = xv + yu
        final_out = i_sub + y_add * 1j
        return final_out

    def write_cpx_multiply_tb_module(self):
        """
        Write out a testbench file to test the cpx_multiply module.

        :return:
        """
        out_tb = None
        t_dict = self.template_dict("cpx_multiply_tb")
        template_loader = FileSystemLoader(searchpath=self.tb_module_path())
        env = Environment(loader=template_loader)
        template = env.get_template('cpx_multiply_tb.v')
        out_tb = template.render(**t_dict)
        with open(os.path.join(self.output_dir, self.tb_filename), 'w+') as tb_file:
            tb_file.write(out_tb)

    def template_dict(self, inst_name=None):
        t_dict = {'xi_bits': self.x_i_bits, 'xq_bits': self.x_q_bits,
                  'yi_bits': self.y_i_bits, 'yq_bits': self.y_q_bits}
        t_dict['i_out_bits'] = self.x_i_bits + self.y_i_bits
        t_dict['q_out_bits'] = self.x_q_bits + self.y_q_bits
        t_dict['cpx_multiply_input'] = os.path.abspath(os.path.join(self.output_dir, self.test_value_filename))
        t_dict['cpx_multiply_output'] = os.path.abspath(os.path.join(self.output_dir, self.test_output_filename))
        t_dict['cpx_multiply_name'] = inst_name if inst_name else 'cpx_multiply_tb'
        return t_dict
[ "os.path.join", "numpy.floor", "jinja2.Environment" ]
[((2683, 2718), 'jinja2.Environment', 'Environment', ([], {'loader': 'template_loader'}), '(loader=template_loader)\n', (2694, 2718), False, 'from jinja2 import Environment, FileSystemLoader\n'), ((2003, 2027), 'numpy.floor', 'floor', (['self.x_quant.real'], {}), '(self.x_quant.real)\n', (2008, 2027), False, 'from numpy import floor\n'), ((2030, 2054), 'numpy.floor', 'floor', (['self.y_quant.real'], {}), '(self.y_quant.real)\n', (2035, 2054), False, 'from numpy import floor\n'), ((2068, 2092), 'numpy.floor', 'floor', (['self.x_quant.imag'], {}), '(self.x_quant.imag)\n', (2073, 2092), False, 'from numpy import floor\n'), ((2095, 2119), 'numpy.floor', 'floor', (['self.y_quant.imag'], {}), '(self.y_quant.imag)\n', (2100, 2119), False, 'from numpy import floor\n'), ((2133, 2157), 'numpy.floor', 'floor', (['self.x_quant.real'], {}), '(self.x_quant.real)\n', (2138, 2157), False, 'from numpy import floor\n'), ((2160, 2184), 'numpy.floor', 'floor', (['self.y_quant.imag'], {}), '(self.y_quant.imag)\n', (2165, 2184), False, 'from numpy import floor\n'), ((2198, 2222), 'numpy.floor', 'floor', (['self.x_quant.imag'], {}), '(self.x_quant.imag)\n', (2203, 2222), False, 'from numpy import floor\n'), ((2225, 2249), 'numpy.floor', 'floor', (['self.y_quant.real'], {}), '(self.y_quant.real)\n', (2230, 2249), False, 'from numpy import floor\n'), ((3301, 3356), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.test_value_filename'], {}), '(self.output_dir, self.test_value_filename)\n', (3313, 3356), False, 'import os\n'), ((3414, 3470), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.test_output_filename'], {}), '(self.output_dir, self.test_output_filename)\n', (3426, 3470), False, 'import os\n'), ((2837, 2884), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.tb_filename'], {}), '(self.output_dir, self.tb_filename)\n', (2849, 2884), False, 'import os\n')]
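A minimal usage sketch for the CpxMultiply record above. The import path is an assumption inferred from the record's relative imports; the quadrature test signals, bit widths, and output directory are illustrative.

import numpy as np
# assumed module layout: the package containing the CpxMultiply record above
from caf_verilog.cpx_multiply import CpxMultiply

t = np.arange(64)
x = np.exp(2j * np.pi * 0.05 * t)  # illustrative complex exponentials
y = np.exp(2j * np.pi * 0.10 * t)

cpx = CpxMultiply(x, y, x_i_bits=12, y_i_bits=12, output_dir='.')
cpx.gen_tb()                            # writes cpx_multiply_tb.v and the input value file
expected = cpx.gen_quantized_output()   # quantized reference product for checking the HDL output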
import numpy as np
import teaserpp_python
from Config import Config
import gtsam as gt
from gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, NonlinearFactorGraph,
                   NonlinearISAM, Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3,
                   PinholeCameraCal3_S2, Values, Point3)
# symbol_shorthand_X, symbol_shorthand_L)
from gtsam.symbol_shorthand import X, L
import matplotlib.pyplot as plt

# import g2o
# class PoseOptimizer(g2o.SparseOptimizer):
#     def __init__(self, ):
#         super().__init__()
#         solver = g2o.BlockSolverX(g2o.LinearSolverDenseX())
#         solver = g2o.OptimizationAlgorithmLevenberg(solver)
#         super().set_algorithm(solver)
#         self.edge_list = []
#         self.edge_outlier = np.array([], dtype=bool)
#         self.v_se3 = g2o.VertexSE3Expmap()
#         self.v_se3.set_id(0)  # internal id
#         self.v_se3.set_fixed(False)
#         super().add_vertex(self.v_se3)
#         self.pose = []
#         self.inv_lvl_sigma2 = np.zeros((8,), dtype=np.float)
#         for idx in np.arange(8):
#             self.inv_lvl_sigma2[idx] = 1./1.2**(2*idx-2)
#
#     def optimize(self, max_iterations=10):
#         self.edge_outlier = np.full(len(self.edge_list), False)
#         for iteration in range(4):
#             # self.v_se3.set_estimate(self.pose)
#             super().initialize_optimization(0)
#             super().optimize(max_iterations)
#             print("ITER", self.vertex(0).estimate().to_vector())
#             print("Initial Correspondence: ", np.count_nonzero(1-self.edge_outlier))
#             n_bad = 0
#             for idx in range(len(self.edge_list)):
#                 e = self.edge_list[idx]
#                 e.compute_error()
#                 chi2 = e.chi2()
#                 # print("Iter ", iteration, "Chi: ", chi2)
#                 if chi2 > 7.815:
#                     self.edge_outlier[idx] = True
#                     e.set_level(1)
#                     n_bad += 1
#                 else:
#                     self.edge_outlier[idx] = False
#                     e.set_level(0)
#                 if iteration == 2:
#                     e.set_robust_kernel(None)
#         # print("NUM BADS: ", n_bad, ":", len(self.edge_list))
#         return self.edge_outlier
#
#     def add_pose(self, pose, fixed=False):
#         self.v_se3.set_estimate(pose)
#         self.pose = pose
#
#     def add_point(self, world_pos,
#                   measurement_cam,
#                   octave,
#                   robust_kernel=g2o.RobustKernelHuber(np.sqrt(7.815))):  # ??% CI
#         edge = g2o.EdgeStereoSE3ProjectXYZOnlyPose()
#         edge.set_vertex(0, self.vertex(0))
#
#         fx = Config().fx
#         fy = Config().fy
#         cx = Config().cx
#         cy = Config().cy
#         bf = Config().bf
#
#         edge.fx = fx
#         edge.fy = fy
#         edge.cx = cx
#         edge.cy = cy
#         edge.bf = bf
#         edge.Xw = world_pos
#
#         edge.set_measurement(measurement_cam)  # projection
#         information = self.inv_lvl_sigma2[octave]*np.identity(3)
#         edge.set_information(information)
#
#         if robust_kernel is not None:
#             edge.set_robust_kernel(robust_kernel)
#
#         super().add_edge(edge)
#         self.edge_list.append(edge)
#
#     def get_pose(self):
#         return self.vertex(0).estimate()


class PoseOptimizerTeaser:
    def __init__(self):
        self.NOISE_BOUND = 0.1  # 0.05
        self.solver_params = teaserpp_python.RobustRegistrationSolver.Params()
        self.solver_params.cbar2 = 0.6  # 1
        self.solver_params.noise_bound = self.NOISE_BOUND
        self.solver_params.estimate_scaling = False
        self.solver_params.rotation_estimation_algorithm = \
            teaserpp_python.RobustRegistrationSolver.ROTATION_ESTIMATION_ALGORITHM.GNC_TLS
        self.solver_params.rotation_gnc_factor = 1.4
        self.solver_params.rotation_max_iterations = 200
        self.solver_params.rotation_cost_threshold = 1e-12
        self.solver = teaserpp_python.RobustRegistrationSolver(self.solver_params)

    def optimize(self, src, dst):
        # start = time.time()
        self.solver.solve(src, dst)
        # end = time.time()
        solution = self.solver.getSolution()
        # assemble a 4x4 homogeneous transform from the estimated rotation and translation
        trans = np.hstack((solution.rotation, np.expand_dims(solution.translation, axis=1)))
        trans = np.concatenate((trans, np.expand_dims(np.array([0, 0, 0, 1]), axis=1).T), axis=0)
        return trans


class PoseOptimizerGTSAM:
    def __init__(self):
        fx = Config().fx
        fy = Config().fy
        cx = Config().cx
        cy = Config().cy
        bf = Config().bf

        # Create realistic calibration and measurement noise model
        # format: fx fy skew cx cy baseline
        baseline = bf / fx
        self.K_stereo = gt.Cal3_S2Stereo(fx, fy, 0.0, cx, cy, baseline)
        self.K_mono = gt.Cal3_S2(fx, fy, 0.0, cx, cy)
        self.deltaMono = np.sqrt(5.991)
        self.deltaStereo = np.sqrt(7.815)
        self.depth_threshold = bf / fx * 60

        # Create graph container and add factors to it
        self.graph = gt.NonlinearFactorGraph()
        # Create initial estimate for camera poses and landmarks
        self.initialEstimate = gt.Values()
        # add a constraint on the starting pose
        # first_pose = gt.Pose3()
        # self.graph.add(gt.NonlinearEqualityPose3(X(1), first_pose))
        self.inv_lvl_sigma2 = np.zeros((8,), dtype=float)
        for idx in np.arange(8):
            self.inv_lvl_sigma2[idx] = 1. / 1.2 ** (2 * idx - 2)
        # point counter for landmarks and octave container
        self.counter = 1
        self.octave = []
        self.is_stereo = []

    def add_pose(self, R, t):
        # Add measurements
        # pose 1
        # graph.add(gt.GenericStereoFactor3D(gt.StereoPoint2(520, 480, 440), stereo_model, x1, l1, K))
        # graph.add(gt.GenericStereoFactor3D(gt.StereoPoint2(120, 80, 440), stereo_model, x1, l2, K))
        # graph.add(gt.GenericStereoFactor3D(gt.StereoPoint2(320, 280, 140), stereo_model, x1, l3, K))
        # pose 2
        # graph.add(gt.GenericStereoFactor3D(gt.StereoPoint2(570, 520, 490), stereo_model, x2, l1, K))
        # graph.add(gt.GenericStereoFactor3D(gt.StereoPoint2(70, 20, 490), stereo_model, x2, l2, K))
        # graph.add(gt.GenericStereoFactor3D(gt.StereoPoint2(320, 270, 115), stereo_model, x2, l3, K))
        # self.initialEstimate.insert(X(1), gt.Rot3(pose[0]), gt.Point3(pose[1]))
        t = t.reshape((3, 1))
        self.initialEstimate.insert(X(1), gt.Pose3(np.concatenate((R, t), axis=1)))

    def add_point(self, pointsInitial, measurements, octave):
        if pointsInitial[-1] > self.depth_threshold:
            # distant point: fall back to a monocular projection factor
            information = self.inv_lvl_sigma2[octave] * np.identity(2)
            stereo_model = gt.noiseModel_Diagonal.Information(information)
            huber = gt.noiseModel_mEstimator_Huber.Create(self.deltaMono)
            robust_model = gt.noiseModel_Robust(huber, stereo_model)
            factor = gt.GenericProjectionFactorCal3_S2(gt.Point2(measurements[0], measurements[2]),
                                                       robust_model, X(1), L(self.counter), self.K_mono)
            self.is_stereo.append(False)
        else:
            information = self.inv_lvl_sigma2[octave] * np.identity(3)
            stereo_model = gt.noiseModel_Diagonal.Information(information)
            huber = gt.noiseModel_mEstimator_Huber.Create(self.deltaStereo)
            robust_model = gt.noiseModel_Robust(huber, stereo_model)
            factor = gt.GenericStereoFactor3D(gt.StereoPoint2(*tuple(measurements)), robust_model,
                                              X(1), L(self.counter), self.K_stereo)
            self.is_stereo.append(True)

        self.graph.add(gt.NonlinearEqualityPoint3(L(self.counter), gt.Point3(pointsInitial)))
        self.initialEstimate.insert(L(self.counter), gt.Point3(pointsInitial))
        self.graph.add(factor)
        self.octave.append(octave)
        self.counter += 1

    def optimize(self, flag_verbose=False):
        # optimize
        edge_outlier = np.full(self.counter - 1, False)
        error_th_stereo = [7.815, 7.815, 5, 5]
        error_th_mono = [5.991, 5.991, 3.5, 3.5]
        # error_th_stereo = [7.815, 7.815, 7.815, 7.815]
        # error_th_mono = [5.991, 5.991, 5.991, 5.991]
        for iteration in range(4):
            if flag_verbose:
                errors = []
            optimizer = gt.LevenbergMarquardtOptimizer(self.graph, self.initialEstimate)
            result = optimizer.optimize()
            n_bad = 0
            if flag_verbose:
                print(f"Number of Factors: {self.graph.nrFactors() - self.graph.size() // 2, self.graph.size() // 2}")
            error_s = error_th_stereo[iteration]
            error_m = error_th_mono[iteration]
            # graph layout: equality constraint at even indices, measurement factor at
            # odd indices, so the factor at graph slot idx belongs to landmark idx // 2;
            # is_stereo and octave hold one entry per landmark
            for idx in range(1, self.graph.size(), 2):
                point = idx // 2
                try:
                    if self.is_stereo[point]:
                        factor = gt.dynamic_cast_GenericStereoFactor3D_NonlinearFactor(self.graph.at(idx))
                    else:
                        factor = gt.dynamic_cast_GenericProjectionFactorCal3_S2_NonlinearFactor(self.graph.at(idx))
                except Exception:
                    if flag_verbose:
                        errors.append(0)
                    continue
                error = factor.error(result)
                # print(error)
                if flag_verbose:
                    errors.append(error)
                # if error > 7.815:
                if (self.is_stereo[point] and error > error_s) or (not self.is_stereo[point] and error > error_m):
                    edge_outlier[point] = True
                    self.graph.remove(idx)
                    n_bad += 1
                else:
                    edge_outlier[point] = False
                if iteration == 2:
                    # after two robust iterations, swap in a plain Gaussian factor
                    if self.is_stereo[point]:
                        information = self.inv_lvl_sigma2[self.octave[point]] * np.identity(3)
                        stereo_model = gt.noiseModel_Diagonal.Information(information)
                        new_factor = gt.GenericStereoFactor3D(factor.measured(), stereo_model,
                                                          X(1), L(point + 1), self.K_stereo)
                    else:
                        information = self.inv_lvl_sigma2[self.octave[point]] * np.identity(2)
                        stereo_model = gt.noiseModel_Diagonal.Information(information)
                        new_factor = gt.GenericProjectionFactorCal3_S2(factor.measured(), stereo_model,
                                                                   X(1), L(point + 1), self.K_mono)
                    self.graph.replace(idx, new_factor)
            if flag_verbose:
                fig, ax = plt.subplots()
                ax.bar(np.arange(0, len(errors)).tolist(), errors)
                plt.show()
            print("NUM BADS: ", n_bad)
        pose = result.atPose3(X(1))
        # marginals = gt.Marginals(self.graph, result)
        # cov = marginals.marginalCovariance(gt.X(1))
        return pose, edge_outlier  # self.graph, result


class PoseGraphOptimizerGTSAM:
    def __init__(self):
        # Create graph container and add factors to it
        self.graph = gt.NonlinearFactorGraph()
        # Create initial estimate for camera poses and landmarks
        self.initialEstimate = gt.Values()
        sigmas = np.array([5 * np.pi / 180, 5 * np.pi / 180, 5 * np.pi / 180, 0.05, 0.05, 0.05])
        self.covariance = gt.noiseModel.Diagonal.Sigmas(sigmas)
        self.graph.add(gt.NonlinearEqualityPose3(X(0), gt.Pose3(np.eye(4))))
        self.result = None
        self.marginals = None

    def add_node(self, kf):
        self.initialEstimate.insert(X(kf.kfID), gt.Pose3(kf.pose_matrix()))
        for kf_n, rel_pose, _ in kf.neighbors:
            if kf_n.kfID > kf.kfID:
                continue
            self.graph.add(gt.BetweenFactorPose3(X(kf.kfID), X(kf_n.kfID), gt.Pose3(rel_pose),
                                                  self.covariance))

    def add_node_optimize(self, kf):
        self.add_node(kf)
        result, marginals = self.optimize()
        return result, marginals

    def optimize(self):
        optimizer = gt.LevenbergMarquardtOptimizer(self.graph, self.initialEstimate)
        result = optimizer.optimize()
        marginals = gt.Marginals(self.graph, result)
        return result, marginals


class PoseOptimizerRANSAC:
    def __init__(self):
        self.n_iteration = 100

    @classmethod
    def procrustes(cls, X, Y, scaling=True, reflection='best'):
        # note: the parameters X and Y shadow the gtsam symbol shorthand imports
        # inside this method
        """
        A port of MATLAB's `procrustes` function to Numpy.

        Procrustes analysis determines a linear transformation (translation,
        reflection, orthogonal rotation and scaling) of the points in Y to best
        conform them to the points in matrix X, using the sum of squared errors
        as the goodness of fit criterion.

            d, Z, [tform] = procrustes(X, Y)

        Inputs:
        ------------
        X, Y
            matrices of target and input coordinates. they must have equal
            numbers of points (rows), but Y may have fewer dimensions (columns)
            than X.

        scaling
            if False, the scaling component of the transformation is forced to 1

        reflection
            if 'best' (default), the transformation solution may or may not
            include a reflection component, depending on which fits the data
            best. setting reflection to True or False forces a solution with
            reflection or no reflection respectively.

        Outputs
        ------------
        d
            the residual sum of squared errors, normalized according to a
            measure of the scale of X, ((X - X.mean(0))**2).sum()

        Z
            the matrix of transformed Y-values

        tform
            a dict specifying the rotation, translation and scaling that
            maps X --> Y
        """
        n, m = X.shape
        ny, my = Y.shape

        muX = X.mean(0)
        muY = Y.mean(0)

        X0 = X - muX
        Y0 = Y - muY

        ssX = (X0 ** 2.).sum()
        ssY = (Y0 ** 2.).sum()

        # centred Frobenius norm
        normX = np.sqrt(ssX)
        normY = np.sqrt(ssY)

        # scale to equal (unit) norm
        X0 /= normX
        Y0 /= normY

        if my < m:
            Y0 = np.concatenate((Y0, np.zeros((n, m - my))), 0)

        # optimum rotation matrix of Y
        A = np.dot(X0.T, Y0)
        U, s, Vt = np.linalg.svd(A, full_matrices=False)
        V = Vt.T
        T = np.dot(V, U.T)

        if reflection != 'best':
            # does the current solution use a reflection?
            have_reflection = np.linalg.det(T) < 0

            # if that's not what was specified, force another reflection
            if reflection != have_reflection:
                V[:, -1] *= -1
                s[-1] *= -1
                T = np.dot(V, U.T)

        traceTA = s.sum()

        if scaling:
            # optimum scaling of Y
            b = traceTA * normX / normY
            # standardised distance between X and b*Y*T + c
            d = 1 - traceTA ** 2
            # transformed coords
            Z = normX * traceTA * np.dot(Y0, T) + muX
        else:
            b = 1
            d = 1 + ssY / ssX - 2 * traceTA * normY / normX
            Z = normY * np.dot(Y0, T) + muX

        # transformation matrix
        if my < m:
            T = T[:my, :]
        c = muX - b * np.dot(muY, T)

        # transformation values
        tform = {'rotation': T, 'scale': b, 'translation': c}

        return d, Z, tform
[ "numpy.sqrt", "gtsam.Pose3", "gtsam.Point3", "gtsam.Marginals", "numpy.array", "gtsam.Values", "numpy.arange", "gtsam.symbol_shorthand.X", "teaserpp_python.RobustRegistrationSolver.Params", "gtsam.noiseModel_Diagonal.Information", "teaserpp_python.RobustRegistrationSolver", "numpy.dot", "num...
[((3547, 3596), 'teaserpp_python.RobustRegistrationSolver.Params', 'teaserpp_python.RobustRegistrationSolver.Params', ([], {}), '()\n', (3594, 3596), False, 'import teaserpp_python\n'), ((4094, 4154), 'teaserpp_python.RobustRegistrationSolver', 'teaserpp_python.RobustRegistrationSolver', (['self.solver_params'], {}), '(self.solver_params)\n', (4134, 4154), False, 'import teaserpp_python\n'), ((4881, 4928), 'gtsam.Cal3_S2Stereo', 'gt.Cal3_S2Stereo', (['fx', 'fy', '(0.0)', 'cx', 'cy', 'baseline'], {}), '(fx, fy, 0.0, cx, cy, baseline)\n', (4897, 4928), True, 'import gtsam as gt\n'), ((4951, 4982), 'gtsam.Cal3_S2', 'gt.Cal3_S2', (['fx', 'fy', '(0.0)', 'cx', 'cy'], {}), '(fx, fy, 0.0, cx, cy)\n', (4961, 4982), True, 'import gtsam as gt\n'), ((5009, 5023), 'numpy.sqrt', 'np.sqrt', (['(5.991)'], {}), '(5.991)\n', (5016, 5023), True, 'import numpy as np\n'), ((5051, 5065), 'numpy.sqrt', 'np.sqrt', (['(7.815)'], {}), '(7.815)\n', (5058, 5065), True, 'import numpy as np\n'), ((5186, 5211), 'gtsam.NonlinearFactorGraph', 'gt.NonlinearFactorGraph', ([], {}), '()\n', (5209, 5211), True, 'import gtsam as gt\n'), ((5309, 5320), 'gtsam.Values', 'gt.Values', ([], {}), '()\n', (5318, 5320), True, 'import gtsam as gt\n'), ((5505, 5535), 'numpy.zeros', 'np.zeros', (['(8,)'], {'dtype': 'np.float'}), '((8,), dtype=np.float)\n', (5513, 5535), True, 'import numpy as np\n'), ((5555, 5567), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (5564, 5567), True, 'import numpy as np\n'), ((8201, 8233), 'numpy.full', 'np.full', (['(self.counter - 1)', '(False)'], {}), '(self.counter - 1, False)\n', (8208, 8233), True, 'import numpy as np\n'), ((11464, 11489), 'gtsam.NonlinearFactorGraph', 'gt.NonlinearFactorGraph', ([], {}), '()\n', (11487, 11489), True, 'import gtsam as gt\n'), ((11587, 11598), 'gtsam.Values', 'gt.Values', ([], {}), '()\n', (11596, 11598), True, 'import gtsam as gt\n'), ((11617, 11696), 'numpy.array', 'np.array', (['[5 * np.pi / 180, 5 * np.pi / 180, 5 * np.pi / 180, 0.05, 0.05, 0.05]'], {}), '([5 * np.pi / 180, 5 * np.pi / 180, 5 * np.pi / 180, 0.05, 0.05, 0.05])\n', (11625, 11696), True, 'import numpy as np\n'), ((11711, 11748), 'gtsam.noiseModel.Diagonal.Sigmas', 'gt.noiseModel.Diagonal.Sigmas', (['sigmas'], {}), '(sigmas)\n', (11740, 11748), True, 'import gtsam as gt\n'), ((12445, 12509), 'gtsam.LevenbergMarquardtOptimizer', 'gt.LevenbergMarquardtOptimizer', (['self.graph', 'self.initialEstimate'], {}), '(self.graph, self.initialEstimate)\n', (12475, 12509), True, 'import gtsam as gt\n'), ((12568, 12600), 'gtsam.Marginals', 'gt.Marginals', (['self.graph', 'result'], {}), '(self.graph, result)\n', (12580, 12600), True, 'import gtsam as gt\n'), ((14273, 14282), 'gtsam.symbol_shorthand.X.mean', 'X.mean', (['(0)'], {}), '(0)\n', (14279, 14282), False, 'from gtsam.symbol_shorthand import X, L\n'), ((14463, 14475), 'numpy.sqrt', 'np.sqrt', (['ssX'], {}), '(ssX)\n', (14470, 14475), True, 'import numpy as np\n'), ((14492, 14504), 'numpy.sqrt', 'np.sqrt', (['ssY'], {}), '(ssY)\n', (14499, 14504), True, 'import numpy as np\n'), ((14717, 14733), 'numpy.dot', 'np.dot', (['X0.T', 'Y0'], {}), '(X0.T, Y0)\n', (14723, 14733), True, 'import numpy as np\n'), ((14753, 14790), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {'full_matrices': '(False)'}), '(A, full_matrices=False)\n', (14766, 14790), True, 'import numpy as np\n'), ((14820, 14834), 'numpy.dot', 'np.dot', (['V', 'U.T'], {}), '(V, U.T)\n', (14826, 14834), True, 'import numpy as np\n'), ((4609, 4617), 'Config.Config', 'Config', ([], {}), '()\n', (4615, 
4617), False, 'from Config import Config\n'), ((4634, 4642), 'Config.Config', 'Config', ([], {}), '()\n', (4640, 4642), False, 'from Config import Config\n'), ((4659, 4667), 'Config.Config', 'Config', ([], {}), '()\n', (4665, 4667), False, 'from Config import Config\n'), ((4684, 4692), 'Config.Config', 'Config', ([], {}), '()\n', (4690, 4692), False, 'from Config import Config\n'), ((4709, 4717), 'Config.Config', 'Config', ([], {}), '()\n', (4715, 4717), False, 'from Config import Config\n'), ((6629, 6633), 'gtsam.symbol_shorthand.X', 'X', (['(1)'], {}), '(1)\n', (6630, 6633), False, 'from gtsam.symbol_shorthand import X, L\n'), ((6892, 6939), 'gtsam.noiseModel_Diagonal.Information', 'gt.noiseModel_Diagonal.Information', (['information'], {}), '(information)\n', (6926, 6939), True, 'import gtsam as gt\n'), ((6960, 7013), 'gtsam.noiseModel_mEstimator_Huber.Create', 'gt.noiseModel_mEstimator_Huber.Create', (['self.deltaMono'], {}), '(self.deltaMono)\n', (6997, 7013), True, 'import gtsam as gt\n'), ((7041, 7082), 'gtsam.noiseModel_Robust', 'gt.noiseModel_Robust', (['huber', 'stereo_model'], {}), '(huber, stereo_model)\n', (7061, 7082), True, 'import gtsam as gt\n'), ((7432, 7479), 'gtsam.noiseModel_Diagonal.Information', 'gt.noiseModel_Diagonal.Information', (['information'], {}), '(information)\n', (7466, 7479), True, 'import gtsam as gt\n'), ((7500, 7555), 'gtsam.noiseModel_mEstimator_Huber.Create', 'gt.noiseModel_mEstimator_Huber.Create', (['self.deltaStereo'], {}), '(self.deltaStereo)\n', (7537, 7555), True, 'import gtsam as gt\n'), ((7583, 7624), 'gtsam.noiseModel_Robust', 'gt.noiseModel_Robust', (['huber', 'stereo_model'], {}), '(huber, stereo_model)\n', (7603, 7624), True, 'import gtsam as gt\n'), ((7979, 7994), 'gtsam.symbol_shorthand.L', 'L', (['self.counter'], {}), '(self.counter)\n', (7980, 7994), False, 'from gtsam.symbol_shorthand import X, L\n'), ((7996, 8020), 'gtsam.Point3', 'gt.Point3', (['pointsInitial'], {}), '(pointsInitial)\n', (8005, 8020), True, 'import gtsam as gt\n'), ((8556, 8620), 'gtsam.LevenbergMarquardtOptimizer', 'gt.LevenbergMarquardtOptimizer', (['self.graph', 'self.initialEstimate'], {}), '(self.graph, self.initialEstimate)\n', (8586, 8620), True, 'import gtsam as gt\n'), ((11158, 11162), 'gtsam.symbol_shorthand.X', 'X', (['(1)'], {}), '(1)\n', (11159, 11162), False, 'from gtsam.symbol_shorthand import X, L\n'), ((11949, 11959), 'gtsam.symbol_shorthand.X', 'X', (['kf.kfID'], {}), '(kf.kfID)\n', (11950, 11959), False, 'from gtsam.symbol_shorthand import X, L\n'), ((4377, 4421), 'numpy.expand_dims', 'np.expand_dims', (['solution.translation'], {'axis': '(1)'}), '(solution.translation, axis=1)\n', (4391, 4421), True, 'import numpy as np\n'), ((6644, 6674), 'numpy.concatenate', 'np.concatenate', (['(R, t)'], {'axis': '(1)'}), '((R, t), axis=1)\n', (6658, 6674), True, 'import numpy as np\n'), ((6850, 6864), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (6861, 6864), True, 'import numpy as np\n'), ((7138, 7181), 'gtsam.Point2', 'gt.Point2', (['measurements[0]', 'measurements[2]'], {}), '(measurements[0], measurements[2])\n', (7147, 7181), True, 'import gtsam as gt\n'), ((7243, 7247), 'gtsam.symbol_shorthand.X', 'X', (['(1)'], {}), '(1)\n', (7244, 7247), False, 'from gtsam.symbol_shorthand import X, L\n'), ((7249, 7264), 'gtsam.symbol_shorthand.L', 'L', (['self.counter'], {}), '(self.counter)\n', (7250, 7264), False, 'from gtsam.symbol_shorthand import X, L\n'), ((7390, 7404), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (7401, 7404), True, 
'import numpy as np\n'), ((7770, 7774), 'gtsam.symbol_shorthand.X', 'X', (['(1)'], {}), '(1)\n', (7771, 7774), False, 'from gtsam.symbol_shorthand import X, L\n'), ((7776, 7791), 'gtsam.symbol_shorthand.L', 'L', (['self.counter'], {}), '(self.counter)\n', (7777, 7791), False, 'from gtsam.symbol_shorthand import X, L\n'), ((7899, 7914), 'gtsam.symbol_shorthand.L', 'L', (['self.counter'], {}), '(self.counter)\n', (7900, 7914), False, 'from gtsam.symbol_shorthand import X, L\n'), ((7916, 7940), 'gtsam.Point3', 'gt.Point3', (['pointsInitial'], {}), '(pointsInitial)\n', (7925, 7940), True, 'import gtsam as gt\n'), ((10974, 10988), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10986, 10988), True, 'import matplotlib.pyplot as plt\n'), ((11072, 11082), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11080, 11082), True, 'import matplotlib.pyplot as plt\n'), ((11798, 11802), 'gtsam.symbol_shorthand.X', 'X', (['(0)'], {}), '(0)\n', (11799, 11802), False, 'from gtsam.symbol_shorthand import X, L\n'), ((14962, 14978), 'numpy.linalg.det', 'np.linalg.det', (['T'], {}), '(T)\n', (14975, 14978), True, 'import numpy as np\n'), ((15182, 15196), 'numpy.dot', 'np.dot', (['V', 'U.T'], {}), '(V, U.T)\n', (15188, 15196), True, 'import numpy as np\n'), ((15739, 15753), 'numpy.dot', 'np.dot', (['muY', 'T'], {}), '(muY, T)\n', (15745, 15753), True, 'import numpy as np\n'), ((11813, 11822), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (11819, 11822), True, 'import numpy as np\n'), ((12146, 12156), 'gtsam.symbol_shorthand.X', 'X', (['kf.kfID'], {}), '(kf.kfID)\n', (12147, 12156), False, 'from gtsam.symbol_shorthand import X, L\n'), ((12158, 12170), 'gtsam.symbol_shorthand.X', 'X', (['kf_n.kfID'], {}), '(kf_n.kfID)\n', (12159, 12170), False, 'from gtsam.symbol_shorthand import X, L\n'), ((12221, 12239), 'gtsam.Pose3', 'gt.Pose3', (['rel_pose'], {}), '(rel_pose)\n', (12229, 12239), True, 'import gtsam as gt\n'), ((14640, 14659), 'numpy.zeros', 'np.zeros', (['n', '(m - my)'], {}), '(n, m - my)\n', (14648, 14659), True, 'import numpy as np\n'), ((15482, 15495), 'numpy.dot', 'np.dot', (['Y0', 'T'], {}), '(Y0, T)\n', (15488, 15495), True, 'import numpy as np\n'), ((15619, 15632), 'numpy.dot', 'np.dot', (['Y0', 'T'], {}), '(Y0, T)\n', (15625, 15632), True, 'import numpy as np\n'), ((4478, 4500), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (4486, 4500), True, 'import numpy as np\n'), ((10130, 10177), 'gtsam.noiseModel_Diagonal.Information', 'gt.noiseModel_Diagonal.Information', (['information'], {}), '(information)\n', (10164, 10177), True, 'import gtsam as gt\n'), ((10532, 10579), 'gtsam.noiseModel_Diagonal.Information', 'gt.noiseModel_Diagonal.Information', (['information'], {}), '(information)\n', (10566, 10579), True, 'import gtsam as gt\n'), ((10076, 10090), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (10087, 10090), True, 'import numpy as np\n'), ((10273, 10277), 'gtsam.symbol_shorthand.X', 'X', (['(1)'], {}), '(1)\n', (10274, 10277), False, 'from gtsam.symbol_shorthand import X, L\n'), ((10341, 10356), 'gtsam.symbol_shorthand.L', 'L', (['(idx // 2 + 1)'], {}), '(idx // 2 + 1)\n', (10342, 10356), False, 'from gtsam.symbol_shorthand import X, L\n'), ((10478, 10492), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (10489, 10492), True, 'import numpy as np\n'), ((10755, 10759), 'gtsam.symbol_shorthand.X', 'X', (['(1)'], {}), '(1)\n', (10756, 10759), False, 'from gtsam.symbol_shorthand import X, L\n'), ((10832, 10847), 
'gtsam.symbol_shorthand.L', 'L', (['(idx // 2 + 1)'], {}), '(idx // 2 + 1)\n', (10833, 10847), False, 'from gtsam.symbol_shorthand import X, L\n')]
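The PoseOptimizerRANSAC.procrustes helper in the record above is self-contained NumPy, so it can be exercised directly; the random point set and the rigid transform below are illustrative.

import numpy as np
# PoseOptimizerRANSAC is the class from the pose-optimizer record above
X = np.random.randn(20, 3)                      # target points
angle = np.deg2rad(30)
R = np.array([[np.cos(angle), -np.sin(angle), 0],
              [np.sin(angle),  np.cos(angle), 0],
              [0,              0,             1]])
Y = X @ R.T + np.array([0.5, -0.2, 1.0])      # rotated + translated copy of X
d, Z, tform = PoseOptimizerRANSAC.procrustes(X, Y, scaling=False)
print(d)                                      # residual is ~0 for a noise-free rigid transform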
try:
    import torch
    import torchmetrics

    from latte.metrics.torch import interpolatability as T

    has_torch_and_tm = True
except ImportError:
    has_torch_and_tm = False

import pytest
import numpy as np

from latte.metrics.core import interpolatability as C


@pytest.mark.skipif(not has_torch_and_tm, reason="requires torch and torchmetrics")
class TestSmoothness:
    def test_smoothness(self):
        core_smth = C.Smoothness()
        torch_smth = T.Smoothness()

        for _ in range(3):
            z = np.repeat(
                np.repeat(np.arange(16)[None, None, :], 16, axis=0), 8, axis=1
            )
            a = np.random.randn(16, 3, 16)

            ztm = torch.from_numpy(z)
            atm = torch.from_numpy(a)

            core_smth.update_state(z, a)
            torch_smth.update(ztm, atm)

        val = core_smth.compute()
        valtm = torch_smth.compute()

        np.testing.assert_allclose(val, valtm)
        torch.testing.assert_allclose(val, valtm)
[ "torch.testing.assert_allclose", "numpy.arange", "numpy.testing.assert_allclose", "torch.from_numpy", "pytest.mark.skipif", "latte.metrics.core.interpolatability.Smoothness", "numpy.random.randn", "latte.metrics.torch.interpolatability.Smoothness" ]
[((263, 350), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_torch_and_tm)'], {'reason': '"""requires torch and torchmetrics"""'}), "(not has_torch_and_tm, reason=\n 'requires torch and torchmetrics')\n", (281, 350), False, 'import pytest\n'), ((419, 433), 'latte.metrics.core.interpolatability.Smoothness', 'C.Smoothness', ([], {}), '()\n', (431, 433), True, 'from latte.metrics.core import interpolatability as C\n'), ((455, 469), 'latte.metrics.torch.interpolatability.Smoothness', 'T.Smoothness', ([], {}), '()\n', (467, 469), True, 'from latte.metrics.torch import interpolatability as T\n'), ((901, 939), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val', 'valtm'], {}), '(val, valtm)\n', (927, 939), True, 'import numpy as np\n'), ((949, 990), 'torch.testing.assert_allclose', 'torch.testing.assert_allclose', (['val', 'valtm'], {}), '(val, valtm)\n', (978, 990), False, 'import torch\n'), ((634, 660), 'numpy.random.randn', 'np.random.randn', (['(16)', '(3)', '(16)'], {}), '(16, 3, 16)\n', (649, 660), True, 'import numpy as np\n'), ((680, 699), 'torch.from_numpy', 'torch.from_numpy', (['z'], {}), '(z)\n', (696, 699), False, 'import torch\n'), ((718, 737), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (734, 737), False, 'import torch\n'), ((551, 564), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (560, 564), True, 'import numpy as np\n')]
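A standalone sketch of the same metric outside pytest, using only the NumPy core backend exercised by the test above; the latent and attribute arrays are random stand-ins shaped like the test's.

import numpy as np
from latte.metrics.core import interpolatability as C

smoothness = C.Smoothness()
z = np.repeat(np.repeat(np.arange(16)[None, None, :], 16, axis=0), 8, axis=1)  # as in the test
a = np.random.randn(16, 3, 16)
smoothness.update_state(z, a)   # accumulate one batch
print(smoothness.compute())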
import itertools
import time

import numpy as np
import scipy.ndimage as ndi
import pytest

from mrrt.utils import ImageGeometry, ellipse_im
from mrrt.mri import mri_exp_approx

__all__ = ["test_mri_exp_approx"]


def _test_mri_exp_approx1(
    segments=4,
    nx=64,
    tmax=25e-3,
    dt=5e-6,
    autocorr=False,
    use_rmap=True,
    atype="hist,time,unif",
    nhist=None,
    ctest=True,
    verbose=False,
    tol=None,
):
    if verbose:
        from matplotlib import pyplot as plt
        from pyvolplot import subplot_stack

    ti = np.arange(0, tmax, dt)

    if True:
        # Generate a synthetic fieldmap
        fmap = np.zeros((64, 64))
        fmap[6:27, 9:20] = 90
        fmap[36:57, 9:20] = 120
        fmap[5:26, 29:60] = 30
        fmap[37:58, 29:60] = 60
        if nx != 64:
            fmap = ndi.zoom(fmap, nx / 64, order=0)
        kernel_size = int(np.round(5 * nx / 64))
        smoothing_kernel = np.ones((kernel_size, kernel_size)) / (kernel_size ** 2)
        ndi.convolve(fmap, smoothing_kernel, output=fmap)
        fmap = fmap + 10

    if verbose:
        plt.figure()
        plt.imshow(fmap, interpolation="nearest", cmap="gray")
        plt.title("Field Map")

    if use_rmap:
        # generate a T2 relaxation map
        rmap = (
            np.asarray(
                [[0, 0, 18, 23, 0, 20 * 64 / nx], [6, 0, 8, 8, 0, 3 * 64 / nx]]
            )
            * nx
            / 64
        )
        ig = ImageGeometry(shape=(nx, nx), fov=(nx, nx))
        rmap, params = 1 * ellipse_im(ig, rmap, oversample=3)
        if verbose:
            plt.figure()
            plt.imshow(rmap, cmap="gray", interpolation="nearest")
            plt.title("Relax Map")
    else:
        rmap = 0

    zmap = rmap + (2j * np.pi) * fmap
    if not nhist:
        if not np.any(rmap > 0):
            nhist = [40]
        else:
            nhist = [40, 10]

    # autocorr_arg = ['autocorr', True]  # test autocorrelation version
    if True:
        # convert to single precision
        ti = np.asarray(ti, dtype="float32")
        zmap = np.asarray(zmap, dtype="complex64")  # single precision complex

    if isinstance(segments, int):
        pass
    elif isinstance(segments, (list, tuple)) and len(segments) == 2:
        pass
    else:
        raise ValueError("Invalid choice for segments")

    kwargs = {"autocorr": autocorr, "ctest": ctest, "verbose": verbose}
    tstart = time.time()
    if tol is None:
        B, C, hk, zk = mri_exp_approx(
            ti, zmap, segments, approx_type=(atype, nhist), **kwargs
        )
    else:
        B, C, hk, zk = mri_exp_approx(
            ti, zmap, [segments, tol], approx_type=(atype, nhist), **kwargs
        )
    print("\tduration=%g s" % (time.time() - tstart))

    if ctest:
        Eh = np.exp(-ti[:, np.newaxis] * zk.ravel()[np.newaxis, :])
    else:
        Eh = np.exp(-ti[:, np.newaxis] * zmap.ravel()[np.newaxis, :])

    Ep = np.dot(B, C)  # matrix product
    err = np.abs(Eh - Ep)
    mse = np.mean(np.power(err, 2), axis=0)
    if ctest:
        wrms = np.sqrt(np.dot(mse, hk) / np.sum(hk))
    else:
        wrms = -1

    if verbose:
        subplot_stack(1000 * ti, B, title="Basis Components", colors=["k", "m"])

    nf = int(np.floor(nhist[0] / 4))
    if len(nhist) == 2:
        ik = np.array([0, nf, 2 * nf, 3 * nf, nhist[1] - 1]) + 2 * nf * nhist[1]
        ik = ik.tolist()
    elif len(nhist) == 1:
        ik = [0, nf, 2 * nf, 3 * nf, nhist[0] - 1]
    ik = np.asarray(ik, dtype=int)

    mse_mean = mse.mean()
    max_err = err.max()
    if verbose:
        fig = subplot_stack(1000 * ti, Eh[:, ik], colors=["g", "k"])
        fig = subplot_stack(
            1000 * ti,
            Ep[:, ik],
            colors=["b--", "r--"],
            fig=fig,
            title="True and Approx",
        )
        fig = subplot_stack(
            1000 * ti,
            err[:, ik],
            colors=["b--", "r--"],
            title="True and Approx",
        )
    print(
        "\tfor L=%d, wrms=%g, mse = %g, max_err=%g"
        % (B.shape[1], wrms, mse_mean, max_err)
    )
    return wrms, mse_mean, max_err


@pytest.mark.parametrize(
    "use_rmap, alg",
    itertools.product(
        [False, True],
        [
            "hist,svd",
            "hist,time,unif",
            "time,unif",
            "hist,fs,unif",
            "hist,fs,prctile",
            "hist,fs,kmeans",
        ],
    ),
)
@pytest.mark.filterwarnings("ignore:the matrix subclass is not")
def test_mri_exp_approx(use_rmap, alg, verbose=False):
    if alg == "hist,fs,kmeans":
        pytest.importorskip("sklearn")

    tmax = 25e-3  # overall time duration (s)
    wrms, mse_mean, max_err = _test_mri_exp_approx1(
        segments=8,  # number of segments
        nx=64,
        tmax=tmax,
        dt=1e-3,
        autocorr=False,
        use_rmap=use_rmap,
        atype=alg,
        nhist=None,
        ctest=True,
        verbose=verbose,
    )
    if alg == "hist,fs,prctile" or (use_rmap and alg == "hist,fs,kmeans"):
        # may want to just remove these options, as they perform relatively
        # poorly
        assert mse_mean < 0.01
    else:
        assert mse_mean < 1e-5
[ "pytest.mark.filterwarnings", "numpy.array", "scipy.ndimage.zoom", "numpy.arange", "matplotlib.pyplot.imshow", "itertools.product", "numpy.asarray", "numpy.dot", "mrrt.mri.mri_exp_approx", "numpy.round", "numpy.abs", "numpy.ones", "numpy.floor", "scipy.ndimage.convolve", "numpy.any", "...
[((4468, 4531), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:the matrix subclass is not"""'], {}), "('ignore:the matrix subclass is not')\n", (4494, 4531), False, 'import pytest\n'), ((547, 569), 'numpy.arange', 'np.arange', (['(0)', 'tmax', 'dt'], {}), '(0, tmax, dt)\n', (556, 569), True, 'import numpy as np\n'), ((2453, 2464), 'time.time', 'time.time', ([], {}), '()\n', (2462, 2464), False, 'import time\n'), ((2964, 2976), 'numpy.dot', 'np.dot', (['B', 'C'], {}), '(B, C)\n', (2970, 2976), True, 'import numpy as np\n'), ((3005, 3020), 'numpy.abs', 'np.abs', (['(Eh - Ep)'], {}), '(Eh - Ep)\n', (3011, 3020), True, 'import numpy as np\n'), ((3268, 3290), 'numpy.floor', 'np.floor', (['(nhist[0] / 4)'], {}), '(nhist[0] / 4)\n', (3276, 3290), True, 'import numpy as np\n'), ((3507, 3532), 'numpy.asarray', 'np.asarray', (['ik'], {'dtype': 'int'}), '(ik, dtype=int)\n', (3517, 3532), True, 'import numpy as np\n'), ((4227, 4361), 'itertools.product', 'itertools.product', (['[False, True]', "['hist,svd', 'hist,time,unif', 'time,unif', 'hist,fs,unif',\n 'hist,fs,prctile', 'hist,fs,kmeans']"], {}), "([False, True], ['hist,svd', 'hist,time,unif', 'time,unif',\n 'hist,fs,unif', 'hist,fs,prctile', 'hist,fs,kmeans'])\n", (4244, 4361), False, 'import itertools\n'), ((639, 657), 'numpy.zeros', 'np.zeros', (['(64, 64)'], {}), '((64, 64))\n', (647, 657), True, 'import numpy as np\n'), ((1019, 1068), 'scipy.ndimage.convolve', 'ndi.convolve', (['fmap', 'smoothing_kernel'], {'output': 'fmap'}), '(fmap, smoothing_kernel, output=fmap)\n', (1031, 1068), True, 'import scipy.ndimage as ndi\n'), ((1490, 1533), 'mrrt.utils.ImageGeometry', 'ImageGeometry', ([], {'shape': '(nx, nx)', 'fov': '(nx, nx)'}), '(shape=(nx, nx), fov=(nx, nx))\n', (1503, 1533), False, 'from mrrt.utils import ImageGeometry, ellipse_im\n'), ((2060, 2091), 'numpy.asarray', 'np.asarray', (['ti'], {'dtype': '"""float32"""'}), "(ti, dtype='float32')\n", (2070, 2091), True, 'import numpy as np\n'), ((2107, 2142), 'numpy.asarray', 'np.asarray', (['zmap'], {'dtype': '"""complex64"""'}), "(zmap, dtype='complex64')\n", (2117, 2142), True, 'import numpy as np\n'), ((2508, 2580), 'mrrt.mri.mri_exp_approx', 'mri_exp_approx', (['ti', 'zmap', 'segments'], {'approx_type': '(atype, nhist)'}), '(ti, zmap, segments, approx_type=(atype, nhist), **kwargs)\n', (2522, 2580), False, 'from mrrt.mri import mri_exp_approx\n'), ((2636, 2715), 'mrrt.mri.mri_exp_approx', 'mri_exp_approx', (['ti', 'zmap', '[segments, tol]'], {'approx_type': '(atype, nhist)'}), '(ti, zmap, [segments, tol], approx_type=(atype, nhist), **kwargs)\n', (2650, 2715), False, 'from mrrt.mri import mri_exp_approx\n'), ((3039, 3055), 'numpy.power', 'np.power', (['err', '(2)'], {}), '(err, 2)\n', (3047, 3055), True, 'import numpy as np\n'), ((3185, 3257), 'pyvolplot.subplot_stack', 'subplot_stack', (['(1000 * ti)', 'B'], {'title': '"""Basis Components"""', 'colors': "['k', 'm']"}), "(1000 * ti, B, title='Basis Components', colors=['k', 'm'])\n", (3198, 3257), False, 'from pyvolplot import subplot_stack\n'), ((3615, 3669), 'pyvolplot.subplot_stack', 'subplot_stack', (['(1000 * ti)', 'Eh[:, ik]'], {'colors': "['g', 'k']"}), "(1000 * ti, Eh[:, ik], colors=['g', 'k'])\n", (3628, 3669), False, 'from pyvolplot import subplot_stack\n'), ((3684, 3781), 'pyvolplot.subplot_stack', 'subplot_stack', (['(1000 * ti)', 'Ep[:, ik]'], {'colors': "['b--', 'r--']", 'fig': 'fig', 'title': '"""True and Approx"""'}), "(1000 * ti, Ep[:, ik], colors=['b--', 'r--'], fig=fig, title=\n 'True and 
Approx')\n", (3697, 3781), False, 'from pyvolplot import subplot_stack\n'), ((3862, 3951), 'pyvolplot.subplot_stack', 'subplot_stack', (['(1000 * ti)', 'err[:, ik]'], {'colors': "['b--', 'r--']", 'title': '"""True and Approx"""'}), "(1000 * ti, err[:, ik], colors=['b--', 'r--'], title=\n 'True and Approx')\n", (3875, 3951), False, 'from pyvolplot import subplot_stack\n'), ((4629, 4659), 'pytest.importorskip', 'pytest.importorskip', (['"""sklearn"""'], {}), "('sklearn')\n", (4648, 4659), False, 'import pytest\n'), ((823, 855), 'scipy.ndimage.zoom', 'ndi.zoom', (['fmap', '(nx / 64)'], {'order': '(0)'}), '(fmap, nx / 64, order=0)\n', (831, 855), True, 'import scipy.ndimage as ndi\n'), ((882, 903), 'numpy.round', 'np.round', (['(5 * nx / 64)'], {}), '(5 * nx / 64)\n', (890, 903), True, 'import numpy as np\n'), ((932, 967), 'numpy.ones', 'np.ones', (['(kernel_size, kernel_size)'], {}), '((kernel_size, kernel_size))\n', (939, 967), True, 'import numpy as np\n'), ((1126, 1138), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1136, 1138), True, 'from matplotlib import pyplot as plt\n'), ((1151, 1205), 'matplotlib.pyplot.imshow', 'plt.imshow', (['fmap'], {'interpolation': '"""nearest"""', 'cmap': '"""gray"""'}), "(fmap, interpolation='nearest', cmap='gray')\n", (1161, 1205), True, 'from matplotlib import pyplot as plt\n'), ((1218, 1240), 'matplotlib.pyplot.title', 'plt.title', (['"""Field Map"""'], {}), "('Field Map')\n", (1227, 1240), True, 'from matplotlib import pyplot as plt\n'), ((1561, 1595), 'mrrt.utils.ellipse_im', 'ellipse_im', (['ig', 'rmap'], {'oversample': '(3)'}), '(ig, rmap, oversample=3)\n', (1571, 1595), False, 'from mrrt.utils import ImageGeometry, ellipse_im\n'), ((1628, 1640), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1638, 1640), True, 'from matplotlib import pyplot as plt\n'), ((1653, 1707), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rmap'], {'cmap': '"""gray"""', 'interpolation': '"""nearest"""'}), "(rmap, cmap='gray', interpolation='nearest')\n", (1663, 1707), True, 'from matplotlib import pyplot as plt\n'), ((1844, 1860), 'numpy.any', 'np.any', (['(rmap > 0)'], {}), '(rmap > 0)\n', (1850, 1860), True, 'import numpy as np\n'), ((3328, 3375), 'numpy.array', 'np.array', (['[0, nf, 2 * nf, 3 * nf, nhist[1] - 1]'], {}), '([0, nf, 2 * nf, 3 * nf, nhist[1] - 1])\n', (3336, 3375), True, 'import numpy as np\n'), ((1327, 1402), 'numpy.asarray', 'np.asarray', (['[[0, 0, 18, 23, 0, 20 * 64 / nx], [6, 0, 8, 8, 0, 3 * 64 / nx]]'], {}), '([[0, 0, 18, 23, 0, 20 * 64 / nx], [6, 0, 8, 8, 0, 3 * 64 / nx]])\n', (1337, 1402), True, 'import numpy as np\n'), ((1720, 1742), 'matplotlib.pyplot.title', 'plt.title', (['"""Relax Map"""'], {}), "('Relax Map')\n", (1729, 1742), True, 'from matplotlib import pyplot as plt\n'), ((2769, 2780), 'time.time', 'time.time', ([], {}), '()\n', (2778, 2780), False, 'import time\n'), ((3102, 3117), 'numpy.dot', 'np.dot', (['mse', 'hk'], {}), '(mse, hk)\n', (3108, 3117), True, 'import numpy as np\n'), ((3120, 3130), 'numpy.sum', 'np.sum', (['hk'], {}), '(hk)\n', (3126, 3130), True, 'import numpy as np\n')]
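A direct-call sketch of mri_exp_approx with the argument pattern used by the test above; the time grid and the synthetic rate map are illustrative stand-ins.

import numpy as np
from mrrt.mri import mri_exp_approx

ti = np.arange(0, 25e-3, 1e-3).astype("float32")     # sample times (s)
fmap = np.random.uniform(10, 120, size=(32, 32))  # synthetic field map (Hz)
zmap = ((2j * np.pi) * fmap).astype("complex64")  # rate map without relaxation
B, C, hk, zk = mri_exp_approx(
    ti, zmap, 8, approx_type=("hist,time,unif", [40]),
    autocorr=False, ctest=True, verbose=False,
)
Ep = np.dot(B, C)  # approximation of exp(-ti * zk), as in the test's error check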
from __future__ import print_function

import torch
import torch.utils.data as data
import torchvision
from torchvision import transforms
import random
import os
import numpy as np
from PIL import Image


class Base_Dataset(data.Dataset):
    def __init__(self, root, partition, target_ratio=0.0):
        super(Base_Dataset, self).__init__()
        # set dataset info
        self.root = root
        self.partition = partition
        self.target_ratio = target_ratio
        # self.target_ratio=0 no mixup
        mean_pix = [0.485, 0.456, 0.406]
        std_pix = [0.229, 0.224, 0.225]
        normalize = transforms.Normalize(mean=mean_pix, std=std_pix)
        if self.partition == 'train':
            self.transformer = transforms.Compose([transforms.Resize(256),
                                                   transforms.RandomHorizontalFlip(),
                                                   transforms.RandomCrop(224),
                                                   transforms.ToTensor(),
                                                   normalize])
        else:
            self.transformer = transforms.Compose([transforms.Resize(256),
                                                   transforms.CenterCrop(224),
                                                   transforms.ToTensor(),
                                                   normalize])

    def __len__(self):
        if self.partition == 'train':
            return int(min(sum(self.alpha), len(self.target_image)) / (self.num_class - 1))
        elif self.partition == 'test':
            return int(len(self.target_image) / (self.num_class - 1))

    def __getitem__(self, item):
        image_data = []
        label_data = []
        target_real_label = []
        class_index_target = []
        domain_label = []
        ST_split = []  # Mask of targets to be evaluated
        # select index for support class
        num_class_index_target = int(self.target_ratio * (self.num_class - 1))
        if self.target_ratio > 0:
            available_index = [key for key in self.target_image_list.keys()
                               if len(self.target_image_list[key]) > 0 and key < self.num_class - 1]
            class_index_target = random.sample(available_index,
                                               min(num_class_index_target, len(available_index)))
        class_index_source = list(set(range(self.num_class - 1)) - set(class_index_target))
        random.shuffle(class_index_source)

        for classes in class_index_source:
            # select support samples from source domain or target domain
            image = Image.open(random.choice(self.source_image[classes])).convert('RGB')
            if self.transformer is not None:
                image = self.transformer(image)
            image_data.append(image)
            label_data.append(classes)
            domain_label.append(1)
            ST_split.append(0)
            # target_real_label.append(classes)

        for classes in class_index_target:
            # select support samples from source domain or target domain
            image = Image.open(random.choice(self.target_image_list[classes])).convert('RGB')
            if self.transformer is not None:
                image = self.transformer(image)
            image_data.append(image)
            label_data.append(classes)
            domain_label.append(0)
            ST_split.append(0)
            # target_real_label.append(classes)

        # adding target samples
        for i in range(self.num_class - 1):
            if self.partition == 'train':
                if self.target_ratio > 0:
                    index = random.choice(list(range(len(self.label_flag))))
                else:
                    index = random.choice(list(range(len(self.target_image))))
                    # index = random.choice(list(range(len(self.label_flag))))
                target_image = Image.open(self.target_image[index]).convert('RGB')
                if self.transformer is not None:
                    target_image = self.transformer(target_image)
                image_data.append(target_image)
                label_data.append(self.label_flag[index])
                target_real_label.append(self.target_label[index])
                domain_label.append(0)
                ST_split.append(1)
            elif self.partition == 'test':
                # For last batch
                # if item * (self.num_class - 1) + i >= len(self.target_image):
                #     break
                target_image = Image.open(self.target_image[item * (self.num_class - 1) + i]).convert('RGB')
                if self.transformer is not None:
                    target_image = self.transformer(target_image)
                image_data.append(target_image)
                label_data.append(self.num_class)
                target_real_label.append(self.target_label[item * (self.num_class - 1) + i])
                domain_label.append(0)
                ST_split.append(1)

        image_data = torch.stack(image_data)
        label_data = torch.LongTensor(label_data)
        real_label_data = torch.tensor(target_real_label)
        domain_label = torch.tensor(domain_label)
        ST_split = torch.tensor(ST_split)
        return image_data, label_data, real_label_data, domain_label, ST_split

    def load_dataset(self):
        source_image_list = {key: [] for key in range(self.num_class - 1)}
        target_image_list = []
        target_label_list = []
        with open(self.source_path) as f:
            for ind, line in enumerate(f.readlines()):
                image_dir, label = line.split(' ')
                label = label.strip()
                if label == str(self.num_class - 1):
                    continue
                source_image_list[int(label)].append(image_dir)
                # source_image_list.append(image_dir)

        with open(self.target_path) as f:
            for ind, line in enumerate(f.readlines()):
                image_dir, label = line.split(' ')
                label = label.strip()
                # target_image_list[int(label)].append(image_dir)
                target_image_list.append(image_dir)
                target_label_list.append(int(label))

        return source_image_list, target_image_list, target_label_list


class Office_Dataset(Base_Dataset):

    def __init__(self, root, partition, label_flag=None, source='A', target='W', target_ratio=0.0):
        super(Office_Dataset, self).__init__(root, partition, target_ratio)
        # set dataset info
        src_name, tar_name = self.getFilePath(source, target)
        self.source_path = os.path.join(root, src_name)
        self.target_path = os.path.join(root, tar_name)
        self.class_name = ["back_pack", "bike", "bike_helmet", "bookcase", "bottle", "calculator",
                           "desk_chair", "desk_lamp", "desktop_computer", "file_cabinet", "unk"]
        self.num_class = len(self.class_name)
        self.source_image, self.target_image, self.target_label = self.load_dataset()
        self.alpha = [len(self.source_image[key]) for key in self.source_image.keys()]
        self.label_flag = label_flag

        # create the unlabeled tag
        if self.label_flag is None:
            self.label_flag = torch.ones(len(self.target_image)) * self.num_class
        else:
            # if pseudo label comes
            self.target_image_list = {key: [] for key in range(self.num_class + 1)}
            for i in range(len(self.label_flag)):
                self.target_image_list[self.label_flag[i].item()].append(self.target_image[i])

            if self.target_ratio > 0:
                self.alpha_value = [len(self.source_image[key]) + len(self.target_image_list[key])
                                    for key in self.source_image.keys()]
            else:
                self.alpha_value = self.alpha

            self.alpha_value = np.array(self.alpha_value)
            self.alpha_value = (self.alpha_value.max() + 1 - self.alpha_value) / self.alpha_value.mean()
            self.alpha_value = torch.tensor(self.alpha_value).float().cuda()

    def getFilePath(self, source, target):
        if source == 'A':
            src_name = 'amazon_src_list.txt'
        elif source == 'W':
            src_name = 'webcam_src_list.txt'
        elif source == 'D':
            src_name = 'dslr_src_list.txt'
        else:
            print("Unknown Source Type, only supports A W D.")

        if target == 'A':
            tar_name = 'amazon_tar_list.txt'
        elif target == 'W':
            tar_name = 'webcam_tar_list.txt'
        elif target == 'D':
            tar_name = 'dslr_tar_list.txt'
        else:
            print("Unknown Target Type, only supports A W D.")

        return src_name, tar_name


class Home_Dataset(Base_Dataset):
    def __init__(self, root, partition, label_flag=None, source='A', target='R', target_ratio=0.0):
        super(Home_Dataset, self).__init__(root, partition, target_ratio)
        src_name, tar_name = self.getFilePath(source, target)
        self.source_path = os.path.join(root, src_name)
        self.target_path = os.path.join(root, tar_name)
        self.class_name = ['Alarm_Clock', 'Backpack', 'Batteries', 'Bed', 'Bike', 'Bottle', 'Bucket',
                           'Calculator', 'Calendar', 'Candles', 'Chair', 'Clipboards', 'Computer',
                           'Couch', 'Curtains', 'Desk_Lamp', 'Drill', 'Eraser', 'Exit_Sign', 'Fan',
                           'File_Cabinet', 'Flipflops', 'Flowers', 'Folder', 'Fork', 'unk']
        self.num_class = len(self.class_name)
        self.source_image, self.target_image, self.target_label = self.load_dataset()
        self.alpha = [len(self.source_image[key]) for key in self.source_image.keys()]
        self.label_flag = label_flag

        # create the unlabeled tag
        if self.label_flag is None:
            self.label_flag = torch.ones(len(self.target_image)) * self.num_class
        else:
            # if pseudo label comes
            self.target_image_list = {key: [] for key in range(self.num_class + 1)}
            for i in range(len(self.label_flag)):
                self.target_image_list[self.label_flag[i].item()].append(self.target_image[i])

        # if self.target_ratio > 0:
        #     self.alpha_value = [len(self.source_image[key]) + len(self.target_image_list[key]) for key in
        #                         self.source_image.keys()]
        # else:
        #     self.alpha_value = self.alpha
        #
        # self.alpha_value = np.array(self.alpha_value)
        # self.alpha_value = (self.alpha_value.max() + 1 - self.alpha_value) / self.alpha_value.mean()
        # self.alpha_value = torch.tensor(self.alpha_value).float().cuda()

    def getFilePath(self, source, target):
        if source == 'A':
            src_name = 'art_source.txt'
        elif source == 'C':
            src_name = 'clip_source.txt'
        elif source == 'P':
            src_name = 'product_source.txt'
        elif source == 'R':
            src_name = 'real_source.txt'
        else:
            print("Unknown Source Type, only supports A C P R.")

        if target == 'A':
            tar_name = 'art_tar.txt'
        elif target == 'C':
            tar_name = 'clip_tar.txt'
        elif target == 'P':
            tar_name = 'product_tar.txt'
        elif target == 'R':
            tar_name = 'real_tar.txt'
        else:
            print("Unknown Target Type, only supports A C P R.")

        return src_name, tar_name


class Visda_Dataset(Base_Dataset):
    def __init__(self, root, partition, label_flag=None, target_ratio=0.0):
        super(Visda_Dataset, self).__init__(root, partition, target_ratio)
        # set dataset info
        self.source_path = os.path.join(root, 'source_list.txt')
        self.target_path = os.path.join(root, 'target_list.txt')
        self.class_name = ["bicycle", "bus", "car", "motorcycle", "train", "truck", 'unk']
        self.num_class = len(self.class_name)
        self.source_image, self.target_image, self.target_label = self.load_dataset()
        self.alpha = [len(self.source_image[key]) for key in self.source_image.keys()]
        self.label_flag = label_flag

        # create the unlabeled tag
        if self.label_flag is None:
            self.label_flag = torch.ones(len(self.target_image)) * self.num_class
        else:
            # if pseudo label comes
            self.target_image_list = {key: [] for key in range(self.num_class + 1)}
            for i in range(len(self.label_flag)):
                self.target_image_list[self.label_flag[i].item()].append(self.target_image[i])


class Visda18_Dataset(Base_Dataset):
    def __init__(self, root, partition, label_flag=None, target_ratio=0.0):
        super(Visda18_Dataset, self).__init__(root, partition, target_ratio)
        # set dataset info
        self.source_path = os.path.join(root, 'source_list_k.txt')
        self.target_path = os.path.join(root, 'target_list.txt')
        self.class_name = ["areoplane", "bicycle", "bus", "car", "horse", "knife", "motorcycle",
                           "person", "plant", "skateboard", "train", "truck", 'unk']
        self.num_class = len(self.class_name)
        self.source_image, self.target_image, self.target_label = self.load_dataset()
        self.alpha = [len(self.source_image[key]) for key in self.source_image.keys()]
        self.label_flag = label_flag

        # create the unlabeled tag
        if self.label_flag is None:
            self.label_flag = torch.ones(len(self.target_image)) * self.num_class
        else:
            # if pseudo label comes
            self.target_image_list = {key: [] for key in range(self.num_class + 1)}
            for i in range(len(self.label_flag)):
                self.target_image_list[self.label_flag[i].item()].append(self.target_image[i])
[ "torchvision.transforms.CenterCrop", "random.choice", "PIL.Image.open", "random.shuffle", "torch.LongTensor", "torch.stack", "os.path.join", "torchvision.transforms.RandomHorizontalFlip", "torchvision.transforms.RandomCrop", "torch.tensor", "numpy.array", "torchvision.transforms.Normalize", ...
[((609, 657), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean_pix', 'std': 'std_pix'}), '(mean=mean_pix, std=std_pix)\n', (629, 657), False, 'from torchvision import transforms\n'), ((2421, 2455), 'random.shuffle', 'random.shuffle', (['class_index_source'], {}), '(class_index_source)\n', (2435, 2455), False, 'import random\n'), ((4994, 5017), 'torch.stack', 'torch.stack', (['image_data'], {}), '(image_data)\n', (5005, 5017), False, 'import torch\n'), ((5039, 5067), 'torch.LongTensor', 'torch.LongTensor', (['label_data'], {}), '(label_data)\n', (5055, 5067), False, 'import torch\n'), ((5094, 5125), 'torch.tensor', 'torch.tensor', (['target_real_label'], {}), '(target_real_label)\n', (5106, 5125), False, 'import torch\n'), ((5149, 5175), 'torch.tensor', 'torch.tensor', (['domain_label'], {}), '(domain_label)\n', (5161, 5175), False, 'import torch\n'), ((5195, 5217), 'torch.tensor', 'torch.tensor', (['ST_split'], {}), '(ST_split)\n', (5207, 5217), False, 'import torch\n'), ((6608, 6636), 'os.path.join', 'os.path.join', (['root', 'src_name'], {}), '(root, src_name)\n', (6620, 6636), False, 'import os\n'), ((6664, 6692), 'os.path.join', 'os.path.join', (['root', 'tar_name'], {}), '(root, tar_name)\n', (6676, 6692), False, 'import os\n'), ((7830, 7856), 'numpy.array', 'np.array', (['self.alpha_value'], {}), '(self.alpha_value)\n', (7838, 7856), True, 'import numpy as np\n'), ((8997, 9025), 'os.path.join', 'os.path.join', (['root', 'src_name'], {}), '(root, src_name)\n', (9009, 9025), False, 'import os\n'), ((9053, 9081), 'os.path.join', 'os.path.join', (['root', 'tar_name'], {}), '(root, tar_name)\n', (9065, 9081), False, 'import os\n'), ((11696, 11733), 'os.path.join', 'os.path.join', (['root', '"""source_list.txt"""'], {}), "(root, 'source_list.txt')\n", (11708, 11733), False, 'import os\n'), ((11761, 11798), 'os.path.join', 'os.path.join', (['root', '"""target_list.txt"""'], {}), "(root, 'target_list.txt')\n", (11773, 11798), False, 'import os\n'), ((12825, 12864), 'os.path.join', 'os.path.join', (['root', '"""source_list_k.txt"""'], {}), "(root, 'source_list_k.txt')\n", (12837, 12864), False, 'import os\n'), ((12892, 12929), 'os.path.join', 'os.path.join', (['root', '"""target_list.txt"""'], {}), "(root, 'target_list.txt')\n", (12904, 12929), False, 'import os\n'), ((748, 770), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (765, 770), False, 'from torchvision import transforms\n'), ((823, 856), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (854, 856), False, 'from torchvision import transforms\n'), ((909, 935), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(224)'], {}), '(224)\n', (930, 935), False, 'from torchvision import transforms\n'), ((988, 1009), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1007, 1009), False, 'from torchvision import transforms\n'), ((1139, 1161), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1156, 1161), False, 'from torchvision import transforms\n'), ((1214, 1240), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1235, 1240), False, 'from torchvision import transforms\n'), ((1293, 1314), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1312, 1314), False, 'from torchvision import transforms\n'), ((2604, 2645), 'random.choice', 'random.choice', (['self.source_image[classes]'], {}), 
'(self.source_image[classes])\n', (2617, 2645), False, 'import random\n'), ((3093, 3139), 'random.choice', 'random.choice', (['self.target_image_list[classes]'], {}), '(self.target_image_list[classes])\n', (3106, 3139), False, 'import random\n'), ((3886, 3922), 'PIL.Image.open', 'Image.open', (['self.target_image[index]'], {}), '(self.target_image[index])\n', (3896, 3922), False, 'from PIL import Image\n'), ((7985, 8015), 'torch.tensor', 'torch.tensor', (['self.alpha_value'], {}), '(self.alpha_value)\n', (7997, 8015), False, 'import torch\n'), ((4515, 4577), 'PIL.Image.open', 'Image.open', (['self.target_image[item * (self.num_class - 1) + i]'], {}), '(self.target_image[item * (self.num_class - 1) + i])\n', (4525, 4577), False, 'from PIL import Image\n')]
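A loader sketch around the Office_Dataset record above; the dataset root is a placeholder path and must contain the *_src_list.txt / *_tar_list.txt files the class reads.

from torch.utils.data import DataLoader
# Office_Dataset is the class from the record above; 'data/office' is a placeholder root
train_set = Office_Dataset('data/office', partition='train', source='A', target='W')
loader = DataLoader(train_set, batch_size=2, shuffle=True)
images, labels, real_labels, domain_labels, st_split = next(iter(loader))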
import numpy as np
import torch

from experience_replay import ExperienceReplay
from network import Q
from config import hyperparameters as h

#----------------------------------------------------------------------------
# Reinforcement learning agent.

class Agent:
    def __init__(self, state_shape, nof_actions):
        self.state_shape = state_shape
        self.nof_actions = nof_actions
        self.epsilon = h.epsilon
        self.replay_buffer = ExperienceReplay(state_shape)
        self.Q = Q(state_shape, nof_actions, 'online')
        self.Q_tgt = Q(state_shape, nof_actions, 'target')
        self.Q_tgt.load_state_dict(self.Q.state_dict())
        self.device = self.Q.device

    def act(self, state):
        if np.random.sample() < self.epsilon:
            return np.random.randint(self.nof_actions)
        else:
            state = torch.from_numpy(state).unsqueeze(0).to(dtype=torch.float32, device=self.device)
            return torch.argmax(self.Q(state)).item()

    def step(self, state, action, reward, state_, done):
        self.replay_buffer.store(state, action, reward, state_, done)
        if self.replay_buffer.store_counter % h.update_interval == 0 \
                and self.replay_buffer.store_counter >= h.batch_size:
            self.learn()

    def learn(self):
        s, a, r, s_prime, done = self.sample_memory()
        # double-DQN target: the online network selects the next action, the
        # target network evaluates it
        Q_estimate_next = self.Q_tgt(s_prime).gather(1, torch.argmax(self.Q(s_prime), 1, keepdim=True)).squeeze(1)
        Q_target = r + (h.gamma * Q_estimate_next * (1 - done.to(torch.float32)))
        Q_estimate = self.Q(s).gather(1, a.unsqueeze(1)).squeeze(1)

        loss = self.Q.loss(Q_estimate, Q_target.detach()).to(self.device)
        self.Q.optimizer.zero_grad()
        loss.backward()
        self.Q.optimizer.step()

        self.soft_update()
        self.decrement_epsilon()

    def sample_memory(self):
        sample = self.replay_buffer.sample()
        return tuple(map(lambda x: torch.tensor(x).to(self.device), sample))

    def soft_update(self):
        for theta, theta_tgt in zip(self.Q.parameters(), self.Q_tgt.parameters()):
            theta_tgt.data.copy_(h.tau * theta.data + (1 - h.tau) * theta_tgt.data)

    def decrement_epsilon(self):
        self.epsilon = max(h.epsilon_min, self.epsilon * h.epsilon_decay)

    def save_models(self):
        self.Q.save_checkpoint()
        self.Q_tgt.save_checkpoint()

    def load_models(self):
        self.Q.load_checkpoint()
        self.Q_tgt.load_checkpoint()

#----------------------------------------------------------------------------
[ "torch.from_numpy", "experience_replay.ExperienceReplay", "torch.tensor", "numpy.random.randint", "numpy.random.sample", "network.Q" ]
[((466, 495), 'experience_replay.ExperienceReplay', 'ExperienceReplay', (['state_shape'], {}), '(state_shape)\n', (482, 495), False, 'from experience_replay import ExperienceReplay\n'), ((513, 550), 'network.Q', 'Q', (['state_shape', 'nof_actions', '"""online"""'], {}), "(state_shape, nof_actions, 'online')\n", (514, 550), False, 'from network import Q\n'), ((572, 609), 'network.Q', 'Q', (['state_shape', 'nof_actions', '"""target"""'], {}), "(state_shape, nof_actions, 'target')\n", (573, 609), False, 'from network import Q\n'), ((740, 758), 'numpy.random.sample', 'np.random.sample', ([], {}), '()\n', (756, 758), True, 'import numpy as np\n'), ((794, 829), 'numpy.random.randint', 'np.random.randint', (['self.nof_actions'], {}), '(self.nof_actions)\n', (811, 829), True, 'import numpy as np\n'), ((864, 887), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (880, 887), False, 'import torch\n'), ((1969, 1984), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (1981, 1984), False, 'import torch\n')]
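An interaction-loop sketch around the Agent record above; `env` is a stand-in for any Gym-style environment, and the episode count is arbitrary.

# Agent is the class from the record above; `env` is a hypothetical Gym-style environment
agent = Agent(state_shape=env.observation_space.shape, nof_actions=env.action_space.n)
for episode in range(500):
    state, done = env.reset(), False
    while not done:
        action = agent.act(state)                        # epsilon-greedy over the online network
        state_, reward, done, _ = env.step(action)
        agent.step(state, action, reward, state_, done)  # store transition, learn periodically
        state = state_
agent.save_models()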
# Copyright 2022 The Brax Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Logging utilities.""" # Lint as: python3 import collections import copy import csv import logging import os import pprint import sys import time from brax.io import file as io_file import numpy as np _tabulators = {} _timers = {} _record_time = False def save_config(output_path, config, verbose=False): io_file.MakeDirs(os.path.dirname(output_path)) config_str = pprint.pformat(config, indent=2) with io_file.File(output_path, 'w') as f: f.write(config_str) if verbose: print(f'Saved {output_path}') def load_config(path, verbose=False): with io_file.File(path, 'r') as f: config_str = f.read() config = eval(config_str) if verbose: print(f'Loaded {path}') return config class Graph(object): """Visualize data in dynamic graphs.""" def __init__( self, max_length=100, ): import pyqtgraph as pg from pyqtgraph.Qt import QtGui self.max_length = max_length self.app = QtGui.QApplication([]) self.win = pg.GraphicsWindow() self.ps = {} self.curves = {} self.dats = {} def add_plot(self, key): if key not in self.ps: self.ps[key] = self.win.addPlot(colspan=2) self.ps[key].setLabel(axis='top', text=key) self.win.nextRow() self.curves[key] = self.ps[key].plot() self.dats[key] = collections.deque() def update(self, **values): for key, value in values.items(): self.add_plot(key) if len(self.dats[key]) > self.max_length: self.dats[key].popleft() self.dats[key].append(value) self.curves[key].setData(self.dats[key]) self.app.processEvents() class Tabulator(object): """Tabulate data and incrementally format into a csv.""" def __init__(self, output_path=None, append=True, cast_type=None): self._output_path = output_path self._cast_type = cast_type self._curr_values = collections.OrderedDict() self._history_values = collections.OrderedDict() self._curr_counts = {} if append and output_path and io_file.Exists(self._output_path): self.finalize_from_file() else: self._finalized = False def finalize_from_file(self): data = parse_csv(self._output_path) self._history_values = data for key, value in data.items(): self._curr_values[key] = value[-1] self._finalized = True def get_statistics(self, indices=None, stat='mean', tag='', key_filter=None): """Get statistics (average, max, min) values in the table.""" ret = {} for key, values in self._history_values.items(): if key_filter and not key_filter(key): continue target_values = np.array(values) if indices is None: pass elif isinstance(indices, (int, tuple, list, np.ndarray)): if isinstance(indices, int): indices = [indices] target_values = target_values[indices] elif isinstance(indices, str) and ':' in indices: first_index, second_index = [ int(s) if s else None for s in indices.split(':', 1) ] target_values = target_values[first_index:second_index] else: raise NotImplementedError(indices, type(indices)) if tag: key += tag if stat == 'mean': ret[key] = np.mean(target_values, axis=0) elif stat == 'max': ret[key] = np.max(target_values, axis=0) elif stat == 'min': ret[key] = np.min(target_values, axis=0) else: 
raise NotImplementedError(stat) return ret def get_last(self): return self.get_statistics(indices=-1) def get_curr(self): return self._curr_values def add(self, accumulate=False, **entries): """Add an entry of data.""" for key, value in sorted(entries.items()): if key not in self._history_values: assert not self._finalized, ('Cannot add a new key {} once tabulator is' ' finalized.').format(key) self._history_values[key] = [] value = copy.deepcopy(value) if accumulate: value += self._curr_values.get(key, 0.0) self._curr_counts[key] = self._curr_counts.get(key, 0) + 1 value = self.cast(value) self._curr_values[key] = value def cast(self, value): if self._cast_type: try: value = self._cast_type(value) except TypeError as e: raise TypeError('{}: Failed to cast {} as {}'.format( e, value, self._cast_type)) return value def finalize(self): output_dir = os.path.dirname(self._output_path) if output_dir: io_file.MakeDirs(output_dir) with io_file.File(self._output_path, 'w') as f: writer = csv.writer(f) writer.writerow(self._history_values.keys()) self._finalized = True def dump(self, output_path=None, average=True): """Dump to a csv file.""" output_path = output_path or self._output_path if not self._curr_values: return # empty if output_path: if not self._finalized: self.finalize() # finalize with io_file.File(output_path, 'a') as f: writer = csv.writer(f) writer.writerow(self._curr_values.values()) for key, value in self._history_values.items(): v = copy.deepcopy(self._curr_values[key]) if average: v /= self._curr_counts.get(key, 1.0) value = self.cast(value) value.append(v) self._curr_counts = {} self._curr_values = {k: 0.0 for k in self._curr_values} def parse_csv(filename: str, verbose: bool = False): """Parse a csv file.""" with io_file.File(filename, 'r') as f: csv_data = np.genfromtxt(f, delimiter=',', names=True, deletechars='') data = collections.OrderedDict() try: for i, key in enumerate(csv_data.dtype.names): data[key] = [d[i] for d in csv_data] except TypeError: # 0-D array errors out for key in csv_data.dtype.names: data[key] = np.array([csv_data[key]]) if verbose: print(f'Loaded len={len(list(data.values())[0])}, ' f'keys={sorted(list(data.keys()))} from {filename}') return data def parse_csv_parallel(filenames, n_threads=1): import multiprocessing with multiprocessing.pool.ThreadPool(n_threads) as pool: jobs = { filename: pool.apply_async(parse_csv, [filename], error_callback=print) for filename in filenames } data = {key: value.get() for key, value in jobs.items()} return data def timeit(): global _record_time _record_time = True def tic(name): global _timers _timers[name] = time.time() def toc(name, indent=0): global _timers, _record_time assert name in _timers dt = time.time() - _timers[name] del _timers[name] if _record_time: print('{}[{}] runtime: {}s'.format(''.join(['\t'] * indent), name, dt)) return dt def get_level(name): """Get level.""" level = 'info' # default level os_level = os.getenv('LEVEL') if os_level is not None: if ',' in os_level: os_levels = os_level.split(',') if name in os_levels[1:]: level = os_levels[0] else: level = os_level return level class LoggerWrapper(object): """LoggerWrapper.""" def __init__(self, logger, name): self.logger = logger self.name = name def format(self, content='', name=None, **kwargs): """Format content to str.""" if name is None: name = self.name else: name = self.name + ':' + name s = '[{}]'.format(name) if content: s += ' ' + pprint.pformat(content) if kwargs: s += ' ' + pprint.pformat(kwargs) return s def add_name(self, name): 
self.name = ':'.join((self.name, name)) def pop_name(self): self.name = ':'.join(self.name.split(':')[:-1]) def debug(self, content='', name=None, **kwargs): level = get_level(self.name) if level in ('debug',): self.logger.debug(self.format(content=content, name=name, **kwargs)) def info(self, content='', name=None, **kwargs): self.logger.info(self.format(content=content, name=name, **kwargs)) def get_logger(level=None, name=__name__): """Get logger. If `level` is not specified, it consults os.getenv('LEVEL'). e.g. LEVEL=debug: print all debug messages. LEVEL=debug,name1,name2: print all debug messages, only for loggers with `name1` or `name2`, and use default level (`info`) for others. Args: level: a string, e.g. 'info', 'debug', 'error'. name: a string, identifier for logger. Returns: A logging.logger object. """ name = name.split('.')[-1] # base name if level is None: level = get_level(name) logger = logging.getLogger(name) out_hdlr = logging.StreamHandler(sys.stdout) out_hdlr.setFormatter( logging.Formatter('[{}] %(asctime)s %(message)s'.format(name))) out_hdlr.setLevel(getattr(logging, level.upper())) logger.addHandler(out_hdlr) logger.setLevel(getattr(logging, level.upper())) return LoggerWrapper(logger, name=name) def get_tabulator(name=__name__, **kwargs): """Get a tabulator.""" global _tabulators if name not in _tabulators: _tabulators[name] = Tabulator(**kwargs) return _tabulators[name] if __name__ == '__main__': tab = get_tabulator(append=False) tab.dump() # do nothing tab.add(a=3, b=2, c=4) tab.add(b=4, d=6) tab.dump() tab.add(a=1, d=4) tab.dump() tab2 = get_tabulator(append=True) tab2.add(a=4, b=1, c=2, d=3) tab2.dump()
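# Illustrative only (a hedged sketch, not from the original file): the timing
# helpers above are used as a matched pair, with timeit() enabling printing.
#
#   timeit()
#   tic('rollout')
#   ...                  # code being timed
#   dt = toc('rollout')  # prints "[rollout] runtime: <dt>s", returns seconds
#
# And Tabulator statistics over the last 10 dumped rows (path is a placeholder):
#
#   tab = get_tabulator('metrics', output_path='/tmp/metrics.csv')
#   stats = tab.get_statistics(indices='-10:', stat='mean')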
[ "logging.getLogger", "logging.StreamHandler", "brax.io.file.Exists", "brax.io.file.MakeDirs", "numpy.array", "pyqtgraph.Qt.QtGui.QApplication", "copy.deepcopy", "pyqtgraph.GraphicsWindow", "numpy.genfromtxt", "numpy.mean", "collections.deque", "brax.io.file.File", "numpy.max", "multiproces...
[((962, 994), 'pprint.pformat', 'pprint.pformat', (['config'], {'indent': '(2)'}), '(config, indent=2)\n', (976, 994), False, 'import pprint\n'), ((6220, 6245), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (6243, 6245), False, 'import collections\n'), ((7076, 7087), 'time.time', 'time.time', ([], {}), '()\n', (7085, 7087), False, 'import time\n'), ((7422, 7440), 'os.getenv', 'os.getenv', (['"""LEVEL"""'], {}), "('LEVEL')\n", (7431, 7440), False, 'import os\n'), ((9176, 9199), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (9193, 9199), False, 'import logging\n'), ((9213, 9246), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (9234, 9246), False, 'import logging\n'), ((917, 945), 'os.path.dirname', 'os.path.dirname', (['output_path'], {}), '(output_path)\n', (932, 945), False, 'import os\n'), ((1002, 1032), 'brax.io.file.File', 'io_file.File', (['output_path', '"""w"""'], {}), "(output_path, 'w')\n", (1014, 1032), True, 'from brax.io import file as io_file\n'), ((1158, 1181), 'brax.io.file.File', 'io_file.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (1170, 1181), True, 'from brax.io import file as io_file\n'), ((1531, 1553), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (1549, 1553), False, 'from pyqtgraph.Qt import QtGui\n'), ((1569, 1588), 'pyqtgraph.GraphicsWindow', 'pg.GraphicsWindow', ([], {}), '()\n', (1586, 1588), True, 'import pyqtgraph as pg\n'), ((2447, 2472), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2470, 2472), False, 'import collections\n'), ((2500, 2525), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2523, 2525), False, 'import collections\n'), ((5060, 5094), 'os.path.dirname', 'os.path.dirname', (['self._output_path'], {}), '(self._output_path)\n', (5075, 5094), False, 'import os\n'), ((6102, 6129), 'brax.io.file.File', 'io_file.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (6114, 6129), True, 'from brax.io import file as io_file\n'), ((6151, 6210), 'numpy.genfromtxt', 'np.genfromtxt', (['f'], {'delimiter': '""","""', 'names': '(True)', 'deletechars': '""""""'}), "(f, delimiter=',', names=True, deletechars='')\n", (6164, 6210), True, 'import numpy as np\n'), ((6704, 6746), 'multiprocessing.pool.ThreadPool', 'multiprocessing.pool.ThreadPool', (['n_threads'], {}), '(n_threads)\n', (6735, 6746), False, 'import multiprocessing\n'), ((7178, 7189), 'time.time', 'time.time', ([], {}), '()\n', (7187, 7189), False, 'import time\n'), ((1893, 1912), 'collections.deque', 'collections.deque', ([], {}), '()\n', (1910, 1912), False, 'import collections\n'), ((2587, 2620), 'brax.io.file.Exists', 'io_file.Exists', (['self._output_path'], {}), '(self._output_path)\n', (2601, 2620), True, 'from brax.io import file as io_file\n'), ((3200, 3216), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (3208, 3216), True, 'import numpy as np\n'), ((4546, 4566), 'copy.deepcopy', 'copy.deepcopy', (['value'], {}), '(value)\n', (4559, 4566), False, 'import copy\n'), ((5120, 5148), 'brax.io.file.MakeDirs', 'io_file.MakeDirs', (['output_dir'], {}), '(output_dir)\n', (5136, 5148), True, 'from brax.io import file as io_file\n'), ((5158, 5194), 'brax.io.file.File', 'io_file.File', (['self._output_path', '"""w"""'], {}), "(self._output_path, 'w')\n", (5170, 5194), True, 'from brax.io import file as io_file\n'), ((5216, 5229), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (5226, 5229), False, 'import 
csv\n'), ((5771, 5808), 'copy.deepcopy', 'copy.deepcopy', (['self._curr_values[key]'], {}), '(self._curr_values[key])\n', (5784, 5808), False, 'import copy\n'), ((3814, 3844), 'numpy.mean', 'np.mean', (['target_values'], {'axis': '(0)'}), '(target_values, axis=0)\n', (3821, 3844), True, 'import numpy as np\n'), ((5589, 5619), 'brax.io.file.File', 'io_file.File', (['output_path', '"""a"""'], {}), "(output_path, 'a')\n", (5601, 5619), True, 'from brax.io import file as io_file\n'), ((5643, 5656), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (5653, 5656), False, 'import csv\n'), ((6449, 6474), 'numpy.array', 'np.array', (['[csv_data[key]]'], {}), '([csv_data[key]])\n', (6457, 6474), True, 'import numpy as np\n'), ((8014, 8037), 'pprint.pformat', 'pprint.pformat', (['content'], {}), '(content)\n', (8028, 8037), False, 'import pprint\n'), ((8070, 8092), 'pprint.pformat', 'pprint.pformat', (['kwargs'], {}), '(kwargs)\n', (8084, 8092), False, 'import pprint\n'), ((3890, 3919), 'numpy.max', 'np.max', (['target_values'], {'axis': '(0)'}), '(target_values, axis=0)\n', (3896, 3919), True, 'import numpy as np\n'), ((3965, 3994), 'numpy.min', 'np.min', (['target_values'], {'axis': '(0)'}), '(target_values, axis=0)\n', (3971, 3994), True, 'import numpy as np\n')]
''' PointGroup train.py Written by <NAME> ''' import torch import torch.nn.functional as F import torch.optim as optim import time, sys, os, random from tensorboardX import SummaryWriter import numpy as np from util.config import cfg from util.log import logger import util.utils as utils device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu') def init(): # copy important files to backup backup_dir = os.path.join(cfg.exp_path, 'backup_files') os.makedirs(backup_dir, exist_ok=True) os.system('cp train.py {}'.format(backup_dir)) os.system('cp {} {}'.format(cfg.model_dir, backup_dir)) os.system('cp {} {}'.format(cfg.dataset_dir, backup_dir)) os.system('cp {} {}'.format(cfg.config, backup_dir)) # log the config logger.info(cfg) # summary writer global writer writer = SummaryWriter(cfg.exp_path) # random seed random.seed(cfg.manual_seed) np.random.seed(cfg.manual_seed) torch.manual_seed(cfg.manual_seed) torch.cuda.manual_seed_all(cfg.manual_seed) def train_epoch(train_loader, model, model_fn, optimizer, epoch): iter_time = utils.AverageMeter() data_time = utils.AverageMeter() am_dict = {} model.train() start_epoch = time.time() end = time.time() for i, batch in enumerate(train_loader): data_time.update(time.time() - end) torch.cuda.empty_cache() ##### adjust learning rate utils.step_learning_rate(optimizer, cfg.lr, epoch - 1, cfg.step_epoch, cfg.multiplier) ##### prepare input and forward loss, _, visual_dict, meter_dict = model_fn(batch, model, epoch) ##### meter_dict for k, v in meter_dict.items(): if k not in am_dict.keys(): am_dict[k] = utils.AverageMeter() am_dict[k].update(v[0], v[1]) ##### backward optimizer.zero_grad() loss.backward() optimizer.step() ##### time and print current_iter = (epoch - 1) * len(train_loader) + i + 1 max_iter = cfg.epochs * len(train_loader) remain_iter = max_iter - current_iter iter_time.update(time.time() - end) end = time.time() remain_time = remain_iter * iter_time.avg t_m, t_s = divmod(remain_time, 60) t_h, t_m = divmod(t_m, 60) remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m), int(t_s)) sys.stdout.write( "epoch: {}/{} iter: {}/{} loss: {:.4f}({:.4f}) data_time: {:.2f}({:.2f}) iter_time: {:.2f}({:.2f}) remain_time: {remain_time}\n".format (epoch, cfg.epochs, i + 1, len(train_loader), am_dict['loss'].val, am_dict['loss'].avg, data_time.val, data_time.avg, iter_time.val, iter_time.avg, remain_time=remain_time)) if (i == len(train_loader) - 1): print() logger.info("epoch: {}/{}, train loss: {:.4f}, time: {}s".format(epoch, cfg.epochs, am_dict['loss'].avg, time.time() - start_epoch)) utils.checkpoint_save(model, cfg.exp_path, cfg.config.split('/')[-1][:-5], epoch, cfg.save_freq, use_cuda) for k in am_dict.keys(): if k in visual_dict.keys(): writer.add_scalar(k+'_train', am_dict[k].avg, epoch) def eval_epoch(val_loader, model, model_fn, epoch): logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>') am_dict = {} with torch.no_grad(): model.eval() start_epoch = time.time() for i, batch in enumerate(val_loader): ##### prepare input and forward loss, preds, visual_dict, meter_dict = model_fn(batch, model, epoch) ##### meter_dict for k, v in meter_dict.items(): if k not in am_dict.keys(): am_dict[k] = utils.AverageMeter() am_dict[k].update(v[0], v[1]) ##### print sys.stdout.write("\riter: {}/{} loss: {:.4f}({:.4f})".format(i + 1, len(val_loader), am_dict['loss'].val, am_dict['loss'].avg)) if (i == len(val_loader) - 1): print() logger.info("epoch: {}/{}, val loss: {:.4f}, time: {}s".format(epoch, cfg.epochs, 
am_dict['loss'].avg, time.time() - start_epoch)) for k in am_dict.keys(): if k in visual_dict.keys(): writer.add_scalar(k + '_eval', am_dict[k].avg, epoch) def force_cudnn_initialization(): s = 32 dev = torch.device('cuda') torch.nn.functional.conv2d(torch.zeros(s, s, s, s, device=dev), torch.zeros(s, s, s, s, device=dev)) if __name__ == '__main__': torch.set_num_threads(1) #force_cudnn_initialization() ##### init init() ##### get model version and data version exp_name = cfg.config.split('/')[-1][:-5] model_name = exp_name.split('_')[0] data_name = exp_name.split('_')[-1] ##### model logger.info('=> creating model ...') if model_name == 'pointgroup': from model.pointgroup.pointgroup import PointGroup as Network from model.pointgroup.pointgroup import model_fn_decorator else: print("Error: no model - " + model_name) exit(0) model = Network(cfg) #print('#classifier parameters: {}'.format(sum([x.nelement() for x in model.parameters()]))) use_cuda = torch.cuda.is_available() logger.info('cuda available: {}'.format(use_cuda)) assert use_cuda model = model.to('cpu') model = model.cuda() # logger.info(model) logger.info('#classifier parameters: {}'.format(sum([x.nelement() for x in model.parameters()]))) ##### optimizer if cfg.optim == 'Adam': optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr) elif cfg.optim == 'SGD': optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr, momentum=cfg.momentum, weight_decay=cfg.weight_decay) ##### model_fn (criterion) model_fn = model_fn_decorator() ##### dataset if cfg.dataset == 'scannetv2': if data_name == 'scannet': import data.scannetv2_inst dataset = data.scannetv2_inst.Dataset() dataset.trainLoader() dataset.valLoader() else: print("Error: no data loader - " + data_name) exit(0) ##### resume start_epoch = utils.checkpoint_restore(model, cfg.exp_path, cfg.config.split('/')[-1][:-5], use_cuda) # resume from the latest epoch, or specify the epoch to restore ##### train and val for epoch in range(start_epoch, cfg.epochs + 1): train_epoch(dataset.train_data_loader, model, model_fn, optimizer, epoch) if utils.is_multiple(epoch, cfg.save_freq) or utils.is_power2(epoch): eval_epoch(dataset.val_data_loader, model, model_fn, epoch)
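# Note on the naming convention assumed above (path is illustrative): the yaml
# file name encodes both model and dataset, e.g.
#   cfg.config = 'config/pointgroup_run1_scannet.yaml'
#     exp_name   = 'pointgroup_run1_scannet'  # cfg.config.split('/')[-1][:-5]
#     model_name = 'pointgroup'               # first '_' token
#     data_name  = 'scannet'                  # last '_' token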
[ "util.utils.is_power2", "torch.cuda.is_available", "util.utils.is_multiple", "model.pointgroup.pointgroup.PointGroup", "tensorboardX.SummaryWriter", "util.log.logger.info", "torch.set_num_threads", "numpy.random.seed", "util.config.cfg.config.split", "time.time", "torch.cuda.empty_cache", "tor...
[((431, 473), 'os.path.join', 'os.path.join', (['cfg.exp_path', '"""backup_files"""'], {}), "(cfg.exp_path, 'backup_files')\n", (443, 473), False, 'import time, sys, os, random\n'), ((478, 516), 'os.makedirs', 'os.makedirs', (['backup_dir'], {'exist_ok': '(True)'}), '(backup_dir, exist_ok=True)\n', (489, 516), False, 'import time, sys, os, random\n'), ((773, 789), 'util.log.logger.info', 'logger.info', (['cfg'], {}), '(cfg)\n', (784, 789), False, 'from util.log import logger\n'), ((843, 870), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['cfg.exp_path'], {}), '(cfg.exp_path)\n', (856, 870), False, 'from tensorboardX import SummaryWriter\n'), ((894, 922), 'random.seed', 'random.seed', (['cfg.manual_seed'], {}), '(cfg.manual_seed)\n', (905, 922), False, 'import time, sys, os, random\n'), ((927, 958), 'numpy.random.seed', 'np.random.seed', (['cfg.manual_seed'], {}), '(cfg.manual_seed)\n', (941, 958), True, 'import numpy as np\n'), ((963, 997), 'torch.manual_seed', 'torch.manual_seed', (['cfg.manual_seed'], {}), '(cfg.manual_seed)\n', (980, 997), False, 'import torch\n'), ((1002, 1045), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['cfg.manual_seed'], {}), '(cfg.manual_seed)\n', (1028, 1045), False, 'import torch\n'), ((1129, 1149), 'util.utils.AverageMeter', 'utils.AverageMeter', ([], {}), '()\n', (1147, 1149), True, 'import util.utils as utils\n'), ((1166, 1186), 'util.utils.AverageMeter', 'utils.AverageMeter', ([], {}), '()\n', (1184, 1186), True, 'import util.utils as utils\n'), ((1241, 1252), 'time.time', 'time.time', ([], {}), '()\n', (1250, 1252), False, 'import time, sys, os, random\n'), ((1263, 1274), 'time.time', 'time.time', ([], {}), '()\n', (1272, 1274), False, 'import time, sys, os, random\n'), ((3277, 3342), 'util.log.logger.info', 'logger.info', (['""">>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>"""'], {}), "('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')\n", (3288, 3342), False, 'from util.log import logger\n'), ((4389, 4409), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4401, 4409), False, 'import torch\n'), ((4547, 4571), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (4568, 4571), False, 'import torch\n'), ((4825, 4861), 'util.log.logger.info', 'logger.info', (['"""=> creating model ..."""'], {}), "('=> creating model ...')\n", (4836, 4861), False, 'from util.log import logger\n'), ((5123, 5135), 'model.pointgroup.pointgroup.PointGroup', 'Network', (['cfg'], {}), '(cfg)\n', (5130, 5135), True, 'from model.pointgroup.pointgroup import PointGroup as Network\n'), ((5249, 5274), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5272, 5274), False, 'import torch\n'), ((5903, 5923), 'model.pointgroup.pointgroup.model_fn_decorator', 'model_fn_decorator', ([], {}), '()\n', (5921, 5923), False, 'from model.pointgroup.pointgroup import model_fn_decorator\n'), ((326, 351), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (349, 351), False, 'import torch\n'), ((1372, 1396), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1394, 1396), False, 'import torch\n'), ((1441, 1532), 'util.utils.step_learning_rate', 'utils.step_learning_rate', (['optimizer', 'cfg.lr', '(epoch - 1)', 'cfg.step_epoch', 'cfg.multiplier'], {}), '(optimizer, cfg.lr, epoch - 1, cfg.step_epoch, cfg.\n multiplier)\n', (1465, 1532), True, 'import util.utils as utils\n'), ((2191, 2202), 'time.time', 'time.time', ([], {}), '()\n', (2200, 2202), False, 'import time, sys, os, 
random\n'), ((3370, 3385), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3383, 3385), False, 'import torch\n'), ((3430, 3441), 'time.time', 'time.time', ([], {}), '()\n', (3439, 3441), False, 'import time, sys, os, random\n'), ((4441, 4476), 'torch.zeros', 'torch.zeros', (['s', 's', 's', 's'], {'device': 'dev'}), '(s, s, s, s, device=dev)\n', (4452, 4476), False, 'import torch\n'), ((4478, 4513), 'torch.zeros', 'torch.zeros', (['s', 's', 's', 's'], {'device': 'dev'}), '(s, s, s, s, device=dev)\n', (4489, 4513), False, 'import torch\n'), ((4693, 4714), 'util.config.cfg.config.split', 'cfg.config.split', (['"""/"""'], {}), "('/')\n", (4709, 4714), False, 'from util.config import cfg\n'), ((6627, 6666), 'util.utils.is_multiple', 'utils.is_multiple', (['epoch', 'cfg.save_freq'], {}), '(epoch, cfg.save_freq)\n', (6644, 6666), True, 'import util.utils as utils\n'), ((6670, 6692), 'util.utils.is_power2', 'utils.is_power2', (['epoch'], {}), '(epoch)\n', (6685, 6692), True, 'import util.utils as utils\n'), ((1345, 1356), 'time.time', 'time.time', ([], {}), '()\n', (1354, 1356), False, 'import time, sys, os, random\n'), ((1777, 1797), 'util.utils.AverageMeter', 'utils.AverageMeter', ([], {}), '()\n', (1795, 1797), True, 'import util.utils as utils\n'), ((2158, 2169), 'time.time', 'time.time', ([], {}), '()\n', (2167, 2169), False, 'import time, sys, os, random\n'), ((2948, 2959), 'time.time', 'time.time', ([], {}), '()\n', (2957, 2959), False, 'import time, sys, os, random\n'), ((3024, 3045), 'util.config.cfg.config.split', 'cfg.config.split', (['"""/"""'], {}), "('/')\n", (3040, 3045), False, 'from util.config import cfg\n'), ((6344, 6365), 'util.config.cfg.config.split', 'cfg.config.split', (['"""/"""'], {}), "('/')\n", (6360, 6365), False, 'from util.config import cfg\n'), ((3766, 3786), 'util.utils.AverageMeter', 'utils.AverageMeter', ([], {}), '()\n', (3784, 3786), True, 'import util.utils as utils\n'), ((4161, 4172), 'time.time', 'time.time', ([], {}), '()\n', (4170, 4172), False, 'import time, sys, os, random\n')]
import ast
import operator
import pickle
from copy import deepcopy
from typing import List

import cv2
import numpy as np
import albumentations as A
from torch.utils.data import Dataset

from mlcomp.db.providers import ModelProvider
from mlcomp.utils.config import parse_albu_short, Config
from mlcomp.utils.torch import infer
from mlcomp.worker.executors import Executor
from mlcomp.contrib.transform.tta import TtaWrap

# Maps AST operator node types to their arithmetic implementations;
# consumed by visit_BinOp when evaluating string equations.
_OP_MAP = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Invert: operator.neg,
    ast.Div: operator.truediv,
    ast.Pow: operator.pow
}


@Executor.register
class Equation(Executor, ast.NodeVisitor):
    # noinspection PyTypeChecker
    def __init__(
            self,
            model_id: int = None,
            suffix: str = '',
            max_count=None,
            part_size: int = None,
            cache_names: List[str] = (),
            **kwargs
    ):
        self.__dict__.update(kwargs)

        self.model_id = model_id
        self.suffix = suffix
        self.max_count = max_count
        self.part_size = part_size
        self.part = None
        self.cache = dict()
        self.cache_names = cache_names
        self.model_name = kwargs.get('model_name')
        self.name = kwargs.get('name')

        if not self.model_name and self.model_id:
            self.model_name = ModelProvider(self.session).by_id(
                self.model_id).name
        if not self.name:
            self.name = self.model_name

        self.suffix = self._solve(suffix)

    def tta(self, x: Dataset, tfms=()):
        x = deepcopy(x)
        transforms = getattr(x, 'transforms')
        if not transforms:
            return x

        assert isinstance(transforms, A.Compose), \
            'only Albumentations transforms are supported'

        # Insert the TTA transforms just before normalization, if present.
        index = len(transforms.transforms)
        for i, t in enumerate(transforms.transforms):
            if isinstance(t, A.Normalize):
                index = i
                break

        tfms_albu = []
        for i, t in enumerate(tfms):
            t = parse_albu_short(t, always_apply=True)
            tfms_albu.append(t)
            transforms.transforms.insert(index + i, t)
        return TtaWrap(x, tfms_albu)

    def adjust_part(self, part):
        pass

    def generate_parts(self, count):
        part_size = self.part_size or count
        res = []
        for i in range(0, count, part_size):
            res.append((i, min(count, i + part_size)))
        return res

    def load(self, file: str = None):
        file = file or self.name + f'_{self.suffix}'
        file = f'data/pred/{file}'
        data = pickle.load(open(file, 'rb'))
        data = data[self.part[0]: self.part[1]]
        if isinstance(data, list):
            for row in data:
                if type(row).__module__ == np.__name__:
                    continue

                if isinstance(row, list):
                    for i, c in enumerate(row):
                        row[i] = cv2.imdecode(c, cv2.IMREAD_GRAYSCALE)
            data = np.array(data)
        return data

    def torch(
            self,
            x: Dataset,
            file: str = None,
            batch_size: int = 1,
            activation=None,
            num_workers: int = 1
    ):
        file = (file or self.name) + '.pth'
        file = f'models/{file}'
        return infer(
            x=x,
            file=file,
            batch_size=batch_size,
            activation=activation,
            num_workers=num_workers
        )

    def visit_BinOp(self, node):
        left = self.visit(node.left)
        right = self.visit(node.right)
        return _OP_MAP[type(node.op)](left, right)

    def visit_Name(self, node):
        # Names resolve to executor attributes; string-valued attributes are
        # themselves equations and are solved recursively.
        name = node.id
        attr = getattr(self, name, None)
        if attr:
            if isinstance(attr, str):
                res = self._solve(attr)
                if attr in self.cache_names:
                    self.cache[attr] = res
                return res
            return attr
        return str(name)

    def visit_List(self, node):
        return self.get_value(node)

    def visit_Tuple(self, node):
        return self.get_value(node)

    def visit_Num(self, node):
        return node.n

    def visit_Str(self, node):
        return node.s

    def visit_Expr(self, node):
        return self.visit(node.value)

    def visit_pow(self, node):
        return node

    def visit_NameConstant(self, node):
        return node.value

    def get_value(self, node):
        t = type(node)
        if t == ast.NameConstant:
            return node.value
        if t == ast.Name:
            return self.visit_Name(node)
        if t == ast.Str:
            return node.s
        if t == ast.Num:
            return node.n
        if t == ast.List:
            res = []
            for e in node.elts:
                res.append(self.get_value(e))
            return res
        if t == ast.Tuple:
            res = []
            for e in node.elts:
                res.append(self.get_value(e))
            return res
        raise Exception(f'Unknown type {t}')

    def visit_Call(self, node):
        name = node.func.id
        f = getattr(self, name)
        if not f:
            raise Exception(f'Equation class does not contain method = {name}')
        args = [self.get_value(a) for a in node.args]
        kwargs = {k.arg: self.get_value(k.value) for k in node.keywords}
        return f(*args, **kwargs)

    def _solve(self, equation):
        # Evaluate a string equation by walking its AST.
        if equation is None:
            return None
        equation = str(equation)
        if equation in self.cache:
            return self.cache[equation]
        tree = ast.parse(equation)
        if len(tree.body) == 0:
            return None
        calc = self
        res = calc.visit(tree.body[0])
        return res

    def solve(self, name, parts):
        equation = getattr(self, name)
        for part in parts:
            self.cache = {}
            self.part = part
            self.adjust_part(part)
            res = self._solve(equation)
            if name in self.cache_names:
                self.cache[name] = res
            yield res

    @classmethod
    def _from_config(
            cls, executor: dict, config: Config, additional_info: dict
    ):
        return cls(**executor)


__all__ = ['Equation']
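# Illustrative only (a hedged sketch, not from the original module): string
# attributes are solved recursively through the AST visitor above, so model
# predictions can be blended declaratively. The prediction file names below
# are placeholders; load() reads them from data/pred/.
#
#   eq = Equation(name='blend', part_size=512,
#                 a=0.3, b=0.7,
#                 pred_a='load("model_a_fold0")',
#                 pred_b='load("model_b_fold0")',
#                 blend='a * pred_a + b * pred_b')
#   for part in eq.solve('blend', eq.generate_parts(count=4096)):
#       ...  # weighted sum of both prediction slices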
[ "mlcomp.utils.torch.infer", "numpy.array", "mlcomp.db.providers.ModelProvider", "cv2.imdecode", "mlcomp.contrib.transform.tta.TtaWrap", "copy.deepcopy", "ast.parse", "mlcomp.utils.config.parse_albu_short" ]
[((1599, 1610), 'copy.deepcopy', 'deepcopy', (['x'], {}), '(x)\n', (1607, 1610), False, 'from copy import deepcopy\n'), ((2221, 2242), 'mlcomp.contrib.transform.tta.TtaWrap', 'TtaWrap', (['x', 'tfms_albu'], {}), '(x, tfms_albu)\n', (2228, 2242), False, 'from mlcomp.contrib.transform.tta import TtaWrap\n'), ((3376, 3472), 'mlcomp.utils.torch.infer', 'infer', ([], {'x': 'x', 'file': 'file', 'batch_size': 'batch_size', 'activation': 'activation', 'num_workers': 'num_workers'}), '(x=x, file=file, batch_size=batch_size, activation=activation,\n num_workers=num_workers)\n', (3381, 3472), False, 'from mlcomp.utils.torch import infer\n'), ((5740, 5759), 'ast.parse', 'ast.parse', (['equation'], {}), '(equation)\n', (5749, 5759), False, 'import ast\n'), ((2080, 2118), 'mlcomp.utils.config.parse_albu_short', 'parse_albu_short', (['t'], {'always_apply': '(True)'}), '(t, always_apply=True)\n', (2096, 2118), False, 'from mlcomp.utils.config import parse_albu_short, Config\n'), ((3059, 3073), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3067, 3073), True, 'import numpy as np\n'), ((1366, 1393), 'mlcomp.db.providers.ModelProvider', 'ModelProvider', (['self.session'], {}), '(self.session)\n', (1379, 1393), False, 'from mlcomp.db.providers import ModelProvider\n'), ((3002, 3039), 'cv2.imdecode', 'cv2.imdecode', (['c', 'cv2.IMREAD_GRAYSCALE'], {}), '(c, cv2.IMREAD_GRAYSCALE)\n', (3014, 3039), False, 'import cv2\n')]
import bpy
import sys
import pickle
import struct
import numpy

PEANO_PREFIX = "_Peano"
WATER_MATERIAL_NAME = "Meta-water"
WIREFRAME_MATERIAL = "WireframeMaterial"
WIREFRAME_OFFSET = 0.001

class ReferenceArray:
    def __init__(self, cellsPerDimension):
        #2D grid with one reference slot per cell
        self.cells = numpy.zeros([cellsPerDimension, cellsPerDimension], int)

def deletePeanoObjects():
    r"""Unlinks all objects starting with PEANO_PREFIX from Blender."""
    for objekt in bpy.data.objects:
        if(objekt.name.startswith(PEANO_PREFIX)):
            bpy.context.scene.objects.unlink(objekt)
            bpy.data.objects.remove(objekt)
    for mesh in bpy.data.meshes:
        if(mesh.name.startswith(PEANO_PREFIX)):
            bpy.data.meshes.remove(mesh)

def importFile(filename, timestepNumber):
    #Deselect all objects
    for obj in bpy.data.objects:
        obj.select = False
    #Create object
    objectName = PEANO_PREFIX + "Object" + str(timestepNumber)
    coords=[]
    faces=[]
    with open(filename, "rb") as f:
        numberOfVertices = pickle.load(f)
        coords = [0] * numberOfVertices
        for vertexIndex in range(numberOfVertices):
            vertexId = pickle.load(f)
            x = pickle.load(f)
            y = pickle.load(f)
            q = pickle.load(f)
            coords[vertexId] = (x, y, q/10.0)
        numberOfCells = pickle.load(f)
        for cellId in range(numberOfCells):
            rank = pickle.load(f)
            vertexIds = []
            for i in range(4):
                vertexIds.append(pickle.load(f))
            faces.append((vertexIds[0], vertexIds[1], vertexIds[2], vertexIds[3]))
    mesh = bpy.data.meshes.new(PEANO_PREFIX + "Mesh" + str(timestepNumber))
    peanoObject = bpy.data.objects.new(objectName, mesh)
    peanoObject.location = (0, 0, 0)
    bpy.context.scene.objects.link(peanoObject)
    peanoObject.select = True
    bpy.context.scene.objects.active = peanoObject
    #Wireframe
    wireframeObject = bpy.data.objects.new(objectName + "Wireframe", mesh)
    wireframeObject.location = (0, 0, WIREFRAME_OFFSET)
    bpy.context.scene.objects.link(wireframeObject)
    wireframeObject.select = True
    #Mesh
    mesh.from_pydata(coords, [], faces)
    mesh.update(calc_edges=True)
    bpy.ops.object.mode_set(mode = 'EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    #bpy.ops.mesh.remove_doubles(limit=0.00001)
    mesh.calc_normals()
    mesh.auto_smooth_angle = 10.0
    mesh.use_auto_smooth = True
    bpy.ops.mesh.faces_shade_smooth()
    bpy.ops.object.editmode_toggle()
    bpy.ops.object.mode_set(mode = 'EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.faces_shade_smooth()
    bpy.ops.object.editmode_toggle()
    #Set materials
    #Water material
    wireframeObject.select = False
    bpy.context.scene.objects.active = peanoObject
    material = bpy.data.materials[WATER_MATERIAL_NAME]
    bpy.ops.object.material_slot_add()
    peanoObject.material_slots[len(peanoObject.material_slots) - 1].link = "OBJECT"
    peanoObject.material_slots[len(peanoObject.material_slots) - 1].material = material
    bpy.ops.object.mode_set(mode = "EDIT")
    bpy.ops.mesh.select_all(action="SELECT")
    bpy.ops.object.material_slot_assign()
    bpy.ops.object.editmode_toggle()
    #Wireframe material
    wireframeObject.select = True
    peanoObject.select = False
    bpy.context.scene.objects.active = wireframeObject
    material = bpy.data.materials[WIREFRAME_MATERIAL]
    #Create a slot on the wireframe object before linking the material to it
    bpy.ops.object.material_slot_add()
    wireframeObject.material_slots[len(wireframeObject.material_slots) - 1].link = "OBJECT"
    wireframeObject.material_slots[len(wireframeObject.material_slots) - 1].material = material
    bpy.ops.object.mode_set(mode = "EDIT")
    bpy.ops.mesh.select_all(action="SELECT")
    bpy.ops.object.material_slot_assign()
    bpy.ops.object.editmode_toggle()
    wireframeObject.draw_type = "WIRE"
    #Decimate modifier
    #bpy.context.scene.objects.active = peanoObject
    #bpy.ops.object.modifier_add(type="DECIMATE")
    #peanoObject.modifiers[len(peanoObject.modifiers)-1].show_viewport = False
    #bpy.context.scene.objects.active = wireframeObject
    #bpy.ops.object.modifier_add(type="DECIMATE")
    #wireframeObject.modifiers[len(peanoObject.modifiers)-1].show_viewport = False

#    #Set key frames
#    peanoObject.select = True
#    wireframeObject.select = True
#    #t-1
#    bpy.context.scene.objects.active = peanoObject
#    bpy.ops.anim.change_frame(frame = timestepNumber-1)
#    peanoObject.scale = (0, 0, 0)
#    wireframeObject.scale = (0, 0, 0)
#    bpy.ops.anim.keyframe_insert_menu(type="Scaling")
#    peanoObject.location.z = 1.0e1
#    wireframeObject.location.z = 1.0e1
#    bpy.ops.anim.keyframe_insert_menu(type="Location")
#    #peanoObject.modifiers[len(peanoObject.modifiers)-1].ratio = 0.0
#    #peanoObject.modifiers[len(peanoObject.modifiers)-1].keyframe_insert(data_path="ratio")
#    #t
#    bpy.ops.anim.change_frame(frame = timestepNumber)
#    peanoObject.scale = (1, 1, 1)
#    wireframeObject.scale = (1, 1, 1)
#    bpy.ops.anim.keyframe_insert_menu(type="Scaling")
#    peanoObject.location.z = 0
#    wireframeObject.location.z = WIREFRAME_OFFSET
#    bpy.ops.anim.keyframe_insert_menu(type="Location")
#    #peanoObject.modifiers[len(peanoObject.modifiers)-1].ratio = 1.0
#    #peanoObject.modifiers[len(peanoObject.modifiers)-1].keyframe_insert(data_path="ratio")
#    #t+1
#    bpy.ops.anim.change_frame(frame = timestepNumber+1)
#    peanoObject.scale = (0, 0, 0)
#    wireframeObject.scale = (0, 0, 0)
#    bpy.ops.anim.keyframe_insert_menu(type="Scaling")
#    peanoObject.location.z = 1.0e1
#    wireframeObject.location.z = 1.0e1
#    bpy.ops.anim.keyframe_insert_menu(type="Location")
#    peanoObject.select = False
#    wireframeObject.select = False
#    bpy.ops.anim.change_frame(frame = 0)

#def importFiles(sequenceName):
#    import glob
#    import os
#    import naturalSorting
#    import time
#    import sys
#    totalStartTime = time.clock()
#    print("Deleting old Peano objects...")
#    deletePeanoObjects()
#    print(" --done-- (%(time)fs)" % {"time": (time.clock() - totalStartTime)})
#    timestepNumber = 0
#    fileNames = glob.glob(sequenceName + "*.txt")
#    fileNames = naturalSorting.naturalSort(fileNames)
#    for filename in fileNames:
#        fileStartTime = time.clock()
#        print("Importing file " + filename)
#        importFile(filename, timestepNumber)
#        timestepNumber = timestepNumber + 1
#        print(" --done-- (%(time)fs)" % {"time": (time.clock() - fileStartTime)})
#        sys.stdout.flush()
#    print(" --Import completed-- (%(time)fs)" % {"time": (time.clock() - totalStartTime)})

def renderFiles(sequenceName, outputPath, numberOfIterations, cellsInReferenceArray):
    import glob
    import os
    import naturalSorting
    import time
    import sys
    totalStartTime = time.clock()
    timestepNumber = 0
    #fileNames = glob.glob(sequenceName + "*.txt")
    #fileNames = naturalSorting.naturalSort(fileNames)
    #for filename in fileNames:
    for iteration in range(numberOfIterations):
        fileName = sequenceName.replace("__ITERATION__", str(iteration))
        print("Deleting old Peano objects...")
        deletePeanoObjects()
        print(" --done-- (%(time)fs)" % {"time": (time.clock() - totalStartTime)})
        fileStartTime = time.clock()
        print("Importing file " + fileName)
        importFile(fileName, timestepNumber)
        bpy.ops.anim.change_frame(frame = timestepNumber)
        bpy.ops.render.render()
        bpy.data.images['Render Result'].save_render(filepath=outputPath+"%(timestep)05d.jpg" % {"timestep": timestepNumber})
        timestepNumber = timestepNumber + 1
        print(" --done-- (%(time)fs)" % {"time": (time.clock() - fileStartTime)})
        sys.stdout.flush()
    print(" --Import completed-- (%(time)fs)" % {"time": (time.clock() - totalStartTime)})
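# Illustrative only (hedged): renderFiles is meant to be driven from within
# Blender once the materials named above exist; the paths and counts below are
# placeholders. "__ITERATION__" in sequenceName is substituted per frame.
#
#   renderFiles(sequenceName="/tmp/grid___ITERATION__.dat",
#               outputPath="/tmp/render/frame_",
#               numberOfIterations=100,
#               cellsInReferenceArray=81)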
[ "bpy.ops.object.editmode_toggle", "bpy.context.scene.objects.link", "bpy.data.meshes.remove", "bpy.ops.mesh.select_all", "time.clock", "bpy.ops.object.mode_set", "bpy.ops.object.material_slot_assign", "bpy.context.scene.objects.unlink", "bpy.data.objects.new", "pickle.load", "bpy.ops.mesh.faces_...
[((1587, 1625), 'bpy.data.objects.new', 'bpy.data.objects.new', (['objectName', 'mesh'], {}), '(objectName, mesh)\n', (1607, 1625), False, 'import bpy\n'), ((1663, 1706), 'bpy.context.scene.objects.link', 'bpy.context.scene.objects.link', (['peanoObject'], {}), '(peanoObject)\n', (1693, 1706), False, 'import bpy\n'), ((1818, 1870), 'bpy.data.objects.new', 'bpy.data.objects.new', (["(objectName + 'Wireframe')", 'mesh'], {}), "(objectName + 'Wireframe', mesh)\n", (1838, 1870), False, 'import bpy\n'), ((1927, 1974), 'bpy.context.scene.objects.link', 'bpy.context.scene.objects.link', (['wireframeObject'], {}), '(wireframeObject)\n', (1957, 1974), False, 'import bpy\n'), ((2087, 2123), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (2110, 2123), False, 'import bpy\n'), ((2128, 2168), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (2151, 2168), False, 'import bpy\n'), ((2301, 2334), 'bpy.ops.mesh.faces_shade_smooth', 'bpy.ops.mesh.faces_shade_smooth', ([], {}), '()\n', (2332, 2334), False, 'import bpy\n'), ((2337, 2369), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (2367, 2369), False, 'import bpy\n'), ((2372, 2408), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (2395, 2408), False, 'import bpy\n'), ((2413, 2453), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (2436, 2453), False, 'import bpy\n'), ((2456, 2489), 'bpy.ops.mesh.faces_shade_smooth', 'bpy.ops.mesh.faces_shade_smooth', ([], {}), '()\n', (2487, 2489), False, 'import bpy\n'), ((2492, 2524), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (2522, 2524), False, 'import bpy\n'), ((2698, 2732), 'bpy.ops.object.material_slot_add', 'bpy.ops.object.material_slot_add', ([], {}), '()\n', (2730, 2732), False, 'import bpy\n'), ((2887, 2923), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (2910, 2923), False, 'import bpy\n'), ((2928, 2968), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (2951, 2968), False, 'import bpy\n'), ((2971, 3008), 'bpy.ops.object.material_slot_assign', 'bpy.ops.object.material_slot_assign', ([], {}), '()\n', (3006, 3008), False, 'import bpy\n'), ((3011, 3043), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (3041, 3043), False, 'import bpy\n'), ((3394, 3430), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (3417, 3430), False, 'import bpy\n'), ((3435, 3475), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (3458, 3475), False, 'import bpy\n'), ((3478, 3515), 'bpy.ops.object.material_slot_assign', 'bpy.ops.object.material_slot_assign', ([], {}), '()\n', (3513, 3515), False, 'import bpy\n'), ((3518, 3550), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (3548, 3550), False, 'import bpy\n'), ((6462, 6474), 'time.clock', 'time.clock', ([], {}), '()\n', (6472, 6474), False, 'import time\n'), ((257, 313), 'numpy.zeros', 'numpy.zeros', (['[cellsPerDimension, cellsPerDimension]', 'int'], {}), '([cellsPerDimension, cellsPerDimension], int)\n', (268, 313), False, 'import numpy\n'), ((980, 994), 'pickle.load', 
'pickle.load', (['f'], {}), '(f)\n', (991, 994), False, 'import pickle\n'), ((1247, 1261), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1258, 1261), False, 'import pickle\n'), ((6918, 6930), 'time.clock', 'time.clock', ([], {}), '()\n', (6928, 6930), False, 'import time\n'), ((7016, 7063), 'bpy.ops.anim.change_frame', 'bpy.ops.anim.change_frame', ([], {'frame': 'timestepNumber'}), '(frame=timestepNumber)\n', (7041, 7063), False, 'import bpy\n'), ((7070, 7093), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {}), '()\n', (7091, 7093), False, 'import bpy\n'), ((7341, 7359), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7357, 7359), False, 'import sys\n'), ((503, 543), 'bpy.context.scene.objects.unlink', 'bpy.context.scene.objects.unlink', (['objekt'], {}), '(objekt)\n', (535, 543), False, 'import bpy\n'), ((550, 581), 'bpy.data.objects.remove', 'bpy.data.objects.remove', (['objekt'], {}), '(objekt)\n', (573, 581), False, 'import bpy\n'), ((663, 691), 'bpy.data.meshes.remove', 'bpy.data.meshes.remove', (['mesh'], {}), '(mesh)\n', (685, 691), False, 'import bpy\n'), ((1096, 1110), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1107, 1110), False, 'import pickle\n'), ((1121, 1135), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1132, 1135), False, 'import pickle\n'), ((1146, 1160), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1157, 1160), False, 'import pickle\n'), ((1171, 1185), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1182, 1185), False, 'import pickle\n'), ((1315, 1329), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1326, 1329), False, 'import pickle\n'), ((1401, 1415), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1412, 1415), False, 'import pickle\n'), ((7419, 7431), 'time.clock', 'time.clock', ([], {}), '()\n', (7429, 7431), False, 'import time\n'), ((6862, 6874), 'time.clock', 'time.clock', ([], {}), '()\n', (6872, 6874), False, 'import time\n'), ((7305, 7317), 'time.clock', 'time.clock', ([], {}), '()\n', (7315, 7317), False, 'import time\n')]
# Copyright (c) 2017 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """ Meteogram ========= Plots time series data as a meteogram. """ import datetime as dt import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from metpy.calc import dewpoint_rh from metpy.cbook import get_test_data from metpy.plots import add_metpy_logo from metpy.units import units def calc_mslp(t, p, h): return p * (1 - (0.0065 * h) / (t + 0.0065 * h + 273.15)) ** (-5.257) # Make meteogram plot class Meteogram(object): """ Plot a time series of meteorological data from a particular station as a meteogram with standard variables to visualize, including thermodynamic, kinematic, and pressure. The functions below control the plotting of each variable. TO DO: Make the subplot creation dynamic so the number of rows is not static as it is currently. """ def __init__(self, fig, dates, probeid, time=None, axis=0): """ Required input: fig: figure object dates: array of dates corresponding to the data probeid: ID of the station Optional Input: time: Time the data is to be plotted axis: number that controls the new axis to be plotted (FOR FUTURE) """ if not time: time = dt.datetime.utcnow() self.start = dates[0] self.fig = fig self.end = dates[-1] self.axis_num = 0 self.dates = mpl.dates.date2num(dates) self.time = time.strftime('%Y-%m-%d %H:%M UTC') self.title = 'Latest Ob Time: {0}\nProbe ID: {1}'.format(self.time, probeid) def plot_winds(self, ws, wd, wsmax, plot_range=None): """ Required input: ws: Wind speeds (knots) wd: Wind direction (degrees) wsmax: Wind gust (knots) Optional Input: plot_range: Data range for making figure (list of (min,max,step)) """ # PLOT WIND SPEED AND WIND DIRECTION self.ax1 = fig.add_subplot(4, 1, 1) ln1 = self.ax1.plot(self.dates, ws, label='Wind Speed') self.ax1.fill_between(self.dates, ws, 0) self.ax1.set_xlim(self.start, self.end) if not plot_range: plot_range = [0, 20, 1] self.ax1.set_ylabel('Wind Speed (knots)', multialignment='center') self.ax1.set_ylim(plot_range[0], plot_range[1], plot_range[2]) self.ax1.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5) ln2 = self.ax1.plot(self.dates, wsmax, '.r', label='3-sec Wind Speed Max') ax7 = self.ax1.twinx() ln3 = ax7.plot(self.dates, wd, '.k', linewidth=0.5, label='Wind Direction') ax7.set_ylabel('Wind\nDirection\n(degrees)', multialignment='center') ax7.set_ylim(0, 360) ax7.set_yticks(np.arange(45, 405, 90), ['NE', 'SE', 'SW', 'NW']) lns = ln1 + ln2 + ln3 labs = [l.get_label() for l in lns] ax7.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC')) ax7.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, 1.2), ncol=3, prop={'size': 12}) def plot_thermo(self, t, td, plot_range=None): """ Required input: T: Temperature (deg F) TD: Dewpoint (deg F) Optional Input: plot_range: Data range for making figure (list of (min,max,step)) """ # PLOT TEMPERATURE AND DEWPOINT if not plot_range: plot_range = [10, 90, 2] self.ax2 = fig.add_subplot(4, 1, 2, sharex=self.ax1) ln4 = self.ax2.plot(self.dates, t, 'r-', label='Temperature') self.ax2.fill_between(self.dates, t, td, color='r') self.ax2.set_ylabel('Temperature\n(F)', multialignment='center') self.ax2.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5) self.ax2.set_ylim(plot_range[0], plot_range[1], plot_range[2]) ln5 = self.ax2.plot(self.dates, td, 'g-', label='Dewpoint') self.ax2.fill_between(self.dates, td, self.ax2.get_ylim()[0], color='g') 
ax_twin = self.ax2.twinx() ax_twin.set_ylim(plot_range[0], plot_range[1], plot_range[2]) lns = ln4 + ln5 labs = [l.get_label() for l in lns] ax_twin.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC')) self.ax2.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, 1.2), ncol=2, prop={'size': 12}) def plot_rh(self, rh, plot_range=None): """ Required input: RH: Relative humidity (%) Optional Input: plot_range: Data range for making figure (list of (min,max,step)) """ # PLOT RELATIVE HUMIDITY if not plot_range: plot_range = [0, 100, 4] self.ax3 = fig.add_subplot(4, 1, 3, sharex=self.ax1) self.ax3.plot(self.dates, rh, 'g-', label='Relative Humidity') self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.22), prop={'size': 12}) self.ax3.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5) self.ax3.set_ylim(plot_range[0], plot_range[1], plot_range[2]) self.ax3.fill_between(self.dates, rh, self.ax3.get_ylim()[0], color='g') self.ax3.set_ylabel('Relative Humidity\n(%)', multialignment='center') self.ax3.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC')) axtwin = self.ax3.twinx() axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2]) def plot_pressure(self, p, plot_range=None): """ Required input: P: Mean Sea Level Pressure (hPa) Optional Input: plot_range: Data range for making figure (list of (min,max,step)) """ # PLOT PRESSURE if not plot_range: plot_range = [970, 1030, 2] self.ax4 = fig.add_subplot(4, 1, 4, sharex=self.ax1) self.ax4.plot(self.dates, p, 'm', label='Mean Sea Level Pressure') self.ax4.set_ylabel('Mean Sea\nLevel Pressure\n(mb)', multialignment='center') self.ax4.set_ylim(plot_range[0], plot_range[1], plot_range[2]) axtwin = self.ax4.twinx() axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2]) axtwin.fill_between(self.dates, p, axtwin.get_ylim()[0], color='m') axtwin.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC')) self.ax4.legend(loc='upper center', bbox_to_anchor=(0.5, 1.2), prop={'size': 12}) self.ax4.grid(b=True, which='major', axis='y', color='k', linestyle='--', linewidth=0.5) # OTHER OPTIONAL AXES TO PLOT # plot_irradiance # plot_precipitation # set the starttime and endtime for plotting, 24 hour range endtime = dt.datetime(2016, 3, 31, 22, 0, 0, 0) starttime = endtime - dt.timedelta(hours=24) # Height of the station to calculate MSLP hgt_example = 292. 
# Parse dates from .csv file, knowing their format as a string and convert to datetime def parse_date(date): return dt.datetime.strptime(date.decode('ascii'), '%Y-%m-%d %H:%M:%S') testdata = np.genfromtxt(get_test_data('timeseries.csv', False), names=True, dtype=None, usecols=list(range(1, 8)), converters={'DATE': parse_date}, delimiter=',') # Temporary variables for ease temp = testdata['T'] pres = testdata['P'] rh = testdata['RH'] ws = testdata['WS'] wsmax = testdata['WSMAX'] wd = testdata['WD'] date = testdata['DATE'] # ID For Plotting on Meteogram probe_id = '0102A' data = {'wind_speed': (np.array(ws) * units('m/s')).to(units('knots')), 'wind_speed_max': (np.array(wsmax) * units('m/s')).to(units('knots')), 'wind_direction': np.array(wd) * units('degrees'), 'dewpoint': dewpoint_rh((np.array(temp) * units('degC')).to(units('K')), np.array(rh) / 100.).to(units('degF')), 'air_temperature': (np.array(temp) * units('degC')).to(units('degF')), 'mean_slp': calc_mslp(np.array(temp), np.array(pres), hgt_example) * units('hPa'), 'relative_humidity': np.array(rh), 'times': np.array(date)} fig = plt.figure(figsize=(20, 16)) add_metpy_logo(fig, 250, 180) meteogram = Meteogram(fig, data['times'], probe_id) meteogram.plot_winds(data['wind_speed'], data['wind_direction'], data['wind_speed_max']) meteogram.plot_thermo(data['air_temperature'], data['dewpoint']) meteogram.plot_rh(data['relative_humidity']) meteogram.plot_pressure(data['mean_slp']) fig.subplots_adjust(hspace=0.5) plt.show()
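# Note (illustrative): calc_mslp above reduces station pressure to mean sea
# level with the standard-atmosphere form
#   p_msl = p * (1 - 0.0065*h / (T + 0.0065*h + 273.15)) ** (-5.257)
# for T in degC, p in hPa and station height h in metres, e.g.
#   calc_mslp(15.0, 980.0, 292.0)  # ~= 1014.4 hPa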
[ "datetime.datetime", "matplotlib.dates.date2num", "metpy.plots.add_metpy_logo", "datetime.datetime.utcnow", "matplotlib.dates.DateFormatter", "metpy.cbook.get_test_data", "numpy.array", "matplotlib.pyplot.figure", "metpy.units.units", "datetime.timedelta", "numpy.arange", "matplotlib.pyplot.sh...
[((6981, 7018), 'datetime.datetime', 'dt.datetime', (['(2016)', '(3)', '(31)', '(22)', '(0)', '(0)', '(0)'], {}), '(2016, 3, 31, 22, 0, 0, 0)\n', (6992, 7018), True, 'import datetime as dt\n'), ((8372, 8400), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 16)'}), '(figsize=(20, 16))\n', (8382, 8400), True, 'import matplotlib.pyplot as plt\n'), ((8401, 8430), 'metpy.plots.add_metpy_logo', 'add_metpy_logo', (['fig', '(250)', '(180)'], {}), '(fig, 250, 180)\n', (8415, 8430), False, 'from metpy.plots import add_metpy_logo\n'), ((8756, 8766), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8764, 8766), True, 'import matplotlib.pyplot as plt\n'), ((7041, 7063), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': '(24)'}), '(hours=24)\n', (7053, 7063), True, 'import datetime as dt\n'), ((7339, 7377), 'metpy.cbook.get_test_data', 'get_test_data', (['"""timeseries.csv"""', '(False)'], {}), "('timeseries.csv', False)\n", (7352, 7377), False, 'from metpy.cbook import get_test_data\n'), ((8326, 8338), 'numpy.array', 'np.array', (['rh'], {}), '(rh)\n', (8334, 8338), True, 'import numpy as np\n'), ((8349, 8363), 'numpy.array', 'np.array', (['date'], {}), '(date)\n', (8357, 8363), True, 'import numpy as np\n'), ((1542, 1567), 'matplotlib.dates.date2num', 'mpl.dates.date2num', (['dates'], {}), '(dates)\n', (1560, 1567), True, 'import matplotlib as mpl\n'), ((7819, 7833), 'metpy.units.units', 'units', (['"""knots"""'], {}), "('knots')\n", (7824, 7833), False, 'from metpy.units import units\n'), ((7898, 7912), 'metpy.units.units', 'units', (['"""knots"""'], {}), "('knots')\n", (7903, 7912), False, 'from metpy.units import units\n'), ((7941, 7953), 'numpy.array', 'np.array', (['wd'], {}), '(wd)\n', (7949, 7953), True, 'import numpy as np\n'), ((7956, 7972), 'metpy.units.units', 'units', (['"""degrees"""'], {}), "('degrees')\n", (7961, 7972), False, 'from metpy.units import units\n'), ((8111, 8124), 'metpy.units.units', 'units', (['"""degF"""'], {}), "('degF')\n", (8116, 8124), False, 'from metpy.units import units\n'), ((8190, 8203), 'metpy.units.units', 'units', (['"""degF"""'], {}), "('degF')\n", (8195, 8203), False, 'from metpy.units import units\n'), ((8283, 8295), 'metpy.units.units', 'units', (['"""hPa"""'], {}), "('hPa')\n", (8288, 8295), False, 'from metpy.units import units\n'), ((1392, 1412), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (1410, 1412), True, 'import datetime as dt\n'), ((2939, 2961), 'numpy.arange', 'np.arange', (['(45)', '(405)', '(90)'], {}), '(45, 405, 90)\n', (2948, 2961), True, 'import numpy as np\n'), ((3101, 3137), 'matplotlib.dates.DateFormatter', 'mpl.dates.DateFormatter', (['"""%d/%H UTC"""'], {}), "('%d/%H UTC')\n", (3124, 3137), True, 'import matplotlib as mpl\n'), ((4457, 4493), 'matplotlib.dates.DateFormatter', 'mpl.dates.DateFormatter', (['"""%d/%H UTC"""'], {}), "('%d/%H UTC')\n", (4480, 4493), True, 'import matplotlib as mpl\n'), ((5576, 5612), 'matplotlib.dates.DateFormatter', 'mpl.dates.DateFormatter', (['"""%d/%H UTC"""'], {}), "('%d/%H UTC')\n", (5599, 5612), True, 'import matplotlib as mpl\n'), ((6568, 6604), 'matplotlib.dates.DateFormatter', 'mpl.dates.DateFormatter', (['"""%d/%H UTC"""'], {}), "('%d/%H UTC')\n", (6591, 6604), True, 'import matplotlib as mpl\n'), ((8236, 8250), 'numpy.array', 'np.array', (['temp'], {}), '(temp)\n', (8244, 8250), True, 'import numpy as np\n'), ((8252, 8266), 'numpy.array', 'np.array', (['pres'], {}), '(pres)\n', (8260, 8266), True, 'import numpy as np\n'), ((7787, 7799), 
'numpy.array', 'np.array', (['ws'], {}), '(ws)\n', (7795, 7799), True, 'import numpy as np\n'), ((7802, 7814), 'metpy.units.units', 'units', (['"""m/s"""'], {}), "('m/s')\n", (7807, 7814), False, 'from metpy.units import units\n'), ((7863, 7878), 'numpy.array', 'np.array', (['wsmax'], {}), '(wsmax)\n', (7871, 7878), True, 'import numpy as np\n'), ((7881, 7893), 'metpy.units.units', 'units', (['"""m/s"""'], {}), "('m/s')\n", (7886, 7893), False, 'from metpy.units import units\n'), ((8155, 8169), 'numpy.array', 'np.array', (['temp'], {}), '(temp)\n', (8163, 8169), True, 'import numpy as np\n'), ((8172, 8185), 'metpy.units.units', 'units', (['"""degC"""'], {}), "('degC')\n", (8177, 8185), False, 'from metpy.units import units\n'), ((8042, 8052), 'metpy.units.units', 'units', (['"""K"""'], {}), "('K')\n", (8047, 8052), False, 'from metpy.units import units\n'), ((8087, 8099), 'numpy.array', 'np.array', (['rh'], {}), '(rh)\n', (8095, 8099), True, 'import numpy as np\n'), ((8007, 8021), 'numpy.array', 'np.array', (['temp'], {}), '(temp)\n', (8015, 8021), True, 'import numpy as np\n'), ((8024, 8037), 'metpy.units.units', 'units', (['"""degC"""'], {}), "('degC')\n", (8029, 8037), False, 'from metpy.units import units\n')]
""" Base Container Object """ # global import re import termcolor import numpy as _np import json as _json import h5py as _h5py import pickle as _pickle import random as _random from operator import lt as _lt from operator import le as _le from operator import eq as _eq from operator import ne as _ne from operator import gt as _gt from operator import ge as _ge from operator import mul as _mul from operator import pow as _pow from operator import not_ as _not from functools import reduce as _reduce from typing import Union, Iterable, Dict from operator import truediv as _truediv from operator import floordiv as _floordiv # local import ivy as _ivy def _is_jsonable(x): try: _json.dumps(x) return True except (TypeError, OverflowError): return False def _repr(x): try: return x.__repr__() except TypeError: return str(x) # noinspection PyMissingConstructor class Container(dict): def __init__(self, dict_in=None, queues=None, queue_load_sizes=None, container_combine_method='list_join', queue_timeout=None, print_limit=10, print_indent=4, print_line_spacing=0, ivyh=None, keyword_color_dict=None, rebuild_child_containers=False, types_to_iteratively_nest=None, **kwargs): """ Initialize container object from input dict representation. :param dict_in: the dictionary the container should wrap around. Default is None. :type dict_in: dict, optional :param queues: Sequence of multiprocessing queues, each of which returns containers. This enables the current container to be passed around asynchronously while waiting for data. Default is None. :type queues: sequence of multiprocessing queues, optional :param queue_load_sizes: Size of leading dimension of the containers returned by each queue. Default is None. :type queue_load_sizes: sequence of ints, optional :param container_combine_method: The method to use for combining containers arriving from different queues. Default is ivy.Container.list_join :type container_combine_method: str, optional :param queue_timeout: The timeout when waiting for containers to arrive from the queues. Default is global. :type queue_timeout: float, optional :param print_limit: The total array size limit when printing the container. Default is 10. :type print_limit: int, optional :param print_indent: The number of whitespaces to use for indenting when printing the container. Default is 4. :type print_indent: int, optional :param print_line_spacing: The number of extra newlines to use between keys when printing the container. Default is 0. :type print_line_spacing: int, optional :param ivyh: Handle to ivy module to use for the calculations. Default is None, which results in the global ivy. :type ivyh: handle to ivy module, optional :param keyword_color_dict: A dict mapping keywords to their termcolor color codes for printing the container. :type keyword_color_dict: dict, optional :param rebuild_child_containers: Whether to rebuild container found in dict_in with these constructor params. Default is False, in which case the original container are kept as are. :type rebuild_child_containers: bool, optional :param types_to_iteratively_nest: The data types to nest iteratively in the dict structure, each type must be iterable. Default is None. :type types_to_iteratively_nest: seq of iterable types :param kwargs: keyword arguments for dict creation. Default is None. :type kwargs: keyword arguments. 
""" self._queues = queues self._print_limit = print_limit self._print_indent = print_indent self._print_line_spacing = print_line_spacing self._container_combine_method = container_combine_method self._types_to_iteratively_nest = _ivy.default(lambda: tuple(types_to_iteratively_nest), (), True) if _ivy.exists(self._queues): if isinstance(self._container_combine_method, str): self._container_combine_method =\ {'list_join': self.list_join, 'concat': lambda conts: self.concat(conts, 0)}[self._container_combine_method] self._loaded_containers_from_queues = dict() self._queue_load_sizes_cum = _np.cumsum(queue_load_sizes) self._queue_timeout = _ivy.default(queue_timeout, _ivy.queue_timeout()) self._local_ivy = ivyh self._keyword_color_dict = _ivy.default(keyword_color_dict, {}) self._rebuild_child_containers = rebuild_child_containers self._config = dict( print_limit=print_limit, print_indent=print_indent, print_line_spacing=print_line_spacing, ivyh=ivyh, keyword_color_dict=keyword_color_dict, rebuild_child_containers=rebuild_child_containers, types_to_iteratively_nest=types_to_iteratively_nest) if dict_in is None: if kwargs: dict_in = dict(**kwargs) else: dict_in = dict() elif kwargs: raise Exception('dict_in and **kwargs cannot both be specified for ivy.Container constructor,' 'please specify one or the other, not both.') if isinstance(dict_in, dict): dict_in = dict_in elif isinstance(dict_in, tuple(self._types_to_iteratively_nest)): dict_in = dict(zip(['it_{}'.format(str(i).zfill(len(str(len(dict_in))))) for i in range(len(dict_in))], dict_in)) else: raise Exception('invalid input {}'.format(dict_in)) for key, value in sorted(dict_in.items()): d = isinstance(value, tuple(self._types_to_iteratively_nest)) if (isinstance(value, dict) and (not isinstance(value, Container) or rebuild_child_containers)) or \ isinstance(value, tuple(self._types_to_iteratively_nest)): self[key] = Container(value, **self._config) else: self[key] = value # Class Methods # # --------------# @staticmethod def list_join(containers, config=None): """ Join containers of lists together along the specified dimension. :param containers: containers to list join :type containers: sequence of Container objects :param config: The configuration for the containers. Default is the same as container0. :type config: dict, optional :return: List joined containers, with each entry being a list of arrays """ container0 = containers[0] if not _ivy.exists(config): config = container0.config if isinstance(container0, Container) else {} if isinstance(container0, Container): return_dict = dict() for key in container0.keys(): new_list = list() for container in containers: new_list.append(container[key]) return_dict[key] = Container.list_join(new_list, config) return Container(return_dict, **config) else: return [item for sublist in containers for item in sublist] @staticmethod def list_stack(containers, dim, config=None): """ List stack containers together along the specified dimension. :param containers: containers to list stack :type containers: sequence of Container objects :param dim: dimension along which to list stack :type dim: int :param config: The configuration for the containers. Default is the same as container0. 
        :type config: dict, optional
        :return: Stacked containers, with each entry being a list of arrays
        """
        container0 = containers[0]
        if not _ivy.exists(config):
            config = container0.config if isinstance(container0, Container) else {}
        if isinstance(container0, Container):
            return_dict = dict()
            for key in container0.keys():
                return_dict[key] = Container.list_stack([container[key] for container in containers], dim, config)
            return Container(return_dict, **config)
        else:
            return containers

    @staticmethod
    def _concat_unify(containers, dev_str, axis=0):
        return Container.concat([cont.to_dev(dev_str) for cont in containers.values()], axis)

    @staticmethod
    def _sum_unify(containers, dev_str, _=None, _1=None):
        return sum([cont.to_dev(dev_str) for cont in containers.values()])

    @staticmethod
    def _mean_unify(containers, dev_str, _=None, _1=None):
        return Container._sum_unify(containers, dev_str) / len(containers)

    @staticmethod
    def unify(containers, dev_str, mode, axis=0):
        """
        Unify a list of containers, on arbitrary devices, to a single container on the specified device.

        :param containers: containers to unify
        :type containers: sequence of Container objects
        :param dev_str: The device to unify the containers to.
        :type dev_str: str
        :param mode: The mode by which to unify, must be one of [ concat | mean | sum ]
        :type mode: str
        :param axis: The axis along which to concatenate the container, if concat mode is set. Default is 0.
        :type axis: int, optional
        :return: Unified container
        """
        return {'concat': Container._concat_unify,
                'sum': Container._sum_unify,
                'mean': Container._mean_unify}[mode](containers, dev_str, axis)

    @staticmethod
    def concat(containers, dim, config=None):
        """
        Concatenate containers together along the specified dimension.

        :param containers: containers to concatenate
        :type containers: sequence of Container objects
        :param dim: dimension along which to concatenate
        :type dim: int
        :param config: The configuration for the containers. Default is the same as container0.
        :type config: dict, optional
        :return: Concatenated containers
        """
        container0 = containers[0]
        if not _ivy.exists(config):
            config = container0.config if isinstance(container0, Container) else {}
        if isinstance(container0, Container):
            return_dict = dict()
            for key in container0.keys():
                return_dict[key] = Container.concat([container[key] for container in containers], dim, config)
            return Container(return_dict, **config)
        else:
            # noinspection PyProtectedMember
            ivyh = _ivy.default(config.get('ivyh'), _ivy)
            # noinspection PyBroadException
            try:
                if len(containers[0].shape) == 0:
                    return ivyh.concatenate([ivyh.reshape(item, [1] * (dim + 1)) for item in containers], dim)
                else:
                    return ivyh.concatenate(containers, dim)
            except Exception as e:
                raise Exception(str(e) + '\nContainer concat operation only valid for containers of arrays')

    @staticmethod
    def stack(containers, dim, config=None):
        """
        Stack containers together along the specified dimension.

        :param containers: containers to stack
        :type containers: sequence of Container objects
        :param dim: dimension along which to stack
        :type dim: int
        :param config: The configuration for the containers. Default is the same as container0.
        :type config: dict, optional
        :return: Stacked containers
        """
        container0 = containers[0]
        if not _ivy.exists(config):
            config = container0.config if isinstance(container0, Container) else {}
        if isinstance(container0, Container):
            return_dict = dict()
            for key in container0.keys():
                return_dict[key] = Container.stack([container[key] for container in containers], dim, config)
            return Container(return_dict, **config)
        else:
            # noinspection PyProtectedMember
            ivyh = _ivy.default(config.get('ivyh'), _ivy)
            # noinspection PyBroadException
            try:
                if len(containers[0].shape) == 0:
                    return ivyh.stack([ivyh.reshape(item, [1] * (dim + 1)) for item in containers], dim)
                else:
                    return ivyh.stack(containers, dim)
            except Exception as e:
                raise Exception(str(e) + '\nContainer stack operation only valid for containers of arrays')

    @staticmethod
    def combine(*containers, config=None):
        """
        Combine keys and values in a sequence of containers, with priority given to the right-most container in the
        case of duplicates.

        :param containers: containers to combine
        :type containers: sequence of Container objects
        :param config: The configuration for the containers. Default is the same as container_rightmost.
        :type config: dict, optional
        :return: Combined containers
        """
        # if inputs are not dicts, then simply return the right-most value
        container_rightmost = containers[-1]
        if not isinstance(container_rightmost, dict):
            return container_rightmost
        if not _ivy.exists(config):
            config = container_rightmost.config if isinstance(container_rightmost, Container) else {}
        # return if len==1
        if len(containers) == 1:
            return container_rightmost
        # otherwise, check that the keys are aligned between each container, and apply this method recursively
        return_dict = dict()
        all_keys = set([item for sublist in [list(cont.keys()) for cont in containers] for item in sublist])
        for key in all_keys:
            keys_present = [key in cont for cont in containers]
            return_dict[key] =\
                _ivy.Container.combine(*[cont[key] for cont, kp in zip(containers, keys_present) if kp],
                                       config=config)
        return _ivy.Container(return_dict, **config)

    @staticmethod
    def diff(*containers, mode='all', diff_keys='diff', detect_key_diffs=True, config=None):
        """
        Compare keys and values in a sequence of containers, returning the single shared values where they are the
        same, and new nested sub-dicts with all values where they are different.

        :param containers: containers to compare
        :type containers: sequence of Container objects
        :param mode: The mode of the diff operation, returning either all keys and values, only those that are
                     consistent across the containers, or only the differences. Default is all.
        :type mode: str, optional
        :param diff_keys: The key/keys to add to the returned container when differences are found. Default is "diff".
        :type diff_keys: str or list of strs, optional
        :param detect_key_diffs: Whether to treat different keys as detected differences. If not, the keys among the
                                 input containers are simply combined without flagging differences. Default is True.
        :type detect_key_diffs: bool, optional
        :param config: The configuration for the containers. Default is the same as container0.
        :type config: dict, optional
        :return: Compared containers
        """
        if mode not in ['all', 'same_only', 'diff_only']:
            raise Exception('mode must be one of [ "all" | "same_only" | "diff_only" ], but found {}'.format(mode))
        # if inputs are not dicts, then compare their values to determine the diff dict
        num_containers = len(containers)
        container0 = containers[0]
        if not _ivy.exists(config):
            config = container0.config if isinstance(container0, Container) else {}
        if not isinstance(container0, dict):
            equal_mat = _ivy.equal(*containers, equality_matrix=True)
            if _ivy.reduce_min(_ivy.cast(equal_mat, 'int32')) == 1:
                if mode == 'diff_only':
                    return _ivy.Container(**config)
                return container0
            elif mode == 'same_only':
                return _ivy.Container(**config)
            else:
                cont_range = range(num_containers)
                diff_dict = dict()
                cont_dict = dict(zip(cont_range, containers))
                idxs_added = list()
                for idx in cont_range:
                    if idx not in idxs_added:
                        idxs_to_add = _ivy.indices_where(equal_mat[idx])
                        idxs_to_add_list = sorted(_ivy.to_numpy(idxs_to_add).reshape(-1).tolist())
                        if isinstance(diff_keys, str):
                            key = diff_keys + '_' + str(idxs_to_add_list)[1:-1]
                        elif isinstance(diff_keys, (list, tuple)):
                            key = diff_keys[idx]
                        else:
                            raise Exception('diff_keys must be either a string or list of strings, '
                                            'but found {} of type {}'.format(diff_keys, type(diff_keys)))
                        diff_dict[key] = cont_dict[idx]
                        idxs_added += idxs_to_add_list
                return _ivy.Container(diff_dict, **config)
        # otherwise, check that the keys are aligned between each container, and apply this method recursively
        return_dict = dict()
        all_keys = set([item for sublist in [list(cont.keys()) for cont in containers] for item in sublist])
        for key in all_keys:
            keys_present = [key in cont for cont in containers]
            all_keys_present = sum(keys_present) == num_containers
            if all_keys_present:
                res = _ivy.Container.diff(*[cont[key] for cont in containers],
                                          mode=mode, diff_keys=diff_keys, detect_key_diffs=detect_key_diffs,
                                          config=config)
                if not isinstance(res, dict) or res:
                    return_dict[key] = res
                continue
            elif sum(keys_present) == 1 and not detect_key_diffs:
                return_dict[key] = containers[keys_present.index(True)][key]
                continue
            diff_dict = dict()
            for i, (key_present, cont) in enumerate(zip(keys_present, containers)):
                if detect_key_diffs:
                    if key_present and mode != 'same_only':
                        if isinstance(diff_keys, str):
                            diff_dict[diff_keys + '_' + str(i)] = cont[key]
                        elif isinstance(diff_keys, (list, tuple)):
                            diff_dict[diff_keys[i]] = cont[key]
                        else:
                            raise Exception('diff_keys must be either a string or list of strings, '
                                            'but found {} of type {}'.format(diff_keys, type(diff_keys)))
            if diff_dict:
                return_dict[key] = diff_dict
        return _ivy.Container(return_dict, **config)

    @staticmethod
    def multi_map(func, containers, key_chains=None, to_apply=True, prune_unapplied=False, key_chain='', config=None):
        """
        Apply function to all array values from a collection of identically structured containers.

        :param func: Function to apply to each container entry.
        :type func: python function
        :param containers: containers to map.
        :type containers: sequence of Container objects
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied,
                                otherwise the leftmost container value is used. Default is False.
        :type prune_unapplied: bool, optional
        :param key_chain: Chain of keys for this dict entry
        :type key_chain: str
        :param config: The configuration for the containers. Default is the same as container0.
        :type config: dict, optional
        :return: Container
        """
        container0 = containers[0]
        if not _ivy.exists(config):
            config = container0.config if isinstance(container0, Container) else {}
        return_dict = dict()
        for key in sorted(container0.keys()):
            values = [cont[key] for cont in containers]
            value0 = values[0]
            this_key_chain = key if key_chain == '' else (key_chain + '/' + key)
            if isinstance(value0, Container):
                ret = _ivy.Container.multi_map(
                    func, values, key_chains, to_apply, prune_unapplied, this_key_chain, config)
                if ret:
                    return_dict[key] = ret
            else:
                if key_chains is not None:
                    if (this_key_chain in key_chains and not to_apply) or (
                            this_key_chain not in key_chains and to_apply):
                        if prune_unapplied:
                            continue
                        return_dict[key] = value0
                        continue
                return_dict[key] = func(values, this_key_chain)
        # noinspection PyProtectedMember
        return Container(return_dict, **config)

    @staticmethod
    def identical_structure(containers, check_types=True, key_chains=None, to_apply=True, key_chain=''):
        """
        Returns a single boolean as to whether the input containers have identical key-chains and data types.

        :param containers: containers to check.
        :type containers: sequence of Container objects
        :param check_types: Whether to also check whether the datatypes of the leaf nodes are the same.
                            Default is True.
        :type check_types: bool, optional
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param key_chain: Chain of keys for this dict entry
        :type key_chain: str
        :return: Boolean
        """
        keys = set([i for sl in [list(cont.keys()) for cont in containers] for i in sl])
        for key in sorted(keys):
            if not min([key in cont for cont in containers]):
                return False
            values = [cont[key] for cont in containers]
            value_0 = values[0]
            type_0 = type(value_0)
            types = [type(val) for val in values]
            if not min([type_n is type_0 for type_n in types]):
                if isinstance(value_0, Container) or check_types:
                    return False
            this_key_chain = key if key_chain == '' else (key_chain + '/' + key)
            if isinstance(value_0, Container):
                ret = _ivy.Container.identical_structure(values, key_chains, to_apply, this_key_chain)
                if not ret:
                    return False
        return True

    @staticmethod
    def from_disk_as_hdf5(h5_obj_or_filepath, slice_obj=slice(None), ivyh=None):
        """
        Load container object from disk, as an h5py file, at the specified hdf5 filepath.

        :param h5_obj_or_filepath: Filepath where the container object is saved to disk, or h5 object.
        :type h5_obj_or_filepath: str or h5 obj
        :param slice_obj: slice object to slice all h5 elements.
        :type slice_obj: slice or sequence of slices
        :param ivyh: Handle to ivy module to use for the calculations. Default is None, which results in the global
                     ivy.
        :type ivyh: handle to ivy module, optional
        :return: Container loaded from disk
        """
        container_dict = dict()
        if isinstance(h5_obj_or_filepath, str):
            h5_obj = _h5py.File(h5_obj_or_filepath, 'r')
        else:
            h5_obj = h5_obj_or_filepath

        for key, value in sorted(h5_obj.items()):
            if isinstance(value, _h5py.Group):
                container_dict[key] = Container.from_disk_as_hdf5(value, slice_obj, ivyh)
            elif isinstance(value, _h5py.Dataset):
                container_dict[key] = _ivy.default(ivyh, _ivy).array(list(value[slice_obj]))
            else:
                raise Exception('Item found inside h5_obj which was neither a Group nor a Dataset.')
        return Container(container_dict, ivyh=ivyh)

    @staticmethod
    def from_disk_as_pickled(pickle_filepath, ivyh=None):
        """
        Load container object from disk at the specified pickle filepath.

        :param pickle_filepath: Filepath where the container object is saved to disk.
        :type pickle_filepath: str
        :param ivyh: Handle to ivy module to use for the calculations. Default is None, which results in the global
                     ivy.
        :type ivyh: handle to ivy module, optional
        :return: Container loaded from disk
        """
        with open(pickle_filepath, 'rb') as pickled_file:
            loaded_container = Container(_pickle.load(pickled_file), ivyh=ivyh)
        if _ivy.wrapped_mode():
            return loaded_container.to_ivy()
        return loaded_container

    @staticmethod
    def from_disk_as_json(json_filepath, ivyh=None):
        """
        Load container object from disk at the specified json filepath.
        If some objects were not json-able during saving, then they will be loaded as strings.

        :param json_filepath: Filepath where the container object is saved to disk.
        :type json_filepath: str
        :param ivyh: Handle to ivy module to use for the calculations. Default is None, which results in the global
                     ivy.
        :type ivyh: handle to ivy module, optional
        :return: Container loaded from disk
        """
        with open(json_filepath) as json_data_file:
            return Container(_json.load(json_data_file), ivyh=ivyh)

    @staticmethod
    def h5_file_size(h5_obj_or_filepath):
        """
        Get file size of h5 file contents.

        :param h5_obj_or_filepath: Filepath where the container object is saved to disk, or h5 object.
        :type h5_obj_or_filepath: str or h5 obj
        :return: Size of h5 file contents, and batch size.
        """
        if isinstance(h5_obj_or_filepath, str):
            h5_obj = _h5py.File(h5_obj_or_filepath, 'r')
        else:
            h5_obj = h5_obj_or_filepath

        size = 0
        batch_size = 0
        for key, value in sorted(h5_obj.items()):
            if isinstance(value, _h5py.Group):
                size_to_add, batch_size = Container.h5_file_size(value)
                size += size_to_add
            elif isinstance(value, _h5py.Dataset):
                value_shape = value.shape
                size += _reduce(_mul, value_shape, 1) * value.dtype.itemsize
                batch_size = value_shape[0]
            else:
                raise Exception('Item found inside h5_obj which was neither a Group nor a Dataset.')
        return size, batch_size

    @staticmethod
    def shuffle_h5_file(h5_obj_or_filepath, seed_value=0):
        """
        Shuffle entries in all datasets of h5 file, such that they are still aligned along axis 0.

        :param h5_obj_or_filepath: Filepath where the container object is saved to disk, or h5 object.
:type h5_obj_or_filepath: str or h5 obj :param seed_value: random seed to use for array shuffling :type seed_value: int """ if seed_value is None: seed_value = _random.randint(0, 1000) if type(h5_obj_or_filepath) is str: h5_obj = _h5py.File(h5_obj_or_filepath, 'a') else: h5_obj = h5_obj_or_filepath for key, value in sorted(h5_obj.items()): if isinstance(value, _h5py.Group): Container.shuffle_h5_file(value, seed_value) elif isinstance(value, _h5py.Dataset): _random.seed(seed_value) # noinspection PyTypeChecker _random.shuffle(value) else: raise Exception('Item found inside h5_obj which was neither a Group nor a Dataset.') if isinstance(h5_obj, _h5py.File): h5_obj.close() @staticmethod def reduce(containers, reduction, config=None): """ Reduce containers. :param containers: containers to reduce :type containers: sequence of Container objects :param reduction: the reduction function :type reduction: callable with single list input x :param config: The configuration for the containers. Default is the same as container0. :type config: dict, optional :return: reduced containers """ container0 = containers[0] if not _ivy.exists(config): config = container0.config if isinstance(container0, Container) else {} if isinstance(container0, Container): return_dict = dict() for key in container0.keys(): return_dict[key] = Container.reduce([container[key] for container in containers], reduction) return Container(return_dict, **config) else: # noinspection PyBroadException try: return reduction(containers) except Exception as e: raise Exception(str(e) + '\nContainer reduce operation only valid for containers of arrays') # Private Methods # # ----------------# def _get_shape(self): if not len(self.keys()): if _ivy.exists(self._queues): return [self._queue_load_sizes_cum[-1]] return [0] sub_shapes =\ [v for k, v in self.map(lambda x, kc: list(x.shape) if self._ivy.is_array(x) else ([len(x)] if isinstance(x, (list, tuple, _ivy.MultiDev)) else None)).to_iterator() if v] if not sub_shapes: return sub_shapes min_num_dims = min([len(sub_shape) for sub_shape in sub_shapes]) sub_shapes_array = _np.asarray([sub_shape[0:min_num_dims] for sub_shape in sub_shapes]) sub_shapes_array = _np.where(sub_shapes_array == 0, -1, sub_shapes_array) mask = _np.prod(sub_shapes_array / sub_shapes_array[0:1], 0) == 1 # noinspection PyTypeChecker return [None if _np.isnan(i) else int(i) for i in _np.where(mask, sub_shapes_array[0], _np.ones(min_num_dims)*float('nan')).tolist()] def _get_shapes(self): return self.map(lambda x, kc: x.shape if hasattr(x, 'shape') else None) def _get_dev_str(self): sub_dev_strs =\ [v for k, v in self.map(lambda x, kc: self._ivy.dev_str(x) if self._ivy.is_array(x) else None).to_iterator() if v] if len(set(sub_dev_strs)) <= 1: return sub_dev_strs[0] return None def _at_key_chains_input_as_seq(self, key_chains, ignore_key_errors=False): return_cont = Container(dict(), **self._config) for kc in key_chains: val = self.at_key_chain(kc, ignore_key_errors=ignore_key_errors) if ignore_key_errors and not _ivy.exists(val): continue return_cont.set_at_key_chain(kc, val, inplace=True) return return_cont def _at_key_chains_input_as_dict(self, key_chains, current_chain='', ignore_key_errors=False): return_dict = dict() for k, v in key_chains.items(): if current_chain == '': new_current_chain = k else: new_current_chain = current_chain + '/' + k if isinstance(v, dict): return_dict[k] = self._at_key_chains_input_as_dict(v, new_current_chain, ignore_key_errors=ignore_key_errors) else: val = 
self.at_key_chain(new_current_chain, ignore_key_errors=ignore_key_errors) if ignore_key_errors and not _ivy.exists(val): continue return_dict[k] = val return Container(return_dict, **self._config) def _prune_key_chains_input_as_seq(self, key_chains): return_cont = self.copy() for kc in key_chains: return_cont = return_cont.prune_key_chain(kc) return return_cont def _prune_key_chains_input_as_dict(self, key_chains, return_cont=None): if return_cont is None: return_cont = self.copy() for k, v in key_chains.items(): if isinstance(v, dict): ret_cont = self._prune_key_chains_input_as_dict(v, return_cont[k]) if ret_cont.shape[0] == 0: del return_cont[k] else: del return_cont[k] return return_cont # Public Methods # # ---------------# def set_framework(self, ivyh): """ Update the framework to use for the container. """ self._ivy = ivyh self._config['ivyh'] = ivyh return self def all_true(self, assert_is_bool=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Determine whether all the entries in the container boolean evaluate to True. :param assert_is_bool: Whether or not to assert each entry is of type Boolean. :type assert_is_bool: bool, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Boolean, whether all entries are boolean True. """ return bool(_np.prod([v for k, v in self.as_bools( assert_is_bool, key_chains, to_apply, prune_unapplied).to_iterator()])) def all_false(self, assert_is_bool=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Determine whether all the entries in the container boolean evaluate to False. :param assert_is_bool: Whether or not to assert each entry is of type Boolean. :type assert_is_bool: bool, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Boolean, whether all entries are boolean False. """ return not bool(_np.sum([v for k, v in self.as_bools( assert_is_bool, key_chains, to_apply, prune_unapplied).to_iterator()])) def reduce_sum(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Computes sum of array elements along a given axis for all sub-arrays of container object. :param axis: Axis or axes along which a sum is performed. The default, axis=None, will sum all of the elements of the input array. If axis is negative it counts from the last to the first axis. If axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. :type axis: int or sequence of ints :param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. 
        :type keepdims: bool, optional
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: Container object with the sums computed for all sub-arrays.
        """
        return self.map(lambda x, kc: self._ivy.reduce_sum(x, axis, keepdims) if self._ivy.is_array(x) else x,
                        key_chains, to_apply, prune_unapplied)

    def reduce_prod(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
        """
        Computes product of array elements along a given axis for all sub-arrays of container object.

        :param axis: Axis or axes along which a product is performed. The default, axis=None, will multiply all of the
                     elements of the input array. If axis is negative it counts from the last to the first axis. If
                     axis is a tuple of ints, a multiplication is performed on all of the axes specified in the tuple
                     instead of a single axis or all the axes as before.
        :type axis: int or sequence of ints
        :param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
                         size one. With this option, the result will broadcast correctly against the input array.
        :type keepdims: bool, optional
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: Container object with the products computed for all sub-arrays.
        """
        return self.map(lambda x, kc: self._ivy.reduce_prod(x, axis, keepdims) if self._ivy.is_array(x) else x,
                        key_chains, to_apply, prune_unapplied)

    def reduce_mean(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
        """
        Computes mean of array elements along a given axis for all sub-arrays of container object.

        :param axis: Axis or axes along which a mean is performed. The default, axis=None, will compute the mean over
                     all of the elements of the input array. If axis is negative it counts from the last to the first
                     axis. If axis is a tuple of ints, a mean is performed on all of the axes specified in the tuple
                     instead of a single axis or all the axes as before.
        :type axis: int or sequence of ints
        :param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
                         size one. With this option, the result will broadcast correctly against the input array.
        :type keepdims: bool, optional
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: Container object with the means computed for all sub-arrays.
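
        Example (a minimal sketch, not a doctest; assumes the active ivy backend provides ivy.array):

        >>> cont = ivy.Container({'a': ivy.array([[1., 2.], [3., 4.]])})
        >>> cont.reduce_mean(axis=0)  # roughly Container({'a': array([2., 3.])})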
""" return self.map(lambda x, kc: self._ivy.reduce_mean(x, axis, keepdims) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def reduce_var(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Computes variance of array elements along a given axis for all sub-arrays of container object. :param axis: Axis or axes along which a var is performed. The default, axis=None, will var all of the elements of the input array. If axis is negative it counts from the last to the first axis. If axis is a tuple of ints, a var is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. :type axis: int or sequence of ints :param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. :type keepdims: bool, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with the variance computed for all sub-arrays. """ return self.map(lambda x, kc: self._ivy.reduce_var(x, axis, keepdims) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def reduce_std(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Computes standard deviation of array elements along a given axis for all sub-arrays of container object. :param axis: Axis or axes along which a var is performed. The default, axis=None, will var all of the elements of the input array. If axis is negative it counts from the last to the first axis. If axis is a tuple of ints, a var is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. :type axis: int or sequence of ints :param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. :type keepdims: bool, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with the standard deviation computed for all sub-arrays. """ return self.map(lambda x, kc: self._ivy.reduce_std(x, axis, keepdims) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def reduce_min(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Computes min of array elements along a given axis for all sub-arrays of container object. :param axis: Axis or axes along which a min is performed. The default, axis=None, will min all of the elements of the input array. If axis is negative it counts from the last to the first axis. 
                     If axis is a tuple of ints, the minimum is computed over all of the axes specified in the tuple
                     instead of a single axis or all the axes as before.
        :type axis: int or sequence of ints
        :param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
                         size one. With this option, the result will broadcast correctly against the input array.
        :type keepdims: bool, optional
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: Container object with the minimum values computed for all sub-arrays.
        """
        return self.map(lambda x, kc: self._ivy.reduce_min(x, axis, keepdims) if self._ivy.is_array(x) else x,
                        key_chains, to_apply, prune_unapplied)

    def reduce_max(self, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False):
        """
        Computes max of array elements along a given axis for all sub-arrays of container object.

        :param axis: Axis or axes along which a max is performed. The default, axis=None, will compute the maximum
                     over all of the elements of the input array. If axis is negative it counts from the last to the
                     first axis. If axis is a tuple of ints, the maximum is computed over all of the axes specified in
                     the tuple instead of a single axis or all the axes as before.
        :type axis: int or sequence of ints
        :param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions with
                         size one. With this option, the result will broadcast correctly against the input array.
        :type keepdims: bool, optional
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: Container object with the maximum values computed for all sub-arrays.
        """
        return self.map(lambda x, kc: self._ivy.reduce_max(x, axis, keepdims) if self._ivy.is_array(x) else x,
                        key_chains, to_apply, prune_unapplied)

    def minimum(self, other, key_chains=None, to_apply=True, prune_unapplied=False):
        """
        Computes the elementwise minimum between this container and another container or number.

        :param other: The other container or number to compute the minimum against.
        :type other: Ivy container or number
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: Container object with all sub-arrays having the minimum values computed.
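
        Example (a minimal sketch, not a doctest):

        >>> cont = ivy.Container({'a': ivy.array([1., 5.])})
        >>> cont.minimum(3.)  # roughly Container({'a': array([1., 3.])}), the elementwise min with the scalar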
""" is_container = isinstance(other, Container) return self.map(lambda x, kc: self._ivy.minimum(x, other[kc] if is_container else other) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def maximum(self, other, key_chains=None, to_apply=True, prune_unapplied=False): """ Computes the elementwise maximum between this container and another container or number. :param other: The other container or number to compute the maximum against. :type other: Ivy container or number :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with all sub-arrays having the maximum values computed. """ is_container = isinstance(other, Container) return self.map(lambda x, kc: self._ivy.maximum(x, other[kc] if is_container else other) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def clip(self, clip_min, clip_max, key_chains=None, to_apply=True, prune_unapplied=False): """ Computes the elementwise clipped values between this container and clip_min and clip_max containers or numbers. :param clip_min: The minimum container or number to clip against. :type clip_min: Ivy container or number :param clip_max: The maximum container or number to clip against. :type clip_max: Ivy container or number :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with all sub-arrays having the clipped values returned. """ min_is_container = isinstance(clip_min, Container) max_is_container = isinstance(clip_max, Container) return self.map(lambda x, kc: self._ivy.clip(x, clip_min[kc] if min_is_container else clip_min, clip_max[kc] if max_is_container else clip_max) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def clip_vector_norm(self, max_norm, p, global_norm=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Computes the elementwise clipped values between this container and clip_min and clip_max containers or numbers. :param max_norm: The max norm container or number to clip against. :type max_norm: Ivy container or number :param p: The p-value for computing the p-norm container or number. :type p: Ivy container or number :param global_norm: Whether to compute the norm across all the concattenated sub-arrays. Default is False. :type global_norm: bool, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. 
        :type prune_unapplied: bool, optional
        :return: Container object with all sub-arrays having the clipped norms returned.
        """
        max_norm_is_container = isinstance(max_norm, Container)
        p_is_container = isinstance(p, Container)
        if global_norm:
            if max_norm_is_container or p_is_container:
                raise Exception(
                    'global_norm can only be computed for scalar max_norm and p_val arguments, '
                    'but found {} and {} of type {} and {} respectively'.format(
                        max_norm, p, type(max_norm), type(p)))
            vector_norm = self.vector_norm(p, global_norm=True)
            ratio = max_norm / vector_norm
            if ratio < 1:
                return self * ratio
            return self.copy()
        return self.map(lambda x, kc: self._ivy.clip_vector_norm(
            x, max_norm[kc] if max_norm_is_container else max_norm, p[kc] if p_is_container else p)
            if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied)

    def einsum(self, equation, key_chains=None, to_apply=True, prune_unapplied=False):
        """
        Sums the product of the elements of the input operands along dimensions specified using a notation based on the
        Einstein summation convention, for each array in the container.

        :param equation: A str describing the contraction, in the same format as numpy.einsum.
        :type equation: str
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: Container object with the einsum result computed for each sub-array.
        """
        return self.map(lambda x, kc: self._ivy.einsum(equation, x) if self._ivy.is_array(x) else x,
                        key_chains, to_apply, prune_unapplied)

    def vector_norm(self, p=2, axis=None, keepdims=False, global_norm=False, key_chains=None, to_apply=True,
                    prune_unapplied=False):
        """
        Compute vector p-norm for each array in the container.

        :param p: Order of the norm. Default is 2.
        :type p: int or str or container, optional
        :param axis: If axis is an integer, it specifies the axis of x along which to compute the vector norms.
                     Default is None, in which case the flattened array is considered.
        :type axis: int or sequence of ints, optional
        :param keepdims: If this is set to True, the axes which are normed over are left in the result as dimensions
                         with size one. With this option the result will broadcast correctly against the original x.
                         Default is False.
        :type keepdims: bool, optional
        :param global_norm: Whether to compute the norm across all the concatenated sub-arrays. Default is False.
        :type global_norm: bool, optional
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: Container object with the vector norms for each sub-array returned.
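
        Example (a minimal sketch, not a doctest):

        >>> cont = ivy.Container({'a': ivy.array([3., 4.])})
        >>> cont.vector_norm(p=2)  # roughly Container({'a': 5.}), the 2-norm of each sub-array
        >>> cont.vector_norm(p=2, global_norm=True)  # a single scalar norm across all sub-arrays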
""" p_is_container = isinstance(p, Container) if global_norm: if p_is_container: raise Exception( 'global_norm can only be computed for scalar p argument,' 'but found {} of type {}'.format(p, type(p))) return sum([v for k, v in self.map(lambda x, kc: self._ivy.reduce_sum(x ** p)).to_iterator()]) ** (1/p) return self.map(lambda x, kc: self._ivy.vector_norm(x, p[kc] if p_is_container else p, axis, keepdims) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def matrix_norm(self, p=2, axis=None, keepdims=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Compute matrix p-norm for each array in the container. :param p: Order of the norm. Default is 2. :type p: int or str, optional :param axis: If axis is an integer, it specifies the axis of x along which to compute the matrix norms. Default is None, in which case the flattened array is considered. :type axis: int or sequence of ints, optional :param keepdims: If this is set to True, the axes which are normed over are left in the result as dimensions with size one. With this option the result will broadcast correctly against the original x. Default is False. :type keepdims: bool, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with the matrix norms for each sub-array returned. """ return self.map(lambda x, kc: self._ivy.matrix_norm(x, p, axis, keepdims) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def flip(self, axis=None, key_chains=None, to_apply=True, prune_unapplied=False): """ Reverses the order of elements in for each array in the container, along the given axis. The shape of the array is preserved, but the elements are reordered. :param axis: Axis or axes along which to flip over. The default, axis=None, will flip over all axes. :type axis: None or int or sequence of ints, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with all sub-array dimensions expanded along the axis. """ return self.map(lambda x, kc: self._ivy.flip(x, axis) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def shuffle(self, seed_value=None, key_chains=None, to_apply=True, prune_unapplied=False, key_chain=''): """ Shuffle entries in all sub-arrays, such that they are still aligned along axis 0. :param seed_value: random seed to use for array shuffling :type seed_value: int :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. 
:type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :param key_chain: Chain of keys for this dict entry :type key_chain: str """ return_dict = dict() if seed_value is None: seed_value = self._ivy.to_numpy(self._ivy.random.randint(0, 1000, ())).item() for key, value in sorted(self.items()): this_key_chain = key if key_chain == '' else (key_chain + '/' + key) if isinstance(value, Container): ret = value.shuffle(seed_value, key_chains, to_apply, prune_unapplied, this_key_chain) if ret: return_dict[key] = ret else: if key_chains is not None: if (this_key_chain in key_chains and not to_apply) or ( this_key_chain not in key_chains and to_apply): if prune_unapplied: continue return_dict[key] = value continue self._ivy.seed(seed_value) return_dict[key] = self._ivy.shuffle(value) return Container(return_dict, **self._config) def slice_via_key(self, slice_key): """ Get slice of container, based on key. :param slice_key: key to slice container at. :type slice_key: str :return: Container object sliced at desired key. """ return_dict = dict() for key, value in sorted(self.items()): if key == slice_key: return value elif isinstance(value, Container): return_dict[key] = value.slice_via_key(slice_key) else: return_dict[key] = value return Container(return_dict, **self._config) def as_ones(self, key_chains=None, to_apply=True, prune_unapplied=False): """ Return arrays of ones for all nested arrays in the container. :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with all sub-arrays filled with ones. """ return self.map(lambda x, kc: self._ivy.ones_like(x) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def as_zeros(self, key_chains=None, to_apply=True, prune_unapplied=False): """ Return arrays of zeros for all nested arrays in the container. :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with all sub-arrays filled with zeros. """ return self.map(lambda x, kc: self._ivy.zeros_like(x) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def as_bools(self, assert_is_bool=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Return boolean evaluation for all nested items in the container. :param assert_is_bool: Whether or not to assert the entry is of type Boolean. :type assert_is_bool: bool, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. 
:type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with all entries boolean evaluated. """ def _ret_bool(x): if assert_is_bool: assert isinstance(x, bool) return x return bool(x) return self.map(lambda x, kc: _ret_bool(x), key_chains, to_apply, prune_unapplied) def as_random_uniform(self, low=0.0, high=1.0, key_chains=None, to_apply=True, prune_unapplied=False): """ Return arrays of random uniform values for all nested arrays in the container. :param low: Lower boundary of the output interval. All values generated will be greater than or equal to low. The default value is 0. :type low: float :param high: Upper boundary of the output interval. All values generated will be less than high. The default value is 1.0. :type high: float :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with all sub-arrays filled with random uniform values. """ return self.map(lambda x, kc: self._ivy.random_uniform( low, high, x.shape, self._ivy.dev_str(x)) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def to_native(self, nested=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Return native framework arrays for all nested arrays in the container. :param nested: Whether to apply the conversion on arguments in a nested manner. If so, all dicts, lists and tuples will be traversed to their lowest leaves in search of ivy.Array and ivy.Variable instances. Default is False. :type nested: bool, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with all sub-arrays converted to their native format. """ return self.map(lambda x, kc: self._ivy.to_native(x, nested=nested), key_chains, to_apply, prune_unapplied) def to_ivy(self, nested=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Return ivy arrays for all nested native framework arrays in the container. :param nested: Whether to apply the conversion on arguments in a nested manner. If so, all dicts, lists and tuples will be traversed to their lowest leaves in search of ivy.Array and ivy.Variable instances. Default is False. :type nested: bool, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. 
        :type prune_unapplied: bool, optional
        :return: Container object with all native sub-arrays converted to their ivy.Array instances.
        """
        return self.map(lambda x, kc: self._ivy.to_ivy(x, nested=nested), key_chains, to_apply, prune_unapplied)

    def expand_dims(self, axis, key_chains=None, to_apply=True, prune_unapplied=False):
        """
        Expand dims of all sub-arrays of container object.

        :param axis: Axis along which to expand dimensions of the sub-arrays.
        :type axis: int
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: Container object with all sub-array dimensions expanded along the axis.
        """
        return self.map(lambda x, kc: self._ivy.expand_dims(x, axis) if self._ivy.is_array(x) else x,
                        key_chains, to_apply, prune_unapplied)

    def dev_clone(self, dev_strs):
        """
        Clone the current container across multiple devices.

        :param dev_strs: The devices on which to clone the container.
        :type dev_strs: sequence of str
        :return: a set of cloned containers across the specified devices.
        """
        return self._ivy.DevClonedItem({dev_str: self.to_dev(dev_str) for dev_str in dev_strs})

    def dev_dist(self, dev_strs: Union[Iterable[str], Dict[str, int]], axis=0):
        """
        Distribute the current container across multiple devices.

        :param dev_strs: The devices along which to distribute the container.
        :type dev_strs: sequence of strs or dict of split sizes
        :param axis: The axis along which to split the arrays at the container leaves. Default is 0.
        :type axis: int, optional
        :return: a set of distributed sub-containers across the specified devices.
        """
        split_arg = list(dev_strs.values()) if isinstance(dev_strs, dict) else len(dev_strs)
        return self._ivy.DevDistItem(
            {dev_str: cont.to_dev(dev_str) for cont, dev_str in
             zip(self.split(split_arg, axis, with_remainder=True), dev_strs)})

    def to_multi_dev(self, dev_strs, axis=0):
        """
        Return a single MultiDevContainer, which shares the same structure as the current container,
        but replaces arrays at the leaves with DistributedArray instances.

        :param dev_strs: The devices along which to distribute each array in the container.
        :type dev_strs: sequence of str
        :param axis: The axis along which to split the arrays at the container leaves. Default is 0.
        :type axis: int, optional
        :return: a MultiDevContainer instance, with all leaf arrays replaced by DistributedArray instances.
        """
        return MultiDevContainer(
            self.map(lambda x, kc: self._ivy.dev_dist_array(x, dev_strs, axis)), dev_strs, **self._config)

    def unstack(self, axis, keepdims=False, dim_size=None):
        """
        Unstack containers along specified dimension.

        :param axis: Dimension along which to unstack.
        :type axis: int
        :param keepdims: Whether to keep dimension 1 in the unstack dimensions. Default is False.
        :type keepdims: bool, optional
        :param dim_size: Size of the dimension to unstack. Determined from inputs by default.
        :type dim_size: int, optional
        :return: List of containers, unstacked along the specified dimension.
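        Example (illustrative sketch added for clarity, not part of the original docs; assumes
        an ivy backend is active so that `ivy.array` resolves):

        >>> cont = Container({'a': ivy.array([[1., 2.], [3., 4.]])})
        >>> subs = cont.unstack(0)  # list of 2 containers, each with 'a' of shape [2]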
""" if dim_size is None: dim_size = self.shape[axis] if keepdims: # noinspection PyTypeChecker return [self[slice(i, i+1, 1) if axis == 0 else tuple([slice(None, None, None)] * axis + [slice(i, i+1, 1)])] for i in range(dim_size)] # noinspection PyTypeChecker return [self[i if axis == 0 else tuple([slice(None, None, None)] * axis + [i])] for i in range(dim_size)] def split(self, num_or_size_splits=None, axis=0, with_remainder=False, key_chains=None, to_apply=True, prune_unapplied=False): """ Splits a container into multiple sub-containers, by splitting their constituent arrays. :param num_or_size_splits: Number of equal arrays to divide the array into along the given axis if an integer. The size of each split element if a sequence of integers. Default is to divide into as many 1-dimensional arrays as the axis dimension. :type num_or_size_splits: int, optional :param axis: The axis along which to split, default is 0. :type axis: int, optional :param with_remainder: If the tensor does not split evenly, then store the last remainder entry. Default is False. :type with_remainder: bool, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: A list of sub-arrays. """ # ToDo: make this more efficient, without so many recursive container calls. For example the splits indices # can be calculated here, and then slices applied directly only once dim_size = num_or_size_splits if isinstance(num_or_size_splits, int) else len(num_or_size_splits) # noinspection PyTypeChecker return self.map( lambda x, kc: self._ivy.split(x, num_or_size_splits, axis, with_remainder) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied).unstack(0, dim_size=dim_size) def gather(self, indices, axis=-1, key_chains=None, to_apply=True, prune_unapplied=False): """ Gather slices from all container params at axis according to indices. :param indices: Index array. :type indices: array :param axis: The axis from which to gather from. Default is -1. :type axis: int, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with all sub-array dimensions gathered along the axis. """ return self.map(lambda x, kc: self._ivy.gather(x, indices, axis) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def gather_nd(self, indices, key_chains=None, to_apply=True, prune_unapplied=False): """ Gather slices from all container params into a arrays with shape specified by indices. :param indices: Index array. :type indices: array :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. 
:type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: Container object with all sub-array dimensions gathered. """ return self.map(lambda x, kc: self._ivy.gather_nd(x, indices) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def repeat(self, repeats, axis=None, key_chains=None, to_apply=True, prune_unapplied=False): """ Repeat values along a given dimension for each array in the container. :param repeats: Number of repetitions for each element. repeats is broadcast to fit the shape of the given axis. :type repeats: int or sequence of ints. :param axis: The axis along which to repeat values. By default, use the flattened input array, and return a flat output array. :type axis: int, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: container with each array being repeated along the specified dimension. """ return self.map(lambda x, kc: self._ivy.repeat(x, repeats, axis) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def swapaxes(self, axis0, axis1, key_chains=None, to_apply=True, prune_unapplied=False): """ Interchange two axes for each array in the container. :param axis0: First axis to be swapped. :type axis0: int :param axis1: Second axis to be swapped. :type axis1: int :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: ivy.Container with each chosen array having the axes swapped. """ return self.map(lambda x, kc: self._ivy.swapaxes(x, axis0, axis1) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def reshape(self, pre_shape=None, shape_slice=None, post_shape=None, key_chains=None, to_apply=True, prune_unapplied=False): """ Reshapes each array x in the container, to a new shape given by pre_shape + x.shape[shape_slice] + post_shape. If shape_slice or post_shape are not specified, then the term is ignored. :param pre_shape: The first elements in the new array shape. :type pre_shape: int or sequence of ints, optional :param shape_slice: The slice of the original shape to use in the new shape. Default is None. :type shape_slice: int or sequence of ints, optional :param post_shape: The final elements in the new array shape. Default is None. :type post_shape: sequence of ints, optional :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. 
:type prune_unapplied: bool, optional :return: ivy.Container with each array reshaped as specified. """ pre_shape = [] if pre_shape is None else\ ([pre_shape] if isinstance(pre_shape, int) else list(pre_shape)) post_shape = [] if post_shape is None else\ ([post_shape] if isinstance(post_shape, int) else list(post_shape)) if shape_slice is None: return self.map(lambda x, kc: self._ivy.reshape(x, pre_shape + post_shape) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) shape_slice = slice(shape_slice, shape_slice+1) if isinstance(shape_slice, int) else shape_slice return self.map(lambda x, kc: self._ivy.reshape(x, pre_shape + list(x.shape[shape_slice]) + post_shape) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def einops_rearrange(self, pattern, key_chains=None, to_apply=True, prune_unapplied=False, **axes_lengths): """ Perform einops rearrange operation on each sub array in the container. :param pattern: Rearrangement pattern. :type pattern: str :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :param axes_lengths: Any additional specifications for dimensions. :type axes_lengths: keyword parameter args :return: ivy.Container with each array having einops.rearrange applied. """ return self.map(lambda x, kc: _ivy.einops_rearrange(x, pattern, **axes_lengths) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def einops_reduce(self, pattern, reduction, key_chains=None, to_apply=True, prune_unapplied=False, **axes_lengths): """ Perform einops reduce operation on each sub array in the container. :param pattern: Reduction pattern. :type pattern: str :param reduction: One of available reductions ('min', 'max', 'sum', 'mean', 'prod'), or callable. :type reduction: str or callable :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :param axes_lengths: Any additional specifications for dimensions. :type axes_lengths: keyword parameter args :return: ivy.Container with each array having einops.reduce applied. """ return self.map(lambda x, kc: _ivy.einops_reduce(x, pattern, reduction, **axes_lengths) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def einops_repeat(self, pattern, key_chains=None, to_apply=True, prune_unapplied=False, **axes_lengths): """ Perform einops repeat operation on each sub array in the container. :param pattern: Rearrangement pattern. :type pattern: str :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. 
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :param axes_lengths: Any additional specifications for dimensions.
        :type axes_lengths: keyword parameter args
        :return: ivy.Container with each array having einops.repeat applied.
        """
        return self.map(lambda x, kc: _ivy.einops_repeat(x, pattern, **axes_lengths) if self._ivy.is_array(x) else x,
                        key_chains, to_apply, prune_unapplied)

    def to_dev(self, dev_str, key_chains=None, to_apply=True, prune_unapplied=False):
        """
        Move the container arrays to the desired device, specified by device string.

        :param dev_str: device to move the array to 'cuda:0', 'cuda:1', 'cpu' etc. Keep same device if None.
        :type dev_str: str, optional
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: The container, but with each sub-array now placed on the target device.
        """
        return self.map(lambda x, kc: self._ivy.stop_gradient(self._ivy.to_dev(x, dev_str))
                        if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied)

    def stop_gradients(self, preserve_type=True, key_chains=None, to_apply=True, prune_unapplied=False):
        """
        Stop gradients of all array entries in the container.

        :param preserve_type: Whether to preserve the input type (ivy.Variable or ivy.Array), otherwise an array is
                              always returned. Default is True.
        :type preserve_type: bool, optional
        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: container with each array having its gradients stopped.
        """
        return self.map(
            lambda x, kc: self._ivy.stop_gradient(x, preserve_type) if self._ivy.is_variable(x) else x,
            key_chains, to_apply, prune_unapplied)

    def as_variables(self, key_chains=None, to_apply=True, prune_unapplied=False):
        """
        Converts all nested arrays to variables, which support gradient computation.

        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
        :type key_chains: list or dict of strs, optional
        :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped.
                         Default is True.
        :type to_apply: bool, optional
        :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False.
        :type prune_unapplied: bool, optional
        :return: container with each array converted to a variable.
        """
        return self.map(lambda x, kc: self._ivy.variable(x) if self._ivy.is_array(x) else x,
                        key_chains, to_apply, prune_unapplied)

    def as_arrays(self, key_chains=None, to_apply=True, prune_unapplied=False):
        """
        Converts all nested variables to arrays, which do not support gradient computation.

        :param key_chains: The key-chains to apply or not apply the method to. Default is None.
:type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: container with each variable converted to an array. """ return self.map( lambda x, kc: self._ivy.stop_gradient(x, False) if self._ivy.is_variable(x) else (x if self._ivy.is_array(x) else self._ivy.array(x)), key_chains, to_apply, prune_unapplied) def to_numpy(self, key_chains=None, to_apply=True, prune_unapplied=False): """ Converts all nested ivy arrays to numpy arrays. :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: container with each ivy array converted to a numpy array. """ return self.map( lambda x, kc: self._ivy.to_numpy(x) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def arrays_as_lists(self, key_chains=None, to_apply=True, prune_unapplied=False): """ Converts all nested arrays to lists, a useful intermediate step for conversion to other framework array types. :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :return: container with each array converted to a list. """ return self.map( lambda x, kc: self._ivy.to_list(x) if self._ivy.is_array(x) else x, key_chains, to_apply, prune_unapplied) def to_disk_as_hdf5(self, h5_obj_or_filepath, starting_index=0, mode='a', max_batch_size=None): """ Save container object to disk, as an h5py file, at the specified filepath. :param h5_obj_or_filepath: Filepath for where to save the container to disk, or h5 object. :type h5_obj_or_filepath: str or h5 object :param starting_index: Batch index for which to start writing to file, if it already exists :type starting_index: int :param mode: H5 read/write mode for writing to disk, ['r', 'r+', 'w', 'w-', 'a'], default is 'a'. :type mode: str :param max_batch_size: Maximum batch size for the container on disk, this is useful if later appending to file. 
        :type max_batch_size: int
        """
        if type(h5_obj_or_filepath) is str:
            h5_obj = _h5py.File(h5_obj_or_filepath, mode)
        else:
            h5_obj = h5_obj_or_filepath
        for key, value in sorted(self.items()):
            if isinstance(value, Container):
                if key not in h5_obj.keys():
                    h5_group = h5_obj.create_group(key)
                else:
                    h5_group = h5_obj[key]
                value.to_disk_as_hdf5(h5_group, starting_index, mode, max_batch_size)
            else:
                value_as_np = self._ivy.to_numpy(value)
                value_shape = value_as_np.shape
                this_batch_size = value_shape[0]
                if not max_batch_size:
                    max_batch_size = starting_index + this_batch_size
                if key not in h5_obj.keys():
                    dataset_shape = [max_batch_size] + list(value_shape[1:])
                    maxshape = ([None for _ in dataset_shape])
                    h5_obj.create_dataset(key, dataset_shape, dtype=value_as_np.dtype, maxshape=maxshape)
                space_left = max_batch_size - starting_index
                amount_to_write = min(this_batch_size, space_left)
                h5_obj[key][starting_index:starting_index + amount_to_write] = value_as_np[0:amount_to_write]

    def to_disk_as_pickled(self, pickle_filepath):
        """
        Save container object to disk, as a pickled file, at the specified filepath.

        :param pickle_filepath: Filepath for where to save the container to disk.
        :type pickle_filepath: str
        """
        if _ivy.wrapped_mode():
            _pickle.dump(self.to_native().to_dict(), open(pickle_filepath, 'wb'))
        else:
            _pickle.dump(self.to_dict(), open(pickle_filepath, 'wb'))

    def to_jsonable(self, return_dict=None):
        """
        Return container with non-jsonable elements converted to string representations, which are jsonable.
        """
        if return_dict is None:
            return_dict = self.copy()
        for k, v in return_dict.items():
            if not _is_jsonable(v):
                if isinstance(v, dict):
                    return_dict[k] = self.to_jsonable(v)
                else:
                    return_dict[k] = str(v)
        return return_dict

    def to_disk_as_json(self, json_filepath):
        """
        Save container object to disk, as a JSON file, at the specified filepath.

        :param json_filepath: Filepath for where to save the container to disk.
        :type json_filepath: str
        """
        with open(json_filepath, 'w+') as json_data_file:
            _json.dump(self.to_jsonable().to_dict(), json_data_file, indent=4)

    def to_list(self):
        """
        Return nested list representation of container object.

        :return: Container as nested list.
        """
        return_list = list()
        for key, value in sorted(self.items()):
            if isinstance(value, Container):
                return_list.append(value.to_list())
            elif value is not None and key != '_f':
                return_list.append(value)
        return return_list

    def to_raw(self):
        """
        Return nested raw representation of container object.
        This includes restoring lists and tuples passed in the constructor to their original form.

        :return: Container data in its raw form.
        """
        return_item = dict()
        for i, (key, value) in enumerate(sorted(self.items())):
            if isinstance(value, Container):
                return_item[key] = value.to_raw()
            elif key[0:3] == 'it_' and tuple(self._types_to_iteratively_nest):
                return_item = list([v.to_raw() if isinstance(v, Container) else v for v in self.values()])
                break
            else:
                return_item[key] = value
        return return_item

    def to_dict(self):
        """
        Return nested pure dict representation of container object.

        :return: Container as nested dict.
        """
        return_dict = dict()
        for key, value in sorted(self.items()):
            if isinstance(value, Container):
                return_dict[key] = value.to_dict()
            else:
                return_dict[key] = value
        return return_dict

    def to_iterator(self, key_chain='', leaf_keys_only=False):
        """
        Return iterator for traversing through the nested elements of container object.

        :return: Iterator for the container elements.
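        Example (illustrative sketch, not part of the original docs; assumes nested dicts are
        converted to sub-containers on construction):

        >>> cont = Container({'a': 1, 'b': {'c': 2}})
        >>> [kc for kc, v in cont.to_iterator()]  # ['a', 'b/c']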
""" for key, value in sorted(self.items()): if leaf_keys_only: kc = key else: kc = key_chain + '/' + key if key_chain != '' else key if isinstance(value, Container): # noinspection PyCompatibility yield from value.to_iterator(kc, leaf_keys_only) else: yield kc, value def to_iterator_values(self): """ Return iterator for traversing through the nested values of container object. :return: Iterator for the container values. """ for key, value in sorted(self.items()): if isinstance(value, Container): # noinspection PyCompatibility yield from value.to_iterator_values() else: yield value def to_iterator_keys(self, key_chain='', leaf_keys_only=False): """ Return iterator for traversing through the nested keys of container object. :return: Iterator for the container elements. """ for key, value in sorted(self.items()): if leaf_keys_only: kc = key else: kc = key_chain + '/' + key if key_chain != '' else key if isinstance(value, Container): # noinspection PyCompatibility yield from value.to_iterator_keys(kc, leaf_keys_only) else: yield kc def to_flat_list(self): """ Return flat list representation of container object. :return: Container as flat list. """ return list([item for key, item in self.to_iterator()]) def from_flat_list(self, flat_list): """ Return new container object with the same hierarchy, but with values replaced from flat list. :param flat_list: flat list of values to populate container with. :type flat_list: sequence of arrays :return: Container. """ new_dict = dict() for key, value in sorted(self.items()): if isinstance(value, Container): new_value = value.from_flat_list(flat_list) else: new_value = flat_list.pop(0) new_dict[key] = new_value return Container(new_dict, **self._config) def has_key(self, query_key): """ Determine whether container object has specified key somewhere in the nested structure :return: Boolean """ has_key = False def map_fn(x, kc): nonlocal has_key if query_key in kc: has_key = True return x self.map(map_fn) return has_key def has_key_chain(self, key_chain): """ Determine whether container object has specified key-chain :return: Boolean """ keys = re.split('[/.]', key_chain) ret = self for key in keys: try: ret = ret[key] except KeyError: return False return True def has_nans(self, include_infs=True, leafwise=False): """ Determine whether arrays in the container contain any nans, as well as infs or -infs if specified. :param include_infs: Whether to include infs and -infs in the check. Default is True. :type include_infs: bool, optional :param leafwise: Whether to apply the check leaf-wise, and return a container of booleans. Default is False, in which case the check is applied across the entire container, returning a single boolean. :type leafwise: bool, optional :return: Whether the container has any nans, applied either leafwise or across the entire container. """ leafwise_res = self.map(lambda x, kc: _ivy.has_nans(x, include_infs)) if leafwise: return leafwise_res return max([v for k, v in leafwise_res.to_iterator()]) def at_keys(self, queries, ignore_none=True, containing=False, ignore_key_errors=False): """ Query container object at specified keys, either as list or nested dict. :param queries: The keys to query. :type queries: sequence of strs or single str :param ignore_none: Whether to ignore None input. Default is True. :type ignore_none: bool, optional :param containing: Whether to include keys which only contain the query substrings. Default is False. 
        :type containing: bool, optional
        :param ignore_key_errors: Whether to ignore Key-errors when trying to access the dict. Default is False.
        :type ignore_key_errors: bool, optional
        :return: sub-container containing only the key-chains which include the specified keys.
        """
        if queries is None and ignore_none:
            return self
        key_chains_to_keep = list()
        if isinstance(queries, str):
            queries = [queries]

        def map_fn(x, kc):
            nonlocal key_chains_to_keep
            kc_split = re.split('[/.]', kc)
            for query_key in queries:
                if query_key in kc_split or (containing and min([query_key in k for k in kc_split])):
                    key_chains_to_keep.append(kc)
            return x
        self.map(map_fn)
        return self.at_key_chains(key_chains_to_keep, ignore_key_errors=ignore_key_errors)

    def at_key_chain(self, key_chain, ignore_key_errors=False):
        """
        Query container object at a specified key-chain

        :return: sub-container or value at specified key chain
        """
        keys = re.split('[/.]', key_chain)
        ret = self
        for key in keys:
            try:
                ret = ret[key]
            except KeyError as e:
                if ignore_key_errors:
                    return
                raise e
        return ret

    def at_key_chains(self, key_chains, ignore_none=True, ignore_key_errors=False):
        """
        Query container object at specified key-chains, either as list or nested dict.

        :return: sub-container containing only the specified key chains
        """
        if key_chains is None and ignore_none:
            return self
        if isinstance(key_chains, (list, tuple)):
            return self._at_key_chains_input_as_seq(key_chains, ignore_key_errors=ignore_key_errors)
        elif isinstance(key_chains, dict):
            return self._at_key_chains_input_as_dict(key_chains, ignore_key_errors=ignore_key_errors)
        elif isinstance(key_chains, str):
            return self._at_key_chains_input_as_seq([key_chains], ignore_key_errors=ignore_key_errors)
        else:
            raise Exception('Invalid type for input key_chains, must either be a list, tuple, dict, or ivy.Container, '
                            'but found type {}'.format(type(key_chains)))

    def set_at_keys(self, target_dict):
        """
        Set values of container object at specified keys

        :return: new container with updated value at each key
        """
        return_dict = dict()
        for key, val in self.items():
            if key in target_dict:
                return_dict[key] = target_dict[key]
            elif isinstance(val, Container):
                return_dict[key] = val.set_at_keys(target_dict)
            else:
                return_dict[key] = val
        return Container(return_dict, **self._config)

    def set_at_key_chain(self, key_chain, val, inplace=False):
        """
        Set value of container object at a specified key-chain

        :return: new container with updated value at key chain
        """
        keys = re.split('[/.]', key_chain)
        if inplace:
            cont = self
        else:
            cont = self.copy()
        sub_cont = cont
        for key in keys[:-1]:
            if key not in sub_cont:
                sub_cont[key] = Container(**self._config)
            sub_cont = sub_cont[key]
        sub_cont[keys[-1]] = val
        return cont

    def overwrite_at_key_chain(self, key_chain, val, inplace=False):
        """
        Overwrite value of container object at a specified key-chain

        :return: new container with updated value at key chain, provided it existed before.
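        Example (illustrative sketch, not part of the original docs):

        >>> cont = Container({'a': {'b': 1}})
        >>> cont = cont.overwrite_at_key_chain('a/b', 2)  # fine, the chain exists
        >>> cont = cont.set_at_key_chain('a/c', 3)  # new chains require set_at_key_chain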
""" keys = re.split('[/.]', key_chain) if inplace: cont = self else: cont = self.copy() sub_cont = cont for key in keys[:-1]: if key not in sub_cont: raise Exception('key chain must already exist in container in order to call overwrite_at_key_chain') sub_cont = sub_cont[key] if keys[-1] not in sub_cont: raise Exception('key chain must already exist in container in order to call overwrite_at_key_chain') sub_cont[keys[-1]] = val return cont def set_at_key_chains(self, target_dict, return_dict=None, inplace=False): """ Set values of container object at specified key-chains :return: new container with updated values at the key chains """ if return_dict is None: if inplace: return_dict = self else: return_dict = self.copy() for k, v in target_dict.items(): if isinstance(v, dict): return_dict[k] = self.set_at_key_chains(v, return_dict[k], inplace) else: return_dict[k] = v return Container(return_dict, **self._config) def overwrite_at_key_chains(self, target_dict, return_dict=None, inplace=False): """ Overwrite values of container object at specified key-chains :return: new container with updated values at the key chains, provided they existed before. """ if return_dict is None: if inplace: return_dict = self else: return_dict = self.copy() for k, v in target_dict.items(): if k not in return_dict: raise Exception('key chain must already exist in container in order to call overwrite_at_key_chains') if isinstance(v, dict): return_dict[k] = self.overwrite_at_key_chains(v, return_dict[k], inplace) else: return_dict[k] = v return Container(return_dict, **self._config) def prune_keys(self, query_keys, ignore_none=True): """ Recursively prune set of keys :return: Container with key-chains containing the specified keys pruned. """ if query_keys is None and ignore_none: return self key_chains_to_prune = list() if isinstance(query_keys, str): query_keys = [query_keys] def map_fn(x, kc): nonlocal key_chains_to_prune for query_key in query_keys: if query_key in kc: key_chains_to_prune.append(kc) return x self.map(map_fn) return self.prune_key_chains(key_chains_to_prune) def prune_key_chain(self, key_chain): """ Recursively prune chain of keys, specified as 'key1/key2/key3/...' :return: Container with keys in key chain pruned. """ keys_in_chain = re.split('[/.]', key_chain) out_dict = dict() for key, value in sorted(self.items()): if isinstance(value, Container): if key == keys_in_chain[0]: if len(keys_in_chain) == 1: new_val = [] else: new_val = value.prune_key_chain('/'.join(keys_in_chain[1:])) if len(new_val) > 0: out_dict[key] = new_val else: new_val = value.to_dict() if len(new_val) > 0: out_dict[key] = value.to_dict() else: if len(keys_in_chain) != 1 or key != keys_in_chain[0]: out_dict[key] = value return Container(out_dict, **self._config) def prune_key_chains(self, key_chains, ignore_none=True): """ Recursively prune set of key chains :return: Container with keys in the set of key chains pruned. 
""" if key_chains is None and ignore_none: return self if isinstance(key_chains, (list, tuple)): return self._prune_key_chains_input_as_seq(key_chains) elif isinstance(key_chains, dict): return self._prune_key_chains_input_as_dict(key_chains) elif isinstance(key_chains, str): return self._prune_key_chains_input_as_seq([key_chains]) else: raise Exception('Invalid type for input key_chains, must either be a list, tuple, dict, or ivy.Container,' 'but found type {}'.format(type(key_chains))) def sort_by_key(self): new_dict = dict() for k, v in sorted(self.items()): if isinstance(v, Container): v_back = v.sort_by_key() else: v_back = v new_dict[k] = v_back return Container(new_dict, **self._config) def restructure_keys(self, key_chain_mapping): """ Restructure the keys of the container. :param key_chain_mapping: Sequence of lists/tuples of key chain mapping to apply, with original and new key chains being the left and right terms respectively. :type key_chain_mapping: sequence of len-2 sequences :return: New contaienr with the key chains updated. """ ret_cont = self.copy() for orig_kc, new_kc in key_chain_mapping: if orig_kc == '': orig_kc_val = ret_cont ret_cont = Container(**self._config) else: orig_kc_val = ret_cont[orig_kc] ret_cont = ret_cont.prune_key_chain(orig_kc) ret_cont[new_kc] = orig_kc_val return ret_cont def prune_empty(self, keep_Nones=False, base=True): """ Recursively prunes empty keys from the container dict structure. Returns None if the entire container is empty. :return: Container with empty keys pruned. """ out_dict = dict() for key, value in sorted(self.items()): if isinstance(value, Container): new_value = value.prune_empty(keep_Nones, False) if new_value: out_dict[key] = new_value elif self._ivy.exists(value) or keep_Nones: out_dict[key] = value if len(out_dict): return Container(out_dict, **self._config) if base: return Container(**self._config) return def prune_key_from_key_chains(self, absolute=None, containing=None): """ Recursively prune absolute key or key containing a certain substring from all key chains. :param absolute: The absolute key to detect in the key chains. :type absolute: str, optional :param containing: A substring to check each key for, when deciding which keys to prune. :type containing: str, optional :return: Container with specified key or substring-containing-key from all key chains removed from the chain. """ if not absolute and not containing: raise Exception('At least one of absolute or containing arguments must be specified.') out_cont = Container(**self._config) for key, value in sorted(self.items()): if (absolute and key == absolute) or (containing and containing in key): if isinstance(value, Container): out_cont = Container.combine(out_cont, value) else: out_cont = value elif isinstance(value, Container): out_cont[key] = value.prune_key_from_key_chains(absolute, containing) else: out_cont[key] = value return out_cont def prune_keys_from_key_chains(self, absolute=None, containing=None): """ Recursively prune absolute keys or keys containing certain substrings from all key chains. :param absolute: The absolute key to detect in the key chains. :type absolute: sequence of strs, optional :param containing: A substring to check each key for, when deciding which keys to prune. :type containing: sequence of strs, optional :return: Container with specified keys or substring-containing-keys from all key chains removed from the chain. 
""" if not absolute and not containing: raise Exception('At least one of absolute or containing arguments must be specified.') out_cont = Container(**self._config) for key, value in sorted(self.items()): if (absolute and key in absolute) or (containing and max([con in key for con in containing])): if isinstance(value, Container): out_cont = Container.combine(out_cont, value) else: out_cont = value elif isinstance(value, Container): out_cont[key] = value.prune_key_from_key_chains(absolute, containing) else: out_cont[key] = value return out_cont def copy(self): """ Create a copy of this container. :return: A copy of the container """ return Container(self.to_dict(), **self._config) def deep_copy(self): """ Create a deep copy (copying all internal tensors) of this container. :return: A deep copy of the container """ return self.map(lambda x, kc: _ivy.copy_array(x) if _ivy.is_array(x) else x) def map(self, func, key_chains=None, to_apply=True, prune_unapplied=False, key_chain=''): """ Apply function to all array values of container :param func: Function to apply to each container entry :type func: python function :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :param key_chain: Chain of keys for this dict entry :type key_chain: str :return: New container following the function mapped to each sub-array. """ return_dict = dict() for key, value in sorted(self.items()): this_key_chain = key if key_chain == '' else (key_chain + '/' + key) if isinstance(value, Container): ret = value.map(func, key_chains, to_apply, prune_unapplied, this_key_chain) if prune_unapplied and not ret: continue return_dict[key] = ret else: if key_chains is not None: if (this_key_chain in key_chains and not to_apply) or ( this_key_chain not in key_chains and to_apply): if prune_unapplied: continue return_dict[key] = value continue return_dict[key] = func(value, this_key_chain) # ToDo: find an elegant way to pass ALL configs from the current container to the new container return Container(return_dict, **self._config) def map_conts(self, func, key_chains=None, to_apply=True, prune_unapplied=False, include_self=True, key_chain=''): """ Apply function to all sub-contains in the container. :param func: Function to apply to each sub-container :type func: python function :param key_chains: The key-chains to apply or not apply the method to. Default is None. :type key_chains: list or dict of strs, optional :param to_apply: If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. :type to_apply: bool, optional :param prune_unapplied: Whether to prune key_chains for which the function was not applied. Default is False. :type prune_unapplied: bool, optional :param include_self: Whether to also apply the (possiby in-place) function to this container. Default is True. :type include_self: bool, optional :param key_chain: Chain of keys for this dict entry :type key_chain: str :return: New container following the function mapped to each sub-container. 
""" return_dict = dict() for key, value in sorted(self.items()): this_key_chain = key if key_chain == '' else (key_chain + '/' + key) if isinstance(value, Container): ret = value.map_conts(func, key_chains, to_apply, prune_unapplied, key_chain=this_key_chain) if prune_unapplied and not ret: continue return_dict[key] = ret else: if key_chains is not None: if (this_key_chain in key_chains and not to_apply) or ( this_key_chain not in key_chains and to_apply): if prune_unapplied: continue return_dict[key] = value continue return_dict[key] = value ret = Container(return_dict, **self._config) if key_chain != '' or include_self: return func(ret, key_chain) return ret def dtype(self): """ Return container, with all entries replaced with their data types. :return: New datatype container """ return self.map(lambda x, _: self._ivy.dtype(x)) def with_entries_as_lists(self): """ Return container object, with each array entry in the container cast to a list """ def to_list(x, _=''): try: return self._ivy.to_list(x) except (AttributeError, ValueError): return x return self.map(to_list) def reshape_like(self, target_dict, leading_shape=None, return_cont=None): """ Set shapes of container entries to shapes specified by new container with the same key structure :return: new container with values of updated shapes """ leading_shape = self._ivy.default(leading_shape, list()) if return_cont is None: return_cont = self.copy() for (_, v_shape), (k, v) in zip(target_dict.items(), return_cont.items()): if isinstance(v_shape, dict): return_cont[k] = self.reshape_like(v_shape, leading_shape, return_cont[k]) else: return_cont[k] = self._ivy.reshape(v, leading_shape + list(v_shape)) return Container(return_cont, **self._config) def create_if_absent(self, key, value, inplace=True): """ Add a key to the container with corresponding value, if it is not already present. otherwise, do nothing. """ if key in self: return self.set_at_key_chain(key, value, inplace) def if_exists(self, key): """ Returns the sub-container at the following key if it exists, otherwise None. """ try: return self[key] except KeyError: return def try_kc(self, key): """ Tries the following key or key chain, returning self if not present. 
""" try: return self[key] except KeyError: return self def with_print_limit(self, print_limit): return Container(self, **{**self._config, **{'print_limit': print_limit, 'rebuild_child_containers': True}}) # noinspection PyTypeChecker def remove_print_limit(self): return self.with_print_limit(None) def with_print_indent(self, print_indent): return Container(self, **{**self._config, **{'print_indent': print_indent, 'rebuild_child_containers': True}}) def with_print_line_spacing(self, print_line_spacing): return Container(self, **{**self._config, **{'print_line_spacing': print_line_spacing, 'rebuild_child_containers': True}}) # Built-ins # # ----------# def __repr__(self, as_repr=True): indent_str = ' '*self._print_indent def _align_array(array_str_in): array_str_in_split = array_str_in.split('([') leading_str_to_keep = array_str_in_split[0].replace('\\n', '') indented_key_size = len(leading_str_to_keep.replace('"', '').split(': ')[0]) indented_key_str = ' '*(indented_key_size+2) padded = False def _pre_pad_alpha_line(str_in): nonlocal padded padded = True return '\\n' + indent_str + indented_key_str + str_in leading_str_to_keep = ', '.join([_pre_pad_alpha_line(s) if s[0].isalpha() and i != 0 else s for i, s in enumerate(leading_str_to_keep.split(', '))]) local_indent_str = '' if padded else indent_str leading_str = leading_str_to_keep.split('\\n')[-1].replace('"', '') remaining_str = array_str_in_split[1] num_extra_dims = 0 for i, char in enumerate(remaining_str): if char != '[': num_extra_dims = i break extra_indent = (len(leading_str) + 1 + num_extra_dims) * ' ' array_str_in = '(['.join([leading_str_to_keep, remaining_str]) uniform_indent_wo_overflow = array_str_in.replace('\\n[', '\n' + local_indent_str + extra_indent + '[') uniform_indent = '\n'.join([local_indent_str + extra_indent + ' ' + s if (s[0].isnumeric() or s[0] == '-' or s[0:3] == '...' 
or max([ss in s[0:6] for ss in ['nan, ', 'inf, ']])) else (indent_str + indented_key_str + s if (not s[0].isspace() and s[0] != '"') else s) for s in uniform_indent_wo_overflow.split('\\n')]) indented = uniform_indent # 10 dimensions is a sensible upper bound for the number in a single array for i in range(2, 10): indented = indented.replace(' '*(i-1) + '['*i, '['*i) indented = '\n'.join([s for s in indented.split('\n') if bool(s) and not s.isspace()]) return indented def _align_arrays(str_in): chunks = str_in.split('\n' + indent_str) aligned_array_chunks = {i: _align_array(c) for i, c in enumerate(chunks) if '\\n' in c} chunks = [aligned_array_chunks[i] if i in aligned_array_chunks else c_orig for i, c_orig in enumerate(chunks)] return ('\n' + indent_str).join(chunks) new_dict = dict() for k, v in self.items(): if isinstance(v, Container): # noinspection PyArgumentList rep = v.__repr__(as_repr=False) else: if self._ivy.is_array(v) and len(list(v.shape)) > 0 and _ivy.exists(self._print_limit) and \ _reduce(_mul, v.shape) > self._print_limit: rep = (type(v), "shape=", list(v.shape)) elif isinstance(v, (list, tuple)) and v and self._ivy.is_array(v[0]): rep = ("list[{}]".format(len(v)), type(v[0]), "shape=", list(v[0].shape)) else: rep = v new_dict[k] = rep if as_repr: json_dumped_str = _align_arrays(_json.dumps( Container(new_dict, **self._config).map( lambda x, kc: x if _is_jsonable(x) else _repr(x).replace(' ', '').replace(',', ', ')).to_dict(), indent=self._print_indent)) def _add_newline(str_in): str_in_split = str_in.split('\n') str_split_size = len(str_in_split) return '\n'.join([('\n'*self._print_line_spacing + ss) if i == (str_split_size-1) else ss for i, ss in enumerate(str_in_split)]) json_dumped_str = '":'.join([_add_newline(s) for s in json_dumped_str.split('":')]) # improve tf formatting if _ivy.framework_stack and _ivy.current_framework_str() == 'tensorflow': json_dumped_str_split = json_dumped_str.split("\'Variable:") json_dumped_str = json_dumped_str_split[0] + ', ' + ', '.join(["\'".join(ss.split("\'")[1:]) for ss in json_dumped_str_split[1:]]) json_dumped_str = json_dumped_str.replace(':shape', ', shape').replace(')dtype=', '), dtype=').replace( ', ),', ',),') # make keys green json_dumped_str_split = json_dumped_str.split('":') split_size = len(json_dumped_str_split) json_dumped_str =\ '":'.join([' "'.join(sub_str.split(' "')[:-1] + [termcolor.colored(sub_str.split(' "')[-1], 'green')]) if i < split_size - 1 else sub_str for i, sub_str in enumerate(json_dumped_str_split)]) # remove quotation marks, shape tuple, and color other elements of the dict ret = json_dumped_str.replace('"', '').replace(", 'shape=', [", " shape=[").replace( ':', termcolor.colored(':', 'magenta')).replace('{', termcolor.colored('{', 'blue')).replace( '}', termcolor.colored('}', 'blue')).replace('shape=', termcolor.colored('shape=', 'magenta')).replace( 'device=', termcolor.colored('device=', 'magenta')).replace("<class'", "<class '").replace( "'", "").replace('<class', '<' + termcolor.colored('class', 'blue')) # ToDo: make the solution below more elegant for i in range(10): ret = ret.replace('diff_{}'.format(i), termcolor.colored('diff_{}'.format(i), 'red')) for keyword, color in self._keyword_color_dict.items(): ret = ret.replace(keyword, termcolor.colored(keyword, color)) return ret return new_dict def __dir__(self): return list(super.__dir__(self)) + list(self.keys()) def __getattr__(self, item): try: return dict.__getitem__(self, item) except KeyError: # noinspection PyUnresolvedReferences return 
super.__getattr__(item) def __setattr__(self, name, value): if name[0] != '_': self[name] = value else: super.__setattr__(self, name, value) def _get_queue_item(self, query): if isinstance(query, int): queue_queries = [query] elif isinstance(query, slice): queue_queries = list(range(query.start, query.stop, _ivy.default(query.step, 1))) elif isinstance(query, (list, tuple)): queue_queries = list(range(query[0].start, query[0].stop, _ivy.default(query[0].step, 1))) else: raise Exception('Invalid slice type, must be one of integer, slice, or sequences of slices.') queue_idxs = set([_np.sum(q >= self._queue_load_sizes_cum).item() for q in queue_queries]) conts = list() for i in queue_idxs: if i not in self._loaded_containers_from_queues: cont = Container(self._queues[i].get(timeout=self._queue_timeout), **self._config) if _ivy.wrapped_mode(): cont = cont.to_ivy() self._loaded_containers_from_queues[i] = cont else: cont = self._loaded_containers_from_queues[i] conts.append(cont) combined_cont = self._container_combine_method(conts) idx = list(queue_idxs)[0] offset = 0 if idx == 0 else self._queue_load_sizes_cum[idx - 1] if isinstance(query, int): shifted_query = query - offset elif isinstance(query, slice): shifted_query = slice(query.start-offset, query.stop-offset, query.step) elif isinstance(query, (list, tuple)): shifted_query = tuple([slice(slc.start-offset, slc.stop-offset, slc.step) for slc in query]) # noinspection PyUnboundLocalVariable return combined_cont[shifted_query] def __getitem__(self, query): """ Get slice, key or key chain of container object. :param query: slice object, key or key chain to query all container elements. :type query: slice or str :return: Container object at desired query. """ if isinstance(query, str): if '/' in query or '.' in query: return self.at_key_chain(query) return dict.__getitem__(self, query) elif _ivy.exists(self._queues): return self._get_queue_item(query) return_dict = dict() for key, value in sorted(self.items()): if isinstance(value, Container): return_dict[key] = value[query] else: # noinspection PyBroadException if isinstance(value, list) or isinstance(value, tuple): if len(value) == 0: return_dict[key] = value else: return_dict[key] = value[query] elif value is None or hasattr(value, 'shape') and value.shape == (): return_dict[key] = value else: return_dict[key] = value[query] return Container(return_dict, **self._config) def __setitem__(self, query, val): """ Set key or key chain of container object. :param query: slice object, key or key chain at which to set all container elements. :type query: slice or str :param val: The value to set at the desired query. :type val: ivy.Container, array, or other :return: New container after updating. """ if isinstance(query, str) and ('/' in query or '.' in query): return self.set_at_key_chain(query, val, inplace=True) else: return dict.__setitem__(self, query, val) def __contains__(self, key): if isinstance(key, str) and ('/' in key or '.' 
in key): return self.has_key_chain(key) else: return dict.__contains__(self, key) def __pos__(self): return self def __neg__(self): return self.map(lambda x, kc: -x) def __pow__(self, power): if isinstance(power, Container): return self.reduce([self, power], lambda x: _reduce(_pow, x)) return self.map(lambda x, kc: x ** power) def __rpow__(self, power): return self.map(lambda x, kc: power ** x) def __add__(self, other): if isinstance(other, Container): return self.reduce([self, other], sum) return self.map(lambda x, kc: x + other) def __radd__(self, other): return self + other def __sub__(self, other): if isinstance(other, Container): return self.reduce([self, -other], sum) return self.map(lambda x, kc: x - other) def __rsub__(self, other): return -self + other def __mul__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: _reduce(_mul, x)) return self.map(lambda x, kc: x * other) def __rmul__(self, other): return self * other def __truediv__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: _reduce(_truediv, x)) return self.map(lambda x, kc: x / other) def __rtruediv__(self, other): return self.map(lambda x, kc: other / x) def __floordiv__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: _reduce(_floordiv, x)) return self.map(lambda x, kc: x // other) def __rfloordiv__(self, other): return self.map(lambda x, kc: other // x) def __abs__(self): return self.map(lambda x, kc: self._ivy.abs(x)) def __lt__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: _reduce(_lt, x)) return self.map(lambda x, kc: x < other) def __le__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: _reduce(_le, x)) return self.map(lambda x, kc: x <= other) def __eq__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: _reduce(_eq, x)) return self.map(lambda x, kc: x == other) def __ne__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: _reduce(_ne, x)) return self.map(lambda x, kc: x != other) def __gt__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: _reduce(_gt, x)) return self.map(lambda x, kc: x > other) def __ge__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: _reduce(_ge, x)) return self.map(lambda x, kc: x >= other) def __and__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: x[0] and x[1]) return self.map(lambda x, kc: x and other) def __rand__(self, other): return self.map(lambda x, kc: other and x) def __or__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: x[0] or x[1]) return self.map(lambda x, kc: x or other) def __ror__(self, other): return self.map(lambda x, kc: other or x) def __invert__(self): return self.map(lambda x, kc: _not(x)) def __xor__(self, other): if isinstance(other, Container): return self.reduce([self, other], lambda x: x[0] != x[1]) return self.map(lambda x, kc: x != other) def __rxor__(self, other): return self.map(lambda x, kc: other != x) # Getters and Setters # # --------------------# # private @property def _ivy(self): return _ivy.default(self._local_ivy, _ivy) @_ivy.setter def _ivy(self, local_ivy): self._local_ivy = local_ivy # public @property def shape(self): """ The shape of the arrays in the container, with None placed in indices 
which are not consistent across arrays """ return self._get_shape() @property def shapes(self): """ The shapes of each array in the container, with None placed in leaf entries without a shape attribute. """ return self._get_shapes() @property def dev_str(self): """ The device to which the arrays in the container belong, with None returned if the devices are not consistent """ return self._get_dev_str() @property def ivy(self): return self._ivy @property def config(self): return self._config class MultiDevContainer(Container): def __init__(self, dict_in, dev_strs, queues=None, queue_load_sizes=None, container_combine_method='list_join', queue_timeout=None, print_limit=10, print_indent=4, print_line_spacing=0, ivyh=None, keyword_color_dict=None, rebuild_child_containers=False, **kwargs): super().__init__(dict_in, queues, queue_load_sizes, container_combine_method, queue_timeout, print_limit, print_indent, print_line_spacing, ivyh, keyword_color_dict, rebuild_child_containers, **kwargs) self._dev_strs = dev_strs self._num_devs = len(dev_strs) def at_dev(self, dev_str): return self.map(lambda x, kc: x[dev_str] if isinstance(x, _ivy.MultiDevItem) else x) def at_devs(self): return {ds: self.at_dev(ds) for ds in self._dev_strs}
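# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). Assumes an ivy backend has been selected so that ivy.array
# resolves; the exact backend-selection call varies across ivy versions.
#
#     cont = Container({'x': ivy.array([1., 2., 3.]),
#                       'sub': {'y': ivy.array([4., 5., 6.])}})
#     doubled = cont.map(lambda v, kc: v * 2)   # apply a function to all leaves
#     y = cont.at_key_chain('sub/y')            # query a nested entry
#     flat = cont.to_flat_list()                # flatten all leaf arrays
# ---------------------------------------------------------------------------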
[ "numpy.prod", "ivy.einops_rearrange", "ivy.indices_where", "ivy.einops_repeat", "ivy.cast", "ivy.Container.identical_structure", "operator.not_", "re.split", "numpy.where", "json.dumps", "numpy.asarray", "ivy.copy_array", "ivy.wrapped_mode", "random.randint", "random.shuffle", "numpy.o...
[((698, 712), 'json.dumps', '_json.dumps', (['x'], {}), '(x)\n', (709, 712), True, 'import json as _json\n'), ((4259, 4284), 'ivy.exists', '_ivy.exists', (['self._queues'], {}), '(self._queues)\n', (4270, 4284), True, 'import ivy as _ivy\n'), ((4827, 4863), 'ivy.default', '_ivy.default', (['keyword_color_dict', '{}'], {}), '(keyword_color_dict, {})\n', (4839, 4863), True, 'import ivy as _ivy\n'), ((14419, 14456), 'ivy.Container', '_ivy.Container', (['return_dict'], {}), '(return_dict, **config)\n', (14433, 14456), True, 'import ivy as _ivy\n'), ((19535, 19572), 'ivy.Container', '_ivy.Container', (['return_dict'], {}), '(return_dict, **config)\n', (19549, 19572), True, 'import ivy as _ivy\n'), ((25825, 25844), 'ivy.wrapped_mode', '_ivy.wrapped_mode', ([], {}), '()\n', (25842, 25844), True, 'import ivy as _ivy\n'), ((30862, 30930), 'numpy.asarray', '_np.asarray', (['[sub_shape[0:min_num_dims] for sub_shape in sub_shapes]'], {}), '([sub_shape[0:min_num_dims] for sub_shape in sub_shapes])\n', (30873, 30930), True, 'import numpy as _np\n'), ((30958, 31012), 'numpy.where', '_np.where', (['(sub_shapes_array == 0)', '(-1)', 'sub_shapes_array'], {}), '(sub_shapes_array == 0, -1, sub_shapes_array)\n', (30967, 31012), True, 'import numpy as _np\n'), ((95233, 95252), 'ivy.wrapped_mode', '_ivy.wrapped_mode', ([], {}), '()\n', (95250, 95252), True, 'import ivy as _ivy\n'), ((101084, 101111), 're.split', 're.split', (['"""[/.]"""', 'key_chain'], {}), "('[/.]', key_chain)\n", (101092, 101111), False, 'import re\n'), ((103839, 103866), 're.split', 're.split', (['"""[/.]"""', 'key_chain'], {}), "('[/.]', key_chain)\n", (103847, 103866), False, 'import re\n'), ((105878, 105905), 're.split', 're.split', (['"""[/.]"""', 'key_chain'], {}), "('[/.]', key_chain)\n", (105886, 105905), False, 'import re\n'), ((106504, 106531), 're.split', 're.split', (['"""[/.]"""', 'key_chain'], {}), "('[/.]', key_chain)\n", (106512, 106531), False, 'import re\n'), ((109498, 109525), 're.split', 're.split', (['"""[/.]"""', 'key_chain'], {}), "('[/.]', key_chain)\n", (109506, 109525), False, 'import re\n'), ((137605, 137640), 'ivy.default', '_ivy.default', (['self._local_ivy', '_ivy'], {}), '(self._local_ivy, _ivy)\n', (137617, 137640), True, 'import ivy as _ivy\n'), ((4648, 4676), 'numpy.cumsum', '_np.cumsum', (['queue_load_sizes'], {}), '(queue_load_sizes)\n', (4658, 4676), True, 'import numpy as _np\n'), ((6970, 6989), 'ivy.exists', '_ivy.exists', (['config'], {}), '(config)\n', (6981, 6989), True, 'import ivy as _ivy\n'), ((8150, 8169), 'ivy.exists', '_ivy.exists', (['config'], {}), '(config)\n', (8161, 8169), True, 'import ivy as _ivy\n'), ((10492, 10511), 'ivy.exists', '_ivy.exists', (['config'], {}), '(config)\n', (10503, 10511), True, 'import ivy as _ivy\n'), ((10960, 10994), 'ivy.default', '_ivy.default', (["config['ivyh']", '_ivy'], {}), "(config['ivyh'], _ivy)\n", (10972, 10994), True, 'import ivy as _ivy\n'), ((11995, 12014), 'ivy.exists', '_ivy.exists', (['config'], {}), '(config)\n', (12006, 12014), True, 'import ivy as _ivy\n'), ((12462, 12496), 'ivy.default', '_ivy.default', (["config['ivyh']", '_ivy'], {}), "(config['ivyh'], _ivy)\n", (12474, 12496), True, 'import ivy as _ivy\n'), ((13686, 13705), 'ivy.exists', '_ivy.exists', (['config'], {}), '(config)\n', (13697, 13705), True, 'import ivy as _ivy\n'), ((16146, 16165), 'ivy.exists', '_ivy.exists', (['config'], {}), '(config)\n', (16157, 16165), True, 'import ivy as _ivy\n'), ((16320, 16365), 'ivy.equal', '_ivy.equal', (['*containers'], {'equality_matrix': 
'(True)'}), '(*containers, equality_matrix=True)\n', (16330, 16365), True, 'import ivy as _ivy\n'), ((20928, 20947), 'ivy.exists', '_ivy.exists', (['config'], {}), '(config)\n', (20939, 20947), True, 'import ivy as _ivy\n'), ((24708, 24743), 'h5py.File', '_h5py.File', (['h5_obj_or_filepath', '"""r"""'], {}), "(h5_obj_or_filepath, 'r')\n", (24718, 24743), True, 'import h5py as _h5py\n'), ((27138, 27173), 'h5py.File', '_h5py.File', (['h5_obj_or_filepath', '"""r"""'], {}), "(h5_obj_or_filepath, 'r')\n", (27148, 27173), True, 'import h5py as _h5py\n'), ((28344, 28368), 'random.randint', '_random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (28359, 28368), True, 'import random as _random\n'), ((28434, 28469), 'h5py.File', '_h5py.File', (['h5_obj_or_filepath', '"""a"""'], {}), "(h5_obj_or_filepath, 'a')\n", (28444, 28469), True, 'import h5py as _h5py\n'), ((29602, 29621), 'ivy.exists', '_ivy.exists', (['config'], {}), '(config)\n', (29613, 29621), True, 'import ivy as _ivy\n'), ((30378, 30403), 'ivy.exists', '_ivy.exists', (['self._queues'], {}), '(self._queues)\n', (30389, 30403), True, 'import ivy as _ivy\n'), ((31028, 31081), 'numpy.prod', '_np.prod', (['(sub_shapes_array / sub_shapes_array[0:1])', '(0)'], {}), '(sub_shapes_array / sub_shapes_array[0:1], 0)\n', (31036, 31081), True, 'import numpy as _np\n'), ((93697, 93733), 'h5py.File', '_h5py.File', (['h5_obj_or_filepath', 'mode'], {}), '(h5_obj_or_filepath, mode)\n', (93707, 93733), True, 'import h5py as _h5py\n'), ((103266, 103286), 're.split', 're.split', (['"""[/.]"""', 'kc'], {}), "('[/.]', kc)\n", (103274, 103286), False, 'import re\n'), ((132036, 132061), 'ivy.exists', '_ivy.exists', (['self._queues'], {}), '(self._queues)\n', (132047, 132061), True, 'import ivy as _ivy\n'), ((4739, 4759), 'ivy.queue_timeout', '_ivy.queue_timeout', ([], {}), '()\n', (4757, 4759), True, 'import ivy as _ivy\n'), ((18227, 18369), 'ivy.Container.diff', '_ivy.Container.diff', (['*[cont[key] for cont in containers]'], {'mode': 'mode', 'diff_keys': 'diff_keys', 'detect_key_diffs': 'detect_key_diffs', 'config': 'config'}), '(*[cont[key] for cont in containers], mode=mode,\n diff_keys=diff_keys, detect_key_diffs=detect_key_diffs, config=config)\n', (18246, 18369), True, 'import ivy as _ivy\n'), ((21344, 21449), 'ivy.Container.multi_map', '_ivy.Container.multi_map', (['func', 'values', 'key_chains', 'to_apply', 'prune_unapplied', 'this_key_chain', 'config'], {}), '(func, values, key_chains, to_apply,\n prune_unapplied, this_key_chain, config)\n', (21368, 21449), True, 'import ivy as _ivy\n'), ((23749, 23834), 'ivy.Container.identical_structure', '_ivy.Container.identical_structure', (['values', 'key_chains', 'to_apply', 'this_key_chain'], {}), '(values, key_chains, to_apply, this_key_chain\n )\n', (23783, 23834), True, 'import ivy as _ivy\n'), ((26695, 26721), 'json.load', '_json.load', (['json_data_file'], {}), '(json_data_file)\n', (26705, 26721), True, 'import json as _json\n'), ((31148, 31160), 'numpy.isnan', '_np.isnan', (['i'], {}), '(i)\n', (31157, 31160), True, 'import numpy as _np\n'), ((102039, 102069), 'ivy.has_nans', '_ivy.has_nans', (['x', 'include_infs'], {}), '(x, include_infs)\n', (102052, 102069), True, 'import ivy as _ivy\n'), ((130710, 130729), 'ivy.wrapped_mode', '_ivy.wrapped_mode', ([], {}), '()\n', (130727, 130729), True, 'import ivy as _ivy\n'), ((137200, 137207), 'operator.not_', '_not', (['x'], {}), '(x)\n', (137204, 137207), True, 'from operator import not_ as _not\n'), ((16397, 16426), 'ivy.cast', '_ivy.cast', (['equal_mat', 
'"""int32"""'], {}), "(equal_mat, 'int32')\n", (16406, 16426), True, 'import ivy as _ivy\n'), ((16501, 16525), 'ivy.Container', '_ivy.Container', ([], {}), '(**config)\n', (16515, 16525), True, 'import ivy as _ivy\n'), ((16621, 16645), 'ivy.Container', '_ivy.Container', ([], {}), '(**config)\n', (16635, 16645), True, 'import ivy as _ivy\n'), ((17726, 17761), 'ivy.Container', '_ivy.Container', (['diff_dict'], {}), '(diff_dict, **config)\n', (17740, 17761), True, 'import ivy as _ivy\n'), ((28750, 28774), 'random.seed', '_random.seed', (['seed_value'], {}), '(seed_value)\n', (28762, 28774), True, 'import random as _random\n'), ((28836, 28858), 'random.shuffle', '_random.shuffle', (['value'], {}), '(value)\n', (28851, 28858), True, 'import random as _random\n'), ((31962, 31978), 'ivy.exists', '_ivy.exists', (['val'], {}), '(val)\n', (31973, 31978), True, 'import ivy as _ivy\n'), ((84049, 84098), 'ivy.einops_rearrange', '_ivy.einops_rearrange', (['x', 'pattern'], {}), '(x, pattern, **axes_lengths)\n', (84070, 84098), True, 'import ivy as _ivy\n'), ((85388, 85445), 'ivy.einops_reduce', '_ivy.einops_reduce', (['x', 'pattern', 'reduction'], {}), '(x, pattern, reduction, **axes_lengths)\n', (85406, 85445), True, 'import ivy as _ivy\n'), ((86580, 86626), 'ivy.einops_repeat', '_ivy.einops_repeat', (['x', 'pattern'], {}), '(x, pattern, **axes_lengths)\n', (86598, 86626), True, 'import ivy as _ivy\n'), ((116073, 116089), 'ivy.is_array', '_ivy.is_array', (['x'], {}), '(x)\n', (116086, 116089), True, 'import ivy as _ivy\n'), ((116051, 116069), 'ivy.copy_array', '_ivy.copy_array', (['x'], {}), '(x)\n', (116066, 116069), True, 'import ivy as _ivy\n'), ((126240, 126270), 'ivy.exists', '_ivy.exists', (['self._print_limit'], {}), '(self._print_limit)\n', (126251, 126270), True, 'import ivy as _ivy\n'), ((127473, 127501), 'ivy.current_framework_str', '_ivy.current_framework_str', ([], {}), '()\n', (127499, 127501), True, 'import ivy as _ivy\n'), ((128987, 129021), 'termcolor.colored', 'termcolor.colored', (['"""class"""', '"""blue"""'], {}), "('class', 'blue')\n", (129004, 129021), False, 'import termcolor\n'), ((129325, 129358), 'termcolor.colored', 'termcolor.colored', (['keyword', 'color'], {}), '(keyword, color)\n', (129342, 129358), False, 'import termcolor\n'), ((133892, 133908), 'functools.reduce', '_reduce', (['_pow', 'x'], {}), '(_pow, x)\n', (133899, 133908), True, 'from functools import reduce as _reduce\n'), ((134636, 134652), 'functools.reduce', '_reduce', (['_mul', 'x'], {}), '(_mul, x)\n', (134643, 134652), True, 'from functools import reduce as _reduce\n'), ((134895, 134915), 'functools.reduce', '_reduce', (['_truediv', 'x'], {}), '(_truediv, x)\n', (134902, 134915), True, 'from functools import reduce as _reduce\n'), ((135184, 135205), 'functools.reduce', '_reduce', (['_floordiv', 'x'], {}), '(_floordiv, x)\n', (135191, 135205), True, 'from functools import reduce as _reduce\n'), ((135551, 135566), 'functools.reduce', '_reduce', (['_lt', 'x'], {}), '(_lt, x)\n', (135558, 135566), True, 'from functools import reduce as _reduce\n'), ((135744, 135759), 'functools.reduce', '_reduce', (['_le', 'x'], {}), '(_le, x)\n', (135751, 135759), True, 'from functools import reduce as _reduce\n'), ((135938, 135953), 'functools.reduce', '_reduce', (['_eq', 'x'], {}), '(_eq, x)\n', (135945, 135953), True, 'from functools import reduce as _reduce\n'), ((136132, 136147), 'functools.reduce', '_reduce', (['_ne', 'x'], {}), '(_ne, x)\n', (136139, 136147), True, 'from functools import reduce as _reduce\n'), ((136326, 
136341), 'functools.reduce', '_reduce', (['_gt', 'x'], {}), '(_gt, x)\n', (136333, 136341), True, 'from functools import reduce as _reduce\n'), ((136519, 136534), 'functools.reduce', '_reduce', (['_ge', 'x'], {}), '(_ge, x)\n', (136526, 136534), True, 'from functools import reduce as _reduce\n'), ((27591, 27620), 'functools.reduce', '_reduce', (['_mul', 'value_shape', '(1)'], {}), '(_mul, value_shape, 1)\n', (27598, 27620), True, 'from functools import reduce as _reduce\n'), ((32805, 32821), 'ivy.exists', '_ivy.exists', (['val'], {}), '(val)\n', (32816, 32821), True, 'import ivy as _ivy\n'), ((126301, 126323), 'functools.reduce', '_reduce', (['_mul', 'v.shape'], {}), '(_mul, v.shape)\n', (126308, 126323), True, 'from functools import reduce as _reduce\n'), ((130080, 130107), 'ivy.default', '_ivy.default', (['query.step', '(1)'], {}), '(query.step, 1)\n', (130092, 130107), True, 'import ivy as _ivy\n'), ((130406, 130446), 'numpy.sum', '_np.sum', (['(q >= self._queue_load_sizes_cum)'], {}), '(q >= self._queue_load_sizes_cum)\n', (130413, 130446), True, 'import numpy as _np\n'), ((16971, 17005), 'ivy.indices_where', '_ivy.indices_where', (['equal_mat[idx]'], {}), '(equal_mat[idx])\n', (16989, 17005), True, 'import ivy as _ivy\n'), ((25075, 25099), 'ivy.default', '_ivy.default', (['ivyh', '_ivy'], {}), '(ivyh, _ivy)\n', (25087, 25099), True, 'import ivy as _ivy\n'), ((130227, 130257), 'ivy.default', '_ivy.default', (['query[0].step', '(1)'], {}), '(query[0].step, 1)\n', (130239, 130257), True, 'import ivy as _ivy\n'), ((31235, 31257), 'numpy.ones', '_np.ones', (['min_num_dims'], {}), '(min_num_dims)\n', (31243, 31257), True, 'import numpy as _np\n'), ((128857, 128896), 'termcolor.colored', 'termcolor.colored', (['"""device="""', '"""magenta"""'], {}), "('device=', 'magenta')\n", (128874, 128896), False, 'import termcolor\n'), ((17056, 17082), 'ivy.to_numpy', '_ivy.to_numpy', (['idxs_to_add'], {}), '(idxs_to_add)\n', (17069, 17082), True, 'import ivy as _ivy\n'), ((128781, 128819), 'termcolor.colored', 'termcolor.colored', (['"""shape="""', '"""magenta"""'], {}), "('shape=', 'magenta')\n", (128798, 128819), False, 'import termcolor\n'), ((128731, 128761), 'termcolor.colored', 'termcolor.colored', (['"""}"""', '"""blue"""'], {}), "('}', 'blue')\n", (128748, 128761), False, 'import termcolor\n'), ((128669, 128699), 'termcolor.colored', 'termcolor.colored', (['"""{"""', '"""blue"""'], {}), "('{', 'blue')\n", (128686, 128699), False, 'import termcolor\n'), ((128621, 128654), 'termcolor.colored', 'termcolor.colored', (['""":"""', '"""magenta"""'], {}), "(':', 'magenta')\n", (128638, 128654), False, 'import termcolor\n')]
from __future__ import print_function
import numpy as np
from . import utils
from numpy import linalg as LA
import math


def ODL_updateD(D, E, F, iterations=100, tol=1e-8):
    """
    The main algorithm in ODL.
    Solving the optimization problem:
        D = arg min_D -2trace(E'*D) + trace(D*F*D')
        subject to: ||d_i||_2 <= 1,
    where F is a positive semidefinite matrix.
    Parameters:
    -----------
    D, E, F as in the above problem.
    iterations: maximum number of iterations.
    tol: when the difference between solutions in two successive iterations
        is less than this value, the algorithm will stop.
    Returns:
    --------
    """
    def calc_cost(D):
        return -2*np.trace(np.dot(E, D.T)) + np.trace(np.dot(np.dot(F, D.T), D))

    D_old = D.copy()
    for _ in range(iterations):
        # block-coordinate update of each column d_i
        for i in range(D.shape[1]):
            if F[i, i] != 0:
                a = 1.0/F[i, i] * (E[:, i] - D.dot(F[:, i])) + D[:, i]
                D[:, i] = a/max(LA.norm(a, 2), 1)
        if LA.norm(D - D_old, 'fro')/D.size < tol:
            break
        D_old = D.copy()
    return D


def DLSI_updateD(D, E, F, A, lambda1, iterations=100):
    """
    def DLSI_updateD(D, E, F, A, lambda1, verbose = False, iterations = 100):
    problem: `D = argmin_D -2trace(ED') + trace(FD'*D) + lambda *||A*D||F^2,`
    subject to: `||d_i||_2^2 <= 1`
    where F is a positive semidefinite matrix
    ========= approach: ADMM ==============================
    rewrite: `[D, Z] = argmin -2trace(ED') + trace(FD'*D) + lambda ||A*Z||_F^2,`
        subject to `D = Z; ||d_i||_2^2 <= 1`
    approach 1: ADMM.
    1. D = -2trace(ED') + trace(FD'*D) + rho/2 ||D - Z + U||_F^2,
       s.t. ||d_i||_2^2 <= 1
    2. Z = argmin lambda*||A*Z|| + rho/2||D - Z + U||_F^2
    3. U = U + D - Z
    solve 1: D = argmin -2trace(ED') + trace(FD'*D) + rho/2 ||D - W||_F^2
             with W = Z - U;
           = argmin -2trace((E - rho/2*W)*D') + trace((F + rho/2 * eye())*D'D)
    solve 2: derivative: 0 = 2A'AZ + rho (Z - V) with V = D + U
             `Z = B*rhoV` with `B = (2*lambda*A'*A + rho I)^{-1}`
    `U = U + D - Z`
    -----------------------------------------------
    """
    def calc_cost(D):
        cost = -2*np.trace(np.dot(E, D.T)) + np.trace(np.dot(F, np.dot(D.T, D))) +\
            lambda1*utils.normF2(np.dot(A, D))
        return cost

    it = 0
    rho = 1.0
    Z_old = D.copy()
    U = np.zeros_like(D)
    I_k = np.eye(D.shape[1])
    X = 2*lambda1/rho*A.T
    Y = A.copy()
    B1 = np.dot(X, utils.inv_IpXY(Y, X))
    # B1 = np.dot(X, LA.inv(eye(Y.shape[0]) + np.dot(Y, X)))
    tol = 1e-8
    for it in range(iterations):
        it += 1
        # update D
        W = Z_old - U
        E2 = E + rho/2*W
        F2 = F + rho/2*I_k
        D = ODL_updateD(D, E2, F2)
        # update Z
        V = D + U
        Z_new = rho*(V - np.dot(B1, np.dot(Y, V)))
        e1 = utils.normF2(D - Z_new)
        e2 = rho*utils.normF2(Z_new - Z_old)
        if e1 < tol and e2 < tol:
            break
        U = U + D - Z_new
        Z_old = Z_new.copy()
    return D


def num_grad(func, X):
    """
    Calculating the numerical gradient of a function `func(X)` where `X` is a
    matrix or vector
    """
    grad = np.zeros_like(X)
    eps = 1e-4
    # TODO: flatten then unflatten, to make this independent of X.shape
    # the current implementation only works with 2-d arrays
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            Xp = X.copy()
            Xm = X.copy()
            Xp[i, j] += eps
            fp = func(Xp)
            Xm[i, j] -= eps
            fm = func(Xm)
            grad[i, j] = (fp - fm)/(2*eps)
    return grad


def check_grad(func, grad, X):
    print('Checking grad...',)
    grad1 = grad(X)
    grad2 = num_grad(func, X)
    dif = LA.norm(grad1 - grad2)
    if dif < 1e-5:
        print('Difference = %f' % dif, 'PASS')
    else:
        print('Difference = %f' % dif, 'FAIL')
    return dif < 1e-5


def min_rank_dict(Y, X, lambdaD, Dinit, iterations=100, tol=1e-8):
    """
    [D] = argmin_D 0.5*||Y - DX||_F^2 + lambdaD*||D||_*
        s.t. ||d_i||_2^2 <= 1, for all i
    using ADMM:
    INPUT:
        Y: Data
        Dinit: initial D
        X: sparse code
        lambdaD: regularization term
    OUTPUT:
        D:
    Created: Tiep Vu 6/29/2015 2:05:28 PM
    ------------------------
    Choose a rho.
    Algorithm summary (ADMM):
        D, J = argmin_{D,J} 0.5*||Y - DX||_F^2 + lambdaD*||J||_*
            s.t. ||d_i||_2^2 <= 1 and J = D
    Alternatively solving:
    (1): D^{k+1} = argmin_D 0.5*||Y - DX||_F^2 + rho/2*||J - D + U^k||_F^2
            s.t. ||d_i||_2^2 <= 1
         This problem can be solved using the dictionary-update stage of the
         Online Dictionary Learning method.
    (2): J^{k+1} = argmin_J lambdaD*||J||_* + rho/2*||J - D^{k+1} + U^k||
         Solution: shrinkage_rank(D^{k+1} - U^k, lambdaD/rho)
    (3): Update U: U^{k+1} = U^k + J^{k+1} - D^{k+1}
    Stopping criteria: ||r^k||_F^2 <= tol, ||s^k||_F^2 <= tol
        r^k = J^k - D^k
        s^k = rho*(J^{k+1} - J^k)
    ---------------------------------------------
    Author: <NAME>, <EMAIL>, 04/22/2016
    http://www.personal.psu.edu/thv102/
    ---------------------------------------------
    """
    YXt = np.dot(Y, X.T)
    XXt = np.dot(X, X.T)
    rho = 0.25
    D_old = Dinit
    J_old = Dinit
    U_old = np.zeros_like(Dinit)
    it = 0
    I = np.eye(XXt.shape[0])
    tau = 2
    mu = 10.0
    for it in range(iterations):
        # ========= update D ================================
        # D = argmin_D 0.5*||Y - DX||_F^2 + rho/2 ||J - D + U||_F^2
        #     s.t. ||d_i||_2^2 <= 1
        E = YXt + rho*(J_old + U_old)
        F = XXt + rho*I
        # D_new = updateD_EF(D_old, E, F, 10)
        D_new = ODL_updateD(D_old, E, F, iterations=30)
        # ========= update J ==============================
        # J^{k+1} = argmin_J lambdaD||J||_* + rho/2||J - D + U||
        J_new = np.real(utils.shrinkage_rank(D_old - U_old, lambdaD/rho))
        # ========= update U ==============================
        U_new = U_old + J_new - D_old
        # ========= check stop ============================
        r = J_new - D_old
        s = rho*(J_new - J_old)
        r_eps = LA.norm(r, 'fro')
        s_eps = LA.norm(s, 'fro')
        if r_eps < tol and s_eps < tol:
            break
        D_old = D_new
        J_old = J_new
        U_old = U_new
        if r_eps > mu*s_eps:
            rho = rho*tau
        elif s_eps > mu*r_eps:
            rho = rho/tau
    return D_new


class Fista(object):
    def __init__(self):
        """
        subclasses are required to provide the functions _grad, _calc_f and
        lossF, and the attribute lambd
        """
        self._grad = None
        self._calc_f = None
        self.lossF = None
        self.lambd = None
        self.D = None
        self.Y = None
        self.L = None

    def solve(self, Xinit=None, iterations=100, tol=1e-8, verbose=False):
        if Xinit is None:
            Xinit = np.zeros((self.D.shape[1], self.Y.shape[1]))
        if self.L == 0:  # to avoid a divide-by-zero error
            self.L = 1
        Linv = 1/self.L
        lambdaLiv = self.lambd/self.L
        x_old = Xinit.copy()
        y_old = Xinit.copy()
        t_old = 1
        it = 0
        # cost_old = float("inf")
        for it in range(iterations):
            x_new = np.real(utils.shrinkage(y_old - Linv*self._grad(y_old), lambdaLiv))
            t_new = .5*(1 + math.sqrt(1 + 4*t_old**2))
            y_new = x_new + (t_old - 1)/t_new * (x_new - x_old)
            e = utils.norm1(x_new - x_old)/x_new.size
            if e < tol:
                break
            x_old = x_new.copy()
            t_old = t_new
            y_old = y_new.copy()
            if verbose:
                print('iter \t%d/%d, loss \t %4.4f' % (it + 1, iterations, self.lossF(x_new)))
        return x_new

    def _grad(self, y):
        raise NotImplementedError

    def lossF(self, x):
        raise NotImplementedError

    def check_grad(self, X):
        grad1 = self._grad(X)
        grad2 = num_grad(self._calc_f, X)
        dif = utils.norm1(grad1 - grad2)/grad1.size
        print('grad difference = %.7f' % dif)


class Lasso(Fista):
    """
    Solving a Lasso optimization problem using FISTA
    `X = arg min_X 0.5*||Y - DX||_F^2 + lambd||X||_1
       = argmin_X f(X) + lambd||X||_1`
    F(X) = f(X) + lambd||X||_1
    """
    def __init__(self, D, lambd=.1):
        self.D = D
        self.lambd = lambd
        self.DtD = np.dot(self.D.T, self.D)
        self.Y = None
        self.DtY = None
        self.L = np.max(LA.eig(self.DtD)[0])
        # print(self.L)
        self.coef_ = None

    def fit(self, Y, Xinit=None, iterations=100):
        self.Y = Y
        self.DtY = np.dot(self.D.T, self.Y)
        if Xinit is None:
            Xinit = np.zeros((self.D.shape[1], self.Y.shape[1]))
        self.coef_ = self.solve(Xinit=Xinit, iterations=iterations)

    def _grad(self, X):
        return np.dot(self.DtD, X) - self.DtY

    def _calc_f(self, X):
        return 0.5*utils.normF2(self.Y - np.dot(self.D, X))

    def lossF(self, X):
        return self._calc_f(X) + self.lambd*utils.norm1(X)
[ "numpy.eye", "numpy.linalg.eig", "math.sqrt", "numpy.dot", "numpy.zeros", "numpy.linalg.norm", "numpy.zeros_like" ]
[((2437, 2453), 'numpy.zeros_like', 'np.zeros_like', (['D'], {}), '(D)\n', (2450, 2453), True, 'import numpy as np\n'), ((2464, 2482), 'numpy.eye', 'np.eye', (['D.shape[1]'], {}), '(D.shape[1])\n', (2470, 2482), True, 'import numpy as np\n'), ((3251, 3267), 'numpy.zeros_like', 'np.zeros_like', (['X'], {}), '(X)\n', (3264, 3267), True, 'import numpy as np\n'), ((3871, 3893), 'numpy.linalg.norm', 'LA.norm', (['(grad1 - grad2)'], {}), '(grad1 - grad2)\n', (3878, 3893), True, 'from numpy import linalg as LA\n'), ((5323, 5337), 'numpy.dot', 'np.dot', (['Y', 'X.T'], {}), '(Y, X.T)\n', (5329, 5337), True, 'import numpy as np\n'), ((5348, 5362), 'numpy.dot', 'np.dot', (['X', 'X.T'], {}), '(X, X.T)\n', (5354, 5362), True, 'import numpy as np\n'), ((5426, 5446), 'numpy.zeros_like', 'np.zeros_like', (['Dinit'], {}), '(Dinit)\n', (5439, 5446), True, 'import numpy as np\n'), ((5466, 5486), 'numpy.eye', 'np.eye', (['XXt.shape[0]'], {}), '(XXt.shape[0])\n', (5472, 5486), True, 'import numpy as np\n'), ((6311, 6328), 'numpy.linalg.norm', 'LA.norm', (['r', '"""fro"""'], {}), "(r, 'fro')\n", (6318, 6328), True, 'from numpy import linalg as LA\n'), ((6345, 6362), 'numpy.linalg.norm', 'LA.norm', (['s', '"""fro"""'], {}), "(s, 'fro')\n", (6352, 6362), True, 'from numpy import linalg as LA\n'), ((8579, 8603), 'numpy.dot', 'np.dot', (['self.D.T', 'self.D'], {}), '(self.D.T, self.D)\n', (8585, 8603), True, 'import numpy as np\n'), ((8837, 8861), 'numpy.dot', 'np.dot', (['self.D.T', 'self.Y'], {}), '(self.D.T, self.Y)\n', (8843, 8861), True, 'import numpy as np\n'), ((7056, 7100), 'numpy.zeros', 'np.zeros', (['(self.D.shape[1], self.Y.shape[1])'], {}), '((self.D.shape[1], self.Y.shape[1]))\n', (7064, 7100), True, 'import numpy as np\n'), ((8908, 8952), 'numpy.zeros', 'np.zeros', (['(self.D.shape[1], self.Y.shape[1])'], {}), '((self.D.shape[1], self.Y.shape[1]))\n', (8916, 8952), True, 'import numpy as np\n'), ((9061, 9080), 'numpy.dot', 'np.dot', (['self.DtD', 'X'], {}), '(self.DtD, X)\n', (9067, 9080), True, 'import numpy as np\n'), ((1012, 1037), 'numpy.linalg.norm', 'LA.norm', (['(D - D_old)', '"""fro"""'], {}), "(D - D_old, 'fro')\n", (1019, 1037), True, 'from numpy import linalg as LA\n'), ((8674, 8690), 'numpy.linalg.eig', 'LA.eig', (['self.DtD'], {}), '(self.DtD)\n', (8680, 8690), True, 'from numpy import linalg as LA\n'), ((706, 720), 'numpy.dot', 'np.dot', (['E', 'D.T'], {}), '(E, D.T)\n', (712, 720), True, 'import numpy as np\n'), ((740, 754), 'numpy.dot', 'np.dot', (['F', 'D.T'], {}), '(F, D.T)\n', (746, 754), True, 'import numpy as np\n'), ((2349, 2361), 'numpy.dot', 'np.dot', (['A', 'D'], {}), '(A, D)\n', (2355, 2361), True, 'import numpy as np\n'), ((2896, 2908), 'numpy.dot', 'np.dot', (['Y', 'V'], {}), '(Y, V)\n', (2902, 2908), True, 'import numpy as np\n'), ((7520, 7549), 'math.sqrt', 'math.sqrt', (['(1 + 4 * t_old ** 2)'], {}), '(1 + 4 * t_old ** 2)\n', (7529, 7549), False, 'import math\n'), ((9160, 9177), 'numpy.dot', 'np.dot', (['self.D', 'X'], {}), '(self.D, X)\n', (9166, 9177), True, 'import numpy as np\n'), ((982, 995), 'numpy.linalg.norm', 'LA.norm', (['a', '(2)'], {}), '(a, 2)\n', (989, 995), True, 'from numpy import linalg as LA\n'), ((2259, 2273), 'numpy.dot', 'np.dot', (['E', 'D.T'], {}), '(E, D.T)\n', (2265, 2273), True, 'import numpy as np\n'), ((2296, 2310), 'numpy.dot', 'np.dot', (['D.T', 'D'], {}), '(D.T, D)\n', (2302, 2310), True, 'import numpy as np\n')]
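# A minimal, self-contained sketch of the FISTA iteration that the Lasso class
# above implements. The project's `utils` helpers (shrinkage, norm1, normF2)
# are not shown in this file, so the soft-thresholding step is written inline;
# all dimensions and names below are illustrative, not from the project.
import numpy as np

def soft_threshold(v, thr):
    # elementwise shrinkage: sign(v) * max(|v| - thr, 0)
    return np.sign(v) * np.maximum(np.abs(v) - thr, 0)

rng = np.random.default_rng(0)
D = rng.standard_normal((50, 100))   # dictionary: features x atoms
Y = rng.standard_normal((50, 5))     # signals to encode
lambd = 0.1
DtD, DtY = D.T @ D, D.T @ Y
L = np.max(np.linalg.eigvalsh(DtD))    # Lipschitz constant of the smooth part
X = np.zeros((100, 5))                # plays the role of y_old in solve()
x_old, t_old = X.copy(), 1.0
for _ in range(100):
    grad = DtD @ X - DtY             # gradient of 0.5*||Y - DX||_F^2
    x_new = soft_threshold(X - grad/L, lambd/L)
    t_new = 0.5*(1 + np.sqrt(1 + 4*t_old**2))
    X = x_new + (t_old - 1)/t_new * (x_new - x_old)   # momentum extrapolation
    x_old, t_old = x_new, t_new
print('objective:', 0.5*np.linalg.norm(Y - D @ x_new)**2 + lambd*np.abs(x_new).sum())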
# Given a face image and a model, creates a new image plotting the nose
# coordinates (or what the model thinks is the nose!)
from __future__ import print_function
import keras
from PIL import Image
import numpy as np
from data_utils import *
import argparse


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--image", help="Path to image to process", type=str, required=True)
    parser.add_argument("--model", help="Model file to load", type=str, required=True)
    parser.add_argument("--output", help="Path to output file (the input image, resized, with a green cross on the nose)",
                        type=str, default="nose_prediction.png")
    parser.add_argument("--size", help="Image size in pixels (default = 182)", type=int, default=182)
    args = parser.parse_args()

    input_shape = (args.size, args.size, 3)
    img = Image.open(args.image)
    img = img.resize((input_shape[0], input_shape[1]), Image.ANTIALIAS)
    data = []
    data.append(np.asarray(img))
    data = np.asarray(data)
    data = data / 255.0
    data = data - 0.5

    result = None
    mdl = keras.models.load_model(args.model)
    result = mdl.predict(data)
    print("Model output: " + str(result[0]))
    #nose_coords = ((int)(input_shape[0] * result[0][0]), int(input_shape[1] * result[0][1]))
    write_image_result(img, args.size, result[0], args.output)


if __name__ == "__main__":
    main()
[ "keras.models.load_model", "PIL.Image.open", "numpy.asarray", "argparse.ArgumentParser" ]
[((286, 311), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (309, 311), False, 'import argparse\n'), ((845, 867), 'PIL.Image.open', 'Image.open', (['args.image'], {}), '(args.image)\n', (855, 867), False, 'from PIL import Image\n'), ((999, 1015), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1009, 1015), True, 'import numpy as np\n'), ((1092, 1127), 'keras.models.load_model', 'keras.models.load_model', (['args.model'], {}), '(args.model)\n', (1115, 1127), False, 'import keras\n'), ((971, 986), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (981, 986), True, 'import numpy as np\n')]
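# write_image_result comes from the unshown data_utils module. A plausible
# stand-in that draws a green cross at the predicted nose position might look
# like the sketch below; the normalized (x, y) output convention is an
# assumption, not confirmed by the source.
from PIL import Image, ImageDraw

def draw_nose_cross(img, size, pred, out_path, arm=5):
    x, y = int(size * pred[0]), int(size * pred[1])  # assumed (x, y) in [0, 1]
    draw = ImageDraw.Draw(img)
    draw.line([(x - arm, y), (x + arm, y)], fill=(0, 255, 0), width=2)
    draw.line([(x, y - arm), (x, y + arm)], fill=(0, 255, 0), width=2)
    img.save(out_path)

draw_nose_cross(Image.new("RGB", (182, 182)), 182, [0.5, 0.5], "cross_demo.png")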
import tensorflow as tf
import numpy as np
from TensorflowLearning.common import deal_label

(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
train_images = np.reshape(train_images, [-1, 784])
test_images = np.reshape(test_images, [-1, 784])
train_labels = deal_label(train_labels)
test_labels = deal_label(test_labels)
[ "TensorflowLearning.common.deal_label", "numpy.reshape", "tensorflow.keras.datasets.mnist.load_data" ]
[((152, 187), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (185, 187), True, 'import tensorflow as tf\n'), ((273, 308), 'numpy.reshape', 'np.reshape', (['train_images', '[-1, 784]'], {}), '(train_images, [-1, 784])\n', (283, 308), True, 'import numpy as np\n'), ((323, 357), 'numpy.reshape', 'np.reshape', (['test_images', '[-1, 784]'], {}), '(test_images, [-1, 784])\n', (333, 357), True, 'import numpy as np\n'), ((373, 397), 'TensorflowLearning.common.deal_label', 'deal_label', (['train_labels'], {}), '(train_labels)\n', (383, 397), False, 'from TensorflowLearning.common import deal_label\n'), ((412, 435), 'TensorflowLearning.common.deal_label', 'deal_label', (['test_labels'], {}), '(test_labels)\n', (422, 435), False, 'from TensorflowLearning.common import deal_label\n')]
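# deal_label comes from the project's own TensorflowLearning.common module and
# is not shown. Assuming it one-hot encodes the integer digit labels (a common
# preprocessing step for MNIST), an equivalent stand-in would be:
import numpy as np

def one_hot(labels, num_classes=10):
    out = np.zeros((labels.shape[0], num_classes), dtype=np.float32)
    out[np.arange(labels.shape[0]), labels] = 1.0
    return out

print(one_hot(np.array([3, 0, 9])))  # three rows of length 10, one 1.0 per row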
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import cv2
import numpy as np

import constants as const
import transformations.shadow_mask as mask


def add_n_ellipses_light(image, intensity=0.5, blur_width=6, n=1):
    inverted_colors = const.WHITE - image
    inverted_shadow = add_n_ellipses_shadow(inverted_colors, intensity, blur_width, n)
    return const.WHITE - inverted_shadow


def add_n_ellipses_shadow(image, intensity=0.5, blur_width=6, n=1):
    for i in range(n):
        image = add_ellipse_shadow(image, intensity=intensity, blur_width=blur_width)
    return image


def add_ellipse_light(image, intensity=0.5, blur_width=6):
    inverted_colors = const.WHITE - image
    inverted_shadow = add_ellipse_shadow(inverted_colors, intensity, blur_width)
    return const.WHITE - inverted_shadow


def add_ellipse_shadow(image, intensity=0.5, blur_width=6):
    shadow_mask = np.zeros(image.shape[:2], dtype=np.uint8)
    shadow_mask.fill(const.WHITE)
    ellipse = __get_multiple_ellipses(shadow_mask)
    return mask.apply_shadow_mask(image, blur_width, intensity, ellipse)


def __get_multiple_ellipses(image):
    h, w = image.shape[:2]
    center = int(w * np.random.uniform()), int(h * np.random.uniform())
    random_h = np.random.uniform() * h
    random_w = np.random.uniform() * w
    axes1 = int(random_h * 0.2), int(random_w * 0.2)
    axes2 = int(random_h * 0.4), int(random_w * 0.4)
    axes3 = int(random_h * 0.6), int(random_w * 0.6)
    axes4 = int(random_h * 0.8), int(random_w * 0.8)
    axes5 = int(random_h), int(random_w)
    angle = 360 * np.random.uniform()
    ellipse = get_single_ellipse(image, center, axes5, angle, const.DARK_WHITE)
    ellipse = get_single_ellipse(ellipse, center, axes4, angle, const.LIGHT_GRAY)
    ellipse = get_single_ellipse(ellipse, center, axes3, angle, const.GRAY)
    ellipse = get_single_ellipse(ellipse, center, axes2, angle, const.DARK_GRAY)
    return get_single_ellipse(ellipse, center, axes1, angle, const.LIGHT_BLACK)


def get_single_ellipse(image, center, axes, angle, color):
    start_angle = 0
    end_angle = 360
    thickness = -1
    return cv2.ellipse(image, center, axes, angle, start_angle, end_angle, color, thickness)
[ "cv2.ellipse", "numpy.zeros", "transformations.shadow_mask.apply_shadow_mask", "numpy.random.uniform" ]
[((908, 949), 'numpy.zeros', 'np.zeros', (['image.shape[:2]'], {'dtype': 'np.uint8'}), '(image.shape[:2], dtype=np.uint8)\n', (916, 949), True, 'import numpy as np\n'), ((1039, 1100), 'transformations.shadow_mask.apply_shadow_mask', 'mask.apply_shadow_mask', (['image', 'blur_width', 'intensity', 'ellipse'], {}), '(image, blur_width, intensity, ellipse)\n', (1061, 1100), True, 'import transformations.shadow_mask as mask\n'), ((2087, 2172), 'cv2.ellipse', 'cv2.ellipse', (['image', 'center', 'axes', 'angle', 'start_angle', 'end_angle', 'color', 'thickness'], {}), '(image, center, axes, angle, start_angle, end_angle, color,\n thickness)\n', (2098, 2172), False, 'import cv2\n'), ((1246, 1265), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1263, 1265), True, 'import numpy as np\n'), ((1282, 1301), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1299, 1301), True, 'import numpy as np\n'), ((1559, 1578), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1576, 1578), True, 'import numpy as np\n'), ((1183, 1202), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1200, 1202), True, 'import numpy as np\n'), ((1213, 1232), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1230, 1232), True, 'import numpy as np\n')]
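# Hypothetical usage of the augmentations above. The constants module and
# mask.apply_shadow_mask are project-local and not shown, so this only
# illustrates the intended call pattern; file names are placeholders.
import cv2

image = cv2.imread('road.jpg')                 # any BGR training image
shadowed = add_n_ellipses_shadow(image, intensity=0.4, blur_width=9, n=3)
lit = add_ellipse_light(image, intensity=0.6)
cv2.imwrite('road_shadowed.jpg', shadowed)
cv2.imwrite('road_lit.jpg', lit)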
"""Description """ import sys, os, tempfile, argparse import tensorflow as tf import numpy as np from definitions import * from feeder import SampleReader from model import SptAudioGen, SptAudioGenParams from pyutils.iolib.audio import save_wav import myutils def parse_arguments(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('model_dir', help='Directory containing model snapshot.') # Inputs parser.add_argument('input_folder', default='', help='Folder with input sample.') parser.add_argument('video', default='', help='High resolution video.') parser.add_argument('--deploy_start', default=0., type=float) parser.add_argument('--deploy_duration', default=10., type=float) # Outputs parser.add_argument('--output_fn', default='output', help='Basename for output files.') parser.add_argument('--save_ambix', action='store_true', help='Output ambix video file.') parser.add_argument('--save_video', action='store_true', help='Output video file.') parser.add_argument('--overlay_map', action='store_true', help='Overlay spherical map.') parser.add_argument('--VR', action='store_true', help='360 video.') parser.add_argument('--gpu', type=int, default=0, help="GPU id") args = parser.parse_args(sys.argv[1:]) if args.deploy_duration <= 0: args.deploy_duration = None return args class W2XYZ(object): def __init__(self, model_dir): print('\n' + '='*30 + ' ARGUMENTS ' + '='*30) params = myutils.load_params(model_dir) for k, v in params.__dict__.items(): print('TRAIN | {}: {}'.format(k, v)) self.params = params self.duration = 0.1 self.batch_size = 10 min_t = min([params.context, self.duration, 1. / params.video_rate]) # Model num_sep = params.num_sep_tracks if params.separation != NO_SEPARATION else 1 net_params = SptAudioGenParams(sep_num_tracks=num_sep, ctx_feats_fc_units=params.context_units, loc_fc_units=params.loc_units, sep_freq_mask_fc_units=params.freq_mask_units, sep_fft_window=params.fft_window) self.model = SptAudioGen(ambi_order=params.ambi_order, audio_rate=params.audio_rate, video_rate=params.video_rate, context=params.context, sample_duration=self.duration, encoders=params.encoders, separation=params.separation, params=net_params) self.audio_size = self.model.snd_dur + self.model.snd_contx - 1 self.video_size = int(self.duration * params.video_rate) shape = (self.batch_size, self.audio_size, 1) self.tba = {AUDIO: tf.compat.v1.placeholder(dtype=tf.float32, shape=shape)} if VIDEO in params.encoders: shape = (self.batch_size, self.video_size, 224, 448, 3) self.tba[VIDEO] = tf.compat.v1.placeholder(dtype=tf.float32, shape=shape) if FLOW in params.encoders: shape = (self.batch_size, self.video_size, 224, 448, 3) self.tba[FLOW] = tf.compat.v1.placeholder(dtype=tf.float32, shape=shape) self.ambi_pred_t = self.model.inference_ops(is_training=False, **self.tba) saver = tf.compat.v1.train.Saver() config = tf.compat.v1.ConfigProto( allow_soft_placement=True, gpu_options=tf.compat.v1.GPUOptions(allow_growth=True) ) self.sess = tf.compat.v1.Session(config=config) print('Loading model...') print(tf.train.latest_checkpoint(model_dir)) saver.restore(self.sess, tf.train.latest_checkpoint(model_dir)) #saver.restore(self.sess, model_dir+'/model.ckpt') def deploy(self, input_folder, deploy_start, deploy_duration): reader = SampleReader(input_folder, ambi_order=self.params.ambi_order, audio_rate=self.params.audio_rate, video_rate=self.params.video_rate, context=self.params.context, duration=self.duration, return_video=VIDEO in 
self.params.encoders, img_prep=myutils.img_prep_fcn(), return_flow=FLOW in self.params.encoders, start_time=deploy_start, sample_duration=deploy_duration, skip_silence_thr=None, shuffle=False, random_rotations=False, skip_rate=None) dt = reader.chunks_t[0] - deploy_start reader.chunks_t = [t - dt for t in reader.chunks_t] print('Generating ambisonics...') ss = self.model.snd_contx / 2 mono, ambi_pred, sep_channels, sep_mask, weights, biases = [], [], [], [], [], [] while True: batch = [] for _ in range(self.batch_size): chunk = reader.get() if chunk is None: break batch.append(chunk) if not batch: break vids = [b['id'] for b in batch] n_samples = len(vids) ambix = np.stack([b['ambix'] for b in batch], axis=0) if n_samples != self.batch_size: ambix = np.concatenate([ambix, np.zeros((self.batch_size - n_samples, ambix.shape[1], ambix.shape[2]))], axis=0) feed_dict = {self.tba[AUDIO]: ambix[:, :, :1]} #print(f'########AMBIX shape: {ambix.shape}, data: {ambix}') #!!aquí está ok if VIDEO in self.params.encoders: video = np.stack([b['video'] for b in batch], axis=0) if n_samples != self.batch_size: video = np.concatenate([video, np.zeros((self.batch_size - n_samples, video.shape[1], video.shape[2], video.shape[3], video.shape[4]))], axis=0) feed_dict[self.tba[VIDEO]] = video if FLOW in self.params.encoders: flow = np.stack([b['flow'] for b in batch], axis=0) if n_samples != self.batch_size: flow = np.concatenate([flow, np.zeros((self.batch_size - n_samples, flow.shape[1], flow.shape[2], flow.shape[3], flow.shape[4]))], axis=0) feed_dict[self.tba[FLOW]] = flow ambi_pred_chk = self.sess.run(self.ambi_pred_t, feed_dict=feed_dict) # print(f'########AMBI PRED shape: {ambi_pred_chk.shape}, data: {ambi_pred_chk}') n_frames = n_samples * ambi_pred_chk.shape[1] n_out = ambi_pred_chk.shape[2] ambi_pred_chk = np.copy(ambi_pred_chk[:n_samples]).reshape((n_frames, n_out)) # print(f'########tras reshape, AMBI PRED shape: {ambi_pred_chk.shape}, data: {ambi_pred_chk}') ambi_pred.append(ambi_pred_chk) #print(f'Types: n_samples {type(n_samples)} ss {type(ss)} self.model.snd_dur {type(self.model.snd_dur)}') datos = ambix[:n_samples, int(ss):int(ss) + int(self.model.snd_dur), :1] # print(f'ambix shape: {ambix.shape}, data: {ambix}') # print(f'ambix[:{n_samples}, int({ss}):int({ss}) + int({self.model.snd_dur}), :1]') # print(f'Tipo de ambix: {type(datos[0][0][0])}') # print(f'Voy a copiar {datos.shape}, con contenido {datos}, y tipo {type(datos[0][0][0])}') mono.append(np.copy(ambix[:n_samples, int(ss):int(ss) + int(self.model.snd_dur), :1]).reshape((-1, 1))) #print(f'########MONO length: {len(mono)}, data: {mono}') mono = np.concatenate(mono, 0) ambi_pred = np.concatenate((mono, np.concatenate(ambi_pred, 0)), 1) # print(f'########antes return AMBI PRED shape: {ambi_pred.shape}, data: {ambi_pred}') return ambi_pred def main(args): os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % args.gpu tmp_ambix_fn = tempfile.mktemp(prefix='c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/', suffix='.wav') tmp_video_fn = tempfile.mktemp(prefix='c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/', suffix='.mp4') model = W2XYZ(args.model_dir) ambi_pred = model.deploy(args.input_folder, args.deploy_start, args.deploy_duration) # Antes comment begin # dur_t = model.model.duration # snd1 = model.deploy(args.input_folder, args.deploy_start - dur_t/2, args.deploy_duration + dur_t) # hann1 = np.hanning(model.model.snd_dur) # hann1 = np.tile(hann1, int(snd1.shape[0]/hann1.size))[:, np.newaxis] # ss = 
int(model.model.snd_dur/2) # t = int(args.deploy_duration * model.params.audio_rate) # snd1 = snd1[ss:ss+t] # hann1 = hann1[ss:ss+t] # snd2 = model.deploy(args.input_folder, args.deploy_start, args.deploy_duration + dur_t) # hann2 = np.hanning(model.model.snd_dur) # hann2 = np.tile(hann2, int(snd2.shape[0]/hann2.size))[:, np.newaxis] # ss = 0 # t = int(args.deploy_duration * model.params.audio_rate) # snd2 = snd2[ss:ss+t] # hann2 = hann2[ss:ss+t] # ambi_pred = (snd1 * hann1 + snd2 * hann2) / (hann1 + hann2) # Antes comment end # Save ambisonics save_wav(tmp_ambix_fn, ambi_pred, model.params.audio_rate) print(f'tmp_ambix_fn: {tmp_ambix_fn}, args.output_fn: {args.output_fn}') if args.save_ambix: print('Saving ambisonics wav...') print(f'==================================================') cmd = 'ffmpeg -y -i {} -strict -2 {}'.format(tmp_ambix_fn, args.output_fn) os.system(cmd) print(f'==================================================') if args.save_video: print('Saving video...') print(f'==================================================') cmd = 'ffmpeg -y -ss {} -i {} -t {} {}'.format(args.deploy_start, args.video, args.deploy_duration, tmp_video_fn) os.system(cmd) print(f'==================================================') myutils.gen_360video(tmp_ambix_fn, tmp_video_fn, args.output_fn, overlay_map=args.overlay_map, inject_meta=args.VR, binauralize=not args.VR) os.remove(tmp_video_fn) os.remove(tmp_ambix_fn) if __name__ == '__main__': print(os.getcwd()) main(parse_arguments())
[ "pyutils.iolib.audio.save_wav", "myutils.gen_360video", "tensorflow.compat.v1.Session", "os.remove", "myutils.load_params", "model.SptAudioGenParams", "model.SptAudioGen", "tensorflow.compat.v1.placeholder", "argparse.ArgumentParser", "numpy.stack", "numpy.concatenate", "tensorflow.train.lates...
[((299, 402), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (322, 402), False, 'import sys, os, tempfile, argparse\n'), ((8303, 8420), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'prefix': '"""c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/"""', 'suffix': '""".wav"""'}), "(prefix=\n 'c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/',\n suffix='.wav')\n", (8318, 8420), False, 'import sys, os, tempfile, argparse\n'), ((8431, 8548), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'prefix': '"""c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/"""', 'suffix': '""".mp4"""'}), "(prefix=\n 'c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/',\n suffix='.mp4')\n", (8446, 8548), False, 'import sys, os, tempfile, argparse\n'), ((9577, 9635), 'pyutils.iolib.audio.save_wav', 'save_wav', (['tmp_ambix_fn', 'ambi_pred', 'model.params.audio_rate'], {}), '(tmp_ambix_fn, ambi_pred, model.params.audio_rate)\n', (9585, 9635), False, 'from pyutils.iolib.audio import save_wav\n'), ((10552, 10575), 'os.remove', 'os.remove', (['tmp_ambix_fn'], {}), '(tmp_ambix_fn)\n', (10561, 10575), False, 'import sys, os, tempfile, argparse\n'), ((1592, 1622), 'myutils.load_params', 'myutils.load_params', (['model_dir'], {}), '(model_dir)\n', (1611, 1622), False, 'import myutils\n'), ((2005, 2209), 'model.SptAudioGenParams', 'SptAudioGenParams', ([], {'sep_num_tracks': 'num_sep', 'ctx_feats_fc_units': 'params.context_units', 'loc_fc_units': 'params.loc_units', 'sep_freq_mask_fc_units': 'params.freq_mask_units', 'sep_fft_window': 'params.fft_window'}), '(sep_num_tracks=num_sep, ctx_feats_fc_units=params.\n context_units, loc_fc_units=params.loc_units, sep_freq_mask_fc_units=\n params.freq_mask_units, sep_fft_window=params.fft_window)\n', (2022, 2209), False, 'from model import SptAudioGen, SptAudioGenParams\n'), ((2299, 2543), 'model.SptAudioGen', 'SptAudioGen', ([], {'ambi_order': 'params.ambi_order', 'audio_rate': 'params.audio_rate', 'video_rate': 'params.video_rate', 'context': 'params.context', 'sample_duration': 'self.duration', 'encoders': 'params.encoders', 'separation': 'params.separation', 'params': 'net_params'}), '(ambi_order=params.ambi_order, audio_rate=params.audio_rate,\n video_rate=params.video_rate, context=params.context, sample_duration=\n self.duration, encoders=params.encoders, separation=params.separation,\n params=net_params)\n', (2310, 2543), False, 'from model import SptAudioGen, SptAudioGenParams\n'), ((3537, 3563), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {}), '()\n', (3561, 3563), True, 'import tensorflow as tf\n'), ((3743, 3778), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (3763, 3778), True, 'import tensorflow as tf\n'), ((7989, 8012), 'numpy.concatenate', 'np.concatenate', (['mono', '(0)'], {}), '(mono, 0)\n', (8003, 8012), True, 'import numpy as np\n'), ((9940, 9954), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (9949, 9954), False, 'import sys, os, tempfile, argparse\n'), ((10281, 10295), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (10290, 10295), False, 'import sys, os, tempfile, argparse\n'), ((10374, 10518), 'myutils.gen_360video', 'myutils.gen_360video', (['tmp_ambix_fn', 'tmp_video_fn', 'args.output_fn'], {'overlay_map': 
'args.overlay_map', 'inject_meta': 'args.VR', 'binauralize': '(not args.VR)'}), '(tmp_ambix_fn, tmp_video_fn, args.output_fn,\n overlay_map=args.overlay_map, inject_meta=args.VR, binauralize=not args.VR)\n', (10394, 10518), False, 'import myutils\n'), ((10524, 10547), 'os.remove', 'os.remove', (['tmp_video_fn'], {}), '(tmp_video_fn)\n', (10533, 10547), False, 'import sys, os, tempfile, argparse\n'), ((10614, 10625), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10623, 10625), False, 'import sys, os, tempfile, argparse\n'), ((2992, 3047), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32', 'shape': 'shape'}), '(dtype=tf.float32, shape=shape)\n', (3016, 3047), True, 'import tensorflow as tf\n'), ((3184, 3239), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32', 'shape': 'shape'}), '(dtype=tf.float32, shape=shape)\n', (3208, 3239), True, 'import tensorflow as tf\n'), ((3373, 3428), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32', 'shape': 'shape'}), '(dtype=tf.float32, shape=shape)\n', (3397, 3428), True, 'import tensorflow as tf\n'), ((3827, 3864), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_dir'], {}), '(model_dir)\n', (3853, 3864), True, 'import tensorflow as tf\n'), ((3899, 3936), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_dir'], {}), '(model_dir)\n', (3925, 3936), True, 'import tensorflow as tf\n'), ((5600, 5645), 'numpy.stack', 'np.stack', (["[b['ambix'] for b in batch]"], {'axis': '(0)'}), "([b['ambix'] for b in batch], axis=0)\n", (5608, 5645), True, 'import numpy as np\n'), ((3670, 3712), 'tensorflow.compat.v1.GPUOptions', 'tf.compat.v1.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (3693, 3712), True, 'import tensorflow as tf\n'), ((4530, 4552), 'myutils.img_prep_fcn', 'myutils.img_prep_fcn', ([], {}), '()\n', (4550, 4552), False, 'import myutils\n'), ((6051, 6096), 'numpy.stack', 'np.stack', (["[b['video'] for b in batch]"], {'axis': '(0)'}), "([b['video'] for b in batch], axis=0)\n", (6059, 6096), True, 'import numpy as np\n'), ((6431, 6475), 'numpy.stack', 'np.stack', (["[b['flow'] for b in batch]"], {'axis': '(0)'}), "([b['flow'] for b in batch], axis=0)\n", (6439, 6475), True, 'import numpy as np\n'), ((8055, 8083), 'numpy.concatenate', 'np.concatenate', (['ambi_pred', '(0)'], {}), '(ambi_pred, 0)\n', (8069, 8083), True, 'import numpy as np\n'), ((7040, 7074), 'numpy.copy', 'np.copy', (['ambi_pred_chk[:n_samples]'], {}), '(ambi_pred_chk[:n_samples])\n', (7047, 7074), True, 'import numpy as np\n'), ((5738, 5809), 'numpy.zeros', 'np.zeros', (['(self.batch_size - n_samples, ambix.shape[1], ambix.shape[2])'], {}), '((self.batch_size - n_samples, ambix.shape[1], ambix.shape[2]))\n', (5746, 5809), True, 'import numpy as np\n'), ((6197, 6304), 'numpy.zeros', 'np.zeros', (['(self.batch_size - n_samples, video.shape[1], video.shape[2], video.shape[3\n ], video.shape[4])'], {}), '((self.batch_size - n_samples, video.shape[1], video.shape[2],\n video.shape[3], video.shape[4]))\n', (6205, 6304), True, 'import numpy as np\n'), ((6574, 6678), 'numpy.zeros', 'np.zeros', (['(self.batch_size - n_samples, flow.shape[1], flow.shape[2], flow.shape[3],\n flow.shape[4])'], {}), '((self.batch_size - n_samples, flow.shape[1], flow.shape[2], flow.\n shape[3], flow.shape[4]))\n', (6582, 6678), True, 'import numpy as np\n')]
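# The previously commented-out block in main() above sketches a Hann-window
# crossfade between two overlapping predictions. The blending idea in
# isolation, with synthetic arrays (all names and sizes here are illustrative,
# not from the project):
import numpy as np

rate = 48000
t = 10 * rate                                   # ten seconds of samples
snd1 = np.random.randn(t, 4)                    # prediction from shifted window
snd2 = np.random.randn(t, 4)                    # prediction from aligned window
win = np.hanning(4800)                          # one analysis window
hann1 = np.tile(win, t // win.size)[:, np.newaxis]
hann2 = np.roll(hann1, win.size // 2, axis=0)    # half-window offset
blended = (snd1*hann1 + snd2*hann2) / (hann1 + hann2 + 1e-12)  # eps avoids 0/0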
import sys
from set_up import Setup
from estimator import CommonEstimator
import json
import h5py
#from utils import get_memory_usage
import numpy as np

SEED = 12939  # from random.org
np.random.seed(SEED)

print('Usage: python main.py fpType fpSize trainingSetSize estimators.json dataset')

fpType = sys.argv[1]
fpSize = int(sys.argv[2])
trainingSetSize = int(sys.argv[3])
json_name = sys.argv[4]
dataset = sys.argv[5]
print('Running:')
print(f'python main.py {fpType} {fpSize} {trainingSetSize} {json_name} {dataset}')

estimators = json.load(open(json_name, 'r'))['estimators']

if __name__ == '__main__':
    # setup the data:
    setup = Setup(fpType, dataset, verbose=True)
    try:
        setup.write_fingerprints()
    except Exception:
        print('Already written fpfile')
    setup.load_fingerprints()
    setup.load_scores()
    feature_matrix = setup.fold_to_size(fpSize)

    # evaluation stuff goes here:
    for estimator in estimators:
        for repeat in range(5):
            setup.random_split(trainingSetSize)
            common_estimator = CommonEstimator(estimator, cutoff=0.3, verbose=setup.verbose)
            print(setup.train_idx.shape)
            print(setup.scores.shape)
            common_estimator.fit(feature_matrix[setup.train_idx],
                                 setup.scores[setup.train_idx])
            pred = common_estimator.chunked_predict(feature_matrix[setup.test_idx])
            setup.write_results(pred, fpSize, trainingSetSize, estimator['name'], repeat)
[ "set_up.Setup", "numpy.random.seed", "estimator.CommonEstimator" ]
[((184, 204), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (198, 204), True, 'import numpy as np\n'), ((604, 640), 'set_up.Setup', 'Setup', (['fpType', 'dataset'], {'verbose': '(True)'}), '(fpType, dataset, verbose=True)\n', (609, 640), False, 'from set_up import Setup\n'), ((1051, 1112), 'estimator.CommonEstimator', 'CommonEstimator', (['estimator'], {'cutoff': '(0.3)', 'verbose': 'setup.verbose'}), '(estimator, cutoff=0.3, verbose=setup.verbose)\n', (1066, 1112), False, 'from estimator import CommonEstimator\n')]
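# The estimators JSON consumed above must contain an "estimators" list whose
# entries carry at least a "name" key (used by setup.write_results); anything
# else depends on CommonEstimator, which is not shown. A plausible minimal
# file, with hypothetical estimator names:
import json

config = {
    'estimators': [
        {'name': 'logistic_regression'},
        {'name': 'random_forest'},
    ]
}
with open('estimators.json', 'w') as fh:
    json.dump(config, fh, indent=2)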
#======================================================================
#
#     This module contains routines to postprocess the VFI
#     solutions.
#
#     <NAME>, 01/19
#     edited by <NAME>, with <NAME> and <NAME>, 11/2021
#======================================================================

import numpy as np
from parameters import *
#import cPickle as pickle
import pickle
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
                                              ExpSineSquared, DotProduct,
                                              ConstantKernel)
from datetime import datetime
from numpy.random import PCG64

#======================================================================
# Routine to compute the errors


def ls_error(n_agents, t1, t2, num_points):
    file = open('errors.txt', 'w')

    now = datetime.now()
    dt = int(now.strftime("%H%M%S%f"))
    print("Time seed = ", dt)
    rng = np.random.default_rng(dt)

    unif = rng.uniform(0, 1, (num_points, n_agents))
    # sample of states
    kap_smp = kap_L + unif*(kap_U - kap_L)
    to_print = np.empty((1, 3))

    if t1 == 1:
        t1 += 1  # start comparisons one step later

    for i in range(t1, t2-1):
        sum_diffs = 0
        diff = 0
        unif = rng.uniform(0, 1, (num_points, n_agents))
        # sample of states
        kap_smp = kap_L + unif*(kap_U - kap_L)

        # Load the model from the previous iteration step
        restart_data = filename + str(i) + ".pcl"
        with open(restart_data, 'rb') as fd_old:
            gp_old = pickle.load(fd_old)
            print("data from iteration step ", i, "loaded from disk")

        # Load the model from the following iteration step
        restart_data = filename + str(i+1) + ".pcl"
        with open(restart_data, 'rb') as fd_new:
            gp_new = pickle.load(fd_new)
            print("data from iteration step ", i+1, "loaded from disk")

        y_pred_old, sigma_old = gp_old.predict(kap_smp, return_std=True)
        y_pred_new, sigma_new = gp_new.predict(kap_smp, return_std=True)

        # plot predictive mean and 95% quantiles
        #for j in range(num_points):
        #    print kap_smp[j], " ", y_pred_new[j], " ", y_pred_new[j] + 1.96*sigma_new[j], " ", y_pred_new[j] - 1.96*sigma_new[j]

        diff = y_pred_old - y_pred_new
        max_abs_diff = np.amax(np.fabs(diff))
        average = np.average(np.fabs(diff))

        to_print[0, 0] = i+1
        to_print[0, 1] = max_abs_diff
        to_print[0, 2] = average

        np.savetxt(file, to_print, fmt='%2.16f')
        np.set_printoptions(suppress=True)
        msg = str(diff) + ", max=" + str(max_abs_diff)
        print(msg)
        print("===================================")

    file.close()
    return

#======================================================================
[ "numpy.fabs", "numpy.random.default_rng", "pickle.load", "datetime.datetime.now", "numpy.empty", "numpy.savetxt", "numpy.set_printoptions" ]
[((917, 931), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (929, 931), False, 'from datetime import datetime\n'), ((1011, 1036), 'numpy.random.default_rng', 'np.random.default_rng', (['dt'], {}), '(dt)\n', (1032, 1036), True, 'import numpy as np\n'), ((1164, 1180), 'numpy.empty', 'np.empty', (['(1, 3)'], {}), '((1, 3))\n', (1172, 1180), True, 'import numpy as np\n'), ((2629, 2669), 'numpy.savetxt', 'np.savetxt', (['file', 'to_print'], {'fmt': '"""%2.16f"""'}), "(file, to_print, fmt='%2.16f')\n", (2639, 2669), True, 'import numpy as np\n'), ((2678, 2712), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (2697, 2712), True, 'import numpy as np\n'), ((1586, 1605), 'pickle.load', 'pickle.load', (['fd_old'], {}), '(fd_old)\n', (1597, 1605), False, 'import pickle\n'), ((1893, 1912), 'pickle.load', 'pickle.load', (['fd_new'], {}), '(fd_new)\n', (1904, 1912), False, 'import pickle\n'), ((2450, 2463), 'numpy.fabs', 'np.fabs', (['diff'], {}), '(diff)\n', (2457, 2463), True, 'import numpy as np\n'), ((2494, 2507), 'numpy.fabs', 'np.fabs', (['diff'], {}), '(diff)\n', (2501, 2507), True, 'import numpy as np\n')]
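# ls_error above expects each iteration step to have left a pickled, fitted
# GaussianProcessRegressor at filename + str(i) + ".pcl". A minimal round-trip
# with synthetic data (shapes and the kernel choice are illustrative):
import pickle
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

X = np.random.uniform(0, 1, (20, 2))
y = np.sin(X).sum(axis=1)
gp = GaussianProcessRegressor(kernel=RBF(length_scale=0.5)).fit(X, y)

with open('restart_1.pcl', 'wb') as fd:
    pickle.dump(gp, fd)
with open('restart_1.pcl', 'rb') as fd:
    gp_loaded = pickle.load(fd)
mean, std = gp_loaded.predict(X, return_std=True)  # same call as in ls_error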
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

"""Tests for individual operators"""
from __future__ import absolute_import
import unittest
import numpy as np
import numpy.testing as npt
from onnx import helper
from onnx_mxnet import backend as mxnet_backend


class TestLayers(unittest.TestCase):
    """Tests for different layers comparing output with numpy operators.
    [WIP] More tests coming soon!"""

    def _random_array(self, shape):
        """Generate random array according to input shape"""
        return np.random.ranf(shape).astype("float32")

    def test_abs(self):
        """Test for abs operator"""
        node_def = helper.make_node("Abs", ["ip1"], ["ip2"])
        ip1 = self._random_array([1, 1000])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.abs(ip1))

    def test_add(self):
        """Test for add operator with/without broadcasting"""
        node_def = helper.make_node("Add", ["ip1", "ip2"], ["op1"], broadcast=1)
        ip1 = self._random_array([1, 1, 5, 5])
        ip2 = self._random_array([5])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.add(ip1, ip2))

        node_def = helper.make_node("Add", ["ip1", "ip2"], ["op1"])
        ip1 = self._random_array([1, 16, 16])
        ip2 = self._random_array([1, 16, 16])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.add(ip1, ip2))

    def test_sum(self):
        """Test for sum operator"""
        node_def = helper.make_node("Sum", ["ip1", "ip2"], ["op1"])
        ip1 = self._random_array([1, 1, 16, 16])
        ip2 = self._random_array([1, 1, 16, 16])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.add(ip1, ip2))

    def test_sub(self):
        """Test for sub operator with/without broadcasting"""
        node_def = helper.make_node("Sub", ["ip1", "ip2"], ["op1"], broadcast=1)
        ip1 = self._random_array([1, 1, 5, 5])
        ip2 = self._random_array([5])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.subtract(ip1, ip2))

        node_def = helper.make_node("Sub", ["ip1", "ip2"], ["op1"])
        ip1 = self._random_array([1, 1, 16, 16])
        ip2 = self._random_array([1, 1, 16, 16])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.subtract(ip1, ip2))

    def test_mul(self):
        """Test for mul operator with/without broadcasting"""
        node_def = helper.make_node("Mul", ["ip1", "ip2"], ["op1"], broadcast=1)
        ip1 = self._random_array([1, 1, 5, 5])
        ip2 = self._random_array([5])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.multiply(ip1, ip2))

        node_def = helper.make_node("Mul", ["ip1", "ip2"], ["op1"])
        ip1 = self._random_array([1, 1, 16, 16])
        ip2 = self._random_array([1, 1, 16, 16])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.multiply(ip1, ip2))

    def test_div(self):
        """Test for div operator with/without broadcasting"""
        node_def = helper.make_node("Div", ["ip1", "ip2"], ["op1"], broadcast=1)
        ip1 = self._random_array([1, 1, 5, 5])
        ip2 = self._random_array([5])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.divide(ip1, ip2))

        node_def = helper.make_node("Div", ["ip1", "ip2"], ["op1"])
        ip1 = self._random_array([1, 1, 16, 16])
        ip2 = self._random_array([1, 1, 16, 16])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.divide(ip1, ip2))

    def test_relu(self):
        """Test for relu operator"""
        node_def = helper.make_node("Relu", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 256])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.maximum(ip1, 0))

    def test_neg(self):
        """Test for neg operator"""
        node_def = helper.make_node("Neg", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 1000])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.negative(ip1))

    def test_reciprocal(self):
        """Test for reciprocal operator"""
        node_def = helper.make_node("Reciprocal", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 1000])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.reciprocal(ip1))

    def test_floor(self):
        """Test for floor operator"""
        node_def = helper.make_node("Floor", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 1000])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.floor(ip1))

    def test_ceil(self):
        """Test for ceil operator"""
        node_def = helper.make_node("Ceil", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 1000])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.ceil(ip1))

    def test_sqrt(self):
        """Test for sqrt operator"""
        node_def = helper.make_node("Sqrt", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 1000])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.sqrt(ip1))

    def test_leaky_relu(self):
        """Test for LeakyRelu operator"""
        node_def = helper.make_node("LeakyRelu", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0][0]
        # default slope in leakyrelu is 0.25
        numpy_output = [x if x > 0 else x*0.25 for x in ip1[0]]
        npt.assert_almost_equal(output, numpy_output)

    def test_elu(self):
        """Test for elu operator"""
        node_def = helper.make_node("Elu", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0][0]
        # default slope in elu is 0.25
        numpy_output = [x if x > 0 else (np.exp(x)-1)*0.25 for x in ip1[0]]
        npt.assert_almost_equal(output, numpy_output)

    def test_exp(self):
        """Test for exp operator"""
        node_def = helper.make_node("Exp", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.exp(ip1))

    def test_log(self):
        """Test for log operator"""
        node_def = helper.make_node("Log", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.log(ip1))

    def test_tanh(self):
        """Test for tanh operator"""
        node_def = helper.make_node("Tanh", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.tanh(ip1))

    def test_pow(self):
        """Test for pow operator"""
        node_def = helper.make_node("Pow", ["ip1", "ip2"], ["op1"])
        ip1 = self._random_array([1, 10])
        ip2 = self._random_array([1, 10])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.power(ip1, ip2))

    def test_sigmoid(self):
        """Test for sigmoid operator"""
        node_def = helper.make_node("Sigmoid", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 10])
        output = mxnet_backend.run_node(node_def, [ip1])
        np_output = [1/(1+np.exp(-ip1))]
        npt.assert_almost_equal(output, np_output)

    def test_maximum(self):
        """Test for maximum operator"""
        node_def = helper.make_node("Max", ["ip1", "ip2"], ["op1"])
        ip1 = self._random_array([1, 10])
        ip2 = self._random_array([1, 10])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.maximum(ip1, ip2))

    def test_minimum(self):
        """Test for minimum operator"""
        node_def = helper.make_node("Min", ["ip1", "ip2"], ["op1"])
        ip1 = self._random_array([1, 10])
        ip2 = self._random_array([1, 10])
        output = mxnet_backend.run_node(node_def, [ip1, ip2])[0]
        npt.assert_almost_equal(output, np.minimum(ip1, ip2))

    def test_softmax(self):
        """Test for softmax operator"""
        node_def = helper.make_node("Softmax", ["ip1"], ["op1"])
        ip1 = self._random_array([1, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        exp_score = np.exp(ip1)
        numpy_op = exp_score / exp_score.sum(0)
        npt.assert_almost_equal(output, numpy_op)

    def test_reduce_max(self):
        """Test for ReduceMax operator"""
        node_def = helper.make_node("ReduceMax", ["ip1"], ["op1"], axes=[1, 0], keepdims=1)
        ip1 = self._random_array([3, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        numpy_op = np.max(ip1, axis=(1, 0), keepdims=True)
        npt.assert_almost_equal(output, numpy_op)

    def test_reduce_min(self):
        """Test for ReduceMin operator"""
        node_def = helper.make_node("ReduceMin", ["ip1"], ["op1"], axes=[1, 0], keepdims=1)
        ip1 = self._random_array([3, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        numpy_op = np.min(ip1, axis=(1, 0), keepdims=True)
        npt.assert_almost_equal(output, numpy_op)

    def test_reduce_sum(self):
        """Test for ReduceSum operator"""
        node_def = helper.make_node("ReduceSum", ["ip1"], ["op1"], axes=[1, 0], keepdims=1)
        ip1 = self._random_array([3, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        numpy_op = np.sum(ip1, axis=(1, 0), keepdims=True)
        npt.assert_almost_equal(output, numpy_op, decimal=5)

    def test_reduce_mean(self):
        """Test for ReduceMean operator"""
        node_def = helper.make_node("ReduceMean", ["ip1"], ["op1"], axes=[1, 0], keepdims=1)
        ip1 = self._random_array([3, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        numpy_op = np.mean(ip1, axis=(1, 0), keepdims=True)
        npt.assert_almost_equal(output, numpy_op, decimal=5)

    def test_reduce_prod(self):
        """Test for ReduceProd operator"""
        node_def = helper.make_node("ReduceProd", ["ip1"], ["op1"], axes=[1, 0], keepdims=1)
        ip1 = self._random_array([3, 10])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        numpy_op = np.prod(ip1, axis=(1, 0), keepdims=True)
        npt.assert_almost_equal(output, numpy_op, decimal=5)

    def test_squeeze(self):
        """Test for squeeze operator"""
        node_def = helper.make_node("Squeeze", ["ip1"], ["op1"], axes=[1, 3])
        ip1 = self._random_array([3, 1, 2, 1, 4])
        output = mxnet_backend.run_node(node_def, [ip1])[0]
        npt.assert_almost_equal(output, np.squeeze(ip1, axis=[1, 3]))


if __name__ == '__main__':
    unittest.main()
[ "numpy.prod", "numpy.sqrt", "numpy.log", "unittest.main", "numpy.divide", "numpy.mean", "numpy.multiply", "numpy.tanh", "numpy.subtract", "numpy.max", "numpy.exp", "numpy.testing.assert_almost_equal", "numpy.min", "numpy.maximum", "numpy.abs", "numpy.ceil", "onnx.helper.make_node", ...
[((11776, 11791), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11789, 11791), False, 'import unittest\n'), ((1161, 1202), 'onnx.helper.make_node', 'helper.make_node', (['"""Abs"""', "['ip1']", "['ip2']"], {}), "('Abs', ['ip1'], ['ip2'])\n", (1177, 1202), False, 'from onnx import helper\n'), ((1466, 1527), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['ip1', 'ip2']", "['op1']"], {'broadcast': '(1)'}), "('Add', ['ip1', 'ip2'], ['op1'], broadcast=1)\n", (1482, 1527), False, 'from onnx import helper\n'), ((1756, 1804), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['ip1', 'ip2']", "['op1']"], {}), "('Add', ['ip1', 'ip2'], ['op1'])\n", (1772, 1804), False, 'from onnx import helper\n'), ((2100, 2148), 'onnx.helper.make_node', 'helper.make_node', (['"""Sum"""', "['ip1', 'ip2']", "['op1']"], {}), "('Sum', ['ip1', 'ip2'], ['op1'])\n", (2116, 2148), False, 'from onnx import helper\n'), ((2476, 2537), 'onnx.helper.make_node', 'helper.make_node', (['"""Sub"""', "['ip1', 'ip2']", "['op1']"], {'broadcast': '(1)'}), "('Sub', ['ip1', 'ip2'], ['op1'], broadcast=1)\n", (2492, 2537), False, 'from onnx import helper\n'), ((2771, 2819), 'onnx.helper.make_node', 'helper.make_node', (['"""Sub"""', "['ip1', 'ip2']", "['op1']"], {}), "('Sub', ['ip1', 'ip2'], ['op1'])\n", (2787, 2819), False, 'from onnx import helper\n'), ((3152, 3213), 'onnx.helper.make_node', 'helper.make_node', (['"""Mul"""', "['ip1', 'ip2']", "['op1']"], {'broadcast': '(1)'}), "('Mul', ['ip1', 'ip2'], ['op1'], broadcast=1)\n", (3168, 3213), False, 'from onnx import helper\n'), ((3447, 3495), 'onnx.helper.make_node', 'helper.make_node', (['"""Mul"""', "['ip1', 'ip2']", "['op1']"], {}), "('Mul', ['ip1', 'ip2'], ['op1'])\n", (3463, 3495), False, 'from onnx import helper\n'), ((3828, 3889), 'onnx.helper.make_node', 'helper.make_node', (['"""Div"""', "['ip1', 'ip2']", "['op1']"], {'broadcast': '(1)'}), "('Div', ['ip1', 'ip2'], ['op1'], broadcast=1)\n", (3844, 3889), False, 'from onnx import helper\n'), ((4121, 4169), 'onnx.helper.make_node', 'helper.make_node', (['"""Div"""', "['ip1', 'ip2']", "['op1']"], {}), "('Div', ['ip1', 'ip2'], ['op1'])\n", (4137, 4169), False, 'from onnx import helper\n'), ((4476, 4518), 'onnx.helper.make_node', 'helper.make_node', (['"""Relu"""', "['ip1']", "['op1']"], {}), "('Relu', ['ip1'], ['op1'])\n", (4492, 4518), False, 'from onnx import helper\n'), ((4762, 4803), 'onnx.helper.make_node', 'helper.make_node', (['"""Neg"""', "['ip1']", "['op1']"], {}), "('Neg', ['ip1'], ['op1'])\n", (4778, 4803), False, 'from onnx import helper\n'), ((5060, 5108), 'onnx.helper.make_node', 'helper.make_node', (['"""Reciprocal"""', "['ip1']", "['op1']"], {}), "('Reciprocal', ['ip1'], ['op1'])\n", (5076, 5108), False, 'from onnx import helper\n'), ((5357, 5400), 'onnx.helper.make_node', 'helper.make_node', (['"""Floor"""', "['ip1']", "['op1']"], {}), "('Floor', ['ip1'], ['op1'])\n", (5373, 5400), False, 'from onnx import helper\n'), ((5642, 5684), 'onnx.helper.make_node', 'helper.make_node', (['"""Ceil"""', "['ip1']", "['op1']"], {}), "('Ceil', ['ip1'], ['op1'])\n", (5658, 5684), False, 'from onnx import helper\n'), ((5925, 5967), 'onnx.helper.make_node', 'helper.make_node', (['"""Sqrt"""', "['ip1']", "['op1']"], {}), "('Sqrt', ['ip1'], ['op1'])\n", (5941, 5967), False, 'from onnx import helper\n'), ((6219, 6266), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['ip1']", "['op1']"], {}), "('LeakyRelu', ['ip1'], ['op1'])\n", (6235, 6266), False, 'from onnx import helper\n'), ((6489, 
6534), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'numpy_output'], {}), '(output, numpy_output)\n', (6512, 6534), True, 'import numpy.testing as npt\n'), ((6615, 6656), 'onnx.helper.make_node', 'helper.make_node', (['"""Elu"""', "['ip1']", "['op1']"], {}), "('Elu', ['ip1'], ['op1'])\n", (6631, 6656), False, 'from onnx import helper\n'), ((6885, 6930), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'numpy_output'], {}), '(output, numpy_output)\n', (6908, 6930), True, 'import numpy.testing as npt\n'), ((7011, 7052), 'onnx.helper.make_node', 'helper.make_node', (['"""Exp"""', "['ip1']", "['op1']"], {}), "('Exp', ['ip1'], ['op1'])\n", (7027, 7052), False, 'from onnx import helper\n'), ((7288, 7329), 'onnx.helper.make_node', 'helper.make_node', (['"""Log"""', "['ip1']", "['op1']"], {}), "('Log', ['ip1'], ['op1'])\n", (7304, 7329), False, 'from onnx import helper\n'), ((7567, 7609), 'onnx.helper.make_node', 'helper.make_node', (['"""Tanh"""', "['ip1']", "['op1']"], {}), "('Tanh', ['ip1'], ['op1'])\n", (7583, 7609), False, 'from onnx import helper\n'), ((7846, 7894), 'onnx.helper.make_node', 'helper.make_node', (['"""Pow"""', "['ip1', 'ip2']", "['op1']"], {}), "('Pow', ['ip1', 'ip2'], ['op1'])\n", (7862, 7894), False, 'from onnx import helper\n'), ((8192, 8237), 'onnx.helper.make_node', 'helper.make_node', (['"""Sigmoid"""', "['ip1']", "['op1']"], {}), "('Sigmoid', ['ip1'], ['op1'])\n", (8208, 8237), False, 'from onnx import helper\n'), ((8297, 8336), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (8319, 8336), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((8386, 8428), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'np_output'], {}), '(output, np_output)\n', (8409, 8428), True, 'import numpy.testing as npt\n'), ((8517, 8565), 'onnx.helper.make_node', 'helper.make_node', (['"""Max"""', "['ip1', 'ip2']", "['op1']"], {}), "('Max', ['ip1', 'ip2'], ['op1'])\n", (8533, 8565), False, 'from onnx import helper\n'), ((8865, 8913), 'onnx.helper.make_node', 'helper.make_node', (['"""Min"""', "['ip1', 'ip2']", "['op1']"], {}), "('Min', ['ip1', 'ip2'], ['op1'])\n", (8881, 8913), False, 'from onnx import helper\n'), ((9213, 9258), 'onnx.helper.make_node', 'helper.make_node', (['"""Softmax"""', "['ip1']", "['op1']"], {}), "('Softmax', ['ip1'], ['op1'])\n", (9229, 9258), False, 'from onnx import helper\n'), ((9381, 9392), 'numpy.exp', 'np.exp', (['ip1'], {}), '(ip1)\n', (9387, 9392), True, 'import numpy as np\n'), ((9449, 9490), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'numpy_op'], {}), '(output, numpy_op)\n', (9472, 9490), True, 'import numpy.testing as npt\n'), ((9584, 9656), 'onnx.helper.make_node', 'helper.make_node', (['"""ReduceMax"""', "['ip1']", "['op1']"], {'axes': '[1, 0]', 'keepdims': '(1)'}), "('ReduceMax', ['ip1'], ['op1'], axes=[1, 0], keepdims=1)\n", (9600, 9656), False, 'from onnx import helper\n'), ((9778, 9817), 'numpy.max', 'np.max', (['ip1'], {'axis': '(1, 0)', 'keepdims': '(True)'}), '(ip1, axis=(1, 0), keepdims=True)\n', (9784, 9817), True, 'import numpy as np\n'), ((9826, 9867), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'numpy_op'], {}), '(output, numpy_op)\n', (9849, 9867), True, 'import numpy.testing as npt\n'), ((9961, 10033), 'onnx.helper.make_node', 'helper.make_node', (['"""ReduceMin"""', "['ip1']", "['op1']"], {'axes': '[1, 0]', 'keepdims': '(1)'}), 
"('ReduceMin', ['ip1'], ['op1'], axes=[1, 0], keepdims=1)\n", (9977, 10033), False, 'from onnx import helper\n'), ((10155, 10194), 'numpy.min', 'np.min', (['ip1'], {'axis': '(1, 0)', 'keepdims': '(True)'}), '(ip1, axis=(1, 0), keepdims=True)\n', (10161, 10194), True, 'import numpy as np\n'), ((10203, 10244), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'numpy_op'], {}), '(output, numpy_op)\n', (10226, 10244), True, 'import numpy.testing as npt\n'), ((10338, 10410), 'onnx.helper.make_node', 'helper.make_node', (['"""ReduceSum"""', "['ip1']", "['op1']"], {'axes': '[1, 0]', 'keepdims': '(1)'}), "('ReduceSum', ['ip1'], ['op1'], axes=[1, 0], keepdims=1)\n", (10354, 10410), False, 'from onnx import helper\n'), ((10532, 10571), 'numpy.sum', 'np.sum', (['ip1'], {'axis': '(1, 0)', 'keepdims': '(True)'}), '(ip1, axis=(1, 0), keepdims=True)\n', (10538, 10571), True, 'import numpy as np\n'), ((10580, 10632), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'numpy_op'], {'decimal': '(5)'}), '(output, numpy_op, decimal=5)\n', (10603, 10632), True, 'import numpy.testing as npt\n'), ((10728, 10801), 'onnx.helper.make_node', 'helper.make_node', (['"""ReduceMean"""', "['ip1']", "['op1']"], {'axes': '[1, 0]', 'keepdims': '(1)'}), "('ReduceMean', ['ip1'], ['op1'], axes=[1, 0], keepdims=1)\n", (10744, 10801), False, 'from onnx import helper\n'), ((10923, 10963), 'numpy.mean', 'np.mean', (['ip1'], {'axis': '(1, 0)', 'keepdims': '(True)'}), '(ip1, axis=(1, 0), keepdims=True)\n', (10930, 10963), True, 'import numpy as np\n'), ((10972, 11024), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'numpy_op'], {'decimal': '(5)'}), '(output, numpy_op, decimal=5)\n', (10995, 11024), True, 'import numpy.testing as npt\n'), ((11120, 11193), 'onnx.helper.make_node', 'helper.make_node', (['"""ReduceProd"""', "['ip1']", "['op1']"], {'axes': '[1, 0]', 'keepdims': '(1)'}), "('ReduceProd', ['ip1'], ['op1'], axes=[1, 0], keepdims=1)\n", (11136, 11193), False, 'from onnx import helper\n'), ((11315, 11355), 'numpy.prod', 'np.prod', (['ip1'], {'axis': '(1, 0)', 'keepdims': '(True)'}), '(ip1, axis=(1, 0), keepdims=True)\n', (11322, 11355), True, 'import numpy as np\n'), ((11364, 11416), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'numpy_op'], {'decimal': '(5)'}), '(output, numpy_op, decimal=5)\n', (11387, 11416), True, 'import numpy.testing as npt\n'), ((11505, 11563), 'onnx.helper.make_node', 'helper.make_node', (['"""Squeeze"""', "['ip1']", "['op1']"], {'axes': '[1, 3]'}), "('Squeeze', ['ip1'], ['op1'], axes=[1, 3])\n", (11521, 11563), False, 'from onnx import helper\n'), ((1264, 1303), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (1286, 1303), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((1347, 1358), 'numpy.abs', 'np.abs', (['ip1'], {}), '(ip1)\n', (1353, 1358), True, 'import numpy as np\n'), ((1630, 1674), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (1652, 1674), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((1718, 1734), 'numpy.add', 'np.add', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (1724, 1734), True, 'import numpy as np\n'), ((1914, 1958), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (1936, 1958), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((2002, 2018), 
'numpy.add', 'np.add', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (2008, 2018), True, 'import numpy as np\n'), ((2264, 2308), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (2286, 2308), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((2352, 2368), 'numpy.add', 'np.add', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (2358, 2368), True, 'import numpy as np\n'), ((2640, 2684), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (2662, 2684), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((2728, 2749), 'numpy.subtract', 'np.subtract', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (2739, 2749), True, 'import numpy as np\n'), ((2935, 2979), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (2957, 2979), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((3023, 3044), 'numpy.subtract', 'np.subtract', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (3034, 3044), True, 'import numpy as np\n'), ((3316, 3360), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (3338, 3360), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((3404, 3425), 'numpy.multiply', 'np.multiply', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (3415, 3425), True, 'import numpy as np\n'), ((3611, 3655), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (3633, 3655), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((3699, 3720), 'numpy.multiply', 'np.multiply', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (3710, 3720), True, 'import numpy as np\n'), ((3992, 4036), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (4014, 4036), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((4080, 4099), 'numpy.divide', 'np.divide', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (4089, 4099), True, 'import numpy as np\n'), ((4285, 4329), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (4307, 4329), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((4373, 4392), 'numpy.divide', 'np.divide', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (4382, 4392), True, 'import numpy as np\n'), ((4579, 4618), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (4601, 4618), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((4662, 4680), 'numpy.maximum', 'np.maximum', (['ip1', '(0)'], {}), '(ip1, 0)\n', (4672, 4680), True, 'import numpy as np\n'), ((4865, 4904), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (4887, 4904), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((4948, 4964), 'numpy.negative', 'np.negative', (['ip1'], {}), '(ip1)\n', (4959, 4964), True, 'import numpy as np\n'), ((5170, 5209), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (5192, 5209), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((5253, 5271), 'numpy.reciprocal', 'np.reciprocal', (['ip1'], {}), '(ip1)\n', (5266, 5271), True, 'import numpy as np\n'), ((5462, 5501), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (5484, 
5501), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((5545, 5558), 'numpy.floor', 'np.floor', (['ip1'], {}), '(ip1)\n', (5553, 5558), True, 'import numpy as np\n'), ((5746, 5785), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (5768, 5785), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((5829, 5841), 'numpy.ceil', 'np.ceil', (['ip1'], {}), '(ip1)\n', (5836, 5841), True, 'import numpy as np\n'), ((6029, 6068), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (6051, 6068), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((6112, 6124), 'numpy.sqrt', 'np.sqrt', (['ip1'], {}), '(ip1)\n', (6119, 6124), True, 'import numpy as np\n'), ((7112, 7151), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (7134, 7151), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((7195, 7206), 'numpy.exp', 'np.exp', (['ip1'], {}), '(ip1)\n', (7201, 7206), True, 'import numpy as np\n'), ((7389, 7428), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (7411, 7428), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((7472, 7483), 'numpy.log', 'np.log', (['ip1'], {}), '(ip1)\n', (7478, 7483), True, 'import numpy as np\n'), ((7669, 7708), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (7691, 7708), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((7752, 7764), 'numpy.tanh', 'np.tanh', (['ip1'], {}), '(ip1)\n', (7759, 7764), True, 'import numpy as np\n'), ((7996, 8040), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (8018, 8040), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((8084, 8102), 'numpy.power', 'np.power', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (8092, 8102), True, 'import numpy as np\n'), ((8667, 8711), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (8689, 8711), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((8755, 8775), 'numpy.maximum', 'np.maximum', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (8765, 8775), True, 'import numpy as np\n'), ((9015, 9059), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1, ip2]'], {}), '(node_def, [ip1, ip2])\n', (9037, 9059), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((9103, 9123), 'numpy.minimum', 'np.minimum', (['ip1', 'ip2'], {}), '(ip1, ip2)\n', (9113, 9123), True, 'import numpy as np\n'), ((9318, 9357), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (9340, 9357), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((9716, 9755), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (9738, 9755), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((10093, 10132), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (10115, 10132), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((10470, 10509), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (10492, 10509), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((10861, 10900), 
'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (10883, 10900), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((11253, 11292), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (11275, 11292), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((11631, 11670), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (11653, 11670), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((11714, 11742), 'numpy.squeeze', 'np.squeeze', (['ip1'], {'axis': '[1, 3]'}), '(ip1, axis=[1, 3])\n', (11724, 11742), True, 'import numpy as np\n'), ((1041, 1062), 'numpy.random.ranf', 'np.random.ranf', (['shape'], {}), '(shape)\n', (1055, 1062), True, 'import numpy as np\n'), ((6326, 6365), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (6348, 6365), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((6716, 6755), 'onnx_mxnet.backend.run_node', 'mxnet_backend.run_node', (['node_def', '[ip1]'], {}), '(node_def, [ip1])\n', (6738, 6755), True, 'from onnx_mxnet import backend as mxnet_backend\n'), ((8363, 8375), 'numpy.exp', 'np.exp', (['(-ip1)'], {}), '(-ip1)\n', (8369, 8375), True, 'import numpy as np\n'), ((6842, 6851), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (6848, 6851), True, 'import numpy as np\n')]
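For reference, a minimal standalone sketch of the run_node round-trip the suite above exercises; it assumes the onnx and onnx_mxnet packages are importable, and the input shape and the -0.5 shift are illustrative choices, not values taken from the tests.

import numpy as np
import numpy.testing as npt
from onnx import helper
from onnx_mxnet import backend as mxnet_backend

# Build a single-operator ONNX node and run it through the MXNet backend.
node_def = helper.make_node("Relu", ["ip1"], ["op1"])
ip1 = np.random.ranf([1, 256]) - 0.5  # shift so some entries are negative
output = mxnet_backend.run_node(node_def, [ip1])[0]
npt.assert_almost_equal(output, np.maximum(ip1, 0))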
""" This code implements the Growing Neural Gas algorithm that creates a graph that learns the topologies in the given input data. See e.g. followning documents references: https://papers.nips.cc/paper/893-a-growing-neural-gas-network-learns-topologies.pdf http://www.booru.net/download/MasterThesisProj.pdf """ from FeatureGraph.graph import Graph import numpy as np class GrowingNeuralGas: def __init__(self, feature_dim : int): self.feature_dim = feature_dim # GNG vertex counter self.n_vertex = 0 # Unique vertex naming counter self.name_counter = 0 # Init stuff self.__init_graph() def __get_unique_name(self, prefix : str = 'N') -> str: """ Generates unique names for GNG graph elements """ name = prefix + str(self.name_counter) self.name_counter += 1 return name def __init_graph(self) -> None: """ Initialize the GNG graph database """ self.graph = Graph() def __add_vertex(self, feature_vector : np.ndarray) -> str: """ Add new vertex to the GNG graph. Returns the vertex key. """ key = self.__get_unique_name() self.graph.add_vertex(key=key) self.graph.set_vertex_param(key=key, feature_vector=feature_vector, error=0) self.n_vertex += 1 return key def __add_edge(self, key_a : str, key_b : str) -> None: """ Add new edge to the GNG graph and initialize the GNG specific parameters """ self.graph.add_edge(key_a=key_a, key_b=key_b, bidirectional=True) self.graph.set_edge_param(key_a=key_a, key_b=key_b, age=0) def __delete_vertex(self, key : str) -> None: """ Delete vertex and all edges associated to it. """ self.graph.delete_vertex(key) self.n_vertex -= 1 def __delete_edge(self, key_a : str, key_b : str) -> None: self.graph.delete_edge(key_a, key_b, bidirectional=True) def __get_vertex_param(self, key, param_key) -> any: value = self.graph.get_vertex_param(key=key, param_key=param_key) return value def __get_edge_param(self, key_a, key_b, param_key) -> any: value = self.graph.get_edge_param(key_a=key_a, key_b=key_b, param_key=param_key) return value def __set_vertex_param(self, key, **kwargs) -> None: self.graph.set_vertex_param(key=key, **kwargs) def __set_edge_param(self, key_a, key_b, **kwargs) -> None: self.graph.set_edge_param(key_a=key_a, key_b=key_b, **kwargs) def __find_nearest_vertices(self, ref_vect : np.ndarray, n_vertex : int = 2) -> list: # Get all vertex keys vertices = self.graph.get_vertices() # Calculate all distances distances = [] for vertex in vertices: vertex_vect = self.__get_vertex_param(key=vertex, param_key='feature_vector') dist = np.linalg.norm(vertex_vect - ref_vect) distances.append([vertex, dist]) distances = sorted(distances, key=lambda x: x[1]) return distances[:n_vertex] def __get_neighbor_vertices(self, key) -> list: neighbors = self.graph.get_edges(key)['out'] return neighbors def __delete_expired_edges(self, age_limit : int) -> None: vertices = self.graph.get_vertices() for key_a in vertices: neighbors = self.graph.get_edges(key_a)['out'] for key_b in neighbors: edge_age = self.__get_edge_param(key_a, key_b, 'age') if edge_age > age_limit: self.graph.delete_edge(key_a, key_b, bidirectional=True) def __delete_unconnected_vertices(self) -> None: vertices = self.graph.get_vertices() for key in vertices: neighbors = self.graph.get_edges(key)['out'] if len(neighbors) == 0: self.graph.delete_vertex(key) self.n_vertex -= 1 def __find_largest_error_vertex(self) -> str: vertices = self.graph.get_vertices() max_err_val = 0 max_err_key = '' for key in vertices: error = self.__get_vertex_param(key=key, param_key='error') if error > max_err_val: max_err_val = error max_err_key 
= key return [max_err_key, max_err_val] def __find_largest_error_neighbor(self, key) -> str: neighbors = self.graph.get_edges(key)['out'] max_err_val = 0 max_err_key = '' for key in neighbors: error = self.__get_vertex_param(key=key, param_key='error') if error > max_err_val: max_err_val = error max_err_key = key return [max_err_key, max_err_val] def __scale_vertex_error_values(self, attenuation : float) -> None: vertices = self.graph.get_vertices() for key in vertices: error = self.__get_vertex_param(key, 'error') error -= attenuation * error self.__set_vertex_param(key, error=error) def get_graph(self): return self.graph def fit(self, dataset : np.ndarray, iterations : int, max_vertex : int, winner_upd_coeff : float = 0.05, neighbor_upd_coeff : float = 0.0005, edge_age_limit : int = 100, vertex_insert_interval : int = 100, vertex_insert_error_scaling : float = 0.5, error_attenuation : float = 0.0005, plot_interval : int = 100, plot_function : any = None) -> None : n_pts = dataset.shape[0] for iteration in range(1, iterations + 1): if self.n_vertex < 2: # Initialize graph # Add the two starting vertices and edge between them key_a = self.__add_vertex(feature_vector=np.random.rand(self.feature_dim)) key_b = self.__add_vertex(feature_vector=np.random.rand(self.feature_dim)) self.__add_edge(key_a, key_b) # Plot stuff if (plot_interval is not None) and \ (iteration % plot_interval == 0) and \ (plot_function is not None): plot_function(dataset, self.graph, iteration) # Get random data point to be used in GNG graph fitting. idx = np.random.randint(n_pts) data_vect = dataset[idx] # Find two nearest vertices from the GNG graph nearest = self.__find_nearest_vertices(data_vect, n_vertex=2) # Update the winner vertex error value vertex_a_key = nearest[0][0] vertex_a_dist = nearest[0][1] error = self.__get_vertex_param(key=vertex_a_key, param_key='error') error += vertex_a_dist ** 2 self.__set_vertex_param(key=vertex_a_key, error=error) # Update winner vertex feature vector vertex_vect = self.__get_vertex_param(key=vertex_a_key, param_key='feature_vector') vertex_vect = vertex_vect + winner_upd_coeff * (data_vect - vertex_vect) self.__set_vertex_param(key=vertex_a_key, feature_vector=vertex_vect) # Update winner's neighbor vertices neighbor_vertices = self.__get_neighbor_vertices(vertex_a_key) for vertex_b_key in neighbor_vertices: # Update vectors vertex_vect = self.__get_vertex_param(key=vertex_b_key, param_key='feature_vector') vertex_vect = vertex_vect + neighbor_upd_coeff * (data_vect - vertex_vect) self.__set_vertex_param(key=vertex_b_key, feature_vector=vertex_vect) # Update edge ages edge_age = self.__get_edge_param(vertex_a_key, vertex_b_key, 'age') edge_age += 1 self.__set_edge_param(vertex_a_key, vertex_b_key, age=edge_age) # Update the second nearest vertex vertex_b_key = nearest[1][0] vertex_b_dist = nearest[1][1] if vertex_b_key not in neighbor_vertices: # Add edge if it doesn't exist yet self.__add_edge(vertex_a_key, vertex_b_key) else: # Set edge age to zero self.__set_edge_param(vertex_a_key, vertex_b_key, age=0) #pass # Delete too old edges self.__delete_expired_edges(edge_age_limit) # Delete vertices with no edges self.__delete_unconnected_vertices() # Add new vertex if (iteration % vertex_insert_interval == 0) and (self.n_vertex < max_vertex): # Get vertex with largest error value search_result = self.__find_largest_error_vertex() vertex_a_key = search_result[0] vertex_a_error = search_result[1] vertex_a_vect = self.__get_vertex_param(vertex_a_key, 'feature_vector') # Get the 
neighbor with largest error neighbor_result = self.__find_largest_error_neighbor(vertex_a_key) vertex_b_key = neighbor_result[0] vertex_b_error = neighbor_result[1] vertex_b_vect = self.__get_vertex_param(vertex_b_key, 'feature_vector') # Calculate new error values new_a_error = vertex_a_error * vertex_insert_error_scaling self.__set_vertex_param(vertex_a_key, error=new_a_error) new_b_error = vertex_b_error * vertex_insert_error_scaling self.__set_vertex_param(vertex_b_key, error=new_b_error) # Setup new vertex new_vertex_vector = (vertex_a_vect + vertex_b_vect) / 2 new_vertex_error = new_a_error new_vertex_key = self.__add_vertex(new_vertex_vector) self.__set_vertex_param(new_vertex_key, error=new_vertex_error) # Rearrange the edges self.__delete_edge(vertex_a_key, vertex_b_key) self.__add_edge(new_vertex_key, vertex_a_key) self.__add_edge(new_vertex_key, vertex_b_key) # Reduce all error values self.__scale_vertex_error_values(error_attenuation)
[ "numpy.random.rand", "numpy.random.randint", "FeatureGraph.graph.Graph", "numpy.linalg.norm" ]
[((1105, 1112), 'FeatureGraph.graph.Graph', 'Graph', ([], {}), '()\n', (1110, 1112), False, 'from FeatureGraph.graph import Graph\n'), ((3182, 3220), 'numpy.linalg.norm', 'np.linalg.norm', (['(vertex_vect - ref_vect)'], {}), '(vertex_vect - ref_vect)\n', (3196, 3220), True, 'import numpy as np\n'), ((6748, 6772), 'numpy.random.randint', 'np.random.randint', (['n_pts'], {}), '(n_pts)\n', (6765, 6772), True, 'import numpy as np\n'), ((6242, 6274), 'numpy.random.rand', 'np.random.rand', (['self.feature_dim'], {}), '(self.feature_dim)\n', (6256, 6274), True, 'import numpy as np\n'), ((6333, 6365), 'numpy.random.rand', 'np.random.rand', (['self.feature_dim'], {}), '(self.feature_dim)\n', (6347, 6365), True, 'import numpy as np\n')]
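A minimal usage sketch for the GrowingNeuralGas class above, assuming the FeatureGraph dependency is importable; the noisy-ring dataset and every hyperparameter value here are illustrative assumptions, not defaults from the module.

import numpy as np

# Sample a noisy unit circle as a toy 2-D topology to learn.
angles = np.random.rand(2000) * 2 * np.pi
dataset = np.stack([np.cos(angles), np.sin(angles)], axis=1)
dataset += np.random.normal(scale=0.05, size=dataset.shape)

gng = GrowingNeuralGas(feature_dim=2)
gng.fit(dataset=dataset, iterations=10000, max_vertex=50,
        plot_interval=None)  # disable the plotting callback
graph = gng.get_graph()  # the learned topology as a FeatureGraph Graph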
import os
import json
import numpy as np
import glob
from datetime import datetime
import shutil
from sklearn.model_selection import train_test_split

np.random.seed(41)

# 0 is reserved for the background class
classname_to_id = {"__background__": 0, "short": 1, "solder": 2, "solderball": 3}


class Lableme2CoCo:

    def __init__(self):
        self.images = []
        self.annotations = []
        self.categories = []
        self.img_id = 0
        self.ann_id = 0

    def save_coco_json(self, instance, save_path):
        json.dump(instance, open(save_path, 'w', encoding='utf-8'), ensure_ascii=False, indent=1)  # indent=2 renders more readably

    # Build the COCO structure from the labelme JSON files
    def to_coco(self, json_path_list):
        self._init_categories()
        for json_path in json_path_list:
            obj = self.read_jsonfile(json_path)
            self.images.append(self._image(obj, json_path))
            shapes = obj['shapes']
            for shape in shapes:
                annotation = self._annotation(shape)
                self.annotations.append(annotation)
                self.ann_id += 1
            self.img_id += 1
        instance = {}
        instance['info'] = 'spytensor created'
        instance['license'] = ['license']
        instance['images'] = self.images
        instance['annotations'] = self.annotations
        instance['categories'] = self.categories
        return instance

    # Build the categories field
    def _init_categories(self):
        for k, v in classname_to_id.items():
            category = {}
            category['id'] = v
            category['name'] = k
            self.categories.append(category)

    # Build the COCO "image" field
    def _image(self, obj, path):
        image = {}
        from labelme import utils
        img_x = utils.img_b64_to_arr(obj['imageData'])
        h, w = img_x.shape[:-1]
        image['height'] = h
        image['width'] = w
        image['id'] = self.img_id
        image['file_name'] = os.path.basename(path).replace(".json", ".jpg")
        return image

    # Build the COCO "annotation" field
    def _annotation(self, shape):
        label = shape['label']
        points = shape['points']
        annotation = {}
        annotation['id'] = self.ann_id
        annotation['image_id'] = self.img_id
        annotation['category_id'] = int(classname_to_id[label])
        # annotation['segmentation'] = [np.asarray(points,dtype=np.int).flatten().tolist()]
        annotation['segmentation'] = [list(map(int, self._get_seg(points)))]
        annotation['bbox'] = list(map(int, self._get_box(points)))
        annotation['iscrowd'] = 0
        annotation['area'] = 1.0
        return annotation

    # Read a JSON file and return the parsed object
    def read_jsonfile(self, path):
        with open(path, "r", encoding='utf-8') as f:
            return json.load(f)

    # COCO bbox format: [x1, y1, w, h]
    def _get_box(self, points):
        min_x = min_y = np.inf
        max_x = max_y = 0
        for x, y in points:
            min_x = min(min_x, x)
            min_y = min(min_y, y)
            max_x = max(max_x, x)
            max_y = max(max_y, y)
        return [min_x, min_y, max_x - min_x, max_y - min_y]

    # Segmentation format: [x1, y1, x2, y2, x3, y3, x4, y4]
    def _get_seg(self, points):
        x1 = points[1][0] if points[0][0] > points[1][0] else points[0][0]  # min_x
        y1 = points[1][1] if points[0][1] > points[1][1] else points[0][1]  # min_y
        x3 = points[0][0] if points[0][0] > points[1][0] else points[1][0]  # max_x
        y3 = points[0][1] if points[0][1] > points[1][1] else points[1][1]  # max_y
        x2, y2 = [x3, y1]
        x4, y4 = [x1, y3]
        return [x1, y1, x2, y2, x3, y3, x4, y4]


if __name__ == '__main__':
    print(os.curdir)
    labelme_path = ".\\rawdata\\labelme"
    saved_coco_path = ".\\convertedData"
    saved_time = format(datetime.now(), "%Y%m%d%H%M%S")
    print(str.format('labelme_path:{0}\nsaved_coco_path:{1}',
                     os.path.exists(labelme_path), os.path.exists(saved_coco_path)))
    # Create the output directories
    if not os.path.exists('%s/coco/annotations/' % saved_coco_path):
        os.makedirs('%s/coco/annotations/' % saved_coco_path)
    if not os.path.exists('%s/coco/images/train%s/' % (saved_coco_path, saved_time)):
        os.makedirs('%s/coco/images/train%s/' % (saved_coco_path, saved_time))
    if not os.path.exists('%s/coco/images/test%s/' % (saved_coco_path, saved_time)):
        os.makedirs('%s/coco/images/test%s/' % (saved_coco_path, saved_time))
    # Collect all JSON files under the labelme directory
    json_list_path = glob.glob(labelme_path + "/*.json")
    # Train/test split; val2019 and train2019 are not kept in separate
    # directories here -- all images live under the images directory
    train_path, val_path = train_test_split(json_list_path, test_size=0.2)
    print("train_n:", len(train_path), 'val_n:', len(val_path))
    # Convert the training set to COCO JSON format
    l2c_train = Lableme2CoCo()
    train_instance = l2c_train.to_coco(train_path)
    l2c_train.save_coco_json(train_instance,
                             '%s/coco/annotations/instances_train%s.json' % (saved_coco_path, saved_time))
    for file in train_path:
        shutil.copy(file.replace("json", "jpg"),
                    "%s/coco/images/train%s/" % (saved_coco_path, saved_time))
    for file in val_path:
        shutil.copy(file.replace("json", "jpg"),
                    "%s/coco/images/test%s/" % (saved_coco_path, saved_time))
    # Convert the validation set to COCO JSON format
    l2c_val = Lableme2CoCo()
    val_instance = l2c_val.to_coco(val_path)
    l2c_val.save_coco_json(val_instance,
                           '%s/coco/annotations/instances_test%s.json' % (saved_coco_path, saved_time))
[ "labelme.utils.img_b64_to_arr", "os.path.exists", "os.makedirs", "sklearn.model_selection.train_test_split", "datetime.datetime.now", "numpy.random.seed", "os.path.basename", "json.load", "glob.glob" ]
[((150, 168), 'numpy.random.seed', 'np.random.seed', (['(41)'], {}), '(41)\n', (164, 168), True, 'import numpy as np\n'), ((4431, 4466), 'glob.glob', 'glob.glob', (["(labelme_path + '/*.json')"], {}), "(labelme_path + '/*.json')\n", (4440, 4466), False, 'import glob\n'), ((4547, 4594), 'sklearn.model_selection.train_test_split', 'train_test_split', (['json_list_path'], {'test_size': '(0.2)'}), '(json_list_path, test_size=0.2)\n', (4563, 4594), False, 'from sklearn.model_selection import train_test_split\n'), ((1700, 1738), 'labelme.utils.img_b64_to_arr', 'utils.img_b64_to_arr', (["obj['imageData']"], {}), "(obj['imageData'])\n", (1720, 1738), False, 'from labelme import utils\n'), ((3755, 3769), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3767, 3769), False, 'from datetime import datetime\n'), ((3932, 3988), 'os.path.exists', 'os.path.exists', (["('%s/coco/annotations/' % saved_coco_path)"], {}), "('%s/coco/annotations/' % saved_coco_path)\n", (3946, 3988), False, 'import os\n'), ((3998, 4051), 'os.makedirs', 'os.makedirs', (["('%s/coco/annotations/' % saved_coco_path)"], {}), "('%s/coco/annotations/' % saved_coco_path)\n", (4009, 4051), False, 'import os\n'), ((4063, 4136), 'os.path.exists', 'os.path.exists', (["('%s/coco/images/train%s/' % (saved_coco_path, saved_time))"], {}), "('%s/coco/images/train%s/' % (saved_coco_path, saved_time))\n", (4077, 4136), False, 'import os\n'), ((4146, 4216), 'os.makedirs', 'os.makedirs', (["('%s/coco/images/train%s/' % (saved_coco_path, saved_time))"], {}), "('%s/coco/images/train%s/' % (saved_coco_path, saved_time))\n", (4157, 4216), False, 'import os\n'), ((4228, 4300), 'os.path.exists', 'os.path.exists', (["('%s/coco/images/test%s/' % (saved_coco_path, saved_time))"], {}), "('%s/coco/images/test%s/' % (saved_coco_path, saved_time))\n", (4242, 4300), False, 'import os\n'), ((4310, 4379), 'os.makedirs', 'os.makedirs', (["('%s/coco/images/test%s/' % (saved_coco_path, saved_time))"], {}), "('%s/coco/images/test%s/' % (saved_coco_path, saved_time))\n", (4321, 4379), False, 'import os\n'), ((2716, 2728), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2725, 2728), False, 'import json\n'), ((3847, 3875), 'os.path.exists', 'os.path.exists', (['labelme_path'], {}), '(labelme_path)\n', (3861, 3875), False, 'import os\n'), ((3876, 3907), 'os.path.exists', 'os.path.exists', (['saved_coco_path'], {}), '(saved_coco_path)\n', (3890, 3907), False, 'import os\n'), ((1889, 1911), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1905, 1911), False, 'import os\n')]
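A short sketch of driving the converter above directly, without the train/test split; it assumes the labelme package is installed (to_coco decodes embedded image data) and the paths are hypothetical placeholders.

import glob

json_files = glob.glob("./rawdata/labelme/*.json")  # placeholder path
converter = Lableme2CoCo()
instance = converter.to_coco(json_files)
converter.save_coco_json(instance, "./instances_all.json")  # placeholder path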
#!/usr/bin/env python3 # std import unittest # 3rd import numpy as np # ours from clusterking.util.testing import MyTestCase from clusterking.scan.wilsonscanner import WilsonScanner from clusterking.data.data import Data # noinspection PyUnusedLocal def simple_func(w, q): return q + 1 class TestWilsonScannerRun(MyTestCase): def setUp(self): self.s = WilsonScanner(scale=5, eft="WET", basis="flavio") self.s.set_spoints_equidist( { "CVL_bctaunutau": (-1, 1, 2), "CSL_bctaunutau": (-1, 1, 2), "CT_bctaunutau": (-1, 1, 2), } ) self.s.set_dfunction(simple_func, binning=[0, 1, 2], normalize=True) self.d = Data() def test_run(self): self.s.run(self.d).write() self.assertEqual(self.d.n, 8) self.assertEqual(self.d.nbins, 2) self.assertEqual(self.d.npars, 3) class TestWilsonScanner(MyTestCase): def test_spoints_equidist(self): s = WilsonScanner(scale=5, eft="WET", basis="flavio") s.set_spoints_equidist( { "CVL_bctaunutau": (-1, 1, 2), "CSL_bctaunutau": (-1, 1, 3), "CT_bctaunutau": (-1, 1, 4), } ) self.assertEqual(len(s.spoints), 2 * 3 * 4) def test_spoints_equidist_complex(self): s = WilsonScanner(scale=5, eft="WET", basis="flavio") s.set_spoints_equidist( {"CVL_bctaunutau": (0, 1, 2), "im_CVL_bctaunutau": (0, 1, 2)} ) self.assertEqual(len(s.spoints), 2 * 2) self.assertAllClose( s.spoints, np.array([[0.0], [1.0j], [1.0], [1.0 + 1.0j]]) ) def test_properties(self): s = WilsonScanner(scale=5, eft="WET", basis="flavio") self.assertEqual(s.scale, 5) self.assertEqual(s.eft, "WET") self.assertEqual(s.basis, "flavio") if __name__ == "__main__": unittest.main()
[ "unittest.main", "numpy.array", "clusterking.scan.wilsonscanner.WilsonScanner", "clusterking.data.data.Data" ]
[((1945, 1960), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1958, 1960), False, 'import unittest\n'), ((375, 424), 'clusterking.scan.wilsonscanner.WilsonScanner', 'WilsonScanner', ([], {'scale': '(5)', 'eft': '"""WET"""', 'basis': '"""flavio"""'}), "(scale=5, eft='WET', basis='flavio')\n", (388, 424), False, 'from clusterking.scan.wilsonscanner import WilsonScanner\n'), ((731, 737), 'clusterking.data.data.Data', 'Data', ([], {}), '()\n', (735, 737), False, 'from clusterking.data.data import Data\n'), ((1008, 1057), 'clusterking.scan.wilsonscanner.WilsonScanner', 'WilsonScanner', ([], {'scale': '(5)', 'eft': '"""WET"""', 'basis': '"""flavio"""'}), "(scale=5, eft='WET', basis='flavio')\n", (1021, 1057), False, 'from clusterking.scan.wilsonscanner import WilsonScanner\n'), ((1375, 1424), 'clusterking.scan.wilsonscanner.WilsonScanner', 'WilsonScanner', ([], {'scale': '(5)', 'eft': '"""WET"""', 'basis': '"""flavio"""'}), "(scale=5, eft='WET', basis='flavio')\n", (1388, 1424), False, 'from clusterking.scan.wilsonscanner import WilsonScanner\n'), ((1742, 1791), 'clusterking.scan.wilsonscanner.WilsonScanner', 'WilsonScanner', ([], {'scale': '(5)', 'eft': '"""WET"""', 'basis': '"""flavio"""'}), "(scale=5, eft='WET', basis='flavio')\n", (1755, 1791), False, 'from clusterking.scan.wilsonscanner import WilsonScanner\n'), ((1641, 1687), 'numpy.array', 'np.array', (['[[0.0], [1.0j], [1.0], [1.0 + 1.0j]]'], {}), '([[0.0], [1.0j], [1.0], [1.0 + 1.0j]])\n', (1649, 1687), True, 'import numpy as np\n')]
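A sketch of the same scan outside the test harness, assuming clusterking and its dependencies are installed; the toy distribution function mirrors simple_func from the tests above.

from clusterking.scan.wilsonscanner import WilsonScanner
from clusterking.data.data import Data

def toy_dfunction(w, q):  # same toy shape as in the tests above
    return q + 1

s = WilsonScanner(scale=5, eft="WET", basis="flavio")
s.set_spoints_equidist({"CVL_bctaunutau": (-1, 1, 2)})
s.set_dfunction(toy_dfunction, binning=[0, 1, 2], normalize=True)
d = Data()
s.run(d).write()  # d now holds 2 sample points with 2 bins each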
"""Portfolio.""" import itertools from contextlib import contextmanager from enum import Enum, auto import numpy as np from .base import Quotes from .performance import BriefPerformance, Performance, Stats from .utils import fromtimestamp, timeit __all__ = ( 'Portfolio', 'Position', 'Order', ) class BasePortfolio: def __init__(self, balance=100000, leverage=5): self._initial_balance = balance self.balance = balance self.equity = None # TODO: # self.cash # self.currency self.leverage = leverage self.positions = [] self.balance_curve = None self.equity_curve = None self.long_curve = None self.short_curve = None self.mae_curve = None self.mfe_curve = None self.stats = None self.performance = None self.brief_performance = None def clear(self): self.positions.clear() self.balance = self._initial_balance @property def initial_balance(self): return self._initial_balance @initial_balance.setter def initial_balance(self, value): self._initial_balance = value def add_position(self, position): position.ticket = len(self.positions) + 1 self.positions.append(position) def position_count(self, tp=None): if tp == Order.BUY: return len([p for p in self.positions if p.type == Order.BUY]) elif tp == Order.SELL: return len([p for p in self.positions if p.type == Order.SELL]) return len(self.positions) def _close_open_positions(self): for p in self.positions: if p.status == Position.OPEN: p.close(price=Quotes[-1].open, volume=p.volume, time=Quotes[-1].time) def _get_market_position(self): p = self.positions[0] # real postions p = Position( symbol=p.symbol, ptype=Order.BUY, volume=p.volume, price=Quotes[0].open, open_time=Quotes[0].time, close_price=Quotes[-1].close, close_time=Quotes[-1].time, id_bar_close=len(Quotes) - 1, status=Position.CLOSED) p.profit = p.calc_profit(close_price=Quotes[-1].close) p.profit_perc = p.profit / self._initial_balance * 100 return p def _calc_equity_curve(self): """Equity curve.""" self.equity_curve = np.zeros_like(Quotes.time) for i, p in enumerate(self.positions): balance = np.sum(self.stats['All'][:i].abs) for ibar in range(p.id_bar_open, p.id_bar_close): profit = p.calc_profit(close_price=Quotes[ibar].close) self.equity_curve[ibar] = balance + profit # taking into account the real balance after the last trade self.equity_curve[-1] = self.balance_curve[-1] def _calc_buy_and_hold_curve(self): """Buy and Hold.""" p = self._get_market_position() self.buy_and_hold_curve = np.array( [p.calc_profit(close_price=price) for price in Quotes.close]) def _calc_long_short_curves(self): """Only Long/Short positions curve.""" self.long_curve = np.zeros_like(Quotes.time) self.short_curve = np.zeros_like(Quotes.time) for i, p in enumerate(self.positions): if p.type == Order.BUY: name = 'Long' curve = self.long_curve else: name = 'Short' curve = self.short_curve balance = np.sum(self.stats[name][:i].abs) # Calculate equity for this position for ibar in range(p.id_bar_open, p.id_bar_close): profit = p.calc_profit(close_price=Quotes[ibar].close) curve[ibar] = balance + profit for name, curve in [('Long', self.long_curve), ('Short', self.short_curve)]: curve[:] = fill_zeros_with_last(curve) # taking into account the real balance after the last trade curve[-1] = np.sum(self.stats[name].abs) def _calc_curves(self): self.mae_curve = np.cumsum(self.stats['All'].mae) self.mfe_curve = np.cumsum(self.stats['All'].mfe) self.balance_curve = np.cumsum(self.stats['All'].abs) self._calc_equity_curve() self._calc_buy_and_hold_curve() self._calc_long_short_curves() @contextmanager def optimization_mode(self): 
"""Backup and restore current balance and positions.""" # mode='general', self.backup_balance = self.balance self.backup_positions = self.positions.copy() self.balance = self._initial_balance self.positions.clear() yield self.balance = self.backup_balance self.positions = self.backup_positions.copy() self.backup_positions.clear() @timeit def run_optimization(self, strategy, params): keys = list(params.keys()) vals = list(params.values()) variants = list(itertools.product(*vals)) self.brief_performance = BriefPerformance(shape=(len(variants),)) with self.optimization_mode(): for i, vals in enumerate(variants): kwargs = {keys[n]: val for n, val in enumerate(vals)} strategy.start(**kwargs) self._close_open_positions() self.brief_performance.add( self._initial_balance, self.positions, i, kwargs) self.clear() @timeit def summarize(self): self._close_open_positions() positions = { 'All': self.positions, 'Long': [p for p in self.positions if p.type == Order.BUY], 'Short': [p for p in self.positions if p.type == Order.SELL], 'Market': [self._get_market_position(), ], } self.stats = Stats(positions) self.performance = Performance(self._initial_balance, self.stats, positions) self._calc_curves() Portfolio = BasePortfolio() class PositionStatus(Enum): OPEN = auto() CLOSED = auto() CANCELED = auto() class Position: OPEN = PositionStatus.OPEN CLOSED = PositionStatus.CLOSED CANCELED = PositionStatus.CANCELED __slots__ = ( 'type', 'symbol', 'ticket', 'open_price', 'close_price', 'open_time', 'close_time', 'volume', 'sl', 'tp', 'status', 'profit', 'profit_perc', 'commis', 'id_bar_open', 'id_bar_close', 'entry_name', 'exit_name', 'total_profit', 'comment', ) def __init__(self, symbol, ptype, price, volume, open_time, sl=None, tp=None, status=OPEN, entry_name='', exit_name='', comment='', **kwargs): self.type = ptype self.symbol = symbol self.ticket = None self.open_price = price self.close_price = None self.open_time = open_time self.close_time = None self.volume = volume self.sl = sl self.tp = tp self.status = status self.profit = None self.profit_perc = None self.commis = None self.id_bar_open = np.where(Quotes.time == self.open_time)[0][0] self.id_bar_close = None self.entry_name = entry_name self.exit_name = exit_name self.total_profit = 0 self.comment = comment # self.bars_on_trade = None # self.is_profitable = False for k, v in kwargs.items(): setattr(self, k, v) def __repr__(self): _type = 'LONG' if self.type == Order.BUY else 'SHORT' time = fromtimestamp(self.open_time).strftime('%d.%m.%y %H:%M') return '%s/%s/[%s - %.4f]' % ( self.status.name, _type, time, self.open_price) def close(self, price, time, volume=None): # TODO: allow closing only part of the volume self.close_price = price self.close_time = time self.id_bar_close = np.where(Quotes.time == self.close_time)[0][0] self.profit = self.calc_profit(volume=volume or self.volume) self.profit_perc = self.profit / Portfolio.balance * 100 Portfolio.balance += self.profit self.total_profit = Portfolio.balance - Portfolio.initial_balance self.status = self.CLOSED def calc_profit(self, volume=None, close_price=None): # TODO: rewrite it close_price = close_price or self.close_price volume = volume or self.volume factor = 1 if self.type == Order.BUY else -1 price_delta = (close_price - self.open_price) * factor if self.symbol.mode in [self.symbol.FOREX, self.symbol.CFD]: # Margin: Lots*Contract_Size/Leverage if (self.symbol.mode == self.symbol.FOREX and self.symbol.ticker[:3] == 'USD'): # Example: 'USD/JPY' # Прибыль Размер Объем Текущий # в пунктах 
пункта позиции курс # 1 * 0.0001 * 100000 / 1.00770 # USD/CHF: 1*0.0001*100000/1.00770 = $9.92 # 0.01 # USD/JPY: 1*0.01*100000/121.35 = $8.24 # (1.00770-1.00595)/0.0001 = 17.5 пунктов # (1.00770-1.00595)/0.0001*0.0001*100000*1/1.00770*1 _points = price_delta / self.symbol.tick_size _profit = _points * self.symbol.tick_size * \ self.symbol.contract_size / close_price * volume elif (self.symbol.mode == self.symbol.FOREX and self.symbol.ticker[-3:] == 'USD'): # Example: 'EUR/USD' # Profit: (close_price-open_price)*Contract_Size*Lots # EUR/USD BUY: (1.05875-1.05850)*100000*1 = +$25 (без комиссии) _profit = price_delta * self.symbol.contract_size * volume else: # Cross rates. Example: 'GBP/CHF' # Цена пункта = # объем поз.*размер п.*тек.курс баз.вал. к USD/тек. кросс-курс # GBP/CHF: 100000*0.0001*1.48140/1.48985 = $9.94 # TODO: temporary patch (same as the previous choice) - # in the future connect to some quotes provider and get rates _profit = price_delta * self.symbol.contract_size * volume elif self.symbol.mode == self.symbol.FUTURES: # Margin: Lots *InitialMargin*Percentage/100 # Profit: (close_price-open_price)*TickPrice/TickSize*Lots # CL BUY: (46.35-46.30)*10/0.01*1 = $50 (без учета комиссии!) # EuroFX(6E) BUY:(1.05875-1.05850)*12.50/0.0001*1 =$31.25 (без ком) # RTS (RIH5) BUY:(84510-84500)*12.26506/10*1 = @12.26506 (без ком) # E-miniSP500 BUY:(2065.95-2065.25)*12.50/0.25 = $35 (без ком) # http://americanclearing.ru/specifications.php # http://www.moex.com/ru/contract.aspx?code=RTS-3.18 # http://www.cmegroup.com/trading/equity-index/us-index/e-mini-sandp500_contract_specifications.html _profit = (price_delta * self.symbol.tick_value / self.symbol.tick_size * volume) else: # shares _profit = price_delta * volume return _profit def calc_mae(self, low, high): """Return [MAE] Maximum Adverse Excursion.""" if self.type == Order.BUY: return self.calc_profit(close_price=low) return self.calc_profit(close_price=high) def calc_mfe(self, low, high): """Return [MFE] Maximum Favorable Excursion.""" if self.type == Order.BUY: return self.calc_profit(close_price=high) return self.calc_profit(close_price=low) class OrderType(Enum): BUY = auto() SELL = auto() BUY_LIMIT = auto() SELL_LIMIT = auto() BUY_STOP = auto() SELL_STOP = auto() class Order: BUY = OrderType.BUY SELL = OrderType.SELL BUY_LIMIT = OrderType.BUY_LIMIT SELL_LIMIT = OrderType.SELL_LIMIT BUY_STOP = OrderType.BUY_STOP SELL_STOP = OrderType.SELL_STOP @staticmethod def open(symbol, otype, price, volume, time, sl=None, tp=None): # TODO: add margin calculation # and if the margin is not enough - do not open the position position = Position( symbol=symbol, ptype=otype, price=price, volume=volume, open_time=time, sl=sl, tp=tp) Portfolio.add_position(position) return position @staticmethod def close(position, price, time, volume=None): # FIXME: may be closed not the whole volume, but # the position status will be changed to CLOSED position.close(price=price, time=time, volume=volume) def fill_zeros_with_last(arr): """Fill empty(zero) elements (between positions).""" index = np.arange(len(arr)) index[arr == 0] = 0 index = np.maximum.accumulate(index) return arr[index]
[ "enum.auto", "numpy.where", "itertools.product", "numpy.sum", "numpy.maximum.accumulate", "numpy.cumsum", "numpy.zeros_like" ]
[((6096, 6102), 'enum.auto', 'auto', ([], {}), '()\n', (6100, 6102), False, 'from enum import Enum, auto\n'), ((6116, 6122), 'enum.auto', 'auto', ([], {}), '()\n', (6120, 6122), False, 'from enum import Enum, auto\n'), ((6138, 6144), 'enum.auto', 'auto', ([], {}), '()\n', (6142, 6144), False, 'from enum import Enum, auto\n'), ((11857, 11863), 'enum.auto', 'auto', ([], {}), '()\n', (11861, 11863), False, 'from enum import Enum, auto\n'), ((11875, 11881), 'enum.auto', 'auto', ([], {}), '()\n', (11879, 11881), False, 'from enum import Enum, auto\n'), ((11898, 11904), 'enum.auto', 'auto', ([], {}), '()\n', (11902, 11904), False, 'from enum import Enum, auto\n'), ((11922, 11928), 'enum.auto', 'auto', ([], {}), '()\n', (11926, 11928), False, 'from enum import Enum, auto\n'), ((11944, 11950), 'enum.auto', 'auto', ([], {}), '()\n', (11948, 11950), False, 'from enum import Enum, auto\n'), ((11967, 11973), 'enum.auto', 'auto', ([], {}), '()\n', (11971, 11973), False, 'from enum import Enum, auto\n'), ((12986, 13014), 'numpy.maximum.accumulate', 'np.maximum.accumulate', (['index'], {}), '(index)\n', (13007, 13014), True, 'import numpy as np\n'), ((2416, 2442), 'numpy.zeros_like', 'np.zeros_like', (['Quotes.time'], {}), '(Quotes.time)\n', (2429, 2442), True, 'import numpy as np\n'), ((3201, 3227), 'numpy.zeros_like', 'np.zeros_like', (['Quotes.time'], {}), '(Quotes.time)\n', (3214, 3227), True, 'import numpy as np\n'), ((3255, 3281), 'numpy.zeros_like', 'np.zeros_like', (['Quotes.time'], {}), '(Quotes.time)\n', (3268, 3281), True, 'import numpy as np\n'), ((4154, 4186), 'numpy.cumsum', 'np.cumsum', (["self.stats['All'].mae"], {}), "(self.stats['All'].mae)\n", (4163, 4186), True, 'import numpy as np\n'), ((4212, 4244), 'numpy.cumsum', 'np.cumsum', (["self.stats['All'].mfe"], {}), "(self.stats['All'].mfe)\n", (4221, 4244), True, 'import numpy as np\n'), ((4274, 4306), 'numpy.cumsum', 'np.cumsum', (["self.stats['All'].abs"], {}), "(self.stats['All'].abs)\n", (4283, 4306), True, 'import numpy as np\n'), ((2512, 2545), 'numpy.sum', 'np.sum', (["self.stats['All'][:i].abs"], {}), "(self.stats['All'][:i].abs)\n", (2518, 2545), True, 'import numpy as np\n'), ((3548, 3580), 'numpy.sum', 'np.sum', (['self.stats[name][:i].abs'], {}), '(self.stats[name][:i].abs)\n', (3554, 3580), True, 'import numpy as np\n'), ((4071, 4099), 'numpy.sum', 'np.sum', (['self.stats[name].abs'], {}), '(self.stats[name].abs)\n', (4077, 4099), True, 'import numpy as np\n'), ((5045, 5069), 'itertools.product', 'itertools.product', (['*vals'], {}), '(*vals)\n', (5062, 5069), False, 'import itertools\n'), ((7169, 7208), 'numpy.where', 'np.where', (['(Quotes.time == self.open_time)'], {}), '(Quotes.time == self.open_time)\n', (7177, 7208), True, 'import numpy as np\n'), ((7975, 8015), 'numpy.where', 'np.where', (['(Quotes.time == self.close_time)'], {}), '(Quotes.time == self.close_time)\n', (7983, 8015), True, 'import numpy as np\n')]
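A self-contained check of the fill_zeros_with_last helper defined above; the input values are arbitrary. Zeros between trades are replaced by the last non-zero value to the left.

import numpy as np

curve = np.array([0., 5., 0., 0., 7., 0.])
print(fill_zeros_with_last(curve))  # -> [0. 5. 5. 5. 7. 7.]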
import json import numpy as np import os from photogrammetry_importer.types.camera import Camera from photogrammetry_importer.types.point import Point from photogrammetry_importer.file_handlers.utility import ( check_radial_distortion, ) from photogrammetry_importer.blender_utility.logging_utility import log_report class MeshroomFileHandler: """Class to read and write :code:`Meshroom` files and workspaces.""" # Note: *.SfM files are actually just *.JSON files. @staticmethod def _get_element(data_list, id_string, query_id): result = None for ele in data_list: if int(ele[id_string]) == query_id: result = ele break assert result is not None return result @classmethod def _parse_cameras_from_json_data( cls, json_data, image_dp, image_fp_type, suppress_distortion_warnings, op, ): cams = [] image_index_to_camera_index = {} is_valid_file = ( "views" in json_data and "intrinsics" in json_data and "poses" in json_data ) if not is_valid_file: log_report( "ERROR", "FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain the" + " SfM reconstruction results: view, intrinsics and poses.", op, ) return cams, image_index_to_camera_index views = json_data["views"] # is a list of dicts (view) intrinsics = json_data["intrinsics"] # is a list of dicts (intrinsic) extrinsics = json_data["poses"] # is a list of dicts (extrinsic) # IMPORTANT: # Views contain the number of input images # Extrinsics may contain only a subset of views! # (Not all views are necessarily contained in the reconstruction) for rec_index, extrinsic in enumerate(extrinsics): camera = Camera() view_index = int(extrinsic["poseId"]) image_index_to_camera_index[view_index] = rec_index corresponding_view = cls._get_element(views, "poseId", view_index) camera.image_fp_type = image_fp_type camera.image_dp = image_dp camera._absolute_fp = str(corresponding_view["path"]) camera._relative_fp = os.path.basename( str(corresponding_view["path"]) ) camera._undistorted_relative_fp = str(extrinsic["poseId"]) + ".exr" if image_dp is None: camera._undistorted_absolute_fp = None else: camera._undistorted_absolute_fp = os.path.join( image_dp, camera._undistorted_relative_fp ) camera.width = int(corresponding_view["width"]) camera.height = int(corresponding_view["height"]) id_intrinsic = int(corresponding_view["intrinsicId"]) intrinsic_params = cls._get_element( intrinsics, "intrinsicId", id_intrinsic ) focal_length = float(intrinsic_params["pxFocalLength"]) cx = float(intrinsic_params["principalPoint"][0]) cy = float(intrinsic_params["principalPoint"][1]) if ( "distortionParams" in intrinsic_params and len(intrinsic_params["distortionParams"]) > 0 ): # TODO proper handling of distortion parameters radial_distortion = float( intrinsic_params["distortionParams"][0] ) else: radial_distortion = 0.0 if not suppress_distortion_warnings: check_radial_distortion( radial_distortion, camera._relative_fp, op ) camera_calibration_matrix = np.array( [[focal_length, 0, cx], [0, focal_length, cy], [0, 0, 1]] ) camera.set_calibration( camera_calibration_matrix, radial_distortion ) extrinsic_params = extrinsic["pose"]["transform"] cam_rotation_list = extrinsic_params["rotation"] camera.set_rotation_with_rotation_mat( np.array(cam_rotation_list, dtype=float).reshape(3, 3).T ) camera.set_camera_center_after_rotation( np.array(extrinsic_params["center"], dtype=float) ) camera.view_index = view_index cams.append(camera) return cams, image_index_to_camera_index @staticmethod def _parse_points_from_json_data( json_data, image_index_to_camera_index, op ): points = [] is_valid_file 
= "structure" in json_data if not is_valid_file: log_report( "ERROR", "FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain " + " the SfM reconstruction results: structure.", op, ) return points structure = json_data["structure"] for json_point in structure: custom_point = Point( coord=np.array(json_point["X"], dtype=float), color=np.array(json_point["color"], dtype=int), id=int(json_point["landmarkId"]), scalars=[], ) points.append(custom_point) return points @classmethod def parse_meshroom_sfm_file( cls, sfm_ifp, image_idp, image_fp_type, suppress_distortion_warnings, op=None, ): """Parse a :code:`Meshroom` (:code:`.sfm` or :code:`.json`) file. Parse different file formats created with the :code:`StructureFromMotion` / :code:`ConvertSfMFormat` node in :code:`Meshroom`. """ log_report("INFO", "parse_meshroom_sfm_file: ...", op) log_report("INFO", "sfm_ifp: " + sfm_ifp, op) input_file = open(sfm_ifp, "r") json_data = json.load(input_file) ( cams, image_index_to_camera_index, ) = cls._parse_cameras_from_json_data( json_data, image_idp, image_fp_type, suppress_distortion_warnings, op, ) if "structure" in json_data: points = cls._parse_points_from_json_data( json_data, image_index_to_camera_index, op ) else: points = [] log_report("INFO", "parse_meshroom_sfm_file: Done", op) return cams, points @staticmethod def _get_latest_node(json_graph, node_type): i = 0 while node_type + "_" + str(i + 1) in json_graph: i = i + 1 if i == 0: return None else: return json_graph[node_type + "_" + str(i)] @classmethod def _get_node(cls, json_graph, node_type, node_number, op): if node_number == -1: return cls._get_latest_node(json_graph, node_type) else: node_key = node_type + "_" + str(node_number) if node_key in json_graph: return json_graph[node_key] else: log_report( "ERROR", "Invalid combination of node type (i.e. " + node_type + ") " + "and node number (i.e. 
" + str(node_number) + ") provided", op, ) assert False @staticmethod def _get_data_fp_of_node(cache_dp, data_node, fn_or_fn_list): if isinstance(fn_or_fn_list, str): fn_list = [fn_or_fn_list] else: fn_list = fn_or_fn_list if data_node is None: return None node_type = data_node["nodeType"] uid_0 = data_node["uids"]["0"] data_fp = None for fn in fn_list: possible_data_fp = os.path.join(cache_dp, node_type, uid_0, fn) if os.path.isfile(possible_data_fp): data_fp = possible_data_fp break return data_fp @classmethod def _get_node_data_fp( cls, cache_dp, json_graph, node_type, node_number, fn_or_fn_list, op ): data_node = cls._get_node(json_graph, node_type, node_number, op) data_fp = cls._get_data_fp_of_node(cache_dp, data_node, fn_or_fn_list) return data_fp @staticmethod def _get_data_dp_of_node(cache_dp, data_node): if data_node is None: return None node_type = data_node["nodeType"] uid_0 = data_node["uids"]["0"] return os.path.join(cache_dp, node_type, uid_0) @classmethod def _get_node_data_dp( cls, cache_dp, json_graph, node_type, node_number, op ): data_node = cls._get_node(json_graph, node_type, node_number, op) data_dp = cls._get_data_dp_of_node(cache_dp, data_node) return data_dp @classmethod def _get_sfm_fp( cls, sfm_node_type, cache_dp, json_graph, sfm_node_number, op ): if sfm_node_type == "ConvertSfMFormatNode": sfm_fp = cls._get_node_data_fp( cache_dp, json_graph, "ConvertSfMFormat", sfm_node_number, ["sfm.sfm", "sfm.json"], op, ) elif sfm_node_type == "StructureFromMotionNode": sfm_fp = cls._get_node_data_fp( cache_dp, json_graph, "StructureFromMotion", sfm_node_number, "cameras.sfm", op, ) elif sfm_node_type == "AUTOMATIC": sfm_fp = cls._get_node_data_fp( cache_dp, json_graph, "ConvertSfMFormat", sfm_node_number, ["sfm.sfm", "sfm.json"], op, ) if sfm_fp is None: sfm_fp = cls._get_node_data_fp( cache_dp, json_graph, "StructureFromMotion", sfm_node_number, "cameras.sfm", op, ) else: log_report("ERROR", "Selected SfM node is not supported", op) assert False return sfm_fp @classmethod def _get_mesh_fp( cls, mesh_node_type, cache_dp, json_graph, mesh_node_number, op ): if mesh_node_type == "Texturing": mesh_fp = cls._get_node_data_fp( cache_dp, json_graph, "Texturing", mesh_node_number, "texturedMesh.obj", op, ) elif mesh_node_type == "MeshFiltering": mesh_fp = cls._get_node_data_fp( cache_dp, json_graph, "MeshFiltering", mesh_node_number, "mesh.obj", op, ) elif mesh_node_type == "Meshing": mesh_fp = cls._get_node_data_fp( cache_dp, json_graph, "Meshing", mesh_node_number, "mesh.obj", op, ) elif mesh_node_type == "AUTOMATIC": mesh_fp = cls._get_node_data_fp( cache_dp, json_graph, "Texturing", mesh_node_number, "texturedMesh.obj", op, ) if mesh_fp is None: mesh_fp = cls._get_node_data_fp( cache_dp, json_graph, "MeshFiltering", mesh_node_number, "mesh.obj", op, ) if mesh_fp is None: mesh_fp = cls._get_node_data_fp( cache_dp, json_graph, "Meshing", mesh_node_number, "mesh.obj", op, ) else: log_report("ERROR", "Select Mesh node is not supported!", op) assert False return mesh_fp @classmethod def _get_image_dp(cls, cache_dp, json_graph, prepare_node_number, op): prepare_dp = cls._get_node_data_dp( cache_dp, json_graph, "PrepareDenseScene", prepare_node_number, op, ) return prepare_dp @classmethod def parse_meshrom_mg_file( cls, mg_fp, sfm_node_type, sfm_node_number, mesh_node_type, mesh_node_number, prepare_node_number, op=None, ): """Parse a :code:`Meshroom` project file (:code:`.mg`).""" cache_dp = os.path.join(os.path.dirname(mg_fp), "MeshroomCache") json_data = json.load(open(mg_fp, 
"r")) json_graph = json_data["graph"] sfm_fp = cls._get_sfm_fp( sfm_node_type, cache_dp, json_graph, sfm_node_number, op ) mesh_fp = cls._get_mesh_fp( mesh_node_type, cache_dp, json_graph, mesh_node_number, op ) image_dp = cls._get_image_dp( cache_dp, json_graph, prepare_node_number, op ) if sfm_fp is not None: log_report("INFO", "Found the following sfm file: " + sfm_fp, op) else: log_report( "INFO", "Request target SfM result does not exist in this meshroom" " project.", op, ) if mesh_fp is not None: log_report("INFO", "Found the following mesh file: " + mesh_fp, op) else: log_report( "INFO", "Request target mesh does not exist in this meshroom project.", op, ) return sfm_fp, mesh_fp, image_dp @classmethod def parse_meshroom_file( cls, meshroom_ifp, use_workspace_images, image_dp, image_fp_type, suppress_distortion_warnings, sfm_node_type, sfm_node_number, mesh_node_type, mesh_node_number, prepare_node_number, op=None, ): """Parse a :code:`Meshroom` file. Supported file formats are :code:`.mg`, :code:`.sfm` or :code:`.json`. """ log_report("INFO", "parse_meshroom_file: ...", op) log_report("INFO", "meshroom_ifp: " + meshroom_ifp, op) ext = os.path.splitext(meshroom_ifp)[1].lower() if ext == ".mg": ( meshroom_ifp, mesh_fp, image_idp_workspace, ) = cls.parse_meshrom_mg_file( meshroom_ifp, sfm_node_type, sfm_node_number, mesh_node_type, mesh_node_number, prepare_node_number, op, ) if ( use_workspace_images and image_idp_workspace is not None and os.path.isdir(image_idp_workspace) ): image_dp = image_idp_workspace log_report("INFO", "Using image directory in workspace.", op) else: assert ext == ".json" or ext == ".sfm" mesh_fp = None if meshroom_ifp is not None: cams, points = cls.parse_meshroom_sfm_file( meshroom_ifp, image_dp, image_fp_type, suppress_distortion_warnings, op, ) else: log_report( "WARNING", "Meshroom project does not contain cameras or points. Have" " you saved the project (i.e. the *.mg file)?", op, ) cams = [] points = [] log_report("INFO", "parse_meshroom_file: Done", op) return cams, points, mesh_fp, image_dp
[ "photogrammetry_importer.file_handlers.utility.check_radial_distortion", "photogrammetry_importer.types.camera.Camera", "os.path.join", "os.path.splitext", "photogrammetry_importer.blender_utility.logging_utility.log_report", "os.path.isfile", "numpy.array", "os.path.dirname", "os.path.isdir", "js...
[((5921, 5975), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', '"""parse_meshroom_sfm_file: ..."""', 'op'], {}), "('INFO', 'parse_meshroom_sfm_file: ...', op)\n", (5931, 5975), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((5984, 6029), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', "('sfm_ifp: ' + sfm_ifp)", 'op'], {}), "('INFO', 'sfm_ifp: ' + sfm_ifp, op)\n", (5994, 6029), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((6090, 6111), 'json.load', 'json.load', (['input_file'], {}), '(input_file)\n', (6099, 6111), False, 'import json\n'), ((6581, 6636), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', '"""parse_meshroom_sfm_file: Done"""', 'op'], {}), "('INFO', 'parse_meshroom_sfm_file: Done', op)\n", (6591, 6636), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((8798, 8838), 'os.path.join', 'os.path.join', (['cache_dp', 'node_type', 'uid_0'], {}), '(cache_dp, node_type, uid_0)\n', (8810, 8838), False, 'import os\n'), ((14680, 14730), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', '"""parse_meshroom_file: ..."""', 'op'], {}), "('INFO', 'parse_meshroom_file: ...', op)\n", (14690, 14730), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((14739, 14794), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', "('meshroom_ifp: ' + meshroom_ifp)", 'op'], {}), "('INFO', 'meshroom_ifp: ' + meshroom_ifp, op)\n", (14749, 14794), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((16205, 16256), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', '"""parse_meshroom_file: Done"""', 'op'], {}), "('INFO', 'parse_meshroom_file: Done', op)\n", (16215, 16256), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((1200, 1357), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""ERROR"""', "('FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain the' +\n ' SfM reconstruction results: view, intrinsics and poses.')", 'op'], {}), "('ERROR', \n 'FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain the' +\n ' SfM reconstruction results: view, intrinsics and poses.', op)\n", (1210, 1357), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((1985, 1993), 'photogrammetry_importer.types.camera.Camera', 'Camera', ([], {}), '()\n', (1991, 1993), False, 'from photogrammetry_importer.types.camera import Camera\n'), ((3899, 3966), 'numpy.array', 'np.array', (['[[focal_length, 0, cx], [0, focal_length, cy], [0, 0, 1]]'], {}), '([[focal_length, 0, cx], [0, focal_length, cy], [0, 0, 1]])\n', (3907, 3966), True, 'import numpy as np\n'), ((4857, 4998), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""ERROR"""', "('FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain ' +\n ' the SfM reconstruction results: structure.')", 'op'], {}), "('ERROR', \n 'FILE FORMAT ERROR: Incorrect SfM/JSON file. 
Must contain ' +\n ' the SfM reconstruction results: structure.', op)\n", (4867, 4998), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((8091, 8135), 'os.path.join', 'os.path.join', (['cache_dp', 'node_type', 'uid_0', 'fn'], {}), '(cache_dp, node_type, uid_0, fn)\n', (8103, 8135), False, 'import os\n'), ((8151, 8183), 'os.path.isfile', 'os.path.isfile', (['possible_data_fp'], {}), '(possible_data_fp)\n', (8165, 8183), False, 'import os\n'), ((13086, 13108), 'os.path.dirname', 'os.path.dirname', (['mg_fp'], {}), '(mg_fp)\n', (13101, 13108), False, 'import os\n'), ((13598, 13663), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', "('Found the following sfm file: ' + sfm_fp)", 'op'], {}), "('INFO', 'Found the following sfm file: ' + sfm_fp, op)\n", (13608, 13663), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((13690, 13786), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', '"""Request target SfM result does not exist in this meshroom project."""', 'op'], {}), "('INFO',\n 'Request target SfM result does not exist in this meshroom project.', op)\n", (13700, 13786), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((13910, 13977), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', "('Found the following mesh file: ' + mesh_fp)", 'op'], {}), "('INFO', 'Found the following mesh file: ' + mesh_fp, op)\n", (13920, 13977), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((14004, 14094), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', '"""Request target mesh does not exist in this meshroom project."""', 'op'], {}), "('INFO',\n 'Request target mesh does not exist in this meshroom project.', op)\n", (14014, 14094), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((15937, 16076), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""WARNING"""', '"""Meshroom project does not contain cameras or points. Have you saved the project (i.e. the *.mg file)?"""', 'op'], {}), "('WARNING',\n 'Meshroom project does not contain cameras or points. Have you saved the project (i.e. 
the *.mg file)?'\n , op)\n", (15947, 16076), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((2693, 2748), 'os.path.join', 'os.path.join', (['image_dp', 'camera._undistorted_relative_fp'], {}), '(image_dp, camera._undistorted_relative_fp)\n', (2705, 2748), False, 'import os\n'), ((3752, 3819), 'photogrammetry_importer.file_handlers.utility.check_radial_distortion', 'check_radial_distortion', (['radial_distortion', 'camera._relative_fp', 'op'], {}), '(radial_distortion, camera._relative_fp, op)\n', (3775, 3819), False, 'from photogrammetry_importer.file_handlers.utility import check_radial_distortion\n'), ((4440, 4489), 'numpy.array', 'np.array', (["extrinsic_params['center']"], {'dtype': 'float'}), "(extrinsic_params['center'], dtype=float)\n", (4448, 4489), True, 'import numpy as np\n'), ((15383, 15417), 'os.path.isdir', 'os.path.isdir', (['image_idp_workspace'], {}), '(image_idp_workspace)\n', (15396, 15417), False, 'import os\n'), ((15496, 15557), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""INFO"""', '"""Using image directory in workspace."""', 'op'], {}), "('INFO', 'Using image directory in workspace.', op)\n", (15506, 15557), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((5232, 5270), 'numpy.array', 'np.array', (["json_point['X']"], {'dtype': 'float'}), "(json_point['X'], dtype=float)\n", (5240, 5270), True, 'import numpy as np\n'), ((5294, 5334), 'numpy.array', 'np.array', (["json_point['color']"], {'dtype': 'int'}), "(json_point['color'], dtype=int)\n", (5302, 5334), True, 'import numpy as np\n'), ((10425, 10486), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""ERROR"""', '"""Selected SfM node is not supported"""', 'op'], {}), "('ERROR', 'Selected SfM node is not supported', op)\n", (10435, 10486), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((14810, 14840), 'os.path.splitext', 'os.path.splitext', (['meshroom_ifp'], {}), '(meshroom_ifp)\n', (14826, 14840), False, 'import os\n'), ((12347, 12408), 'photogrammetry_importer.blender_utility.logging_utility.log_report', 'log_report', (['"""ERROR"""', '"""Select Mesh node is not supported!"""', 'op'], {}), "('ERROR', 'Select Mesh node is not supported!', op)\n", (12357, 12408), False, 'from photogrammetry_importer.blender_utility.logging_utility import log_report\n'), ((4300, 4340), 'numpy.array', 'np.array', (['cam_rotation_list'], {'dtype': 'float'}), '(cam_rotation_list, dtype=float)\n', (4308, 4340), True, 'import numpy as np\n')]
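A minimal, hedged usage sketch for the handler above. The project path is a placeholder, and the image_fp_type value is an illustrative assumption rather than something taken from the source; "AUTOMATIC" with node number -1 simply exercises the fallback selection logic the class already implements.

# Assumes MeshroomFileHandler from the module above is in scope.
cams, points, mesh_fp, image_dp = MeshroomFileHandler.parse_meshroom_file(
    meshroom_ifp="/path/to/project.mg",   # hypothetical project file
    use_workspace_images=True,
    image_dp=None,
    image_fp_type="NAME",                 # assumed option value, not confirmed by the source
    suppress_distortion_warnings=False,
    sfm_node_type="AUTOMATIC",            # let the handler pick ConvertSfMFormat, then StructureFromMotion
    sfm_node_number=-1,                   # -1 selects the latest instance of the node
    mesh_node_type="AUTOMATIC",
    mesh_node_number=-1,
    prepare_node_number=-1,
)
print(len(cams), "cameras and", len(points), "points reconstructed")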
"""Convert Senate speech data from 114th Congress to bag of words format. The data is provided by [1]. Specifically, we use the `hein-daily` data. To run this script, make sure the relevant files are in `data/senate-speeches-114/raw/`. The files needed for this script are `speeches_114.txt`, `descr_114.txt`, and `114_SpeakerMap.txt`. #### References [1]: Gentzkow, Matthew, <NAME>, and <NAME>. Congressional Record for the 43rd-114th Congresses: Parsed Speeches and Phrase Counts. Palo Alto, CA: Stanford Libraries [distributor], 2018-01-16. https://data.stanford.edu/congress_text """ import os import setup_utils as utils import numpy as np import pandas as pd from scipy import sparse from sklearn.feature_extraction.text import CountVectorizer project_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), os.pardir)) data_dir = os.path.join(project_dir, "data/senate-speeches-114/raw") save_dir = os.path.join(project_dir, "data/senate-speeches-114/clean") speeches = pd.read_csv(os.path.join(data_dir, 'speeches_114.txt'), encoding="ISO-8859-1", sep="|", error_bad_lines=False) description = pd.read_csv(os.path.join(data_dir, 'descr_114.txt'), encoding="ISO-8859-1", sep="|") speaker_map = pd.read_csv(os.path.join(data_dir, '114_SpeakerMap.txt'), encoding="ISO-8859-1", sep="|") # Merge all data into a single dataframe. merged_df = speeches.merge(description, left_on='speech_id', right_on='speech_id') df = merged_df.merge(speaker_map, left_on='speech_id', right_on='speech_id') # Only look at senate speeches. senate_df = df[df['chamber_x'] == 'S'] speaker = np.array( [' '.join([first, last]) for first, last in list(zip(np.array(senate_df['firstname']), np.array(senate_df['lastname'])))]) speeches = np.array(senate_df['speech']) party = np.array(senate_df['party']) # Remove senators who make less than 24 speeches. min_speeches = 24 unique_speaker, speaker_counts = np.unique(speaker, return_counts=True) absent_speakers = unique_speaker[np.where(speaker_counts < min_speeches)] absent_speaker_inds = [ind for ind, x in enumerate(speaker) if x in absent_speakers] speaker = np.delete(speaker, absent_speaker_inds) speeches = np.delete(speeches, absent_speaker_inds) party = np.delete(party, absent_speaker_inds) speaker_party = np.array( [speaker[i] + " (" + party[i] + ")" for i in range(len(speaker))]) # Create mapping between names and IDs. speaker_to_speaker_id = dict( [(y.title(), x) for x, y in enumerate(sorted(set(speaker_party)))]) author_indices = np.array( [speaker_to_speaker_id[s.title()] for s in speaker_party]) author_map = np.array(list(speaker_to_speaker_id.keys())) stopwords = set( np.loadtxt(os.path.join(project_dir, "setup/stopwords/senate_speeches.txt"), dtype=str, delimiter="\n")) count_vectorizer = CountVectorizer(min_df=0.001, max_df=0.3, stop_words=stopwords, ngram_range=(1, 3), token_pattern="[a-zA-Z]+") # Learn initial document term matrix. This is only initial because we use it to # identify words to exclude based on author counts. counts = count_vectorizer.fit_transform(speeches) vocabulary = np.array( [k for (k, v) in sorted(count_vectorizer.vocabulary_.items(), key=lambda kv: kv[1])]) # Remove phrases spoken by less than 10 Senators. counts_per_author = utils.bincount_2d(author_indices, counts.toarray()) min_authors_per_word = 10 author_counts_per_word = np.sum(counts_per_author > 0, axis=0) acceptable_words = np.where( author_counts_per_word >= min_authors_per_word)[0] # Fit final document-term matrix with modified vocabulary. 
count_vectorizer = CountVectorizer(ngram_range=(1, 3), vocabulary=vocabulary[acceptable_words]) counts = count_vectorizer.fit_transform(speeches) vocabulary = np.array( [k for (k, v) in sorted(count_vectorizer.vocabulary_.items(), key=lambda kv: kv[1])]) # Adjust counts by removing unigram/n-gram pairs which co-occur. counts_dense = utils.remove_cooccurring_ngrams(counts, vocabulary) # Remove speeches with no words. existing_speeches = np.where(np.sum(counts_dense, axis=1) > 0)[0] counts_dense = counts_dense[existing_speeches] author_indices = author_indices[existing_speeches] # Save data. if not os.path.exists(save_dir): os.makedirs(save_dir) # `counts.npz` is a [num_documents, num_words] sparse matrix containing the # word counts for each document. sparse.save_npz(os.path.join(save_dir, "counts.npz"), sparse.csr_matrix(counts_dense).astype(np.float32)) # `author_indices.npy` is a [num_documents] vector where each entry is an # integer indicating the author of the corresponding document. np.save(os.path.join(save_dir, "author_indices.npy"), author_indices) # `vocabulary.txt` is a [num_words] vector where each entry is a string # denoting the corresponding word in the vocabulary. np.savetxt(os.path.join(save_dir, "vocabulary.txt"), vocabulary, fmt="%s") # `author_map.txt` is a [num_authors] vector of strings providing the name of # each author in the corpus. np.savetxt(os.path.join(save_dir, "author_map.txt"), author_map, fmt="%s") # `raw_documents.txt` contains all the documents we ended up using. raw_documents = [document.replace("\n", ' ').replace("\r", ' ') for document in speeches[existing_speeches]] np.savetxt(os.path.join(save_dir, "raw_documents.txt"), raw_documents, fmt="%s")
[ "os.path.exists", "numpy.unique", "os.makedirs", "sklearn.feature_extraction.text.CountVectorizer", "numpy.delete", "numpy.where", "os.path.join", "scipy.sparse.csr_matrix", "numpy.array", "numpy.sum", "os.path.dirname", "setup_utils.remove_cooccurring_ngrams" ]
[((875, 932), 'os.path.join', 'os.path.join', (['project_dir', '"""data/senate-speeches-114/raw"""'], {}), "(project_dir, 'data/senate-speeches-114/raw')\n", (887, 932), False, 'import os\n'), ((944, 1003), 'os.path.join', 'os.path.join', (['project_dir', '"""data/senate-speeches-114/clean"""'], {}), "(project_dir, 'data/senate-speeches-114/clean')\n", (956, 1003), False, 'import os\n'), ((2020, 2049), 'numpy.array', 'np.array', (["senate_df['speech']"], {}), "(senate_df['speech'])\n", (2028, 2049), True, 'import numpy as np\n'), ((2058, 2086), 'numpy.array', 'np.array', (["senate_df['party']"], {}), "(senate_df['party'])\n", (2066, 2086), True, 'import numpy as np\n'), ((2189, 2227), 'numpy.unique', 'np.unique', (['speaker'], {'return_counts': '(True)'}), '(speaker, return_counts=True)\n', (2198, 2227), True, 'import numpy as np\n'), ((2421, 2460), 'numpy.delete', 'np.delete', (['speaker', 'absent_speaker_inds'], {}), '(speaker, absent_speaker_inds)\n', (2430, 2460), True, 'import numpy as np\n'), ((2472, 2512), 'numpy.delete', 'np.delete', (['speeches', 'absent_speaker_inds'], {}), '(speeches, absent_speaker_inds)\n', (2481, 2512), True, 'import numpy as np\n'), ((2521, 2558), 'numpy.delete', 'np.delete', (['party', 'absent_speaker_inds'], {}), '(party, absent_speaker_inds)\n', (2530, 2558), True, 'import numpy as np\n'), ((3152, 3267), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'min_df': '(0.001)', 'max_df': '(0.3)', 'stop_words': 'stopwords', 'ngram_range': '(1, 3)', 'token_pattern': '"""[a-zA-Z]+"""'}), "(min_df=0.001, max_df=0.3, stop_words=stopwords, ngram_range\n =(1, 3), token_pattern='[a-zA-Z]+')\n", (3167, 3267), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((3903, 3940), 'numpy.sum', 'np.sum', (['(counts_per_author > 0)'], {'axis': '(0)'}), '(counts_per_author > 0, axis=0)\n', (3909, 3940), True, 'import numpy as np\n'), ((4104, 4180), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'ngram_range': '(1, 3)', 'vocabulary': 'vocabulary[acceptable_words]'}), '(ngram_range=(1, 3), vocabulary=vocabulary[acceptable_words])\n', (4119, 4180), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((4489, 4540), 'setup_utils.remove_cooccurring_ngrams', 'utils.remove_cooccurring_ngrams', (['counts', 'vocabulary'], {}), '(counts, vocabulary)\n', (4520, 4540), True, 'import setup_utils as utils\n'), ((1028, 1070), 'os.path.join', 'os.path.join', (['data_dir', '"""speeches_114.txt"""'], {}), "(data_dir, 'speeches_114.txt')\n", (1040, 1070), False, 'import os\n'), ((1224, 1263), 'os.path.join', 'os.path.join', (['data_dir', '"""descr_114.txt"""'], {}), "(data_dir, 'descr_114.txt')\n", (1236, 1263), False, 'import os\n'), ((1377, 1421), 'os.path.join', 'os.path.join', (['data_dir', '"""114_SpeakerMap.txt"""'], {}), "(data_dir, '114_SpeakerMap.txt')\n", (1389, 1421), False, 'import os\n'), ((2261, 2300), 'numpy.where', 'np.where', (['(speaker_counts < min_speeches)'], {}), '(speaker_counts < min_speeches)\n', (2269, 2300), True, 'import numpy as np\n'), ((3960, 4016), 'numpy.where', 'np.where', (['(author_counts_per_word >= min_authors_per_word)'], {}), '(author_counts_per_word >= min_authors_per_word)\n', (3968, 4016), True, 'import numpy as np\n'), ((4760, 4784), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4774, 4784), False, 'import os\n'), ((4788, 4809), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (4799, 4809), False, 'import os\n'), 
((4936, 4972), 'os.path.join', 'os.path.join', (['save_dir', '"""counts.npz"""'], {}), "(save_dir, 'counts.npz')\n", (4948, 4972), False, 'import os\n'), ((5187, 5231), 'os.path.join', 'os.path.join', (['save_dir', '"""author_indices.npy"""'], {}), "(save_dir, 'author_indices.npy')\n", (5199, 5231), False, 'import os\n'), ((5385, 5425), 'os.path.join', 'os.path.join', (['save_dir', '"""vocabulary.txt"""'], {}), "(save_dir, 'vocabulary.txt')\n", (5397, 5425), False, 'import os\n'), ((5567, 5607), 'os.path.join', 'os.path.join', (['save_dir', '"""author_map.txt"""'], {}), "(save_dir, 'author_map.txt')\n", (5579, 5607), False, 'import os\n'), ((5837, 5880), 'os.path.join', 'os.path.join', (['save_dir', '"""raw_documents.txt"""'], {}), "(save_dir, 'raw_documents.txt')\n", (5849, 5880), False, 'import os\n'), ((824, 849), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (839, 849), False, 'import os\n'), ((2980, 3044), 'os.path.join', 'os.path.join', (['project_dir', '"""setup/stopwords/senate_speeches.txt"""'], {}), "(project_dir, 'setup/stopwords/senate_speeches.txt')\n", (2992, 3044), False, 'import os\n'), ((4604, 4632), 'numpy.sum', 'np.sum', (['counts_dense'], {'axis': '(1)'}), '(counts_dense, axis=1)\n', (4610, 4632), True, 'import numpy as np\n'), ((4990, 5021), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['counts_dense'], {}), '(counts_dense)\n', (5007, 5021), False, 'from scipy import sparse\n'), ((1924, 1956), 'numpy.array', 'np.array', (["senate_df['firstname']"], {}), "(senate_df['firstname'])\n", (1932, 1956), True, 'import numpy as np\n'), ((1973, 2004), 'numpy.array', 'np.array', (["senate_df['lastname']"], {}), "(senate_df['lastname'])\n", (1981, 2004), True, 'import numpy as np\n')]
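A hedged sketch of reading back the artifacts the script above writes; save_dir is assumed to resolve to the same data/senate-speeches-114/clean directory the script uses.

import os
import numpy as np
from scipy import sparse

save_dir = "data/senate-speeches-114/clean"  # assumed to match the script's save_dir

# Sparse document-term matrix, per-document author labels, and vocabulary.
counts = sparse.load_npz(os.path.join(save_dir, "counts.npz"))
author_indices = np.load(os.path.join(save_dir, "author_indices.npy"))
vocabulary = np.loadtxt(os.path.join(save_dir, "vocabulary.txt"),
                        dtype=str, delimiter="\n")

print(counts.shape, author_indices.shape, vocabulary.shape)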
import unittest from os import path from os.path import join from pyrep import PyRep from pyrep.robots.arms.panda import Panda from pyrep.robots.end_effectors.panda_gripper import PandaGripper from rlbench import environment from rlbench.backend.const import TTT_FILE from rlbench.backend.scene import Scene from rlbench.noise_model import GaussianNoise from rlbench.observation_config import ObservationConfig, CameraConfig import numpy as np from rlbench.backend.robot import Robot from rlbench.tasks.reach_target import ReachTarget ASSET_DIR = path.join(path.dirname(path.abspath(__file__)), 'assets', 'tasks') class TestScene(unittest.TestCase): """Tests the following: - Getting observations from the scene - Applying noise - Applying domain randomization """ def setUp(self): self.pyrep = PyRep() self.pyrep.launch(join(environment.DIR_PATH, TTT_FILE), headless=True) self.pyrep.set_simulation_timestep(0.005) self.robot = Robot(Panda(), PandaGripper()) def tearDown(self): self.pyrep.shutdown() def test_sensor_noise_images(self): cam_config = CameraConfig(rgb_noise=GaussianNoise(0.05, (.0, 1.))) obs_config = ObservationConfig( left_shoulder_camera=cam_config, joint_forces=False, task_low_dim_state=False) scene = Scene(self.pyrep, self.robot, obs_config) scene.load(ReachTarget(self.pyrep, self.robot)) obs1 = scene.get_observation() obs2 = scene.get_observation() self.assertTrue( np.array_equal(obs1.right_shoulder_rgb, obs2.right_shoulder_rgb)) self.assertFalse( np.array_equal(obs1.left_shoulder_rgb, obs2.left_shoulder_rgb)) self.assertTrue(obs1.left_shoulder_rgb.max() <= 1.0) self.assertTrue(obs1.left_shoulder_rgb.min() >= 0.0) def test_sensor_noise_robot(self): obs_config = ObservationConfig( joint_velocities_noise=GaussianNoise(0.01), joint_forces=False, task_low_dim_state=False) scene = Scene(self.pyrep, self.robot, obs_config) scene.load(ReachTarget(self.pyrep, self.robot)) obs1 = scene.get_observation() obs2 = scene.get_observation() self.assertTrue( np.array_equal(obs1.joint_positions, obs2.joint_positions)) self.assertFalse( np.array_equal(obs1.joint_velocities, obs2.joint_velocities))
[ "rlbench.backend.scene.Scene", "rlbench.observation_config.ObservationConfig", "pyrep.robots.arms.panda.Panda", "pyrep.robots.end_effectors.panda_gripper.PandaGripper", "os.path.join", "rlbench.tasks.reach_target.ReachTarget", "numpy.array_equal", "pyrep.PyRep", "os.path.abspath", "rlbench.noise_m...
[((571, 593), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (583, 593), False, 'from os import path\n'), ((843, 850), 'pyrep.PyRep', 'PyRep', ([], {}), '()\n', (848, 850), False, 'from pyrep import PyRep\n'), ((1224, 1324), 'rlbench.observation_config.ObservationConfig', 'ObservationConfig', ([], {'left_shoulder_camera': 'cam_config', 'joint_forces': '(False)', 'task_low_dim_state': '(False)'}), '(left_shoulder_camera=cam_config, joint_forces=False,\n task_low_dim_state=False)\n', (1241, 1324), False, 'from rlbench.observation_config import ObservationConfig, CameraConfig\n'), ((1374, 1415), 'rlbench.backend.scene.Scene', 'Scene', (['self.pyrep', 'self.robot', 'obs_config'], {}), '(self.pyrep, self.robot, obs_config)\n', (1379, 1415), False, 'from rlbench.backend.scene import Scene\n'), ((2099, 2140), 'rlbench.backend.scene.Scene', 'Scene', (['self.pyrep', 'self.robot', 'obs_config'], {}), '(self.pyrep, self.robot, obs_config)\n', (2104, 2140), False, 'from rlbench.backend.scene import Scene\n'), ((877, 913), 'os.path.join', 'join', (['environment.DIR_PATH', 'TTT_FILE'], {}), '(environment.DIR_PATH, TTT_FILE)\n', (881, 913), False, 'from os.path import join\n'), ((1007, 1014), 'pyrep.robots.arms.panda.Panda', 'Panda', ([], {}), '()\n', (1012, 1014), False, 'from pyrep.robots.arms.panda import Panda\n'), ((1016, 1030), 'pyrep.robots.end_effectors.panda_gripper.PandaGripper', 'PandaGripper', ([], {}), '()\n', (1028, 1030), False, 'from pyrep.robots.end_effectors.panda_gripper import PandaGripper\n'), ((1435, 1470), 'rlbench.tasks.reach_target.ReachTarget', 'ReachTarget', (['self.pyrep', 'self.robot'], {}), '(self.pyrep, self.robot)\n', (1446, 1470), False, 'from rlbench.tasks.reach_target import ReachTarget\n'), ((1587, 1651), 'numpy.array_equal', 'np.array_equal', (['obs1.right_shoulder_rgb', 'obs2.right_shoulder_rgb'], {}), '(obs1.right_shoulder_rgb, obs2.right_shoulder_rgb)\n', (1601, 1651), True, 'import numpy as np\n'), ((1691, 1753), 'numpy.array_equal', 'np.array_equal', (['obs1.left_shoulder_rgb', 'obs2.left_shoulder_rgb'], {}), '(obs1.left_shoulder_rgb, obs2.left_shoulder_rgb)\n', (1705, 1753), True, 'import numpy as np\n'), ((2160, 2195), 'rlbench.tasks.reach_target.ReachTarget', 'ReachTarget', (['self.pyrep', 'self.robot'], {}), '(self.pyrep, self.robot)\n', (2171, 2195), False, 'from rlbench.tasks.reach_target import ReachTarget\n'), ((2312, 2370), 'numpy.array_equal', 'np.array_equal', (['obs1.joint_positions', 'obs2.joint_positions'], {}), '(obs1.joint_positions, obs2.joint_positions)\n', (2326, 2370), True, 'import numpy as np\n'), ((2410, 2470), 'numpy.array_equal', 'np.array_equal', (['obs1.joint_velocities', 'obs2.joint_velocities'], {}), '(obs1.joint_velocities, obs2.joint_velocities)\n', (2424, 2470), True, 'import numpy as np\n'), ((1172, 1203), 'rlbench.noise_model.GaussianNoise', 'GaussianNoise', (['(0.05)', '(0.0, 1.0)'], {}), '(0.05, (0.0, 1.0))\n', (1185, 1203), False, 'from rlbench.noise_model import GaussianNoise\n'), ((1992, 2011), 'rlbench.noise_model.GaussianNoise', 'GaussianNoise', (['(0.01)'], {}), '(0.01)\n', (2005, 2011), False, 'from rlbench.noise_model import GaussianNoise\n')]
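A small, hedged sketch for running just this test case programmatically; it assumes the module above is importable and that CoppeliaSim/PyRep are installed and configured as the tests require.

import unittest

# Build a suite containing only TestScene and run it with verbose output.
suite = unittest.TestLoader().loadTestsFromTestCase(TestScene)
unittest.TextTestRunner(verbosity=2).run(suite)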
""" Running operational space control with a PyGame display, and using the pydmps library to specify a trajectory for the end-effector to follow, in this case, a bell shaped velocity profile. To install the pydmps library, clone https://github.com/studywolf/pydmps and run 'python setup.py develop' ***NOTE*** there are two ways to use this filter 1: wrt to timesteps - the dmp is created during the instantiation of the class and the next step along the path is returned by calling the `step()` function 2: wrt to time - after instantiation, calling `generate_path_function()` interpolates the dmp to the specified time limit. Calling the `next_timestep(t)` function at a specified time will return the end-effector state at that point along the path planner. This ensures that the path will reach the desired target within the time_limit specified in `generate_path_function()` """ import numpy as np import matplotlib.pyplot as plt try: import pydmps except ImportError: print('\npydmps library required, see github.com/studywolf/pydmps\n') from .path_planner import PathPlanner class BellShaped(PathPlanner): """ PARAMETERS ---------- n_timesteps: int, Optional (Default: 3000) the number of steps to break the path into error_scale: int, Optional (Default: 1) the scaling factor to apply to the error term, increasing error passed 1 will increase the speed of motion """ def __init__(self, n_timesteps=3000, error_scale=1): self.n_timesteps = n_timesteps self.error_scale = error_scale # create a dmp for a straight reach with a bell shaped velocity profile x = np.linspace(0, np.pi*2, 100) a = 1 # amplitude b = np.pi # center c = 1 # std deviation g = a * np.exp(-(x-b)**2/(2*c)**2) g /= np.sum(g) # normalize # integrate desired velocities to get desired positions over time y_des = np.cumsum(g) # want to follow the same trajectory in (x, y, z) y_des = np.vstack([y_des, y_des, y_des]) # we can control the DMP rollout speed with the time step size # the DMP will reach the target in 1s of sim time dt = 1 / n_timesteps self.dmps = pydmps.DMPs_discrete(n_dmps=3, n_bfs=50, dt=dt) self.dmps.imitate_path(y_des) def generate_path(self, position, target_pos, plot=False): """ Calls the step function self.n_timestep times to pregenerate the entire path planner PARAMETERS ---------- position: numpy.array the current position of the system target_pos: numpy.array the target position plot: boolean, optional (Default: False) plot the path after generating if True """ self.reset(target_pos=target_pos, position=position) self.position, self.velocity, _ = self.dmps.rollout( timesteps=self.n_timesteps) self.position = np.array([traj + self.origin for traj in self.position]) # reset trajectory index self.n = 0 if plot: plt.plot(self.position) plt.legend(['X', 'Y', 'Z']) plt.show() return self.position, self.velocity def reset(self, target_pos, position): """ Resets the dmp path planner to a new state and target_pos PARAMETERS ---------- target_pos: list of 3 floats the target_pos end-effector position in cartesian coordinates [meters] position: list of 3 floats the current end-effector cartesian position [meters] """ self.origin = position self.dmps.reset_state() self.dmps.goal = target_pos - self.origin def _step(self, error=None): """ Steps through the dmp, returning the next position and velocity along the path planner. 
""" if error is None: error = 0 # get the next point in the target trajectory from the dmp position, velocity, _ = self.dmps.step(error=error * self.error_scale) # add the start position offset since the dmp starts from the origin position = position + self.origin return position, velocity
[ "pydmps.DMPs_discrete", "matplotlib.pyplot.plot", "numpy.exp", "numpy.sum", "numpy.linspace", "numpy.array", "numpy.vstack", "numpy.cumsum", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ]
[((1667, 1697), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * 2)', '(100)'], {}), '(0, np.pi * 2, 100)\n', (1678, 1697), True, 'import numpy as np\n'), ((1838, 1847), 'numpy.sum', 'np.sum', (['g'], {}), '(g)\n', (1844, 1847), True, 'import numpy as np\n'), ((1951, 1963), 'numpy.cumsum', 'np.cumsum', (['g'], {}), '(g)\n', (1960, 1963), True, 'import numpy as np\n'), ((2038, 2070), 'numpy.vstack', 'np.vstack', (['[y_des, y_des, y_des]'], {}), '([y_des, y_des, y_des])\n', (2047, 2070), True, 'import numpy as np\n'), ((2250, 2297), 'pydmps.DMPs_discrete', 'pydmps.DMPs_discrete', ([], {'n_dmps': '(3)', 'n_bfs': '(50)', 'dt': 'dt'}), '(n_dmps=3, n_bfs=50, dt=dt)\n', (2270, 2297), False, 'import pydmps\n'), ((2993, 3051), 'numpy.array', 'np.array', (['[(traj + self.origin) for traj in self.position]'], {}), '([(traj + self.origin) for traj in self.position])\n', (3001, 3051), True, 'import numpy as np\n'), ((1798, 1834), 'numpy.exp', 'np.exp', (['(-(x - b) ** 2 / (2 * c) ** 2)'], {}), '(-(x - b) ** 2 / (2 * c) ** 2)\n', (1804, 1834), True, 'import numpy as np\n'), ((3133, 3156), 'matplotlib.pyplot.plot', 'plt.plot', (['self.position'], {}), '(self.position)\n', (3141, 3156), True, 'import matplotlib.pyplot as plt\n'), ((3169, 3196), 'matplotlib.pyplot.legend', 'plt.legend', (["['X', 'Y', 'Z']"], {}), "(['X', 'Y', 'Z'])\n", (3179, 3196), True, 'import matplotlib.pyplot as plt\n'), ((3209, 3219), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3217, 3219), True, 'import matplotlib.pyplot as plt\n')]
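A hedged sketch of the first usage mode described in the module docstring (stepping with respect to timesteps, after pregenerating the whole path). The start and target positions are made-up values in meters, and the BellShaped class from the module above is assumed to be importable.

import numpy as np

planner = BellShaped(n_timesteps=3000)

# Pregenerate the full bell-shaped reach from a made-up start to a made-up target.
position, velocity = planner.generate_path(
    position=np.zeros(3),                 # current end-effector position [m]
    target_pos=np.array([0.3, 0.2, 0.5]),  # hypothetical target [m]
)
print(position.shape, velocity.shape)        # (n_timesteps, 3) each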
from torch.utils.data import DataLoader import torch from tqdm import tqdm import os from shutil import copyfile import numpy as np import matplotlib.pyplot as plt from src.generic_model import Criterian from .dataloader import DataLoaderSYNTH from src.utils.data_manipulation import denormalize_mean_variance import train_synth.config as config from src.utils.parallel import DataParallelModel, DataParallelCriterion from src.utils.utils import calculate_batch_fscore, generate_word_bbox_batch, _init_fn os.environ['CUDA_VISIBLE_DEVICES'] = str(config.num_cuda) def save(data, output, target, target_affinity, drawn_image, no): """ Saving the synthesised outputs in between the training :param data: image as tensor :param output: predicted output from the model as tensor :param target: character heatmap target as tensor :param target_affinity: affinity heatmap target as tensor :param no: current iteration number :return: None """ output = output.data.cpu().numpy() data = data.data.cpu().numpy() target = target.data.cpu().numpy() target_affinity = target_affinity.data.cpu().numpy() drawn_image = drawn_image.data.cpu().numpy() batch_size = output.shape[0] base = config.DataLoaderSYNTH_Train_Synthesis+str(no)+'/' os.makedirs(base, exist_ok=True) for i in range(batch_size): os.makedirs(base+str(i), exist_ok=True) character_bbox = output[i, 0, :, :] affinity_bbox = output[i, 1, :, :] plt.imsave(base+str(i) + '/image.png', denormalize_mean_variance(data[i].transpose(1, 2, 0))) plt.imsave(base+str(i) + '/target_characters.png', target[i, :, :]) plt.imsave(base+str(i) + '/target_affinity.png', target_affinity[i, :, :]) blob = np.logical_or( target[i, :, :] > config.threshold_character, target_affinity[i, :, :] > config.threshold_affinity ) blob = np.float32(blob) plt.imsave(base + str(i) + '/blob.png', blob) plt.imsave(base + str(i) + '/pred_characters.png', character_bbox) plt.imsave(base + str(i) + '/pred_affinity.png', affinity_bbox) # Thresholding the character and affinity heatmap plt.imsave( base + str(i) + '/pred_characters_thresh.png', np.float32(character_bbox > config.threshold_character) ) plt.imsave( base + str(i) + '/pred_affinity_thresh.png', np.float32(affinity_bbox > config.threshold_affinity) ) plt.imsave( base + str(i) + '/drawn_image.png', drawn_image[i] ) def train(dataloader, loss_criterian, model, optimizer, starting_no, all_loss, all_accuracy): """ Function to test :param dataloader: Pytorch dataloader :param loss_criterian: Loss function with OHNM using MSE Loss :param model: Pytorch model of UNet-ResNet :param optimizer: Adam Optimizer :param starting_no: how many items to skip in the dataloader :param all_loss: list of all loss values :param all_accuracy: list of all f-scores :return: all iteration loss values """ model.train() optimizer.zero_grad() iterator = tqdm(dataloader) def change_lr(no_i): for i in config.lr: if i == no_i: print('Learning Rate Changed to ', config.lr[i]) for param_group in optimizer.param_groups: param_group['lr'] = config.lr[i] for no, (image, weight, weight_affinity, drawn_image) in enumerate(iterator): change_lr(no) if config.pretrained: if no == starting_no: dataloader.start = True continue elif no < starting_no: continue if config.use_cuda: image, weight, weight_affinity = image.cuda(), weight.cuda(), weight_affinity.cuda() output = model(image) loss = loss_criterian(output, weight, weight_affinity).mean()/config.optimizer_iteration all_loss.append(loss.item()*config.optimizer_iteration) loss.backward() if (no + 1) % config.optimizer_iteration == 0: 
optimizer.step() optimizer.zero_grad() if no >= 2000: # Calculating the f-score after some iterations because initially there are a lot of stray contours if no % config.periodic_fscore == 0: if type(output) == list: output = [x.cpu() for x in output] output = torch.cat(output, dim=0) predicted_bbox = generate_word_bbox_batch( output[:, 0, :, :].data.cpu().numpy(), output[:, 1, :, :].data.cpu().numpy(), character_threshold=config.threshold_character, affinity_threshold=config.threshold_affinity, word_threshold=config.threshold_word, character_threshold_upper=config.threshold_character_upper, # affinity_threshold_upper=config.threshold_affinity_upper, scaling_character=config.scale_character, scaling_affinity=config.scale_affinity, ) target_bbox = generate_word_bbox_batch( weight.data.cpu().numpy(), weight_affinity.data.cpu().numpy(), character_threshold=config.threshold_character, affinity_threshold=config.threshold_affinity, word_threshold=config.threshold_word, character_threshold_upper=config.threshold_character_upper, affinity_threshold_upper=config.threshold_affinity_upper, scaling_character=config.scale_character, scaling_affinity=config.scale_affinity, ) all_accuracy.append( calculate_batch_fscore( predicted_bbox, target_bbox, threshold=config.threshold_fscore, text_target=None ) ) if len(all_accuracy) == 0: iterator.set_description( 'Loss:' + str( int(loss.item() * config.optimizer_iteration * 100000000) / 100000000) + ' Iterations:[' + str(no) + '/' + str( len(iterator)) + '] Average Loss:' + str(int(np.array(all_loss)[-min(1000, len(all_loss)):].mean() * 100000000) / 100000000)) else: iterator.set_description( 'Loss:' + str(int(loss.item() * config.optimizer_iteration * 100000000) / 100000000) + ' Iterations:[' + str(no) + '/' + str(len(iterator)) + '] Average Loss:' + str(int(np.array(all_loss)[-min(1000, len(all_loss)):].mean()*100000000)/100000000) + '| Average F-Score: ' + str(int(np.array(all_accuracy)[-min(1000, len(all_accuracy)):].mean()*100000000)/100000000) ) if no % config.periodic_output == 0: if type(output) == list: output = [x.cpu() for x in output] output = torch.cat(output, dim=0) save(image, output, weight, weight_affinity, drawn_image, no) if no % config.periodic_save == 0 and no != 0: torch.save( { 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict() }, config.save_path + '/' + str(no) + '_model.pkl') np.save(config.save_path + '/loss_plot_training.npy', all_loss) plt.plot(all_loss) plt.savefig(config.save_path + '/loss_plot_training.png') plt.clf() return all_loss def main(): copyfile('train_synth/config.py', config.save_path + '/config.py') if config.model_architecture == 'UNET_ResNet': from src.UNET_ResNet import UNetWithResnet50Encoder model = UNetWithResnet50Encoder() else: from src.craft_model import CRAFT model = CRAFT() model_parameters = filter(lambda p: p.requires_grad, model.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) print('Total number of trainable parameters: ', params) model = DataParallelModel(model) loss_criterian = DataParallelCriterion(Criterian()) if config.use_cuda: model = model.cuda() optimizer = torch.optim.Adam(model.parameters(), lr=config.lr[1]) if config.pretrained: saved_model = torch.load(config.pretrained_path) model.load_state_dict(saved_model['state_dict']) optimizer.load_state_dict(saved_model['optimizer']) starting_no = int(config.pretrained_path.split('/')[-1].split('_')[0]) all_loss = np.load(config.pretrained_loss_plot_training).tolist() print('Loaded the model') else: 
starting_no = 0 all_loss = [] all_accuracy = [] print('Loading the dataloader') train_dataloader = DataLoaderSYNTH('train') train_dataloader = DataLoader( train_dataloader, batch_size=config.batch_size['train'], shuffle=True, num_workers=config.num_workers['train'], worker_init_fn=_init_fn) print('Loaded the dataloader') all_loss = train( train_dataloader, loss_criterian, model, optimizer, starting_no=starting_no, all_loss=all_loss, all_accuracy=all_accuracy) torch.save( { 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict() }, config.save_path + '/final_model.pkl') np.save(config.save_path + '/loss_plot_training.npy', all_loss) plt.plot(all_loss) plt.savefig(config.save_path + '/loss_plot_training.png') plt.clf() print("Saved Final Model")
[ "numpy.array", "train_synth.config.pretrained_path.split", "numpy.save", "matplotlib.pyplot.plot", "src.utils.utils.calculate_batch_fscore", "src.utils.parallel.DataParallelModel", "matplotlib.pyplot.savefig", "src.UNET_ResNet.UNetWithResnet50Encoder", "src.generic_model.Criterian", "shutil.copyfi...
[((1249, 1281), 'os.makedirs', 'os.makedirs', (['base'], {'exist_ok': '(True)'}), '(base, exist_ok=True)\n', (1260, 1281), False, 'import os\n'), ((2940, 2956), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (2944, 2956), False, 'from tqdm import tqdm\n'), ((6590, 6656), 'shutil.copyfile', 'copyfile', (['"""train_synth/config.py"""', "(config.save_path + '/config.py')"], {}), "('train_synth/config.py', config.save_path + '/config.py')\n", (6598, 6656), False, 'from shutil import copyfile\n'), ((7061, 7085), 'src.utils.parallel.DataParallelModel', 'DataParallelModel', (['model'], {}), '(model)\n', (7078, 7085), False, 'from src.utils.parallel import DataParallelModel, DataParallelCriterion\n'), ((7763, 7915), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataloader'], {'batch_size': "config.batch_size['train']", 'shuffle': '(True)', 'num_workers': "config.num_workers['train']", 'worker_init_fn': '_init_fn'}), "(train_dataloader, batch_size=config.batch_size['train'], shuffle\n =True, num_workers=config.num_workers['train'], worker_init_fn=_init_fn)\n", (7773, 7915), False, 'from torch.utils.data import DataLoader\n'), ((8236, 8299), 'numpy.save', 'np.save', (["(config.save_path + '/loss_plot_training.npy')", 'all_loss'], {}), "(config.save_path + '/loss_plot_training.npy', all_loss)\n", (8243, 8299), True, 'import numpy as np\n'), ((8301, 8319), 'matplotlib.pyplot.plot', 'plt.plot', (['all_loss'], {}), '(all_loss)\n', (8309, 8319), True, 'import matplotlib.pyplot as plt\n'), ((8321, 8378), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(config.save_path + '/loss_plot_training.png')"], {}), "(config.save_path + '/loss_plot_training.png')\n", (8332, 8378), True, 'import matplotlib.pyplot as plt\n'), ((8380, 8389), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8387, 8389), True, 'import matplotlib.pyplot as plt\n'), ((1685, 1803), 'numpy.logical_or', 'np.logical_or', (['(target[i, :, :] > config.threshold_character)', '(target_affinity[i, :, :] > config.threshold_affinity)'], {}), '(target[i, :, :] > config.threshold_character, target_affinity\n [i, :, :] > config.threshold_affinity)\n', (1698, 1803), True, 'import numpy as np\n'), ((1819, 1835), 'numpy.float32', 'np.float32', (['blob'], {}), '(blob)\n', (1829, 1835), True, 'import numpy as np\n'), ((6770, 6795), 'src.UNET_ResNet.UNetWithResnet50Encoder', 'UNetWithResnet50Encoder', ([], {}), '()\n', (6793, 6795), False, 'from src.UNET_ResNet import UNetWithResnet50Encoder\n'), ((6849, 6856), 'src.craft_model.CRAFT', 'CRAFT', ([], {}), '()\n', (6854, 6856), False, 'from src.craft_model import CRAFT\n'), ((7126, 7137), 'src.generic_model.Criterian', 'Criterian', ([], {}), '()\n', (7135, 7137), False, 'from src.generic_model import Criterian\n'), ((7292, 7326), 'torch.load', 'torch.load', (['config.pretrained_path'], {}), '(config.pretrained_path)\n', (7302, 7326), False, 'import torch\n'), ((2142, 2197), 'numpy.float32', 'np.float32', (['(character_bbox > config.threshold_character)'], {}), '(character_bbox > config.threshold_character)\n', (2152, 2197), True, 'import numpy as np\n'), ((2267, 2320), 'numpy.float32', 'np.float32', (['(affinity_bbox > config.threshold_affinity)'], {}), '(affinity_bbox > config.threshold_affinity)\n', (2277, 2320), True, 'import numpy as np\n'), ((6396, 6459), 'numpy.save', 'np.save', (["(config.save_path + '/loss_plot_training.npy')", 'all_loss'], {}), "(config.save_path + '/loss_plot_training.npy', all_loss)\n", (6403, 6459), True, 'import numpy as np\n'), ((6463, 6481), 
'matplotlib.pyplot.plot', 'plt.plot', (['all_loss'], {}), '(all_loss)\n', (6471, 6481), True, 'import matplotlib.pyplot as plt\n'), ((6485, 6542), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(config.save_path + '/loss_plot_training.png')"], {}), "(config.save_path + '/loss_plot_training.png')\n", (6496, 6542), True, 'import matplotlib.pyplot as plt\n'), ((6546, 6555), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6553, 6555), True, 'import matplotlib.pyplot as plt\n'), ((6093, 6117), 'torch.cat', 'torch.cat', (['output'], {'dim': '(0)'}), '(output, dim=0)\n', (6102, 6117), False, 'import torch\n'), ((7518, 7563), 'numpy.load', 'np.load', (['config.pretrained_loss_plot_training'], {}), '(config.pretrained_loss_plot_training)\n', (7525, 7563), True, 'import numpy as np\n'), ((4032, 4056), 'torch.cat', 'torch.cat', (['output'], {'dim': '(0)'}), '(output, dim=0)\n', (4041, 4056), False, 'import torch\n'), ((5090, 5199), 'src.utils.utils.calculate_batch_fscore', 'calculate_batch_fscore', (['predicted_bbox', 'target_bbox'], {'threshold': 'config.threshold_fscore', 'text_target': 'None'}), '(predicted_bbox, target_bbox, threshold=config.\n threshold_fscore, text_target=None)\n', (5112, 5199), False, 'from src.utils.utils import calculate_batch_fscore, generate_word_bbox_batch, _init_fn\n'), ((7452, 7485), 'train_synth.config.pretrained_path.split', 'config.pretrained_path.split', (['"""/"""'], {}), "('/')\n", (7480, 7485), True, 'import train_synth.config as config\n'), ((5467, 5485), 'numpy.array', 'np.array', (['all_loss'], {}), '(all_loss)\n', (5475, 5485), True, 'import numpy as np\n'), ((5883, 5905), 'numpy.array', 'np.array', (['all_accuracy'], {}), '(all_accuracy)\n', (5891, 5905), True, 'import numpy as np\n'), ((5769, 5787), 'numpy.array', 'np.array', (['all_loss'], {}), '(all_loss)\n', (5777, 5787), True, 'import numpy as np\n')]
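A hedged sketch of restoring a checkpoint written by the training loop above. The checkpoint path and the CRAFT architecture choice are assumptions; the {'state_dict', 'optimizer'} layout matches what train() and main() actually save.

import torch
from src.craft_model import CRAFT
from src.utils.parallel import DataParallelModel

# Assumes config.model_architecture was not 'UNET_ResNet' during training.
model = DataParallelModel(CRAFT())
checkpoint = torch.load("save/final_model.pkl",  # hypothetical save_path
                        map_location="cpu")
model.load_state_dict(checkpoint["state_dict"])
model.eval()                                     # switch to inference mode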
# ==============================================================================
# MIT License
#
# Copyright 2020 Institute for Automotive Engineering of RWTH Aachen University.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================

import numpy as np

# for dataset 1_FRLR
H = [
    np.array([[4.651574574230558e-14, 10.192351107009959, -5.36318723862984e-07], [-5.588661045867985e-07, 0.0, 2.3708767903941617], [35.30731833118676, 0.0, -1.7000018578614013]]),  # front
    np.array([[-5.336674306912119e-14, -10.192351107009957, 5.363187220578325e-07], [5.588660952931949e-07, 3.582264351370481e-23, 2.370876772982613], [-35.30731833118661, -2.263156574813233e-15, -0.5999981421386035]]),  # rear
    np.array([[20.38470221401992, 7.562206982469407e-14, -0.28867638384075833], [-3.422067857504854e-23, 2.794330463189411e-07, 2.540225111648729], [2.1619497190382224e-15, -17.65365916559334, -0.4999990710692976]]),  # left
    np.array([[-20.38470221401991, -4.849709834037436e-15, 0.2886763838407495], [-3.4220679184765114e-23, -2.794330512976549e-07, 2.5402251116487626], [2.161949719038217e-15, 17.653659165593304, -0.5000009289306967]])  # right
]
[ "numpy.array" ]
[((1367, 1553), 'numpy.array', 'np.array', (['[[4.651574574230558e-14, 10.192351107009959, -5.36318723862984e-07], [-\n 5.588661045867985e-07, 0.0, 2.3708767903941617], [35.30731833118676, \n 0.0, -1.7000018578614013]]'], {}), '([[4.651574574230558e-14, 10.192351107009959, -5.36318723862984e-07\n ], [-5.588661045867985e-07, 0.0, 2.3708767903941617], [\n 35.30731833118676, 0.0, -1.7000018578614013]])\n', (1375, 1553), True, 'import numpy as np\n'), ((1581, 1810), 'numpy.array', 'np.array', (['[[-5.336674306912119e-14, -10.192351107009957, 5.363187220578325e-07], [\n 5.588660952931949e-07, 3.582264351370481e-23, 2.370876772982613], [-\n 35.30731833118661, -2.263156574813233e-15, -0.5999981421386035]]'], {}), '([[-5.336674306912119e-14, -10.192351107009957, \n 5.363187220578325e-07], [5.588660952931949e-07, 3.582264351370481e-23, \n 2.370876772982613], [-35.30731833118661, -2.263156574813233e-15, -\n 0.5999981421386035]])\n', (1589, 1810), True, 'import numpy as np\n'), ((1832, 2052), 'numpy.array', 'np.array', (['[[20.38470221401992, 7.562206982469407e-14, -0.28867638384075833], [-\n 3.422067857504854e-23, 2.794330463189411e-07, 2.540225111648729], [\n 2.1619497190382224e-15, -17.65365916559334, -0.4999990710692976]]'], {}), '([[20.38470221401992, 7.562206982469407e-14, -0.28867638384075833],\n [-3.422067857504854e-23, 2.794330463189411e-07, 2.540225111648729], [\n 2.1619497190382224e-15, -17.65365916559334, -0.4999990710692976]])\n', (1840, 2052), True, 'import numpy as np\n'), ((2080, 2301), 'numpy.array', 'np.array', (['[[-20.38470221401991, -4.849709834037436e-15, 0.2886763838407495], [-\n 3.4220679184765114e-23, -2.794330512976549e-07, 2.5402251116487626], [\n 2.161949719038217e-15, 17.653659165593304, -0.5000009289306967]]'], {}), '([[-20.38470221401991, -4.849709834037436e-15, 0.2886763838407495],\n [-3.4220679184765114e-23, -2.794330512976549e-07, 2.5402251116487626],\n [2.161949719038217e-15, 17.653659165593304, -0.5000009289306967]])\n', (2088, 2301), True, 'import numpy as np\n')]
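A hedged worked example for the matrices above: treating each entry as a 3x3 planar homography and mapping a made-up image point through it. The source itself shows no usage, so both the interpretation and the pixel coordinates are assumptions.

import numpy as np

def warp_point(H_cam, u, v):
    """Map pixel (u, v) through a 3x3 homography via homogeneous coordinates."""
    p = H_cam @ np.array([u, v, 1.0])
    return p[:2] / p[2]                   # perspective divide back to 2D

print(warp_point(H[0], 320.0, 240.0))     # front camera, hypothetical pixel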
import os, sys import argparse from collections import defaultdict import numpy as np from netCDF4 import Dataset import adios2 try: from mpi4py import MPI if MPI.COMM_WORLD.Get_size() > 1: parallel = True else: parallel = False except ImportError: parallel = False def progress(count, total, status=""): bar_len = 60 filled_len = int(round(bar_len * count / float(total))) percents = round(100.0 * count / float(total), 1) bar = "=" * filled_len + "-" * (bar_len - filled_len) sys.stdout.write("\033[K") sys.stdout.write("[{0}] {1}% {2}\r".format(bar, percents, status)) sys.stdout.flush() def Locate(rank, nproc, datasize): extra = 0 if rank == nproc - 1: extra = datasize % nproc num = datasize // nproc start = num * rank size = num + extra return start, size def open_files(input_file, output_file, parallel=False, diskless=False): if parallel: adios2f = adios2.open(input_file, "r", comm=MPI.COMM_WORLD) else: adios2f = adios2.open(input_file, "r") netcdff = Dataset( output_file, "w", format="NETCDF4", parallel=parallel, diskless=diskless, ) netcdff.set_fill_off() return (adios2f, netcdff) def r_attrs(adios2f): attrs = adios2f.available_attributes() var_attrs = defaultdict(dict) # init 2d dict global_attrs = {} for attr in attrs.keys(): # "Dims" attribute not needed in NetCDF file if "/Dims" in attr: continue try: val = adios2f.read_attribute(attr) except ValueError: val = adios2f.read_attribute_string(attr) if "/" in attr: var, var_attrib = attr.split("/") var_attrs[var][var_attrib] = val else: if not attr.startswith("_DIM_"): global_attrs[attr] = val return (attrs, var_attrs, global_attrs) def r_metadata(adios2f, attrs): vars = adios2f.available_variables() var_names = list(vars.keys()) num_steps = adios2f.steps() dim_lens = { key[5:]: int(value["Value"]) for key, value in attrs.items() if key.startswith("_DIM_") } var_dims = {} for var in vars: dims = adios2f.read_attribute_string("Dims", var) dims.reverse() var_dims[var] = dims typemap = {"float": "f", "int32_t": "i", "string": "c"} var_types = {} for var in vars: vtype = vars[var]["Type"] var_types[var] = typemap[vtype] return (var_names, num_steps, dim_lens, var_dims, var_types, vars) def create_nc_dims(netcdff, num_steps, dim_lens): netcdff.createDimension("Time", size=num_steps) for dim in dim_lens: netcdff.createDimension(dim, size=dim_lens[dim]) def create_nc_vars(netcdff, var_names, var_types, var_dims): for var in var_names: netcdff.createVariable(var, var_types[var], var_dims[var]) def decomp(var, vars): comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() dshape = vars[var]["Shape"].split(",") dshape = list(map(int, dshape)) max_ind = np.argmax(np.array(dshape)) # do not decompose small arrays if dshape[max_ind] < 50: start_arr = np.zeros_like(dshape) count_arr = np.array(dshape) else: start, count = Locate(rank, size, dshape[max_ind]) start_arr = np.zeros_like(dshape) start_arr[max_ind] = start count_arr = np.array(dshape) count_arr[max_ind] = count return start_arr, count_arr def r_w_data_serial(adios2f, netcdff, var_names, num_steps, vars): for i, var in enumerate(var_names): progress(i, len(var_names), status=var) for step in range(num_steps): if vars[var]["Type"] == "string": data = adios2f.read_string(var) netcdff.variables[var][step, :] = data else: data = adios2f.read(var) if data.ndim == 1 and len(data) == 1: netcdff.variables[var][step] = data else: netcdff.variables[var][step, :] = data def r_w_data_parallel(adios2f, netcdff, var_names, num_steps, vars): comm = MPI.COMM_WORLD for i, var in 
enumerate(var_names): progress(i, len(var_names), status=var) if vars[var]["Type"] != "string": start_arr, count_arr = decomp(var, vars) for step in range(num_steps): if vars[var]["Type"] == "string": data = adios2f.read_string(var) netcdff.variables[var][step, :] = data else: data = adios2f.read(var, start=start_arr, count=count_arr) if data.ndim == 1 and len(data) == 1: netcdff.variables[var][step] = data else: netcdff.variables[var].set_collective(True) if len(start_arr) == 3: netcdff.variables[var][ step, start_arr[0] : start_arr[0] + count_arr[0], start_arr[1] : start_arr[1] + count_arr[1], start_arr[2] : start_arr[2] + count_arr[2], ] = data elif len(start_arr) == 2: netcdff.variables[var][ step, start_arr[0] : start_arr[0] + count_arr[0], start_arr[1] : start_arr[1] + count_arr[1], ] = data elif len(start_arr) == 1: netcdff.variables[var][ step, start_arr[0] : start_arr[0] + count_arr[0] ] = data def w_global_attrs(netcdff, global_attrs): netcdff.setncatts(global_attrs) def w_var_attrs(netcdff, var_attrs): for var in var_attrs.keys(): for attr in var_attrs[var].keys(): netcdff.variables[var].setncattr(attr, var_attrs[var][attr]) def close_files(adios2f, netcdff, diskless=False): if diskless == False: netcdff.close() adios2f.close() def convert(input_file, output_file, parallel=False, diskless=False): adios2f, netcdff = open_files( input_file, output_file, parallel=parallel, diskless=diskless ) attrs, var_attrs, global_attrs = r_attrs(adios2f) var_names, num_steps, dim_lens, var_dims, var_types, vars = r_metadata( adios2f, attrs ) create_nc_dims(netcdff, num_steps, dim_lens) create_nc_vars(netcdff, var_names, var_types, var_dims) if parallel: r_w_data_parallel(adios2f, netcdff, var_names, num_steps, vars) else: r_w_data_serial(adios2f, netcdff, var_names, num_steps, vars) w_global_attrs(netcdff, global_attrs) w_var_attrs(netcdff, var_attrs) close_files(adios2f, netcdff, diskless) if diskless == True: return netcdff if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--input", help="input: ADIOS2 file") parser.add_argument("--output", help="output: NetCDF file") args = parser.parse_args() convert(args.input, args.output, parallel=parallel)
[ "argparse.ArgumentParser", "netCDF4.Dataset", "mpi4py.MPI.COMM_WORLD.Get_size", "numpy.array", "collections.defaultdict", "adios2.open", "sys.stdout.flush", "numpy.zeros_like", "sys.stdout.write" ]
[((536, 562), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[K"""'], {}), "('\\x1b[K')\n", (552, 562), False, 'import os, sys\n'), ((638, 656), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (654, 656), False, 'import os, sys\n'), ((1095, 1181), 'netCDF4.Dataset', 'Dataset', (['output_file', '"""w"""'], {'format': '"""NETCDF4"""', 'parallel': 'parallel', 'diskless': 'diskless'}), "(output_file, 'w', format='NETCDF4', parallel=parallel, diskless=\n diskless)\n", (1102, 1181), False, 'from netCDF4 import Dataset\n'), ((1364, 1381), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1375, 1381), False, 'from collections import defaultdict\n'), ((7046, 7071), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7069, 7071), False, 'import argparse\n'), ((170, 195), 'mpi4py.MPI.COMM_WORLD.Get_size', 'MPI.COMM_WORLD.Get_size', ([], {}), '()\n', (193, 195), False, 'from mpi4py import MPI\n'), ((974, 1023), 'adios2.open', 'adios2.open', (['input_file', '"""r"""'], {'comm': 'MPI.COMM_WORLD'}), "(input_file, 'r', comm=MPI.COMM_WORLD)\n", (985, 1023), False, 'import adios2\n'), ((1052, 1080), 'adios2.open', 'adios2.open', (['input_file', '"""r"""'], {}), "(input_file, 'r')\n", (1063, 1080), False, 'import adios2\n'), ((3174, 3190), 'numpy.array', 'np.array', (['dshape'], {}), '(dshape)\n', (3182, 3190), True, 'import numpy as np\n'), ((3278, 3299), 'numpy.zeros_like', 'np.zeros_like', (['dshape'], {}), '(dshape)\n', (3291, 3299), True, 'import numpy as np\n'), ((3320, 3336), 'numpy.array', 'np.array', (['dshape'], {}), '(dshape)\n', (3328, 3336), True, 'import numpy as np\n'), ((3426, 3447), 'numpy.zeros_like', 'np.zeros_like', (['dshape'], {}), '(dshape)\n', (3439, 3447), True, 'import numpy as np\n'), ((3503, 3519), 'numpy.array', 'np.array', (['dshape'], {}), '(dshape)\n', (3511, 3519), True, 'import numpy as np\n')]
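A brief usage sketch for the ADIOS2-to-NetCDF converter above. The file names and the script name are hypothetical placeholders; the argparse entry point also allows direct command-line use.

# Hypothetical usage of the converter above; "simulation.bp" and
# "simulation.nc" are placeholder file names.
#
# Command line (serial):
#     python adios2_to_netcdf.py --input simulation.bp --output simulation.nc
# Command line (parallel; requires an MPI-enabled netCDF4 build):
#     mpirun -n 4 python adios2_to_netcdf.py --input simulation.bp --output simulation.nc

# Programmatic use: with diskless=True the Dataset stays in memory and
# convert() returns it instead of closing it, as in the code above.
nc = convert("simulation.bp", "simulation.nc", parallel=False, diskless=True)
print(list(nc.variables))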
# Copyright 2018 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import numpy as np import tensorflow as tf def unit_simplex_initialization(rng, batch_size, shape, dtype=tf.float32): mat = [] for i in range(batch_size): mat.append(rng.uniform(0, 1 / np.sum(shape), shape)) mat = np.array(mat) return tf.constant(mat, dtype=dtype)
[ "numpy.array", "tensorflow.constant", "numpy.sum" ]
[((886, 899), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (894, 899), True, 'import numpy as np\n'), ((911, 940), 'tensorflow.constant', 'tf.constant', (['mat'], {'dtype': 'dtype'}), '(mat, dtype=dtype)\n', (922, 940), True, 'import tensorflow as tf\n'), ((853, 866), 'numpy.sum', 'np.sum', (['shape'], {}), '(shape)\n', (859, 866), True, 'import numpy as np\n')]
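A minimal sketch of calling the initializer above. The only assumption is that rng exposes a NumPy-style uniform(low, high, size) method.

# Minimal call sketch for unit_simplex_initialization above.
import numpy as np

rng = np.random.RandomState(0)  # any object with uniform(low, high, size) works
batch = unit_simplex_initialization(rng, batch_size=4, shape=(3, 3))
print(batch.shape)  # (4, 3, 3)
# Every entry is drawn from U(0, 1/np.sum(shape)) = U(0, 1/6) here; note
# that np.sum(shape) adds the dimension sizes (3 + 3), not the element count.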
from collections import Counter import random import numpy as np def matches(vector, a): """ Returns indices where the elements of a vector match some value. Args: vector (ndarray(int)): A 1D numpy array describing a vector. a (int): The value to match. Returns: list(int): A list of indices for matching elements. Example:: >>> vector = np.array( [ 1, 0, 1, 0 ] ) >>> matches( vector, 0 ) [1, 3] """ return [ i for i, e in enumerate( vector ) if e == a ] def mutate(i, mutator): """Return a new Individual, generated by mutating a starting Individual. Args: i (Individual): The Individual to be mutated. mutator (func): A function that takes a 1D numpy array as an argument, and returns a "mutated" new 1D numpy array. Returns: (Individual) Example:: >>> vector = np.array( [ 1, 0, 1, 0 ] ) >>> i = Individual( vector ) >>> m = lambda x: 1-x # element-wise 1 <--> 0 >>> mutate(i, m) Individual([0 1 0 1]) """ return Individual( vector=mutator( i.vector ) ) def crossover(i1, i2): return Individual(np.array([random.choice([a, b]) for a, b in zip(i1.vector, i2.vector)])) class Individual: """ Class definition for Individual objects. An Individual describes a single potential solution within the problem vector space. Example:: >>> vector = np.array( [ 1, 0, 1, 0 ] ) >>> Individual( vector ) Individual([1 0 1 0]) """ def __init__( self, vector ): """Create an `Individual` object. Example:: >>> vector = np.array( [ 1, 0, 1, 0 ] ) >>> Individual( vector ) Individual([1 0 1 0]) Args: vector (ndarray(int)): A vector of integers, describing this particular potential solution. Returns: None """ self.vector = vector self._score = None def fitness_score( self, fitness_function, use_saved_value=True ): """Returns the fitness score of this `Individual`, evaluated with a particular objective function. Example:: >>> vector = np.array( [ 1, 0, 1, 0 ] ) >>> ind = Individual( vector ) >>> objective_function = lambda x: sum( x ) >>> ind.fitness_score( objective_function ) 2 Args: fitness_function (function): The objective function, f(x), where x is the vector for this Individual. use_saved_value (optional:bool): The first time `fitness_score()` is called, the score is saved. If `use_saved_value` is `True`, subsequent calls will return the saved value instead of recalculating f(x). To force recalculation of f(x), set `use_saved_value=False`. Default: `True`. Returns: (float): The fitness score of this `Individual`. """ if not self._score or not use_saved_value: self._score = fitness_function( self.vector ) return self._score @property def score( self ): """Returns the fitness score of this `Individual`, providing this has already been calculated by passing the objective function to `fitness_score( f(x) )`. If the score has not yet been evaluated, trying to access this attribute will raise an `AtttributeError`. Example:: >>> ind = Individual( np.array( [ 1, 0, 1, 0 ] ) ) >>> objective_function = lambda x: sum( x ) >>> ind.fitness_score( objective_function ) 2 >>> ind.score 2 Args: None Returns: (float): The fitness score of this `Individual`. Raises: AttributeError: If the score for this individual has not previously been evaluated. """ if not self._score: raise AttributeError( 'The fitness score for this Individual has not yet been calculated' ) else: return self._score def off_target( self, target ): """ Returns the difference between the counts of appearances of integers for this `Individual` vector and a target count. For example, an `Individual` with vector `[1, 0, 1, 0]` contains `1` twice and `0` twice. 
If the target composition is `1` four times, and `0` none, this method will return the difference: `{1: -2, 0: 2}`. Example: >>> ind = Individual( np.array( [ 1, 0, 1, 0 ] ) ) >>> target = { 1: 4, 0: 0 } >>> output = ind.off_target( target ) >>> output[0] 2 >>> output[1] -2 """ difference = {} count = dict( Counter( self.vector ).items() ) for k, v in target.items(): if k in count: difference[k] = count[k] - v else: difference[k] = - target[k] return difference def constrain( self, target ): """ This method will attempt to constrain an `Individual` vector to match the composition specified by `target`. Elements that appear with too high frequency are replaced at random with elements that appear with too low frequency. Example: >>> ind = Individual( np.array( [ 1, 0, 2 ] ) ) >>> target = { 0: 0, 1: 1, 2: 2 } >>> ind.constrain( target ) >>> ind Individual([1 2 2]) """ difference = self.off_target( target ) while not all( v == 0 for v in difference.values() ): too_many = [ k for k, v in difference.items() if v > 0 ] too_few = [ k for k, v in difference.items() if v < 0 ] i = random.choice( too_many ) j = random.choice( too_few ) self.vector[ random.choice( matches( self.vector, i ) ) ] = j difference = self.off_target( target ) def __eq__( self, other ): """ Test whether this `Individual` has the same vector as another `Individual`. Args: other (`Individual`): The other `Individual`. Returns: (bool): True | False. Example: >>> i = Individual( np.array( [ 1, 2, 3 ] ) ) >>> j = Individual( np.array( [ 1, 2, 3 ] ) ) >>> k = Individual( np.array( [ 2, 3, 1 ] ) ) >>> i == j True >>> i == k False """ return np.array_equal( self.vector, other.vector ) def __lt__( self, other ): """ Test whether this `Individual` has a score less than that of another `Individual`. Args: other (`Individual`): The other `Individual`. Returns: (bool): True | False. Example: >>> i = Individual( np.array( [ 1, 2, 3 ] ) ) >>> j = Individual( np.array( [ 2, 2, 3 ] ) ) >>> objective_function = lambda x: sum( x ) >>> i.fitness_score( objective_function ) 6 >>> j.fitness_score( objective_function ) 7 >>> i < j True >>> j < i False """ return self.score < other.score def __repr__( self ): to_return = "Individual({})".format(self.vector) return to_return
[ "collections.Counter", "random.choice", "numpy.array_equal" ]
[((6665, 6706), 'numpy.array_equal', 'np.array_equal', (['self.vector', 'other.vector'], {}), '(self.vector, other.vector)\n', (6679, 6706), True, 'import numpy as np\n'), ((5918, 5941), 'random.choice', 'random.choice', (['too_many'], {}), '(too_many)\n', (5931, 5941), False, 'import random\n'), ((5960, 5982), 'random.choice', 'random.choice', (['too_few'], {}), '(too_few)\n', (5973, 5982), False, 'import random\n'), ((1206, 1227), 'random.choice', 'random.choice', (['[a, b]'], {}), '([a, b])\n', (1219, 1227), False, 'import random\n'), ((4895, 4915), 'collections.Counter', 'Counter', (['self.vector'], {}), '(self.vector)\n', (4902, 4915), False, 'from collections import Counter\n')]
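A short end-to-end sketch of one generation built from the helpers above; the objective function and the 10% bit-flip mutator are illustrative choices, not part of the original module.

# Illustrative single generation using Individual, crossover and mutate
# from above; objective and flip are hypothetical choices.
import numpy as np

objective = lambda v: int(sum(v))  # maximise the number of 1s
flip = lambda v: np.where(np.random.rand(len(v)) < 0.1, 1 - v, v)  # 10% bit-flips

population = [Individual(np.random.randint(0, 2, 8)) for _ in range(6)]
for ind in population:
    ind.fitness_score(objective)

parents = sorted(population)[-2:]  # Individual.__lt__ compares saved scores
child = mutate(crossover(*parents), flip)
print(child, child.fitness_score(objective))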
from torch.utils.data import Dataset import torch import json, os, random, time import cv2 import torchvision.transforms as transforms from data_transform.transform_wrapper import TRANSFORMS import numpy as np from utils.utils import get_category_list import math from PIL import Image class BaseSet(Dataset): def __init__(self, mode="train", cfg=None, transform=None): self.mode = mode self.transform = transform self.cfg = cfg self.input_size = cfg.INPUT_SIZE self.color_space = cfg.COLOR_SPACE self.size = self.input_size print("Use {} Mode to train network".format(self.color_space)) if self.mode == "train": print("Loading train data ...", end=" ") self.json_path = cfg.DATASET.TRAIN_JSON elif "valid" in self.mode: print("Loading valid data ...", end=" ") self.json_path = cfg.DATASET.VALID_JSON else: raise NotImplementedError self.update_transform() with open(self.json_path, "r") as f: self.all_info = json.load(f) self.num_classes = self.all_info["num_classes"] if not self.cfg.DATASET.USE_CAM_BASED_DATASET or self.mode != 'train': self.data = self.all_info['annotations'] else: assert os.path.isfile(self.cfg.DATASET.CAM_DATA_JSON_SAVE_PATH), \ 'the CAM-based generated json file does not exist!' self.data = json.load(open(self.cfg.DATASET.CAM_DATA_JSON_SAVE_PATH)) print("Contain {} images of {} classes".format(len(self.data), self.num_classes)) self.class_weight, self.sum_weight = self.get_weight(self.data, self.num_classes) if self.cfg.TRAIN.SAMPLER.TYPE == "weighted sampler" and mode == "train": print('-'*20+' dataset'+'-'*20) print('class_weight is (the first 10 classes): ') print(self.class_weight[:10]) num_list, cat_list = get_category_list(self.get_annotations(), self.num_classes, self.cfg) self.instance_p = np.array([num / sum(num_list) for num in num_list]) self.class_p = np.array([1/self.num_classes for _ in num_list]) num_list = [math.sqrt(num) for num in num_list] self.square_p = np.array([num / sum(num_list) for num in num_list]) self.class_dict = self._get_class_dict() def update(self, epoch): self.epoch = max(0, epoch-self.cfg.TRAIN.TWO_STAGE.START_EPOCH) if self.cfg.TRAIN.TWO_STAGE.DRS else epoch if self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "progressive": self.progress_p = epoch/self.cfg.TRAIN.MAX_EPOCH * self.class_p + (1-epoch/self.cfg.TRAIN.MAX_EPOCH)*self.instance_p print('self.progress_p', self.progress_p) def __getitem__(self, index): print('start get item...') now_info = self.data[index] img = self._get_image(now_info) print('complete get img...') meta = dict() image = self.transform(img) image_label = ( now_info["category_id"] if "test" not in self.mode else 0 ) # 0-index if self.mode not in ["train", "valid"]: meta["image_id"] = now_info["image_id"] meta["fpath"] = now_info["fpath"] return image, image_label, meta def update_transform(self, input_size=None): normalize = TRANSFORMS["normalize"](cfg=self.cfg, input_size=input_size) transform_list = [transforms.ToPILImage()] transform_ops = ( self.cfg.TRANSFORMS.TRAIN_TRANSFORMS if self.mode == "train" else self.cfg.TRANSFORMS.TEST_TRANSFORMS ) for tran in transform_ops: transform_list.append(TRANSFORMS[tran](cfg=self.cfg, input_size=input_size)) transform_list.extend([transforms.ToTensor(), normalize]) self.transform = transforms.Compose(transform_list) def get_num_classes(self): return self.num_classes def get_annotations(self): return self.all_info['annotations'] def __len__(self): return len(self.all_info['annotations']) def imread_with_retry(self, fpath): retry_time = 10 for k in range(retry_time): try: img = 
cv2.imread(fpath) if img is None: print("img is None, try to re-read img") continue return img except Exception as e: if k == retry_time - 1: assert False, "pillow open {} failed".format(fpath) time.sleep(0.1) def _get_image(self, now_info): fpath = os.path.join(now_info["fpath"]) img = self.imread_with_retry(fpath) if self.color_space == "RGB": img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) return img def _get_trans_image(self, img_idx): now_info = self.data[img_idx] fpath = os.path.join(now_info["fpath"]) img = self.imread_with_retry(fpath) if self.color_space == "RGB": img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) return self.transform(img)[None, :, :, :] def _get_class_dict(self): class_dict = dict() for i, anno in enumerate(self.data): cat_id = ( anno["category_id"] if "category_id" in anno else anno["image_label"] ) if not cat_id in class_dict: class_dict[cat_id] = [] class_dict[cat_id].append(i) return class_dict def get_weight(self, annotations, num_classes): num_list = [0] * num_classes cat_list = [] for anno in annotations: category_id = anno["category_id"] num_list[category_id] += 1 cat_list.append(category_id) max_num = max(num_list) class_weight = [max_num / i if i != 0 else 0 for i in num_list] sum_weight = sum(class_weight) return class_weight, sum_weight def sample_class_index_by_weight(self): rand_number, now_sum = random.random() * self.sum_weight, 0 for i in range(self.num_classes): now_sum += self.class_weight[i] if rand_number <= now_sum: return i
[ "torchvision.transforms.ToPILImage", "os.path.join", "math.sqrt", "time.sleep", "os.path.isfile", "numpy.array", "random.random", "cv2.cvtColor", "json.load", "torchvision.transforms.ToTensor", "cv2.imread", "torchvision.transforms.Compose" ]
[((4008, 4042), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (4026, 4042), True, 'import torchvision.transforms as transforms\n'), ((4821, 4852), 'os.path.join', 'os.path.join', (["now_info['fpath']"], {}), "(now_info['fpath'])\n", (4833, 4852), False, 'import json, os, random, time\n'), ((5113, 5144), 'os.path.join', 'os.path.join', (["now_info['fpath']"], {}), "(now_info['fpath'])\n", (5125, 5144), False, 'import json, os, random, time\n'), ((1122, 1134), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1131, 1134), False, 'import json, os, random, time\n'), ((1363, 1419), 'os.path.isfile', 'os.path.isfile', (['self.cfg.DATASET.CAM_DATA_JSON_SAVE_PATH'], {}), '(self.cfg.DATASET.CAM_DATA_JSON_SAVE_PATH)\n', (1377, 1419), False, 'import json, os, random, time\n'), ((2212, 2264), 'numpy.array', 'np.array', (['[(1 / self.num_classes) for _ in num_list]'], {}), '([(1 / self.num_classes) for _ in num_list])\n', (2220, 2264), True, 'import numpy as np\n'), ((3585, 3608), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (3606, 3608), True, 'import torchvision.transforms as transforms\n'), ((4956, 4992), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (4968, 4992), False, 'import cv2\n'), ((5248, 5284), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (5260, 5284), False, 'import cv2\n'), ((2286, 2300), 'math.sqrt', 'math.sqrt', (['num'], {}), '(num)\n', (2295, 2300), False, 'import math\n'), ((3947, 3968), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3966, 3968), True, 'import torchvision.transforms as transforms\n'), ((4411, 4428), 'cv2.imread', 'cv2.imread', (['fpath'], {}), '(fpath)\n', (4421, 4428), False, 'import cv2\n'), ((6268, 6283), 'random.random', 'random.random', ([], {}), '()\n', (6281, 6283), False, 'import json, os, random, time\n'), ((4749, 4764), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4759, 4764), False, 'import json, os, random, time\n')]
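The class above hard-codes a particular annotation layout; the snippet below writes a minimal JSON file in that shape. All field values are hypothetical.

# Minimal, hypothetical annotation file in the layout BaseSet reads:
# a top-level "num_classes" plus "annotations" entries carrying "fpath"
# and "category_id" ("image_id" is only read outside train/valid modes).
import json

train_json = {
    "num_classes": 2,
    "annotations": [
        {"image_id": 0, "fpath": "images/cat_000.jpg", "category_id": 0},
        {"image_id": 1, "fpath": "images/dog_000.jpg", "category_id": 1},
    ],
}
with open("train.json", "w") as f:
    json.dump(train_json, f)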
""" Database schema. """ import datetime import enum import os import copy import gwemopt.utils import gwemopt.ztf_tiling from astropy import table from astropy import coordinates from astropy import units as u from flask_login.mixins import UserMixin from flask_sqlalchemy import SQLAlchemy import gcn import healpy as hp from ligo.skymap.bayestar import rasterize import lxml.etree import pkg_resources import numpy as np from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy_utils import EmailType, PhoneNumberType from tqdm import tqdm from .flask import app db = SQLAlchemy(app) def get_ztf_quadrants(): """Calculate ZTF quadrant footprints as offsets from the telescope boresight.""" quad_prob = gwemopt.ztf_tiling.QuadProb(0, 0) ztf_tile = gwemopt.ztf_tiling.ZTFtile(0, 0) quad_cents_ra, quad_cents_dec = ztf_tile.quadrant_centers() offsets = np.asarray([ quad_prob.getWCS( quad_cents_ra[quadrant_id], quad_cents_dec[quadrant_id] ).calc_footprint(axes=quad_prob.quadrant_size) for quadrant_id in range(64)]) return np.transpose(offsets, (2, 0, 1)) def create_all(): db.create_all(bind=None) telescopes = ["ZTF", "Gattini", "DECam", "KPED", "GROWTH-India"] available_filters = {"ZTF": ["g", "r", "i"], "Gattini": ["J"], "DECam": ["g", "r", "i", "z"], "KPED": ["U", "g", "r", "i"], "GROWTH-India": ["g", "r", "i", "z"]} plan_args = { 'ZTF': { 'filt': ['g', 'r', 'g'], 'exposuretimes': [300.0, 300.0, 300.0], 'doReferences': True, 'doUsePrimary': True, 'doBalanceExposure': False, 'doDither': False, 'usePrevious': False, 'doCompletedObservations': False, 'doPlannedObservations': False, 'cobs': [None, None], 'schedule_type': 'greedy', 'filterScheduleType': 'block', 'airmass': 2.5, 'schedule_strategy': 'tiling', 'mindiff': 30.*60., 'doMaxTiles': False, 'max_nb_tiles': 1000 }, 'DECam': { 'filt': ['g', 'z'], 'exposuretimes': [25.0, 25.0], 'doReferences': True, 'doUsePrimary': False, 'doBalanceExposure': False, 'doDither': True, 'usePrevious': False, 'doCompletedObservations': False, 'doPlannedObservations': False, 'cobs': [None, None], 'schedule_type': 'greedy_slew', 'filterScheduleType': 'integrated', 'airmass': 2.5, 'schedule_strategy': 'tiling', 'mindiff': 30.*60., 'doMaxTiles': False, 'max_nb_tiles': 1000 }, 'Gattini': { 'filt': ['J'], 'exposuretimes': [300.0], 'doReferences': False, 'doUsePrimary': False, 'doBalanceExposure': False, 'doDither': False, 'usePrevious': False, 'doCompletedObservations': False, 'doPlannedObservations': False, 'cobs': [None, None], 'schedule_type': 'greedy', 'filterScheduleType': 'block', 'airmass': 2.5, 'schedule_strategy': 'tiling', 'mindiff': 30.*60., 'doMaxTiles': False, 'max_nb_tiles': 1000 }, 'KPED': { 'filt': ['r'], 'exposuretimes': [300.0], 'doReferences': False, 'doUsePrimary': False, 'doBalanceExposure': False, 'doDither': False, 'usePrevious': False, 'doCompletedObservations': False, 'doPlannedObservations': False, 'cobs': [None, None], 'schedule_type': 'greedy', 'filterScheduleType': 'integrated', 'airmass': 2.5, 'schedule_strategy': 'catalog', 'mindiff': 30.*60., 'doMaxTiles': False, 'max_nb_tiles': 1000 }, 'GROWTH-India': { 'filt': ['r'], 'exposuretimes': [300.0], 'doReferences': False, 'doUsePrimary': False, 'doBalanceExposure': False, 'doDither': False, 'usePrevious': False, 'doCompletedObservations': False, 'doPlannedObservations': False, 'cobs': [None, None], 'schedule_type': 'greedy', 'filterScheduleType': 'integrated', 'airmass': 2.5, 'schedule_strategy': 'catalog', 'mindiff': 30.*60., 
'doMaxTiles': False, 'max_nb_tiles': 1000 } } with tqdm(telescopes) as telescope_progress: for tele in telescope_progress: telescope_progress.set_description('populating {}'.format(tele)) filename = pkg_resources.resource_filename( __name__, 'input/%s.ref' % tele) if os.path.isfile(filename): refstable = table.Table.read( filename, format='ascii', data_start=2, data_end=-1) refs = table.unique(refstable, keys=['field', 'fid']) if "maglimcat" not in refs.columns: refs["maglimcat"] = np.nan reference_images = { group[0]['field']: group['fid'].astype(int).tolist() for group in refs.group_by('field').groups} reference_mags = { group[0]['field']: group['maglimcat'].tolist() for group in refs.group_by('field').groups} else: reference_images = {} reference_mags = {} tesspath = 'input/%s.tess' % tele try: tessfile = app.open_instance_resource(tesspath) except IOError: tessfile = pkg_resources.resource_stream(__name__, tesspath) tessfilename = tessfile.name tessfile.close() fields = np.recfromtxt( tessfilename, usecols=range(3), names=['field_id', 'ra', 'dec']) with pkg_resources.resource_stream( __name__, 'config/%s.config' % tele) as g: config_struct = {} for line in g.readlines(): line_without_return = line.decode().split("\n") line_split = line_without_return[0].split(" ") line_split = list(filter(None, line_split)) if line_split: try: config_struct[line_split[0]] = float(line_split[1]) except ValueError: config_struct[line_split[0]] = line_split[1] db.session.merge(Telescope(telescope=tele, lat=config_struct["latitude"], lon=config_struct["longitude"], elevation=config_struct["elevation"], timezone=config_struct["timezone"], filters=available_filters[tele], default_plan_args=plan_args[tele])) for field_id, ra, dec in tqdm(fields, 'populating fields'): ref_filter_ids = reference_images.get(field_id, []) ref_filter_mags = [] for val in reference_mags.get(field_id, []): ref_filter_mags.append(val) bands = {1: 'g', 2: 'r', 3: 'i', 4: 'z', 5: 'J'} ref_filter_bands = [bands.get(n, n) for n in ref_filter_ids] if config_struct["FOV_type"] == "square": ipix, radecs, patch, area = gwemopt.utils.getSquarePixels( ra, dec, config_struct["FOV"], Localization.nside) elif config_struct["FOV_type"] == "circle": ipix, radecs, patch, area = gwemopt.utils.getCirclePixels( ra, dec, config_struct["FOV"], Localization.nside) if len(radecs) == 0: continue corners = np.vstack((radecs, radecs[0, :])) if corners.size == 10: corners_copy = copy.deepcopy(corners) corners[2] = corners_copy[3] corners[3] = corners_copy[2] contour = { 'type': 'Feature', 'geometry': { 'type': 'MultiLineString', 'coordinates': [corners.tolist()] }, 'properties': { 'telescope': tele, 'field_id': int(field_id), 'ra': ra, 'dec': dec, 'depth': dict(zip(ref_filter_bands, ref_filter_mags)) } } db.session.merge(Field(telescope=tele, field_id=int(field_id), ra=ra, dec=dec, contour=contour, reference_filter_ids=ref_filter_ids, reference_filter_mags=ref_filter_mags, ipix=ipix.tolist())) if tele == "ZTF": quadrant_coords = get_ztf_quadrants() skyoffset_frames = coordinates.SkyCoord( fields['ra'], fields['dec'], unit=u.deg ).skyoffset_frame() quadrant_coords_icrs = coordinates.SkyCoord( *np.tile( quadrant_coords[:, np.newaxis, ...], (len(fields), 1, 1)), unit=u.deg, frame=skyoffset_frames[:, np.newaxis, np.newaxis] ).transform_to(coordinates.ICRS) quadrant_xyz = np.moveaxis( quadrant_coords_icrs.cartesian.xyz.value, 0, -1) for field_id, xyz in zip( tqdm(fields['field_id'], 'populating subfields'), quadrant_xyz): for ii, xyz in enumerate(xyz): ipix = 
hp.query_polygon(Localization.nside, xyz) db.session.merge(SubField(telescope=tele, field_id=int(field_id), subfield_id=int(ii), ipix=ipix.tolist())) class User(db.Model, UserMixin): name = db.Column( db.String, primary_key=True, comment='Unique username') email = db.Column( EmailType, comment='E-mail address') phone = db.Column( PhoneNumberType, comment='Mobile/SMS phone number') voice = db.Column( db.Boolean, nullable=False, default=False, comment='Set to true for voice alerts (default: SMS only)') timezone = db.Column( db.Unicode, nullable=False, default='America/New_York') alert_from = db.Column( db.Time, comment='Start of hours for alerts') alert_to = db.Column( db.Time, comment='End of hours for alerts') def get_id(self): """Provide user ID for flask_login.""" return self.name class Event(db.Model): """Event information, including an event ID, mission, and time of the event""" dateobs = db.Column( db.DateTime, primary_key=True, comment='Event time') gcn_notices = db.relationship( lambda: GcnNotice, order_by=lambda: GcnNotice.date) _tags = db.relationship( lambda: Tag, lazy='selectin', order_by=lambda: ( db.func.lower(Tag.text).notin_({'fermi', 'swift', 'amon', 'lvc'}), db.func.lower(Tag.text).notin_({'long', 'short'}), db.func.lower(Tag.text).notin_({'grb', 'gw', 'transient'}) ) ) tags = association_proxy( '_tags', 'text', creator=lambda tag: Tag(text=tag)) localizations = db.relationship(lambda: Localization) plans = db.relationship(lambda: Plan, backref='event') @hybrid_property def retracted(self): return 'retracted' in self.tags @retracted.expression def retracted(cls): return db.literal('retracted').in_(cls.tags) @property def lightcurve(self): try: notice = self.gcn_notices[0] except IndexError: return None root = lxml.etree.fromstring(notice.content) elem = root.find(".//Param[@name='LightCurve_URL']") if elem is None: return None else: return elem.attrib.get('value', '').replace('http://', 'https://') @property def gracedb(self): try: notice = self.gcn_notices[0] except IndexError: return None root = lxml.etree.fromstring(notice.content) elem = root.find(".//Param[@name='EventPage']") if elem is None: return None else: return elem.attrib.get('value', '') @property def ned_gwf(self): return "https://ned.ipac.caltech.edu/gwf/events" @property def graceid(self): try: notice = self.gcn_notices[0] except IndexError: return None root = lxml.etree.fromstring(notice.content) elem = root.find(".//Param[@name='GraceID']") if elem is None: return None else: return elem.attrib.get('value', '') class Tag(db.Model): """Store qualitative tags for events.""" dateobs = db.Column( db.DateTime, db.ForeignKey(Event.dateobs), primary_key=True) text = db.Column( db.Unicode, primary_key=True) class Telescope(db.Model): """Telescope information""" telescope = db.Column( db.String, primary_key=True, comment='Telescope name') lat = db.Column( db.Float, nullable=False, comment='Latitude') lon = db.Column( db.Float, nullable=False, comment='Longitude') elevation = db.Column( db.Float, nullable=False, comment='Elevation') timezone = db.Column( db.String, nullable=False, comment='Time zone') filters = db.Column( db.ARRAY(db.String), nullable=False, comment='Available filters') fields = db.relationship(lambda: Field) plans = db.relationship(lambda: Plan) default_plan_args = db.Column( db.JSON, nullable=False, comment='Default plan arguments') class Field(db.Model): """Footprints and number of observations in each filter for standard PTF tiles""" telescope = db.Column( db.String, 
db.ForeignKey(Telescope.telescope), primary_key=True, comment='Telescope') field_id = db.Column( db.Integer, primary_key=True, comment='Field ID') ra = db.Column( db.Float, nullable=False, comment='RA of field center') dec = db.Column( db.Float, nullable=False, comment='Dec of field center') contour = db.Column( db.JSON, nullable=False, comment='GeoJSON contours') reference_filter_ids = db.Column( db.ARRAY(db.Integer), nullable=False, comment='Reference filter IDs') reference_filter_mags = db.Column( db.ARRAY(db.Float), nullable=False, comment='Reference filter magss') ipix = db.Column( db.ARRAY(db.Integer), comment='Healpix indices') subfields = db.relationship(lambda: SubField) class SubField(db.Model): """SubFields""" __table_args__ = ( db.ForeignKeyConstraint( ['telescope', 'field_id'], ['field.telescope', 'field.field_id'] ), ) telescope = db.Column( db.String, db.ForeignKey(Telescope.telescope), primary_key=True, comment='Telescope') field_id = db.Column( db.Integer, primary_key=True, comment='Field ID') subfield_id = db.Column( db.Integer, primary_key=True, comment='SubField ID') ipix = db.Column( db.ARRAY(db.Integer), comment='Healpix indices') class GcnNotice(db.Model): """Records of ingested GCN notices""" ivorn = db.Column( db.String, primary_key=True, comment='Unique identifier of VOEvent') notice_type = db.Column( db.Enum(gcn.NoticeType, native_enum=False), nullable=False, comment='GCN Notice type') stream = db.Column( db.String, nullable=False, comment='Event stream or mission (i.e., "Fermi")') date = db.Column( db.DateTime, nullable=False, comment='UTC message timestamp') dateobs = db.Column( db.DateTime, db.ForeignKey(Event.dateobs), nullable=False, comment='UTC event timestamp') content = db.deferred(db.Column( db.LargeBinary, nullable=False, comment='Raw VOEvent content')) def _get_property(self, property_name, value=None): root = lxml.etree.fromstring(self.content) path = ".//Param[@name='{}']".format(property_name) elem = root.find(path) value = float(elem.attrib.get('value', '')) * 100 return value @property def has_ns(self): return self._get_property(property_name="HasNS") @property def has_remnant(self): return self._get_property(property_name="HasRemnant") @property def far(self): return self._get_property(property_name="FAR") @property def bns(self): return self._get_property(property_name="BNS") @property def nsbh(self): return self._get_property(property_name="NSBH") @property def bbh(self): return self._get_property(property_name="BBH") @property def mass_gap(self): return self._get_property(property_name="MassGap") @property def noise(self): return self._get_property(property_name="Terrestrial") class Localization(db.Model): """Localization information, including the localization ID, event ID, right ascension, declination, error radius (if applicable), and the healpix map.""" nside = 512 """HEALPix resolution used for flat (non-multiresolution) operations.""" dateobs = db.Column( db.DateTime, db.ForeignKey(Event.dateobs), primary_key=True, comment='UTC event timestamp') localization_name = db.Column( db.String, primary_key=True, comment='Localization name') uniq = db.deferred(db.Column( db.ARRAY(db.BigInteger), nullable=False, comment='Multiresolution HEALPix UNIQ pixel index array')) probdensity = db.deferred(db.Column( db.ARRAY(db.Float), nullable=False, comment='Multiresolution HEALPix probability density array')) distmu = db.deferred(db.Column( db.ARRAY(db.Float), comment='Multiresolution HEALPix distance mu array')) distsigma = db.deferred(db.Column( 
db.ARRAY(db.Float), comment='Multiresolution HEALPix distance sigma array')) distnorm = db.deferred(db.Column( db.ARRAY(db.Float), comment='Multiresolution HEALPix distance normalization array')) contour = db.deferred(db.Column( db.JSON, comment='GeoJSON contours')) @hybrid_property def is_3d(self): return (self.distmu is not None and self.distsigma is not None and self.distnorm is not None) @is_3d.expression def is_3d(self): return (self.distmu.isnot(None) and self.distsigma.isnot(None) and self.distnorm.isnot(None)) @property def table_2d(self): """Get multiresolution HEALPix dataset, probability density only.""" return table.Table( [self.uniq, self.probdensity], names=['UNIQ', 'PROBDENSITY']) @property def table(self): """Get multiresolution HEALPix dataset, probability density and distance.""" if self.is_3d: return table.Table( [ self.uniq, self.probdensity, self.distmu, self.distsigma, self.distnorm], names=[ 'UNIQ', 'PROBDENSITY', 'DISTMU', 'DISTSIGMA', 'DISTNORM']) else: return self.table_2d @property def flat_2d(self): """Get flat resolution HEALPix dataset, probability density only.""" order = hp.nside2order(Localization.nside) result = rasterize(self.table_2d, order)['PROB'] return hp.reorder(result, 'NESTED', 'RING') @property def flat(self): """Get flat resolution HEALPix dataset, probability density and distance.""" if self.is_3d: order = hp.nside2order(Localization.nside) t = rasterize(self.table, order) result = t['PROB'], t['DISTMU'], t['DISTSIGMA'], t['DISTNORM'] return hp.reorder(result, 'NESTED', 'RING') else: return self.flat_2d, class Plan(db.Model): """Tiling information, including the event time, localization ID, tile IDs, and plan name""" dateobs = db.Column( db.DateTime, db.ForeignKey(Event.dateobs), primary_key=True, comment='UTC event timestamp') telescope = db.Column( db.String, db.ForeignKey(Telescope.telescope), primary_key=True, comment='Telescope') plan_name = db.Column( db.String, primary_key=True, comment='Plan name') validity_window_start = db.Column( db.DateTime, nullable=False, default=lambda: datetime.datetime.now(), comment='Start of validity window') validity_window_end = db.Column( db.DateTime, nullable=False, default=lambda: datetime.datetime.now() + datetime.timedelta(1), comment='End of validity window') plan_args = db.Column( db.JSON, nullable=False, comment='Plan arguments') # FIXME: Hard-code program_id, filter_id, subprogram_name program_id = 2 class Status(enum.IntEnum): WORKING = 0 READY = 1 SUBMITTED = 2 status = db.Column( db.Enum(Status), default=Status.WORKING, nullable=False, comment='Plan status') planned_observations = db.relationship( 'PlannedObservation', backref='plan', order_by=lambda: PlannedObservation.obstime) @property def start_observation(self): """Time of the first planned observation.""" if self.planned_observations: return self.planned_observations[0].obstime else: return None @hybrid_property def num_observations(self): """Number of planned observation.""" return len(self.planned_observations) @num_observations.expression def num_observations(cls): """Number of planned observation.""" return cls.planned_observations.count() @property def num_observations_per_filter(self): """Number of planned observation per filter.""" filters = list(Telescope.query.get(self.telescope).filters) nepochs = np.zeros(len(filters),) bands = {1: 'g', 2: 'r', 3: 'i', 4: 'z', 5: 'J'} for planned_observation in self.planned_observations: filt = bands[planned_observation.filter_id] idx = filters.index(filt) nepochs[idx] = nepochs[idx] + 1 
nobs_per_filter = [] for ii, filt in enumerate(filters): nobs_per_filter.append("%s: %d" % (filt, nepochs[ii])) return " ".join(nobs_per_filter) @property def total_time(self): """Total observation time (seconds).""" return sum(_.exposure_time for _ in self.planned_observations) @property def tot_time_with_overheads(self): overhead = sum( _.overhead_per_exposure for _ in self.planned_observations) return overhead + self.total_time @property def ipix(self): return { i for planned_observation in self.planned_observations if planned_observation.field.ipix is not None for i in planned_observation.field.ipix} @property def area(self): nside = Localization.nside return hp.nside2pixarea(nside, degrees=True) * len(self.ipix) def get_probability(self, localization): ipix = np.asarray(list(self.ipix)) if len(ipix) > 0: return localization.flat_2d[ipix].sum() else: return 0.0 class PlannedObservation(db.Model): """Tile information, including the event time, localization ID, field IDs, tiling name, and tile probabilities.""" __table_args__ = ( db.ForeignKeyConstraint( ['dateobs', 'telescope', 'plan_name'], ['plan.dateobs', 'plan.telescope', 'plan.plan_name'], ondelete='CASCADE', onupdate='CASCADE' ), db.ForeignKeyConstraint( ['telescope', 'field_id'], ['field.telescope', 'field.field_id'] ), ) planned_observation_id = db.Column( db.Integer, primary_key=True, comment='Exposure ID') dateobs = db.Column( db.DateTime, db.ForeignKey(Event.dateobs), primary_key=True, comment='UTC event timestamp') telescope = db.Column( db.String, db.ForeignKey(Telescope.telescope), primary_key=True, comment='Telescope') field_id = db.Column( db.Integer, primary_key=True, comment='Field ID') plan_name = db.Column( db.String, primary_key=True, comment='Plan name') field = db.relationship(Field, viewonly=True) exposure_time = db.Column( db.Integer, nullable=False, comment='Exposure time in seconds') # FIXME: remove weight = db.Column( db.Float, nullable=False, comment='Weight associated with each observation') filter_id = db.Column( db.Integer, nullable=False, comment='Filter ID (g=1, r=2, i=3, z=4, J=5)') obstime = db.Column( db.DateTime, nullable=False, comment='UTC observation timestamp') overhead_per_exposure = db.Column( db.Integer, nullable=False, comment='Overhead time per exposure in seconds') class Observation(db.Model): """Observation information, including the field ID, exposure time, and filter.""" __table_args__ = ( db.ForeignKeyConstraint( ['telescope', 'field_id'], ['field.telescope', 'field.field_id'] ), ) telescope = db.Column( db.String, db.ForeignKey(Telescope.telescope), primary_key=True, comment='Telescope') field_id = db.Column( db.Integer, primary_key=True, comment='Field ID') observation_id = db.Column( db.Integer, primary_key=True, comment='Observation ID') obstime = db.Column( db.DateTime, comment='Exposure timestamp') field = db.relationship(Field) filter_id = db.Column( db.Integer, nullable=False, comment='Filter ID (g=1, r=2, i=3, z=4, J=5)') exposure_time = db.Column( db.Integer, nullable=False, comment='Exposure times') airmass = db.Column( db.Float, comment='Airmass') seeing = db.Column( db.Float, comment='Seeing') limmag = db.Column( db.Float, comment='Limiting magnitude') subfield_id = db.Column( db.Integer, default=0, primary_key=True, nullable=False, comment='subfield (e.g. 
quadrant/chip as relevant for instrument') successful = db.Column( db.Boolean, nullable=False, comment='processed successfully?') class Candidate(db.Model): name = db.Column( db.String, primary_key=True, comment='Candidate name') growth_marshal_id = db.Column( db.String, unique=True, nullable=False, comment='GROWTH marshal ID') subfield_id = db.Column( db.Integer, nullable=True, comment='Readout channel ID') creationdate = db.Column( db.DateTime, comment='Date of candidate creation') classification = db.Column( db.String, nullable=True, comment='Classification') redshift = db.Column( db.Float, nullable=True, comment='Resdshift of the source') iauname = db.Column( db.String, nullable=True, comment='IAU name on TNS') field_id = db.Column( db.Integer, comment='Field ID') candid = db.Column( db.BigInteger, comment='Candidate ID') ra = db.Column( db.Float, nullable=False, comment='RA of the candidate') dec = db.Column( db.Float, nullable=False, comment='Dec of the candidate') last_updated = db.Column( db.DateTime, nullable=False, comment='Date of last update') autoannotations = db.Column( db.String, nullable=True, comment='Autoannotations from the GROWTH marshal') photometry = db.relationship( lambda: CandidatePhotometry, backref='candidate', order_by=lambda: CandidatePhotometry.dateobs) @hybrid_property def first_detection_time(self): return self.photometry[0].dateobs @first_detection_time.expression def first_detection_time(cls): return db.select( [db.func.min(cls.dateobs)] ).where( CandidatePhotometry.name == cls.name ).label(__name__) class CandidatePhotometry(db.Model): """Candidate light curve pulled from the GROWTH Marshal""" lcid = db.Column( db.BigInteger, primary_key=True) name = db.Column( db.ForeignKey(Candidate.name), nullable=False, comment='Candidate name') dateobs = db.Column( db.DateTime, nullable=True, comment='Observation date') fil = db.Column( db.String, nullable=True, comment='Filter') instrument = db.Column( db.String, nullable=True, comment='Instruments') limmag = db.Column( db.Float, nullable=True, comment='Limiting magnitude') mag = db.Column( db.Float, nullable=True, comment='Mag PSF') magerr = db.Column( db.Float, nullable=True, comment='Mag uncertainty') exptime = db.Column( db.Float, nullable=True, comment='Exposure time') programid = db.Column( db.Integer, nullable=True, comment='Program ID number (1,2,3)')
[ "astropy.table.Table", "ligo.skymap.bayestar.rasterize", "copy.deepcopy", "numpy.moveaxis", "datetime.timedelta", "astropy.table.unique", "healpy.reorder", "healpy.query_polygon", "numpy.vstack", "healpy.nside2order", "os.path.isfile", "flask_sqlalchemy.SQLAlchemy", "numpy.transpose", "pkg...
[((646, 661), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (656, 661), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((1178, 1210), 'numpy.transpose', 'np.transpose', (['offsets', '(2, 0, 1)'], {}), '(offsets, (2, 0, 1))\n', (1190, 1210), True, 'import numpy as np\n'), ((4913, 4929), 'tqdm.tqdm', 'tqdm', (['telescopes'], {}), '(telescopes)\n', (4917, 4929), False, 'from tqdm import tqdm\n'), ((20675, 20748), 'astropy.table.Table', 'table.Table', (['[self.uniq, self.probdensity]'], {'names': "['UNIQ', 'PROBDENSITY']"}), "([self.uniq, self.probdensity], names=['UNIQ', 'PROBDENSITY'])\n", (20686, 20748), False, 'from astropy import table\n'), ((21379, 21413), 'healpy.nside2order', 'hp.nside2order', (['Localization.nside'], {}), '(Localization.nside)\n', (21393, 21413), True, 'import healpy as hp\n'), ((21486, 21522), 'healpy.reorder', 'hp.reorder', (['result', '"""NESTED"""', '"""RING"""'], {}), "(result, 'NESTED', 'RING')\n", (21496, 21522), True, 'import healpy as hp\n'), ((5094, 5158), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', "('input/%s.ref' % tele)"], {}), "(__name__, 'input/%s.ref' % tele)\n", (5125, 5158), False, 'import pkg_resources\n'), ((5191, 5215), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (5205, 5215), False, 'import os\n'), ((7558, 7591), 'tqdm.tqdm', 'tqdm', (['fields', '"""populating fields"""'], {}), "(fields, 'populating fields')\n", (7562, 7591), False, 'from tqdm import tqdm\n'), ((20933, 21094), 'astropy.table.Table', 'table.Table', (['[self.uniq, self.probdensity, self.distmu, self.distsigma, self.distnorm]'], {'names': "['UNIQ', 'PROBDENSITY', 'DISTMU', 'DISTSIGMA', 'DISTNORM']"}), "([self.uniq, self.probdensity, self.distmu, self.distsigma, self\n .distnorm], names=['UNIQ', 'PROBDENSITY', 'DISTMU', 'DISTSIGMA',\n 'DISTNORM'])\n", (20944, 21094), False, 'from astropy import table\n'), ((21431, 21462), 'ligo.skymap.bayestar.rasterize', 'rasterize', (['self.table_2d', 'order'], {}), '(self.table_2d, order)\n', (21440, 21462), False, 'from ligo.skymap.bayestar import rasterize\n'), ((21694, 21728), 'healpy.nside2order', 'hp.nside2order', (['Localization.nside'], {}), '(Localization.nside)\n', (21708, 21728), True, 'import healpy as hp\n'), ((21745, 21773), 'ligo.skymap.bayestar.rasterize', 'rasterize', (['self.table', 'order'], {}), '(self.table, order)\n', (21754, 21773), False, 'from ligo.skymap.bayestar import rasterize\n'), ((21868, 21904), 'healpy.reorder', 'hp.reorder', (['result', '"""NESTED"""', '"""RING"""'], {}), "(result, 'NESTED', 'RING')\n", (21878, 21904), True, 'import healpy as hp\n'), ((25275, 25312), 'healpy.nside2pixarea', 'hp.nside2pixarea', (['nside'], {'degrees': '(True)'}), '(nside, degrees=True)\n', (25291, 25312), True, 'import healpy as hp\n'), ((5245, 5314), 'astropy.table.Table.read', 'table.Table.read', (['filename'], {'format': '"""ascii"""', 'data_start': '(2)', 'data_end': '(-1)'}), "(filename, format='ascii', data_start=2, data_end=-1)\n", (5261, 5314), False, 'from astropy import table\n'), ((5359, 5405), 'astropy.table.unique', 'table.unique', (['refstable'], {'keys': "['field', 'fid']"}), "(refstable, keys=['field', 'fid'])\n", (5371, 5405), False, 'from astropy import table\n'), ((6393, 6459), 'pkg_resources.resource_stream', 'pkg_resources.resource_stream', (['__name__', "('config/%s.config' % tele)"], {}), "(__name__, 'config/%s.config' % tele)\n", (6422, 6459), False, 'import pkg_resources\n'), ((8504, 8537), 'numpy.vstack', 
'np.vstack', (['(radecs, radecs[0, :])'], {}), '((radecs, radecs[0, :]))\n', (8513, 8537), True, 'import numpy as np\n'), ((10289, 10349), 'numpy.moveaxis', 'np.moveaxis', (['quadrant_coords_icrs.cartesian.xyz.value', '(0)', '(-1)'], {}), '(quadrant_coords_icrs.cartesian.xyz.value, 0, -1)\n', (10300, 10349), True, 'import numpy as np\n'), ((22584, 22607), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (22605, 22607), False, 'import datetime\n'), ((6122, 6171), 'pkg_resources.resource_stream', 'pkg_resources.resource_stream', (['__name__', 'tesspath'], {}), '(__name__, tesspath)\n', (6151, 6171), False, 'import pkg_resources\n'), ((8612, 8634), 'copy.deepcopy', 'copy.deepcopy', (['corners'], {}), '(corners)\n', (8625, 8634), False, 'import copy\n'), ((10438, 10486), 'tqdm.tqdm', 'tqdm', (["fields['field_id']", '"""populating subfields"""'], {}), "(fields['field_id'], 'populating subfields')\n", (10442, 10486), False, 'from tqdm import tqdm\n'), ((22760, 22783), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (22781, 22783), False, 'import datetime\n'), ((22786, 22807), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (22804, 22807), False, 'import datetime\n'), ((9809, 9870), 'astropy.coordinates.SkyCoord', 'coordinates.SkyCoord', (["fields['ra']", "fields['dec']"], {'unit': 'u.deg'}), "(fields['ra'], fields['dec'], unit=u.deg)\n", (9829, 9870), False, 'from astropy import coordinates\n'), ((10609, 10650), 'healpy.query_polygon', 'hp.query_polygon', (['Localization.nside', 'xyz'], {}), '(Localization.nside, xyz)\n', (10625, 10650), True, 'import healpy as hp\n')]
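A hedged query sketch against the models above, meant to run inside the Flask app context. The timestamp, localization name and plan name are placeholders for whatever rows exist in a given database.

# Hedged sketch: look up a stored plan and localization by their composite
# primary keys and report the plan's sky coverage. All key values below
# are placeholders.
import datetime

dateobs = datetime.datetime(2019, 4, 25, 8, 18, 5)
loc = Localization.query.get((dateobs, 'bayestar.fits.gz'))
plan = Plan.query.get((dateobs, 'ZTF', 'plan1'))

print('tiles    : %d' % plan.num_observations)
print('area     : %.1f deg2' % plan.area)
print('coverage : %.3f' % plan.get_probability(loc))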
""" P2] Se presenta una escena con objetos dibujados con diferentes materiales a la escena base """ """ Se usa imgui para generar un menu y controlar variables de reflexion para el material de los objetos """ import glfw from OpenGL.GL import * import OpenGL.GL.shaders import numpy as np import grafica.transformations as tr import grafica.basic_shapes as bs import grafica.easy_shaders as es import grafica.performance_monitor as pm import grafica.lighting_shaders as ls import grafica.scene_graph as sg from shapes3d import * import imgui from imgui.integrations.glfw import GlfwRenderer # Clase para manejar una camara que se mueve en coordenadas polares class PolarCamera: def __init__(self): self.center = np.array([0.0, 0.0, -0.5]) # centro de movimiento de la camara y donde mira la camara self.theta = 0 # coordenada theta, angulo de la camara self.rho = 5 # coordenada rho, distancia al centro de la camara self.eye = np.array([0.0, 0.0, 0.0]) # posicion de la camara self.height = 0.5 # altura fija de la camara self.up = np.array([0, 0, 1]) # vector up self.viewMatrix = None # Matriz de vista # Añadir ángulo a la coordenada theta def set_theta(self, delta): self.theta = (self.theta + delta) % (np.pi * 2) # Añadir distancia a la coordenada rho, sin dejar que sea menor o igual a 0 def set_rho(self, delta): if ((self.rho + delta) > 0.1): self.rho += delta # Actualizar la matriz de vista def update_view(self): # Se calcula la posición de la camara con coordenadas poleras relativas al centro self.eye[0] = self.rho * np.sin(self.theta) + self.center[0] self.eye[1] = self.rho * np.cos(self.theta) + self.center[1] self.eye[2] = self.height + self.center[2] # Se genera la matriz de vista viewMatrix = tr.lookAt( self.eye, self.center, self.up ) return viewMatrix # Clase para manejar el controlador y la camara polar class Controller: def __init__(self): self.fillPolygon = True self.showAxis = True # Variables para controlar la camara self.is_up_pressed = False self.is_down_pressed = False self.is_left_pressed = False self.is_right_pressed = False # Se crea instancia de la camara self.polar_camera = PolarCamera() # Entregar la referencia a la camara def get_camera(self): return self.polar_camera # Metodo para ller el input del teclado def on_key(self, window, key, scancode, action, mods): # Caso de detectar la tecla [UP], actualiza estado de variable if key == glfw.KEY_UP: if action == glfw.PRESS: self.is_up_pressed = True elif action == glfw.RELEASE: self.is_up_pressed = False # Caso de detectar la tecla [DOWN], actualiza estado de variable if key == glfw.KEY_DOWN: if action == glfw.PRESS: self.is_down_pressed = True elif action == glfw.RELEASE: self.is_down_pressed = False # Caso de detectar la tecla [RIGHT], actualiza estado de variable if key == glfw.KEY_RIGHT: if action == glfw.PRESS: self.is_right_pressed = True elif action == glfw.RELEASE: self.is_right_pressed = False # Caso de detectar la tecla [LEFT], actualiza estado de variable if key == glfw.KEY_LEFT: if action == glfw.PRESS: self.is_left_pressed = True elif action == glfw.RELEASE: self.is_left_pressed = False # Caso de detectar la barra espaciadora, se cambia el metodo de dibujo if key == glfw.KEY_SPACE: if action == glfw.PRESS: self.fillPolygon = not self.fillPolygon # Caso en que se cierra la ventana if key == glfw.KEY_ESCAPE: if action == glfw.PRESS: glfw.set_window_should_close(window, True) # Caso de detectar Control izquierdo, se cambia el metodo de dibujo elif key == glfw.KEY_LEFT_CONTROL: if action == glfw.PRESS: self.showAxis = not 
self.showAxis #Funcion que recibe el input para manejar la camara y controlar sus coordenadas def update_camera(self, delta): # Camara rota a la izquierda if self.is_left_pressed: self.polar_camera.set_theta(-2 * delta) # Camara rota a la derecha if self.is_right_pressed: self.polar_camera.set_theta( 2 * delta) # Camara se acerca al centro if self.is_up_pressed: self.polar_camera.set_rho(-5 * delta) # Camara se aleja del centro if self.is_down_pressed: self.polar_camera.set_rho(5 * delta) def transformGuiOverlay(lightPos, ka, kd, ks): # Funcion para actualizar el menu # start new frame context imgui.new_frame() # open new window context imgui.begin("Material control", False, imgui.WINDOW_ALWAYS_AUTO_RESIZE) # draw text label inside of current window imgui.text("Configuration sliders") # Posicion x de la fuente de luz edited, lightPos[0] = imgui.slider_float("light position X", lightPos[0], -2.3, 2.3) # Posicion y de la fuente de luz edited, lightPos[1] = imgui.slider_float("light position Y", lightPos[1], -2.3, 2.3) # Posicion z de la fuente de luz edited, lightPos[2] = imgui.slider_float("light position Z", lightPos[2], -2.3, 2.3) # Coeficiente de reflexion ambiental edited, ka = imgui.color_edit3("ka", ka[0], ka[1], ka[2]) # Boton para reiniciar la reflexion ambiental if imgui.button("clean ka"): ka = (1.0, 1.0, 1.0) # Coeficiente de reflexion difusa edited, kd = imgui.color_edit3("kd", kd[0], kd[1], kd[2]) # Boton para reiniciar la reflexion difusa if imgui.button("clean kd"): kd = (1.0, 1.0, 1.0) # Coeficiente de reflexion especular edited, ks = imgui.color_edit3("ks", ks[0], ks[1], ks[2]) # Boton para reiniciar la reflexion especular if imgui.button("clean ks"): ks = (1.0, 1.0, 1.0) # close current window context imgui.end() # pass all drawing comands to the rendering pipeline # and close frame context imgui.render() imgui.end_frame() return lightPos, ka, kd, ks if __name__ == "__main__": # Initialize glfw if not glfw.init(): glfw.set_window_should_close(window, True) width = 800 height = 800 title = "P0 - Scene" glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4) glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 1) glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE) glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE) window = glfw.create_window(width, height, title, None, None) if not window: glfw.terminate() glfw.set_window_should_close(window, True) glfw.make_context_current(window) # Binding artificial vertex array object for validation VAO = glGenVertexArrays(1) glBindVertexArray(VAO) # Different shader programs for different lighting strategies phongPipeline = ls.SimplePhongShaderProgram() phongTexPipeline = ls.SimpleTexturePhongShaderProgram() # Pipeline para dibujar texturas # This shader program does not consider lighting mvpPipeline = es.SimpleModelViewProjectionShaderProgram() # Setting up the clear screen color glClearColor(0.85, 0.85, 0.85, 1.0) # As we work in 3D, we need to check which part is in front, # and which one is at the back glEnable(GL_DEPTH_TEST) # Creating shapes on GPU memory gpuAxis = createGPUShape(mvpPipeline, bs.createAxis(4)) scene = createScene(phongPipeline) # Crea toro texturizado de piedra R1 = 0.8 # Radio mayor 1 r1 = 0.2 # Radio menor 1 torus1 = createTexTorusNode1(phongTexPipeline, R1, r1) # Crea toro texturizado de madera R2 = 0.6 # Radio mayor 2 r2 = 0.3 # Radio menor 2 torus2 = createTexTorusNode2(phongTexPipeline, R2, r2) perfMonitor = pm.PerformanceMonitor(glfw.get_time(), 0.5) # glfw will swap buffers as soon as possible 
glfw.swap_interval(0) t0 = glfw.get_time() # initilize imgui context (see documentation) imgui.create_context() impl = GlfwRenderer(window) # IMPORTANTE!! si usa imgui debe conectar los input con on_key despues de inicializar imgui, de lo contrario no funcionan los input # Se instancia un controller controller = Controller() # Se conecta el metodo on_key del controller para manejar el input del teclado glfw.set_key_callback(window, controller.on_key) # valores que controlara el menu de imgui lightPos = [0, 0, 2.3] ka = [0.2, 0.2, 0.2] kd = [0.5, 0.5, 0.5] ks = [1.0, 1.0, 1.0] # Application loop while not glfw.window_should_close(window): # Variables del tiempo t1 = glfw.get_time() delta = t1 -t0 t0 = t1 impl.process_inputs() # Using GLFW to check for input events glfw.poll_events() controller.update_camera(delta) camera = controller.get_camera() viewMatrix = camera.update_view() # Setting up the projection transform projection = tr.perspective(60, float(width) / float(height), 0.1, 100) # Clearing the screen in both, color and depth glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # imgui function lightPos, ka, kd, ks = \ transformGuiOverlay(lightPos, ka, kd, ks) # Filling or not the shapes depending on the controller state if (controller.fillPolygon): glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) else: glPolygonMode(GL_FRONT_AND_BACK, GL_LINE) # The axis is drawn without lighting effects if controller.showAxis: glUseProgram(mvpPipeline.shaderProgram) glUniformMatrix4fv(glGetUniformLocation(mvpPipeline.shaderProgram, "projection"), 1, GL_TRUE, projection) glUniformMatrix4fv(glGetUniformLocation(mvpPipeline.shaderProgram, "view"), 1, GL_TRUE, viewMatrix) glUniformMatrix4fv(glGetUniformLocation(mvpPipeline.shaderProgram, "model"), 1, GL_TRUE, tr.identity()) mvpPipeline.drawCall(gpuAxis, GL_LINES) lightingPipeline = phongPipeline # Setting all uniform shader variables # Setea luces de distinto color que se alternan cada 1 segundo if int(t1%3) == 0: r, g, b = 0.8, 0.3, 0.3 elif int(t1%3) == 1: r, g, b = 0.8, 0.8, 0.5 elif int(t1%3) == 2: r, g, b = 0.3, 0.7, 0.7 glUseProgram(lightingPipeline.shaderProgram) glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "La"), r, g, b) glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "Ld"), r, g, b) glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "Ls"), r, g, b) glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "Ka"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "Kd"), 0.5, 0.5, 0.5) glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "Ks"), 1.0, 1.0, 1.0) glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "lightPosition"), lightPos[0], lightPos[1], lightPos[2]) glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "viewPosition"), camera.eye[0], camera.eye[1], camera.eye[2]) glUniform1ui(glGetUniformLocation(lightingPipeline.shaderProgram, "shininess"), 100) glUniform1f(glGetUniformLocation(lightingPipeline.shaderProgram, "constantAttenuation"), 0.001) glUniform1f(glGetUniformLocation(lightingPipeline.shaderProgram, "linearAttenuation"), 0.03) glUniform1f(glGetUniformLocation(lightingPipeline.shaderProgram, "quadraticAttenuation"), 0.01) glUniformMatrix4fv(glGetUniformLocation(lightingPipeline.shaderProgram, "projection"), 1, GL_TRUE, projection) glUniformMatrix4fv(glGetUniformLocation(lightingPipeline.shaderProgram, "view"), 1, GL_TRUE, viewMatrix) # Dibujo de la escena base sg.drawSceneGraphNode(scene, lightingPipeline, 
"model") # Se debe cambiara los coeficientes de reflexion si se quiere dibujar con otro material glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "Ka"), ka[0], ka[1], ka[2]) glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "Kd"), kd[0], kd[1], kd[2]) glUniform3f(glGetUniformLocation(lightingPipeline.shaderProgram, "Ks"), ks[0], ks[1], ks[2]) # Pipeline para dibujar texturas glUseProgram(phongTexPipeline.shaderProgram) # White light in all components: ambient, diffuse and specular. glUniform3f(glGetUniformLocation(phongTexPipeline.shaderProgram, "La"), r, g, b) glUniform3f(glGetUniformLocation(phongTexPipeline.shaderProgram, "Ld"), r, g, b) glUniform3f(glGetUniformLocation(phongTexPipeline.shaderProgram, "Ls"), r, g, b) glUniform3f(glGetUniformLocation(phongTexPipeline.shaderProgram, "lightPosition"), lightPos[0], lightPos[1], lightPos[2]) glUniform3f(glGetUniformLocation(phongTexPipeline.shaderProgram, "viewPosition"), camera.eye[0], camera.eye[1], camera.eye[2]) glUniform1ui(glGetUniformLocation(phongTexPipeline.shaderProgram, "shininess"), 100) glUniform1f(glGetUniformLocation(phongTexPipeline.shaderProgram, "constantAttenuation"), 0.001) glUniform1f(glGetUniformLocation(phongTexPipeline.shaderProgram, "linearAttenuation"), 0.03) glUniform1f(glGetUniformLocation(phongTexPipeline.shaderProgram, "quadraticAttenuation"), 0.01) glUniform3f(glGetUniformLocation(phongTexPipeline.shaderProgram, "Ka"), ka[0], ka[1], ka[2]) glUniform3f(glGetUniformLocation(phongTexPipeline.shaderProgram, "Kd"), kd[0], kd[1], kd[2]) glUniform3f(glGetUniformLocation(phongTexPipeline.shaderProgram, "Ks"), ks[0], ks[1], ks[2]) glUniformMatrix4fv(glGetUniformLocation(phongTexPipeline.shaderProgram, "projection"), 1, GL_TRUE, projection) glUniformMatrix4fv(glGetUniformLocation(phongTexPipeline.shaderProgram, "view"), 1, GL_TRUE, viewMatrix) # Dibuja toros texturizados sg.drawSceneGraphNode(torus1, phongTexPipeline, "model") sg.drawSceneGraphNode(torus2, phongTexPipeline, "model") # Drawing the imgui texture over our drawing glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) impl.render(imgui.get_draw_data()) # Once the drawing is rendered, buffers are swap so an uncomplete drawing is never seen. glfw.swap_buffers(window) gpuAxis.clear() impl.shutdown() scene.clear() torus1.clear() torus2.clear() glfw.terminate()
[ "glfw.make_context_current", "glfw.swap_interval", "glfw.poll_events", "numpy.array", "grafica.transformations.lookAt", "numpy.sin", "imgui.slider_float", "imgui.end_frame", "imgui.color_edit3", "imgui.render", "grafica.lighting_shaders.SimplePhongShaderProgram", "glfw.get_time", "imgui.get_...
[((5320, 5337), 'imgui.new_frame', 'imgui.new_frame', ([], {}), '()\n', (5335, 5337), False, 'import imgui\n'), ((5376, 5447), 'imgui.begin', 'imgui.begin', (['"""Material control"""', '(False)', 'imgui.WINDOW_ALWAYS_AUTO_RESIZE'], {}), "('Material control', False, imgui.WINDOW_ALWAYS_AUTO_RESIZE)\n", (5387, 5447), False, 'import imgui\n'), ((5503, 5538), 'imgui.text', 'imgui.text', (['"""Configuration sliders"""'], {}), "('Configuration sliders')\n", (5513, 5538), False, 'import imgui\n'), ((5606, 5668), 'imgui.slider_float', 'imgui.slider_float', (['"""light position X"""', 'lightPos[0]', '(-2.3)', '(2.3)'], {}), "('light position X', lightPos[0], -2.3, 2.3)\n", (5624, 5668), False, 'import imgui\n'), ((5734, 5796), 'imgui.slider_float', 'imgui.slider_float', (['"""light position Y"""', 'lightPos[1]', '(-2.3)', '(2.3)'], {}), "('light position Y', lightPos[1], -2.3, 2.3)\n", (5752, 5796), False, 'import imgui\n'), ((5862, 5924), 'imgui.slider_float', 'imgui.slider_float', (['"""light position Z"""', 'lightPos[2]', '(-2.3)', '(2.3)'], {}), "('light position Z', lightPos[2], -2.3, 2.3)\n", (5880, 5924), False, 'import imgui\n'), ((5991, 6035), 'imgui.color_edit3', 'imgui.color_edit3', (['"""ka"""', 'ka[0]', 'ka[1]', 'ka[2]'], {}), "('ka', ka[0], ka[1], ka[2])\n", (6008, 6035), False, 'import imgui\n'), ((6095, 6119), 'imgui.button', 'imgui.button', (['"""clean ka"""'], {}), "('clean ka')\n", (6107, 6119), False, 'import imgui\n'), ((6210, 6254), 'imgui.color_edit3', 'imgui.color_edit3', (['"""kd"""', 'kd[0]', 'kd[1]', 'kd[2]'], {}), "('kd', kd[0], kd[1], kd[2])\n", (6227, 6254), False, 'import imgui\n'), ((6311, 6335), 'imgui.button', 'imgui.button', (['"""clean kd"""'], {}), "('clean kd')\n", (6323, 6335), False, 'import imgui\n'), ((6429, 6473), 'imgui.color_edit3', 'imgui.color_edit3', (['"""ks"""', 'ks[0]', 'ks[1]', 'ks[2]'], {}), "('ks', ks[0], ks[1], ks[2])\n", (6446, 6473), False, 'import imgui\n'), ((6533, 6557), 'imgui.button', 'imgui.button', (['"""clean ks"""'], {}), "('clean ks')\n", (6545, 6557), False, 'import imgui\n'), ((6632, 6643), 'imgui.end', 'imgui.end', ([], {}), '()\n', (6641, 6643), False, 'import imgui\n'), ((6740, 6754), 'imgui.render', 'imgui.render', ([], {}), '()\n', (6752, 6754), False, 'import imgui\n'), ((6760, 6777), 'imgui.end_frame', 'imgui.end_frame', ([], {}), '()\n', (6775, 6777), False, 'import imgui\n'), ((7015, 7062), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MAJOR', '(4)'], {}), '(glfw.CONTEXT_VERSION_MAJOR, 4)\n', (7031, 7062), False, 'import glfw\n'), ((7068, 7115), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MINOR', '(1)'], {}), '(glfw.CONTEXT_VERSION_MINOR, 1)\n', (7084, 7115), False, 'import glfw\n'), ((7121, 7174), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_FORWARD_COMPAT', 'GL_TRUE'], {}), '(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)\n', (7137, 7174), False, 'import glfw\n'), ((7180, 7243), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_PROFILE', 'glfw.OPENGL_CORE_PROFILE'], {}), '(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n', (7196, 7243), False, 'import glfw\n'), ((7260, 7312), 'glfw.create_window', 'glfw.create_window', (['width', 'height', 'title', 'None', 'None'], {}), '(width, height, title, None, None)\n', (7278, 7312), False, 'import glfw\n'), ((7420, 7453), 'glfw.make_context_current', 'glfw.make_context_current', (['window'], {}), '(window)\n', (7445, 7453), False, 'import glfw\n'), ((7668, 7697), 'grafica.lighting_shaders.SimplePhongShaderProgram', 
'ls.SimplePhongShaderProgram', ([], {}), '()\n', (7695, 7697), True, 'import grafica.lighting_shaders as ls\n'), ((7722, 7758), 'grafica.lighting_shaders.SimpleTexturePhongShaderProgram', 'ls.SimpleTexturePhongShaderProgram', ([], {}), '()\n', (7756, 7758), True, 'import grafica.lighting_shaders as ls\n'), ((7867, 7910), 'grafica.easy_shaders.SimpleModelViewProjectionShaderProgram', 'es.SimpleModelViewProjectionShaderProgram', ([], {}), '()\n', (7908, 7910), True, 'import grafica.easy_shaders as es\n'), ((8744, 8765), 'glfw.swap_interval', 'glfw.swap_interval', (['(0)'], {}), '(0)\n', (8762, 8765), False, 'import glfw\n'), ((8776, 8791), 'glfw.get_time', 'glfw.get_time', ([], {}), '()\n', (8789, 8791), False, 'import glfw\n'), ((8850, 8872), 'imgui.create_context', 'imgui.create_context', ([], {}), '()\n', (8870, 8872), False, 'import imgui\n'), ((8885, 8905), 'imgui.integrations.glfw.GlfwRenderer', 'GlfwRenderer', (['window'], {}), '(window)\n', (8897, 8905), False, 'from imgui.integrations.glfw import GlfwRenderer\n'), ((9200, 9248), 'glfw.set_key_callback', 'glfw.set_key_callback', (['window', 'controller.on_key'], {}), '(window, controller.on_key)\n', (9221, 9248), False, 'import glfw\n'), ((15617, 15633), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (15631, 15633), False, 'import glfw\n'), ((748, 774), 'numpy.array', 'np.array', (['[0.0, 0.0, -0.5]'], {}), '([0.0, 0.0, -0.5])\n', (756, 774), True, 'import numpy as np\n'), ((1045, 1070), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1053, 1070), True, 'import numpy as np\n'), ((1195, 1214), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1203, 1214), True, 'import numpy as np\n'), ((2048, 2089), 'grafica.transformations.lookAt', 'tr.lookAt', (['self.eye', 'self.center', 'self.up'], {}), '(self.eye, self.center, self.up)\n', (2057, 2089), True, 'import grafica.transformations as tr\n'), ((6880, 6891), 'glfw.init', 'glfw.init', ([], {}), '()\n', (6889, 6891), False, 'import glfw\n'), ((6902, 6944), 'glfw.set_window_should_close', 'glfw.set_window_should_close', (['window', '(True)'], {}), '(window, True)\n', (6930, 6944), False, 'import glfw\n'), ((7344, 7360), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (7358, 7360), False, 'import glfw\n'), ((7370, 7412), 'glfw.set_window_should_close', 'glfw.set_window_should_close', (['window', '(True)'], {}), '(window, True)\n', (7398, 7412), False, 'import glfw\n'), ((8210, 8226), 'grafica.basic_shapes.createAxis', 'bs.createAxis', (['(4)'], {}), '(4)\n', (8223, 8226), True, 'import grafica.basic_shapes as bs\n'), ((8667, 8682), 'glfw.get_time', 'glfw.get_time', ([], {}), '()\n', (8680, 8682), False, 'import glfw\n'), ((9447, 9479), 'glfw.window_should_close', 'glfw.window_should_close', (['window'], {}), '(window)\n', (9471, 9479), False, 'import glfw\n'), ((9527, 9542), 'glfw.get_time', 'glfw.get_time', ([], {}), '()\n', (9540, 9542), False, 'import glfw\n'), ((9674, 9692), 'glfw.poll_events', 'glfw.poll_events', ([], {}), '()\n', (9690, 9692), False, 'import glfw\n'), ((12919, 12974), 'grafica.scene_graph.drawSceneGraphNode', 'sg.drawSceneGraphNode', (['scene', 'lightingPipeline', '"""model"""'], {}), "(scene, lightingPipeline, 'model')\n", (12940, 12974), True, 'import grafica.scene_graph as sg\n'), ((15098, 15154), 'grafica.scene_graph.drawSceneGraphNode', 'sg.drawSceneGraphNode', (['torus1', 'phongTexPipeline', '"""model"""'], {}), "(torus1, phongTexPipeline, 'model')\n", (15119, 15154), True, 'import grafica.scene_graph as 
sg\n'), ((15164, 15220), 'grafica.scene_graph.drawSceneGraphNode', 'sg.drawSceneGraphNode', (['torus2', 'phongTexPipeline', '"""model"""'], {}), "(torus2, phongTexPipeline, 'model')\n", (15185, 15220), True, 'import grafica.scene_graph as sg\n'), ((15481, 15506), 'glfw.swap_buffers', 'glfw.swap_buffers', (['window'], {}), '(window)\n', (15498, 15506), False, 'import glfw\n'), ((15349, 15370), 'imgui.get_draw_data', 'imgui.get_draw_data', ([], {}), '()\n', (15368, 15370), False, 'import imgui\n'), ((1826, 1844), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (1832, 1844), True, 'import numpy as np\n'), ((1896, 1914), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (1902, 1914), True, 'import numpy as np\n'), ((4306, 4348), 'glfw.set_window_should_close', 'glfw.set_window_should_close', (['window', '(True)'], {}), '(window, True)\n', (4334, 4348), False, 'import glfw\n'), ((10898, 10911), 'grafica.transformations.identity', 'tr.identity', ([], {}), '()\n', (10909, 10911), True, 'import grafica.transformations as tr\n')]
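The render loop in the record above uploads constantAttenuation, linearAttenuation and quadraticAttenuation uniforms, but the shader source itself is not included in the record. A minimal sketch of the conventional point-light attenuation those uniform names usually denote; the formula here is an assumption, not taken from the record:

import math

# Sketch only: assumes the standard attenuation model 1 / (c + l*d + q*d^2),
# with the same coefficient values the loop above uploads.
def attenuation(d, constant=0.001, linear=0.03, quadratic=0.01):
    # light contribution is scaled down as distance d grows
    return 1.0 / (constant + linear * d + quadratic * d ** 2)

print(round(attenuation(2.0), 2))  # ~9.9: with coefficients this small, nearby fragments are brightened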
""" Module providing testing of `halotools.mock_observables.velocity_marked_npairs_3d` """ from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np from astropy.tests.helper import pytest from astropy.utils.misc import NumpyRNGContext from ..velocity_marked_npairs_3d import velocity_marked_npairs_3d from ..velocity_marked_npairs_3d import _velocity_marked_npairs_3d_process_weights as process_weights_3d __all__ = ('test_velocity_marked_npairs_3d_test1', ) fixed_seed = 43 def test_velocity_marked_npairs_3d_test1(): npts = 10 with NumpyRNGContext(fixed_seed): sample1 = np.random.random((npts, 3)) weights1 = np.random.random((npts, 6)) weight_func_id = 11 __ = process_weights_3d(sample1, sample1, weights1, weights1, weight_func_id) def test_velocity_marked_npairs_3d_test2(): npts = 10 with NumpyRNGContext(fixed_seed): sample1 = np.random.random((npts, 3)) sample2 = np.random.random((npts, 3)) weights1 = np.random.random((npts, 6)) weights2 = np.random.random((npts, 6)) weight_func_id = 11 __ = process_weights_3d(sample1, sample2, weights1, weights2, weight_func_id) def test_velocity_marked_npairs_3d_test3(): npts = 10 with NumpyRNGContext(fixed_seed): sample1 = np.random.random((npts, 3)) sample2 = np.random.random((npts, 3)) weights1 = np.random.random((npts, 7)) weights2 = np.random.random((npts, 7)) weight_func_id = 11 with pytest.raises(ValueError) as err: __ = process_weights_3d(sample1, sample2, weights1, weights2, weight_func_id) substr = "For this value of `weight_func_id`, there should be" assert substr in err.value.args[0] def test_velocity_marked_npairs_3d_test4(): npts = 10 with NumpyRNGContext(fixed_seed): sample1 = np.random.random((npts, 3)) sample2 = np.random.random((npts, 3)) weights1 = np.random.random(npts) weights2 = np.random.random(npts) weight_func_id = 11 with pytest.raises(ValueError) as err: __ = process_weights_3d(sample1, sample2, weights1, weights2, weight_func_id) substr = "does not have the correct length. " assert substr in err.value.args[0] def test_velocity_marked_npairs_3d_test5(): npts = 10 with NumpyRNGContext(fixed_seed): sample1 = np.random.random((npts, 3)) sample2 = np.random.random((npts, 3)) weights1 = np.random.random((npts, 3)) weights2 = np.random.random((npts, 3)) weight_func_id = 11 with pytest.raises(ValueError) as err: __ = process_weights_3d(sample1, sample2, weights1, weights2, weight_func_id) substr = "For this value of `weight_func_id`, there should be " assert substr in err.value.args[0] def test_velocity_marked_npairs_3d_test6(): npts = 10 with NumpyRNGContext(fixed_seed): sample1 = np.random.random((npts, 3)) sample2 = np.random.random((npts, 3)) weights1 = np.random.random((npts, 3, 4)) weights2 = np.random.random((npts, 3, 4)) weight_func_id = 11 with pytest.raises(ValueError) as err: __ = process_weights_3d(sample1, sample2, weights1, weights2, weight_func_id) substr = "You must either pass in a 1-D or 2-D array" assert substr in err.value.args[0]
[ "numpy.random.random", "astropy.tests.helper.pytest.raises", "astropy.utils.misc.NumpyRNGContext" ]
[((594, 621), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (609, 621), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((641, 668), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (657, 668), True, 'import numpy as np\n'), ((688, 715), 'numpy.random.random', 'np.random.random', (['(npts, 6)'], {}), '((npts, 6))\n', (704, 715), True, 'import numpy as np\n'), ((892, 919), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (907, 919), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((939, 966), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (955, 966), True, 'import numpy as np\n'), ((985, 1012), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (1001, 1012), True, 'import numpy as np\n'), ((1032, 1059), 'numpy.random.random', 'np.random.random', (['(npts, 6)'], {}), '((npts, 6))\n', (1048, 1059), True, 'import numpy as np\n'), ((1079, 1106), 'numpy.random.random', 'np.random.random', (['(npts, 6)'], {}), '((npts, 6))\n', (1095, 1106), True, 'import numpy as np\n'), ((1283, 1310), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (1298, 1310), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((1330, 1357), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (1346, 1357), True, 'import numpy as np\n'), ((1376, 1403), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (1392, 1403), True, 'import numpy as np\n'), ((1423, 1450), 'numpy.random.random', 'np.random.random', (['(npts, 7)'], {}), '((npts, 7))\n', (1439, 1450), True, 'import numpy as np\n'), ((1470, 1497), 'numpy.random.random', 'np.random.random', (['(npts, 7)'], {}), '((npts, 7))\n', (1486, 1497), True, 'import numpy as np\n'), ((1532, 1557), 'astropy.tests.helper.pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1545, 1557), False, 'from astropy.tests.helper import pytest\n'), ((1827, 1854), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (1842, 1854), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((1874, 1901), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (1890, 1901), True, 'import numpy as np\n'), ((1920, 1947), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (1936, 1947), True, 'import numpy as np\n'), ((1967, 1989), 'numpy.random.random', 'np.random.random', (['npts'], {}), '(npts)\n', (1983, 1989), True, 'import numpy as np\n'), ((2009, 2031), 'numpy.random.random', 'np.random.random', (['npts'], {}), '(npts)\n', (2025, 2031), True, 'import numpy as np\n'), ((2066, 2091), 'astropy.tests.helper.pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2079, 2091), False, 'from astropy.tests.helper import pytest\n'), ((2344, 2371), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (2359, 2371), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((2391, 2418), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (2407, 2418), True, 'import numpy as np\n'), ((2437, 2464), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (2453, 2464), True, 'import numpy as np\n'), ((2484, 2511), 'numpy.random.random', 
'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (2500, 2511), True, 'import numpy as np\n'), ((2531, 2558), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (2547, 2558), True, 'import numpy as np\n'), ((2593, 2618), 'astropy.tests.helper.pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2606, 2618), False, 'from astropy.tests.helper import pytest\n'), ((2889, 2916), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (2904, 2916), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((2936, 2963), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (2952, 2963), True, 'import numpy as np\n'), ((2982, 3009), 'numpy.random.random', 'np.random.random', (['(npts, 3)'], {}), '((npts, 3))\n', (2998, 3009), True, 'import numpy as np\n'), ((3029, 3059), 'numpy.random.random', 'np.random.random', (['(npts, 3, 4)'], {}), '((npts, 3, 4))\n', (3045, 3059), True, 'import numpy as np\n'), ((3079, 3109), 'numpy.random.random', 'np.random.random', (['(npts, 3, 4)'], {}), '((npts, 3, 4))\n', (3095, 3109), True, 'import numpy as np\n'), ((3144, 3169), 'astropy.tests.helper.pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3157, 3169), False, 'from astropy.tests.helper import pytest\n')]
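The tests in the record above wrap every random draw in NumpyRNGContext(fixed_seed), a context manager that seeds NumPy's global RNG on entry and restores the previous state on exit, so each test sees identical samples. A small demonstration of that behavior:

import numpy as np
from astropy.utils.misc import NumpyRNGContext

with NumpyRNGContext(43):
    a = np.random.random(3)
with NumpyRNGContext(43):
    b = np.random.random(3)

# identical draws; the surrounding global RNG state is left undisturbed
assert np.allclose(a, b)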
'''
Train and operate with PMI-SVD embeddings
'''
from collections import Counter

import numpy as np
import pandas as pd
from scipy.sparse import linalg
import umap

import src.utility.general as util


class PmiSvdEmbeddings:
    def __init__(self, texts):
        '''
        Args:
            - texts (str|list): text data to model
        '''
        # check if texts are in the right format
        self.texts = util.validate_input(texts)
        # print report
        print('found {} document(s) in texts'.format(len(self.texts)))
        # initialize fields for later
        self.id_vocab = {}
        self.tok_vocab = {}
        self.model = {}

    def make_vocabulary(self):
        '''
        Assigns an index to each word.
        Creates two vocabularies for translation:
            id_vocab: keys are words, values are indices
            tok_vocab: keys are indices, values are words
        '''
        id_vocab = dict()
        for doc in self.texts:
            for token in doc:
                if token not in id_vocab:
                    id_vocab[token] = len(id_vocab)
        tok_vocab = {indx: tok for tok, indx in id_vocab.items()}
        # report
        print('found {} unique tokens in texts'.format(len(id_vocab)))
        # save dictionaries
        self.id_vocab = id_vocab
        self.tok_vocab = tok_vocab

    def count_pairs(self, back_window: int, front_window: int):
        '''
        Count times word pairs appear together in a specified window.
        Args:
            - back_window (int): how many words before should be counted as a pair
            - front_window (int): how many words after should be counted as a pair
        Returns:
            - pair_counts (collections.Counter): counts of word pairs
        '''
        # check input
        if not all([isinstance(back_window, int),
                    isinstance(front_window, int)]):
            raise ValueError("back_window and front_window must be int")
        # count pairs with counter
        pair_counts = Counter()
        for doc in self.texts:
            # inside docs, go through words
            for i_left, word in enumerate(doc):
                # minimum word index in the window
                lwr_window_limit = max(0, i_left - back_window)
                # maximum word index in the window
                upr_window_limit = min(len(doc) - 1, i_left + front_window)
                # get indices of words appearing
                # together with current word
                words_in_window = [i_right for i_right
                                   in range(lwr_window_limit, upr_window_limit + 1)
                                   if i_right != i_left]
                # add word pairs to the counter
                for i_right in words_in_window:
                    skipgram = (doc[i_left], doc[i_right])
                    pair_counts[skipgram] += 1
        return pair_counts

    @staticmethod
    def sparse_it(row_ids, col_ids, values) -> np.array:
        '''
        transform lists into sparse np.array
        '''
        # determine shape (max index)
        this_shape = (max(row_ids)+1, max(col_ids)+1)
        # generate a matrix of zeroes of that shape
        sparse_mat = np.zeros(this_shape)
        # fill desired cells with values
        sparse_mat[row_ids, col_ids] = values
        return sparse_mat

    def word_cooc_matrix(self, pair_counts) -> np.array:
        '''
        Convert pair_counts into a sparse matrix.
        '''
        row_ids = list()
        col_ids = list()
        count_values = list()
        for (word_left, word_right), count in pair_counts.items():
            # find indices of word pairs
            word_left_i = self.id_vocab[word_left]
            word_right_i = self.id_vocab[word_right]
            # update lists
            row_ids.append(word_left_i)
            col_ids.append(word_right_i)
            count_values.append(count)
        cooc_mat = self.sparse_it(row_ids, col_ids, count_values)
        return cooc_mat

    def pmi(self, cooc_mat, id_vocab, pair_counts, alpha: float, pmi_type: str):
        '''
        Calculate Pointwise Mutual Information Measures.
        - Positive Pointwise Mutual Information (pmi_type='spmi')
        - Smoothed Positive Pointwise Mutual Information (pmi_type='sppmi')
        '''
        # sums
        n_pairs = cooc_mat.sum()
        sum_over_words = cooc_mat.sum(axis=0).flatten()
        sum_over_contexts = cooc_mat.sum(axis=1).flatten()
        # smoothing
        sum_over_words_alpha = sum_over_words**alpha
        nca_denom = np.sum(cooc_mat.sum(axis=0).flatten()**alpha)
        # calculate PMI for each pair of words
        row_ids = []
        col_ids = []
        spmi_values = []
        sppmi_values = []
        for (word_left, word_right), count in pair_counts.items():
            # find indices of word pairs
            word_left_i = id_vocab[word_left]
            word_right_i = id_vocab[word_right]
            # define variables
            nwc = count
            Pwc = nwc / n_pairs
            nw = sum_over_contexts[word_left_i]
            Pw = nw / n_pairs
            nca = sum_over_words_alpha[word_right_i]
            Pca = nca / nca_denom
            # apply formula
            spmi = np.log2(Pwc/(Pw*Pca))
            # replace negative values with 0
            # by default, that is something you want
            sppmi = max(spmi, 0)
            # update lists
            row_ids.append(word_left_i)
            col_ids.append(word_right_i)
            spmi_values.append(spmi)
            sppmi_values.append(sppmi)
        # prepare sparse matrices for output
        if pmi_type == 'spmi':
            out_mat = self.sparse_it(row_ids, col_ids, spmi_values)
        if pmi_type == 'sspmi':
            out_mat = self.sparse_it(row_ids, col_ids, sppmi_values)
        return out_mat

    def train_model(self, back_window: int, front_window: int,
                    pmi_type: str, alpha: float, embedding_dim: int):
        '''
        Main method for fitting the PMI-SVD embeddings.

        Parameters
        ----------
        back_window : int
            How many words before x should be counted as a pair?
        front_window : int
            How many words after x should be counted as a pair?
        pmi_type : str
            Type of PMI measure to use ('spmi' or 'sspmi')
        alpha : float
            Smoothing factor to apply to PMI scores. Recommended alpha = 0.75
        embedding_dim : int
            How many dimensions should word vectors have?
        '''
        # create vocabularies
        self.make_vocabulary()
        # count pairs
        pair_counts = self.count_pairs(back_window, front_window)
        # make a matrix of word co-occurrence
        cooc_mat = self.word_cooc_matrix(pair_counts)
        # calculate Pointwise Mutual Information
        pmi_mat = self.pmi(cooc_mat=cooc_mat,
                           id_vocab=self.id_vocab,
                           pair_counts=pair_counts,
                           alpha=alpha,
                           pmi_type=pmi_type)
        # test if desired embedding_dim is possible to fit
        if embedding_dim >= pmi_mat.shape[1]:
            raise ValueError(
                'embedding_dim must be lower than \
                the number of columns in PMI matrix ({})'
                .format(pmi_mat.shape[1]))
        # reduce dimensions: Singular Value Decomposition
        u, s, vt = linalg.svds(pmi_mat, embedding_dim)
        self.model = u + vt.T

    def words_df(self) -> pd.DataFrame:
        '''
        Get a DF counting occurrence of individual words.
        returns a dataframe of words and counts in descending order
        '''
        word_counts = Counter()
        for doc in self.texts:
            for token in doc:
                word_counts[token] += 1
        counter_df = (pd.Series(word_counts, name='count')
                      .reset_index()
                      .rename(columns={'index': 'token'})
                      .sort_values(by='count', ascending=False)
                      .reset_index())
        return counter_df

    def find(self, query):
        '''
        input word, get vector representation
        '''
        if query in self.id_vocab:
            query_index = self.id_vocab[query]
            return self.model[query_index]
        raise ValueError(
            '"{}" is not present in the model'.format(query))

    def dotprod_two_words(self, word1, word2):
        '''
        Calculate the dot product between two given embeddings.
        The order doesn't matter.
        - word1: str, first word to query
        - word2: str, second word to query
        '''
        if not all(isinstance(i, str) for i in [word1, word2]):
            raise ValueError(
                "expected string, please input a string to query")
        return np.dot(self.find(word1), self.find(word2))

    def similar_to(self, x, n_similar=5):
        '''
        Print n most similar words to x.
        Cosine similarity is the metric used (min = -1, max = 1)
        Args:
            - x (np.array | string): can be a vector, or a word that's in the model
            - n_similar: int, number of most similar words to output
        '''
        if not isinstance(n_similar, int):
            raise ValueError("n_similar expected int")
        if isinstance(x, str):
            x = self.find(x)
        # cosine similarity matrix
        cos_sim_matrix = np.dot(self.model, x) / \
            (np.linalg.norm(self.model)*np.linalg.norm(x))
        # get indices of interesting cells
        similar_subset = np.argpartition(-1 * cos_sim_matrix, n_similar + 1)[:n_similar + 1]
        # extract similar words and score
        list_similar = [(float(cos_sim_matrix[i]), self.tok_vocab[i])
                        for i in similar_subset]
        return sorted(list_similar, reverse=True)

    def reduce_dim_umap(self, n_components=2, n_neighbors=15,
                        min_dist=0.1, metric='cosine'):
        '''
        reduce the dimensions of trained embeddings using UMAP.
        Output is 2D for easy visualization.
        Algorithm parameters are pre-defined here to give a dirty
        overview of the global structure of the word-embedding model.
        Results may vary quite a bit based on UMAP parameters.
        For tweaking the parameters, see
        https://umap-learn.readthedocs.io/en/latest/parameters.html
        '''
        reduced = umap.UMAP(
            # reduce to 2 dimensions
            n_components=n_components,
            # preserve local structure (low number)
            # or global structure (high number) of the data
            n_neighbors=n_neighbors,
            # how tightly do we allow to pack points together
            min_dist=min_dist,
            # correlational metric
            metric=metric)
        self.modelumap = reduced.fit_transform(self.model)
[ "pandas.Series", "numpy.argpartition", "src.utility.general.validate_input", "collections.Counter", "numpy.zeros", "numpy.dot", "umap.UMAP", "numpy.linalg.norm", "scipy.sparse.linalg.svds", "numpy.log2" ]
[((415, 441), 'src.utility.general.validate_input', 'util.validate_input', (['texts'], {}), '(texts)\n', (434, 441), True, 'import src.utility.general as util\n'), ((2013, 2022), 'collections.Counter', 'Counter', ([], {}), '()\n', (2020, 2022), False, 'from collections import Counter\n'), ((3276, 3296), 'numpy.zeros', 'np.zeros', (['this_shape'], {}), '(this_shape)\n', (3284, 3296), True, 'import numpy as np\n'), ((7617, 7652), 'scipy.sparse.linalg.svds', 'linalg.svds', (['pmi_mat', 'embedding_dim'], {}), '(pmi_mat, embedding_dim)\n', (7628, 7652), False, 'from scipy.sparse import linalg\n'), ((7896, 7905), 'collections.Counter', 'Counter', ([], {}), '()\n', (7903, 7905), False, 'from collections import Counter\n'), ((10720, 10820), 'umap.UMAP', 'umap.UMAP', ([], {'n_components': 'n_components', 'n_neighbors': 'n_neighbors', 'min_dist': 'min_dist', 'metric': 'metric'}), '(n_components=n_components, n_neighbors=n_neighbors, min_dist=\n min_dist, metric=metric)\n', (10729, 10820), False, 'import umap\n'), ((5351, 5376), 'numpy.log2', 'np.log2', (['(Pwc / (Pw * Pca))'], {}), '(Pwc / (Pw * Pca))\n', (5358, 5376), True, 'import numpy as np\n'), ((9637, 9658), 'numpy.dot', 'np.dot', (['self.model', 'x'], {}), '(self.model, x)\n', (9643, 9658), True, 'import numpy as np\n'), ((9791, 9842), 'numpy.argpartition', 'np.argpartition', (['(-1 * cos_sim_matrix)', '(n_similar + 1)'], {}), '(-1 * cos_sim_matrix, n_similar + 1)\n', (9806, 9842), True, 'import numpy as np\n'), ((9676, 9702), 'numpy.linalg.norm', 'np.linalg.norm', (['self.model'], {}), '(self.model)\n', (9690, 9702), True, 'import numpy as np\n'), ((9703, 9720), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (9717, 9720), True, 'import numpy as np\n'), ((8030, 8066), 'pandas.Series', 'pd.Series', (['word_counts'], {'name': '"""count"""'}), "(word_counts, name='count')\n", (8039, 8066), True, 'import pandas as pd\n')]
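The pmi method in the record above scores a word-context pair as log2(Pwc / (Pw * Pca)), with the context probability smoothed by raising counts to the power alpha, and clips negatives for the 'sspmi' variant. A worked toy example; all counts below are made up for illustration:

import numpy as np

# Made-up counts: the pair is seen 4 times out of 100 pairs; the word appears
# in 10 pairs; two contexts appear in 20 and 80 pairs; smoothing alpha = 0.75.
n_pairs = 100
Pwc = 4 / n_pairs                              # joint probability
Pw = 10 / n_pairs                              # word probability
Pca = 20**0.75 / (20**0.75 + 80**0.75)         # alpha-smoothed context probability
spmi = np.log2(Pwc / (Pw * Pca))               # signed PMI
sppmi = max(spmi, 0)                           # clipped at 0, as in the class
print(round(spmi, 3), round(sppmi, 3))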
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Some common lineshapes and distribution functions
"""
from __future__ import division

from numpy import exp, pi, sqrt, where
from scipy import special

from lmfit.lineshapes import (gaussian, lorentzian, voigt, pvoigt, moffat,
                              pearson7, breit_wigner, damped_oscillator,
                              dho, logistic, lognormal, students_t,
                              donaich, skewed_gaussian, expgaussian,
                              skewed_voigt, step, rectangle, exponential,
                              powerlaw, linear, parabolic)

s2pi = sqrt(2*pi)
s2 = sqrt(2.0)


def hypermet(x, amplitude=1.0, center=0., sigma=1.0,
             step=0, tail=0, gamma=0.1):
    """
    hypermet function to simulate XRF peaks and/or Compton Scatter Peak

    Arguments
    ---------
      x          array of ordinate (energy) values
      amplitude  overall scale factor
      center     peak centroid
      sigma      Gaussian sigma
      step       step parameter for low-x erfc step [0]
      tail       amplitude of tail function         [0]
      gamma      slope of tail function             [0.1]

    Notes
    -----
    The function is given by (with error checking for small values of
    sigma and gamma, and with s2 = sqrt(2) and s2pi = sqrt(2*pi)):

        arg      = (x - center)/sigma
        gauss    = (1.0/(s2pi*sigma)) * exp(-arg**2 / 2)
        sfunc    = step * max(gauss) * erfc(arg/2.0) / 2.0
        tfunc    = tail * exp((x-center)/(gamma*sigma)) * erfc(arg/2.0 + 1.0/gamma)
        hypermet = amplitude * (gauss + sfunc + tfunc) / 2.0

    This follows the definitions given in
        ED-XRF SPECTRUM EVALUATION AND QUANTITATIVE ANALYSIS
        USING MULTIVARIATE AND NONLINEAR TECHNIQUES
        <NAME>, <NAME>
        JCPDS-International Centre for Diffraction Data 2000,
        Advances in X-ray Analysis, Vol. 43, 560

    But is modified slightly to better preserve area with
    changing tail and gamma

    """
    sigma = max(1.e-8, sigma)
    gamma = max(1.e-8, gamma)

    arg = (x - center)/sigma
    arg[where(arg>100)] = 100.0
    arg[where(arg<-100)] = -100.0

    gscale = s2pi*sigma
    gauss = (1.0/gscale) * exp(-arg**2 / 2.0)
    sfunc = step * special.erfc(arg/2.0) / (2.0*gscale)
    targ = (x-center)/(gamma*sigma)
    targ[where(targ>100)] = 100.0
    targ[where(targ<-100)] = -100.0
    tfunc = exp(targ) * special.erfc(arg/2.0 + 1.0/gamma)
    tfunc = tail*tfunc / (max(tfunc)*gscale)
    return amplitude * (gauss + sfunc + tfunc) / 2.0


def erf(x):
    """Return the error function.
    erf = 2/sqrt(pi)*integral(exp(-t**2), t=[0, z])
    """
    return special.erf(x)


def erfc(x):
    """Return the complementary error function.
    erfc = 1 - erf(x)
    """
    return special.erfc(x)


def wofz(x):
    """Return the Faddeeva function for complex argument.
    wofz = exp(-x**2)*erfc(-i*x)
    """
    return special.wofz(x)


def gamma(x):
    """Return the gamma function."""
    return special.gamma(x)


def gammaln(x):
    """Return the log of absolute value of gamma function."""
    return special.gammaln(x)
[ "numpy.sqrt", "numpy.where", "numpy.exp", "scipy.special.erf", "scipy.special.gamma", "scipy.special.erfc", "scipy.special.wofz", "scipy.special.gammaln" ]
[((606, 618), 'numpy.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (610, 618), False, 'from numpy import exp, pi, sqrt, where\n'), ((622, 631), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (626, 631), False, 'from numpy import exp, pi, sqrt, where\n'), ((2630, 2644), 'scipy.special.erf', 'special.erf', (['x'], {}), '(x)\n', (2641, 2644), False, 'from scipy import special\n'), ((2748, 2763), 'scipy.special.erfc', 'special.erfc', (['x'], {}), '(x)\n', (2760, 2763), False, 'from scipy import special\n'), ((2887, 2902), 'scipy.special.wofz', 'special.wofz', (['x'], {}), '(x)\n', (2899, 2902), False, 'from scipy import special\n'), ((2966, 2982), 'scipy.special.gamma', 'special.gamma', (['x'], {}), '(x)\n', (2979, 2982), False, 'from scipy import special\n'), ((3073, 3091), 'scipy.special.gammaln', 'special.gammaln', (['x'], {}), '(x)\n', (3088, 3091), False, 'from scipy import special\n'), ((2063, 2079), 'numpy.where', 'where', (['(arg > 100)'], {}), '(arg > 100)\n', (2068, 2079), False, 'from numpy import exp, pi, sqrt, where\n'), ((2095, 2112), 'numpy.where', 'where', (['(arg < -100)'], {}), '(arg < -100)\n', (2100, 2112), False, 'from numpy import exp, pi, sqrt, where\n'), ((2172, 2192), 'numpy.exp', 'exp', (['(-arg ** 2 / 2.0)'], {}), '(-arg ** 2 / 2.0)\n', (2175, 2192), False, 'from numpy import exp, pi, sqrt, where\n'), ((2293, 2310), 'numpy.where', 'where', (['(targ > 100)'], {}), '(targ > 100)\n', (2298, 2310), False, 'from numpy import exp, pi, sqrt, where\n'), ((2327, 2345), 'numpy.where', 'where', (['(targ < -100)'], {}), '(targ < -100)\n', (2332, 2345), False, 'from numpy import exp, pi, sqrt, where\n'), ((2367, 2376), 'numpy.exp', 'exp', (['targ'], {}), '(targ)\n', (2370, 2376), False, 'from numpy import exp, pi, sqrt, where\n'), ((2379, 2416), 'scipy.special.erfc', 'special.erfc', (['(arg / 2.0 + 1.0 / gamma)'], {}), '(arg / 2.0 + 1.0 / gamma)\n', (2391, 2416), False, 'from scipy import special\n'), ((2210, 2233), 'scipy.special.erfc', 'special.erfc', (['(arg / 2.0)'], {}), '(arg / 2.0)\n', (2222, 2233), False, 'from scipy import special\n')]
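With step=0 and tail=0, the hypermet in the record above reduces to a unit-area Gaussian divided by the trailing 2.0, so the curve should integrate to about amplitude/2. A quick numeric sanity check of that property (illustrative only):

import numpy as np

# Evaluate just the Gaussian term of hypermet (center=0, sigma=1,
# step=tail=0) and integrate; the final /2.0 in the function means the
# result should be close to 0.5 for amplitude = 1.
x = np.linspace(-6, 6, 2001)
gauss = np.exp(-x**2 / 2.0) / np.sqrt(2 * np.pi)
print(round(np.trapz(gauss, x) / 2.0, 4))   # ~0.5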
import numpy as np import time import cv2 import os class YoloObjectsDetector: def __init__(self, image, yolo_path=None, min_confidence=0.5, threshold=0.3): self.image = image # get image height and width self.height = self.image.shape[0] self.width = self.image.shape[1] # load yolo model labels and set paths to yolo weights and config if yolo_path is None: yolo_path = os.path.abspath(os.path.dirname(__file__)) self.labels = open(yolo_path + "/coco.names").read().strip().split("\n") self.weights_path = yolo_path + "/yolov3.weights" self.config_path = yolo_path + "/yolov3.cfg" # set confidence and threshold level self.min_confidence = min_confidence self.threshold = threshold # initialize a list of colors to represent class labels self.colors = np.random.randint(0, 255, size=(len(self.labels), 3), dtype="uint8") def get_selected_object_bounding_box(self): outputs = self.load_run_model() boxes, confidences, classIDs = self.generate_bounding_boxes(outputs) bbox = self.get_bbox_from_image(boxes, confidences, classIDs) return bbox def load_run_model(self): # load YOLO object detector trained on COCO dataset print("Loading YOLO from disk...") net = cv2.dnn.readNetFromDarknet(self.config_path, self.weights_path) ln = net.getLayerNames() ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()] # construct a blob from the image and set as input blob = cv2.dnn.blobFromImage(self.image, 1 / 255.0, (416, 416), swapRB=True, crop=False) net.setInput(blob) # perform a forward pass of the YOLO object detector, giving # bboxes of detected objects and associated probabilities start = time.time() outputs = net.forward(ln) end = time.time() print(f"YOLO objects detecting took {round(end - start, 4)} seconds") return outputs def generate_bounding_boxes(self, layerOutputs): # initialize lists of detected bboxes, confidences, and class IDs boxes = [] confidences = [] classIDs = [] # loop over each of the layer outputs for output in layerOutputs: # loop over each of the detections for detection in output: # get the class ID and confidence of the current object detection scores = detection[5:] classID = np.argmax(scores) confidence = scores[classID] # filter objects with confidence smaller than minimum given as class parameter if confidence > self.min_confidence: # scale the bounding box coordinates back relative to the size of the image box = detection[0:4] * np.array([self.width, self.height, self.width, self.height]) (centerX, centerY, width, height) = box.astype("int") # get coordinates of top left corner of the bbox x = int(centerX - (width / 2)) y = int(centerY - (height / 2)) # update list of bboxes, confidences, and class IDs boxes.append([x, y, int(width), int(height)]) confidences.append(float(confidence)) classIDs.append(classID) return boxes, confidences, classIDs def paint_box_on_object(self, box, color, label, confidence): # unpack coordinates x, y, w, h = box[0], box[1], box[2], box[3] # draw a bounding box and label on the image cv2.rectangle(self.image, (x, y), (x + w, y + h), color, 2) text = "{}: {:.4f}".format(label, confidence) cv2.putText(self.image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) def get_bbox_from_image(self, boxes, confidences, classIDs): # apply non-maxima suppression to suppress weak, overlapping bounding boxes indexes = cv2.dnn.NMSBoxes(boxes, confidences, self.min_confidence, self.threshold) n = len(indexes) # creat lists of filtered bboxes, confidences and classIDs boxes = [boxes[i] for i in indexes] confidences = [confidences[i] for i in indexes] classIDs = 
[classIDs[i] for i in indexes] # create list containing center of each bounding box centers = np.array([[int(bbox[0] + bbox[2] / 2), int(bbox[1] + bbox[3] / 2)] for bbox in boxes]) if len(boxes) == 0: return None if len(boxes) == 1: return boxes[0] # list of selected bboxes selected = [] for i in range(n): color = [int(c) for c in self.colors[classIDs[i]]] label = self.labels[classIDs[i]] confidence = confidences[i] self.paint_box_on_object(boxes[i], color, label, confidence) # on mouse event function to select bbox def onMouse(event, x, y, flags, param): nonlocal selected if event == cv2.EVENT_LBUTTONDOWN: print('x = %d, y = %d' % (x, y)) selected_index = np.argmin(np.linalg.norm(centers - np.array([x, y]), axis=1)) selected = boxes[selected_index] # show image and set onMouse event to select bbox cv2.imshow("Image", self.image) cv2.setMouseCallback('Image', onMouse) while len(selected) == 0: cv2.waitKey(10) return selected
[ "cv2.dnn.blobFromImage", "cv2.rectangle", "cv2.setMouseCallback", "numpy.argmax", "cv2.putText", "cv2.imshow", "os.path.dirname", "cv2.waitKey", "numpy.array", "cv2.dnn.NMSBoxes", "time.time", "cv2.dnn.readNetFromDarknet" ]
[((1360, 1423), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['self.config_path', 'self.weights_path'], {}), '(self.config_path, self.weights_path)\n', (1386, 1423), False, 'import cv2\n'), ((1597, 1683), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['self.image', '(1 / 255.0)', '(416, 416)'], {'swapRB': '(True)', 'crop': '(False)'}), '(self.image, 1 / 255.0, (416, 416), swapRB=True, crop=\n False)\n', (1618, 1683), False, 'import cv2\n'), ((1858, 1869), 'time.time', 'time.time', ([], {}), '()\n', (1867, 1869), False, 'import time\n'), ((1918, 1929), 'time.time', 'time.time', ([], {}), '()\n', (1927, 1929), False, 'import time\n'), ((3697, 3756), 'cv2.rectangle', 'cv2.rectangle', (['self.image', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(self.image, (x, y), (x + w, y + h), color, 2)\n', (3710, 3756), False, 'import cv2\n'), ((3819, 3905), 'cv2.putText', 'cv2.putText', (['self.image', 'text', '(x, y - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'color', '(2)'], {}), '(self.image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n color, 2)\n', (3830, 3905), False, 'import cv2\n'), ((4070, 4143), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', 'self.min_confidence', 'self.threshold'], {}), '(boxes, confidences, self.min_confidence, self.threshold)\n', (4086, 4143), False, 'import cv2\n'), ((5405, 5436), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'self.image'], {}), "('Image', self.image)\n", (5415, 5436), False, 'import cv2\n'), ((5445, 5483), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Image"""', 'onMouse'], {}), "('Image', onMouse)\n", (5465, 5483), False, 'import cv2\n'), ((5531, 5546), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (5542, 5546), False, 'import cv2\n'), ((455, 480), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (470, 480), False, 'import os\n'), ((2541, 2558), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (2550, 2558), True, 'import numpy as np\n'), ((2892, 2952), 'numpy.array', 'np.array', (['[self.width, self.height, self.width, self.height]'], {}), '([self.width, self.height, self.width, self.height])\n', (2900, 2952), True, 'import numpy as np\n'), ((5262, 5278), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (5270, 5278), True, 'import numpy as np\n')]
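generate_bounding_boxes in the record above rescales YOLO's normalized (centerX, centerY, width, height) output by the image size and converts it to a top-left corner for cv2.rectangle and cv2.dnn.NMSBoxes. The decoding step in isolation, with toy values chosen for illustration:

import numpy as np

# Toy detection: box centered in a 640x480 image, a quarter wide, half tall.
W, H = 640, 480
cx, cy, w, h = np.array([0.5, 0.5, 0.25, 0.5]) * np.array([W, H, W, H])
x, y = int(cx - w / 2), int(cy - h / 2)      # top-left corner of the box
print(x, y, int(w), int(h))                  # 240 120 160 240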
import numpy as np
import os
from . import plot
from . import util

def label_statistics(labels):
    labels = (np.array(labels)).astype(np.int64)
    label_num = np.max(labels)+1
    label_cnt = np.zeros(label_num,dtype=np.int64)
    for i in range(len(labels)):
        label_cnt[labels[i]] += 1
    label_cnt_per = label_cnt/len(labels)
    return label_cnt,label_cnt_per,label_num

def mat2predtrue(mat):
    y_pred = [];y_true = []
    for i in range(mat.shape[0]):
        for j in range(mat.shape[1]):
            for x in range(mat[i][j]):
                y_true.append(i)
                y_pred.append(j)
    return y_true,y_pred

def predtrue2mat(y_true,y_pred):
    label_num = label_statistics(y_true)[2]
    mat = np.zeros((label_num,label_num), dtype=np.int64)
    for i in range(len(y_true)):
        mat[y_true[i]][y_pred[i]] +=1
    return mat

def mergemat(mat,mergemethod):
    y_true,y_pred = mat2predtrue(mat)
    new_true = np.zeros(len(y_true), dtype=np.int64)
    new_pred = np.zeros(len(y_true), dtype=np.int64)
    for i in range(len(y_true)):
        for j in range(len(mergemethod)):
            if y_true[i] in mergemethod[j]:
                new_true[i]=j
            if y_pred[i] in mergemethod[j]:
                new_pred[i]=j
    return predtrue2mat(new_true, new_pred)

def Kappa(mat):
    mat=mat/10000 # avoid overflow
    mat_length=np.sum(mat)
    wide=mat.shape[0]
    po=0.0;pe=0.0
    for i in range(wide):
        po=po+mat[i][i]
        pe=pe+np.sum(mat[:,i])*np.sum(mat[i,:])
    po=po/mat_length
    pe=pe/(mat_length*mat_length)
    k=(po-pe)/(1-pe)
    return k

def report(mat,print_sub=False):
    wide=mat.shape[0]
    sub_recall = np.zeros(wide)
    sub_precision = np.zeros(wide)
    sub_F1 = np.zeros(wide)
    sub_acc = np.zeros(wide)
    _err = 0
    for i in range(wide):
        TP = mat[i,i]
        FN = np.sum(mat[i])- mat[i,i]
        TN = (np.sum(mat)-np.sum(mat[i])-np.sum(mat[:,i])+mat[i,i])
        FP = np.sum(mat[:,i]) - mat[i,i]
        _err += mat[i,i]
        sub_acc[i]=(TP+TN)/(TP+FN+TN+FP)
        sub_precision[i] = TP/np.clip((TP+FP), 1e-5, 1e10)
        sub_recall[i]=(TP)/np.clip((TP+FN), 1e-5, 1e10)
        #F1 score = 2 * P * R / (P + R)
        sub_F1[i] = 2*sub_precision[i]*sub_recall[i] / np.clip((sub_precision[i]+sub_recall[i]),1e-5,1e10)
    if print_sub:
        # print the per-class metrics that are actually computed
        print('sub_recall:',sub_recall,'\nsub_acc:',sub_acc,'\nsub_precision:',sub_precision,'\nsub_F1:',sub_F1)
    err = 1-_err/np.sum(mat)
    Macro_precision = np.mean(sub_precision)
    Macro_recall = np.mean(sub_recall)
    Macro_F1 = np.mean(sub_F1)
    Macro_acc = np.mean(sub_acc)
    k = Kappa(mat)
    return round(Macro_precision,4),round(Macro_recall,4),round(Macro_F1,4),round(err,4),round(k, 4)

def statistics(mat,opt,logname,heatmapname):
    util.writelog('------------------------------ '+logname+' result ------------------------------',opt,True)
    util.writelog(logname+' -> macro-prec,reca,F1,err,kappa: '+str(report(mat)),opt,True)
    util.writelog('confusion_mat:\n'+str(mat)+'\n',opt,True)
    plot.draw_heatmap(mat,opt,name = heatmapname)

def main():
    mat=[[37980,1322,852,2,327],[3922,8784,3545,0,2193],[1756,5136,99564,1091,991],[18,1,7932,4063,14],[1361,1680,465,0,23931]]
    mat = np.array(mat)
    # report() returns macro precision, recall, F1, error rate and kappa
    precision,recall,F1,err,kappa = report(mat)
    print(precision,recall,F1,err,kappa)

if __name__ == '__main__':
    main()
[ "numpy.clip", "numpy.mean", "numpy.max", "numpy.array", "numpy.sum", "numpy.zeros" ]
[((196, 231), 'numpy.zeros', 'np.zeros', (['label_num'], {'dtype': 'np.int64'}), '(label_num, dtype=np.int64)\n', (204, 231), True, 'import numpy as np\n'), ((727, 775), 'numpy.zeros', 'np.zeros', (['(label_num, label_num)'], {'dtype': 'np.int64'}), '((label_num, label_num), dtype=np.int64)\n', (735, 775), True, 'import numpy as np\n'), ((1371, 1382), 'numpy.sum', 'np.sum', (['mat'], {}), '(mat)\n', (1377, 1382), True, 'import numpy as np\n'), ((1683, 1697), 'numpy.zeros', 'np.zeros', (['wide'], {}), '(wide)\n', (1691, 1697), True, 'import numpy as np\n'), ((1718, 1732), 'numpy.zeros', 'np.zeros', (['wide'], {}), '(wide)\n', (1726, 1732), True, 'import numpy as np\n'), ((1746, 1760), 'numpy.zeros', 'np.zeros', (['wide'], {}), '(wide)\n', (1754, 1760), True, 'import numpy as np\n'), ((1775, 1789), 'numpy.zeros', 'np.zeros', (['wide'], {}), '(wide)\n', (1783, 1789), True, 'import numpy as np\n'), ((2488, 2510), 'numpy.mean', 'np.mean', (['sub_precision'], {}), '(sub_precision)\n', (2495, 2510), True, 'import numpy as np\n'), ((2530, 2549), 'numpy.mean', 'np.mean', (['sub_recall'], {}), '(sub_recall)\n', (2537, 2549), True, 'import numpy as np\n'), ((2565, 2580), 'numpy.mean', 'np.mean', (['sub_F1'], {}), '(sub_F1)\n', (2572, 2580), True, 'import numpy as np\n'), ((2597, 2613), 'numpy.mean', 'np.mean', (['sub_acc'], {}), '(sub_acc)\n', (2604, 2613), True, 'import numpy as np\n'), ((3246, 3259), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (3254, 3259), True, 'import numpy as np\n'), ((163, 177), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (169, 177), True, 'import numpy as np\n'), ((112, 128), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (120, 128), True, 'import numpy as np\n'), ((1865, 1879), 'numpy.sum', 'np.sum', (['mat[i]'], {}), '(mat[i])\n', (1871, 1879), True, 'import numpy as np\n'), ((1971, 1988), 'numpy.sum', 'np.sum', (['mat[:, i]'], {}), '(mat[:, i])\n', (1977, 1988), True, 'import numpy as np\n'), ((2096, 2134), 'numpy.clip', 'np.clip', (['(TP + FP)', '(1e-05)', '(10000000000.0)'], {}), '(TP + FP, 1e-05, 10000000000.0)\n', (2103, 2134), True, 'import numpy as np\n'), ((2152, 2190), 'numpy.clip', 'np.clip', (['(TP + FN)', '(1e-05)', '(10000000000.0)'], {}), '(TP + FN, 1e-05, 10000000000.0)\n', (2159, 2190), True, 'import numpy as np\n'), ((2277, 2340), 'numpy.clip', 'np.clip', (['(sub_precision[i] + sub_recall[i])', '(1e-05)', '(10000000000.0)'], {}), '(sub_precision[i] + sub_recall[i], 1e-05, 10000000000.0)\n', (2284, 2340), True, 'import numpy as np\n'), ((2454, 2465), 'numpy.sum', 'np.sum', (['mat'], {}), '(mat)\n', (2460, 2465), True, 'import numpy as np\n'), ((1487, 1504), 'numpy.sum', 'np.sum', (['mat[:, i]'], {}), '(mat[:, i])\n', (1493, 1504), True, 'import numpy as np\n'), ((1504, 1521), 'numpy.sum', 'np.sum', (['mat[i, :]'], {}), '(mat[i, :])\n', (1510, 1521), True, 'import numpy as np\n'), ((1931, 1948), 'numpy.sum', 'np.sum', (['mat[:, i]'], {}), '(mat[:, i])\n', (1937, 1948), True, 'import numpy as np\n'), ((1904, 1915), 'numpy.sum', 'np.sum', (['mat'], {}), '(mat)\n', (1910, 1915), True, 'import numpy as np\n'), ((1916, 1930), 'numpy.sum', 'np.sum', (['mat[i]'], {}), '(mat[i])\n', (1922, 1930), True, 'import numpy as np\n')]
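Kappa() in the record above implements Cohen's kappa, (po - pe) / (1 - pe), where po is the observed agreement (the matrix trace) and pe the agreement expected from the row and column marginals; its /10000 rescale cancels out of the ratio. A worked example on a toy 2x2 confusion matrix:

import numpy as np

mat = np.array([[40., 10.],
                [5., 45.]])
n = mat.sum()                                    # 100 samples
po = np.trace(mat) / n                            # observed agreement = 0.85
pe = sum(mat[i, :].sum() * mat[:, i].sum() for i in range(2)) / n**2
kappa = (po - pe) / (1 - pe)
print(round(pe, 2), round(kappa, 2))             # 0.5 0.7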
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
from itertools import product
from pathlib import Path

import numpy as np
import tensorflow as tf
from dotenv import load_dotenv

from annotation.direction import PinDirection
from annotation.piece import Piece
from ..count import WhiteEffectCountLayer
from ..long_effect import WhiteLongEffectLayer
from ..ou import WhiteOuEffectLayer
from ..short_effect import WhiteShortEffectLayer

__author__ = 'Yasuhiro'
__date__ = '2018/3/21'


class TestGiEffect(tf.test.TestCase):
    @classmethod
    def setUpClass(cls):
        dotenv_path = Path(__file__).parents[3] / '.env'
        load_dotenv(str(dotenv_path))

        cls.data_format = os.environ.get('DATA_FORMAT')
        cls.use_cudnn = bool(os.environ.get('USE_CUDNN'))

    def test_gi_effect(self):
        shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
        board = np.empty(shape, dtype=np.int32)
        # setup in which the opponent has no effects here
        black_effect_mask = np.zeros(shape, dtype=np.bool)

        ph = tf.placeholder(tf.int32, shape=shape)
        short_effect = WhiteShortEffectLayer(
            data_format=self.data_format, use_cudnn=self.use_cudnn
        )(ph)
        long_effect = WhiteLongEffectLayer(
            data_format=self.data_format, use_cudnn=self.use_cudnn
        )(ph)
        ou_effect = WhiteOuEffectLayer(
            data_format=self.data_format, use_cudnn=self.use_cudnn
        )(ph, black_effect_mask)
        effect_count = WhiteEffectCountLayer()(
            short_effect, long_effect, ou_effect
        )
        effect_count = tf.squeeze(effect_count)

        with self.test_session() as sess:
            for i, j in product(range(9), repeat=2):
                # place a piece at (i, j)
                board[:] = Piece.EMPTY
                if self.data_format == 'NCHW':
                    board[0, 0, i, j] = Piece.WHITE_GI
                else:
                    board[0, i, j, 0] = Piece.WHITE_GI

                count = sess.run(effect_count, feed_dict={ph: board})

                with self.subTest(i=i, j=j):
                    c = 0
                    for x, y in ((i - 1, j - 1), (i - 1, j + 1), (i, j + 1),
                                 (i + 1, j - 1), (i + 1, j + 1)):
                        if x in range(9) and y in range(9):
                            self.assertEqual(count[x, y], 1)
                            c += 1
                    self.assertEqual(np.sum(count), c)

    def test_gi_pin(self):
        shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
        board = np.empty(shape, dtype=np.int32)
        # setup in which the opponent has no effects here
        black_effect_mask = np.zeros(shape, dtype=np.bool)

        ph = tf.placeholder(tf.int32, shape=shape)
        short_effect = WhiteShortEffectLayer(
            data_format=self.data_format, use_cudnn=self.use_cudnn
        )(ph)
        long_effect = WhiteLongEffectLayer(
            data_format=self.data_format, use_cudnn=self.use_cudnn
        )(ph)
        ou_effect = WhiteOuEffectLayer(
            data_format=self.data_format, use_cudnn=self.use_cudnn
        )(ph, black_effect_mask)
        effect_count = WhiteEffectCountLayer()(
            short_effect, long_effect, ou_effect
        )
        effect_count = tf.squeeze(effect_count)

        with self.test_session() as sess:
            for i, j in product(range(9), repeat=2):
                # place a piece at (i, j)
                board[:] = Piece.EMPTY
                for pin_direction in PinDirection:
                    if pin_direction == PinDirection.SIZE:
                        continue

                    offset = Piece.SIZE - Piece.WHITE_FU + pin_direction * 14
                    if self.data_format == 'NCHW':
                        board[0, 0, i, j] = Piece.WHITE_GI + offset
                    else:
                        board[0, i, j, 0] = Piece.WHITE_GI + offset

                    count = sess.run(effect_count, feed_dict={ph: board})

                    with self.subTest(i=i, j=j, pin_direction=pin_direction):
                        c = 0
                        if pin_direction == PinDirection.VERTICAL:
                            for x, y in ((i, j + 1),):
                                if x in range(9) and y in range(9):
                                    self.assertEqual(count[x, y], 1)
                                    c += 1
                            self.assertEqual(np.sum(count), c)
                        elif pin_direction == PinDirection.HORIZONTAL:
                            self.assertTrue(np.all(count == 0))
                        elif pin_direction == PinDirection.DIAGONAL1:
                            for x, y in ((i - 1, j - 1), (i + 1, j + 1)):
                                if x in range(9) and y in range(9):
                                    self.assertEqual(count[x, y], 1)
                                    c += 1
                            self.assertEqual(np.sum(count), c)
elif pin_direction == PinDirection.DIAGONAL2: for x, y in ((i - 1, j + 1), (i + 1, j - 1)): if x in range(9) and y in range(9): self.assertEqual(count[x, y], 1) c += 1 self.assertEqual(np.sum(count), c) else: raise ValueError(pin_direction)
[ "pathlib.Path", "tensorflow.placeholder", "os.environ.get", "numpy.sum", "numpy.zeros", "numpy.empty", "numpy.all", "tensorflow.squeeze" ]
[((695, 724), 'os.environ.get', 'os.environ.get', (['"""DATA_FORMAT"""'], {}), "('DATA_FORMAT')\n", (709, 724), False, 'import os\n'), ((907, 938), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'np.int32'}), '(shape, dtype=np.int32)\n', (915, 938), True, 'import numpy as np\n'), ((993, 1023), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.bool'}), '(shape, dtype=np.bool)\n', (1001, 1023), True, 'import numpy as np\n'), ((1038, 1075), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'shape'}), '(tf.int32, shape=shape)\n', (1052, 1075), True, 'import tensorflow as tf\n'), ((1598, 1622), 'tensorflow.squeeze', 'tf.squeeze', (['effect_count'], {}), '(effect_count)\n', (1608, 1622), True, 'import tensorflow as tf\n'), ((2585, 2616), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'np.int32'}), '(shape, dtype=np.int32)\n', (2593, 2616), True, 'import numpy as np\n'), ((2671, 2701), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.bool'}), '(shape, dtype=np.bool)\n', (2679, 2701), True, 'import numpy as np\n'), ((2716, 2753), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': 'shape'}), '(tf.int32, shape=shape)\n', (2730, 2753), True, 'import tensorflow as tf\n'), ((3276, 3300), 'tensorflow.squeeze', 'tf.squeeze', (['effect_count'], {}), '(effect_count)\n', (3286, 3300), True, 'import tensorflow as tf\n'), ((754, 781), 'os.environ.get', 'os.environ.get', (['"""USE_CUDNN"""'], {}), "('USE_CUDNN')\n", (768, 781), False, 'import os\n'), ((595, 609), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (599, 609), False, 'from pathlib import Path\n'), ((2446, 2459), 'numpy.sum', 'np.sum', (['count'], {}), '(count)\n', (2452, 2459), True, 'import numpy as np\n'), ((4432, 4445), 'numpy.sum', 'np.sum', (['count'], {}), '(count)\n', (4438, 4445), True, 'import numpy as np\n'), ((4565, 4583), 'numpy.all', 'np.all', (['(count == 0)'], {}), '(count == 0)\n', (4571, 4583), True, 'import numpy as np\n'), ((4954, 4967), 'numpy.sum', 'np.sum', (['count'], {}), '(count)\n', (4960, 4967), True, 'import numpy as np\n'), ((5341, 5354), 'numpy.sum', 'np.sum', (['count'], {}), '(count)\n', (5347, 5354), True, 'import numpy as np\n')]
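The tests in the record above branch on DATA_FORMAT because the same board square sits on different axes in NCHW (batch, channel, height, width) versus NHWC (batch, height, width, channel). A minimal illustration of the two indexings:

import numpy as np

i, j = 4, 6                                       # an arbitrary board square
nchw = np.zeros((1, 1, 9, 9), dtype=np.int32)    # (batch, channel, H, W)
nhwc = np.zeros((1, 9, 9, 1), dtype=np.int32)    # (batch, H, W, channel)
nchw[0, 0, i, j] = 1                              # same square, different axes
nhwc[0, i, j, 0] = 1
assert (nchw[0, 0] == nhwc[0, :, :, 0]).all()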
import unittest

from yauber_algo.errors import *


class CategorizeTestCase(unittest.TestCase):
    def test_categorize(self):
        import yauber_algo.sanitychecks as sc
        from numpy import array, nan, inf
        import os
        import sys
        import pandas as pd
        import numpy as np

        from yauber_algo.algo import categorize

        #
        # Function settings
        #
        algo = 'categorize'
        func = categorize

        with sc.SanityChecker(algo) as s:
            #
            # Check regular algorithm logic
            #
            s.check_regular(
                np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]), [0, 3, 6, 10]),
                suffix='reg')

            s.check_regular(
                np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]), [0.1, 3, 6, 10]),
                suffix='min_not_in_bins',
                exception=YaUberAlgoInternalError)

            s.check_regular(
                np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]), [0, 3, 6, 9.999]),
                suffix='max_not_in_bins',
                exception=YaUberAlgoInternalError)

            s.check_regular(
                np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]), [0, 10]),
                suffix='min_max_one_bin',
                exception=YaUberAlgoArgumentError)

            s.check_regular(
                np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]), [0, 10, 10]),
                suffix='bins_non_unique',
                exception=YaUberAlgoArgumentError)

            s.check_regular(
                np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]), [0, 10, 5]),
                suffix='bins_not_sorted',
                exception=YaUberAlgoArgumentError)

            s.check_regular(
                np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]), [0, 5, 'obj']),
                suffix='bins_non_number',
                exception=YaUberAlgoArgumentError)

            s.check_regular(
                np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]), [0, 5, nan]),
                suffix='bins_nan',
                exception=YaUberAlgoArgumentError)

            s.check_regular(
                np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]), [0, 5, inf]),
                suffix='bins_inf',
                exception=YaUberAlgoArgumentError)

            s.check_naninf(
                np.array([0., 0., 0., 0., 1., 1., 1., 2., nan, nan]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, inf, nan]), [0, 3, 6, 10]),
                suffix='reg')

            s.check_regular(
                np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]),
                 np.array([0, 3, 6, 10])),
                suffix='bins_are_np_array')

            s.check_regular(
                np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.]), func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]),
                 pd.Series([0, 3, 6, 10])),
                suffix='bins_are_series')

            s.check_series(
                pd.Series(np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.])),
                func,
                (pd.Series(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])),
                 [0, 3, 6, 10]),
                suffix='')

            s.check_dtype_float(
                np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.],
                         dtype=np.float),
                func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10], dtype=np.float),
                 [0, 3, 6, 10]),
                suffix='')

            s.check_dtype_int(
                np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.],
                         dtype=np.float),
                func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10], dtype=np.int32),
                 [0, 3, 6, 10]),
                suffix='')

            s.check_dtype_bool(
                np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.],
                         dtype=np.float),
                func,
                (np.array([0, 1, 0, 0, 1, 0, 1, 0, 0, 1], dtype=np.bool),
                 [0, 3, 6, 10]),
                suffix='',
                exception=YaUberAlgoDtypeNotSupportedError)

            s.check_dtype_object(
                func,
                (np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10], dtype=np.object),
                 [0, 3, 6, 10]),
                suffix='')

            s.check_futref(5, 1, func,
                           (np.random.random(1000), [0, 0.33, 0.66, 1.0]))

            s.check_window_consistency(5, 1, func,
                                       (np.random.random(1000),
                                        [0, 0.33, 0.66, 1.0]))
[ "pandas.Series", "numpy.array", "numpy.random.random", "yauber_algo.sanitychecks.SanityChecker" ]
[((473, 495), 'yauber_algo.sanitychecks.SanityChecker', 'sc.SanityChecker', (['algo'], {}), '(algo)\n', (489, 495), True, 'import yauber_algo.sanitychecks as sc\n'), ((619, 679), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0])\n', (627, 679), True, 'import numpy as np\n'), ((916, 976), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0])\n', (924, 976), True, 'import numpy as np\n'), ((1278, 1338), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0])\n', (1286, 1338), True, 'import numpy as np\n'), ((1641, 1701), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (1649, 1701), True, 'import numpy as np\n'), ((1995, 2055), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (2003, 2055), True, 'import numpy as np\n'), ((2353, 2413), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (2361, 2413), True, 'import numpy as np\n'), ((2710, 2770), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (2718, 2770), True, 'import numpy as np\n'), ((3070, 3130), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (3078, 3130), True, 'import numpy as np\n'), ((3421, 3481), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (3429, 3481), True, 'import numpy as np\n'), ((3771, 3831), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, nan, nan]'], {}), '([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, nan, nan])\n', (3779, 3831), True, 'import numpy as np\n'), ((4073, 4133), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0])\n', (4081, 4133), True, 'import numpy as np\n'), ((4394, 4454), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0])\n', (4402, 4454), True, 'import numpy as np\n'), ((5033, 5109), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0]'], {'dtype': 'np.float'}), '([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0], dtype=np.float)\n', (5041, 5109), True, 'import numpy as np\n'), ((5361, 5437), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0]'], {'dtype': 'np.float'}), '([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0], dtype=np.float)\n', (5369, 5437), True, 'import numpy as np\n'), ((5690, 5766), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0]'], {'dtype': 'np.float'}), '([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0], dtype=np.float)\n', (5698, 5766), True, 'import numpy as np\n'), ((731, 772), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (739, 772), True, 'import numpy as np\n'), ((1028, 1069), 
'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (1036, 1069), True, 'import numpy as np\n'), ((1390, 1431), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (1398, 1431), True, 'import numpy as np\n'), ((1753, 1794), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (1761, 1794), True, 'import numpy as np\n'), ((2107, 2148), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (2115, 2148), True, 'import numpy as np\n'), ((2465, 2506), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (2473, 2506), True, 'import numpy as np\n'), ((2822, 2863), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (2830, 2863), True, 'import numpy as np\n'), ((3182, 3223), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (3190, 3223), True, 'import numpy as np\n'), ((3533, 3574), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (3541, 3574), True, 'import numpy as np\n'), ((3885, 3929), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, inf, nan]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, inf, nan])\n', (3893, 3929), True, 'import numpy as np\n'), ((4185, 4226), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (4193, 4226), True, 'import numpy as np\n'), ((4248, 4271), 'numpy.array', 'np.array', (['[0, 3, 6, 10]'], {}), '([0, 3, 6, 10])\n', (4256, 4271), True, 'import numpy as np\n'), ((4506, 4547), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (4514, 4547), True, 'import numpy as np\n'), ((4569, 4593), 'pandas.Series', 'pd.Series', (['[0, 3, 6, 10]'], {}), '([0, 3, 6, 10])\n', (4578, 4593), True, 'import pandas as pd\n'), ((4723, 4783), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0])\n', (4731, 4783), True, 'import numpy as np\n'), ((5161, 5218), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {'dtype': 'np.float'}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10], dtype=np.float)\n', (5169, 5218), True, 'import numpy as np\n'), ((5489, 5546), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {'dtype': 'np.int32'}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10], dtype=np.int32)\n', (5497, 5546), True, 'import numpy as np\n'), ((5818, 5873), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 1, 0, 1, 0, 0, 1]'], {'dtype': 'np.bool'}), '([0, 1, 0, 0, 1, 0, 1, 0, 0, 1], dtype=np.bool)\n', (5826, 5873), True, 'import numpy as np\n'), ((6123, 6181), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {'dtype': 'np.object'}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10], dtype=np.object)\n', (6131, 6181), True, 'import numpy as np\n'), ((6403, 6425), 'numpy.random.random', 'np.random.random', (['(1000)'], {}), '(1000)\n', (6419, 6425), True, 'import numpy as np\n'), ((6713, 6735), 'numpy.random.random', 'np.random.random', (['(1000)'], {}), '(1000)\n', (6729, 6735), True, 'import numpy as np\n'), ((4846, 4887), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (4854, 4887), True, 
'import numpy as np\n')]
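For orientation, each `extract_api` entry above pairs a character span in the record's original, unformatted `code` string with the API call it covers. A minimal sketch of unpacking one entry follows; the field meanings are inferred from the data rather than documented, so treat them as assumptions:

# Field meanings inferred from the entries above (an assumption):
# (span, qualified API name, call text as written, parsed args, raw arg
#  text, argument span, aliased-import flag, originating import statement).
entry = ((473, 495), 'yauber_algo.sanitychecks.SanityChecker',
         'sc.SanityChecker', (['algo'], {}), '(algo)\n',
         (489, 495), True, 'import yauber_algo.sanitychecks as sc\n')
(start, end), qualified_name = entry[0], entry[1]
# In the original flat source string, code[start:end] covered this call:
print(qualified_name, '->', entry[2] + entry[4].strip())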
# -*- coding: UTF-8 -*-
# Authors: <NAME> <<EMAIL>>
#          <NAME> <<EMAIL>>
#
# License: BSD (3-clause)

import mne
import os.path
import pytest
import copy
import itertools
import numpy as np

from mne.datasets import testing
from mne.io.fieldtrip.utils import NOINFO_WARNING, _create_events
from mne.utils import _check_pandas_installed, requires_h5py
from mne.io.fieldtrip.tests.helpers import (check_info_fields, get_data_paths,
                                            get_raw_data, get_epochs,
                                            get_evoked, _has_h5py,
                                            pandas_not_found_warning_msg,
                                            get_raw_info, check_data,
                                            assert_warning_in_record)

# missing: KIT: biggest problem here is that the channels do not have the same
# names.
# EGI: no calibration done in FT. so data is VERY different

all_systems_raw = ['neuromag306', 'CTF', 'CNT', 'BTI', 'eximia']
all_systems_epochs = ['neuromag306', 'CTF', 'CNT']
all_versions = ['v7', 'v73']
use_info = [True, False]
all_test_params_raw = list(itertools.product(all_systems_raw,
                                         all_versions, use_info))
all_test_params_epochs = list(itertools.product(all_systems_epochs,
                                            all_versions, use_info))

no_info_warning = {'expected_warning': RuntimeWarning,
                   'match': NOINFO_WARNING}


@pytest.mark.slowtest
@testing.requires_testing_data
# Reading the sample CNT data results in a RuntimeWarning because it cannot
# parse the measurement date. We need to ignore that warning.
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning')
@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning')
@pytest.mark.parametrize('cur_system, version, use_info',
                         all_test_params_epochs)
def test_read_evoked(cur_system, version, use_info):
    """Test comparing reading an Evoked object and the FieldTrip version."""
    test_data_folder_ft = get_data_paths(cur_system)
    mne_avg = get_evoked(cur_system)
    if use_info:
        info = get_raw_info(cur_system)
        pytestwarning = {'expected_warning': None}
    else:
        info = None
        pytestwarning = no_info_warning

    cur_fname = os.path.join(test_data_folder_ft,
                             'averaged_%s.mat' % (version,))
    if version == 'v73' and not _has_h5py():
        with pytest.raises(ImportError):
            mne.io.read_evoked_fieldtrip(cur_fname, info)
        return

    with pytest.warns(**pytestwarning):
        avg_ft = mne.io.read_evoked_fieldtrip(cur_fname, info)

    mne_data = mne_avg.data[:, :-1]
    ft_data = avg_ft.data

    check_data(mne_data, ft_data, cur_system)
    check_info_fields(mne_avg, avg_ft, use_info)


@testing.requires_testing_data
# Reading the sample CNT data results in a RuntimeWarning because it cannot
# parse the measurement date. We need to ignore that warning.
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning')
@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning')
@pytest.mark.parametrize('cur_system, version, use_info',
                         all_test_params_epochs)
# Strange, non-deterministic Pandas errors:
# "ValueError: cannot expose native-only dtype 'g' in non-native
#  byte order '<' via buffer interface"
@pytest.mark.skipif(os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true',
                    reason='Pandas problem on Azure CI')
def test_read_epochs(cur_system, version, use_info, monkeypatch):
    """Test comparing reading an Epochs object and the FieldTrip version."""
    pandas = _check_pandas_installed(strict=False)
    has_pandas = pandas is not False
    test_data_folder_ft = get_data_paths(cur_system)
    mne_epoched = get_epochs(cur_system)
    if use_info:
        info = get_raw_info(cur_system)
        pytestwarning = {'expected_warning': None}
    else:
        info = None
        pytestwarning = no_info_warning

    cur_fname = os.path.join(test_data_folder_ft,
                             'epoched_%s.mat' % (version,))
    if has_pandas:
        if version == 'v73' and not _has_h5py():
            with pytest.raises(ImportError):
                mne.io.read_epochs_fieldtrip(cur_fname, info)
            return
        with pytest.warns(**pytestwarning):
            epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info)
        assert isinstance(epoched_ft.metadata, pandas.DataFrame)
    else:
        with pytest.warns(None) as warn_record:
            if version == 'v73' and not _has_h5py():
                with pytest.raises(ImportError):
                    mne.io.read_epochs_fieldtrip(cur_fname, info)
                return
            epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info)
        assert epoched_ft.metadata is None
        assert_warning_in_record(pandas_not_found_warning_msg, warn_record)
        if pytestwarning['expected_warning'] is not None:
            assert_warning_in_record(pytestwarning['match'], warn_record)

    mne_data = mne_epoched.get_data()[:, :, :-1]
    ft_data = epoched_ft.get_data()

    check_data(mne_data, ft_data, cur_system)
    check_info_fields(mne_epoched, epoched_ft, use_info)

    # weird sfreq
    from mne.externals.pymatreader import read_mat

    def modify_mat(fname, variable_names=None, ignore_fields=None):
        out = read_mat(fname, variable_names, ignore_fields)
        if 'fsample' in out['data']:
            out['data']['fsample'] = np.repeat(out['data']['fsample'], 2)
        return out

    monkeypatch.setattr(mne.externals.pymatreader, 'read_mat', modify_mat)
    with pytest.warns(RuntimeWarning, match='multiple'):
        mne.io.read_epochs_fieldtrip(cur_fname, info)


@testing.requires_testing_data
# Reading the sample CNT data results in a RuntimeWarning because it cannot
# parse the measurement date. We need to ignore that warning.
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning')
@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning')
@pytest.mark.parametrize('cur_system, version, use_info',
                         all_test_params_raw)
def test_raw(cur_system, version, use_info):
    """Test comparing reading a raw fiff file and the FieldTrip version."""
    # Load the raw fiff file with mne
    test_data_folder_ft = get_data_paths(cur_system)
    raw_fiff_mne = get_raw_data(cur_system, drop_extra_chs=True)
    if use_info:
        info = get_raw_info(cur_system)
        pytestwarning = {'expected_warning': None}
    else:
        info = None
        pytestwarning = no_info_warning

    cur_fname = os.path.join(test_data_folder_ft,
                             'raw_%s.mat' % (version,))
    if version == 'v73' and not _has_h5py():
        with pytest.raises(ImportError):
            mne.io.read_raw_fieldtrip(cur_fname, info)
        return
    with pytest.warns(**pytestwarning):
        raw_fiff_ft = mne.io.read_raw_fieldtrip(cur_fname, info)

    if cur_system == 'BTI' and not use_info:
        raw_fiff_ft.drop_channels(['MzA', 'MxA', 'MyaA',
                                   'MyA', 'MxaA', 'MzaA'])

    if cur_system == 'eximia' and not use_info:
        raw_fiff_ft.drop_channels(['TRIG2', 'TRIG1', 'GATE'])

    # Check that the data was loaded correctly
    check_data(raw_fiff_mne.get_data(),
               raw_fiff_ft.get_data(),
               cur_system)

    # Check info field
    check_info_fields(raw_fiff_mne, raw_fiff_ft, use_info)


@testing.requires_testing_data
def test_load_epoched_as_raw():
    """Test whether exception is thrown when loading epochs as raw."""
    test_data_folder_ft = get_data_paths('neuromag306')
    info = get_raw_info('neuromag306')
    cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat')

    with pytest.raises(RuntimeError):
        mne.io.read_raw_fieldtrip(cur_fname, info)


@testing.requires_testing_data
def test_invalid_trialinfocolumn():
    """Test for exceptions when using wrong values for trialinfo parameter."""
    test_data_folder_ft = get_data_paths('neuromag306')
    info = get_raw_info('neuromag306')
    cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat')

    with pytest.raises(ValueError):
        mne.io.read_epochs_fieldtrip(cur_fname, info, trialinfo_column=-1)

    with pytest.raises(ValueError):
        mne.io.read_epochs_fieldtrip(cur_fname, info, trialinfo_column=3)


@testing.requires_testing_data
def test_create_events():
    """Test 2dim trialinfo fields."""
    from mne.externals.pymatreader import read_mat
    test_data_folder_ft = get_data_paths('neuromag306')
    cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat')
    original_data = read_mat(cur_fname, ['data', ])

    new_data = copy.deepcopy(original_data)
    new_data['trialinfo'] = np.array([[1, 2, 3, 4],
                                     [1, 2, 3, 4],
                                     [1, 2, 3, 4]])

    with pytest.raises(ValueError):
        _create_events(new_data, -1)

    for cur_col in np.arange(4):
        evts = _create_events(new_data, cur_col)
        assert np.all(evts[:, 2] == cur_col + 1)

    with pytest.raises(ValueError):
        _create_events(new_data, 4)


@testing.requires_testing_data
@pytest.mark.parametrize('version', all_versions)
@requires_h5py
def test_one_channel_elec_bug(version):
    """Test if loading data having only one elec in the elec field works."""
    fname = os.path.join(mne.datasets.testing.data_path(),
                         'fieldtrip',
                         'one_channel_elec_bug_data_%s.mat' % (version, ))

    with pytest.warns(**no_info_warning):
        mne.io.read_raw_fieldtrip(fname, info=None)


@testing.requires_testing_data
# Reading the sample CNT data results in a RuntimeWarning because it cannot
# parse the measurement date. We need to ignore that warning.
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning')
@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning')
@pytest.mark.parametrize('version', all_versions)
@pytest.mark.parametrize('type', ['averaged', 'epoched', 'raw'])
@requires_h5py
def test_throw_exception_on_cellarray(version, type):
    """Test for a meaningful exception when the data is a cell array."""
    fname = os.path.join(get_data_paths('cellarray'),
                         '%s_%s.mat' % (type, version))

    info = get_raw_info('CNT')

    with pytest.raises(RuntimeError, match='Loading of data in cell arrays '
                                       'is not supported'):
        if type == 'averaged':
            mne.read_evoked_fieldtrip(fname, info)
        elif type == 'epoched':
            mne.read_epochs_fieldtrip(fname, info)
        elif type == 'raw':
            mne.io.read_raw_fieldtrip(fname, info)


@testing.requires_testing_data
def test_with_missing_channels():
    """Test _create_info when channels are missing from info."""
    cur_system = 'neuromag306'
    test_data_folder_ft = get_data_paths(cur_system)
    info = get_raw_info(cur_system)
    del info['chs'][1:20]
    info._update_redundant()

    with pytest.warns(RuntimeWarning):
        mne.io.read_raw_fieldtrip(
            os.path.join(test_data_folder_ft, 'raw_v7.mat'), info)
        mne.read_evoked_fieldtrip(
            os.path.join(test_data_folder_ft, 'averaged_v7.mat'), info)
        mne.read_epochs_fieldtrip(
            os.path.join(test_data_folder_ft, 'epoched_v7.mat'), info)


@testing.requires_testing_data
@pytest.mark.filterwarnings('ignore: Importing FieldTrip data without an info')
@pytest.mark.filterwarnings('ignore: Cannot guess the correct type')
def test_throw_error_on_non_uniform_time_field():
    """Test if an error is thrown when time fields are not uniform."""
    fname = os.path.join(mne.datasets.testing.data_path(),
                         'fieldtrip',
                         'not_uniform_time.mat')

    with pytest.raises(RuntimeError, match='Loading data with non-uniform '
                                       'times per epoch is not supported'):
        mne.io.read_epochs_fieldtrip(fname, info=None)


@testing.requires_testing_data
@pytest.mark.filterwarnings('ignore: Importing FieldTrip data without an info')
def test_throw_error_when_importing_old_ft_version_data():
    """Test if an error is thrown if the data was saved with an old version."""
    fname = os.path.join(mne.datasets.testing.data_path(),
                         'fieldtrip',
                         'old_version.mat')

    with pytest.raises(RuntimeError, match='This file was created with '
                                       'an old version of FieldTrip. You '
                                       'can convert the data to the new '
                                       'version by loading it into '
                                       'FieldTrip and applying '
                                       'ft_selectdata with an '
                                       'empty cfg structure on it. '
                                       'Otherwise you can supply '
                                       'the Info field.'):
        mne.io.read_epochs_fieldtrip(fname, info=None)
[ "pytest.mark.filterwarnings", "mne.utils._check_pandas_installed", "mne.io.fieldtrip.tests.helpers.get_data_paths", "numpy.array", "copy.deepcopy", "mne.io.fieldtrip.tests.helpers.assert_warning_in_record", "mne.io.read_evoked_fieldtrip", "numpy.arange", "mne.datasets.testing.data_path", "numpy.re...
[((1722, 1793), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:.*parse meas date.*:RuntimeWarning"""'], {}), "('ignore:.*parse meas date.*:RuntimeWarning')\n", (1748, 1793), False, 'import pytest\n'), ((1795, 1866), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:.*number of bytes.*:RuntimeWarning"""'], {}), "('ignore:.*number of bytes.*:RuntimeWarning')\n", (1821, 1866), False, 'import pytest\n'), ((1868, 1953), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cur_system, version, use_info"""', 'all_test_params_epochs'], {}), "('cur_system, version, use_info', all_test_params_epochs\n )\n", (1891, 1953), False, 'import pytest\n'), ((3078, 3149), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:.*parse meas date.*:RuntimeWarning"""'], {}), "('ignore:.*parse meas date.*:RuntimeWarning')\n", (3104, 3149), False, 'import pytest\n'), ((3151, 3222), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:.*number of bytes.*:RuntimeWarning"""'], {}), "('ignore:.*number of bytes.*:RuntimeWarning')\n", (3177, 3222), False, 'import pytest\n'), ((3224, 3309), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cur_system, version, use_info"""', 'all_test_params_epochs'], {}), "('cur_system, version, use_info', all_test_params_epochs\n )\n", (3247, 3309), False, 'import pytest\n'), ((6067, 6138), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:.*parse meas date.*:RuntimeWarning"""'], {}), "('ignore:.*parse meas date.*:RuntimeWarning')\n", (6093, 6138), False, 'import pytest\n'), ((6140, 6211), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:.*number of bytes.*:RuntimeWarning"""'], {}), "('ignore:.*number of bytes.*:RuntimeWarning')\n", (6166, 6211), False, 'import pytest\n'), ((6213, 6290), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cur_system, version, use_info"""', 'all_test_params_raw'], {}), "('cur_system, version, use_info', all_test_params_raw)\n", (6236, 6290), False, 'import pytest\n'), ((9388, 9436), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""version"""', 'all_versions'], {}), "('version', all_versions)\n", (9411, 9436), False, 'import pytest\n'), ((9983, 10054), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:.*parse meas date.*:RuntimeWarning"""'], {}), "('ignore:.*parse meas date.*:RuntimeWarning')\n", (10009, 10054), False, 'import pytest\n'), ((10056, 10127), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:.*number of bytes.*:RuntimeWarning"""'], {}), "('ignore:.*number of bytes.*:RuntimeWarning')\n", (10082, 10127), False, 'import pytest\n'), ((10129, 10177), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""version"""', 'all_versions'], {}), "('version', all_versions)\n", (10152, 10177), False, 'import pytest\n'), ((10179, 10242), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type"""', "['averaged', 'epoched', 'raw']"], {}), "('type', ['averaged', 'epoched', 'raw'])\n", (10202, 10242), False, 'import pytest\n'), ((11609, 11687), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Importing FieldTrip data without an info"""'], {}), "('ignore: Importing FieldTrip data without an info')\n", (11635, 11687), False, 'import pytest\n'), ((11689, 11756), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Cannot guess the correct type"""'], {}), "('ignore: Cannot guess the correct 
type')\n", (11715, 11756), False, 'import pytest\n'), ((12270, 12348), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Importing FieldTrip data without an info"""'], {}), "('ignore: Importing FieldTrip data without an info')\n", (12296, 12348), False, 'import pytest\n'), ((1134, 1192), 'itertools.product', 'itertools.product', (['all_systems_raw', 'all_versions', 'use_info'], {}), '(all_systems_raw, all_versions, use_info)\n', (1151, 1192), False, 'import itertools\n'), ((1269, 1330), 'itertools.product', 'itertools.product', (['all_systems_epochs', 'all_versions', 'use_info'], {}), '(all_systems_epochs, all_versions, use_info)\n', (1286, 1330), False, 'import itertools\n'), ((2130, 2156), 'mne.io.fieldtrip.tests.helpers.get_data_paths', 'get_data_paths', (['cur_system'], {}), '(cur_system)\n', (2144, 2156), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((2171, 2193), 'mne.io.fieldtrip.tests.helpers.get_evoked', 'get_evoked', (['cur_system'], {}), '(cur_system)\n', (2181, 2193), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((2815, 2856), 'mne.io.fieldtrip.tests.helpers.check_data', 'check_data', (['mne_data', 'ft_data', 'cur_system'], {}), '(mne_data, ft_data, cur_system)\n', (2825, 2856), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((2861, 2905), 'mne.io.fieldtrip.tests.helpers.check_info_fields', 'check_info_fields', (['mne_avg', 'avg_ft', 'use_info'], {}), '(mne_avg, avg_ft, use_info)\n', (2878, 2905), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((3769, 3806), 'mne.utils._check_pandas_installed', '_check_pandas_installed', ([], {'strict': '(False)'}), '(strict=False)\n', (3792, 3806), False, 'from mne.utils import _check_pandas_installed, requires_h5py\n'), ((3870, 3896), 'mne.io.fieldtrip.tests.helpers.get_data_paths', 'get_data_paths', (['cur_system'], {}), '(cur_system)\n', (3884, 3896), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((3915, 3937), 'mne.io.fieldtrip.tests.helpers.get_epochs', 'get_epochs', (['cur_system'], {}), '(cur_system)\n', (3925, 3937), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((5279, 5320), 'mne.io.fieldtrip.tests.helpers.check_data', 'check_data', (['mne_data', 'ft_data', 'cur_system'], {}), '(mne_data, ft_data, cur_system)\n', (5289, 5320), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((5325, 5377), 
'mne.io.fieldtrip.tests.helpers.check_info_fields', 'check_info_fields', (['mne_epoched', 'epoched_ft', 'use_info'], {}), '(mne_epoched, epoched_ft, use_info)\n', (5342, 5377), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((6476, 6502), 'mne.io.fieldtrip.tests.helpers.get_data_paths', 'get_data_paths', (['cur_system'], {}), '(cur_system)\n', (6490, 6502), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((6522, 6567), 'mne.io.fieldtrip.tests.helpers.get_raw_data', 'get_raw_data', (['cur_system'], {'drop_extra_chs': '(True)'}), '(cur_system, drop_extra_chs=True)\n', (6534, 6567), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((7570, 7624), 'mne.io.fieldtrip.tests.helpers.check_info_fields', 'check_info_fields', (['raw_fiff_mne', 'raw_fiff_ft', 'use_info'], {}), '(raw_fiff_mne, raw_fiff_ft, use_info)\n', (7587, 7624), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((7787, 7816), 'mne.io.fieldtrip.tests.helpers.get_data_paths', 'get_data_paths', (['"""neuromag306"""'], {}), "('neuromag306')\n", (7801, 7816), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((7828, 7855), 'mne.io.fieldtrip.tests.helpers.get_raw_info', 'get_raw_info', (['"""neuromag306"""'], {}), "('neuromag306')\n", (7840, 7855), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((8188, 8217), 'mne.io.fieldtrip.tests.helpers.get_data_paths', 'get_data_paths', (['"""neuromag306"""'], {}), "('neuromag306')\n", (8202, 8217), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((8229, 8256), 'mne.io.fieldtrip.tests.helpers.get_raw_info', 'get_raw_info', (['"""neuromag306"""'], {}), "('neuromag306')\n", (8241, 8256), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((8723, 8752), 'mne.io.fieldtrip.tests.helpers.get_data_paths', 'get_data_paths', (['"""neuromag306"""'], {}), "('neuromag306')\n", (8737, 8752), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((8841, 8870), 'mne.externals.pymatreader.read_mat', 'read_mat', (['cur_fname', "['data']"], {}), "(cur_fname, ['data'])\n", (8849, 8870), False, 'from 
mne.externals.pymatreader import read_mat\n'), ((8889, 8917), 'copy.deepcopy', 'copy.deepcopy', (['original_data'], {}), '(original_data)\n', (8902, 8917), False, 'import copy\n'), ((8946, 8998), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]'], {}), '([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])\n', (8954, 8998), True, 'import numpy as np\n'), ((9169, 9181), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (9178, 9181), True, 'import numpy as np\n'), ((10507, 10526), 'mne.io.fieldtrip.tests.helpers.get_raw_info', 'get_raw_info', (['"""CNT"""'], {}), "('CNT')\n", (10519, 10526), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((11102, 11128), 'mne.io.fieldtrip.tests.helpers.get_data_paths', 'get_data_paths', (['cur_system'], {}), '(cur_system)\n', (11116, 11128), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((11140, 11164), 'mne.io.fieldtrip.tests.helpers.get_raw_info', 'get_raw_info', (['cur_system'], {}), '(cur_system)\n', (11152, 11164), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((2226, 2250), 'mne.io.fieldtrip.tests.helpers.get_raw_info', 'get_raw_info', (['cur_system'], {}), '(cur_system)\n', (2238, 2250), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((2653, 2682), 'pytest.warns', 'pytest.warns', ([], {}), '(**pytestwarning)\n', (2665, 2682), False, 'import pytest\n'), ((2701, 2746), 'mne.io.read_evoked_fieldtrip', 'mne.io.read_evoked_fieldtrip', (['cur_fname', 'info'], {}), '(cur_fname, info)\n', (2729, 2746), False, 'import mne\n'), ((3970, 3994), 'mne.io.fieldtrip.tests.helpers.get_raw_info', 'get_raw_info', (['cur_system'], {}), '(cur_system)\n', (3982, 3994), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((5531, 5577), 'mne.externals.pymatreader.read_mat', 'read_mat', (['fname', 'variable_names', 'ignore_fields'], {}), '(fname, variable_names, ignore_fields)\n', (5539, 5577), False, 'from mne.externals.pymatreader import read_mat\n'), ((5793, 5839), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {'match': '"""multiple"""'}), "(RuntimeWarning, match='multiple')\n", (5805, 5839), False, 'import pytest\n'), ((5849, 5894), 'mne.io.read_epochs_fieldtrip', 'mne.io.read_epochs_fieldtrip', (['cur_fname', 'info'], {}), '(cur_fname, info)\n', (5877, 5894), False, 'import mne\n'), ((6600, 6624), 'mne.io.fieldtrip.tests.helpers.get_raw_info', 'get_raw_info', (['cur_system'], {}), '(cur_system)\n', (6612, 6624), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((7019, 7048), 'pytest.warns', 'pytest.warns', ([], {}), 
'(**pytestwarning)\n', (7031, 7048), False, 'import pytest\n'), ((7072, 7114), 'mne.io.read_raw_fieldtrip', 'mne.io.read_raw_fieldtrip', (['cur_fname', 'info'], {}), '(cur_fname, info)\n', (7097, 7114), False, 'import mne\n'), ((7934, 7961), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (7947, 7961), False, 'import pytest\n'), ((7971, 8013), 'mne.io.read_raw_fieldtrip', 'mne.io.read_raw_fieldtrip', (['cur_fname', 'info'], {}), '(cur_fname, info)\n', (7996, 8013), False, 'import mne\n'), ((8335, 8360), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8348, 8360), False, 'import pytest\n'), ((8370, 8436), 'mne.io.read_epochs_fieldtrip', 'mne.io.read_epochs_fieldtrip', (['cur_fname', 'info'], {'trialinfo_column': '(-1)'}), '(cur_fname, info, trialinfo_column=-1)\n', (8398, 8436), False, 'import mne\n'), ((8447, 8472), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8460, 8472), False, 'import pytest\n'), ((8482, 8547), 'mne.io.read_epochs_fieldtrip', 'mne.io.read_epochs_fieldtrip', (['cur_fname', 'info'], {'trialinfo_column': '(3)'}), '(cur_fname, info, trialinfo_column=3)\n', (8510, 8547), False, 'import mne\n'), ((9085, 9110), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9098, 9110), False, 'import pytest\n'), ((9120, 9148), 'mne.io.fieldtrip.utils._create_events', '_create_events', (['new_data', '(-1)'], {}), '(new_data, -1)\n', (9134, 9148), False, 'from mne.io.fieldtrip.utils import NOINFO_WARNING, _create_events\n'), ((9198, 9231), 'mne.io.fieldtrip.utils._create_events', '_create_events', (['new_data', 'cur_col'], {}), '(new_data, cur_col)\n', (9212, 9231), False, 'from mne.io.fieldtrip.utils import NOINFO_WARNING, _create_events\n'), ((9247, 9280), 'numpy.all', 'np.all', (['(evts[:, 2] == cur_col + 1)'], {}), '(evts[:, 2] == cur_col + 1)\n', (9253, 9280), True, 'import numpy as np\n'), ((9291, 9316), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9304, 9316), False, 'import pytest\n'), ((9326, 9353), 'mne.io.fieldtrip.utils._create_events', '_create_events', (['new_data', '(4)'], {}), '(new_data, 4)\n', (9340, 9353), False, 'from mne.io.fieldtrip.utils import NOINFO_WARNING, _create_events\n'), ((9594, 9626), 'mne.datasets.testing.data_path', 'mne.datasets.testing.data_path', ([], {}), '()\n', (9624, 9626), False, 'import mne\n'), ((9726, 9757), 'pytest.warns', 'pytest.warns', ([], {}), '(**no_info_warning)\n', (9738, 9757), False, 'import pytest\n'), ((9767, 9810), 'mne.io.read_raw_fieldtrip', 'mne.io.read_raw_fieldtrip', (['fname'], {'info': 'None'}), '(fname, info=None)\n', (9792, 9810), False, 'import mne\n'), ((10410, 10437), 'mne.io.fieldtrip.tests.helpers.get_data_paths', 'get_data_paths', (['"""cellarray"""'], {}), "('cellarray')\n", (10424, 10437), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((10537, 10626), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Loading of data in cell arrays is not supported"""'}), "(RuntimeError, match=\n 'Loading of data in cell arrays is not supported')\n", (10550, 10626), False, 'import pytest\n'), ((11230, 11258), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {}), '(RuntimeWarning)\n', (11242, 11258), False, 'import pytest\n'), ((11903, 11935), 'mne.datasets.testing.data_path', 
'mne.datasets.testing.data_path', ([], {}), '()\n', (11933, 11935), False, 'import mne\n'), ((12034, 12138), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Loading data with non-uniform times per epoch is not supported"""'}), "(RuntimeError, match=\n 'Loading data with non-uniform times per epoch is not supported')\n", (12047, 12138), False, 'import pytest\n'), ((12189, 12235), 'mne.io.read_epochs_fieldtrip', 'mne.io.read_epochs_fieldtrip', (['fname'], {'info': 'None'}), '(fname, info=None)\n', (12217, 12235), False, 'import mne\n'), ((12513, 12545), 'mne.datasets.testing.data_path', 'mne.datasets.testing.data_path', ([], {}), '()\n', (12543, 12545), False, 'import mne\n'), ((12639, 12917), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""This file was created with an old version of FieldTrip. You can convert the data to the new version by loading it into FieldTrip and applying ft_selectdata with an empty cfg structure on it. Otherwise you can supply the Info field."""'}), "(RuntimeError, match=\n 'This file was created with an old version of FieldTrip. You can convert the data to the new version by loading it into FieldTrip and applying ft_selectdata with an empty cfg structure on it. Otherwise you can supply the Info field.'\n )\n", (12652, 12917), False, 'import pytest\n'), ((13285, 13331), 'mne.io.read_epochs_fieldtrip', 'mne.io.read_epochs_fieldtrip', (['fname'], {'info': 'None'}), '(fname, info=None)\n', (13313, 13331), False, 'import mne\n'), ((2516, 2527), 'mne.io.fieldtrip.tests.helpers._has_h5py', '_has_h5py', ([], {}), '()\n', (2525, 2527), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((2542, 2568), 'pytest.raises', 'pytest.raises', (['ImportError'], {}), '(ImportError)\n', (2555, 2568), False, 'import pytest\n'), ((2582, 2627), 'mne.io.read_evoked_fieldtrip', 'mne.io.read_evoked_fieldtrip', (['cur_fname', 'info'], {}), '(cur_fname, info)\n', (2610, 2627), False, 'import mne\n'), ((4434, 4463), 'pytest.warns', 'pytest.warns', ([], {}), '(**pytestwarning)\n', (4446, 4463), False, 'import pytest\n'), ((4490, 4535), 'mne.io.read_epochs_fieldtrip', 'mne.io.read_epochs_fieldtrip', (['cur_fname', 'info'], {}), '(cur_fname, info)\n', (4518, 4535), False, 'import mne\n'), ((4624, 4642), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (4636, 4642), False, 'import pytest\n'), ((4875, 4920), 'mne.io.read_epochs_fieldtrip', 'mne.io.read_epochs_fieldtrip', (['cur_fname', 'info'], {}), '(cur_fname, info)\n', (4903, 4920), False, 'import mne\n'), ((4980, 5047), 'mne.io.fieldtrip.tests.helpers.assert_warning_in_record', 'assert_warning_in_record', (['pandas_not_found_warning_msg', 'warn_record'], {}), '(pandas_not_found_warning_msg, warn_record)\n', (5004, 5047), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((5652, 5688), 'numpy.repeat', 'np.repeat', (["out['data']['fsample']", '(2)'], {}), "(out['data']['fsample'], 2)\n", (5661, 5688), True, 'import numpy as np\n'), ((6886, 6897), 'mne.io.fieldtrip.tests.helpers._has_h5py', '_has_h5py', ([], {}), '()\n', (6895, 6897), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, 
_has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((6912, 6938), 'pytest.raises', 'pytest.raises', (['ImportError'], {}), '(ImportError)\n', (6925, 6938), False, 'import pytest\n'), ((6952, 6994), 'mne.io.read_raw_fieldtrip', 'mne.io.read_raw_fieldtrip', (['cur_fname', 'info'], {}), '(cur_fname, info)\n', (6977, 6994), False, 'import mne\n'), ((10712, 10750), 'mne.read_evoked_fieldtrip', 'mne.read_evoked_fieldtrip', (['fname', 'info'], {}), '(fname, info)\n', (10737, 10750), False, 'import mne\n'), ((4282, 4293), 'mne.io.fieldtrip.tests.helpers._has_h5py', '_has_h5py', ([], {}), '()\n', (4291, 4293), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((4312, 4338), 'pytest.raises', 'pytest.raises', (['ImportError'], {}), '(ImportError)\n', (4325, 4338), False, 'import pytest\n'), ((4356, 4401), 'mne.io.read_epochs_fieldtrip', 'mne.io.read_epochs_fieldtrip', (['cur_fname', 'info'], {}), '(cur_fname, info)\n', (4384, 4401), False, 'import mne\n'), ((5126, 5187), 'mne.io.fieldtrip.tests.helpers.assert_warning_in_record', 'assert_warning_in_record', (["pytestwarning['match']", 'warn_record'], {}), "(pytestwarning['match'], warn_record)\n", (5150, 5187), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((10795, 10833), 'mne.read_epochs_fieldtrip', 'mne.read_epochs_fieldtrip', (['fname', 'info'], {}), '(fname, info)\n', (10820, 10833), False, 'import mne\n'), ((4699, 4710), 'mne.io.fieldtrip.tests.helpers._has_h5py', '_has_h5py', ([], {}), '()\n', (4708, 4710), False, 'from mne.io.fieldtrip.tests.helpers import check_info_fields, get_data_paths, get_raw_data, get_epochs, get_evoked, _has_h5py, pandas_not_found_warning_msg, get_raw_info, check_data, assert_warning_in_record\n'), ((4733, 4759), 'pytest.raises', 'pytest.raises', (['ImportError'], {}), '(ImportError)\n', (4746, 4759), False, 'import pytest\n'), ((4781, 4826), 'mne.io.read_epochs_fieldtrip', 'mne.io.read_epochs_fieldtrip', (['cur_fname', 'info'], {}), '(cur_fname, info)\n', (4809, 4826), False, 'import mne\n'), ((10874, 10912), 'mne.io.read_raw_fieldtrip', 'mne.io.read_raw_fieldtrip', (['fname', 'info'], {}), '(fname, info)\n', (10899, 10912), False, 'import mne\n')]
# Copyright 2014 <NAME>, <EMAIL>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from math import degrees
from math import radians
from numpy import array
import sys
import unittest

from .quaternion import Quaternion


class QuaternionTest(unittest.TestCase):
    def check_vector_close(self, vector, x, y, z):
        self.assertAlmostEqual(vector[0], x)
        self.assertAlmostEqual(vector[1], y)
        self.assertAlmostEqual(vector[2], z)

    def test_basic(self):
        v = array([10, 0, 0])
        v = Quaternion.from_euler(0, 0, radians(90)).rotate(v)
        self.check_vector_close(v, 0, -10.0, 0)
        v = Quaternion.from_euler(0, 0, radians(-90)).rotate(v)
        self.check_vector_close(v, 10, 0, 0)
        v = Quaternion.from_euler(0, radians(90), 0).rotate(v)
        self.check_vector_close(v, 10, 0, 0)
        v = Quaternion.from_euler(radians(90), 0, 0).rotate(v)
        self.check_vector_close(v, 0, 0, -10)
        v = Quaternion.from_euler(0, 0, radians(90)).rotate(v)
        self.check_vector_close(v, 0, 0, -10)
        v = Quaternion.from_euler(0, radians(90), 0).rotate(v)
        self.check_vector_close(v, 0, 10, 0)
        v = Quaternion.from_euler(radians(90), 0, 0).rotate(v)
        self.check_vector_close(v, 0, 10, 0)
        v = Quaternion.from_euler(0, 0, radians(90)).rotate(v)
        self.check_vector_close(v, 10, 0, 0)

    def check_euler(self, euler, roll_rad, pitch_rad, yaw_rad):
        self.assertAlmostEqual(degrees(euler.yaw), degrees(yaw_rad))
        self.assertAlmostEqual(degrees(euler.pitch), degrees(pitch_rad))
        self.assertAlmostEqual(degrees(euler.roll), degrees(roll_rad))

    def test_euler_and_back(self):
        tests = [
            (45, 0, 0),
            (0, 45, 0),
            (0, 0, 45),
            (0, 90, 0),
            (0, 90, 20),
            (0, -90, 0),
            (0, -90, -10),
            (0, -90, 30),
            (10, 20, 30),
            (-30, 10, 20),
        ]
        for test in tests:
            try:
                self.check_euler(
                    Quaternion.from_euler(radians(test[0]),
                                          radians(test[1]),
                                          radians(test[2])).euler(),
                    radians(test[0]), radians(test[1]), radians(test[2]))
            except:
                print('in test:', test, file=sys.stderr)
                raise

    def test_multiplication(self):
        x90 = Quaternion.from_euler(0, radians(90), 0)
        xn90 = Quaternion.from_euler(0, -radians(90), 0)
        y90 = Quaternion.from_euler(radians(90), 0, 0)

        result = xn90 * y90 * x90
        vector = array([0, 1, 0])
        vector = result.rotate(vector)
        self.check_vector_close(vector, 1, 0, 0)

        initial = Quaternion.from_euler(0, 0, radians(45))
        initial = Quaternion.from_euler(0, 0, radians(45)) * initial
        self.check_euler(initial.euler(), 0, 0, radians(90))

        initial = Quaternion.from_euler(0, radians(10), 0) * initial
        vector = initial.rotate(vector)
        self.check_vector_close(vector, 0, -0.9848078, -0.17364818)
        self.check_euler(initial.euler(), radians(10), 0, radians(90))

    def test_multiplication2(self):
        attitude = Quaternion.from_euler(radians(-5), 0, radians(90))
        attitude = attitude * Quaternion.from_euler(0, 0, radians(90))
        self.check_euler(attitude.euler(), 0, radians(5), radians(180))

    def test_multiplication3(self):
        attitude = Quaternion.from_euler(radians(-3), radians(3), 0)
        attitude = attitude * Quaternion.integrate_rotation_rate(
            0, 0, radians(-5), 1)
        self.check_euler(attitude.euler(), radians(-3.24974326),
                         radians(2.727438544), radians(-4.99563857))
[ "numpy.array", "math.degrees", "math.radians" ]
[((985, 1002), 'numpy.array', 'array', (['[10, 0, 0]'], {}), '([10, 0, 0])\n', (990, 1002), False, 'from numpy import array\n'), ((3334, 3350), 'numpy.array', 'array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (3339, 3350), False, 'from numpy import array\n'), ((1977, 1995), 'math.degrees', 'degrees', (['euler.yaw'], {}), '(euler.yaw)\n', (1984, 1995), False, 'from math import degrees\n'), ((2028, 2044), 'math.degrees', 'degrees', (['yaw_rad'], {}), '(yaw_rad)\n', (2035, 2044), False, 'from math import degrees\n'), ((2077, 2097), 'math.degrees', 'degrees', (['euler.pitch'], {}), '(euler.pitch)\n', (2084, 2097), False, 'from math import degrees\n'), ((2130, 2148), 'math.degrees', 'degrees', (['pitch_rad'], {}), '(pitch_rad)\n', (2137, 2148), False, 'from math import degrees\n'), ((2181, 2200), 'math.degrees', 'degrees', (['euler.roll'], {}), '(euler.roll)\n', (2188, 2200), False, 'from math import degrees\n'), ((2233, 2250), 'math.degrees', 'degrees', (['roll_rad'], {}), '(roll_rad)\n', (2240, 2250), False, 'from math import degrees\n'), ((3154, 3165), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (3161, 3165), False, 'from math import radians\n'), ((3263, 3274), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (3270, 3274), False, 'from math import radians\n'), ((3486, 3497), 'math.radians', 'radians', (['(45)'], {}), '(45)\n', (3493, 3497), False, 'from math import radians\n'), ((3616, 3627), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (3623, 3627), False, 'from math import radians\n'), ((3849, 3860), 'math.radians', 'radians', (['(10)'], {}), '(10)\n', (3856, 3860), False, 'from math import radians\n'), ((3865, 3876), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (3872, 3876), False, 'from math import radians\n'), ((3956, 3967), 'math.radians', 'radians', (['(-5)'], {}), '(-5)\n', (3963, 3967), False, 'from math import radians\n'), ((3972, 3983), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (3979, 3983), False, 'from math import radians\n'), ((4102, 4112), 'math.radians', 'radians', (['(5)'], {}), '(5)\n', (4109, 4112), False, 'from math import radians\n'), ((4114, 4126), 'math.radians', 'radians', (['(180)'], {}), '(180)\n', (4121, 4126), False, 'from math import radians\n'), ((4206, 4217), 'math.radians', 'radians', (['(-3)'], {}), '(-3)\n', (4213, 4217), False, 'from math import radians\n'), ((4219, 4229), 'math.radians', 'radians', (['(3)'], {}), '(3)\n', (4226, 4229), False, 'from math import radians\n'), ((4402, 4422), 'math.radians', 'radians', (['(-3.24974326)'], {}), '(-3.24974326)\n', (4409, 4422), False, 'from math import radians\n'), ((4449, 4469), 'math.radians', 'radians', (['(2.727438544)'], {}), '(2.727438544)\n', (4456, 4469), False, 'from math import radians\n'), ((4496, 4516), 'math.radians', 'radians', (['(-4.99563857)'], {}), '(-4.99563857)\n', (4503, 4516), False, 'from math import radians\n'), ((3211, 3222), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (3218, 3222), False, 'from math import radians\n'), ((3545, 3556), 'math.radians', 'radians', (['(45)'], {}), '(45)\n', (3552, 3556), False, 'from math import radians\n'), ((3673, 3684), 'math.radians', 'radians', (['(10)'], {}), '(10)\n', (3680, 3684), False, 'from math import radians\n'), ((4043, 4054), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (4050, 4054), False, 'from math import radians\n'), ((4318, 4329), 'math.radians', 'radians', (['(-5)'], {}), '(-5)\n', (4325, 4329), False, 'from math import radians\n'), ((1044, 1055), 'math.radians', 'radians', (['(90)'], 
{}), '(90)\n', (1051, 1055), False, 'from math import radians\n'), ((1156, 1168), 'math.radians', 'radians', (['(-90)'], {}), '(-90)\n', (1163, 1168), False, 'from math import radians\n'), ((1263, 1274), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (1270, 1274), False, 'from math import radians\n'), ((1369, 1380), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (1376, 1380), False, 'from math import radians\n'), ((1485, 1496), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (1492, 1496), False, 'from math import radians\n'), ((1592, 1603), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (1599, 1603), False, 'from math import radians\n'), ((1698, 1709), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (1705, 1709), False, 'from math import radians\n'), ((1813, 1824), 'math.radians', 'radians', (['(90)'], {}), '(90)\n', (1820, 1824), False, 'from math import radians\n'), ((2889, 2905), 'math.radians', 'radians', (['test[0]'], {}), '(test[0])\n', (2896, 2905), False, 'from math import radians\n'), ((2927, 2943), 'math.radians', 'radians', (['test[1]'], {}), '(test[1])\n', (2934, 2943), False, 'from math import radians\n'), ((2965, 2981), 'math.radians', 'radians', (['test[2]'], {}), '(test[2])\n', (2972, 2981), False, 'from math import radians\n'), ((2722, 2738), 'math.radians', 'radians', (['test[0]'], {}), '(test[0])\n', (2729, 2738), False, 'from math import radians\n'), ((2782, 2798), 'math.radians', 'radians', (['test[1]'], {}), '(test[1])\n', (2789, 2798), False, 'from math import radians\n'), ((2842, 2858), 'math.radians', 'radians', (['test[2]'], {}), '(test[2])\n', (2849, 2858), False, 'from math import radians\n')]
"""Basic Package """ import numpy as np from .base import MFPackageDIS from .reader import MFFileReader __all__ = ['BAS6'] class BAS6(MFPackageDIS): """Basic Package""" valid_options = ['XSECTION', 'CHTOCH', 'FREE', 'PRINTTIME', 'SHOWPROGRESS', 'STOPERROR'] _Options = [] @property def Options(self): """List of Options""" return getattr(self, '_Options') @Options.setter def Options(self, value): setattr(self, '_Options', value) @property def free(self): """Indicates that free format is used for input variables throughout the Basic Package and other packages as indicated in their input instructions.""" w = 'FREE' return any([k in self.Options for k in [w, w.lower(), w.upper()]]) @free.setter def free(self, value): cur = self.free w = 'FREE' if value and not cur: self.Options.append(w) elif not value and cur: for k in [w, w.lower(), w.upper()]: if k in self.Options: self.Options.remove(k) @property def xsection(self): """Indicates that the model is a 1-row cross section for which STRT and IBOUND should each be read as single two-dimensional variables with dimensions of NCOL and NLAY.""" w = 'XSECTION' return any([k in self.Options for k in [w, w.lower(), w.upper()]]) @xsection.setter def xsection(self, value): cur = self.free w = 'XSECTION' if value and not cur: self.Options.append(w) elif not value and cur: for k in [w, w.lower(), w.upper()]: if k in self.Options: self.Options.remove(k) @property def Ibound(self): """The boundary variable array, which is < 0 for constant heads, = 0 for inactive cells and > 0 for active cells.""" return getattr(self, '_Ibound', None) @Ibound.setter def Ibound(self, value): setattr(self, '_Ibound', value) @property def hnoflo(self): """The value of head to be assigned to all inactive (no flow) cells (ibound == 0) throughout the simulation.""" return getattr(self, '_hnoflo', None) @hnoflo.setter def hnoflo(self, value): setattr(self, '_hnoflo', value) @property def Strt(self): """Initial or starting head array.""" return getattr(self, '_Strt', None) @Strt.setter def Strt(self, value): setattr(self, '_Strt', value) def read(self, fpath=None): """Read BAS6 file""" self._setup_read() fp = MFFileReader(fpath, self) try: # 0: [#Text] fp.read_text(0) # 1: Options fp.read_options(1, False) if self.disu: # 2a. IBOUND(NDSLAY) -- U1DINT self.Ibound = [] if not self.xsection: for ilay, ndslay in enumerate(self.disu.Nodelay): n = '2a:L' + str(ilay + 1) self.Ibound.append( fp.get_array(n, ndslay, self._float_type)) else: # same??? for ilay, ndslay in enumerate(self.disu.Nodelay): n = '2a:L' + str(ilay + 1) self.Ibound.append( fp.get_array(n, ndslay, self._float_type)) else: # 2b: IBOUND(NCOL,NROW) or (NCOL,NLAY) -- U2DINT if self.xsection: assert self.dis.nrow == 1, self.dis.nrow LC_shape = (self.dis.nlay, self.dis.ncol) self.Ibound = fp.get_array('2b', LC_shape, 'i') else: self.Ibound = np.empty(self.dis.shape3d, 'i') for ilay in range(self.dis.nlay): n = '2b:L' + str(ilay + 1) self.Ibound[ilay, :, :] = \ fp.get_array(n, self.dis.shape2d, 'i') # 3: HNOFLO (10-character field unless Item 1 contains 'FREE'.) 
line = fp.next_line(3) if self.free: self.hnoflo = self._float_type.type(line.split()[0]) else: self.hnoflo = self._float_type.type(line[0:10]) # 4: STRT(NCOL,NROW) or (NCOL,NLAY) -- U2DREL if self.xsection: self.strt = fp.get_array(4, LC_shape, self._float_type) else: self.strt = np.empty(self.dis.shape3d, self._float_type) for ilay in range(self.dis.nlay): self.strt[ilay, :, :] = \ fp.get_array(4, self.dis.shape2d, self._float_type) fp.check_end() except Exception as e: exec(fp.location_exception(e))
[ "numpy.empty" ]
[((4623, 4667), 'numpy.empty', 'np.empty', (['self.dis.shape3d', 'self._float_type'], {}), '(self.dis.shape3d, self._float_type)\n', (4631, 4667), True, 'import numpy as np\n'), ((3873, 3904), 'numpy.empty', 'np.empty', (['self.dis.shape3d', '"""i"""'], {}), "(self.dis.shape3d, 'i')\n", (3881, 3904), True, 'import numpy as np\n')]
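`BAS6.read` above allocates the full 3-D array up front with `numpy.empty` and then fills it one layer at a time. A self-contained sketch of that pattern; the grid dimensions and the stand-in for `fp.get_array` are made up:

import numpy as np

nlay, nrow, ncol = 3, 4, 5  # made-up grid dimensions
ibound = np.empty((nlay, nrow, ncol), dtype='i')
for ilay in range(nlay):
    # Stand-in for fp.get_array(n, self.dis.shape2d, 'i'), which would
    # read one layer from the BAS6 file.
    ibound[ilay, :, :] = np.ones((nrow, ncol), dtype='i')
print(ibound.shape)  # (3, 4, 5)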
"Bag Of Discriptors" import cv2 import numpy as np class Detector(object): def __init__(self, verbose=True): ''' Detector (class) constructor. Args: verbose(bool): Indicator for log and progress bar ''' self.orb = cv2.ORB_create() self.bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) self.verbose = verbose self.templates = [] self.template_des = [] def reader(self, img_path): ''' Reader for the images in cv2. Args: img_path(string): path to load the data Returns: image (cv2) : loaded image in cv2 ''' image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) assert (image is None) != True return image def list_reader(self, im_path_list): ''' Reader for the list of images in cv2. Args: img_path_list (list): path to load the images Returns: image (list) : loaded images list in cv2 ''' img_list = [] for img_path in im_path_list: img = self.reader(img_path) img_list.append(img) return img_list def descExtractor(self, img): ''' Extract the descriptors based on technique specified. Args: img (cv2 image): image od interest Returns: kp (list): keypoints of the image des(list): descriptors of the image ''' kp, des = self.orb.detectAndCompute(img, None) return kp, des def list_descExtractor(self, img_list): ''' Extract the descriptors from the list of the images based on technique specified. Args: img_list(list): image list of interest Returns: kp (list): list of the keypoints of the images des(list): list of the descriptors of the images ''' keypoints_list = [] descriptors_list = [] for img in img_list: # Using ORB to extract features kp, des = self.descExtractor(img) keypoints_list.append(kp) descriptors_list.append(des) return keypoints_list, descriptors_list def bruteForceMatcher(self, des1, des2): ''' Comparing the descriptors based on brute force. Args: des1 (list): Descriptor of an image des2 (list): Descriptor of an image Returns: matches(list) : sorted matche object after comparison ''' matches = self.bf.match(des1, des2) return sorted(matches, key=lambda x: x.distance), len(matches) def matchFinder(self, img_des, template_des_list): ''' Comparing the descriptors based on brute force. Args: des1 (list): Descriptor of an image des2 (list): Descriptor of an image Returns: matches(list) : sorted matche object after comparison ''' matches_list = [] tot_matches_list = [] for template_des in template_des_list: matches, tot_matches = self.bruteForceMatcher(img_des, template_des) matches_list.append(matches) tot_matches_list.append(tot_matches) return matches_list, tot_matches_list def train(self, train_images, train_labels): ''' Extracting features from training images and saving it. Args: train_images (list): list of all the images (cv2) train_labels (list): list of all teh labels (string) ''' self.labels = train_labels self.templates = self.list_reader(train_images) _, self.template_des = self.list_descExtractor(self.templates) self.total_templates = len(self.templates) def softmax(self, x): ''' Compute softmax values for each sets of scores in x. 
''' e_x = np.exp(x - np.max(x)) return e_x / e_x.sum() def distance_minmax(self, matches_list): ''' Normalises the vector based on inverse of the magintute by min-max rule ''' best_distance = [match[0].distance for match in matches_list] return [(max(best_distance) - x) / (max(best_distance) - min(best_distance)) for x in best_distance] def scorer(self, matches_list, num_matches): ''' Ad-Hoc Scorer function Args: matches_list (list): list of all the matches correspondence num_matches (list): list of the total number of matches with correspondence Return: probability values of the clssifier ''' p_match = np.array(self.probs(num_matches)) p_distance = np.array(self.probs(self.distance_minmax(matches_list))) p_score = self.probs(p_match * p_distance) return p_match def probs(self, x): ''' Compute softmax values for each sets of scores in x. ''' return np.array([xx / sum(x) for xx in x]) def predict(self, im_path): ''' Class predictor function Args: im_path (string): path of the testing image ''' # process the image img = self.reader(im_path) _, img_des = self.descExtractor(img) # matching and scoring matches_list, tot_matches = self.matchFinder(img_des, self.template_des) pval = self.scorer(matches_list, tot_matches) if self.verbose: for i in range(self.total_templates): msg = "Template: {0:>12}, Number of Discriptors Matched: {1:>4}, Decision Confidence: {2:>6.3%}" print(msg.format(self.labels[i], tot_matches[i], pval[i])) print("Predicted Class : {0:>12}\n\n".format(self.labels[np.argmax(pval)]))
[ "cv2.BFMatcher", "numpy.argmax", "numpy.max", "cv2.ORB_create", "cv2.imread" ]
[((283, 299), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (297, 299), False, 'import cv2\n'), ((318, 366), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_HAMMING'], {'crossCheck': '(True)'}), '(cv2.NORM_HAMMING, crossCheck=True)\n', (331, 366), False, 'import cv2\n'), ((713, 755), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(img_path, cv2.IMREAD_GRAYSCALE)\n', (723, 755), False, 'import cv2\n'), ((4099, 4108), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (4105, 4108), True, 'import numpy as np\n'), ((5990, 6005), 'numpy.argmax', 'np.argmax', (['pval'], {}), '(pval)\n', (5999, 6005), True, 'import numpy as np\n')]
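A hedged usage sketch for the `Detector` class in the row above. It assumes the class is in scope and that OpenCV-readable template and query images exist at the (hypothetical) paths shown:

# hypothetical template images and their labels
train_images = ["templates/logo_a.png", "templates/logo_b.png"]
train_labels = ["logo_a", "logo_b"]

det = Detector(verbose=True)
det.train(train_images, train_labels)  # extracts ORB descriptors per template
det.predict("queries/photo.png")       # matches, scores, and prints the predicted class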
import numpy as np

from girard import sampling


def all_coordinates_are_positive(vec):
    return all(map(lambda pos: pos >= 0, vec))


def estimate_solid_angle(spanning_matrix, sample_size):
    dim = len(spanning_matrix)
    inverse = np.linalg.inv(spanning_matrix)
    points_inside_cone = 0
    for i in range(sample_size):
        sample_point = np.matrix(sampling.sample_hypersphere_point(dim)).T
        transformed_point = inverse * sample_point
        if all_coordinates_are_positive(transformed_point):
            points_inside_cone += 1
    return points_inside_cone / sample_size
[ "numpy.linalg.inv", "girard.sampling.sample_hypersphere_point" ]
[((236, 266), 'numpy.linalg.inv', 'np.linalg.inv', (['spanning_matrix'], {}), '(spanning_matrix)\n', (249, 266), True, 'import numpy as np\n'), ((360, 398), 'girard.sampling.sample_hypersphere_point', 'sampling.sample_hypersphere_point', (['dim'], {}), '(dim)\n', (393, 398), False, 'from girard import sampling\n')]
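The function above Monte-Carlo-estimates the fraction of sphere directions lying inside the cone spanned by the matrix columns: sample a uniform direction, express it in the cone's basis via the inverse matrix, and count the samples whose coefficients are all non-negative. A self-contained sketch of the same idea that replaces the `girard.sampling` dependency with the standard normalized-Gaussian sampler (the function names here are mine, not the library's); with the 2-D identity matrix the cone is the positive quadrant, so the estimate should be near 0.25:

import numpy as np

rng = np.random.default_rng(0)

def sample_hypersphere_point(dim):
    # normalized Gaussian vectors are uniformly distributed on the unit sphere
    v = rng.normal(size=dim)
    return v / np.linalg.norm(v)

def estimate_cone_fraction(spanning_matrix, sample_size):
    inverse = np.linalg.inv(np.asarray(spanning_matrix, dtype=float))
    inside = 0
    for _ in range(sample_size):
        coeffs = inverse @ sample_hypersphere_point(len(spanning_matrix))
        if (coeffs >= 0).all():
            inside += 1
    return inside / sample_size

print(estimate_cone_fraction(np.eye(2), 20000))  # ~0.25: the positive quadrant is 1/4 of all directions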
import os
import cv2
import numpy as np


def convert(size, box):
    '''
    convert (xmin, ymin, xmax, ymax) to (cx/w, cy/h, bw/w, bh/h)

    param:
        size: tuple (im_width, im_height)
        box: list [xmin, ymin, xmax, ymax]

    return:
        list [cx/w, cy/h, bw/w, bh/h]
    '''
    dw = 1. / size[0]
    dh = 1. / size[1]
    x = (box[0] + box[2]) / 2.0
    y = (box[1] + box[3]) / 2.0
    w = box[2] - box[0]
    h = box[3] - box[1]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return [x, y, w, h]


def gen_train_list(imgs_file, target_file):
    tf = open(target_file, 'w')
    imgs_f = open(imgs_file, 'r')
    imgs = imgs_f.readlines()
    for i in imgs:
        boxes = np.array([float(j) for j in i.strip().split(' ')[1:]]).reshape(-1, 4)
        box_list = []
        for b in boxes:
            bb = convert((960.0, 960.0), b) + [0.0]
            box_list.append(bb)
        box_string = ' '.join([str(j)[:6] for j in np.array(box_list).reshape(-1,)])
        img_label = i.strip().split(' ')[0] + ' ' + box_string
        tf.write(img_label + '\n')
    imgs_f.close()
    tf.close()


if __name__ == '__main__':
    imgs_file = '../../data/img_list/img_label_list.txt'
    target_file = '../../data/train_list/train_list.txt'
    gen_train_list(imgs_file, target_file)

    imgs_file = '../../data/img_list/img_label_list_cv4aug4.txt'
    target_file = '../../data/train_list/train_list_cv4aug4.txt'
    gen_train_list(imgs_file, target_file)
[ "numpy.array" ]
[((957, 975), 'numpy.array', 'np.array', (['box_list'], {}), '(box_list)\n', (965, 975), True, 'import numpy as np\n')]
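A worked example of the `convert` arithmetic above (it assumes the function from the row is in scope): for a 960x960 image and box [xmin, ymin, xmax, ymax] = [100, 200, 300, 400], the center is (200, 300) and the size is (200, 200), each normalized by the image dimensions:

box = [100.0, 200.0, 300.0, 400.0]          # xmin, ymin, xmax, ymax
print(convert((960.0, 960.0), box))
# -> [0.2083..., 0.3125, 0.2083..., 0.2083...]  (i.e. [200/960, 300/960, 200/960, 200/960])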
"""This submodule defines the "vanilla" `MODNetModel`, i.e. a single model with deterministic weights and outputs. """ from typing import List, Tuple, Dict, Optional, Callable, Any from pathlib import Path import multiprocessing import pandas as pd import numpy as np from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.metrics import mean_absolute_error, roc_auc_score import tensorflow as tf from modnet.preprocessing import MODData from modnet.utils import LOG from modnet import __version__ import tqdm __all__ = ("MODNetModel",) class MODNetModel: """Container class for the underlying tf.keras `Model`, that handles setting up the architecture, activations, training and learning curve. Attributes: n_feat: The number of features used in the model. weights: The relative loss weights for each target. optimal_descriptors: The list of column names used in training the model. model: The `tf.keras.model.Model` of the network itself. target_names: The list of targets names that the model was trained for. """ can_return_uncertainty = False def __init__( self, targets: List, weights: Dict[str, float], num_neurons=([64], [32], [16], [16]), num_classes: Optional[Dict[str, int]] = None, multi_label: Optional[bool] = False, n_feat: Optional[int] = 64, act: str = "relu", out_act: str = "linear", ): """Initialise the model on the passed targets with the desired architecture, feature count and loss functions and activation functions. Parameters: targets: A nested list of targets names that defines the hierarchy of the output layers. weights: The relative loss weights to apply for each target. num_classes: Dictionary defining the target types (classification or regression). Should be constructed as follows: key: string giving the target name; value: integer n, with n=0 for regression and n>=2 for classification with n the number of classes. multi_label: Whether the problem (if classification) is multi-label. In this case the softmax output-activation is replaced by a sigmoid. num_neurons: A specification of the model layers, as a 4-tuple of lists of integers. Hidden layers are split into four blocks of `tf.keras.layers.Dense`, with neuron count specified by the elements of the `num_neurons` argument. n_feat: The number of features to use as model inputs. act: A string defining a tf.keras activation function to pass to use in the `tf.keras.layers.Dense` layers. 
out_act: A string defining a tf.keras activation function to pass to use for the last output layer (regression only) """ self.__modnet_version__ = __version__ if n_feat is None: n_feat = 64 self.n_feat = n_feat self.weights = weights self.num_classes = num_classes self.multi_label = multi_label self.num_neurons = num_neurons self.act = act self.out_act = out_act self._scaler = None self.optimal_descriptors = None self.target_names = None self.targets = targets self.model = None f_temp = [x for subl in targets for x in subl] self.targets_flatten = [x for subl in f_temp for x in subl] self.num_classes = {name: 0 for name in self.targets_flatten} if num_classes is not None: self.num_classes.update(num_classes) self._multi_target = len(self.targets_flatten) > 1 self.model = self.build_model( targets, n_feat, num_neurons, act=act, out_act=out_act, num_classes=self.num_classes, multi_label=multi_label, ) def build_model( self, targets: List, n_feat: int, num_neurons: Tuple[List[int], List[int], List[int], List[int]], num_classes: Optional[Dict[str, int]] = None, multi_label: Optional[bool] = False, act: str = "relu", out_act: str = "linear", ): """Builds the tf.keras model and sets the `self.model` attribute. Parameters: targets: A nested list of targets names that defines the hierarchy of the output layers. n_feat: The number of features to use as model inputs. num_neurons: A specification of the model layers, as a 4-tuple of lists of integers. Hidden layers are split into four blocks of `tf.keras.layers.Dense`, with neuron count specified by the elements of the `num_neurons` argument. num_classes: Dictionary defining the target types (classification or regression). Should be constructed as follows: key: string giving the target name; value: integer n, with n=0 for regression and n>=2 for classification with n the number of classes. multi_label: Whether the problem (if classification) is multi-label. In this case the softmax output-activation is replaced by a sigmoid. act: A string defining a tf.keras activation function to pass to use in the `tf.keras.layers.Dense` layers. 
out_act: A string defining a tf.keras activation function to pass to use for the last output layer (regression only) """ num_layers = [len(x) for x in num_neurons] # Build first common block f_input = tf.keras.layers.Input(shape=(n_feat,)) previous_layer = f_input for i in range(num_layers[0]): previous_layer = tf.keras.layers.Dense(num_neurons[0][i], activation=act)( previous_layer ) if self._multi_target: previous_layer = tf.keras.layers.BatchNormalization()(previous_layer) common_out = previous_layer # Build intermediate representations intermediate_models_out = [] for _ in range(len(targets)): previous_layer = common_out for j in range(num_layers[1]): previous_layer = tf.keras.layers.Dense( num_neurons[1][j], activation=act )(previous_layer) if self._multi_target: previous_layer = tf.keras.layers.BatchNormalization()( previous_layer ) intermediate_models_out.append(previous_layer) # Build outputs final_out = [] for group_idx, group in enumerate(targets): for prop_idx in range(len(group)): previous_layer = intermediate_models_out[group_idx] for k in range(num_layers[2]): previous_layer = tf.keras.layers.Dense( num_neurons[2][k], activation=act )(previous_layer) if self._multi_target: previous_layer = tf.keras.layers.BatchNormalization()( previous_layer ) clayer = previous_layer for pi in range(len(group[prop_idx])): previous_layer = clayer for li in range(num_layers[3]): previous_layer = tf.keras.layers.Dense(num_neurons[3][li])( previous_layer ) n = num_classes[group[prop_idx][pi]] if n >= 2: out = tf.keras.layers.Dense( n, activation="sigmoid" if multi_label else "softmax", name=group[prop_idx][pi], )(previous_layer) else: out = tf.keras.layers.Dense( 1, activation=out_act, name=group[prop_idx][pi] )(previous_layer) final_out.append(out) return tf.keras.models.Model(inputs=f_input, outputs=final_out) def fit( self, training_data: MODData, val_fraction: float = 0.0, val_key: Optional[str] = None, val_data: Optional[MODData] = None, lr: float = 0.001, epochs: int = 200, batch_size: int = 128, xscale: Optional[str] = "minmax", metrics: List[str] = ["mae"], callbacks: List[Callable] = None, verbose: int = 0, loss: str = "mse", **fit_params, ) -> None: """Train the model on the passed training `MODData` object. Parameters: training_data: A `MODData` that has been featurized and feature selected. The first `self.n_feat` entries in `training_data.get_optimal_descriptors()` will be used for training. val_fraction: The fraction of the training data to use as a validation set for tracking model performance during training. val_key: The target name to track on the validation set during training, if performing multi-target learning. lr: The learning rate. epochs: The maximum number of epochs to train for. batch_size: The batch size to use for training. xscale: The feature scaler to use, either `None`, `'minmax'` or `'standard'`. metrics: A list of tf.keras metrics to pass to `compile(...)`. loss: The built-in tf.keras loss to pass to `compile(...)`. fit_params: Any additional parameters to pass to `fit(...)`, these will be overwritten by the explicit keyword arguments above. """ if self.n_feat > len(training_data.get_optimal_descriptors()): raise RuntimeError( "The model requires more features than computed in data. 
" f"Please reduce n_feat below or equal to {len(training_data.get_optimal_descriptors())}" ) self.xscale = xscale self.target_names = list(self.weights.keys()) self.optimal_descriptors = training_data.get_optimal_descriptors() x = training_data.get_featurized_df()[ self.optimal_descriptors[: self.n_feat] ].values # For compatibility with MODNet 0.1.7; if there is only one target in the training data, # use that for the name of the target too. if ( len(self.targets_flatten) == 1 and len(training_data.df_targets.columns) == 1 ): self.targets_flatten = list(training_data.df_targets.columns) y = [] for targ in self.targets_flatten: if self.num_classes[targ] >= 2: # Classification if self.multi_label: y_inner = np.stack(training_data.df_targets[targ].values) loss = "binary_crossentropy" else: y_inner = tf.keras.utils.to_categorical( training_data.df_targets[targ].values, num_classes=self.num_classes[targ], ) loss = "categorical_crossentropy" else: y_inner = training_data.df_targets[targ].values.astype( np.float, copy=False ) y.append(y_inner) # Scale the input features: if self.xscale == "minmax": self._scaler = MinMaxScaler(feature_range=(-0.5, 0.5)) elif self.xscale == "standard": self._scaler = StandardScaler() x = self._scaler.fit_transform(x) x = np.nan_to_num(x, nan=-1) if val_data is not None: val_x = val_data.get_featurized_df()[ self.optimal_descriptors[: self.n_feat] ].values val_x = self._scaler.transform(val_x) val_x = np.nan_to_num(val_x, nan=-1) val_y = [] for targ in self.targets_flatten: if self.num_classes[targ] >= 2: # Classification if self.multi_label: y_inner = np.stack(val_data.df_targets[targ].values) loss = "binary_crossentropy" else: y_inner = tf.keras.utils.to_categorical( val_data.df_targets[targ].values, num_classes=self.num_classes[targ], ) else: y_inner = val_data.df_targets[targ].values.astype( np.float, copy=False ) val_y.append(y_inner) validation_data = (val_x, val_y) else: validation_data = None # set up bounds for postprocessing if max(self.num_classes.values()) <= 2: # regression self.min_y = training_data.df_targets.values.min(axis=0) self.max_y = training_data.df_targets.values.max(axis=0) # Optionally set up print callback if verbose: if val_fraction > 0 or validation_data: if self._multi_target and val_key is not None: val_metric_key = f"val_{val_key}_mae" else: val_metric_key = "val_mae" print_callback = tf.keras.callbacks.LambdaCallback( on_epoch_end=lambda epoch, logs: print( f"epoch {epoch}: loss: {logs['loss']:.3f}, " f"val_loss:{logs['val_loss']:.3f} {val_metric_key}:{logs[val_metric_key]:.3f}" ) ) else: print_callback = tf.keras.callbacks.LambdaCallback( on_epoch_end=lambda epoch, logs: print( f"epoch {epoch}: loss: {logs['loss']:.3f}" ) ) if callbacks is None: callbacks = [print_callback] else: callbacks.append(print_callback) fit_params = { "x": x, "y": y, "epochs": epochs, "batch_size": batch_size, "verbose": 0, "validation_split": val_fraction, "validation_data": validation_data, "callbacks": callbacks, } self.model.compile( loss=loss, optimizer=tf.keras.optimizers.Adam(lr=lr), metrics=metrics, loss_weights=self.weights, ) history = self.model.fit(**fit_params) self.history = history.history def fit_preset( self, data: MODData, presets: List[Dict[str, Any]] = None, val_fraction: float = 0.15, verbose: int = 0, classification: bool = False, refit: bool = True, fast: bool = False, nested: int = 5, callbacks: List[Any] = None, n_jobs=None, ) -> Tuple[ List[List[Any]], np.ndarray, Optional[List[float]], List[List[float]], Dict[str, Any], ]: 
"""Chooses an optimal hyper-parametered MODNet model from different presets. This function implements the "inner loop" of a cross-validation workflow. By modifying the `nested` argument, it can be run in full nested mode (i.e. train n_fold * n_preset models) or just with a simple random hold-out set. The data is first fitted on several well working MODNet presets with a validation set (10% of the furnished data by default). Sets the `self.model` attribute to the model with the lowest mean validation loss across all folds. Args: data: MODData object contain training and validation samples. presets: A list of dictionaries containing custom presets. verbose: The verbosity level to pass to tf.keras val_fraction: The fraction of the data to use for validation. classification: Whether or not we are performing classification. refit: Whether or not to refit the final model for each fold with the best-performing settings. fast: Used for debugging. If `True`, only fit the first 2 presets and reduce the number of epochs. nested: integer specifying whether or not to perform a full nested CV. If 0, a simple validation split is performed based on val_fraction argument. If an integer, use this number of inner CV folds, ignoring the `val_fraction` argument. Note: If set to 1, the value will be overwritten to a default of 5 folds. n_jobs: number of jobs for multiprocessing Returns: - A list of length num_outer_folds containing lists of MODNet models of length num_inner_folds. - A list of validation losses achieved by the best model for each fold during validation (excluding refit). - The learning curve of the final (refitted) model (or `None` if `refit` is `False`) - A nested list of learning curves for each trained model of lengths (num_outer_folds, num_inner folds). - The settings of the best-performing preset. 
""" from modnet.matbench.benchmark import matbench_kfold_splits import os os.environ[ "TF_CPP_MIN_LOG_LEVEL" ] = "2" # many models will be fitted => reduce output if callbacks is None: es = tf.keras.callbacks.EarlyStopping( monitor="loss", min_delta=0.001, patience=100, verbose=verbose, mode="auto", baseline=None, restore_best_weights=False, ) callbacks = [es] if presets is None: from modnet.model_presets import gen_presets presets = gen_presets( len(data.optimal_features), len(data.df_targets), classification=classification, ) if fast and len(presets) >= 2: presets = presets[:2] for k, _ in enumerate(presets): presets[k]["epochs"] = 100 num_nested_folds = 5 if nested: num_nested_folds = nested if num_nested_folds <= 1: num_nested_folds = 5 # create tasks splits = matbench_kfold_splits( data, n_splits=num_nested_folds, classification=classification ) if not nested: splits = [ train_test_split(range(len(data.df_featurized)), test_size=val_fraction) ] n_splits = 1 else: n_splits = num_nested_folds train_val_datas = [] for train, val in splits: train_val_datas.append(data.split((train, val))) tasks = [] for i, params in enumerate(presets): n_feat = min(len(data.get_optimal_descriptors()), params["n_feat"]) for ind in range(n_splits): val_params = {} train_data, val_data = train_val_datas[ind] val_params["val_data"] = val_data tasks += [ { "train_data": train_data, "targets": self.targets, "weights": self.weights, "num_classes": self.num_classes, "n_feat": n_feat, "num_neurons": params["num_neurons"], "lr": params["lr"], "batch_size": params["batch_size"], "epochs": params["epochs"], "loss": params["loss"], "act": params["act"], "out_act": self.out_act, "callbacks": callbacks, "preset_id": i, "fold_id": ind, "verbose": verbose, **val_params, } ] val_losses = 1e20 * np.ones((len(presets), n_splits)) learning_curves = [[None for _ in range(n_splits)] for _ in range(len(presets))] models = [[None for _ in range(n_splits)] for _ in range(len(presets))] ctx = multiprocessing.get_context("spawn") pool = ctx.Pool(processes=n_jobs) LOG.info( f"Multiprocessing on {n_jobs} cores. Total of {multiprocessing.cpu_count()} cores available." 
) for res in tqdm.tqdm( pool.imap_unordered(map_validate_model, tasks, chunksize=1), total=len(tasks), ): val_loss, learning_curve, model, preset_id, fold_id = res LOG.info(f"Preset #{preset_id} fitting finished, loss: {val_loss}") # reload the model object after serialization model._restore_model() val_losses[preset_id, fold_id] = val_loss learning_curves[preset_id][fold_id] = learning_curve models[preset_id][fold_id] = model pool.close() pool.join() val_loss_per_preset = np.mean(val_losses, axis=1) best_preset_idx = int(np.argmin(val_loss_per_preset)) best_model_idx = int(np.argmin(val_losses[best_preset_idx, :])) best_preset = presets[best_preset_idx] best_learning_curve = learning_curves[best_preset_idx][best_model_idx] best_model = models[best_preset_idx][best_model_idx] LOG.info( "Preset #{} resulted in lowest validation loss with params {}".format( best_preset_idx + 1, tasks[n_splits * best_preset_idx + best_model_idx] ) ) if refit: LOG.info("Refitting with all data and parameters: {}".format(best_preset)) # Building final model n_feat = min(len(data.get_optimal_descriptors()), best_preset["n_feat"]) self.model = MODNetModel( self.targets, self.weights, num_neurons=best_preset["num_neurons"], n_feat=n_feat, act=best_preset["act"], out_act=self.out_act, num_classes=self.num_classes, ).model self.n_feat = n_feat self.fit( data, val_fraction=0, lr=best_preset["lr"], epochs=best_preset["epochs"], batch_size=best_preset["batch_size"], loss=best_preset["loss"], callbacks=callbacks, verbose=verbose, ) else: self.n_feat = best_model.n_feat self.model = best_model.model self._scaler = best_model._scaler os.environ["TF_CPP_MIN_LOG_LEVEL"] = "0" # reset return models, val_losses, best_learning_curve, learning_curves, best_preset def predict(self, test_data: MODData, return_prob=False) -> pd.DataFrame: """Predict the target values for the passed MODData. Parameters: test_data: A featurized and feature-selected `MODData` object containing the descriptors used in training. return_prob: For a classification tasks only: whether to return the probability of each class OR only return the most probable class. Returns: A `pandas.DataFrame` containing the predicted values of the targets. 
""" # prevents Nan predictions if some features are inf x = ( test_data.get_featurized_df() .replace([np.inf, -np.inf, np.nan], 0)[ self.optimal_descriptors[: self.n_feat] ] .values ) # Scale the input features: x = np.nan_to_num(x) if self._scaler is not None: x = self._scaler.transform(x) x = np.nan_to_num(x, nan=-1) p = np.array(self.model.predict(x)) if len(p.shape) == 2: p = np.array([p]) # post-process based on training data if max(self.num_classes.values()) <= 2: # regression yrange = self.max_y - self.min_y upper_bound = self.max_y + 0.25 * yrange lower_bound = self.min_y - 0.25 * yrange for i, vals in enumerate(p): out_of_range_idxs = np.where( (vals < lower_bound[i]) | (vals > upper_bound[i]) ) vals[out_of_range_idxs] = ( np.random.uniform(0, 1, size=len(out_of_range_idxs[0])) * (self.max_y[i] - self.min_y[i]) + self.min_y[i] ) p_dic = {} for i, name in enumerate(self.targets_flatten): if self.num_classes[name] >= 2: if return_prob: # temp = p[i, :, :] / (p[i, :, :].sum(axis=1)).reshape((-1, 1)) temp = p[i, :, :] for j in range(temp.shape[-1]): p_dic["{}_prob_{}".format(name, j)] = temp[:, j] else: p_dic[name] = np.argmax(p[i, :, :], axis=1) else: p_dic[name] = p[i, :, 0] predictions = pd.DataFrame(p_dic) predictions.index = test_data.structure_ids return predictions def evaluate(self, test_data: MODData) -> pd.DataFrame: """Evaluates predictions on the passed MODData by returning the corresponding score: - for regression: MAE - for classification: negative ROC AUC. averaged over the targets when multi-target. Parameters: test_data: A featurized and feature-selected `MODData` object containing the descriptors used in training. Returns: Score defined hereabove. """ # prevents Nan predictions if some features are inf x = ( test_data.get_featurized_df() .replace([np.inf, -np.inf, np.nan], 0)[ self.optimal_descriptors[: self.n_feat] ] .values ) # Scale the input features: x = np.nan_to_num(x) if self._scaler is not None: x = self._scaler.transform(x) x = np.nan_to_num(x, nan=-1) y_pred = np.array(self.model.predict(x)) if len(y_pred.shape) == 2: y_pred = np.array([y_pred]) score = [] for i, targ in enumerate(self.targets_flatten): if self.num_classes[targ] >= 2: # Classification if self.multi_label: y_true = np.stack(test_data.df_targets[targ].values) else: y_true = tf.keras.utils.to_categorical( test_data.df_targets[targ].values, num_classes=self.num_classes[targ], ) try: score.append(-roc_auc_score(y_true, y_pred[i], multi_class="ovr")) except ValueError: scores = [] for j in range(y_true.shape[1]): try: scores.append(-roc_auc_score(y_true[:, j], y_pred[i][:, j])) except ValueError: scores.append(float("nan")) score.append(np.nanmean(scores)) else: y_true = test_data.df_targets[targ].values.astype(np.float, copy=False) score.append(mean_absolute_error(y_true, y_pred[i])) return np.mean(score) def _make_picklable(self): """ transforms inner keras model to jsons so that th MODNet object becomes picklable. """ model_json = self.model.to_json() model_weights = self.model.get_weights() self.model = (model_json, model_weights) def _restore_model(self): """ restore inner keras model after running make_picklable """ model_json, model_weights = self.model self.model = tf.keras.models.model_from_json(model_json) self.model.set_weights(model_weights) def save(self, filename: str) -> None: """Save the `MODNetModel` to filename: If the filename ends in "tgz", "bz2" or "zip", the pickle will be compressed accordingly by :meth:`pandas.DataFrame.to_pickle`. 
Parameters: filename: The base filename to save to. """ self._make_picklable() pd.to_pickle(self, filename) self._restore_model() LOG.info(f"Model successfully saved as {filename}!") @staticmethod def load(filename: str) -> "MODNetModel": """Load `MODNetModel` object pickled by the :meth:`MODNetModel.save` method. If the filename ends in "tgz", "bz2" or "zip", the pickle will be decompressed accordingly by :func:`pandas.read_pickle`. Returns: The loaded `MODNetModel` object. """ pickled_data = None if isinstance(filename, Path): filename = str(filename) # handle .zip files explicitly for OS X/macOS compatibility if filename.endswith(".zip"): from zipfile import ZipFile with ZipFile(filename, "r") as zf: namelist = zf.namelist() _files = [ _ for _ in namelist if not _.startswith("__MACOSX/") or _.startswith(".DS_STORE") ] if len(_files) == 1: with zf.open(_files.pop()) as f: pickled_data = pd.read_pickle(f) if pickled_data is None: pickled_data = pd.read_pickle(filename) if isinstance(pickled_data, MODNetModel): if not hasattr(pickled_data, "__modnet_version__"): pickled_data.__modnet_version__ = "unknown" pickled_data._restore_model() LOG.info( f"Loaded {pickled_data} object, created with modnet version {pickled_data.__modnet_version__}" ) return pickled_data raise ValueError( f"File {filename} did not contain compatible data to create a MODNetModel object, " f"instead found {pickled_data.__class__.__name__}." ) def validate_model( train_data=None, val_data=None, targets=None, weights=None, num_classes=None, n_feat=100, num_neurons=[[8], [8], [8], [8]], lr=0.1, batch_size=64, epochs=100, loss="mse", act="relu", out_act="linear", xscale="minmax", callbacks=[], preset_id=None, fold_id=None, verbose=0, ): """For a given set of parameters, create a new model and train it on the passed training data, validating it against the passed validation data and returning some relevant metrics. """ model = MODNetModel( targets, weights, num_neurons=num_neurons, n_feat=n_feat, act=act, out_act=out_act, num_classes=num_classes, ) model.fit( train_data, lr=lr, epochs=epochs, batch_size=batch_size, loss=loss, xscale=xscale, callbacks=callbacks, verbose=verbose, val_fraction=0, val_data=val_data, ) learning_curve = model.history["val_loss"] val_loss = model.evaluate(val_data) # save model model._make_picklable() return val_loss, learning_curve, model, preset_id, fold_id def map_validate_model(kwargs): return validate_model(**kwargs)
[ "zipfile.ZipFile", "pandas.to_pickle", "multiprocessing.cpu_count", "tensorflow.keras.layers.BatchNormalization", "sklearn.metrics.roc_auc_score", "tensorflow.keras.callbacks.EarlyStopping", "numpy.array", "tensorflow.keras.layers.Dense", "numpy.nanmean", "pandas.read_pickle", "tensorflow.keras....
[((5852, 5890), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(n_feat,)'}), '(shape=(n_feat,))\n', (5873, 5890), True, 'import tensorflow as tf\n'), ((8390, 8446), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'f_input', 'outputs': 'final_out'}), '(inputs=f_input, outputs=final_out)\n', (8411, 8446), True, 'import tensorflow as tf\n'), ((12098, 12122), 'numpy.nan_to_num', 'np.nan_to_num', (['x'], {'nan': '(-1)'}), '(x, nan=-1)\n', (12111, 12122), True, 'import numpy as np\n'), ((18952, 19042), 'modnet.matbench.benchmark.matbench_kfold_splits', 'matbench_kfold_splits', (['data'], {'n_splits': 'num_nested_folds', 'classification': 'classification'}), '(data, n_splits=num_nested_folds, classification=\n classification)\n', (18973, 19042), False, 'from modnet.matbench.benchmark import matbench_kfold_splits\n'), ((20895, 20931), 'multiprocessing.get_context', 'multiprocessing.get_context', (['"""spawn"""'], {}), "('spawn')\n", (20922, 20931), False, 'import multiprocessing\n'), ((21736, 21763), 'numpy.mean', 'np.mean', (['val_losses'], {'axis': '(1)'}), '(val_losses, axis=1)\n', (21743, 21763), True, 'import numpy as np\n'), ((24407, 24423), 'numpy.nan_to_num', 'np.nan_to_num', (['x'], {}), '(x)\n', (24420, 24423), True, 'import numpy as np\n'), ((25879, 25898), 'pandas.DataFrame', 'pd.DataFrame', (['p_dic'], {}), '(p_dic)\n', (25891, 25898), True, 'import pandas as pd\n'), ((26817, 26833), 'numpy.nan_to_num', 'np.nan_to_num', (['x'], {}), '(x)\n', (26830, 26833), True, 'import numpy as np\n'), ((28238, 28252), 'numpy.mean', 'np.mean', (['score'], {}), '(score)\n', (28245, 28252), True, 'import numpy as np\n'), ((28728, 28771), 'tensorflow.keras.models.model_from_json', 'tf.keras.models.model_from_json', (['model_json'], {}), '(model_json)\n', (28759, 28771), True, 'import tensorflow as tf\n'), ((29180, 29208), 'pandas.to_pickle', 'pd.to_pickle', (['self', 'filename'], {}), '(self, filename)\n', (29192, 29208), True, 'import pandas as pd\n'), ((29247, 29299), 'modnet.utils.LOG.info', 'LOG.info', (['f"""Model successfully saved as {filename}!"""'], {}), "(f'Model successfully saved as {filename}!')\n", (29255, 29299), False, 'from modnet.utils import LOG\n'), ((11918, 11957), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-0.5, 0.5)'}), '(feature_range=(-0.5, 0.5))\n', (11930, 11957), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((12354, 12382), 'numpy.nan_to_num', 'np.nan_to_num', (['val_x'], {'nan': '(-1)'}), '(val_x, nan=-1)\n', (12367, 12382), True, 'import numpy as np\n'), ((18022, 18184), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""loss"""', 'min_delta': '(0.001)', 'patience': '(100)', 'verbose': 'verbose', 'mode': '"""auto"""', 'baseline': 'None', 'restore_best_weights': '(False)'}), "(monitor='loss', min_delta=0.001, patience=\n 100, verbose=verbose, mode='auto', baseline=None, restore_best_weights=\n False)\n", (18054, 18184), True, 'import tensorflow as tf\n'), ((21335, 21402), 'modnet.utils.LOG.info', 'LOG.info', (['f"""Preset #{preset_id} fitting finished, loss: {val_loss}"""'], {}), "(f'Preset #{preset_id} fitting finished, loss: {val_loss}')\n", (21343, 21402), False, 'from modnet.utils import LOG\n'), ((21794, 21824), 'numpy.argmin', 'np.argmin', (['val_loss_per_preset'], {}), '(val_loss_per_preset)\n', (21803, 21824), True, 'import numpy as np\n'), ((21855, 21896), 'numpy.argmin', 'np.argmin', 
(['val_losses[best_preset_idx, :]'], {}), '(val_losses[best_preset_idx, :])\n', (21864, 21896), True, 'import numpy as np\n'), ((24519, 24543), 'numpy.nan_to_num', 'np.nan_to_num', (['x'], {'nan': '(-1)'}), '(x, nan=-1)\n', (24532, 24543), True, 'import numpy as np\n'), ((24636, 24649), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (24644, 24649), True, 'import numpy as np\n'), ((26929, 26953), 'numpy.nan_to_num', 'np.nan_to_num', (['x'], {'nan': '(-1)'}), '(x, nan=-1)\n', (26942, 26953), True, 'import numpy as np\n'), ((27060, 27078), 'numpy.array', 'np.array', (['[y_pred]'], {}), '([y_pred])\n', (27068, 27078), True, 'import numpy as np\n'), ((30400, 30424), 'pandas.read_pickle', 'pd.read_pickle', (['filename'], {}), '(filename)\n', (30414, 30424), True, 'import pandas as pd\n'), ((30654, 30768), 'modnet.utils.LOG.info', 'LOG.info', (['f"""Loaded {pickled_data} object, created with modnet version {pickled_data.__modnet_version__}"""'], {}), "(\n f'Loaded {pickled_data} object, created with modnet version {pickled_data.__modnet_version__}'\n )\n", (30662, 30768), False, 'from modnet.utils import LOG\n'), ((5992, 6048), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_neurons[0][i]'], {'activation': 'act'}), '(num_neurons[0][i], activation=act)\n', (6013, 6048), True, 'import tensorflow as tf\n'), ((12026, 12042), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (12040, 12042), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((14903, 14934), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (14927, 14934), True, 'import tensorflow as tf\n'), ((24987, 25046), 'numpy.where', 'np.where', (['((vals < lower_bound[i]) | (vals > upper_bound[i]))'], {}), '((vals < lower_bound[i]) | (vals > upper_bound[i]))\n', (24995, 25046), True, 'import numpy as np\n'), ((29934, 29956), 'zipfile.ZipFile', 'ZipFile', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (29941, 29956), False, 'from zipfile import ZipFile\n'), ((6163, 6199), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (6197, 6199), True, 'import tensorflow as tf\n'), ((6489, 6545), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_neurons[1][j]'], {'activation': 'act'}), '(num_neurons[1][j], activation=act)\n', (6510, 6545), True, 'import tensorflow as tf\n'), ((11260, 11307), 'numpy.stack', 'np.stack', (['training_data.df_targets[targ].values'], {}), '(training_data.df_targets[targ].values)\n', (11268, 11307), True, 'import numpy as np\n'), ((11409, 11517), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['training_data.df_targets[targ].values'], {'num_classes': 'self.num_classes[targ]'}), '(training_data.df_targets[targ].values,\n num_classes=self.num_classes[targ])\n', (11438, 11517), True, 'import tensorflow as tf\n'), ((21051, 21078), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (21076, 21078), False, 'import multiprocessing\n'), ((25768, 25797), 'numpy.argmax', 'np.argmax', (['p[i, :, :]'], {'axis': '(1)'}), '(p[i, :, :], axis=1)\n', (25777, 25797), True, 'import numpy as np\n'), ((27282, 27325), 'numpy.stack', 'np.stack', (['test_data.df_targets[targ].values'], {}), '(test_data.df_targets[targ].values)\n', (27290, 27325), True, 'import numpy as np\n'), ((27377, 27481), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['test_data.df_targets[targ].values'], 
{'num_classes': 'self.num_classes[targ]'}), '(test_data.df_targets[targ].values,\n num_classes=self.num_classes[targ])\n', (27406, 27481), True, 'import tensorflow as tf\n'), ((28182, 28220), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_true', 'y_pred[i]'], {}), '(y_true, y_pred[i])\n', (28201, 28220), False, 'from sklearn.metrics import mean_absolute_error, roc_auc_score\n'), ((6676, 6712), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (6710, 6712), True, 'import tensorflow as tf\n'), ((7133, 7189), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_neurons[2][k]'], {'activation': 'act'}), '(num_neurons[2][k], activation=act)\n', (7154, 7189), True, 'import tensorflow as tf\n'), ((12593, 12635), 'numpy.stack', 'np.stack', (['val_data.df_targets[targ].values'], {}), '(val_data.df_targets[targ].values)\n', (12601, 12635), True, 'import numpy as np\n'), ((12749, 12853), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['val_data.df_targets[targ].values'], {'num_classes': 'self.num_classes[targ]'}), '(val_data.df_targets[targ].values, num_classes\n =self.num_classes[targ])\n', (12778, 12853), True, 'import tensorflow as tf\n'), ((30321, 30338), 'pandas.read_pickle', 'pd.read_pickle', (['f'], {}), '(f)\n', (30335, 30338), True, 'import pandas as pd\n'), ((7336, 7372), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (7370, 7372), True, 'import tensorflow as tf\n'), ((7675, 7716), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_neurons[3][li]'], {}), '(num_neurons[3][li])\n', (7696, 7716), True, 'import tensorflow as tf\n'), ((7905, 8011), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['n'], {'activation': "('sigmoid' if multi_label else 'softmax')", 'name': 'group[prop_idx][pi]'}), "(n, activation='sigmoid' if multi_label else 'softmax',\n name=group[prop_idx][pi])\n", (7926, 8011), True, 'import tensorflow as tf\n'), ((8191, 8261), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': 'out_act', 'name': 'group[prop_idx][pi]'}), '(1, activation=out_act, name=group[prop_idx][pi])\n', (8212, 8261), True, 'import tensorflow as tf\n'), ((27604, 27655), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_true', 'y_pred[i]'], {'multi_class': '"""ovr"""'}), "(y_true, y_pred[i], multi_class='ovr')\n", (27617, 27655), False, 'from sklearn.metrics import mean_absolute_error, roc_auc_score\n'), ((28027, 28045), 'numpy.nanmean', 'np.nanmean', (['scores'], {}), '(scores)\n', (28037, 28045), True, 'import numpy as np\n'), ((27849, 27893), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_true[:, j]', 'y_pred[i][:, j]'], {}), '(y_true[:, j], y_pred[i][:, j])\n', (27862, 27893), False, 'from sklearn.metrics import mean_absolute_error, roc_auc_score\n')]
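A hedged end-to-end sketch for the `MODNetModel` row above. It assumes a featurized and feature-selected `modnet.preprocessing.MODData` instance named `data` with a single regression target column "eform" (construction of the MODData is library-specific and elided here); the target name and file path are hypothetical:

# `data` is an assumed, already featurized + feature-selected MODData
model = MODNetModel(
    targets=[[["eform"]]],                 # nested hierarchy: one group, one property, one target
    weights={"eform": 1.0},                # relative loss weight per target name
    num_neurons=([64], [32], [16], [16]),
    n_feat=64,
)
model.fit(data, val_fraction=0.1, epochs=200, batch_size=128)
preds = model.predict(data)                # pandas DataFrame indexed by structure ids
model.save("eform_model.pkl")              # pickles via pandas, keras model serialized to JSON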
#coding:utf-8
#
# Copyright (c) 2018-present, the Authors of the OpenKE-PyTorch (old).
# All rights reserved.
#
# Link to the project: https://github.com/thunlp/OpenKE/tree/OpenKE-PyTorch(old)
#
# Note: This code was partially adapted by <NAME>
# to adapt to the case of HyperKG, described in:
# https://arxiv.org/abs/1908.04895
#

import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
from .rsgd import RiemannianSGD, euclidean_retraction, poincare_grad
from .data import create_adjacencies
import torch.optim as optim
import os
import time
import datetime
import ctypes
import json


class Config(object):
    r'''
    use ctypes to call C functions from Python and set essential parameters.
    '''

    def __init__(self):
        base_file = os.path.abspath(os.path.join(os.path.dirname(__file__), '../release/Base.so'))
        self.lib = ctypes.cdll.LoadLibrary(base_file)
        self.lib.sampling.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64, ctypes.c_int64, ctypes.c_int64]
        self.lib.getFrequencies.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
        self.lib.getHeadBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
        self.lib.getTailBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
        self.lib.testHead.argtypes = [ctypes.c_void_p, ctypes.c_int64]
        self.lib.testTail.argtypes = [ctypes.c_void_p, ctypes.c_int64]
        self.lib.getTestBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
        self.lib.getValidBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
        self.lib.getBestThreshold.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
        self.lib.test_triple_classification.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
        self.test_flag = False
        self.in_path = "./"
        self.out_path = "./"
        self.bern = 0
        self.hidden_size = 100
        self.ent_size = self.hidden_size
        self.rel_size = self.hidden_size
        self.train_times = 0
        self.margin = 1.0
        self.nbatches = 100
        self.negative_ent = 1
        self.negative_rel = 0
        self.workThreads = 1
        self.alpha = 0.001
        self.lr_multiplier = 1.0
        self.burn_in_epochs = 30
        self.lmbda = 0.000
        self.gpu_activated = False
        self.log_on = 1
        self.lr_decay = 0.000
        self.weight_decay = 0.000
        self.exportName = None
        self.importName = None
        self.export_steps = 0
        self.opt_method = "SGD"
        self.optimizer = None
        self.test_link_prediction = False
        self.test_triple_classification = False
        self.valid_every = 5
        self.int_type = np.int32
        self.data_loader_on = False
        self.train_data = None
        self.data_loader = None
        self.dataloader_iterator = None
        self.data_adjacencies = None

    def init_link_prediction(self):
        r'''
        import essential files and set essential interfaces for link prediction
        '''
        self.lib.importTestFiles()
        self.lib.importTypeFiles()
        self.test_h = np.zeros(self.lib.getEntityTotal(), dtype=self.int_type)
        self.test_t = np.zeros(self.lib.getEntityTotal(), dtype=self.int_type)
        self.test_r = np.zeros(self.lib.getEntityTotal(), dtype=self.int_type)
        self.test_h_addr = self.test_h.__array_interface__['data'][0]
        self.test_t_addr = self.test_t.__array_interface__['data'][0]
        self.test_r_addr = self.test_r.__array_interface__['data'][0]

    def init_triple_classification(self):
        r'''
        import essential files and set essential interfaces for triple classification
        '''
        self.lib.importTestFiles()
        self.lib.importTypeFiles()
        self.test_pos_h = np.zeros(self.lib.getTestTotal(), dtype=self.int_type)
        self.test_pos_t = np.zeros(self.lib.getTestTotal(), dtype=self.int_type)
        self.test_pos_r = np.zeros(self.lib.getTestTotal(), dtype=self.int_type)
        self.test_neg_h = np.zeros(self.lib.getTestTotal(), dtype=self.int_type)
        self.test_neg_t = np.zeros(self.lib.getTestTotal(), dtype=self.int_type)
        self.test_neg_r = np.zeros(self.lib.getTestTotal(), dtype=self.int_type)
        self.test_pos_h_addr = self.test_pos_h.__array_interface__['data'][0]
        self.test_pos_t_addr = self.test_pos_t.__array_interface__['data'][0]
        self.test_pos_r_addr = self.test_pos_r.__array_interface__['data'][0]
        self.test_neg_h_addr = self.test_neg_h.__array_interface__['data'][0]
        self.test_neg_t_addr = self.test_neg_t.__array_interface__['data'][0]
        self.test_neg_r_addr = self.test_neg_r.__array_interface__['data'][0]
        self.valid_pos_h = np.zeros(self.lib.getValidTotal(), dtype=self.int_type)
        self.valid_pos_t = np.zeros(self.lib.getValidTotal(), dtype=self.int_type)
        self.valid_pos_r = np.zeros(self.lib.getValidTotal(), dtype=self.int_type)
        self.valid_neg_h = np.zeros(self.lib.getValidTotal(), dtype=self.int_type)
        self.valid_neg_t = np.zeros(self.lib.getValidTotal(), dtype=self.int_type)
        self.valid_neg_r = np.zeros(self.lib.getValidTotal(), dtype=self.int_type)
        self.valid_pos_h_addr = self.valid_pos_h.__array_interface__['data'][0]
        self.valid_pos_t_addr = self.valid_pos_t.__array_interface__['data'][0]
        self.valid_pos_r_addr = self.valid_pos_r.__array_interface__['data'][0]
        self.valid_neg_h_addr = self.valid_neg_h.__array_interface__['data'][0]
        self.valid_neg_t_addr = self.valid_neg_t.__array_interface__['data'][0]
        self.valid_neg_r_addr = self.valid_neg_r.__array_interface__['data'][0]
        self.relThresh = np.zeros(self.lib.getRelationTotal(), dtype=np.float32)
        self.relThresh_addr = self.relThresh.__array_interface__['data'][0]

    # prepare for train and test
    def init(self):
        self.trainModel = None
        if self.in_path != None:
            self.lib.setInPath(ctypes.create_string_buffer(self.in_path.encode(), len(self.in_path) * 2))
            self.lib.setBern(self.bern)
            self.lib.setWorkThreads(self.workThreads)
            self.lib.randReset()
            self.lib.importTrainFiles()
            self.relTotal = self.lib.getRelationTotal()
            self.entTotal = self.lib.getEntityTotal()
            self.trainTotal = self.lib.getTrainTotal()
            self.testTotal = self.lib.getTestTotal()
            self.validTotal = self.lib.getValidTotal()
            self.batch_size = int(self.lib.getTrainTotal() / self.nbatches)
            self.batch_seq_size = self.batch_size * (1 + self.negative_ent + self.negative_rel)
            self.batch_h = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=self.int_type)
            self.batch_t = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=self.int_type)
            self.batch_r = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=self.int_type)
            self.batch_y = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=np.float32)
            self.batch_h_addr = self.batch_h.__array_interface__['data'][0]
            self.batch_t_addr = self.batch_t.__array_interface__['data'][0]
            self.batch_r_addr = self.batch_r.__array_interface__['data'][0]
            self.batch_y_addr = self.batch_y.__array_interface__['data'][0]
            self.check_for_data_loader()
            self.data_adjacencies = create_adjacencies(self.in_path + 'train2id.txt', self.entTotal, int_type=self.int_type, reverse=True)
        if self.test_link_prediction:
            self.init_link_prediction()
        if self.test_triple_classification:
            self.init_triple_classification()

    def get_ent_total(self):
        return self.entTotal

    def get_rel_total(self):
        return self.relTotal

    def set_lmbda(self, lmbda):
        self.lmbda = lmbda

    def set_optimizer(self, optimizer):
        self.optimizer = optimizer

    def set_opt_method(self, method):
        self.opt_method = method

    def set_test_link_prediction(self, flag):
        self.test_link_prediction = flag

    def set_test_triple_classification(self, flag):
        self.test_triple_classification = flag

    def set_log_on(self, flag):
        self.log_on = flag

    def set_data_loader(self, flag):
        self.data_loader_on = flag

    def set_gpu(self, flag):
        self.gpu_activated = flag and torch.cuda.is_available()

    def set_alpha(self, alpha):
        self.alpha = alpha

    def set_in_path(self, path):
        self.in_path = path

    def set_out_files(self, path):
        self.out_path = path

    def set_bern(self, bern):
        self.bern = bern

    def set_dimension(self, dim):
        self.hidden_size = dim
        self.ent_size = dim
        self.rel_size = dim

    def set_ent_dimension(self, dim):
        self.ent_size = dim

    def set_rel_dimension(self, dim):
        self.rel_size = dim

    def set_train_times(self, times):
        self.train_times = times

    def set_valid_every(self, times):
        self.valid_every = times

    def set_burn_in_epochs(self, times):
        self.burn_in_epochs = times

    def set_nbatches(self, nbatches):
        self.nbatches = nbatches

    def set_margin(self, margin):
        self.margin = margin

    def set_work_threads(self, threads):
        self.workThreads = threads

    def set_ent_neg_rate(self, rate):
        self.negative_ent = rate

    def set_rel_neg_rate(self, rate):
        self.negative_rel = rate

    def set_int_type(self, t='int32'):
        if t == 'int32':
            self.int_type = np.int32
        elif t == 'int64':
            self.int_type = np.int64
        else:
            raise ValueError(f'Not a proper integer type: {t}.')

    def set_import_files(self, path):
        self.importName = path

    def set_export_files(self, path):
        self.exportName = path

    def set_export_steps(self, steps):
        self.export_steps = steps

    def set_lr_decay(self, lr_decay):
        self.lr_decay = lr_decay

    def set_weight_decay(self, weight_decay):
        self.weight_decay = weight_decay

    def belongs_in_poincare_family(self):
        return type(self.trainModel).__name__ in ['Poincare']

    def check_for_data_loader(self):
        if self.data_loader_on:
            from .data import load_dataset
            from torch.utils.data import DataLoader
            self.train_data = load_dataset(self.in_path + 'train2id.txt', nnegs=self.negative_ent, int_type=self.int_type)
            # self.data_loader = DataLoader(self.train_data, batch_size=self.batch_size, shuffle=True, num_workers=self.workThreads, collate_fn=self.train_data.collate)
            self.data_loader = DataLoader(self.train_data, batch_size=self.batch_size, shuffle=True, num_workers=0, collate_fn=self.train_data.collate)

    # call function for sampling
    def sampling(self):
        if self.data_loader_on:
            # call pytorch function for sampling
            # print('The pytorch sampling is being used!')
            batch_h, batch_t, batch_r, batch_y = next(self.dataloader_iterator)
            batch_size = batch_h.shape[0]
            self.batch_h[:batch_size], self.batch_t[:batch_size], self.batch_r[:batch_size], self.batch_y[:batch_size] = batch_h, batch_t, batch_r, batch_y
        else:
            # call c function for sampling
            # print('The c custom sampling is being used!')
            self.lib.sampling(self.batch_h_addr, self.batch_t_addr, self.batch_r_addr, self.batch_y_addr, self.batch_size, self.negative_ent, self.negative_rel)

    # save model
    def save_pytorch(self):
        torch.save(self.trainModel.state_dict(), self.exportName)

    # restore model
    def restore_pytorch(self):
        self.trainModel.load_state_dict(torch.load(self.importName))

    # save model
    def export_variables(self, path=None):
        if path == None:
            torch.save(self.trainModel.state_dict(), self.exportName)
        else:
            torch.save(self.trainModel.state_dict(), path)

    def import_variables(self, path=None):
        if path == None:
            self.trainModel.load_state_dict(torch.load(self.importName))
        else:
            self.trainModel.load_state_dict(torch.load(path))

    def get_parameter_lists(self):
        return self.trainModel.cpu().state_dict()

    def get_parameters_by_name(self, var_name):
        return self.trainModel.cpu().state_dict().get(var_name)

    # return dict of parameters
    # parameter_name -> parameters
    def get_parameters(self, mode="numpy"):
        res = {}
        lists = self.get_parameter_lists()
        for var_name in lists:
            if mode == "numpy":
                res[var_name] = lists[var_name].numpy()
            elif mode == "list":
                res[var_name] = lists[var_name].numpy().tolist()
            else:
                res[var_name] = lists[var_name]
        return res

    def save_parameters(self, path=None):
        if path == None:
            path = self.out_path
        f = open(path, "w")
        f.write(json.dumps(self.get_parameters("list")))
        f.close()

    def set_parameters_by_name(self, var_name, tensor):
        self.trainModel.state_dict().get(var_name).copy_(torch.from_numpy(np.array(tensor)))

    def set_parameters(self, lists):
        for i in lists:
            self.set_parameters_by_name(i, lists[i])

    def set_model(self, model):
        self.model = model
        self.trainModel = self.model(config=self)
        if self.gpu_activated:
            self.trainModel.cuda()
        if self.optimizer != None:
            pass
        elif self.opt_method == "RiemannianSGD" or self.opt_method == "RSGD":
            self.optimizer = RiemannianSGD(self.trainModel.parameters(), rgrad=poincare_grad, retraction=euclidean_retraction, lr=self.alpha)
        elif self.opt_method == "Adagrad" or self.opt_method == "adagrad":
            self.optimizer = optim.Adagrad(self.trainModel.parameters(), lr=self.alpha, lr_decay=self.lr_decay, weight_decay=self.weight_decay)
        elif self.opt_method == "Adadelta" or self.opt_method == "adadelta":
            self.optimizer = optim.Adadelta(self.trainModel.parameters(), lr=self.alpha)
        elif self.opt_method == "Adam" or self.opt_method == "adam":
            self.optimizer = optim.Adam(self.trainModel.parameters(), lr=self.alpha)
        else:
            self.optimizer = optim.SGD(self.trainModel.parameters(), lr=self.alpha)

    def run(self):
        from torch.optim.lr_scheduler import ExponentialLR
        torch.manual_seed(0)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        np.random.seed(0)
        if self.importName != None:
            self.restore_pytorch()
        for epoch in range(self.train_times):
            res = 0.0
            lr = self.alpha
            # lr = self.alpha * (0.9 ** (epoch // 100))
            if self.data_loader_on:
                self.train_data.burnin = False
                self.dataloader_iterator = iter(self.data_loader)
            if self.belongs_in_poincare_family() and (epoch + 1) <= self.burn_in_epochs:
                # self.data_loader_on = False
                if self.data_loader_on:
                    self.train_data.burnin = True
                lr = self.lr_multiplier * self.alpha
                self.batch_seq_size = self.batch_size * (1 + self.negative_ent + self.negative_rel)
                self.batch_h = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=self.int_type)
                self.batch_t = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=self.int_type)
                self.batch_r = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=self.int_type)
                self.batch_y = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=np.float32)
                self.batch_h_addr = self.batch_h.__array_interface__['data'][0]
                self.batch_t_addr = self.batch_t.__array_interface__['data'][0]
                self.batch_r_addr = self.batch_r.__array_interface__['data'][0]
                self.batch_y_addr = self.batch_y.__array_interface__['data'][0]
            for batch in range(self.nbatches):
                self.sampling()
                self.optimizer.zero_grad()
                loss = self.trainModel()
                res = res + loss.item()
                loss.backward()
                if self.opt_method == "RiemannianSGD" or self.opt_method == "RSGD":
                    self.optimizer.step(lr=lr)
                else:
                    self.optimizer.step()
            if self.exportName != None and (self.export_steps != 0 and epoch % self.export_steps == 0):
                self.save_pytorch()
            if self.log_on == 1:
                print(f'Epoch {epoch}: loss: {res}')
            if (epoch + 1) % self.valid_every == 0:
                print(f'Validation begins.')
                self.test(epoch + 1, 0)
        if self.exportName != None:
            self.save_pytorch()
        if self.out_path != None:
            self.save_parameters(self.out_path)

    def test(self, save_epoch, show=0):
        self.lib.zeroOut()
        if self.importName != None:
            self.restore_pytorch()
        if self.test_link_prediction:
            total = self.lib.getTestTotal()
            for epoch in range(total):
                self.lib.getHeadBatch(self.test_h_addr, self.test_t_addr, self.test_r_addr)
                res = self.trainModel.predict(self.test_h, self.test_t, self.test_r)
                self.lib.testHead(res.data.numpy().__array_interface__['data'][0], show)
                if epoch % 1000 == 0:
                    np.savetxt("./debug/head_res_" + str(epoch) + ".txt", res.detach().numpy(), newline=" ")
                self.lib.getTailBatch(self.test_h_addr, self.test_t_addr, self.test_r_addr)
                res = self.trainModel.predict(self.test_h, self.test_t, self.test_r)
                self.lib.testTail(res.data.numpy().__array_interface__['data'][0], show)
                if epoch % 1000 == 0:
                    np.savetxt("./debug/tail_res_" + str(epoch) + ".txt", res.detach().numpy(), newline=" ")
                if self.log_on and show == 1:
                    print(epoch)
            self.lib.test_link_prediction()
            save_path = './debug/' + self.in_path.split('/')[-2] + '/hyperkg_' + str(save_epoch)
            self.export_variables(save_path + '.pt')
            # self.save_parameters(save_path + '.vec.json')
        if self.test_triple_classification:
            self.lib.getValidBatch(self.valid_pos_h_addr, self.valid_pos_t_addr, self.valid_pos_r_addr, self.valid_neg_h_addr, self.valid_neg_t_addr, self.valid_neg_r_addr)
            res_pos = self.trainModel.predict(self.valid_pos_h, self.valid_pos_t, self.valid_pos_r)
            res_neg = self.trainModel.predict(self.valid_neg_h, self.valid_neg_t, self.valid_neg_r)
            self.lib.getBestThreshold(self.relThresh_addr, res_pos.data.numpy().__array_interface__['data'][0], res_neg.data.numpy().__array_interface__['data'][0])
            self.lib.getTestBatch(self.test_pos_h_addr, self.test_pos_t_addr, self.test_pos_r_addr, self.test_neg_h_addr, self.test_neg_t_addr, self.test_neg_r_addr)
            res_pos = self.trainModel.predict(self.test_pos_h, self.test_pos_t, self.test_pos_r)
            res_neg = self.trainModel.predict(self.test_neg_h, self.test_neg_t, self.test_neg_r)
            self.lib.test_triple_classification(self.relThresh_addr, res_pos.data.numpy().__array_interface__['data'][0], res_neg.data.numpy().__array_interface__['data'][0])

    def predict_head_entity(self, t, r, k):
        r'''This method predicts the top k head entities given tail entity and relation.

        Args:
            t (int): tail entity id
            r (int): relation id
            k (int): top k head entities

        Returns:
            list: k possible head entity ids
        '''
        self.init_link_prediction()
        if self.importName != None:
            self.restore_pytorch()
        test_h = np.array(range(self.entTotal))
        test_r = np.array([r] * self.entTotal)
        test_t = np.array([t] * self.entTotal)
        res = self.trainModel.predict(test_h, test_t, test_r).data.numpy().reshape(-1).argsort()[:k]
        print(res)
        return res

    def predict_tail_entity(self, h, r, k):
        r'''This method predicts the top k tail entities given head entity and relation.

        Args:
            h (int): head entity id
            r (int): relation id
            k (int): top k tail entities

        Returns:
            list: k possible tail entity ids
        '''
        self.init_link_prediction()
        if self.importName != None:
            self.restore_pytorch()
        test_h = np.array([h] * self.entTotal)
        test_r = np.array([r] * self.entTotal)
        test_t = np.array(range(self.entTotal))
        res = self.trainModel.predict(test_h, test_t, test_r).data.numpy().reshape(-1).argsort()[:k]
        print(res)
        return res

    def predict_relation(self, h, t, k):
        r'''This method predicts the top k relation ids given head entity and tail entity.

        Args:
            h (int): head entity id
            t (int): tail entity id
            k (int): top k relations

        Returns:
            list: k possible relation ids
        '''
        self.init_link_prediction()
        if self.importName != None:
            self.restore_pytorch()
        test_h = np.array([h] * self.relTotal)
        test_r = np.array(range(self.relTotal))
        test_t = np.array([t] * self.relTotal)
        res = self.trainModel.predict(test_h, test_t, test_r).data.numpy().reshape(-1).argsort()[:k]
        print(res)
        return res

    def predict_triple(self, h, t, r, thresh=None):
        r'''This method tells you whether the given triple (h, t, r) is correct or wrong

        Args:
            h (int): head entity id
            t (int): tail entity id
            r (int): relation id
            thresh (float): threshold for the triple
        '''
        self.init_triple_classification()
        if self.importName != None:
            self.restore_pytorch()
        res = self.trainModel.predict(np.array([h]), np.array([t]), np.array([r])).data.numpy()
        if thresh != None:
            if res < thresh:
                print("triple (%d,%d,%d) is correct" % (h, t, r))
            else:
                print("triple (%d,%d,%d) is wrong" % (h, t, r))
            return
        self.lib.getValidBatch(self.valid_pos_h_addr, self.valid_pos_t_addr, self.valid_pos_r_addr, self.valid_neg_h_addr, self.valid_neg_t_addr, self.valid_neg_r_addr)
        res_pos = self.trainModel.predict(self.valid_pos_h, self.valid_pos_t, self.valid_pos_r)
        res_neg = self.trainModel.predict(self.valid_neg_h, self.valid_neg_t, self.valid_neg_r)
        self.lib.getBestThreshold(self.relThresh_addr, res_pos.data.numpy().__array_interface__['data'][0], res_neg.data.numpy().__array_interface__['data'][0])
        if res < self.relThresh[r]:
            print("triple (%d,%d,%d) is correct" % (h, t, r))
        else:
            print("triple (%d,%d,%d) is wrong" % (h, t, r))
[ "torch.manual_seed", "ctypes.cdll.LoadLibrary", "torch.load", "numpy.array", "numpy.zeros", "torch.cuda.is_available", "os.path.dirname", "numpy.random.seed", "torch.utils.data.DataLoader" ]
[((876, 910), 'ctypes.cdll.LoadLibrary', 'ctypes.cdll.LoadLibrary', (['base_file'], {}), '(base_file)\n', (899, 910), False, 'import ctypes\n'), ((13304, 13324), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (13321, 13324), False, 'import torch\n'), ((13412, 13429), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (13426, 13429), True, 'import numpy as np\n'), ((18099, 18128), 'numpy.array', 'np.array', (['([r] * self.entTotal)'], {}), '([r] * self.entTotal)\n', (18107, 18128), True, 'import numpy as np\n'), ((18140, 18169), 'numpy.array', 'np.array', (['([t] * self.entTotal)'], {}), '([t] * self.entTotal)\n', (18148, 18169), True, 'import numpy as np\n'), ((18658, 18687), 'numpy.array', 'np.array', (['([h] * self.entTotal)'], {}), '([h] * self.entTotal)\n', (18666, 18687), True, 'import numpy as np\n'), ((18699, 18728), 'numpy.array', 'np.array', (['([r] * self.entTotal)'], {}), '([r] * self.entTotal)\n', (18707, 18728), True, 'import numpy as np\n'), ((19252, 19281), 'numpy.array', 'np.array', (['([h] * self.relTotal)'], {}), '([h] * self.relTotal)\n', (19260, 19281), True, 'import numpy as np\n'), ((19335, 19364), 'numpy.array', 'np.array', (['([t] * self.relTotal)'], {}), '([t] * self.relTotal)\n', (19343, 19364), True, 'import numpy as np\n'), ((6287, 6383), 'numpy.zeros', 'np.zeros', (['(self.batch_size * (1 + self.negative_ent + self.negative_rel))'], {'dtype': 'self.int_type'}), '(self.batch_size * (1 + self.negative_ent + self.negative_rel),\n dtype=self.int_type)\n', (6295, 6383), True, 'import numpy as np\n'), ((6400, 6496), 'numpy.zeros', 'np.zeros', (['(self.batch_size * (1 + self.negative_ent + self.negative_rel))'], {'dtype': 'self.int_type'}), '(self.batch_size * (1 + self.negative_ent + self.negative_rel),\n dtype=self.int_type)\n', (6408, 6496), True, 'import numpy as np\n'), ((6513, 6609), 'numpy.zeros', 'np.zeros', (['(self.batch_size * (1 + self.negative_ent + self.negative_rel))'], {'dtype': 'self.int_type'}), '(self.batch_size * (1 + self.negative_ent + self.negative_rel),\n dtype=self.int_type)\n', (6521, 6609), True, 'import numpy as np\n'), ((6626, 6719), 'numpy.zeros', 'np.zeros', (['(self.batch_size * (1 + self.negative_ent + self.negative_rel))'], {'dtype': 'np.float32'}), '(self.batch_size * (1 + self.negative_ent + self.negative_rel),\n dtype=np.float32)\n', (6634, 6719), True, 'import numpy as np\n'), ((7913, 7938), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7936, 7938), False, 'import torch\n'), ((9928, 10052), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_data'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': '(0)', 'collate_fn': 'self.train_data.collate'}), '(self.train_data, batch_size=self.batch_size, shuffle=True,\n num_workers=0, collate_fn=self.train_data.collate)\n', (9938, 10052), False, 'from torch.utils.data import DataLoader\n'), ((10892, 10919), 'torch.load', 'torch.load', (['self.importName'], {}), '(self.importName)\n', (10902, 10919), False, 'import torch\n'), ((14049, 14145), 'numpy.zeros', 'np.zeros', (['(self.batch_size * (1 + self.negative_ent + self.negative_rel))'], {'dtype': 'self.int_type'}), '(self.batch_size * (1 + self.negative_ent + self.negative_rel),\n dtype=self.int_type)\n', (14057, 14145), True, 'import numpy as np\n'), ((14162, 14258), 'numpy.zeros', 'np.zeros', (['(self.batch_size * (1 + self.negative_ent + self.negative_rel))'], {'dtype': 'self.int_type'}), '(self.batch_size * (1 + self.negative_ent + self.negative_rel),\n 
dtype=self.int_type)\n', (14170, 14258), True, 'import numpy as np\n'), ((14275, 14371), 'numpy.zeros', 'np.zeros', (['(self.batch_size * (1 + self.negative_ent + self.negative_rel))'], {'dtype': 'self.int_type'}), '(self.batch_size * (1 + self.negative_ent + self.negative_rel),\n dtype=self.int_type)\n', (14283, 14371), True, 'import numpy as np\n'), ((14388, 14481), 'numpy.zeros', 'np.zeros', (['(self.batch_size * (1 + self.negative_ent + self.negative_rel))'], {'dtype': 'np.float32'}), '(self.batch_size * (1 + self.negative_ent + self.negative_rel),\n dtype=np.float32)\n', (14396, 14481), True, 'import numpy as np\n'), ((811, 836), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (826, 836), False, 'import os\n'), ((11214, 11241), 'torch.load', 'torch.load', (['self.importName'], {}), '(self.importName)\n', (11224, 11241), False, 'import torch\n'), ((11286, 11302), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (11296, 11302), False, 'import torch\n'), ((12154, 12170), 'numpy.array', 'np.array', (['tensor'], {}), '(tensor)\n', (12162, 12170), True, 'import numpy as np\n'), ((19884, 19897), 'numpy.array', 'np.array', (['[h]'], {}), '([h])\n', (19892, 19897), True, 'import numpy as np\n'), ((19899, 19912), 'numpy.array', 'np.array', (['[t]'], {}), '([t])\n', (19907, 19912), True, 'import numpy as np\n'), ((19914, 19927), 'numpy.array', 'np.array', (['[r]'], {}), '([r])\n', (19922, 19927), True, 'import numpy as np\n')]
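The three predict_* methods in the record above share one ranking idiom: tile the fixed arguments across every candidate id, score all triples in a single batch, and take the top-k of the argsorted scores (lower score = more plausible). A minimal self-contained sketch of that idiom follows; the entity count and the toy score function standing in for trainModel.predict are illustrative assumptions, not part of the code above.

import numpy as np

ENT_TOTAL = 5  # hypothetical entity count for the toy example

def score(h, t, r):
    # toy stand-in for trainModel.predict: lower = more plausible
    return np.abs(h + r - t).astype(float)

def predict_tail(h, r, k):
    test_h = np.array([h] * ENT_TOTAL)    # head fixed, tiled over all candidates
    test_r = np.array([r] * ENT_TOTAL)    # relation fixed, tiled
    test_t = np.array(range(ENT_TOTAL))   # every candidate tail id
    return score(test_h, test_t, test_r).reshape(-1).argsort()[:k]

print(predict_tail(h=1, r=2, k=3))       # ids of the 3 most plausible tails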
""" Module containing all segmentation related functions. """ from typing import List, Tuple, NamedTuple, Generator import cv2 as cv import numpy as np from skimage import measure, morphology, segmentation as segment from scipy import ndimage # ========================================================== # SETTINGS # ========================================================== # kernel used in first convolution C_KERNEL = cv.getStructuringElement(cv.MORPH_ELLIPSE, (9, 9)) # average of values in image that the image is shifted to after convolution TARGET_AVERAGE = 180.0 # threshold value used after convolution to select regions of interests THRESHOLD = 240.0 # kernel used in morphological closing MC_KERNEL = cv.getStructuringElement(cv.MORPH_ELLIPSE, (9, 9)) # kernel used in creating labels from local maxes SEEDS_STRUCT = np.ones((3, 3)) # settings of active contour algorithm SNAKES_SIGMA = 5 SNAKES_ITERATIONS = 5 SNAKES_BALLOON = 1 SNAKES_THRESHOLD = 0.6 SNAKES_SMOOTHING = 0 # settings used in calculating local maxes LMAX_KERNEL = cv.getStructuringElement(cv.MORPH_ELLIPSE, (21, 21)) LMAX_FRACTION = 0.95 LMAX_THRESHOLD = 10 LMAX_CONNECT_DISTANCE = 10 # mask size below which cells are filtered out as too small MIN_CELL_SIZE = 900 # mask size over which cells are filtered out as too big MAX_CELL_SIZE = 16000 # ========================================================== # DATA STRUCTURES # ========================================================== class Segment(NamedTuple): """ Tuple that represents masked part of image. """ x: int y: int mask: np.ndarray @property def slice_x(self) -> slice: """ Returns slice along first axis that match this segment. """ return slice(self.x, self.x + self.mask.shape[0]) @property def slice_y(self) -> slice: """ Returns slice along second axis that match this segment. """ return slice(self.y, self.y + self.mask.shape[1]) @property def slices(self) -> Tuple[slice, slice]: """ Returns slices that match this segment. """ return self.slice_x, self.slice_y @staticmethod def from_mask(mask: np.ndarray) -> 'Segment': """ Creates segment by cropping given mask. """ rows, cols = np.where(mask) xs = slice(rows.min(), rows.max()+1) ys = slice(cols.min(), cols.max()+1) return Segment(xs.start, ys.start, mask[xs, ys]) class SegmentationResult(NamedTuple): """ Tuple that contains segmentation results while also providing helpful getters. """ img: np.ndarray segments: List[Segment] @property def cells(self) -> Generator[np.ndarray, None, None]: """ Yields segments of original image that contain cells. """ for seg in self.segments: yield self.img[seg.slices] @property def cells_masked(self) -> Generator[np.ndarray, None, None]: """ Yields segments of original image that contain cells with parts that do not belong to cell set to 0. """ for seg in self.segments: yield self.img[seg.slices] * seg.mask @property def masks_full(self) -> Generator[np.ndarray, None, None]: """ Yields cell masks that have shape of original image. """ for seg in self.segments: m = np.zeros_like(self.img) m[seg.slices] = seg.mask yield m def save(self, path: str, compressed: bool = True): """ Saves this segmentation result to numpy .npz file. """ method = np.savez_compressed if compressed else np.savez data = {f'mask_{i}': seg.mask for i, seg in enumerate(self.segments)} data['img'] = self.img data['offsets'] = np.array([(seg.x, seg.y) for seg in self.segments]) method(path, **data) @staticmethod def load(path: str) -> 'SegmentationResult': """ Loads segmentation result form numpy .npz file. 
""" data = np.load(path) img = data['img'] offsets = data['offsets'] masks = [data[f'mask_{i}'] for i in range(offsets.shape[0])] return SegmentationResult(img, [Segment(x, y, mask) for (x, y), mask in zip(offsets, masks)]) # ========================================================== # PARTIALS # ========================================================== def _convolve(img: np.ndarray, kernel: np.ndarray = C_KERNEL) -> np.ndarray: """ Applies convolution using given kernel. """ return ndimage.convolve(img.astype('float'), kernel, mode='constant') def _shift_to_average(img: np.ndarray, avg: float = TARGET_AVERAGE) -> np.ndarray: """ Shifts values in image so that its average is equal to the given one. """ return img - np.average(img) + avg def _threshold(img: np.ndarray, threshold: float = THRESHOLD) -> np.ndarray: """ Applies threshold to given image returning array of ones and zeroes. """ return (img > threshold).astype(np.uint8) def _morph_closing(img: np.ndarray, kernel: np.ndarray = MC_KERNEL) -> np.ndarray: """ Applies morphological closing to given binary image. """ return cv.morphologyEx(img, cv.MORPH_CLOSE, kernel) def _label(img: np.ndarray) -> np.ndarray: """ Applies labeling to given binary image returning array in which each group of pixels has the same value. """ return measure.label(img) def _distance_transform(img: np.ndarray) -> np.ndarray: """ Applies distance transform by calculating distance form each pixel to nearest pixel that evaluates to false. """ return ndimage.distance_transform_edt(img > 0) def _local_maximums(img: np.ndarray, kernel: np.ndarray = LMAX_KERNEL, fraction: float = LMAX_FRACTION, global_threshold: float = LMAX_THRESHOLD, connect_distance: int = LMAX_CONNECT_DISTANCE): """ Finds local maximums. """ foreground_mask = img > global_threshold # apply maximum filter maxed = ndimage.maximum_filter(img, footprint=kernel, mode='reflect') # select only interesting peaks peaks = (img >= fraction * maxed) * foreground_mask # join peaks that are close to each other dilation_struct = cv.getStructuringElement(cv.MORPH_ELLIPSE, (connect_distance, connect_distance)) peaks_dilated = cv.morphologyEx(peaks.astype('uint8'), cv.MORPH_DILATE, dilation_struct) # return only peaks that belong are within mask return peaks_dilated * foreground_mask def _watershed_seeds(dists: np.ndarray, struct: np.ndarray = SEEDS_STRUCT) -> np.ndarray: """ Finds seeds for watershed algorithm by local max in given distances. """ local_maxes = _local_maximums(dists) labeled_maxes = ndimage.label(local_maxes, structure=struct)[0] return labeled_maxes.astype(np.uint8) def _watershed(labels: np.ndarray) -> np.ndarray: """ Applies watershed algorithm on given labels. """ dists = _distance_transform(labels) seeds = _watershed_seeds(dists) _, markers = cv.connectedComponents(seeds) water = morphology.watershed(-dists, markers) return water * (dists > 0) def _morph_snakes(img: np.ndarray, labels: np.ndarray, sigma: int = SNAKES_SIGMA, iterations: int = SNAKES_ITERATIONS, balloon: int = SNAKES_BALLOON, threshold: float = SNAKES_THRESHOLD, smoothing: float = SNAKES_SMOOTHING ) -> List[np.ndarray]: """ Applies morphological active contour method to given image starting from given labels. 
""" gradient = ndimage.gaussian_gradient_magnitude(img.astype(np.float32), sigma=sigma) return [ segment.morphological_geodesic_active_contour( gradient, iterations, labels == region_id, smoothing=smoothing, balloon=balloon, threshold=threshold ) for region_id in range(1, np.amax(labels)) ] def _convex_hull(seg: Segment) -> Segment: """ Returns convex hull of given segment. """ return Segment(seg.x, seg.y, morphology.convex_hull_image(seg.mask)) def _is_touching_edge(img: np.ndarray, seg: Segment) -> bool: """ Returns whether given segment touches edges of given image """ return seg.slice_x.start <= 0 \ or seg.slice_x.stop >= img.shape[0] \ or seg.slice_y.start <= 0 \ or seg.slice_y.stop >= img.shape[1] def _has_expected_size(seg: Segment, min_size: int = MIN_CELL_SIZE, max_size: int = MAX_CELL_SIZE) -> bool: """ Returns whether mask of given segment has size between given values. """ return min_size <= np.sum(seg.mask) <= max_size # ========================================================== # FINAL FUNCTION # ========================================================== def segmentate(img: np.ndarray) -> SegmentationResult: """ Performs whole segmentation process of given image. """ # binary image creation img_processed = _convolve(img) img_processed = _shift_to_average(img_processed) img_processed = _threshold(img_processed) img_processed = _morph_closing(img_processed) # labels creation labels = _label(img_processed) labels = _watershed(labels) # contours detection masks = _morph_snakes(img, labels) # segments creation segments = map(Segment.from_mask, masks) # convex hull segments = map(_convex_hull, segments) # segments filtering segments = filter(lambda s: not _is_touching_edge(img, s), segments) segments = filter(lambda s: _has_expected_size(s), segments) return SegmentationResult(img, list(segments))
[ "skimage.morphology.watershed", "scipy.ndimage.distance_transform_edt", "numpy.amax", "numpy.ones", "numpy.average", "numpy.where", "scipy.ndimage.label", "numpy.zeros_like", "skimage.segmentation.morphological_geodesic_active_contour", "cv2.morphologyEx", "numpy.array", "skimage.morphology.co...
[((425, 475), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(9, 9)'], {}), '(cv.MORPH_ELLIPSE, (9, 9))\n', (449, 475), True, 'import cv2 as cv\n'), ((716, 766), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(9, 9)'], {}), '(cv.MORPH_ELLIPSE, (9, 9))\n', (740, 766), True, 'import cv2 as cv\n'), ((832, 847), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (839, 847), True, 'import numpy as np\n'), ((1046, 1098), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(21, 21)'], {}), '(cv.MORPH_ELLIPSE, (21, 21))\n', (1070, 1098), True, 'import cv2 as cv\n'), ((5081, 5125), 'cv2.morphologyEx', 'cv.morphologyEx', (['img', 'cv.MORPH_CLOSE', 'kernel'], {}), '(img, cv.MORPH_CLOSE, kernel)\n', (5096, 5125), True, 'import cv2 as cv\n'), ((5299, 5317), 'skimage.measure.label', 'measure.label', (['img'], {}), '(img)\n', (5312, 5317), False, 'from skimage import measure, morphology, segmentation as segment\n'), ((5508, 5547), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['(img > 0)'], {}), '(img > 0)\n', (5538, 5547), False, 'from scipy import ndimage\n'), ((5883, 5944), 'scipy.ndimage.maximum_filter', 'ndimage.maximum_filter', (['img'], {'footprint': 'kernel', 'mode': '"""reflect"""'}), "(img, footprint=kernel, mode='reflect')\n", (5905, 5944), False, 'from scipy import ndimage\n'), ((6107, 6192), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(connect_distance, connect_distance)'], {}), '(cv.MORPH_ELLIPSE, (connect_distance, connect_distance)\n )\n', (6131, 6192), True, 'import cv2 as cv\n'), ((6903, 6932), 'cv2.connectedComponents', 'cv.connectedComponents', (['seeds'], {}), '(seeds)\n', (6925, 6932), True, 'import cv2 as cv\n'), ((6945, 6982), 'skimage.morphology.watershed', 'morphology.watershed', (['(-dists)', 'markers'], {}), '(-dists, markers)\n', (6965, 6982), False, 'from skimage import measure, morphology, segmentation as segment\n'), ((2254, 2268), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (2262, 2268), True, 'import numpy as np\n'), ((3694, 3745), 'numpy.array', 'np.array', (['[(seg.x, seg.y) for seg in self.segments]'], {}), '([(seg.x, seg.y) for seg in self.segments])\n', (3702, 3745), True, 'import numpy as np\n'), ((3923, 3936), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (3930, 3936), True, 'import numpy as np\n'), ((6611, 6655), 'scipy.ndimage.label', 'ndimage.label', (['local_maxes'], {'structure': 'struct'}), '(local_maxes, structure=struct)\n', (6624, 6655), False, 'from scipy import ndimage\n'), ((7524, 7675), 'skimage.segmentation.morphological_geodesic_active_contour', 'segment.morphological_geodesic_active_contour', (['gradient', 'iterations', '(labels == region_id)'], {'smoothing': 'smoothing', 'balloon': 'balloon', 'threshold': 'threshold'}), '(gradient, iterations, labels ==\n region_id, smoothing=smoothing, balloon=balloon, threshold=threshold)\n', (7569, 7675), True, 'from skimage import measure, morphology, segmentation as segment\n'), ((7879, 7917), 'skimage.morphology.convex_hull_image', 'morphology.convex_hull_image', (['seg.mask'], {}), '(seg.mask)\n', (7907, 7917), False, 'from skimage import measure, morphology, segmentation as segment\n'), ((8430, 8446), 'numpy.sum', 'np.sum', (['seg.mask'], {}), '(seg.mask)\n', (8436, 8446), True, 'import numpy as np\n'), ((3289, 3312), 'numpy.zeros_like', 'np.zeros_like', (['self.img'], {}), '(self.img)\n', (3302, 3312), True, 'import numpy as 
np\n'), ((4692, 4707), 'numpy.average', 'np.average', (['img'], {}), '(img)\n', (4702, 4707), True, 'import numpy as np\n'), ((7728, 7743), 'numpy.amax', 'np.amax', (['labels'], {}), '(labels)\n', (7735, 7743), True, 'import numpy as np\n')]
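A minimal usage sketch for the segmentation module above; the import path and the input file name are assumptions, but segmentate, SegmentationResult.save/load and the cells_masked getter are all defined in the code itself.

import cv2 as cv
from segmentation import segmentate, SegmentationResult  # hypothetical module name

img = cv.imread('cells.png', cv.IMREAD_GRAYSCALE)  # assumed single-channel input
result = segmentate(img)
print(f'{len(result.segments)} cells kept after filtering')

result.save('cells_seg.npz')                       # compressed .npz by default
restored = SegmentationResult.load('cells_seg.npz')
for cell in restored.cells_masked:                 # per-cell crops, background zeroed
    print(cell.shape, cell.sum())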
""" Levenberg Marquart fitting class and helper tools https://github.com/jaimedelacruz/LevMar Coded by <NAME> (ISP-SU 2021) References: This implementation follows the notation presented in: <NAME>, Leenaarts, Danilovic & Uitenbroek (2019): https://ui.adsabs.harvard.edu/abs/2019A%26A...623A..74D/abstract but without l-2 regularization for the time being. Original Levenberg-Marquardt algorithm references: Levenberg (1944) Marquardt (1963) Dependences: NumPy Modifications history: 2021-07-8, JdlCR: added SVD_thres to reject small singular values. Bugfix in autoderivatives scaling TODO: 1) Implement handling of cyclic variables. """ import numpy as np # ******************************************************* class Pinfo: """ Helper class to store parameter scaling norms and limits Methods: checkLimits, scale, normalize """ def __init__(self, scale = 1.0, min = None, max = None, is_cyclic = 0): self.scl = scale self.min = min self.max = max self.is_cyclic = is_cyclic # -------------------------------------------------- def checkLimits(self, value): """ This function gets a parameter value and checks whether it is within the specified limits. If it isn't, the value will saturate at the limit value. """ if(self.min is not None): value = np.maximum(value, self.min) if(self.max is not None): value = np.minimum(value, self.max) return value # -------------------------------------------------- def scale(self, value): """ This function scales a paramter value that has been normalized with the scaling norm """ return value*self.scl # -------------------------------------------------- def normalize(self, value): """ This function normalizes a parameter value with a scaling norm """ return value / self.scl # ******************************************************* def ScalePars(pars, Pinfo): """ Helper function that scales an array of parameters inplace pars: 1D numpy array of length Npar Pinfo: tuple/list of Pinfo objects of length Npar """ pLen = pars.size piLen = len(Pinfo) if(piLen != pLen): print("[error] ScalePars: Error, parameter array has different length than Parameter info array: {0} != {1}".format(pLen, piLen)) return 0 for ii in range(pLen): pars[ii] = Pinfo[ii].scale(pars[ii]) return 1 # ******************************************************* def NormalizePars(pars, Pinfo): """ Helper function that normalizes an array of parameters inplace pars: 1D numpy array of length Npar Pinfo: tuple/list of Pinfo objects of length Npar """ pLen = pars.size piLen = len(Pinfo) if(piLen != pLen): print("[error] NormalizePars: Error, parameter array has different length than Parameter info array: {0} != {1}".format(pLen, piLen)) return 0 for ii in range(pLen): pars[ii] = Pinfo[ii].normalize(pars[ii]) return 1 # ******************************************************* def CheckPars(pars, Pinfo): """ Helper function that checks an array of parameters inplace pars: 1D numpy array of length Npar Pinfo: tuple/list of Pinfo objects of length Npar """ pLen = pars.size piLen = len(Pinfo) if(piLen != pLen): print("[error] CheckPars: Error, parameter array has different length than Parameter info array: {0} != {1}".format(pLen, piLen)) return 0 for ii in range(pLen): pars[ii] = Pinfo[ii].checkLimits(pars[ii]) return 1 # ******************************************************* def _eval_fx(fx, x, pinfo, udat, auto_derivatives = False, get_J = False): """ Internal helper function that evaluates the user provided function fx, and computes the Jacobian if needed. 
If the analytical form of the Jacobian is unknown, this routine can do it by finite differences """ nPar = x.size xtmp = np.copy(x) status = ScalePars(xtmp, pinfo) if(get_J): # # The user does not provide a derivatives engine, compute them # automatically. Keep your fingers crossed # if(auto_derivatives): dpar = 0.001 syn = fx(xtmp, udat) nObs = syn.size J = np.zeros((nPar, nObs), dtype='float64') for ii in range(nPar): xtmp = np.copy(x) xtmp[ii] += dpar status = ScalePars(xtmp, pinfo) left = fx(xtmp, udat) xtmp = np.copy(x) xtmp[ii] -= dpar status = ScalePars(xtmp, pinfo) right = fx(xtmp, udat) J[ii] = (left - right) / (2*dpar*pinfo[ii].scl) else: # The user provides derivatives syn, J = fx(xtmp, udat, get_J=get_J) return syn, J else: # # No derivatives are requested # return fx(xtmp, udat) # ******************************************************* def _getResidue(syn, o, s, pinfo, J = None): """ Internal helper function that computes the residue and scales the Jacobian with sigma and the sqrt of scaling factors """ nPar = len(pinfo) nDat = o.size scl = np.sqrt(1.0/nDat) / s # Includes the noise estimate! res = scl * (o-syn) if(J is not None): for pp in range(nPar): J[pp] *= scl * pinfo[pp].scl return res # ******************************************************* def _getChi2(res): """ Helper function that computes Chi2 from the residue array """ return (res*res).sum() # ******************************************************* def _checkLambda(lamb, lmin, lmax, lstep): """ Helper function that checks the lambda parameter limits """ if(lamb > lmax): return lmax elif(lamb < lmin): return lamb*lstep*lstep else: return lamb # ******************************************************* def _solveLinearSVD(A,b, svd_thres = None): """ Resolution of a linear system of equations using SVD Singular values below s.max() * svd_thres are filtered out """ U,s,Vh = np.linalg.svd(A) if(svd_thres is not None): ithr = np.abs(s).max() * svd_thres for ii in range(len(s)): if(s[ii] >= ithr): s[ii] = 1.0 / s[ii] else: s[ii] = 0.0 else: s = 1.0 / s c = np.dot(U.T,np.transpose(b)) w = np.dot(np.diag(s),c) x = np.dot(Vh.conj().T,w) return x # ******************************************************* def _computeNew(J, res, x, lamb, pinfo, svd_thres=None): """ Helper function that computes the correction to the current estimate of the model for a given Jacobian matrix, residues array and lambda parameter """ # Allocate linear system terms # A = J.T * J, where A is a symmetric matrix # b = J.T * res, where b is a vector nPar, nDat = J.shape A = np.zeros((nPar,nPar), dtype='float64') b = np.zeros((nPar), dtype='float64') for jj in range(nPar): # Evaluate b = J.T * res b[jj] = (J[jj] * res).sum() for ii in range(jj,nPar): # Remember, it is sym!
# Evaluate A = J.T * J tmp = (J[jj]*J[ii]).sum() A[jj,ii] = tmp A[ii,jj] = tmp # Apply diagonal damping to A matrix A[jj,jj] *= (1.0 + lamb) # Solve linear system for correction dx = _solveLinearSVD(A, b, svd_thres=svd_thres) # Add correction to the current estimate of the model xnew = x + dx # Check parameter limits status = ScalePars(xnew, pinfo) status = CheckPars(xnew, pinfo) status = NormalizePars(xnew, pinfo) return xnew # ******************************************************* def _getNewEstimate(fx, x, o, s, res, pinfo, udat, lamb, J, lmin, lmax, \ lstep, auto_derivatives = False, svd_thres = None): """ Wrapper helper function that computes a new estimate of the model and evaluates Chi2 for this estimate for a given J, res and lambda parameter """ # get new estimate of the model for lambda parameter xnew = _computeNew(J, res, np.copy(x), lamb, pinfo, svd_thres=svd_thres) # get model prediction synnew = _eval_fx(fx, xnew, pinfo, udat, auto_derivatives=auto_derivatives, get_J = False) # residue, no J scaling this time as it is already done res_new = _getResidue(synnew, o, s, pinfo) new_chi2 = _getChi2(res_new) return new_chi2, xnew, lamb # ******************************************************* def LevMar(fx, par_in, obs_in, sig_in, pinfo, udat, Niter = 20, init_lambda=10.0, \ lmin = 1.e-4, lmax=1.e4, lstep = 10**0.5, chi2_thres=1.0, \ fx_thres=0.001, auto_derivatives = False, verbose = True, n_reject_max = 6, svd_thres = 1.e-14): """ Levenberg-Marquardt based fitting routine for non-linear models Coded by <NAME> (ISP-SU 2021) Input: fx: a user provided function that takes as input fx(pars, user_data, get_J = True/False) par_in: a numpy array with the initial estimate of the model parameters (length=Npar). It will be flattened. obs_in: a numpy array with the data to be fitted of length (Ndat). It will be flattened internally. sig_in: a numpy array with the noise estimate for each data point (length Ndat). pinfo: a list of Pinfo objects of length (Npar), containing the scaling norm and the parameter limits (if any). udat: User provided data. This variable can be anything (a struct?) with data that will be passed to fx as an argument. The user can pack here as much info as needed. Optional: Niter: Maximum number of iterations (typically Niter=20) init_lambda: Initial value of the lambda parameter that scales the diagonal of the Hessian. Typically > 1.0 when starting the fitting process. lmin: Minimum value of lambda allowed (default 1.e-4) lmax: Maximum value of lambda allowed (default 1.e+4) lstep: step in lambda between iterations (lambda is divided by this number, default sqrt(10)) chi2_thres: stop the iterations if Chi2 goes below this value. If the noise estimate is correct, the threshold should be around 1. fx_thres: stop iterating if the relative change in Chi2 is below this threshold for two consecutive iterations auto_derivatives: Compute the derivatives automatically using centered finite differences (default: False) verbose: Print iteration information n_reject_max: maximum number of consecutive rejected iterations before stopping the iterations. The lambda parameter will be increased after each rejection. svd_thres: SVD threshold to reject small singular values. We use s.max() * svd_thres as a limit value. Typically 1.e-14 for double prec calculations. 
""" nam = "LevMar: " n_rejected = 0; too_small = False lamb = _checkLambda(init_lambda * 1.0, lmin, lmax, lstep) # # Prepare input arrays and check parameter limits5 # x = np.ascontiguousarray(par_in, dtype='float64').flatten() o = np.ascontiguousarray(obs_in, dtype='float64').flatten() s = np.ascontiguousarray(sig_in, dtype='float64').flatten() if(not CheckPars(x, pinfo)): print("Please fix your input, exiting") return None # # Normalize parameters according to the user Norm # status = NormalizePars(x, pinfo) # # Get first evaluation of Chi2 and init Jacobian # syn, J = _eval_fx(fx, x, pinfo, udat, auto_derivatives=auto_derivatives, get_J = True) bestJ = np.copy(J) Jsave = np.copy(J) res = _getResidue(syn, o, s, pinfo, J = J) bestChi2 = _getChi2(res) best_x = np.copy(x) if(verbose): print(nam + "Init Chi2={0}".format(bestChi2)) # # Init iterations # for ii in range(Niter): olamb = lamb*1 x = np.copy(best_x) # Get model correction for current lambda and J chi2, xnew, lamb = _getNewEstimate(fx, x, o, s, res, pinfo, udat, lamb, J, lmin, lmax, lstep, auto_derivatives = auto_derivatives, svd_thres=svd_thres) dchi2 = np.abs((bestChi2 - chi2) / chi2) # Check if the step improves things if(chi2 < bestChi2): bestChi2 = chi2*1 best_x = np.copy(xnew) bestJ = np.copy(Jsave) n_rejected = 0 olamb = lamb*1 lamb = _checkLambda(lamb/lstep, lmin, lmax, lstep) # Is the correction too small for 2 consecutive iterations? if(dchi2 < fx_thres): if too_small: if(verbose): print(nam+" iter={0}, Chi2={1}, dChi2={2}, lambda={3}".format(ii, bestChi2, dchi2, olamb)) print(nam+"terminating, two consecutive small iterations") break else: too_small = True else: too_small = False if(verbose): print(nam+" iter={0}, Chi2={1}, dChi2={2}, lambda={3}".format(ii, bestChi2, dchi2, olamb)) # Check if we have reached the chi2_thres indicated by the user if(bestChi2 < chi2_thres): if(verbose): print(nam+"Chi2_threshold ({0}) reached -> Chi2={1}".format(chi2_thres, bestChi2)) break # Update J and res x = np.copy(best_x) syn, J = _eval_fx(fx, x, pinfo, udat, auto_derivatives=auto_derivatives, get_J = True) Jsave = np.copy(J) res = _getResidue(syn, o, s, pinfo, J = J) else: # If the iteration increases Chi2, then increase the Lambda parameter by lstep**2 olamb = lamb*1 lamb = _checkLambda(lamb*lstep*lstep, lmin, lmax, lstep) n_rejected += 1 if(verbose): print(nam+"Chi2 > best Chi2 ({2} > {3}), Increasing lambda ({0} -> {1})".format(olamb, lamb, chi2, bestChi2)) if(n_rejected >= n_reject_max): if(verbose): print(nam+"Rejected too many iterations, terminating") break else: continue if(verbose): print(nam+"Final Chi2={0}".format(bestChi2)) # # Get synthetic and derivatives # syn = _eval_fx(fx, np.copy(best_x), pinfo, udat, auto_derivatives=auto_derivatives, get_J = False) status = ScalePars(best_x, pinfo) return bestChi2, best_x, syn, bestJ
[ "numpy.copy", "numpy.abs", "numpy.sqrt", "numpy.minimum", "numpy.diag", "numpy.ascontiguousarray", "numpy.zeros", "numpy.linalg.svd", "numpy.maximum", "numpy.transpose" ]
[((4195, 4205), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (4202, 4205), True, 'import numpy as np\n'), ((6606, 6622), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (6619, 6622), True, 'import numpy as np\n'), ((7442, 7481), 'numpy.zeros', 'np.zeros', (['(nPar, nPar)'], {'dtype': '"""float64"""'}), "((nPar, nPar), dtype='float64')\n", (7450, 7481), True, 'import numpy as np\n'), ((7489, 7520), 'numpy.zeros', 'np.zeros', (['nPar'], {'dtype': '"""float64"""'}), "(nPar, dtype='float64')\n", (7497, 7520), True, 'import numpy as np\n'), ((12253, 12263), 'numpy.copy', 'np.copy', (['J'], {}), '(J)\n', (12260, 12263), True, 'import numpy as np\n'), ((12278, 12288), 'numpy.copy', 'np.copy', (['J'], {}), '(J)\n', (12285, 12288), True, 'import numpy as np\n'), ((12385, 12395), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (12392, 12395), True, 'import numpy as np\n'), ((5621, 5640), 'numpy.sqrt', 'np.sqrt', (['(1.0 / nDat)'], {}), '(1.0 / nDat)\n', (5628, 5640), True, 'import numpy as np\n'), ((6898, 6913), 'numpy.transpose', 'np.transpose', (['b'], {}), '(b)\n', (6910, 6913), True, 'import numpy as np\n'), ((6930, 6940), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (6937, 6940), True, 'import numpy as np\n'), ((8754, 8764), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (8761, 8764), True, 'import numpy as np\n'), ((12580, 12595), 'numpy.copy', 'np.copy', (['best_x'], {}), '(best_x)\n', (12587, 12595), True, 'import numpy as np\n'), ((12848, 12880), 'numpy.abs', 'np.abs', (['((bestChi2 - chi2) / chi2)'], {}), '((bestChi2 - chi2) / chi2)\n', (12854, 12880), True, 'import numpy as np\n'), ((15113, 15128), 'numpy.copy', 'np.copy', (['best_x'], {}), '(best_x)\n', (15120, 15128), True, 'import numpy as np\n'), ((1418, 1445), 'numpy.maximum', 'np.maximum', (['value', 'self.min'], {}), '(value, self.min)\n', (1428, 1445), True, 'import numpy as np\n'), ((1513, 1540), 'numpy.minimum', 'np.minimum', (['value', 'self.max'], {}), '(value, self.max)\n', (1523, 1540), True, 'import numpy as np\n'), ((4602, 4641), 'numpy.zeros', 'np.zeros', (['(nPar, nObs)'], {'dtype': '"""float64"""'}), "((nPar, nObs), dtype='float64')\n", (4610, 4641), True, 'import numpy as np\n'), ((11686, 11731), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['par_in'], {'dtype': '"""float64"""'}), "(par_in, dtype='float64')\n", (11706, 11731), True, 'import numpy as np\n'), ((11750, 11795), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['obs_in'], {'dtype': '"""float64"""'}), "(obs_in, dtype='float64')\n", (11770, 11795), True, 'import numpy as np\n'), ((11814, 11859), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['sig_in'], {'dtype': '"""float64"""'}), "(sig_in, dtype='float64')\n", (11834, 11859), True, 'import numpy as np\n'), ((13014, 13027), 'numpy.copy', 'np.copy', (['xnew'], {}), '(xnew)\n', (13021, 13027), True, 'import numpy as np\n'), ((13049, 13063), 'numpy.copy', 'np.copy', (['Jsave'], {}), '(Jsave)\n', (13056, 13063), True, 'import numpy as np\n'), ((14150, 14165), 'numpy.copy', 'np.copy', (['best_x'], {}), '(best_x)\n', (14157, 14165), True, 'import numpy as np\n'), ((14286, 14296), 'numpy.copy', 'np.copy', (['J'], {}), '(J)\n', (14293, 14296), True, 'import numpy as np\n'), ((4701, 4711), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (4708, 4711), True, 'import numpy as np\n'), ((4871, 4881), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (4878, 4881), True, 'import numpy as np\n'), ((6670, 6679), 'numpy.abs', 'np.abs', (['s'], {}), '(s)\n', (6676, 6679), True, 'import numpy as np\n')]
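The LevMar docstring above fixes the fx contract, so a short end-to-end fit can be sketched against it; the exponential-decay model and the synthetic data below are assumptions for illustration, and Pinfo/LevMar are taken to be importable from the module above. With auto_derivatives=True the user function is only ever called as fx(pars, udat), as _eval_fx shows.

import numpy as np

def fx(pars, udat, get_J=False):
    a, b = pars
    return a * np.exp(-b * udat)           # model evaluated at the abscissas packed in udat

t = np.linspace(0.0, 5.0, 50)
obs = fx(np.array([2.0, 0.7]), t) + np.random.normal(0.0, 0.01, t.size)
sig = np.full(t.size, 0.01)                # per-point noise estimate
pinfo = [Pinfo(scale=1.0, min=0.0),          # amplitude >= 0
         Pinfo(scale=1.0, min=0.0)]          # decay rate >= 0

chi2, x, syn, J = LevMar(fx, np.array([1.0, 1.0]), obs, sig, pinfo, t,
                         auto_derivatives=True, verbose=False)
print(chi2, x)                               # x should land near (2.0, 0.7)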
# Copyright 2020 Google LLC # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for methods in quantizer_impl.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import pytest from tensorflow.keras.layers import * from tensorflow.keras.models import * from qkeras import * from qkeras.qtools.quantized_operators import quantizer_impl from qkeras import quantizers from numpy.testing import assert_equal # pylint: disable=invalid-name def test_QuantizedBits(): qkeras_quantizer = quantizers.quantized_bits() qtools_quantizer = quantizer_impl.QuantizedBits() qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( symmetric=qkeras_quantizer.symmetric, alpha=qkeras_quantizer.alpha, use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding, scale_axis=qkeras_quantizer.scale_axis, qnoise_factor=qkeras_quantizer.qnoise_factor) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) def test_QuantizedTanh(): qkeras_quantizer = quantizers.quantized_tanh() qtools_quantizer = quantizer_impl.QuantizedTanh() qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( symmetric=qkeras_quantizer.symmetric, use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) def test_QuantizedUlaw(): qkeras_quantizer = quantizers.quantized_ulaw() qtools_quantizer = quantizer_impl.QuantizedUlaw() qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( symmetric=qkeras_quantizer.symmetric, u=qkeras_quantizer.u) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) def test_Binary(): qkeras_quantizer = quantizers.binary() qtools_quantizer = quantizer_impl.Binary() qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( alpha=qkeras_quantizer.alpha, use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) def test_StochasticBinary(): qkeras_quantizer = quantizers.stochastic_binary() qtools_quantizer = quantizer_impl.StochasticBinary() qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( alpha=qkeras_quantizer.alpha, temperature=qkeras_quantizer.temperature, use_real_sigmoid=qkeras_quantizer.use_real_sigmoid) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) def test_Bernoulli(): qkeras_quantizer = 
quantizers.bernoulli() qtools_quantizer = quantizer_impl.Bernoulli() qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( alpha=qkeras_quantizer.alpha, temperature=qkeras_quantizer.temperature, use_real_sigmoid=qkeras_quantizer.use_real_sigmoid) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) def test_QuantizedRelu(): qkeras_quantizer = quantizers.quantized_relu() qtools_quantizer = quantizer_impl.QuantizedRelu() qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( use_sigmoid=qkeras_quantizer.use_sigmoid, negative_slope=qkeras_quantizer.negative_slope, use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding, relu_upper_bound=qkeras_quantizer.relu_upper_bound, is_quantized_clip=qkeras_quantizer.is_quantized_clip, qnoise_factor=qkeras_quantizer.qnoise_factor) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) def test_Ternary(): qkeras_quantizer = quantizers.ternary() qtools_quantizer = quantizer_impl.Ternary() qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( alpha=qkeras_quantizer.alpha, threshold=qkeras_quantizer.threshold, use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding, number_of_unrolls=qkeras_quantizer.number_of_unrolls) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) def test_StochasticTernary(): qkeras_quantizer = quantizers.stochastic_ternary() qtools_quantizer = quantizer_impl.StochasticTernary() qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( alpha=qkeras_quantizer.alpha, threshold=qkeras_quantizer.threshold, temperature=qkeras_quantizer.temperature, use_real_sigmoid=qkeras_quantizer.use_real_sigmoid, number_of_unrolls=qkeras_quantizer.number_of_unrolls) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) def test_PowerOfTwo(): qkeras_quantizer = quantizers.quantized_po2() qtools_quantizer = quantizer_impl.PowerOfTwo(is_signed=True) qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( negative_slope=None, use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding, quadratic_approximation=qkeras_quantizer.quadratic_approximation) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) def test_ReluPowerOfTwo(): qkeras_quantizer = quantizers.quantized_relu_po2() qtools_quantizer = quantizer_impl.ReluPowerOfTwo() qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer) new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer( negative_slope=qkeras_quantizer.negative_slope, use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding, quadratic_approximation=qkeras_quantizer.quadratic_approximation) result = new_quantizer.__dict__ for (key, val) in result.items(): assert_equal(val, qkeras_quantizer.__dict__[key]) if __name__ == "__main__": pytest.main([__file__])
[ "qkeras.qtools.quantized_operators.quantizer_impl.Bernoulli", "qkeras.qtools.quantized_operators.quantizer_impl.StochasticTernary", "numpy.testing.assert_equal", "qkeras.qtools.quantized_operators.quantizer_impl.Ternary", "qkeras.quantizers.quantized_ulaw", "qkeras.quantizers.stochastic_binary", "qkeras...
[((1154, 1181), 'qkeras.quantizers.quantized_bits', 'quantizers.quantized_bits', ([], {}), '()\n', (1179, 1181), False, 'from qkeras import quantizers\n'), ((1203, 1233), 'qkeras.qtools.quantized_operators.quantizer_impl.QuantizedBits', 'quantizer_impl.QuantizedBits', ([], {}), '()\n', (1231, 1233), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((1778, 1805), 'qkeras.quantizers.quantized_tanh', 'quantizers.quantized_tanh', ([], {}), '()\n', (1803, 1805), False, 'from qkeras import quantizers\n'), ((1827, 1857), 'qkeras.qtools.quantized_operators.quantizer_impl.QuantizedTanh', 'quantizer_impl.QuantizedTanh', ([], {}), '()\n', (1855, 1857), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((2275, 2302), 'qkeras.quantizers.quantized_ulaw', 'quantizers.quantized_ulaw', ([], {}), '()\n', (2300, 2302), False, 'from qkeras import quantizers\n'), ((2324, 2354), 'qkeras.qtools.quantized_operators.quantizer_impl.QuantizedUlaw', 'quantizer_impl.QuantizedUlaw', ([], {}), '()\n', (2352, 2354), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((2720, 2739), 'qkeras.quantizers.binary', 'quantizers.binary', ([], {}), '()\n', (2737, 2739), False, 'from qkeras import quantizers\n'), ((2761, 2784), 'qkeras.qtools.quantized_operators.quantizer_impl.Binary', 'quantizer_impl.Binary', ([], {}), '()\n', (2782, 2784), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((3196, 3226), 'qkeras.quantizers.stochastic_binary', 'quantizers.stochastic_binary', ([], {}), '()\n', (3224, 3226), False, 'from qkeras import quantizers\n'), ((3248, 3281), 'qkeras.qtools.quantized_operators.quantizer_impl.StochasticBinary', 'quantizer_impl.StochasticBinary', ([], {}), '()\n', (3279, 3281), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((3720, 3742), 'qkeras.quantizers.bernoulli', 'quantizers.bernoulli', ([], {}), '()\n', (3740, 3742), False, 'from qkeras import quantizers\n'), ((3764, 3790), 'qkeras.qtools.quantized_operators.quantizer_impl.Bernoulli', 'quantizer_impl.Bernoulli', ([], {}), '()\n', (3788, 3790), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((4227, 4254), 'qkeras.quantizers.quantized_relu', 'quantizers.quantized_relu', ([], {}), '()\n', (4252, 4254), False, 'from qkeras import quantizers\n'), ((4276, 4306), 'qkeras.qtools.quantized_operators.quantizer_impl.QuantizedRelu', 'quantizer_impl.QuantizedRelu', ([], {}), '()\n', (4304, 4306), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((4946, 4966), 'qkeras.quantizers.ternary', 'quantizers.ternary', ([], {}), '()\n', (4964, 4966), False, 'from qkeras import quantizers\n'), ((4988, 5012), 'qkeras.qtools.quantized_operators.quantizer_impl.Ternary', 'quantizer_impl.Ternary', ([], {}), '()\n', (5010, 5012), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((5523, 5554), 'qkeras.quantizers.stochastic_ternary', 'quantizers.stochastic_ternary', ([], {}), '()\n', (5552, 5554), False, 'from qkeras import quantizers\n'), ((5576, 5610), 'qkeras.qtools.quantized_operators.quantizer_impl.StochasticTernary', 'quantizer_impl.StochasticTernary', ([], {}), '()\n', (5608, 5610), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((6148, 6174), 'qkeras.quantizers.quantized_po2', 'quantizers.quantized_po2', ([], {}), '()\n', (6172, 6174), False, 'from qkeras import quantizers\n'), ((6196, 6237), 'qkeras.qtools.quantized_operators.quantizer_impl.PowerOfTwo', 
'quantizer_impl.PowerOfTwo', ([], {'is_signed': '(True)'}), '(is_signed=True)\n', (6221, 6237), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((6710, 6741), 'qkeras.quantizers.quantized_relu_po2', 'quantizers.quantized_relu_po2', ([], {}), '()\n', (6739, 6741), False, 'from qkeras import quantizers\n'), ((6763, 6794), 'qkeras.qtools.quantized_operators.quantizer_impl.ReluPowerOfTwo', 'quantizer_impl.ReluPowerOfTwo', ([], {}), '()\n', (6792, 6794), False, 'from qkeras.qtools.quantized_operators import quantizer_impl\n'), ((7275, 7298), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (7286, 7298), False, 'import pytest\n'), ((1679, 1728), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (1691, 1728), False, 'from numpy.testing import assert_equal\n'), ((2175, 2224), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (2187, 2224), False, 'from numpy.testing import assert_equal\n'), ((2628, 2677), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (2640, 2677), False, 'from numpy.testing import assert_equal\n'), ((3094, 3143), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (3106, 3143), False, 'from numpy.testing import assert_equal\n'), ((3625, 3674), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (3637, 3674), False, 'from numpy.testing import assert_equal\n'), ((4128, 4177), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (4140, 4177), False, 'from numpy.testing import assert_equal\n'), ((4852, 4901), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (4864, 4901), False, 'from numpy.testing import assert_equal\n'), ((5420, 5469), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (5432, 5469), False, 'from numpy.testing import assert_equal\n'), ((6052, 6101), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (6064, 6101), False, 'from numpy.testing import assert_equal\n'), ((6610, 6659), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (6622, 6659), False, 'from numpy.testing import assert_equal\n'), ((7194, 7243), 'numpy.testing.assert_equal', 'assert_equal', (['val', 'qkeras_quantizer.__dict__[key]'], {}), '(val, qkeras_quantizer.__dict__[key])\n', (7206, 7243), False, 'from numpy.testing import assert_equal\n')]
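Every test in the file above is the same round-trip: build a qkeras quantizer, convert it to its qtools counterpart, convert back, and assert attribute equality. A hedged refactoring sketch of that shared pattern follows; the helper name is an assumption, and it reuses only calls already present above.

from numpy.testing import assert_equal

def roundtrip_check(qkeras_quantizer, qtools_quantizer, **kwargs):
    # shared body of every test_* above
    qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer)
    new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer(**kwargs)
    for key, val in new_quantizer.__dict__.items():
        assert_equal(val, qkeras_quantizer.__dict__[key])

# e.g. the body of test_Binary() then becomes:
# q = quantizers.binary()
# roundtrip_check(q, quantizer_impl.Binary(), alpha=q.alpha,
#                 use_stochastic_rounding=q.use_stochastic_rounding)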
import numpy as np ########################## method used - "1env_1jet", or "1env_njet" ################################################## method='1env_1jet' ########################## nature of neural network - "mlp", "mlp_shared" or "cnn" ################################################## policy_name='mlp' ########################## architecture of NN - default = [128, 64, 64] ################################################## ## this should be like this: [nb of neurons in first layer, nb of neurons in second layer, ...] arch=[128, 64, 64] ############################################### about the simulation ############################################################## ###### if you change these params, next time you launch a training, set the following param to 1 ############ new_params = 0 ### will recalculate the initial system_state with the new params #TODO - make that automatic L = 300.0 # length in mm - default: 300 C = 1e-4 # default: 1e-4 dx = 10e-2 # default: 1e-1 dt = C / dx # C / dx - default value: 1e-3 NUM = int(L/dx) # number of points in numerical resolution simulation_step_time = 5.0e-2 # time in seconds of a step of the simulation - default: 5e-2 n_step = int(simulation_step_time / dt) # number of times the numerical scheme will be applied in a single step # it will affect how much time passes between each step of the environment (= dt*n_step) simulation_time=20 # time in seconds of an episode initial_waiting_time = 200 # the number of seconds before saving the system state that will serve as the initial system state for all trainings to come initial_waiting_n_step = int(initial_waiting_time/simulation_step_time) ######################################################################################################################################## ############################################### about the environment ############################################################## nb_timestep_per_simulation = int(simulation_time/simulation_step_time) # number of steps per epoch # nb_total_epoch = 10000 # total number of episodes # save_every_n_epoch = 100 # the model is saved every n episodes total_nb_timesteps = int(3e5) nb_saves_per_training = 10 nb_epoch = total_nb_timesteps // nb_timestep_per_simulation save_every_n_epoch = (nb_epoch-1) // nb_saves_per_training n_cpu=1 threshold_hq = 5 # max value in obs, to avoid giving too-high inputs to the NN #################### about the jets ################## n_jets=10 JET_MAX_POWER=5 JET_WIDTH_mm=5.0 space_between_jets=10 position_first_jet=150 JET_WIDTH = int(JET_WIDTH_mm/dx) jets_position = np.array( [position_first_jet + space_between_jets*i for i in range(n_jets)]) # in mm jets_position = np.array(jets_position/dx, dtype="int") # we can add perturbation jets to challenge a policy that would be adapted only to the case of an unperturbed simulation perturbation_jets_position=[] perturbation_jets_power = JET_MAX_POWER #################### about the obs/reward ################### cut_obs_before_jet = 1.0 # to change the size of the jet without changing the position of its left extremity - default: 1 size_obs=20 size_obs_to_reward=20 reward_param = 5.66 # chosen so that a no-jet policy gives a reward of ~0 obs_param = 1 nan_punition=-500.0 # reward given when the simulation collapses true_reset_every_n_episodes = False ############################################################################################################################ ########################################## about rendering 
################################################################## render = False MAX_TIMEFRAME_CONTROL_PLOT = 64 # Max number of points to plot the control+h/time MAX_TIMEFRAME_FULL_CONTROL_PLOT = 48 POSITION_JET_PLOT = 0.5 # Where to plot the jets and sensors POSITION_REWARD_SPOTS = 0.4 POSITION_CONTROL_SPOTS = 0.6 N_PLOTS = 3 # nb of plots show_control = False # It's where we start the plotting (we're most interested in the zone where waves are already fully formed) start_h_plot = 0 RENDER_PERIOD=1 SAVE_PERIOD = 1000 obs_at_jet_render_param = 4.0 reward_multiplier_render = 1 ######################################################################################################################################## ########################################## parameters that should not be changed ################################################### obs_h, obs_q = True, True delta = 0.1 noise_mag = 1e-4 hq_base_value = 1.0 max_h = 1 # Important for plot and obs space max_q=3 normalize_value_q=1 normalize_value_h=1 ######################################################################################################################################## # misc tensorboard_integration = True monitor_reward = True is_dummy_vec_env = False
[ "numpy.array" ]
[((2754, 2795), 'numpy.array', 'np.array', (['(jets_position / dx)'], {'dtype': '"""int"""'}), "(jets_position / dx, dtype='int')\n", (2762, 2795), True, 'import numpy as np\n')]
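The derived constants in the config above follow from simple arithmetic on the base parameters; a quick sanity-check sketch with the stated defaults (the numeric results are computed here, as an illustration, not taken from the file):

# with the defaults above: L = 300.0 mm, dx = 10e-2 = 0.1 mm, C = 1e-4
dt = 1e-4 / 0.1                  # = 1e-3 s, matching the "default value: 1e-3" comment
NUM = int(300.0 / 0.1)           # = 3000 grid points
n_step = int(5.0e-2 / dt)        # = 50 scheme applications per environment step
JET_WIDTH = int(5.0 / 0.1)       # = 50 grid points for a 5 mm jet
print(dt, NUM, n_step, JET_WIDTH)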
# -*- coding: utf-8 -*- """ Survey_Estimate3dCoord.py *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = '<NAME>' __date__ = '2020-02-07' __copyright__ = '(C) 2020, <NAME>' from PyQt5.QtCore import QCoreApplication, QVariant from qgis.core import * import qgis.utils from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag from numpy.linalg import pinv, norm from lftools.geocapt.imgs import Imgs from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd import os from qgis.PyQt.QtGui import QIcon class Estimate3dCoord(QgsProcessingAlgorithm): COC = 'COC' AZIMUTH = 'AZIMUTH' ZENITH = 'ZENITH' OUTPUT = 'OUTPUT' WEIGHT = 'WEIGHT' OPENOUTPUT = 'OPENOUTPUT' HTML = 'HTML' OPEN = 'OPEN' LOC = QgsApplication.locale()[:2] def translate(self, string): return QCoreApplication.translate('Processing', string) def tr(self, *string): # Translate to Portuguese: arg[0] - english (translate), arg[1] - portuguese if self.LOC == 'pt': if len(string) == 2: return string[1] else: return self.translate(string[0]) else: return self.translate(string[0]) def createInstance(self): return Estimate3dCoord() def name(self): return 'estimate3dcoord' def displayName(self): return self.tr('Estimate 3D coordinates', 'Estimar coordenadas 3D') def group(self): return self.tr('Survey', 'Agrimensura') def groupId(self): return 'survey' def tags(self): return self.tr('survey,agrimensura,3D,coordinate,azimuth,zenith,angle,least square,minimum distance,adjustment,slant').split(',') def icon(self): return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/total_station.png')) txt_en = 'This tool calculates the coordinates (X, Y, Z) of a point from azimuth and zenith angle measurements observed from two or more stations with known coordinates using the Forward Intersection Method adjusted by the Minimum Distances.' txt_pt = 'Esta ferramenta calcula as coordenadas (X,Y,Z) de um ponto a partir de medições de azimute e ângulo zenital observados de duas ou mais estações de coordenadas conhecidas utilizando o Método de Interseção à Vante ajustado pelas Distâncias Mínimas.' figure = 'images/tutorial/survey_3D_coord.jpg' def shortHelpString(self): social_BW = Imgs().social_BW nota_en = '''Notes: Data collected in the discipline of <i>Geodetic Surveys</i> in the Graduate Program at UFPE, in field work coordinated by <b>Prof. Dr. <NAME></b>. For more information on the methodology used, please read the article at the link below:''' nota_pt = '''Notas: Dados coletados na disciplina de <i>Levantamentos Geodésicos</i> no programa de Pós-Graduação da UFPE, em trabalho de campo coordenado pela <b>Profa. Dra. Andrea de Seixas</b>. 
Para mais informações sobre a metodologia utilizada, por favor leia o artigo no link abaixo:''' footer = '''<div align="center"> <img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''"> </div> <div align="right"> <div>''' + self.tr(nota_en, nota_pt) + ''' </div> <p align="right"> <b><a href="https://www.researchgate.net/publication/352817150_OPTIMIZED_DETERMINATION_OF_3D_COORDINATES_IN_THE_SURVEY_OF_INACCESSIBLE_POINTS_OF_BUILDINGS_-_EXAMPLE_OF_APPLICATION_IMPLEMENTED_IN_FREE_SOFTWARE_Determinacao_otimizada_de_coordenadas_3D_no_levantamen" target="_blank">'''+self.tr('<NAME>.; <NAME>.; <NAME>.; <NAME>. Optimized determination of 3D coordinates in the survey of inaccessible points of buildings - example of application implemented in free software. Bulletin of Geodetic Sciences. 27(2): e2021017, 2021. ') + '''</b> ''' +'</a><br><b>'+ self.tr('Author: <NAME>', 'Autor: <NAME>')+'''</b> </p>'''+ social_BW + '''</div> </div>''' return self.tr(self.txt_en, self.txt_pt) + footer def initAlgorithm(self, config=None): # 'INPUTS' self.addParameter( QgsProcessingParameterString( self.COC, self.tr('Coordinates of Optical Centers', 'Coordenadas dos Centros Ópticos'), defaultValue = '149867.058, 249817.768, 1.825; 149988.309, 249782.867, 1.962; 150055.018, 249757.128, 1.346; 150085.600, 249877.691, 1.559', multiLine = True ) ) self.addParameter( QgsProcessingParameterString( self.AZIMUTH, self.tr('Azimuths', 'Azimutes'), defaultValue = '''46°10'06.37”, 359°12'12.21”, 338°32'59.40”, 298°46'22.93”''', multiLine = True ) ) self.addParameter( QgsProcessingParameterString( self.ZENITH, self.tr('Zenith Angles', 'Ângulos Zenitais'), defaultValue = '''72°24'22.25”, 70°43'01.75", 74°17'54.17", 65°04'27.25"''', multiLine = True ) ) self.addParameter( QgsProcessingParameterBoolean( self.WEIGHT, self.tr('Use Weight Matrix (W)', 'Usar Matrix Peso (P)'), defaultValue = False ) ) # 'OUTPUT' self.addParameter( QgsProcessingParameterFileDestination( self.OUTPUT, self.tr('Adjusted 3D Coordinates', 'Coordenadas 3D Ajustadas'), fileFilter = 'CSV (*.csv)' ) ) self.addParameter( QgsProcessingParameterFileDestination( 'HTML', self.tr('Adjustment Report', 'Relatório de Ajustamento'), self.tr('HTML files (*.html)') ) ) self.addParameter( QgsProcessingParameterBoolean( self.OPEN, self.tr('Open output file after executing the algorithm', 'Abrir arquivo de saída com coordenadas 3D'), defaultValue= True ) ) def CosDir(self, Az, Z): k = sin(Z)*sin(Az) m = sin(Z)*cos(Az) n = cos(Z) return array([[k],[m],[n]]) def processAlgorithm(self, parameters, context, feedback): COs = self.parameterAsString( parameters, self.COC, context ) Azimutes = self.parameterAsString( parameters, self.AZIMUTH, context ) ÂngulosZenitais = self.parameterAsString( parameters, self.ZENITH, context ) usar_peso = self.parameterAsBool( parameters, self.WEIGHT, context ) abrir_arquivo = self.parameterAsBool( parameters, self.OPENOUTPUT, context ) output = self.parameterAsFileOutput( parameters, self.OUTPUT, context ) if output[-3:] != 'csv': output += '.csv' html_output = self.parameterAsFileOutput( parameters, self.HTML, context ) # Pontos Coords = String2CoordList(COs) # Azimutes (radianos) Az = [] for item in String2StringList(Azimutes): Az += [dms2dd(item)] Az = radians(array(Az)) # Ângulos Zenitais (radianos) Z = [] for item in String2StringList(ÂngulosZenitais): Z += [dms2dd(item)] Z = radians(array(Z)) # Validação dos dados de entrada if not (len(Coords) == len(Az) and len(Az) == len(Z)): raise 
QgsProcessingException(self.tr('Wrong number of parameters!', 'Número de parâmetros errado!')) else: n = len(Coords) # não deve haver valores nulos # ângulos entre 0 e 360 graus # Montagem do Vetor L L = [] for k in range(len(Coords)): L+= [[Coords[k][0]], [Coords[k][1]], [Coords[k][2]]] L = array(L) # Montagem da Matriz A e = 3*n p = 3 + n A = matrix(zeros([e, p])) for k in range(n): A[3*k:3*k+3, 0:3] = identity(3) A[3*k:3*k+3, 3+k] = -1*self.CosDir(Az[k], Z[k]) # Ajustamento MMQ X = pinv(A.T*A)*A.T*L V = A*X - L sigma2 = (V.T*V)/(e-p) SigmaX = sigma2[0,0]*pinv(A.T*A) if usar_peso: Ponto = array(X[0:3, :].T)[0] d = [] for coord in Coords: dist = norm(array(coord)-Ponto) d += [1/dist, 1/dist, 1/dist] P = diag(d) X = pinv(A.T*P*A)*A.T*P*L V = A*X - L sigma2 = (V.T*P*V)/(e-p) SigmaX = sigma2[0,0]*pinv(A.T*P*A) VAR = str(round(sigma2[0,0],5)) x = round(float(X[0, 0]),3) y = round(float(X[1, 0]),3) z = round(float(X[2, 0]),3) s_x = round(float(sqrt(SigmaX[0, 0])),3) s_y = round(float(sqrt(SigmaX[1, 1])),3) s_z = round(float(sqrt(SigmaX[2, 2])),3) # Slant Range slant_range = [] s_t = [] for k in range(len(Coords)): slant_range += [round(float(X[k+3, 0]),3)] s_t += [round(float(sqrt(SigmaX[k+3, k+3])),3)] # Resultados arq = open(output, 'w') arq.write('X,Y,Z,'+ self.tr('type', 'tipo') + '\n') arq.write('{},{},{},'.format(x,y,z) + self.tr('Adjusted 3D Coordinates', 'Coordenadas 3D Ajustadas') + '\n') for k in range(len(Coords)): arq.write('{},{},{},{}'.format(Coords[k][0],Coords[k][1],Coords[k][2], self.tr('Station', 'Estacao') + ' ' + str(k+1) ) + '\n') arq.close() texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"> <html> <head> <meta content="text/html; charset=ISO-8859-1" http-equiv="content-type"> <title>''' + self.tr('Estimate 3D Coordinates', str2HTML('Estimação de Coordenadas 3D')) + '''</title> <link rel = "icon" href = "https://github.com/LEOXINGU/lftools/blob/main/images/lftools.png?raw=true" type = "image/x-icon"> </head> <body style="color: rgb(0, 0, 0); background-color: rgb(255, 255, 204);" alink="#000099" link="#000099" vlink="#990099"> <p class="MsoNormal" style="text-align: center;" align="center"><b><span style="font-size: 12pt; line-height: 107%;">''' + self.tr('ESTIMATE 3D COORDINATES', 'ESTIMA&Ccedil;&Atilde;O DE COORDENADAS 3D') + '''<o:p></o:p></span></b></p> <p class="MsoNormal" style="text-align: center;" align="center"><i>''' + self.tr('Minimum Distance Method', 'M&eacute;todo das Dist&acirc;ncias M&iacute;nimas') + '''</i></p> <p class="MsoNormal" style="text-align: center;" align="center"><b><u>''' + self.tr('REPORT','RELAT&Oacute;RIO') + '''<o:p></o:p></u></b></p> <div> <table style="text-align: center; width: 100%;" border="1" cellpadding="0" cellspacing="0"> <tbody> <tr> <td width="50%"><b>''' + self.tr('Inputs', 'Dados de Entrada') + '''</b></td> <td width="50%"><b>'''+ self.tr('Adjustment','Ajustamento') + '''</b></td> </tr> <tr> <td style="text-align: center;"> <p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;">''' + self.tr('Coordinates of the Optical Centers','Coordenadas dos Centros &Oacute;pticos')+ '''</span><o:p></o:p></p> <div align="center"> <table class="MsoTableGrid" style="border: medium none ; border-collapse: collapse;" border="1" cellpadding="0" cellspacing="0"> <tbody> <tr style=""> <td style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;" valign="top" width="52"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" 
align="center"><i>'''+self.tr('Station', str2HTML('Estação')) + '''</i><o:p></o:p></p> </td> <td style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center"><i>X</i><o:p></o:p></p> </td> <td style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center"><i>Y</i><o:p></o:p></p> </td> <td style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center"><i>Z</i><o:p></o:p></p> </td> </tr> [tabela 1] </tbody> </table> </div> <p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;"></span></p> <p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;">''' + self.tr('Azimuths','Azimutes') + '''</span><o:p></o:p></p> <div align="center"> <table class="MsoTableGrid" style="border: medium none ; border-collapse: collapse;" border="0" cellpadding="0" cellspacing="0"> <tbody> [tabela 2] </tbody> </table> </div> <p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;"></span></p> <p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;">''' + self.tr('Zenith Angles', '&Acirc;ngulos Zenitais') + '''</span><o:p></o:p></p> <div align="center"> <table class="MsoTableGrid" style="border: medium none ; border-collapse: collapse;" border="0" cellpadding="0" cellspacing="0"> <tbody> [tabela 3] </tbody> </table> <p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;">''' + self.tr('Weight Matrix'+str2HTML('*'),'Matriz Peso'+str2HTML('*')) + ''': [PESO]</span><o:p></o:p></p> </div> </td> <td> <p class="MsoNormal" style="text-align: center;" align="center"><o:p>&nbsp;</o:p><span style="font-style: italic;">'''+ self.tr('Residuals (V)', 'Res&iacute;duos (V)') + '''</span><o:p></o:p></p> <div align="center"> <table class="MsoTableGrid" style="border: medium none ; border-collapse: collapse;" border="1" cellpadding="0" cellspacing="0"> <tbody> <tr style=""> <td style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;" valign="top" width="52"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center"><i>'''+self.tr('Station', str2HTML('Estação')) + '''</i><o:p></o:p></p> </td> <td style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center"><i>V_X</i><o:p></o:p></p> </td> <td style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; 
width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center"><i>V_Y</i><o:p></o:p></p> </td> <td style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center"><i>V_Z</i><o:p></o:p></p> </td> </tr> [tabela 4] </tbody> </table> </div> <p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;">''' + self.tr('Posteriori Variance', 'Vari&acirc;ncia a posteriori') + ''' &nbsp;</span>[VAR]<o:p></o:p></p> <p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;">''' + self.tr(str2HTML('Adjusted Coordinates, Slant Ranges and Precisions**'), str2HTML('Coordenas Ajustados, Distâncias e Precis&otilde;es**')) + '''</span><o:p></o:p></p> <div align="center"> <table class="MsoTableGrid" style="border: medium none ; width: 100.7pt; border-collapse: collapse;" border="1" cellpadding="0" cellspacing="0" width="134"> <tbody> <tr style=""> <td style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 38.9pt;" valign="top" width="52"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">X<o:p></o:p></p> </td> <td style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 28.75pt;" valign="top" width="38"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[X]<o:p></o:p></p> </td> <td style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 33.05pt;" valign="top" width="44"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[sX]<o:p></o:p></p> </td> </tr> <tr style=""> <td style="border-style: none solid solid; border-color: -moz-use-text-color windowtext windowtext; border-width: medium 1pt 1pt; padding: 0cm 5.4pt; width: 38.9pt;" valign="top" width="52"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">Y<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 28.75pt;" valign="top" width="38"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[Y]<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 33.05pt;" valign="top" width="44"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[sY]<o:p></o:p></p> </td> </tr> <tr style=""> <td style="border-style: none solid solid; border-color: -moz-use-text-color windowtext windowtext; border-width: medium 1pt 1pt; padding: 0cm 5.4pt; width: 38.9pt;" valign="top" width="52"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: 
normal;" align="center">Z<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 28.75pt;" valign="top" width="38"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[Z]<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 33.05pt;" valign="top" width="44"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[sZ]<o:p></o:p></p> </td> </tr> [SLANT_RANGE] </tbody> </table> </br> </div> </td> </tr> </tbody> </table> <p class="MsoNormal" style="text-align: left;" align="left"><i><span style="font-size: 10pt; line-height: 100%; color: rgb(127, 127, 127);">''' + self.tr(str2HTML('*')+'The inverse of the distances to the diagonal of the Weight Matrix is considered.', str2HTML('*')+'&Eacute; considerado o inverso das dist&acirc;ncias para a diagonal da Matriz Peso.') + ''' </br>''' + self.tr(str2HTML('**The unit of measurement of the adjusted coordinates is the same as the input coordinates.'), str2HTML('**A unidade de medida das coordenadas ajustadas é a mesma da coordenadas de entrada.')) + '''<o:p></o:p></span></i></p> </div> <footer"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: right;" align="right"><b>''' + self.tr('<NAME>', str2HTML('<NAME>')) + ''' </br>''' + self.tr('Cartographic Engineer', 'Eng. Cart&oacute;grafo') + '''<o:p></o:p></b></p> </br> <div align="right">'''+ Imgs().social_table_color + ''' </div> <o:p></o:p></b></p> </footer> </body> </html> ''' template0 = '''<tr style=""> <td style="border-style: none solid solid; border-color: -moz-use-text-color windowtext windowtext; border-width: medium 1pt 1pt; padding: 0cm 5.4pt; width: 39.3pt;" valign="top" width="52"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[STATION]<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[X]<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[Y]<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[Z]<o:p></o:p></p> </td> </tr> ''' template1 = '''<tr style=""> <td style="padding: 0cm 5.4pt; width: 460.2pt;" valign="top" width="614"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[SUBS]<o:p></o:p></p> </td> </tr> ''' template2 = '''<tr 
style=""> <td style="border-style: none solid solid; border-color: -moz-use-text-color windowtext windowtext; border-width: medium 1pt 1pt; padding: 0cm 5.4pt; width: 39.3pt;" valign="top" width="52"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[STATION]<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[V_x]<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[V_y]<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;" valign="top" width="61"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[V_z]<o:p></o:p></p> </td> </tr> ''' linha_slant_range = '''<tr style=""> <td style="border-style: none solid solid; border-color: -moz-use-text-color windowtext windowtext; border-width: medium 1pt 1pt; padding: 0cm 5.4pt; width: 38.9pt;" valign="top" width="52"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">t[VISADA]<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 28.75pt;" valign="top" width="38"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[tn]<o:p></o:p></p> </td> <td style="border-style: none solid solid none; border-color: -moz-use-text-color windowtext windowtext -moz-use-text-color; border-width: medium 1pt 1pt medium; padding: 0cm 5.4pt; width: 33.05pt;" valign="top" width="44"> <p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;" align="center">[s_tn]<o:p></o:p></p> </td> </tr>''' linhas_s_r = '' for k, t in enumerate(slant_range): tableRowN = linha_slant_range str_t = self.tr('{:,.3f}'.format(t), '{:,.3f}'.format(t).replace(',', 'X').replace('.', ',').replace('X', '.')) str_s_t = self.tr('{:,.3f}'.format(s_t[k]), '{:,.3f}'.format(s_t[k]).replace(',', 'X').replace('.', ',').replace('X', '.')) linhas_s_r += tableRowN.replace('[VISADA]', str(k+1)).replace('[tn]', str_t).replace('[s_tn]', str_s_t) # Preenchimento das tabelas table1 = '' for k, coord in enumerate(Coords): vx = coord[0] vy = coord[1] vz = coord[2] tableRowN = template0 itens = { '[X]' : self.tr('{:,.3f}'.format(vx), '{:,.3f}'.format(vx).replace(',', 'X').replace('.', ',').replace('X', '.')), '[Y]' : self.tr('{:,.3f}'.format(vy), '{:,.3f}'.format(vy).replace(',', 'X').replace('.', ',').replace('X', '.')), '[Z]' : self.tr('{:,.3f}'.format(vz), '{:,.3f}'.format(vz).replace(',', 'X').replace('.', ',').replace('X', '.')), '[STATION]' : str(k+1), } for item in itens: tableRowN = tableRowN.replace(item, itens[item]) table1 += 
tableRowN table2 = '' for azimute in String2StringList(Azimutes): tableRowN = template1 itens = { '[SUBS]' : self.tr(str2HTML(azimute), str2HTML(azimute).replace('.', ',')), } for item in itens: tableRowN = tableRowN.replace(item, itens[item]) table2 += tableRowN table3 = '' for zenite_ang in String2StringList(ÂngulosZenitais): tableRowN = template1 itens = { '[SUBS]' : self.tr(str2HTML(zenite_ang), str2HTML(zenite_ang).replace('.', ',')), } for item in itens: tableRowN = tableRowN.replace(item, itens[item]) table3 += tableRowN table4 = '' for k in range(len(Coords)): vx = V[3*k,0] vy = V[3*k+1,0] vz = V[3*k+2,0] tableRowN = template2 itens = { '[V_x]' : self.tr('{:,.3f}'.format(vx), '{:,.3f}'.format(vx).replace(',', 'X').replace('.', ',').replace('X', '.')), '[V_y]' : self.tr('{:,.3f}'.format(vy), '{:,.3f}'.format(vy).replace(',', 'X').replace('.', ',').replace('X', '.')), '[V_z]' : self.tr('{:,.3f}'.format(vz), '{:,.3f}'.format(vz).replace(',', 'X').replace('.', ',').replace('X', '.')), '[STATION]' : str(k+1), } for item in itens: tableRowN = tableRowN.replace(item, itens[item]) table4 += tableRowN texto = texto.replace('[tabela 1]', table1).replace('[tabela 2]', table2).replace('[tabela 3]', table3).replace('[tabela 4]', table4) texto = texto.replace('[PESO]', self.tr('Yes', 'Sim') if usar_peso else self.tr('No', str2HTML('Não'))) texto = texto.replace('[VAR]', VAR) strX = self.tr('{:,.3f}'.format(x), '{:,.3f}'.format(x).replace(',', 'X').replace('.', ',').replace('X', '.')) strY = self.tr('{:,.3f}'.format(y), '{:,.3f}'.format(y).replace(',', 'X').replace('.', ',').replace('X', '.')) strZ = self.tr('{:,.3f}'.format(z), '{:,.3f}'.format(z).replace(',', 'X').replace('.', ',').replace('X', '.')) str_S_X = self.tr('{:,.3f}'.format(s_x), '{:,.3f}'.format(s_x).replace(',', 'X').replace('.', ',').replace('X', '.')) str_S_Y = self.tr('{:,.3f}'.format(s_y), '{:,.3f}'.format(s_y).replace(',', 'X').replace('.', ',').replace('X', '.')) str_S_Z = self.tr('{:,.3f}'.format(s_z), '{:,.3f}'.format(s_z).replace(',', 'X').replace('.', ',').replace('X', '.')) texto = texto.replace('[X]', strX).replace('[Y]', strY).replace('[Z]', strZ).replace('[sX]', str_S_X).replace('[sY]', str_S_Y).replace('[sZ]', str_S_Z) texto = texto.replace('[SLANT_RANGE]', linhas_s_r) # Exportar HTML arq = open(html_output, 'w') arq.write(texto) arq.close() feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!')) feedback.pushInfo(self.tr('<NAME> - Eng Cart', '<NAME> - Cartographic Engineer')) Carregar = self.parameterAsBool( parameters, self.OPEN, context ) self.CAMINHO = output self.CARREGAR = Carregar return {self.OUTPUT: output, self.HTML: html_output} # Carregamento de arquivo de saída CAMINHO = '' CARREGAR = True def postProcessAlgorithm(self, context, feedback): if self.CARREGAR: vlayer = QgsVectorLayer(self.CAMINHO, self.tr('Adjusted 3D Coordinates', 'Coordenadas 3D Ajustadas'), "ogr") QgsProject.instance().addMapLayer(vlayer) return {}
[ "numpy.identity", "PyQt5.QtCore.QCoreApplication.translate", "numpy.sqrt", "numpy.linalg.pinv", "lftools.geocapt.topogeo.dms2dd", "numpy.diag", "numpy.array", "lftools.geocapt.topogeo.String2CoordList", "lftools.geocapt.imgs.Imgs", "lftools.geocapt.topogeo.String2StringList", "numpy.cos", "num...
[((1447, 1495), 'PyQt5.QtCore.QCoreApplication.translate', 'QCoreApplication.translate', (['"""Processing"""', 'string'], {}), "('Processing', string)\n", (1473, 1495), False, 'from PyQt5.QtCore import QCoreApplication, QVariant\n'), ((7050, 7056), 'numpy.cos', 'cos', (['Z'], {}), '(Z)\n', (7053, 7056), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((7072, 7094), 'numpy.array', 'array', (['[[k], [m], [n]]'], {}), '([[k], [m], [n]])\n', (7077, 7094), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((8128, 8149), 'lftools.geocapt.topogeo.String2CoordList', 'String2CoordList', (['COs'], {}), '(COs)\n', (8144, 8149), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((8217, 8244), 'lftools.geocapt.topogeo.String2StringList', 'String2StringList', (['Azimutes'], {}), '(Azimutes)\n', (8234, 8244), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((8385, 8419), 'lftools.geocapt.topogeo.String2StringList', 'String2StringList', (['ÂngulosZenitais'], {}), '(ÂngulosZenitais)\n', (8402, 8419), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((8980, 8988), 'numpy.array', 'array', (['L'], {}), '(L)\n', (8985, 8988), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((28907, 28934), 'lftools.geocapt.topogeo.String2StringList', 'String2StringList', (['Azimutes'], {}), '(Azimutes)\n', (28924, 28934), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((29296, 29330), 'lftools.geocapt.topogeo.String2StringList', 'String2StringList', (['ÂngulosZenitais'], {}), '(ÂngulosZenitais)\n', (29313, 29330), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((3070, 3076), 'lftools.geocapt.imgs.Imgs', 'Imgs', ([], {}), '()\n', (3074, 3076), False, 'from lftools.geocapt.imgs import Imgs\n'), ((6996, 7002), 'numpy.sin', 'sin', (['Z'], {}), '(Z)\n', (6999, 7002), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((7003, 7010), 'numpy.sin', 'sin', (['Az'], {}), '(Az)\n', (7006, 7010), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((7023, 7029), 'numpy.sin', 'sin', (['Z'], {}), '(Z)\n', (7026, 7029), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((7030, 7037), 'numpy.cos', 'cos', (['Az'], {}), '(Az)\n', (7033, 7037), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((8300, 8309), 'numpy.array', 'array', (['Az'], {}), '(Az)\n', (8305, 8309), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((8473, 8481), 'numpy.array', 'array', (['Z'], {}), '(Z)\n', (8478, 8481), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((9074, 9087), 'numpy.zeros', 'zeros', (['[e, p]'], {}), '([e, p])\n', (9079, 9087), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((9148, 9159), 'numpy.identity', 'identity', (['(3)'], {}), '(3)\n', (9156, 9159), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((9357, 9370), 
'numpy.linalg.pinv', 'pinv', (['(A.T * A)'], {}), '(A.T * A)\n', (9361, 9370), False, 'from numpy.linalg import pinv, norm\n'), ((9596, 9603), 'numpy.diag', 'diag', (['d'], {}), '(d)\n', (9600, 9603), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((8265, 8277), 'lftools.geocapt.topogeo.dms2dd', 'dms2dd', (['item'], {}), '(item)\n', (8271, 8277), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((8439, 8451), 'lftools.geocapt.topogeo.dms2dd', 'dms2dd', (['item'], {}), '(item)\n', (8445, 8451), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((9259, 9272), 'numpy.linalg.pinv', 'pinv', (['(A.T * A)'], {}), '(A.T * A)\n', (9263, 9272), False, 'from numpy.linalg import pinv, norm\n'), ((9412, 9430), 'numpy.array', 'array', (['X[0:3, :].T'], {}), '(X[0:3, :].T)\n', (9417, 9430), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((9736, 9753), 'numpy.linalg.pinv', 'pinv', (['(A.T * P * A)'], {}), '(A.T * P * A)\n', (9740, 9753), False, 'from numpy.linalg import pinv, norm\n'), ((9925, 9943), 'numpy.sqrt', 'sqrt', (['SigmaX[0, 0]'], {}), '(SigmaX[0, 0])\n', (9929, 9943), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((9974, 9992), 'numpy.sqrt', 'sqrt', (['SigmaX[1, 1]'], {}), '(SigmaX[1, 1])\n', (9978, 9992), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((10023, 10041), 'numpy.sqrt', 'sqrt', (['SigmaX[2, 2]'], {}), '(SigmaX[2, 2])\n', (10027, 10041), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((2401, 2426), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2416, 2426), False, 'import os\n'), ((22623, 22629), 'lftools.geocapt.imgs.Imgs', 'Imgs', ([], {}), '()\n', (22627, 22629), False, 'from lftools.geocapt.imgs import Imgs\n'), ((29037, 29054), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['azimute'], {}), '(azimute)\n', (29045, 29054), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((29433, 29453), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['zenite_ang'], {}), '(zenite_ang)\n', (29441, 29453), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((30715, 30730), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Não"""'], {}), "('Não')\n", (30723, 30730), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((9514, 9526), 'numpy.array', 'array', (['coord'], {}), '(coord)\n', (9519, 9526), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((9620, 9637), 'numpy.linalg.pinv', 'pinv', (['(A.T * P * A)'], {}), '(A.T * P * A)\n', (9624, 9637), False, 'from numpy.linalg import pinv, norm\n'), ((10235, 10261), 'numpy.sqrt', 'sqrt', (['SigmaX[k + 3, k + 3]'], {}), '(SigmaX[k + 3, k + 3])\n', (10239, 10261), False, 'from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag\n'), ((29056, 29073), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['azimute'], {}), '(azimute)\n', (29064, 29073), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((29455, 29475), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', 
(['zenite_ang'], {}), '(zenite_ang)\n', (29463, 29475), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((22472, 22490), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""<NAME>"""'], {}), "('<NAME>')\n", (22480, 22490), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((22106, 22219), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""**The unit of measurement of the adjusted coordinates is the same as the input coordinates."""'], {}), "(\n '**The unit of measurement of the adjusted coordinates is the same as the input coordinates.'\n )\n", (22114, 22219), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((22211, 22317), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""**A unidade de medida das coordenadas ajustadas é a mesma da coordenadas de entrada."""'], {}), "(\n '**A unidade de medida das coordenadas ajustadas é a mesma da coordenadas de entrada.'\n )\n", (22219, 22317), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((21882, 21895), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""*"""'], {}), "('*')\n", (21890, 21895), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((21980, 21993), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""*"""'], {}), "('*')\n", (21988, 21993), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((3762, 3787), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3777, 3787), False, 'import os\n'), ((17780, 17843), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Adjusted Coordinates, Slant Ranges and Precisions**"""'], {}), "('Adjusted Coordinates, Slant Ranges and Precisions**')\n", (17788, 17843), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((17845, 17909), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Coordenas Ajustados, Distâncias e Precis&otilde;es**"""'], {}), "('Coordenas Ajustados, Distâncias e Precis&otilde;es**')\n", (17853, 17909), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((16118, 16137), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Estação"""'], {}), "('Estação')\n", (16126, 16137), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((15335, 15348), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""*"""'], {}), "('*')\n", (15343, 15348), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((15363, 15376), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""*"""'], {}), "('*')\n", (15371, 15376), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((12809, 12828), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Estação"""'], {}), "('Estação')\n", (12817, 12828), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n'), ((10915, 10954), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Estimação de Coordenadas 3D"""'], {}), "('Estimação de Coordenadas 3D')\n", (10923, 10954), False, 'from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd\n')]
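# --- Editor's example (not part of the dataset rows; every numeric value below is
# made up for illustration). A self-contained numpy sketch of the adjustment solved
# in Estimate3dCoord.processAlgorithm above: each station with optical center C_k
# observes the unknown point P along the unit ray d_k = CosDir(Az_k, Z_k), giving
# the observation equation C_k = P - t_k * d_k with unknown slant range t_k.
# Stacking all stations yields L = A @ X with X = (x, y, z, t_1, ..., t_n),
# solved in the least-squares sense as X = pinv(A'A) A' L, exactly as in the tool.
import numpy as np

def cos_dir(az, z):
    # Direction cosines of a ray from azimuth and zenith angle (radians),
    # mirroring CosDir() above: (sinZ*sinAz, sinZ*cosAz, cosZ).
    return np.array([np.sin(z) * np.sin(az), np.sin(z) * np.cos(az), np.cos(z)])

stations = np.array([[0.0, 0.0, 1.5], [50.0, 0.0, 1.6], [50.0, 40.0, 1.4]])
az = np.radians([45.0, 315.0, 225.0])   # hypothetical azimuths
zen = np.radians([80.0, 82.0, 78.0])    # hypothetical zenith angles

n = len(stations)
A = np.zeros((3 * n, 3 + n))
L = stations.reshape(-1, 1)
for k in range(n):
    A[3 * k:3 * k + 3, 0:3] = np.eye(3)
    A[3 * k:3 * k + 3, 3 + k] = -cos_dir(az[k], zen[k])

# The three rays only approximately intersect; least squares finds the best point.
X = np.linalg.pinv(A.T @ A) @ A.T @ L   # (x, y, z, t_1, t_2, t_3)
print(X.ravel())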
import numpy as np
from sklearn.preprocessing import Imputer

# Represent the unknown value by np.nan in numpy
data_origin = [[30, 100], [20, 50], [35, np.nan], [25, 80], [30, 70], [40, 60]]

# Imputation with the mean value
imp_mean = Imputer(missing_values='NaN', strategy='mean')
imp_mean.fit(data_origin)
data_mean_imp = imp_mean.transform(data_origin)
print(data_mean_imp)

# Imputation with the median value
imp_median = Imputer(missing_values='NaN', strategy='median')
imp_median.fit(data_origin)
data_median_imp = imp_median.transform(data_origin)
print(data_median_imp)

# New samples
new = [[20, np.nan], [30, np.nan], [np.nan, 70], [np.nan, np.nan]]
new_mean_imp = imp_mean.transform(new)
print(new_mean_imp)

# Effects of discarding missing values and imputation
from sklearn import datasets
dataset = datasets.load_diabetes()
X_full, y = dataset.data, dataset.target

# Simulate a corrupted data set by adding 25% missing values
m, n = X_full.shape
m_missing = int(m * 0.25)
print(m, m_missing)

# Randomly select m_missing samples
np.random.seed(42)
missing_samples = np.array([True] * m_missing + [False] * (m - m_missing))
np.random.shuffle(missing_samples)

# For each missing sample, randomly select 1 out of n features
missing_features = np.random.randint(low=0, high=n, size=m_missing)
# Represent missing values by nan
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = np.nan

# Discard samples containing missing values
X_rm_missing = X_missing[~missing_samples, :]
y_rm_missing = y[~missing_samples]

# Estimate R^2 on the data set with missing samples removed
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=100)
score_rm_missing = cross_val_score(regressor, X_rm_missing, y_rm_missing).mean()
print('Score with the data set with missing samples removed: {0:.2f}'.format(score_rm_missing))

# Imputation with mean value
imp_mean = Imputer(missing_values='NaN', strategy='mean')
X_mean_imp = imp_mean.fit_transform(X_missing)
# Estimate R^2 on the data set with missing values replaced by the mean
regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=100)
score_mean_imp = cross_val_score(regressor, X_mean_imp, y).mean()
print('Score with the data set with missing values replaced by mean: {0:.2f}'.format(score_mean_imp))

# Estimate R^2 on the full data set
regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=500)
score_full = cross_val_score(regressor, X_full, y).mean()
print('Score with the full data set: {0:.2f}'.format(score_full))

# # Imputation with median value
# imp_median = Imputer(missing_values='NaN', strategy='median')
# X_median_imp = imp_median.fit_transform(X_missing)
# # Estimate R^2 on the data set with missing values replaced by the median
# regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=100)
# score_median_imp = cross_val_score(regressor, X_median_imp, y).mean()
# print('Score with the data set with missing values replaced by median: {0:.2f}'.format(score_median_imp))
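# --- Editor's note: sklearn.preprocessing.Imputer (used above) was deprecated in
# scikit-learn 0.20 and removed in 0.22. On current scikit-learn the same mean
# imputation would read as follows (kept commented out, like the block above):
# from sklearn.impute import SimpleImputer
# imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
# data_mean_imp = imp_mean.fit_transform(data_origin)
# print(data_mean_imp)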
[ "sklearn.ensemble.RandomForestRegressor", "numpy.where", "sklearn.preprocessing.Imputer", "sklearn.model_selection.cross_val_score", "numpy.array", "numpy.random.randint", "sklearn.datasets.load_diabetes", "numpy.random.seed", "numpy.random.shuffle" ]
[((311, 357), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'missing_values': '"""NaN"""', 'strategy': '"""mean"""'}), "(missing_values='NaN', strategy='mean')\n", (318, 357), False, 'from sklearn.preprocessing import Imputer\n'), ((502, 550), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'missing_values': '"""NaN"""', 'strategy': '"""median"""'}), "(missing_values='NaN', strategy='median')\n", (509, 550), False, 'from sklearn.preprocessing import Imputer\n'), ((912, 936), 'sklearn.datasets.load_diabetes', 'datasets.load_diabetes', ([], {}), '()\n', (934, 936), False, 'from sklearn import datasets\n'), ((1144, 1162), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1158, 1162), True, 'import numpy as np\n'), ((1181, 1237), 'numpy.array', 'np.array', (['([True] * m_missing + [False] * (m - m_missing))'], {}), '([True] * m_missing + [False] * (m - m_missing))\n', (1189, 1237), True, 'import numpy as np\n'), ((1238, 1272), 'numpy.random.shuffle', 'np.random.shuffle', (['missing_samples'], {}), '(missing_samples)\n', (1255, 1272), True, 'import numpy as np\n'), ((1356, 1404), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'n', 'size': 'm_missing'}), '(low=0, high=n, size=m_missing)\n', (1373, 1404), True, 'import numpy as np\n'), ((1835, 1905), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)', 'max_depth': '(10)', 'n_estimators': '(100)'}), '(random_state=42, max_depth=10, n_estimators=100)\n', (1856, 1905), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2125, 2171), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'missing_values': '"""NaN"""', 'strategy': '"""mean"""'}), "(missing_values='NaN', strategy='mean')\n", (2132, 2171), False, 'from sklearn.preprocessing import Imputer\n'), ((2291, 2361), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)', 'max_depth': '(10)', 'n_estimators': '(100)'}), '(random_state=42, max_depth=10, n_estimators=100)\n', (2312, 2361), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2580, 2650), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)', 'max_depth': '(10)', 'n_estimators': '(500)'}), '(random_state=42, max_depth=10, n_estimators=500)\n', (2601, 2650), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1925, 1979), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['regressor', 'X_rm_missing', 'y_rm_missing'], {}), '(regressor, X_rm_missing, y_rm_missing)\n', (1940, 1979), False, 'from sklearn.model_selection import cross_val_score\n'), ((2379, 2420), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['regressor', 'X_mean_imp', 'y'], {}), '(regressor, X_mean_imp, y)\n', (2394, 2420), False, 'from sklearn.model_selection import cross_val_score\n'), ((2664, 2701), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['regressor', 'X_full', 'y'], {}), '(regressor, X_full, y)\n', (2679, 2701), False, 'from sklearn.model_selection import cross_val_score\n'), ((1475, 1500), 'numpy.where', 'np.where', (['missing_samples'], {}), '(missing_samples)\n', (1483, 1500), True, 'import numpy as np\n')]
import cv2 import numpy as np import os import pickle import torch from torch.utils.data import Dataset from torchvision.transforms import Normalize import config class PW3DEvalDataset(Dataset): def __init__(self, pw3d_dir_path, img_wh): super(PW3DEvalDataset, self).__init__() # Paths cropped_frames_dir = os.path.join(pw3d_dir_path, 'cropped_frames') # Data data = np.load(os.path.join(pw3d_dir_path, '3dpw_test.npz')) self.frame_fnames = data['imgname'] self.pose = data['pose'] self.shape = data['shape'] self.gender = data['gender'] self.cropped_frames_dir = cropped_frames_dir self.img_wh = img_wh self.normalize_img = Normalize(mean=config.IMG_NORM_MEAN, std=config.IMG_NORM_STD) def __len__(self): return len(self.frame_fnames) def __getitem__(self, index): if torch.is_tensor(index): index = index.tolist() # Inputs fname = self.frame_fnames[index] frame_path = os.path.join(self.cropped_frames_dir, fname) img = cv2.cvtColor(cv2.imread(frame_path), cv2.COLOR_BGR2RGB) img = cv2.resize(img, (self.img_wh, self.img_wh), interpolation=cv2.INTER_LINEAR) img = np.transpose(img, [2, 0, 1])/255.0 # Targets pose = self.pose[index] shape = self.shape[index] gender = self.gender[index] img = torch.from_numpy(img).float() pose = torch.from_numpy(pose).float() shape = torch.from_numpy(shape).float() input = self.normalize_img(img) return {'input': input, 'vis_img': img, 'pose': pose, 'shape': shape, 'fname': fname, 'gender': gender}
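# --- Editor's usage sketch: the 3DPW directory below is a placeholder path and
# img_wh=256 just an example value; config.IMG_NORM_MEAN / config.IMG_NORM_STD come
# from this repository's own config module. Dict samples like the one returned by
# __getitem__ batch fine with PyTorch's default collate function:
# from torch.utils.data import DataLoader
# dataset = PW3DEvalDataset('/path/to/3dpw', img_wh=256)
# loader = DataLoader(dataset, batch_size=8, shuffle=False, num_workers=4)
# for batch in loader:
#     inputs = batch['input']                      # (B, 3, 256, 256), normalized
#     pose, shape = batch['pose'], batch['shape']  # SMPL ground-truth targets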
[ "os.path.join", "torch.from_numpy", "torch.is_tensor", "torchvision.transforms.Normalize", "cv2.resize", "numpy.transpose", "cv2.imread" ]
[((339, 384), 'os.path.join', 'os.path.join', (['pw3d_dir_path', '"""cropped_frames"""'], {}), "(pw3d_dir_path, 'cropped_frames')\n", (351, 384), False, 'import os\n'), ((731, 792), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': 'config.IMG_NORM_MEAN', 'std': 'config.IMG_NORM_STD'}), '(mean=config.IMG_NORM_MEAN, std=config.IMG_NORM_STD)\n', (740, 792), False, 'from torchvision.transforms import Normalize\n'), ((940, 962), 'torch.is_tensor', 'torch.is_tensor', (['index'], {}), '(index)\n', (955, 962), False, 'import torch\n'), ((1079, 1123), 'os.path.join', 'os.path.join', (['self.cropped_frames_dir', 'fname'], {}), '(self.cropped_frames_dir, fname)\n', (1091, 1123), False, 'import os\n'), ((1209, 1284), 'cv2.resize', 'cv2.resize', (['img', '(self.img_wh, self.img_wh)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (self.img_wh, self.img_wh), interpolation=cv2.INTER_LINEAR)\n', (1219, 1284), False, 'import cv2\n'), ((424, 468), 'os.path.join', 'os.path.join', (['pw3d_dir_path', '"""3dpw_test.npz"""'], {}), "(pw3d_dir_path, '3dpw_test.npz')\n", (436, 468), False, 'import os\n'), ((1152, 1174), 'cv2.imread', 'cv2.imread', (['frame_path'], {}), '(frame_path)\n', (1162, 1174), False, 'import cv2\n'), ((1299, 1327), 'numpy.transpose', 'np.transpose', (['img', '[2, 0, 1]'], {}), '(img, [2, 0, 1])\n', (1311, 1327), True, 'import numpy as np\n'), ((1470, 1491), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (1486, 1491), False, 'import torch\n'), ((1515, 1537), 'torch.from_numpy', 'torch.from_numpy', (['pose'], {}), '(pose)\n', (1531, 1537), False, 'import torch\n'), ((1562, 1585), 'torch.from_numpy', 'torch.from_numpy', (['shape'], {}), '(shape)\n', (1578, 1585), False, 'import torch\n')]
#!/usr/bin/env python
# coding: utf-8

from molmap.model import RegressionEstimator, MultiClassEstimator, MultiLabelEstimator
from molmap import loadmap  # note: the chembench dataset module imported below is the one actually used
from molmap.show import imshow_wrap
import molmap

from sklearn.utils import shuffle
from joblib import load, dump
import numpy as np
import pandas as pd
import os

from chembench import dataset

gpuid = 4
result_file = 'bace_bbbp_hiv.csv'
random_seeds = [2, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]
file_path = "/raid/shenwanxiang/08_Robustness/dataset_induces/split"  # split
tmp_feature_dir = '/raid/shenwanxiang/08_Robustness/tempignore'  # feature path

bace = dataset.load_BACE()
bbbp = dataset.load_BBBP()
hiv = dataset.load_HIV()

with open(result_file, 'w+') as f:
    f.write('task_name, seed, valid_auc, test_auc\n')

res = []
for data in [bace, bbbp, hiv]:
    task_name = data.task_name
    task_type = data.task_type

    X1_name = os.path.join(tmp_feature_dir, 'X1_%s.data' % task_name)
    X2_name = os.path.join(tmp_feature_dir, 'X2_%s.data' % task_name)
    X1 = load(X1_name)
    X2 = load(X2_name)
    fmap_shape1 = X1.shape[1:]
    fmap_shape2 = X2.shape[1:]

    Y = data.y
    df = data.df
    n_outputs = Y.shape[1]

    for seed in random_seeds:
        train_path = os.path.join(file_path, task_name, "%s" % seed, "train.csv")
        valid_path = os.path.join(file_path, task_name, "%s" % seed, "val.csv")
        test_path = os.path.join(file_path, task_name, "%s" % seed, "test.csv")

        train_df = pd.read_csv(train_path)
        valid_df = pd.read_csv(valid_path)
        test_df = pd.read_csv(test_path)

        train_idx = df[df.smiles.isin(train_df.smiles)].index.tolist()
        valid_idx = df[df.smiles.isin(valid_df.smiles)].index.tolist()
        test_idx = df[df.smiles.isin(test_df.smiles)].index.tolist()
        print(len(train_idx), len(valid_idx), len(test_idx))

        X_train = (X1[train_idx], X2[train_idx])
        y_train = Y[train_idx]
        X_valid = (X1[valid_idx], X2[valid_idx])
        y_valid = Y[valid_idx]
        X_test = (X1[test_idx], X2[test_idx])
        y_test = Y[test_idx]

        if task_name == 'HIV':
            patience = 20  # speed reason only, early stopping looking ahead
        else:
            patience = 50

        clf = MultiLabelEstimator(n_outputs,
                                  fmap_shape1, fmap_shape2,
                                  batch_size = 128,
                                  dense_layers = [256, 128, 32],
                                  gpuid = gpuid,
                                  patience = patience,
                                  monitor = 'val_auc',
                                  )
        clf.fit(X_train, y_train, X_valid, y_valid)

        train_aucs = clf._performance.evaluate(X_train, y_train)
        valid_aucs = clf._performance.evaluate(X_valid, y_valid)
        test_aucs = clf._performance.evaluate(X_test, y_test)

        train_auc = np.nanmean(train_aucs)
        valid_auc = np.nanmean(valid_aucs)
        test_auc = np.nanmean(test_aucs)

        final_res = {'seed': seed,
                     'task_name': task_name,
                     'train_auc': train_auc,
                     'valid_auc': valid_auc,
                     'test_auc': test_auc,}
        print(final_res)

        with open(result_file, 'a+') as f:
            f.write('%s, %s, %s, %s\n' % (task_name, seed, valid_auc, test_auc))

        res.append(final_res)

pd.DataFrame(res).to_csv(result_file + '.bak.csv')
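# --- Editor's sketch: summarizing the per-seed results this script appends to
# result_file. Column names match the header line written above; skipinitialspace
# handles the space written after each comma.
# import pandas as pd
# df = pd.read_csv('bace_bbbp_hiv.csv', skipinitialspace=True)
# print(df.groupby('task_name')['test_auc'].agg(['mean', 'std']))  # AUC over the 10 seeds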
[ "molmap.model.MultiLabelEstimator", "pandas.read_csv", "os.path.join", "chembench.dataset.load_BACE", "numpy.nanmean", "joblib.load", "pandas.DataFrame", "chembench.dataset.load_HIV", "chembench.dataset.load_BBBP" ]
[((635, 654), 'chembench.dataset.load_BACE', 'dataset.load_BACE', ([], {}), '()\n', (652, 654), False, 'from chembench import dataset\n'), ((662, 681), 'chembench.dataset.load_BBBP', 'dataset.load_BBBP', ([], {}), '()\n', (679, 681), False, 'from chembench import dataset\n'), ((688, 706), 'chembench.dataset.load_HIV', 'dataset.load_HIV', ([], {}), '()\n', (704, 706), False, 'from chembench import dataset\n'), ((924, 979), 'os.path.join', 'os.path.join', (['tmp_feature_dir', "('X1_%s.data' % task_name)"], {}), "(tmp_feature_dir, 'X1_%s.data' % task_name)\n", (936, 979), False, 'import os\n'), ((994, 1049), 'os.path.join', 'os.path.join', (['tmp_feature_dir', "('X2_%s.data' % task_name)"], {}), "(tmp_feature_dir, 'X2_%s.data' % task_name)\n", (1006, 1049), False, 'import os\n'), ((1059, 1072), 'joblib.load', 'load', (['X1_name'], {}), '(X1_name)\n', (1063, 1072), False, 'from joblib import load, dump\n'), ((1082, 1095), 'joblib.load', 'load', (['X2_name'], {}), '(X2_name)\n', (1086, 1095), False, 'from joblib import load, dump\n'), ((1274, 1334), 'os.path.join', 'os.path.join', (['file_path', 'task_name', "('%s' % seed)", '"""train.csv"""'], {}), "(file_path, task_name, '%s' % seed, 'train.csv')\n", (1286, 1334), False, 'import os\n'), ((1355, 1413), 'os.path.join', 'os.path.join', (['file_path', 'task_name', "('%s' % seed)", '"""val.csv"""'], {}), "(file_path, task_name, '%s' % seed, 'val.csv')\n", (1367, 1413), False, 'import os\n'), ((1433, 1492), 'os.path.join', 'os.path.join', (['file_path', 'task_name', "('%s' % seed)", '"""test.csv"""'], {}), "(file_path, task_name, '%s' % seed, 'test.csv')\n", (1445, 1492), False, 'import os\n'), ((1512, 1535), 'pandas.read_csv', 'pd.read_csv', (['train_path'], {}), '(train_path)\n', (1523, 1535), True, 'import pandas as pd\n'), ((1555, 1578), 'pandas.read_csv', 'pd.read_csv', (['valid_path'], {}), '(valid_path)\n', (1566, 1578), True, 'import pandas as pd\n'), ((1597, 1619), 'pandas.read_csv', 'pd.read_csv', (['test_path'], {}), '(test_path)\n', (1608, 1619), True, 'import pandas as pd\n'), ((2318, 2479), 'molmap.model.MultiLabelEstimator', 'MultiLabelEstimator', (['n_outputs', 'fmap_shape1', 'fmap_shape2'], {'batch_size': '(128)', 'dense_layers': '[256, 128, 32]', 'gpuid': 'gpuid', 'patience': 'patience', 'monitor': '"""val_auc"""'}), "(n_outputs, fmap_shape1, fmap_shape2, batch_size=128,\n dense_layers=[256, 128, 32], gpuid=gpuid, patience=patience, monitor=\n 'val_auc')\n", (2337, 2479), False, 'from molmap.model import RegressionEstimator, MultiClassEstimator, MultiLabelEstimator\n'), ((3083, 3105), 'numpy.nanmean', 'np.nanmean', (['train_aucs'], {}), '(train_aucs)\n', (3093, 3105), True, 'import numpy as np\n'), ((3126, 3148), 'numpy.nanmean', 'np.nanmean', (['valid_aucs'], {}), '(valid_aucs)\n', (3136, 3148), True, 'import numpy as np\n'), ((3168, 3189), 'numpy.nanmean', 'np.nanmean', (['test_aucs'], {}), '(test_aucs)\n', (3178, 3189), True, 'import numpy as np\n'), ((3631, 3648), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (3643, 3648), True, 'import pandas as pd\n')]
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from facial.util import getData, y2indicator, error_rate, init_weight_and_bias
from sklearn.utils import shuffle


class HiddenLayer(object):
    def __init__(self, M1, M2, an_id):
        self.id = an_id
        self.M1 = M1
        self.M2 = M2
        W, b = init_weight_and_bias(M1, M2)
        self.W = tf.Variable(W.astype(np.float32))
        self.b = tf.Variable(b.astype(np.float32))
        self.params = [self.W, self.b]

    def forward(self, X):
        return tf.nn.relu(tf.matmul(X, self.W) + self.b)


class ANN(object):
    def __init__(self, hidden_layer_sizes):
        self.hidden_layer_sizes = hidden_layer_sizes

    def fit(self, X, Y, learning_rate=1e-2, mu=0.99, decay=0.999, reg=1e-3, epochs=10, batch_size=100, show_fig=False):
        K = len(set(Y))

        # Make a validation set
        X, Y = shuffle(X, Y)
        X = X.astype(np.float32)
        Y = y2indicator(Y).astype(np.float32)
        Xvalid, Yvalid = X[-1000:], Y[-1000:]
        Yvalid_flat = np.argmax(Yvalid, axis=1)  # for calculating error rate
        X, Y = X[:-1000], Y[:-1000]

        # Initialize hidden layers
        N, D = X.shape
        self.hidden_layers = []
        M1 = D
        count = 0
        for m2 in self.hidden_layer_sizes:
            h = HiddenLayer(M1, m2, count)
            self.hidden_layers.append(h)
            M1 = m2
            count += 1
        W, b = init_weight_and_bias(M1, K)  # latest M1 from the for loop
        self.W = tf.Variable(W.astype(np.float32))
        self.b = tf.Variable(b.astype(np.float32))

        # collect params for later use
        self.params = [self.W, self.b]
        for h in self.hidden_layers:
            self.params += h.params

        tfX = tf.placeholder(tf.float32, shape=(None, D), name='X')
        tfT = tf.placeholder(tf.float32, shape=(None, K), name='T')
        act = self.forward(tfX)

        rcost = reg * sum([tf.nn.l2_loss(p) for p in self.params])
        cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(logits=act, labels=tfT)
        ) + rcost
        prediction = self.predict(tfX)
        train_op = tf.train.RMSPropOptimizer(learning_rate, decay=decay, momentum=mu).minimize(cost)

        n_batches = N // batch_size
        costs = []
        init = tf.global_variables_initializer()
        with tf.Session() as session:
            session.run(init)
            for i in range(epochs):
                X, Y = shuffle(X, Y)
                for j in range(n_batches):
                    Xbatch = X[j * batch_size:(j * batch_size + batch_size)]
                    Ybatch = Y[j * batch_size:(j * batch_size + batch_size)]
                    session.run(train_op, feed_dict={tfX: Xbatch, tfT: Ybatch})

                    if j % 20 == 0:
                        c = session.run(cost, feed_dict={tfX: Xvalid, tfT: Yvalid})
                        costs.append(c)

                        p = session.run(prediction, feed_dict={tfX: Xvalid, tfT: Yvalid})
                        e = error_rate(Yvalid_flat, p)
                        print("i:", i, "j:", j, "nb:", n_batches, "cost:", c, "error rate:", e)

        if show_fig:
            plt.plot(costs)
            plt.show()

    def forward(self, X):
        Z = X
        for h in self.hidden_layers:
            Z = h.forward(Z)
        return tf.matmul(Z, self.W) + self.b

    def predict(self, X):
        act = self.forward(X)
        return tf.argmax(act, 1)


if __name__ == '__main__':
    X, Y = getData()
    model = ANN([2000, 1000, 500])
    model.fit(X, Y, show_fig=True)
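# --- Editor's sketch: facial.util is repo-specific and not shown here. From how
# fit() uses them, y2indicator is presumably a one-hot encoder and error_rate a
# misclassification rate; numpy equivalents consistent with that usage would be:
# def y2indicator(y):
#     y = np.asarray(y, dtype=np.int32)
#     ind = np.zeros((len(y), y.max() + 1), dtype=np.float32)
#     ind[np.arange(len(y)), y] = 1          # one-hot row per sample
#     return ind
# def error_rate(targets, predictions):
#     return np.mean(targets != predictions)  # fraction misclassified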
[ "tensorflow.train.RMSPropOptimizer", "facial.util.init_weight_and_bias", "tensorflow.placeholder", "sklearn.utils.shuffle", "tensorflow.Session", "numpy.argmax", "matplotlib.pyplot.plot", "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "tensorflow.global_variables_initializer", "tensorflow.n...
[((3672, 3681), 'facial.util.getData', 'getData', ([], {}), '()\n', (3679, 3681), False, 'from facial.util import getData, y2indicator, error_rate, init_weight_and_bias\n'), ((337, 365), 'facial.util.init_weight_and_bias', 'init_weight_and_bias', (['M1', 'M2'], {}), '(M1, M2)\n', (357, 365), False, 'from facial.util import getData, y2indicator, error_rate, init_weight_and_bias\n'), ((902, 915), 'sklearn.utils.shuffle', 'shuffle', (['X', 'Y'], {}), '(X, Y)\n', (909, 915), False, 'from sklearn.utils import shuffle\n'), ((1063, 1088), 'numpy.argmax', 'np.argmax', (['Yvalid'], {'axis': '(1)'}), '(Yvalid, axis=1)\n', (1072, 1088), True, 'import numpy as np\n'), ((1464, 1491), 'facial.util.init_weight_and_bias', 'init_weight_and_bias', (['M1', 'K'], {}), '(M1, K)\n', (1484, 1491), False, 'from facial.util import getData, y2indicator, error_rate, init_weight_and_bias\n'), ((1791, 1844), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, D)', 'name': '"""X"""'}), "(tf.float32, shape=(None, D), name='X')\n", (1805, 1844), True, 'import tensorflow as tf\n'), ((1859, 1912), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, K)', 'name': '"""T"""'}), "(tf.float32, shape=(None, K), name='T')\n", (1873, 1912), True, 'import tensorflow as tf\n'), ((2352, 2385), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2383, 2385), True, 'import tensorflow as tf\n'), ((3614, 3631), 'tensorflow.argmax', 'tf.argmax', (['act', '(1)'], {}), '(act, 1)\n', (3623, 3631), True, 'import tensorflow as tf\n'), ((2399, 2411), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2409, 2411), True, 'import tensorflow as tf\n'), ((3352, 3367), 'matplotlib.pyplot.plot', 'plt.plot', (['costs'], {}), '(costs)\n', (3360, 3367), True, 'import matplotlib.pyplot as plt\n'), ((3380, 3390), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3388, 3390), True, 'import matplotlib.pyplot as plt\n'), ((3512, 3532), 'tensorflow.matmul', 'tf.matmul', (['Z', 'self.W'], {}), '(Z, self.W)\n', (3521, 3532), True, 'import tensorflow as tf\n'), ((560, 580), 'tensorflow.matmul', 'tf.matmul', (['X', 'self.W'], {}), '(X, self.W)\n', (569, 580), True, 'import tensorflow as tf\n'), ((961, 975), 'facial.util.y2indicator', 'y2indicator', (['Y'], {}), '(Y)\n', (972, 975), False, 'from facial.util import getData, y2indicator, error_rate, init_weight_and_bias\n'), ((2056, 2122), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'act', 'labels': 'tfT'}), '(logits=act, labels=tfT)\n', (2098, 2122), True, 'import tensorflow as tf\n'), ((2199, 2265), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['learning_rate'], {'decay': 'decay', 'momentum': 'mu'}), '(learning_rate, decay=decay, momentum=mu)\n', (2224, 2265), True, 'import tensorflow as tf\n'), ((2513, 2526), 'sklearn.utils.shuffle', 'shuffle', (['X', 'Y'], {}), '(X, Y)\n', (2520, 2526), False, 'from sklearn.utils import shuffle\n'), ((1973, 1989), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['p'], {}), '(p)\n', (1986, 1989), True, 'import tensorflow as tf\n'), ((3165, 3191), 'facial.util.error_rate', 'error_rate', (['Yvalid_flat', 'p'], {}), '(Yvalid_flat, p)\n', (3175, 3191), False, 'from facial.util import getData, y2indicator, error_rate, init_weight_and_bias\n')]
import numpy as np import tensorflow as tf import scipy import lib_gcnn.graph as graph class GraphCNN(object): """ A graph CNN for text classification. Composed of graph convolutional + max-pooling layer(s) and a softmax layer. filter_name = Filter name (i.e. "chebyshev", "spline", "fourier") L = List of graph Laplacians. K = List of filter sizes i.e. support sizes (no. of hops) (Polynomial orders for Chebyshev; K[i] = L[i].shape[0] for non-param Fourier) F = List of no. of features (per filter). P = List of pooling sizes (per filter). FC = List of fully-connected layers. Paper for Chebyshev Filter: https://arxiv.org/abs/1606.09375 Paper for Spline Filter: https://arxiv.org/abs/1312.6203 Code adapted from https://github.com/mdeff/cnn_graph """ def __init__(self, filter_name, L, K, F, P, FC, batch_size, num_vertices, num_classes, l2_reg_lambda): # Sanity checks assert len(L) >= len(F) == len(K) == len(P) # verify consistency w.r.t. the no. of GCLs assert np.all(np.array(P) >= 1) # all pool sizes >= 1 p_log2 = np.where(np.array(P) > 1, np.log2(P), 0) assert np.all(np.mod(p_log2, 1) == 0) # all pool sizes > 1 should be powers of 2 assert len(L) >= 1 + np.sum(p_log2) # enough coarsening levels for pool sizes # Retrieve convolutional filter assert filter_name == "chebyshev" or filter_name == "spline" or filter_name == "fourier" self.graph_conv = getattr(self, "graph_conv_" + filter_name) # Placeholders for input, output and dropout self.input_x = tf.placeholder(tf.float32, [batch_size, num_vertices], name="input_x") self.input_y = tf.placeholder(tf.int32, [batch_size], name="input_y") self.train_flag = tf.placeholder(tf.bool, name="train_flag") self.dropout_keep_prob = tf.placeholder_with_default(1.0, shape=[], name="dropout_keep_prob") # Keeping track of L2 regularization loss l2_loss = tf.constant(0.0) # Keep the useful Laplacians only (may be zero) M_0 = L[0].shape[0] j = 0 L_tmp = [] for p_i in P: L_tmp.append(L[j]) j += int(np.log2(p_i)) if p_i > 1 else 0 L = L_tmp # Expand dims for convolution operation x = tf.expand_dims(self.input_x, 2) # B x V x F=1 # Graph convolution + max-pool layer(s) for i in range(len(K)): with tf.variable_scope("conv-maxpool-{}".format(i)): with tf.variable_scope("conv-{}-{}".format(K[i], F[i])): # Graph convolution operation x = self.graph_conv(x, L[i], K[i], F[i]) # Add bias & apply non-linearity b = tf.Variable(tf.constant(0.1, shape=[1, 1, F[i]]), name="b") x = tf.nn.relu(x + b, name="relu") with tf.variable_scope("maxpool-{}".format(P[i])): # Graph max-pooling operation x = self.graph_max_pool(x, P[i]) # Add dropout with tf.variable_scope("dropout"): x = tf.nn.dropout(x, self.dropout_keep_prob) # Reshape x for fully-connected layers with tf.variable_scope("reshape"): B, V, F = x.get_shape() B, V, F = int(B), int(V), int(F) x = tf.reshape(x, [B, V * F]) # Add fully-connected layers (if any) for i, num_units in enumerate(FC): with tf.variable_scope("fc-{}-{}".format(i, num_units)): W = tf.get_variable("W", shape=[x.get_shape().as_list()[1], num_units], initializer=tf.contrib.layers.xavier_initializer()) b = tf.Variable(tf.constant(0.1, shape=[num_units]), name="b") l2_loss += tf.nn.l2_loss(W) x = tf.nn.xw_plus_b(x, W, b) x = tf.layers.batch_normalization(x, training=self.train_flag) x = tf.nn.relu(x) x = tf.nn.dropout(x, self.dropout_keep_prob) # Final (unnormalized) scores and predictions with tf.variable_scope("output"): W = tf.get_variable("W", shape=[x.get_shape().as_list()[1], num_classes], initializer=tf.contrib.layers.xavier_initializer()) b = 
tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b") l2_loss += tf.nn.l2_loss(W) self.scores = tf.nn.xw_plus_b(x, W, b, name="scores") self.predictions = tf.argmax(self.scores, 1, name="predictions") self.predictions = tf.cast(self.predictions, tf.int32) # Calculate mean cross-entropy loss with tf.variable_scope("loss"): losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y) self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss # Calculate accuracy with tf.variable_scope("accuracy"): correct_predictions = tf.equal(self.predictions, self.input_y) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") def graph_conv_chebyshev(self, x, L, K, F_out): """ Graph convolutional operation. """ # K = Chebyshev polynomial order & support size # F_out = No. of output features (per vertex) # B = Batch size # V = No. of vertices # F_in = No. of input features (per vertex) B, V, F_in = x.get_shape() B, V, F_in = int(B), int(V), int(F_in) # Rescale Laplacian and store as a TF sparse tensor (copy to not modify the shared L) L = scipy.sparse.csr_matrix(L) L = graph.rescale_L(L, lmax=2) L = L.tocoo() indices = np.column_stack((L.row, L.col)) L = tf.SparseTensor(indices, L.data, L.shape) L = tf.sparse_reorder(L) L = tf.cast(L, tf.float32) # Transform to Chebyshev basis x0 = tf.transpose(x, perm=[1, 2, 0]) # V x F_in x B x0 = tf.reshape(x0, [V, F_in * B]) # V x F_in*B x = tf.expand_dims(x0, 0) # 1 x V x F_in*B def concat(x, x_): x_ = tf.expand_dims(x_, 0) # 1 x V x F_in*B return tf.concat([x, x_], axis=0) # K x V x F_in*B if K > 1: x1 = tf.sparse_tensor_dense_matmul(L, x0) x = concat(x, x1) for k in range(2, K): x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0 # V x F_in*B x = concat(x, x2) x0, x1 = x1, x2 x = tf.reshape(x, [K, V, F_in, B]) # K x V x F_in x B x = tf.transpose(x, perm=[3, 1, 2, 0]) # B x V x F_in x K x = tf.reshape(x, [B * V, F_in * K]) # B*V x F_in*K # Compose linearly F_in features to get F_out features W = tf.Variable(tf.truncated_normal([F_in * K, F_out], stddev=0.1), name="W") x = tf.matmul(x, W) # B*V x F_out x = tf.reshape(x, [B, V, F_out]) # B x V x F_out return x def graph_conv_spline(self, x, L, K, F_out): """ Graph convolution operation. """ B, V, F_in = x.get_shape() B, V, F_in = int(B), int(V), int(F_in) # Fourier basis lamb, U = graph.fourier(L) U = tf.constant(U.T, dtype=tf.float32) # V x V # Spline basis basis = self.bspline_basis(K, lamb, degree=3) # V x K basis = tf.constant(basis, dtype=tf.float32) # Weight multiplication W = tf.Variable(tf.truncated_normal([K, F_in * F_out], stddev=0.1), name="W") W = tf.matmul(basis, W) # V x F_out*F_in W = tf.reshape(W, [V, F_out, F_in]) return self.filter_in_fourier(x, L, K, F_out, U, W) def graph_conv_fourier(self, x, L, K, F_out): """ Graph convolution operation. """ assert K == L.shape[0] # artificial but useful to compute number of parameters B, V, F_in = x.get_shape() B, V, F_in = int(B), int(V), int(F_in) # Fourier basis _, U = graph.fourier(L) U = tf.constant(U.T, dtype=tf.float32) # Weights W = tf.Variable(tf.truncated_normal([V, F_out, F_in], stddev=0.1), name="W") return self.filter_in_fourier(x, L, K, F_out, U, W) def graph_max_pool(self, x, p): """ Graph max pooling operation. p must be 1 or a power of 2. 
""" if p > 1: x = tf.expand_dims(x, 3) # B x V x F x 1 x = tf.nn.max_pool(x, ksize=[1, p, 1, 1], strides=[1, p, 1, 1], padding="SAME") return tf.squeeze(x, [3]) # B x V/p x F else: return x def filter_in_fourier(self, x, L, K, F_out, U, W): B, V, F_in = x.get_shape() B, V, F_in = int(B), int(V), int(F_in) x = tf.transpose(x, perm=[1, 2, 0]) # V x F_in x B # Transform to Fourier domain x = tf.reshape(x, [V, F_in * B]) # V x F_in*B x = tf.matmul(U, x) # V x F_in*B x = tf.reshape(x, [V, F_in, B]) # V x F_in x B # Filter x = tf.matmul(W, x) # for each feature x = tf.transpose(x) # B x F_out x V x = tf.reshape(x, [B * F_out, V]) # B*F_out x V # Transform back to graph domain x = tf.matmul(x, U) # B*F_out x V x = tf.reshape(x, [B, F_out, V]) # B x F_out x V return tf.transpose(x, perm=[0, 2, 1]) # B x V x F_out def bspline_basis(self, K, x, degree=3): """ Return the B-spline basis. K: Number of control points. x: Evaluation points or number of evenly distributed evaluation points. degree: Degree of the spline. Cubic spline by default. """ if np.isscalar(x): x = np.linspace(0, 1, x) # Evenly distributed knot vectors kv1 = x.min() * np.ones(degree) kv2 = np.linspace(x.min(), x.max(), K - degree + 1) kv3 = x.max() * np.ones(degree) kv = np.concatenate((kv1, kv2, kv3)) # Cox-DeBoor recursive function to compute one spline over x def cox_deboor(k, d): # Test for end conditions, the rectangular degree zero spline if (d == 0): return ((x - kv[k] >= 0) & (x - kv[k + 1] < 0)).astype(int) denom1 = kv[k + d] - kv[k] term1 = 0 if denom1 > 0: term1 = ((x - kv[k]) / denom1) * cox_deboor(k, d - 1) denom2 = kv[k + d + 1] - kv[k + 1] term2 = 0 if denom2 > 0: term2 = ((-(x - kv[k + d + 1]) / denom2) * cox_deboor(k + 1, d - 1)) return term1 + term2 # Compute basis for each point basis = np.column_stack([cox_deboor(k, degree) for k in range(K)]) basis[-1, -1] = 1 return basis
[ "tensorflow.equal", "tensorflow.transpose", "numpy.column_stack", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "numpy.array", "tensorflow.nn.dropout", "lib_gcnn.graph.rescale_L", "tensorflow.reduce_mean", "tensorflow.cast", "numpy.mod", "tensorflow.sparse_reorder", "numpy.isscalar...
[((1631, 1701), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, num_vertices]'], {'name': '"""input_x"""'}), "(tf.float32, [batch_size, num_vertices], name='input_x')\n", (1645, 1701), True, 'import tensorflow as tf\n'), ((1725, 1779), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size]'], {'name': '"""input_y"""'}), "(tf.int32, [batch_size], name='input_y')\n", (1739, 1779), True, 'import tensorflow as tf\n'), ((1806, 1848), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""train_flag"""'}), "(tf.bool, name='train_flag')\n", (1820, 1848), True, 'import tensorflow as tf\n'), ((1882, 1950), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)'], {'shape': '[]', 'name': '"""dropout_keep_prob"""'}), "(1.0, shape=[], name='dropout_keep_prob')\n", (1909, 1950), True, 'import tensorflow as tf\n'), ((2020, 2036), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (2031, 2036), True, 'import tensorflow as tf\n'), ((2340, 2371), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.input_x', '(2)'], {}), '(self.input_x, 2)\n', (2354, 2371), True, 'import tensorflow as tf\n'), ((5797, 5823), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['L'], {}), '(L)\n', (5820, 5823), False, 'import scipy\n'), ((5836, 5862), 'lib_gcnn.graph.rescale_L', 'graph.rescale_L', (['L'], {'lmax': '(2)'}), '(L, lmax=2)\n', (5851, 5862), True, 'import lib_gcnn.graph as graph\n'), ((5903, 5934), 'numpy.column_stack', 'np.column_stack', (['(L.row, L.col)'], {}), '((L.row, L.col))\n', (5918, 5934), True, 'import numpy as np\n'), ((5947, 5988), 'tensorflow.SparseTensor', 'tf.SparseTensor', (['indices', 'L.data', 'L.shape'], {}), '(indices, L.data, L.shape)\n', (5962, 5988), True, 'import tensorflow as tf\n'), ((6001, 6021), 'tensorflow.sparse_reorder', 'tf.sparse_reorder', (['L'], {}), '(L)\n', (6018, 6021), True, 'import tensorflow as tf\n'), ((6034, 6056), 'tensorflow.cast', 'tf.cast', (['L', 'tf.float32'], {}), '(L, tf.float32)\n', (6041, 6056), True, 'import tensorflow as tf\n'), ((6110, 6141), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[1, 2, 0]'}), '(x, perm=[1, 2, 0])\n', (6122, 6141), True, 'import tensorflow as tf\n'), ((6174, 6203), 'tensorflow.reshape', 'tf.reshape', (['x0', '[V, F_in * B]'], {}), '(x0, [V, F_in * B])\n', (6184, 6203), True, 'import tensorflow as tf\n'), ((6235, 6256), 'tensorflow.expand_dims', 'tf.expand_dims', (['x0', '(0)'], {}), '(x0, 0)\n', (6249, 6256), True, 'import tensorflow as tf\n'), ((6728, 6758), 'tensorflow.reshape', 'tf.reshape', (['x', '[K, V, F_in, B]'], {}), '(x, [K, V, F_in, B])\n', (6738, 6758), True, 'import tensorflow as tf\n'), ((6796, 6830), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[3, 1, 2, 0]'}), '(x, perm=[3, 1, 2, 0])\n', (6808, 6830), True, 'import tensorflow as tf\n'), ((6864, 6896), 'tensorflow.reshape', 'tf.reshape', (['x', '[B * V, F_in * K]'], {}), '(x, [B * V, F_in * K])\n', (6874, 6896), True, 'import tensorflow as tf\n'), ((7078, 7093), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (7087, 7093), True, 'import tensorflow as tf\n'), ((7141, 7169), 'tensorflow.reshape', 'tf.reshape', (['x', '[B, V, F_out]'], {}), '(x, [B, V, F_out])\n', (7151, 7169), True, 'import tensorflow as tf\n'), ((7448, 7464), 'lib_gcnn.graph.fourier', 'graph.fourier', (['L'], {}), '(L)\n', (7461, 7464), True, 'import lib_gcnn.graph as graph\n'), ((7477, 7511), 'tensorflow.constant', 'tf.constant', (['U.T'], {'dtype': 
'tf.float32'}), '(U.T, dtype=tf.float32)\n', (7488, 7511), True, 'import tensorflow as tf\n'), ((7624, 7660), 'tensorflow.constant', 'tf.constant', (['basis'], {'dtype': 'tf.float32'}), '(basis, dtype=tf.float32)\n', (7635, 7660), True, 'import tensorflow as tf\n'), ((7792, 7811), 'tensorflow.matmul', 'tf.matmul', (['basis', 'W'], {}), '(basis, W)\n', (7801, 7811), True, 'import tensorflow as tf\n'), ((7842, 7873), 'tensorflow.reshape', 'tf.reshape', (['W', '[V, F_out, F_in]'], {}), '(W, [V, F_out, F_in])\n', (7852, 7873), True, 'import tensorflow as tf\n'), ((8258, 8274), 'lib_gcnn.graph.fourier', 'graph.fourier', (['L'], {}), '(L)\n', (8271, 8274), True, 'import lib_gcnn.graph as graph\n'), ((8287, 8321), 'tensorflow.constant', 'tf.constant', (['U.T'], {'dtype': 'tf.float32'}), '(U.T, dtype=tf.float32)\n', (8298, 8321), True, 'import tensorflow as tf\n'), ((9017, 9048), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[1, 2, 0]'}), '(x, perm=[1, 2, 0])\n', (9029, 9048), True, 'import tensorflow as tf\n'), ((9116, 9144), 'tensorflow.reshape', 'tf.reshape', (['x', '[V, F_in * B]'], {}), '(x, [V, F_in * B])\n', (9126, 9144), True, 'import tensorflow as tf\n'), ((9171, 9186), 'tensorflow.matmul', 'tf.matmul', (['U', 'x'], {}), '(U, x)\n', (9180, 9186), True, 'import tensorflow as tf\n'), ((9213, 9240), 'tensorflow.reshape', 'tf.reshape', (['x', '[V, F_in, B]'], {}), '(x, [V, F_in, B])\n', (9223, 9240), True, 'import tensorflow as tf\n'), ((9287, 9302), 'tensorflow.matmul', 'tf.matmul', (['W', 'x'], {}), '(W, x)\n', (9296, 9302), True, 'import tensorflow as tf\n'), ((9335, 9350), 'tensorflow.transpose', 'tf.transpose', (['x'], {}), '(x)\n', (9347, 9350), True, 'import tensorflow as tf\n'), ((9380, 9409), 'tensorflow.reshape', 'tf.reshape', (['x', '[B * F_out, V]'], {}), '(x, [B * F_out, V])\n', (9390, 9409), True, 'import tensorflow as tf\n'), ((9479, 9494), 'tensorflow.matmul', 'tf.matmul', (['x', 'U'], {}), '(x, U)\n', (9488, 9494), True, 'import tensorflow as tf\n'), ((9522, 9550), 'tensorflow.reshape', 'tf.reshape', (['x', '[B, F_out, V]'], {}), '(x, [B, F_out, V])\n', (9532, 9550), True, 'import tensorflow as tf\n'), ((9584, 9615), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 2, 1]'}), '(x, perm=[0, 2, 1])\n', (9596, 9615), True, 'import tensorflow as tf\n'), ((9930, 9944), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (9941, 9944), True, 'import numpy as np\n'), ((10179, 10210), 'numpy.concatenate', 'np.concatenate', (['(kv1, kv2, kv3)'], {}), '((kv1, kv2, kv3))\n', (10193, 10210), True, 'import numpy as np\n'), ((1155, 1165), 'numpy.log2', 'np.log2', (['P'], {}), '(P)\n', (1162, 1165), True, 'import numpy as np\n'), ((3116, 3144), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dropout"""'], {}), "('dropout')\n", (3133, 3144), True, 'import tensorflow as tf\n'), ((3162, 3202), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x', 'self.dropout_keep_prob'], {}), '(x, self.dropout_keep_prob)\n', (3175, 3202), True, 'import tensorflow as tf\n'), ((3264, 3292), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reshape"""'], {}), "('reshape')\n", (3281, 3292), True, 'import tensorflow as tf\n'), ((3391, 3416), 'tensorflow.reshape', 'tf.reshape', (['x', '[B, V * F]'], {}), '(x, [B, V * F])\n', (3401, 3416), True, 'import tensorflow as tf\n'), ((4200, 4227), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output"""'], {}), "('output')\n", (4217, 4227), True, 'import tensorflow as tf\n'), ((4532, 4548), 'tensorflow.nn.l2_loss', 
'tf.nn.l2_loss', (['W'], {}), '(W)\n', (4545, 4548), True, 'import tensorflow as tf\n'), ((4576, 4615), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['x', 'W', 'b'], {'name': '"""scores"""'}), "(x, W, b, name='scores')\n", (4591, 4615), True, 'import tensorflow as tf\n'), ((4647, 4692), 'tensorflow.argmax', 'tf.argmax', (['self.scores', '(1)'], {'name': '"""predictions"""'}), "(self.scores, 1, name='predictions')\n", (4656, 4692), True, 'import tensorflow as tf\n'), ((4724, 4759), 'tensorflow.cast', 'tf.cast', (['self.predictions', 'tf.int32'], {}), '(self.predictions, tf.int32)\n', (4731, 4759), True, 'import tensorflow as tf\n'), ((4818, 4843), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (4835, 4843), True, 'import tensorflow as tf\n'), ((4866, 4958), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'self.scores', 'labels': 'self.input_y'}), '(logits=self.scores, labels=\n self.input_y)\n', (4912, 4958), True, 'import tensorflow as tf\n'), ((5070, 5099), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (5087, 5099), True, 'import tensorflow as tf\n'), ((5135, 5175), 'tensorflow.equal', 'tf.equal', (['self.predictions', 'self.input_y'], {}), '(self.predictions, self.input_y)\n', (5143, 5175), True, 'import tensorflow as tf\n'), ((6334, 6355), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_', '(0)'], {}), '(x_, 0)\n', (6348, 6355), True, 'import tensorflow as tf\n'), ((6402, 6428), 'tensorflow.concat', 'tf.concat', (['[x, x_]'], {'axis': '(0)'}), '([x, x_], axis=0)\n', (6411, 6428), True, 'import tensorflow as tf\n'), ((6484, 6520), 'tensorflow.sparse_tensor_dense_matmul', 'tf.sparse_tensor_dense_matmul', (['L', 'x0'], {}), '(L, x0)\n', (6513, 6520), True, 'import tensorflow as tf\n'), ((7004, 7054), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[F_in * K, F_out]'], {'stddev': '(0.1)'}), '([F_in * K, F_out], stddev=0.1)\n', (7023, 7054), True, 'import tensorflow as tf\n'), ((7718, 7768), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[K, F_in * F_out]'], {'stddev': '(0.1)'}), '([K, F_in * F_out], stddev=0.1)\n', (7737, 7768), True, 'import tensorflow as tf\n'), ((8365, 8414), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[V, F_out, F_in]'], {'stddev': '(0.1)'}), '([V, F_out, F_in], stddev=0.1)\n', (8384, 8414), True, 'import tensorflow as tf\n'), ((8648, 8668), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(3)'], {}), '(x, 3)\n', (8662, 8668), True, 'import tensorflow as tf\n'), ((8703, 8778), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, p, 1, 1]', 'strides': '[1, p, 1, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, p, 1, 1], strides=[1, p, 1, 1], padding='SAME')\n", (8717, 8778), True, 'import tensorflow as tf\n'), ((8798, 8816), 'tensorflow.squeeze', 'tf.squeeze', (['x', '[3]'], {}), '(x, [3])\n', (8808, 8816), True, 'import tensorflow as tf\n'), ((9962, 9982), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'x'], {}), '(0, 1, x)\n', (9973, 9982), True, 'import numpy as np\n'), ((10050, 10065), 'numpy.ones', 'np.ones', (['degree'], {}), '(degree)\n', (10057, 10065), True, 'import numpy as np\n'), ((10150, 10165), 'numpy.ones', 'np.ones', (['degree'], {}), '(degree)\n', (10157, 10165), True, 'import numpy as np\n'), ((1071, 1082), 'numpy.array', 'np.array', (['P'], {}), '(P)\n', (1079, 1082), True, 'import numpy as np\n'), ((1138, 1149), 'numpy.array', 'np.array', 
(['P'], {}), '(P)\n', (1146, 1149), True, 'import numpy as np\n'), ((1192, 1209), 'numpy.mod', 'np.mod', (['p_log2', '(1)'], {}), '(p_log2, 1)\n', (1198, 1209), True, 'import numpy as np\n'), ((1289, 1303), 'numpy.sum', 'np.sum', (['p_log2'], {}), '(p_log2)\n', (1295, 1303), True, 'import numpy as np\n'), ((3895, 3911), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W'], {}), '(W)\n', (3908, 3911), True, 'import tensorflow as tf\n'), ((3933, 3957), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['x', 'W', 'b'], {}), '(x, W, b)\n', (3948, 3957), True, 'import tensorflow as tf\n'), ((3978, 4036), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['x'], {'training': 'self.train_flag'}), '(x, training=self.train_flag)\n', (4007, 4036), True, 'import tensorflow as tf\n'), ((4057, 4070), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (4067, 4070), True, 'import tensorflow as tf\n'), ((4091, 4131), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x', 'self.dropout_keep_prob'], {}), '(x, self.dropout_keep_prob)\n', (4104, 4131), True, 'import tensorflow as tf\n'), ((4459, 4496), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[num_classes]'}), '(0.1, shape=[num_classes])\n', (4470, 4496), True, 'import tensorflow as tf\n'), ((4978, 5000), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), '(losses)\n', (4992, 5000), True, 'import tensorflow as tf\n'), ((5219, 5256), 'tensorflow.cast', 'tf.cast', (['correct_predictions', '"""float"""'], {}), "(correct_predictions, 'float')\n", (5226, 5256), True, 'import tensorflow as tf\n'), ((2229, 2241), 'numpy.log2', 'np.log2', (['p_i'], {}), '(p_i)\n', (2236, 2241), True, 'import numpy as np\n'), ((2879, 2909), 'tensorflow.nn.relu', 'tf.nn.relu', (['(x + b)'], {'name': '"""relu"""'}), "(x + b, name='relu')\n", (2889, 2909), True, 'import tensorflow as tf\n'), ((3820, 3855), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[num_units]'}), '(0.1, shape=[num_units])\n', (3831, 3855), True, 'import tensorflow as tf\n'), ((4391, 4429), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4427, 4429), True, 'import tensorflow as tf\n'), ((6602, 6638), 'tensorflow.sparse_tensor_dense_matmul', 'tf.sparse_tensor_dense_matmul', (['L', 'x1'], {}), '(L, x1)\n', (6631, 6638), True, 'import tensorflow as tf\n'), ((2807, 2843), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[1, 1, F[i]]'}), '(0.1, shape=[1, 1, F[i]])\n', (2818, 2843), True, 'import tensorflow as tf\n'), ((3748, 3786), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (3784, 3786), True, 'import tensorflow as tf\n')]
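The graph_conv_chebyshev method above is built on the three-term Chebyshev recurrence T_k(L)x = 2L T_{k-1}(L)x - T_{k-2}(L)x. A minimal NumPy sketch of just that recurrence on a toy dense Laplacian, assuming the Laplacian has already been rescaled so its spectrum lies in [-1, 1] (the job of graph.rescale_L in the code above):

import numpy as np

# Toy rescaled Laplacian (eigenvalues inside [-1, 1]) and one value per vertex.
L_hat = np.array([[0.0, -0.5, 0.0],
                  [-0.5, 0.0, -0.5],
                  [0.0, -0.5, 0.0]])
x = np.array([1.0, 2.0, 3.0])

K = 4                  # polynomial order, matching the K argument above
T = [x, L_hat @ x]     # T_0(L)x = x and T_1(L)x = Lx
for _ in range(2, K):
    T.append(2 * (L_hat @ T[-1]) - T[-2])   # Chebyshev recurrence

basis = np.stack(T, axis=-1)   # V x K stack that the learned weights W then mix
print(basis.shape)             # (3, 4)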
from __future__ import print_function import numpy as np import bayesiancoresets as bc from scipy.optimize import minimize from inference import nuts, rhat, hmc import time ## FOR LOGISTIC REGRESSION from model_lr import * dnames = ['synth', 'ds1', 'phishing'] fldr = 'lr' ## FOR POISSON REGRESSION #from model_poiss import * #dnames = ['synth', 'airportdelays', 'biketrips'] #fldr = 'poiss' n_trials = 20 mcmc_steps = 5000 #total number of MH steps mcmc_burn = 1000 projection_dim = 500 #random projection dimension Ms = np.unique(np.logspace(0, 3, 10, dtype=int)) pbar = True #progress bar display flag step_size_init = 0.001 n_leap = 15 target_a = 0.8 anms = ['GIGA', 'FW', 'RND'] sampler = hmc #nuts for dnm in dnames: print('Loading dataset '+dnm) Z, Zt, D = load_data(fldr+'/'+dnm+'.npz') print('Computing Laplace approximation') t0 = time.time() res = minimize(lambda mu : -log_joint(Z, mu, np.ones(Z.shape[0])), Z.mean(axis=0)[:D], jac=lambda mu : -grad_log_joint(Z, mu, np.ones(Z.shape[0]))) mu = res.x cov = -np.linalg.inv(hess_log_joint(Z, mu)) t_laplace = time.time() - t0 var_scales=np.ones(cov.shape[0]) cputs = np.zeros((len(anms), n_trials, Ms.shape[0])) csizes = np.zeros((len(anms), n_trials, Ms.shape[0])) Fs = np.zeros((len(anms), n_trials, Ms.shape[0])) cputs_full = np.zeros(n_trials) chains = np.zeros((n_trials, mcmc_steps, mu.shape[0])) for tr in range(n_trials): print('Trial ' + str(tr+1) +'/' + str(n_trials)) print('Computing random projection') t0 = time.time() proj = bc.ProjectionF(Z, grad_log_likelihood, projection_dim, lambda : np.random.multivariate_normal(mu, cov)) vecs = proj.get() t_projection = time.time()-t0 print('Running MCMC on the full dataset') logpZ = lambda th : log_joint(Z, th, np.ones(Z.shape[0])) glogpZ = lambda th : grad_log_joint(Z, th, np.ones(Z.shape[0])) mcmc_param_init = np.random.multivariate_normal(mu, cov) t0 = time.time() full_samples = sampler(logp = logpZ, gradlogp = glogpZ, x0 = mcmc_param_init, sample_steps=mcmc_steps, burn_steps=mcmc_burn, adapt_steps=mcmc_burn, n_leapfrogs = n_leap, scale=var_scales, progress_bar=pbar, step_size=step_size_init, target_accept=target_a) cputs_full[tr] = time.time()-t0 chains[tr, :, :] = full_samples print('Running coreset construction / MCMC') for aidx, anm in enumerate(anms): print(anm +':') t0 = time.time() alg = None if 'GIGA' in anm: alg = bc.GIGA(vecs) elif anm == 'FW': alg = bc.FrankWolfe(vecs) else: alg = bc.RandomSubsampling(vecs) t_setup = time.time() - t0 t_alg = 0. 
for m in range(Ms.shape[0]): print('M = ' + str(Ms[m]) + ': coreset construction') #this runs alg up to a level of M; on the next iteration, it will continue from where it left off t0 = time.time() alg.run(Ms[m]) t_alg += time.time()-t0 wts = alg.weights() idcs = wts > 0 logpZ = lambda th : log_joint(Z[idcs, :], th, wts[idcs]) glogpZ = lambda th : grad_log_joint(Z[idcs, :], th, wts[idcs]) mcmc_param_init = np.random.multivariate_normal(mu, cov) print('M = ' + str(Ms[m]) + ': MCMC') t0 = time.time() th_samples = sampler(logp=logpZ, gradlogp=glogpZ, x0 = mcmc_param_init, sample_steps=mcmc_steps, burn_steps=mcmc_burn, adapt_steps=mcmc_burn, n_leapfrogs= n_leap, scale=var_scales, progress_bar=pbar, step_size=step_size_init, target_accept=target_a) t_alg_mcmc = time.time()-t0 print('M = ' + str(Ms[m]) + ': CPU times') cputs[aidx, tr, m] = t_laplace + t_projection + t_setup + t_alg + t_alg_mcmc print('M = ' + str(Ms[m]) + ': coreset sizes') csizes[aidx, tr, m] = (wts>0).sum() print('M = ' + str(Ms[m]) + ': F norms') gcs = np.array([ grad_log_joint(Z[idcs, :], full_samples[i, :], wts[idcs]) for i in range(full_samples.shape[0]) ]) gfs = np.array([ grad_log_joint(Z, full_samples[i, :], np.ones(Z.shape[0])) for i in range(full_samples.shape[0]) ]) Fs[aidx, tr, m] = (((gcs - gfs)**2).sum(axis=1)).mean() #print(rhat(chains)) np.savez_compressed(fldr+'/'+dnm+'_results.npz', Ms=Ms, Fs=Fs, cputs=cputs, cputs_full=cputs_full, csizes=csizes, anms=anms)
[ "numpy.ones", "numpy.random.multivariate_normal", "numpy.zeros", "bayesiancoresets.RandomSubsampling", "bayesiancoresets.GIGA", "numpy.savez_compressed", "numpy.logspace", "time.time", "bayesiancoresets.FrankWolfe" ]
[((536, 568), 'numpy.logspace', 'np.logspace', (['(0)', '(3)', '(10)'], {'dtype': 'int'}), '(0, 3, 10, dtype=int)\n', (547, 568), True, 'import numpy as np\n'), ((855, 866), 'time.time', 'time.time', ([], {}), '()\n', (864, 866), False, 'import time\n'), ((1120, 1141), 'numpy.ones', 'np.ones', (['cov.shape[0]'], {}), '(cov.shape[0])\n', (1127, 1141), True, 'import numpy as np\n'), ((1321, 1339), 'numpy.zeros', 'np.zeros', (['n_trials'], {}), '(n_trials)\n', (1329, 1339), True, 'import numpy as np\n'), ((1352, 1397), 'numpy.zeros', 'np.zeros', (['(n_trials, mcmc_steps, mu.shape[0])'], {}), '((n_trials, mcmc_steps, mu.shape[0]))\n', (1360, 1397), True, 'import numpy as np\n'), ((4296, 4431), 'numpy.savez_compressed', 'np.savez_compressed', (["(fldr + '/' + dnm + '_results.npz')"], {'Ms': 'Ms', 'Fs': 'Fs', 'cputs': 'cputs', 'cputs_full': 'cputs_full', 'csizes': 'csizes', 'anms': 'anms'}), "(fldr + '/' + dnm + '_results.npz', Ms=Ms, Fs=Fs, cputs=\n cputs, cputs_full=cputs_full, csizes=csizes, anms=anms)\n", (4315, 4431), True, 'import numpy as np\n'), ((1090, 1101), 'time.time', 'time.time', ([], {}), '()\n', (1099, 1101), False, 'import time\n'), ((1531, 1542), 'time.time', 'time.time', ([], {}), '()\n', (1540, 1542), False, 'import time\n'), ((1914, 1952), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'cov'], {}), '(mu, cov)\n', (1943, 1952), True, 'import numpy as np\n'), ((1962, 1973), 'time.time', 'time.time', ([], {}), '()\n', (1971, 1973), False, 'import time\n'), ((1700, 1711), 'time.time', 'time.time', ([], {}), '()\n', (1709, 1711), False, 'import time\n'), ((2301, 2312), 'time.time', 'time.time', ([], {}), '()\n', (2310, 2312), False, 'import time\n'), ((2474, 2485), 'time.time', 'time.time', ([], {}), '()\n', (2483, 2485), False, 'import time\n'), ((1618, 1656), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'cov'], {}), '(mu, cov)\n', (1647, 1656), True, 'import numpy as np\n'), ((1803, 1822), 'numpy.ones', 'np.ones', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (1810, 1822), True, 'import numpy as np\n'), ((1871, 1890), 'numpy.ones', 'np.ones', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (1878, 1890), True, 'import numpy as np\n'), ((2541, 2554), 'bayesiancoresets.GIGA', 'bc.GIGA', (['vecs'], {}), '(vecs)\n', (2548, 2554), True, 'import bayesiancoresets as bc\n'), ((2683, 2694), 'time.time', 'time.time', ([], {}), '()\n', (2692, 2694), False, 'import time\n'), ((2934, 2945), 'time.time', 'time.time', ([], {}), '()\n', (2943, 2945), False, 'import time\n'), ((3221, 3259), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'cov'], {}), '(mu, cov)\n', (3250, 3259), True, 'import numpy as np\n'), ((3319, 3330), 'time.time', 'time.time', ([], {}), '()\n', (3328, 3330), False, 'import time\n'), ((914, 933), 'numpy.ones', 'np.ones', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (921, 933), True, 'import numpy as np\n'), ((2593, 2612), 'bayesiancoresets.FrankWolfe', 'bc.FrankWolfe', (['vecs'], {}), '(vecs)\n', (2606, 2612), True, 'import bayesiancoresets as bc\n'), ((2639, 2665), 'bayesiancoresets.RandomSubsampling', 'bc.RandomSubsampling', (['vecs'], {}), '(vecs)\n', (2659, 2665), True, 'import bayesiancoresets as bc\n'), ((2986, 2997), 'time.time', 'time.time', ([], {}), '()\n', (2995, 2997), False, 'import time\n'), ((3654, 3665), 'time.time', 'time.time', ([], {}), '()\n', (3663, 3665), False, 'import time\n'), ((995, 1014), 'numpy.ones', 'np.ones', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (1002, 1014), True, 
'import numpy as np\n'), ((4145, 4164), 'numpy.ones', 'np.ones', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (4152, 4164), True, 'import numpy as np\n')]
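The script above seeds its samplers with a Laplace approximation: the mode of the log joint found by scipy.optimize.minimize, and a covariance equal to the negative inverse Hessian at that mode. A self-contained sketch of the same step with a stand-in one-dimensional log posterior (log_post and hess_log_post are illustrative placeholders, not the model_lr/model_poiss functions):

import numpy as np
from scipy.optimize import minimize

# Stand-in log posterior: an unnormalized Gaussian with mean 2, variance 0.5.
def log_post(theta):
    return -(theta[0] - 2.0) ** 2 / (2.0 * 0.5)

def hess_log_post(theta):
    return np.array([[-1.0 / 0.5]])    # constant Hessian in this toy case

res = minimize(lambda th: -log_post(th), x0=np.zeros(1))
mu = res.x                               # posterior mode
cov = -np.linalg.inv(hess_log_post(mu))   # Laplace covariance
print(mu, cov)                           # approximately [2.] and [[0.5]]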
import matplotlib.pyplot as plt import numpy as np from scipy.optimize import curve_fit from astropy.io import ascii from uncertainties import ufloat import uncertainties.unumpy as unp g = ufloat(9.811899, 0.000041) x, d0, d = np.genfromtxt("Messdaten/c.txt", unpack=True) D = d - d0 x1 = x[0:26] x2 = x[28:52] D1 = D[0:26] D2 = D[28:52] ascii.write([x, d0, d, D], 'Messdaten/beidseitig.tex', format="latex", names=["messpunkt x ", "D0", "Dlast", "D"]) m_alu, d_alu, dx_alu = np.genfromtxt("Messdaten/a.txt", unpack=True) L_stab_alueingespannt = 0.56 mlast_alu = 4720.1 L_stab_alu = 60 d_stab_alu = 0.01 v_stab_alu = d_stab_alu**2 * L_stab_alu m_stab_alu = 167.1 pdichte_alu = m_stab_alu / v_stab_alu print("Dichte Stab rechteckig", pdichte_alu) dichte_lit_alu = 2.712 # in g/m^3 print("Dichte Alu Literatur", dichte_lit_alu) x = x / 100 x1 = x1 / 100 x2 = x2 / 100 x_alu_fit = ((3 * L_stab_alueingespannt**2) * x1 - 4 * x1**3) #x1_ls = np.linspace(3, 60, 50) / 100 #x_alu_fit_ls = ((3 * L_stab_alueingespannt**2) * x1_ls - 4 * x1_ls**3) def Y1(x, a): return a * x params, covariance = curve_fit(Y1, x_alu_fit, D1) errors = np.sqrt(np.diag(covariance)) print("params", *params, "und +/-", errors[0]) plt.plot(x_alu_fit, D1, 'rx', label="Messwerte") plt.plot(x_alu_fit, Y1(x_alu_fit, *params), 'b-', label="Regressionsgrade") plt.xlabel(r"$3L^2 x - 4x^3$/$10^{-3}\,\si{\cubic\meter}$") plt.ylabel(r"$D(x)$/$\si{\milli\meter}$") plt.legend(loc='best') plt.tight_layout() #plt.ylim(0, 3.5) #plt.xlim(0, 0.19) plt.savefig('Bilder/c.pdf') a_alu = ufloat(params[0], errors[0]) F_alu = mlast_alu * g I_alu = d_stab_alu**4 / 12 E_alu = F_alu / (48 * a_alu * I_alu) print("E alu=", E_alu) ########################################################################## def Y2(x, A): return A * x x_alufit = 4 * x2**3 - 12 * L_stab_alueingespannt * x2**2 + \ 9 * L_stab_alueingespannt**2 * x2 - L_stab_alueingespannt**3 #x2_ls = np.linspace(3, 70, 50) / 100 # x_alufit_ls = 4 * x2_ls**3 - 12 * L_stab_alueingespannt * x2_ls**2 + \ #9 * L_stab_alueingespannt**2 * x2_ls - L_stab_alueingespannt**3 plt.clf() params, covariance = curve_fit(Y2, x_alufit, D2) errors = np.sqrt(np.diag(covariance)) print("params", *params, "fehler", *errors) plt.plot(x_alufit, D2, 'rx', label="Messwerte") plt.plot(x_alufit, Y2(x_alufit, *params), 'b-', label="Regressionsgrade") plt.xlabel( r"$4x^3 -12Lx^2 + 9L^2x - L^3$/$10^{-3}\,\si{\cubic\meter}$") plt.ylabel(r"$D(x)$/$\si{\milli\meter}$") plt.ylim(0.5, 3.0) plt.xlim(0.03, 0.18) plt.legend(loc='best') plt.tight_layout() plt.savefig('Bilder/c2.pdf') a_alu = ufloat(params[0], errors[0]) F_alu = mlast_alu * g I_alu = d_stab_alu**4 / 12 E_alu = F_alu / (48 * a_alu * I_alu) print("E alu=", E_alu)
[ "scipy.optimize.curve_fit", "matplotlib.pyplot.savefig", "astropy.io.ascii.write", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.clf", "numpy.diag", "matplotlib.pyplot.tight_layout", "uncertainties.ufloat", "matplotlib.pyplot.xlim", "matpl...
[((189, 214), 'uncertainties.ufloat', 'ufloat', (['(9.811899)', '(4.1e-05)'], {}), '(9.811899, 4.1e-05)\n', (195, 214), False, 'from uncertainties import ufloat\n'), ((228, 273), 'numpy.genfromtxt', 'np.genfromtxt', (['"""Messdaten/c.txt"""'], {'unpack': '(True)'}), "('Messdaten/c.txt', unpack=True)\n", (241, 273), True, 'import numpy as np\n'), ((340, 458), 'astropy.io.ascii.write', 'ascii.write', (['[x, d0, d, D]', '"""Messdaten/beidseitig.tex"""'], {'format': '"""latex"""', 'names': "['messpunkt x ', 'D0', 'Dlast', 'D']"}), "([x, d0, d, D], 'Messdaten/beidseitig.tex', format='latex',\n names=['messpunkt x ', 'D0', 'Dlast', 'D'])\n", (351, 458), False, 'from astropy.io import ascii\n'), ((491, 536), 'numpy.genfromtxt', 'np.genfromtxt', (['"""Messdaten/a.txt"""'], {'unpack': '(True)'}), "('Messdaten/a.txt', unpack=True)\n", (504, 536), True, 'import numpy as np\n'), ((1111, 1139), 'scipy.optimize.curve_fit', 'curve_fit', (['Y1', 'x_alu_fit', 'D1'], {}), '(Y1, x_alu_fit, D1)\n', (1120, 1139), False, 'from scipy.optimize import curve_fit\n'), ((1225, 1273), 'matplotlib.pyplot.plot', 'plt.plot', (['x_alu_fit', 'D1', '"""rx"""'], {'label': '"""Messwerte"""'}), "(x_alu_fit, D1, 'rx', label='Messwerte')\n", (1233, 1273), True, 'import matplotlib.pyplot as plt\n'), ((1359, 1421), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$3L^2 x - 4x^3$/$10^{-3}\\\\,\\\\si{\\\\cubic\\\\meter}$"""'], {}), "('$3L^2 x - 4x^3$/$10^{-3}\\\\,\\\\si{\\\\cubic\\\\meter}$')\n", (1369, 1421), True, 'import matplotlib.pyplot as plt\n'), ((1419, 1462), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$D(x)$/$\\\\si{\\\\milli\\\\meter}$"""'], {}), "('$D(x)$/$\\\\si{\\\\milli\\\\meter}$')\n", (1429, 1462), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1483), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1471, 1483), True, 'import matplotlib.pyplot as plt\n'), ((1484, 1502), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1500, 1502), True, 'import matplotlib.pyplot as plt\n'), ((1540, 1567), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Bilder/c.pdf"""'], {}), "('Bilder/c.pdf')\n", (1551, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1604), 'uncertainties.ufloat', 'ufloat', (['params[0]', 'errors[0]'], {}), '(params[0], errors[0])\n', (1582, 1604), False, 'from uncertainties import ufloat\n'), ((2127, 2136), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2134, 2136), True, 'import matplotlib.pyplot as plt\n'), ((2158, 2185), 'scipy.optimize.curve_fit', 'curve_fit', (['Y2', 'x_alufit', 'D2'], {}), '(Y2, x_alufit, D2)\n', (2167, 2185), False, 'from scipy.optimize import curve_fit\n'), ((2268, 2315), 'matplotlib.pyplot.plot', 'plt.plot', (['x_alufit', 'D2', '"""rx"""'], {'label': '"""Messwerte"""'}), "(x_alufit, D2, 'rx', label='Messwerte')\n", (2276, 2315), True, 'import matplotlib.pyplot as plt\n'), ((2390, 2465), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$4x^3 -12Lx^2 + 9L^2x - L^3$/$10^{-3}\\\\,\\\\si{\\\\cubic\\\\meter}$"""'], {}), "('$4x^3 -12Lx^2 + 9L^2x - L^3$/$10^{-3}\\\\,\\\\si{\\\\cubic\\\\meter}$')\n", (2400, 2465), True, 'import matplotlib.pyplot as plt\n'), ((2468, 2511), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$D(x)$/$\\\\si{\\\\milli\\\\meter}$"""'], {}), "('$D(x)$/$\\\\si{\\\\milli\\\\meter}$')\n", (2478, 2511), True, 'import matplotlib.pyplot as plt\n'), ((2510, 2528), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.5)', '(3.0)'], {}), '(0.5, 3.0)\n', (2518, 2528), True, 'import matplotlib.pyplot 
as plt\n'), ((2529, 2549), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.03)', '(0.18)'], {}), '(0.03, 0.18)\n', (2537, 2549), True, 'import matplotlib.pyplot as plt\n'), ((2550, 2572), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2560, 2572), True, 'import matplotlib.pyplot as plt\n'), ((2573, 2591), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2589, 2591), True, 'import matplotlib.pyplot as plt\n'), ((2592, 2620), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Bilder/c2.pdf"""'], {}), "('Bilder/c2.pdf')\n", (2603, 2620), True, 'import matplotlib.pyplot as plt\n'), ((2629, 2657), 'uncertainties.ufloat', 'ufloat', (['params[0]', 'errors[0]'], {}), '(params[0], errors[0])\n', (2635, 2657), False, 'from uncertainties import ufloat\n'), ((1157, 1176), 'numpy.diag', 'np.diag', (['covariance'], {}), '(covariance)\n', (1164, 1176), True, 'import numpy as np\n'), ((2203, 2222), 'numpy.diag', 'np.diag', (['covariance'], {}), '(covariance)\n', (2210, 2222), True, 'import numpy as np\n')]
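The fit-then-propagate pattern above appears twice: a one-parameter model is fitted with scipy.optimize.curve_fit, and the slope plus its standard error are wrapped in a ufloat so downstream arithmetic carries the uncertainty automatically. A condensed sketch on synthetic data (all numbers here are illustrative only):

import numpy as np
from scipy.optimize import curve_fit
from uncertainties import ufloat

x = np.linspace(0.0, 1.0, 20)
y = 3.0 * x + np.random.normal(0.0, 0.05, x.size)   # fake measurements

def model(x, a):
    return a * x

params, covariance = curve_fit(model, x, y)
errors = np.sqrt(np.diag(covariance))

a = ufloat(params[0], errors[0])   # slope with its 1-sigma uncertainty
E = 1.0 / (48.0 * a * 1e-9)        # derived quantity; the error propagates
print(a, E)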
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.decomposition import FastICA
from ..utils.parallel import ParallelBackend, get_backend
from ..utils.kde import kde
from ..utils.cubic import cubic_spline
from ..utils.sobol import multivariate_normal
from ..utils.random import get_generator
from itertools import starmap
import copy
import warnings

try:
    from getdist import plots, MCSamples
    HAS_GETDIST = True
except Exception:
    HAS_GETDIST = False

__all__ = ['SIT']


# TODO: vectorize this
# TODO: update when sklearn supports random_generator
# https://github.com/scikit-learn/scikit-learn/issues/16988
# TODO: do not activate the backend if not use_parallel

class SIT:
    """
    Sliced Iterative Transform.

    Parameters
    ----------
    n_iter : positive int, optional
        Number of iterations to perform. Set to 10 by default.
    parallel_backend : None, int, Pool, Client or ParallelBackend, optional
        The backend for parallelization. If `None`, will use the bayesfast
        global parallel backend. Otherwise, will be passed to initialize a
        ParallelBackend.
    bw_factor : positive float, optional
        Multiplicative factor for the kde bandwidth. Set to 1. by default.
    m_ica : positive int, optional
        Max number of points used to compute FastICA. Set to 20000 by default.
    random_generator : None, int, array_like[ints], SeedSequence, BitGenerator
        or Generator, optional
        The numpy random generator. If `None`, will use the bayesfast global
        random generator. Otherwise, will be passed to
        `numpy.random.default_rng` to initialize a random generator.
    m_plot : int, optional
        Max number of dims for triangle_plot. If non-positive, will be
        interpreted as no limits. Set to 8 by default.
    cubic_options : dict, optional
        Additional keyword arguments for the cubic spline. Set to {} by
        default.
    ica_options : dict, optional
        Additional keyword arguments for FastICA. Set to {'max_iter': 100} by
        default.
    mvn_generator : None or callable, optional
        Random number generator for the multivariate normal distribution.
        Should have signature `(mean, cov, size) -> samples`. If `None`, will
        use `bayesfast.utils.sobol.multivariate_normal`. Set to `None` by
        default.
""" def __init__(self, n_iter=10, parallel_backend=None, bw_factor=1., m_ica=20000, random_generator=None, m_plot=8, cubic_options=None, ica_options=None, mvn_generator=None): self._data = None self._cubic = [] self.n_iter = n_iter self.parallel_backend = parallel_backend self.bw_factor = bw_factor self.m_ica = m_ica self.random_generator = random_generator self.m_plot = m_plot self.cubic_options = cubic_options self.ica_options = ica_options self.mvn_generator = mvn_generator def __getstate__(self): """We need this to make self._parallel_backend work correctly.""" self_dict = self.__dict__.copy() self_dict['_parallel_backend'] = None return self_dict @property def data(self): return self._data @property def data_init(self): return self._data_init @property def dim(self): return self._data.shape[-1] @property def weights(self): return self._weights @property def n_iter(self): return self._n_iter @n_iter.setter def n_iter(self, n): try: n = int(n) assert n > 0 except Exception: raise ValueError('n_iter should be a positive int.') self._n_iter = n @property def i_iter(self): return len(self._cubic) def add_iter(self, n): self.n_iter = self.n_iter + n @property def parallel_backend(self): if self._parallel_backend is None: return get_backend() else: return self._parallel_backend @parallel_backend.setter def parallel_backend(self, backend): if backend is None: self._parallel_backend = None else: self._parallel_backend = ParallelBackend(backend) @property def bw_factor(self): return self._bw_factor @bw_factor.setter def bw_factor(self, bw): try: bw = float(bw) assert bw > 0 except Exception: raise ValueError('bw_factor should be a positive float.') self._bw_factor = bw @property def m_ica(self): return self._m_ica @m_ica.setter def m_ica(self, m): try: m = int(m) assert m > 0 except Exception: raise ValueError('m_ica should be a positive int.') self._m_ica = m @property def random_generator(self): if self._random_generator is None: return get_generator() else: return self._random_generator @random_generator.setter def random_generator(self, generator): if generator is None: self._random_generator = None else: self._random_generator = np.random.default_rng(generator) @property def m_plot(self): return self._m_plot @m_plot.setter def m_plot(self, m): try: m = int(m) except Exception: raise ValueError('m_plot should be an int.') self._m_plot = m @property def cubic_options(self): return self._cubic_options @cubic_options.setter def cubic_options(self, co): try: if co is None: co = {} self._cubic_options = dict(co) except Exception: raise ValueError('cubic_options should be a dict.') @property def ica_options(self): return self._ica_options @ica_options.setter def ica_options(self, io): try: if io is None: io = {'max_iter': 100} self._ica_options = dict(io) except Exception: raise ValueError('ica_options should be a dict.') @property def mvn_generator(self): return self._mvn_generator @mvn_generator.setter def mvn_generator(self, mg): if mg is None: mg = multivariate_normal if callable(mg): self._mvn_generator = mg else: raise ValueError('invalid value for mvn_generator.') def _gaussianize_1d(self, x): k = kde(x, bw_factor=self._bw_factor, weights=self._weights) c = cubic_spline(x, lambda xx: norm.ppf(k.cdf(xx)), **self._cubic_options) return c def _gaussianize_nd(self, x): map_result = self.parallel_backend.map(self._gaussianize_1d, x.T) self._cubic.append(map_result) y = np.array([map_result[i](x[:, i]) for i in range(self.dim)]).T return y def _ica(self, x): io = self._ica_options.copy() if not 
'random_state' in io: io['random_state'] = self.random_generator.integers(0, 2**32) ica = FastICA(**io) if self._m_ica is None: ica.fit(x) else: n_ica = min(x.shape[0], self.m_ica) ica.fit(x[self.random_generator.choice(x.shape[0], n_ica, False)]) y = ica.transform(x) m = np.mean(x, axis=0) s = np.std(y, axis=0) y /= s A = ica.components_ / s[:, np.newaxis] B = np.linalg.inv(A) return y, A, B, m def _init_data(self, data, weights): if data is None: if self._data is None: raise ValueError('you have not given me the data to fit.') else: try: data = np.array(data) assert data.size > 0 except Exception: raise ValueError('invalid value for data.') if data.ndim == 2: self._data = data elif data.ndim >= 3: self._data = data.reshape((-1, data.shape[-1])) else: raise ValueError('invalid shape for data.ndim.') self._data_init = self._data.copy() if self.dim == 1: raise ValueError('I cannot do rotations for only one variable.') _n = self._data.shape[0] if weights is not None: try: weights = np.asarray(weights) assert weights.shape == (_n,) except Exception: raise ValueError('invalid value for weights.') self._weights = weights else: self._weights = np.ones(_n) / _n self._cubic = [] self._A = np.zeros((0, self.dim, self.dim)) self._B = np.zeros((0, self.dim, self.dim)) self._m = np.zeros((0, self.dim)) self._logdetA = np.zeros(0) def fit(self, data=None, weights=None, n_run=None, plot=0): self._init_data(data, weights) try: plot = int(plot) except Exception: raise ValueError('plot should be an int.') if (not HAS_GETDIST) and (plot != 0): plot = 0 warnings.warn('you have not installed getdist, so I can only do ' 'plot=0.', RuntimeWarning) if n_run is None: n_run = self.n_iter - self.i_iter else: try: n_run = int(n_run) assert n_run > 0 except Exception: raise ValueError('invalid value for n_run.') if n_run > self.n_iter - self.i_iter: self.n_iter = self.i_iter + n_run with self.parallel_backend: for i in range(n_run): if plot != 0 and self.i_iter == 0: self.triangle_plot() try: y, A, B, m = self._ica(self._data) self._data = self._gaussianize_nd(y) except Exception: warnings.warn( "we found that sometimes it goes wrong, but actually " "it can work if we use a different random seed, so " "let's give it one more chance.", RuntimeWarning) y, A, B, m = self._ica(self._data) self._data = self._gaussianize_nd(y) self._A = np.concatenate((self._A, A[np.newaxis]), axis=0) self._B = np.concatenate((self._B, B[np.newaxis]), axis=0) self._m = np.concatenate((self._m, m[np.newaxis]), axis=0) self._logdetA = np.append(self._logdetA, np.log(np.abs(np.linalg.det(A)))) finite_index = np.isfinite(self._data).all(axis=1) if len(finite_index) < self._data.shape[0]: warnings.warn( 'inf encountered for some data points. 
We will remove ' 'these inf points for now.', RuntimeWarning) self._data = self._data[finite_index, :] self._weights = self._weights[finite_index] if (plot > 0) and (not (self.i_iter + 1) % plot): self.triangle_plot() if plot < 0: self.triangle_plot() def triangle_plot(self): if not HAS_GETDIST: raise RuntimeError( 'you need to install getdist to get the triangle plot.') if 0 < self.m_plot < self.dim: plot_data = self._data[:, :self.m_plot] else: plot_data = self._data samples = MCSamples(samples=plot_data) g = plots.getSubplotPlotter() g.triangle_plot([samples,], filled=True, contour_args={'alpha':0.8}, diag1d_kwargs={'normalized':True}) if self.i_iter: plt.suptitle("triangle plot after iteration " + str(self.i_iter), fontsize=plot_data.shape[-1] * 4, ha='left') else: plt.suptitle('triangle plot for the initial data', fontsize=plot_data.shape[-1] * 4, ha='left') plt.show() def sample(self, n, use_parallel=False): try: n = int(n) assert n > 0 except Exception: raise ValueError('n should be a positive int.') y = self.mvn_generator(np.zeros(self.dim), np.eye(self.dim), n) x, log_j = self.backward_transform(y, use_parallel) return x, log_j, y def _do_evaluate(self, c, x): return c.evaluate(x) def _do_derivative(self, c, x): return c.derivative(x) def _do_solve(self, c, x): return c.solve(x) def forward_transform(self, x, use_parallel=False): try: y = np.array(x) except Exception: raise ValueError('invalid value for x.') if y.ndim == 1: y = y[np.newaxis, :] if y.shape[-1] != self.dim: raise ValueError('invalid shape for x.') _original_shape = y.shape y = y.reshape((-1, _original_shape[-1])) log_j = np.zeros(y.shape[0]) with self.parallel_backend: for i in range(self.i_iter): y = (y - self._m[i]) @ self._A[i].T if use_parallel: map_result = self.parallel_backend.map( self._do_derivative, self._cubic[i], y.T) else: map_result = list( starmap(self._do_derivative, zip(self._cubic[i], y.T))) log_j += np.sum(np.log(map_result), axis=0) if use_parallel: map_result = self.parallel_backend.map( self._do_evaluate, self._cubic[i], y.T) else: map_result = list( starmap(self._do_evaluate, zip(self._cubic[i], y.T))) y = np.array(map_result).T log_j += np.sum(self._logdetA) y = y.reshape(_original_shape) log_j = log_j.reshape(_original_shape[:-1]) return y, log_j def backward_transform(self, y, use_parallel=False): try: x = np.array(y) except Exception: raise ValueError('invalid value for y.') if x.ndim == 1: x = x[np.newaxis, :] if x.shape[-1] != self.dim: raise ValueError('invalid shape for y.') _original_shape = x.shape x = x.reshape((-1, _original_shape[-1])) log_j = np.zeros(x.shape[0]) with self.parallel_backend: for i in reversed(range(self.i_iter)): if use_parallel: map_result = self.parallel_backend.map( self._do_solve, self._cubic[i], x.T) else: map_result = list( starmap(self._do_solve, zip(self._cubic[i], x.T))) x = np.array(map_result).T if use_parallel: map_result = self.parallel_backend.map( self._do_derivative, self._cubic[i], x.T) else: map_result = list( starmap(self._do_derivative, zip(self._cubic[i], x.T))) log_j += np.sum(np.log(map_result), axis=0) x = x @ self._B[i].T + self._m[i] log_j += np.sum(self._logdetA) x = x.reshape(_original_shape) log_j = log_j.reshape(_original_shape[:-1]) return x, log_j def logq(self, x, use_parallel=False): y, log_j = self.forward_transform(x, use_parallel) return np.sum(norm.logpdf(y), axis=-1) + log_j
[ "numpy.random.default_rng", "numpy.log", "numpy.array", "getdist.plots.getSubplotPlotter", "numpy.isfinite", "scipy.stats.norm.logpdf", "sklearn.decomposition.FastICA", "numpy.mean", "numpy.asarray", "numpy.concatenate", "warnings.warn", "numpy.eye", "numpy.ones", "numpy.std", "matplotli...
[((7215, 7228), 'sklearn.decomposition.FastICA', 'FastICA', ([], {}), '(**io)\n', (7222, 7228), False, 'from sklearn.decomposition import FastICA\n'), ((7466, 7484), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (7473, 7484), True, 'import numpy as np\n'), ((7497, 7514), 'numpy.std', 'np.std', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (7503, 7514), True, 'import numpy as np\n'), ((7589, 7605), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (7602, 7605), True, 'import numpy as np\n'), ((11815, 11843), 'getdist.MCSamples', 'MCSamples', ([], {'samples': 'plot_data'}), '(samples=plot_data)\n', (11824, 11843), False, 'from getdist import plots, MCSamples\n'), ((11856, 11881), 'getdist.plots.getSubplotPlotter', 'plots.getSubplotPlotter', ([], {}), '()\n', (11879, 11881), False, 'from getdist import plots, MCSamples\n'), ((12345, 12355), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12353, 12355), True, 'import matplotlib.pyplot as plt\n'), ((13320, 13340), 'numpy.zeros', 'np.zeros', (['y.shape[0]'], {}), '(y.shape[0])\n', (13328, 13340), True, 'import numpy as np\n'), ((14752, 14772), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (14760, 14772), True, 'import numpy as np\n'), ((5258, 5290), 'numpy.random.default_rng', 'np.random.default_rng', (['generator'], {}), '(generator)\n', (5279, 5290), True, 'import numpy as np\n'), ((8866, 8899), 'numpy.zeros', 'np.zeros', (['(0, self.dim, self.dim)'], {}), '((0, self.dim, self.dim))\n', (8874, 8899), True, 'import numpy as np\n'), ((8922, 8955), 'numpy.zeros', 'np.zeros', (['(0, self.dim, self.dim)'], {}), '((0, self.dim, self.dim))\n', (8930, 8955), True, 'import numpy as np\n'), ((8978, 9001), 'numpy.zeros', 'np.zeros', (['(0, self.dim)'], {}), '((0, self.dim))\n', (8986, 9001), True, 'import numpy as np\n'), ((9030, 9041), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (9038, 9041), True, 'import numpy as np\n'), ((9349, 9442), 'warnings.warn', 'warnings.warn', (['"""you have not installed getdist, so I can only do plot=0."""', 'RuntimeWarning'], {}), "('you have not installed getdist, so I can only do plot=0.',\n RuntimeWarning)\n", (9362, 9442), False, 'import warnings\n'), ((12216, 12316), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""triangle plot for the initial data"""'], {'fontsize': '(plot_data.shape[-1] * 4)', 'ha': '"""left"""'}), "('triangle plot for the initial data', fontsize=plot_data.shape\n [-1] * 4, ha='left')\n", (12228, 12316), True, 'import matplotlib.pyplot as plt\n'), ((12580, 12598), 'numpy.zeros', 'np.zeros', (['self.dim'], {}), '(self.dim)\n', (12588, 12598), True, 'import numpy as np\n'), ((12600, 12616), 'numpy.eye', 'np.eye', (['self.dim'], {}), '(self.dim)\n', (12606, 12616), True, 'import numpy as np\n'), ((12984, 12995), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (12992, 12995), True, 'import numpy as np\n'), ((14191, 14212), 'numpy.sum', 'np.sum', (['self._logdetA'], {}), '(self._logdetA)\n', (14197, 14212), True, 'import numpy as np\n'), ((14416, 14427), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (14424, 14427), True, 'import numpy as np\n'), ((15625, 15646), 'numpy.sum', 'np.sum', (['self._logdetA'], {}), '(self._logdetA)\n', (15631, 15646), True, 'import numpy as np\n'), ((7864, 7878), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (7872, 7878), True, 'import numpy as np\n'), ((10565, 10613), 'numpy.concatenate', 'np.concatenate', (['(self._A, A[np.newaxis])'], {'axis': '(0)'}), '((self._A, A[np.newaxis]), axis=0)\n', 
(10579, 10613), True, 'import numpy as np\n'), ((10640, 10688), 'numpy.concatenate', 'np.concatenate', (['(self._B, B[np.newaxis])'], {'axis': '(0)'}), '((self._B, B[np.newaxis]), axis=0)\n', (10654, 10688), True, 'import numpy as np\n'), ((10715, 10763), 'numpy.concatenate', 'np.concatenate', (['(self._m, m[np.newaxis])'], {'axis': '(0)'}), '((self._m, m[np.newaxis]), axis=0)\n', (10729, 10763), True, 'import numpy as np\n'), ((15888, 15902), 'scipy.stats.norm.logpdf', 'norm.logpdf', (['y'], {}), '(y)\n', (15899, 15902), False, 'from scipy.stats import norm\n'), ((8536, 8555), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (8546, 8555), True, 'import numpy as np\n'), ((8797, 8808), 'numpy.ones', 'np.ones', (['_n'], {}), '(_n)\n', (8804, 8808), True, 'import numpy as np\n'), ((11044, 11165), 'warnings.warn', 'warnings.warn', (['"""inf encountered for some data points. We will remove these inf points for now."""', 'RuntimeWarning'], {}), "(\n 'inf encountered for some data points. We will remove these inf points for now.'\n , RuntimeWarning)\n", (11057, 11165), False, 'import warnings\n'), ((13803, 13821), 'numpy.log', 'np.log', (['map_result'], {}), '(map_result)\n', (13809, 13821), True, 'import numpy as np\n'), ((14147, 14167), 'numpy.array', 'np.array', (['map_result'], {}), '(map_result)\n', (14155, 14167), True, 'import numpy as np\n'), ((15171, 15191), 'numpy.array', 'np.array', (['map_result'], {}), '(map_result)\n', (15179, 15191), True, 'import numpy as np\n'), ((15526, 15544), 'numpy.log', 'np.log', (['map_result'], {}), '(map_result)\n', (15532, 15544), True, 'import numpy as np\n'), ((10182, 10357), 'warnings.warn', 'warnings.warn', (['"""we found that sometimes it goes wrong, but actually it can work if we use a different random seed, so let\'s give it one more chance."""', 'RuntimeWarning'], {}), '(\n "we found that sometimes it goes wrong, but actually it can work if we use a different random seed, so let\'s give it one more chance."\n , RuntimeWarning)\n', (10195, 10357), False, 'import warnings\n'), ((10928, 10951), 'numpy.isfinite', 'np.isfinite', (['self._data'], {}), '(self._data)\n', (10939, 10951), True, 'import numpy as np\n'), ((10877, 10893), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (10890, 10893), True, 'import numpy as np\n')]
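The _gaussianize_1d step above Gaussianizes one coordinate by composing an estimated CDF with the standard normal quantile function, norm.ppf. A minimal empirical-CDF version of the same idea, without the KDE smoothing and cubic-spline caching the class layers on top:

import numpy as np
from scipy.stats import norm, rankdata

x = np.random.default_rng(0).exponential(size=1000)   # skewed 1-D samples

u = rankdata(x) / (x.size + 1.0)   # empirical CDF values strictly inside (0, 1)
y = norm.ppf(u)                   # map through the normal quantile function

print(round(y.mean(), 3), round(y.std(), 3))   # close to 0 and 1 afterwards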
import common import numpy as np from utils import iter_proofs from lark.exceptions import UnexpectedCharacters, ParseError from tac_grammar import CFG, TreeBuilder, NonterminalNode, TerminalNode import pdb grammar = CFG(common.tac_grammar, 'tactic_expr') tree_builder = TreeBuilder(grammar) ast_height = [] num_tokens = [] num_chars = [] has_argument = [] def process_proof(filename, proof_data): global ast_height global num_tokens global num_chars for step in proof_data['steps']: if step['command'][1] != 'VernacExtend': continue if not step['command'][0].endswith('.'): continue tac_str = step['command'][0][:-1] try: tree = tree_builder.transform(grammar.parser.parse(tac_str)) except (UnexpectedCharacters, ParseError) as ex: continue ast_height.append(tree.height()) num_tokens.append(tree.num_tokens()) num_chars.append(len(tac_str)) has_argument.append(int(tree.has_argument())) iter_proofs(common.data_root, process_proof, show_progress=True) print(np.mean(ast_height), np.mean(num_tokens), np.mean(num_chars), np.mean(has_argument))
[ "tac_grammar.CFG", "numpy.mean", "tac_grammar.TreeBuilder", "utils.iter_proofs" ]
[((219, 257), 'tac_grammar.CFG', 'CFG', (['common.tac_grammar', '"""tactic_expr"""'], {}), "(common.tac_grammar, 'tactic_expr')\n", (222, 257), False, 'from tac_grammar import CFG, TreeBuilder, NonterminalNode, TerminalNode\n'), ((273, 293), 'tac_grammar.TreeBuilder', 'TreeBuilder', (['grammar'], {}), '(grammar)\n', (284, 293), False, 'from tac_grammar import CFG, TreeBuilder, NonterminalNode, TerminalNode\n'), ((1041, 1105), 'utils.iter_proofs', 'iter_proofs', (['common.data_root', 'process_proof'], {'show_progress': '(True)'}), '(common.data_root, process_proof, show_progress=True)\n', (1052, 1105), False, 'from utils import iter_proofs\n'), ((1112, 1131), 'numpy.mean', 'np.mean', (['ast_height'], {}), '(ast_height)\n', (1119, 1131), True, 'import numpy as np\n'), ((1133, 1152), 'numpy.mean', 'np.mean', (['num_tokens'], {}), '(num_tokens)\n', (1140, 1152), True, 'import numpy as np\n'), ((1154, 1172), 'numpy.mean', 'np.mean', (['num_chars'], {}), '(num_chars)\n', (1161, 1172), True, 'import numpy as np\n'), ((1174, 1195), 'numpy.mean', 'np.mean', (['has_argument'], {}), '(has_argument)\n', (1181, 1195), True, 'import numpy as np\n')]
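The try/except around grammar.parser.parse above is the usual lark pattern for tolerating unparsable inputs while aggregating statistics. A self-contained sketch with a throwaway grammar (the CFG/TreeBuilder wrappers from tac_grammar are project-specific and not reproduced here):

import numpy as np
from lark import Lark
from lark.exceptions import UnexpectedCharacters, ParseError

parser = Lark(r"""
    start: WORD+
    %import common.WORD
    %import common.WS
    %ignore WS
""")

num_tokens = []
for text in ["intro x", "apply H", "???"]:
    try:
        tree = parser.parse(text)
    except (UnexpectedCharacters, ParseError):
        continue                  # skip strings the grammar rejects
    num_tokens.append(len(tree.children))   # WORD tokens per parsed string

print(np.mean(num_tokens))       # 2.0: only the two parsable strings count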
# pylint: disable-msg=W0611, W0612, W0511,R0201 """Tests suite for MaskedArray. Adapted from the original test_ma by <NAME> :author: <NAME> & <NAME> :contact: pierregm_at_uga_dot_edu & mattknox_ca_at_hotmail_dot_com :version: $Id: test_timeseries.py 3836 2008-01-15 13:09:03Z <EMAIL> $ """ __author__ = "<NAME> & <NAME> ($Author: <EMAIL> $)" __revision__ = "$Revision: 3836 $" __date__ = '$Date: 2008-01-15 08:09:03 -0500 (Tue, 15 Jan 2008) $' import numpy as np from numpy import bool_, complex_, float_, int_, object_ from numpy.testing import * import numpy.ma as ma from numpy.ma import MaskedArray, masked, nomask from numpy.ma.testutils import * import scikits.timeseries as ts from scikits.timeseries import \ TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, \ tseries, Date, date_array, now, time_series, \ adjust_endpoints, align_series, align_with, \ concatenate, fill_missing_dates, find_duplicated_dates, \ remove_duplicated_dates, split, stack get_varshape = tseries.get_varshape _timeseriescompat_multiple = tseries._timeseriescompat_multiple #------------------------------------------------------------------------------ class TestCreation(TestCase): "Base test class for MaskedArrays." def __init__(self, *args, **kwds): TestCase.__init__(self, *args, **kwds) dlist = ['2007-01-%02i' % i for i in range(1, 16)] dates = date_array(dlist, freq='D') data = ma.array(np.arange(15), mask=[1, 0, 0, 0, 0] * 3) self.d = (dlist, dates, data) def test_fromlist (self): "Test the creation of a TimeSeries w/ a list of dates as input dates." (dlist, dates, data) = self.d series = time_series(data, dlist, freq='D') self.failUnless(isinstance(series, TimeSeries)) assert_equal(series.mask, [1, 0, 0, 0, 0] * 3) assert_equal(series.series, data) assert_equal(series.dates, dates) assert_equal(series.freqstr, 'D') def test_fromrange (self): "Test the creation of a TimeSeries w/ a starting date." (dlist, dates, data) = self.d series = time_series(data, start_date=dates[0]) self.failUnless(isinstance(series, TimeSeries)) assert_equal(series.mask, [1, 0, 0, 0, 0] * 3) assert_equal(series.series, data) assert_equal(series.dates, dates) assert_equal(series.freqstr, 'D') def test_fromseries (self): "Test the creation of a TimeSeries w/ a time series as input data." (dlist, dates, data) = self.d series = time_series(data, dlist, freq='D') dates = dates + 15 series = time_series(series, dates) self.failUnless(isinstance(series, TimeSeries)) assert_equal(series.mask, [1, 0, 0, 0, 0] * 3) assert_equal(series.series, data) assert_equal(series.dates, dates) assert_equal(series.freqstr, 'D') def test_fromdatearray(self): "Tests the creation of a series with a DateArray as input data." (_, dates, _) = self.d data = dates # series = time_series(data, dates) self.failUnless(isinstance(series, TimeSeries)) assert_equal(series.dates, dates) assert_equal(series.data, data) assert_equal(series.freqstr, 'D') # series[5] = masked # ensure that series can be represented by a string after masking a value # (there was a bug before that prevented this from working when using a # DateArray for the data) strrep = str(series) def test_datafromlist(self): "Test the creation of a series w/ a list as input data." (_, dates, _) = self.d data = list(range(15)) series = time_series(data, dates) assert_equal(series._data.size, 15) def test_unsorted(self): "Tests that the data are properly sorted along the dates." 
dlist = ['2007-01-%02i' % i for i in (3, 2, 1)] data = [10, 20, 30] series = time_series(data, dlist, freq='D') assert_equal(series.data, [30, 20, 10]) # dates = date_array(dlist, freq='D') series = TimeSeries(data, dates) assert_equal(series.data, [30, 20, 10]) # series = time_series(data, dlist, freq='D', mask=[1, 0, 0]) assert_equal(series.mask, [0, 0, 1]) # data = ma.array([10, 20, 30], mask=[1, 0, 0]) series = time_series(data, dlist, freq='D') assert_equal(series._mask, [0, 0, 1]) def test_unsorted_w_datearray(self): "Tests that the data are properly sorted along the dates." dlist = ['2007-01-%02i' % i for i in (3, 2, 1)] data = [10, 20, 30] dates = date_array(dlist, freq='D') self.failUnless(dates._unsorted is not None) # series = time_series(data, dates=dates) assert_equal(series.data, [30, 20, 10]) self.failUnless(dates._unsorted is not None) self.failUnless(series.dates._unsorted is None) # series = time_series(data, dates=dates) assert_equal(series.data, [30, 20, 10]) self.failUnless(series.dates._unsorted is None) def test_setdates(self): "Tests setting the dates of a series." (dlist, dates, data) = self.d reference = time_series(data, dates=dates) # Set with a DateArray: that should work test_series = data.view(TimeSeries) test_series.dates = dates assert_equal(test_series.dates, reference.dates) def test_setdates_asndarray(self): "Tests setting the dates as a ndarray." (dlist, dates, data) = self.d test_series = data.view(TimeSeries) # Set with a ndarray: that shouldn't work test_dates = np.array(dates, copy=False, subok=False) try: test_series._dates = test_dates except TypeError: pass else: err_msg = "Dates shouldn't be set as basic ndarrays." raise TimeSeriesError(err_msg) def test_setdates_asdate(self): "Tests setting the dates as a Date" (dlist, dates, data) = self.d series = data.view(TimeSeries) try: series.dates = ts.now('D') except TypeError: pass else: err_msg = "Dates shouldn't be set as a Date objects." raise TimeSeriesError(err_msg) def test_setdates_with_incompatible_size(self): "Tests setting the dates w/ a DateArray of incompatible size" (dlist, dates, data) = self.d series = data.view(TimeSeries) try: series.dates = dates[:len(dates) // 2] except ts.TimeSeriesCompatibilityError: pass else: err_msg = "Dates size should match the input." raise TimeSeriesError(err_msg) def test_setdates_with_autoreshape(self): "Tests the automatic reshaping of dates." (dlist, dates, data) = self.d reference = time_series(data, dates=dates) test_series = data.view(TimeSeries) # Set with a datearray w/ a different size than expected: should fail test_dates = dates[:-1] try: test_series.dates = test_dates except TimeSeriesCompatibilityError: pass else: err_msg = "Dates should have a size compatible with data" raise TimeSeriesError(err_msg) # Set w/ a date of a different shape: should work, but the shape changes test_dates = dates.reshape(-1, 1) test_series._dates = test_dates assert_equal(test_series.dates, reference.dates) assert_equal(test_series.dates.shape, test_series.shape) test_dates = np.array(dates, copy=False, subok=True, ndmin=2) test_series._dates = test_dates assert_equal(test_series.dates, reference.dates) assert_equal(test_series.dates.shape, test_series.shape) def test_setdates_unsorted_basic(self): "Test automatic sorting when setting dates - 1D case." 
        dates = date_array([ts.Date('D', '2001-01-%02i' % _) for _ in (4, 3, 2, 1)])
        a = np.array((4, 3, 2, 1), dtype=float)
        series = a.view(ts.TimeSeries)
        assert_equal(series.dates, [])
        assert_equal(series, (4, 3, 2, 1))
        #
        series._dates = dates
        series.sort_chronologically()
        assert_equal(series, (1, 2, 3, 4))

    def test_setdates_unsorted_reshaped(self):
        "Test automatic sorting when setting dates - 1D case reshaped to nD."
        dates = date_array([ts.Date('D', '2001-01-%02i' % _) for _ in (4, 3, 2, 1)])
        a = np.array([[4., 3.], [2., 1.]], dtype=float)
        series = a.view(TimeSeries)
        series._dates = dates
        series.sort_chronologically()
        assert_equal(series, [[1., 2.], [3., 4.]])

    def test_setdates_unsorted_2D(self):
        "Test automatic sorting when setting dates - 2D case."
        dates = date_array([ts.Date('D', '2001-01-%02i' % _) for _ in (4, 3, 2, 1)])
        a = np.arange(12).reshape(4, 3)
        series = a.view(TimeSeries)
        series._dates = dates
        series.sort_chronologically()
        assert_equal(series, [[ 9., 10., 11.],
                              [ 6.,  7.,  8.],
                              [ 3.,  4.,  5.],
                              [ 0.,  1.,  2.]])

    def test_copy(self):
        "Tests the creation of a timeseries with copy=True"
        dlist = ['2007-01-%02i' % i for i in range(1, 16)]
        dates = date_array(dlist, freq='D')
        data = ma.array(np.arange(15), mask=[1, 0, 0, 0, 0] * 3)
        series = time_series(data, dates)
        assert_equal(series.dates.ctypes.data, dates.ctypes.data)
        assert_equal(series.data.ctypes.data, data.data.ctypes.data)
        assert_equal(series.mask.ctypes.data, data.mask.ctypes.data)
        #
        series = time_series(data, dates, copy=True)
        assert_not_equal(series.dates.ctypes.data, dates.ctypes.data)
        assert_not_equal(series.data.ctypes.data, data.data.ctypes.data)
        assert_not_equal(series.mask.ctypes.data, data.mask.ctypes.data)

    def test_using_length(self):
        "Test using the `length` parameter of time_series."
        start = ts.Date('M', '1955-01')
        data = np.random.uniform(0, 1, 50 * 12).reshape(50, 12)
        # Default : the dates should be (50,)
        series = ts.time_series(data, start_date=start)
        assert_equal(series.shape, (50, 12))
        assert_equal(series.dates.shape, (50,))
        assert_equal(series.varshape, (12,))
        # Forcing dates to be 2D
        series = ts.time_series(data, start_date=start, length=600)
        assert_equal(series.shape, (50, 12))
        assert_equal(series.dates.shape, (50, 12))
        assert_equal(series.varshape, ())
        # Forcing dates to 1D
        series = ts.time_series(data, start_date=start, length=50)
        assert_equal(series.shape, (50, 12))
        assert_equal(series.dates.shape, (50,))
        assert_equal(series.varshape, (12,))
        # Make sure we raise an exception if something goes wrong....
        try:
            series = ts.time_series(data, start_date=start, length=100)
        except ts.TimeSeriesCompatibilityError:
            pass
        else:
            errmsg = "There should not be dates/data compatibility in this case."
raise TimeSeriesCompatibilityError(errmsg) def test_varshape(self): "Test some corner case of varshape" test = ts.time_series(np.ones((10, 2)), start_date=ts.now('d')) assert_equal(test.varshape, (2,)) # test = ts.time_series(np.ones((10, 1)), start_date=ts.now('d')) assert_equal(test.varshape, (1,)) # test = ts.time_series(np.ones((10,)), start_date=ts.now('d')) assert_equal(test.varshape, ()) #------------------------------------------------------------------------------ class TestArithmetics(TestCase): "Some basic arithmetic tests" def __init__(self, *args, **kwds): TestCase.__init__(self, *args, **kwds) dlist = ['2007-01-%02i' % i for i in range(1, 16)] dates = date_array(dlist, freq='D') data = ma.array(np.arange(15), mask=[1, 0, 0, 0, 0] * 3) self.d = (time_series(data, dlist, freq='D'), data) def test_intfloat(self): "Test arithmetic timeseries/integers" (series, data) = self.d # nseries = series + 1 self.failUnless(isinstance(nseries, TimeSeries)) assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3) assert_equal(nseries.series, data + 1) assert_equal(nseries.dates, series.dates) # nseries = series - 1 self.failUnless(isinstance(nseries, TimeSeries)) assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3) assert_equal(nseries.series, data - 1) assert_equal(nseries.dates, series.dates) # nseries = series * 1 self.failUnless(isinstance(nseries, TimeSeries)) assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3) assert_equal(nseries.series, data * 1) assert_equal(nseries.dates, series.dates) # nseries = series / 1. self.failUnless(isinstance(nseries, TimeSeries)) assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3) assert_equal(nseries.series, data / 1.) assert_equal(nseries.dates, series.dates) def test_intfloat_inplace(self): "Test int/float arithmetics in place." (series, data) = self.d nseries = series.astype(float_) idini = id(nseries) data = data.astype(float_) # nseries += 1. self.failUnless(isinstance(nseries, TimeSeries)) assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3) assert_equal(nseries.series, data + 1.) assert_equal(nseries.dates, series.dates) assert_equal(id(nseries), idini) # nseries -= 1. self.failUnless(isinstance(nseries, TimeSeries)) assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3) assert_equal(nseries.series, data) assert_equal(nseries.dates, series.dates) assert_equal(id(nseries), idini) # nseries *= 2. self.failUnless(isinstance(nseries, TimeSeries)) assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3) assert_equal(nseries.series, data * 2.) assert_equal(nseries.dates, series.dates) assert_equal(id(nseries), idini) # nseries /= 2. self.failUnless(isinstance(nseries, TimeSeries)) assert_equal(nseries.mask, [1, 0, 0, 0, 0] * 3) assert_equal(nseries.series, data) assert_equal(nseries.dates, series.dates) assert_equal(id(nseries), idini) def test_updatemask(self): "Checks modification of mask." (series, data) = self.d assert_equal(series.mask, [1, 0, 0, 0, 0] * 3) series.mask = nomask self.failUnless(not series.mask.any()) self.failUnless(not series.series.mask.any()) #series._series.mask = [1,0,0]*5 series.mask = [1, 0, 0] * 5 assert_equal(series.mask, [1, 0, 0] * 5) assert_equal(series.series.mask, [1, 0, 0] * 5) series[2] = masked assert_equal(series.mask, [1, 0, 1] + [1, 0, 0] * 4) assert_equal(series.series.mask, [1, 0, 1] + [1, 0, 0] * 4) def test_ismasked(self): "Checks checks on masked" (series, data) = self.d self.failUnless(series._series[0] is masked) #!!!:... 
and of course, masked doesn't have a _series attribute # self.failUnless(series[0]._series is masked) def test_incompatible_dates(self): """ Test operations on two series with same dimensions but incompatible dates """ (series, data) = self.d a, b = series[1:], series[:-1] result = a + b self.failUnless(not isinstance(result, TimeSeries)) assert_equal(result.ndim, a.ndim) assert_equal(result.size, a.size) #------------------------------------------------------------------------------ class TestGetitem(TestCase): "Some getitem tests" def setUp(self): dates = date_array(['2007-01-%02i' % i for i in range(1, 16)], freq='D') data1D = ma.array(np.arange(15), mask=[1, 0, 0, 0, 0] * 3, dtype=float_) data3V = ma.array([[10, 11, 12], [20, 21, 22], [30, 31, 32]], mask=[[1, 0, 0, ], [0, 0, 0], [0, 0, 1]]) data2D = ma.array(np.random.rand(60).reshape(3, 4, 5)) for i in range(3): data2D[i, i, i] = masked #......................... series1D = time_series(data1D, dates, freq='D') series3V = time_series(data3V, dates[:len(data3V)], freq='D') series2D = time_series(data2D, dates[:len(data2D)], freq='D') self.info = locals() del(self.info['i']) self.__dict__.update(self.info) return def test_with_integers(self): # 1D series .............. (series1D, data1D) = (self.series1D, self.data1D) self.failUnless(series1D[0] is masked) test = series1D[-1] assert_equal(test, data1D[-1]) self.failUnless(not isinstance(test, TimeSeries)) # nV series .............. (series3V, data3V) = (self.series3V, self.data3V) test = series3V[-1] assert_equal(test, data3V[-1]) assert_equal(test.mask, [0, 0, 1]) self.failUnless(not isinstance(test, TimeSeries)) # 2D series .............. (series2D, data2D) = (self.series2D, self.data2D) test = series2D[-1] assert_equal(test, data2D[-1].squeeze()) self.failUnless(not isinstance(test, TimeSeries)) def test_with_slices(self): "Tests __getitem__ w/ slices." def _wslice(series, data, dates): test = series[1:2] self.failUnless(isinstance(test, TimeSeries)) assert_equal(test._varshape, series._varshape) assert_equal(test.series, data[1:2]) assert_equal(test.dates, dates[1:2]) assert_equal(test.mask, data.mask[1:2]) assert_equal(test.freq, dates.freq) # test = series[:3] self.failUnless(isinstance(test, TimeSeries)) test_series = test.series assert_equal(test_series.data, data[:3].data) assert_equal(test_series.mask, data[:3].mask) assert_equal(test.dates, dates[:3]) #..... dates = self.dates (series1D, data1D) = (self.series1D, self.data1D) _wslice(series1D, data1D, dates) (series3V, data3V) = (self.series3V, self.data3V) _wslice(series3V, data3V, dates) (series2D, data2D) = (self.series2D, self.data2D) _wslice(series2D, data2D, dates) def test_with_slices_on_nD(self): (series3V, data3V) = (self.series3V, self.data3V) # test = series3V[0, :] self.failUnless(not isinstance(test, TimeSeries)) assert_equal(test, data3V[0, :]) assert_equal(test.mask, data3V[0, :].mask) # test = series3V[:, 0] self.failUnless(isinstance(test, TimeSeries)) assert_equal(test, data3V[:, 0]) assert_equal(test.mask, data3V[:, 0].mask) assert_equal(test._varshape, ()) assert_equal(test.dates, series3V.dates) # (series2D, data2D) = (self.series2D, self.data2D) test = series2D[0] self.failUnless(not isinstance(test, TimeSeries)) assert_equal(test.shape, (4, 5)) assert_equal(test, data2D[0]) # test = series2D[:, :, 0] self.failUnless(isinstance(test, TimeSeries)) assert_equal(test, series2D.data[:, :, 0]) assert_equal(test.dates, series2D.dates) def test_with_list(self): "Tests __getitem__ w/ list." 
def _wlist(series, data, dates): test = series[[0, 1, -1]] control = data[[0, 1, -1]] self.failUnless(isinstance(test, TimeSeries)) assert_equal(test.series, control) assert_equal(test.mask, control.mask) assert_equal(test.dates, dates[[0, 1, -1]]) #..... dates = self.dates (series1D, data1D) = (self.series1D, self.data1D) _wlist(series1D, data1D, dates) (series3V, data3V) = (self.series3V, self.data3V) _wlist(series3V, data3V, dates[:3]) (series2D, data2D) = (self.series2D, self.data2D) _wlist(series2D, data2D, dates[:3]) def test_with_dates(self): "Tests __getitem__ w/ dates." def _wdates(series, data, dates): # Single date test = series[dates[0]] assert_equal(test, data[0]) assert_equal(test.mask, data[0].mask) self.failUnless(not isinstance(test, TimeSeries)) # Multiple dates as a date_array test = series[dates[[0, -1]]] assert_equal(test, data[[0, -1]]) self.failUnless(isinstance(test, TimeSeries)) assert_equal(test.dates, dates[[0, -1]]) # Multiple dates as a list test = series[[dates[0], dates[-1]]] assert_equal(test, data[[0, -1]]) self.failUnless(isinstance(test, TimeSeries)) # Multiple dates as a slice dslice = slice(dates[1], None, None) test = series[dslice] assert_equal(test, data[1:]) self.failUnless(isinstance(test, TimeSeries)) #..... dates = self.dates (series1D, data1D) = (self.series1D, self.data1D) _wdates(series1D, data1D, dates) (series3V, data3V) = (self.series3V, self.data3V) _wdates(series3V, data3V, dates[:3]) (series2D, data2D) = (self.series2D, self.data2D) _wdates(series2D, data2D, dates[:3]) def test_slicing_with_dates(self): "Tests __getitem__ w/ date based slices" def _testslice(series): sd, ed = series.start_date, series.end_date # full range of series assert_equal(series, series[sd:ed + 1]) # exclude first and last point of series assert_equal(series[1:-1], series[sd + 1:ed]) # slice with dates beyond the start and end dates assert_equal(series, series[sd - 10:ed + 10]) # slice with dates before the series start date assert_equal(series[0:0], series[sd - 10:sd - 5]) #..... series = self.series1D _testslice(series) # Now try slicing on a series with missing dates series = series[::2] _testslice(series) def test_with_dates_as_str(self): "Test using a string corresponding to a date as index." def _wdates(series, data): date = self.dates[0].strfmt("%Y-%m-%d") # Single date test = series[date] assert_equal(test, data[0]) assert_equal(test.mask, data[0].mask) self.failUnless(not isinstance(test, TimeSeries)) #..... 
(series1D, data1D) = (self.series1D, self.data1D) _wdates(series1D, data1D) (series3V, data3V) = (self.series3V, self.data3V) _wdates(series3V, data3V) (series2D, data2D) = (self.series2D, self.data2D) _wdates(series2D, data2D) # test = series1D[['2007-01-01', '2007-01-15']] control = series1D[[0, -1]] assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(test.dates, control.dates) def test_on1D_reshaped(self): trick = time_series(self.data1D.reshape(3, 5), dates=self.dates.reshape(3, 5), freq='D') test = trick[0, 0] self.failUnless(not isinstance(test, TimeSeries)) self.failUnless(test is masked) # test = trick[-1, -1] self.failUnless(not isinstance(test, TimeSeries)) assert_equal(test, 14) # test = trick[0] self.failUnless(isinstance(test, TimeSeries)) assert_equal(test._varshape, ()) assert_equal(test, trick.series[0]) assert_equal(test.dates, trick.dates[0]) def test_wtimeseries(self): "Tests getitem w/ TimeSeries as index" series1D = self.series1D # Testing a basic condition on data cond = (series1D < 8).filled(False) dseries = series1D[cond] assert_equal(dseries.data, [1, 2, 3, 4, 6, 7]) assert_equal(dseries.dates, series1D.dates[[1, 2, 3, 4, 6, 7]]) assert_equal(dseries.mask, nomask) # Testing a basic condition on dates series1D[series1D.dates < Date('D', string='2007-01-06')] = masked assert_equal(series1D[:5].series.mask, [1, 1, 1, 1, 1]) def test_on2d(self): "Tests getitem on a 2D series" (a, b, d) = ([1, 2, 3], [3, 2, 1], date_array(now('M'), length=3)) ser_x = time_series(np.column_stack((a, b)), dates=d) assert_equal(ser_x[0, 0], time_series(a[0], d[0])) assert_equal(ser_x[0, :], (a[0], b[0])) assert_equal(ser_x[:, 0], time_series(a, d)) assert_equal(ser_x[:, :], ser_x) def test_slicing_and_keeping_additional_attributes(self): series1D = self.series1D series1D.fill_value = -9999 series1D._basedict['info'] = '???' 
piece = series1D[:5] assert_equal(piece._fill_value, -9999) assert_equal(piece[:5]._basedict['info'], '???') #------------------------------------------------------------------------------ class TestSetItem(TestCase): # def setUp(self): dlist = ['2007-01-%02i' % i for i in range(1, 6)] dates = date_array(dlist, freq='D') data = ma.array(np.arange(5), mask=[1, 0, 0, 0, 0], dtype=float) self.series = time_series(data, dates) self.dates = dates # def test_with_integers(self): "Tests setitem with integers" series = self.series series[0] = 1 assert_equal(series.data, [1, 1, 2, 3, 4]) assert_equal(series.mask, [0, 0, 0, 0, 0]) series[0] = masked assert_equal(series.data, [1, 1, 2, 3, 4]) assert_equal(series.mask, [1, 0, 0, 0, 0]) try: series[10] = -999 except IndexError: pass # def test_with_dates(self): "Test setitem w/ dates" (series, dates) = (self.series, self.dates) # last_date = dates[-1] series[last_date] = 5 assert_equal(series.data, [0, 1, 2, 3, 5]) assert_equal(series.mask, [1, 0, 0, 0, 0]) # last_date += 10 try: series[last_date] = -999 except IndexError: pass # With dates as string series['2007-01-01'] = 5 assert_equal(series.data, [5, 1, 2, 3, 5]) assert_equal(series.mask, [0, 0, 0, 0, 0]) # test for bug fixed in r1203 x, y = ts.now('b'), ts.now('b') + 1 a = ts.time_series([1], start_date=x) b = ts.time_series([4, 5], start_date=x) b[x:y] = a[x:y] assert_equal(b[0], 1) def test_with_datearray(self): "Test setitem w/ a date_array" (series, dates) = (self.series, self.dates) # Test with date array series[dates[[0, -1]]] = 0 assert_equal(series.data, [0, 1, 2, 3, 0]) assert_equal(series.mask, [0, 0, 0, 0, 0]) # Test with date as list ofstring series[['2007-01-01', '2007-01-02']] = 10 assert_equal(series.data, [10, 10, 2, 3, 0]) assert_equal(series.mask, [ 0, 0, 0, 0, 0]) #------------------------------------------------------------------------------ class TestTimeSeriesMethods(TestCase): def setUp(self): dates = date_array(['2007-01-%02i' % i for i in (1, 2, 3)], freq='D') data1D = ma.array([1, 2, 3], mask=[1, 0, 0, ]) data3V = ma.array([[10, 11, 12], [20, 21, 22], [30, 31, 32]], mask=[[1, 0, 0, ], [0, 0, 0], [0, 0, 1]]) data2D = np.random.rand(60).reshape(3, 4, 5) series1D = time_series(data1D, dates, freq='D') series3V = time_series(data3V, dates, freq='D') series2D = time_series(data2D, dates, freq='D') self.info = locals() del(self.info['i']) return def test_torecords_1D(self): "Test conversion to records on 1D series" series = ts.time_series([1, 2, 3], start_date=ts.Date('M', '2001-01-01'), mask=[0, 1, 0]) ndtype = [('_dates', int), ('_data', int), ('_mask', bool)] control = np.array([(24001, 1, False), (24002, 2, True), (24003, 3, False)], dtype=ndtype) test = series.torecords() assert_equal(test, control) def test_torecords_2D(self): "Test torecords on 2D series" series = ts.time_series([[1, 1], [2, 2], [3, 3]], start_date=ts.Date('M', '2001-01-01'), mask=[[0, 1], [0, 0], [1, 0]]) ndtype = [('_dates', int), ('_data', (int, (2,))), ('_mask', (bool, (2,)))] control = np.array([(24001, [1, 1], [False, True]), (24002, [2, 2], [False, False]), (24003, [3, 3], [True, False])], dtype=ndtype) test = series.torecords() assert_equal_records(test, control) def test_torecords_structured(self): "Test torecords on structured array" series = ts.time_series([(1, 1), (2, 2), (3, 3)], start_date=ts.Date('M', '2001-01-01'), mask=[(0, 1), (0, 0), (1, 0)], dtype=[('a', int), ('b', float)]) ndtype = [('_dates', int), ('_data', [('a', int), ('b', float)]), ('_mask', [('a', bool), ('b', bool)])] 
control = np.array([(24001, (1, 1), (False, True)), (24002, (2, 2), (False, False)), (24003, (3, 3), (True, False))], dtype=ndtype) test = series.torecords() assert_equal_records(test, control) def test_reshape_1D(self): "Test reshape on data w/ 1 variables" start = ts.Date('M', '2001-01') series = ts.time_series([1, 2, 3, 4], mask=[0, 0, 1, 0], start_date=start) test = series.reshape(2, 2) control = ts.time_series([[1, 2], [3, 4]], mask=[[0, 0], [1, 0]], dates=ts.date_array(start_date=start, length=4).reshape(2, 2)) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(test.dates, control.dates) assert_equal(test.varshape, series.varshape) # test = series.copy() test.shape = (2, 2) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(test.dates, control.dates) assert_equal(test.varshape, series.varshape) def test_reshape_1V(self): "Test reshape on series w/ 2 variables" series = ts.time_series([[1, 2], [3, 4]], mask=[[0, 0], [1, 0]], start_date=ts.Date('M', '2001-01')) test = series.reshape((-1, 1)) control = ts.time_series([[[1, 2]], [[3, 4]]], mask=[[[0, 0]], [[1, 0]]], dates=series.dates.reshape((-1, 1))) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(test.dates, control.dates) assert_equal(test.varshape, control.varshape) # test = series.reshape((1, -1, 1)) control = ts.time_series([[[[1, 2]], [[3, 4]]]], mask=[[[[0, 0]], [[1, 0]]]], dates=series.dates.reshape((1, -1, 1))) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(test.dates, control.dates) def test_reshaping_1D(self): "Tests the reshaping of a 1D series." series1D = self.info['series1D'] newshape = (3, 1) test1D = series1D.reshape(newshape) assert_equal(test1D.shape, newshape) assert_equal(test1D.series.shape, newshape) assert_equal(test1D.dates.shape, newshape) assert_equal(test1D.varshape, series1D.varshape) # Make sure we haven't propagated the new shape self.failUnless(test1D.shape != series1D.shape) self.failUnless(test1D.dates.shape != series1D.dates.shape) # Using .shape test1D = series1D.copy() test1D.shape = newshape assert_equal(test1D.shape, newshape) assert_equal(test1D.series.shape, newshape) assert_equal(test1D.dates.shape, newshape) self.failUnless(series1D.dates.shape != newshape) assert_equal(test1D.varshape, series1D.varshape) # Using multiple args test1D = series1D.reshape(*newshape) assert_equal(test1D.shape, newshape) assert_equal(test1D.varshape, series1D.varshape) def test_reshape_batch(self): "Test a succession of reshape" a = ts.time_series([1, 2, 3], start_date=ts.now('D')) test = a.reshape(-1, 1) assert_equal(test.shape, (3, 1)) assert_equal(test.varshape, ()) test = a.reshape(-1, 1).reshape(-1) assert_equal(test.shape, (3,)) assert_equal(test.varshape, ()) def test_reshaping_2D(self): "Tests the reshaping of a nV/nD series." 
series3V = self.info['series3V'] newshape = (1, 3, 3) try: test3V = series3V.reshape(newshape) assert_equal(test3V.shape, newshape) assert_equal(test3V.series.shape, newshape) assert_equal(test3V.dates.shape, (1, 3)) assert_equal(test3V.varshape, series3V.varshape) except NotImplementedError: pass else: raise Exception("Reshaping nV/nD series should be implemented!") # Using .shape try: test3V = series3V.copy() test3V.shape = newshape assert_equal(test3V.shape, newshape) assert_equal(test3V.series.shape, newshape) assert_equal(test3V.dates.shape, (1, 3)) assert_equal(test3V.varshape, series3V.varshape) except NotImplementedError: pass else: raise Exception("Reshaping nV/nD series should be implemented!") def test_ravel_1D(self): "Test .ravel on 1D data" series = ts.time_series([1, 2, 3, 4], mask=[0, 0, 1, 0], start_date=ts.Date('M', '2009-01')) test = series.ravel() assert_equal(test, series) assert_equal(test.mask, series.mask) assert_equal(test.dates, series.dates) assert_equal(test.varshape, series.varshape) def test_ravel_1V(self): "Test .ravel on nD/1V data" dates = ts.date_array(start_date=ts.Date('M', '2009-01'), length=4) series = ts.time_series([[1, 2], [3, 4]], mask=[[0, 0], [1, 0]], dates=dates) test = series.ravel() assert_equal(test.data, series.data.ravel()) assert_equal(test.mask, series.mask.ravel()) assert_equal(test.dates, series.dates.ravel()) assert_equal(test.varshape, series.varshape) assert_equal(test.varshape, ()) def test_ravel_2V(self): "Test .ravel on 2V data" series = ts.time_series([[1, 2], [3, 4]], mask=[[0, 0], [1, 0]], start_date=ts.Date('M', '2009-01'),) test = series.ravel() assert_equal(test.data, series.data) assert_equal(test.mask, series.mask) assert_equal(test.dates, series.dates) assert_equal(test.varshape, series.varshape) # dates = ts.date_array(start_date=ts.Date('M', '2009-01'), length=2) series = ts.time_series([[[1, 2]], [[3, 4]]], mask=[[[0, 0]], [[1, 0]]], dates=dates.reshape(1, 2)) test = series.ravel() assert_equal(test.data, [[1, 2], [3, 4]]) assert_equal(test.mask, [[0, 0], [1, 0]]) assert_equal(test.dates, series.dates.ravel()) assert_equal(test.varshape, (2,)) #------------------------------------------------------------------------------ class TestFunctions(TestCase): "Some getitem tests" def __init__(self, *args, **kwds): TestCase.__init__(self, *args, **kwds) dlist = ['2007-01-%02i' % i for i in range(1, 16)] dates = date_array(dlist, freq='D') data = ma.array(np.arange(15), mask=[1, 0, 0, 0, 0] * 3) self.d = (time_series(data, dates), data, dates) # def test_adjustendpoints(self): "Tests adjust_endpoints" (series, data, dates) = self.d dseries = adjust_endpoints(series, series.dates[0], series.dates[-1]) assert_equal(dseries, series) dseries = adjust_endpoints(series, series.dates[3], series.dates[-3]) assert_equal(dseries, series[3:-2]) dseries = adjust_endpoints(series, end_date=Date('D', string='2007-01-31')) assert_equal(dseries.size, 31) assert_equal(dseries._mask, np.r_[series.mask, [1] * 16]) dseries = adjust_endpoints(series, end_date=Date('D', string='2007-01-06')) assert_equal(dseries.size, 6) assert_equal(dseries, series[:6]) dseries = adjust_endpoints(series, start_date=Date('D', string='2007-01-06'), end_date=Date('D', string='2007-01-31')) assert_equal(dseries.size, 26) assert_equal(dseries._mask, np.r_[series.mask[5:], [1] * 16]) # def test_adjustendpoints_withdatestrings(self): "Tests adjust_endpoints w/ string dates" (series, data, dates) = self.d dseries = adjust_endpoints(series, end_date='2007-01-31') 
        assert_equal(dseries.size, 31)
        assert_equal(dseries._mask, np.r_[series.mask, [1] * 16])
        dseries = adjust_endpoints(series, end_date='2007-01-06')
        assert_equal(dseries.size, 6)
        assert_equal(dseries, series[:6])
        dseries = adjust_endpoints(series,
                                  start_date='2007-01-06',
                                  end_date='2007-01-31')
        assert_equal(dseries.size, 26)
        assert_equal(dseries._mask, np.r_[series.mask[5:], [1] * 16])
    #
    def test_alignseries(self):
        "Tests align_series & align_with"
        (series, data, dates) = self.d
        #
        empty_series = time_series([], freq='d')
        a, b = align_series(series, empty_series)
        assert_equal(a.start_date, b.start_date)
        assert_equal(a.end_date, b.end_date)
        #
        aseries = time_series(data, dates + 10)
        bseries = time_series(data, dates - 10)
        (a, b) = align_with(series, aseries, bseries)
        assert_equal(a.dates, series.dates)
        assert_equal(b.dates, series.dates)
        assert_equal(a[-5:], series[:5])
        assert_equal(b[:5], series[-5:])
    #
    def test_tshift(self):
        "Test tshift function"
        series = self.d[0]
        shift_negative = series.tshift(-1)
        result_data = [999] + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        result_mask = [  1] + [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0]
        shift_negative_result = time_series(result_data,
                                            dates=series.dates,
                                            mask=result_mask)

        shift_positive = series.tshift(1)
        result_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] + [999]
        result_mask = [0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0] + [  1]
        shift_positive_result = time_series(result_data,
                                            dates=series.dates,
                                            mask=result_mask)

        assert_array_equal(shift_negative, shift_negative_result)
        assert_array_equal(shift_positive, shift_positive_result)
    #
    def test_split(self):
        """Test the split function."""
        ms = time_series(np.arange(62).reshape(31, 2),
                         start_date=Date(freq='d', year=2005, month=7, day=1))
        d1, d2 = split(ms)
        assert_array_equal(d1.data, ms.data[:, 0])
        assert_array_equal(d1.dates, ms.dates)
        assert_array_equal(d2.data, ms.data[:, 1])

        series = self.d[0]
        ss = split(series)[0]
        assert_array_equal(series, ss)

    def test_convert(self):
        """Test convert function

        Just check basic functionality. The details of the actual
        date conversion algorithms are already tested by asfreq in the
        test_dates test suite.
""" June2005M = Date(freq='M', year=2005, month=6) lowFreqSeries = time_series(np.arange(10), start_date=June2005M) # Conversion to same frequency assert_array_equal(lowFreqSeries, lowFreqSeries.convert("M")) # Conversion to higher frequency - position=START lowToHigh_start = lowFreqSeries.convert('B', position='START') assert_equal(lowToHigh_start.start_date, June2005M.asfreq("B", relation="START")) assert_equal(lowToHigh_start.end_date, (June2005M + 9).asfreq("B", relation="END")) assert_equal(lowToHigh_start.mask[0], False) assert_equal(lowToHigh_start.mask[-1], True) # Conversion to higher frequencyt - position=END lowToHigh_end = lowFreqSeries.convert('B', position='END') assert_equal(lowToHigh_end.start_date, June2005M.asfreq("B", relation="START")) assert_equal(lowToHigh_end.end_date, (June2005M + 9).asfreq("B", relation="END")) assert_equal(lowToHigh_end.mask[0], True) assert_equal(lowToHigh_end.mask[-1], False) # ensure that position argument is not case sensitive lowToHigh_start_lowercase = lowFreqSeries.convert('B', position='start') assert_array_equal(lowToHigh_start, lowToHigh_start_lowercase) # # Conversion to lower frequency June2005B = Date(freq='b', year=2005, month=6, day=1) highFreqSeries = time_series(np.arange(100), start_date=June2005B) highToLow = highFreqSeries.convert('M', func=None) assert_equal(highToLow.ndim, 2) assert_equal(highToLow.shape[1], 23) assert_equal(highToLow.start_date, June2005B.asfreq('M')) assert_equal(highToLow.end_date, (June2005B + 99).asfreq('M')) def test_convert_with_func(self): "Test convert w/ function on 1D series" mdata = ts.time_series(np.arange(24), mask=[1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1], start_date=ts.Date('M', '2001-01')) test = mdata.convert('A', func=ts.last_unmasked_val) control = ts.time_series([7, 22], start_date=ts.Date('A', '2001')) assert_equal(test, control) def test_convert_nd_with_func(self): "Test convert w/ function on nD series" ndseries = time_series(np.arange(124).reshape(62, 2), start_date=Date(freq='D', string='2005-07-01')) assert_equal(ndseries.convert('M', sum), [[930, 961], [2852, 2883]]) def test_fill_missing_dates(self): """Test fill_missing_dates function""" _start = Date(freq='m', year=2005, month=1) _end = Date(freq='m', year=2005, month=4) # dates = date_array([_start, _end], freq='M') series = time_series([1, 2], dates) filled_ser = fill_missing_dates(series) # assert_equal(filled_ser.start_date, _start) assert_equal(filled_ser.end_date, _end) self.failUnless(filled_ser.is_full()) self.failUnless(not filled_ser.has_duplicated_dates()) assert_equal(filled_ser.size, _end - _start + 1) # data = np.arange(5 * 24).reshape(5, 24) datelist = ['2007-07-0%i' % i for i in (1, 2, 3, 5, 6)] dates = date_array(datelist, freq='D') dseries = time_series(data, dates) ndates = date_array(start_date=dates[0], end_date=dates[-2]) # fseries = fill_missing_dates(dseries) assert_equal(fseries.shape, (6, 24)) assert_equal(fseries.mask[:, 0], [0, 0, 0, 1, 0, 0]) # fseries = fill_missing_dates(dseries[:, 0]) assert_equal(fseries.shape, (6,)) assert_equal(fseries.mask, [0, 0, 0, 1, 0, 0]) # series = time_series(data.ravel()[:4].reshape(2, 2), dates=dates[:-1]) fseries = fill_missing_dates(series) assert_equal(fseries.shape, (5,)) assert_equal(fseries.mask, [0, 0, 0, 1, 0, ]) def test_fill_missing_dates_structured_arrays(self): "Test fill_missing_dates on structured arrays" ndtype = [('a', float), ('b', float)] series = ts.time_series([(1, 1), (2, 2), (3, 3), ], dtype=ndtype, 
dates=['2001-%02i' % i for i in (1, 2, 6)], freq='M') test = series.fill_missing_dates() control = ts.time_series([(1, 1), (2, 2), (0, 0), (0, 0), (0, 0), (3, 3), ], mask=[False, False, True, True, True, False], dtype=ndtype, start_date=ts.Date('M', '2001-01')) assert_equal(test, control) # def test_fill_missing_dates_undefined(self): "Test fill_missing_dates on undefined frequencies." ndtype = [('a', float), ('b', float)] series = ts.time_series([(1, 1), (2, 2), (3, 3), ], dtype=ndtype, dates=[1, 2, 6], freq='U') test = series.fill_missing_dates() control = ts.time_series([(1, 1), (2, 2), (0, 0), (0, 0), (0, 0), (3, 3), ], mask=[False, False, True, True, True, False], dtype=ndtype, start_date=ts.Date('U', 1)) assert_equal(test, control) def test_pickling(self): "Tests pickling/unpickling" (series, data, dates) = self.d import cPickle series_pickled = cPickle.loads(series.dumps()) assert_equal(series_pickled.dates, series.dates) assert_equal(series_pickled.data, series.data) assert_equal(series_pickled.mask, series.mask) # data = ma.array(np.matrix(range(10)).T, mask=[1, 0, 0, 0, 0] * 2) dates = date_array(start_date=now('D'), length=10) series = time_series(data, dates=dates) series_pickled = cPickle.loads(series.dumps()) assert_equal(series_pickled.dates, series.dates) assert_equal(series_pickled.data, series.data) assert_equal(series_pickled.mask, series.mask) self.failUnless(isinstance(series_pickled._data, np.matrix)) # def test_pickling_memo(self): "Test the conservation of _optinfo" import cPickle control = ts.time_series(np.arange(10), start_date=ts.Date('A', 2001)) control._optinfo['memo'] = "Control information" test = cPickle.loads(cPickle.dumps(control)) assert_equal(test._dates, control._dates) assert_equal(test, control) assert_equal(test._optinfo, control._optinfo) # # def test_pickling_oddity(self): # "Test some pickling oddity (bug #97)" # import cPickle # control = ts.time_series([{'a':1}], start_date=ts.Date('A', 2001)) # if tuple(map(int, np.version.version.split('.')[:2])) > (1, 4): # test = cPickle.loads(cPickle.dumps(control)) # assert_equal(test, control) # assert_equal(test.dates, control.dates) def test_empty_timeseries(self): "Tests that empty TimeSeries are handled properly" empty_ts = time_series([], freq='b') assert_array_equal(empty_ts, empty_ts + 1) assert_array_equal(empty_ts, empty_ts + empty_ts) assert_equal(empty_ts.start_date, None) assert_equal(empty_ts.end_date, None) def test__timeseriescompat_multiple(self): "Tests the compatibility of multiple time series." 
newyearsday = Date('D', '2005-01-01') aprilsfool = Date('D', '2005-04-01') seriesM_10 = time_series(np.arange(10), date_array(start_date=newyearsday.asfreq('M'), length=10)) seriesD_10 = time_series(np.arange(10), date_array(start_date=newyearsday, length=10)) seriesD_5 = time_series(np.arange(5), date_array(start_date=newyearsday, length=5)) seriesD_5_apr = time_series(np.arange(5), date_array(start_date=aprilsfool, length=5)) self.failUnless(tseries._timeseriescompat_multiple(seriesM_10, seriesM_10, seriesM_10)) exception = False try: tseries._timeseriescompat_multiple(seriesM_10, seriesD_10) except ts.TimeSeriesCompatibilityError: exception = True self.failUnless(exception) exception = False try: tseries._timeseriescompat_multiple(seriesD_5, seriesD_10) except ts.TimeSeriesCompatibilityError: exception = True self.failUnless(exception) exception = False try: tseries._timeseriescompat_multiple(seriesD_5, seriesD_5_apr) except ts.TimeSeriesCompatibilityError: exception = True self.failUnless(exception) def test_compressed(self): "Tests compress" dlist = ['2007-01-%02i' % i for i in range(1, 16)] dates = date_array(dlist, freq='D') data = ma.array(np.arange(15), mask=[1, 0, 0, 0, 0] * 3, dtype=float_) series = time_series(data, dlist, freq='D') # keeper = np.array([0, 1, 1, 1, 1] * 3, dtype=bool_) c_series = series.compressed() assert_equal(c_series.data, [1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14]) assert_equal(c_series.mask, nomask) assert_equal(c_series.dates, dates[keeper]) # series_st = time_series(ma.column_stack((data, data[::-1])), dates=dates) c_series = series_st.compressed() d = [1, 2, 3, 6, 7, 8, 11, 12, 13] assert_equal(c_series.data, np.c_[(d, list(reversed(d)))]) assert_equal(c_series.mask, nomask) assert_equal(c_series.dates, dates[d]) def test_concatenate(self): "Tests concatenate" dlist = ['2007-%02i' % i for i in range(1, 6)] _dates = date_array(dlist, freq='M') data = ma.array(np.arange(5), mask=[1, 0, 0, 0, 0], dtype=float_) # ser_1 = time_series(data, _dates) ser_2 = time_series(data, dates=_dates + 10) newseries = concatenate((ser_1, ser_2), fill_missing=True) assert_equal(newseries._series, [0, 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4]) assert_equal(newseries._mask, [1, 0, 0, 0, 0] + [1] * 5 + [1, 0, 0, 0, 0]) assert ~ (newseries.has_missing_dates()) # ser_1 = time_series(data, _dates) ser_2 = time_series(data, dates=_dates + 10) newseries = concatenate((ser_1, ser_2)) assert_equal(newseries._data, [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) assert_equal(newseries._mask, [1, 0, 0, 0, 0] + [1, 0, 0, 0, 0]) assert newseries.has_missing_dates() # ser_2 = time_series(data, dates=_dates + 3) newseries = concatenate((ser_1, ser_2)) assert_equal(newseries._data, [0, 1, 2, 3, 4, 2, 3, 4]) assert_equal(newseries._mask, [1, 0, 0, 0, 0, 0, 0, 0]) # newseries = concatenate((ser_1, ser_1[::-1])) assert_equal(newseries, ser_1) # def test_concatenate_remove_duplicates(self): "Test concatenate w/ remove_duplicates" first = Date("D", "2009-01-01") a = time_series([1, 2, 3, ], start_date=first) b = time_series([10, 20, 30, 40, 50], start_date=first) # test = ts.concatenate((a, b), remove_duplicates=True) ctrl = time_series([1, 2, 3, 40, 50], start_date=first) assert_equal(test, ctrl) assert_equal(test.dates, ctrl.dates) # test = ts.concatenate((b, a), remove_duplicates=True) ctrl = time_series([10, 20, 30, 40, 50], start_date=first) assert_equal(test, ctrl) assert_equal(test.dates, ctrl.dates) # c = time_series(100 * np.arange(1, 8), start_date=first + 2) test = ts.concatenate((a, b, c), 
remove_duplicates=True) ctrl = time_series([1, 2, 3, 40, 50, 400, 500, 600, 700], start_date=first) assert_equal(test, ctrl) assert_equal(test.dates, ctrl.dates) test = ts.concatenate((c, a, b), remove_duplicates=True) ctrl = time_series([1, 2, 100, 200, 300, 400, 500, 600, 700], start_date=first) assert_equal(test, ctrl) assert_equal(test.dates, ctrl.dates) def test_concatenate_2D(self): "Test concatenate on 2D" adata = ma.array([[1, 2], [2, 4], [3, 8]], mask=[[0, 0], [1, 0], [0, 1]]) bdata = ma.array([[10, 20], [30, 40], [50, 60], [70, 80]]) a = time_series(adata, start_date=ts.Date('D', '01-Jan-2009')) b = time_series(bdata, start_date=ts.Date('D', '05-Jan-2009')) # test = ts.concatenate([a, b], axis=0, remove_duplicates=True) ctrl = ma.array([[1, 2], [2, 4], [3, 8], [10, 20], [30, 40], [50, 60], [70, 80]], mask=[[0, 0], [1, 0], [0, 1], [0, 0], [0, 0], [0, 0], [0, 0]]) assert_equal(test.series, ctrl) assert_equal(test.dates, np.concatenate((a.dates, b.dates))) # test = ts.concatenate([a, b], axis=0, remove_duplicates=False) assert_equal(test.series, ctrl) assert_equal(test.dates, np.concatenate((a.dates, b.dates))) # b.dates -= 2 test = ts.concatenate([a, b], axis=0, remove_duplicates=False) ctrl = ts.time_series([[1, 2], [2, 4], [3, 8], [10, 20], [30, 40], [50, 60], [70, 80]], mask=[[0, 0], [1, 0], [0, 1], [0, 0], [0, 0], [0, 0], [0, 0]], dates=np.concatenate((a.dates, b.dates)), freq='D') assert_equal(test.series, ctrl) assert_equal(test.dates, ctrl.dates) test = ts.concatenate([a, b], axis=0, remove_duplicates=True) ctrl = ts.time_series([[1, 2], [2, 4], [3, 8], [30, 40], [50, 60], [70, 80]], mask=[[0, 0], [1, 0], [0, 1], [0, 0], [0, 0], [0, 0]], start_date=a.dates[0]) assert_equal(test.series, ctrl) assert_equal(test.dates, ctrl.dates) # def test_maxmin(self): "Test min/max" series = time_series(np.arange(10), start_date=now('D')) smax = series.max() #!!!: Used to be a TimeSeries, now is only a scalar # self.failUnless(isinstance(smax, TimeSeries)) # assert_equal(smax._dates, date_array(series._dates[-1])) self.failUnless(not isinstance(smax, TimeSeries)) assert_equal(smax, 9) # smin = series.min() #!!!: Used to be a TimeSeries, now is only a scalar # self.failUnless(isinstance(smin, TimeSeries)) # assert_equal(smin._dates, date_array(series._dates[0])) assert_equal(smin, 0) # series = time_series([[0, 1, 2, 3, 4], [9, 8, 7, 6, 5]], start_date=now('D')) smax = series.max(0) assert_equal(smax.series, [9, 8, 7, 6, 5]) assert_equal(smax.dates, date_array([series.dates[1]] * 5)) smax = series.max(1) assert_equal(smax.series, [4, 9]) assert_equal(smax.dates, series.dates) smax = series.max() assert_equal(smax.series, [9]) assert_equal(smax.dates, date_array(series.dates[1])) ser_m = ts.time_series(range(10), freq='M', start_date='2008-01-01') ser_q = ser_m.convert(freq='Q') mx = ser_q.max(-1) assert_equal(mx, ma.array([2, 5, 8, 9])) self.failUnless(isinstance(mx, TimeSeries)) # def test_pct(self): series = time_series(np.arange(1, 10), start_date=now('D')) _pct = series.pct() assert_equal(_pct.dtype, np.dtype('d')) assert_equal(series.start_date, _pct.start_date) assert_equal(series.end_date, _pct.end_date) self.failUnless(_pct[0] is masked) assert_equal(_pct[1], 1.0) assert_equal(_pct[2], 0.5) series = ts.time_series([2., 1., 2., 3.], start_date=ts.Date(freq='A', year=2005)) # standard pct result = series.pct() assert_almost_equal(result, ma.array([999, -0.5, 1.0, 0.5], mask=[1, 0, 0, 0]) ) result = series.pct(2) assert_almost_equal( result, ma.array([999, 999, 0.0, 2.0], mask=[1, 1, 0, 
0]) ) # log pct result = series.pct_log() assert_almost_equal( result, ma.array( [999, -0.69314718056, 0.69314718056, 0.405465108108], mask=[1, 0, 0, 0]) ) result = series.pct_log(2) assert_almost_equal( result, ma.array([999, 999, 0.0, 1.09861228867], mask=[1, 1, 0, 0]) ) # symmetric pct result = series.pct_symmetric() assert_almost_equal( result, ma.array( [999, -0.666666666667, 0.666666666667, 0.4], mask=[1, 0, 0, 0]) ) result = series.pct_symmetric(2) assert_almost_equal( result, ma.array([999, 999, 0.0, 1.0], mask=[1, 1, 0, 0]) ) def test_find_duplicated_dates(self): "Test find_duplicated_dates" years = ['2000', '2001', '2002', '2003', '2003', '2003', '2004', '2005', '2005', '2006'] series = time_series(np.arange(len(years)), dates=years, freq='A') test = find_duplicated_dates(series) control = {Date('A', '2003'): (np.array([3, 4, 5]),), Date('A', '2005'): (np.array([7, 8]),), } assert_equal(test, control) # def test_find_duplicated_dates_allduplicated(self): "Test find_duplicated_dates w/all duplicates" series = time_series([0, 1, 2, 3, 4], dates=[2000, 2000, 2000, 2000, 2000], freq='A') test = find_duplicated_dates(series) control = {Date('A', '2000'): (np.array([0, 1, 2, 3, 4]),), } assert_equal(test, control) # def test_find_duplicated_dates_noduplicates(self): "Test find_duplicated_dates w/o duplicates" series = time_series(np.arange(5), start_date=Date('A', '2001')) test = find_duplicated_dates(series) assert_equal(test, {}) def test_remove_duplicated_dates(self): "Test remove_duplicated_dates" years = ['2000', '2001', '2002', '2003', '2003', '2003', '2004', '2005', '2005', '2006'] series = time_series(np.arange(len(years)), dates=years, freq='A') test = remove_duplicated_dates(series) control = time_series([0, 1, 2, 3, 6, 7, 9], start_date=Date('A', '2000')) assert_equal(test, control) assert_equal(test._dates, control._dates) # def test_remove_duplicated_dates_allduplicates(self): "Test remove_duplicated_dates w/ all duplicates" years = ['2000', '2000', '2000', '2000', '2000'] series = time_series(np.arange(len(years)), dates=years, freq='A') test = remove_duplicated_dates(series) control = time_series([0, ], start_date=Date('A', '2000')) assert_equal(test, control) assert_equal(test._dates, control._dates) # def test_remove_duplicated_dates_noduplicates(self): "Test remove_duplicated_dates w/o duplicates" series = time_series(np.arange(5), start_date=Date('A', '2001')) test = remove_duplicated_dates(series) assert_equal(test, series) assert_equal(test._dates, series._dates) # def test_remove_duplicated_dates_nonchrono(self): "Test remove_duplicated_dates on non-chronological series" series = time_series([0, 1, 2, 3, 4, 5, 6], dates=[2005, 2005, 2004, 2003, 2002, 2002, 2002], freq='A', autosort=False) test = remove_duplicated_dates(series) control = time_series([0, 2, 3, 4], dates=[2005, 2004, 2003, 2002], freq='A', autosort=True) assert_equal(test, control) assert_equal(test._dates, control._dates) #------------------------------------------------------------------------------ class TestMisc(TestCase): def test_ma_ufuncs(self): a = time_series([-2, -1, 0, 1, 2], start_date=now('D')) z = ma.sqrt(a) self.failUnless(isinstance(z, TimeSeries)) assert_equal(z, [1, 1, 0, 1, np.sqrt(2)]) assert_equal(z.mask, [1, 1, 0, 0, 0]) assert_equal(z.dates, a.dates) def test_emptylike(self): x = time_series([1, 2, 3, 4, 5], mask=[1, 0, 0, 0, 0], start_date=now('D')) y = ts.empty_like(x) # Basic checks assert_equal(x.dtype, y.dtype) assert_equal(x.shape, y.shape) # y.flat = 0 
assert_equal(x.mask, [1, 0, 0, 0, 0]) assert_equal(y.mask, nomask) # x.mask = nomask y = ts.empty_like(x) assert_equal(y.mask, nomask) def test_compatibility_shape(self): "Tests shape compatibility." data = np.arange(2 * 3 * 4 * 5,) dates = np.empty((2 * 3 * 4 * 5,)) assert_equal(get_varshape(data, dates), ()) # dates.shape = (2, 3, 4, 5) assert_equal(get_varshape(data, dates), ()) # dates = np.empty((2 * 3 * 4,)) try: assert_equal(get_varshape(data, dates), None) except TimeSeriesCompatibilityError: pass # dates = np.empty((3 * 3 * 5,)) try: assert_equal(get_varshape(data, dates), None) except TimeSeriesCompatibilityError: pass # data.shape = (2 * 3 * 4, 5) dates = np.empty((2 * 3 * 4,)) assert_equal(get_varshape(data, dates), (5,)) data.shape = (2 * 3, 4 * 5) dates = np.empty((2 * 3 * 4,)) try: assert_equal(get_varshape(data, dates), None) except TimeSeriesCompatibilityError: pass dates = np.empty((2 * 3, 4)) try: assert_equal(get_varshape(data, dates), None) except TimeSeriesCompatibilityError: pass data.shape = (2 * 3, 4, 5) dates = np.empty((2,)) try: assert_equal(get_varshape(data, dates), None) except TimeSeriesCompatibilityError: pass dates = np.empty((2 * 3,)) assert_equal(get_varshape(data, dates), (4, 5)) # 1D start = ts.now('M') series = time_series(np.arange(60), start_date=start) assert_equal(series._varshape, ()) # 2D (multi 1D series) series = time_series(np.arange(60).reshape(20, 3), start_date=start) assert_equal(series._varshape, (3,)) # 3D (2D series) series = time_series(np.arange(60).reshape(5, 4, 3), start_date=start) assert_equal(series._varshape, (4, 3)) def test_deepcopy(self): "Test deepcopy" from copy import deepcopy t = time_series([0, 1, 2], mask=[0, 1, 0], start_date=ts.now('D')) t_ = deepcopy(t) for attr in ('_data', '_mask', '_dates'): attrt = getattr(t, attr) attrt_ = getattr(t_, attr) assert_equal(attrt, attrt_) assert_not_equal(id(attrt), id(attrt_)) t_.mask[1] = False assert_equal(t_.mask, [False, False, False]) assert_equal(t.mask, [False, True, False]) def test_firstlast_unmasked_vals(self): data = ma.array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]], mask=[[0, 0, 1, 0, 0], [0, 0, 0, 1, 1], [1, 1, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 0, 0]],) assert_equal(ts.first_unmasked_val(data, None), 0) assert_equal(ts.last_unmasked_val(data, None), 24) assert_equal(ts.first_unmasked_val(data, 0), [0, 1, 7, 3, 4]) assert_equal(ts.last_unmasked_val(data, 0), [15, 16, 17, 23, 24]) assert_equal(ts.first_unmasked_val(data, -1), [0, 5, 12, 15, 23]) assert_equal(ts.last_unmasked_val(data, -1), [4, 7, 14, 19, 24]) # data_ = data.data assert_equal(ts.first_unmasked_val(data_, None), 0) assert_equal(ts.last_unmasked_val(data_, None), 24) assert_equal(ts.first_unmasked_val(data_, 0), [0, 1, 2, 3, 4]) assert_equal(ts.last_unmasked_val(data_, 0), [20, 21, 22, 23, 24]) assert_equal(ts.first_unmasked_val(data_, -1), [0, 5, 10, 15, 20]) assert_equal(ts.last_unmasked_val(data_, -1), [4, 9, 14, 19, 24]) # data[-2] = ma.masked assert_equal(ts.first_unmasked_val(data, None), 0) assert_equal(ts.last_unmasked_val(data, None), 24) assert_equal(ts.first_unmasked_val(data, 0), [0, 1, 7, 3, 4]) assert_equal(ts.last_unmasked_val(data, 0), [5, 6, 12, 23, 24]) assert_equal(ts.first_unmasked_val(data, -1), ma.array([0, 5, 12, -1, 23], mask=[0, 0, 0, 1, 0])) assert_equal(ts.last_unmasked_val(data, -1), ma.array([4, 7, 14, -1, 24], mask=[0, 0, 0, 1, 0])) #------------------------------------------------------------------------------ 
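# Illustrative sketch (not part of the original suite): the three percent-change
# variants exercised in TestFunctions.test_pct above can be reproduced with plain
# numpy.ma arithmetic.  The helper below is a hypothetical reference, assuming
# the usual definitions over a lag of `k` periods (relative change x[t]/x[t-k]-1,
# log change log(x[t]/x[t-k]), and symmetric change 2*(x[t]-x[t-k])/(x[t]+x[t-k]));
# it is a reading aid, not the library's actual implementation.
def _pct_reference_sketch(values, k=1):
    "Return (pct, pct_log, pct_symmetric) for a 1D masked array `values`."
    x = ma.asarray(values).astype(float)
    if not 0 < k < x.size:
        raise ValueError("lag k must satisfy 0 < k < len(values)")
    prev, curr = x[:-k], x[k:]
    # The first k entries have no predecessor and stay masked.
    pad = ma.masked_all(k)
    pct = ma.concatenate([pad, curr / prev - 1.])
    pct_log = ma.concatenate([pad, ma.log(curr / prev)])
    pct_sym = ma.concatenate([pad, 2. * (curr - prev) / (curr + prev)])
    return pct, pct_log, pct_sym
# For the series [2., 1., 2., 3.] used in test_pct, the first element of the
# returned triple reproduces the asserted [-, -0.5, 1.0, 0.5] pattern (leading
# entry masked), and the log/symmetric variants match the other assertions.

#------------------------------------------------------------------------------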
class TestGenericMethods(TestCase): # class SubTimeSeries(TimeSeries): pass # def setUp(self): self.methodlist = ('cumsum', 'cumprod', 'ravel') data = np.random.rand(10) self.series = time_series(data, start_date=ts.now('D') - len(data)) # def test_generic_methods(self): "Tests some generic methods" series = self.series for method in self.methodlist: test = getattr(series, method).__call__() self.failUnless(isinstance(test, ts.TimeSeries)) assert_equal(test, getattr(series.series, method).__call__()) assert_equal(test.dates, series.dates) # def test_generic_methods_w_subclassing(self): "Tests generic method on subclasses of TimeSeries." subseries = self.series.view(self.SubTimeSeries) for method in self.methodlist: test = getattr(subseries, method).__call__() self.failUnless(isinstance(test, self.SubTimeSeries)) assert_equal(test, getattr(subseries._series, method).__call__()) assert_equal(test.dates, subseries.dates) # def test_generated_method(self): "Test that TimeSeries.method(s) gives the same result as s.method" series = ts.time_series(range(10), start_date=ts.now('D')) control = ts.time_series(np.cumsum(range(10)), start_date=ts.now('D')) # test = series.cumsum() assert_equal(test, control) # test = ts.TimeSeries.cumsum(series) assert_equal(test, control) # def test_generated_axismethod(self): "Test axismethod" series = ts.time_series(np.arange(9).reshape(3, 3), start_date=ts.now('D')) control = ts.time_series([1., 4., 7.], start_date=ts.now('D')) # test = series.mean(1) assert_equal(test, control) # test = ts.TimeSeries.mean(series, 1) assert_equal(test, control) test = ts.TimeSeries.mean(series, axis=1) assert_equal(test, control) # def test_axismethod(self): "Test axis method" series = ts.time_series(np.arange(9).reshape(3, 3), start_date=ts.now('D')) control = ts.time_series([0, 60, 336], start_date=ts.now('D')) assert_equal(series.product(axis= -1), control) assert_equal(series.product(-1), control) assert_equal(series.prod(axis= -1), control) assert_equal(series.prod(-1), control) # control = ts.time_series([3, 12, 21], start_date=ts.now('D')) assert_equal(series.sum(axis= -1), control) assert_equal(series.sum(-1), control) #------------------------------------------------------------------------------ class TestFlexibleType(TestCase): "Test flexible types" # def setUp(self): ndtype = [('a', float), ('b', float)] data = ma.array(zip(np.random.rand(10), np.arange(10)), dtype=ndtype) data.mask[0] = (0, 1) data.mask[1] = (1, 1) data.mask[-1] = (1, 0) series = time_series(data, start_date=ts.Date('M', '2007-01')) self.data = (data, series) # def test_getitem_index(self): (data, series) = self.data test = series[0] self.failUnless(isinstance(test, MaskedArray)) assert_equal(test, data[0]) test = series[1] self.failUnless(isinstance(test, MaskedArray)) assert_equal(test, data[1]) test = series[2] self.failUnless(isinstance(test, np.void)) assert_equal(test, data[2]) # def test_getitem_dates(self): (data, series) = self.data test = series['2007-01'] self.failUnless(isinstance(test, MaskedArray)) assert_equal(test, data[0]) test = series['2007-02'] self.failUnless(isinstance(test, MaskedArray)) assert_equal(test, data[1]) test = series['2007-03'] self.failUnless(isinstance(test, np.void)) assert_equal(test, data[2]) #------------------------------------------------------------------------------ class TestViewTimeSeries(TestCase): # def setUp(self): (a, b) = (np.arange(10), np.random.rand(10)) ndtype = [('a', np.float), ('b', np.float)] tarr = ts.time_series(np.array(zip(a, b), 
dtype=ndtype), start_date=ts.now('M')) tarr.mask[3] = (False, True) self.data = (tarr, a, b) # def test_view_by_itself(self): (tarr, a, b) = self.data test = tarr.view() self.failUnless(isinstance(test, ts.TimeSeries)) assert_equal_records(test, tarr) assert_equal_records(test.mask, tarr.mask) assert_equal(test.dates, tarr.dates) # def test_view_simple_dtype(self): (tarr, a, b) = self.data ntype = (np.float, 2) test = tarr.view(ntype) self.failUnless(isinstance(test, TimeSeries)) assert_equal(test, np.array(zip(a, b), dtype=np.float)) self.failUnless(test[3, 1] is ma.masked) # def test_view_flexible_type(self): (tarr, a, b) = self.data arr = tarr._series alttype = [('A', np.float), ('B', np.float)] test = tarr.view(alttype) self.failUnless(isinstance(test, TimeSeries)) assert_equal_records(test, arr.view(alttype)) self.failUnless(test['B'][3] is masked) assert_equal(test.dtype, np.dtype(alttype)) self.failUnless(test._fill_value is None) # def test_view(self): "Test view w/ flexible dtype" ndtype = [('a', float), ('b', float), ('c', float)] data = np.random.rand(15).reshape(-1, 3) fseries = time_series([tuple(_) for _ in data], start_date=ts.now('D'), dtype=ndtype) dseries = time_series(data, start_date=ts.now('D')) # test = fseries.view((float, 3)) assert_equal(test._series, dseries) self.failUnless(test._varshape == (3,)) # self.failUnless(dseries._varshape == (3,)) test = dseries.view(ndtype).squeeze() assert_equal(test, fseries) self.failUnless(test._varshape == ()) ############################################################################### #------------------------------------------------------------------------------ if __name__ == "__main__": run_module_suite()
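#------------------------------------------------------------------------------
# Illustrative appendix (not part of the original suite): the `tshift` behaviour
# asserted in TestFunctions.test_tshift can be emulated on a plain masked array.
# This is a hypothetical sketch, assuming tshift(n) keeps the dates fixed and
# moves the data by `n` periods, masking entries shifted in from outside the
# series; it is not the library's actual implementation.
def _tshift_reference_sketch(values, n):
    "Emulate TimeSeries.tshift(n) on a 1D masked array `values`."
    x = ma.asarray(values)
    out = ma.masked_all(x.shape, dtype=x.dtype)
    if n > 0:
        out[:-n] = x[n:]      # lead: value at t comes from t + n
    elif n < 0:
        out[-n:] = x[:n]      # lag: value at t comes from t + n (n < 0)
    else:
        out[:] = x
    return out
# E.g. for n = -1 this yields [masked, x[0], ..., x[-2]], matching the
# shift_negative expectations (data and mask) built in test_tshift.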
[ "numpy.sqrt", "numpy.random.rand", "scikits.timeseries.date_array", "scikits.timeseries.TimeSeriesError", "numpy.ma.column_stack", "numpy.column_stack", "numpy.ma.sqrt", "numpy.array", "scikits.timeseries.last_unmasked_val", "scikits.timeseries.align_series", "copy.deepcopy", "numpy.arange", ...
[((1408, 1435), 'scikits.timeseries.date_array', 'date_array', (['dlist'], {'freq': '"""D"""'}), "(dlist, freq='D')\n", (1418, 1435), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((1704, 1738), 'scikits.timeseries.time_series', 'time_series', (['data', 'dlist'], {'freq': '"""D"""'}), "(data, dlist, freq='D')\n", (1715, 1738), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((2127, 2165), 'scikits.timeseries.time_series', 'time_series', (['data'], {'start_date': 'dates[0]'}), '(data, start_date=dates[0])\n', (2138, 2165), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((2567, 2601), 'scikits.timeseries.time_series', 'time_series', (['data', 'dlist'], {'freq': '"""D"""'}), "(data, dlist, freq='D')\n", (2578, 2601), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((2646, 2672), 'scikits.timeseries.time_series', 'time_series', (['series', 'dates'], {}), '(series, dates)\n', (2657, 2672), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((3098, 3122), 'scikits.timeseries.time_series', 'time_series', (['data', 'dates'], {}), '(data, dates)\n', (3109, 3122), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((3744, 3768), 'scikits.timeseries.time_series', 'time_series', (['data', 'dates'], {}), '(data, dates)\n', (3755, 3768), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((4012, 4046), 'scikits.timeseries.time_series', 'time_series', (['data', 'dlist'], {'freq': '"""D"""'}), "(data, dlist, freq='D')\n", (4023, 4046), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((4121, 4148), 'scikits.timeseries.date_array', 'date_array', (['dlist'], {'freq': '"""D"""'}), "(dlist, freq='D')\n", (4131, 4148), False, 'from scikits.timeseries 
import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((4166, 4189), 'scikits.timeseries.TimeSeries', 'TimeSeries', (['data', 'dates'], {}), '(data, dates)\n', (4176, 4189), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((4265, 4315), 'scikits.timeseries.time_series', 'time_series', (['data', 'dlist'], {'freq': '"""D"""', 'mask': '[1, 0, 0]'}), "(data, dlist, freq='D', mask=[1, 0, 0])\n", (4276, 4315), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((4386, 4424), 'numpy.ma.array', 'ma.array', (['[10, 20, 30]'], {'mask': '[1, 0, 0]'}), '([10, 20, 30], mask=[1, 0, 0])\n', (4394, 4424), True, 'import numpy.ma as ma\n'), ((4442, 4476), 'scikits.timeseries.time_series', 'time_series', (['data', 'dlist'], {'freq': '"""D"""'}), "(data, dlist, freq='D')\n", (4453, 4476), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((4732, 4759), 'scikits.timeseries.date_array', 'date_array', (['dlist'], {'freq': '"""D"""'}), "(dlist, freq='D')\n", (4742, 4759), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((4840, 4870), 'scikits.timeseries.time_series', 'time_series', (['data'], {'dates': 'dates'}), '(data, dates=dates)\n', (4851, 4870), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((5055, 5085), 'scikits.timeseries.time_series', 'time_series', (['data'], {'dates': 'dates'}), '(data, dates=dates)\n', (5066, 5085), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((5326, 5356), 'scikits.timeseries.time_series', 'time_series', (['data'], {'dates': 'dates'}), '(data, dates=dates)\n', (5337, 5356), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((5782, 5822), 'numpy.array', 'np.array', (['dates'], {'copy': '(False)', 'subok': '(False)'}), '(dates, copy=False, subok=False)\n', 
(5790, 5822), True, 'import numpy as np\n'), ((7023, 7053), 'scikits.timeseries.time_series', 'time_series', (['data'], {'dates': 'dates'}), '(data, dates=dates)\n', (7034, 7053), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((7760, 7808), 'numpy.array', 'np.array', (['dates'], {'copy': '(False)', 'subok': '(True)', 'ndmin': '(2)'}), '(dates, copy=False, subok=True, ndmin=2)\n', (7768, 7808), True, 'import numpy as np\n'), ((8212, 8247), 'numpy.array', 'np.array', (['(4, 3, 2, 1)'], {'dtype': 'float'}), '((4, 3, 2, 1), dtype=float)\n', (8220, 8247), True, 'import numpy as np\n'), ((8749, 8796), 'numpy.array', 'np.array', (['[[4.0, 3.0], [2.0, 1.0]]'], {'dtype': 'float'}), '([[4.0, 3.0], [2.0, 1.0]], dtype=float)\n', (8757, 8796), True, 'import numpy as np\n'), ((9678, 9705), 'scikits.timeseries.date_array', 'date_array', (['dlist'], {'freq': '"""D"""'}), "(dlist, freq='D')\n", (9688, 9705), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((9788, 9812), 'scikits.timeseries.time_series', 'time_series', (['data', 'dates'], {}), '(data, dates)\n', (9799, 9812), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((10044, 10079), 'scikits.timeseries.time_series', 'time_series', (['data', 'dates'], {'copy': '(True)'}), '(data, dates, copy=True)\n', (10055, 10079), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((10407, 10430), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""1955-01"""'], {}), "('M', '1955-01')\n", (10414, 10430), True, 'import scikits.timeseries as ts\n'), ((10558, 10596), 'scikits.timeseries.time_series', 'ts.time_series', (['data'], {'start_date': 'start'}), '(data, start_date=start)\n', (10572, 10596), True, 'import scikits.timeseries as ts\n'), ((10785, 10835), 'scikits.timeseries.time_series', 'ts.time_series', (['data'], {'start_date': 'start', 'length': '(600)'}), '(data, start_date=start, length=600)\n', (10799, 10835), True, 'import scikits.timeseries as ts\n'), ((11021, 11070), 'scikits.timeseries.time_series', 'ts.time_series', (['data'], {'start_date': 'start', 'length': '(50)'}), '(data, start_date=start, length=50)\n', (11035, 11070), True, 'import scikits.timeseries as ts\n'), ((12323, 12350), 'scikits.timeseries.date_array', 'date_array', (['dlist'], {'freq': '"""D"""'}), "(dlist, freq='D')\n", (12333, 12350), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((16548, 16644), 
'numpy.ma.array', 'ma.array', (['[[10, 11, 12], [20, 21, 22], [30, 31, 32]]'], {'mask': '[[1, 0, 0], [0, 0, 0], [0, 0, 1]]'}), '([[10, 11, 12], [20, 21, 22], [30, 31, 32]], mask=[[1, 0, 0], [0, 0,\n 0], [0, 0, 1]])\n', (16556, 16644), True, 'import numpy.ma as ma\n'), ((16850, 16886), 'scikits.timeseries.time_series', 'time_series', (['data1D', 'dates'], {'freq': '"""D"""'}), "(data1D, dates, freq='D')\n", (16861, 16886), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((25977, 26004), 'scikits.timeseries.date_array', 'date_array', (['dlist'], {'freq': '"""D"""'}), "(dlist, freq='D')\n", (25987, 26004), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((26100, 26124), 'scikits.timeseries.time_series', 'time_series', (['data', 'dates'], {}), '(data, dates)\n', (26111, 26124), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((27281, 27314), 'scikits.timeseries.time_series', 'ts.time_series', (['[1]'], {'start_date': 'x'}), '([1], start_date=x)\n', (27295, 27314), True, 'import scikits.timeseries as ts\n'), ((27327, 27363), 'scikits.timeseries.time_series', 'ts.time_series', (['[4, 5]'], {'start_date': 'x'}), '([4, 5], start_date=x)\n', (27341, 27363), True, 'import scikits.timeseries as ts\n'), ((28071, 28134), 'scikits.timeseries.date_array', 'date_array', (["[('2007-01-%02i' % i) for i in (1, 2, 3)]"], {'freq': '"""D"""'}), "([('2007-01-%02i' % i) for i in (1, 2, 3)], freq='D')\n", (28081, 28134), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((28150, 28185), 'numpy.ma.array', 'ma.array', (['[1, 2, 3]'], {'mask': '[1, 0, 0]'}), '([1, 2, 3], mask=[1, 0, 0])\n', (28158, 28185), True, 'import numpy.ma as ma\n'), ((28205, 28301), 'numpy.ma.array', 'ma.array', (['[[10, 11, 12], [20, 21, 22], [30, 31, 32]]'], {'mask': '[[1, 0, 0], [0, 0, 0], [0, 0, 1]]'}), '([[10, 11, 12], [20, 21, 22], [30, 31, 32]], mask=[[1, 0, 0], [0, 0,\n 0], [0, 0, 1]])\n', (28213, 28301), True, 'import numpy.ma as ma\n'), ((28398, 28434), 'scikits.timeseries.time_series', 'time_series', (['data1D', 'dates'], {'freq': '"""D"""'}), "(data1D, dates, freq='D')\n", (28409, 28434), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((28454, 28490), 'scikits.timeseries.time_series', 'time_series', (['data3V', 'dates'], {'freq': '"""D"""'}), "(data3V, dates, freq='D')\n", (28465, 28490), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, 
TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((28510, 28546), 'scikits.timeseries.time_series', 'time_series', (['data2D', 'dates'], {'freq': '"""D"""'}), "(data2D, dates, freq='D')\n", (28521, 28546), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((28952, 29037), 'numpy.array', 'np.array', (['[(24001, 1, False), (24002, 2, True), (24003, 3, False)]'], {'dtype': 'ndtype'}), '([(24001, 1, False), (24002, 2, True), (24003, 3, False)], dtype=ndtype\n )\n', (28960, 29037), True, 'import numpy as np\n'), ((29561, 29686), 'numpy.array', 'np.array', (['[(24001, [1, 1], [False, True]), (24002, [2, 2], [False, False]), (24003, [\n 3, 3], [True, False])]'], {'dtype': 'ndtype'}), '([(24001, [1, 1], [False, True]), (24002, [2, 2], [False, False]),\n (24003, [3, 3], [True, False])], dtype=ndtype)\n', (29569, 29686), True, 'import numpy as np\n'), ((30329, 30454), 'numpy.array', 'np.array', (['[(24001, (1, 1), (False, True)), (24002, (2, 2), (False, False)), (24003, (\n 3, 3), (True, False))]'], {'dtype': 'ndtype'}), '([(24001, (1, 1), (False, True)), (24002, (2, 2), (False, False)),\n (24003, (3, 3), (True, False))], dtype=ndtype)\n', (30337, 30454), True, 'import numpy as np\n'), ((30680, 30703), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2001-01"""'], {}), "('M', '2001-01')\n", (30687, 30703), True, 'import scikits.timeseries as ts\n'), ((30721, 30786), 'scikits.timeseries.time_series', 'ts.time_series', (['[1, 2, 3, 4]'], {'mask': '[0, 0, 1, 0]', 'start_date': 'start'}), '([1, 2, 3, 4], mask=[0, 0, 1, 0], start_date=start)\n', (30735, 30786), True, 'import scikits.timeseries as ts\n'), ((35703, 35771), 'scikits.timeseries.time_series', 'ts.time_series', (['[[1, 2], [3, 4]]'], {'mask': '[[0, 0], [1, 0]]', 'dates': 'dates'}), '([[1, 2], [3, 4]], mask=[[0, 0], [1, 0]], dates=dates)\n', (35717, 35771), True, 'import scikits.timeseries as ts\n'), ((37392, 37419), 'scikits.timeseries.date_array', 'date_array', (['dlist'], {'freq': '"""D"""'}), "(dlist, freq='D')\n", (37402, 37419), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((37674, 37733), 'scikits.timeseries.adjust_endpoints', 'adjust_endpoints', (['series', 'series.dates[0]', 'series.dates[-1]'], {}), '(series, series.dates[0], series.dates[-1])\n', (37690, 37733), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((37790, 37849), 'scikits.timeseries.adjust_endpoints', 'adjust_endpoints', (['series', 'series.dates[3]', 'series.dates[-3]'], {}), '(series, series.dates[3], series.dates[-3])\n', (37806, 37849), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, 
align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((38787, 38834), 'scikits.timeseries.adjust_endpoints', 'adjust_endpoints', (['series'], {'end_date': '"""2007-01-31"""'}), "(series, end_date='2007-01-31')\n", (38803, 38834), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((38958, 39005), 'scikits.timeseries.adjust_endpoints', 'adjust_endpoints', (['series'], {'end_date': '"""2007-01-06"""'}), "(series, end_date='2007-01-06')\n", (38974, 39005), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((39104, 39176), 'scikits.timeseries.adjust_endpoints', 'adjust_endpoints', (['series'], {'start_date': '"""2007-01-06"""', 'end_date': '"""2007-01-31"""'}), "(series, start_date='2007-01-06', end_date='2007-01-31')\n", (39120, 39176), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((39508, 39533), 'scikits.timeseries.time_series', 'time_series', (['[]'], {'freq': '"""d"""'}), "([], freq='d')\n", (39519, 39533), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((39549, 39583), 'scikits.timeseries.align_series', 'align_series', (['series', 'empty_series'], {}), '(series, empty_series)\n', (39561, 39583), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((39706, 39735), 'scikits.timeseries.time_series', 'time_series', (['data', '(dates + 10)'], {}), '(data, dates + 10)\n', (39717, 39735), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((39754, 39783), 'scikits.timeseries.time_series', 'time_series', (['data', '(dates - 10)'], {}), '(data, dates - 10)\n', (39765, 39783), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((39801, 39837), 'scikits.timeseries.align_with', 'align_with', (['series', 'aseries', 'bseries'], {}), '(series, aseries, bseries)\n', (39811, 39837), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, 
time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((40324, 40386), 'scikits.timeseries.time_series', 'time_series', (['result_data'], {'dates': 'series.dates', 'mask': 'result_mask'}), '(result_data, dates=series.dates, mask=result_mask)\n', (40335, 40386), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((40701, 40763), 'scikits.timeseries.time_series', 'time_series', (['result_data'], {'dates': 'series.dates', 'mask': 'result_mask'}), '(result_data, dates=series.dates, mask=result_mask)\n', (40712, 40763), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((41207, 41216), 'scikits.timeseries.split', 'split', (['ms'], {}), '(ms)\n', (41212, 41216), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((41699, 41733), 'scikits.timeseries.Date', 'Date', ([], {'freq': '"""M"""', 'year': '(2005)', 'month': '(6)'}), "(freq='M', year=2005, month=6)\n", (41703, 41733), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((43105, 43146), 'scikits.timeseries.Date', 'Date', ([], {'freq': '"""b"""', 'year': '(2005)', 'month': '(6)', 'day': '(1)'}), "(freq='b', year=2005, month=6, day=1)\n", (43109, 43146), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((44439, 44473), 'scikits.timeseries.Date', 'Date', ([], {'freq': '"""m"""', 'year': '(2005)', 'month': '(1)'}), "(freq='m', year=2005, month=1)\n", (44443, 44473), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((44489, 44523), 'scikits.timeseries.Date', 'Date', ([], {'freq': '"""m"""', 'year': '(2005)', 'month': '(4)'}), "(freq='m', year=2005, month=4)\n", (44493, 44523), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((44550, 44586), 'scikits.timeseries.date_array', 'date_array', (['[_start, _end]'], {'freq': '"""M"""'}), "([_start, _end], freq='M')\n", (44560, 44586), False, 'from scikits.timeseries import TimeSeries, 
TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((44604, 44630), 'scikits.timeseries.time_series', 'time_series', (['[1, 2]', 'dates'], {}), '([1, 2], dates)\n', (44615, 44630), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((44652, 44678), 'scikits.timeseries.fill_missing_dates', 'fill_missing_dates', (['series'], {}), '(series)\n', (44670, 44678), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((45093, 45123), 'scikits.timeseries.date_array', 'date_array', (['datelist'], {'freq': '"""D"""'}), "(datelist, freq='D')\n", (45103, 45123), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((45142, 45166), 'scikits.timeseries.time_series', 'time_series', (['data', 'dates'], {}), '(data, dates)\n', (45153, 45166), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((45184, 45235), 'scikits.timeseries.date_array', 'date_array', ([], {'start_date': 'dates[0]', 'end_date': 'dates[-2]'}), '(start_date=dates[0], end_date=dates[-2])\n', (45194, 45235), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((45264, 45291), 'scikits.timeseries.fill_missing_dates', 'fill_missing_dates', (['dseries'], {}), '(dseries)\n', (45282, 45291), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((45426, 45459), 'scikits.timeseries.fill_missing_dates', 'fill_missing_dates', (['dseries[:, 0]'], {}), '(dseries[:, 0])\n', (45444, 45459), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((45664, 45690), 'scikits.timeseries.fill_missing_dates', 'fill_missing_dates', (['series'], {}), '(series)\n', (45682, 45690), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, 
fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((45963, 46077), 'scikits.timeseries.time_series', 'ts.time_series', (['[(1, 1), (2, 2), (3, 3)]'], {'dtype': 'ndtype', 'dates': "[('2001-%02i' % i) for i in (1, 2, 6)]", 'freq': '"""M"""'}), "([(1, 1), (2, 2), (3, 3)], dtype=ndtype, dates=[('2001-%02i' %\n i) for i in (1, 2, 6)], freq='M')\n", (45977, 46077), True, 'import scikits.timeseries as ts\n'), ((46741, 46826), 'scikits.timeseries.time_series', 'ts.time_series', (['[(1, 1), (2, 2), (3, 3)]'], {'dtype': 'ndtype', 'dates': '[1, 2, 6]', 'freq': '"""U"""'}), "([(1, 1), (2, 2), (3, 3)], dtype=ndtype, dates=[1, 2, 6],\n freq='U')\n", (46755, 46826), True, 'import scikits.timeseries as ts\n'), ((47817, 47847), 'scikits.timeseries.time_series', 'time_series', (['data'], {'dates': 'dates'}), '(data, dates=dates)\n', (47828, 47847), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((49107, 49132), 'scikits.timeseries.time_series', 'time_series', (['[]'], {'freq': '"""b"""'}), "([], freq='b')\n", (49118, 49132), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((49465, 49488), 'scikits.timeseries.Date', 'Date', (['"""D"""', '"""2005-01-01"""'], {}), "('D', '2005-01-01')\n", (49469, 49488), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((49510, 49533), 'scikits.timeseries.Date', 'Date', (['"""D"""', '"""2005-04-01"""'], {}), "('D', '2005-04-01')\n", (49514, 49533), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((51001, 51028), 'scikits.timeseries.date_array', 'date_array', (['dlist'], {'freq': '"""D"""'}), "(dlist, freq='D')\n", (51011, 51028), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((51125, 51159), 'scikits.timeseries.time_series', 'time_series', (['data', 'dlist'], {'freq': '"""D"""'}), "(data, dlist, freq='D')\n", (51136, 51159), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((51187, 51229), 'numpy.array', 'np.array', (['([0, 1, 1, 1, 1] * 3)'], {'dtype': 'bool_'}), '([0, 1, 1, 1, 1] * 3, dtype=bool_)\n', (51195, 51229), True, 'import numpy as np\n'), ((51943, 51970), 'scikits.timeseries.date_array', 'date_array', (['dlist'], {'freq': 
'"""M"""'}), "(dlist, freq='M')\n", (51953, 51970), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((52071, 52096), 'scikits.timeseries.time_series', 'time_series', (['data', '_dates'], {}), '(data, _dates)\n', (52082, 52096), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((52113, 52149), 'scikits.timeseries.time_series', 'time_series', (['data'], {'dates': '(_dates + 10)'}), '(data, dates=_dates + 10)\n', (52124, 52149), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((52170, 52216), 'scikits.timeseries.concatenate', 'concatenate', (['(ser_1, ser_2)'], {'fill_missing': '(True)'}), '((ser_1, ser_2), fill_missing=True)\n', (52181, 52216), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((52543, 52568), 'scikits.timeseries.time_series', 'time_series', (['data', '_dates'], {}), '(data, _dates)\n', (52554, 52568), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((52585, 52621), 'scikits.timeseries.time_series', 'time_series', (['data'], {'dates': '(_dates + 10)'}), '(data, dates=_dates + 10)\n', (52596, 52621), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((52642, 52669), 'scikits.timeseries.concatenate', 'concatenate', (['(ser_1, ser_2)'], {}), '((ser_1, ser_2))\n', (52653, 52669), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((52884, 52919), 'scikits.timeseries.time_series', 'time_series', (['data'], {'dates': '(_dates + 3)'}), '(data, dates=_dates + 3)\n', (52895, 52919), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((52940, 52967), 'scikits.timeseries.concatenate', 'concatenate', (['(ser_1, ser_2)'], {}), '((ser_1, ser_2))\n', (52951, 52967), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, 
TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((53126, 53159), 'scikits.timeseries.concatenate', 'concatenate', (['(ser_1, ser_1[::-1])'], {}), '((ser_1, ser_1[::-1]))\n', (53137, 53159), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((53319, 53342), 'scikits.timeseries.Date', 'Date', (['"""D"""', '"""2009-01-01"""'], {}), "('D', '2009-01-01')\n", (53323, 53342), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((53355, 53395), 'scikits.timeseries.time_series', 'time_series', (['[1, 2, 3]'], {'start_date': 'first'}), '([1, 2, 3], start_date=first)\n', (53366, 53395), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((53410, 53461), 'scikits.timeseries.time_series', 'time_series', (['[10, 20, 30, 40, 50]'], {'start_date': 'first'}), '([10, 20, 30, 40, 50], start_date=first)\n', (53421, 53461), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((53487, 53533), 'scikits.timeseries.concatenate', 'ts.concatenate', (['(a, b)'], {'remove_duplicates': '(True)'}), '((a, b), remove_duplicates=True)\n', (53501, 53533), True, 'import scikits.timeseries as ts\n'), ((53549, 53597), 'scikits.timeseries.time_series', 'time_series', (['[1, 2, 3, 40, 50]'], {'start_date': 'first'}), '([1, 2, 3, 40, 50], start_date=first)\n', (53560, 53597), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((53701, 53747), 'scikits.timeseries.concatenate', 'ts.concatenate', (['(b, a)'], {'remove_duplicates': '(True)'}), '((b, a), remove_duplicates=True)\n', (53715, 53747), True, 'import scikits.timeseries as ts\n'), ((53763, 53814), 'scikits.timeseries.time_series', 'time_series', (['[10, 20, 30, 40, 50]'], {'start_date': 'first'}), '([10, 20, 30, 40, 50], start_date=first)\n', (53774, 53814), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((53987, 54036), 'scikits.timeseries.concatenate', 'ts.concatenate', (['(a, b, c)'], {'remove_duplicates': '(True)'}), '((a, b, c), remove_duplicates=True)\n', (54001, 54036), True, 'import scikits.timeseries as ts\n'), 
((54052, 54120), 'scikits.timeseries.time_series', 'time_series', (['[1, 2, 3, 40, 50, 400, 500, 600, 700]'], {'start_date': 'first'}), '([1, 2, 3, 40, 50, 400, 500, 600, 700], start_date=first)\n', (54063, 54120), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((54241, 54290), 'scikits.timeseries.concatenate', 'ts.concatenate', (['(c, a, b)'], {'remove_duplicates': '(True)'}), '((c, a, b), remove_duplicates=True)\n', (54255, 54290), True, 'import scikits.timeseries as ts\n'), ((54306, 54378), 'scikits.timeseries.time_series', 'time_series', (['[1, 2, 100, 200, 300, 400, 500, 600, 700]'], {'start_date': 'first'}), '([1, 2, 100, 200, 300, 400, 500, 600, 700], start_date=first)\n', (54317, 54378), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((54569, 54634), 'numpy.ma.array', 'ma.array', (['[[1, 2], [2, 4], [3, 8]]'], {'mask': '[[0, 0], [1, 0], [0, 1]]'}), '([[1, 2], [2, 4], [3, 8]], mask=[[0, 0], [1, 0], [0, 1]])\n', (54577, 54634), True, 'import numpy.ma as ma\n'), ((54651, 54701), 'numpy.ma.array', 'ma.array', (['[[10, 20], [30, 40], [50, 60], [70, 80]]'], {}), '([[10, 20], [30, 40], [50, 60], [70, 80]])\n', (54659, 54701), True, 'import numpy.ma as ma\n'), ((54869, 54923), 'scikits.timeseries.concatenate', 'ts.concatenate', (['[a, b]'], {'axis': '(0)', 'remove_duplicates': '(True)'}), '([a, b], axis=0, remove_duplicates=True)\n', (54883, 54923), True, 'import scikits.timeseries as ts\n'), ((54939, 55080), 'numpy.ma.array', 'ma.array', (['[[1, 2], [2, 4], [3, 8], [10, 20], [30, 40], [50, 60], [70, 80]]'], {'mask': '[[0, 0], [1, 0], [0, 1], [0, 0], [0, 0], [0, 0], [0, 0]]'}), '([[1, 2], [2, 4], [3, 8], [10, 20], [30, 40], [50, 60], [70, 80]],\n mask=[[0, 0], [1, 0], [0, 1], [0, 0], [0, 0], [0, 0], [0, 0]])\n', (54947, 55080), True, 'import numpy.ma as ma\n'), ((55290, 55345), 'scikits.timeseries.concatenate', 'ts.concatenate', (['[a, b]'], {'axis': '(0)', 'remove_duplicates': '(False)'}), '([a, b], axis=0, remove_duplicates=False)\n', (55304, 55345), True, 'import scikits.timeseries as ts\n'), ((55501, 55556), 'scikits.timeseries.concatenate', 'ts.concatenate', (['[a, b]'], {'axis': '(0)', 'remove_duplicates': '(False)'}), '([a, b], axis=0, remove_duplicates=False)\n', (55515, 55556), True, 'import scikits.timeseries as ts\n'), ((56025, 56079), 'scikits.timeseries.concatenate', 'ts.concatenate', (['[a, b]'], {'axis': '(0)', 'remove_duplicates': '(True)'}), '([a, b], axis=0, remove_duplicates=True)\n', (56039, 56079), True, 'import scikits.timeseries as ts\n'), ((56095, 56248), 'scikits.timeseries.time_series', 'ts.time_series', (['[[1, 2], [2, 4], [3, 8], [30, 40], [50, 60], [70, 80]]'], {'mask': '[[0, 0], [1, 0], [0, 1], [0, 0], [0, 0], [0, 0]]', 'start_date': 'a.dates[0]'}), '([[1, 2], [2, 4], [3, 8], [30, 40], [50, 60], [70, 80]], mask\n =[[0, 0], [1, 0], [0, 1], [0, 0], [0, 0], [0, 0]], start_date=a.dates[0])\n', (56109, 56248), True, 'import scikits.timeseries as ts\n'), ((59821, 59850), 'scikits.timeseries.find_duplicated_dates', 'find_duplicated_dates', (['series'], {}), '(series)\n', (59842, 59850), False, 
'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((60143, 60219), 'scikits.timeseries.time_series', 'time_series', (['[0, 1, 2, 3, 4]'], {'dates': '[2000, 2000, 2000, 2000, 2000]', 'freq': '"""A"""'}), "([0, 1, 2, 3, 4], dates=[2000, 2000, 2000, 2000, 2000], freq='A')\n", (60154, 60219), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((60264, 60293), 'scikits.timeseries.find_duplicated_dates', 'find_duplicated_dates', (['series'], {}), '(series)\n', (60285, 60293), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((60601, 60630), 'scikits.timeseries.find_duplicated_dates', 'find_duplicated_dates', (['series'], {}), '(series)\n', (60622, 60630), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((60951, 60982), 'scikits.timeseries.remove_duplicated_dates', 'remove_duplicated_dates', (['series'], {}), '(series)\n', (60974, 60982), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((61450, 61481), 'scikits.timeseries.remove_duplicated_dates', 'remove_duplicated_dates', (['series'], {}), '(series)\n', (61473, 61481), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((61870, 61901), 'scikits.timeseries.remove_duplicated_dates', 'remove_duplicated_dates', (['series'], {}), '(series)\n', (61893, 61901), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((62130, 62245), 'scikits.timeseries.time_series', 'time_series', (['[0, 1, 2, 3, 4, 5, 6]'], {'dates': '[2005, 2005, 2004, 2003, 2002, 2002, 2002]', 'freq': '"""A"""', 'autosort': '(False)'}), "([0, 1, 2, 3, 4, 5, 6], dates=[2005, 2005, 2004, 2003, 2002, \n 2002, 2002], freq='A', autosort=False)\n", (62141, 62245), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((62343, 62374), 
'scikits.timeseries.remove_duplicated_dates', 'remove_duplicated_dates', (['series'], {}), '(series)\n', (62366, 62374), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((62393, 62479), 'scikits.timeseries.time_series', 'time_series', (['[0, 2, 3, 4]'], {'dates': '[2005, 2004, 2003, 2002]', 'freq': '"""A"""', 'autosort': '(True)'}), "([0, 2, 3, 4], dates=[2005, 2004, 2003, 2002], freq='A',\n autosort=True)\n", (62404, 62479), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((62841, 62851), 'numpy.ma.sqrt', 'ma.sqrt', (['a'], {}), '(a)\n', (62848, 62851), True, 'import numpy.ma as ma\n'), ((63190, 63206), 'scikits.timeseries.empty_like', 'ts.empty_like', (['x'], {}), '(x)\n', (63203, 63206), True, 'import scikits.timeseries as ts\n'), ((63466, 63482), 'scikits.timeseries.empty_like', 'ts.empty_like', (['x'], {}), '(x)\n', (63479, 63482), True, 'import scikits.timeseries as ts\n'), ((63614, 63638), 'numpy.arange', 'np.arange', (['(2 * 3 * 4 * 5)'], {}), '(2 * 3 * 4 * 5)\n', (63623, 63638), True, 'import numpy as np\n'), ((63656, 63682), 'numpy.empty', 'np.empty', (['(2 * 3 * 4 * 5,)'], {}), '((2 * 3 * 4 * 5,))\n', (63664, 63682), True, 'import numpy as np\n'), ((63858, 63880), 'numpy.empty', 'np.empty', (['(2 * 3 * 4,)'], {}), '((2 * 3 * 4,))\n', (63866, 63880), True, 'import numpy as np\n'), ((64040, 64062), 'numpy.empty', 'np.empty', (['(3 * 3 * 5,)'], {}), '((3 * 3 * 5,))\n', (64048, 64062), True, 'import numpy as np\n'), ((64258, 64280), 'numpy.empty', 'np.empty', (['(2 * 3 * 4,)'], {}), '((2 * 3 * 4,))\n', (64266, 64280), True, 'import numpy as np\n'), ((64387, 64409), 'numpy.empty', 'np.empty', (['(2 * 3 * 4,)'], {}), '((2 * 3 * 4,))\n', (64395, 64409), True, 'import numpy as np\n'), ((64559, 64579), 'numpy.empty', 'np.empty', (['(2 * 3, 4)'], {}), '((2 * 3, 4))\n', (64567, 64579), True, 'import numpy as np\n'), ((64764, 64778), 'numpy.empty', 'np.empty', (['(2,)'], {}), '((2,))\n', (64772, 64778), True, 'import numpy as np\n'), ((64928, 64946), 'numpy.empty', 'np.empty', (['(2 * 3,)'], {}), '((2 * 3,))\n', (64936, 64946), True, 'import numpy as np\n'), ((65032, 65043), 'scikits.timeseries.now', 'ts.now', (['"""M"""'], {}), "('M')\n", (65038, 65043), True, 'import scikits.timeseries as ts\n'), ((65630, 65641), 'copy.deepcopy', 'deepcopy', (['t'], {}), '(t)\n', (65638, 65641), False, 'from copy import deepcopy\n'), ((66052, 66263), 'numpy.ma.array', 'ma.array', (['[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, \n 19], [20, 21, 22, 23, 24]]'], {'mask': '[[0, 0, 1, 0, 0], [0, 0, 0, 1, 1], [1, 1, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1,\n 1, 0, 0]]'}), '([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, \n 17, 18, 19], [20, 21, 22, 23, 24]], mask=[[0, 0, 1, 0, 0], [0, 0, 0, 1,\n 1], [1, 1, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 0, 0]])\n', (66060, 66263), True, 'import numpy.ma as ma\n'), ((68189, 68207), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (68203, 68207), True, 'import numpy as np\n'), ((69504, 69532), 'scikits.timeseries.TimeSeries.cumsum', 
'ts.TimeSeries.cumsum', (['series'], {}), '(series)\n', (69524, 69532), True, 'import scikits.timeseries as ts\n'), ((69930, 69959), 'scikits.timeseries.TimeSeries.mean', 'ts.TimeSeries.mean', (['series', '(1)'], {}), '(series, 1)\n', (69948, 69959), True, 'import scikits.timeseries as ts\n'), ((70011, 70045), 'scikits.timeseries.TimeSeries.mean', 'ts.TimeSeries.mean', (['series'], {'axis': '(1)'}), '(series, axis=1)\n', (70029, 70045), True, 'import scikits.timeseries as ts\n'), ((1460, 1473), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (1469, 1473), True, 'import numpy as np\n'), ((6021, 6045), 'scikits.timeseries.TimeSeriesError', 'TimeSeriesError', (['err_msg'], {}), '(err_msg)\n', (6036, 6045), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((6244, 6255), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (6250, 6255), True, 'import scikits.timeseries as ts\n'), ((6397, 6421), 'scikits.timeseries.TimeSeriesError', 'TimeSeriesError', (['err_msg'], {}), '(err_msg)\n', (6412, 6421), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((6842, 6866), 'scikits.timeseries.TimeSeriesError', 'TimeSeriesError', (['err_msg'], {}), '(err_msg)\n', (6857, 6866), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((7428, 7452), 'scikits.timeseries.TimeSeriesError', 'TimeSeriesError', (['err_msg'], {}), '(err_msg)\n', (7443, 7452), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((9730, 9743), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (9739, 9743), True, 'import numpy as np\n'), ((11313, 11363), 'scikits.timeseries.time_series', 'ts.time_series', (['data'], {'start_date': 'start', 'length': '(100)'}), '(data, start_date=start, length=100)\n', (11327, 11363), True, 'import scikits.timeseries as ts\n'), ((11541, 11577), 'scikits.timeseries.TimeSeriesCompatibilityError', 'TimeSeriesCompatibilityError', (['errmsg'], {}), '(errmsg)\n', (11569, 11577), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((11683, 11699), 'numpy.ones', 'np.ones', (['(10, 2)'], {}), '((10, 2))\n', (11690, 11699), True, 'import numpy as np\n'), ((11807, 11823), 'numpy.ones', 'np.ones', (['(10, 1)'], {}), '((10, 1))\n', (11814, 11823), True, 'import numpy as np\n'), ((11931, 11945), 'numpy.ones', 'np.ones', (['(10,)'], {}), '((10,))\n', (11938, 11945), True, 'import numpy as np\n'), ((12375, 12388), 'numpy.arange', 'np.arange', 
(['(15)'], {}), '(15)\n', (12384, 12388), True, 'import numpy as np\n'), ((12434, 12468), 'scikits.timeseries.time_series', 'time_series', (['data', 'dlist'], {'freq': '"""D"""'}), "(data, dlist, freq='D')\n", (12445, 12468), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((16476, 16489), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (16485, 16489), True, 'import numpy as np\n'), ((25220, 25243), 'numpy.column_stack', 'np.column_stack', (['(a, b)'], {}), '((a, b))\n', (25235, 25243), True, 'import numpy as np\n'), ((25288, 25311), 'scikits.timeseries.time_series', 'time_series', (['a[0]', 'd[0]'], {}), '(a[0], d[0])\n', (25299, 25311), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((25395, 25412), 'scikits.timeseries.time_series', 'time_series', (['a', 'd'], {}), '(a, d)\n', (25406, 25412), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((26029, 26041), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (26038, 26041), True, 'import numpy as np\n'), ((27240, 27251), 'scikits.timeseries.now', 'ts.now', (['"""b"""'], {}), "('b')\n", (27246, 27251), True, 'import scikits.timeseries as ts\n'), ((37444, 37457), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (37453, 37457), True, 'import numpy as np\n'), ((37503, 37527), 'scikits.timeseries.time_series', 'time_series', (['data', 'dates'], {}), '(data, dates)\n', (37514, 37527), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((41407, 41420), 'scikits.timeseries.split', 'split', (['series'], {}), '(series)\n', (41412, 41420), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((41770, 41783), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (41779, 41783), True, 'import numpy as np\n'), ((43184, 43198), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (43193, 43198), True, 'import numpy as np\n'), ((43621, 43634), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (43630, 43634), True, 'import numpy as np\n'), ((48279, 48292), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (48288, 48292), True, 'import numpy as np\n'), ((48411, 48433), 'cPickle.dumps', 'cPickle.dumps', (['control'], {}), '(control)\n', (48424, 48433), False, 'import cPickle\n'), ((49568, 49581), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (49577, 49581), True, 'import numpy as np\n'), ((49752, 49765), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', 
(49761, 49765), True, 'import numpy as np\n'), ((49800, 49845), 'scikits.timeseries.date_array', 'date_array', ([], {'start_date': 'newyearsday', 'length': '(10)'}), '(start_date=newyearsday, length=10)\n', (49810, 49845), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((49879, 49891), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (49888, 49891), True, 'import numpy as np\n'), ((49925, 49969), 'scikits.timeseries.date_array', 'date_array', ([], {'start_date': 'newyearsday', 'length': '(5)'}), '(start_date=newyearsday, length=5)\n', (49935, 49969), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((50007, 50019), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (50016, 50019), True, 'import numpy as np\n'), ((50057, 50100), 'scikits.timeseries.date_array', 'date_array', ([], {'start_date': 'aprilsfool', 'length': '(5)'}), '(start_date=aprilsfool, length=5)\n', (50067, 50100), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((50127, 50197), 'scikits.timeseries.tseries._timeseriescompat_multiple', 'tseries._timeseriescompat_multiple', (['seriesM_10', 'seriesM_10', 'seriesM_10'], {}), '(seriesM_10, seriesM_10, seriesM_10)\n', (50161, 50197), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((50251, 50309), 'scikits.timeseries.tseries._timeseriescompat_multiple', 'tseries._timeseriescompat_multiple', (['seriesM_10', 'seriesD_10'], {}), '(seriesM_10, seriesD_10)\n', (50285, 50309), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((50474, 50531), 'scikits.timeseries.tseries._timeseriescompat_multiple', 'tseries._timeseriescompat_multiple', (['seriesD_5', 'seriesD_10'], {}), '(seriesD_5, seriesD_10)\n', (50508, 50531), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((50696, 50756), 'scikits.timeseries.tseries._timeseriescompat_multiple', 'tseries._timeseriescompat_multiple', (['seriesD_5', 'seriesD_5_apr'], {}), '(seriesD_5, seriesD_5_apr)\n', (50730, 50756), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, 
find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((51053, 51066), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (51062, 51066), True, 'import numpy as np\n'), ((51485, 51520), 'numpy.ma.column_stack', 'ma.column_stack', (['(data, data[::-1])'], {}), '((data, data[::-1]))\n', (51500, 51520), True, 'import numpy.ma as ma\n'), ((51995, 52007), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (52004, 52007), True, 'import numpy as np\n'), ((55229, 55263), 'numpy.concatenate', 'np.concatenate', (['(a.dates, b.dates)'], {}), '((a.dates, b.dates))\n', (55243, 55263), True, 'import numpy as np\n'), ((55419, 55453), 'numpy.concatenate', 'np.concatenate', (['(a.dates, b.dates)'], {}), '((a.dates, b.dates))\n', (55433, 55453), True, 'import numpy as np\n'), ((56541, 56554), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (56550, 56554), True, 'import numpy as np\n'), ((57360, 57393), 'scikits.timeseries.date_array', 'date_array', (['([series.dates[1]] * 5)'], {}), '([series.dates[1]] * 5)\n', (57370, 57393), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((57613, 57640), 'scikits.timeseries.date_array', 'date_array', (['series.dates[1]'], {}), '(series.dates[1])\n', (57623, 57640), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((57812, 57834), 'numpy.ma.array', 'ma.array', (['[2, 5, 8, 9]'], {}), '([2, 5, 8, 9])\n', (57820, 57834), True, 'import numpy.ma as ma\n'), ((57948, 57964), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (57957, 57964), True, 'import numpy as np\n'), ((58048, 58061), 'numpy.dtype', 'np.dtype', (['"""d"""'], {}), "('d')\n", (58056, 58061), True, 'import numpy as np\n'), ((58528, 58578), 'numpy.ma.array', 'ma.array', (['[999, -0.5, 1.0, 0.5]'], {'mask': '[1, 0, 0, 0]'}), '([999, -0.5, 1.0, 0.5], mask=[1, 0, 0, 0])\n', (58536, 58578), True, 'import numpy.ma as ma\n'), ((58682, 58731), 'numpy.ma.array', 'ma.array', (['[999, 999, 0.0, 2.0]'], {'mask': '[1, 1, 0, 0]'}), '([999, 999, 0.0, 2.0], mask=[1, 1, 0, 0])\n', (58690, 58731), True, 'import numpy.ma as ma\n'), ((58856, 58942), 'numpy.ma.array', 'ma.array', (['[999, -0.69314718056, 0.69314718056, 0.405465108108]'], {'mask': '[1, 0, 0, 0]'}), '([999, -0.69314718056, 0.69314718056, 0.405465108108], mask=[1, 0, \n 0, 0])\n', (58864, 58942), True, 'import numpy.ma as ma\n'), ((59078, 59137), 'numpy.ma.array', 'ma.array', (['[999, 999, 0.0, 1.09861228867]'], {'mask': '[1, 1, 0, 0]'}), '([999, 999, 0.0, 1.09861228867], mask=[1, 1, 0, 0])\n', (59086, 59137), True, 'import numpy.ma as ma\n'), ((59274, 59346), 'numpy.ma.array', 'ma.array', (['[999, -0.666666666667, 0.666666666667, 0.4]'], {'mask': '[1, 0, 0, 0]'}), '([999, -0.666666666667, 0.666666666667, 0.4], mask=[1, 0, 0, 0])\n', (59282, 59346), True, 'import numpy.ma as ma\n'), ((59477, 59526), 'numpy.ma.array', 'ma.array', (['[999, 999, 0.0, 1.0]'], {'mask': '[1, 1, 0, 0]'}), '([999, 999, 0.0, 1.0], mask=[1, 1, 0, 0])\n', (59485, 59526), True, 'import numpy.ma as ma\n'), ((59870, 59887), 'scikits.timeseries.Date', 'Date', (['"""A"""', '"""2003"""'], {}), 
"('A', '2003')\n", (59874, 59887), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((59932, 59949), 'scikits.timeseries.Date', 'Date', (['"""A"""', '"""2005"""'], {}), "('A', '2005')\n", (59936, 59949), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((60313, 60330), 'scikits.timeseries.Date', 'Date', (['"""A"""', '"""2000"""'], {}), "('A', '2000')\n", (60317, 60330), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((60542, 60554), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (60551, 60554), True, 'import numpy as np\n'), ((61811, 61823), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (61820, 61823), True, 'import numpy as np\n'), ((65073, 65086), 'numpy.arange', 'np.arange', (['(60)'], {}), '(60)\n', (65082, 65086), True, 'import numpy as np\n'), ((66523, 66556), 'scikits.timeseries.first_unmasked_val', 'ts.first_unmasked_val', (['data', 'None'], {}), '(data, None)\n', (66544, 66556), True, 'import scikits.timeseries as ts\n'), ((66582, 66614), 'scikits.timeseries.last_unmasked_val', 'ts.last_unmasked_val', (['data', 'None'], {}), '(data, None)\n', (66602, 66614), True, 'import scikits.timeseries as ts\n'), ((66641, 66671), 'scikits.timeseries.first_unmasked_val', 'ts.first_unmasked_val', (['data', '(0)'], {}), '(data, 0)\n', (66662, 66671), True, 'import scikits.timeseries as ts\n'), ((66711, 66740), 'scikits.timeseries.last_unmasked_val', 'ts.last_unmasked_val', (['data', '(0)'], {}), '(data, 0)\n', (66731, 66740), True, 'import scikits.timeseries as ts\n'), ((66785, 66816), 'scikits.timeseries.first_unmasked_val', 'ts.first_unmasked_val', (['data', '(-1)'], {}), '(data, -1)\n', (66806, 66816), True, 'import scikits.timeseries as ts\n'), ((66859, 66889), 'scikits.timeseries.last_unmasked_val', 'ts.last_unmasked_val', (['data', '(-1)'], {}), '(data, -1)\n', (66879, 66889), True, 'import scikits.timeseries as ts\n'), ((66968, 67002), 'scikits.timeseries.first_unmasked_val', 'ts.first_unmasked_val', (['data_', 'None'], {}), '(data_, None)\n', (66989, 67002), True, 'import scikits.timeseries as ts\n'), ((67028, 67061), 'scikits.timeseries.last_unmasked_val', 'ts.last_unmasked_val', (['data_', 'None'], {}), '(data_, None)\n', (67048, 67061), True, 'import scikits.timeseries as ts\n'), ((67088, 67119), 'scikits.timeseries.first_unmasked_val', 'ts.first_unmasked_val', (['data_', '(0)'], {}), '(data_, 0)\n', (67109, 67119), True, 'import scikits.timeseries as ts\n'), ((67159, 67189), 'scikits.timeseries.last_unmasked_val', 'ts.last_unmasked_val', (['data_', '(0)'], {}), '(data_, 0)\n', (67179, 67189), True, 'import scikits.timeseries as ts\n'), ((67234, 67266), 'scikits.timeseries.first_unmasked_val', 'ts.first_unmasked_val', (['data_', '(-1)'], {}), '(data_, -1)\n', (67255, 67266), True, 'import scikits.timeseries as ts\n'), ((67309, 67340), 
'scikits.timeseries.last_unmasked_val', 'ts.last_unmasked_val', (['data_', '(-1)'], {}), '(data_, -1)\n', (67329, 67340), True, 'import scikits.timeseries as ts\n'), ((67422, 67455), 'scikits.timeseries.first_unmasked_val', 'ts.first_unmasked_val', (['data', 'None'], {}), '(data, None)\n', (67443, 67455), True, 'import scikits.timeseries as ts\n'), ((67481, 67513), 'scikits.timeseries.last_unmasked_val', 'ts.last_unmasked_val', (['data', 'None'], {}), '(data, None)\n', (67501, 67513), True, 'import scikits.timeseries as ts\n'), ((67540, 67570), 'scikits.timeseries.first_unmasked_val', 'ts.first_unmasked_val', (['data', '(0)'], {}), '(data, 0)\n', (67561, 67570), True, 'import scikits.timeseries as ts\n'), ((67610, 67639), 'scikits.timeseries.last_unmasked_val', 'ts.last_unmasked_val', (['data', '(0)'], {}), '(data, 0)\n', (67630, 67639), True, 'import scikits.timeseries as ts\n'), ((67682, 67713), 'scikits.timeseries.first_unmasked_val', 'ts.first_unmasked_val', (['data', '(-1)'], {}), '(data, -1)\n', (67703, 67713), True, 'import scikits.timeseries as ts\n'), ((67736, 67786), 'numpy.ma.array', 'ma.array', (['[0, 5, 12, -1, 23]'], {'mask': '[0, 0, 0, 1, 0]'}), '([0, 5, 12, -1, 23], mask=[0, 0, 0, 1, 0])\n', (67744, 67786), True, 'import numpy.ma as ma\n'), ((67809, 67839), 'scikits.timeseries.last_unmasked_val', 'ts.last_unmasked_val', (['data', '(-1)'], {}), '(data, -1)\n', (67829, 67839), True, 'import scikits.timeseries as ts\n'), ((67862, 67912), 'numpy.ma.array', 'ma.array', (['[4, 7, 14, -1, 24]'], {'mask': '[0, 0, 0, 1, 0]'}), '([4, 7, 14, -1, 24], mask=[0, 0, 0, 1, 0])\n', (67870, 67912), True, 'import numpy.ma as ma\n'), ((72235, 72248), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (72244, 72248), True, 'import numpy as np\n'), ((72250, 72268), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (72264, 72268), True, 'import numpy as np\n'), ((73493, 73510), 'numpy.dtype', 'np.dtype', (['alttype'], {}), '(alttype)\n', (73501, 73510), True, 'import numpy as np\n'), ((8107, 8139), 'scikits.timeseries.Date', 'ts.Date', (['"""D"""', "('2001-01-%02i' % _)"], {}), "('D', '2001-01-%02i' % _)\n", (8114, 8139), True, 'import scikits.timeseries as ts\n'), ((8644, 8676), 'scikits.timeseries.Date', 'ts.Date', (['"""D"""', "('2001-01-%02i' % _)"], {}), "('D', '2001-01-%02i' % _)\n", (8651, 8676), True, 'import scikits.timeseries as ts\n'), ((9096, 9128), 'scikits.timeseries.Date', 'ts.Date', (['"""D"""', "('2001-01-%02i' % _)"], {}), "('D', '2001-01-%02i' % _)\n", (9103, 9128), True, 'import scikits.timeseries as ts\n'), ((9201, 9214), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (9210, 9214), True, 'import numpy as np\n'), ((10446, 10478), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(50 * 12)'], {}), '(0, 1, 50 * 12)\n', (10463, 10478), True, 'import numpy as np\n'), ((11712, 11723), 'scikits.timeseries.now', 'ts.now', (['"""d"""'], {}), "('d')\n", (11718, 11723), True, 'import scikits.timeseries as ts\n'), ((11836, 11847), 'scikits.timeseries.now', 'ts.now', (['"""d"""'], {}), "('d')\n", (11842, 11847), True, 'import scikits.timeseries as ts\n'), ((11958, 11969), 'scikits.timeseries.now', 'ts.now', (['"""d"""'], {}), "('d')\n", (11964, 11969), True, 'import scikits.timeseries as ts\n'), ((24946, 24976), 'scikits.timeseries.Date', 'Date', (['"""D"""'], {'string': '"""2007-01-06"""'}), "('D', string='2007-01-06')\n", (24950, 24976), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, 
date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((25171, 25179), 'scikits.timeseries.now', 'now', (['"""M"""'], {}), "('M')\n", (25174, 25179), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((27253, 27264), 'scikits.timeseries.now', 'ts.now', (['"""b"""'], {}), "('b')\n", (27259, 27264), True, 'import scikits.timeseries as ts\n'), ((28343, 28361), 'numpy.random.rand', 'np.random.rand', (['(60)'], {}), '(60)\n', (28357, 28361), True, 'import numpy as np\n'), ((28790, 28816), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2001-01-01"""'], {}), "('M', '2001-01-01')\n", (28797, 28816), True, 'import scikits.timeseries as ts\n'), ((29332, 29358), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2001-01-01"""'], {}), "('M', '2001-01-01')\n", (29339, 29358), True, 'import scikits.timeseries as ts\n'), ((30005, 30031), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2001-01-01"""'], {}), "('M', '2001-01-01')\n", (30012, 30031), True, 'import scikits.timeseries as ts\n'), ((31740, 31763), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2001-01"""'], {}), "('M', '2001-01')\n", (31747, 31763), True, 'import scikits.timeseries as ts\n'), ((33776, 33787), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (33782, 33787), True, 'import scikits.timeseries as ts\n'), ((35279, 35302), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2009-01"""'], {}), "('M', '2009-01')\n", (35286, 35302), True, 'import scikits.timeseries as ts\n'), ((35621, 35644), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2009-01"""'], {}), "('M', '2009-01')\n", (35628, 35644), True, 'import scikits.timeseries as ts\n'), ((36331, 36354), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2009-01"""'], {}), "('M', '2009-01')\n", (36338, 36354), True, 'import scikits.timeseries as ts\n'), ((36628, 36651), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2009-01"""'], {}), "('M', '2009-01')\n", (36635, 36651), True, 'import scikits.timeseries as ts\n'), ((37981, 38011), 'scikits.timeseries.Date', 'Date', (['"""D"""'], {'string': '"""2007-01-31"""'}), "('D', string='2007-01-31')\n", (37985, 38011), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((38205, 38235), 'scikits.timeseries.Date', 'Date', (['"""D"""'], {'string': '"""2007-01-06"""'}), "('D', string='2007-01-06')\n", (38209, 38235), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((38406, 38436), 'scikits.timeseries.Date', 'Date', (['"""D"""'], {'string': '"""2007-01-06"""'}), "('D', string='2007-01-06')\n", (38410, 38436), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, 
align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((38482, 38512), 'scikits.timeseries.Date', 'Date', (['"""D"""'], {'string': '"""2007-01-31"""'}), "('D', string='2007-01-31')\n", (38486, 38512), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((41147, 41188), 'scikits.timeseries.Date', 'Date', ([], {'freq': '"""d"""', 'year': '(2005)', 'month': '(7)', 'day': '(1)'}), "(freq='d', year=2005, month=7, day=1)\n", (41151, 41188), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((43825, 43848), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2001-01"""'], {}), "('M', '2001-01')\n", (43832, 43848), True, 'import scikits.timeseries as ts\n'), ((43964, 43984), 'scikits.timeseries.Date', 'ts.Date', (['"""A"""', '"""2001"""'], {}), "('A', '2001')\n", (43971, 43984), True, 'import scikits.timeseries as ts\n'), ((44217, 44252), 'scikits.timeseries.Date', 'Date', ([], {'freq': '"""D"""', 'string': '"""2005-07-01"""'}), "(freq='D', string='2005-07-01')\n", (44221, 44252), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((44980, 44997), 'numpy.arange', 'np.arange', (['(5 * 24)'], {}), '(5 * 24)\n', (44989, 44997), True, 'import numpy as np\n'), ((46502, 46525), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2001-01"""'], {}), "('M', '2001-01')\n", (46509, 46525), True, 'import scikits.timeseries as ts\n'), ((47253, 47268), 'scikits.timeseries.Date', 'ts.Date', (['"""U"""', '(1)'], {}), "('U', 1)\n", (47260, 47268), True, 'import scikits.timeseries as ts\n'), ((47779, 47787), 'scikits.timeseries.now', 'now', (['"""D"""'], {}), "('D')\n", (47782, 47787), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((48305, 48323), 'scikits.timeseries.Date', 'ts.Date', (['"""A"""', '(2001)'], {}), "('A', 2001)\n", (48312, 48323), True, 'import scikits.timeseries as ts\n'), ((53933, 53948), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (53942, 53948), True, 'import numpy as np\n'), ((54744, 54771), 'scikits.timeseries.Date', 'ts.Date', (['"""D"""', '"""01-Jan-2009"""'], {}), "('D', '01-Jan-2009')\n", (54751, 54771), True, 'import scikits.timeseries as ts\n'), ((54815, 54842), 'scikits.timeseries.Date', 'ts.Date', (['"""D"""', '"""05-Jan-2009"""'], {}), "('D', '05-Jan-2009')\n", (54822, 54842), True, 'import scikits.timeseries as ts\n'), ((55849, 55883), 'numpy.concatenate', 'np.concatenate', (['(a.dates, b.dates)'], {}), '((a.dates, b.dates))\n', (55863, 55883), True, 'import numpy as np\n'), ((56567, 56575), 'scikits.timeseries.now', 'now', (['"""D"""'], {}), "('D')\n", (56570, 
56575), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((57237, 57245), 'scikits.timeseries.now', 'now', (['"""D"""'], {}), "('D')\n", (57240, 57245), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((57977, 57985), 'scikits.timeseries.now', 'now', (['"""D"""'], {}), "('D')\n", (57980, 57985), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((58380, 58408), 'scikits.timeseries.Date', 'ts.Date', ([], {'freq': '"""A"""', 'year': '(2005)'}), "(freq='A', year=2005)\n", (58387, 58408), True, 'import scikits.timeseries as ts\n'), ((59890, 59909), 'numpy.array', 'np.array', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (59898, 59909), True, 'import numpy as np\n'), ((59952, 59968), 'numpy.array', 'np.array', (['[7, 8]'], {}), '([7, 8])\n', (59960, 59968), True, 'import numpy as np\n'), ((60333, 60358), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (60341, 60358), True, 'import numpy as np\n'), ((60567, 60584), 'scikits.timeseries.Date', 'Date', (['"""A"""', '"""2001"""'], {}), "('A', '2001')\n", (60571, 60584), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((61077, 61094), 'scikits.timeseries.Date', 'Date', (['"""A"""', '"""2000"""'], {}), "('A', '2000')\n", (61081, 61094), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((61560, 61577), 'scikits.timeseries.Date', 'Date', (['"""A"""', '"""2000"""'], {}), "('A', '2000')\n", (61564, 61577), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((61836, 61853), 'scikits.timeseries.Date', 'Date', (['"""A"""', '"""2001"""'], {}), "('A', '2001')\n", (61840, 61853), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((62819, 62827), 'scikits.timeseries.now', 'now', (['"""D"""'], {}), "('D')\n", (62822, 62827), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, 
concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((62940, 62950), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (62947, 62950), True, 'import numpy as np\n'), ((63168, 63176), 'scikits.timeseries.now', 'now', (['"""D"""'], {}), "('D')\n", (63171, 63176), False, 'from scikits.timeseries import TimeSeries, TimeSeriesError, TimeSeriesCompatibilityError, tseries, Date, date_array, now, time_series, adjust_endpoints, align_series, align_with, concatenate, fill_missing_dates, find_duplicated_dates, remove_duplicated_dates, split, stack\n'), ((65604, 65615), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (65610, 65615), True, 'import scikits.timeseries as ts\n'), ((69310, 69321), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (69316, 69321), True, 'import scikits.timeseries as ts\n'), ((69389, 69400), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (69395, 69400), True, 'import scikits.timeseries as ts\n'), ((69745, 69756), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (69751, 69756), True, 'import scikits.timeseries as ts\n'), ((69816, 69827), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (69822, 69827), True, 'import scikits.timeseries as ts\n'), ((70249, 70260), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (70255, 70260), True, 'import scikits.timeseries as ts\n'), ((70320, 70331), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (70326, 70331), True, 'import scikits.timeseries as ts\n'), ((70606, 70617), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (70612, 70617), True, 'import scikits.timeseries as ts\n'), ((70962, 70980), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (70976, 70980), True, 'import numpy as np\n'), ((70982, 70995), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (70991, 70995), True, 'import numpy as np\n'), ((71149, 71172), 'scikits.timeseries.Date', 'ts.Date', (['"""M"""', '"""2007-01"""'], {}), "('M', '2007-01')\n", (71156, 71172), True, 'import scikits.timeseries as ts\n'), ((72428, 72439), 'scikits.timeseries.now', 'ts.now', (['"""M"""'], {}), "('M')\n", (72434, 72439), True, 'import scikits.timeseries as ts\n'), ((73706, 73724), 'numpy.random.rand', 'np.random.rand', (['(15)'], {}), '(15)\n', (73720, 73724), True, 'import numpy as np\n'), ((73837, 73848), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (73843, 73848), True, 'import scikits.timeseries as ts\n'), ((73911, 73922), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (73917, 73922), True, 'import scikits.timeseries as ts\n'), ((16695, 16713), 'numpy.random.rand', 'np.random.rand', (['(60)'], {}), '(60)\n', (16709, 16713), True, 'import numpy as np\n'), ((41081, 41094), 'numpy.arange', 'np.arange', (['(62)'], {}), '(62)\n', (41090, 41094), True, 'import numpy as np\n'), ((44144, 44158), 'numpy.arange', 'np.arange', (['(124)'], {}), '(124)\n', (44153, 44158), True, 'import numpy as np\n'), ((65209, 65222), 'numpy.arange', 'np.arange', (['(60)'], {}), '(60)\n', (65218, 65222), True, 'import numpy as np\n'), ((65356, 65369), 'numpy.arange', 'np.arange', (['(60)'], {}), '(60)\n', (65365, 65369), True, 'import numpy as np\n'), ((68259, 68270), 'scikits.timeseries.now', 'ts.now', (['"""D"""'], {}), "('D')\n", (68265, 68270), True, 'import scikits.timeseries as ts\n'), ((69674, 69686), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (69683, 69686), 
True, 'import numpy as np\n'), ((70178, 70190), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (70187, 70190), True, 'import numpy as np\n'), ((30968, 31009), 'scikits.timeseries.date_array', 'ts.date_array', ([], {'start_date': 'start', 'length': '(4)'}), '(start_date=start, length=4)\n', (30981, 31009), True, 'import scikits.timeseries as ts\n')]
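The tuples above close out the extract_api record for a scikits.timeseries test module: each entry pairs a character span in the source with the fully qualified API name, the local call expression, the literalized arguments, and the import that binds it. For orientation, here is a minimal sketch reconstructing a few of the call patterns recorded above; it assumes the legacy scikits.timeseries package is installed and is illustrative only, not part of the record:

import numpy as np
import scikits.timeseries as ts

# ts.Date(freq, string) and date_array(start_date=..., length=...) as recorded above
start = ts.Date('D', '2001-01-01')
dates = ts.date_array(start_date=start, length=10)
series = ts.time_series(np.arange(10), dates=dates)  # a TimeSeries over those dates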
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # (C) British Crown copyright. The Met Office. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Unit tests for the wind_components.ResolveWindComponents plugin.""" import unittest import iris import numpy as np from iris.coord_systems import OSGB from iris.coords import DimCoord from iris.tests import IrisTest from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube from improver.wind_calculations.wind_components import ResolveWindComponents RAD_TO_DEG = 180.0 / np.pi def set_up_cube(data_2d, name, unit): """Set up a 2D test cube of wind direction or speed""" cube = set_up_variable_cube( data_2d.astype(np.float32), name=name, units=unit, spatial_grid="equalarea" ) cube.coord("projection_x_coordinate").points = np.linspace( 150000, 250000, data_2d.shape[1] ) cube.coord("projection_y_coordinate").points = np.linspace( 0, 600000, data_2d.shape[0] ) for axis in ["x", "y"]: cube.coord(axis=axis).units = "metres" cube.coord(axis=axis).coord_system = OSGB() cube.coord(axis=axis).bounds = None return cube def add_new_dimension(cube, npoints, name, unit): """Add a new dimension with npoints by copying cube data""" cubelist = iris.cube.CubeList([]) for i in range(npoints): newcube = cube.copy(cube.data) newcube.add_aux_coord(DimCoord(i, name, unit)) cubelist.append(newcube) merged_cube = cubelist.merge_cube() return merged_cube class Test__repr__(IrisTest): """Tests the __repr__ method""" def test_basic(self): """Tests the output string is as expected""" result = str(ResolveWindComponents()) self.assertEqual(result, "<ResolveWindComponents>") class Test_calc_true_north_offset(IrisTest): """Tests the calc_true_north_offset function""" def setUp(self): """Set up a target cube with OSGB projection""" wind_angle = np.zeros((3, 5), dtype=np.float32) self.directions = set_up_cube(wind_angle, "wind_to_direction", "degrees") self.plugin = ResolveWindComponents() def test_basic(self): """Test function returns correct type""" result = 
self.plugin.calc_true_north_offset(self.directions) self.assertIsInstance(result, np.ndarray) def test_values(self): """Test that for UK National Grid coordinates the angle adjustments are sensible""" expected_result = np.array( [ [2.651483, 2.386892, 2.122119, 1.857182, 1.592121], [2.921058, 2.629620, 2.337963, 2.046132, 1.754138], [3.223816, 2.902300, 2.580523, 2.258494, 1.936247], ], dtype=np.float32, ) result = self.plugin.calc_true_north_offset(self.directions) self.assertArrayAlmostEqual(RAD_TO_DEG * result, expected_result) class Test_resolve_wind_components(IrisTest): """Tests the resolve_wind_components method""" def setUp(self): """Set up some arrays to convert""" self.plugin = ResolveWindComponents() wind_speed = 10.0 * np.ones((4, 4), dtype=np.float32) wind_angle = np.array( [ [0.0, 30.0, 45.0, 60.0], [90.0, 120.0, 135.0, 150.0], [180.0, 210.0, 225.0, 240.0], [270.0, 300.0, 315.0, 330.0], ], dtype=np.float32, ) self.wind_cube = set_up_cube(wind_speed, "wind_speed", "knots") self.directions = set_up_cube(wind_angle, "wind_to_direction", "degrees") self.adjustments = np.zeros((4, 4), dtype=np.float32) def test_basic(self): """Test function returns correct type""" uspeed, vspeed = self.plugin.resolve_wind_components( self.wind_cube, self.directions, self.adjustments ) self.assertIsInstance(uspeed, iris.cube.Cube) self.assertIsInstance(vspeed, iris.cube.Cube) def test_values(self): """Test correct values are returned for well-behaved angles""" expected_uspeed = 5.0 * np.array( [ [0.0, 1.0, np.sqrt(2.0), np.sqrt(3.0)], [2.0, np.sqrt(3.0), np.sqrt(2.0), 1.0], [0.0, -1.0, -np.sqrt(2.0), -np.sqrt(3.0)], [-2.0, -np.sqrt(3.0), -np.sqrt(2.0), -1.0], ], dtype=np.float32, ) expected_vspeed = 5 * np.array( [ [2.0, np.sqrt(3.0), np.sqrt(2.0), 1.0], [0.0, -1.0, -np.sqrt(2.0), -np.sqrt(3.0)], [-2.0, -np.sqrt(3.0), -np.sqrt(2.0), -1.0], [0.0, 1.0, np.sqrt(2.0), np.sqrt(3.0)], ], dtype=np.float32, ) uspeed, vspeed = self.plugin.resolve_wind_components( self.wind_cube, self.directions, self.adjustments ) self.assertArrayAlmostEqual(uspeed.data, expected_uspeed, decimal=5) self.assertArrayAlmostEqual(vspeed.data, expected_vspeed, decimal=5) class Test_process(IrisTest): """Tests the process method""" def setUp(self): """Create dummy cubes for tests""" self.plugin = ResolveWindComponents() wind_speed_data = np.array( [[6, 5, 4, 3], [8, 6, 4, 4], [12, 8, 6, 5]], dtype=np.float32 ) self.wind_speed_cube = set_up_cube(wind_speed_data, "wind_speed", "knots") wind_direction_data = np.array( [[138, 142, 141, 141], [141, 143, 140, 142], [142, 146, 141, 142]], dtype=np.float32, ) self.wind_direction_cube = set_up_cube( wind_direction_data, "wind_to_direction", "degrees" ) self.expected_u = np.array( [ [3.804214, 2.917800, 2.410297, 1.822455], [4.711193, 3.395639, 2.454748, 2.365005], [6.844465, 4.144803, 3.580219, 2.943424], ], dtype=np.float32, ) self.expected_v = np.array( [ [-4.639823, -4.060351, -3.1922507, -2.382994], [-6.465651, -4.946681, -3.1581972, -3.225949], [-9.856638, -6.842559, -4.8147717, -4.041813], ], dtype=np.float32, ) def test_basic(self): """Test plugin creates two output cubes with the correct metadata""" ucube, vcube = self.plugin.process( self.wind_speed_cube, self.wind_direction_cube ) for cube in ucube, vcube: self.assertIsInstance(cube, iris.cube.Cube) self.assertEqual(cube.units, self.wind_speed_cube.units) self.assertEqual(ucube.name(), "grid_eastward_wind") self.assertEqual(vcube.name(), "grid_northward_wind") def test_values(self): """Test plugin generates expected wind 
values""" ucube, vcube = self.plugin.process( self.wind_speed_cube, self.wind_direction_cube ) self.assertArrayAlmostEqual(ucube.data, self.expected_u, decimal=5) self.assertArrayAlmostEqual(vcube.data, self.expected_v, decimal=5) def test_coordinate_value_mismatch(self): """Test an error is raised if coordinate values are different for wind speed and direction cubes""" self.wind_direction_cube.coord(axis="y").convert_units("km") msg = "Wind speed and direction cubes have unmatched coordinates" with self.assertRaisesRegex(ValueError, msg): _, _ = self.plugin.process(self.wind_speed_cube, self.wind_direction_cube) def test_projection_mismatch(self): """Test an error is raised if coordinate names are different for wind speed and direction cubes""" self.wind_speed_cube.coord(axis="x").rename("longitude") self.wind_speed_cube.coord(axis="y").rename("latitude") msg = "Wind speed and direction cubes have unmatched coordinates" with self.assertRaisesRegex(ValueError, msg): _, _ = self.plugin.process(self.wind_speed_cube, self.wind_direction_cube) def test_height_levels(self): """Test a cube on more than one height level is correctly processed""" wind_speed_3d = add_new_dimension(self.wind_speed_cube, 3, "height", "km") wind_direction_3d = add_new_dimension( self.wind_direction_cube, 3, "height", "km" ) ucube, vcube = self.plugin.process(wind_speed_3d, wind_direction_3d) self.assertSequenceEqual(ucube.shape, (3, 3, 4)) self.assertArrayAlmostEqual(ucube[1].data, self.expected_u, decimal=5) self.assertArrayAlmostEqual(vcube[2].data, self.expected_v, decimal=5) def test_wind_from_direction(self): """Test correct behaviour when wind direction is 'from' not 'to'. We do not get perfect direction inversion to the 7th decimal place here because we ignore imprecision in the iris rotate_winds calcuation near the corners of the domain, and regrid the available data linearly to fill the gap. The output wind speeds (in m s-1) compare equal to the 5th decimal place. """ expected_u = -1.0 * self.expected_u expected_v = -1.0 * self.expected_v self.wind_direction_cube.rename("wind_from_direction") ucube, vcube = self.plugin.process( self.wind_speed_cube, self.wind_direction_cube ) self.assertArrayAllClose(ucube.data, expected_u, atol=1e-5) self.assertArrayAllClose(vcube.data, expected_v, atol=1e-5) if __name__ == "__main__": unittest.main()
[ "numpy.sqrt", "iris.cube.CubeList", "numpy.ones", "iris.coords.DimCoord", "numpy.array", "numpy.linspace", "numpy.zeros", "improver.wind_calculations.wind_components.ResolveWindComponents", "unittest.main", "iris.coord_systems.OSGB" ]
[((2334, 2379), 'numpy.linspace', 'np.linspace', (['(150000)', '(250000)', 'data_2d.shape[1]'], {}), '(150000, 250000, data_2d.shape[1])\n', (2345, 2379), True, 'import numpy as np\n'), ((2445, 2485), 'numpy.linspace', 'np.linspace', (['(0)', '(600000)', 'data_2d.shape[0]'], {}), '(0, 600000, data_2d.shape[0])\n', (2456, 2485), True, 'import numpy as np\n'), ((2819, 2841), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[]'], {}), '([])\n', (2837, 2841), False, 'import iris\n'), ((11145, 11160), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11158, 11160), False, 'import unittest\n'), ((2620, 2626), 'iris.coord_systems.OSGB', 'OSGB', ([], {}), '()\n', (2624, 2626), False, 'from iris.coord_systems import OSGB\n'), ((3513, 3547), 'numpy.zeros', 'np.zeros', (['(3, 5)'], {'dtype': 'np.float32'}), '((3, 5), dtype=np.float32)\n', (3521, 3547), True, 'import numpy as np\n'), ((3652, 3675), 'improver.wind_calculations.wind_components.ResolveWindComponents', 'ResolveWindComponents', ([], {}), '()\n', (3673, 3675), False, 'from improver.wind_calculations.wind_components import ResolveWindComponents\n'), ((4025, 4216), 'numpy.array', 'np.array', (['[[2.651483, 2.386892, 2.122119, 1.857182, 1.592121], [2.921058, 2.62962, \n 2.337963, 2.046132, 1.754138], [3.223816, 2.9023, 2.580523, 2.258494, \n 1.936247]]'], {'dtype': 'np.float32'}), '([[2.651483, 2.386892, 2.122119, 1.857182, 1.592121], [2.921058, \n 2.62962, 2.337963, 2.046132, 1.754138], [3.223816, 2.9023, 2.580523, \n 2.258494, 1.936247]], dtype=np.float32)\n', (4033, 4216), True, 'import numpy as np\n'), ((4638, 4661), 'improver.wind_calculations.wind_components.ResolveWindComponents', 'ResolveWindComponents', ([], {}), '()\n', (4659, 4661), False, 'from improver.wind_calculations.wind_components import ResolveWindComponents\n'), ((4745, 4892), 'numpy.array', 'np.array', (['[[0.0, 30.0, 45.0, 60.0], [90.0, 120.0, 135.0, 150.0], [180.0, 210.0, 225.0,\n 240.0], [270.0, 300.0, 315.0, 330.0]]'], {'dtype': 'np.float32'}), '([[0.0, 30.0, 45.0, 60.0], [90.0, 120.0, 135.0, 150.0], [180.0, \n 210.0, 225.0, 240.0], [270.0, 300.0, 315.0, 330.0]], dtype=np.float32)\n', (4753, 4892), True, 'import numpy as np\n'), ((5183, 5217), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {'dtype': 'np.float32'}), '((4, 4), dtype=np.float32)\n', (5191, 5217), True, 'import numpy as np\n'), ((6761, 6784), 'improver.wind_calculations.wind_components.ResolveWindComponents', 'ResolveWindComponents', ([], {}), '()\n', (6782, 6784), False, 'from improver.wind_calculations.wind_components import ResolveWindComponents\n'), ((6811, 6882), 'numpy.array', 'np.array', (['[[6, 5, 4, 3], [8, 6, 4, 4], [12, 8, 6, 5]]'], {'dtype': 'np.float32'}), '([[6, 5, 4, 3], [8, 6, 4, 4], [12, 8, 6, 5]], dtype=np.float32)\n', (6819, 6882), True, 'import numpy as np\n'), ((7019, 7117), 'numpy.array', 'np.array', (['[[138, 142, 141, 141], [141, 143, 140, 142], [142, 146, 141, 142]]'], {'dtype': 'np.float32'}), '([[138, 142, 141, 141], [141, 143, 140, 142], [142, 146, 141, 142]],\n dtype=np.float32)\n', (7027, 7117), True, 'import numpy as np\n'), ((7298, 7460), 'numpy.array', 'np.array', (['[[3.804214, 2.9178, 2.410297, 1.822455], [4.711193, 3.395639, 2.454748, \n 2.365005], [6.844465, 4.144803, 3.580219, 2.943424]]'], {'dtype': 'np.float32'}), '([[3.804214, 2.9178, 2.410297, 1.822455], [4.711193, 3.395639, \n 2.454748, 2.365005], [6.844465, 4.144803, 3.580219, 2.943424]], dtype=\n np.float32)\n', (7306, 7460), True, 'import numpy as np\n'), ((7578, 7757), 'numpy.array', 'np.array', (['[[-4.639823, 
-4.060351, -3.1922507, -2.382994], [-6.465651, -4.946681, -\n 3.1581972, -3.225949], [-9.856638, -6.842559, -4.8147717, -4.041813]]'], {'dtype': 'np.float32'}), '([[-4.639823, -4.060351, -3.1922507, -2.382994], [-6.465651, -\n 4.946681, -3.1581972, -3.225949], [-9.856638, -6.842559, -4.8147717, -\n 4.041813]], dtype=np.float32)\n', (7586, 7757), True, 'import numpy as np\n'), ((2940, 2963), 'iris.coords.DimCoord', 'DimCoord', (['i', 'name', 'unit'], {}), '(i, name, unit)\n', (2948, 2963), False, 'from iris.coords import DimCoord\n'), ((3230, 3253), 'improver.wind_calculations.wind_components.ResolveWindComponents', 'ResolveWindComponents', ([], {}), '()\n', (3251, 3253), False, 'from improver.wind_calculations.wind_components import ResolveWindComponents\n'), ((4690, 4723), 'numpy.ones', 'np.ones', (['(4, 4)'], {'dtype': 'np.float32'}), '((4, 4), dtype=np.float32)\n', (4697, 4723), True, 'import numpy as np\n'), ((5718, 5730), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (5725, 5730), True, 'import numpy as np\n'), ((5732, 5744), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (5739, 5744), True, 'import numpy as np\n'), ((5769, 5781), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (5776, 5781), True, 'import numpy as np\n'), ((5783, 5795), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (5790, 5795), True, 'import numpy as np\n'), ((6054, 6066), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (6061, 6066), True, 'import numpy as np\n'), ((6068, 6080), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (6075, 6080), True, 'import numpy as np\n'), ((6234, 6246), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (6241, 6246), True, 'import numpy as np\n'), ((6248, 6260), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (6255, 6260), True, 'import numpy as np\n'), ((5832, 5844), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (5839, 5844), True, 'import numpy as np\n'), ((5847, 5859), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (5854, 5859), True, 'import numpy as np\n'), ((5886, 5898), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (5893, 5898), True, 'import numpy as np\n'), ((5901, 5913), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (5908, 5913), True, 'import numpy as np\n'), ((6117, 6129), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (6124, 6129), True, 'import numpy as np\n'), ((6132, 6144), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (6139, 6144), True, 'import numpy as np\n'), ((6171, 6183), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (6178, 6183), True, 'import numpy as np\n'), ((6186, 6198), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (6193, 6198), True, 'import numpy as np\n')]
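The expected u/v fixtures in the wind-components tests above follow from resolving a "to" direction into eastward and northward components, u = speed*sin(angle) and v = speed*cos(angle); the plugin additionally corrects for the offset between grid north and true north, which the zero-adjustment test deliberately removes. A minimal numpy check of the first fixture row, illustrative only and not part of the test suite:

import numpy as np

speed = 10.0                                  # knots, as in Test_resolve_wind_components
angle = np.deg2rad([0.0, 30.0, 45.0, 60.0])  # first row of the "to" directions
u = speed * np.sin(angle)                     # eastward component
v = speed * np.cos(angle)                     # northward component
print(np.allclose(u, 5 * np.array([0.0, 1.0, np.sqrt(2.0), np.sqrt(3.0)])))  # True
print(np.allclose(v, 5 * np.array([2.0, np.sqrt(3.0), np.sqrt(2.0), 1.0])))  # True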
import argparse from utils.distributions import RandInt, Uniform from functions.mnist import MLPWithMNIST import numpy as np import os import datetime from hyperband import Hyperband from utils import plot_util import time import pandas as pd def get_path_with_time(alg_name): time_name = str(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')) path = 'log/' + alg_name + '/' + time_name return path def get_param_with_bench(bench): params = {} if bench == 'MLPWithMNIST': # hyperparameters params['hparams'] = { 'lr': Uniform(0.001, 0.30), 'momentum': Uniform(0.50, 0.999), 'fc1_unit': RandInt(30, 1000), 'fc2_unit': RandInt(30, 1000) } params['obj_func'] = MLPWithMNIST params['seed'] = np.random.randint(0, 2 ** 32 - 1) params['path'] = get_path_with_time('random_search') if not os.path.isdir(params['path']): os.makedirs(params['path']) print('create directory which is ' + params['path']) return params def main(): parser = argparse.ArgumentParser(description='Hyperband main script') parser.add_argument('bench', action='store', nargs=None, const=None, default=None, type=str, choices=['MLPWithMNIST'], help='the benchmark function you want to run', metavar=None) parser.add_argument('--max_iter', type=int, default=27, help='maximum amount of resource that can be allocated to a single configuration') parser.add_argument('--eta', type=int, default=3, help='proportion of the configurations discarded in each round of SH') parser.add_argument('--patience', type=int, default=5, help='threshold for original early-stopping') parser.add_argument('--gcp', action='store_true') args = parser.parse_args() params = get_param_with_bench(args.bench) params['max_iter'] = args.max_iter params['eta'] = args.eta params['patience'] = args.patience params['homedir'] = '/hyperband_sandbox/' if args.gcp else './' # run optimization hb = Hyperband(**params) best = hb.run() print("best:{}".format(best)) separate_history = hb.separate_history print("separate_history:{}".format(separate_history)) i = 0 for k, v in separate_history.items(): df = pd.DataFrame(v) df.to_csv("./log_{}.csv".format(i)) i += 1 plot_util.plot_separately(separate_history, homedir=params['homedir']) if __name__ == '__main__': start = time.time() main() elapsed_time = time.time() - start print("elapsed_time:{}".format(elapsed_time) + "[sec]")
[ "utils.plot_util.plot_separately", "argparse.ArgumentParser", "os.makedirs", "utils.distributions.Uniform", "datetime.datetime.now", "numpy.random.randint", "os.path.isdir", "pandas.DataFrame", "time.time", "hyperband.Hyperband", "utils.distributions.RandInt" ]
[((799, 832), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2 ** 32 - 1)'], {}), '(0, 2 ** 32 - 1)\n', (816, 832), True, 'import numpy as np\n'), ((1070, 1130), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hyperband main script"""'}), "(description='Hyperband main script')\n", (1093, 1130), False, 'import argparse\n'), ((2460, 2479), 'hyperband.Hyperband', 'Hyperband', ([], {}), '(**params)\n', (2469, 2479), False, 'from hyperband import Hyperband\n'), ((2781, 2851), 'utils.plot_util.plot_separately', 'plot_util.plot_separately', (['separate_history'], {'homedir': "params['homedir']"}), "(separate_history, homedir=params['homedir'])\n", (2806, 2851), False, 'from utils import plot_util\n'), ((2893, 2904), 'time.time', 'time.time', ([], {}), '()\n', (2902, 2904), False, 'import time\n'), ((901, 930), 'os.path.isdir', 'os.path.isdir', (["params['path']"], {}), "(params['path'])\n", (914, 930), False, 'import os\n'), ((940, 967), 'os.makedirs', 'os.makedirs', (["params['path']"], {}), "(params['path'])\n", (951, 967), False, 'import os\n'), ((2701, 2716), 'pandas.DataFrame', 'pd.DataFrame', (['v'], {}), '(v)\n', (2713, 2716), True, 'import pandas as pd\n'), ((2935, 2946), 'time.time', 'time.time', ([], {}), '()\n', (2944, 2946), False, 'import time\n'), ((572, 591), 'utils.distributions.Uniform', 'Uniform', (['(0.001)', '(0.3)'], {}), '(0.001, 0.3)\n', (579, 591), False, 'from utils.distributions import RandInt, Uniform\n'), ((618, 637), 'utils.distributions.Uniform', 'Uniform', (['(0.5)', '(0.999)'], {}), '(0.5, 0.999)\n', (625, 637), False, 'from utils.distributions import RandInt, Uniform\n'), ((664, 681), 'utils.distributions.RandInt', 'RandInt', (['(30)', '(1000)'], {}), '(30, 1000)\n', (671, 681), False, 'from utils.distributions import RandInt, Uniform\n'), ((707, 724), 'utils.distributions.RandInt', 'RandInt', (['(30)', '(1000)'], {}), '(30, 1000)\n', (714, 724), False, 'from utils.distributions import RandInt, Uniform\n'), ((299, 322), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (320, 322), False, 'import datetime\n')]
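With the defaults above (max_iter=27, eta=3), Hyperband runs s_max + 1 brackets of successive halving, where s_max = floor(log_eta(max_iter)). The Hyperband class itself is imported rather than shown, so the following is only a sketch of the standard schedule those two flags imply, following Li et al.'s formulation:

import math

max_iter, eta = 27, 3
s_max = int(math.log(max_iter) / math.log(eta) + 1e-9)  # floor(log_eta(max_iter)) = 3
B = (s_max + 1) * max_iter                              # resource budget per bracket

for s in reversed(range(s_max + 1)):
    n = int(math.ceil(B / max_iter / (s + 1) * eta ** s))  # initial number of configs
    r = max_iter * eta ** (-s)                            # initial resource per config
    print(f"bracket s={s}: n={n} configs at r={r:g} iterations each")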
import numpy as np
import numpy.matlib

from ..core import Agent


class FPSP(Agent):
    """
    Fast Periodic Switching between high and low beta policy.
    Implementation of https://robertshorten.files.wordpress.com/2020/03/fpsr_title.pdf
    Agent returns
    [0, ..., suppression_start): beta_high
    [suppression_start, ..., switching_start): beta_low
    [switching_start, ..., END]: beta_high for steps_high followed by beta_low for steps_low steps.
    """
    def __init__(self, steps_high, steps_low, beta_high=1, beta_low=0, suppression_start=0, switching_start=0):
        self.beta_high = beta_high
        self.beta_low = beta_low
        self.steps_high = steps_high
        self.steps_low = steps_low
        self.suppression_start = suppression_start
        self.switching_start = switching_start

    def reset(self):
        self.steps = 0
        return self.beta()

    def step(self, x):
        switching_start = self._get_input(self.switching_start, x)
        steps_high = self._get_input(self.steps_high, x)
        steps_low = self._get_input(self.steps_low, x)
        y = self.beta(switching_start, steps_high, steps_low)
        self.steps += 1
        return y, 0, False, None

    def beta(self, switching_start=0, steps_high=1, steps_low=1):
        if self.steps >= switching_start:
            cycle_length = steps_high + steps_low
            cycle_step = np.maximum(0, self.steps-self.switching_start).astype(np.int32)
            cycle_step = np.mod(cycle_step, cycle_length)
            is_high_cycle = cycle_step < steps_high
            return self.beta_high if is_high_cycle else self.beta_low
        elif self.steps >= self.suppression_start:
            return self.beta_low
        else:
            return self.beta_high


class BatchFPSP(Agent):
    """
    Fast Periodic Switching between high and low beta policy.
    Implementation of https://robertshorten.files.wordpress.com/2020/03/fpsr_title.pdf
    Agent returns
    [0, ..., suppression_start): beta_high
    [suppression_start, ..., switching_start): beta_low
    [switching_start, ..., END]: beta_high for steps_high followed by beta_low for steps_low steps.
""" def __init__(self, beta_high=1, beta_low=0, steps_high=1, steps_low=1, batch_size=1, suppression_start=0, switching_start=0): self.batch_size = batch_size self.beta_high = self._to_batch(beta_high) self.beta_low = self._to_batch(beta_low) self.steps_high = steps_high self.steps_low = steps_low self.suppression_start = suppression_start self.switching_start = switching_start # variable should have shape (batch, ) + shape def _to_batch(self, x, shape=()): # return placeholder key or callable if isinstance(x, str) or callable(x): return x x_arr = np.array(x) target_shape = (self.batch_size, ) + shape if x_arr.shape == target_shape: return x_arr elif (x_arr.shape == shape): return np.matlib.repmat(x_arr.reshape(shape), self.batch_size,1).reshape(target_shape) elif len(x_arr.shape) > 0 and x_arr.shape[0] == target_shape: return x_arr.reshape(target_shape) else: print("Warning: unable to convert to target shape", x, target_shape) return x def reset(self): self.steps = 1 self.cycle_length = None return self.beta_high def step(self, x): steps_high = self._to_batch(self._get_input(self.steps_high, x)) steps_low = self._to_batch(self._get_input(self.steps_low, x)) y = self.beta(steps_high, steps_low) self.steps += 1 return y, 0, False, None def beta(self, steps_high, steps_low): is_phase_3 = self.steps >= self.switching_start is_phase_2 = (self.steps >= self.suppression_start) * (1-is_phase_3) is_phase_1 = (1-is_phase_3)*(1-is_phase_2) cycle_length = steps_high + steps_low cycle_step = np.maximum(0, (self.steps - self.switching_start)).astype(np.int32) cycle_step = np.mod(cycle_step, cycle_length) is_high_cycle = cycle_step < steps_high out = is_phase_1 * self.beta_high + \ is_phase_2 * self.beta_low + \ is_phase_3 * (is_high_cycle * self.beta_high + (1-is_high_cycle) * self.beta_low) return out class BatchOuterLoopFPSP(Agent): """ FPSP Outer supervisory loop - batch version. 
""" def __init__(self, start=0, period=7, o =-1, x_init=1, x_min=0, x_max=7, alpha_x=0.4, alpha_y=0.0, batch_size=1): self.start = start self.period = period self.x_init = x_init self.x_min = x_min self.x_max = x_max self.alpha_x = alpha_x self.alpha_y = alpha_y self.batch_size = batch_size self.o = o # control signal # variable should have shape (batch, ) + shape def _to_batch(self, x, shape=()): # return placeholder key or callable if isinstance(x, str) or callable(x): return x x_arr = np.array(x) target_shape = (self.batch_size, ) + shape if x_arr.shape == target_shape: return x_arr elif (x_arr.shape == shape): return np.matlib.repmat(x_arr.reshape(shape), self.batch_size,1).reshape(target_shape) elif len(x_arr.shape) > 0 and x_arr.shape[0] == target_shape: return x_arr.reshape(target_shape) else: print("Warning: unable to convert to target shape", x, target_shape) return x def reset(self): self.steps = 0 self.x = self._to_batch(self.x_init) self.o_k1 = np.zeros(self.batch_size) self.o_k = np.zeros(self.batch_size) return np.vstack([self.x, self.period-self.x]).T def step(self, x=None): o = self._to_batch(self._get_input(self.o, x)) # fetch observed signal self.o_k += o do_update_buffer = (np.mod(self.steps-self.start, self.period) == 0) do_update_x = do_update_buffer * (self.steps >= self.start) #print(self.steps, do_update_x*self.o_k/self.o_k1) x_ = self.x # increase duty cycle if observed infecteds is lower than in preceeding period cond_x = self.o_k < (1 - self.alpha_x) * self.o_k1 # decrease duty cycle if observed infecteds is greater than in preceeding period cond_y = self.o_k > (1 + self.alpha_y) * self.o_k1 x_ = do_update_x * (self.x + 1 * cond_x -1 * cond_y) + (1-do_update_x)*x_ # saturate cast duty cycle to fixed bounds x_ = self.mid(self.x_min, x_, self.x_max) self.x = x_ self.o_k1 = do_update_buffer * self.o_k + (1-do_update_buffer) * self.o_k1 self.o_k = (1-do_update_buffer) * self.o_k self.steps += 1 out = np.vstack([self.x, self.x_max-self.x]).T return out, 0, False, None @staticmethod def mid(a, b, c): cond_1 = b <= a cond_2 = b < c return cond_1 * a + (1-cond_1)*cond_2 * b + (1-cond_1)*(1-cond_2) * c
[ "numpy.array", "numpy.zeros", "numpy.vstack", "numpy.maximum", "numpy.mod" ]
[((3017, 3028), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3025, 3028), True, 'import numpy as np\n'), ((4315, 4347), 'numpy.mod', 'np.mod', (['cycle_step', 'cycle_length'], {}), '(cycle_step, cycle_length)\n', (4321, 4347), True, 'import numpy as np\n'), ((5523, 5534), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5531, 5534), True, 'import numpy as np\n'), ((6139, 6164), 'numpy.zeros', 'np.zeros', (['self.batch_size'], {}), '(self.batch_size)\n', (6147, 6164), True, 'import numpy as np\n'), ((6184, 6209), 'numpy.zeros', 'np.zeros', (['self.batch_size'], {}), '(self.batch_size)\n', (6192, 6209), True, 'import numpy as np\n'), ((1602, 1634), 'numpy.mod', 'np.mod', (['cycle_step', 'cycle_length'], {}), '(cycle_step, cycle_length)\n', (1608, 1634), True, 'import numpy as np\n'), ((6225, 6266), 'numpy.vstack', 'np.vstack', (['[self.x, self.period - self.x]'], {}), '([self.x, self.period - self.x])\n', (6234, 6266), True, 'import numpy as np\n'), ((6430, 6474), 'numpy.mod', 'np.mod', (['(self.steps - self.start)', 'self.period'], {}), '(self.steps - self.start, self.period)\n', (6436, 6474), True, 'import numpy as np\n'), ((7321, 7361), 'numpy.vstack', 'np.vstack', (['[self.x, self.x_max - self.x]'], {}), '([self.x, self.x_max - self.x])\n', (7330, 7361), True, 'import numpy as np\n'), ((4226, 4274), 'numpy.maximum', 'np.maximum', (['(0)', '(self.steps - self.switching_start)'], {}), '(0, self.steps - self.switching_start)\n', (4236, 4274), True, 'import numpy as np\n'), ((1513, 1561), 'numpy.maximum', 'np.maximum', (['(0)', '(self.steps - self.switching_start)'], {}), '(0, self.steps - self.switching_start)\n', (1523, 1561), True, 'import numpy as np\n')]
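Once switching starts, the three phases implemented by FPSP.beta above reduce to a square wave with duty cycle steps_high / (steps_high + steps_low). A self-contained numpy sketch of that waveform, written without the Agent base class (which lives in ..core and is not shown here); the parameter values are hypothetical:

import numpy as np

def fpsp_beta(steps, beta_high=1.0, beta_low=0.0, steps_high=2, steps_low=3,
              suppression_start=4, switching_start=10):
    """Reproduce the three-phase logic of FPSP.beta for an array of step indices."""
    steps = np.asarray(steps)
    cycle = np.mod(steps - switching_start, steps_high + steps_low)
    switched = np.where(cycle < steps_high, beta_high, beta_low)  # fast switching phase
    return np.where(steps >= switching_start, switched,
                    np.where(steps >= suppression_start, beta_low, beta_high))

print(fpsp_beta(np.arange(20)))
# [1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 1. 1. 0. 0. 0.]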
# Copyright 2019 NREL

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.

import numpy as np
import sys
import datetime
from wisdem.ccblade import CCAirfoil, CCBlade
from scipy import interpolate, gradient, integrate

# Some useful constants
now = datetime.datetime.now()
pi = np.pi
rad2deg = np.rad2deg(1)
deg2rad = np.deg2rad(1)
rpm2RadSec = 2.0*(np.pi)/60.0
RadSec2rpm = 60/(2.0 * np.pi)

class Controller():
    """
    Class Controller used to calculate controller tuning parameters

    Methods:
    -------
    tune_controller

    Parameters:
    -----------
    controller_params: dict
        Dictionary containing controller parameters that need to be defined
    """

    def __init__(self, controller_params):
        '''
        Load controller tuning parameters from input dictionary
        '''
        print('-----------------------------------------------------------------------------')
        print(' Tuning a reference wind turbine controller using NREL\'s ROSCO toolbox ')
        # print(' Developed by <NAME> for collaborative research purposes. ')
        print('-----------------------------------------------------------------------------')

        # Controller Flags
        self.LoggingLevel = controller_params['LoggingLevel']
        self.F_LPFType = controller_params['F_LPFType']
        self.F_NotchType = controller_params['F_NotchType']
        self.IPC_ControlMode = controller_params['IPC_ControlMode']
        self.VS_ControlMode = controller_params['VS_ControlMode']
        self.PC_ControlMode = controller_params['PC_ControlMode']
        self.Y_ControlMode = controller_params['Y_ControlMode']
        self.SS_Mode = controller_params['SS_Mode']
        self.WE_Mode = controller_params['WE_Mode']
        self.PS_Mode = controller_params['PS_Mode']
        self.SD_Mode = controller_params['SD_Mode']
        self.Fl_Mode = controller_params['Fl_Mode']
        self.Flp_Mode = controller_params['Flp_Mode']

        # Necessary parameters
        self.zeta_pc = controller_params['zeta_pc']
        self.omega_pc = controller_params['omega_pc']
        self.zeta_vs = controller_params['zeta_vs']
        self.omega_vs = controller_params['omega_vs']
        if self.Flp_Mode > 0:
            self.zeta_flp = controller_params['zeta_flp']
            self.omega_flp = controller_params['omega_flp']

        # Optional parameters, default to standard if not defined
        if controller_params['min_pitch']:
            self.min_pitch = controller_params['min_pitch']
        else:
            self.min_pitch = None

        if controller_params['max_pitch']:
            self.max_pitch = controller_params['max_pitch']
        else:
            self.max_pitch = 90*deg2rad      # Default to 90 degrees max pitch

        if controller_params['vs_minspd']:
            self.vs_minspd = controller_params['vs_minspd']
        else:
            self.vs_minspd = None

        if controller_params['ss_vsgain']:
            self.ss_vsgain = controller_params['ss_vsgain']
        else:
            self.ss_vsgain = 1.              # Default to 100% setpoint shift

        if controller_params['ss_pcgain']:
            self.ss_pcgain = controller_params['ss_pcgain']
        else:
            self.ss_pcgain = 0.001           # Default to 0.1% setpoint shift

        if controller_params['ss_cornerfreq']:
            self.ss_cornerfreq = controller_params['ss_cornerfreq']
        else:
            self.ss_cornerfreq = .62831850001  # Default to 10 second time constant

        if controller_params['ps_percent']:
            self.ps_percent = controller_params['ps_percent']
        else:
            self.ps_percent = 0.75           # Default to 75% peak shaving

        # critical damping if LPFType = 2
        if controller_params['F_LPFType']:
            if controller_params['F_LPFType'] == 2:
                self.F_LPFDamping = 0.7
            else:
                self.F_LPFDamping = 0.0
        else:
            self.F_LPFDamping = 0.0

        # Shutdown filter default cornering freq at 15s time constant
        if controller_params['sd_cornerfreq']:
            self.sd_cornerfreq = controller_params['sd_cornerfreq']
        else:
            self.sd_cornerfreq = 0.41888

        if controller_params['sd_maxpit']:
            self.sd_maxpit = controller_params['sd_maxpit']
        else:
            self.sd_maxpit = None

        if controller_params['flp_maxpit']:
            self.flp_maxpit = controller_params['flp_maxpit']
        else:
            if controller_params['Flp_Mode'] > 0:
                self.flp_maxpit = 10.0 * deg2rad
            else:
                self.flp_maxpit = 0.0

    def tune_controller(self, turbine):
        """
        Given a turbine model, tune a controller based on the NREL generic controller tuning process

        Parameters:
        -----------
        turbine : class
                  Turbine class containing necessary turbine information to accurately tune the controller.
        """
        # -------------Load Parameters ------------- #
        # Re-define Turbine Parameters for shorthand
        J = turbine.J                           # Total rotor inertia (kg-m^2)
        rho = turbine.rho                       # Air density (kg/m^3)
        R = turbine.rotor_radius                # Rotor radius (m)
        Ar = np.pi*R**2                         # Rotor area (m^2)
        Ng = turbine.Ng                         # Gearbox ratio (-)
        rated_rotor_speed = turbine.rated_rotor_speed  # Rated rotor speed (rad/s)

        # -------------Define Operation Points ------------- #
        TSR_rated = rated_rotor_speed*R/turbine.v_rated  # TSR at rated

        # separate wind speeds by operation regions
        v_below_rated = np.arange(turbine.v_min,turbine.v_rated,0.5)       # below rated
        v_above_rated = np.arange(turbine.v_rated+0.5,turbine.v_max,0.5)   # above rated
        v = np.concatenate((v_below_rated, v_above_rated))

        # separate TSRs by operations regions
        TSR_below_rated = np.ones(len(v_below_rated))*turbine.TSR_operational  # below rated
        TSR_above_rated = rated_rotor_speed*R/v_above_rated                     # above rated
        TSR_op = np.concatenate((TSR_below_rated, TSR_above_rated))           # operational TSRs

        # Find expected operational Cp values
        Cp_above_rated = turbine.Cp.interp_surface(0,TSR_above_rated[0])  # Cp during rated operation (not optimal). Assumes cut-in bld pitch to be 0
        Cp_op_br = np.ones(len(v_below_rated)) * turbine.Cp.max             # below rated
        Cp_op_ar = Cp_above_rated * (TSR_above_rated/TSR_rated)**3         # above rated
        Cp_op = np.concatenate((Cp_op_br, Cp_op_ar))                       # operational CPs to linearize around
        pitch_initial_rad = turbine.pitch_initial_rad
        TSR_initial = turbine.TSR_initial

        # initialize variables
        pitch_op = np.empty(len(TSR_op))
        dCp_beta = np.empty(len(TSR_op))
        dCp_TSR = np.empty(len(TSR_op))
        dCt_beta = np.empty(len(TSR_op))
        dCt_TSR = np.empty(len(TSR_op))
        Ct_op = np.empty(len(TSR_op))

        # ------------- Find Linearized State "Matrices" ------------- #
        for i in range(len(TSR_op)):
            # Find pitch angle as a function of expected operating CP for each TSR
            Cp_TSR = np.ndarray.flatten(turbine.Cp.interp_surface(turbine.pitch_initial_rad, TSR_op[i]))  # all Cp values for a given TSR
            Cp_op[i] = np.clip(Cp_op[i], np.min(Cp_TSR), np.max(Cp_TSR))  # saturate Cp values to be on Cp surface
            f_cp_pitch = interpolate.interp1d(Cp_TSR,pitch_initial_rad)       # interpolate function for Cp(tsr) values
            pitch_op[i] = f_cp_pitch(Cp_op[i])                               # expected operation blade pitch values
            dCp_beta[i], dCp_TSR[i] = turbine.Cp.interp_gradient(pitch_op[i],TSR_op[i])  # gradients of Cp surface in Beta and TSR directions
            dCt_beta[i], dCt_TSR[i] = turbine.Ct.interp_gradient(pitch_op[i],TSR_op[i])  # gradients of Ct surface in Beta and TSR directions

            # Thrust
            Ct_TSR = np.ndarray.flatten(turbine.Ct.interp_surface(turbine.pitch_initial_rad, TSR_op[i]))  # all Ct values for a given TSR
            f_ct = interpolate.interp1d(pitch_initial_rad,Ct_TSR)
            Ct_op[i] = f_ct(pitch_op[i])
            Ct_op[i] = np.clip(Ct_op[i], np.min(Ct_TSR), np.max(Ct_TSR))  # saturate Ct values to be on Ct surface

        # Define minimum pitch saturation to be at Cp-maximizing pitch angle if not specifically defined
        if not self.min_pitch:
            self.min_pitch = 0.0
        self.min_pitch = max(self.min_pitch,pitch_op[0])

        # Full Cx surface gradients
        dCp_dbeta = dCp_beta/np.diff(pitch_initial_rad)[0]
        dCp_dTSR = dCp_TSR/np.diff(TSR_initial)[0]
        dCt_dbeta = dCt_beta/np.diff(pitch_initial_rad)[0]
        dCt_dTSR = dCt_TSR/np.diff(TSR_initial)[0]

        # Linearized system derivatives
        dtau_dbeta = Ng/2*rho*Ar*R*(1/TSR_op)*dCp_dbeta*v**2
        dtau_dlambda = Ng/2*rho*Ar*R*v**2*(1/(TSR_op**2))*(dCp_dTSR*TSR_op - Cp_op)
        dlambda_domega = R/v/Ng
        dtau_domega = dtau_dlambda*dlambda_domega
        dlambda_dv = -(TSR_op/v)

        Pi_beta = 1/2 * rho * Ar * v**2 * dCt_dbeta
        Pi_omega = 1/2 * rho * Ar * R * v * dCt_dTSR
        Pi_wind = 1/2 * rho * Ar * v**2 * dCt_dTSR * dlambda_dv + rho * Ar * v * Ct_op

        # Second order system coefficients
        A = dtau_domega/J             # Plant pole
        B_tau = -Ng**2/J              # Torque input
        B_beta = dtau_dbeta/J         # Blade pitch input

        # Wind Disturbance Input
        dtau_dv = (0.5 * rho * Ar * 1/rated_rotor_speed) * (dCp_dTSR*dlambda_dv*v**3 + Cp_op*3*v**2)
        B_wind = dtau_dv/J            # wind speed input - currently unused

        # separate and define below and above rated parameters
        A_vs = A[0:len(v_below_rated)]          # below rated
        A_pc = A[len(v_below_rated):len(v)]     # above rated
        B_tau = B_tau * np.ones(len(v))

        # -- Find gain schedule --
        self.pc_gain_schedule = ControllerTypes()
        self.pc_gain_schedule.second_order_PI(self.zeta_pc, self.omega_pc,A_pc,B_beta[len(v_below_rated):len(v)],linearize=True,v=v_above_rated)
        self.vs_gain_schedule = ControllerTypes()
        self.vs_gain_schedule.second_order_PI(self.zeta_vs, self.omega_vs,A_vs,B_tau[0:len(v_below_rated)],linearize=False,v=v_below_rated)

        # -- Find K for Komega_g^2 --
        self.vs_rgn2K = (pi*rho*R**5.0 * turbine.Cp.max) / (2.0 * turbine.Cp.TSR_opt**3 * Ng**3)
        self.vs_refspd = 
min(turbine.TSR_operational * turbine.v_rated/R, turbine.rated_rotor_speed) * Ng # -- Define some setpoints -- # minimum rotor speed saturation limits if self.vs_minspd: self.vs_minspd = np.maximum(self.vs_minspd, (turbine.TSR_operational * turbine.v_min / turbine.rotor_radius) * Ng) else: self.vs_minspd = (turbine.TSR_operational * turbine.v_min / turbine.rotor_radius) * Ng self.pc_minspd = self.vs_minspd # max pitch angle for shutdown if self.sd_maxpit: self.sd_maxpit = self.sd_maxpit else: self.sd_maxpit = pitch_op[-1] # Store some variables self.v = v # Wind speed (m/s) self.v_below_rated = v_below_rated self.pitch_op = pitch_op self.pitch_op_pc = pitch_op[len(v_below_rated):len(v)] self.TSR_op = TSR_op self.A = A self.B_beta = B_beta self.B_tau = B_tau self.B_wind = B_wind self.TSR_op = TSR_op self.omega_op = TSR_op*v/R self.Pi_omega = Pi_omega self.Pi_beta = Pi_beta self.Pi_wind = Pi_wind # - Might want these to debug - # self.Cp_op = Cp_op # --- Minimum pitch saturation --- self.ps_min_bld_pitch = np.ones(len(self.pitch_op)) * self.min_pitch self.ps = ControllerBlocks() if self.PS_Mode == 1: # Peak Shaving self.ps.peak_shaving(self, turbine) elif self.PS_Mode == 2: # Cp-maximizing minimum pitch saturation self.ps.min_pitch_saturation(self,turbine) elif self.PS_Mode == 3: # Peak shaving and Cp-maximizing minimum pitch saturation self.ps.peak_shaving(self, turbine) self.ps.min_pitch_saturation(self,turbine) # --- Floating feedback term --- if self.Fl_Mode == 1: # Floating feedback Kp_float = (dtau_dv/dtau_dbeta) * turbine.TowerHt * Ng self.Kp_float = Kp_float[len(v_below_rated)] # Turn on the notch filter if floating self.F_NotchType = 2 # And check for .yaml input inconsistencies if turbine.twr_freq == 0.0 or turbine.ptfm_freq == 0.0: print('WARNING: twr_freq and ptfm_freq should be defined for floating turbine control!!') else: self.Kp_float = 0.0 # Flap actuation if self.Flp_Mode >= 1: self.flp_angle = 0.0 try: self.tune_flap_controller(turbine) except AttributeError: print('ERROR: If Flp_Mode > 0, you need to have blade information loaded in the turbine object.') raise except UnboundLocalError: print('ERROR: You are attempting to tune a flap controller for a blade without flaps!') raise else: self.flp_angle = 0.0 self.Ki_flap = np.array([0.0]) self.Kp_flap = np.array([0.0]) def tune_flap_controller(self,turbine): ''' Tune controller for distributed aerodynamic control Parameters: ----------- turbine : class Turbine class containing necessary turbine information to accurately tune the controller. 
''' # Find blade aerodynamic coefficients v_rel = [] phi_vec = [] alpha=[] for i, _ in enumerate(self.v): turbine.cc_rotor.induction_inflow=True # Axial and tangential inductions a, ap, alpha0, cl, cd = turbine.cc_rotor.distributedAeroLoads(self.v[i], self.omega_op[i], self.pitch_op[i], 0.0) # Relative windspeed v_rel.append([np.sqrt(self.v[i]**2*(1-a)**2 + self.omega_op[i]**2*turbine.span**2*(1-ap)**2)]) # Inflow wind direction phi_vec.append(self.pitch_op[i] + turbine.twist*deg2rad) # Lift and drag coefficients Cl0 = np.zeros_like(turbine.af_data) Cd0 = np.zeros_like(turbine.af_data) Clp = np.zeros_like(turbine.af_data) Cdp = np.zeros_like(turbine.af_data) Clm = np.zeros_like(turbine.af_data) Cdm = np.zeros_like(turbine.af_data) for i,section in enumerate(turbine.af_data): # assume airfoil section as AOA of zero for slope calculations - for now a0_ind = section[0]['Alpha'].index(np.min(np.abs(section[0]['Alpha']))) # Coefficients if section[0]['NumTabs'] == 3: # sections with flaps Clm[i,] = section[0]['Cl'][a0_ind] Cdm[i,] = section[0]['Cd'][a0_ind] Cl0[i,] = section[1]['Cl'][a0_ind] Cd0[i,] = section[1]['Cd'][a0_ind] Clp[i,] = section[2]['Cl'][a0_ind] Cdp[i,] = section[2]['Cd'][a0_ind] Ctrl_flp = float(section[2]['Ctrl']) else: # sections without flaps Cl0[i,] = Clp[i,] = Clm[i,] = section[0]['Cl'][a0_ind] Cd0[i,] = Cdp[i,] = Cdm[i,] = section[0]['Cd'][a0_ind] Ctrl = float(section[0]['Ctrl']) # Find slopes Kcl = (Clp - Cl0)/( (Ctrl_flp-Ctrl)*deg2rad ) Kcd = (Cdp - Cd0)/( (Ctrl_flp-Ctrl)*deg2rad ) # Find integrated constants kappa = np.zeros(len(v_rel)) C1 = np.zeros(len(v_rel)) C2 = np.zeros(len(v_rel)) for i, (v_sec,phi) in enumerate(zip(v_rel, phi_vec)): C1[i] = integrate.trapz(0.5 * turbine.rho * turbine.chord * v_sec[0]**2 * turbine.span * Kcl * np.cos(phi)) C2[i] = integrate.trapz(0.5 * turbine.rho * turbine.chord * v_sec[0]**2 * turbine.span * Kcd * np.sin(phi)) kappa[i]=C1[i]+C2[i] # ------ Controller tuning ------- # Open loop blade response zetaf = turbine.bld_flapwise_damp omegaf = turbine.bld_flapwise_freq # Desired Closed loop response # zeta = self.zeta_flp # omega = 4.6/(ts*zeta) # PI Gains if (self.zeta_flp == 0 or self.omega_flp == 0) or (not self.zeta_flp or not self.omega_flp): sys.exit('ERROR! 
--- Zeta and Omega flap must be nonzero for Flp_Mode >= 1 ---') self.Kp_flap = (2*self.zeta_flp*self.omega_flp - 2*zetaf*omegaf)/(kappa*omegaf**2) self.Ki_flap = (self.omega_flp**2 - omegaf**2)/(kappa*omegaf**2) class ControllerBlocks(): ''' Class ControllerBlocks defines tuning parameters for additional controller features or "blocks" Methods: -------- peak_shaving ''' def __init__(self): pass def peak_shaving(self,controller, turbine): ''' Define minimum blade pitch angle for peak shaving routine based on a maximum allowable thrust Parameters: ----------- controller: class Controller class containing controller operational information turbine: class Turbine class containing necessary wind turbine information for controller tuning ''' # Re-define Turbine Parameters for shorthand J = turbine.J # Total rotor inertial (kg-m^2) rho = turbine.rho # Air density (kg/m^3) R = turbine.rotor_radius # Rotor radius (m) A = np.pi*R**2 # Rotor area (m^2) Ng = turbine.Ng # Gearbox ratio (-) rated_rotor_speed = turbine.rated_rotor_speed # Rated rotor speed (rad/s) # Initialize some arrays Ct_op = np.empty(len(controller.TSR_op),dtype='float64') Ct_max = np.empty(len(controller.TSR_op),dtype='float64') beta_min = np.empty(len(controller.TSR_op),dtype='float64') # Find unshaved rotor thurst coefficients and associated rotor thrusts # for i in len(controller.TSR_op): for i in range(len(controller.TSR_op)): Ct_op[i] = turbine.Ct.interp_surface(controller.pitch_op[i],controller.TSR_op[i]) T = 0.5 * rho * A * controller.v**2 * Ct_op # Define minimum max thrust and initialize pitch_min Tmax = controller.ps_percent * np.max(T) pitch_min = np.ones(len(controller.pitch_op)) * controller.min_pitch # Modify pitch_min if max thrust exceeds limits for i in range(len(controller.TSR_op)): # Find Ct values for operational TSR # Ct_tsr = turbine.Ct.interp_surface(turbine.pitch_initial_rad, controller.TSR_op[i]) Ct_tsr = turbine.Ct.interp_surface(turbine.pitch_initial_rad,controller.TSR_op[i]) # Define max Ct values Ct_max[i] = Tmax/(0.5 * rho * A * controller.v[i]**2) if T[i] > Tmax: Ct_op[i] = Ct_max[i] else: Ct_max[i] = np.minimum( np.max(Ct_tsr), Ct_max[i]) # Define minimum pitch angle f_pitch_min = interpolate.interp1d(Ct_tsr, turbine.pitch_initial_rad, bounds_error=False, fill_value=(turbine.pitch_initial_rad[0],turbine.pitch_initial_rad[-1])) pitch_min[i] = f_pitch_min(Ct_max[i]) controller.ps_min_bld_pitch = pitch_min # save some outputs for analysis or future work self.Tshaved = 0.5 * rho * A * controller.v**2 * Ct_op self.pitch_min = pitch_min self.v = controller.v self.Ct_max = Ct_max self.Ct_op = Ct_op self.T = T def min_pitch_saturation(self, controller, turbine): # Find TSR associated with minimum rotor speed TSR_at_minspeed = (controller.pc_minspd/turbine.Ng) * turbine.rotor_radius / controller.v_below_rated for i in range(len(TSR_at_minspeed)): if TSR_at_minspeed[i] > controller.TSR_op[i]: controller.TSR_op[i] = TSR_at_minspeed[i] # Initialize some arrays Cp_op = np.empty(len(turbine.pitch_initial_rad),dtype='float64') min_pitch = np.empty(len(TSR_at_minspeed),dtype='float64') # Find Cp-maximizing minimum pitch schedule # Find Cp coefficients at below-rated tip speed ratios Cp_op = turbine.Cp.interp_surface(turbine.pitch_initial_rad,TSR_at_minspeed[i]) Cp_max = max(Cp_op) f_pitch_min = interpolate.interp1d(Cp_op, turbine.pitch_initial_rad, bounds_error=False, fill_value=(turbine.pitch_initial_rad[0],turbine.pitch_initial_rad[-1])) min_pitch[i] = f_pitch_min(Cp_max) # modify existing minimum pitch schedule 
            controller.ps_min_bld_pitch[i] = np.maximum(controller.ps_min_bld_pitch[i], min_pitch[i])
        else:
            return

class ControllerTypes():
    '''
    Class ControllerTypes used to define any types of controllers that can be tuned.
    Generally, calculates gains based on some pre-defined tuning parameters.

    Methods:
    --------
    second_order_PI

    '''
    def __init__(self):
        pass

    def second_order_PI(self, zeta, om_n, A, B, linearize=False, v=None):
        '''
        Define proportional integral gain schedule for a closed loop system with a standard second-order form.

        Parameters:
        -----------
        zeta : float (-)
            Desired damping ratio
        om_n : float (rad/s)
            Desired natural frequency
        A : array_like (1/s)
            Plant poles (state transition matrix)
        B : array_like (varies)
            Plant numerators (input matrix)
        linearize : bool, optional
            If True, find a gain schedule based on a linearized plant.
        v : array_like (m/s)
            Wind speeds for the linearized plant model, if desired.
        '''
        # Linearize system coefficients w.r.t. wind speed if desired
        if linearize:
            pA = np.polyfit(v, A, 1)
            pB = np.polyfit(v, B, 1)
            A = pA[0]*v + pA[1]
            B = pB[0]*v + pB[1]

        # Calculate gain schedule
        self.Kp = 1/B * (2*zeta*om_n + A)
        self.Ki = om_n**2/B
[ "numpy.abs", "numpy.sqrt", "numpy.polyfit", "numpy.min", "numpy.diff", "scipy.interpolate.interp1d", "numpy.max", "datetime.datetime.now", "numpy.deg2rad", "numpy.array", "numpy.cos", "numpy.concatenate", "sys.exit", "numpy.sin", "numpy.maximum", "numpy.rad2deg", "numpy.zeros_like", ...
[((731, 754), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (752, 754), False, 'import datetime\n'), ((776, 789), 'numpy.rad2deg', 'np.rad2deg', (['(1)'], {}), '(1)\n', (786, 789), True, 'import numpy as np\n'), ((800, 813), 'numpy.deg2rad', 'np.deg2rad', (['(1)'], {}), '(1)\n', (810, 813), True, 'import numpy as np\n'), ((6337, 6383), 'numpy.arange', 'np.arange', (['turbine.v_min', 'turbine.v_rated', '(0.5)'], {}), '(turbine.v_min, turbine.v_rated, 0.5)\n', (6346, 6383), True, 'import numpy as np\n'), ((6432, 6484), 'numpy.arange', 'np.arange', (['(turbine.v_rated + 0.5)', 'turbine.v_max', '(0.5)'], {}), '(turbine.v_rated + 0.5, turbine.v_max, 0.5)\n', (6441, 6484), True, 'import numpy as np\n'), ((6519, 6565), 'numpy.concatenate', 'np.concatenate', (['(v_below_rated, v_above_rated)'], {}), '((v_below_rated, v_above_rated))\n', (6533, 6565), True, 'import numpy as np\n'), ((6821, 6871), 'numpy.concatenate', 'np.concatenate', (['(TSR_below_rated, TSR_above_rated)'], {}), '((TSR_below_rated, TSR_above_rated))\n', (6835, 6871), True, 'import numpy as np\n'), ((7299, 7335), 'numpy.concatenate', 'np.concatenate', (['(Cp_op_br, Cp_op_ar)'], {}), '((Cp_op_br, Cp_op_ar))\n', (7313, 7335), True, 'import numpy as np\n'), ((15563, 15593), 'numpy.zeros_like', 'np.zeros_like', (['turbine.af_data'], {}), '(turbine.af_data)\n', (15576, 15593), True, 'import numpy as np\n'), ((15608, 15638), 'numpy.zeros_like', 'np.zeros_like', (['turbine.af_data'], {}), '(turbine.af_data)\n', (15621, 15638), True, 'import numpy as np\n'), ((15653, 15683), 'numpy.zeros_like', 'np.zeros_like', (['turbine.af_data'], {}), '(turbine.af_data)\n', (15666, 15683), True, 'import numpy as np\n'), ((15698, 15728), 'numpy.zeros_like', 'np.zeros_like', (['turbine.af_data'], {}), '(turbine.af_data)\n', (15711, 15728), True, 'import numpy as np\n'), ((15743, 15773), 'numpy.zeros_like', 'np.zeros_like', (['turbine.af_data'], {}), '(turbine.af_data)\n', (15756, 15773), True, 'import numpy as np\n'), ((15788, 15818), 'numpy.zeros_like', 'np.zeros_like', (['turbine.af_data'], {}), '(turbine.af_data)\n', (15801, 15818), True, 'import numpy as np\n'), ((8262, 8309), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['Cp_TSR', 'pitch_initial_rad'], {}), '(Cp_TSR, pitch_initial_rad)\n', (8282, 8309), False, 'from scipy import interpolate, gradient, integrate\n'), ((8975, 9022), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['pitch_initial_rad', 'Ct_TSR'], {}), '(pitch_initial_rad, Ct_TSR)\n', (8995, 9022), False, 'from scipy import interpolate, gradient, integrate\n'), ((11679, 11778), 'numpy.maximum', 'np.maximum', (['self.vs_minspd', '(turbine.TSR_operational * turbine.v_min / turbine.rotor_radius * Ng)'], {}), '(self.vs_minspd, turbine.TSR_operational * turbine.v_min /\n turbine.rotor_radius * Ng)\n', (11689, 11778), True, 'import numpy as np\n'), ((14539, 14554), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (14547, 14554), True, 'import numpy as np\n'), ((14582, 14597), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (14590, 14597), True, 'import numpy as np\n'), ((17781, 17866), 'sys.exit', 'sys.exit', (['"""ERROR! --- Zeta and Omega flap must be nonzero for Flp_Mode >= 1 ---"""'], {}), "('ERROR! 
--- Zeta and Omega flap must be nonzero for Flp_Mode >= 1 ---'\n )\n", (17789, 17866), False, 'import sys\n'), ((19874, 19883), 'numpy.max', 'np.max', (['T'], {}), '(T)\n', (19880, 19883), True, 'import numpy as np\n'), ((20626, 20779), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['Ct_tsr', 'turbine.pitch_initial_rad'], {'bounds_error': '(False)', 'fill_value': '(turbine.pitch_initial_rad[0], turbine.pitch_initial_rad[-1])'}), '(Ct_tsr, turbine.pitch_initial_rad, bounds_error=False,\n fill_value=(turbine.pitch_initial_rad[0], turbine.pitch_initial_rad[-1]))\n', (20646, 20779), False, 'from scipy import interpolate, gradient, integrate\n'), ((23629, 23648), 'numpy.polyfit', 'np.polyfit', (['v', 'A', '(1)'], {}), '(v, A, 1)\n', (23639, 23648), True, 'import numpy as np\n'), ((23664, 23683), 'numpy.polyfit', 'np.polyfit', (['v', 'B', '(1)'], {}), '(v, B, 1)\n', (23674, 23683), True, 'import numpy as np\n'), ((8157, 8171), 'numpy.min', 'np.min', (['Cp_TSR'], {}), '(Cp_TSR)\n', (8163, 8171), True, 'import numpy as np\n'), ((8173, 8187), 'numpy.max', 'np.max', (['Cp_TSR'], {}), '(Cp_TSR)\n', (8179, 8187), True, 'import numpy as np\n'), ((9110, 9124), 'numpy.min', 'np.min', (['Ct_TSR'], {}), '(Ct_TSR)\n', (9116, 9124), True, 'import numpy as np\n'), ((9126, 9140), 'numpy.max', 'np.max', (['Ct_TSR'], {}), '(Ct_TSR)\n', (9132, 9140), True, 'import numpy as np\n'), ((9490, 9516), 'numpy.diff', 'np.diff', (['pitch_initial_rad'], {}), '(pitch_initial_rad)\n', (9497, 9516), True, 'import numpy as np\n'), ((9550, 9570), 'numpy.diff', 'np.diff', (['TSR_initial'], {}), '(TSR_initial)\n', (9557, 9570), True, 'import numpy as np\n'), ((9605, 9631), 'numpy.diff', 'np.diff', (['pitch_initial_rad'], {}), '(pitch_initial_rad)\n', (9612, 9631), True, 'import numpy as np\n'), ((9665, 9685), 'numpy.diff', 'np.diff', (['TSR_initial'], {}), '(TSR_initial)\n', (9672, 9685), True, 'import numpy as np\n'), ((22053, 22205), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['Cp_op', 'turbine.pitch_initial_rad'], {'bounds_error': '(False)', 'fill_value': '(turbine.pitch_initial_rad[0], turbine.pitch_initial_rad[-1])'}), '(Cp_op, turbine.pitch_initial_rad, bounds_error=False,\n fill_value=(turbine.pitch_initial_rad[0], turbine.pitch_initial_rad[-1]))\n', (22073, 22205), False, 'from scipy import interpolate, gradient, integrate\n'), ((22375, 22431), 'numpy.maximum', 'np.maximum', (['controller.ps_min_bld_pitch[i]', 'min_pitch[i]'], {}), '(controller.ps_min_bld_pitch[i], min_pitch[i])\n', (22385, 22431), True, 'import numpy as np\n'), ((15325, 15428), 'numpy.sqrt', 'np.sqrt', (['(self.v[i] ** 2 * (1 - a) ** 2 + self.omega_op[i] ** 2 * turbine.span ** 2 *\n (1 - ap) ** 2)'], {}), '(self.v[i] ** 2 * (1 - a) ** 2 + self.omega_op[i] ** 2 * turbine.\n span ** 2 * (1 - ap) ** 2)\n', (15332, 15428), True, 'import numpy as np\n'), ((16020, 16047), 'numpy.abs', 'np.abs', (["section[0]['Alpha']"], {}), "(section[0]['Alpha'])\n", (16026, 16047), True, 'import numpy as np\n'), ((17205, 17216), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (17211, 17216), True, 'import numpy as np\n'), ((17325, 17336), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (17331, 17336), True, 'import numpy as np\n'), ((20532, 20546), 'numpy.max', 'np.max', (['Ct_tsr'], {}), '(Ct_tsr)\n', (20538, 20546), True, 'import numpy as np\n')]
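The gain formulas in `ControllerTypes.second_order_PI` above are plain pole placement. A minimal numerical check of that algebra, assuming a scalar plant `x' = A*x + B*u` regulated by negative PI feedback `u = -(Kp*x + Ki*integral(x))`; the values of `A`, `B`, `zeta`, and `om_n` below are made up for illustration and do not come from any turbine model:

import numpy as np

# The closed loop of x' = A*x + B*u with u = -(Kp*x + Ki*int(x)) has
# characteristic polynomial s^2 + (B*Kp - A)*s + B*Ki, so the gains
# below (same formulas as second_order_PI) land the poles at the
# requested damping ratio zeta and natural frequency om_n.
zeta, om_n = 0.7, 0.6      # hypothetical tuning targets
A, B = -0.2, 1.5e-7        # hypothetical plant pole and input gain

Kp = 1.0 / B * (2.0 * zeta * om_n + A)
Ki = om_n**2 / B

poles = np.roots([1.0, B * Kp - A, B * Ki])
target = -zeta * om_n + 1j * om_n * np.sqrt(1.0 - zeta**2)
assert np.allclose(sorted(poles, key=np.imag), [np.conj(target), target])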
import numpy as np import pyaudio import time, sys, math from collections import deque from src.utils import * class Stream_Reader: """ The Stream_Reader continuously reads data from a selected sound source using PyAudio Arguments: device: int or None: Select which audio stream to read . rate: float or None: Sample rate to use. Defaults to something supported. updatesPerSecond: int: How often to record new data. """ def __init__(self, device = None, rate = None, updates_per_second = 1000, FFT_window_size = None, verbose = False): self.rate = rate self.verbose = verbose self.pa = pyaudio.PyAudio() #Temporary variables #hacks! self.update_window_n_frames = 1024 #Don't remove this, needed for device testing! self.data_buffer = None self.device = device if self.device is None: self.device = self.input_device() if self.rate is None: self.rate = self.valid_low_rate(self.device) self.update_window_n_frames = round_up_to_even(self.rate / updates_per_second) self.updates_per_second = self.rate / self.update_window_n_frames self.info = self.pa.get_device_info_by_index(self.device) self.data_capture_delays = deque(maxlen=20) self.new_data = False if self.verbose: self.data_capture_delays = deque(maxlen=20) self.num_data_captures = 0 self.stream = self.pa.open( format = pyaudio.paInt16, channels = 1, rate = self.rate, input=True, frames_per_buffer = self.update_window_n_frames, stream_callback=self.non_blocking_stream_read) print("\n##################################################################################################") print("\nDefaulted to using first working mic, Running on:") self.print_mic_info(self.device) print("\n##################################################################################################") print('Recording from %s at %d Hz\nUsing (non-overlapping) data-windows of %d samples (updating at %.2ffps)' %(self.info["name"],self.rate, self.update_window_n_frames, self.updates_per_second)) def non_blocking_stream_read(self, in_data, frame_count, time_info, status): if self.verbose: start = time.time() if self.data_buffer is not None: self.data_buffer.append_data(np.frombuffer(in_data, dtype=np.int16)) self.new_data = True if self.verbose: self.num_data_captures += 1 self.data_capture_delays.append(time.time() - start) return in_data, pyaudio.paContinue def stream_start(self, data_windows_to_buffer = None): self.data_windows_to_buffer = data_windows_to_buffer if data_windows_to_buffer is None: self.data_windows_to_buffer = int(self.updates_per_second / 2) #By default, buffer 0.5 second of audio else: self.data_windows_to_buffer = data_windows_to_buffer self.data_buffer = numpy_data_buffer(self.data_windows_to_buffer, self.update_window_n_frames) print("\n--🎙 -- Starting live audio stream...\n") self.stream.start_stream() self.stream_start_time = time.time() def terminate(self): print("👋 Sending stream termination command...") self.stream.stop_stream() self.stream.close() self.pa.terminate() def valid_low_rate(self, device, test_rates = [44100, 22050]): """Set the rate to the lowest supported audio rate.""" for testrate in test_rates: if self.test_device(device, rate=testrate): return testrate #If none of the test_rates worked, try the default rate: self.info = self.pa.get_device_info_by_index(device) default_rate = int(self.info["defaultSampleRate"]) if self.test_device(device, rate=default_rate): return default_rate print("SOMETHING'S WRONG! 
I can't figure out a good sample-rate for DEVICE =>", device) return default_rate def test_device(self, device, rate=None): """given a device ID and a rate, return True/False if it's valid.""" try: self.info = self.pa.get_device_info_by_index(device) if not self.info["maxInputChannels"] > 0: return False if rate is None: rate = int(self.info["defaultSampleRate"]) stream = self.pa.open( format = pyaudio.paInt16, channels = 1, input_device_index=device, frames_per_buffer=self.update_window_n_frames, rate = rate, input = True) stream.close() return True except Exception as e: #print(e) return False def input_device(self): """ See which devices can be opened for microphone input. Return the first valid device """ mics=[] for device in range(self.pa.get_device_count()): if self.test_device(device): mics.append(device) if len(mics) == 0: print("No working microphone devices found!") sys.exit() print("Found %d working microphone device(s): " % len(mics)) for mic in mics: self.print_mic_info(mic) return mics[0] def print_mic_info(self, mic): mic_info = self.pa.get_device_info_by_index(mic) print('\nMIC %s:' %(str(mic))) for k, v in sorted(mic_info.items()): print("%s: %s" %(k, v))
[ "collections.deque", "sys.exit", "numpy.frombuffer", "pyaudio.PyAudio", "time.time" ]
[((712, 729), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (727, 729), False, 'import pyaudio\n'), ((1348, 1364), 'collections.deque', 'deque', ([], {'maxlen': '(20)'}), '(maxlen=20)\n', (1353, 1364), False, 'from collections import deque\n'), ((3414, 3425), 'time.time', 'time.time', ([], {}), '()\n', (3423, 3425), False, 'import time, sys, math\n'), ((1459, 1475), 'collections.deque', 'deque', ([], {'maxlen': '(20)'}), '(maxlen=20)\n', (1464, 1475), False, 'from collections import deque\n'), ((2480, 2491), 'time.time', 'time.time', ([], {}), '()\n', (2489, 2491), False, 'import time, sys, math\n'), ((5445, 5455), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5453, 5455), False, 'import time, sys, math\n'), ((2575, 2613), 'numpy.frombuffer', 'np.frombuffer', (['in_data'], {'dtype': 'np.int16'}), '(in_data, dtype=np.int16)\n', (2588, 2613), True, 'import numpy as np\n'), ((2758, 2769), 'time.time', 'time.time', ([], {}), '()\n', (2767, 2769), False, 'import time, sys, math\n')]
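`valid_low_rate`/`test_device` above probe sample rates by opening and closing a throwaway stream. A smaller sketch of the same probe using PyAudio's `is_format_supported`, which raises `ValueError` for rejected combinations; the helper name and candidate list are illustrative, not part of the class:

import pyaudio

def probe_input_rates(device_index, candidates=(44100, 22050, 16000)):
    """Return the candidate rates a device accepts for 16-bit mono input."""
    pa = pyaudio.PyAudio()
    supported = []
    try:
        for rate in candidates:
            try:
                pa.is_format_supported(rate,
                                       input_device=device_index,
                                       input_channels=1,
                                       input_format=pyaudio.paInt16)
                supported.append(rate)
            except ValueError:
                pass  # this rate/format combination is rejected
    finally:
        pa.terminate()
    return supported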
import numpy as np
from sklearn.exceptions import NotFittedError
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors._base import _get_weights

from .base import BaseDetector


# TODO: Support other distance metrics
class KDN(BaseDetector):
    """
    For each sample, the percentage of its nearest neighbors with the same
    label serves as its `conf_score`. Euclidean distance is used to find
    the nearest neighbors.

    See :cite:`ensih18,ih14` for details.

    Parameters
    --------------
    n_neighbors : int, default=5
        No of nearest neighbors to use to compute `conf_score`

    weight : string, default='uniform'
        weight function used in prediction. If 'uniform', all points in each
        neighborhood are weighted equally. If 'distance', weights points by
        the inverse of their distance.

    n_jobs : int, default=1
        No of parallel cpu cores to use
    """

    def __init__(self, n_neighbors=5, weight='uniform', n_jobs=1):
        super().__init__(n_jobs=n_jobs, random_state=None)
        self.n_neighbors = n_neighbors
        self.weight = weight

    def _get_kdn(self, knn, y):
        # (n_samples, K): ids & distances of the K nearest neighbors of every sample in X
        dist, kid = knn.kneighbors()
        weights = _get_weights(dist, self.weight)
        if weights is None:
            weights = np.ones_like(kid)
        agreement = y[kid] == y.reshape(-1, 1)
        return np.average(agreement, axis=1, weights=weights)

    def detect(self, X, y):
        X, y = self._validate_data(X, y)  # , accept_sparse=True
        knn = KNeighborsClassifier(n_neighbors=self.n_neighbors,
                                   weights=self.weight, n_jobs=self.n_jobs).fit(X, y)
        return self._get_kdn(knn, y)


class ForestKDN(KDN):
    """
    Like KDN, but a trained Random Forest is used to compute pairwise
    similarity. Specifically, for a pair of samples, their similarity is the
    percentage of times they belong to the same leaf.

    See :cite:`forestkdn17` for details.

    Parameters
    -------------------
    n_neighbors : int, default=5
        No of nearest neighbors to use to compute `conf_score`

    n_estimators : int, default=100
        No of trees in Random Forest.

    max_leaf_nodes : int, default=64
        Maximum no of leaves in each tree.

    weight : string, default='distance'
        weight function used in prediction. If 'distance', weights points by
        the inverse of their distance. If 'uniform', all points in each
        neighborhood are weighted equally.

    n_jobs : int, default=1
        No of parallel cpu cores to use

    random_state : int, default=None
        Set this value for reproducibility
    """

    def __init__(self, n_neighbors=5, n_estimators=100, max_leaf_nodes=64,
                 weight='distance', n_jobs=1, random_state=None):
        super().__init__(n_neighbors=n_neighbors, weight=weight, n_jobs=n_jobs)
        self.n_estimators = n_estimators
        self.max_leaf_nodes = max_leaf_nodes
        self.random_state = random_state

    def detect(self, X, y):
        X, y = self._check_everything(X, y)
        forest = RandomForestClassifier(
            n_estimators=self.n_estimators, max_leaf_nodes=self.max_leaf_nodes,
            n_jobs=self.n_jobs, random_state=self.random_state).fit(X, y)
        Xs = forest.apply(X)
        knn = KNeighborsClassifier(
            n_neighbors=self.n_neighbors, metric='hamming', algorithm='brute',
            weights=self.weight, n_jobs=self.n_jobs).fit(Xs, y)
        return self._get_kdn(knn, y)


# TODO: rename this class (?)
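# Note on ForestKDN above: forest.apply(X) returns an
# (n_samples, n_trees) matrix of leaf indices, and the Hamming distance
# between two rows is the fraction of trees in which the samples land in
# different leaves -- i.e. one minus the shared-leaf similarity the
# docstring describes, which is why the kNN uses metric='hamming' with
# algorithm='brute'. Illustrative only (names are hypothetical):
#
#   Xs = forest.apply(X)               # (n_samples, n_trees) leaf ids
#   shared = (Xs[0] == Xs[1]).mean()   # fraction of shared leaves
#   hamming = 1.0 - shared             # the distance brute-force kNN sees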
class HybridKDN(KDN):
    def __init__(self, classifier, n_neighbors=5, weight='uniform', n_jobs=1):
        super().__init__(n_neighbors=n_neighbors, weight=weight, n_jobs=n_jobs)
        self.classifier = classifier

    def detect(self, X, y):
        X, y = self._validate_data(X, y)
        try:
            # classifier may already be trained
            yp = self.classifier.predict(X)
        except NotFittedError:
            yp = self.classifier.fit(X, y).predict(X)
        # use the configured neighborhood size, like the other detectors
        knn = KNeighborsClassifier(n_neighbors=self.n_neighbors,
                                   n_jobs=self.n_jobs).fit(X, y)
        _, kid = knn.kneighbors()
        agr = yp[kid] == y[kid]
        return agr.sum(axis=1) / knn.n_neighbors


class RkDN(KDN):
    __doc__ = KDN.__doc__

    def detect(self, X, y):
        X, y = self._validate_data(X, y)
        knn = KNeighborsClassifier(n_neighbors=self.n_neighbors,
                                   weights=self.weight, n_jobs=self.n_jobs).fit(X, y)
        _, kid = knn.kneighbors()
        N = len(X)
        M = np.zeros((N, N), dtype='bool')
        cols = np.zeros_like(kid) + np.arange(0, N).reshape(-1, 1)
        M[kid.reshape(-1), cols.reshape(-1)] = 1
        label_agr = y.reshape(1, -1) == y.reshape(-1, 1)
        agr = M & label_agr
        m = M.sum(axis=1).astype('float')
        # Outliers that don't belong to anybody's NN list get conf_score=0
        return np.divide(agr.sum(axis=1), m, out=np.zeros_like(m), where=(m != 0))
[ "numpy.ones_like", "numpy.average", "sklearn.neighbors.KNeighborsClassifier", "sklearn.ensemble.RandomForestClassifier", "sklearn.neighbors._base._get_weights", "numpy.zeros", "numpy.zeros_like", "numpy.arange" ]
[((1321, 1352), 'sklearn.neighbors._base._get_weights', '_get_weights', (['dist', 'self.weight'], {}), '(dist, self.weight)\n', (1333, 1352), False, 'from sklearn.neighbors._base import _get_weights\n'), ((1483, 1529), 'numpy.average', 'np.average', (['agreement'], {'axis': '(1)', 'weights': 'weights'}), '(agreement, axis=1, weights=weights)\n', (1493, 1529), True, 'import numpy as np\n'), ((4638, 4668), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': '"""bool"""'}), "((N, N), dtype='bool')\n", (4646, 4668), True, 'import numpy as np\n'), ((1403, 1420), 'numpy.ones_like', 'np.ones_like', (['kid'], {}), '(kid)\n', (1415, 1420), True, 'import numpy as np\n'), ((4685, 4703), 'numpy.zeros_like', 'np.zeros_like', (['kid'], {}), '(kid)\n', (4698, 4703), True, 'import numpy as np\n'), ((1638, 1733), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'self.n_neighbors', 'weights': 'self.weight', 'n_jobs': 'self.n_jobs'}), '(n_neighbors=self.n_neighbors, weights=self.weight,\n n_jobs=self.n_jobs)\n', (1658, 1733), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3206, 3353), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'self.n_estimators', 'max_leaf_nodes': 'self.max_leaf_nodes', 'n_jobs': 'self.n_jobs', 'random_state': 'self.random_state'}), '(n_estimators=self.n_estimators, max_leaf_nodes=self.\n max_leaf_nodes, n_jobs=self.n_jobs, random_state=self.random_state)\n', (3228, 3353), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3440, 3572), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'self.n_neighbors', 'metric': '"""hamming"""', 'algorithm': '"""brute"""', 'weights': 'self.weight', 'n_jobs': 'self.n_jobs'}), "(n_neighbors=self.n_neighbors, metric='hamming',\n algorithm='brute', weights=self.weight, n_jobs=self.n_jobs)\n", (3460, 3572), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4158, 4180), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (4178, 4180), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4435, 4530), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'self.n_neighbors', 'weights': 'self.weight', 'n_jobs': 'self.n_jobs'}), '(n_neighbors=self.n_neighbors, weights=self.weight,\n n_jobs=self.n_jobs)\n', (4455, 4530), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((5042, 5058), 'numpy.zeros_like', 'np.zeros_like', (['m'], {}), '(m)\n', (5055, 5058), True, 'import numpy as np\n'), ((4706, 4721), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (4715, 4721), True, 'import numpy as np\n')]
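For readers who want the plain KDN idea without the detector classes above, a self-contained sketch with scikit-learn; `kdn_scores` is a hypothetical helper name, and like `KDN` it relies on the fact that `kneighbors()` called with no argument excludes each training point from its own neighbor list:

import numpy as np
from sklearn.neighbors import NearestNeighbors

def kdn_scores(X, y, k=5):
    # Fraction of each sample's k nearest neighbors (Euclidean,
    # unweighted) that carry the same label -- low values flag
    # samples whose neighborhood disagrees with their label.
    nn = NearestNeighbors(n_neighbors=k).fit(X)
    _, idx = nn.kneighbors()            # self-match excluded here
    return (y[idx] == y[:, None]).mean(axis=1)

# Example: points left of x=0 labelled 0, right labelled 1, so only
# samples near the decision boundary should receive low scores.
rng = np.random.RandomState(0)
X = rng.normal(size=(200, 2))
y = (X[:, 0] > 0).astype(int)
scores = kdn_scores(X, y)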
"""Test numpy array to matrix conversion function.""" import numpy as np import libs.test_carma as carma test_flags = { 1: 'Number of elements between array and matrix are not the same', 2: 'Number of rows between array and matrix are not the same', 3: 'Number of columns between array and matrix are not the same', 4: 'Sum of elements between array and matrix is not aproximately equal', 5: 'Pointer to memory is not as expected', } def test_arr_to_mat_double(): """Test arr_to_mat.""" sample = np.asarray( np.random.normal(size=(10, 2)), dtype=np.float64, order='F' ) flag = carma.arr_to_mat_double(sample, False, False) assert flag == 0, test_flags[flag] def test_arr_to_mat_long(): """Test arr_to_mat.""" sample = np.asarray( np.random.normal(size=(10, 2)), dtype=np.int64, order='F' ) flag = carma.arr_to_mat_long(sample, False, False) assert flag == 0, test_flags[flag] def test_arr_to_mat_double_c_contiguous(): """Test arr_to_mat.""" sample = np.asarray(np.random.normal(size=(10, 2)), dtype=np.float64) flag = carma.arr_to_mat_double(sample, False, False) assert flag == 5, test_flags[flag] def test_arr_to_mat_long_c_contiguous(): """Test arr_to_mat.""" sample = np.asarray(np.random.normal(size=(10, 2)), dtype=np.int64) flag = carma.arr_to_mat_long(sample, False, False) assert flag == 5, test_flags[flag] def test_arr_to_mat_double_copy(): """Test arr_to_mat.""" sample = np.asarray( np.random.normal(size=(10, 2)), dtype=np.float64, order='F' ) flag = carma.arr_to_mat_double_copy(sample) assert flag == 0, test_flags[flag] def test_arr_to_mat_double_copy_c_contiguous(): """Test arr_to_mat.""" sample = np.asarray( np.random.normal(size=(10, 2)), dtype=np.float64, order='C' ) flag = carma.arr_to_mat_double_copy(sample) assert flag == 0, test_flags[flag] # ############################################################################# # N-DIM 1 # # ############################################################################# def test_arr_to_mat_1d(): """Test arr_to_mat.""" sample = np.asarray( np.random.normal(size=(10)), dtype=np.float64, order='F' ) flag = carma.arr_to_mat_1d(sample, False, False) assert flag == 0, test_flags[flag] def test_arr_to_mat_1d_copy(): """Test arr_to_mat.""" sample = np.asarray( np.random.normal(size=(10)), dtype=np.float64, order='F' ) flag = carma.arr_to_mat_1d(sample, True, False) assert flag == 5, test_flags[flag] def test_arr_to_col(): """Test arr_to_col.""" sample = np.asarray(np.random.normal(size=10), dtype=np.float64, order='F') flag = carma.arr_to_col(sample, False, False) assert flag == 0, test_flags[flag] def test_arr_to_col_C(): """Test arr_to_col.""" sample = np.asarray(np.random.normal(size=10), dtype=np.float64, order='C') flag = carma.arr_to_col(sample, False, False) assert flag == 0, test_flags[flag] def test_arr_to_col_writeable(): """Test arr_to_col.""" sample = np.asarray(np.random.normal(size=10), dtype=np.float64, order='C') sample.setflags(write=0) flag = carma.arr_to_col(sample, False, False) assert flag == 5, test_flags[flag] def test_arr_to_col_copy(): """Test arr_to_col.""" sample = np.asarray(np.random.normal(size=10), dtype=np.float64, order='F') flag = carma.arr_to_col(sample, True, False) assert flag == 5, test_flags[flag] def test_arr_to_col_copy_C(): """Test arr_to_col.""" sample = np.asarray(np.random.normal(size=10), dtype=np.float64, order='C') flag = carma.arr_to_col(sample, True, False) assert flag == 5, test_flags[flag] def test_arr_to_row(): """Test arr_to_row.""" sample = np.asarray(np.random.normal(size=10), dtype=np.float64, 
order='F') flag = carma.arr_to_row(sample, False, False) assert flag == 0, test_flags[flag] def test_arr_to_row_C(): """Test arr_to_row.""" sample = np.asarray(np.random.normal(size=10), dtype=np.float64, order='C') flag = carma.arr_to_row(sample, False, False) assert flag == 0, test_flags[flag] def test_arr_to_row_writeable(): """Test arr_to_row.""" sample = np.asarray(np.random.normal(size=10), dtype=np.float64, order='F') sample.setflags(write=0) flag = carma.arr_to_row(sample, False, False) assert flag == 5, test_flags[flag] def test_arr_to_row_copy(): """Test arr_to_col.""" sample = np.asarray(np.random.normal(size=10), dtype=np.float64, order='F') flag = carma.arr_to_row(sample, True, False) assert flag == 5, test_flags[flag] def test_arr_to_row_copy_C(): """Test arr_to_col.""" sample = np.asarray(np.random.normal(size=10), dtype=np.float64, order='C') flag = carma.arr_to_row(sample, True, False) assert flag == 5, test_flags[flag] def test_arr_to_mat_cube(): """Test arr_to_cube.""" sample = np.asarray( np.random.normal(size=(10, 2, 2)), dtype=np.float64, order='F' ) flag = carma.arr_to_cube(sample, False, False) assert flag == 0, test_flags[flag] def test_arr_to_cube_double_c_contiguous(): """Test arr_to_mat.""" sample = np.asarray(np.random.normal(size=(10, 2, 2)), dtype=np.float64) flag = carma.arr_to_cube(sample, False, False) assert flag == 5, test_flags[flag] def test_arr_to_mat_cube_copy(): """Test arr_to_cube.""" sample = np.asarray( np.random.normal(size=(10, 2, 2)), dtype=np.float64, order='F' ) flag = carma.arr_to_cube(sample, True, False) assert flag == 5, test_flags[flag] def test_to_arma_mat(): """Test private implementation of to_arma for matrix.""" sample = np.asarray( np.random.normal(size=(10, 2)), dtype=np.float64, order='F' ) flag = carma.to_arma_mat(sample, False, False) assert flag == 0, test_flags[flag] def test_to_arma_cube(): """Test private implementation of to_arma for matrix.""" sample = np.asarray( np.random.normal(size=(10, 2, 2)), dtype=np.float64, order='F' ) flag = carma.to_arma_cube(sample, False, False) assert flag == 0, test_flags[flag] def test_to_arma_col(): """Test private implementation of to_arma for matrix.""" sample = np.asarray( np.random.normal(size=10), dtype=np.float64, order='F' ) flag = carma.to_arma_col(sample, False, False) assert flag == 0, test_flags[flag] def test_to_arma_row(): """Test private implementation of to_arma for matrix.""" sample = np.asarray( np.random.normal(size=10), dtype=np.float64, order='F' ) flag = carma.to_arma_row(sample, False, False) assert flag == 0, test_flags[flag]
[ "numpy.random.normal", "libs.test_carma.to_arma_mat", "libs.test_carma.arr_to_col", "libs.test_carma.arr_to_cube", "libs.test_carma.to_arma_row", "libs.test_carma.arr_to_mat_long", "libs.test_carma.to_arma_cube", "libs.test_carma.arr_to_mat_double_copy", "libs.test_carma.to_arma_col", "libs.test_c...
[((625, 670), 'libs.test_carma.arr_to_mat_double', 'carma.arr_to_mat_double', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (648, 670), True, 'import libs.test_carma as carma\n'), ((875, 918), 'libs.test_carma.arr_to_mat_long', 'carma.arr_to_mat_long', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (896, 918), True, 'import libs.test_carma as carma\n'), ((1115, 1160), 'libs.test_carma.arr_to_mat_double', 'carma.arr_to_mat_double', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (1138, 1160), True, 'import libs.test_carma as carma\n'), ((1353, 1396), 'libs.test_carma.arr_to_mat_long', 'carma.arr_to_mat_long', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (1374, 1396), True, 'import libs.test_carma as carma\n'), ((1610, 1646), 'libs.test_carma.arr_to_mat_double_copy', 'carma.arr_to_mat_double_copy', (['sample'], {}), '(sample)\n', (1638, 1646), True, 'import libs.test_carma as carma\n'), ((1873, 1909), 'libs.test_carma.arr_to_mat_double_copy', 'carma.arr_to_mat_double_copy', (['sample'], {}), '(sample)\n', (1901, 1909), True, 'import libs.test_carma as carma\n'), ((2351, 2392), 'libs.test_carma.arr_to_mat_1d', 'carma.arr_to_mat_1d', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (2370, 2392), True, 'import libs.test_carma as carma\n'), ((2599, 2639), 'libs.test_carma.arr_to_mat_1d', 'carma.arr_to_mat_1d', (['sample', '(True)', '(False)'], {}), '(sample, True, False)\n', (2618, 2639), True, 'import libs.test_carma as carma\n'), ((2822, 2860), 'libs.test_carma.arr_to_col', 'carma.arr_to_col', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (2838, 2860), True, 'import libs.test_carma as carma\n'), ((3045, 3083), 'libs.test_carma.arr_to_col', 'carma.arr_to_col', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (3061, 3083), True, 'import libs.test_carma as carma\n'), ((3305, 3343), 'libs.test_carma.arr_to_col', 'carma.arr_to_col', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (3321, 3343), True, 'import libs.test_carma as carma\n'), ((3531, 3568), 'libs.test_carma.arr_to_col', 'carma.arr_to_col', (['sample', '(True)', '(False)'], {}), '(sample, True, False)\n', (3547, 3568), True, 'import libs.test_carma as carma\n'), ((3758, 3795), 'libs.test_carma.arr_to_col', 'carma.arr_to_col', (['sample', '(True)', '(False)'], {}), '(sample, True, False)\n', (3774, 3795), True, 'import libs.test_carma as carma\n'), ((3978, 4016), 'libs.test_carma.arr_to_row', 'carma.arr_to_row', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (3994, 4016), True, 'import libs.test_carma as carma\n'), ((4201, 4239), 'libs.test_carma.arr_to_row', 'carma.arr_to_row', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (4217, 4239), True, 'import libs.test_carma as carma\n'), ((4461, 4499), 'libs.test_carma.arr_to_row', 'carma.arr_to_row', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (4477, 4499), True, 'import libs.test_carma as carma\n'), ((4687, 4724), 'libs.test_carma.arr_to_row', 'carma.arr_to_row', (['sample', '(True)', '(False)'], {}), '(sample, True, False)\n', (4703, 4724), True, 'import libs.test_carma as carma\n'), ((4914, 4951), 'libs.test_carma.arr_to_row', 'carma.arr_to_row', (['sample', '(True)', '(False)'], {}), '(sample, True, False)\n', (4930, 4951), True, 'import libs.test_carma as carma\n'), ((5162, 5201), 'libs.test_carma.arr_to_cube', 'carma.arr_to_cube', (['sample', '(False)', 
'(False)'], {}), '(sample, False, False)\n', (5179, 5201), True, 'import libs.test_carma as carma\n'), ((5402, 5441), 'libs.test_carma.arr_to_cube', 'carma.arr_to_cube', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (5419, 5441), True, 'import libs.test_carma as carma\n'), ((5657, 5695), 'libs.test_carma.arr_to_cube', 'carma.arr_to_cube', (['sample', '(True)', '(False)'], {}), '(sample, True, False)\n', (5674, 5695), True, 'import libs.test_carma as carma\n'), ((5932, 5971), 'libs.test_carma.to_arma_mat', 'carma.to_arma_mat', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (5949, 5971), True, 'import libs.test_carma as carma\n'), ((6212, 6252), 'libs.test_carma.to_arma_cube', 'carma.to_arma_cube', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (6230, 6252), True, 'import libs.test_carma as carma\n'), ((6484, 6523), 'libs.test_carma.to_arma_col', 'carma.to_arma_col', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (6501, 6523), True, 'import libs.test_carma as carma\n'), ((6755, 6794), 'libs.test_carma.to_arma_row', 'carma.to_arma_row', (['sample', '(False)', '(False)'], {}), '(sample, False, False)\n', (6772, 6794), True, 'import libs.test_carma as carma\n'), ((548, 578), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2)'}), '(size=(10, 2))\n', (564, 578), True, 'import numpy as np\n'), ((800, 830), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2)'}), '(size=(10, 2))\n', (816, 830), True, 'import numpy as np\n'), ((1054, 1084), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2)'}), '(size=(10, 2))\n', (1070, 1084), True, 'import numpy as np\n'), ((1294, 1324), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2)'}), '(size=(10, 2))\n', (1310, 1324), True, 'import numpy as np\n'), ((1533, 1563), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2)'}), '(size=(10, 2))\n', (1549, 1563), True, 'import numpy as np\n'), ((1796, 1826), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2)'}), '(size=(10, 2))\n', (1812, 1826), True, 'import numpy as np\n'), ((2277, 2302), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (2293, 2302), True, 'import numpy as np\n'), ((2525, 2550), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (2541, 2550), True, 'import numpy as np\n'), ((2755, 2780), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (2771, 2780), True, 'import numpy as np\n'), ((2978, 3003), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (2994, 3003), True, 'import numpy as np\n'), ((3209, 3234), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (3225, 3234), True, 'import numpy as np\n'), ((3464, 3489), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (3480, 3489), True, 'import numpy as np\n'), ((3691, 3716), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (3707, 3716), True, 'import numpy as np\n'), ((3911, 3936), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (3927, 3936), True, 'import numpy as np\n'), ((4134, 4159), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (4150, 4159), True, 'import numpy as np\n'), ((4365, 4390), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (4381, 4390), True, 
'import numpy as np\n'), ((4620, 4645), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (4636, 4645), True, 'import numpy as np\n'), ((4847, 4872), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (4863, 4872), True, 'import numpy as np\n'), ((5082, 5115), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2, 2)'}), '(size=(10, 2, 2))\n', (5098, 5115), True, 'import numpy as np\n'), ((5338, 5371), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2, 2)'}), '(size=(10, 2, 2))\n', (5354, 5371), True, 'import numpy as np\n'), ((5577, 5610), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2, 2)'}), '(size=(10, 2, 2))\n', (5593, 5610), True, 'import numpy as np\n'), ((5855, 5885), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2)'}), '(size=(10, 2))\n', (5871, 5885), True, 'import numpy as np\n'), ((6132, 6165), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 2, 2)'}), '(size=(10, 2, 2))\n', (6148, 6165), True, 'import numpy as np\n'), ((6412, 6437), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (6428, 6437), True, 'import numpy as np\n'), ((6683, 6708), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10)'}), '(size=10)\n', (6699, 6708), True, 'import numpy as np\n')]
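The flag-5 expectations in the tests above all involve C-contiguous input. Armadillo stores matrices column-major, which is consistent with only a Fortran-ordered NumPy buffer being borrowable in place while C-ordered input ends up copied to a different address ("pointer to memory is not as expected"); that causal reading is an assumption about carma's internals, but the layout distinction itself is plain NumPy:

import numpy as np

a_c = np.random.normal(size=(10, 2))      # NumPy default: row-major (C order)
a_f = np.asfortranarray(a_c)              # column-major copy of the same values

assert a_c.flags['C_CONTIGUOUS'] and not a_c.flags['F_CONTIGUOUS']
assert a_f.flags['F_CONTIGUOUS'] and not a_f.flags['C_CONTIGUOUS']
assert a_f.ctypes.data != a_c.ctypes.data   # the copy lives in a new buffer
np.testing.assert_allclose(a_c, a_f)        # contents are identical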
from __future__ import print_function """ Markov based methods for spatial dynamics. """ __author__ = "<NAME> <<EMAIL>" __all__ = ["Markov", "LISA_Markov", "Spatial_Markov", "kullback", "prais", "shorrock", "homogeneity"] import numpy as np from pysal.spatial_dynamics.ergodic import fmpt from pysal.spatial_dynamics.ergodic import steady_state as STEADY_STATE from scipy import stats from operator import gt import pysal # TT predefine LISA transitions # TT[i,j] is the transition type from i to j # i = quadrant in period 0 # j = quadrant in period 1 # uses one offset so first row and col of TT are ignored TT = np.zeros((5, 5), int) c = 1 for i in range(1, 5): for j in range(1, 5): TT[i, j] = c c += 1 # MOVE_TYPES is a dictionary that returns the move type of a LISA transition # filtered on the significance of the LISA end points # True indicates significant LISA in a particular period # e.g. a key of (1, 3, True, False) indicates a significant LISA located in # quadrant 1 in period 0 moved to quadrant 3 in period 1 but was not # significant in quadrant 3. MOVE_TYPES = {} c = 1 cases = (True, False) sig_keys = [(i, j) for i in cases for j in cases] for i, sig_key in enumerate(sig_keys): c = 1 + i * 16 for i in range(1, 5): for j in range(1, 5): key = (i, j, sig_key[0], sig_key[1]) MOVE_TYPES[key] = c c += 1 class Markov(object): """ Classic Markov transition matrices. Parameters ---------- class_ids : array (n, t), one row per observation, one column recording the state of each observation, with as many columns as time periods. classes : array (k, 1), all different classes (bins) of the matrix. Attributes ---------- p : matrix (k, k), transition probability matrix. steady_state : matrix (k, 1), ergodic distribution. transitions : matrix (k, k), count of transitions between each state i and j. Examples -------- >>> c = [['b','a','c'],['c','c','a'],['c','b','c']] >>> c.extend([['a','a','b'], ['a','b','c']]) >>> c = np.array(c) >>> m = Markov(c) >>> m.classes.tolist() ['a', 'b', 'c'] >>> m.p matrix([[ 0.25 , 0.5 , 0.25 ], [ 0.33333333, 0. , 0.66666667], [ 0.33333333, 0.33333333, 0.33333333]]) >>> m.steady_state matrix([[ 0.30769231], [ 0.28846154], [ 0.40384615]]) US nominal per capita income 48 states 81 years 1929-2009 >>> import pysal >>> f = pysal.open(pysal.examples.get_path("usjoin.csv")) >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]) set classes to quintiles for each year >>> q5 = np.array([pysal.Quantiles(y).yb for y in pci]).transpose() >>> m = Markov(q5) >>> m.transitions array([[ 729., 71., 1., 0., 0.], [ 72., 567., 80., 3., 0.], [ 0., 81., 631., 86., 2.], [ 0., 3., 86., 573., 56.], [ 0., 0., 1., 57., 741.]]) >>> m.p matrix([[ 0.91011236, 0.0886392 , 0.00124844, 0. , 0. ], [ 0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ], [ 0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ], [ 0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443], [ 0. , 0. 
, 0.00125156, 0.07133917, 0.92740926]]) >>> m.steady_state matrix([[ 0.20774716], [ 0.18725774], [ 0.20740537], [ 0.18821787], [ 0.20937187]]) Relative incomes >>> pci = pci.transpose() >>> rpci = pci/(pci.mean(axis=0)) >>> rq = pysal.Quantiles(rpci.flatten()).yb >>> rq.shape = (48,81) >>> mq = Markov(rq) >>> mq.transitions array([[ 707., 58., 7., 1., 0.], [ 50., 629., 80., 1., 1.], [ 4., 79., 610., 73., 2.], [ 0., 7., 72., 650., 37.], [ 0., 0., 0., 48., 724.]]) >>> mq.steady_state matrix([[ 0.17957376], [ 0.21631443], [ 0.21499942], [ 0.21134662], [ 0.17776576]]) """ def __init__(self, class_ids, classes=None): if classes is not None: self.classes = classes else: self.classes = np.unique(class_ids) n, t = class_ids.shape k = len(self.classes) js = range(t - 1) classIds = self.classes.tolist() transitions = np.zeros((k, k)) for state_0 in js: state_1 = state_0 + 1 state_0 = class_ids[:, state_0] state_1 = class_ids[:, state_1] initial = np.unique(state_0) for i in initial: ending = state_1[state_0 == i] uending = np.unique(ending) row = classIds.index(i) for j in uending: col = classIds.index(j) transitions[row, col] += sum(ending == j) self.transitions = transitions row_sum = transitions.sum(axis=1) p = np.dot(np.diag(1 / (row_sum + (row_sum == 0))), transitions) self.p = np.matrix(p) @property def steady_state(self): if not hasattr(self, '_steady_state'): self._steady_state = STEADY_STATE(self.p) return self._steady_state class Spatial_Markov(object): """ Markov transitions conditioned on the value of the spatial lag. Parameters ---------- y : array (n,t), one row per observation, one column per state of each observation, with as many columns as time periods. w : W spatial weights object. k : integer number of classes (quantiles). permutations : int, optional number of permutations for use in randomization based inference (the default is 0). fixed : bool If true, quantiles are taken over the entire n*t pooled series. If false, quantiles are taken each time period over n. variable_name : string name of variable. Attributes ---------- p : matrix (k, k), transition probability matrix for a-spatial Markov. s : matrix (k, 1), ergodic distribution for a-spatial Markov. transitions : matrix (k, k), counts of transitions between each state i and j for a-spatial Markov. T : matrix (k, k, k), counts of transitions for each conditional Markov. T[0] is the matrix of transitions for observations with lags in the 0th quantile; T[k-1] is the transitions for the observations with lags in the k-1th. P : matrix (k, k, k), transition probability matrix for spatial Markov first dimension is the conditioned on the lag. S : matrix (k, k), steady state distributions for spatial Markov. Each row is a conditional steady_state. F : matrix (k, k, k),first mean passage times. First dimension is conditioned on the lag. shtest : list (k elements), each element of the list is a tuple for a multinomial difference test between the steady state distribution from a conditional distribution versus the overall steady state distribution: first element of the tuple is the chi2 value, second its p-value and the third the degrees of freedom. chi2 : list (k elements), each element of the list is a tuple for a chi-squared test of the difference between the conditional transition matrix against the overall transition matrix: first element of the tuple is the chi2 value, second its p-value and the third the degrees of freedom. x2 : float sum of the chi2 values for each of the conditional tests. 
Has an asymptotic chi2 distribution with k(k-1)(k-1) degrees of freedom. Under the null that transition probabilities are spatially homogeneous. (see chi2 above) x2_dof : int degrees of freedom for homogeneity test. x2_pvalue : float pvalue for homogeneity test based on analytic. distribution x2_rpvalue : float (if permutations>0) pseudo p-value for x2 based on random spatial permutations of the rows of the original transitions. x2_realizations : array (permutations,1), the values of x2 for the random permutations. Q : float Chi-square test of homogeneity across lag classes based on Bickenbach and Bode (2003) [Bickenbach2003]_. Q_p_value : float p-value for Q. LR : float Likelihood ratio statistic for homogeneity across lag classes based on Bickenback and Bode (2003) [Bickenbach2003]_. LR_p_value : float p-value for LR. dof_hom : int degrees of freedom for LR and Q, corrected for 0 cells. Notes ----- Based on Rey (2001) [Rey2001]_. The shtest and chi2 tests should be used with caution as they are based on classic theory assuming random transitions. The x2 based test is preferable since it simulates the randomness under the null. It is an experimental test requiring further analysis. This is new Examples -------- >>> import pysal as ps >>> f = ps.open(ps.examples.get_path("usjoin.csv")) >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]) >>> pci = pci.transpose() >>> rpci = pci/(pci.mean(axis=0)) >>> w = ps.open(ps.examples.get_path("states48.gal")).read() >>> w.transform = 'r' >>> sm = ps.Spatial_Markov(rpci, w, fixed=True, k=5, variable_name='rpci') >>> for p in sm.P: ... print(p) ... [[ 0.96341463 0.0304878 0.00609756 0. 0. ] [ 0.06040268 0.83221477 0.10738255 0. 0. ] [ 0. 0.14 0.74 0.12 0. ] [ 0. 0.03571429 0.32142857 0.57142857 0.07142857] [ 0. 0. 0. 0.16666667 0.83333333]] [[ 0.79831933 0.16806723 0.03361345 0. 0. ] [ 0.0754717 0.88207547 0.04245283 0. 0. ] [ 0.00537634 0.06989247 0.8655914 0.05913978 0. ] [ 0. 0. 0.06372549 0.90196078 0.03431373] [ 0. 0. 0. 0.19444444 0.80555556]] [[ 0.84693878 0.15306122 0. 0. 0. ] [ 0.08133971 0.78947368 0.1291866 0. 0. ] [ 0.00518135 0.0984456 0.79274611 0.0984456 0.00518135] [ 0. 0. 0.09411765 0.87058824 0.03529412] [ 0. 0. 0. 0.10204082 0.89795918]] [[ 0.8852459 0.09836066 0. 0.01639344 0. ] [ 0.03875969 0.81395349 0.13953488 0. 0.00775194] [ 0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ] [ 0. 0.02339181 0.12865497 0.75438596 0.09356725] [ 0. 0. 0. 0.09661836 0.90338164]] [[ 0.33333333 0.66666667 0. 0. 0. ] [ 0.0483871 0.77419355 0.16129032 0.01612903 0. ] [ 0.01149425 0.16091954 0.74712644 0.08045977 0. ] [ 0. 0.01036269 0.06217617 0.89637306 0.03108808] [ 0. 0. 0. 0.02352941 0.97647059]] The probability of a poor state remaining poor is 0.963 if their neighbors are in the 1st quintile and 0.798 if their neighbors are in the 2nd quintile. The probability of a rich economy remaining rich is 0.976 if their neighbors are in the 5th quintile, but if their neighbors are in the 4th quintile this drops to 0.903. The Q and likelihood ratio statistics are both significant indicating the dynamics are not homogeneous across the lag classes: >>> "%.3f"%sm.LR '170.659' >>> "%.3f"%sm.Q '200.624' >>> "%.3f"%sm.LR_p_value '0.000' >>> "%.3f"%sm.Q_p_value '0.000' >>> sm.dof_hom 60 The long run distribution for states with poor (rich) neighbors has 0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the fourth and 0.029 (0.337) in the fifth quintile. 
>>> sm.S array([[ 0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278], [ 0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356], [ 0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ], [ 0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013], [ 0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]]) States with incomes in the first quintile with neighbors in the first quintile return to the first quartile after 2.298 years, after leaving the first quintile. They enter the fourth quintile after 80.810 years after leaving the first quintile, on average. Poor states within neighbors in the fourth quintile return to the first quintile, on average, after 12.88 years, and would enter the fourth quintile after 28.473 years. >>> for f in sm.F: ... print(f) ... [[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143] [ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286] [ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429] [ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762] [ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]] [[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197] [ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ] [ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482] [ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314] [ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]] [[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026] [ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693] [ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594] [ 83.72288828 39.06666667 14.3 3.44668119 76.36702977] [ 93.52288828 48.86666667 24.1 9.8 8.79255406]] [[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142] [ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423] [ 117.76777159 23.03735526 3.94436301 15.0843986 43.57927247] [ 127.89752089 32.4393006 14.56853107 4.44831643 31.63099455] [ 138.24752089 42.7893006 24.91853107 10.35 4.05613474]] [[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826] [ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826] [ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217] [ 127.1407767 48.74107143 33.29605263 3.91777427 83.52173913] [ 169.6407767 91.24107143 75.79605263 42.5 2.96521739]] """ def __init__(self, y, w, k=4, permutations=0, fixed=False, variable_name=None): self.y = y rows, cols = y.shape self.k = k self.cols = cols npa = np.array self.fixed = fixed self.variable_name = variable_name if fixed: yf = y.flatten() yb = pysal.Quantiles(yf, k=k).yb yb.shape = (rows, cols) classes = yb else: classes = npa([pysal.Quantiles(y[:, i], k=k) .yb for i in np.arange(cols)]).transpose() classic = Markov(classes) self.classes = classes self.p = classic.p self.transitions = classic.transitions T, P = self._calc(y, w, classes, k=k) self.T = T self.P = P if permutations: nrp = np.random.permutation counter = 0 x2_realizations = np.zeros((permutations, 1)) for perm in range(permutations): T, P = self._calc(nrp(y), w, classes, k=k) x2 = [chi2(T[i], self.transitions)[0] for i in range(k)] x2s = sum(x2) x2_realizations[perm] = x2s if x2s >= self.x2: counter += 1 self.x2_rpvalue = (counter + 1.0) / (permutations + 1.) 
self.x2_realizations = x2_realizations @property def s(self): if not hasattr(self, '_s'): self._s = STEADY_STATE(self.p) return self._s @property def S(self): if not hasattr(self, '_S'): S = np.zeros_like(self.p) for i, p in enumerate(self.P): S[i] = STEADY_STATE(p) self._S = np.asarray(S) return self._S @property def F(self): if not hasattr(self, '_F'): F = np.zeros_like(self.P) for i, p in enumerate(self.P): F[i] = fmpt(np.asmatrix(p)) self._F = np.asarray(F) return self._F # bickenbach and bode tests @property def ht(self): if not hasattr(self, '_ht'): self._ht = homogeneity(self.T) return self._ht @property def Q(self): if not hasattr(self, '_Q'): self._Q = self.ht.Q return self._Q @property def Q_p_value(self): self._Q_p_value = self.ht.Q_p_value return self._Q_p_value @property def LR(self): self._LR = self.ht.LR return self._LR @property def LR_p_value(self): self._LR_p_value = self.ht.LR_p_value return self._LR_p_value @property def dof_hom(self): self._dof_hom = self.ht.dof return self._dof_hom # shtests @property def shtest(self): if not hasattr(self, '_shtest'): self._shtest = self._mn_test() return self._shtest @property def chi2(self): if not hasattr(self, '_chi2'): self._chi2 = self._chi2_test() return self._chi2 @property def x2(self): if not hasattr(self, '_x2'): self._x2 = sum([c[0] for c in self.chi2]) return self._x2 @property def x2_pvalue(self): if not hasattr(self, '_x2_pvalue'): self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof) return self._x2_pvalue @property def x2_dof(self): if not hasattr(self, '_x2_dof'): k = self.k self._x2_dof = k * (k - 1) * (k - 1) return self._x2_dof def _calc(self, y, w, classes, k): ly = pysal.lag_spatial(w, y) npa = np.array if self.fixed: l_classes = pysal.Quantiles(ly.flatten(), k=k).yb l_classes.shape = ly.shape else: l_classes = npa([pysal.Quantiles( ly[:, i], k=k).yb for i in np.arange(self.cols)]) l_classes = l_classes.transpose() T = np.zeros((k, k, k)) n, t = y.shape for t1 in range(t - 1): t2 = t1 + 1 for i in range(n): T[l_classes[i, t1], classes[i, t1], classes[i, t2]] += 1 P = np.zeros_like(T) for i, mat in enumerate(T): row_sum = mat.sum(axis=1) row_sum = row_sum + (row_sum == 0) p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat)) P[i] = p_i return T, P def _mn_test(self): """ helper to calculate tests of differences between steady state distributions from the conditional and overall distributions. """ n, t = self.y.shape n0, n1, n2 = self.T.shape rn = range(n0) mat = [self._ssmnp_test( self.s, self.S[i], self.T[i].sum()) for i in rn] return mat def _ssmnp_test(self, p1, p2, nt): """ Steady state multinomial probability difference test. Arguments --------- p1 : array (k, 1), first steady state probability distribution. p1 : array (k, 1), second steady state probability distribution. nt : int number of transitions to base the test on. Returns ------- tuple (3 elements) (chi2 value, pvalue, degrees of freedom) """ p1 = np.array(p1) k, c = p1.shape p1.shape = (k, ) o = nt * p2 e = nt * p1 d = np.multiply((o - e), (o - e)) d = d / e chi2 = d.sum() pvalue = 1 - stats.chi2.cdf(chi2, k - 1) return (chi2, pvalue, k - 1) def _chi2_test(self): """ helper to calculate tests of differences between the conditional transition matrices and the overall transitions matrix. 
""" n, t = self.y.shape n0, n1, n2 = self.T.shape rn = range(n0) mat = [chi2(self.T[i], self.transitions) for i in rn] return mat def summary(self, file_name=None): class_names = ["C%d" % i for i in range(self.k)] regime_names = ["LAG%d" % i for i in range(self.k)] ht = homogeneity(self.T, class_names=class_names, regime_names=regime_names) title = "Spatial Markov Test" if self.variable_name: title = title + ": " + self.variable_name if file_name: ht.summary(file_name=file_name, title=title) else: ht.summary(title=title) def chi2(T1, T2): """ chi-squared test of difference between two transition matrices. Parameters ---------- T1 : matrix (k, k), matrix of transitions (counts). T2 : matrix (k, k), matrix of transitions (counts) to use to form the probabilities under the null. Returns ------- : tuple (3 elements). (chi2 value, pvalue, degrees of freedom). Examples -------- >>> import pysal >>> f = pysal.open(pysal.examples.get_path("usjoin.csv")) >>> years = range(1929, 2010) >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose() >>> rpci = pci/(pci.mean(axis=0)) >>> w = pysal.open(pysal.examples.get_path("states48.gal")).read() >>> w.transform='r' >>> sm = Spatial_Markov(rpci, w, fixed=True) >>> T1 = sm.T[0] >>> T1 array([[ 562., 22., 1., 0.], [ 12., 201., 22., 0.], [ 0., 17., 97., 4.], [ 0., 0., 3., 19.]]) >>> T2 = sm.transitions >>> T2 array([[ 884., 77., 4., 0.], [ 68., 794., 87., 3.], [ 1., 92., 815., 51.], [ 1., 0., 60., 903.]]) >>> chi2(T1,T2) (23.397284414732951, 0.0053631167048613371, 9) Notes ----- Second matrix is used to form the probabilities under the null. Marginal sums from first matrix are distributed across these probabilities under the null. In other words the observed transitions are taken from T1 while the expected transitions are formed as follows .. math:: E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j} Degrees of freedom corrected for any rows in either T1 or T2 that have zero total transitions. """ rs2 = T2.sum(axis=1) rs1 = T1.sum(axis=1) rs2nz = rs2 > 0 rs1nz = rs1 > 0 dof1 = sum(rs1nz) dof2 = sum(rs2nz) rs2 = rs2 + (rs2 == 0) dof = (dof1 - 1) * (dof2 - 1) p = np.diag(1 / rs2) * np.matrix(T2) E = np.diag(rs1) * np.matrix(p) num = T1 - E num = np.multiply(num, num) E = E + (E == 0) chi2 = num / E chi2 = chi2.sum() pvalue = 1 - stats.chi2.cdf(chi2, dof) return chi2, pvalue, dof class LISA_Markov(Markov): """ Markov for Local Indicators of Spatial Association Parameters ---------- y : array (n, t), n cross-sectional units observed over t time periods. w : W spatial weights object. permutations : int, optional number of permutations used to determine LISA significance (the default is 0). significance_level : float, optional significance level (two-sided) for filtering significant LISA endpoints in a transition (the default is 0.05). geoda_quads : bool If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4. If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4. (the default is False). Attributes ---------- chi_2 : tuple (3 elements) (chi square test statistic, p-value, degrees of freedom) for test that dynamics of y are independent of dynamics of wy. classes : array (4, 1) 1=HH, 2=LH, 3=LL, 4=HL (own, lag) 1=HH, 2=LL, 3=LH, 4=HL (own, lag) (if geoda_quads=True) expected_t : array (4, 4), expected number of transitions under the null that dynamics of y are independent of dynamics of wy. move_types : matrix (n, t-1), integer values indicating which type of LISA transition occurred (q1 is quadrant in period 1, q2 is quadrant in period 2). .. 
Table:: Move Types == == ======== q1 q2 move_type == == ======== 1 1 1 1 2 2 1 3 3 1 4 4 2 1 5 2 2 6 2 3 7 2 4 8 3 1 9 3 2 10 3 3 11 3 4 12 4 1 13 4 2 14 4 3 15 4 4 16 == == ======== p : matrix (k, k), transition probability matrix. p_values : matrix (n, t), LISA p-values for each end point (if permutations > 0). significant_moves : matrix (n, t-1), integer values indicating the type and significance of a LISA transition. st = 1 if significant in period t, else st=0 (if permutations > 0). .. Table:: Significant Moves =============== =================== (s1,s2) move_type =============== =================== (1,1) [1, 16] (1,0) [17, 32] (0,1) [33, 48] (0,0) [49, 64] =============== =================== == == == == ========= q1 q2 s1 s2 move_type == == == == ========= 1 1 1 1 1 1 2 1 1 2 1 3 1 1 3 1 4 1 1 4 2 1 1 1 5 2 2 1 1 6 2 3 1 1 7 2 4 1 1 8 3 1 1 1 9 3 2 1 1 10 3 3 1 1 11 3 4 1 1 12 4 1 1 1 13 4 2 1 1 14 4 3 1 1 15 4 4 1 1 16 1 1 1 0 17 1 2 1 0 18 . . . . . . . . . . 4 3 1 0 31 4 4 1 0 32 1 1 0 1 33 1 2 0 1 34 . . . . . . . . . . 4 3 0 1 47 4 4 0 1 48 1 1 0 0 49 1 2 0 0 50 . . . . . . . . . . 4 3 0 0 63 4 4 0 0 64 == == == == ========= steady_state : matrix (k, 1), ergodic distribution. transitions : matrix (4, 4), count of transitions between each state i and j. spillover : array (n, 1) binary array, locations that were not part of a cluster in period 1 but joined a prexisting cluster in period 2. Examples -------- >>> import pysal as ps >>> import numpy as np >>> f = ps.open(ps.examples.get_path("usjoin.csv")) >>> years = range(1929, 2010) >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose() >>> w = ps.open(ps.examples.get_path("states48.gal")).read() >>> lm = ps.LISA_Markov(pci,w) >>> lm.classes array([1, 2, 3, 4]) >>> lm.steady_state matrix([[ 0.28561505], [ 0.14190226], [ 0.40493672], [ 0.16754598]]) >>> lm.transitions array([[ 1.08700000e+03, 4.40000000e+01, 4.00000000e+00, 3.40000000e+01], [ 4.10000000e+01, 4.70000000e+02, 3.60000000e+01, 1.00000000e+00], [ 5.00000000e+00, 3.40000000e+01, 1.42200000e+03, 3.90000000e+01], [ 3.00000000e+01, 1.00000000e+00, 4.00000000e+01, 5.52000000e+02]]) >>> lm.p matrix([[ 0.92985458, 0.03763901, 0.00342173, 0.02908469], [ 0.07481752, 0.85766423, 0.06569343, 0.00182482], [ 0.00333333, 0.02266667, 0.948 , 0.026 ], [ 0.04815409, 0.00160514, 0.06420546, 0.88603531]]) >>> lm.move_types[0,:3] array([11, 11, 11]) >>> lm.move_types[0,-3:] array([11, 11, 11]) Now consider only moves with one, or both, of the LISA end points being significant >>> np.random.seed(10) >>> lm_random = pysal.LISA_Markov(pci, w, permutations=99) >>> lm_random.significant_moves[0, :3] array([11, 11, 11]) >>> lm_random.significant_moves[0,-3:] array([59, 43, 27]) Any value less than 49 indicates at least one of the LISA end points was significant. So for example, the first spatial unit experienced a transition of type 11 (LL, LL) during the first three and last tree intervals (according to lm.move_types), however, the last three of these transitions involved insignificant LISAS in both the start and ending year of each transition. 
Test whether the moves of y are independent of the moves of wy >>> "Chi2: %8.3f, p: %5.2f, dof: %d" % lm.chi_2 'Chi2: 1058.208, p: 0.00, dof: 9' Actual transitions of LISAs >>> lm.transitions array([[ 1.08700000e+03, 4.40000000e+01, 4.00000000e+00, 3.40000000e+01], [ 4.10000000e+01, 4.70000000e+02, 3.60000000e+01, 1.00000000e+00], [ 5.00000000e+00, 3.40000000e+01, 1.42200000e+03, 3.90000000e+01], [ 3.00000000e+01, 1.00000000e+00, 4.00000000e+01, 5.52000000e+02]]) Expected transitions of LISAs under the null y and wy are moving independently of one another >>> lm.expected_t array([[ 1.12328098e+03, 1.15377356e+01, 3.47522158e-01, 3.38337644e+01], [ 3.50272664e+00, 5.28473882e+02, 1.59178880e+01, 1.05503814e-01], [ 1.53878082e-01, 2.32163556e+01, 1.46690710e+03, 9.72266513e+00], [ 9.60775143e+00, 9.86856346e-02, 6.23537392e+00, 6.07058189e+02]]) If the LISA classes are to be defined according to GeoDa, the `geoda_quad` option has to be set to true >>> lm.q[0:5,0] array([3, 2, 3, 1, 4]) >>> lm = ps.LISA_Markov(pci,w, geoda_quads=True) >>> lm.q[0:5,0] array([2, 3, 2, 1, 4]) """ def __init__(self, y, w, permutations=0, significance_level=0.05, geoda_quads=False): y = y.transpose() pml = pysal.Moran_Local gq = geoda_quads ml = ([pml(yi, w, permutations=permutations, geoda_quads=gq) for yi in y]) q = np.array([mli.q for mli in ml]).transpose() classes = np.arange(1, 5) # no guarantee all 4 quadrants are visited Markov.__init__(self, q, classes) self.q = q self.w = w n, k = q.shape k -= 1 self.significance_level = significance_level move_types = np.zeros((n, k), int) sm = np.zeros((n, k), int) self.significance_level = significance_level if permutations > 0: p = np.array([mli.p_z_sim for mli in ml]).transpose() self.p_values = p pb = p <= significance_level else: pb = np.zeros_like(y.T) for t in range(k): origin = q[:, t] dest = q[:, t + 1] p_origin = pb[:, t] p_dest = pb[:, t + 1] for r in range(n): move_types[r, t] = TT[origin[r], dest[r]] key = (origin[r], dest[r], p_origin[r], p_dest[r]) sm[r, t] = MOVE_TYPES[key] if permutations > 0: self.significant_moves = sm self.move_types = move_types # null of own and lag moves being independent ybar = y.mean(axis=0) r = y / ybar ylag = np.array([pysal.lag_spatial(w, yt) for yt in y]) rlag = ylag / ybar rc = r < 1. rlagc = rlag < 1. markov_y = pysal.Markov(rc) markov_ylag = pysal.Markov(rlagc) A = np.matrix([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0]]) kp = A * np.kron(markov_y.p, markov_ylag.p) * A.T trans = self.transitions.sum(axis=1) t1 = np.diag(trans) * kp t2 = self.transitions t1 = t1.getA() self.chi_2 = pysal.spatial_dynamics.markov.chi2(t2, t1) self.expected_t = t1 self.permutations = permutations def spillover(self, quadrant=1, neighbors_on=False): """ Detect spillover locations for diffusion in LISA Markov. Parameters ---------- quadrant : int which quadrant in the scatterplot should form the core of a cluster. neighbors_on : binary If false, then only the 1st order neighbors of a core location are included in the cluster. If true, neighbors of cluster core 1st order neighbors are included in the cluster. Returns ------- results : dictionary two keys - values pairs: 'components' - array (n, t) values are integer ids (starting at 1) indicating which component/cluster observation i in period t belonged to. 'spillover' - array (n, t-1) binary values indicating if the location was a spill-over location that became a new member of a previously existing cluster. 
Examples -------- >>> f = pysal.open(pysal.examples.get_path("usjoin.csv")) >>> years = range(1929, 2010) >>> pci = np.array([f.by_col[str(y)] for y in years]).transpose() >>> w = pysal.open(pysal.examples.get_path("states48.gal")).read() >>> np.random.seed(10) >>> lm_random = pysal.LISA_Markov(pci, w, permutations=99) >>> r = lm_random.spillover() >>> r['components'][:,12] array([ 0., 0., 0., 2., 0., 1., 1., 0., 0., 2., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 2., 1., 1., 0., 1., 0., 0., 1., 0., 2., 1., 1., 0., 0., 0., 0., 0., 1., 0., 2., 1., 0., 0.]) >>> r['components'][:,14] array([ 0., 2., 0., 2., 0., 1., 1., 0., 0., 2., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 2., 0., 1., 0., 1., 0., 0., 1., 0., 2., 1., 1., 0., 0., 0., 0., 0., 1., 0., 2., 1., 0., 0.]) >>> r['spill_over'][:,12] array([ 0., 1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 1.]) Including neighbors of core neighbors >>> rn = lm_random.spillover(neighbors_on=True) >>> rn['components'][:,12] array([ 0., 2., 0., 2., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 0., 1., 1., 1., 1., 0., 0., 0., 2., 0., 2., 1., 1., 0., 1., 0., 0., 1., 0., 2., 1., 1., 0., 0., 0., 0., 2., 1., 1., 2., 1., 0., 2.]) >>> rn["components"][:,13] array([ 0., 2., 0., 2., 2., 1., 1., 0., 0., 2., 0., 1., 0., 2., 1., 0., 1., 1., 1., 1., 0., 0., 0., 2., 2., 2., 1., 1., 2., 1., 0., 2., 1., 2., 2., 1., 1., 0., 2., 0., 2., 2., 1., 1., 2., 1., 0., 2.]) >>> rn["spill_over"][:,12] array([ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 1., 0., 1., 0., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0.]) """ n, k = self.q.shape if self.permutations: spill_over = np.zeros((n, k - 1)) components = np.zeros((n, k)) i2id = {} # handle string keys for key in self.w.neighbors.keys(): idx = self.w.id2i[key] i2id[idx] = key sig_lisas = (self.q == quadrant) \ * (self.p_values <= self.significance_level) sig_ids = [np.nonzero( sig_lisas[:, i])[0].tolist() for i in range(k)] neighbors = self.w.neighbors for t in range(k - 1): s1 = sig_ids[t] s2 = sig_ids[t + 1] g1 = pysal.region.components.Graph(undirected=True) for i in s1: for neighbor in neighbors[i2id[i]]: g1.add_edge(i2id[i], neighbor, 1.0) if neighbors_on: for nn in neighbors[neighbor]: g1.add_edge(neighbor, nn, 1.0) components1 = g1.connected_components(op=gt) components1 = [list(c.nodes) for c in components1] g2 = pysal.region.components.Graph(undirected=True) for i in s2: for neighbor in neighbors[i2id[i]]: g2.add_edge(i2id[i], neighbor, 1.0) if neighbors_on: for nn in neighbors[neighbor]: g2.add_edge(neighbor, nn, 1.0) components2 = g2.connected_components(op=gt) components2 = [list(c.nodes) for c in components2] c2 = [] c1 = [] for c in components2: c2.extend(c) for c in components1: c1.extend(c) new_ids = [j for j in c2 if j not in c1] spill_ids = [] for j in new_ids: # find j's component in period 2 cj = [c for c in components2 if j in c][0] # for members of j's component in period 2, check if they # belonged to any components in period 1 for i in cj: if i in c1: spill_ids.append(j) break for spill_id in spill_ids: id = self.w.id2i[spill_id] spill_over[id, t] = 1 for c, component in enumerate(components1): for i in component: ii = self.w.id2i[i] components[ii, t] = c + 1 results = {} results['components'] = components results['spill_over'] = spill_over return results 
else: return None def kullback(F): """ Kullback information based test of Markov Homogeneity. Parameters ---------- F : array (s, r, r), values are transitions (not probabilities) for s strata, r initial states, r terminal states. Returns ------- Results : dictionary (key - value) Conditional homogeneity - (float) test statistic for homogeneity of transition probabilities across strata. Conditional homogeneity pvalue - (float) p-value for test statistic. Conditional homogeneity dof - (int) degrees of freedom = r(s-1)(r-1). Notes ----- Based on Kullback, Kupperman and Ku (1962) [Kullback1962]_. Example below is taken from Table 9.2 . Examples -------- >>> s1 = np.array([ ... [ 22, 11, 24, 2, 2, 7], ... [ 5, 23, 15, 3, 42, 6], ... [ 4, 21, 190, 25, 20, 34], ... [0, 2, 14, 56, 14, 28], ... [32, 15, 20, 10, 56, 14], ... [5, 22, 31, 18, 13, 134] ... ]) >>> s2 = np.array([ ... [3, 6, 9, 3, 0, 8], ... [1, 9, 3, 12, 27, 5], ... [2, 9, 208, 32, 5, 18], ... [0, 14, 32, 108, 40, 40], ... [22, 14, 9, 26, 224, 14], ... [1, 5, 13, 53, 13, 116] ... ]) >>> >>> F = np.array([s1, s2]) >>> res = kullback(F) >>> "%8.3f"%res['Conditional homogeneity'] ' 160.961' >>> "%d"%res['Conditional homogeneity dof'] '30' >>> "%3.1f"%res['Conditional homogeneity pvalue'] '0.0' """ F1 = F == 0 F1 = F + F1 FLF = F * np.log(F1) T1 = 2 * FLF.sum() FdJK = F.sum(axis=0) FdJK1 = FdJK + (FdJK == 0) FdJKLFdJK = FdJK * np.log(FdJK1) T2 = 2 * FdJKLFdJK.sum() FdJd = F.sum(axis=0).sum(axis=1) FdJd1 = FdJd + (FdJd == 0) T3 = 2 * (FdJd * np.log(FdJd1)).sum() FIJd = F[:, :].sum(axis=1) FIJd1 = FIJd + (FIJd == 0) T4 = 2 * (FIJd * np.log(FIJd1)).sum() T6 = F.sum() T6 = 2 * T6 * np.log(T6) s, r, r1 = F.shape chom = T1 - T4 - T2 + T3 cdof = r * (s - 1) * (r - 1) results = {} results['Conditional homogeneity'] = chom results['Conditional homogeneity dof'] = cdof results['Conditional homogeneity pvalue'] = 1 - stats.chi2.cdf(chom, cdof) return results def prais(pmat): """ Prais conditional mobility measure. Parameters ---------- pmat : matrix (k, k), Markov probability transition matrix. Returns ------- pr : matrix (1, k), conditional mobility measures for each of the k classes. Notes ----- Prais' conditional mobility measure for a class is defined as: .. math:: pr_i = 1 - p_{i,i} Examples -------- >>> import numpy as np >>> import pysal >>> f = pysal.open(pysal.examples.get_path("usjoin.csv")) >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]) >>> q5 = np.array([pysal.Quantiles(y).yb for y in pci]).transpose() >>> m = pysal.Markov(q5) >>> m.transitions array([[ 729., 71., 1., 0., 0.], [ 72., 567., 80., 3., 0.], [ 0., 81., 631., 86., 2.], [ 0., 3., 86., 573., 56.], [ 0., 0., 1., 57., 741.]]) >>> m.p matrix([[ 0.91011236, 0.0886392 , 0.00124844, 0. , 0. ], [ 0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ], [ 0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ], [ 0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443], [ 0. , 0. , 0.00125156, 0.07133917, 0.92740926]]) >>> pysal.spatial_dynamics.markov.prais(m.p) matrix([[ 0.08988764, 0.21468144, 0.21125 , 0.20194986, 0.07259074]]) """ pr = (pmat.sum(axis=1) - np.diag(pmat))[0] return pr def shorrock(pmat): """ Shorrock's mobility measure. Parameters ---------- pmat : matrix (k, k), Markov probability transition matrix. Returns ------- sh : float Shorrock mobility measure. Notes ----- Shorock's mobility measure is defined as .. 
math:: sh = (k - \sum_{j=1}^{k} p_{j,j})/(k - 1) Examples -------- >>> import numpy as np >>> import pysal >>> f = pysal.open(pysal.examples.get_path("usjoin.csv")) >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]) >>> q5 = np.array([pysal.Quantiles(y).yb for y in pci]).transpose() >>> m = pysal.Markov(q5) >>> m.transitions array([[ 729., 71., 1., 0., 0.], [ 72., 567., 80., 3., 0.], [ 0., 81., 631., 86., 2.], [ 0., 3., 86., 573., 56.], [ 0., 0., 1., 57., 741.]]) >>> m.p matrix([[ 0.91011236, 0.0886392 , 0.00124844, 0. , 0. ], [ 0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ], [ 0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ], [ 0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443], [ 0. , 0. , 0.00125156, 0.07133917, 0.92740926]]) >>> pysal.spatial_dynamics.markov.shorrock(m.p) 0.19758992000997844 """ t = np.trace(pmat) k = pmat.shape[1] sh = (k - t) / (k - 1) return sh def homogeneity(transition_matrices, regime_names=[], class_names=[], title="Markov Homogeneity Test"): """ Test for homogeneity of Markov transition probabilities across regimes. Parameters ---------- transition_matrices : list of transition matrices for regimes, all matrices must have same size (r, c). r is the number of rows in the transition matrix and c is the number of columns in the transition matrix. regime_names : sequence Labels for the regimes. class_names : sequence Labels for the classes/states of the Markov chain. title : string name of test. Returns ------- : implicit an instance of Homogeneity_Results. """ return Homogeneity_Results(transition_matrices, regime_names=regime_names, class_names=class_names, title=title) class Homogeneity_Results: """ Wrapper class to present homogeneity results. Parameters ---------- transition_matrices : list of transition matrices for regimes, all matrices must have same size (r, c). r is the number of rows in the transition matrix and c is the number of columns in the transition matrix. regime_names : sequence Labels for the regimes. class_names : sequence Labels for the classes/states of the Markov chain. title : string Title of the table. Attributes ----------- Notes ----- Degrees of freedom adjustment follow the approach in Bickenbach and Bode (2003) [Bickenbach2003]_. Examples -------- See Spatial_Markov above. """ def __init__(self, transition_matrices, regime_names=[], class_names=[], title="Markov Homogeneity Test"): self._homogeneity(transition_matrices) self.regime_names = regime_names self.class_names = class_names self.title = title def _homogeneity(self, transition_matrices): # form null transition probability matrix M = np.array(transition_matrices) m, r, k = M.shape self.k = k B = np.zeros((r, m)) T = M.sum(axis=0) self.t_total = T.sum() n_i = T.sum(axis=1) A_i = (T > 0).sum(axis=1) A_im = np.zeros((r, m)) p_ij = np.dot(np.diag(1./(n_i + (n_i == 0)*1.)), T) den = p_ij + 1. * (p_ij == 0) b_i = np.zeros_like(A_i) p_ijm = np.zeros_like(M) # get dimensions m, n_rows, n_cols = M.shape m = 0 Q = 0.0 LR = 0.0 lr_table = np.zeros_like(M) q_table = np.zeros_like(M) for nijm in M: nim = nijm.sum(axis=1) B[:, m] = 1.*(nim > 0) b_i = b_i + 1. 
* (nim > 0) p_ijm[m] = np.dot(np.diag(1./(nim + (nim == 0)*1.)), nijm) num = (p_ijm[m]-p_ij)**2 ratio = num / den qijm = np.dot(np.diag(nim), ratio) q_table[m] = qijm Q = Q + qijm.sum() # only use nonzero pijm in lr test mask = (nijm > 0) * (p_ij > 0) A_im[:, m] = (nijm > 0).sum(axis=1) unmask = 1.0 * (mask == 0) ratio = (mask * p_ijm[m] + unmask) / (mask * p_ij + unmask) lr = nijm * np.log(ratio) LR = LR + lr.sum() lr_table[m] = 2 * lr m += 1 # b_i is the number of regimes that have non-zero observations in row i # A_i is the number of non-zero elements in row i of the aggregated # transition matrix self.dof = int(((b_i-1) * (A_i-1)).sum()) self.Q = Q self.Q_p_value = 1 - stats.chi2.cdf(self.Q, self.dof) self.LR = LR * 2. self.LR_p_value = 1 - stats.chi2.cdf(self.LR, self.dof) self.A = A_i self.A_im = A_im self.B = B self.b_i = b_i self.LR_table = lr_table self.Q_table = q_table self.m = m self.p_h0 = p_ij self.p_h1 = p_ijm def summary(self, file_name=None, title="Markov Homogeneity Test"): regime_names = ["%d" % i for i in range(self.m)] if self.regime_names: regime_names = self.regime_names cols = ["P(%s)" % str(regime) for regime in regime_names] if not self.class_names: self.class_names = range(self.k) max_col = max([len(col) for col in cols]) col_width = max([5, max_col]) # probabilities have 5 chars n_tabs = self.k width = n_tabs * 4 + (self.k+1)*col_width lead = "-" * width head = title.center(width) contents = [lead, head, lead] l = "Number of regimes: %d" % int(self.m) k = "Number of classes: %d" % int(self.k) r = "Regime names: " r += ", ".join(regime_names) t = "Number of transitions: %d" % int(self.t_total) contents.append(k) contents.append(t) contents.append(l) contents.append(r) contents.append(lead) h = "%7s %20s %20s" % ('Test', 'LR', 'Chi-2') contents.append(h) stat = "%7s %20.3f %20.3f" % ('Stat.', self.LR, self.Q) contents.append(stat) stat = "%7s %20d %20d" % ('DOF', self.dof, self.dof) contents.append(stat) stat = "%7s %20.3f %20.3f" % ('p-value', self.LR_p_value, self.Q_p_value) contents.append(stat) print("\n".join(contents)) print(lead) cols = ["P(%s)" % str(regime) for regime in self.regime_names] if not self.class_names: self.class_names = range(self.k) cols.extend(["%s" % str(cname) for cname in self.class_names]) max_col = max([len(col) for col in cols]) col_width = max([5, max_col]) # probabilities have 5 chars p0 = [] line0 = ['{s: <{w}}'.format(s="P(H0)", w=col_width)] line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname in self.class_names])) print(" ".join(line0)) p0.append("&".join(line0)) for i, row in enumerate(self.p_h0): line = ["%*s" % (col_width, str(self.class_names[i]))] line.extend(["%*.3f" % (col_width, v) for v in row]) print(" ".join(line)) p0.append("&".join(line)) pmats = [p0] print(lead) for r, p1 in enumerate(self.p_h1): p0 = [] line0 = ['{s: <{w}}'.format(s="P(%s)" % regime_names[r], w=col_width)] line0.extend((['{s: >{w}}'.format(s=cname, w=col_width) for cname in self.class_names])) print(" ".join(line0)) p0.append("&".join(line0)) for i, row in enumerate(p1): line = ["%*s" % (col_width, str(self.class_names[i]))] line.extend(["%*.3f" % (col_width, v) for v in row]) print(" ".join(line)) p0.append("&".join(line)) pmats.append(p0) print(lead) if file_name: k = self.k ks = str(k+1) with open(file_name, 'w') as f: c = [] fmt = "r"*(k+1) s = "\\begin{tabular}{|%s|}\\hline\n" % fmt s += "\\multicolumn{%s}{|c|}{%s}" % (ks, title) c.append(s) s = "Number of classes: %d" % int(self.k) c.append("\\hline\\multicolumn{%s}{|l|}{%s}" % 
(ks, s)) s = "Number of transitions: %d" % int(self.t_total) c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s)) s = "Number of regimes: %d" % int(self.m) c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s)) s = "Regime names: " s += ", ".join(regime_names) c.append("\\multicolumn{%s}{|l|}{%s}" % (ks, s)) s = "\\hline\\multicolumn{2}{|l}{%s}" % ("Test") s += "&\\multicolumn{2}{r}{LR}&\\multicolumn{2}{r|}{Q}" c.append(s) s = "Stat." s = "\\multicolumn{2}{|l}{%s}" % (s) s += "&\\multicolumn{2}{r}{%.3f}" % self.LR s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q c.append(s) s = "\\multicolumn{2}{|l}{%s}" % ("DOF") s += "&\\multicolumn{2}{r}{%d}" % int(self.dof) s += "&\\multicolumn{2}{r|}{%d}" % int(self.dof) c.append(s) s = "\\multicolumn{2}{|l}{%s}" % ("p-value") s += "&\\multicolumn{2}{r}{%.3f}" % self.LR_p_value s += "&\\multicolumn{2}{r|}{%.3f}" % self.Q_p_value c.append(s) s1 = "\\\\\n".join(c) s1 += "\\\\\n" c = [] for mat in pmats: c.append("\\hline\n") for row in mat: c.append(row+"\\\\\n") c.append("\\hline\n") c.append("\\end{tabular}") s2 = "".join(c) f.write(s1+s2)
[ "numpy.trace", "numpy.asmatrix", "numpy.log", "pysal.Quantiles", "numpy.array", "numpy.arange", "numpy.multiply", "scipy.stats.chi2.cdf", "numpy.asarray", "pysal.region.components.Graph", "pysal.spatial_dynamics.ergodic.steady_state", "pysal.spatial_dynamics.markov.chi2", "numpy.kron", "nu...
[((631, 652), 'numpy.zeros', 'np.zeros', (['(5, 5)', 'int'], {}), '((5, 5), int)\n', (639, 652), True, 'import numpy as np\n'), ((25202, 25223), 'numpy.multiply', 'np.multiply', (['num', 'num'], {}), '(num, num)\n', (25213, 25223), True, 'import numpy as np\n'), ((48485, 48499), 'numpy.trace', 'np.trace', (['pmat'], {}), '(pmat)\n', (48493, 48499), True, 'import numpy as np\n'), ((4753, 4769), 'numpy.zeros', 'np.zeros', (['(k, k)'], {}), '((k, k))\n', (4761, 4769), True, 'import numpy as np\n'), ((5432, 5444), 'numpy.matrix', 'np.matrix', (['p'], {}), '(p)\n', (5441, 5444), True, 'import numpy as np\n'), ((20134, 20157), 'pysal.lag_spatial', 'pysal.lag_spatial', (['w', 'y'], {}), '(w, y)\n', (20151, 20157), False, 'import pysal\n'), ((20489, 20508), 'numpy.zeros', 'np.zeros', (['(k, k, k)'], {}), '((k, k, k))\n', (20497, 20508), True, 'import numpy as np\n'), ((20705, 20721), 'numpy.zeros_like', 'np.zeros_like', (['T'], {}), '(T)\n', (20718, 20721), True, 'import numpy as np\n'), ((21943, 21955), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (21951, 21955), True, 'import numpy as np\n'), ((22057, 22082), 'numpy.multiply', 'np.multiply', (['(o - e)', '(o - e)'], {}), '(o - e, o - e)\n', (22068, 22082), True, 'import numpy as np\n'), ((25106, 25122), 'numpy.diag', 'np.diag', (['(1 / rs2)'], {}), '(1 / rs2)\n', (25113, 25122), True, 'import numpy as np\n'), ((25125, 25138), 'numpy.matrix', 'np.matrix', (['T2'], {}), '(T2)\n', (25134, 25138), True, 'import numpy as np\n'), ((25147, 25159), 'numpy.diag', 'np.diag', (['rs1'], {}), '(rs1)\n', (25154, 25159), True, 'import numpy as np\n'), ((25162, 25174), 'numpy.matrix', 'np.matrix', (['p'], {}), '(p)\n', (25171, 25174), True, 'import numpy as np\n'), ((25303, 25328), 'scipy.stats.chi2.cdf', 'stats.chi2.cdf', (['chi2', 'dof'], {}), '(chi2, dof)\n', (25317, 25328), False, 'from scipy import stats\n'), ((34571, 34586), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (34580, 34586), True, 'import numpy as np\n'), ((34823, 34844), 'numpy.zeros', 'np.zeros', (['(n, k)', 'int'], {}), '((n, k), int)\n', (34831, 34844), True, 'import numpy as np\n'), ((34858, 34879), 'numpy.zeros', 'np.zeros', (['(n, k)', 'int'], {}), '((n, k), int)\n', (34866, 34879), True, 'import numpy as np\n'), ((35870, 35886), 'pysal.Markov', 'pysal.Markov', (['rc'], {}), '(rc)\n', (35882, 35886), False, 'import pysal\n'), ((35909, 35928), 'pysal.Markov', 'pysal.Markov', (['rlagc'], {}), '(rlagc)\n', (35921, 35928), False, 'import pysal\n'), ((35941, 36008), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0]])\n', (35950, 36008), True, 'import numpy as np\n'), ((36289, 36331), 'pysal.spatial_dynamics.markov.chi2', 'pysal.spatial_dynamics.markov.chi2', (['t2', 't1'], {}), '(t2, t1)\n', (36323, 36331), False, 'import pysal\n'), ((44709, 44719), 'numpy.log', 'np.log', (['F1'], {}), '(F1)\n', (44715, 44719), True, 'import numpy as np\n'), ((44823, 44836), 'numpy.log', 'np.log', (['FdJK1'], {}), '(FdJK1)\n', (44829, 44836), True, 'import numpy as np\n'), ((45118, 45128), 'numpy.log', 'np.log', (['T6'], {}), '(T6)\n', (45124, 45128), True, 'import numpy as np\n'), ((45380, 45406), 'scipy.stats.chi2.cdf', 'stats.chi2.cdf', (['chom', 'cdof'], {}), '(chom, cdof)\n', (45394, 45406), False, 'from scipy import stats\n'), ((51000, 51029), 'numpy.array', 'np.array', (['transition_matrices'], {}), '(transition_matrices)\n', (51008, 51029), True, 'import numpy as np\n'), 
((51087, 51103), 'numpy.zeros', 'np.zeros', (['(r, m)'], {}), '((r, m))\n', (51095, 51103), True, 'import numpy as np\n'), ((51238, 51254), 'numpy.zeros', 'np.zeros', (['(r, m)'], {}), '((r, m))\n', (51246, 51254), True, 'import numpy as np\n'), ((51367, 51385), 'numpy.zeros_like', 'np.zeros_like', (['A_i'], {}), '(A_i)\n', (51380, 51385), True, 'import numpy as np\n'), ((51402, 51418), 'numpy.zeros_like', 'np.zeros_like', (['M'], {}), '(M)\n', (51415, 51418), True, 'import numpy as np\n'), ((51546, 51562), 'numpy.zeros_like', 'np.zeros_like', (['M'], {}), '(M)\n', (51559, 51562), True, 'import numpy as np\n'), ((51581, 51597), 'numpy.zeros_like', 'np.zeros_like', (['M'], {}), '(M)\n', (51594, 51597), True, 'import numpy as np\n'), ((4580, 4600), 'numpy.unique', 'np.unique', (['class_ids'], {}), '(class_ids)\n', (4589, 4600), True, 'import numpy as np\n'), ((4941, 4959), 'numpy.unique', 'np.unique', (['state_0'], {}), '(state_0)\n', (4950, 4959), True, 'import numpy as np\n'), ((5361, 5400), 'numpy.diag', 'np.diag', (['(1 / (row_sum + (row_sum == 0)))'], {}), '(1 / (row_sum + (row_sum == 0)))\n', (5368, 5400), True, 'import numpy as np\n'), ((5568, 5588), 'pysal.spatial_dynamics.ergodic.steady_state', 'STEADY_STATE', (['self.p'], {}), '(self.p)\n', (5580, 5588), True, 'from pysal.spatial_dynamics.ergodic import steady_state as STEADY_STATE\n'), ((17448, 17475), 'numpy.zeros', 'np.zeros', (['(permutations, 1)'], {}), '((permutations, 1))\n', (17456, 17475), True, 'import numpy as np\n'), ((18004, 18024), 'pysal.spatial_dynamics.ergodic.steady_state', 'STEADY_STATE', (['self.p'], {}), '(self.p)\n', (18016, 18024), True, 'from pysal.spatial_dynamics.ergodic import steady_state as STEADY_STATE\n'), ((18132, 18153), 'numpy.zeros_like', 'np.zeros_like', (['self.p'], {}), '(self.p)\n', (18145, 18153), True, 'import numpy as np\n'), ((18258, 18271), 'numpy.asarray', 'np.asarray', (['S'], {}), '(S)\n', (18268, 18271), True, 'import numpy as np\n'), ((18379, 18400), 'numpy.zeros_like', 'np.zeros_like', (['self.P'], {}), '(self.P)\n', (18392, 18400), True, 'import numpy as np\n'), ((18510, 18523), 'numpy.asarray', 'np.asarray', (['F'], {}), '(F)\n', (18520, 18523), True, 'import numpy as np\n'), ((22149, 22176), 'scipy.stats.chi2.cdf', 'stats.chi2.cdf', (['chi2', '(k - 1)'], {}), '(chi2, k - 1)\n', (22163, 22176), False, 'from scipy import stats\n'), ((35130, 35148), 'numpy.zeros_like', 'np.zeros_like', (['y.T'], {}), '(y.T)\n', (35143, 35148), True, 'import numpy as np\n'), ((36195, 36209), 'numpy.diag', 'np.diag', (['trans'], {}), '(trans)\n', (36202, 36209), True, 'import numpy as np\n'), ((40225, 40245), 'numpy.zeros', 'np.zeros', (['(n, k - 1)'], {}), '((n, k - 1))\n', (40233, 40245), True, 'import numpy as np\n'), ((40271, 40287), 'numpy.zeros', 'np.zeros', (['(n, k)'], {}), '((n, k))\n', (40279, 40287), True, 'import numpy as np\n'), ((46999, 47012), 'numpy.diag', 'np.diag', (['pmat'], {}), '(pmat)\n', (47006, 47012), True, 'import numpy as np\n'), ((51277, 51316), 'numpy.diag', 'np.diag', (['(1.0 / (n_i + (n_i == 0) * 1.0))'], {}), '(1.0 / (n_i + (n_i == 0) * 1.0))\n', (51284, 51316), True, 'import numpy as np\n'), ((52629, 52661), 'scipy.stats.chi2.cdf', 'stats.chi2.cdf', (['self.Q', 'self.dof'], {}), '(self.Q, self.dof)\n', (52643, 52661), False, 'from scipy import stats\n'), ((52718, 52751), 'scipy.stats.chi2.cdf', 'stats.chi2.cdf', (['self.LR', 'self.dof'], {}), '(self.LR, self.dof)\n', (52732, 52751), False, 'from scipy import stats\n'), ((5063, 5080), 'numpy.unique', 'np.unique', 
(['ending'], {}), '(ending)\n', (5072, 5080), True, 'import numpy as np\n'), ((16875, 16899), 'pysal.Quantiles', 'pysal.Quantiles', (['yf'], {'k': 'k'}), '(yf, k=k)\n', (16890, 16899), False, 'import pysal\n'), ((18220, 18235), 'pysal.spatial_dynamics.ergodic.steady_state', 'STEADY_STATE', (['p'], {}), '(p)\n', (18232, 18235), True, 'from pysal.spatial_dynamics.ergodic import steady_state as STEADY_STATE\n'), ((19835, 19871), 'scipy.stats.chi2.cdf', 'stats.chi2.cdf', (['self.x2', 'self.x2_dof'], {}), '(self.x2, self.x2_dof)\n', (19849, 19871), False, 'from scipy import stats\n'), ((34509, 34540), 'numpy.array', 'np.array', (['[mli.q for mli in ml]'], {}), '([mli.q for mli in ml])\n', (34517, 34540), True, 'import numpy as np\n'), ((35739, 35763), 'pysal.lag_spatial', 'pysal.lag_spatial', (['w', 'yt'], {}), '(w, yt)\n', (35756, 35763), False, 'import pysal\n'), ((36096, 36130), 'numpy.kron', 'np.kron', (['markov_y.p', 'markov_ylag.p'], {}), '(markov_y.p, markov_ylag.p)\n', (36103, 36130), True, 'import numpy as np\n'), ((40824, 40870), 'pysal.region.components.Graph', 'pysal.region.components.Graph', ([], {'undirected': '(True)'}), '(undirected=True)\n', (40853, 40870), False, 'import pysal\n'), ((41328, 41374), 'pysal.region.components.Graph', 'pysal.region.components.Graph', ([], {'undirected': '(True)'}), '(undirected=True)\n', (41357, 41374), False, 'import pysal\n'), ((51761, 51800), 'numpy.diag', 'np.diag', (['(1.0 / (nim + (nim == 0) * 1.0))'], {}), '(1.0 / (nim + (nim == 0) * 1.0))\n', (51768, 51800), True, 'import numpy as np\n'), ((51895, 51907), 'numpy.diag', 'np.diag', (['nim'], {}), '(nim)\n', (51902, 51907), True, 'import numpy as np\n'), ((52250, 52263), 'numpy.log', 'np.log', (['ratio'], {}), '(ratio)\n', (52256, 52263), True, 'import numpy as np\n'), ((18472, 18486), 'numpy.asmatrix', 'np.asmatrix', (['p'], {}), '(p)\n', (18483, 18486), True, 'import numpy as np\n'), ((20871, 20893), 'numpy.diag', 'np.diag', (['(1.0 / row_sum)'], {}), '(1.0 / row_sum)\n', (20878, 20893), True, 'import numpy as np\n'), ((20895, 20909), 'numpy.matrix', 'np.matrix', (['mat'], {}), '(mat)\n', (20904, 20909), True, 'import numpy as np\n'), ((34978, 35015), 'numpy.array', 'np.array', (['[mli.p_z_sim for mli in ml]'], {}), '([mli.p_z_sim for mli in ml])\n', (34986, 35015), True, 'import numpy as np\n'), ((44956, 44969), 'numpy.log', 'np.log', (['FdJd1'], {}), '(FdJd1)\n', (44962, 44969), True, 'import numpy as np\n'), ((45061, 45074), 'numpy.log', 'np.log', (['FIJd1'], {}), '(FIJd1)\n', (45067, 45074), True, 'import numpy as np\n'), ((20348, 20378), 'pysal.Quantiles', 'pysal.Quantiles', (['ly[:, i]'], {'k': 'k'}), '(ly[:, i], k=k)\n', (20363, 20378), False, 'import pysal\n'), ((20408, 20428), 'numpy.arange', 'np.arange', (['self.cols'], {}), '(self.cols)\n', (20417, 20428), True, 'import numpy as np\n'), ((40582, 40609), 'numpy.nonzero', 'np.nonzero', (['sig_lisas[:, i]'], {}), '(sig_lisas[:, i])\n', (40592, 40609), True, 'import numpy as np\n'), ((17005, 17034), 'pysal.Quantiles', 'pysal.Quantiles', (['y[:, i]'], {'k': 'k'}), '(y[:, i], k=k)\n', (17020, 17034), False, 'import pysal\n'), ((17075, 17090), 'numpy.arange', 'np.arange', (['cols'], {}), '(cols)\n', (17084, 17090), True, 'import numpy as np\n')]
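The prais and shorrock helpers in the module above reduce a Markov transition probability matrix to scalar mobility summaries (per the docstrings: pr_i = 1 - p_{i,i} per class, and sh = (k - sum_j p_{j,j}) / (k - 1) globally). A minimal sketch of both formulas in plain numpy, using an invented 3x3 matrix rather than the usjoin data from the doctests:

import numpy as np

# Illustrative transition probability matrix (made up, not from usjoin.csv);
# rows sum to 1, and the diagonal entries are the "stayer" probabilities.
p = np.array([[0.9, 0.1, 0.0],
              [0.1, 0.8, 0.1],
              [0.0, 0.1, 0.9]])

prais = 1.0 - np.diag(p)                      # per-class conditional mobility
k = p.shape[1]
shorrock = (k - np.trace(p)) / (k - 1)      # global mobility in [0, k/(k-1)]

print(prais)     # -> [0.1 0.2 0.1] (up to float noise)
print(shorrock)  # -> ~0.2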
import numpy as np

x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
[ "numpy.sin", "numpy.linspace" ]
[((24, 54), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (35, 54), True, 'import numpy as np\n'), ((57, 66), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (63, 66), True, 'import numpy as np\n')]
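Each extract_api entry above pairs a character span in the row's code cell with the API call found there, followed by the parsed arguments, the argument span, a flag, and the originating import line; this field layout is inferred from the rows themselves, not from a published schema. A short sketch of how the spans index into the code string:

# Hedged sketch: the span semantics below are inferred from the rows above.
# End offsets can overshoot slightly where the extractor recorded a
# whitespace-normalized call (e.g. '2*np.pi' stored as '2 * np.pi').
code = "import numpy as np\n\nx = np.linspace(0, 2*np.pi, 100)\ny = np.sin(x)\n"
entries = [((24, 54), 'numpy.linspace'), ((57, 66), 'numpy.sin')]

for (start, end), api in entries:
    call = code[start:end].split('\n')[0]  # trim any overshoot past the line
    print(api, '->', call)
# numpy.linspace -> np.linspace(0, 2*np.pi, 100)
# numpy.sin -> np.sin(x)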
import math
import os
import re

import numpy as np

from BluPrintTriboSys import TriboSys
from Constants import PltOpts, SubDir, TexTempl, UnitTex, Unit, PrintOpts, \
    PreSol
from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, \
    plt_2d_scatt_line, \
    plt_energy_ring_on_ring, plt_profile_approx
from generate_latex_output import get_calc_specific_latex_template
from hertz_equations import hertz_displ
from influ_matrix_management import load_influ_mat, cache_influ_mat
from solve_half_space import solve_half_space, pre_solve_half_space
from system_functions import print_it, to_preci, exit_program, save_to_matlab


class RingOnRing(TriboSys):
    """Global tribosystem data"""

    def __init__(self, num_planets, sun, planet, global_force,
                 setup_name='ring-on-ring'):
        super().__init__(setup_name, global_force, None)

        self.num_planets = num_planets
        self.sun = sun
        self.planet = planet

        self.sun.norm_forces = None
        self.init_force = None
        self.planet_slip = None
        self.rot_velocity = None
        self.rot_velocity2 = None
        self.sun_rot_vel = None
        self.planet_rot_vel = None
        self.sliding_vel = None
        self.sun.press = None
        self.sun.max_press = None
        self.sun.rot_vel = None
        self.planet.rot_vel = None
        self.sun.omega = None
        self.planet.omega = None
        self.sun.vel = None
        self.planet.vel = None
        self.slip = None
        self.rel_vel = None
        self.pv = None
        self.influ_mat_db_1 = None
        self.press_zone_len = None

    def calc_load_distribution(self, ui=None, res_dir=None):
        """Calculate load distribution"""
        print_it("calculating load distribution")
        self.sun.norm_forces = np.multiply(np.ones(self.num_planets),
                                           self.global_force /
                                           self.num_planets)
        self.init_force = self.global_force

    def get_grid_size(self, ui, res_dir):
        """Determine grid size by running (quick) simulation with simplified
        contact bodies"""
        print_it('determining grid size', PrintOpts.lvl1.value)
        self.sun.simple_clone()
        self.sun.clone.make_profile(PreSol.res_x.value, PreSol.res_y.value,
                                    self.init_force)
        self.planet.simple_clone()
        self.planet.clone.make_slave_to(self.sun.clone)

        init_displ = hertz_displ(self.sun.clone.e, self.planet.e,
                                 self.sun.clone.ny, self.planet.ny,
                                 self.sun.clone.r_hertz_x,
                                 self.sun.clone.r_hertz_y,
                                 self.planet.clone.r_hertz_x,
                                 self.planet.clone.r_hertz_y,
                                 self.sun.norm_forces[0])
        too_many_els_in_y = 1
        too_many_els_in_x = 1
        contact_width_y = 0.05
        contact_width_x = 0.05
        while too_many_els_in_y != 0 or \
                too_many_els_in_x != 0:
            self.sun.clone.make_profile(self.sun.clone.res_x,
                                        self.sun.clone.res_y,
                                        self.init_force,
                                        contact_width=contact_width_y,
                                        contact_length=contact_width_x)
            self.planet.clone.make_slave_to(self.sun.clone)
            pressure, init_displ = \
                pre_solve_half_space(self.sun.clone.profile,
                                     self.planet.clone.profile,
                                     self.sun.clone.x_axis,
                                     self.sun.clone.y_axis,
                                     self.sun.clone.res_x,
                                     self.sun.clone.res_y,
                                     self.sun.clone.delta_x,
                                     self.sun.clone.delta_y,
                                     self.sun.clone.e, self.planet.clone.e,
                                     self.sun.clone.ny, self.planet.clone.ny,
                                     self.sun.norm_forces[0],
                                     init_displ=init_displ,
                                     print_prog=False)

            pressure_els_y = sum(
                pressure[math.floor(self.sun.clone.res_y / 2), :] > 0)
            too_many_els_in_y = self.sun.clone.res_y - pressure_els_y - 2
            if too_many_els_in_y:
                contact_width_y += -np.sign(
                    too_many_els_in_y) * contact_width_y / 25

            pressure_els_x = sum(
                pressure[:, math.floor(self.sun.clone.res_x / 2)] > 0)
            too_many_els_in_x = self.sun.clone.res_x - pressure_els_x - 2
            if too_many_els_in_x:
                contact_width_x += -np.sign(
                    too_many_els_in_x) * contact_width_x / 25

        self.sun.make_profile(self.sun.res_x, self.sun.res_y, self.init_force,
                              contact_width=contact_width_y,
                              contact_length=contact_width_x)
        self.planet.make_slave_to(self.sun)
        return init_displ

    def calc_contact_pressure(self, ui=None, res_dir=None):
        """Calculate contact pressure distribution between sun and planet
        ring(s)"""
        print_it('calculating 1 pressure distribution')
        init_displ = self.get_grid_size(ui, res_dir)
        [self.influ_mat_db_1] = load_influ_mat(ui, res_dir, 1)
        print_it('solving first half space', PrintOpts.lvl1.value)
        self.sun.press, self.influ_mat_db_1 = \
            solve_half_space(self.sun.profile, self.planet.profile,
                             self.sun.x_axis, self.sun.y_axis,
                             self.sun.res_x, self.sun.res_y,
                             self.sun.delta_x, self.sun.delta_y,
                             self.sun.e, self.planet.e,
                             self.sun.ny, self.planet.ny,
                             self.sun.norm_forces[0], res_dir,
                             init_displ=init_displ,
                             influ_mat_db=self.influ_mat_db_1)
        cache_influ_mat(ui, [self.influ_mat_db_1], res_dir)

        self.sun.max_press = np.amax(self.sun.press, axis=1)
        dat_dict = dict(x_axis=self.sun.x_axis,
                        y_axis=self.sun.y_axis,
                        contact_pressure=self.sun.press)
        save_to_matlab(dat_dict, res_dir, 'pressure_field')

    def calc_kinematics(self, rot_vel1, rot_vel2, ui=None, res_dir=None):
        """Calculate tribosystem kinematics based on rotational velocities of
        sun and planet(s)"""
        print_it("calculating kinematics")
        self.sun.rot_vel = rot_vel1
        self.planet.rot_vel = rot_vel2
        self.sun.omega = self.sun.rot_vel / 60
        self.planet.omega = self.planet.rot_vel / 60
        self.sun.vel = self.sun.diameter * math.pi * self.sun.omega
        self.planet.vel = self.planet.diameter * math.pi * self.planet.omega
        self.slip = (self.sun.vel - self.planet.vel) / self.sun.vel
        self.rel_vel = np.ones(self.sun.res_x) * (
            self.sun.vel - self.planet.vel)

        self.sun.footpr_vel = \
            2 * math.pi * self.sun.diameter / 2 * self.sun.omega
        self.planet.footpr_vel = \
            2 * math.pi * self.planet.diameter / 2 * self.planet.omega
        try:
            self.sun.overroll_t_incr = self.sun.delta_y / self.sun.footpr_vel
            self.planet.overroll_t_incr = \
                self.planet.delta_y / self.planet.footpr_vel
        except ZeroDivisionError:
            exit_program(
                'rotational velocities of sun and planet must not be 0')

        self.press_zone_len = (self.sun.press > 0).sum(1) * self.sun.delta_y
        self.sun.overroll_t = np.divide(self.press_zone_len,
                                        self.sun.footpr_vel)
        self.planet.overroll_t = np.divide(self.press_zone_len,
                                           self.planet.footpr_vel)
        self.sun.no_overroll_t = np.divide(
            (2 * math.pi * (self.sun.diameter / 2) -
             self.num_planets * self.press_zone_len) / self.num_planets,
            self.sun.footpr_vel)
        self.planet.no_overroll_t = np.divide(
            (2 * math.pi * (self.planet.diameter / 2) - self.press_zone_len),
            self.planet.footpr_vel)

    def calc_pv(self, ui=None, res_dir=None):
        """Calculate product of local maximum pressure and local maximum
        relative velocity"""
        print_it("calculating pv_rel")
        self.pv = np.multiply(abs(self.rel_vel), self.sun.max_press) / 1000
        dat_dict = dict(x_axis=self.sun.x_axis, pv_rel=self.pv)
        save_to_matlab(dat_dict, res_dir, 'pv-rel')

    def calc_e_akin(self, ui=None, res_dir=None):
        """Calculate the kinetic friction energy accumulation in W per m^2"""
        print_it("calculating e_a,kin")
        pv_local = np.multiply(self.sun.press.sum(1), self.rel_vel)
        self.sun.e_akin = np.absolute(
            np.divide(np.multiply(pv_local, self.sun.overroll_t_incr),
                      self.sun.no_overroll_t)) / 1000
        self.planet.e_akin = np.absolute(
            np.divide(np.multiply(pv_local, self.planet.overroll_t_incr),
                      self.planet.no_overroll_t)) / 1000

    def plot_it(self, ui=None, res_dir=None):
        """Orchestrate output plot generation"""
        print_it("plotting results")
        plt_profile(self.sun, PltOpts.DD.value, res_dir, SubDir.profiles.value)
        plt_profile(self.sun, PltOpts.DDD.value, res_dir,
                    SubDir.profiles.value)
        plt_profile(self.planet, PltOpts.DD.value, res_dir,
                    SubDir.profiles.value)
        plt_profile(self.planet, PltOpts.DDD.value, res_dir,
                    SubDir.profiles.value)
        plt_profile_approx(res_dir, SubDir.profiles.value)
        plt_contact(self.sun, self.planet, PltOpts.DD.value, res_dir,
                    SubDir.contacts.value)
        plt_contact(self.sun, self.planet, PltOpts.DDD.value, res_dir,
                    SubDir.contacts.value)
        plt_3d(self.sun.x_axis, self.sun.y_axis, self.sun.press,
               self.sun.x_label, self.sun.y_label, 'pressure in MPa',
               'contact_pressure_sun', res_dir, SubDir.pressures.value,
               'contact_pressure_sun')
        plt_2d_scatt_line(self.sun.x_axis, self.pv, self.sun.x_axis, self.pv,
                          self.sun.x_label,
                          'pv_rel in {}'.format(Unit.pvrel.value), 'pv_rel',
                          res_dir, SubDir.energy.value, 'pv_rel')
        plt_2d_scatt_line(self.sun.x_axis, self.sun.e_akin, self.sun.x_axis,
                          self.sun.e_akin, self.sun.x_label,
                          'e_akin in {}'.format(Unit.eakin.value), 'e_akin',
                          res_dir, SubDir.energy.value, 'sun.e_akin')
        plt_2d_scatt_line(self.planet.x_axis, self.planet.e_akin,
                          self.planet.x_axis, self.planet.e_akin,
                          self.planet.x_label,
                          'e_akin in {}'.format(Unit.eakin.value), 'e_akin',
                          res_dir, SubDir.energy.value, 'planet.e_akin')
        plt_energy_ring_on_ring(self, res_dir, SubDir.energy.value,
                                'e-akin-vs-pv-rel')

    def generate_latex_output(self, calc_spec_tex_file_handle, sim, ui=None,
                              res_dir=None):
        """Generate calculation-specific part of the LaTeX output file"""
        average_pressure = np.mean(self.sun.press[self.sun.press > 0])
        numeric_output_data = [
            ('pressure, max.', to_preci(np.amax(self.sun.press), 4),
             UnitTex.pressure.value, 'unverified'),
            ('pressure, av.', to_preci(average_pressure, 4),
             UnitTex.pressure.value, 'unverified'),
            ('e_a,kin sun, max.', to_preci(np.amax(self.sun.e_akin), 4),
             UnitTex.eakin.value, 'unverified'),
            ('e_a,kin planet, max.', to_preci(np.amax(self.planet.e_akin), 4),
             UnitTex.eakin.value, 'unverified'),
            ('pv_rel, max.', to_preci(np.amax(self.pv), 4),
             UnitTex.pvrel.value, 'unverified'),
            ('contact area', to_preci(self.sun.get_area(self.sun.press), 4),
             UnitTex.area.value, 'unverified')]
        table_calc_summary = []
        for key, value, unit, status in sorted(numeric_output_data):
            table_calc_summary.append(
                (re.sub('_', '\_', key), value, unit, status))
        latex_variables = {'table_calc_summary': table_calc_summary,
                           'contact_plot1': '{}{}contact1.png'.format(
                               SubDir.tex_figs_rel_to_tex_file.value, '/'),
                           'pressure_plot1': '{}{}pressure1.png'.format(
                               SubDir.tex_figs_rel_to_tex_file.value, '/'),
                           'energy_plot1': '{}{}energy1.png'.format(
                               SubDir.tex_figs_rel_to_tex_file.value, '/')}
        template_calc_specific = get_calc_specific_latex_template(
            TexTempl.RingOnRing.value, sim)
        with open(calc_spec_tex_file_handle, 'w') as f:
            f.write(template_calc_specific.render(latex_variables))

    def generate_latex_figures(self, ui=None, res_dir=None):
        """Generate calculation-specific figures for LaTeX report"""
        plt_contact(self.sun, self.planet, PltOpts.DDD.value, res_dir,
                    SubDir.tex_figs.value, 'contact1')
        plt_profile_approx(res_dir, SubDir.tex_figs.value)
        plt_3d(self.sun.x_axis, self.sun.y_axis, self.sun.press,
               self.sun.x_label, self.sun.y_label, 'pressure in MPa',
               'contact_pressure_sun', res_dir, SubDir.tex_figs.value,
               'pressure1')
        plt_energy_ring_on_ring(self, res_dir, SubDir.tex_figs.value,
                                'energy1')
[ "math.floor", "generate_latex_output.get_calc_specific_latex_template", "cartesian_plot_functions.plt_energy_ring_on_ring", "system_functions.exit_program", "system_functions.print_it", "system_functions.to_preci", "influ_matrix_management.load_influ_mat", "hertz_equations.hertz_displ", "numpy.divid...
[((1803, 1844), 'system_functions.print_it', 'print_it', (['"""calculating load distribution"""'], {}), "('calculating load distribution')\n", (1811, 1844), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((2202, 2257), 'system_functions.print_it', 'print_it', (['"""determining grid size"""', 'PrintOpts.lvl1.value'], {}), "('determining grid size', PrintOpts.lvl1.value)\n", (2210, 2257), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((2539, 2763), 'hertz_equations.hertz_displ', 'hertz_displ', (['self.sun.clone.e', 'self.planet.e', 'self.sun.clone.ny', 'self.planet.ny', 'self.sun.clone.r_hertz_x', 'self.sun.clone.r_hertz_y', 'self.planet.clone.r_hertz_x', 'self.planet.clone.r_hertz_y', 'self.sun.norm_forces[0]'], {}), '(self.sun.clone.e, self.planet.e, self.sun.clone.ny, self.planet\n .ny, self.sun.clone.r_hertz_x, self.sun.clone.r_hertz_y, self.planet.\n clone.r_hertz_x, self.planet.clone.r_hertz_y, self.sun.norm_forces[0])\n', (2550, 2763), False, 'from hertz_equations import hertz_displ\n'), ((5410, 5457), 'system_functions.print_it', 'print_it', (['"""calculating 1 pressure distribution"""'], {}), "('calculating 1 pressure distribution')\n", (5418, 5457), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((5545, 5575), 'influ_matrix_management.load_influ_mat', 'load_influ_mat', (['ui', 'res_dir', '(1)'], {}), '(ui, res_dir, 1)\n', (5559, 5575), False, 'from influ_matrix_management import load_influ_mat, cache_influ_mat\n'), ((5585, 5643), 'system_functions.print_it', 'print_it', (['"""solving first half space"""', 'PrintOpts.lvl1.value'], {}), "('solving first half space', PrintOpts.lvl1.value)\n", (5593, 5643), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((5706, 6028), 'solve_half_space.solve_half_space', 'solve_half_space', (['self.sun.profile', 'self.planet.profile', 'self.sun.x_axis', 'self.sun.y_axis', 'self.sun.res_x', 'self.sun.res_y', 'self.sun.delta_x', 'self.sun.delta_y', 'self.sun.e', 'self.planet.e', 'self.sun.ny', 'self.planet.ny', 'self.sun.norm_forces[0]', 'res_dir'], {'init_displ': 'init_displ', 'influ_mat_db': 'self.influ_mat_db_1'}), '(self.sun.profile, self.planet.profile, self.sun.x_axis,\n self.sun.y_axis, self.sun.res_x, self.sun.res_y, self.sun.delta_x, self\n .sun.delta_y, self.sun.e, self.planet.e, self.sun.ny, self.planet.ny,\n self.sun.norm_forces[0], res_dir, init_displ=init_displ, influ_mat_db=\n self.influ_mat_db_1)\n', (5722, 6028), False, 'from solve_half_space import solve_half_space, pre_solve_half_space\n'), ((6232, 6283), 'influ_matrix_management.cache_influ_mat', 'cache_influ_mat', (['ui', '[self.influ_mat_db_1]', 'res_dir'], {}), '(ui, [self.influ_mat_db_1], res_dir)\n', (6247, 6283), False, 'from influ_matrix_management import load_influ_mat, cache_influ_mat\n'), ((6314, 6345), 'numpy.amax', 'np.amax', (['self.sun.press'], {'axis': '(1)'}), '(self.sun.press, axis=1)\n', (6321, 6345), True, 'import numpy as np\n'), ((6513, 6564), 'system_functions.save_to_matlab', 'save_to_matlab', (['dat_dict', 'res_dir', '"""pressure_field"""'], {}), "(dat_dict, res_dir, 'pressure_field')\n", (6527, 6564), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((6760, 6794), 'system_functions.print_it', 'print_it', (['"""calculating kinematics"""'], {}), "('calculating kinematics')\n", (6768, 6794), False, 'from system_functions import print_it, to_preci, 
exit_program, save_to_matlab\n'), ((7939, 7990), 'numpy.divide', 'np.divide', (['self.press_zone_len', 'self.sun.footpr_vel'], {}), '(self.press_zone_len, self.sun.footpr_vel)\n', (7948, 7990), True, 'import numpy as np\n'), ((8066, 8120), 'numpy.divide', 'np.divide', (['self.press_zone_len', 'self.planet.footpr_vel'], {}), '(self.press_zone_len, self.planet.footpr_vel)\n', (8075, 8120), True, 'import numpy as np\n'), ((8199, 8335), 'numpy.divide', 'np.divide', (['((2 * math.pi * (self.sun.diameter / 2) - self.num_planets * self.\n press_zone_len) / self.num_planets)', 'self.sun.footpr_vel'], {}), '((2 * math.pi * (self.sun.diameter / 2) - self.num_planets * self.\n press_zone_len) / self.num_planets, self.sun.footpr_vel)\n', (8208, 8335), True, 'import numpy as np\n'), ((8396, 8497), 'numpy.divide', 'np.divide', (['(2 * math.pi * (self.planet.diameter / 2) - self.press_zone_len)', 'self.planet.footpr_vel'], {}), '(2 * math.pi * (self.planet.diameter / 2) - self.press_zone_len,\n self.planet.footpr_vel)\n', (8405, 8497), True, 'import numpy as np\n'), ((8685, 8715), 'system_functions.print_it', 'print_it', (['"""calculating pv_rel"""'], {}), "('calculating pv_rel')\n", (8693, 8715), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((8892, 8935), 'system_functions.save_to_matlab', 'save_to_matlab', (['dat_dict', 'res_dir', '"""pv-rel"""'], {}), "(dat_dict, res_dir, 'pv-rel')\n", (8906, 8935), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((9078, 9109), 'system_functions.print_it', 'print_it', (['"""calculating e_a,kin"""'], {}), "('calculating e_a,kin')\n", (9086, 9109), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((9630, 9658), 'system_functions.print_it', 'print_it', (['"""plotting results"""'], {}), "('plotting results')\n", (9638, 9658), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((9668, 9739), 'cartesian_plot_functions.plt_profile', 'plt_profile', (['self.sun', 'PltOpts.DD.value', 'res_dir', 'SubDir.profiles.value'], {}), '(self.sun, PltOpts.DD.value, res_dir, SubDir.profiles.value)\n', (9679, 9739), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((9749, 9821), 'cartesian_plot_functions.plt_profile', 'plt_profile', (['self.sun', 'PltOpts.DDD.value', 'res_dir', 'SubDir.profiles.value'], {}), '(self.sun, PltOpts.DDD.value, res_dir, SubDir.profiles.value)\n', (9760, 9821), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((9831, 9905), 'cartesian_plot_functions.plt_profile', 'plt_profile', (['self.planet', 'PltOpts.DD.value', 'res_dir', 'SubDir.profiles.value'], {}), '(self.planet, PltOpts.DD.value, res_dir, SubDir.profiles.value)\n', (9842, 9905), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((9936, 10011), 'cartesian_plot_functions.plt_profile', 'plt_profile', (['self.planet', 'PltOpts.DDD.value', 'res_dir', 'SubDir.profiles.value'], {}), '(self.planet, PltOpts.DDD.value, res_dir, SubDir.profiles.value)\n', (9947, 10011), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((10042, 10092), 
'cartesian_plot_functions.plt_profile_approx', 'plt_profile_approx', (['res_dir', 'SubDir.profiles.value'], {}), '(res_dir, SubDir.profiles.value)\n', (10060, 10092), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((10102, 10191), 'cartesian_plot_functions.plt_contact', 'plt_contact', (['self.sun', 'self.planet', 'PltOpts.DD.value', 'res_dir', 'SubDir.contacts.value'], {}), '(self.sun, self.planet, PltOpts.DD.value, res_dir, SubDir.\n contacts.value)\n', (10113, 10191), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((10217, 10307), 'cartesian_plot_functions.plt_contact', 'plt_contact', (['self.sun', 'self.planet', 'PltOpts.DDD.value', 'res_dir', 'SubDir.contacts.value'], {}), '(self.sun, self.planet, PltOpts.DDD.value, res_dir, SubDir.\n contacts.value)\n', (10228, 10307), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((10335, 10535), 'cartesian_plot_functions.plt_3d', 'plt_3d', (['self.sun.x_axis', 'self.sun.y_axis', 'self.sun.press', 'self.sun.x_label', 'self.sun.y_label', '"""pressure in MPa"""', '"""contact_pressure_sun"""', 'res_dir', 'SubDir.pressures.value', '"""contact_pressure_sun"""'], {}), "(self.sun.x_axis, self.sun.y_axis, self.sun.press, self.sun.x_label,\n self.sun.y_label, 'pressure in MPa', 'contact_pressure_sun', res_dir,\n SubDir.pressures.value, 'contact_pressure_sun')\n", (10341, 10535), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((11504, 11583), 'cartesian_plot_functions.plt_energy_ring_on_ring', 'plt_energy_ring_on_ring', (['self', 'res_dir', 'SubDir.energy.value', '"""e-akin-vs-pv-rel"""'], {}), "(self, res_dir, SubDir.energy.value, 'e-akin-vs-pv-rel')\n", (11527, 11583), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((11846, 11889), 'numpy.mean', 'np.mean', (['self.sun.press[self.sun.press > 0]'], {}), '(self.sun.press[self.sun.press > 0])\n', (11853, 11889), True, 'import numpy as np\n'), ((13413, 13477), 'generate_latex_output.get_calc_specific_latex_template', 'get_calc_specific_latex_template', (['TexTempl.RingOnRing.value', 'sim'], {}), '(TexTempl.RingOnRing.value, sim)\n', (13445, 13477), False, 'from generate_latex_output import get_calc_specific_latex_template\n'), ((13761, 13863), 'cartesian_plot_functions.plt_contact', 'plt_contact', (['self.sun', 'self.planet', 'PltOpts.DDD.value', 'res_dir', 'SubDir.tex_figs.value', '"""contact1"""'], {}), "(self.sun, self.planet, PltOpts.DDD.value, res_dir, SubDir.\n tex_figs.value, 'contact1')\n", (13772, 13863), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((13889, 13939), 'cartesian_plot_functions.plt_profile_approx', 'plt_profile_approx', (['res_dir', 'SubDir.tex_figs.value'], {}), '(res_dir, SubDir.tex_figs.value)\n', (13907, 13939), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((13949, 14137), 'cartesian_plot_functions.plt_3d', 'plt_3d', (['self.sun.x_axis', 'self.sun.y_axis', 'self.sun.press', 'self.sun.x_label', 
'self.sun.y_label', '"""pressure in MPa"""', '"""contact_pressure_sun"""', 'res_dir', 'SubDir.tex_figs.value', '"""pressure1"""'], {}), "(self.sun.x_axis, self.sun.y_axis, self.sun.press, self.sun.x_label,\n self.sun.y_label, 'pressure in MPa', 'contact_pressure_sun', res_dir,\n SubDir.tex_figs.value, 'pressure1')\n", (13955, 14137), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((14187, 14259), 'cartesian_plot_functions.plt_energy_ring_on_ring', 'plt_energy_ring_on_ring', (['self', 'res_dir', 'SubDir.tex_figs.value', '"""energy1"""'], {}), "(self, res_dir, SubDir.tex_figs.value, 'energy1')\n", (14210, 14259), False, 'from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, plt_2d_scatt_line, plt_energy_ring_on_ring, plt_profile_approx\n'), ((1889, 1914), 'numpy.ones', 'np.ones', (['self.num_planets'], {}), '(self.num_planets)\n', (1896, 1914), True, 'import numpy as np\n'), ((3572, 3944), 'solve_half_space.pre_solve_half_space', 'pre_solve_half_space', (['self.sun.clone.profile', 'self.planet.clone.profile', 'self.sun.clone.x_axis', 'self.sun.clone.y_axis', 'self.sun.clone.res_x', 'self.sun.clone.res_y', 'self.sun.clone.delta_x', 'self.sun.clone.delta_y', 'self.sun.clone.e', 'self.planet.clone.e', 'self.sun.clone.ny', 'self.planet.clone.ny', 'self.sun.norm_forces[0]'], {'init_displ': 'init_displ', 'print_prog': '(False)'}), '(self.sun.clone.profile, self.planet.clone.profile,\n self.sun.clone.x_axis, self.sun.clone.y_axis, self.sun.clone.res_x,\n self.sun.clone.res_y, self.sun.clone.delta_x, self.sun.clone.delta_y,\n self.sun.clone.e, self.planet.clone.e, self.sun.clone.ny, self.planet.\n clone.ny, self.sun.norm_forces[0], init_displ=init_displ, print_prog=False)\n', (3592, 3944), False, 'from solve_half_space import solve_half_space, pre_solve_half_space\n'), ((7214, 7237), 'numpy.ones', 'np.ones', (['self.sun.res_x'], {}), '(self.sun.res_x)\n', (7221, 7237), True, 'import numpy as np\n'), ((7740, 7809), 'system_functions.exit_program', 'exit_program', (['"""rotational velocities of sun and planet must not be 0"""'], {}), "('rotational velocities of sun and planet must not be 0')\n", (7752, 7809), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((12077, 12106), 'system_functions.to_preci', 'to_preci', (['average_pressure', '(4)'], {}), '(average_pressure, 4)\n', (12085, 12106), False, 'from system_functions import print_it, to_preci, exit_program, save_to_matlab\n'), ((9242, 9289), 'numpy.multiply', 'np.multiply', (['pv_local', 'self.sun.overroll_t_incr'], {}), '(pv_local, self.sun.overroll_t_incr)\n', (9253, 9289), True, 'import numpy as np\n'), ((9412, 9462), 'numpy.multiply', 'np.multiply', (['pv_local', 'self.planet.overroll_t_incr'], {}), '(pv_local, self.planet.overroll_t_incr)\n', (9423, 9462), True, 'import numpy as np\n'), ((11964, 11987), 'numpy.amax', 'np.amax', (['self.sun.press'], {}), '(self.sun.press)\n', (11971, 11987), True, 'import numpy as np\n'), ((12205, 12229), 'numpy.amax', 'np.amax', (['self.sun.e_akin'], {}), '(self.sun.e_akin)\n', (12212, 12229), True, 'import numpy as np\n'), ((12332, 12359), 'numpy.amax', 'np.amax', (['self.planet.e_akin'], {}), '(self.planet.e_akin)\n', (12339, 12359), True, 'import numpy as np\n'), ((12454, 12470), 'numpy.amax', 'np.amax', (['self.pv'], {}), '(self.pv)\n', (12461, 12470), True, 'import numpy as np\n'), ((12814, 12837), 're.sub', 're.sub', (['"""_"""', '"""\\\\_"""', 
'key'], {}), "('_', '\\\\_', key)\n", (12820, 12837), False, 'import re\n'), ((4371, 4407), 'math.floor', 'math.floor', (['(self.sun.clone.res_y / 2)'], {}), '(self.sun.clone.res_y / 2)\n', (4381, 4407), False, 'import math\n'), ((4564, 4590), 'numpy.sign', 'np.sign', (['too_many_els_in_y'], {}), '(too_many_els_in_y)\n', (4571, 4590), True, 'import numpy as np\n'), ((4702, 4738), 'math.floor', 'math.floor', (['(self.sun.clone.res_x / 2)'], {}), '(self.sun.clone.res_x / 2)\n', (4712, 4738), False, 'import math\n'), ((4892, 4918), 'numpy.sign', 'np.sign', (['too_many_els_in_x'], {}), '(too_many_els_in_x)\n', (4899, 4918), True, 'import numpy as np\n')]
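Every extract_api record above follows one layout: the character span of the whole call, the dotted API path, the call text as written in the source, the parsed (args, kwargs) pair rendered as strings, the raw argument text, the span of the parenthesised argument region, a flag that appears to be True for aliased imports (import numpy as np) and False otherwise, and the originating import line. A minimal sketch of consuming one record — the sample source and record below are made up, and the field order is inferred from the tuples themselves, not from a published schema:

# hypothetical record in the layout seen above
source = "import numpy as np\nx = np.mean(a, axis=0)\n"
record = ((23, 41), 'numpy.mean', 'np.mean', (['a'], {'axis': '0'}),
          '(a, axis=0)\n', (30, 41), True, 'import numpy as np\n')

(span, dotted, call_text, (args, kwargs),
 arg_repr, arg_span, aliased, import_line) = record
start, end = span
assert source[start:end] == 'np.mean(a, axis=0)'
print(dotted, args, kwargs)   # numpy.mean ['a'] {'axis': '0'}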
from lumicks.pylake.detail.utilities import * import pytest import matplotlib as mpl import numpy as np def test_first(): assert(first((1, 2, 3), condition=lambda x: x % 2 == 0) == 2) assert(first(range(3, 100)) == 3) with pytest.raises(StopIteration): first((1, 2, 3), condition=lambda x: x % 5 == 0) with pytest.raises(StopIteration): first(()) def test_unique(): uiq = unique(['str', 'str', 'hmm', 'potato', 'hmm', 'str']) assert(uiq == ['str', 'hmm', 'potato']) def test_colors(): [mpl.colors.to_rgb(get_color(k)) for k in range(30)] np.testing.assert_allclose(lighten_color([0.5, 0, 0], .2), [.7, 0, 0]) def test_find_contiguous(): def check_blocks_are_true(mask, ranges): for rng in ranges: assert np.all(mask[slice(*rng)]) data = np.array([0, 1, 2, 3, 4, 5, 4, 3, 2, 1, 0]) mask = data ranges, lengths = find_contiguous(mask) assert np.all(np.equal(ranges, [[1, 10]])) assert np.all(np.equal(lengths, [9])) check_blocks_are_true(mask, ranges) mask = data < 10 ranges, lengths = find_contiguous(mask) assert np.all(np.equal(ranges, [[0, 11]])) assert np.all(np.equal(lengths, [11])) check_blocks_are_true(mask, ranges) mask = data > 10 ranges, lengths = find_contiguous(mask) assert len(ranges) == 0 assert len(lengths) == 0 check_blocks_are_true(mask, ranges) mask = data < 4 ranges, lengths = find_contiguous(mask) assert np.all(np.equal(ranges, [[0, 4], [7, 11]])) assert np.all(np.equal(lengths, [4, 4])) check_blocks_are_true(mask, ranges) data = np.arange(10) mask = data <= 5 ranges, lengths = find_contiguous(mask) assert np.all(np.equal(ranges, [[0, 6]])) assert np.all(np.equal(lengths, [6])) check_blocks_are_true(mask, ranges) mask = data >= 5 ranges, lengths = find_contiguous(mask) assert np.all(np.equal(ranges, [[5, 10]])) assert np.all(np.equal(lengths, [5])) check_blocks_are_true(mask, ranges) @pytest.mark.parametrize( "data,factor,avg,std", [ [np.arange(10), 2, [0.5, 2.5, 4.5, 6.5, 8.5], [0.5, 0.5, 0.5, 0.5, 0.5]], [np.arange(0, 10, 2), 1, [0.0, 2.0, 4.0, 6.0, 8.0], [0.0, 0.0, 0.0, 0.0, 0.0]], [np.arange(0, 10, 2), 2, [1.0, 5.0], [1.0, 1.0]], [np.arange(0, 11, 2), 2, [1.0, 5.0, 9.0], [1.0, 1.0, 1.0]], ], ) def test_downsample(data, factor, avg, std): np.testing.assert_allclose(avg, downsample(data, factor, reduce=np.mean)) np.testing.assert_allclose(std, downsample(data, factor, reduce=np.std))
[ "numpy.array", "numpy.equal", "pytest.raises", "numpy.arange" ]
[((823, 866), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 4, 3, 2, 1, 0]'], {}), '([0, 1, 2, 3, 4, 5, 4, 3, 2, 1, 0])\n', (831, 866), True, 'import numpy as np\n'), ((1673, 1686), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1682, 1686), True, 'import numpy as np\n'), ((238, 266), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (251, 266), False, 'import pytest\n'), ((334, 362), 'pytest.raises', 'pytest.raises', (['StopIteration'], {}), '(StopIteration)\n', (347, 362), False, 'import pytest\n'), ((946, 973), 'numpy.equal', 'np.equal', (['ranges', '[[1, 10]]'], {}), '(ranges, [[1, 10]])\n', (954, 973), True, 'import numpy as np\n'), ((993, 1015), 'numpy.equal', 'np.equal', (['lengths', '[9]'], {}), '(lengths, [9])\n', (1001, 1015), True, 'import numpy as np\n'), ((1145, 1172), 'numpy.equal', 'np.equal', (['ranges', '[[0, 11]]'], {}), '(ranges, [[0, 11]])\n', (1153, 1172), True, 'import numpy as np\n'), ((1192, 1215), 'numpy.equal', 'np.equal', (['lengths', '[11]'], {}), '(lengths, [11])\n', (1200, 1215), True, 'import numpy as np\n'), ((1503, 1538), 'numpy.equal', 'np.equal', (['ranges', '[[0, 4], [7, 11]]'], {}), '(ranges, [[0, 4], [7, 11]])\n', (1511, 1538), True, 'import numpy as np\n'), ((1594, 1619), 'numpy.equal', 'np.equal', (['lengths', '[4, 4]'], {}), '(lengths, [4, 4])\n', (1602, 1619), True, 'import numpy as np\n'), ((1771, 1797), 'numpy.equal', 'np.equal', (['ranges', '[[0, 6]]'], {}), '(ranges, [[0, 6]])\n', (1779, 1797), True, 'import numpy as np\n'), ((1817, 1839), 'numpy.equal', 'np.equal', (['lengths', '[6]'], {}), '(lengths, [6])\n', (1825, 1839), True, 'import numpy as np\n'), ((1965, 1992), 'numpy.equal', 'np.equal', (['ranges', '[[5, 10]]'], {}), '(ranges, [[5, 10]])\n', (1973, 1992), True, 'import numpy as np\n'), ((2012, 2034), 'numpy.equal', 'np.equal', (['lengths', '[5]'], {}), '(lengths, [5])\n', (2020, 2034), True, 'import numpy as np\n'), ((2146, 2159), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2155, 2159), True, 'import numpy as np\n'), ((2228, 2247), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(2)'], {}), '(0, 10, 2)\n', (2237, 2247), True, 'import numpy as np\n'), ((2316, 2335), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(2)'], {}), '(0, 10, 2)\n', (2325, 2335), True, 'import numpy as np\n'), ((2374, 2393), 'numpy.arange', 'np.arange', (['(0)', '(11)', '(2)'], {}), '(0, 11, 2)\n', (2383, 2393), True, 'import numpy as np\n')]
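The asserts in the test file above fully pin down the two helpers under test: find_contiguous returns half-open [start, stop) ranges of contiguous True runs plus their lengths, and downsample reduces consecutive chunks of size factor, dropping any ragged tail. These are not pylake's actual implementations — just sketches that satisfy those asserts:

import numpy as np

def find_contiguous(mask):
    mask = np.asarray(mask, dtype=bool)
    # pad with False so runs touching the edges still produce transitions
    d = np.diff(np.concatenate(([False], mask, [False])).astype(int))
    starts = np.nonzero(d == 1)[0]
    stops = np.nonzero(d == -1)[0]
    ranges = (np.stack([starts, stops], axis=1) if len(starts)
              else np.empty((0, 2), dtype=int))
    return ranges, stops - starts

def downsample(data, factor, reduce=np.mean):
    n = (len(data) // factor) * factor   # drop the ragged tail
    return reduce(np.reshape(data[:n], (-1, factor)), axis=1)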
from granger_causality import granger_causality
import pandas as pd
import numpy as np

# Load the forcing dataset and keep only the rows from 1880 onward.
our_data = pd.read_csv("natural_data2.csv")
our_data = our_data[np.where(our_data['Year'] == 1880)[0][0]:]

# Test whether Ozone and WMGHG Granger-cause Temperature with up to 3 lags,
# passing our_type='trend' through to the local granger_causality helper.
print(granger_causality(our_data, ['Ozone', 'WMGHG'], 'Temperature', lags=3, our_type='trend'))
[ "numpy.where", "granger_causality.granger_causality", "pandas.read_csv" ]
[((99, 131), 'pandas.read_csv', 'pd.read_csv', (['"""natural_data2.csv"""'], {}), "('natural_data2.csv')\n", (110, 131), True, 'import pandas as pd\n'), ((202, 294), 'granger_causality.granger_causality', 'granger_causality', (['our_data', "['Ozone', 'WMGHG']", '"""Temperature"""'], {'lags': '(3)', 'our_type': '"""trend"""'}), "(our_data, ['Ozone', 'WMGHG'], 'Temperature', lags=3,\n our_type='trend')\n", (219, 294), False, 'from granger_causality import granger_causality\n'), ((152, 186), 'numpy.where', 'np.where', (["(our_data['Year'] == 1880)"], {}), "(our_data['Year'] == 1880)\n", (160, 186), True, 'import numpy as np\n')]
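The np.where(...)[0][0] slice above keeps everything from the first row whose Year equals 1880. Assuming Year is sorted ascending and 1880 is actually present, a boolean mask expresses the same filter more directly and avoids the IndexError the positional form raises when that year is missing:

# equivalent row filter, assuming 'Year' is sorted ascending
our_data = our_data[our_data['Year'] >= 1880]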
from collections import defaultdict import numpy as np from pyNastran.bdf.bdf_interface.assign_type import ( integer, integer_or_blank, double_or_blank) from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default from pyNastran.bdf.cards.base_card import _format_comment class Rods: """intializes the Rods""" def __init__(self, model): self.model = model self.conrod = model.conrod self.crod = model.crod self.ctube = model.ctube self._eids = set() def add(self, eid): if eid not in self._eids: self._eids.add(eid) else: raise RuntimeError('eid=%s is duplicated' % eid) def write_card(self, size=8, is_double=False, bdf_file=None): assert bdf_file is not None if len(self.conrod): self.conrod.write_card(size, is_double, bdf_file) if len(self.crod): self.crod.write_card(size, is_double, bdf_file) if len(self.ctube): self.ctube.write_card(size, is_double, bdf_file) def make_current(self): self.conrod.make_current() self.crod.make_current() self.ctube.make_current() def __len__(self): return len(self.conrod) + len(self.crod) + len(self.ctube) def repr_indent(self, indent=' '): msg = '%s<Rods> : nelements=%s\n' % (indent, len(self)) msg += '%s CONROD: %s\n' % (indent, len(self.conrod)) msg += '%s CROD : %s\n' % (indent, len(self.crod)) msg += '%s CTUBE : %s\n' % (indent, len(self.ctube)) return msg def __repr__(self): return self.repr_indent(indent='') class RodElement: """base class for CONROD, CROD, and CTUBE""" card_name = '' def check_if_current(self, eid, eids): """we split this up to reason about it easier""" if self.is_current: if eid in eids: # card exists, so we use that slot add_card = False else: add_card = True else: add_card = True return add_card def cross_reference(self, model): """does this do anything?""" self.make_current() def __len__(self): """returns the number of elements""" return len(self.eid) + len(self._eid) def repr_indent(self, indent=''): self.make_current() neids = len(self.eid) if neids == 0: return '%s%sv; nelements=%s' % (indent, self.card_name, neids) msg = '%s%sv; nelements=%s\n' % (indent, self.card_name, neids) msg += '%s eid = %s\n' % (indent, self.eid) if hasattr(self, 'pid'): upid = np.unique(self.pid) if len(upid) == 1: msg += '%s upid = %s\n' % (indent, upid) else: msg += '%s pid = %s\n' % (indent, self.pid) else: msg += '%s A = %s\n' % (indent, self.A) msg += '%s j = %s\n' % (indent, self.j) msg += '%s c = %s\n' % (indent, self.c) msg += '%s nsm = %s\n' % (indent, self.nsm) return msg #umcid = np.unique(self.mcid) #if len(umcid) == 1 and umcid[0] == 0: #msg += ' umcid = %s\n' % umcid #else: #msg += ' umcid = %s\n' % umcid #msg += ' mcid = %s\n' % self.mcid #utheta = np.unique(self.theta) #if len(utheta) == 1 and umcid[0] == 0: #msg += ' utheta = %s\n' % utheta #else: #msg += ' theta = %s\n' % self.theta #msg += ' is_theta = %s\n' % self.is_theta #msg += ' nid =\n%s' % self.nid #return msg def __repr__(self): return self.repr_indent(indent='') class CONRODv(RodElement): """ +--------+-----+-----+----+-----+---+---+---+-----+ | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | +========+=====+=====+====+=====+===+===+===+=====+ | CONROD | EID | N1 | N2 | MID | A | J | C | NSM | +--------+-----+-----+----+-----+---+---+---+-----+ """ card_name = 'CONROD' def __init__(self, model): self.model = model self.is_current = True self.eid = np.array([], dtype='int32') self.nids = np.array([], dtype='int32') self.mid = np.array([], dtype='int32') self.A = np.array([], dtype='float64') self.j = np.array([], dtype='float64') self.c = np.array([], dtype='float64') self.nsm 
= np.array([], dtype='float64') self._eid = [] self._nids = [] self._mid = [] self._A = [] self._j = [] self._c = [] self._nsm = [] self.comment = defaultdict(str) def add(self, eid, mid, nids, A=0., j=0., c=0., nsm=0., comment=''): """ Creates a CONROD card Parameters ---------- eid : int element id mid : int material id nids : List[int, int] node ids A : float area j : float; default=0. polar moment of inertia c : float; default=0. stress factor nsm : float; default=0. non-structural mass per unit length comment : str; default='' a comment for the card """ self.model.rods.add(eid) self.is_current = False self._eid.append(eid) self._nids.append(nids) self._mid.append(mid) self._A.append(A) self._j.append(j) self._c.append(c) self._nsm.append(nsm) if comment: self.comment[eid] = _format_comment(comment) def add_card(self, card, comment=''): """ Adds a CONROD card from ``BDF.add_card(...)`` Parameters ---------- card : BDFCard() a BDFCard object comment : str; default='' a comment for the card """ eid = integer(card, 1, 'eid') nids = [integer(card, 2, 'n1'), integer(card, 3, 'n2')] mid = integer(card, 4, 'mid') A = double_or_blank(card, 5, 'A', 0.0) j = double_or_blank(card, 6, 'j', 0.0) c = double_or_blank(card, 7, 'c', 0.0) nsm = double_or_blank(card, 8, 'nsm', 0.0) self.add(eid, mid, nids, A, j, c, nsm, comment=comment) def write_card(self, size=8, is_double=False, bdf_file=None): assert bdf_file is not None self.make_current() msg = '' for eid, mid, nodes, A, j, c, nsm in zip(self.eid, self.mid, self.nids, self.A, self.j, self.c, self.nsm): j = set_blank_if_default(j, 0.0) c = set_blank_if_default(c, 0.0) nsm = set_blank_if_default(nsm, 0.0) list_fields = ['CONROD', eid] + nodes.tolist() + [mid, A, j, c, nsm] msgi = print_card_8(list_fields) msg += self.comment[eid] + msgi.rstrip() + '\n' bdf_file.write(msg) return msg def make_current(self): """creates an array of the elements""" if not self.is_current: if len(self.eid) > 0: # there are already elements in self.eid self.eid = np.hstack([self.eid, self._eid]) self.mid = np.vstack([self.mid, self._mid]) self.nids = np.hstack([self.nids, self._nids]) self.A = np.hstack([self.A, self._A]) self.j = np.hstack([self.j, self._j]) self.c = np.hstack([self.c, self._c]) self.nsm = np.hstack([self.nsm, self._nsm]) # don't need to handle comments else: self.eid = np.array(self._eid, dtype='int32') self.mid = np.array(self._mid, dtype='int32') self.nids = np.array(self._nids, dtype='int32') self.A = np.array(self._A, dtype='float64') self.j = np.array(self._j, dtype='float64') self.c = np.array(self._c, dtype='float64') self.nsm = np.array(self._nsm, dtype='float64') assert len(self.eid) == len(np.unique(self.eid)) self._eid = [] self._mid = [] self._nids = [] self._A = [] self._j = [] self._c = [] self._nsm = [] self.is_current = True class CRODv(RodElement): """ +------+-----+-----+----+----+ | 1 | 2 | 3 | 4 | 5 | +======+=====+=====+====+====+ | CROD | EID | PID | N1 | N2 | +------+-----+-----+----+----+ """ card_name = 'CROD' def __init__(self, model): self.model = model self.is_current = True self.eid = np.array([], dtype='int32') self.pid = np.array([], dtype='int32') self.nids = np.array([], dtype='int32') self._eid = [] self._pid = [] self._nids = [] self._dofs = [] self.comment = defaultdict(str) def add(self, eid, pid, nids, comment=''): """ Creates a CROD card Parameters ---------- eid : int element id pid : int property id (PROD) nids : List[int, int] node ids comment : str; default='' a comment for the card """ 
self.model.solids.add(eid) self.is_current = False self._eid.append(eid) self._pid.append(pid) self._nids.append(nids) if comment: self.comment[eid] = _format_comment(comment) def add_card(self, card, comment=''): """ Adds a CROD card from ``BDF.add_card(...)`` Parameters ---------- card : BDFCard() a BDFCard object comment : str; default='' a comment for the card """ eid = integer(card, 1, 'eid') pid = integer_or_blank(card, 2, 'pid', eid) nids = [integer(card, 3, 'n1'), integer(card, 4, 'n2')] assert len(card) == 5, 'len(CROD card) = %i\ncard=%s' % (len(card), str(card)) self.add(eid, pid, nids, comment=comment) def write_card(self, size=8, is_double=False, bdf_file=None): assert bdf_file is not None self.make_current() msg = '' for eid, pid, nodes in zip(self.eid, self.pid, self.nids): list_fields = ['CROD', eid, pid] + nodes.tolist() msgi = print_card_8(list_fields) msg += self.comment[eid] + msgi.rstrip() + '\n' bdf_file.write(msg) return msg def make_current(self): """creates an array of the elements""" if not self.is_current: if len(self.eid) > 0: # there are already elements in self.eid self.eid = np.hstack([self.eid, self._eid]) self.pid = np.vstack([self.pid, self._pid]) self.nids = np.hstack([self.nids, self._nids]) # don't need to handle comments else: self.eid = np.array(self._eid, dtype='int32') self.pid = np.array(self._pid, dtype='int32') self.nids = np.array(self._nids, dtype='int32') assert len(self.eid) == len(np.unique(self.eid)) self._eid = [] self._pid = [] self._nids = [] self.is_current = True class CTUBEv(RodElement): """ +--------+-----+-----+----+----+ | 1 | 2 | 3 | 4 | 5 | +========+=====+=====+====+====+ | CELAS3 | EID | PID | S1 | S2 | +--------+-----+-----+----+----+ """ card_name = 'CTUBE' def __init__(self, model): self.model = model self.is_current = True self.eid = np.array([], dtype='int32') self.pid = np.array([], dtype='int32') self.nids = np.array([], dtype='int32') self._eid = [] self._pid = [] self._nids = [] self.comment = defaultdict(str) def add(self, eid, pid, nids, comment=''): """ Creates a CTUBE card Parameters ---------- eid : int element id pid : int property id nids : List[int, int] node ids comment : str; default='' a comment for the card """ self.model.rods.add(eid) self.is_current = False self._eid.append(eid) self._pid.append(pid) self._nids.append(nids) if comment: self.comment[eid] = _format_comment(comment) def add_card(self, card, comment=''): """ Adds a CTUBE card from ``BDF.add_card(...)`` Parameters ---------- card : BDFCard() a BDFCard object comment : str; default='' a comment for the card """ eid = integer(card, 1, 'eid') pid = integer_or_blank(card, 2, 'pid', eid) nids = [integer(card, 3, 'n1'), integer(card, 4, 'n2')] assert len(card) == 5, 'len(CTUBE card) = %i\ncard=%s' % (len(card), card) self.add(eid, pid, nids, comment=comment) def write_card(self, size=8, is_double=False, bdf_file=None): assert bdf_file is not None self.make_current() msg = '' for eid, pid, nodes in zip(self.eid, self.pid, self.nids): list_fields = ['CTUBE', eid, pid, nodes[0], nodes[1]] msgi = print_card_8(list_fields) msg += self.comment[eid] + msgi.rstrip() + '\n' bdf_file.write(msg) return msg def make_current(self): """creates an array of the elements""" if not self.is_current: if len(self.eid) > 0: # there are already elements in self.eid self.eid = np.hstack([self.eid, self._eid]) self.pid = np.vstack([self.pid, self._pid]) self.nids = np.hstack([self.nids, self._nids]) # don't need to handle comments else: self.eid = np.array(self._eid, 
dtype='int32') self.pid = np.array(self._pid, dtype='int32') self.nids = np.array(self._nids, dtype='int32') assert len(self.eid) == len(np.unique(self.eid)) self._eid = [] self._pid = [] self._nids = [] self.is_current = True
[ "numpy.unique", "pyNastran.bdf.cards.base_card._format_comment", "numpy.hstack", "pyNastran.bdf.field_writer_8.set_blank_if_default", "numpy.array", "pyNastran.bdf.bdf_interface.assign_type.integer_or_blank", "collections.defaultdict", "numpy.vstack", "pyNastran.bdf.bdf_interface.assign_type.integer...
[((4213, 4240), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (4221, 4240), True, 'import numpy as np\n'), ((4261, 4288), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (4269, 4288), True, 'import numpy as np\n'), ((4308, 4335), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (4316, 4335), True, 'import numpy as np\n'), ((4353, 4382), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float64"""'}), "([], dtype='float64')\n", (4361, 4382), True, 'import numpy as np\n'), ((4400, 4429), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float64"""'}), "([], dtype='float64')\n", (4408, 4429), True, 'import numpy as np\n'), ((4447, 4476), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float64"""'}), "([], dtype='float64')\n", (4455, 4476), True, 'import numpy as np\n'), ((4496, 4525), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float64"""'}), "([], dtype='float64')\n", (4504, 4525), True, 'import numpy as np\n'), ((4706, 4722), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (4717, 4722), False, 'from collections import defaultdict\n'), ((5969, 5992), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(1)', '"""eid"""'], {}), "(card, 1, 'eid')\n", (5976, 5992), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((6087, 6110), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(4)', '"""mid"""'], {}), "(card, 4, 'mid')\n", (6094, 6110), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((6123, 6157), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(5)', '"""A"""', '(0.0)'], {}), "(card, 5, 'A', 0.0)\n", (6138, 6157), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((6170, 6204), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(6)', '"""j"""', '(0.0)'], {}), "(card, 6, 'j', 0.0)\n", (6185, 6204), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((6217, 6251), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(7)', '"""c"""', '(0.0)'], {}), "(card, 7, 'c', 0.0)\n", (6232, 6251), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((6266, 6302), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(8)', '"""nsm"""', '(0.0)'], {}), "(card, 8, 'nsm', 0.0)\n", (6281, 6302), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((8769, 8796), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (8777, 8796), True, 'import numpy as np\n'), ((8816, 8843), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (8824, 8843), True, 'import numpy as np\n'), ((8864, 8891), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (8872, 8891), True, 'import numpy as np\n'), ((9010, 9026), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (9021, 9026), False, 'from collections import defaultdict\n'), ((9907, 9930), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(1)', '"""eid"""'], {}), 
"(card, 1, 'eid')\n", (9914, 9930), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((9945, 9982), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(2)', '"""pid"""', 'eid'], {}), "(card, 2, 'pid', eid)\n", (9961, 9982), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((11789, 11816), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (11797, 11816), True, 'import numpy as np\n'), ((11836, 11863), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (11844, 11863), True, 'import numpy as np\n'), ((11884, 11911), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (11892, 11911), True, 'import numpy as np\n'), ((12006, 12022), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (12017, 12022), False, 'from collections import defaultdict\n'), ((12896, 12919), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(1)', '"""eid"""'], {}), "(card, 1, 'eid')\n", (12903, 12919), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((12934, 12971), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(2)', '"""pid"""', 'eid'], {}), "(card, 2, 'pid', eid)\n", (12950, 12971), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((2696, 2715), 'numpy.unique', 'np.unique', (['self.pid'], {}), '(self.pid)\n', (2705, 2715), True, 'import numpy as np\n'), ((5647, 5671), 'pyNastran.bdf.cards.base_card._format_comment', '_format_comment', (['comment'], {}), '(comment)\n', (5662, 5671), False, 'from pyNastran.bdf.cards.base_card import _format_comment\n'), ((6009, 6031), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(2)', '"""n1"""'], {}), "(card, 2, 'n1')\n", (6016, 6031), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((6049, 6071), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(3)', '"""n2"""'], {}), "(card, 3, 'n2')\n", (6056, 6071), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((6695, 6723), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['j', '(0.0)'], {}), '(j, 0.0)\n', (6715, 6723), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((6740, 6768), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['c', '(0.0)'], {}), '(c, 0.0)\n', (6760, 6768), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((6787, 6817), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['nsm', '(0.0)'], {}), '(nsm, 0.0)\n', (6807, 6817), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((6918, 6943), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['list_fields'], {}), '(list_fields)\n', (6930, 6943), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9587, 9611), 'pyNastran.bdf.cards.base_card._format_comment', '_format_comment', (['comment'], {}), '(comment)\n', (9602, 9611), False, 'from pyNastran.bdf.cards.base_card import _format_comment\n'), ((9999, 
10021), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(3)', '"""n1"""'], {}), "(card, 3, 'n1')\n", (10006, 10021), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((10039, 10061), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(4)', '"""n2"""'], {}), "(card, 4, 'n2')\n", (10046, 10061), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((10496, 10521), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['list_fields'], {}), '(list_fields)\n', (10508, 10521), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((12575, 12599), 'pyNastran.bdf.cards.base_card._format_comment', '_format_comment', (['comment'], {}), '(comment)\n', (12590, 12599), False, 'from pyNastran.bdf.cards.base_card import _format_comment\n'), ((12988, 13010), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(3)', '"""n1"""'], {}), "(card, 3, 'n1')\n", (12995, 13010), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((13028, 13050), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(4)', '"""n2"""'], {}), "(card, 4, 'n2')\n", (13035, 13050), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank\n'), ((13485, 13510), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['list_fields'], {}), '(list_fields)\n', (13497, 13510), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((7261, 7293), 'numpy.hstack', 'np.hstack', (['[self.eid, self._eid]'], {}), '([self.eid, self._eid])\n', (7270, 7293), True, 'import numpy as np\n'), ((7321, 7353), 'numpy.vstack', 'np.vstack', (['[self.mid, self._mid]'], {}), '([self.mid, self._mid])\n', (7330, 7353), True, 'import numpy as np\n'), ((7382, 7416), 'numpy.hstack', 'np.hstack', (['[self.nids, self._nids]'], {}), '([self.nids, self._nids])\n', (7391, 7416), True, 'import numpy as np\n'), ((7443, 7471), 'numpy.hstack', 'np.hstack', (['[self.A, self._A]'], {}), '([self.A, self._A])\n', (7452, 7471), True, 'import numpy as np\n'), ((7497, 7525), 'numpy.hstack', 'np.hstack', (['[self.j, self._j]'], {}), '([self.j, self._j])\n', (7506, 7525), True, 'import numpy as np\n'), ((7551, 7579), 'numpy.hstack', 'np.hstack', (['[self.c, self._c]'], {}), '([self.c, self._c])\n', (7560, 7579), True, 'import numpy as np\n'), ((7607, 7639), 'numpy.hstack', 'np.hstack', (['[self.nsm, self._nsm]'], {}), '([self.nsm, self._nsm])\n', (7616, 7639), True, 'import numpy as np\n'), ((7734, 7768), 'numpy.array', 'np.array', (['self._eid'], {'dtype': '"""int32"""'}), "(self._eid, dtype='int32')\n", (7742, 7768), True, 'import numpy as np\n'), ((7796, 7830), 'numpy.array', 'np.array', (['self._mid'], {'dtype': '"""int32"""'}), "(self._mid, dtype='int32')\n", (7804, 7830), True, 'import numpy as np\n'), ((7859, 7894), 'numpy.array', 'np.array', (['self._nids'], {'dtype': '"""int32"""'}), "(self._nids, dtype='int32')\n", (7867, 7894), True, 'import numpy as np\n'), ((7920, 7954), 'numpy.array', 'np.array', (['self._A'], {'dtype': '"""float64"""'}), "(self._A, dtype='float64')\n", (7928, 7954), True, 'import numpy as np\n'), ((7980, 8014), 'numpy.array', 'np.array', (['self._j'], {'dtype': '"""float64"""'}), "(self._j, dtype='float64')\n", (7988, 8014), True, 'import numpy as np\n'), ((8040, 8074), 
'numpy.array', 'np.array', (['self._c'], {'dtype': '"""float64"""'}), "(self._c, dtype='float64')\n", (8048, 8074), True, 'import numpy as np\n'), ((8102, 8138), 'numpy.array', 'np.array', (['self._nsm'], {'dtype': '"""float64"""'}), "(self._nsm, dtype='float64')\n", (8110, 8138), True, 'import numpy as np\n'), ((10839, 10871), 'numpy.hstack', 'np.hstack', (['[self.eid, self._eid]'], {}), '([self.eid, self._eid])\n', (10848, 10871), True, 'import numpy as np\n'), ((10899, 10931), 'numpy.vstack', 'np.vstack', (['[self.pid, self._pid]'], {}), '([self.pid, self._pid])\n', (10908, 10931), True, 'import numpy as np\n'), ((10960, 10994), 'numpy.hstack', 'np.hstack', (['[self.nids, self._nids]'], {}), '([self.nids, self._nids])\n', (10969, 10994), True, 'import numpy as np\n'), ((11088, 11122), 'numpy.array', 'np.array', (['self._eid'], {'dtype': '"""int32"""'}), "(self._eid, dtype='int32')\n", (11096, 11122), True, 'import numpy as np\n'), ((11150, 11184), 'numpy.array', 'np.array', (['self._pid'], {'dtype': '"""int32"""'}), "(self._pid, dtype='int32')\n", (11158, 11184), True, 'import numpy as np\n'), ((11213, 11248), 'numpy.array', 'np.array', (['self._nids'], {'dtype': '"""int32"""'}), "(self._nids, dtype='int32')\n", (11221, 11248), True, 'import numpy as np\n'), ((13828, 13860), 'numpy.hstack', 'np.hstack', (['[self.eid, self._eid]'], {}), '([self.eid, self._eid])\n', (13837, 13860), True, 'import numpy as np\n'), ((13888, 13920), 'numpy.vstack', 'np.vstack', (['[self.pid, self._pid]'], {}), '([self.pid, self._pid])\n', (13897, 13920), True, 'import numpy as np\n'), ((13949, 13983), 'numpy.hstack', 'np.hstack', (['[self.nids, self._nids]'], {}), '([self.nids, self._nids])\n', (13958, 13983), True, 'import numpy as np\n'), ((14077, 14111), 'numpy.array', 'np.array', (['self._eid'], {'dtype': '"""int32"""'}), "(self._eid, dtype='int32')\n", (14085, 14111), True, 'import numpy as np\n'), ((14139, 14173), 'numpy.array', 'np.array', (['self._pid'], {'dtype': '"""int32"""'}), "(self._pid, dtype='int32')\n", (14147, 14173), True, 'import numpy as np\n'), ((14202, 14237), 'numpy.array', 'np.array', (['self._nids'], {'dtype': '"""int32"""'}), "(self._nids, dtype='int32')\n", (14210, 14237), True, 'import numpy as np\n'), ((8179, 8198), 'numpy.unique', 'np.unique', (['self.eid'], {}), '(self.eid)\n', (8188, 8198), True, 'import numpy as np\n'), ((11289, 11308), 'numpy.unique', 'np.unique', (['self.eid'], {}), '(self.eid)\n', (11298, 11308), True, 'import numpy as np\n'), ((14278, 14297), 'numpy.unique', 'np.unique', (['self.eid'], {}), '(self.eid)\n', (14287, 14297), True, 'import numpy as np\n')]
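The vectorized card classes in this record stage rows in Python lists and only build typed numpy arrays when make_current() flushes them. Below is a minimal sketch of driving CONRODv outside a full BDF model; the stub classes are hypothetical stand-ins, just enough to satisfy the self.model.rods.add() call. Note that CRODv.add registers element ids through self.model.solids.add rather than self.model.rods.add — unlike CONROD and CTUBE — so a CROD stub would need a solids attribute as well.

# Hypothetical stubs standing in for the BDF model wiring; run alongside the
# classes above.
class _StubRods:
    def __init__(self):
        self._eids = set()
    def add(self, eid):
        if eid in self._eids:
            raise RuntimeError('eid=%s is duplicated' % eid)
        self._eids.add(eid)

class _StubModel:
    def __init__(self):
        self.rods = _StubRods()

elem = CONRODv(_StubModel())
elem.add(eid=10, mid=100, nids=[1, 2], A=2.0, j=0.5)
elem.make_current()    # flushes the staged lists into typed numpy arrays
print(len(elem))        # 1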
import pandas as pd import numpy as np import os import tensorflow as tf ####### STUDENTS FILL THIS OUT ###### #Question 3 def reduce_dimension_ndc(df, ndc_df): ''' df: pandas dataframe, input dataset ndc_df: pandas dataframe, drug code dataset used for mapping in generic names return: df: pandas dataframe, output dataframe with joined generic drug name ''' ndc_df = ndc_df[["NDC_Code", "Non-proprietary Name"]] ndc_df.columns = ["ndc_code", "generic_drug_name"] df = df.merge(ndc_df, on="ndc_code", how="left") return df #Question 4 def select_first_encounter(df): ''' df: pandas dataframe, dataframe with all encounters return: - first_encounter_df: pandas dataframe, dataframe with only the first encounter for a given patient ''' first_encounter_df = df.groupby(["patient_nbr"]).first().reset_index() return first_encounter_df #Question 6 def patient_dataset_splitter(df, patient_key='patient_nbr'): ''' df: pandas dataframe, input dataset that will be split patient_key: string, column that is the patient id return: - train: pandas dataframe, - validation: pandas dataframe, - test: pandas dataframe, ''' keys = df[patient_key].values np.random.seed(seed=13) # for reproduciblity keys = np.random.permutation(keys) n = len(keys) n_train = int(0.6*n) n_validation = int(0.2*n) keys_train = keys[:n_train] keys_validation = keys[n_train:n_train+n_validation] keys_test = keys[n_train+n_validation:] train = df[df[patient_key].isin(keys_train)] validation = df[df[patient_key].isin(keys_validation)] test = df[df[patient_key].isin(keys_test)] return train, validation, test #Question 7 def create_tf_categorical_feature_cols(categorical_col_list, vocab_dir='./diabetes_vocab/'): ''' categorical_col_list: list, categorical field list that will be transformed with TF feature column vocab_dir: string, the path where the vocabulary text files are located return: output_tf_list: list of TF feature columns ''' output_tf_list = [] for c in categorical_col_list: vocab_file_path = os.path.join(vocab_dir, c + "_vocab.txt") ''' Which TF function allows you to read from a text file and create a categorical feature You can use a pattern like this below... tf_categorical_feature_column = tf.feature_column....... 
''' categorical_column = tf.feature_column.categorical_column_with_vocabulary_file(c, vocab_file_path) tf_categorical_feature_column = tf.feature_column.indicator_column( categorical_column ) output_tf_list.append(tf_categorical_feature_column) return output_tf_list #Question 8 def normalize_numeric_with_zscore(col, mean, std): ''' This function can be used in conjunction with the tf feature column for normalization ''' col = tf.cast(col, tf.float32) return (col - mean)/std def create_tf_numeric_feature(col, MEAN, STD, default_value=0): ''' col: string, input numerical column name MEAN: the mean for the column in the training data STD: the standard deviation for the column in the training data default_value: the value that will be used for imputing the field return: tf_numeric_feature: tf feature column representation of the input field ''' tf_numeric_feature = tf.feature_column.numeric_column(col, default_value=default_value, normalizer_fn=lambda x: normalize_numeric_with_zscore(x, MEAN, STD)) return tf_numeric_feature #Question 9 def get_mean_std_from_preds(diabetes_yhat): ''' diabetes_yhat: TF Probability prediction object ''' m = diabetes_yhat.mean() s = diabetes_yhat.stddev() return m, s # Question 10 def get_student_binary_prediction(df, col): ''' df: pandas dataframe prediction output dataframe col: str, probability mean prediction field return: student_binary_prediction: pandas dataframe converting input to flattened numpy array and binary labels ''' threshold = 5 # include the data if the prediction is more than 3 days student_binary_prediction = np.where(df[col] > threshold, 1, 0) return student_binary_prediction
[ "numpy.where", "tensorflow.feature_column.categorical_column_with_vocabulary_file", "os.path.join", "tensorflow.feature_column.indicator_column", "numpy.random.seed", "tensorflow.cast", "numpy.random.permutation" ]
[((1268, 1291), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(13)'}), '(seed=13)\n', (1282, 1291), True, 'import numpy as np\n'), ((1324, 1351), 'numpy.random.permutation', 'np.random.permutation', (['keys'], {}), '(keys)\n', (1345, 1351), True, 'import numpy as np\n'), ((3004, 3028), 'tensorflow.cast', 'tf.cast', (['col', 'tf.float32'], {}), '(col, tf.float32)\n', (3011, 3028), True, 'import tensorflow as tf\n'), ((4330, 4365), 'numpy.where', 'np.where', (['(df[col] > threshold)', '(1)', '(0)'], {}), '(df[col] > threshold, 1, 0)\n', (4338, 4365), True, 'import numpy as np\n'), ((2235, 2276), 'os.path.join', 'os.path.join', (['vocab_dir', "(c + '_vocab.txt')"], {}), "(vocab_dir, c + '_vocab.txt')\n", (2247, 2276), False, 'import os\n'), ((2541, 2618), 'tensorflow.feature_column.categorical_column_with_vocabulary_file', 'tf.feature_column.categorical_column_with_vocabulary_file', (['c', 'vocab_file_path'], {}), '(c, vocab_file_path)\n', (2598, 2618), True, 'import tensorflow as tf\n'), ((2659, 2713), 'tensorflow.feature_column.indicator_column', 'tf.feature_column.indicator_column', (['categorical_column'], {}), '(categorical_column)\n', (2693, 2713), True, 'import tensorflow as tf\n')]
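Two quick sanity checks of the logic above, with invented numbers: the z-score applied by create_tf_numeric_feature's normalizer, and the binary cut in get_student_binary_prediction (whose inline comment says "more than 3 days" while the constant is actually 5 — the check below follows the constant):

import numpy as np
import pandas as pd

vals = np.array([2.0, 5.0, 9.0])
MEAN, STD = 5.0, 2.0
print((vals - MEAN) / STD)                  # [-1.5  0.   2. ]

df = pd.DataFrame({'pred_mean': [3.2, 5.0, 7.8]})
print(np.where(df['pred_mean'] > 5, 1, 0))  # [0 0 1]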
#!/usr/bin/env python import os import shutil import copy import csv import json import math as m import traceback import cv2 import numpy as np from .util_video import FrameStamps from .util_video import FrameCache class FrameStamps: def __init__(self, Nfrm, runTime_s): self.Nfrm = Nfrm self.runTime_s = runTime_s def frm2time(self, frm): time_s = max( min( float(frm) * self.runTime_s / self.Nfrm, self.runTime_s), 0.0) return time_s def time2frm(self, time_s): frm = max(int(round( time_s * self.Nfrm / self.runTime_s )), 0) return frm class TrackManager: def __init__(self, videoFilePath, useCache=False, cacheFile="frame.cache", debug=False): self.cap = None self.frameBGR = None self.frameBGRnoAnn = None self.Nfrm = None self.FPS = None self.runTime_s = None self.fc = None self.nXpix = None self.nYpix = None self.useCache = useCache self.cacheFile = cacheFile self.frameCache = None self.frameCacheIndx = None self.trackRecs = None self.trackMetas = None self.trackMetaMask = None self.trackSummary = [] self.trackRecsUndo = None self.trackMetasUndo = None self.trackMetaMaskUndo = None self.currentTrackUndo = None self.editRec = None self.dType = [("frm", "u8"), ("x", "f8"), ("y", "f8"), ("xHalfSide", "f8"), ("yHalfSide", "f8"), ("label", "a5"), ("score", "f8"), ("mask", "i8")] self.debug = debug self.currentFrame = None self.currentTrack = None self.doAnnTrackNum = True self.outDir, videoFile = os.path.split(videoFilePath) self.videoFileRoot, ext = os.path.splitext(videoFile) success = self._read_video(videoFilePath) if success: return self.trackFilePath = os.path.join(self.outDir, self.videoFileRoot + "_tracks.json") success = self._read_tracks(self.trackFilePath) if success: exit("[ERR] Failed to read tracks! Check the trackfile format:\n" + "{}".format(self.trackFilePath)) self.editRec = self._get_empty_trackrec() def _read_video(self, videoFilePath): if not os.path.exists(videoFilePath): print("[ERR] video file missing {}".format(videoFilePath)) return 1 try: self.cap = cv2.VideoCapture(videoFilePath) self.Nfrm = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) self.FPS = float(self.cap.get(cv2.CAP_PROP_FPS)) self.runTime_s = self.Nfrm / self.FPS self.nXpix = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) self.nYpix = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) if self.useCache: self.frameCache = FrameCache(videoFilePath, self.cacheFile) self.frameCache.start() self.fc = FrameStamps(self.Nfrm, self.runTime_s) self.set_frame(0) print("[INFO] number of frames = {:d}".format(self.Nfrm)) print("[INFO] frame rate = {:.1f} frames/s".format(self.FPS)) print("[INFO] run time = {:d} min {:02.1f} s" .format(int(self.runTime_s // 60), self.runTime_s % 60)) return 0 except Exception: print("[ERR] error reading video {}".format(videoFilePath)) if self.debug: print(traceback.format_exc()) return 1 def _read_tracks(self, trackFilePath): outDir, trackFile = os.path.split(trackFilePath) if not os.path.exists(trackFilePath): print("[WARN] track file missing {}".format(trackFile)) print("[WARN] creating blank track as a convenience") self.trackRecs = [self._get_empty_trackrec()] self.trackMetas = [self._get_empty_trackmeta()] self._create_track_summary() return 0 try: with open(trackFilePath, 'r') as FH: trackLst = json.load(FH) nTracks = len(trackLst) print("[INFO] found {:d} tracks".format(nTracks)) self.trackRecs, self.trackMetas = self._tracklist_to_recs(trackLst) self._create_track_summary() return 0 except Exception: print("[ERR] error reading tracks {}".format(trackFile)) if self.debug: 
print(traceback.format_exc()) return 1 def _create_track_summary(self): self.trackSummary = [] for i, mD in enumerate(self.trackMetas): self.trackSummary.append( (i+1, mD["length"], mD["label"], mD["qual"])) def _tracklist_to_recs(self, trackLst): trackRecs = [] trackMetas = [] for trkIndx, tD in enumerate(trackLst): print("[INFO] loading track {:d}".format(trkIndx + 1)) trackRec = self._get_empty_trackrec(self.Nfrm) for i in range(len(tD["frmLst"])): frm = tD["frmLst"][i] if not m.isnan(tD["xLst"][i]): trackRec["x"][frm] = tD["xLst"][i] trackRec["y"][frm] = tD["yLst"][i] trackRec["xHalfSide"][frm] = tD["xHalfSideLst"][i] trackRec["yHalfSide"][frm] = tD["yHalfSideLst"][i] trackRec["label"][frm] = tD["label"] if "scoreLst" in tD: trackRec["score"][frm] = tD["scoreLst"][i] else: trackRec["score"][frm] = tD["qual"] trackRec["mask"][frm] = True mDict = {} mDict["label"] = tD["label"] mDict["qual"] = tD["qual"] mDict["comments"] = tD["comments"] mDict["pickle"] = tD["pickle"] mDict["length"] = np.sum(trackRec["mask"]) mDict["use"] = True trackRecs.append(trackRec) trackMetas.append(mDict) return trackRecs, trackMetas def _tracks_to_json(self): trackLst = [] for indx in range(len(self.trackRecs)): tR = self.trackRecs[indx] tM = self.trackMetas[indx] msk = np.nonzero(tR["mask"]) trackDict = {"frmLst" : tR["frm"][msk].tolist(), "xLst" : tR["x"][msk].tolist(), "yLst" : tR["y"][msk].tolist(), "xHalfSideLst" : tR["xHalfSide"][msk].tolist(), "yHalfSideLst" : tR["yHalfSide"][msk].tolist(), "label" : tM["label"], "qual" : tM["qual"], "timeRng" : [0.0, 0.0], "comments" : tM["comments"], "pickle" : tM["pickle"]} trackLst.append(trackDict) return trackLst def _get_empty_trackrec(self, nRows=None): if nRows is None: nRows = self.Nfrm trackRec = np.empty((nRows,), dtype=self.dType) trackRec[:] = np.nan trackRec["frm"] = range(nRows) trackRec["label"] = "" trackRec["mask"] = False return trackRec def _get_empty_trackmeta(self): mDict = {} mDict["label"] = "NULL" mDict["qual"] = 10 mDict["comments"] = "Empty track" mDict["pickle"] = "" mDict["length"] = 0 mDict["use"] = True return mDict def _calc_rect_x1y1(self, trackRec, frameNum): if not trackRec["mask"][frameNum]: return 0, 0, 0, 0 x = trackRec["x"][frameNum] y = trackRec["y"][frameNum] xHalfSide = trackRec["xHalfSide"][frameNum] yHalfSide = trackRec["yHalfSide"][frameNum] x1 = round(int(x - xHalfSide)) x2 = round(int(x + xHalfSide)) y1 = round(int(y - yHalfSide)) y2 = round(int(y + yHalfSide)) return x1, y1, x2, y2 def _annotate_frame(self, linewidth=2): if self.currentTrack is None: return for trackNum, trackRec in enumerate(self.trackRecs): x1, y1, x2, y2 = self._calc_rect_x1y1(trackRec, self.currentFrame) if trackNum == self.currentTrack: colour = (255, 0, 255) else: colour = (0, 255, 255) if not(x1 == x2 or y1 == y2): cv2.rectangle(self.frameBGR, (x1, y1), (x2, y2), colour, linewidth) if self.doAnnTrackNum: cv2.putText(self.frameBGR, str(trackNum + 1), (x2+5, y2+5), cv2.FONT_HERSHEY_TRIPLEX, 1.0, colour) x1, y1, x2, y2 = self._calc_rect_x1y1(self.editRec, self.currentFrame) if not(x1 == x2 or y1 == y2): cv2.rectangle(self.frameBGR, (x1, y1), (x2, y2), (255, 0, 0), linewidth) def _save_undo(self): self.trackRecsUndo = copy.deepcopy(self.trackRecs) self.trackMetasUndo = copy.deepcopy(self.trackMetas) self.trackMetaMaskUndo = copy.deepcopy(self.trackMetaMask) self.currentTrackUndo = copy.deepcopy(self.currentTrack) def _nan_helper(self, y): return np.isnan(y), lambda z: z.nonzero()[0] def _interp(self, y): y = y.copy() nans, x = self._nan_helper(y) 
y[nans] = np.interp(x(nans), x(~nans), y[~nans]) return y def _interpolate_rec(self, trackRec, frm1, frm2): frm1 = max(int(frm1), 0) frm2 = min(int(frm2), self.Nfrm-1) msk = trackRec["mask"].copy() msk[:frm1] = 0 msk[frm2+1:] = 0 lim1 = np.min(np.nonzero(msk)) lim2 = np.max(np.nonzero(msk)) frm1 = max(lim1, frm1) frm2 = min(lim2, frm2) frm1 = max(int(frm1), 0) frm2 = min(int(frm2), self.Nfrm-1) trackRec = trackRec.copy() tRsec = trackRec[frm1:frm2+1] msk = tRsec["mask"] == False tRsec["x"][msk] = np.nan tRsec["y"][msk] = np.nan tRsec["xHalfSide"][msk] = np.nan tRsec["yHalfSide"][msk] = np.nan tRsec["x"] = self._interp(tRsec["x"]) tRsec["y"] = self._interp(tRsec["y"]) tRsec["xHalfSide"] = self._interp(tRsec["xHalfSide"]) tRsec["yHalfSide"] = self._interp(tRsec["yHalfSide"]) tRsec["mask"] = True trackRec[frm1:frm2+1] = tRsec return trackRec def _query_cache_state(self): nRead = 0 if self.useCache: return self.frameCache.query_state() def set_frame(self, frameNum=None, preSeek=100): if frameNum is None: frameNum = self.currentFrame if preSeek > 0: setFrm = max(0, frameNum - preSeek) else: setFrm = 0 if self.useCache: grabbed, frame = self.frameCache.read(frameNum) if grabbed: self.frameBGRnoAnn = frame.copy() self.frameBGR = frame.copy() self.currentFrame = frameNum self._annotate_frame() return 0 else: return 1 else: if self.currentFrame is not None: if frameNum <= self.currentFrame: self.cap.set(cv2.CAP_PROP_POS_FRAMES, setFrm) success = 1 while True: grabbed = self.cap.grab() if not grabbed: return success frm = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES)) -1 if frm == frameNum: _, frame = self.cap.retrieve() self.frameBGR = frame.copy() self.currentFrame = frameNum self.frameBGRnoAnn = self.frameBGR.copy() self._annotate_frame() return def set_track(self, trackNum): self.currentTrack = trackNum self.frameBGR = self.frameBGRnoAnn.copy() self._annotate_frame() def get_track(self, trackNum=None): if trackNum is None: trackNum = self.currentTrack return self.trackRecs[trackNum].copy() def get_edit(self): return self.editRec.copy() def get_tracks_mask(self, recalculate=True): if recalculate: self.trackMetaMask = np.zeros((self.Nfrm,), dtype="i8") self.trackMetaMask[:] = False for trackRec in self.trackRecs: self.trackMetaMask = np.where(trackRec["mask"], True, self.trackMetaMask) return self.trackMetaMask def cull_tracks(self, cullLim=10): trackLst = [] for i, mD in enumerate(self.trackMetas): if mD["length"] <= cullLim: trackLst.append(i) if len(trackLst) > 0: print("[INFO] culling tracks {}".format(trackLst)) self.delete_tracks(trackNums=trackLst, saveUndo=True) def delete_tracks(self, trackNums=None, saveUndo=True): if saveUndo: self._save_undo() if trackNums is None: trackNums = [self.currentTrack] trackNums.sort() for indx in trackNums[::-1]: self.trackRecs.pop(indx) self.trackMetas.pop(indx) self._create_track_summary() self.set_track(max(0, trackNums[0] -1)) def split_track(self, saveUndo=True): if saveUndo: self._save_undo() self.trackRecs.append(self._get_empty_trackrec()) trOld = self.trackRecs[self.currentTrack] trNew = self.trackRecs[-1] k = self.currentFrame trNew["x"][k:] = trOld["x"][k:] trNew["y"][k:] = trOld["y"][k:] trNew["xHalfSide"][k:] = trOld["xHalfSide"][k:] trNew["yHalfSide"][k:] = trOld["yHalfSide"][k:] trNew["label"][k:] = trOld["label"][k:] trNew["score"][k:] = trOld["score"][k:] trNew["mask"][k:] = trOld["mask"][k:] trOld["mask"][k:] = False self.trackMetas.append(self.trackMetas[self.currentTrack].copy()) mOld = self.trackMetas[self.currentTrack] mNew = 
self.trackMetas[-1] mOld["length"] = np.sum(trOld["mask"]) mNew["length"] = np.sum(trNew["mask"]) self._create_track_summary() def merge_tracks(self, trackNums=[], saveUndo=True): if len(trackNums) < 2: print("[WARN] select at least two tracks") return if saveUndo: self._save_undo() mergeLst = trackNums.copy() mergeLst.sort() trDst = self.trackRecs[mergeLst[0]] for indx in mergeLst[::-1][:-1]: trSrc = self.trackRecs[indx] msk = trSrc["mask"] == True trDst[msk] = trSrc[msk] self.trackRecs.pop(indx) self.trackMetas.pop(indx) mDst = self.trackMetas[mergeLst[0]] mDst["length"] = np.sum(trDst["mask"]) self._create_track_summary() self.set_track(mergeLst[0]) def undo(self): if not self.trackRecsUndo is None: trackRecsTmp = copy.deepcopy(self.trackRecs) trackMetasTmp = copy.deepcopy(self.trackMetas) trackMetaMaskTmp = copy.deepcopy(self.trackMetaMask) currentTrackTmp = copy.deepcopy(self.currentTrack) self.trackRecs = copy.deepcopy(self.trackRecsUndo) self.trackMetas = copy.deepcopy(self.trackMetasUndo) self.trackMetaMask = copy.deepcopy(self.trackMetaMaskUndo) self.currentTrack = copy.deepcopy(self.currentTrackUndo) self.trackRecsUndo = copy.deepcopy(trackRecsTmp) self.trackMetasUndo = copy.deepcopy(trackMetasTmp) self.trackMetaMaskUndo = copy.deepcopy(trackMetaMaskTmp) self.currentTrackUndo = copy.deepcopy(currentTrackTmp) self._create_track_summary() def relabel_track(self, label, trackNums=[], saveUndo=True): if saveUndo: self._save_undo() if len(trackNums) < 1: trackNums = [self.currentTrack] for trackNum in trackNums: self.trackRecs[trackNum]["label"] = label self.trackMetas[trackNum]["label"] = label self._create_track_summary() def setqual_track(self, qual, trackNums=[], saveUndo=True): qual = int(qual) qual = min([qual, 10]) qual = max([qual, 0]) if saveUndo: self._save_undo() if len(trackNums) < 1: trackNums = [self.currentTrack] for trackNum in trackNums: self.trackMetas[trackNum]["qual"] = qual self.trackMetas[trackNum]["qual"] = qual self._create_track_summary() def add_neg_track(self, frm1, frm2, saveUndo=True): if saveUndo: self._save_undo() frm1 = max(int(frm1), 0) frm2 = min(int(frm2), self.Nfrm-1) self.trackRecs.append(self._get_empty_trackrec()) trNew = self.trackRecs[-1] trNew["x"][frm1:frm2+1] = self.nXpix // 2 trNew["y"][frm1:frm2+1] = self.nYpix // 2 trNew["xHalfSide"][frm1:frm2+1] = self.nXpix // 2 trNew["yHalfSide"][frm1:frm2+1] = self.nYpix // 2 trNew["label"][frm1:frm2+1] = "NEG" trNew["score"][frm1:frm2+1] = 1.0 trNew["mask"][frm1:frm2+1] = True mDict = {} mDict["label"] = "NEG" mDict["qual"] = 10 mDict["comments"] = "Added by GUI" mDict["pickle"] = "" mDict["length"] = np.sum(trNew["mask"]) mDict["use"] = True self.trackMetas.append(mDict) trNums = list(range(len(self.trackRecs) -1)) self.flag_framerange(frm1, frm2, trackNums=trNums, saveUndo=False) self._create_track_summary() def interpolate_track(self, frm1, frm2, saveUndo=True): if saveUndo: self._save_undo() trackRec = self.trackRecs[self.currentTrack] trackRec = self._interpolate_rec(trackRec, frm1, frm2) self.trackRecs[self.currentTrack] = trackRec mR = self.trackMetas[self.currentTrack] mR["length"] = np.sum(trackRec["mask"]) self._create_track_summary() def flag_framerange(self, frm1, frm2, trackNums=[], flagAllTracks=None, saveUndo=True): if saveUndo: self._save_undo() frm1 = max(int(frm1), 0) frm2 = min(int(frm2), self.Nfrm-1) if len(trackNums) < 0: trackNums = [self.currentTrack] if flagAllTracks: trackNums = list(range(len(self.trackRecs))) trackNums.sort() for indx in trackNums[::-1]: tR = 
self.trackRecs[indx] tR["mask"][frm1:frm2+1] = False mR = self.trackMetas[indx] mR["length"] = np.sum(tR["mask"]) self._create_track_summary() def save_json(self): trackLst = self._tracks_to_json() trackFileBak = os.path.join(self.outDir, self.videoFileRoot + "_tracks.orig") if (not os.path.exists(trackFileBak) and os.path.exists(self.trackFilePath)): print("[INFO] backing up original tracks") shutil.copyfile(self.trackFilePath, trackFileBak) with open(self.trackFilePath, 'w') as FH: json.dump(trackLst, FH) print("[INFO] wrote tracks to {}.".format(self.trackFilePath)) def put_box_edit(self, coords): x1, y1, x2, y2 = coords x = (x1 + x2) / 2 y = (y1 + y2) / 2 xHalfSide = (x2 - x1) / 2 yHalfSide = (y2 - y1) / 2 k = self.currentFrame self.editRec["x"][k] = x self.editRec["y"][k] = y self.editRec["xHalfSide"][k] = xHalfSide self.editRec["yHalfSide"][k] = yHalfSide self.editRec["mask"][k] = True def clear_edits(self): self.editRec = self._get_empty_trackrec() def create_track_from_edit(self): eR = self.editRec.copy() if np.sum(eR["mask"]) == 0: print("[INFO] please draw at least one box") return frm1 = np.min(np.nonzero(eR["mask"])) frm2 = np.max(np.nonzero(eR["mask"])) eR = self._interpolate_rec(eR, frm1, frm2) eR["label"] = "EDIT" eR["score"] = 1.0 self.trackRecs.append(eR) mDict = {} mDict["label"] = "EDIT" mDict["qual"] = 10 mDict["comments"] = "Added by GUI" mDict["pickle"] = "" mDict["length"] = np.sum(eR["mask"]) mDict["use"] = True self.trackMetas.append(mDict) self._create_track_summary() def cleanup(self): print("[INFO] cleaning up temporary files ... ", end="", flush=True) self.cap.release() if self.useCache: self.frameCache.stop() self.frameCache.cleanup() print("done", flush=True)
[ "cv2.rectangle", "os.path.exists", "traceback.format_exc", "numpy.where", "os.path.join", "os.path.splitext", "os.path.split", "json.load", "numpy.sum", "numpy.zeros", "shutil.copyfile", "numpy.empty", "cv2.VideoCapture", "numpy.nonzero", "copy.deepcopy", "numpy.isnan", "json.dump", ...
[((1950, 1978), 'os.path.split', 'os.path.split', (['videoFilePath'], {}), '(videoFilePath)\n', (1963, 1978), False, 'import os\n'), ((2013, 2040), 'os.path.splitext', 'os.path.splitext', (['videoFile'], {}), '(videoFile)\n', (2029, 2040), False, 'import os\n'), ((2159, 2221), 'os.path.join', 'os.path.join', (['self.outDir', "(self.videoFileRoot + '_tracks.json')"], {}), "(self.outDir, self.videoFileRoot + '_tracks.json')\n", (2171, 2221), False, 'import os\n'), ((3884, 3912), 'os.path.split', 'os.path.split', (['trackFilePath'], {}), '(trackFilePath)\n', (3897, 3912), False, 'import os\n'), ((7444, 7480), 'numpy.empty', 'np.empty', (['(nRows,)'], {'dtype': 'self.dType'}), '((nRows,), dtype=self.dType)\n', (7452, 7480), True, 'import numpy as np\n'), ((9417, 9446), 'copy.deepcopy', 'copy.deepcopy', (['self.trackRecs'], {}), '(self.trackRecs)\n', (9430, 9446), False, 'import copy\n'), ((9477, 9507), 'copy.deepcopy', 'copy.deepcopy', (['self.trackMetas'], {}), '(self.trackMetas)\n', (9490, 9507), False, 'import copy\n'), ((9541, 9574), 'copy.deepcopy', 'copy.deepcopy', (['self.trackMetaMask'], {}), '(self.trackMetaMask)\n', (9554, 9574), False, 'import copy\n'), ((9607, 9639), 'copy.deepcopy', 'copy.deepcopy', (['self.currentTrack'], {}), '(self.currentTrack)\n', (9620, 9639), False, 'import copy\n'), ((14604, 14625), 'numpy.sum', 'np.sum', (["trOld['mask']"], {}), "(trOld['mask'])\n", (14610, 14625), True, 'import numpy as np\n'), ((14651, 14672), 'numpy.sum', 'np.sum', (["trNew['mask']"], {}), "(trNew['mask'])\n", (14657, 14672), True, 'import numpy as np\n'), ((15329, 15350), 'numpy.sum', 'np.sum', (["trDst['mask']"], {}), "(trDst['mask'])\n", (15335, 15350), True, 'import numpy as np\n'), ((17917, 17938), 'numpy.sum', 'np.sum', (["trNew['mask']"], {}), "(trNew['mask'])\n", (17923, 17938), True, 'import numpy as np\n'), ((18521, 18545), 'numpy.sum', 'np.sum', (["trackRec['mask']"], {}), "(trackRec['mask'])\n", (18527, 18545), True, 'import numpy as np\n'), ((19340, 19402), 'os.path.join', 'os.path.join', (['self.outDir', "(self.videoFileRoot + '_tracks.orig')"], {}), "(self.outDir, self.videoFileRoot + '_tracks.orig')\n", (19352, 19402), False, 'import os\n'), ((20906, 20924), 'numpy.sum', 'np.sum', (["eR['mask']"], {}), "(eR['mask'])\n", (20912, 20924), True, 'import numpy as np\n'), ((2577, 2606), 'os.path.exists', 'os.path.exists', (['videoFilePath'], {}), '(videoFilePath)\n', (2591, 2606), False, 'import os\n'), ((2736, 2767), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videoFilePath'], {}), '(videoFilePath)\n', (2752, 2767), False, 'import cv2\n'), ((3928, 3957), 'os.path.exists', 'os.path.exists', (['trackFilePath'], {}), '(trackFilePath)\n', (3942, 3957), False, 'import os\n'), ((6250, 6274), 'numpy.sum', 'np.sum', (["trackRec['mask']"], {}), "(trackRec['mask'])\n", (6256, 6274), True, 'import numpy as np\n'), ((6616, 6638), 'numpy.nonzero', 'np.nonzero', (["tR['mask']"], {}), "(tR['mask'])\n", (6626, 6638), True, 'import numpy as np\n'), ((9263, 9335), 'cv2.rectangle', 'cv2.rectangle', (['self.frameBGR', '(x1, y1)', '(x2, y2)', '(255, 0, 0)', 'linewidth'], {}), '(self.frameBGR, (x1, y1), (x2, y2), (255, 0, 0), linewidth)\n', (9276, 9335), False, 'import cv2\n'), ((9685, 9696), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (9693, 9696), True, 'import numpy as np\n'), ((10121, 10136), 'numpy.nonzero', 'np.nonzero', (['msk'], {}), '(msk)\n', (10131, 10136), True, 'import numpy as np\n'), ((10160, 10175), 'numpy.nonzero', 'np.nonzero', (['msk'], {}), '(msk)\n', (10170, 10175), True, 
'import numpy as np\n'), ((12744, 12778), 'numpy.zeros', 'np.zeros', (['(self.Nfrm,)'], {'dtype': '"""i8"""'}), "((self.Nfrm,), dtype='i8')\n", (12752, 12778), True, 'import numpy as np\n'), ((15514, 15543), 'copy.deepcopy', 'copy.deepcopy', (['self.trackRecs'], {}), '(self.trackRecs)\n', (15527, 15543), False, 'import copy\n'), ((15572, 15602), 'copy.deepcopy', 'copy.deepcopy', (['self.trackMetas'], {}), '(self.trackMetas)\n', (15585, 15602), False, 'import copy\n'), ((15634, 15667), 'copy.deepcopy', 'copy.deepcopy', (['self.trackMetaMask'], {}), '(self.trackMetaMask)\n', (15647, 15667), False, 'import copy\n'), ((15698, 15730), 'copy.deepcopy', 'copy.deepcopy', (['self.currentTrack'], {}), '(self.currentTrack)\n', (15711, 15730), False, 'import copy\n'), ((15760, 15793), 'copy.deepcopy', 'copy.deepcopy', (['self.trackRecsUndo'], {}), '(self.trackRecsUndo)\n', (15773, 15793), False, 'import copy\n'), ((15824, 15858), 'copy.deepcopy', 'copy.deepcopy', (['self.trackMetasUndo'], {}), '(self.trackMetasUndo)\n', (15837, 15858), False, 'import copy\n'), ((15892, 15929), 'copy.deepcopy', 'copy.deepcopy', (['self.trackMetaMaskUndo'], {}), '(self.trackMetaMaskUndo)\n', (15905, 15929), False, 'import copy\n'), ((15962, 15998), 'copy.deepcopy', 'copy.deepcopy', (['self.currentTrackUndo'], {}), '(self.currentTrackUndo)\n', (15975, 15998), False, 'import copy\n'), ((16032, 16059), 'copy.deepcopy', 'copy.deepcopy', (['trackRecsTmp'], {}), '(trackRecsTmp)\n', (16045, 16059), False, 'import copy\n'), ((16094, 16122), 'copy.deepcopy', 'copy.deepcopy', (['trackMetasTmp'], {}), '(trackMetasTmp)\n', (16107, 16122), False, 'import copy\n'), ((16160, 16191), 'copy.deepcopy', 'copy.deepcopy', (['trackMetaMaskTmp'], {}), '(trackMetaMaskTmp)\n', (16173, 16191), False, 'import copy\n'), ((16228, 16258), 'copy.deepcopy', 'copy.deepcopy', (['currentTrackTmp'], {}), '(currentTrackTmp)\n', (16241, 16258), False, 'import copy\n'), ((19194, 19212), 'numpy.sum', 'np.sum', (["tR['mask']"], {}), "(tR['mask'])\n", (19200, 19212), True, 'import numpy as np\n'), ((19506, 19540), 'os.path.exists', 'os.path.exists', (['self.trackFilePath'], {}), '(self.trackFilePath)\n', (19520, 19540), False, 'import os\n'), ((19610, 19659), 'shutil.copyfile', 'shutil.copyfile', (['self.trackFilePath', 'trackFileBak'], {}), '(self.trackFilePath, trackFileBak)\n', (19625, 19659), False, 'import shutil\n'), ((19722, 19745), 'json.dump', 'json.dump', (['trackLst', 'FH'], {}), '(trackLst, FH)\n', (19731, 19745), False, 'import json\n'), ((20397, 20415), 'numpy.sum', 'np.sum', (["eR['mask']"], {}), "(eR['mask'])\n", (20403, 20415), True, 'import numpy as np\n'), ((20520, 20542), 'numpy.nonzero', 'np.nonzero', (["eR['mask']"], {}), "(eR['mask'])\n", (20530, 20542), True, 'import numpy as np\n'), ((20566, 20588), 'numpy.nonzero', 'np.nonzero', (["eR['mask']"], {}), "(eR['mask'])\n", (20576, 20588), True, 'import numpy as np\n'), ((4362, 4375), 'json.load', 'json.load', (['FH'], {}), '(FH)\n', (4371, 4375), False, 'import json\n'), ((8846, 8913), 'cv2.rectangle', 'cv2.rectangle', (['self.frameBGR', '(x1, y1)', '(x2, y2)', 'colour', 'linewidth'], {}), '(self.frameBGR, (x1, y1), (x2, y2), colour, linewidth)\n', (8859, 8913), False, 'import cv2\n'), ((12902, 12954), 'numpy.where', 'np.where', (["trackRec['mask']", '(True)', 'self.trackMetaMask'], {}), "(trackRec['mask'], True, self.trackMetaMask)\n", (12910, 12954), True, 'import numpy as np\n'), ((19461, 19489), 'os.path.exists', 'os.path.exists', (['trackFileBak'], {}), '(trackFileBak)\n', (19475, 
19489), False, 'import os\n'), ((5422, 5444), 'math.isnan', 'm.isnan', (["tD['xLst'][i]"], {}), "(tD['xLst'][i])\n", (5429, 5444), True, 'import math as m\n'), ((3768, 3790), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3788, 3790), False, 'import traceback\n'), ((4764, 4786), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4784, 4786), False, 'import traceback\n')]
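A minimal standalone sketch of the gap-filling idiom the row above builds on (not part of the dataset; the helper name interp_nans and the sample values are invented for illustration): _interp and _interpolate_rec mark missing track samples as NaN and then fill them by linear interpolation over the frame index with np.interp.

import numpy as np

def interp_nans(y):
    # Linearly interpolate across the NaN entries of a 1-D array,
    # mirroring the _interp helper in the code above.
    y = np.asarray(y, dtype=float).copy()
    nans = np.isnan(y)
    idx = np.arange(y.size)
    y[nans] = np.interp(idx[nans], idx[~nans], y[~nans])
    return y

print(interp_nans([10.0, np.nan, np.nan, 40.0, 50.0]))  # -> [10. 20. 30. 40. 50.]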
from typing import Dict

import numpy as np
from gym import spaces

from stable_baselines3.common.vec_env import VecEnv, VecEnvWrapper


class ObsDictWrapper(VecEnvWrapper):
    """
    Wrapper for a VecEnv which overrides the observation space for Hindsight Experience Replay to support dict observations.

    :param venv: The vectorized environment to wrap.
    """

    def __init__(self, venv: VecEnv):
        super(ObsDictWrapper, self).__init__(venv, venv.observation_space, venv.action_space)

        self.venv = venv

        self.spaces = list(venv.observation_space.spaces.values())

        # get dimensions of observation and goal
        if isinstance(self.spaces[0], spaces.Discrete):
            self.obs_dim = 1
            self.goal_dim = 1
        else:
            self.obs_dim = venv.observation_space.spaces["observation"].shape[0]
            self.goal_dim = venv.observation_space.spaces["achieved_goal"].shape[0]

        # new observation space with concatenated observation and (desired) goal
        # for the different types of spaces
        if isinstance(self.spaces[0], spaces.Box):
            low_values = np.concatenate(
                [venv.observation_space.spaces["observation"].low, venv.observation_space.spaces["desired_goal"].low]
            )
            high_values = np.concatenate(
                [venv.observation_space.spaces["observation"].high, venv.observation_space.spaces["desired_goal"].high]
            )
            self.observation_space = spaces.Box(low_values, high_values, dtype=np.float32)
        elif isinstance(self.spaces[0], spaces.MultiBinary):
            total_dim = self.obs_dim + self.goal_dim
            self.observation_space = spaces.MultiBinary(total_dim)
        elif isinstance(self.spaces[0], spaces.Discrete):
            dimensions = [venv.observation_space.spaces["observation"].n, venv.observation_space.spaces["desired_goal"].n]
            self.observation_space = spaces.MultiDiscrete(dimensions)
        else:
            raise NotImplementedError(f"{type(self.spaces[0])} space is not supported")

    def reset(self):
        return self.venv.reset()

    def step_wait(self):
        return self.venv.step_wait()

    @staticmethod
    def convert_dict(
        observation_dict: Dict[str, np.ndarray], observation_key: str = "observation", goal_key: str = "desired_goal"
    ) -> np.ndarray:
        """
        Concatenate observation and (desired) goal of observation dict.

        :param observation_dict: Dictionary with observation.
        :param observation_key: Key of observation in dictionary.
        :param goal_key: Key of (desired) goal in dictionary.
        :return: Concatenated observation.
        """
        return np.concatenate([observation_dict[observation_key], observation_dict[goal_key]], axis=-1)
[ "gym.spaces.MultiDiscrete", "gym.spaces.MultiBinary", "numpy.concatenate", "gym.spaces.Box" ]
[((2733, 2826), 'numpy.concatenate', 'np.concatenate', (['[observation_dict[observation_key], observation_dict[goal_key]]'], {'axis': '(-1)'}), '([observation_dict[observation_key], observation_dict[\n goal_key]], axis=-1)\n', (2747, 2826), True, 'import numpy as np\n'), ((1141, 1263), 'numpy.concatenate', 'np.concatenate', (["[venv.observation_space.spaces['observation'].low, venv.observation_space.\n spaces['desired_goal'].low]"], {}), "([venv.observation_space.spaces['observation'].low, venv.\n observation_space.spaces['desired_goal'].low])\n", (1155, 1263), True, 'import numpy as np\n'), ((1315, 1439), 'numpy.concatenate', 'np.concatenate', (["[venv.observation_space.spaces['observation'].high, venv.observation_space.\n spaces['desired_goal'].high]"], {}), "([venv.observation_space.spaces['observation'].high, venv.\n observation_space.spaces['desired_goal'].high])\n", (1329, 1439), True, 'import numpy as np\n'), ((1502, 1555), 'gym.spaces.Box', 'spaces.Box', (['low_values', 'high_values'], {'dtype': 'np.float32'}), '(low_values, high_values, dtype=np.float32)\n', (1512, 1555), False, 'from gym import spaces\n'), ((1707, 1736), 'gym.spaces.MultiBinary', 'spaces.MultiBinary', (['total_dim'], {}), '(total_dim)\n', (1725, 1736), False, 'from gym import spaces\n'), ((1955, 1987), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['dimensions'], {}), '(dimensions)\n', (1975, 1987), False, 'from gym import spaces\n')]
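A brief usage sketch for the wrapper row above (the sample arrays are invented for illustration): convert_dict flattens a goal-conditioned dict observation by concatenating its "observation" and "desired_goal" entries along the last axis, so the snippet below reproduces what ObsDictWrapper.convert_dict(obs) returns.

import numpy as np

obs = {
    "observation": np.array([0.1, 0.2, 0.3]),
    "achieved_goal": np.array([0.0, 0.0]),
    "desired_goal": np.array([1.0, 1.0]),
}
# obs_dim 3 + goal_dim 2 -> a flat vector of shape (5,)
flat = np.concatenate([obs["observation"], obs["desired_goal"]], axis=-1)
print(flat)  # [0.1 0.2 0.3 1.  1. ]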
from __future__ import absolute_import

import sys, os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(BASE_DIR)

import numpy as np
import PMML43Ext as pml
from skl import pre_process as pp
from datetime import datetime
import math
import metadata
import inspect
from nyoka.keras.keras_model_to_pmml import KerasToPmml
from nyoka.xgboost.xgboost_to_pmml import xgboost_to_pmml
from nyoka.lgbm.lgb_to_pmml import lgb_to_pmml
from nyoka.lgbm.lgbmTrainingAPI_to_pmml import ExportToPMML as ext


def model_to_pmml(toExportDict, pmml_f_name='from_sklearn.pmml'):
    """
    Exports one or more trained models, together with any preprocessing pipelines, into a single pmml file.

    Parameters
    ----------
    toExportDict : Dictionary
        Maps each model name to its export settings. Each entry is expected to carry the keys
        used below: 'modelObj', 'pipelineObj', 'featuresUsed', 'targetName', 'taskType',
        'hyperparameters', 'preProcessingScript' and 'postProcessingScript' (plus 'model_graph'
        and 'tf_session' for Keras models built inside a TensorFlow graph).
    pmml_f_name : String
        Name of the pmml file. (Default='from_sklearn.pmml')

    Returns
    -------
    Returns a pmml file
    """
    # To support multiple models and Transformation dictionaries
    models_dict = {'DeepNetwork':[]}
    trfm_dict_kwargs = {'TransformationDictionary':[]}
    data_dicts = []
    visited = []
    categoric_values = None
    derived_col_names = None
    mining_imp_val = None
    for model_name in toExportDict.keys():
        col_names = toExportDict[model_name]['featuresUsed']
        target_name = toExportDict[model_name]['targetName']
        tasktype = toExportDict[model_name]['taskType']
        model = toExportDict[model_name]['modelObj']
        pipelineOnly = toExportDict[model_name]['pipelineObj']
        categoric_values = tuple()
        derived_col_names = col_names
        mining_imp_val = tuple()
        if (pipelineOnly is not None) and (pipelineOnly not in visited):
            derived_col_names,categoric_values,mining_imp_val,trfm_dict_kwargs = get_trfm_dict_kwargs(col_names,pipelineOnly,
                                                                            trfm_dict_kwargs,model,model_name)
        if 'keras' in str(model):
            KModelObj=toExportDict[model_name]
            if 'model_graph' in KModelObj:
                model_graph = KModelObj['model_graph']
                with model_graph.as_default():
                    tf_session = KModelObj['tf_session']
                    with tf_session.as_default():
                        KerasPMML = KerasToPmml(model.model,model_name=pmml_f_name,targetVarName=target_name)
            else:
                KerasPMML = KerasToPmml(model,model_name=pmml_f_name,targetVarName=target_name)
            model_obj = KerasPMML.DeepNetwork[0]
            model_obj.modelName = model_name
            model_obj.taskType=tasktype
            models_dict['DeepNetwork'].append(model_obj)
        else:
            #model = pipeline.steps[-1][1]
            #ppln_sans_predictor = pipeline.steps[:-1]
            #derived_col_names,categoric_values,mining_imp_val,trfm_dict_kwargs = get_trfm_dict_kwargs(col_names,pipelineOnly,
            #                                                                trfm_dict_kwargs,modelobj,model_name)
            if ('XGBRegressor' in str(model)) or ('XGBClassifier' in str(model)):
                PMML_kwargs = xgboost_to_pmml(model,
                                              derived_col_names,
                                              col_names,
                                              target_name,
                                              mining_imp_val,
                                              categoric_values,
                                              tasktype)
            elif ('LGBMRegressor' in str(model)) or ('LGBMClassifier' in str(model)):
                PMML_kwargs = lgb_to_pmml(model,
                                          derived_col_names,
                                          col_names,
                                          target_name,
                                          mining_imp_val,
                                          categoric_values,
                                          tasktype)
            elif ('Booster' in str(model)):
                PMML_kwargs = ext(model,tasktype,target_name)
            else:
                PMML_kwargs = get_PMML_kwargs(model,
                                                derived_col_names,
                                                col_names,
                                                target_name,
                                                mining_imp_val,
                                                categoric_values,
                                                tasktype)
            model_obj = list(PMML_kwargs.values())[0][0]
            model_obj.modelName = model_name
            key = list(PMML_kwargs.keys())[0]
            if key in models_dict:
                models_dict[key].append(model_obj)
            else:
                PMML_kwargs = {key:[model_obj]}
                models_dict.update(PMML_kwargs)
        data_dicts.append(get_data_dictionary(model, col_names, target_name, categoric_values))

    pmml = pml.PMML(
        version=get_version(),
Header=get_header(), MiningBuildTask=get_mining_buildtask(toExportDict), DataDictionary=get_data_dictionary_values(data_dicts), script = get_script_execution(toExportDict), **trfm_dict_kwargs, **models_dict ) pmml.export(outfile=open(pmml_f_name, "w"), level=0) def get_trfm_dict_kwargs(col_names,pipelineOnly,trfm_dict_kwargs,model,model_name): if isinstance(col_names, np.ndarray): col_names = col_names.tolist() #ppln_sans_predictor = pipeline.steps[:-1] ppln_sans_predictor = pipelineOnly.steps derived_col_names = col_names categoric_values = tuple() mining_imp_val = tuple() if ppln_sans_predictor: pml_pp = pp.get_preprocess_val(ppln_sans_predictor, col_names, model, model_name) trfm_dict_kwargs['TransformationDictionary'].append(pml_pp['trfm_dict']) derived_col_names = pml_pp['derived_col_names'] col_names = pml_pp['preprocessed_col_names'] categoric_values = pml_pp['categorical_feat_values'] mining_imp_val = pml_pp['mining_imp_values'] return derived_col_names,categoric_values,mining_imp_val,trfm_dict_kwargs def processScript(scr): scr=scr.replace('&','&amp;') return scr def get_data_dictionary_values(data_dicts): data_dicts = [x for x in data_dicts if x is not None] lst = [] lislen = len(data_dicts) if lislen != 0: for indfile in data_dicts[0].DataField: lst.append(indfile.get_name()) if lislen == 0: datadict = None elif lislen == 1: datadict = data_dicts[0] else: for dd in range(1,lislen): for indfile in data_dicts[dd].DataField: if indfile.get_name() in lst and len(indfile.get_Value())==0: pass else: data_dicts[0].add_DataField(indfile) lst.append(indfile.get_name()) datadict = data_dicts[0] return datadict def get_script_execution(toExportDict): # Script execution scrps = [] for model_name in toExportDict.keys(): if toExportDict[model_name]['preProcessingScript'] is not None: lstlen = len(toExportDict[model_name]['preProcessingScript']['scripts']) for leng in range(lstlen): scrps.append(pml.script(content=processScript(toExportDict[model_name]['preProcessingScript']['scripts'][leng]), for_= model_name, class_ = 'preprocessing', scriptPurpose = toExportDict[model_name]['preProcessingScript']['scriptpurpose'][leng] )) if toExportDict[model_name]['postProcessingScript'] is not None: lstlen = len(toExportDict[model_name]['postProcessingScript']['scripts']) for leng in range(0,lstlen): scrps.append(pml.script(content=processScript(toExportDict[model_name]['postProcessingScript']['scripts'][leng]), for_= model_name, class_ = 'postprocessing', scriptPurpose = toExportDict[model_name]['postProcessingScript']['scriptpurpose'][leng] )) return scrps def get_entire_string(pipe0): pipe_steps = pipe0.steps pipe_memory = 'memory=' + str(pipe0.memory) df_container = '' pipe_container = '' for step_idx, step in enumerate(pipe_steps): pipe_step_container = '' step_name = step[0] step_item = step[1] if step_item.__class__.__name__ == "DataFrameMapper": df_default_val = "default=" + str(step_item.default) df_out_val = "df_out=" + str(step_item.df_out) input_df_val = "input_df=" + str(step_item.input_df) sparse_val = "sparse=" + str(step_item.sparse) for feature in step_item.features: if not df_container: df_container = df_container + str(feature) else: df_container = df_container + ',' + str(feature) df_container = '[' + df_container + ']' df_container = 'features=' + df_container df_container = df_default_val + ',' + df_out_val + ',\n\t' + df_container df_container = df_container + ',\n\t' + input_df_val + ',' + sparse_val df_container = '(' + df_container + ')' df_container = 'DataFrameMapper' + 
df_container df_container = '\'' + step_name + '\'' + ',' + df_container df_container = '(' + df_container + ')' else: pipe_step_container = '\'' + step_name + '\'' + ',' + str(step_item) pipe_step_container = '(' + pipe_step_container + ')' if not pipe_container: pipe_container = pipe_container + pipe_step_container else: pipe_container = pipe_container + ',' + pipe_step_container if df_container: pipe_container = df_container + ',' + pipe_container pipe_container = '[' + pipe_container + ']' pipe_container = 'steps=' + pipe_container pipe_container = pipe_memory + ',\n ' + pipe_container pipe_container = 'Pipeline(' + pipe_container + ')' return pipe_container def get_mining_buildtask(toExportDict): extension = [] for model_name in toExportDict.keys(): pipeline = toExportDict[model_name]['pipelineObj'] if 'keras' in str(pipeline): pass else: if pipeline: pipeline = get_entire_string(pipeline) extension.append(pml.Extension(value=pipeline,for_=model_name,name="preprocessingPipeline")) modelobj = toExportDict[model_name]['modelObj'] modelobj = str(modelobj) extension.append(pml.Extension(value=modelobj,for_=model_name,name="modelObject")) if toExportDict[model_name]['hyperparameters']: extension.append(pml.Extension(value=toExportDict[model_name]['hyperparameters'],for_=model_name,name="hyperparameters")) mining_bld_task = pml.MiningBuildTask(Extension = extension) return mining_bld_task def any_in(seq_a, seq_b): return any(elem in seq_b for elem in seq_a) def get_PMML_kwargs(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype): """ It returns all the pmml elements. Parameters ---------- model : Scikit-learn model object An instance of Scikit-learn model. derived_col_names : List Contains column names after preprocessing col_names : List Contains list of feature/column names. target_name : String Name of the target column . 
mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value categoric_values : tuple Contains Categorical attribute names and its values Returns ------- algo_kwargs : Dictionary Get the PMML model argument based on scikit learn model object """ skl_mdl_super_cls_names = get_super_cls_names(model) # regression_model_names = ('LinearRegression','LinearSVR') # regression_mining_model_names = ('LogisticRegression', 'RidgeClassifier','LinearDiscriminantAnalysis', \ # 'SGDClassifier','LinearSVC',) regression_model_names = ('LinearRegression', 'LogisticRegression', 'RidgeClassifier', 'SGDClassifier', 'LinearDiscriminantAnalysis','LinearSVC','LinearSVR') tree_model_names = ('BaseDecisionTree',) support_vector_model_names = ('SVC', 'SVR') anomaly_model_names = ('OneClassSVM',) naive_bayes_model_names = ('GaussianNB',) mining_model_names = ('RandomForestRegressor', 'RandomForestClassifier', 'GradientBoostingClassifier', 'GradientBoostingRegressor','IsolationForest') neurl_netwk_model_names = ('MLPClassifier', 'MLPRegressor') nearest_neighbour_names = ('NeighborsBase',) clustering_model_names = ('KMeans',) if any_in(tree_model_names, skl_mdl_super_cls_names): algo_kwargs = {'TreeModel': get_tree_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype)} # elif any_in(regression_mining_model_names, skl_mdl_super_cls_names): # if len(model.classes_) == 2: # algo_kwargs = {'RegressionModel': get_regrs_models(model, # derived_col_names, # col_names, # target_name, # mining_imp_val, # categoric_values, # tasktype)} # else: # algo_kwargs = {'MiningModel': get_reg_mining_models(model, # derived_col_names, # col_names, # target_name, # mining_imp_val, # categoric_values, # tasktype)} elif any_in(regression_model_names, skl_mdl_super_cls_names): algo_kwargs = {'RegressionModel': get_regrs_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype)} elif any_in(support_vector_model_names, skl_mdl_super_cls_names): algo_kwargs = {'SupportVectorMachineModel': get_supportVectorMachine_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype)} elif any_in(mining_model_names, skl_mdl_super_cls_names): algo_kwargs = {'MiningModel': get_ensemble_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype)} elif any_in(neurl_netwk_model_names, skl_mdl_super_cls_names): algo_kwargs = {'NeuralNetwork': get_neural_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype)} elif any_in(naive_bayes_model_names, skl_mdl_super_cls_names): algo_kwargs = {'NaiveBayesModel': get_naiveBayesModel(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype)} elif any_in(nearest_neighbour_names, skl_mdl_super_cls_names): algo_kwargs = {'NearestNeighborModel': get_nearestNeighbour_model(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype)} elif any_in(anomaly_model_names, skl_mdl_super_cls_names): algo_kwargs = {'AnomalyDetectionModel': get_anomalydetection_model(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype)} elif any_in(clustering_model_names, skl_mdl_super_cls_names): algo_kwargs = {'ClusteringModel': get_clustering_model(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype )} else: raise NotImplementedError("{} is not 
Implemented!".format(model.__class__.__name__)) return algo_kwargs def get_model_kwargs(model, col_names, target_name, mining_imp_val, categoric_values): """ It returns all the model element for a specific model. Parameters ---------- model : An instance of Scikit-learn model. col_names : List Contains list of feature/column names. target_name : String Name of the Target column. mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value Returns ------- model_kwargs : Dictionary Returns function name, MiningSchema and Output of the sk_model object """ model_kwargs = dict() model_kwargs['functionName'] = get_mining_func(model) model_kwargs['MiningSchema'] = get_mining_schema(model, col_names, target_name, mining_imp_val, categoric_values) if model.__class__.__name__ == 'IsolationForest': model_kwargs['Output']=get_anomaly_detection_output(model) else: model_kwargs['Output'] = get_output(model, target_name) return model_kwargs def get_reg_mining_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype): num_classes = len(model.classes_) model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val, categoric_values) mining_model = pml.MiningModel(modelName=model.__class__.__name__, taskType=tasktype,**model_kwargs) inner_mining_schema = [mfield for mfield in model_kwargs['MiningSchema'].MiningField if mfield.usageType != 'target'] segmentation = pml.Segmentation(multipleModelMethod="modelChain") for idx in range(num_classes): segment = pml.Segment(id=str(idx+1),True_=pml.True_()) segment.RegressionModel = pml.RegressionModel( functionName='regression', MiningSchema=pml.MiningSchema( MiningField=inner_mining_schema ), Output=pml.Output( OutputField=[ pml.OutputField( name="probablity_"+str(idx), optype="continuous", dataType="double" ) ] ), RegressionTable=get_reg_tab_for_reg_mining_model(model,derived_col_names,idx) ) if model.__class__.__name__ != 'LinearSVC': segment.RegressionModel.normalizationMethod = "logit" segmentation.add_Segment(segment) last_segment = pml.Segment(id=str(num_classes+1),True_=pml.True_()) mining_flds_for_last = [pml.MiningField(name="probablity_"+str(idx)) for idx in range(num_classes)] mining_flds_for_last.append(pml.MiningField(name=target_name,usageType="target")) mining_schema_for_last = pml.MiningSchema(MiningField=mining_flds_for_last) reg_tab_for_last = list() for idx in range(num_classes): reg_tab_for_last.append( pml.RegressionTable( intercept="0.0", targetCategory=str(model.classes_[idx]), NumericPredictor=[pml.NumericPredictor( name="probablity_"+str(idx), coefficient="1.0" )] ) ) last_segment.RegressionModel = pml.RegressionModel( functionName="classification", MiningSchema=mining_schema_for_last, RegressionTable=reg_tab_for_last ) if model.__class__.__name__ != 'LinearSVC': last_segment.RegressionModel.normalizationMethod = "simplemax" segmentation.add_Segment(last_segment) mining_model.set_Segmentation(segmentation) return [mining_model] def get_reg_tab_for_reg_mining_model(model, col_names, index): reg_tab = pml.RegressionTable(intercept="{:.16f}".format(model.intercept_[index])) for idx, coef in enumerate(model.coef_[index]): reg_tab.add_NumericPredictor(pml.NumericPredictor(name=col_names[idx],coefficient="{:.16f}".format(coef))) return [reg_tab] def get_anomalydetection_model(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype): """ It returns the KMean Clustering model element. 
Parameters ---------- model : An instance of Scikit-learn model. derived_col_names : List Contains column names after preprocessing col_names : List Contains list of feature/column names. target_name : String Name of the Target column. mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value categoric_values : tuple Contains Categorical attribute names and its values Returns ------- anomaly_detection_model :List Returns an anomaly detection model within a list """ anomaly_detection_model = list() if 'OneClassSVM' in str(model.__class__): anomaly_detection_model.append( pml.AnomalyDetectionModel( modelName=model.__class__.__name__, algorithmType="ocsvm", functionName="regression", MiningSchema=get_mining_schema(model, col_names, target_name, mining_imp_val,categoric_values), Output=get_anomaly_detection_output(model), taskType=tasktype, SupportVectorMachineModel=get_supportVectorMachine_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype)[0] ) ) # else: # anomaly_detection_model.append( # pml.AnomalyDetectionModel( # modelName="IsolationForests", # algorithmType="iforest", # functionName="regression", # MiningSchema=get_mining_schema(model, col_names, target_name, mining_imp_val), # Output=get_anomaly_detection_output(model), # ParameterList=pml.ParameterList(Parameter=[pml.Parameter( # name="training_data_count", # value=model.max_samples_)]), # MiningModel=get_ensemble_models(model, # derived_col_names, # col_names, # target_name, # mining_imp_val, # categoric_values)[0] # ) # ) return anomaly_detection_model def get_anomaly_detection_output(model): """ Parameters ---------- Returns ------- output_fields : Returns an Output instance of anomaly detection model """ output_fields = list() if 'OneClassSVM' in str(model.__class__): output_fields.append(pml.OutputField( name="anomalyScore", feature="predictedValue", optype="continuous", dataType="float")) output_fields.append(pml.OutputField( name="anomaly", feature="anomaly", optype="categorical", dataType="boolean", threshold="0" )) else: n = model.max_samples_ eulers_gamma = 0.577215664901532860606512090082402431 output_fields.append(pml.OutputField(name="rawAnomalyScore", optype="continuous", dataType="double", feature="predictedValue", isFinalResult="false")) output_fields.append(pml.OutputField(name="normalizedAnomalyScore", optype="continuous", dataType="double", feature="transformedValue", isFinalResult="false", Apply=pml.Apply(function="/", FieldRef=[pml.FieldRef(field="rawAnomalyScore")], Constant=[pml.Constant(dataType="double", valueOf_=(2.0*(math.log(n-1.0)+eulers_gamma))- (2.0*((n-1.0)/n)))]))) appl_inner_inner = pml.Apply(function="*") cnst = pml.Constant(dataType="double", valueOf_=-1.0) fldref = pml.FieldRef(field="normalizedAnomalyScore") cnst.original_tagname_ = 'Constant' appl_inner_inner.add_FieldRef(cnst) appl_inner_inner.add_FieldRef(fldref) appl_inner = pml.Apply(function='pow') cnst = pml.Constant(dataType="double", valueOf_=2.0) cnst.original_tagname_ = 'Constant' appl_inner.add_FieldRef(cnst) appl_inner_inner.original_tagname_='Apply' appl_inner.add_FieldRef(appl_inner_inner) appl_outer = pml.Apply(function="-") cnst = pml.Constant(dataType="double", valueOf_=0.5) cnst.original_tagname_ = 'Constant' appl_outer.add_FieldRef(cnst) appl_inner.original_tagname_='Apply' appl_outer.add_FieldRef(appl_inner) output_fields.append(pml.OutputField(name="decisionFunction", optype="continuous", dataType="double", feature="transformedValue", 
                                                isFinalResult="false",
                                                Apply=appl_outer))
        output_fields.append(pml.OutputField(name="outlier",
                                                optype="categorical",
                                                dataType="boolean",
                                                feature="transformedValue",
                                                isFinalResult="true",
                                                Apply=pml.Apply(function="greaterThan",
                                                                FieldRef=[pml.FieldRef(field="decisionFunction")],
                                                                Constant=[pml.Constant(dataType="double",
                                                                            valueOf_="{:.16f}".format(model.threshold_))])))
    return pml.Output(OutputField=output_fields)


def get_clustering_model(model, derived_col_names, col_names, target_name, mining_imp_val,categoric_values,tasktype):
    """
    It returns the KMeans Clustering model element.

    Parameters
    ----------
    model :
        An instance of Scikit-learn model.
    derived_col_names : List
        Contains column names after preprocessing
    col_names : List
        Contains list of feature/column names.
    target_name : String
        Name of the Target column.

    Returns
    -------
    clustering_models :List
        Returns a KMeans Clustering model within a list
    """
    clustering_models = list()
    model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val,categoric_values)
    values, counts = np.unique(model.labels_,return_counts=True)
    model_kwargs["Output"] = get_output_for_clustering(values)
    clustering_models.append(
        pml.ClusteringModel(
            modelClass="centerBased",
            modelName=model.__class__.__name__,
            numberOfClusters=get_cluster_num(model),
            ComparisonMeasure=get_comp_measure(),
            ClusteringField=get_clustering_flds(derived_col_names),
            Cluster=get_cluster_vals(model,counts),
            taskType=tasktype,
            **model_kwargs
        )
    )
    return clustering_models


def get_output_for_clustering(values):
    """
    Parameters
    ----------
    values :
        The unique cluster labels assigned by the model.

    Returns
    -------
    output_fields :List
        Returns a list of OutputField
    """
    output_fields = list()
    output_fields.append(pml.OutputField(name="cluster", optype="categorical",dataType="string",feature="predictedValue"))
    for idx, val in enumerate(values):
        output_fields.append(
            pml.OutputField(
                name="affinity("+str(idx)+")",
                optype="continuous",
                dataType="double",
                feature="entityAffinity",
                value=str(val)
            )
        )
    return pml.Output(OutputField=output_fields)


def get_cluster_vals(model,counts):
    """
    Parameters
    ----------
    model :
        An instance of Scikit-learn model.
    counts :
        The number of samples assigned to each cluster.

    Returns
    -------
    cluster_flds :List
        Returns a list of Cluster instances
    """
    centroids = model.cluster_centers_
    cluster_flds = []
    for centroid_idx in range(centroids.shape[0]):
        centroid_values = ""
        centroid_flds = pml.ArrayType(type_="real")
        for centroid_cordinate_idx in range(centroids.shape[1]):
            centroid_flds.content_[0].value = centroid_values + "{:.16f}".format(centroids[centroid_idx][centroid_cordinate_idx])
            centroid_values = centroid_flds.content_[0].value + " "
        cluster_flds.append(pml.Cluster(id=str(centroid_idx), Array=centroid_flds,size=str(counts[centroid_idx])))
    return cluster_flds


def get_cluster_num(model):
    """
    Parameters
    ----------
    model :
        An instance of Scikit-learn model.

    Returns
    -------
    model.n_clusters: Integer
        Returns the number of clusters
    """
    return model.n_clusters


def get_comp_measure():
    """
    Parameters
    ----------

    Returns
    -------
    Returns an instance of comparison measure
    """
    comp_equation = pml.euclidean()
    return pml.ComparisonMeasure(euclidean=comp_equation, kind="distance")


def get_clustering_flds(col_names):
    """
    Parameters
    ----------
    col_names :
        Contains list of feature/column names.
Returns ------- clustering_flds: List Returns the list containing clustering field instances """ clustering_flds = [] for name in col_names: clustering_flds.append(pml.ClusteringField(field=str(name))) return clustering_flds def get_nearestNeighbour_model(model, derived_col_names, col_names, target_name, mining_imp_val,categoric_values,tasktype): """ It returns the Nearest Neighbour model element. Parameters ---------- model : An instance of Scikit-learn model. derived_col_names : List Contains column names after preprocessing col_names : List Contains list of feature/column names. target_name : String Name of the Target column. Returns ------- nearest_neighbour_model : Returns a nearest neighbour model instance """ model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val,categoric_values) nearest_neighbour_model = list() nearest_neighbour_model.append( pml.NearestNeighborModel( modelName=model.__class__.__name__, continuousScoringMethod='average', algorithmName="KNN", numberOfNeighbors=model.n_neighbors, KNNInputs=get_knn_inputs(derived_col_names), ComparisonMeasure=get_comparison_measure(model), TrainingInstances=get_training_instances(model, derived_col_names, target_name), taskType=tasktype, **model_kwargs ) ) return nearest_neighbour_model def get_training_instances(model, derived_col_names, target_name): """ It returns the Training Instance element. Parameters ---------- model : An instance of Scikit-learn model. derived_col_names : List Contains column names after preprocessing target_name : String Name of the Target column. Returns ------- TrainingInstances : Returns a TrainingInstances instance """ return pml.TrainingInstances( InstanceFields=get_instance_fields(derived_col_names, target_name), InlineTable=get_inline_table(model) ) def get_inline_table(model): """ It Returns the Inline Table element of the model. Parameters ---------- model : An instance of Scikit-learn model. Returns ------- InlineTable : Returns a InlineTable instance. """ rows = [] x = model._tree.get_arrays()[0].tolist() y = model._y.tolist() X = [] for idx in range(len(model._tree.get_arrays()[0][0])): X.append("x" + str(idx + 1)) for idx in range(len(x)): row = pml.row() row.elementobjs_ = ['y'] + X if hasattr(model, 'classes_'): row.y = model.classes_[y[idx]] else: row.y = y[idx] for idx_2 in range(len(x[idx])): exec("row." + X[idx_2] + "=" + str(x[idx][idx_2])) rows.append(row) return pml.InlineTable(row=rows) def get_instance_fields(derived_col_names, target_name): """ It returns the Instance field element. Parameters ---------- derived_col_names : List Contains column names after preprocessing. target_name : String Name of the Target column. Returns ------- InstanceFields : Returns a InstanceFields instance """ instance_fields = list() instance_fields.append(pml.InstanceField(field=target_name, column="y")) for (index, name) in enumerate(derived_col_names): instance_fields.append(pml.InstanceField(field=str(name), column="x" + str(index + 1))) return pml.InstanceFields(InstanceField=instance_fields) def get_comparison_measure(model): """ It return the Comparison measure element. Parameters ---------- model : An instance of Scikit-learn model. Returns ------- comp_measure : Returns a ComparisonMeasure instance. 
""" if model.effective_metric_ == 'euclidean': comp_measure = pml.ComparisonMeasure(euclidean=pml.euclidean(), kind="distance") elif model.effective_metric_ == 'minkowski': comp_measure = pml.ComparisonMeasure(minkowski=pml.minkowski(p_parameter=model.p), kind="distance") elif model.effective_metric_ in ['manhattan','cityblock']: comp_measure = pml.ComparisonMeasure(cityBlock=pml.cityBlock(), kind="distance") elif model.effective_metric_ == 'sqeuclidean': comp_measure = pml.ComparisonMeasure(squaredEuclidean=pml.squaredEuclidean(), kind="distance") elif model.effective_metric_ == 'chebyshev': comp_measure = pml.ComparisonMeasure(chebychev=pml.chebychev(), kind="distance") elif model.effective_metric_ == 'matching': comp_measure = pml.ComparisonMeasure(simpleMatching=pml.simpleMatching(), kind="similarity") elif model.effective_metric_ == 'jaccard': comp_measure = pml.ComparisonMeasure(jaccard=pml.jaccard(), kind="similarity") elif model.effective_metric_ == 'rogerstanimoto': comp_measure = pml.ComparisonMeasure(tanimoto=pml.tanimoto(), kind="similarity") else: raise NotImplementedError("{} metric is not implemented for KNN Model!".format(model.effective_metric_)) return comp_measure def get_knn_inputs(col_names): """ It returns the KNN Inputs element. Parameters ---------- col_names : List Contains list of feature/column names. Returns ------- KNNInputs : Returns a KNNInputs instance. """ knnInput = list() for name in col_names: knnInput.append(pml.KNNInput(field=str(name))) return pml.KNNInputs(KNNInput=knnInput) def get_naiveBayesModel(model, derived_col_names, col_names, target_name, mining_imp_val,categoric_values,tasktype): """ It returns the Naive Bayes Model element of the model. Parameters ---------- model : An instance of Scikit-learn model. derived_col_names : List Contains column names after preprocessing. col_names : List Contains list of feature/column names. target_name : String Name of the Target column. Returns ------- naive_bayes_model : List Returns the NaiveBayesModel """ model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val,categoric_values) naive_bayes_model = list() naive_bayes_model.append(pml.NaiveBayesModel( modelName=model.__class__.__name__, BayesInputs=get_bayes_inputs(model, derived_col_names), BayesOutput=get_bayes_output(model, target_name), threshold=get_threshold(), taskType=tasktype, **model_kwargs )) return naive_bayes_model def get_threshold(): """ It returns the Threshold value. Returns ------- Returns the Threshold value """ return '0.001' def get_bayes_output(model, target_name): """ It returns the Bayes Output element of the model Parameters ---------- model : An instance of Scikit-learn model. target_name : String Name of the Target column. Returns ------- BayesOutput : Returns a BayesOutput instance """ class_counts = model.class_count_ target_val_counts = pml.TargetValueCounts() for name, count in zip(model.classes_, class_counts): tr_val = pml.TargetValueCount(value=str(name), count=str(count)) target_val_counts.add_TargetValueCount(tr_val) return pml.BayesOutput( fieldName=target_name, TargetValueCounts=target_val_counts ) def get_bayes_inputs(model, derived_col_names): """ It returns the Bayes Input element of the model . Parameters ---------- model : An instance of Scikit-learn model. derived_col_names : List Contains column names after preprocessing. Returns ------- bayes_inputs : Returns a BayesInput instance. 
""" bayes_inputs = pml.BayesInputs() for indx, name in enumerate(derived_col_names): means = model.theta_[:, indx] variances = model.sigma_[:, indx] target_val_stats = pml.TargetValueStats() for idx, val in enumerate(model.classes_): target_val = pml.TargetValueStat( val, GaussianDistribution=pml.GaussianDistribution( mean="{:.16f}".format(means[idx]), variance="{:.16f}".format(variances[idx]))) target_val_stats.add_TargetValueStat(target_val) bayes_inputs.add_BayesInput(pml.BayesInput(fieldName=str(name), TargetValueStats=target_val_stats)) return bayes_inputs def get_supportVectorMachine_models(model, derived_col_names, col_names, target_names, mining_imp_val, categoric_values, tasktype): """ It returns the Support Vector Machine Model element. Parameters ---------- model : An instance of Scikit-learn model. derived_col_names : List Contains column names after preprocessing. col_names : List Contains list of feature/column names. target_names : String Name of the Target column. mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value categoric_values : tuple Contains Categorical attribute names and its values Returns ------- supportVector_models : List Returns SupportVectorMachineModel elements which contains classificationMethod, VectorDictionary, SupportVectorMachine, kernelType """ model_kwargs = get_model_kwargs(model, col_names, target_names, mining_imp_val,categoric_values) supportVector_models = list() kernel_type = get_kernel_type(model) supportVector_models.append(pml.SupportVectorMachineModel( modelName=model.__class__.__name__, classificationMethod=get_classificationMethod(model), VectorDictionary=get_vectorDictionary(model, derived_col_names, categoric_values), SupportVectorMachine=get_supportVectorMachine(model), taskType=tasktype, **kernel_type, **model_kwargs )) # supportVector_models[0].export(sys.stdout,0," ") return supportVector_models def get_model_name(model): if 'OneClassSVM' in str(model.__class__): return 'ocsvm' elif 'IsolationForest' in str(model.__class__): return 'iforest' elif 'XGB' in str(model.__class__): return 'XGBoostModel' elif 'LGB' in str(model.__class__): return 'LightGBModel' elif 'GradientBoosting' in str(model.__class__): return 'GradientBoostingModel' elif 'RandomForest' in str(model.__class__): return 'RandomForestModel' def get_ensemble_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype): """ It returns the Mining Model element of the model Parameters ---------- model : An instance of Scikit-learn model. derived_col_names : List Contains column names after preprocessing. col_names : List Contains list of feature/column names. target_name : String Name of the Target column. mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value. 
categoric_values : tuple Contains Categorical attribute names and its values Returns ------- mining_models : List Returns the MiningModel of the respective ensemble model """ model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val,categoric_values) if model.__class__.__name__ == 'GradientBoostingRegressor': model_kwargs['Targets'] = get_targets(model, target_name) mining_fields = model_kwargs['MiningSchema'].MiningField new_mining_fields = list() if model.__class__.__name__ != 'IsolationForest': for idx, imp_ in enumerate(model.feature_importances_): if imp_ > 0: new_mining_fields.append(mining_fields[idx]) else: for idx in range(len(col_names)): new_mining_fields.append(mining_fields[idx]) for fld in mining_fields: if fld.usageType == 'target': new_mining_fields.append(fld) model_kwargs['MiningSchema'].MiningField = new_mining_fields mining_models = list() mining_models.append(pml.MiningModel( modelName=model.__class__.__name__, Segmentation=get_outer_segmentation(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype), taskType=tasktype, **model_kwargs )) return mining_models def get_targets(model, target_name): """ It returns the Target element of the model. Parameters ---------- model : A Scikit-learn model instance. target_name : String Name of the Target column. Returns ------- targets : Returns a Target instance. """ if model.__class__.__name__ == 'GradientBoostingRegressor': targets = pml.Targets( Target=[ pml.Target( field=target_name, rescaleConstant="{:.16f}".format(model.init_.mean), rescaleFactor="{:.16f}".format(model.learning_rate) ) ] ) else: targets = pml.Targets( Target=[ pml.Target( field=target_name, rescaleConstant="{:.16f}".format(model.base_score) ) ] ) return targets def get_multiple_model_method(model): """ It returns the name of the Multiple Model Chain element of the model. Parameters ---------- model : A Scikit-learn model instance Returns ------- The multiple model method for a mining model. """ if model.__class__.__name__ == 'GradientBoostingClassifier': return 'modelChain' elif model.__class__.__name__ == 'GradientBoostingRegressor': return 'sum' elif model.__class__.__name__ == 'RandomForestClassifier': return 'majorityVote' elif model.__class__.__name__ in ['RandomForestRegressor','IsolationForest']: return 'average' def get_outer_segmentation(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype): """ It returns the Segmentation element of the model. Parameters ---------- model : A Scikit-learn model instance. derived_col_names : List Contains column names after preprocessing. col_names : List Contains list of feature/column names. target_name : String Name of the Target column. mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value categoric_values : tuple Contains Categorical attribute names and its values Returns ------- segmentation : A segmentation instance. """ segmentation = pml.Segmentation( multipleModelMethod=get_multiple_model_method(model), Segment=get_segments(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype) ) return segmentation def get_segments(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype): """ It returns the Segment element of the model. Parameters ---------- model : A Scikit-learn model instance. derived_col_names : List Contains column names after preprocessing. col_names : List Contains list of feature/column names. 
target_name : String Name of the Target column. mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value categoric_values : tuple Contains Categorical attribute names and its values Returns ------- segments : A list of segment instances. """ segments = None if 'GradientBoostingClassifier' in str(model.__class__): segments = get_segments_for_gbc(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype) else: segments = get_inner_segments(model, derived_col_names, col_names, 0) return segments def get_segments_for_gbc(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values,tasktype): """ It returns list of Segments element of the model. Parameters ---------- model : A Scikit-learn model instance. derived_col_names : List Contains column names after preprocessing. col_names : List Contains list of feature/column names. target_name : String Name of the Target column. mining_imp_val : tuple Contains the mining_attributes,mining_strategy, mining_impute_value categoric_values : tuple Contains Categorical attribute names and its values Returns ------- segments : List Get the Segments for the Segmentation element. """ segments = list() out_field_names = list() for estm_idx in range(len(model.estimators_[0])): mining_fields_for_first = list() # for name in col_names: for idx,imp_ in enumerate(model.feature_importances_): # mining_fields_for_first.append(pml.MiningField(name=name)) if imp_ > 0: mining_fields_for_first.append(pml.MiningField(name=col_names[idx])) miningschema_for_first = pml.MiningSchema(MiningField=mining_fields_for_first) output_fields = list() output_fields.append( pml.OutputField( name='decisionFunction(' + str(estm_idx) + ')', feature='predictedValue', dataType="double", isFinalResult=False ) ) if len(model.classes_) == 2: output_fields.append( pml.OutputField( name='transformedDecisionFunction(0)', feature='transformedValue', dataType="double", isFinalResult=True, Apply=pml.Apply( function="+", Constant=[pml.Constant( dataType="double", valueOf_="{:.16f}".format(model.init_.prior) )], Apply_member=[pml.Apply( function="*", Constant=[pml.Constant( dataType="double", valueOf_="{:.16f}".format(model.learning_rate) )], FieldRef=[pml.FieldRef( field="decisionFunction(0)", )] )] ) ) ) else: output_fields.append( pml.OutputField( name='transformedDecisionFunction(' + str(estm_idx) + ')', feature='transformedValue', dataType="double", isFinalResult=True, Apply=pml.Apply( function="+", Constant=[pml.Constant( dataType="double", valueOf_="{:.16f}".format(model.init_.priors[estm_idx]) )], Apply_member=[pml.Apply( function="*", Constant=[pml.Constant( dataType="double", valueOf_="{:.16f}".format(model.learning_rate) )], FieldRef=[pml.FieldRef( field="decisionFunction(" + str(estm_idx) + ")", )] )] ) ) ) out_field_names.append('transformedDecisionFunction(' + str(estm_idx) + ')') segments.append( pml.Segment( True_=pml.True_(), id=str(estm_idx), MiningModel=pml.MiningModel( functionName='regression', modelName="MiningModel", MiningSchema=miningschema_for_first, Output=pml.Output(OutputField=output_fields), Segmentation=pml.Segmentation( multipleModelMethod="sum", Segment=get_inner_segments(model, derived_col_names, col_names, estm_idx) ) ) ) ) reg_model = get_regrs_models(model, out_field_names,out_field_names, target_name, mining_imp_val, categoric_values,tasktype)[0] reg_model.Output = None if len(model.classes_) == 2: reg_model.normalizationMethod="logit" else: 
    reg_model.normalizationMethod = "softmax"
    segments.append(
        pml.Segment(
            id=str(len(model.estimators_[0])),
            True_=pml.True_(),
            RegressionModel=reg_model
        )
    )
    return segments


def get_inner_segments(model, derived_col_names, col_names, index):
    """
    It returns the inner segments of the model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    derived_col_names : List
        Contains column names after preprocessing.
    col_names : List
        Contains list of feature/column names.
    index : Integer
        The index of the estimator for the model

    Returns
    -------
    segments : List
        Get the Segments for the Segmentation element.
    """
    segments = list()
    for estm_idx in range(model.n_estimators):
        if np.asanyarray(model.estimators_).ndim == 1:
            estm = model.estimators_[estm_idx]
        else:
            estm = model.estimators_[estm_idx][index]
        tree_features = estm.tree_.feature
        features_ = list()
        for feat in tree_features:
            if feat != -2 and feat not in features_:
                features_.append(feat)
        if len(features_) != 0:
            mining_fields = list()
            # for feat in col_names:
            feature_importances = estm.tree_.compute_feature_importances()
            for idx, imp_ in enumerate(feature_importances):
                if imp_ > 0:
                    # mining_fields.append(pml.MiningField(name=feat))
                    mining_fields.append(pml.MiningField(name=col_names[idx]))
            segments.append(
                pml.Segment(
                    True_=pml.True_(),
                    id=str(estm_idx),
                    TreeModel=pml.TreeModel(
                        modelName=estm.__class__.__name__,
                        functionName=get_mining_func(estm),
                        splitCharacteristic="multiSplit",
                        MiningSchema=pml.MiningSchema(MiningField=mining_fields),
                        Node=get_node(estm, derived_col_names, model)
                    )
                )
            )
    return segments


def get_classificationMethod(model):
    """
    It returns the classification method name of the SVM model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.

    Returns
    -------
    Returns the classification method of the SVM model
    """
    if model.__class__.__name__ == 'SVC':
        return 'OneAgainstOne'
    else:
        return 'OneAgainstAll'


def get_vectorDictionary(model, derived_col_names, categoric_values):
    """
    It returns the Vector Dictionary element.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    derived_col_names : List
        Contains column names after preprocessing.
    categoric_values : tuple
        Contains Categorical attribute names and its values

    Returns
    -------
    VectorDictionary :
        A Vector Dictionary instance.
    """
    model_coef = model.C
    fieldref_element = get_vectorfields(model_coef, derived_col_names, categoric_values)
    vectorfields_element = pml.VectorFields(FieldRef=fieldref_element)
    vec_id = list(model.support_)
    vecinsts = list()
    vecs = list(model.support_vectors_)
    if model.support_vectors_.__class__.__name__ != 'csr_matrix':
        for vec_idx in range(len(vecs)):
            vecinsts.append(pml.VectorInstance(
                id=vec_id[vec_idx],
                REAL_SparseArray=pml.REAL_SparseArray(
                    n=len(fieldref_element),
                    Indices=[x for x in range(1, len(vecs[vec_idx]) + 1)],
                    REAL_Entries=vecs[vec_idx].tolist()
                )
            ))
    else:
        for vec_idx in range(len(vecs)):
            vecinsts.append(pml.VectorInstance(
                id=vec_id[vec_idx],
                REAL_SparseArray=pml.REAL_SparseArray(
                    n=len(fieldref_element),
                    Indices=[x for x in range(1, len(vecs[vec_idx].todense().tolist()[0]) + 1)],
                    REAL_Entries=vecs[vec_idx].todense().tolist()[0]
                )
            ))
    vd = pml.VectorDictionary(VectorFields=vectorfields_element, VectorInstance=vecinsts)
    return vd


def get_vectorfields(model_coef, feat_names, categoric_values):
    """
    It returns the Vector Fields.

    Parameters
    ----------
    model_coef :
        Contains the estimator's coefficient values.
    feat_names : List
        Contains column names after preprocessing.
    categoric_values : tuple
        Contains Categorical attribute names and its values

    Returns
    -------
    Returns the list of predictor (FieldRef) elements for the Support Vector model.
    """
    der_fld_len = len(feat_names)
    der_fld_idx = 0
    row_idx = -1
    predictors = list()
    if categoric_values:
        class_lbls = categoric_values[0]
        class_attribute = categoric_values[1]
    while der_fld_idx < der_fld_len:
        if is_labelbinarizer(feat_names[der_fld_idx]):
            if not is_stdscaler(feat_names[der_fld_idx]):
                class_id = get_classid(class_attribute, feat_names[der_fld_idx])
                cat_predictors = get_categoric_pred(feat_names[der_fld_idx], row_idx, der_fld_idx, model_coef,
                                                   class_lbls[class_id], class_attribute[class_id])
                for predictor in cat_predictors:
                    predictors.append(predictor)
                if len(class_lbls[class_id]) == 2:
                    incrementor = 1
                else:
                    incrementor = len(class_lbls[class_id])
                der_fld_idx = der_fld_idx + incrementor
            else:
                vectorfields_element = pml.FieldRef(field=feat_names[der_fld_idx])
                predictors.append(vectorfields_element)
                der_fld_idx += 1
        elif is_onehotencoder(feat_names[der_fld_idx]):
            if not is_stdscaler(feat_names[der_fld_idx]):
                class_id = get_classid(class_attribute, feat_names[der_fld_idx])
                cat_predictors = get_categoric_pred(feat_names[der_fld_idx], row_idx, der_fld_idx, model_coef,
                                                   class_lbls[class_id], class_attribute[class_id])
                for predictor in cat_predictors:
                    predictors.append(predictor)
                incrementor = len(class_lbls[class_id])
                der_fld_idx = der_fld_idx + incrementor
            else:
                vectorfields_element = pml.FieldRef(field=feat_names[der_fld_idx])
                predictors.append(vectorfields_element)
                der_fld_idx += 1
        else:
            vectorfields_element = pml.FieldRef(field=feat_names[der_fld_idx])
            predictors.append(vectorfields_element)
            der_fld_idx += 1
    return predictors


def is_onehotencoder(feat_name):
    """
    Parameters
    ----------
    feat_name : string
        Contains the name of the attribute

    Returns
    -------
    Returns a boolean value that states whether OneHotEncoder has been applied or not
    """
    if "oneHotEncoder" in feat_name:
        return True
    else:
        return False


def get_kernel_type(model):
    """
    It returns the kernel type element.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.

    Returns
    -------
    kernel_kwargs : Dictionary
        Get the respective kernel type of the SVM model.
    """
    kernel_kwargs = dict()
    if model.kernel == 'linear':
        kernel_kwargs['LinearKernelType'] = pml.LinearKernelType(description='Linear Kernel Type')
    elif model.kernel == 'poly':
        kernel_kwargs['PolynomialKernelType'] = pml.PolynomialKernelType(
            description='Polynomial Kernel type',
            gamma="{:.16f}".format(model._gamma),
            coef0="{:.16f}".format(model.coef0),
            degree=model.degree)
    elif model.kernel == 'rbf':
        kernel_kwargs['RadialBasisKernelType'] = pml.RadialBasisKernelType(
            description='Radial Basis Kernel Type',
            gamma="{:.16f}".format(model._gamma))
    elif model.kernel == 'sigmoid':
        kernel_kwargs['SigmoidKernelType'] = pml.SigmoidKernelType(
            description='Sigmoid Kernel Type',
            gamma="{:.16f}".format(model._gamma),
            coef0="{:.16f}".format(model.coef0))
    else:
        raise NotImplementedError("{} kernel is not implemented!".format(model.kernel))
    return kernel_kwargs


def get_supportVectorMachine(model):
    """
    It returns the Support Vector Machine element.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.

    Returns
    -------
    support_vector_machines : List
        Get the Support Vector Machine element which contains targetCategory,
        alternateTargetCategory, SupportVectors, Coefficients
    """
    support_vector_machines = list()
    if model.__class__.__name__ in ['SVR', 'OneClassSVM']:
        support_vector = list()
        for sv in model.support_:
            support_vector.append(pml.SupportVector(vectorId=sv))
        support_vectors = pml.SupportVectors(SupportVector=support_vector)
        coefficient = list()
        absoValue = model.intercept_[0]
        if model.dual_coef_.__class__.__name__ != 'csr_matrix':
            for coef in model.dual_coef_:
                for num in coef:
                    coefficient.append(pml.Coefficient(value="{:.16f}".format(num)))
        else:
            dual_coefficient = model.dual_coef_.data
            for num in dual_coefficient:
                coefficient.append(pml.Coefficient(value="{:.16f}".format(num)))
        coeff = pml.Coefficients(absoluteValue=absoValue, Coefficient=coefficient)
        support_vector_machines.append(pml.SupportVectorMachine(SupportVectors=support_vectors,
                                                                Coefficients=coeff))
    else:
        support_vector_locs = np.cumsum(np.hstack([[0], model.n_support_]))
        n_class = model.dual_coef_.shape[0] + 1
        coef_abs_val_index = 0
        for class1 in range(n_class):
            sv1 = model.support_[support_vector_locs[class1]:support_vector_locs[class1 + 1]]
            for class2 in range(class1 + 1, n_class):
                svs = list()
                coefs = list()
                sv2 = model.support_[support_vector_locs[class2]:support_vector_locs[class2 + 1]]
                svs.append(list(sv1) + list(sv2))
                alpha1 = model.dual_coef_[class2 - 1,
                                          support_vector_locs[class1]:support_vector_locs[class1 + 1]]
                alpha2 = model.dual_coef_[class1,
                                          support_vector_locs[class2]:support_vector_locs[class2 + 1]]
                coefs.append(list(alpha1) + list(alpha2))
                all_svs = list()
                for sv in svs[0]:
                    all_svs.append(pml.SupportVector(vectorId=sv))
                all_coefs = list()
                for coef in coefs[0]:
                    all_coefs.append(pml.Coefficient(value="{:.16f}".format(coef)))
                coef_abs_value = model.intercept_[coef_abs_val_index]
                coef_abs_val_index += 1
                if len(model.classes_) == 2:
                    support_vector_machines.append(
                        pml.SupportVectorMachine(
                            targetCategory=model.classes_[class1],
                            alternateTargetCategory=model.classes_[class2],
                            SupportVectors=pml.SupportVectors(SupportVector=all_svs),
                            Coefficients=pml.Coefficients(absoluteValue="{:.16f}".format(coef_abs_value),
                                                          Coefficient=all_coefs)
                        )
                    )
                else:
                    support_vector_machines.append(
                        pml.SupportVectorMachine(
                            targetCategory=model.classes_[class2],
                            alternateTargetCategory=model.classes_[class1],
                            SupportVectors=pml.SupportVectors(SupportVector=all_svs),
                            Coefficients=pml.Coefficients(absoluteValue="{:.16f}".format(coef_abs_value),
                                                          Coefficient=all_coefs)
                        )
                    )
    return support_vector_machines


def get_tree_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype):
    """
    It returns the Tree Model element of the model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    derived_col_names : List
        Contains column names after preprocessing.
    col_names : List
        Contains list of feature/column names.
    target_name : String
        Name of the Target column.
    mining_imp_val : tuple
        Contains the mining_attributes, mining_strategy, mining_impute_value
    categoric_values : tuple
        Contains Categorical attribute names and its values
    tasktype : String
        Task type of the model (assigned to the model's taskType attribute).

    Returns
    -------
    tree_models : List
        Get the TreeModel element.
    """
    model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val, categoric_values)
    tree_models = list()
    tree_models.append(pml.TreeModel(
        modelName=model.__class__.__name__,
        Node=get_node(model, derived_col_names),
        taskType=tasktype,
        **model_kwargs
    ))
    return tree_models


def get_neural_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype):
    """
    It returns the Neural Network element of the model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    derived_col_names : List
        Contains column names after preprocessing.
    col_names : List
        Contains list of feature/column names.
    target_name : String
        Name of the Target column.
    mining_imp_val : tuple
        Contains the mining_attributes, mining_strategy, mining_impute_value.
    categoric_values : tuple
        Contains Categorical attribute names and its values
    tasktype : String
        Task type of the model (assigned to the model's taskType attribute).

    Returns
    -------
    neural_model : List
        Model attributes for PMML file.
    """
    model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val, categoric_values)
    neural_model = list()
    neural_model.append(pml.NeuralNetwork(
        modelName=model.__class__.__name__,
        threshold='0',
        altitude='1.0',
        activationFunction=get_funct(model),
        NeuralInputs=get_neuron_input(derived_col_names),
        NeuralLayer=get_neural_layer(model, derived_col_names, target_name)[0],
        NeuralOutputs=get_neural_layer(model, derived_col_names, target_name)[1],
        **model_kwargs
    ))
    return neural_model


def get_funct(sk_model):
    """
    It returns the activation function of the model.

    Parameters
    ----------
    sk_model :
        A Scikit-learn model instance.

    Returns
    -------
    a_fn : String
        Returns the activation function.
    """
    a_fn = sk_model.activation
    if a_fn == 'relu':
        a_fn = 'rectifier'
    return a_fn


def get_regrs_models(model, derived_col_names, col_names, target_name, mining_imp_val, categoric_values, tasktype):
    """
    It returns the Regression Model element of the model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    derived_col_names : List
        Contains column names after preprocessing.
    col_names : List
        Contains list of feature/column names.
    target_name : String
        Name of the Target column.
    mining_imp_val : tuple
        Contains the mining_attributes, mining_strategy, mining_impute_value
    categoric_values : tuple
        Contains Categorical attribute names and its values
    tasktype : String
        Task type of the model (assigned to the model's taskType attribute).

    Returns
    -------
    regrs_models : List
        Returns a regression model of the respective model
    """
    model_kwargs = get_model_kwargs(model, col_names, target_name, mining_imp_val, categoric_values)
    if model.__class__.__name__ not in ['LinearRegression', 'LinearSVR']:
        model_kwargs['normalizationMethod'] = 'logit'
    regrs_models = list()
    regrs_models.append(pml.RegressionModel(
        modelName=model.__class__.__name__,
        RegressionTable=get_regrs_tabl(model, derived_col_names, target_name, categoric_values),
        taskType=tasktype,
        **model_kwargs
    ))
    return regrs_models


def get_regrs_tabl(model, feature_names, target_name, categoric_values):
    """
    It returns the Regression Table element of the model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    feature_names : List
        Contains column names after preprocessing.
    target_name : String
        Name of the Target column.
    categoric_values : tuple
        Contains Categorical attribute names and its values

    Returns
    -------
    merge : List
        Returns a list of Regression Table.
    """
    merge = list()
    if hasattr(model, 'intercept_'):
        func_name = get_mining_func(model)
        inter = model.intercept_
        model_coef = model.coef_
        merge = list()
        target_classes = target_name
        row_idx = 0
        if not hasattr(inter, '__iter__') or model.__class__.__name__ in ['LinearRegression', 'LinearSVR']:
            inter = np.array([inter])
            target_classes = [target_classes]
            model_coef = np.ravel(model_coef)
            model_coef = model_coef.reshape(1, model_coef.shape[0])
            target_cat = None
        else:
            target_classes = model.classes_
            max_target_index = len(target_classes) - 1
            target_cat = target_classes[max_target_index]
        if len(inter) == 1:
            regr_predictor = get_regr_predictors(model_coef, row_idx, feature_names, categoric_values)
            merge.append(
                pml.RegressionTable(
                    intercept="{:.16f}".format(inter.item()),
                    targetCategory=target_cat,
                    NumericPredictor=regr_predictor
                )
            )
            if func_name != 'regression':
                merge.append(
                    pml.RegressionTable(
                        intercept="0.0",
                        targetCategory=target_classes[0]
                    )
                )
        else:
            for tgname, tg_idx in zip(np.unique(target_classes), range(len(np.unique(target_classes)))):
                row_idx = tg_idx
                regr_predictors = get_regr_predictors(model_coef, row_idx, feature_names, categoric_values)
                merge.append(
                    pml.RegressionTable(
                        intercept="{:.16f}".format(inter[tg_idx]),
                        targetCategory=tgname,
                        NumericPredictor=regr_predictors
                    )
                )
    else:
        if len(model.classes_) == 2:
            merge.append(
                pml.RegressionTable(
                    NumericPredictor=[pml.NumericPredictor(coefficient='1.0', name=feature_names[0])],
                    intercept='0.0',
                    targetCategory=str(model.classes_[-1])
                )
            )
            merge.append(
                pml.RegressionTable(intercept='0.0', targetCategory=str(model.classes_[0]))
            )
        else:
            for feat_idx in range(len(feature_names)):
                merge.append(
                    pml.RegressionTable(
                        NumericPredictor=[pml.NumericPredictor(coefficient='1.0',
                                                              name=feature_names[feat_idx])],
                        intercept='0.0',
                        targetCategory=str(model.classes_[feat_idx])
                    )
                )
    return merge


def get_node(model, features_names, main_model=None):
    """
    It returns the Node element of the model.

    Parameters
    ----------
    model :
        An instance of the estimator of the tree object.
    features_names : List
        Contains the list of feature/column name.
    main_model :
        A Scikit-learn model instance.

    Returns
    -------
    _getNode :
        Get all the underlying Nodes.
    """
    tree = model.tree_
    node_samples = tree.n_node_samples
    if main_model and main_model.__class__.__name__ == 'RandomForestClassifier':
        classes = main_model.classes_
    elif hasattr(model, 'classes_'):
        classes = model.classes_
    tree_leaf = -1

    def _getNode(idx, parent=None, cond=None):
        simple_pred_cond = None
        if cond:
            simple_pred_cond = cond
        node = pml.Node(id=idx, recordCount=float(tree.n_node_samples[idx]))
        if simple_pred_cond:
            node.SimplePredicate = simple_pred_cond
        else:
            node.True_ = pml.True_()
        if tree.children_left[idx] != tree_leaf:
            fieldName = features_names[tree.feature[idx]]
            prnt = None
            if model.__class__.__name__ == "ExtraTreeRegressor":
                prnt = parent + 1
            simplePredicate = pml.SimplePredicate(field=fieldName, operator="lessOrEqual",
                                                  value="{:.16f}".format(tree.threshold[idx]))
            left_child = _getNode(tree.children_left[idx], prnt, simplePredicate)
            simplePredicate = pml.SimplePredicate(field=fieldName, operator="greaterThan",
                                                  value="{:.16f}".format(tree.threshold[idx]))
            right_child = _getNode(tree.children_right[idx], prnt, simplePredicate)
            node.add_Node(left_child)
            node.add_Node(right_child)
        else:
            nodeValue = list(tree.value[idx][0])
            lSum = float(sum(nodeValue))
            if model.__class__.__name__ == 'DecisionTreeClassifier':
                probs = [x / lSum for x in nodeValue]
                score_dst = []
                for i in range(len(probs)):
                    score_dst.append(pml.ScoreDistribution(confidence=probs[i],
                                                           recordCount=float(nodeValue[i]),
                                                           value=classes[i]))
                node.ScoreDistribution = score_dst
                node.score = classes[probs.index(max(probs))]
            else:
                if model.__class__.__name__ == "ExtraTreeRegressor":
                    nd_sam = node_samples[int(idx)]
                    node.score = "{:.16f}".format(parent + avgPathLength(nd_sam))
                else:
                    node.score = "{:.16f}".format(lSum)
        return node

    if model.__class__.__name__ == "ExtraTreeRegressor":
        return _getNode(0, 0)
    else:
        return _getNode(0)


def avgPathLength(n):
    if n <= 1.0:
        return 1.0
    return 2.0 * (math.log(n - 1.0) + 0.57721566) - 2.0 * ((n - 1.0) / n)


def get_output(model, target_name):
    """
    It returns the Output element of the model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    target_name : String
        Name of the Target column.

    Returns
    -------
    Output :
        Get the Output element.
    """
    mining_func = get_mining_func(model)
    output_fields = list()
    if not has_target(model):
        output_fields.append(pml.OutputField(
            name='predicted',
            feature="predictedValue",
            optype="categorical",
            dataType="double"
        ))
    else:
        alt_target_name = 'predicted_' + target_name
        if mining_func == 'classification':
            for cls in model.classes_:
                output_fields.append(pml.OutputField(
                    name='probability_' + str(cls),
                    feature="probability",
                    optype="continuous",
                    dataType="double",
                    value=str(cls)
                ))
            output_fields.append(pml.OutputField(
                name=alt_target_name,
                feature="predictedValue",
                optype="categorical",
                dataType="string"))
        else:
            output_fields.append(pml.OutputField(
                name=alt_target_name,
                feature="predictedValue",
                optype="continuous",
                dataType="double"))
    return pml.Output(OutputField=output_fields)


def get_mining_func(model):
    """
    It returns the name of the mining function of the model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.

    Returns
    -------
    func_name : String
        Returns the function name of the model
    """
    if not hasattr(model, 'classes_'):
        if hasattr(model, 'n_clusters'):
            func_name = 'clustering'
        else:
            func_name = 'regression'
    else:
        if isinstance(model.classes_, np.ndarray):
            func_name = 'classification'
        else:
            func_name = 'regression'
    return func_name


def get_mining_schema(model, feature_names, target_name, mining_imp_val, categoric_values):
    """
    It returns the Mining Schema of the model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    feature_names : List
        Contains the list of feature/column name.
    target_name : String
        Name of the Target column.
    mining_imp_val : tuple
        Contains the mining_attributes, mining_strategy, mining_impute_value.
    categoric_values : tuple
        Contains Categorical attribute names and its values

    Returns
    -------
    MiningSchema :
        Get the MiningSchema element
    """
    if mining_imp_val:
        mining_attributes = mining_imp_val[0]
        mining_strategy = mining_imp_val[1]
        mining_replacement_val = mining_imp_val[2]
    n_features = len(feature_names)
    features_pmml_optype = ['continuous'] * n_features
    features_pmml_utype = ['active'] * n_features
    target_pmml_utype = 'target'
    mining_func = get_mining_func(model)
    if mining_func == 'classification':
        target_pmml_optype = 'categorical'
    elif mining_func == 'regression':
        target_pmml_optype = 'continuous'
    mining_flds = list()
    mining_name_stored = list()
    # handling impute pre processing
    if mining_imp_val:
        for mining_item, mining_idx in zip(mining_attributes, range(len(mining_attributes))):
            for feat_name, feat_idx in zip(feature_names, range(len(feature_names))):
                if feat_name in mining_item:
                    if feat_name not in mining_name_stored:
                        impute_index = mining_item.index(feat_name)
                        mining_flds.append(pml.MiningField(
                            name=str(feat_name),
                            optype=features_pmml_optype[feat_idx],
                            missingValueReplacement=mining_replacement_val[mining_idx][impute_index],
                            missingValueTreatment=mining_strategy[mining_idx],
                            usageType=features_pmml_utype[feat_idx]))
                        mining_name_stored.append(feat_name)
    if len(categoric_values) > 0:
        for cls_attr in categoric_values[1]:
            mining_flds.append(pml.MiningField(
                name=cls_attr,
                usageType='active',
                optype='categorical'
            ))
            mining_name_stored.append(cls_attr)
    for feat_name, feat_idx in zip(feature_names, range(len(feature_names))):
        if feat_name not in mining_name_stored:
            mining_flds.append(pml.MiningField(name=str(feat_name),
                                               optype=features_pmml_optype[feat_idx],
                                               usageType=features_pmml_utype[feat_idx]))
    if has_target(model):
        mining_flds.append(pml.MiningField(name=target_name,
                                           optype=target_pmml_optype,
                                           usageType=target_pmml_utype))
    return pml.MiningSchema(MiningField=mining_flds)


def get_neuron_input(feature_names):
    """
    It returns the Neural Input element.

    Parameters
    ----------
    feature_names : List
        Contains the list of feature/column name.

    Returns
    -------
    neural_input_element :
        Returns the NeuralInputs element
    """
    neural_input = list()
    for features in feature_names:
        field_ref = pml.FieldRef(field=str(features))
        derived_flds = pml.DerivedField(optype="continuous", dataType="double", FieldRef=field_ref)
        class_node = pml.NeuralInput(id=str(features), DerivedField=derived_flds)
        neural_input.append(class_node)
    neural_input_element = pml.NeuralInputs(NeuralInput=neural_input,
                                            numberOfInputs=str(len(neural_input)))
    return neural_input_element


def get_neural_layer(model, feature_names, target_name):
    """
    It returns the Neural Layer and Neural Output elements.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    feature_names : List
        Contains the list of feature/column name.
    target_name : String
        Name of the Target column.

    Returns
    -------
    all_neuron_layer : List
        Returns the list of NeuralLayer elements.
    neural_output_element :
        Returns the NeuralOutput element instance
    """
    weight = model.coefs_
    bias = model.intercepts_
    last_layer = bias[-1]
    hidden_layer_sizes = model.hidden_layer_sizes
    hidden_layers = list(hidden_layer_sizes)
    hidden_layers.append(len(last_layer))
    neuron = list()
    all_neuron_layer = list()
    input_features = feature_names
    neuron_id = list()
    for count in range(len(hidden_layers)):
        for count1 in range(hidden_layers[count]):
            con = list()
            for count2 in range(len(input_features)):
                con.append(pml.Con(from_=input_features[count2],
                                   weight=format(weight[count][count2][count1])))
            neuron.append(pml.Neuron(id=str(count) + str(count1),
                                     bias=format(bias[count][count1]), Con=con))
            neuron_id.append(str(count) + str(count1))
        all_neuron_layer.append(pml.NeuralLayer(Neuron=neuron))
        input_features = neuron_id
        neuron_id = list()
        neuron = list()
    if hidden_layers[-1] == 1 and 'MLPClassifier' in str(model.__class__):
        bias1 = [1.0, 0.0]
        weight1 = [-1.0, 1.0]
        con = list()
        linear = ['linear/1']
        i_d = ['true', 'false']
        con.append(pml.Con(from_=input_features[0], weight=1.0))
        neuron.append(pml.Neuron(id=linear[0], bias='0.0', Con=con))
        all_neuron_layer.append(pml.NeuralLayer(activationFunction="logistic", Neuron=neuron))
        neuron = list()
        con = list()
        for num in range(2):
            con.append(pml.Con(from_=linear[0], weight=format(weight1[num])))
            neuron.append(pml.Neuron(id=i_d[num], bias=format(bias1[num]), Con=con))
            con = list()
        all_neuron_layer.append(pml.NeuralLayer(activationFunction="identity", Neuron=neuron))
    if 'MLPClassifier' in str(model.__class__):
        neural_output = list()
        for values, count in zip(model.classes_, range(len(model.classes_))):
            norm_discrete = pml.NormDiscrete(field=target_name, value=str(values))
            derived_flds = pml.DerivedField(optype="categorical", dataType='double',
                                            NormDiscrete=norm_discrete)
            if len(input_features) == 1:
                class_node = pml.NeuralOutput(outputNeuron=input_features, DerivedField=derived_flds)
            else:
                class_node = pml.NeuralOutput(outputNeuron=input_features[count],
                                              DerivedField=derived_flds)
            neural_output.append(class_node)
        neural_output_element = pml.NeuralOutputs(numberOfOutputs=None, Extension=None,
                                                  NeuralOutput=neural_output)
    if 'MLPRegressor' in str(model.__class__):
        neural_output = list()
        fieldRef = pml.FieldRef(field=target_name)
        derived_flds = pml.DerivedField(optype="continuous", dataType="double", FieldRef=fieldRef)
        class_node = pml.NeuralOutput(outputNeuron=input_features, DerivedField=derived_flds)
        neural_output.append(class_node)
        neural_output_element = pml.NeuralOutputs(numberOfOutputs=None, Extension=None,
                                                  NeuralOutput=neural_output)
    return all_neuron_layer, neural_output_element


def get_super_cls_names(model_inst):
    """
    It returns the set of super classes of the model.

    Parameters
    ----------
    model_inst :
        Instance of the scikit-learn model

    Returns
    -------
    parents : Set
        Returns all the parent classes of the model instance.
    """
    def super_cls_names(cls):
        nonlocal parents
        parents.add(cls.__name__)
        for super_cls in cls.__bases__:
            super_cls_names(super_cls)

    cls = model_inst.__class__
    parents = set()
    super_cls_names(cls)
    return parents


def get_version():
    """
    It returns the PMML version.

    Returns
    -------
    version : String
        Returns the version of the pmml.
    """
    version = '4.4'
    return version


def get_header():
    """
    It returns the Header element of the pmml.

    Returns
    -------
    header :
        Returns the header of the pmml.
    """
    copyryt = "Copyright (c) 2019 Software AG"
    description = "Default Description"
    timestamp = pml.Timestamp(datetime.now())
    application = pml.Application(name="Nyoka", version=metadata.__version__)
    header = pml.Header(copyright=copyryt, description=description,
                        Timestamp=timestamp, Application=application)
    return header


def get_dtype(feat_value):
    """
    It returns the data type of the value.

    Parameters
    ----------
    feat_value :
        Contains a value for finding its data type.

    Returns
    -------
    Returns the respective data type of that value.
    """
    data_type = str(type(feat_value))
    if 'float' in data_type:
        return 'float'
    if 'int' in data_type:
        return 'integer'
    if 'long' in data_type:
        return 'long'
    if 'complex' in data_type:
        return 'complex'
    if 'str' in data_type:
        return 'string'


def get_data_dictionary(model, feature_names, target_name, categoric_values=None):
    """
    It returns the Data Dictionary element.

    Parameters
    ----------
    model :
        A Scikit-learn model instance.
    feature_names : List
        Contains the list of feature/column name.
    target_name : String
        Name of the Target column.
    categoric_values : tuple
        Contains Categorical attribute names and its values

    Returns
    -------
    data_dict :
        Return the dataDictionary instance
    """
    categoric_feature_name = list()
    if categoric_values:
        categoric_labels = categoric_values[0]
        categoric_feature_name = categoric_values[1]
    target_attr_values = []
    n_features = len(feature_names)
    features_pmml_optype = ['continuous'] * n_features
    features_pmml_dtype = ['double'] * n_features
    mining_func = get_mining_func(model)
    if mining_func == 'classification':
        target_pmml_optype = 'categorical'
        target_pmml_dtype = get_dtype(model.classes_[0])
        target_attr_values = model.classes_.tolist()
    elif mining_func == 'regression':
        target_pmml_optype = 'continuous'
        target_pmml_dtype = 'double'
    data_fields = list()
    if categoric_values:
        for class_list, attr_for_class in zip(categoric_labels, categoric_feature_name):
            category_flds = pml.DataField(name=str(attr_for_class), optype="categorical",
                                          dataType=get_dtype(class_list[0]) if class_list else 'string')
            if class_list:
                for values in class_list:
                    category_flds.add_Value(pml.Value(value=str(values)))
            data_fields.append(category_flds)
    attr_without_class_attr = [feat_name for feat_name in feature_names
                               if feat_name not in categoric_feature_name]
    for feature_idx, feat_name in enumerate(attr_without_class_attr):
        data_fields.append(pml.DataField(name=str(feat_name),
                                         optype=features_pmml_optype[feature_idx],
                                         dataType=features_pmml_dtype[feature_idx]))
    if has_target(model):
        class_node = pml.DataField(name=str(target_name), optype=target_pmml_optype,
                                   dataType=target_pmml_dtype)
        for class_value in target_attr_values:
            class_node.add_Value(pml.Value(value=str(class_value)))
        data_fields.append(class_node)
    data_dict = pml.DataDictionary(numberOfFields=len(data_fields), DataField=data_fields)
    return data_dict


def has_target(model):
    target_less_models = ['KMeans', 'OneClassSVM', 'IsolationForest']
    if model.__class__.__name__ in target_less_models:
        return False
    else:
        return True


def get_regr_predictors(model_coef, row_idx, feat_names, categoric_values):
    """
    Parameters
    ----------
    model_coef : array
        Contains the estimators coefficient values
    row_idx : int
        Contains an integer value to differentiate between linear and svm models
    feat_names : list
        Contains the list of feature/column names
    categoric_values : tuple
        Contains Categorical attribute names and its values

    Returns
    -------
    predictors : list
        Returns a list with instances of nyoka numeric/categorical predictor class
    """
    der_fld_len = len(feat_names)
    der_fld_idx = 0
    predictors = list()
    if categoric_values:
        class_lbls = categoric_values[0]
        class_attribute = categoric_values[1]
    while der_fld_idx < der_fld_len:
        if is_labelbinarizer(feat_names[der_fld_idx]):
            if not is_stdscaler(feat_names[der_fld_idx]):
                class_id = get_classid(class_attribute, feat_names[der_fld_idx])
                cat_predictors = get_categoric_pred(feat_names[der_fld_idx], row_idx, der_fld_idx, model_coef,
                                                    class_lbls[class_id], class_attribute[class_id])
                for predictor in cat_predictors:
                    predictors.append(predictor)
                if len(class_lbls[class_id]) == 2:
                    incrementor = 1
                else:
                    incrementor = len(class_lbls[class_id])
                der_fld_idx = der_fld_idx + incrementor
            else:
                num_predictors = get_numeric_pred(row_idx, der_fld_idx, model_coef, feat_names[der_fld_idx])
                predictors.append(num_predictors)
                der_fld_idx += 1
        elif is_onehotencoder(feat_names[der_fld_idx]):
            if not is_stdscaler(feat_names[der_fld_idx]):
                class_id = get_classid(class_attribute, feat_names[der_fld_idx])
                cat_predictors = get_categoric_pred(feat_names[der_fld_idx], row_idx, der_fld_idx, model_coef,
                                                    class_lbls[class_id], class_attribute[class_id])
                for predictor in cat_predictors:
                    predictors.append(predictor)
                incrementor = len(class_lbls[class_id])
                der_fld_idx = der_fld_idx + incrementor
            else:
                vectorfields_element = pml.FieldRef(field=feat_names[der_fld_idx])
                predictors.append(vectorfields_element)
                der_fld_idx += 1
        else:
            num_predictors = get_numeric_pred(row_idx, der_fld_idx, model_coef, feat_names[der_fld_idx])
            predictors.append(num_predictors)
            der_fld_idx += 1
    return predictors


def get_classid(class_attribute, feat_name):
    """
    Parameters
    ----------
    class_attribute :
        Contains the name of the attribute/column that contains categorical values
    feat_name : string
        Contains the name of the attribute/column

    Returns
    -------
    class_idx : int
        Returns an integer value that will represent each categorical value
    """
    for class_idx, class_attr in enumerate(class_attribute):
        if class_attr in feat_name:
            return class_idx


def is_labelbinarizer(feat_name):
    """
    Parameters
    ----------
    feat_name : string
        Contains the name of the attribute

    Returns
    -------
    Returns a boolean value that states whether label binarizer has been applied or not
    """
    if "labelBinarizer" in feat_name or "one_hot_encoder" in feat_name:
        return True
    else:
        return False


def is_stdscaler(feat_name):
    """
    Parameters
    ----------
    feat_name : string
        Contains the name of the attribute

    Returns
    -------
    Returns a boolean value that states whether standard scaler has been applied or not
    """
    if "standardScaler" in feat_name:
        return True
    else:
        return False


def get_categoric_pred(feat_names, row_idx, der_fld_idx, model_coef, class_lbls, class_attribute):
    """
    Parameters
    ----------
    feat_names : str
        Contains the name of the field
    row_idx : int
        Contains an integer value to index attribute/column names
    der_fld_idx : int
        Contains an integer value to differentiate between linear and svm models
    model_coef : array
        Contains the estimators coefficient values
    class_lbls : list
        Contains the list of categorical values
    class_attribute : tuple
        Contains Categorical attribute name

    Returns
    -------
    categoric_predictor : list
        Returns a list with instances of nyoka categorical predictor class
    """
    categoric_predictor = list()
    classes_len = len(class_lbls)
    if not is_onehotencoder(feat_names):
        if classes_len == 2:
            if row_idx == -1:
                coef = model_coef
            else:
                coef = model_coef[row_idx][der_fld_idx]
            cat_pred = pml.CategoricalPredictor(name=class_attribute, value=class_lbls[-1],
                                                coefficient="{:.16f}".format(coef))
            cat_pred.original_tagname_ = "CategoricalPredictor"
            categoric_predictor.append(cat_pred)
        else:
            for cname, class_idx in zip(class_lbls, range(len(class_lbls))):
                if row_idx == -1:
                    coef = model_coef
                else:
                    coef = model_coef[row_idx][der_fld_idx + class_idx]
                cat_pred = pml.CategoricalPredictor(name=class_attribute, value=cname,
                                                    coefficient="{:.16f}".format(coef))
                cat_pred.original_tagname_ = "CategoricalPredictor"
                categoric_predictor.append(cat_pred)
    else:
        for cname, class_idx in zip(class_lbls, range(len(class_lbls))):
            if row_idx == -1:
                coef = model_coef
            else:
                coef = model_coef[row_idx][der_fld_idx + class_idx]
            cat_pred = pml.CategoricalPredictor(name=class_attribute, value=cname,
                                                coefficient="{:.16f}".format(coef))
            cat_pred.original_tagname_ = "CategoricalPredictor"
            categoric_predictor.append(cat_pred)
    return categoric_predictor


def get_numeric_pred(row_idx, der_fld_idx, model_coef, der_fld_name):
    """
    Parameters
    ----------
    row_idx : int
        Contains an integer value to index attribute/column names
    der_fld_idx : int
        Contains an integer value to differentiate between linear and svm models
    model_coef : array
        Contains the estimators coefficient values
    der_fld_name : string
        Contains the name of the attribute

    Returns
    -------
    num_pred :
        Returns an instance of nyoka numeric predictor class
    """
    num_pred = pml.NumericPredictor(
        name=der_fld_name,
        exponent='1',
        coefficient="{:.16f}".format(model_coef[row_idx][der_fld_idx]))
    num_pred.original_tagname_ = "NumericPredictor"
    return num_pred
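# A minimal usage sketch (commented out so it never runs on import), assuming
# nyoka's public skl_to_pmml() entry point, which drives the internal helpers
# above (get_mining_schema, get_regrs_tabl, get_numeric_pred, get_output, ...)
# for a fitted scikit-learn Pipeline. The dataset, pipeline steps, and output
# file name are illustrative, not part of this module.
#
# from sklearn.datasets import load_iris
# from sklearn.pipeline import Pipeline
# from sklearn.preprocessing import StandardScaler
# from sklearn.linear_model import LogisticRegression
# from nyoka import skl_to_pmml
#
# iris = load_iris()
# pipe = Pipeline([('scaler', StandardScaler()),      # exercises the derived-field path
#                  ('clf', LogisticRegression())])    # exported via get_regrs_models()
# pipe.fit(iris.data, iris.target)
# skl_to_pmml(pipeline=pipe,
#             col_names=iris.feature_names,
#             target_name='species',
#             pmml_f_name='iris_logreg.pmml')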
[ "PMML43Ext.KNNInputs", "PMML43Ext.TargetValueCounts", "PMML43Ext.InstanceFields", "PMML43Ext.OutputField", "numpy.hstack", "PMML43Ext.NeuralOutputs", "PMML43Ext.minkowski", "numpy.asanyarray", "PMML43Ext.LinearKernelType", "numpy.array", "PMML43Ext.MiningModel", "PMML43Ext.ComparisonMeasure", ...
[((109, 134), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (124, 134), False, 'import sys, os\n'), ((82, 107), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (97, 107), False, 'import sys, os\n'), ((11628, 11668), 'PMML43Ext.MiningBuildTask', 'pml.MiningBuildTask', ([], {'Extension': 'extension'}), '(Extension=extension)\n', (11647, 11668), True, 'import PMML43Ext as pml\n'), ((21504, 21595), 'PMML43Ext.MiningModel', 'pml.MiningModel', ([], {'modelName': 'model.__class__.__name__', 'taskType': 'tasktype'}), '(modelName=model.__class__.__name__, taskType=tasktype, **\n model_kwargs)\n', (21519, 21595), True, 'import PMML43Ext as pml\n'), ((21731, 21781), 'PMML43Ext.Segmentation', 'pml.Segmentation', ([], {'multipleModelMethod': '"""modelChain"""'}), "(multipleModelMethod='modelChain')\n", (21747, 21781), True, 'import PMML43Ext as pml\n'), ((22941, 22991), 'PMML43Ext.MiningSchema', 'pml.MiningSchema', ([], {'MiningField': 'mining_flds_for_last'}), '(MiningField=mining_flds_for_last)\n', (22957, 22991), True, 'import PMML43Ext as pml\n'), ((23435, 23561), 'PMML43Ext.RegressionModel', 'pml.RegressionModel', ([], {'functionName': '"""classification"""', 'MiningSchema': 'mining_schema_for_last', 'RegressionTable': 'reg_tab_for_last'}), "(functionName='classification', MiningSchema=\n mining_schema_for_last, RegressionTable=reg_tab_for_last)\n", (23454, 23561), True, 'import PMML43Ext as pml\n'), ((31037, 31074), 'PMML43Ext.Output', 'pml.Output', ([], {'OutputField': 'output_fields'}), '(OutputField=output_fields)\n', (31047, 31074), True, 'import PMML43Ext as pml\n'), ((31818, 31862), 'numpy.unique', 'np.unique', (['model.labels_'], {'return_counts': '(True)'}), '(model.labels_, return_counts=True)\n', (31827, 31862), True, 'import numpy as np\n'), ((33103, 33140), 'PMML43Ext.Output', 'pml.Output', ([], {'OutputField': 'output_fields'}), '(OutputField=output_fields)\n', (33113, 33140), True, 'import PMML43Ext as pml\n'), ((34406, 34421), 'PMML43Ext.euclidean', 'pml.euclidean', ([], {}), '()\n', (34419, 34421), True, 'import PMML43Ext as pml\n'), ((34433, 34496), 'PMML43Ext.ComparisonMeasure', 'pml.ComparisonMeasure', ([], {'euclidean': 'comp_equation', 'kind': '"""distance"""'}), "(euclidean=comp_equation, kind='distance')\n", (34454, 34496), True, 'import PMML43Ext as pml\n'), ((37685, 37710), 'PMML43Ext.InlineTable', 'pml.InlineTable', ([], {'row': 'rows'}), '(row=rows)\n', (37700, 37710), True, 'import PMML43Ext as pml\n'), ((38379, 38428), 'PMML43Ext.InstanceFields', 'pml.InstanceFields', ([], {'InstanceField': 'instance_fields'}), '(InstanceField=instance_fields)\n', (38397, 38428), True, 'import PMML43Ext as pml\n'), ((40415, 40447), 'PMML43Ext.KNNInputs', 'pml.KNNInputs', ([], {'KNNInput': 'knnInput'}), '(KNNInput=knnInput)\n', (40428, 40447), True, 'import PMML43Ext as pml\n'), ((42064, 42087), 'PMML43Ext.TargetValueCounts', 'pml.TargetValueCounts', ([], {}), '()\n', (42085, 42087), True, 'import PMML43Ext as pml\n'), ((42285, 42360), 'PMML43Ext.BayesOutput', 'pml.BayesOutput', ([], {'fieldName': 'target_name', 'TargetValueCounts': 'target_val_counts'}), '(fieldName=target_name, TargetValueCounts=target_val_counts)\n', (42300, 42360), True, 'import PMML43Ext as pml\n'), ((42778, 42795), 'PMML43Ext.BayesInputs', 'pml.BayesInputs', ([], {}), '()\n', (42793, 42795), True, 'import PMML43Ext as pml\n'), ((59459, 59502), 'PMML43Ext.VectorFields', 'pml.VectorFields', ([], {'FieldRef': 'fieldref_element'}), '(FieldRef=fieldref_element)\n', 
(59475, 59502), True, 'import PMML43Ext as pml\n'), ((60499, 60584), 'PMML43Ext.VectorDictionary', 'pml.VectorDictionary', ([], {'VectorFields': 'vectorfields_element', 'VectorInstance': 'vecinsts'}), '(VectorFields=vectorfields_element, VectorInstance=vecinsts\n )\n', (60519, 60584), True, 'import PMML43Ext as pml\n'), ((80859, 80896), 'PMML43Ext.Output', 'pml.Output', ([], {'OutputField': 'output_fields'}), '(OutputField=output_fields)\n', (80869, 80896), True, 'import PMML43Ext as pml\n'), ((84671, 84712), 'PMML43Ext.MiningSchema', 'pml.MiningSchema', ([], {'MiningField': 'mining_flds'}), '(MiningField=mining_flds)\n', (84687, 84712), True, 'import PMML43Ext as pml\n'), ((89114, 89202), 'PMML43Ext.NeuralOutputs', 'pml.NeuralOutputs', ([], {'numberOfOutputs': 'None', 'Extension': 'None', 'NeuralOutput': 'neural_output'}), '(numberOfOutputs=None, Extension=None, NeuralOutput=\n neural_output)\n', (89131, 89202), True, 'import PMML43Ext as pml\n'), ((90332, 90391), 'PMML43Ext.Application', 'pml.Application', ([], {'name': '"""Nyoka"""', 'version': 'metadata.__version__'}), "(name='Nyoka', version=metadata.__version__)\n", (90347, 90391), True, 'import PMML43Ext as pml\n'), ((90404, 90508), 'PMML43Ext.Header', 'pml.Header', ([], {'copyright': 'copyryt', 'description': 'description', 'Timestamp': 'timestamp', 'Application': 'application'}), '(copyright=copyryt, description=description, Timestamp=timestamp,\n Application=application)\n', (90414, 90508), True, 'import PMML43Ext as pml\n'), ((6128, 6200), 'skl.pre_process.get_preprocess_val', 'pp.get_preprocess_val', (['ppln_sans_predictor', 'col_names', 'model', 'model_name'], {}), '(ppln_sans_predictor, col_names, model, model_name)\n', (6149, 6200), True, 'from skl import pre_process as pp\n'), ((22858, 22911), 'PMML43Ext.MiningField', 'pml.MiningField', ([], {'name': 'target_name', 'usageType': '"""target"""'}), "(name=target_name, usageType='target')\n", (22873, 22911), True, 'import PMML43Ext as pml\n'), ((29008, 29031), 'PMML43Ext.Apply', 'pml.Apply', ([], {'function': '"""*"""'}), "(function='*')\n", (29017, 29031), True, 'import PMML43Ext as pml\n'), ((29047, 29093), 'PMML43Ext.Constant', 'pml.Constant', ([], {'dataType': '"""double"""', 'valueOf_': '(-1.0)'}), "(dataType='double', valueOf_=-1.0)\n", (29059, 29093), True, 'import PMML43Ext as pml\n'), ((29111, 29155), 'PMML43Ext.FieldRef', 'pml.FieldRef', ([], {'field': '"""normalizedAnomalyScore"""'}), "(field='normalizedAnomalyScore')\n", (29123, 29155), True, 'import PMML43Ext as pml\n'), ((29312, 29337), 'PMML43Ext.Apply', 'pml.Apply', ([], {'function': '"""pow"""'}), "(function='pow')\n", (29321, 29337), True, 'import PMML43Ext as pml\n'), ((29353, 29398), 'PMML43Ext.Constant', 'pml.Constant', ([], {'dataType': '"""double"""', 'valueOf_': '(2.0)'}), "(dataType='double', valueOf_=2.0)\n", (29365, 29398), True, 'import PMML43Ext as pml\n'), ((29604, 29627), 'PMML43Ext.Apply', 'pml.Apply', ([], {'function': '"""-"""'}), "(function='-')\n", (29613, 29627), True, 'import PMML43Ext as pml\n'), ((29643, 29688), 'PMML43Ext.Constant', 'pml.Constant', ([], {'dataType': '"""double"""', 'valueOf_': '(0.5)'}), "(dataType='double', valueOf_=0.5)\n", (29655, 29688), True, 'import PMML43Ext as pml\n'), ((32680, 32782), 'PMML43Ext.OutputField', 'pml.OutputField', ([], {'name': '"""cluster"""', 'optype': '"""categorical"""', 'dataType': '"""string"""', 'feature': '"""predictedValue"""'}), "(name='cluster', optype='categorical', dataType='string',\n feature='predictedValue')\n", (32695, 32782), 
True, 'import PMML43Ext as pml\n'), ((33548, 33575), 'PMML43Ext.ArrayType', 'pml.ArrayType', ([], {'type_': '"""real"""'}), "(type_='real')\n", (33561, 33575), True, 'import PMML43Ext as pml\n'), ((37375, 37384), 'PMML43Ext.row', 'pml.row', ([], {}), '()\n', (37382, 37384), True, 'import PMML43Ext as pml\n'), ((38167, 38215), 'PMML43Ext.InstanceField', 'pml.InstanceField', ([], {'field': 'target_name', 'column': '"""y"""'}), "(field=target_name, column='y')\n", (38184, 38215), True, 'import PMML43Ext as pml\n'), ((42955, 42977), 'PMML43Ext.TargetValueStats', 'pml.TargetValueStats', ([], {}), '()\n', (42975, 42977), True, 'import PMML43Ext as pml\n'), ((52614, 52667), 'PMML43Ext.MiningSchema', 'pml.MiningSchema', ([], {'MiningField': 'mining_fields_for_first'}), '(MiningField=mining_fields_for_first)\n', (52630, 52667), True, 'import PMML43Ext as pml\n'), ((63986, 64040), 'PMML43Ext.LinearKernelType', 'pml.LinearKernelType', ([], {'description': '"""Linear Kernel Type"""'}), "(description='Linear Kernel Type')\n", (64006, 64040), True, 'import PMML43Ext as pml\n'), ((65869, 65917), 'PMML43Ext.SupportVectors', 'pml.SupportVectors', ([], {'SupportVector': 'support_vector'}), '(SupportVector=support_vector)\n', (65887, 65917), True, 'import PMML43Ext as pml\n'), ((66412, 66478), 'PMML43Ext.Coefficients', 'pml.Coefficients', ([], {'absoluteValue': 'absoValue', 'Coefficient': 'coefficient'}), '(absoluteValue=absoValue, Coefficient=coefficient)\n', (66428, 66478), True, 'import PMML43Ext as pml\n'), ((85159, 85235), 'PMML43Ext.DerivedField', 'pml.DerivedField', ([], {'optype': '"""continuous"""', 'dataType': '"""double"""', 'FieldRef': 'field_ref'}), "(optype='continuous', dataType='double', FieldRef=field_ref)\n", (85175, 85235), True, 'import PMML43Ext as pml\n'), ((88571, 88659), 'PMML43Ext.NeuralOutputs', 'pml.NeuralOutputs', ([], {'numberOfOutputs': 'None', 'Extension': 'None', 'NeuralOutput': 'neural_output'}), '(numberOfOutputs=None, Extension=None, NeuralOutput=\n neural_output)\n', (88588, 88659), True, 'import PMML43Ext as pml\n'), ((88808, 88839), 'PMML43Ext.FieldRef', 'pml.FieldRef', ([], {'field': 'target_name'}), '(field=target_name)\n', (88820, 88839), True, 'import PMML43Ext as pml\n'), ((88865, 88940), 'PMML43Ext.DerivedField', 'pml.DerivedField', ([], {'optype': '"""continuous"""', 'dataType': '"""double"""', 'FieldRef': 'fieldRef'}), "(optype='continuous', dataType='double', FieldRef=fieldRef)\n", (88881, 88940), True, 'import PMML43Ext as pml\n'), ((88968, 89040), 'PMML43Ext.NeuralOutput', 'pml.NeuralOutput', ([], {'outputNeuron': 'input_features', 'DerivedField': 'derived_flds'}), '(outputNeuron=input_features, DerivedField=derived_flds)\n', (88984, 89040), True, 'import PMML43Ext as pml\n'), ((90300, 90314), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (90312, 90314), False, 'from datetime import datetime\n'), ((11350, 11416), 'PMML43Ext.Extension', 'pml.Extension', ([], {'value': 'modelobj', 'for_': 'model_name', 'name': '"""modelObject"""'}), "(value=modelobj, for_=model_name, name='modelObject')\n", (11363, 11416), True, 'import PMML43Ext as pml\n'), ((22709, 22720), 'PMML43Ext.True_', 'pml.True_', ([], {}), '()\n', (22718, 22720), True, 'import PMML43Ext as pml\n'), ((27300, 27406), 'PMML43Ext.OutputField', 'pml.OutputField', ([], {'name': '"""anomalyScore"""', 'feature': '"""predictedValue"""', 'optype': '"""continuous"""', 'dataType': '"""float"""'}), "(name='anomalyScore', feature='predictedValue', optype=\n 'continuous', dataType='float')\n", (27315, 
27406), True, 'import PMML43Ext as pml\n'), ((27481, 27592), 'PMML43Ext.OutputField', 'pml.OutputField', ([], {'name': '"""anomaly"""', 'feature': '"""anomaly"""', 'optype': '"""categorical"""', 'dataType': '"""boolean"""', 'threshold': '"""0"""'}), "(name='anomaly', feature='anomaly', optype='categorical',\n dataType='boolean', threshold='0')\n", (27496, 27592), True, 'import PMML43Ext as pml\n'), ((27794, 27927), 'PMML43Ext.OutputField', 'pml.OutputField', ([], {'name': '"""rawAnomalyScore"""', 'optype': '"""continuous"""', 'dataType': '"""double"""', 'feature': '"""predictedValue"""', 'isFinalResult': '"""false"""'}), "(name='rawAnomalyScore', optype='continuous', dataType=\n 'double', feature='predictedValue', isFinalResult='false')\n", (27809, 27927), True, 'import PMML43Ext as pml\n'), ((29890, 30049), 'PMML43Ext.OutputField', 'pml.OutputField', ([], {'name': '"""decisionFunction"""', 'optype': '"""continuous"""', 'dataType': '"""double"""', 'feature': '"""transformedValue"""', 'isFinalResult': '"""false"""', 'Apply': 'appl_outer'}), "(name='decisionFunction', optype='continuous', dataType=\n 'double', feature='transformedValue', isFinalResult='false', Apply=\n appl_outer)\n", (29905, 30049), True, 'import PMML43Ext as pml\n'), ((66518, 66594), 'PMML43Ext.SupportVectorMachine', 'pml.SupportVectorMachine', ([], {'SupportVectors': 'support_vectors', 'Coefficients': 'coeff'}), '(SupportVectors=support_vectors, Coefficients=coeff)\n', (66542, 66594), True, 'import PMML43Ext as pml\n'), ((66646, 66680), 'numpy.hstack', 'np.hstack', (['[[0], model.n_support_]'], {}), '([[0], model.n_support_])\n', (66655, 66680), True, 'import numpy as np\n'), ((73849, 73866), 'numpy.array', 'np.array', (['[inter]'], {}), '([inter])\n', (73857, 73866), True, 'import numpy as np\n'), ((73938, 73958), 'numpy.ravel', 'np.ravel', (['model_coef'], {}), '(model_coef)\n', (73946, 73958), True, 'import numpy as np\n'), ((77345, 77356), 'PMML43Ext.True_', 'pml.True_', ([], {}), '()\n', (77354, 77356), True, 'import PMML43Ext as pml\n'), ((79818, 79923), 'PMML43Ext.OutputField', 'pml.OutputField', ([], {'name': '"""predicted"""', 'feature': '"""predictedValue"""', 'optype': '"""categorical"""', 'dataType': '"""double"""'}), "(name='predicted', feature='predictedValue', optype=\n 'categorical', dataType='double')\n", (79833, 79923), True, 'import PMML43Ext as pml\n'), ((84485, 84579), 'PMML43Ext.MiningField', 'pml.MiningField', ([], {'name': 'target_name', 'optype': 'target_pmml_optype', 'usageType': 'target_pmml_utype'}), '(name=target_name, optype=target_pmml_optype, usageType=\n target_pmml_utype)\n', (84500, 84579), True, 'import PMML43Ext as pml\n'), ((86877, 86907), 'PMML43Ext.NeuralLayer', 'pml.NeuralLayer', ([], {'Neuron': 'neuron'}), '(Neuron=neuron)\n', (86892, 86907), True, 'import PMML43Ext as pml\n'), ((87223, 87267), 'PMML43Ext.Con', 'pml.Con', ([], {'from_': 'input_features[0]', 'weight': '(1.0)'}), '(from_=input_features[0], weight=1.0)\n', (87230, 87267), True, 'import PMML43Ext as pml\n'), ((87295, 87340), 'PMML43Ext.Neuron', 'pml.Neuron', ([], {'id': 'linear[0]', 'bias': '"""0.0"""', 'Con': 'con'}), "(id=linear[0], bias='0.0', Con=con)\n", (87305, 87340), True, 'import PMML43Ext as pml\n'), ((87382, 87443), 'PMML43Ext.NeuralLayer', 'pml.NeuralLayer', ([], {'activationFunction': '"""logistic"""', 'Neuron': 'neuron'}), "(activationFunction='logistic', Neuron=neuron)\n", (87397, 87443), True, 'import PMML43Ext as pml\n'), ((87753, 87814), 'PMML43Ext.NeuralLayer', 'pml.NeuralLayer', ([], 
{'activationFunction': '"""identity"""', 'Neuron': 'neuron'}), "(activationFunction='identity', Neuron=neuron)\n", (87768, 87814), True, 'import PMML43Ext as pml\n'), ((88091, 88181), 'PMML43Ext.DerivedField', 'pml.DerivedField', ([], {'optype': '"""categorical"""', 'dataType': '"""double"""', 'NormDiscrete': 'norm_discrete'}), "(optype='categorical', dataType='double', NormDiscrete=\n norm_discrete)\n", (88107, 88181), True, 'import PMML43Ext as pml\n'), ((2644, 2713), 'nyoka.keras.keras_model_to_pmml.KerasToPmml', 'KerasToPmml', (['model'], {'model_name': 'pmml_f_name', 'targetVarName': 'target_name'}), '(model, model_name=pmml_f_name, targetVarName=target_name)\n', (2655, 2713), False, 'from nyoka.keras.keras_model_to_pmml import KerasToPmml\n'), ((3403, 3516), 'nyoka.xgboost.xgboost_to_pmml.xgboost_to_pmml', 'xgboost_to_pmml', (['model', 'derived_col_names', 'col_names', 'target_name', 'mining_imp_val', 'categoric_values', 'tasktype'], {}), '(model, derived_col_names, col_names, target_name,\n mining_imp_val, categoric_values, tasktype)\n', (3418, 3516), False, 'from nyoka.xgboost.xgboost_to_pmml import xgboost_to_pmml\n'), ((11501, 11611), 'PMML43Ext.Extension', 'pml.Extension', ([], {'value': "toExportDict[model_name]['hyperparameters']", 'for_': 'model_name', 'name': '"""hyperparameters"""'}), "(value=toExportDict[model_name]['hyperparameters'], for_=\n model_name, name='hyperparameters')\n", (11514, 11611), True, 'import PMML43Ext as pml\n'), ((21867, 21878), 'PMML43Ext.True_', 'pml.True_', ([], {}), '()\n', (21876, 21878), True, 'import PMML43Ext as pml\n'), ((21999, 22048), 'PMML43Ext.MiningSchema', 'pml.MiningSchema', ([], {'MiningField': 'inner_mining_schema'}), '(MiningField=inner_mining_schema)\n', (22015, 22048), True, 'import PMML43Ext as pml\n'), ((38817, 38832), 'PMML43Ext.euclidean', 'pml.euclidean', ([], {}), '()\n', (38830, 38832), True, 'import PMML43Ext as pml\n'), ((56353, 56364), 'PMML43Ext.True_', 'pml.True_', ([], {}), '()\n', (56362, 56364), True, 'import PMML43Ext as pml\n'), ((57071, 57103), 'numpy.asanyarray', 'np.asanyarray', (['model.estimators_'], {}), '(model.estimators_)\n', (57084, 57103), True, 'import numpy as np\n'), ((62101, 62144), 'PMML43Ext.FieldRef', 'pml.FieldRef', ([], {'field': 'feat_names[der_fld_idx]'}), '(field=feat_names[der_fld_idx])\n', (62113, 62144), True, 'import PMML43Ext as pml\n'), ((63092, 63135), 'PMML43Ext.FieldRef', 'pml.FieldRef', ([], {'field': 'feat_names[der_fld_idx]'}), '(field=feat_names[der_fld_idx])\n', (63104, 63135), True, 'import PMML43Ext as pml\n'), ((65811, 65841), 'PMML43Ext.SupportVector', 'pml.SupportVector', ([], {'vectorId': 'sv'}), '(vectorId=sv)\n', (65828, 65841), True, 'import PMML43Ext as pml\n'), ((74919, 74944), 'numpy.unique', 'np.unique', (['target_classes'], {}), '(target_classes)\n', (74928, 74944), True, 'import numpy as np\n'), ((79316, 79333), 'math.log', 'math.log', (['(n - 1.0)'], {}), '(n - 1.0)\n', (79324, 79333), False, 'import math\n'), ((80460, 80569), 'PMML43Ext.OutputField', 'pml.OutputField', ([], {'name': 'alt_target_name', 'feature': '"""predictedValue"""', 'optype': '"""categorical"""', 'dataType': '"""string"""'}), "(name=alt_target_name, feature='predictedValue', optype=\n 'categorical', dataType='string')\n", (80475, 80569), True, 'import PMML43Ext as pml\n'), ((80678, 80786), 'PMML43Ext.OutputField', 'pml.OutputField', ([], {'name': 'alt_target_name', 'feature': '"""predictedValue"""', 'optype': '"""continuous"""', 'dataType': '"""double"""'}), "(name=alt_target_name, 
feature='predictedValue', optype=\n 'continuous', dataType='double')\n", (80693, 80786), True, 'import PMML43Ext as pml\n'), ((83879, 83951), 'PMML43Ext.MiningField', 'pml.MiningField', ([], {'name': 'cls_attr', 'usageType': '"""active"""', 'optype': '"""categorical"""'}), "(name=cls_attr, usageType='active', optype='categorical')\n", (83894, 83951), True, 'import PMML43Ext as pml\n'), ((88287, 88359), 'PMML43Ext.NeuralOutput', 'pml.NeuralOutput', ([], {'outputNeuron': 'input_features', 'DerivedField': 'derived_flds'}), '(outputNeuron=input_features, DerivedField=derived_flds)\n', (88303, 88359), True, 'import PMML43Ext as pml\n'), ((88411, 88490), 'PMML43Ext.NeuralOutput', 'pml.NeuralOutput', ([], {'outputNeuron': 'input_features[count]', 'DerivedField': 'derived_flds'}), '(outputNeuron=input_features[count], DerivedField=derived_flds)\n', (88427, 88490), True, 'import PMML43Ext as pml\n'), ((3906, 4015), 'nyoka.lgbm.lgb_to_pmml.lgb_to_pmml', 'lgb_to_pmml', (['model', 'derived_col_names', 'col_names', 'target_name', 'mining_imp_val', 'categoric_values', 'tasktype'], {}), '(model, derived_col_names, col_names, target_name,\n mining_imp_val, categoric_values, tasktype)\n', (3917, 4015), False, 'from nyoka.lgbm.lgb_to_pmml import lgb_to_pmml\n'), ((11160, 11236), 'PMML43Ext.Extension', 'pml.Extension', ([], {'value': 'pipeline', 'for_': 'model_name', 'name': '"""preprocessingPipeline"""'}), "(value=pipeline, for_=model_name, name='preprocessingPipeline')\n", (11173, 11236), True, 'import PMML43Ext as pml\n'), ((38955, 38989), 'PMML43Ext.minkowski', 'pml.minkowski', ([], {'p_parameter': 'model.p'}), '(p_parameter=model.p)\n', (38968, 38989), True, 'import PMML43Ext as pml\n'), ((52542, 52578), 'PMML43Ext.MiningField', 'pml.MiningField', ([], {'name': 'col_names[idx]'}), '(name=col_names[idx])\n', (52557, 52578), True, 'import PMML43Ext as pml\n'), ((55327, 55338), 'PMML43Ext.True_', 'pml.True_', ([], {}), '()\n', (55336, 55338), True, 'import PMML43Ext as pml\n'), ((62909, 62952), 'PMML43Ext.FieldRef', 'pml.FieldRef', ([], {'field': 'feat_names[der_fld_idx]'}), '(field=feat_names[der_fld_idx])\n', (62921, 62952), True, 'import PMML43Ext as pml\n'), ((74708, 74778), 'PMML43Ext.RegressionTable', 'pml.RegressionTable', ([], {'intercept': '"""0.0"""', 'targetCategory': 'target_classes[0]'}), "(intercept='0.0', targetCategory=target_classes[0])\n", (74727, 74778), True, 'import PMML43Ext as pml\n'), ((96337, 96380), 'PMML43Ext.FieldRef', 'pml.FieldRef', ([], {'field': 'feat_names[der_fld_idx]'}), '(field=feat_names[der_fld_idx])\n', (96349, 96380), True, 'import PMML43Ext as pml\n'), ((2495, 2570), 'nyoka.keras.keras_model_to_pmml.KerasToPmml', 'KerasToPmml', (['model.model'], {'model_name': 'pmml_f_name', 'targetVarName': 'target_name'}), '(model.model, model_name=pmml_f_name, targetVarName=target_name)\n', (2506, 2570), False, 'from nyoka.keras.keras_model_to_pmml import KerasToPmml\n'), ((4395, 4428), 'nyoka.lgbm.lgbmTrainingAPI_to_pmml.ExportToPMML', 'ext', (['model', 'tasktype', 'target_name'], {}), '(model, tasktype, target_name)\n', (4398, 4428), True, 'from nyoka.lgbm.lgbmTrainingAPI_to_pmml import ExportToPMML as ext\n'), ((39126, 39141), 'PMML43Ext.cityBlock', 'pml.cityBlock', ([], {}), '()\n', (39139, 39141), True, 'import PMML43Ext as pml\n'), ((57803, 57839), 'PMML43Ext.MiningField', 'pml.MiningField', ([], {'name': 'col_names[idx]'}), '(name=col_names[idx])\n', (57818, 57839), True, 'import PMML43Ext as pml\n'), ((57925, 57936), 'PMML43Ext.True_', 'pml.True_', ([], {}), '()\n', 
(57934, 57936), True, 'import PMML43Ext as pml\n'), ((67547, 67577), 'PMML43Ext.SupportVector', 'pml.SupportVector', ([], {'vectorId': 'sv'}), '(vectorId=sv)\n', (67564, 67577), True, 'import PMML43Ext as pml\n'), ((74956, 74981), 'numpy.unique', 'np.unique', (['target_classes'], {}), '(target_classes)\n', (74965, 74981), True, 'import numpy as np\n'), ((39273, 39295), 'PMML43Ext.squaredEuclidean', 'pml.squaredEuclidean', ([], {}), '()\n', (39293, 39295), True, 'import PMML43Ext as pml\n'), ((55595, 55632), 'PMML43Ext.Output', 'pml.Output', ([], {'OutputField': 'output_fields'}), '(OutputField=output_fields)\n', (55605, 55632), True, 'import PMML43Ext as pml\n'), ((75557, 75619), 'PMML43Ext.NumericPredictor', 'pml.NumericPredictor', ([], {'coefficient': '"""1.0"""', 'name': 'feature_names[0]'}), "(coefficient='1.0', name=feature_names[0])\n", (75577, 75619), True, 'import PMML43Ext as pml\n'), ((28592, 28629), 'PMML43Ext.FieldRef', 'pml.FieldRef', ([], {'field': '"""rawAnomalyScore"""'}), "(field='rawAnomalyScore')\n", (28604, 28629), True, 'import PMML43Ext as pml\n'), ((30748, 30786), 'PMML43Ext.FieldRef', 'pml.FieldRef', ([], {'field': '"""decisionFunction"""'}), "(field='decisionFunction')\n", (30760, 30786), True, 'import PMML43Ext as pml\n'), ((39418, 39433), 'PMML43Ext.chebychev', 'pml.chebychev', ([], {}), '()\n', (39431, 39433), True, 'import PMML43Ext as pml\n'), ((58235, 58278), 'PMML43Ext.MiningSchema', 'pml.MiningSchema', ([], {'MiningField': 'mining_fields'}), '(MiningField=mining_fields)\n', (58251, 58278), True, 'import PMML43Ext as pml\n'), ((68181, 68222), 'PMML43Ext.SupportVectors', 'pml.SupportVectors', ([], {'SupportVector': 'all_svs'}), '(SupportVector=all_svs)\n', (68199, 68222), True, 'import PMML43Ext as pml\n'), ((68711, 68752), 'PMML43Ext.SupportVectors', 'pml.SupportVectors', ([], {'SupportVector': 'all_svs'}), '(SupportVector=all_svs)\n', (68729, 68752), True, 'import PMML43Ext as pml\n'), ((76063, 76132), 'PMML43Ext.NumericPredictor', 'pml.NumericPredictor', ([], {'coefficient': '"""1.0"""', 'name': 'feature_names[feat_idx]'}), "(coefficient='1.0', name=feature_names[feat_idx])\n", (76083, 76132), True, 'import PMML43Ext as pml\n'), ((39560, 39580), 'PMML43Ext.simpleMatching', 'pml.simpleMatching', ([], {}), '()\n', (39578, 39580), True, 'import PMML43Ext as pml\n'), ((39701, 39714), 'PMML43Ext.jaccard', 'pml.jaccard', ([], {}), '()\n', (39712, 39714), True, 'import PMML43Ext as pml\n'), ((39843, 39857), 'PMML43Ext.tanimoto', 'pml.tanimoto', ([], {}), '()\n', (39855, 39857), True, 'import PMML43Ext as pml\n'), ((53863, 53904), 'PMML43Ext.FieldRef', 'pml.FieldRef', ([], {'field': '"""decisionFunction(0)"""'}), "(field='decisionFunction(0)')\n", (53875, 53904), True, 'import PMML43Ext as pml\n'), ((28833, 28850), 'math.log', 'math.log', (['(n - 1.0)'], {}), '(n - 1.0)\n', (28841, 28850), False, 'import math\n')]
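# A minimal sketch for consuming extract_api records like the ones above. The
# tuple layout -- (char_span, qualified_api, local_name, (args, kwargs),
# call_suffix, flag, import_statement) -- is inferred from the records
# themselves, not from any documented schema, so treat it as an assumption.
#
# from collections import Counter
#
# def api_histogram(records):
#     # records: a list of extract_api tuples; count calls per qualified API name
#     return Counter(qualified for _span, qualified, *_rest in records)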
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

# pylint: disable=invalid-name,inconsistent-return-statements

"""mpl circuit visualization backend."""

import collections
import itertools
import json
import logging
import re
import os
from warnings import warn

import numpy as np

try:
    from matplotlib import get_backend
    from matplotlib import patches
    from matplotlib import pyplot as plt
    HAS_MATPLOTLIB = True
except ImportError:
    HAS_MATPLOTLIB = False

try:
    from pylatexenc.latex2text import LatexNodes2Text
    HAS_PYLATEX = True
except ImportError:
    HAS_PYLATEX = False

from qiskit.circuit import ControlledGate
from qiskit.visualization.qcstyle import DefaultStyle, set_style
from qiskit.circuit import Delay
from qiskit import user_config
from qiskit.circuit.tools.pi_check import pi_check

logger = logging.getLogger(__name__)

# Default gate width and height
WID = 0.65
HIG = 0.65

BASE_SIZE = 3.01
PORDER_GATE = 5
PORDER_LINE = 3
PORDER_REGLINE = 2
PORDER_GRAY = 3
PORDER_TEXT = 6
PORDER_SUBP = 4


class Anchor:
    """Locate the anchors for the gates"""

    def __init__(self, reg_num, yind, fold):
        self.__yind = yind
        self.__fold = fold
        self.__reg_num = reg_num
        self.__gate_placed = []
        self.gate_anchor = 0

    def plot_coord(self, index, gate_width, x_offset):
        """Set the coord positions for an index"""
        h_pos = index % self.__fold + 1
        # check folding
        if self.__fold > 0:
            if h_pos + (gate_width - 1) > self.__fold:
                index += self.__fold - (h_pos - 1)
            x_pos = index % self.__fold + 0.5 * gate_width + 0.04
            y_pos = self.__yind - (index // self.__fold) * (self.__reg_num + 1)
        else:
            x_pos = index + 0.5 * gate_width + 0.04
            y_pos = self.__yind

        # could have been updated, so need to store
        self.gate_anchor = index
        return x_pos + x_offset, y_pos

    def is_locatable(self, index, gate_width):
        """Determine if a gate has been placed"""
        hold = [index + i for i in range(gate_width)]
        for p in hold:
            if p in self.__gate_placed:
                return False
        return True

    def set_index(self, index, gate_width):
        """Set the index for a gate"""
        if self.__fold < 2:
            _index = index
        else:
            h_pos = index % self.__fold + 1
            if h_pos + (gate_width - 1) > self.__fold:
                _index = index + self.__fold - (h_pos - 1) + 1
            else:
                _index = index
        for ii in range(gate_width):
            if _index + ii not in self.__gate_placed:
                self.__gate_placed.append(_index + ii)
        self.__gate_placed.sort()

    def get_index(self):
        """Getter for the index"""
        if self.__gate_placed:
            return self.__gate_placed[-1] + 1
        return 0


class MatplotlibDrawer:
    """Matplotlib drawer class called from circuit_drawer"""

    def __init__(self, qregs, cregs, ops, scale=None, style=None, plot_barriers=True,
                 layout=None, fold=25, ax=None, initial_state=False, cregbundle=True,
                 global_phase=None):

        if not HAS_MATPLOTLIB:
            raise ImportError('The class MatplotlibDrawer needs matplotlib. '
                              'To install, run "pip install matplotlib".')
        if not HAS_PYLATEX:
            raise ImportError('The class MatplotlibDrawer needs pylatexenc. '
                              'to install, run "pip install pylatexenc".')

        self._creg = []
        self._qreg = []
        self._registers(cregs, qregs)
        self._qreg_dict = collections.OrderedDict()
        self._creg_dict = collections.OrderedDict()

        self._ops = ops
        self._scale = 1.0 if scale is None else scale
        self._style = self._load_style(style)
        self._plot_barriers = plot_barriers
        self._layout = layout
        self._fold = fold
        if self._fold < 2:
            self._fold = -1

        if ax is None:
            self._return_fig = True
            self._figure = plt.figure()
            self._figure.patch.set_facecolor(color=self._style['bg'])
            self._ax = self._figure.add_subplot(111)
        else:
            self._return_fig = False
            self._ax = ax
            self._figure = ax.get_figure()

        self._ax.axis('off')
        self._ax.set_aspect('equal')
        self._ax.tick_params(labelbottom=False, labeltop=False,
                             labelleft=False, labelright=False)

        self._initial_state = initial_state
        self._cregbundle = cregbundle
        self._global_phase = global_phase

        self._ast = None
        self._n_lines = 0
        self._xmax = 0
        self._ymax = 0
        self._x_offset = 0
        self._reg_long_text = 0

        self._style['fs'] *= self._scale
        self._style['sfs'] *= self._scale
        self._lwidth15 = 1.5 * self._scale
        self._lwidth2 = 2.0 * self._scale

        # default is to use character table for text width,
        # but get_renderer will work with some mpl backends
        """fig = plt.figure()
        if hasattr(fig.canvas, 'get_renderer'):
            self._renderer = fig.canvas.get_renderer()
        else:
            self._renderer = None"""
        self._renderer = None

        self._mathmode_regex = re.compile(r"(?<!\\)\$(.*)(?<!\\)\$")

        # these char arrays are for finding text_width when not
        # using get_renderer method for the matplotlib backend
        self._char_list = {' ': (0.0958, 0.0583), '!': (0.1208, 0.0729), '"': (0.1396, 0.0875),
                           '#': (0.2521, 0.1562), '$': (0.1917, 0.1167), '%': (0.2854, 0.1771),
                           '&': (0.2333, 0.1458), "'": (0.0833, 0.0521), '(': (0.1167, 0.0729),
                           ')': (0.1167, 0.0729), '*': (0.15, 0.0938), '+': (0.25, 0.1562),
                           ',': (0.0958, 0.0583), '-': (0.1083, 0.0667), '.': (0.0958, 0.0604),
                           '/': (0.1021, 0.0625), '0': (0.1875, 0.1167), '1': (0.1896, 0.1167),
                           '2': (0.1917, 0.1188), '3': (0.1917, 0.1167), '4': (0.1917, 0.1188),
                           '5': (0.1917, 0.1167), '6': (0.1896, 0.1167), '7': (0.1917, 0.1188),
                           '8': (0.1896, 0.1188), '9': (0.1917, 0.1188), ':': (0.1021, 0.0604),
                           ';': (0.1021, 0.0604), '<': (0.25, 0.1542), '=': (0.25, 0.1562),
                           '>': (0.25, 0.1542), '?': (0.1583, 0.0979), '@': (0.2979, 0.1854),
                           'A': (0.2062, 0.1271), 'B': (0.2042, 0.1271), 'C': (0.2083, 0.1292),
                           'D': (0.2312, 0.1417), 'E': (0.1875, 0.1167), 'F': (0.1708, 0.1062),
                           'G': (0.2312, 0.1438), 'H': (0.225, 0.1396), 'I': (0.0875, 0.0542),
                           'J': (0.0875, 0.0542), 'K': (0.1958, 0.1208), 'L': (0.1667, 0.1042),
                           'M': (0.2583, 0.1604), 'N': (0.225, 0.1396), 'O': (0.2354, 0.1458),
                           'P': (0.1812, 0.1125), 'Q': (0.2354, 0.1458), 'R': (0.2083, 0.1292),
                           'S': (0.1896, 0.1188), 'T': (0.1854, 0.1125), 'U': (0.2208, 0.1354),
                           'V': (0.2062, 0.1271), 'W': (0.2958, 0.1833), 'X': (0.2062, 0.1271),
                           'Y': (0.1833, 0.1125), 'Z': (0.2042, 0.1271), '[': (0.1167, 0.075),
                           '\\': (0.1021, 0.0625), ']': (0.1167, 0.0729), '^': (0.2521, 0.1562),
                           '_': (0.1521, 0.0938), '`': (0.15, 0.0938), 'a': (0.1854, 0.1146),
                           'b': (0.1917, 0.1167), 'c': (0.1646, 0.1021), 'd': (0.1896, 0.1188),
                           'e': (0.1854, 0.1146), 'f': (0.1042, 0.0667), 'g': (0.1896, 0.1188),
                           'h': (0.1896, 0.1188), 'i': (0.0854, 0.0521), 'j': (0.0854, 0.0521),
                           'k': (0.1729, 0.1083), 'l': (0.0854, 0.0521), 'm': (0.2917, 0.1812),
                           'n': (0.1896, 0.1188), 'o': (0.1833, 0.1125), 'p': (0.1917, 0.1167),
                           'q': (0.1896, 0.1188), 'r': (0.125, 0.0771), 's': (0.1562, 0.0958),
                           't': (0.1167, 0.0729), 'u': (0.1896, 0.1188), 'v': (0.1771, 0.1104),
                           'w': (0.2458, 0.1521), 'x': (0.1771, 0.1104), 'y': (0.1771, 0.1104),
                           'z': (0.1562, 0.0979), '{': (0.1917, 0.1188), '|': (0.1, 0.0604),
                           '}': (0.1896, 0.1188)}

    def _registers(self, creg, qreg):
        self._creg = []
        for r in creg:
            self._creg.append(r)
        self._qreg = []
        for r in qreg:
            self._qreg.append(r)

    @property
    def ast(self):
        """AST getter"""
        return self._ast

    def _load_style(self, style):
        current_style = DefaultStyle().style
        style_name = 'default'
        def_font_ratio = current_style['fs'] / current_style['sfs']

        config = user_config.get_config()
        if style is not None:
            if style is False:
                style_name = 'bw'
            elif isinstance(style, dict) and 'name' in style:
                style_name = style['name']
            elif isinstance(style, str):
                style_name = style
            elif config:
                style_name = config.get('circuit_mpl_style', 'default')
            elif not isinstance(style, (str, dict)):
                warn("style parameter '{}' must be a str or a dictionary."
                     " Will use default style.".format(style), UserWarning, 2)
        if style_name.endswith('.json'):
            style_name = style_name[:-5]

        # Search for file in 'styles' dir, then config_path, and finally 'cwd'
        style_path = []
        if style_name != 'default':
            style_name = style_name + '.json'
            spath = os.path.dirname(os.path.abspath(__file__))
            style_path.append(os.path.join(spath, 'styles', style_name))
            if config:
                config_path = config.get('circuit_mpl_style_path', '')
                if config_path:
                    for path in config_path:
                        style_path.append(os.path.normpath(os.path.join(path, style_name)))
            style_path.append(os.path.normpath(os.path.join('', style_name)))

            for path in style_path:
                exp_user = os.path.expanduser(path)
                if os.path.isfile(exp_user):
                    try:
                        with open(exp_user) as infile:
                            json_style = json.load(infile)
                        set_style(current_style, json_style)
                        break
                    except json.JSONDecodeError as e:
                        warn("Could not decode JSON in file '{}': {}. ".format(
                            path, str(e)) + "Will use default style.", UserWarning, 2)
                        break
                    except (OSError, FileNotFoundError):
                        warn("Error loading JSON file '{}'. Will use default style.".format(
                            path), UserWarning, 2)
                        break
            else:
                warn("Style JSON file '{}' not found in any of these locations: {}. Will use"
                     " default style.".format(style_name, ', '.join(style_path)), UserWarning, 2)

        if isinstance(style, dict):
            set_style(current_style, style)

        # If font/subfont ratio changes from default, have to scale width calculations for
        # subfont. Font change is auto scaled in the self._figure.set_size_inches call in draw()
        self._subfont_factor = current_style['sfs'] * def_font_ratio / current_style['fs']
        return current_style

    # This computes the width of a string in the default font
    def _get_text_width(self, text, fontsize, param=False):
        if not text:
            return 0.0
        if self._renderer:
            t = plt.text(0.5, 0.5, text, fontsize=fontsize)
            return t.get_window_extent(renderer=self._renderer).width / 60.0
        else:
            math_mode_match = self._mathmode_regex.search(text)
            num_underscores = 0
            num_carets = 0
            if math_mode_match:
                math_mode_text = math_mode_match.group(1)
                num_underscores = math_mode_text.count('_')
                num_carets = math_mode_text.count('^')
            text = LatexNodes2Text().latex_to_text(text.replace('$$', ''))

            # If there are subscripts or superscripts in mathtext string
            # we need to account for that spacing by manually removing
            # from text string for text length
            if num_underscores:
                text = text.replace('_', '', num_underscores)
            if num_carets:
                text = text.replace('^', '', num_carets)

            # This changes hyphen to + to match width of math mode minus sign.
            if param:
                text = text.replace('-', '+')

            f = 0 if fontsize == self._style['fs'] else 1
            sum_text = 0.0
            for c in text:
                try:
                    sum_text += self._char_list[c][f]
                except KeyError:
                    # if non-ASCII char, use width of 'c', an average size
                    sum_text += self._char_list['c'][f]
            if f == 1:
                sum_text *= self._subfont_factor
            return sum_text

    def _param_parse(self, v):
        param_parts = [None] * len(v)
        for i, e in enumerate(v):
            try:
                param_parts[i] = pi_check(e, output='mpl', ndigits=3)
            except TypeError:
                param_parts[i] = str(e)
        param_parts = ', '.join(param_parts).replace('-', '$-$')
        return param_parts

    def _get_gate_ctrl_text(self, op):
        op_label = getattr(op.op, 'label', None)
        base_name = None if not hasattr(op.op, 'base_gate') else op.op.base_gate.name
        base_label = None if not hasattr(op.op, 'base_gate') else op.op.base_gate.label
        ctrl_text = None
        if base_label:
            gate_text = base_label
            ctrl_text = op_label
        elif op_label and isinstance(op.op, ControlledGate):
            gate_text = base_name
            ctrl_text = op_label
        elif op_label:
            gate_text = op_label
        elif base_name:
            gate_text = base_name
        else:
            gate_text = op.name

        if gate_text in self._style['disptex']:
            gate_text = "{}".format(self._style['disptex'][gate_text])
        else:
            gate_text = "{}".format(gate_text[0].upper() + gate_text[1:])
        if ctrl_text:
            ctrl_text = "{}".format(ctrl_text[0].upper() + ctrl_text[1:])
        return gate_text, ctrl_text

    def _get_colors(self, op):
        base_name = None if not hasattr(op.op, 'base_gate') else op.op.base_gate.name
        if op.name in self._style['dispcol']:
            color = self._style['dispcol'][op.name]
            # Backward compatibility for style dict using 'displaycolor' with
            # gate color and no text color, so test for str first
            if isinstance(color, str):
                fc = color
                gt = self._style['gt']
            else:
                fc = color[0]
                gt = color[1]
        # Treat special case of classical gates in iqx style by making all
        # controlled gates of x, dcx, and swap the classical gate color
        elif self._style['name'] == 'iqx' and base_name in ['x', 'dcx', 'swap']:
            color = self._style['dispcol'][base_name]
            if isinstance(color, str):
                fc = color
                gt = self._style['gt']
            else:
                fc = color[0]
                gt = color[1]
        else:
            fc = self._style['gc']
            gt = self._style['gt']

        if self._style['name'] == 'bw':
            ec = self._style['ec']
            lc = self._style['lc']
        else:
            ec = fc
            lc = fc
        # Subtext needs to be same color as gate text
        sc = gt
        return fc, ec, gt, self._style['tc'], sc, lc

    def _multiqubit_gate(self, xy, fc=None, ec=None, gt=None, sc=None, text='', subtext=''):
        xpos = min([x[0] for x in xy])
        ypos = min([y[1] for y in xy])
        ypos_max = max([y[1] for y in xy])
        fs = self._style['fs']
        sfs = self._style['sfs']

        # added .21 is for qubit numbers on the left side
        text_width = self._get_text_width(text, fs) + .21
        sub_width = self._get_text_width(subtext, sfs, param=True) + .21
        wid = max((text_width, sub_width, WID))

        qubit_span = abs(ypos) - abs(ypos_max) + 1
        height = HIG + (qubit_span - 1)
        box = patches.Rectangle(
            xy=(xpos - 0.5 * wid, ypos - .5 * HIG), width=wid, height=height,
            fc=fc, ec=ec, linewidth=self._lwidth15, zorder=PORDER_GATE)
        self._ax.add_patch(box)

        # annotate inputs
        for bit, y in enumerate([x[1] for x in xy]):
            self._ax.text(xpos + .07 - 0.5 * wid, y, str(bit), ha='left', va='center',
                          fontsize=fs, color=gt, clip_on=True, zorder=PORDER_TEXT)

        if text:
            if subtext:
                self._ax.text(xpos + .11, ypos + 0.4 * height, text, ha='center',
                              va='center', fontsize=fs, color=gt, clip_on=True,
                              zorder=PORDER_TEXT)
                self._ax.text(xpos + .11, ypos + 0.2 * height, subtext, ha='center',
                              va='center', fontsize=sfs, color=sc, clip_on=True,
                              zorder=PORDER_TEXT)
            else:
self._ax.text(xpos + .11, ypos + .5 * (qubit_span - 1), text, ha='center', va='center', fontsize=fs, color=gt, clip_on=True, zorder=PORDER_TEXT, wrap=True) def _gate(self, xy, fc=None, ec=None, gt=None, sc=None, text='', subtext=''): xpos, ypos = xy fs = self._style['fs'] sfs = self._style['sfs'] text_width = self._get_text_width(text, fs) sub_width = self._get_text_width(subtext, sfs, param=True) wid = max((text_width, sub_width, WID)) box = patches.Rectangle(xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid, height=HIG, fc=fc, ec=ec, linewidth=self._lwidth15, zorder=PORDER_GATE) self._ax.add_patch(box) if text: if subtext: self._ax.text(xpos, ypos + 0.15 * HIG, text, ha='center', va='center', fontsize=fs, color=gt, clip_on=True, zorder=PORDER_TEXT) self._ax.text(xpos, ypos - 0.3 * HIG, subtext, ha='center', va='center', fontsize=sfs, color=sc, clip_on=True, zorder=PORDER_TEXT) else: self._ax.text(xpos, ypos, text, ha='center', va='center', fontsize=fs, color=gt, clip_on=True, zorder=PORDER_TEXT) def _sidetext(self, xy, tc=None, text=''): xpos, ypos = xy # 0.08 = the initial gap, add 1/2 text width to place on the right text_width = self._get_text_width(text, self._style['sfs']) xp = xpos + 0.08 + text_width / 2 self._ax.text(xp, ypos + HIG, text, ha='center', va='top', fontsize=self._style['sfs'], color=tc, clip_on=True, zorder=PORDER_TEXT) def _line(self, xy0, xy1, lc=None, ls=None, zorder=PORDER_LINE): x0, y0 = xy0 x1, y1 = xy1 linecolor = self._style['lc'] if lc is None else lc linestyle = 'solid' if ls is None else ls if linestyle == 'doublet': theta = np.arctan2(np.abs(x1 - x0), np.abs(y1 - y0)) dx = 0.05 * WID * np.cos(theta) dy = 0.05 * WID * np.sin(theta) self._ax.plot([x0 + dx, x1 + dx], [y0 + dy, y1 + dy], color=linecolor, linewidth=self._lwidth2, linestyle='solid', zorder=zorder) self._ax.plot([x0 - dx, x1 - dx], [y0 - dy, y1 - dy], color=linecolor, linewidth=self._lwidth2, linestyle='solid', zorder=zorder) else: self._ax.plot([x0, x1], [y0, y1], color=linecolor, linewidth=self._lwidth2, linestyle=linestyle, zorder=zorder) def _measure(self, qxy, cxy, cid, fc=None, ec=None, gt=None, sc=None): qx, qy = qxy cx, cy = cxy # draw gate box self._gate(qxy, fc=fc, ec=ec, gt=gt, sc=sc) # add measure symbol arc = patches.Arc(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7, height=HIG * 0.7, theta1=0, theta2=180, fill=False, ec=gt, linewidth=self._lwidth2, zorder=PORDER_GATE) self._ax.add_patch(arc) self._ax.plot([qx, qx + 0.35 * WID], [qy - 0.15 * HIG, qy + 0.20 * HIG], color=gt, linewidth=self._lwidth2, zorder=PORDER_GATE) # arrow self._line(qxy, [cx, cy + 0.35 * WID], lc=self._style['cc'], ls=self._style['cline']) arrowhead = patches.Polygon(((cx - 0.20 * WID, cy + 0.35 * WID), (cx + 0.20 * WID, cy + 0.35 * WID), (cx, cy + 0.04)), fc=self._style['cc'], ec=None) self._ax.add_artist(arrowhead) # target if self._cregbundle: self._ax.text(cx + .25, cy + .1, str(cid), ha='left', va='bottom', fontsize=0.8 * self._style['fs'], color=self._style['tc'], clip_on=True, zorder=PORDER_TEXT) def _conditional(self, xy, istrue=False): xpos, ypos = xy fc = self._style['lc'] if istrue else self._style['bg'] box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15, fc=fc, ec=self._style['lc'], linewidth=self._lwidth15, zorder=PORDER_GATE) self._ax.add_patch(box) def _ctrl_qubit(self, xy, fc=None, ec=None, tc=None, text='', text_top=None): xpos, ypos = xy box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15, fc=fc, ec=ec, linewidth=self._lwidth15, zorder=PORDER_GATE) self._ax.add_patch(box) # display the 
control label at the top or bottom if there is one if text_top is True: self._ax.text(xpos, ypos + 0.7 * HIG, text, ha='center', va='top', fontsize=self._style['sfs'], color=tc, clip_on=True, zorder=PORDER_TEXT) elif text_top is False: self._ax.text(xpos, ypos - 0.3 * HIG, text, ha='center', va='top', fontsize=self._style['sfs'], color=tc, clip_on=True, zorder=PORDER_TEXT) def _set_ctrl_bits(self, ctrl_state, num_ctrl_qubits, qbit, ec=None, tc=None, text='', qargs=None): # place the control label at the top or bottom of controls if text: qlist = [qubit.index for qubit in qargs] ctbits = qlist[:num_ctrl_qubits] qubits = qlist[num_ctrl_qubits:] max_ctbit = max(ctbits) min_ctbit = min(ctbits) top = min(qubits) > min_ctbit # display the control qubits as open or closed based on ctrl_state cstate = "{:b}".format(ctrl_state).rjust(num_ctrl_qubits, '0')[::-1] for i in range(num_ctrl_qubits): fc_open_close = ec if cstate[i] == '1' else self._style['bg'] text_top = None if text: if top and qlist[i] == min_ctbit: text_top = True elif not top and qlist[i] == max_ctbit: text_top = False self._ctrl_qubit(qbit[i], fc=fc_open_close, ec=ec, tc=tc, text=text, text_top=text_top) def _x_tgt_qubit(self, xy, ec=None, ac=None): linewidth = self._lwidth2 xpos, ypos = xy box = patches.Circle(xy=(xpos, ypos), radius=HIG * 0.35, fc=ec, ec=ec, linewidth=linewidth, zorder=PORDER_GATE) self._ax.add_patch(box) # add '+' symbol self._ax.plot([xpos, xpos], [ypos - 0.2 * HIG, ypos + 0.2 * HIG], color=ac, linewidth=linewidth, zorder=PORDER_GATE + 1) self._ax.plot([xpos - 0.2 * HIG, xpos + 0.2 * HIG], [ypos, ypos], color=ac, linewidth=linewidth, zorder=PORDER_GATE + 1) def _swap(self, xy, color=None): xpos, ypos = xy self._ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID], [ypos - 0.20 * WID, ypos + 0.20 * WID], color=color, linewidth=self._lwidth2, zorder=PORDER_LINE + 1) self._ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID], [ypos + 0.20 * WID, ypos - 0.20 * WID], color=color, linewidth=self._lwidth2, zorder=PORDER_LINE + 1) def _barrier(self, config): xys = config['coord'] for xy in xys: xpos, ypos = xy self._ax.plot([xpos, xpos], [ypos + 0.5, ypos - 0.5], linewidth=self._scale, linestyle="dashed", color=self._style['lc'], zorder=PORDER_TEXT) box = patches.Rectangle(xy=(xpos - (0.3 * WID), ypos - 0.5), width=0.6 * WID, height=1, fc=self._style['bc'], ec=None, alpha=0.6, linewidth=self._lwidth15, zorder=PORDER_GRAY) self._ax.add_patch(box) def draw(self, filename=None, verbose=False): """Draw method called from circuit_drawer""" self._draw_regs() self._draw_ops(verbose) _xl = - self._style['margin'][0] _xr = self._xmax + self._style['margin'][1] _yb = - self._ymax - self._style['margin'][2] + 1 - 0.5 _yt = self._style['margin'][3] + 0.5 self._ax.set_xlim(_xl, _xr) self._ax.set_ylim(_yb, _yt) # update figure size fig_w = _xr - _xl fig_h = _yt - _yb if self._style['figwidth'] < 0.0: self._style['figwidth'] = fig_w * BASE_SIZE * self._style['fs'] / 72 / WID self._figure.set_size_inches(self._style['figwidth'], self._style['figwidth'] * fig_h / fig_w) if self._global_phase: plt.text(_xl, _yt, 'Global Phase: %s' % pi_check(self._global_phase, output='mpl')) if filename: self._figure.savefig(filename, dpi=self._style['dpi'], bbox_inches='tight', facecolor=self._figure.get_facecolor()) if self._return_fig: if get_backend() in ['module://ipykernel.pylab.backend_inline', 'nbAgg']: plt.close(self._figure) return self._figure def _draw_regs(self): longest_reg_name_width = 0 initial_qbit = ' |0>' if self._initial_state else '' 
initial_cbit = ' 0' if self._initial_state else '' def _fix_double_script(reg_name): words = reg_name.split(' ') words = [word.replace('_', r'\_') if word.count('_') > 1 else word for word in words] words = [word.replace('^', r'\^{\ }') if word.count('^') > 1 else word for word in words] reg_name = ' '.join(words).replace(' ', '\\;') return reg_name # quantum register fs = self._style['fs'] for ii, reg in enumerate(self._qreg): if len(self._qreg) > 1: if self._layout is None: qreg_name = '${{{name}}}_{{{index}}}$'.format(name=reg.register.name, index=reg.index) else: if self._layout[reg.index]: qreg_name = '${{{name}}}_{{{index}}} \\mapsto {{{physical}}}$'.format( name=self._layout[reg.index].register.name, index=self._layout[reg.index].index, physical=reg.index) else: qreg_name = '${{{physical}}}$'.format(physical=reg.index) else: qreg_name = '{name}'.format(name=reg.register.name) qreg_name = _fix_double_script(qreg_name) + initial_qbit text_width = self._get_text_width(qreg_name, fs) * 1.15 if text_width > longest_reg_name_width: longest_reg_name_width = text_width pos = -ii self._qreg_dict[ii] = { 'y': pos, 'reg_name': qreg_name, 'index': reg.index, 'group': reg.register} self._n_lines += 1 # classical register if self._creg: n_creg = self._creg.copy() n_creg.pop(0) idx = 0 y_off = -len(self._qreg) for ii, (reg, nreg) in enumerate(itertools.zip_longest(self._creg, n_creg)): pos = y_off - idx if self._cregbundle: creg_name = '{}'.format(reg.register.name) creg_name = _fix_double_script(creg_name) + initial_cbit text_width = self._get_text_width(reg.register.name, fs) * 1.15 if text_width > longest_reg_name_width: longest_reg_name_width = text_width self._creg_dict[ii] = {'y': pos, 'reg_name': creg_name, 'index': reg.index, 'group': reg.register} if not (not nreg or reg.register != nreg.register): continue else: creg_name = '${}_{{{}}}$'.format(reg.register.name, reg.index) creg_name = _fix_double_script(creg_name) + initial_cbit text_width = self._get_text_width(reg.register.name, fs) * 1.15 if text_width > longest_reg_name_width: longest_reg_name_width = text_width self._creg_dict[ii] = {'y': pos, 'reg_name': creg_name, 'index': reg.index, 'group': reg.register} self._n_lines += 1 idx += 1 self._reg_long_text = longest_reg_name_width self._x_offset = -1.2 + self._reg_long_text def _draw_regs_sub(self, n_fold, feedline_l=False, feedline_r=False): # quantum register fs = self._style['fs'] for qreg in self._qreg_dict.values(): qreg_name = qreg['reg_name'] y = qreg['y'] - n_fold * (self._n_lines + 1) self._ax.text(self._x_offset - 0.2, y, qreg_name, ha='right', va='center', fontsize=1.25 * fs, color=self._style['tc'], clip_on=True, zorder=PORDER_TEXT) self._line([self._x_offset, y], [self._xmax, y], zorder=PORDER_REGLINE) # classical register this_creg_dict = {} for creg in self._creg_dict.values(): creg_name = creg['reg_name'] y = creg['y'] - n_fold * (self._n_lines + 1) if y not in this_creg_dict.keys(): this_creg_dict[y] = {'val': 1, 'reg_name': creg_name} else: this_creg_dict[y]['val'] += 1 for y, this_creg in this_creg_dict.items(): # cregbundle if this_creg['val'] > 1: self._ax.plot([self._x_offset + 0.2, self._x_offset + 0.3], [y - 0.1, y + 0.1], color=self._style['cc'], zorder=PORDER_LINE) self._ax.text(self._x_offset + 0.1, y + 0.1, str(this_creg['val']), ha='left', va='bottom', fontsize=0.8 * fs, color=self._style['tc'], clip_on=True, zorder=PORDER_TEXT) self._ax.text(self._x_offset - 0.2, y, this_creg['reg_name'], ha='right', va='center', fontsize=1.25 * fs, 
color=self._style['tc'], clip_on=True, zorder=PORDER_TEXT) self._line([self._x_offset, y], [self._xmax, y], lc=self._style['cc'], ls=self._style['cline'], zorder=PORDER_REGLINE) # lf vertical line at either end if feedline_l or feedline_r: xpos_l = self._x_offset - 0.01 xpos_r = self._fold + self._x_offset + 0.1 ypos1 = -n_fold * (self._n_lines + 1) ypos2 = -(n_fold + 1) * (self._n_lines) - n_fold + 1 if feedline_l: self._ax.plot([xpos_l, xpos_l], [ypos1, ypos2], color=self._style['lc'], linewidth=self._lwidth15, zorder=PORDER_LINE) if feedline_r: self._ax.plot([xpos_r, xpos_r], [ypos1, ypos2], color=self._style['lc'], linewidth=self._lwidth15, zorder=PORDER_LINE) def _draw_ops(self, verbose=False): _standard_1q_gates = ['x', 'y', 'z', 'id', 'h', 'r', 's', 'sdg', 't', 'tdg', 'rx', 'ry', 'rz', 'rxx', 'ryy', 'rzx', 'u1', 'u2', 'u3', 'u', 'swap', 'reset', 'sx', 'sxdg', 'p'] _barrier_gates = ['barrier', 'snapshot', 'load', 'save', 'noise'] _barriers = {'coord': [], 'group': []} # # generate coordinate manager # q_anchors = {} for key, qreg in self._qreg_dict.items(): q_anchors[key] = Anchor(reg_num=self._n_lines, yind=qreg['y'], fold=self._fold) c_anchors = {} for key, creg in self._creg_dict.items(): c_anchors[key] = Anchor(reg_num=self._n_lines, yind=creg['y'], fold=self._fold) # # draw the ops # prev_anc = -1 fs = self._style['fs'] sfs = self._style['sfs'] for layer in self._ops: widest_box = 0.0 # # compute the layer_width for this layer # for op in layer: if op.name in [*_barrier_gates, 'measure']: continue base_name = None if not hasattr(op.op, 'base_gate') else op.op.base_gate.name gate_text, ctrl_text = self._get_gate_ctrl_text(op) # if a standard_gate, no params, and no labels, layer_width is 1 if (not hasattr(op.op, 'params') and ((op.name in _standard_1q_gates or base_name in _standard_1q_gates) and gate_text in (op.name, base_name) and ctrl_text is None)): continue # small increments at end of the 3 _get_text_width calls are for small # spacing adjustments between gates ctrl_width = self._get_text_width(ctrl_text, fontsize=sfs) - 0.05 # get param_width, but 0 for gates with array params if (hasattr(op.op, 'params') and not any([isinstance(param, np.ndarray) for param in op.op.params]) and len(op.op.params) > 0): param = self._param_parse(op.op.params) if op.name == 'initialize': param = '[%s]' % param param = "${}$".format(param) param_width = self._get_text_width(param, fontsize=sfs, param=True) + 0.08 else: param_width = 0.0 if op.name == 'cu1' or op.name == 'rzz' or base_name == 'rzz': tname = 'U1' if op.name == 'cu1' else 'zz' gate_width = (self._get_text_width(tname + ' ()', fontsize=sfs) + param_width) * 1.5 else: gate_width = self._get_text_width(gate_text, fontsize=fs) + 0.10 # add .21 for the qubit numbers on the left of the multibit gates if (op.name not in _standard_1q_gates and base_name not in _standard_1q_gates): gate_width += 0.21 box_width = max(gate_width, ctrl_width, param_width, WID) if box_width > widest_box: widest_box = box_width layer_width = int(widest_box) + 1 this_anc = prev_anc + 1 # # draw the gates in this layer # for op in layer: base_name = None if not hasattr(op.op, 'base_gate') else op.op.base_gate.name gate_text, ctrl_text = self._get_gate_ctrl_text(op) fc, ec, gt, tc, sc, lc = self._get_colors(op) # get qreg index q_idxs = [] for qarg in op.qargs: for index, reg in self._qreg_dict.items(): if (reg['group'] == qarg.register and reg['index'] == qarg.index): q_idxs.append(index) break # get creg index c_idxs = [] for carg in op.cargs: for index, reg 
in self._creg_dict.items(): if (reg['group'] == carg.register and reg['index'] == carg.index): c_idxs.append(index) break # only add the gate to the anchors if it is going to be plotted. # this prevents additional blank wires at the end of the line if # the last instruction is a barrier type if self._plot_barriers or op.name not in _barrier_gates: for ii in q_idxs: q_anchors[ii].set_index(this_anc, layer_width) # qreg coordinate q_xy = [q_anchors[ii].plot_coord(this_anc, layer_width, self._x_offset) for ii in q_idxs] # creg coordinate c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width, self._x_offset) for ii in c_idxs] # bottom and top point of qreg qreg_b = min(q_xy, key=lambda xy: xy[1]) qreg_t = max(q_xy, key=lambda xy: xy[1]) # update index based on the value from plotting this_anc = q_anchors[q_idxs[0]].gate_anchor if verbose: print(op) # load param if (op.type == 'op' and hasattr(op.op, 'params') and len(op.op.params) > 0 and not any([isinstance(param, np.ndarray) for param in op.op.params])): param = "{}".format(self._param_parse(op.op.params)) else: param = '' # conditional gate if op.condition: c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width, self._x_offset) for ii in self._creg_dict] mask = 0 for index, cbit in enumerate(self._creg): if cbit.register == op.condition[0]: mask |= (1 << index) val = op.condition[1] # cbit list to consider fmt_c = '{{:0{}b}}'.format(len(c_xy)) cmask = list(fmt_c.format(mask))[::-1] # value fmt_v = '{{:0{}b}}'.format(cmask.count('1')) vlist = list(fmt_v.format(val))[::-1] # plot conditionals v_ind = 0 xy_plot = [] for xy, m in zip(c_xy, cmask): if m == '1': if xy not in xy_plot: if vlist[v_ind] == '1' or self._cregbundle: self._conditional(xy, istrue=True) else: self._conditional(xy, istrue=False) xy_plot.append(xy) v_ind += 1 creg_b = sorted(xy_plot, key=lambda xy: xy[1])[0] xpos, ypos = creg_b self._ax.text(xpos, ypos - 0.3 * HIG, hex(val), ha='center', va='top', fontsize=sfs, color=self._style['tc'], clip_on=True, zorder=PORDER_TEXT) self._line(qreg_t, creg_b, lc=self._style['cc'], ls=self._style['cline']) # # draw special gates # if op.name == 'measure': vv = self._creg_dict[c_idxs[0]]['index'] self._measure(q_xy[0], c_xy[0], vv, fc=fc, ec=ec, gt=gt, sc=sc) elif op.name in _barrier_gates: _barriers = {'coord': [], 'group': []} for index, qbit in enumerate(q_idxs): q_group = self._qreg_dict[qbit]['group'] if q_group not in _barriers['group']: _barriers['group'].append(q_group) _barriers['coord'].append(q_xy[index]) if self._plot_barriers: self._barrier(_barriers) elif op.name == 'initialize': vec = "$[{}]$".format(param.replace('$', '')) if len(q_xy) == 1: self._gate(q_xy[0], fc=fc, ec=ec, gt=gt, sc=sc, text=gate_text, subtext=vec) else: self._multiqubit_gate(q_xy, fc=fc, ec=ec, gt=gt, sc=sc, text=gate_text, subtext=vec) elif isinstance(op.op, Delay): param_text = "(%s)" % param if op.op.unit: param_text += "[%s]" % op.op.unit self._gate(q_xy[0], fc=fc, ec=ec, gt=gt, sc=sc, text=gate_text, subtext=param_text) # # draw single qubit gates # elif len(q_xy) == 1: self._gate(q_xy[0], fc=fc, ec=ec, gt=gt, sc=sc, text=gate_text, subtext=str(param)) # # draw controlled and special gates # # cx gates elif isinstance(op.op, ControlledGate) and base_name == 'x': num_ctrl_qubits = op.op.num_ctrl_qubits self._set_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, ec=ec, tc=tc, text=ctrl_text, qargs=op.qargs) tgt_color = self._style['dispcol']['target'] tgt = tgt_color if isinstance(tgt_color, str) else tgt_color[0] 
self._x_tgt_qubit(q_xy[num_ctrl_qubits], ec=ec, ac=tgt) self._line(qreg_b, qreg_t, lc=lc) # cz gate elif op.name == 'cz': num_ctrl_qubits = op.op.num_ctrl_qubits self._set_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, ec=ec, tc=tc, text=ctrl_text, qargs=op.qargs) self._ctrl_qubit(q_xy[1], fc=ec, ec=ec, tc=tc) self._line(qreg_b, qreg_t, lc=lc, zorder=PORDER_LINE + 1) # cu1, rzz, and controlled rzz gates (sidetext gates) elif (op.name == 'cu1' or op.name == 'rzz' or base_name == 'rzz'): num_ctrl_qubits = 0 if op.name == 'rzz' else op.op.num_ctrl_qubits if op.name != 'rzz': self._set_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, ec=ec, tc=tc, text=ctrl_text, qargs=op.qargs) self._ctrl_qubit(q_xy[num_ctrl_qubits], fc=ec, ec=ec, tc=tc) if op.name != 'cu1': self._ctrl_qubit(q_xy[num_ctrl_qubits + 1], fc=ec, ec=ec, tc=tc) stext = self._style['disptex']['u1'] if op.name == 'cu1' else 'zz' self._sidetext(qreg_b, tc=tc, text='{}'.format(stext) + ' ' + '({})'.format(param)) self._line(qreg_b, qreg_t, lc=lc) # swap gate elif op.name == 'swap': self._swap(q_xy[0], color=lc) self._swap(q_xy[1], color=lc) self._line(qreg_b, qreg_t, lc=lc) # cswap gate elif op.name != 'swap' and base_name == 'swap': num_ctrl_qubits = op.op.num_ctrl_qubits self._set_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, ec=ec, tc=tc, text=ctrl_text, qargs=op.qargs) self._swap(q_xy[num_ctrl_qubits], color=lc) self._swap(q_xy[num_ctrl_qubits + 1], color=lc) self._line(qreg_b, qreg_t, lc=lc) # all other controlled gates elif isinstance(op.op, ControlledGate): num_ctrl_qubits = op.op.num_ctrl_qubits num_qargs = len(q_xy) - num_ctrl_qubits self._set_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, ec=ec, tc=tc, text=ctrl_text, qargs=op.qargs) self._line(qreg_b, qreg_t, lc=lc) if num_qargs == 1: self._gate(q_xy[num_ctrl_qubits], fc=fc, ec=ec, gt=gt, sc=sc, text=gate_text, subtext='{}'.format(param)) else: self._multiqubit_gate(q_xy[num_ctrl_qubits:], fc=fc, ec=ec, gt=gt, sc=sc, text=gate_text, subtext='{}'.format(param)) # draw multi-qubit gate as final default else: self._multiqubit_gate(q_xy, fc=fc, ec=ec, gt=gt, sc=sc, text=gate_text, subtext='{}'.format(param)) # adjust the column if there have been barriers encountered, but not plotted barrier_offset = 0 if not self._plot_barriers: # only adjust if everything in the layer wasn't plotted barrier_offset = -1 if all([op.name in _barrier_gates for op in layer]) else 0 prev_anc = this_anc + layer_width + barrier_offset - 1 # # adjust window size and draw horizontal lines # anchors = [q_anchors[ii].get_index() for ii in self._qreg_dict] max_anc = max(anchors) if anchors else 0 n_fold = max(0, max_anc - 1) // self._fold if self._fold > 0 else 0 # window size if max_anc > self._fold > 0: self._xmax = self._fold + 1 + self._x_offset - 0.9 self._ymax = (n_fold + 1) * (self._n_lines + 1) - 1 else: self._xmax = max_anc + 1 + self._x_offset - 0.9 self._ymax = self._n_lines # add horizontal lines for ii in range(n_fold + 1): feedline_r = (n_fold > 0 and n_fold > ii) feedline_l = (ii > 0) self._draw_regs_sub(ii, feedline_l, feedline_r) # draw anchor index number if self._style['index']: for ii in range(max_anc): if self._fold > 0: x_coord = ii % self._fold + self._reg_long_text - 0.67 y_coord = - (ii // self._fold) * (self._n_lines + 1) + 0.7 else: x_coord = ii + self._reg_long_text - 0.67 y_coord = 0.7 self._ax.text(x_coord, y_coord, str(ii + 1), ha='center', va='center', fontsize=sfs, color=self._style['tc'], clip_on=True, zorder=PORDER_TEXT)
[ "logging.getLogger", "qiskit.circuit.tools.pi_check.pi_check", "re.compile", "matplotlib.patches.Arc", "pylatexenc.latex2text.LatexNodes2Text", "numpy.sin", "matplotlib.get_backend", "qiskit.visualization.qcstyle.DefaultStyle", "matplotlib.pyplot.close", "matplotlib.patches.Circle", "os.path.exp...
[((1274, 1301), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1291, 1301), False, 'import logging\n'), ((4154, 4179), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4177, 4179), False, 'import collections\n'), ((4206, 4231), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4229, 4231), False, 'import collections\n'), ((5862, 5904), 're.compile', 're.compile', (['"""(?<!\\\\\\\\)\\\\$(.*)(?<!\\\\\\\\)\\\\$"""'], {}), "('(?<!\\\\\\\\)\\\\$(.*)(?<!\\\\\\\\)\\\\$')\n", (5872, 5904), False, 'import re\n'), ((9516, 9540), 'qiskit.user_config.get_config', 'user_config.get_config', ([], {}), '()\n', (9538, 9540), False, 'from qiskit import user_config\n'), ((17396, 17544), 'matplotlib.patches.Rectangle', 'patches.Rectangle', ([], {'xy': '(xpos - 0.5 * wid, ypos - 0.5 * HIG)', 'width': 'wid', 'height': 'height', 'fc': 'fc', 'ec': 'ec', 'linewidth': 'self._lwidth15', 'zorder': 'PORDER_GATE'}), '(xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid,\n height=height, fc=fc, ec=ec, linewidth=self._lwidth15, zorder=PORDER_GATE)\n', (17413, 17544), False, 'from matplotlib import patches\n'), ((19036, 19181), 'matplotlib.patches.Rectangle', 'patches.Rectangle', ([], {'xy': '(xpos - 0.5 * wid, ypos - 0.5 * HIG)', 'width': 'wid', 'height': 'HIG', 'fc': 'fc', 'ec': 'ec', 'linewidth': 'self._lwidth15', 'zorder': 'PORDER_GATE'}), '(xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid,\n height=HIG, fc=fc, ec=ec, linewidth=self._lwidth15, zorder=PORDER_GATE)\n', (19053, 19181), False, 'from matplotlib import patches\n'), ((21606, 21772), 'matplotlib.patches.Arc', 'patches.Arc', ([], {'xy': '(qx, qy - 0.15 * HIG)', 'width': '(WID * 0.7)', 'height': '(HIG * 0.7)', 'theta1': '(0)', 'theta2': '(180)', 'fill': '(False)', 'ec': 'gt', 'linewidth': 'self._lwidth2', 'zorder': 'PORDER_GATE'}), '(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7, height=HIG * 0.7,\n theta1=0, theta2=180, fill=False, ec=gt, linewidth=self._lwidth2,\n zorder=PORDER_GATE)\n', (21617, 21772), False, 'from matplotlib import patches\n'), ((22137, 22277), 'matplotlib.patches.Polygon', 'patches.Polygon', (['((cx - 0.2 * WID, cy + 0.35 * WID), (cx + 0.2 * WID, cy + 0.35 * WID), (cx,\n cy + 0.04))'], {'fc': "self._style['cc']", 'ec': 'None'}), "(((cx - 0.2 * WID, cy + 0.35 * WID), (cx + 0.2 * WID, cy + \n 0.35 * WID), (cx, cy + 0.04)), fc=self._style['cc'], ec=None)\n", (22152, 22277), False, 'from matplotlib import patches\n'), ((22808, 22938), 'matplotlib.patches.Circle', 'patches.Circle', ([], {'xy': '(xpos, ypos)', 'radius': '(WID * 0.15)', 'fc': 'fc', 'ec': "self._style['lc']", 'linewidth': 'self._lwidth15', 'zorder': 'PORDER_GATE'}), "(xy=(xpos, ypos), radius=WID * 0.15, fc=fc, ec=self._style[\n 'lc'], linewidth=self._lwidth15, zorder=PORDER_GATE)\n", (22822, 22938), False, 'from matplotlib import patches\n'), ((23116, 23231), 'matplotlib.patches.Circle', 'patches.Circle', ([], {'xy': '(xpos, ypos)', 'radius': '(WID * 0.15)', 'fc': 'fc', 'ec': 'ec', 'linewidth': 'self._lwidth15', 'zorder': 'PORDER_GATE'}), '(xy=(xpos, ypos), radius=WID * 0.15, fc=fc, ec=ec, linewidth=\n self._lwidth15, zorder=PORDER_GATE)\n', (23130, 23231), False, 'from matplotlib import patches\n'), ((25047, 25157), 'matplotlib.patches.Circle', 'patches.Circle', ([], {'xy': '(xpos, ypos)', 'radius': '(HIG * 0.35)', 'fc': 'ec', 'ec': 'ec', 'linewidth': 'linewidth', 'zorder': 'PORDER_GATE'}), '(xy=(xpos, ypos), radius=HIG * 0.35, fc=ec, ec=ec, linewidth=\n linewidth, zorder=PORDER_GATE)\n', (25061, 25157), 
False, 'from matplotlib import patches\n'), ((4597, 4609), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4607, 4609), True, 'from matplotlib import pyplot as plt\n'), ((9378, 9392), 'qiskit.visualization.qcstyle.DefaultStyle', 'DefaultStyle', ([], {}), '()\n', (9390, 9392), False, 'from qiskit.visualization.qcstyle import DefaultStyle, set_style\n'), ((11971, 12002), 'qiskit.visualization.qcstyle.set_style', 'set_style', (['current_style', 'style'], {}), '(current_style, style)\n', (11980, 12002), False, 'from qiskit.visualization.qcstyle import DefaultStyle, set_style\n'), ((12524, 12567), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.5)', 'text'], {'fontsize': 'fontsize'}), '(0.5, 0.5, text, fontsize=fontsize)\n', (12532, 12567), True, 'from matplotlib import pyplot as plt\n'), ((26388, 26564), 'matplotlib.patches.Rectangle', 'patches.Rectangle', ([], {'xy': '(xpos - 0.3 * WID, ypos - 0.5)', 'width': '(0.6 * WID)', 'height': '(1)', 'fc': "self._style['bc']", 'ec': 'None', 'alpha': '(0.6)', 'linewidth': 'self._lwidth15', 'zorder': 'PORDER_GRAY'}), "(xy=(xpos - 0.3 * WID, ypos - 0.5), width=0.6 * WID,\n height=1, fc=self._style['bc'], ec=None, alpha=0.6, linewidth=self.\n _lwidth15, zorder=PORDER_GRAY)\n", (26405, 26564), False, 'from matplotlib import patches\n'), ((10425, 10450), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (10440, 10450), False, 'import os\n'), ((10482, 10523), 'os.path.join', 'os.path.join', (['spath', '"""styles"""', 'style_name'], {}), "(spath, 'styles', style_name)\n", (10494, 10523), False, 'import os\n'), ((10930, 10954), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (10948, 10954), False, 'import os\n'), ((10974, 10998), 'os.path.isfile', 'os.path.isfile', (['exp_user'], {}), '(exp_user)\n', (10988, 10998), False, 'import os\n'), ((14186, 14222), 'qiskit.circuit.tools.pi_check.pi_check', 'pi_check', (['e'], {'output': '"""mpl"""', 'ndigits': '(3)'}), "(e, output='mpl', ndigits=3)\n", (14194, 14222), False, 'from qiskit.circuit.tools.pi_check import pi_check\n'), ((20667, 20682), 'numpy.abs', 'np.abs', (['(x1 - x0)'], {}), '(x1 - x0)\n', (20673, 20682), True, 'import numpy as np\n'), ((20684, 20699), 'numpy.abs', 'np.abs', (['(y1 - y0)'], {}), '(y1 - y0)\n', (20690, 20699), True, 'import numpy as np\n'), ((20731, 20744), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (20737, 20744), True, 'import numpy as np\n'), ((20775, 20788), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (20781, 20788), True, 'import numpy as np\n'), ((27904, 27917), 'matplotlib.get_backend', 'get_backend', ([], {}), '()\n', (27915, 27917), False, 'from matplotlib import get_backend\n'), ((28024, 28047), 'matplotlib.pyplot.close', 'plt.close', (['self._figure'], {}), '(self._figure)\n', (28033, 28047), True, 'from matplotlib import pyplot as plt\n'), ((30188, 30229), 'itertools.zip_longest', 'itertools.zip_longest', (['self._creg', 'n_creg'], {}), '(self._creg, n_creg)\n', (30209, 30229), False, 'import itertools\n'), ((10835, 10863), 'os.path.join', 'os.path.join', (['""""""', 'style_name'], {}), "('', style_name)\n", (10847, 10863), False, 'import os\n'), ((13006, 13023), 'pylatexenc.latex2text.LatexNodes2Text', 'LatexNodes2Text', ([], {}), '()\n', (13021, 13023), False, 'from pylatexenc.latex2text import LatexNodes2Text\n'), ((27572, 27614), 'qiskit.circuit.tools.pi_check.pi_check', 'pi_check', (['self._global_phase'], {'output': '"""mpl"""'}), "(self._global_phase, output='mpl')\n", (27580, 27614), 
False, 'from qiskit.circuit.tools.pi_check import pi_check\n'), ((11163, 11199), 'qiskit.visualization.qcstyle.set_style', 'set_style', (['current_style', 'json_style'], {}), '(current_style, json_style)\n', (11172, 11199), False, 'from qiskit.visualization.qcstyle import DefaultStyle, set_style\n'), ((11121, 11138), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (11130, 11138), False, 'import json\n'), ((10755, 10785), 'os.path.join', 'os.path.join', (['path', 'style_name'], {}), '(path, style_name)\n', (10767, 10785), False, 'import os\n')]
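The row above is the body of Qiskit's matplotlib circuit drawer. In normal use the MatplotlibDrawer class is not instantiated directly; it is reached through the high-level drawing API. A minimal sketch of that entry point (assuming a Qiskit Terra version contemporary with this file, where QuantumCircuit.draw accepts output='mpl' and the fold option shown above):

from qiskit import QuantumCircuit

qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])

# QuantumCircuit.draw / qiskit.visualization.circuit_drawer build a
# MatplotlibDrawer internally; fold=25 mirrors the default used above
fig = qc.draw(output='mpl', fold=25)
fig.savefig('bell_circuit.png')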
import numpy as np
import pytest
import tensorflow as tf

from autokeras.adapters import output_adapter
from tests import utils


def test_y_is_pd_series():
    (x, y), (val_x, val_y) = utils.dataframe_series()
    head = output_adapter.ClassificationHeadAdapter(name='a')

    head.fit_transform(y)

    assert isinstance(head.transform(y), tf.data.Dataset)


def test_unsupported_types():
    y = 1
    head = output_adapter.ClassificationHeadAdapter(name='a')

    with pytest.raises(TypeError) as info:
        head.fit_transform(y)

    assert 'Expect the target data' in str(info.value)


def test_one_class():
    y = np.array(['a', 'a', 'a'])
    head = output_adapter.ClassificationHeadAdapter(name='a')

    with pytest.raises(ValueError) as info:
        head.fit_transform(y)

    assert 'Expect the target data' in str(info.value)


def test_tf_dataset():
    y = utils.generate_one_hot_labels(dtype='dataset')
    head = output_adapter.ClassificationHeadAdapter(name='a')

    y = head.fit_transform(y)

    assert isinstance(head.transform(y), tf.data.Dataset)
[ "tests.utils.generate_one_hot_labels", "numpy.array", "tests.utils.dataframe_series", "pytest.raises", "autokeras.adapters.output_adapter.ClassificationHeadAdapter" ]
[((186, 210), 'tests.utils.dataframe_series', 'utils.dataframe_series', ([], {}), '()\n', (208, 210), False, 'from tests import utils\n'), ((222, 272), 'autokeras.adapters.output_adapter.ClassificationHeadAdapter', 'output_adapter.ClassificationHeadAdapter', ([], {'name': '"""a"""'}), "(name='a')\n", (262, 272), False, 'from autokeras.adapters import output_adapter\n'), ((410, 460), 'autokeras.adapters.output_adapter.ClassificationHeadAdapter', 'output_adapter.ClassificationHeadAdapter', ([], {'name': '"""a"""'}), "(name='a')\n", (450, 460), False, 'from autokeras.adapters import output_adapter\n'), ((621, 646), 'numpy.array', 'np.array', (["['a', 'a', 'a']"], {}), "(['a', 'a', 'a'])\n", (629, 646), True, 'import numpy as np\n'), ((658, 708), 'autokeras.adapters.output_adapter.ClassificationHeadAdapter', 'output_adapter.ClassificationHeadAdapter', ([], {'name': '"""a"""'}), "(name='a')\n", (698, 708), False, 'from autokeras.adapters import output_adapter\n'), ((871, 917), 'tests.utils.generate_one_hot_labels', 'utils.generate_one_hot_labels', ([], {'dtype': '"""dataset"""'}), "(dtype='dataset')\n", (900, 917), False, 'from tests import utils\n'), ((929, 979), 'autokeras.adapters.output_adapter.ClassificationHeadAdapter', 'output_adapter.ClassificationHeadAdapter', ([], {'name': '"""a"""'}), "(name='a')\n", (969, 979), False, 'from autokeras.adapters import output_adapter\n'), ((470, 494), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (483, 494), False, 'import pytest\n'), ((718, 743), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (731, 743), False, 'import pytest\n')]
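Taken together, the tests in this row pin down the adapter contract: fit_transform learns the label encoding (rejecting non-array types and single-class targets) and transform then yields a tf.data.Dataset. A sketch of the happy path under those rules; the two-class numpy target is my own illustration, not taken from the test file:

import numpy as np
import tensorflow as tf
from autokeras.adapters import output_adapter

y = np.array(['cat', 'dog', 'dog', 'cat'])  # hypothetical two-class target
head = output_adapter.ClassificationHeadAdapter(name='a')
head.fit_transform(y)  # learns the encoding; no error since there are two classes
assert isinstance(head.transform(y), tf.data.Dataset)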
# --- built in ---
import os

# --- 3rd party ---
import numpy as np

import torch
from torch import nn

# --- my module ---

__all__ = [
    'langevin_dynamics',
    'anneal_langevin_dynamics',
    'sample_score_field',
    'sample_energy_field'
]


# --- dynamics ---

def langevin_dynamics(
    score_fn,
    x,
    eps=0.1,
    n_steps=1000
):
    """Langevin dynamics

    Args:
        score_fn (callable): a score function with the following signature
            func(x: torch.Tensor) -> torch.Tensor
        x (torch.Tensor): input samples
        eps (float, optional): noise scale. Defaults to 0.1.
        n_steps (int, optional): number of steps. Defaults to 1000.
    """
    for i in range(n_steps):
        x = x + eps/2. * score_fn(x).detach()
        x = x + torch.randn_like(x) * np.sqrt(eps)
    return x


def anneal_langevin_dynamics(
    score_fn,
    x,
    sigmas=None,
    eps=0.1,
    n_steps_each=100
):
    """Annealed Langevin dynamics

    Args:
        score_fn (callable): a score function with the following signature
            func(x: torch.Tensor, sigma: float) -> torch.Tensor
        x (torch.Tensor): input samples
        sigmas (torch.Tensor, optional): noise schedule. Defaults to None.
        eps (float, optional): noise scale. Defaults to 0.1.
        n_steps_each (int, optional): number of steps per noise level.
            Defaults to 100.
    """
    # default sigma schedule
    if sigmas is None:
        sigmas = np.exp(np.linspace(np.log(20), 0., 10))

    for sigma in sigmas:
        for i in range(n_steps_each):
            cur_eps = eps * (sigma / sigmas[-1]) ** 2
            x = x + cur_eps/2. * score_fn(x, sigma).detach()
            # the noise term must use the annealed step size as well
            x = x + torch.randn_like(x) * np.sqrt(cur_eps)
    return x


# --- sampling utils ---

def sample_score_field(
    score_fn,
    range_lim=4,
    grid_size=50,
    device='cpu'
):
    """Sampling score field from an energy model

    Args:
        score_fn (callable): a score function with the following signature
            func(x: torch.Tensor) -> torch.Tensor
        range_lim (int, optional): Range of x, y coordinates. Defaults to 4.
        grid_size (int, optional): Grid size. Defaults to 50.
        device (str, optional): torch device. Defaults to 'cpu'.
    """
    mesh = []
    x = np.linspace(-range_lim, range_lim, grid_size)
    y = np.linspace(-range_lim, range_lim, grid_size)
    for i in x:
        for j in y:
            mesh.append(np.asarray([i, j]))
    mesh = np.stack(mesh, axis=0)
    x = torch.from_numpy(mesh).float()
    x = x.to(device=device)
    scores = score_fn(x.detach()).detach()
    scores = scores.cpu().numpy()
    return mesh, scores


def sample_energy_field(
    energy_fn,
    range_lim=4,
    grid_size=1000,
    device='cpu'
):
    """Sampling energy field from an energy model

    Args:
        energy_fn (callable): an energy function with the following signature
            func(x: torch.Tensor) -> torch.Tensor
        range_lim (int, optional): range of x, y coordinates. Defaults to 4.
        grid_size (int, optional): grid size. Defaults to 1000.
        device (str, optional): torch device. Defaults to 'cpu'.
    """
    energy = []
    x = np.linspace(-range_lim, range_lim, grid_size)
    y = np.linspace(-range_lim, range_lim, grid_size)
    for i in y:
        mesh = []
        for j in x:
            mesh.append(np.asarray([j, i]))
        mesh = np.stack(mesh, axis=0)
        inputs = torch.from_numpy(mesh).float()
        inputs = inputs.to(device=device)
        e = energy_fn(inputs.detach()).detach()
        e = e.view(grid_size).cpu().numpy()
        energy.append(e)
    energy = np.stack(energy, axis=0)  # (grid_size, grid_size)
    return energy
[ "numpy.sqrt", "numpy.log", "numpy.asarray", "torch.from_numpy", "numpy.stack", "numpy.linspace", "torch.randn_like" ]
[((2245, 2290), 'numpy.linspace', 'np.linspace', (['(-range_lim)', 'range_lim', 'grid_size'], {}), '(-range_lim, range_lim, grid_size)\n', (2256, 2290), True, 'import numpy as np\n'), ((2299, 2344), 'numpy.linspace', 'np.linspace', (['(-range_lim)', 'range_lim', 'grid_size'], {}), '(-range_lim, range_lim, grid_size)\n', (2310, 2344), True, 'import numpy as np\n'), ((2436, 2458), 'numpy.stack', 'np.stack', (['mesh'], {'axis': '(0)'}), '(mesh, axis=0)\n', (2444, 2458), True, 'import numpy as np\n'), ((3147, 3192), 'numpy.linspace', 'np.linspace', (['(-range_lim)', 'range_lim', 'grid_size'], {}), '(-range_lim, range_lim, grid_size)\n', (3158, 3192), True, 'import numpy as np\n'), ((3201, 3246), 'numpy.linspace', 'np.linspace', (['(-range_lim)', 'range_lim', 'grid_size'], {}), '(-range_lim, range_lim, grid_size)\n', (3212, 3246), True, 'import numpy as np\n'), ((3603, 3627), 'numpy.stack', 'np.stack', (['energy'], {'axis': '(0)'}), '(energy, axis=0)\n', (3611, 3627), True, 'import numpy as np\n'), ((3360, 3382), 'numpy.stack', 'np.stack', (['mesh'], {'axis': '(0)'}), '(mesh, axis=0)\n', (3368, 3382), True, 'import numpy as np\n'), ((2467, 2489), 'torch.from_numpy', 'torch.from_numpy', (['mesh'], {}), '(mesh)\n', (2483, 2489), False, 'import torch\n'), ((770, 789), 'torch.randn_like', 'torch.randn_like', (['x'], {}), '(x)\n', (786, 789), False, 'import torch\n'), ((792, 804), 'numpy.sqrt', 'np.sqrt', (['eps'], {}), '(eps)\n', (799, 804), True, 'import numpy as np\n'), ((1443, 1453), 'numpy.log', 'np.log', (['(20)'], {}), '(20)\n', (1449, 1453), True, 'import numpy as np\n'), ((2405, 2423), 'numpy.asarray', 'np.asarray', (['[i, j]'], {}), '([i, j])\n', (2415, 2423), True, 'import numpy as np\n'), ((3325, 3343), 'numpy.asarray', 'np.asarray', (['[j, i]'], {}), '([j, i])\n', (3335, 3343), True, 'import numpy as np\n'), ((3400, 3422), 'torch.from_numpy', 'torch.from_numpy', (['mesh'], {}), '(mesh)\n', (3416, 3422), False, 'import torch\n'), ((1663, 1682), 'torch.randn_like', 'torch.randn_like', (['x'], {}), '(x)\n', (1679, 1682), False, 'import torch\n'), ((1685, 1697), 'numpy.sqrt', 'np.sqrt', (['eps'], {}), '(eps)\n', (1692, 1697), True, 'import numpy as np\n')]
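A quick sanity check of the sampler in this row: for a standard normal target the score is exactly -x, so Langevin dynamics should contract arbitrary starting points toward N(0, I). A toy sketch under that assumption (langevin_dynamics from the module above is assumed to be in scope):

import torch

score_fn = lambda x: -x           # exact score of a standard normal
x0 = 5.0 * torch.randn(2048, 2)   # start far from the target density
samples = langevin_dynamics(score_fn, x0, eps=0.1, n_steps=1000)
print(samples.mean(dim=0))        # expect roughly (0, 0)
print(samples.std(dim=0))         # expect roughly (1, 1)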
import cv2
import numpy as np
import pytesseract
import requests
from PIL import Image


def ocr(img) -> str:
    """
    Recognize the captcha. Since the captcha can be bypassed, this
    method is no longer needed.
    """
    # PIL gives RGB; reverse the channels to get OpenCV's BGR order
    img = np.array(img)
    img = img[:, :, ::-1].copy()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # keep only green-ish pixels (hue 45-90), i.e. the captcha glyphs
    mask = cv2.inRange(img, (45, 100, 40), (90, 255, 255))
    imask = mask > 0
    # render the masked glyphs white on black (HSV (0, 0, 255) is white)
    # so Tesseract sees a high-contrast image
    green = np.zeros_like(img, np.uint8)
    green[imask] = np.array([0, 0, 255])
    green = cv2.cvtColor(green, cv2.COLOR_HSV2RGB)
    img = Image.fromarray(green)
    r = pytesseract.image_to_string(img).strip()
    return r


if __name__ == "__main__":
    raw = requests.get("https://passport.ustc.edu.cn/validatecode.jsp?type=login",
                 stream=True).raw
    img = Image.open(raw)
    img.show()
    r = ocr(img)
    print(r)
[ "PIL.Image.fromarray", "PIL.Image.open", "cv2.inRange", "requests.get", "numpy.array", "cv2.cvtColor", "pytesseract.image_to_string", "numpy.zeros_like" ]
[((164, 177), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (172, 177), True, 'import numpy as np\n'), ((221, 257), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (233, 257), False, 'import cv2\n'), ((270, 317), 'cv2.inRange', 'cv2.inRange', (['img', '(45, 100, 40)', '(90, 255, 255)'], {}), '(img, (45, 100, 40), (90, 255, 255))\n', (281, 317), False, 'import cv2\n'), ((351, 379), 'numpy.zeros_like', 'np.zeros_like', (['img', 'np.uint8'], {}), '(img, np.uint8)\n', (364, 379), True, 'import numpy as np\n'), ((399, 420), 'numpy.array', 'np.array', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (407, 420), True, 'import numpy as np\n'), ((434, 472), 'cv2.cvtColor', 'cv2.cvtColor', (['green', 'cv2.COLOR_HSV2RGB'], {}), '(green, cv2.COLOR_HSV2RGB)\n', (446, 472), False, 'import cv2\n'), ((483, 505), 'PIL.Image.fromarray', 'Image.fromarray', (['green'], {}), '(green)\n', (498, 505), False, 'from PIL import Image\n'), ((708, 723), 'PIL.Image.open', 'Image.open', (['raw'], {}), '(raw)\n', (718, 723), False, 'from PIL import Image\n'), ((608, 697), 'requests.get', 'requests.get', (['"""https://passport.ustc.edu.cn/validatecode.jsp?type=login"""'], {'stream': '(True)'}), "('https://passport.ustc.edu.cn/validatecode.jsp?type=login',\n stream=True)\n", (620, 697), False, 'import requests\n'), ((515, 547), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['img'], {}), '(img)\n', (542, 547), False, 'import pytesseract\n')]
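The non-obvious step in ocr() above is the cv2.inRange call: hue 45-90 with saturation/value floors keeps only green-ish pixels, which is what isolates the captcha glyphs. A tiny self-contained check of that band (the synthetic pixel values are my own):

import cv2
import numpy as np

hsv = np.zeros((2, 2, 3), np.uint8)
hsv[0] = (60, 200, 200)   # green-ish HSV pixel -> inside the band
hsv[1] = (120, 200, 200)  # blue-ish HSV pixel  -> outside the band
mask = cv2.inRange(hsv, (45, 100, 40), (90, 255, 255))
print(mask)               # 255 across the first row, 0 across the second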
# coding=UTF-8 # ex:ts=4:sw=4:et=on # Copyright (c) 2013, <NAME> # All rights reserved. # Complete license can be found in the LICENSE file. import numpy as np from scipy.special import erf from math import sqrt from .math_tools import sqrt2pi, sqrt8 def get_S(soller1, soller2): _S = sqrt((soller1 * 0.5) ** 2 + (soller2 * 0.5) ** 2) _S1S2 = soller1 * soller2 return _S, _S1S2 def get_T(range_theta, sigma_star, soller1, soller2): sigma_star = float(max(sigma_star, 1e-18)) S, _ = get_S(soller1, soller2) range_st = np.sin(range_theta) Q = S / (sqrt8 * range_st * sigma_star) return erf(Q) * sqrt2pi / (2.0 * sigma_star * S) - 2.0 * range_st * (1.0 - np.exp(-(Q ** 2.0))) / (S ** 2.0) def get_lorentz_polarisation_factor(range_theta, sigma_star, soller1, soller2, mcr_2theta): """ Get the lorentz polarisation factor for the given sigma-star value, soller slits, monochromator Bragg angle and the given theta range """ T = get_T(range_theta, sigma_star, soller1, soller2) pol = np.cos(np.radians(mcr_2theta)) ** 2 return T * (1.0 + pol * (np.cos(2.0 * range_theta) ** 2)) / np.sin(range_theta) def get_fixed_to_ads_correction_range(range_theta, goniometer): return np.sin(range_theta) def get_nm_from_t(theta, wavelength=0.154056, zero_for_inf=False): """ Convert the given theta angles (scalar or np array) to nanometer spacings using the given wavelength """ return get_nm_from_2t(2.0 * theta, wavelength=wavelength, zero_for_inf=zero_for_inf) def get_nm_from_2t(twotheta, wavelength=0.154056, zero_for_inf=False): """ Convert the given 2-theta angles (scalar or np array) to nanometer spacings using the given wavelength """ if twotheta == 0: return 0. if zero_for_inf else 1e16 else: return wavelength / (2.0 * np.sin(np.radians(twotheta / 2.0))) def get_t_from_nm(nm, wavelength=0.154056): """ Convert the given nanometer spacings (scalar or np array) to theta angles using the given wavelength """ return get_2t_from_nm(nm, wavelength=wavelength) / 2 def get_2t_from_nm(nm, wavelength=0.154056): """ Convert the given nanometer spacings (scalar or np array) to 2-theta angles using the given wavelength """ twotheta = 0.0 if nm != 0: twotheta = np.degrees(np.arcsin(max(-1.0, min(1.0, wavelength / (2.0 * nm))))) * 2.0 return twotheta
[ "numpy.radians", "math.sqrt", "numpy.exp", "scipy.special.erf", "numpy.cos", "numpy.sin" ]
[((294, 343), 'math.sqrt', 'sqrt', (['((soller1 * 0.5) ** 2 + (soller2 * 0.5) ** 2)'], {}), '((soller1 * 0.5) ** 2 + (soller2 * 0.5) ** 2)\n', (298, 343), False, 'from math import sqrt\n'), ((547, 566), 'numpy.sin', 'np.sin', (['range_theta'], {}), '(range_theta)\n', (553, 566), True, 'import numpy as np\n'), ((1247, 1266), 'numpy.sin', 'np.sin', (['range_theta'], {}), '(range_theta)\n', (1253, 1266), True, 'import numpy as np\n'), ((1151, 1170), 'numpy.sin', 'np.sin', (['range_theta'], {}), '(range_theta)\n', (1157, 1170), True, 'import numpy as np\n'), ((1058, 1080), 'numpy.radians', 'np.radians', (['mcr_2theta'], {}), '(mcr_2theta)\n', (1068, 1080), True, 'import numpy as np\n'), ((622, 628), 'scipy.special.erf', 'erf', (['Q'], {}), '(Q)\n', (625, 628), False, 'from scipy.special import erf\n'), ((690, 707), 'numpy.exp', 'np.exp', (['(-Q ** 2.0)'], {}), '(-Q ** 2.0)\n', (696, 707), True, 'import numpy as np\n'), ((1882, 1908), 'numpy.radians', 'np.radians', (['(twotheta / 2.0)'], {}), '(twotheta / 2.0)\n', (1892, 1908), True, 'import numpy as np\n'), ((1116, 1141), 'numpy.cos', 'np.cos', (['(2.0 * range_theta)'], {}), '(2.0 * range_theta)\n', (1122, 1141), True, 'import numpy as np\n')]
from PINN_Base.base_v1 import PINN_Base import tensorflow as tf import numpy as np class Soft_Mesh(PINN_Base): def __init__(self, lower_bound, upper_bound, layers_approx, layers_mesh, **kwargs ): assert(layers_approx[-2] == layers_mesh[-1]) self.output_dim = layers_approx[-1] # Output just the weighted basis functions which make up the final layer layers = layers_approx[:-1] self.layers_mesh = layers_mesh super().__init__(lower_bound, upper_bound, layers, **kwargs) def _init_params(self): self.weights_mesh, self.biases_mesh = self._init_NN(self.layers_mesh) super()._init_params() def _forward_mesh(self, X, weights, biases): scores, activations = self._NN(X, weights, biases) return tf.nn.softmax(scores), activations def _forward_approx(self, X, weights, biases): return self._NN( X, self.weights, self.biases) def _forward(self, X): probs, activations_probs = self._forward_mesh( X, self.weights_mesh, self.biases_mesh) basis_functions, activations_basis = self._forward_approx( X, self.weights, self.biases) if X == self.X: self.probs = probs self.activations_probs = activations_probs self.basis_functions = basis_functions self.activations_basis = activations_basis # Take the combination of basis functions multiplied by probabilities. return tf.reduce_sum(basis_functions * probs, axis=1)[:, None] def get_all_weights(self): return self.sess.run(self.get_all_weight_variables()) def get_all_weight_variables(self): return [self.weights, self.biases, self.weights_mesh, self.biases_mesh] def get_output_dim(self): return self.output_dim def get_probs(self, X): return self.sess.run(self.probs, {self.X: X}) def get_scaled_basis(self, X): probs = self.sess.run(self.probs, {self.X: X}) basis = self.sess.run(self.basis_functions, {self.X: X}) scaled_basis = probs * np.abs(basis) return scaled_basis def get_basis_functions(self, X): return self.sess.run(self.basis_functions, {self.X: X}) def _count_params(self): params_main = super()._count_params() params_mesh_weights = self._size_of_variable_list(self.weights_mesh) params_mesh_biases = self._size_of_variable_list(self.biases_mesh) return params_main + params_mesh_weights + params_mesh_biases def get_architecture_description(self): params = self._count_params() return { "arch_name": "soft_mesh", "n_params": params, "shape_fit": self.layers[:], "shape_mesh": self.layers_mesh[:], "dtype": "float32" if self.dtype == tf.float32 else "float64" }
[ "tensorflow.reduce_sum", "numpy.abs", "tensorflow.nn.softmax" ]
[((898, 919), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['scores'], {}), '(scores)\n', (911, 919), True, 'import tensorflow as tf\n'), ((1610, 1656), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(basis_functions * probs)'], {'axis': '(1)'}), '(basis_functions * probs, axis=1)\n', (1623, 1656), True, 'import tensorflow as tf\n'), ((2214, 2227), 'numpy.abs', 'np.abs', (['basis'], {}), '(basis)\n', (2220, 2227), True, 'import numpy as np\n')]
# Copyright (c) 2020-2022 by Fraunhofer Institute for Energy Economics # and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved. # Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. import os import numpy as np import pandapipes as pp import pandas as pd from pandapipes.component_models import Pipe from pandapipes.idx_branch import VINIT from pandapipes.idx_node import PINIT, TINIT from pandapipes.pipeflow_setup import get_lookup from pandapipes.component_models.junction_component import Junction from pandapipes.test import test_path data_path = os.path.join(test_path, "pipeflow_internals", "data") def test_hydraulic_only(): """ :return: :rtype: """ net = pp.create_empty_network("net") d = 75e-3 pp.create_junction(net, pn_bar=5, tfluid_k=283) pp.create_junction(net, pn_bar=5, tfluid_k=283) pp.create_pipe_from_parameters(net, 0, 1, 6, diameter_m=d, k_mm=.1, sections=1, alpha_w_per_m2k=5) pp.create_ext_grid(net, 0, p_bar=5, t_k=330, type="pt") pp.create_sink(net, 1, mdot_kg_per_s=1) pp.create_fluid_from_lib(net, "water", overwrite=True) pp.pipeflow(net, stop_condition="tol", iter=70, friction_model="nikuradse", transient=False, nonlinear_method="automatic", tol_p=1e-4, tol_v=1e-4) data = pd.read_csv(os.path.join(data_path, "hydraulics.csv"), sep=';', header=0, keep_default_na=False) node_pit = net["_pit"]["node"] branch_pit = net["_pit"]["branch"] v_an = data.loc[0, "pv"] p_an = data.loc[1:3, "pv"] p_pandapipes = node_pit[:, PINIT] v_pandapipes = branch_pit[:, VINIT] p_diff = np.abs(1 - p_pandapipes / p_an) v_diff = np.abs(v_pandapipes - v_an) assert np.all(p_diff < 0.01) assert (np.all(v_diff < 0.05)) def test_heat_only(): net = pp.create_empty_network("net") d = 75e-3 pp.create_junction(net, pn_bar=5, tfluid_k=283) pp.create_junction(net, pn_bar=5, tfluid_k=283) pp.create_pipe_from_parameters(net, 0, 1, 6, diameter_m=d, k_mm=.1, sections=6, alpha_w_per_m2k=5) pp.create_ext_grid(net, 0, p_bar=5, t_k=330, type="pt") pp.create_sink(net, 1, mdot_kg_per_s=1) pp.create_fluid_from_lib(net, "water", overwrite=True) pp.pipeflow(net, stop_condition="tol", iter=70, friction_model="nikuradse", nonlinear_method="automatic", mode="all") ntw = pp.create_empty_network("net") d = 75e-3 pp.create_junction(ntw, pn_bar=5, tfluid_k=283) pp.create_junction(ntw, pn_bar=5, tfluid_k=283) pp.create_pipe_from_parameters(ntw, 0, 1, 6, diameter_m=d, k_mm=.1, sections=6, alpha_w_per_m2k=5) pp.create_ext_grid(ntw, 0, p_bar=5, t_k=330, type="pt") pp.create_sink(ntw, 1, mdot_kg_per_s=1) pp.create_fluid_from_lib(ntw, "water", overwrite=True) pp.pipeflow(ntw, stop_condition="tol", iter=50, friction_model="nikuradse", nonlinear_method="automatic", mode="hydraulics") p = ntw._pit["node"][:, 5] v = ntw._pit["branch"][:, 12] u = np.concatenate((p, v)) pp.pipeflow(ntw, sol_vec=u, stop_condition="tol", iter=50, friction_model="nikuradse", nonlinear_method="automatic", mode="heat") T_net = net.res_junction.t_k T_ntw = ntw.res_junction.t_k T_diff = np.abs(1 - T_net / T_ntw) assert np.all(T_diff < 0.01)
[ "numpy.abs", "numpy.all", "pandapipes.create_fluid_from_lib", "os.path.join", "pandapipes.create_junction", "pandapipes.create_ext_grid", "numpy.concatenate", "pandapipes.create_pipe_from_parameters", "pandapipes.pipeflow", "pandapipes.create_empty_network", "pandapipes.create_sink" ]
[((637, 690), 'os.path.join', 'os.path.join', (['test_path', '"""pipeflow_internals"""', '"""data"""'], {}), "(test_path, 'pipeflow_internals', 'data')\n", (649, 690), False, 'import os\n'), ((772, 802), 'pandapipes.create_empty_network', 'pp.create_empty_network', (['"""net"""'], {}), "('net')\n", (795, 802), True, 'import pandapipes as pp\n'), ((821, 868), 'pandapipes.create_junction', 'pp.create_junction', (['net'], {'pn_bar': '(5)', 'tfluid_k': '(283)'}), '(net, pn_bar=5, tfluid_k=283)\n', (839, 868), True, 'import pandapipes as pp\n'), ((873, 920), 'pandapipes.create_junction', 'pp.create_junction', (['net'], {'pn_bar': '(5)', 'tfluid_k': '(283)'}), '(net, pn_bar=5, tfluid_k=283)\n', (891, 920), True, 'import pandapipes as pp\n'), ((925, 1028), 'pandapipes.create_pipe_from_parameters', 'pp.create_pipe_from_parameters', (['net', '(0)', '(1)', '(6)'], {'diameter_m': 'd', 'k_mm': '(0.1)', 'sections': '(1)', 'alpha_w_per_m2k': '(5)'}), '(net, 0, 1, 6, diameter_m=d, k_mm=0.1,\n sections=1, alpha_w_per_m2k=5)\n', (955, 1028), True, 'import pandapipes as pp\n'), ((1063, 1118), 'pandapipes.create_ext_grid', 'pp.create_ext_grid', (['net', '(0)'], {'p_bar': '(5)', 't_k': '(330)', 'type': '"""pt"""'}), "(net, 0, p_bar=5, t_k=330, type='pt')\n", (1081, 1118), True, 'import pandapipes as pp\n'), ((1123, 1162), 'pandapipes.create_sink', 'pp.create_sink', (['net', '(1)'], {'mdot_kg_per_s': '(1)'}), '(net, 1, mdot_kg_per_s=1)\n', (1137, 1162), True, 'import pandapipes as pp\n'), ((1168, 1222), 'pandapipes.create_fluid_from_lib', 'pp.create_fluid_from_lib', (['net', '"""water"""'], {'overwrite': '(True)'}), "(net, 'water', overwrite=True)\n", (1192, 1222), True, 'import pandapipes as pp\n'), ((1228, 1382), 'pandapipes.pipeflow', 'pp.pipeflow', (['net'], {'stop_condition': '"""tol"""', 'iter': '(70)', 'friction_model': '"""nikuradse"""', 'transient': '(False)', 'nonlinear_method': '"""automatic"""', 'tol_p': '(0.0001)', 'tol_v': '(0.0001)'}), "(net, stop_condition='tol', iter=70, friction_model='nikuradse',\n transient=False, nonlinear_method='automatic', tol_p=0.0001, tol_v=0.0001)\n", (1239, 1382), True, 'import pandapipes as pp\n'), ((1768, 1799), 'numpy.abs', 'np.abs', (['(1 - p_pandapipes / p_an)'], {}), '(1 - p_pandapipes / p_an)\n', (1774, 1799), True, 'import numpy as np\n'), ((1813, 1840), 'numpy.abs', 'np.abs', (['(v_pandapipes - v_an)'], {}), '(v_pandapipes - v_an)\n', (1819, 1840), True, 'import numpy as np\n'), ((1853, 1874), 'numpy.all', 'np.all', (['(p_diff < 0.01)'], {}), '(p_diff < 0.01)\n', (1859, 1874), True, 'import numpy as np\n'), ((1887, 1908), 'numpy.all', 'np.all', (['(v_diff < 0.05)'], {}), '(v_diff < 0.05)\n', (1893, 1908), True, 'import numpy as np\n'), ((1944, 1974), 'pandapipes.create_empty_network', 'pp.create_empty_network', (['"""net"""'], {}), "('net')\n", (1967, 1974), True, 'import pandapipes as pp\n'), ((1993, 2040), 'pandapipes.create_junction', 'pp.create_junction', (['net'], {'pn_bar': '(5)', 'tfluid_k': '(283)'}), '(net, pn_bar=5, tfluid_k=283)\n', (2011, 2040), True, 'import pandapipes as pp\n'), ((2045, 2092), 'pandapipes.create_junction', 'pp.create_junction', (['net'], {'pn_bar': '(5)', 'tfluid_k': '(283)'}), '(net, pn_bar=5, tfluid_k=283)\n', (2063, 2092), True, 'import pandapipes as pp\n'), ((2097, 2200), 'pandapipes.create_pipe_from_parameters', 'pp.create_pipe_from_parameters', (['net', '(0)', '(1)', '(6)'], {'diameter_m': 'd', 'k_mm': '(0.1)', 'sections': '(6)', 'alpha_w_per_m2k': '(5)'}), '(net, 0, 1, 6, diameter_m=d, k_mm=0.1,\n sections=6, alpha_w_per_m2k=5)\n', (2127, 2200), True, 'import pandapipes as pp\n'), ((2235, 2290), 'pandapipes.create_ext_grid', 'pp.create_ext_grid', (['net', '(0)'], {'p_bar': '(5)', 't_k': '(330)', 'type': '"""pt"""'}), "(net, 0, p_bar=5, t_k=330, type='pt')\n", (2253, 2290), True, 'import pandapipes as pp\n'), ((2295, 2334), 'pandapipes.create_sink', 'pp.create_sink', (['net', '(1)'], {'mdot_kg_per_s': '(1)'}), '(net, 1, mdot_kg_per_s=1)\n', (2309, 2334), True, 'import pandapipes as pp\n'), ((2340, 2394), 'pandapipes.create_fluid_from_lib', 'pp.create_fluid_from_lib', (['net', '"""water"""'], {'overwrite': '(True)'}), "(net, 'water', overwrite=True)\n", (2364, 2394), True, 'import pandapipes as pp\n'), ((2400, 2521), 'pandapipes.pipeflow', 'pp.pipeflow', (['net'], {'stop_condition': '"""tol"""', 'iter': '(70)', 'friction_model': '"""nikuradse"""', 'nonlinear_method': '"""automatic"""', 'mode': '"""all"""'}), "(net, stop_condition='tol', iter=70, friction_model='nikuradse',\n nonlinear_method='automatic', mode='all')\n", (2411, 2521), True, 'import pandapipes as pp\n'), ((2545, 2575), 'pandapipes.create_empty_network', 'pp.create_empty_network', (['"""net"""'], {}), "('net')\n", (2568, 2575), True, 'import pandapipes as pp\n'), ((2594, 2641), 'pandapipes.create_junction', 'pp.create_junction', (['ntw'], {'pn_bar': '(5)', 'tfluid_k': '(283)'}), '(ntw, pn_bar=5, tfluid_k=283)\n', (2612, 2641), True, 'import pandapipes as pp\n'), ((2646, 2693), 'pandapipes.create_junction', 'pp.create_junction', (['ntw'], {'pn_bar': '(5)', 'tfluid_k': '(283)'}), '(ntw, pn_bar=5, tfluid_k=283)\n', (2664, 2693), True, 'import pandapipes as pp\n'), ((2698, 2801), 'pandapipes.create_pipe_from_parameters', 'pp.create_pipe_from_parameters', (['ntw', '(0)', '(1)', '(6)'], {'diameter_m': 'd', 'k_mm': '(0.1)', 'sections': '(6)', 'alpha_w_per_m2k': '(5)'}), '(ntw, 0, 1, 6, diameter_m=d, k_mm=0.1,\n sections=6, alpha_w_per_m2k=5)\n', (2728, 2801), True, 'import pandapipes as pp\n'), ((2836, 2891), 'pandapipes.create_ext_grid', 'pp.create_ext_grid', (['ntw', '(0)'], {'p_bar': '(5)', 't_k': '(330)', 'type': '"""pt"""'}), "(ntw, 0, p_bar=5, t_k=330, type='pt')\n", (2854, 2891), True, 'import pandapipes as pp\n'), ((2896, 2935), 'pandapipes.create_sink', 'pp.create_sink', (['ntw', '(1)'], {'mdot_kg_per_s': '(1)'}), '(ntw, 1, mdot_kg_per_s=1)\n', (2910, 2935), True, 'import pandapipes as pp\n'), ((2941, 2995), 'pandapipes.create_fluid_from_lib', 'pp.create_fluid_from_lib', (['ntw', '"""water"""'], {'overwrite': '(True)'}), "(ntw, 'water', overwrite=True)\n", (2965, 2995), True, 'import pandapipes as pp\n'), ((3001, 3129), 'pandapipes.pipeflow', 'pp.pipeflow', (['ntw'], {'stop_condition': '"""tol"""', 'iter': '(50)', 'friction_model': '"""nikuradse"""', 'nonlinear_method': '"""automatic"""', 'mode': '"""hydraulics"""'}), "(ntw, stop_condition='tol', iter=50, friction_model='nikuradse',\n nonlinear_method='automatic', mode='hydraulics')\n", (3012, 3129), True, 'import pandapipes as pp\n'), ((3216, 3238), 'numpy.concatenate', 'np.concatenate', (['(p, v)'], {}), '((p, v))\n', (3230, 3238), True, 'import numpy as np\n'), ((3244, 3378), 'pandapipes.pipeflow', 'pp.pipeflow', (['ntw'], {'sol_vec': 'u', 'stop_condition': '"""tol"""', 'iter': '(50)', 'friction_model': '"""nikuradse"""', 'nonlinear_method': '"""automatic"""', 'mode': '"""heat"""'}), "(ntw, sol_vec=u, stop_condition='tol', iter=50, friction_model=\n 'nikuradse', nonlinear_method='automatic', mode='heat')\n", (3255, 3378), True, 'import pandapipes as pp\n'), ((3471, 3496), 'numpy.abs', 'np.abs', (['(1 - T_net / T_ntw)'], {}), '(1 - T_net / T_ntw)\n', (3477, 3496), True, 'import numpy as np\n'), ((3509, 3530), 'numpy.all', 'np.all', (['(T_diff < 0.01)'], {}), '(T_diff < 0.01)\n', (3515, 3530), True, 'import numpy as np\n'), ((1431, 1472), 'os.path.join', 'os.path.join', (['data_path', '"""hydraulics.csv"""'], {}), "(data_path, 'hydraulics.csv')\n", (1443, 1472), False, 'import os\n')]
""" Definition of views. """ from datetime import datetime from django.shortcuts import render from django.http import HttpRequest from . import models import numpy as np from . import predict_model as pm import time import random def home(request): """Renders the home page.""" assert isinstance(request, HttpRequest) return render( request, 'app/index.html', { 'title':'主页', 'year':datetime.now().year, } ) #总览页面 def overview(request): """Renders the home page.""" assert isinstance(request, HttpRequest) teacher_class=[] try: teacher_class.append(request.user.TeacherClass) except: teacher_class.append(4) #获取所有班级的所有次考试,得到最近考试次数 class_all_full_prime=models.AllTest.objects.filter(Class=teacher_class[0]).values() class_testid=[int(i.get('TesT_id')) for i in list(class_all_full_prime)] now_testid=max(class_testid) #获得当前班级的所有次数考试,将其打包为各学科值 class_all_full=models.AllTest.objects.filter(Class=teacher_class[0]).values().order_by('TesT_id') class_all=list(class_all_full) #初始化列表 test_id,math,english,chinese,phy,bio,che=[],[],[],[],[],[],[] for i in class_all: test_id.append('第'+str(i.get('TesT_id'))+'次考试') math.append(i.get('Math2')) english.append(i.get('English2')) chinese.append(i.get('Chinese2')) phy.append(i.get('Physics2')) bio.append(i.get('Biology2')) che.append(i.get('Chemistry2')) #print(test_id,math,english,chinese,phy,bio,che) #获得第选定班级,最近一次考试的所有成绩 grade_all=models.ClassGrade.objects.filter(TEST_id=now_testid,CLASS=teacher_class[0]).values() grade=list(grade_all.order_by('-Total')) #获取趋势并计算 class_total=[int(i.get('total')) for i in list(class_all)] if len(class_total)>=2: class_trend=int(((class_total[-1]-class_total[-2])/class_total[-2])*100) else: class_trend=0 if class_trend>=0: class_trends='+'+str(class_trend) else: class_trends='-'+str(class_trend) #以趋势判断学生状态 if class_trend>=-3 and class_trend <=10: stauts='好' else: if class_trend>=10: stauts='非常好' else: stauts='差' #获取顺位并处理 class_rank= class_all_full_prime.filter(TesT_id=now_testid).order_by('total') temp=0 for i in class_rank: temp+=1 if i.get('Class')==teacher_class[0]: rank=temp else: rank='error' temp=0 #获取班级人数 class_people=models.ClassGrade.objects.filter(TEST_id=now_testid,CLASS=teacher_class[0]).count() #高考天数与月考天数 next_test=31-datetime.now().day next_test_pre=((datetime.now().day)/31)*100 final_test=(6%datetime.now().month)*30+(7-datetime.now().day) final_test_pre=((158-final_test)/158)*100 return render( request, 'app/GGindex.html', { 'title':'总览/Overview', 'year':datetime.now().year, 'next_test':next_test, 'next_test_precent':int(next_test_pre), 'final_test':final_test, 'final_test_precent':int(final_test_pre), 'now_testid':now_testid, 'next_testid':now_testid+1, 'people':class_people, 'trend':class_trends, 'grade':grade[:10], 'testid':test_id, 'math':math, 'chinese':chinese, 'english':english, 'phy':phy, 'bio':bio, 'che':che, 'rank':rank, 'stauts': stauts, } ) def datas_change(request): """Renders the datas page.""" assert isinstance(request, HttpRequest) teacher_class=[] try: teacher_class.append(request.user.TeacherClass) except: teacher_class.append(4) class_inside_prime=models.ClassGrade.objects.filter(CLASS=teacher_class[0]).values() class_inside=list(class_inside_prime) class_num=class_inside[0].get('CLASS') return render( request, 'app/datas_change.html', { 'title':'数据修改/Datas', 'message':'database contrl page.', 'year':datetime.now().year, 'class_info':class_inside, 'class':class_num, } ) def datas_search(request): """Renders the datas page.""" assert isinstance(request, HttpRequest) #使用GET方法从前端获取搜索学号 q = 
request.GET.get('q') #判别语句 search_res=models.ClassGrade.objects.filter(studentNum=q).order_by('TEST_id') #如果存在 if search_res: search_grade=list(search_res.values()) search_name=search_grade[0].get('STName') search_class=search_grade[0].get('CLASS') test_id,math,english,chinese,phy,bio,che,total=[],[],[],[],[],[],[],[] for i in search_grade: test_id.append('第'+str(i.get('TEST_id'))+'次考试') math.append(i.get('Math3')) english.append(i.get('English3')) chinese.append(i.get('Chinese3')) phy.append(i.get('Physics3')) bio.append(i.get('Biology3')) che.append(i.get('Chemistry3')) total.append(i.get('Total')) return render( request, 'app/datas_search.html', { 'title':'数据查询/Datas', 'message':'Database contrl page.', 'year':datetime.now().year, 'res':1, 'typecode':'has-success', 'searchmessage':'学号 '+q+' 搜索完成!', 'search_res':search_res, 'testid':test_id, 'math':math, 'chinese':chinese, 'english':english, 'phy':phy, 'bio':bio, 'che':che, 'total':total, 'name':search_name, 'class':search_class, } ) return render( request, 'app/datas_search.html', { 'title':'数据查询/Datas', 'message':'Database contrl page.', 'year':datetime.now().year, 'res':0, 'typecode':'', 'searchmessage':'', 'search_res':'', 'testid':'', 'math':'', 'chinese':'', 'english':'', 'phy':'', 'bio':'', 'che':'', 'total':'', 'name':'', 'class':'', } ) def datas_add(request): """Renders the predict page.""" assert isinstance(request, HttpRequest) request.encoding='utf-8' res='' if request.method =="POST": studentid = request.POST.get('studentid') testid = request.POST.get('testid') sub1 = request.POST.get('sub1') sub2 = request.POST.get('sub2') sub3 = request.POST.get('sub3') sub4 = request.POST.get('sub4') sub5 = request.POST.get('sub5') sub6 = request.POST.get('sub6') #数据库代码1 if studentid and testid and sub1 and sub2 and sub3 and sub4 and sub5 and sub6: total=int(sub1)+int(sub2)+int(sub3)+int(sub4)+int(sub5)+int(sub6) if request.POST.get('name') and request.POST.get('classid'): if models.ClassGrade.objects.filter(studentNum=studentid,TEST_id=testid): res='已有记录,无法添加此次考试' else: name = request.POST.get('name') classid = request.POST.get('classid') models.ClassGrade.objects.create(CLASS=classid,STName=name,studentNum=studentid,TEST_id=testid,Chinese3=sub1,Math3=sub2,English3=sub3,Physics3=sub4,Chemistry3=sub5,Biology3=sub6,Total=total) if models.ClassGrade.objects.filter(studentNum=studentid,TEST_id=testid): res='学号'+str(studentid)+'第'+str(testid)+'次考试,'+'学生'+name+'已成功入库!' else: res='学生'+name+'入库失败!' #数据库代码2 else: total=int(sub1)+int(sub2)+int(sub3)+int(sub4)+int(sub5)+int(sub6) if models.ClassGrade.objects.filter(studentNum=studentid,TEST_id=testid): res='已有记录,无法添加此次考试' else: if models.ClassGrade.objects.filter(studentNum=studentid): student = models.ClassGrade.objects.filter(studentNum=studentid).values() name=list(student)[0].get('STName') classid = list(student)[0].get('CLASS') models.ClassGrade.objects.create(CLASS=classid,STName=name,studentNum=studentid,TEST_id=testid,Chinese3=sub1,Math3=sub2,English3=sub3,Physics3=sub4,Chemistry3=sub5,Biology3=sub6,Total=total) if models.ClassGrade.objects.filter(studentNum=studentid,TEST_id=testid): res='学号'+str(studentid)+'第'+str(testid)+'次考试,'+'学生'+name+'已成功入库!' else: res='学生'+name+'入库失败!' 
else: res='并非已存在学生' else: res='参数传入失败,请检查传参格式' return render( request, 'app/datas_add.html', { 'title':'数据提交/Datas', 'message':'Database contrl page.', 'year':datetime.now().year, 'res':res } ) def datas_del(request): """Renders the predict page.""" assert isinstance(request, HttpRequest) request.encoding='utf-8' res='' if request.method =="POST": studentid = request.POST.get('studentid') testid = request.POST.get('testid') testnum = request.POST.get('testnum') #数据库代码1 if studentid and testid: student=models.ClassGrade.objects.filter(studentNum=studentid,TEST_id=testid).values() if student: name=list(student)[0].get('STName') models.ClassGrade.objects.filter(studentNum=studentid,TEST_id=testid).delete() if not models.ClassGrade.objects.filter(studentNum=studentid,TEST_id=testid): res='学生'+str(name)+'已成功删除第'+str(testid)+'条考试数据!' else: res='学生'+str(name)+'删除失败' else: res='学生删除失败,无对应记录!' #数据库代码2 else: if testnum: student=models.ClassGrade.objects.filter(id=testnum).values() if student: name=list(student)[0].get('STName') models.ClassGrade.objects.filter(id=testnum).delete() if not models.ClassGrade.objects.filter(studentNum=studentid,TEST_id=testid): res='学生'+str(name)+'已成功删除第'+str(testnum)+'条考试数据!' else: res='学生'+str(name)+'删除失败' else: res='学生删除失败,无对应记录!' else: res='参数传入失败,请检查传参格式' return render( request, 'app/datas_del.html', { 'title':'数据删除/Datas', 'message':'Database contrl page.', 'year':datetime.now().year, 'res':res } ) def class_predict(request): """Renders the predict page.""" assert isinstance(request, HttpRequest) res,message='','' teacher_class=[] try: teacher_class.append(request.user.TeacherClass) except: teacher_class.append(4) final_score=0 cost=0 optimal=[0,0,0] res=0 class_id='' test_id='' allgrade='' avgclass='' grade_fir='' grade_sec='' grade_thr='' grade_four='' grade_fiv='' message_tx='' grademin=0 grademax=0 if request.method =="POST": class_id=request.POST.get('class') hard=request.POST.get('hardrank') temp=[] if class_id and hard: #预测部份 if models.ClassGrade.objects.filter(CLASS=class_id): grade_frame=models.ClassGrade.objects.filter(CLASS=class_id).values() grade=list(grade_frame) grade_final=[] max_test=max([i.get('TEST_id') for i in grade]) for i in grade: if int(i.get('TEST_id'))>=(int(max_test)-4): grade_final.append(i) test_id=max(i.get('TEST_id') for i in grade) allgrade=list(models.AllClassGrade.objects.filter(test=test_id).values()) avgclass=list(models.AllTest.objects.filter(TesT_id=test_id,Class=class_id).values()) grade_fir=models.ClassGrade.objects.filter(CLASS=class_id,TEST_id=test_id,Total__gte=0,Total__lt=400).count() grade_sec=models.ClassGrade.objects.filter(CLASS=class_id,TEST_id=test_id,Total__gte=400,Total__lt=500).count() grade_thr=models.ClassGrade.objects.filter(CLASS=class_id,TEST_id=test_id,Total__gte=500,Total__lt=550).count() grade_four=models.ClassGrade.objects.filter(CLASS=class_id,TEST_id=test_id,Total__gte=550,Total__lt=600).count() grade_fiv=models.ClassGrade.objects.filter(CLASS=class_id,TEST_id=test_id,Total__gte=600,Total__lt=750).count() x,y=Pretreatment(grade_final,'Total',750) pre_model=pm.predict_model() pre_model.set_train(x,y) final_score,optimal=pre_model.get_res([5,float(hard)]) final_score=final_score*7.5 cost=pre_model.cost_function() grademin=int(final_score-cost/10) if int(final_score+cost/10)<=750: grademax=int(final_score+cost/10) else: grademax=750 message_tx='预测完成' res=1 if models.ClassGrade.objects.filter(CLASS=class_id).count()<=30: message='本次预测数据小于30个,模型可信度<1%' else : if 
80>models.ClassGrade.objects.filter(CLASS=class_id).count()>30: message='本次预测数据介于30-80个,模型可信度接近15%' else: if 200>models.ClassGrade.objects.filter(CLASS=class_id).count()>80: message='本次预测数据介于80-200个,模型可信度接近60%' else: message='本次预测数据大于200个,模型可信度到达70%以上' #普通数据 else: message_tx='无此班数据' else: message_tx='传参错误' return render( request, 'app/class_predict.html', { 'title':'班级预测/Predict', 'message':'Predict page.', 'year':datetime.now().year, 'message':message_tx, 'class':teacher_class, 'premessage':message, 'res':res, 'score':int(final_score), 'theta1':float(optimal[0]), 'theta2':float(optimal[1]), 'theta3':float(optimal[2]), 'grademin':grademin, 'grademax':grademax, 'classid':class_id, 'testid':test_id, 'avgclass':avgclass, 'avgall':allgrade, 'grade_fir':grade_fir, 'grade_sec':grade_sec, 'grade_thr':grade_thr, 'grade_four':grade_four, 'grade_fiv':grade_fiv, } ) def subject_predict(request): """Renders the predict page.""" assert isinstance(request, HttpRequest) res,message='','' teacher_class=[] try: teacher_class.append(request.user.TeacherClass) except: teacher_class.append(4) final_score=0 cost=0 optimal=[0,0,0] res=0 sub_name='' test_id='' allgrade='' avgclass='' grade_fir='' grade_sec='' grade_thr='' grade_four='' grade_fiv='' message_tx='' max_score=0 grademin=0 grademax=0 pass_1,unpass_1,pass_2,unpass_2,pass_3,unpass_3=0,0,0,0,0,0 grade_5,grade_4,grade_3,grade_2,grade_1,grade_0,grade_t=0,0,0,0,0,0,0 if request.method =="POST": sub_name=request.POST.get('sub') hard=request.POST.get('hardrank') temp=[] if sub_name and hard: max_score,sub_key=subjects(sub_name) if max_score==0: message_tx='参数接受错误,请检查格式是否正确' else: if models.ClassGrade.objects.values(sub_key): grade_frame=models.ClassGrade.objects.values() grade=list(grade_frame) test_id=max(i.get('TEST_id') for i in grade) grade_final=[] max_test=max([i.get('TEST_id') for i in grade]) for i in grade: if int(i.get('TEST_id'))>=(int(max_test)-4): grade_final.append(i) test_id=max(i.get('TEST_id') for i in grade) allgrade=list(models.AllClassGrade.objects.filter(test=test_id).values()) avgclass=list(models.AllTest.objects.filter(TesT_id=test_id).values()) target_sub=models.ClassGrade.objects.values(sub_key).filter(TEST_id=test_id) print(target_sub) for i in list(target_sub): if i.get(sub_key)<=70: grade_5+=1 if 70<i.get(sub_key)<=80: grade_t+=1 if 80<i.get(sub_key)<=90: grade_4+=1 if 90<i.get(sub_key)<=100: grade_3+=1 if 100<i.get(sub_key)<=110: grade_2+=1 if 110<i.get(sub_key)<=130: grade_1+=1 if 130<i.get(sub_key)<=150: grade_0+=1 pass_grade=models.ClassGrade.objects.values(sub_key).filter(TEST_id=test_id) for i in list(pass_grade): if i.get(sub_key)>=max_score*0.6: pass_1+=1 if i.get(sub_key)<max_score*0.6: unpass_1+=1 pass_grade=models.ClassGrade.objects.values(sub_key).filter(TEST_id=int(test_id)-1) for i in list(pass_grade): if i.get(sub_key)>=max_score*0.6: pass_2+=1 if i.get(sub_key)<max_score*0.6: unpass_2+=1 pass_grade=models.ClassGrade.objects.values(sub_key).filter(TEST_id=int(test_id)-2) for i in list(pass_grade): if i.get(sub_key)>=max_score*0.6 : pass_3+=1 if i.get(sub_key)<max_score*0.6 : unpass_3+=1 #预测部份 x,y=Pretreatment(grade_final,sub_key,max_score) pre_model=pm.predict_model() pre_model.set_train(x,y) final_score,optimal=pre_model.get_res([5,float(hard)]) final_score=final_score*(max_score/100) cost=pre_model.cost_function() grademin=int(final_score-cost/10) if int(final_score+cost/10)<=150: grademax=int(final_score+cost/10) else: grademax=150 message_tx='预测完成' res=1 if models.ClassGrade.objects.values(sub_key).count()<=30: 
message='本次预测数据小于30个,模型可信度<1%' else : if 80>models.ClassGrade.objects.values(sub_key).count()>30: message='本次预测数据介于30-80个,模型可信度接近15%' else: if 200>models.ClassGrade.objects.values(sub_key).count()>80: message='本次预测数据介于80-200个,模型可信度接近60%' else: message='本次预测数据大于200个,模型可信度到达70%以上' #普通数据 else: message_tx='无此班数据' else: message_tx='传参错误' return render( request, 'app/subject_predict.html', { 'title':'科目预测/Predict', 'message':'Predict page.', 'year':datetime.now().year, 'message':message_tx, 'premessage':message, 'res':res, 'score':int(final_score), 'theta1':float(optimal[0]), 'theta2':float(optimal[1]), 'theta3':float(optimal[2]), 'grademin':grademin, 'grademax':grademax, 'max_score':max_score, 'subname':sub_name, 'testid':test_id, 'avgclass':avgclass, 'avgall':allgrade, 'grade_fir':grade_1, 'grade_sec':grade_2, 'grade_thr':grade_3, 'grade_four':grade_4, 'grade_fiv':grade_5, 'grade_zero':grade_0, 'grade_temp':grade_t, 'pass1':pass_1, 'unpass1':unpass_1, 'pass2':pass_2, 'unpass2':unpass_2, 'pass3':pass_3, 'unpass3':unpass_3, } ) def class_analysis(request): """Renders the analysis page.""" assert isinstance(request, HttpRequest) res,message,class_id,test_id='','','','' teacher_class=[] max_grade,min_grade,avg_grade,in2_count=[],[],[],[] test_id_temp=[] lastin200,max_min,trend='','','' try: teacher_class.append(request.user.TeacherClass) except: teacher_class.append(4) if request.method =="POST": class_id=request.POST.get('class') if class_id: target=models.ClassGrade.objects.filter(CLASS=class_id) if target: test_id=list(models.AllTest.objects.filter(Class=class_id).values('TesT_id').order_by('TesT_id')) for i in test_id: temp=models.ClassGrade.objects.filter(TEST_id=i.get('TesT_id')).order_by('-Total').values() if list(temp): max_grade.append(list(temp)[0].get('Total')) temp=models.ClassGrade.objects.filter(TEST_id=i.get('TesT_id')).order_by('Total').values() if list(temp): min_grade.append(list(temp)[0].get('Total')) temp=models.AllTest.objects.filter(TesT_id=i.get('TesT_id')).values() if list(temp): avg_grade.append(list(temp)[0].get('total')) temp=models.ClassGrade.objects.filter(TEST_id=i.get('TesT_id')).order_by('-Total').values() count=0 for j in list(temp)[:200]: if j.get('CLASS')==class_id: count+=1 in2_count.append(count) res=1 try: lastin200=in2_count[-1] except: lastin200='无' try: max_min=max_grade[-1]-min_grade[-1] except: max_min='无数据' try: if avg_grade[-1]-avg_grade[-2]>=0: trend='上升' else: trend='下降' except: trend='无法判断' test_id=[int(i.get('TesT_id')) for i in list(models.AllTest.objects.filter(Class=class_id).values('TesT_id').order_by('TesT_id'))] else: message='此班无数据!' else: message='传参错误,请检查!' 
return render( request, 'app/class_analysis.html', { 'title':'班级分析/Analysis', 'message':'Analysis page.', 'year':datetime.now().year, 'res':res, 'anmessage':message, 'tcclass':teacher_class, 'class':class_id, 'testcount':test_id, 'max_grade':max_grade, 'min_grade':min_grade, 'avg_grade':avg_grade, 'in200':in2_count, 'trend':trend, 'lastin200':lastin200, 'max_min':max_min, } ) def subject_analysis(request): """Renders the analysis page.""" assert isinstance(request, HttpRequest) class_id,test_id,rank='','','' trend,avg_grade,max_in5,min_in5,max_math,max_chinese,max_english,max_bio,max_che,max_phy=0,'','','','','','','','','' teacher_class=[] try: teacher_class.append(request.user.TeacherClass) except: teacher_class.append(4) #获取所有班级的所有次考试,得到最近考试次数 class_all_full_prime=models.AllTest.objects.filter(Class=teacher_class[0]).values() class_testid=[int(i.get('TesT_id')) for i in list(class_all_full_prime)] now_testid=max(class_testid) #获取顺位并处理 class_rank=class_all_full_prime.filter(TesT_id=now_testid).order_by('total') temp=0 for i in class_rank: temp+=1 if i.get('Class')==teacher_class[0]: rank=temp else: rank='error' temp=0 avg_grade_prime=models.AllTest.objects.filter(Class=teacher_class[0],TesT_id=now_testid).values() if avg_grade_prime: avg_grade=list(avg_grade_prime)[0].get('total') else: avg_grade='error' #获得当前班级的所有次数考试,将其打包为各学科值 class_all_full=models.AllTest.objects.filter(Class=teacher_class[0]).values().order_by('TesT_id') class_all=list(class_all_full) #获取趋势并计算 class_total=[int(i.get('total')) for i in list(class_all)] if len(class_total)>=2: trend=int(((class_total[-1]-class_total[-2])/class_total[-2])*100) else: trend=0 if trend>=0: trend='+'+str(trend) else: trend='-'+str(trend) grade_prime=models.ClassGrade.objects.filter(CLASS=teacher_class[0],TEST_id=now_testid) max_math_prime=grade_prime.order_by('-Math3').values() max_math=list(max_math_prime)[0] max_chinese_prime=grade_prime.order_by('-Chinese3').values() max_chinese=list(max_chinese_prime)[0] max_eng_prime=grade_prime.order_by('-English3').values() max_english=list(max_eng_prime)[0] max_bio_prime=grade_prime.order_by('-Biology3').values() max_bio=list(max_bio_prime)[0] max_che_prime=grade_prime.order_by('-Chemistry3').values() max_che=list(max_che_prime)[0] max_phy_prime=grade_prime.order_by('-Physics3').values() max_phy=list(max_phy_prime)[0] max_in5=list(grade_prime.order_by('-Total').values())[:5] min_in5=list(grade_prime.order_by('Total').values())[:5] return render( request, 'app/subject_analysis.html', { 'title':'学况速览/Analysis', 'message':'Analysis page.', 'year':datetime.now().year, 'rank':rank, 'class':teacher_class, 'trend':trend, 'avg_grade':avg_grade, 'max_in5':max_in5, 'min_in5':min_in5, 'max_math':max_math, 'max_chinese':max_chinese, 'max_english':max_english, 'max_bio':max_bio, 'max_che':max_che, 'max_phy':max_phy, } ) def register(request): """Renders the home page.""" assert isinstance(request, HttpRequest) return render( request, 'app/register.html', { 'title':'注册', 'year':datetime.now().year, } ) def page_not_found(request,exception,template_name='error/404.html'): return render(request,template_name) def page_error(request,template_name='error/500.html'): #404 return render(request,template_name) def permission_denied(request,exception,template_name='error/403.html'): #403 return render(request, template_name) def bad_request(request,exception,template_name='error/400.html'): #400 return render(request, temptale_name) def Pretreatment(data,target,full): x1=[int(i.get('TEST_id')) for i in data] 
y=[i.get(str(target)) for i in data] x2=[] temp1=[] temp2=[] temp3=[] xy=list(zip(x1,y)) max_id=max([i[0] for i in xy]) for i in range(max_id): y_same=[] count=0 for j in xy: if i+1==j[0]: y_same.append(j[1]) count+=1 for _ in range(count): x2.append(hardrank(y_same,full)) for i in x1: temp1.append([i-int(max_id)+4]) for j in x2: temp2.append([j]) for i in y: i=(i/full)*100 temp3.append([i]) y=temp3 x=np.hstack((temp1,temp2)) return x,y def hardrank(y,full): temp=0 for i in y: temp+=i avg=temp/len(y) return 1-avg/full def subjects(sub_name): max_score=0 sub_key='' if sub_name=='数学': sub_key='Math3' max_score=150 if sub_name=='语文': sub_key='Chinese3' max_score=150 if sub_name=='英语': sub_key='English3' max_score=150 if sub_name=='生物': sub_key='Biology3' max_score=90 if sub_name=='化学': sub_key='Chemistry3' max_score=100 if sub_name=='物理': sub_key='Physics3' max_score=110 return max_score,sub_key
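# A minimal, illustrative sketch (the sample records below are hypothetical,
# not taken from the real database) of the feature layout Pretreatment()
# feeds to the regression model: column 0 is the shifted test index, column 1
# the hardrank() difficulty of that test, and y holds scores on a 0-100 scale.
if __name__ == '__main__':
    sample = [
        {'TEST_id': 1, 'Total': 540},
        {'TEST_id': 1, 'Total': 600},
        {'TEST_id': 2, 'Total': 570},
    ]
    x_demo, y_demo = Pretreatment(sample, 'Total', 750)
    print(x_demo)  # e.g. [[3. 0.24] [3. 0.24] [4. 0.24]]
    print(y_demo)  # [[72.0], [80.0], [76.0]]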
[ "django.shortcuts.render", "datetime.datetime.now", "numpy.hstack" ]
[((27989, 28019), 'django.shortcuts.render', 'render', (['request', 'template_name'], {}), '(request, template_name)\n', (27995, 28019), False, 'from django.shortcuts import render\n'), ((28096, 28126), 'django.shortcuts.render', 'render', (['request', 'template_name'], {}), '(request, template_name)\n', (28102, 28126), False, 'from django.shortcuts import render\n'), ((28220, 28250), 'django.shortcuts.render', 'render', (['request', 'template_name'], {}), '(request, template_name)\n', (28226, 28250), False, 'from django.shortcuts import render\n'), ((28339, 28369), 'django.shortcuts.render', 'render', (['request', 'temptale_name'], {}), '(request, temptale_name)\n', (28345, 28369), False, 'from django.shortcuts import render\n'), ((29029, 29054), 'numpy.hstack', 'np.hstack', (['(temp1, temp2)'], {}), '((temp1, temp2))\n', (29038, 29054), True, 'import numpy as np\n'), ((2664, 2678), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2676, 2678), False, 'from datetime import datetime\n'), ((446, 460), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (458, 460), False, 'from datetime import datetime\n'), ((2703, 2717), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2715, 2717), False, 'from datetime import datetime\n'), ((2777, 2791), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2789, 2791), False, 'from datetime import datetime\n'), ((2971, 2985), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2983, 2985), False, 'from datetime import datetime\n'), ((4194, 4208), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4206, 4208), False, 'from datetime import datetime\n'), ((6129, 6143), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6141, 6143), False, 'from datetime import datetime\n'), ((9418, 9432), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9430, 9432), False, 'from datetime import datetime\n'), ((11254, 11268), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11266, 11268), False, 'from datetime import datetime\n'), ((14820, 14834), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14832, 14834), False, 'from datetime import datetime\n'), ((20699, 20713), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (20711, 20713), False, 'from datetime import datetime\n'), ((24236, 24250), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (24248, 24250), False, 'from datetime import datetime\n'), ((27219, 27233), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (27231, 27233), False, 'from datetime import datetime\n'), ((27865, 27879), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (27877, 27879), False, 'from datetime import datetime\n'), ((2749, 2763), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2761, 2763), False, 'from datetime import datetime\n'), ((5404, 5418), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5416, 5418), False, 'from datetime import datetime\n')]
import fileinput
import math
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import re
from scipy import interpolate  # straight up linear interpolation, nothing fancy
import scipy.signal as signal

yaw_interp = None
pitch_interp = None
roll_interp = None
north_interp = None
east_interp = None
down_interp = None

def load_horiz(filename):
    global roll_interp
    global pitch_interp
    data = pd.read_csv(filename)
    data.set_index('flight time (sec)', inplace=True, drop=False)

    # time range / hz
    tmin = data['flight time (sec)'].min()
    tmax = data['flight time (sec)'].max()
    span_sec = tmax - tmin
    feat_count = len(data['flight time (sec)'])
    print("number of video records:", feat_count)
    hz = int(round((feat_count / span_sec)))

    # smooth the attitude error channels with a low-pass Butterworth filter
    cutoff_hz = 1
    b, a = signal.butter(2, cutoff_hz, fs=hz)
    data['ekf roll error (rad)'] = \
        signal.filtfilt(b, a, data['ekf roll error (rad)'])
    data['ekf pitch error (rad)'] = \
        signal.filtfilt(b, a, data['ekf pitch error (rad)'])

    if False:
        plt.figure()
        plt.plot(data['ekf roll error (rad)'], label="roll error")
        plt.plot(data['ekf pitch error (rad)'], label="pitch error")
        plt.xlabel("Flight time (sec)")
        plt.ylabel("Rad")
        plt.legend()
        plt.show()

    # interpolators
    roll_interp = interpolate.interp1d(data['flight time (sec)'],
                                     data['ekf roll error (rad)'],
                                     bounds_error=False, fill_value=0.0)
    pitch_interp = interpolate.interp1d(data['flight time (sec)'],
                                      data['ekf pitch error (rad)'],
                                      bounds_error=False, fill_value=0.0)

def load_old(filename):
    global yaw_interp
    global pitch_interp
    global roll_interp
    global north_interp
    global east_interp
    global down_interp
    f = fileinput.input(filename)
    table = []
    for line in f:
        tokens = re.split(r'[,\s]+', line.rstrip())
        time = float(tokens[0])
        yaw_error = float(tokens[1])
        pitch_error = float(tokens[2])
        roll_error = float(tokens[3])
        n_error = float(tokens[4])
        e_error = float(tokens[5])
        d_error = float(tokens[6])
        table.append( [ time, yaw_error, pitch_error, roll_error,
                        n_error, e_error, d_error ] )
    array = np.array(table)
    x = array[:,0]
    yaw_interp = interpolate.interp1d(x, array[:,1],
                                    bounds_error=False, fill_value=0.0)
    pitch_interp = interpolate.interp1d(x, array[:,2],
                                      bounds_error=False, fill_value=0.0)
    roll_interp = interpolate.interp1d(x, array[:,3],
                                     bounds_error=False, fill_value=0.0)
    north_interp = interpolate.interp1d(x, array[:,4],
                                      bounds_error=False, fill_value=0.0)
    east_interp = interpolate.interp1d(x, array[:,5],
                                     bounds_error=False, fill_value=0.0)
    down_interp = interpolate.interp1d(x, array[:,6],
                                     bounds_error=False, fill_value=0.0)
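# A minimal usage sketch (the CSV name and sample time are hypothetical): once
# load_horiz() has run, the module-level interp1d objects can be sampled at
# any flight time; outside the logged range they return the fill value 0.0.
if __name__ == '__main__':
    load_horiz('flight_horiz.csv')  # assumed log with the columns used above
    t = 12.5                        # arbitrary flight time in seconds
    print('roll error (rad):', float(roll_interp(t)))
    print('pitch error (rad):', float(pitch_interp(t)))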
[ "pandas.read_csv", "matplotlib.pyplot.ylabel", "scipy.signal.filtfilt", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "scipy.signal.butter", "scipy.interpolate.interp1d", "numpy.array", "matplotlib.pyplot.figure", "fileinput.input", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ...
[((429, 450), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (440, 450), True, 'import pandas as pd\n'), ((839, 873), 'scipy.signal.butter', 'signal.butter', (['(2)', 'cutoff_hz'], {'fs': 'hz'}), '(2, cutoff_hz, fs=hz)\n', (852, 873), True, 'import scipy.signal as signal\n'), ((919, 970), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', "data['ekf roll error (rad)']"], {}), "(b, a, data['ekf roll error (rad)'])\n", (934, 970), True, 'import scipy.signal as signal\n'), ((1017, 1069), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', "data['ekf pitch error (rad)']"], {}), "(b, a, data['ekf pitch error (rad)'])\n", (1032, 1069), True, 'import scipy.signal as signal\n'), ((1387, 1505), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["data['flight time (sec)']", "data['ekf roll error (rad)']"], {'bounds_error': '(False)', 'fill_value': '(0.0)'}), "(data['flight time (sec)'], data['ekf roll error (rad)'\n ], bounds_error=False, fill_value=0.0)\n", (1407, 1505), False, 'from scipy import interpolate\n'), ((1520, 1639), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (["data['flight time (sec)']", "data['ekf pitch error (rad)']"], {'bounds_error': '(False)', 'fill_value': '(0.0)'}), "(data['flight time (sec)'], data[\n 'ekf pitch error (rad)'], bounds_error=False, fill_value=0.0)\n", (1540, 1639), False, 'from scipy import interpolate\n'), ((1817, 1842), 'fileinput.input', 'fileinput.input', (['filename'], {}), '(filename)\n', (1832, 1842), False, 'import fileinput\n'), ((2344, 2359), 'numpy.array', 'np.array', (['table'], {}), '(table)\n', (2352, 2359), True, 'import numpy as np\n'), ((2396, 2468), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'array[:, 1]'], {'bounds_error': '(False)', 'fill_value': '(0.0)'}), '(x, array[:, 1], bounds_error=False, fill_value=0.0)\n', (2416, 2468), False, 'from scipy import interpolate\n'), ((2487, 2559), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'array[:, 2]'], {'bounds_error': '(False)', 'fill_value': '(0.0)'}), '(x, array[:, 2], bounds_error=False, fill_value=0.0)\n', (2507, 2559), False, 'from scipy import interpolate\n'), ((2577, 2649), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'array[:, 3]'], {'bounds_error': '(False)', 'fill_value': '(0.0)'}), '(x, array[:, 3], bounds_error=False, fill_value=0.0)\n', (2597, 2649), False, 'from scipy import interpolate\n'), ((2668, 2740), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'array[:, 4]'], {'bounds_error': '(False)', 'fill_value': '(0.0)'}), '(x, array[:, 4], bounds_error=False, fill_value=0.0)\n', (2688, 2740), False, 'from scipy import interpolate\n'), ((2758, 2830), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'array[:, 5]'], {'bounds_error': '(False)', 'fill_value': '(0.0)'}), '(x, array[:, 5], bounds_error=False, fill_value=0.0)\n', (2778, 2830), False, 'from scipy import interpolate\n'), ((2848, 2920), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'array[:, 6]'], {'bounds_error': '(False)', 'fill_value': '(0.0)'}), '(x, array[:, 6], bounds_error=False, fill_value=0.0)\n', (2868, 2920), False, 'from scipy import interpolate\n'), ((1093, 1105), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1103, 1105), True, 'from matplotlib import pyplot as plt\n'), ((1114, 1172), 'matplotlib.pyplot.plot', 'plt.plot', (["data['ekf roll error (rad)']"], {'label': '"""roll error"""'}), "(data['ekf roll error (rad)'], label='roll error')\n", (1122, 1172), True, 'from matplotlib import pyplot as plt\n'), ((1181, 1241), 'matplotlib.pyplot.plot', 'plt.plot', (["data['ekf pitch error (rad)']"], {'label': '"""pitch error"""'}), "(data['ekf pitch error (rad)'], label='pitch error')\n", (1189, 1241), True, 'from matplotlib import pyplot as plt\n'), ((1250, 1281), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Flight time (sec)"""'], {}), "('Flight time (sec)')\n", (1260, 1281), True, 'from matplotlib import pyplot as plt\n'), ((1290, 1307), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rad"""'], {}), "('Rad')\n", (1300, 1307), True, 'from matplotlib import pyplot as plt\n'), ((1316, 1328), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1326, 1328), True, 'from matplotlib import pyplot as plt\n'), ((1337, 1347), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1345, 1347), True, 'from matplotlib import pyplot as plt\n')]