code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
from nvidia.dali.backend_impl import TensorListGPU
import subprocess
import os
import sys
import random
def get_dali_extra_path():
    """Return the DALI_EXTRA_PATH environment variable, or '.' with a warning."""
    dali_extra_path = os.environ.get('DALI_EXTRA_PATH')
    if dali_extra_path is None:
        print("WARNING: DALI_EXTRA_PATH not initialized.", file=sys.stderr)
        dali_extra_path = "."
    return dali_extra_path
# These helpers import heavy modules on demand so that tests which do not use
# numpy / matplotlib-related utilities do not pay the dependency cost.
# Module-level slots below are filled in lazily by the import_* helpers.
np = None                  # numpy module, set by import_numpy()
assert_array_equal = None  # numpy.testing.assert_array_equal, set by import_numpy()
assert_allclose = None     # numpy.testing.assert_allclose, set by import_numpy()
cp = None                  # cupy module, set lazily in py_buffer_from_address()
def import_numpy():
    """Lazily import numpy and its testing helpers into this module's globals."""
    global np
    global assert_array_equal
    global assert_allclose
    # The imports bind the declared globals, making numpy available module-wide.
    import numpy as np
    from numpy.testing import assert_array_equal, assert_allclose
Image = None  # PIL.Image module, set lazily by import_pil()
def import_pil():
    """Lazily import PIL.Image into the module global `Image`."""
    global Image
    from PIL import Image
def save_image(image, file_name):
    """Rescale `image` to the uint8 range (for float32 inputs) and save it via PIL.

    float32 images are mapped to [0, 255] according to their observed value
    range; all other dtypes are passed to PIL unchanged.
    """
    import_numpy()
    import_pil()
    if image.dtype == np.float32:
        # Renamed from min/max to avoid shadowing the builtins.
        lo = np.min(image)
        hi = np.max(image)
        if lo >= 0 and hi <= 1:
            image = image * 256
        elif lo >= -1 and hi <= 1:
            image = ((image + 1) * 128)
        elif lo >= -128 and hi <= 127:
            image = image + 128
        else:
            # Fix: the original called np.iinfo on a float dtype, which raises
            # ValueError.  Rescale by the actual value range instead.
            image = (image - lo) * (255.0 / (hi - lo))
        image = image.astype(np.uint8)
    Image.fromarray(image).save(file_name)
def get_gpu_num():
    """Count available GPUs by parsing the output of `nvidia-smi -L`."""
    proc = subprocess.Popen(['nvidia-smi', '-L'], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True)
    stdout_text = proc.communicate()[0]
    # One non-empty line is printed per GPU.
    gpu_lines = [line for line in stdout_text.split('\n') if len(line) > 0]
    return len(gpu_lines)
# If the `max_allowed_error` is not None, it's checked instead of comparing mean error with `eps`.
def check_batch(batch1, batch2, batch_size, eps=1e-07, max_allowed_error=None):
    """Compare `batch_size` corresponding samples of two batches.

    `batch1`/`batch2` may be TensorLists (CPU or GPU), lists of Tensors, or
    lists of numpy arrays.  If `max_allowed_error` is not None it is checked
    against the per-sample max absolute difference instead of comparing the
    mean error with `eps`.  Raises AssertionError on mismatch.
    """
    def is_error(mean_err, max_err, eps, max_allowed_error):
        # The max-error bound, when given, takes precedence over the mean bound.
        if max_allowed_error is not None:
            if max_err > max_allowed_error:
                return True
        elif mean_err > eps:
            return True
        return False
    import_numpy()
    if isinstance(batch1, dali.backend_impl.TensorListGPU):
        batch1 = batch1.as_cpu()
    if isinstance(batch2, dali.backend_impl.TensorListGPU):
        batch2 = batch2.as_cpu()
    for i in range(batch_size):
        # This allows to handle list of Tensors, list of np arrays and TensorLists
        left = np.array(batch1[i])
        right = np.array(batch2[i])
        is_failed = False
        # Fix: pre-initialize the statistics so that the error report below
        # cannot raise NameError when the error computation itself fails.
        err = max_err = min_err = total_errors = absdiff_size = None
        assert left.shape == right.shape, \
            "Shape mismatch {} != {}".format(left.shape, right.shape)
        assert left.size == right.size, \
            "Size mismatch {} != {}".format(left.size, right.size)
        if left.size != 0:
            try:
                # abs doesn't handle overflow for uint8, so get minimal value of a-b and b-a
                diff1 = np.abs(left - right)
                diff2 = np.abs(right - left)
                absdiff = np.minimum(diff2, diff1)
                err = np.mean(absdiff)
                max_err = np.max(absdiff)
                min_err = np.min(absdiff)
                total_errors = np.sum(absdiff != 0)
                absdiff_size = absdiff.size
            except Exception:  # Fix: was a bare except, swallowing SystemExit too
                is_failed = True
            if is_failed or is_error(err, max_err, eps, max_allowed_error):
                error_msg = ("Mean error: [{}], Min error: [{}], Max error: [{}]" +
                             "\n Total error count: [{}], Tensor size: [{}], Error calculation failed: [{}]").format(
                    err, min_err, max_err, total_errors, absdiff_size, is_failed)
                try:
                    save_image(left, "err_1.png")
                    save_image(right, "err_2.png")
                except Exception:
                    print("Batch at {} can't be saved as an image".format(i))
                    print(left)
                    print(right)
                assert False, error_msg
def compare_pipelines(pipe1, pipe2, batch_size, N_iterations, eps = 1e-07):
    """Build both pipelines, run them N_iterations times and assert that every
    output batch matches within `eps` (via check_batch)."""
    pipe1.build()
    pipe2.build()
    for _ in range(N_iterations):
        outs1 = pipe1.run()
        outs2 = pipe2.run()
        assert len(outs1) == len(outs2)
        for out1_i, out2_i in zip(outs1, outs2):
            # Move GPU outputs to the CPU before comparing.
            data1 = out1_i.as_cpu() if isinstance(out1_i[0], dali.backend_impl.TensorGPU) else out1_i
            data2 = out2_i.as_cpu() if isinstance(out2_i[0], dali.backend_impl.TensorGPU) else out2_i
            check_batch(data1, data2, batch_size, eps)
    print("OK: ({} iterations)".format(N_iterations))
class RandomDataIterator(object):
    """Iterator that yields the same pre-generated batch of random samples forever."""
    # Runs at class-creation time, so numpy must be importable on module load;
    # it also makes `np` available for the default dtype argument below.
    import_numpy()
    def __init__(self, batch_size, shape=(10, 600, 800, 3), dtype=np.uint8):
        # batch_size: number of samples in the batch
        # shape: shape of every generated sample
        # dtype: numpy dtype; float32 samples lie in [-0.5, 0.5), others in [0, 255)
        self.batch_size = batch_size
        self.test_data = []
        for _ in range(self.batch_size):
            # NOTE(review): reseeding with 0 on every iteration makes all
            # samples of the batch identical -- confirm this is intended.
            np.random.seed(0)
            if dtype == np.float32:
                self.test_data.append(
                    np.array(np.random.rand(*shape) * (1.0), dtype=dtype) - 0.5)
            else:
                self.test_data.append(
                    np.array(np.random.rand(*shape) * 255, dtype=dtype))
    def __iter__(self):
        self.i = 0
        self.n = self.batch_size
        return self
    def __next__(self):
        # The same pre-generated batch is returned on every call; only the
        # internal counter advances.
        batch = self.test_data
        self.i = (self.i + 1) % self.n
        return (batch)
    next = __next__
class RandomlyShapedDataIterator(object):
    """Iterator yielding batches of random samples with randomly chosen shapes.

    The internal seed is advanced after every batch, so consecutive batches
    have different (but reproducible) contents and shapes.
    """
    # Runs at class-creation time; makes `np` available for the default dtype.
    import_numpy()
    def __init__(self, batch_size, min_shape=None, max_shape=(10, 600, 800, 3), seed=12345, dtype=np.uint8):
        # min_shape: per-dimension lower bound; if None, each dimension is
        #            scaled from max_shape by a random factor in [0.5, 1.0)
        # max_shape: per-dimension upper bound for the sample shapes
        self.batch_size = batch_size
        self.test_data = []
        self.min_shape = min_shape
        self.max_shape = max_shape
        self.dtype = dtype
        self.seed = seed
    def __iter__(self):
        self.i = 0
        self.n = self.batch_size
        return self
    def __next__(self):
        # Reseed both RNGs from the current seed so each batch is reproducible.
        np.random.seed(self.seed)
        random.seed(self.seed)
        self.test_data = []
        for _ in range(self.batch_size):
            # Scale between 0.5 and 1.0
            if self.min_shape is None:
                shape = [int(self.max_shape[dim] * (0.5 + random.random()*0.5)) for dim in range(len(self.max_shape))]
            else:
                shape = [random.randint(min_s, max_s) for min_s, max_s in zip(self.min_shape, self.max_shape)]
            if self.dtype == np.float32:
                self.test_data.append(
                    np.array(np.random.rand(*shape) * (1.0), dtype=self.dtype) - 0.5)
            else:
                self.test_data.append(
                    np.array(np.random.rand(*shape) * 255, dtype=self.dtype))
        batch = self.test_data
        self.i = (self.i + 1) % self.n
        # Advance the seed so the next batch differs from this one.
        self.seed = self.seed + 12345678;
        return (batch)
    next = __next__
class ConstantDataIterator(object):
    """Iterator that forever yields a batch of identical constant-valued samples."""
    # Runs at class-creation time so numpy is available for __init__.
    import_numpy()
    def __init__(self, batch_size, sample_data, dtype):
        # Every sample in the batch is an independent copy of sample_data.
        self.batch_size = batch_size
        self.test_data = [np.array(sample_data, dtype=dtype)
                          for _ in range(self.batch_size)]
    def __iter__(self):
        self.i = 0
        self.n = self.batch_size
        return self
    def __next__(self):
        # Same batch every call; only the internal counter advances.
        result = self.test_data
        self.i = (self.i + 1) % self.n
        return (result)
    next = __next__
def check_output(outputs, ref_out, ref_is_list_of_outputs = None):
    """Checks the outputs of the pipeline.
    `outputs`
        return value from pipeline `run`
    `ref_out`
        a batch or tuple of batches
    `ref_is_list_of_outputs`
        only meaningful when there's just one output - if True, ref_out is a
        one-element list containing a single batch for output 0; otherwise
        ref_out _is_ a batch
    """
    if ref_is_list_of_outputs is None:
        ref_is_list_of_outputs = len(outputs) > 1
    assert ref_is_list_of_outputs or len(outputs) == 1
    for idx, out in enumerate(outputs):
        ref = ref_out[idx] if ref_is_list_of_outputs else ref_out
        if isinstance(out, dali.backend_impl.TensorListGPU):
            out = out.as_cpu()
        for i in range(len(out)):
            matches = np.array_equal(out[i], ref[i])
            if not matches:
                # Dump both sides before failing to ease debugging.
                print("Out: ", out.at(i))
                print("Ref: ", ref[i])
            assert matches
def dali_type(t):
    """Map a numpy scalar type to the corresponding DALI type enum.

    Returns None for None; raises TypeError for unsupported types.
    """
    if t is None:
        return None
    # Dispatch table built at call time (np is populated lazily by import_numpy).
    np_to_dali = {
        np.float32: types.FLOAT,
        np.uint8: types.UINT8,
        np.int8: types.INT8,
        np.uint16: types.UINT16,
        np.int16: types.INT16,
        np.uint32: types.UINT32,
        np.int32: types.INT32,
    }
    if t in np_to_dali:
        return np_to_dali[t]
    raise TypeError("Unsupported type: " + str(t))
def py_buffer_from_address(address, shape, dtype, gpu = False):
    """Wrap a raw memory address as a numpy (or, for gpu=True, cupy) array
    without copying the underlying data.

    `dtype` is an array-interface typestr such as '<i4'.
    """
    iface = {'data': (address, False), 'shape': tuple(shape), 'typestr': dtype}
    class py_holder(object):
        pass
    holder = py_holder()
    # Expose the buffer through both the CPU and CUDA array interfaces.
    holder.__array_interface__ = iface
    holder.__cuda_array_interface__ = iface
    if gpu:
        global cp
        import cupy as cp
        return cp.asanyarray(holder)
    return np.array(holder, copy=False)
| [
"numpy.abs",
"PIL.Image.fromarray",
"numpy.mean",
"numpy.minimum",
"numpy.random.rand",
"subprocess.Popen",
"numpy.iinfo",
"random.seed",
"numpy.max",
"numpy.array",
"numpy.sum",
"numpy.array_equal",
"numpy.random.seed",
"numpy.min",
"cupy.asanyarray",
"random.random",
"random.randin... | [((2201, 2317), 'subprocess.Popen', 'subprocess.Popen', (["['nvidia-smi', '-L']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'universal_newlines': '(True)'}), "(['nvidia-smi', '-L'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, universal_newlines=True)\n", (2217, 2317), False, 'import subprocess\n'), ((1706, 1719), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (1712, 1719), True, 'import numpy as np\n'), ((1734, 1747), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (1740, 1747), True, 'import numpy as np\n'), ((3232, 3251), 'numpy.array', 'np.array', (['batch1[i]'], {}), '(batch1[i])\n', (3240, 3251), True, 'import numpy as np\n'), ((3268, 3287), 'numpy.array', 'np.array', (['batch2[i]'], {}), '(batch2[i])\n', (3276, 3287), True, 'import numpy as np\n'), ((6623, 6648), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (6637, 6648), True, 'import numpy as np\n'), ((6657, 6679), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (6668, 6679), False, 'import random\n'), ((9850, 9878), 'numpy.array', 'np.array', (['holder'], {'copy': '(False)'}), '(holder, copy=False)\n', (9858, 9878), True, 'import numpy as np\n'), ((9948, 9969), 'cupy.asanyarray', 'cp.asanyarray', (['holder'], {}), '(holder)\n', (9961, 9969), True, 'import cupy as cp\n'), ((2133, 2155), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (2148, 2155), False, 'from PIL import Image\n'), ((5595, 5612), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5609, 5612), True, 'import numpy as np\n'), ((9039, 9069), 'numpy.array_equal', 'np.array_equal', (['out[i]', 'ref[i]'], {}), '(out[i], ref[i])\n', (9053, 9069), True, 'import numpy as np\n'), ((3700, 3720), 'numpy.abs', 'np.abs', (['(left - right)'], {}), '(left - right)\n', (3706, 3720), True, 'import numpy as np\n'), ((3745, 3765), 'numpy.abs', 'np.abs', (['(right - left)'], {}), '(right - left)\n', (3751, 3765), True, 
'import numpy as np\n'), ((3792, 3816), 'numpy.minimum', 'np.minimum', (['diff2', 'diff1'], {}), '(diff2, diff1)\n', (3802, 3816), True, 'import numpy as np\n'), ((3839, 3855), 'numpy.mean', 'np.mean', (['absdiff'], {}), '(absdiff)\n', (3846, 3855), True, 'import numpy as np\n'), ((3882, 3897), 'numpy.max', 'np.max', (['absdiff'], {}), '(absdiff)\n', (3888, 3897), True, 'import numpy as np\n'), ((3924, 3939), 'numpy.min', 'np.min', (['absdiff'], {}), '(absdiff)\n', (3930, 3939), True, 'import numpy as np\n'), ((3971, 3991), 'numpy.sum', 'np.sum', (['(absdiff != 0)'], {}), '(absdiff != 0)\n', (3977, 3991), True, 'import numpy as np\n'), ((7786, 7820), 'numpy.array', 'np.array', (['sample_data'], {'dtype': 'dtype'}), '(sample_data, dtype=dtype)\n', (7794, 7820), True, 'import numpy as np\n'), ((8907, 8937), 'numpy.array_equal', 'np.array_equal', (['out[i]', 'ref[i]'], {}), '(out[i], ref[i])\n', (8921, 8937), True, 'import numpy as np\n'), ((1999, 2020), 'numpy.iinfo', 'np.iinfo', (['image.dtype'], {}), '(image.dtype)\n', (2007, 2020), True, 'import numpy as np\n'), ((6990, 7018), 'random.randint', 'random.randint', (['min_s', 'max_s'], {}), '(min_s, max_s)\n', (7004, 7018), False, 'import random\n'), ((2038, 2059), 'numpy.iinfo', 'np.iinfo', (['image.dtype'], {}), '(image.dtype)\n', (2046, 2059), True, 'import numpy as np\n'), ((2066, 2087), 'numpy.iinfo', 'np.iinfo', (['image.dtype'], {}), '(image.dtype)\n', (2074, 2087), True, 'import numpy as np\n'), ((5855, 5877), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (5869, 5877), True, 'import numpy as np\n'), ((7328, 7350), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (7342, 7350), True, 'import numpy as np\n'), ((5717, 5739), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (5731, 5739), True, 'import numpy as np\n'), ((7185, 7207), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (7199, 7207), True, 'import numpy as np\n'), 
((6886, 6901), 'random.random', 'random.random', ([], {}), '()\n', (6899, 6901), False, 'import random\n')] |
"""
Evaluates which features of a trained neural network are salient along the time
and feature dimensions, and creates a saliency heatmap.
The model must be trained beforehand.
"""
import pandas
import pandas as pd
import torch
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import step
from statsmodels.tsa.vector_ar.var_model import forecast
import sys
import os
sys.path.append("../")
MAIN_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(MAIN_PATH)
import utils.datahandler as dh
import utils.tensorloader as dl
from utils.confighandler import read_config, write_config
from utils.cli import parse_basic, parse_with_loss, query_true_false
import json
from random import gauss
from random import seed
from pandas.plotting import autocorrelation_plot
import utils.modelhandler as mh
import utils.metrics as metrics
import itertools
import torch.nn as nn
import optuna
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from time import perf_counter
def create_mean_saliency_map(saliency_maps):
    """Average a list of (encoder, decoder) saliency-map tuples over all
    timesteps and save/show the resulting heatmap.

    Relies on module-level globals set up in __main__: length, history_horizon,
    forecast_horizon, num_features1, num_features2, interpretation_plot_path.
    """
    ## create mean over all saliency maps
    print('create mean saliency map over all timesteps')
    saliency_maps_tensor1 = torch.zeros(length, history_horizon, num_features1)
    saliency_maps_tensor2 = torch.zeros(length, forecast_horizon, num_features2)
    # Stack the encoder maps (tuple element 0) and decoder maps (element 1).
    for i, timestep in enumerate(saliency_maps):
        saliency_maps_tensor1[i] = timestep[0]
    for i, timestep in enumerate(saliency_maps):
        saliency_maps_tensor2[i] = timestep[1]
    mean_saliency_map = (torch.mean(saliency_maps_tensor1, 0), torch.mean(saliency_maps_tensor2, 0))
    # NOTE(review): create_saliency_heatmap_plot is not defined in this file --
    # verify it exists wherever this helper is actually called from.
    fig, ax = create_saliency_heatmap_plot(mean_saliency_map)
    print('Done')
    fig.savefig(interpretation_plot_path + '/mean_heatmap')
    print('mean saliency map saved in '+ interpretation_plot_path)
    fig.show()
def create_reference(dataloader, timestep, batch_size):
    """Create a batch of noisy reference inputs for one timestep.

    For every feature, white noise with the feature's own standard deviation is
    added to the original series; `batch_size` such references are generated
    for both the encoder (history) and the decoder (forecast) inputs.

    Returns a tuple (features1_references, features2_references) of Tensors of
    shape (batch_size, horizon, num_features) moved to DEVICE (module global).
    """
    #creates reference for a certain timestep
    num_ref = batch_size #number of references per saliency map ("batch number")
    history_horizon = dataloader.dataset.inputs1.shape[1]
    forecast_horizon = dataloader.dataset.inputs2.shape[1]
    num_features1 = dataloader.dataset.inputs1.shape[2]
    num_features2 = dataloader.dataset.inputs2.shape[2]
    # NOTE(review): seed(1) seeds the `random` module, but the noise below is
    # drawn from an unseeded np.random.default_rng(), so the references are not
    # reproducible -- confirm whether that is intended.
    seed(1)# seed random number generator
    features1_references_np = np.zeros(shape=(num_ref, history_horizon, num_features1 ))
    features2_references_np = np.zeros(shape=(num_ref, forecast_horizon, num_features2))
    inputs1_np = dataloader.dataset.inputs1[timestep].cpu().numpy()
    inputs2_np = dataloader.dataset.inputs2[timestep].cpu().numpy()
    for x in range(num_features1): # iterate through encoder features
        feature_x = inputs1_np[:, x]
        mu = 0
        sigma = abs(np.std(feature_x)) # noise scale follows the feature's std deviation
        for j in range(num_ref):
            noise_feature1 = np.random.default_rng().normal(mu, sigma, history_horizon) # create white noise series
            features1_references_np[j, :, x] = noise_feature1 + feature_x
    for x in range(num_features2): # iterate through decoder features
        feature_x = inputs2_np[:, x]
        mu = 0
        sigma = abs(np.std(feature_x)) # noise scale follows the feature's std deviation
        for j in range(num_ref):
            noise_feature2 = np.random.default_rng().normal(mu, sigma, forecast_horizon)
            features2_references_np[j, :, x] = noise_feature2 + feature_x
    # create Torch Tensors
    features1_references = torch.Tensor(features1_references_np).to(DEVICE)
    features2_references = torch.Tensor(features2_references_np).to(DEVICE)
    return features1_references, features2_references
def create_saliency_plot(timestep,
                        datetime,
                        saliency_maps,
                        target_prediction,
                        net_prediction,
                        perturbated_prediction,
                        inputs1,
                        inputs2,
                        plot_path):
    """Plot the prediction comparison and the feature/time saliency heatmap for
    one timestep, plus one figure per encoder/decoder input feature.

    `saliency_maps` is an (encoder_map, decoder_map) tuple.  Relies on module
    globals set up in __main__: history_horizon, forecast_horizon,
    encoder_features, decoder_features, num_features1, num_features2.
    """
    # font sizes
    plt.rc('font', size=30)  # default font size
    plt.rc('axes', labelsize=30)  # fontsize of the x and y labels
    plt.rc('axes', titlesize=30)  # fontsize of the title
    fig1, ax1 = plt.subplots(1, figsize=(14, 14))
    fig2, ax2 = plt.subplots(1, figsize=(20, 14))
    ax1.plot(net_prediction[0][0, :, :].cpu(), label='original prediction')
    ax1.plot(target_prediction.squeeze().cpu(), label='target')
    mean_perturbated_prediction = torch.mean(perturbated_prediction[0].detach().squeeze(2), dim=0).cpu().numpy()
    ax1.plot(mean_perturbated_prediction, label='mean prediction \nof all perturbated inputs')
    # saliency heatmap
    time_axis_length = history_horizon + forecast_horizon
    common = list(set(encoder_features) & set(decoder_features))  # features which are both encoder and decoder features
    feature_axis_length = len(encoder_features) + len(decoder_features) - len(common)
    features = pd.array([''] * feature_axis_length)
    # NaN marks time/feature cells with no data (drawn as background colour).
    saliency_heatmap = np.full((time_axis_length, feature_axis_length), fill_value=np.nan)
    counter = -1
    # only encoder features
    # Fix (throughout): use the saliency_maps parameter; the original body read
    # an identically-purposed module global named `saliency_map` instead.
    i = 0
    while i < len(encoder_features):
        if encoder_features[i] not in common:
            counter += 1
            features[counter] = encoder_features[i]
            saliency_heatmap[0:history_horizon, counter] = saliency_maps[0][:, i].cpu().detach().numpy()
        i += 1
    # common features: concatenate encoder and decoder saliency over full time axis
    i = 0
    j = 0
    while i < len(encoder_features):
        if encoder_features[i] in common:
            counter += 1
            features[counter] = encoder_features[i]
            j = 0
            while j < len(decoder_features):
                if encoder_features[i] == decoder_features[j]:
                    saliency_heatmap[0:history_horizon + forecast_horizon, counter] = torch.cat((saliency_maps[0][:, i], saliency_maps[1][:, j]), dim=0).cpu().detach().numpy()
                    break
                j += 1
        i += 1
    # only decoder features
    i = 0
    while i < len(decoder_features):
        if decoder_features[i] not in common:
            # Fix: increment the column index BEFORE assigning (the original
            # overwrote the previous column's label) and start the decoder rows
            # at history_horizon (the original used history_horizon+1, whose
            # slice is one row too short for the forecast_horizon-long vector).
            counter += 1
            features[counter] = decoder_features[i]
            saliency_heatmap[history_horizon:, counter] = saliency_maps[1][:, i].cpu().detach().numpy()
        i += 1
    saliency_heatmap = np.transpose(saliency_heatmap)  # swap axes: features on y, time on x
    im = ax2.imshow(saliency_heatmap, cmap='jet',
                    norm=None, aspect='auto', interpolation='nearest', vmin=0, vmax=1, origin='lower')
    # create datetime x-axis
    plot_datetime = pd.array([''] * time_axis_length)  # looks better for plot
    for h in range(datetime.array.size):
        if datetime.array.hour[h] == 0:  # only show full date once per day
            plot_datetime[h] = datetime.array.strftime('%b %d %Y %H:%M')[h]
        else:
            if datetime.array.hour[h] % 12 == 0:  # every 12th hour
                plot_datetime[h] = datetime.array.strftime('%H:%M')[h]
    # feature names renamed for plot
    feature_labels = features
    for i, f_label in enumerate(features):
        if feature_labels[i] == 'DE_load_actual_entsoe_transparency': feature_labels[i] = 'load'
        elif feature_labels[i] == 'DE_temperature': feature_labels[i] = 'temperature'
        elif feature_labels[i] == 'DE_radiation_direct_horizontal': feature_labels[i] = 'direct radiation'
        elif feature_labels[i] == 'DE_radiation_diffuse_horizontal': feature_labels[i] = 'diffuse radiation'
    # show ticks
    ax2.set_xticks(np.arange(len(datetime)))
    ax2.set_xticklabels(plot_datetime)
    feature_ticks = np.arange(len(features))
    ax2.set_yticks(feature_ticks)
    ax2.set_yticklabels(features)
    ax1.set_xticks(np.arange(forecast_horizon))
    ax1.set_xticklabels(plot_datetime[history_horizon:])
    # rotate tick labels and set alignment
    plt.setp(ax2.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # set titles and legends
    ax2.set_xlabel('Time')
    ax2.set_ylabel('Features')
    cbar = fig2.colorbar(im)  # add colorbar
    ax1.legend()
    # layout
    fig2.tight_layout()
    fig1.tight_layout()
    fig2.savefig(plot_path + '/heatmap' + str(timestep))
    fig1.savefig(plot_path + '/predictions' + str(timestep))
    # one figure per encoder input feature
    for i in range(num_features1):
        fig, ax = plt.subplots()
        feature_name = encoder_features[i]
        feature = inputs1[0, :, i]
        ax.set_xlabel('time')
        ax.set_ylabel(feature_name)
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                 rotation_mode="anchor")
        ax.plot(feature.cpu())
        input_plot_path = plot_path + str(timestep) + '/' + 'encoder_inputs/'
        if not os.path.exists(plot_path + str(timestep)):
            os.mkdir(plot_path + str(timestep))
        if not os.path.exists(input_plot_path):
            os.mkdir(input_plot_path)
        fig.savefig(input_plot_path + feature_name)
        plt.close(fig)
    # one figure per decoder input feature
    for i in range(num_features2):
        fig, ax = plt.subplots()
        feature_name = decoder_features[i]
        feature = inputs2[0, :, i]
        ax.set_xlabel('time')
        ax.set_ylabel(feature_name)
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                 rotation_mode="anchor")
        ax.plot(feature.cpu())
        input_plot_path = plot_path + str(timestep) + '/' + 'decoder_inputs/'
        if not os.path.exists(input_plot_path):
            os.mkdir(input_plot_path)
        fig.savefig(input_plot_path + feature_name)
        plt.close(fig)
    print('all plots saved in ' + plot_path)
def save_interpretation_tensors(saliency_maps,
                                perturbated_predictions,
                                perturbated_input1,
                                perturbated_input2,
                                inputs1,
                                inputs2,
                                rmse,
                                path,
                                trial_id):
    """Persist all interpretation artifacts of one trial under `path`,
    suffixing each file name with the trial id."""
    suffix = str(trial_id)
    artifacts = {
        'saliency_maps_': saliency_maps,
        'perturbated_predictions_': perturbated_predictions,
        'perturbated_input1_': perturbated_input1,
        'perturbated_input2_': perturbated_input2,
        'inputs1_': inputs1,
        'inputs2_': inputs2,
        'rmse_': rmse,
    }
    for prefix, tensor in artifacts.items():
        torch.save(tensor, path + prefix + suffix)
def load_interpretation_tensors(path, trial_id):
    """Load the saliency maps and perturbated predictions saved for one trial."""
    suffix = str(trial_id)
    saliency_maps = torch.load(path + 'saliency_maps_' + suffix)
    perturbated_predictions = torch.load(path + 'perturbated_predictions_' + suffix)
    return saliency_maps, perturbated_predictions
def mask_weights_loss(mask_encoder, mask_decoder): # penalizes high mask parameter values
    """Frobenius-norm penalty on both masks, each normalized by the norm of an
    all-ones mask of the same shape (so a full mask contributes 1.0)."""
    enc_scale = torch.norm(torch.ones(mask_encoder.shape))
    dec_scale = torch.norm(torch.ones(mask_decoder.shape))
    enc_penalty = torch.norm(mask_encoder) / enc_scale  # frobenius norm
    dec_penalty = torch.norm(mask_decoder) / dec_scale  # frobenius norm
    return enc_penalty + dec_penalty
def mask_interval_loss(mask_encoder, mask_decoder):
    """Penalty that is zero while every mask entry lies in [0, 1] and grows with
    the norm of the out-of-range excess otherwise (encourages valid masks)."""
    # nn.Threshold(t, 0) passes values > t through and zeroes the rest.
    above_one = nn.Threshold(1, 0)   # isolates entries > 1
    below_zero = nn.Threshold(0, 0)  # on the negated mask: isolates entries < 0
    penalty = torch.norm(above_one(mask_encoder)) + torch.norm(above_one(mask_decoder))
    penalty = penalty + torch.norm(below_zero(torch.mul(mask_encoder, -1)))
    penalty = penalty + torch.norm(below_zero(torch.mul(mask_decoder, -1)))
    return penalty
def loss_function(criterion,
                  target_predictions,
                  perturbated_predictions,
                  mask,
                  lambda1=0.1,
                  lambda2=1e10,
                  ):
    """Total mask-optimization loss.

    Combines the prediction deviation (criterion between the unperturbed target
    prediction and all perturbated predictions), a lambda1-weighted penalty on
    mask magnitude, and a lambda2-weighted penalty keeping masks in [0, 1].
    Returns (total_loss, prediction_loss).  Uses module global DEVICE.
    """
    mask_encoder = mask[0]
    mask_decoder = mask[1]
    batch_size = perturbated_predictions[0].shape[0]
    target_prediction = target_predictions[0]
    # Replicate the single target prediction once per reference in the batch.
    target_copies = torch.zeros(perturbated_predictions[0].shape).to(DEVICE)
    for n in range(batch_size): # target prediction is copied for all references in batch
        target_copies[n] = target_prediction
    loss1 = criterion(target_copies, perturbated_predictions) # prediction loss
    loss2 = lambda1*mask_weights_loss(mask_encoder, mask_decoder) # abs value of mask weights
    loss3 = lambda2*mask_interval_loss(mask_encoder, mask_decoder)
    ssr_loss = loss1 + loss2 + loss3
    #sdr_loss = -loss1 + loss2 + loss3
    return ssr_loss, loss1
# creates saliency map for one timestep:
def objective(trial):
    """Optuna objective: fits a perturbation mask ('saliency map') for one timestep.

    Relies on module-level globals set up in __main__: inputs1, inputs2,
    predictions, features1_references, features2_references, net, criterion,
    DEVICE, history_horizon, forecast_horizon, num_features1, num_features2,
    MAX_EPOCHS and tensor_save_path.  Saves all artifacts for the trial and
    returns the final training loss.
    """
    torch.autograd.set_detect_anomaly(True)
    # hyperparameters sampled by optuna
    learning_rate = trial.suggest_loguniform("learning rate", low=1e-5 ,high=0.01)
    mask_init_value = trial.suggest_uniform('mask initialisation value',0.,1.)
    inputs1_temp = torch.squeeze(inputs1, dim=0).to(DEVICE)
    inputs2_temp = torch.squeeze(inputs2, dim=0).to(DEVICE)
    # the (encoder, decoder) mask pair are the trainable parameters
    saliency_map = (torch.full((history_horizon, num_features1), fill_value=mask_init_value, device=DEVICE, requires_grad=True),
                    torch.full((forecast_horizon, num_features2), fill_value=mask_init_value, device=DEVICE, requires_grad=True))
    optimizer = torch.optim.Adam(saliency_map, lr=learning_rate)
    stop_counter = 0
    # calculate mask
    for epoch in range(MAX_EPOCHS): # mask 'training' epochs
        # create inverse masks
        inverse_saliency_map1 = torch.sub(torch.ones(inputs1_temp.shape,device=DEVICE),
                                          saliency_map[0]).to(DEVICE) # elementwise 1-m
        inverse_saliency_map2 = torch.sub(torch.ones(inputs2_temp.shape,device=DEVICE),
                                          saliency_map[1]).to(DEVICE) # elementwise 1-m
        input_summand1 = torch.mul(inputs1_temp, saliency_map[0]).to(DEVICE) # element wise multiplication
        input_summand2 = torch.mul(inputs2_temp, saliency_map[1]).to(DEVICE) # element wise multiplication
        # create perturbated series through mask: mask*input + (1-mask)*reference
        reference_summand1 = torch.mul(features1_references, inverse_saliency_map1).to(DEVICE)
        perturbated_input1 = torch.add(input_summand1, reference_summand1).to(DEVICE)
        reference_summand2 = torch.mul(features2_references, inverse_saliency_map2).to(DEVICE)
        perturbated_input2 = torch.add(input_summand2, reference_summand2).to(DEVICE)
        # get prediction
        # NOTE(review): the model is queried in train() mode -- confirm this is
        # intended (e.g. to keep dropout active) rather than net.eval().
        net.train()
        perturbated_predictions, _ = net(perturbated_input1,
                                         perturbated_input2)
        loss, rmse = loss_function(
            criterion,
            predictions,
            perturbated_predictions,
            saliency_map
        )
        optimizer.zero_grad() # set all gradients zero
        # Staged early stopping: break once the loss stays above a threshold
        # (0.2 / 0.1 / 0.05, tightening over time) for 10 consecutive epochs.
        if ((epoch >= 1000) and (epoch < 3000)):
            if ((loss > 0.2) and (loss < 1)):#loss <1 to prevent stopping because mask out of [0,1] boundary
                stop_counter += 1 #stop counter to prevent stopping due to temporary loss jumps
                if (stop_counter == 10):
                    print('stopping...')
                    break
            else: stop_counter = 0
        elif ((epoch >= 3000) and (epoch < 5000)):
            if ((loss > 0.1) and (loss < 1)): #loss <1 to prevent stopping because mask out of [0,1] boundary
                stop_counter += 1 #stop counter to prevent stopping due to temporary loss jumps
                if (stop_counter == 10):
                    print('stopping...')
                    break
            else: stop_counter = 0
        elif ((epoch >= 5000) and (epoch < 10000)):
            if ((loss > 0.05) and (loss < 1)): #loss <1 to prevent stopping because mask out of [0,1] boundary
                stop_counter += 1 #stop counter to prevent stopping due to temporary loss jumps
                if (stop_counter == 10):
                    print('stopping...')
                    break
            else: stop_counter = 0
        loss.backward() # backpropagate mean loss
        optimizer.step() # update mask parameters
        if epoch%1000 ==0: # print progress every 1000 epochs
            print('epoch ', epoch, '/', MAX_EPOCHS, '... loss:', loss.item())
    trial_id = trial.number
    save_interpretation_tensors(saliency_map,
                                perturbated_predictions,
                                perturbated_input1,
                                perturbated_input2,
                                inputs1,
                                inputs2,
                                rmse,
                                tensor_save_path,
                                trial_id)
    return loss
if __name__ == "__main__":
    # Interpretation driver: loads config, data and a trained model, then fits
    # one saliency map per timestep with optuna and saves plots + RMSE scores.
    MAIN_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    INTERPRETATION_PATH = './oracles/interpretation/'
    sys.path.append(MAIN_PATH)
    MAX_BATCH_SIZE = 10
    MAX_EPOCHS = 10000
    N_TRIALS = 50 #hyperparameter tuning trials
    MODEL_NAME = 'opsd_PSCC_few' #path relative to targets folder
    SEP = ';' #seperation for csv data
    criterion = metrics.rmse # loss function criterion
    #time steps to interpret (beginning of history horizon) to calculate: index in csv table -2 -history horizon
    timesteps = [35784]
    print('model: ', MODEL_NAME)
    ## Data preparation
    # import config and extract relevant config variables
    CONFIG_PATH = './targets/' + MODEL_NAME + '/config.json'
    config_file = os.path.join(MAIN_PATH, CONFIG_PATH)
    CONFIG = read_config(config_path=config_file, main_path=MAIN_PATH)
    target_id = CONFIG['target_id']
    encoder_features = CONFIG['encoder_features']
    decoder_features = CONFIG['decoder_features']
    history_horizon = CONFIG['history_horizon']
    forecast_horizon = CONFIG['forecast_horizon']
    feature_groups = CONFIG['feature_groups']
    # create output directory tree
    path = os.path.join(MAIN_PATH, INTERPRETATION_PATH)
    if not os.path.exists(path):
        os.mkdir(path)
    model_name = CONFIG["model_name"]
    model_interpretation_path = os.path.join(path, model_name + '/')
    if not os.path.exists(model_interpretation_path):
        os.mkdir(model_interpretation_path)
    interpretation_plot_path = os.path.join(model_interpretation_path, 'plots/')
    if not os.path.exists(interpretation_plot_path):
        os.mkdir(interpretation_plot_path)
    tensor_path = os.path.join(model_interpretation_path, 'Tensors/')
    if not os.path.exists(tensor_path):
        os.mkdir(tensor_path)
    # device selection
    cuda_id = CONFIG["cuda_id"]
    if torch.cuda.is_available():
        DEVICE = 'cuda'
        if cuda_id is not None:
            torch.cuda.set_device(cuda_id)
        print('Device: ', DEVICE)
        print('Current CUDA ID: ', torch.cuda.current_device())
    else:
        DEVICE = 'cpu'
        print(DEVICE)
    # import data as df
    data_path = CONFIG["data_path"]
    print('reading csv...')
    df = pd.read_csv(os.path.join(MAIN_PATH, data_path), sep=SEP)
    time_column = df.loc[:, "Time"]
    print('done')
    # scale all data
    print('scaling data...')
    # Fix: the datahandler module is imported as `dh`; `dt.scale_all` raised a
    # NameError in the original code.
    df, scalers = dh.scale_all(df, feature_groups=feature_groups)
    print('done')
    # load input data into tensors
    print('loading input data...')
    dataloader = dl.make_dataloader(df,
                                   target_id,
                                   encoder_features,
                                   decoder_features,
                                   history_horizon=history_horizon,
                                   forecast_horizon=forecast_horizon,
                                   shuffle=False).to(DEVICE)
    print('Done')
    length = dataloader.dataset.targets.shape[0] # length of sequence per batch
    num_features1 = dataloader.number_features1()
    num_features2 = dataloader.number_features2()
    number_of_targets = dataloader.dataset.targets.shape[2]
    print('timesteps', length)
    print('num_features1', num_features1)
    print('num_features2', num_features2)
    print('targets:', number_of_targets)
    print('history_horizon', history_horizon)
    print('forecast_horizon', forecast_horizon)
    ## load the trained NN
    print('load net...')
    INMODEL = os.path.join(MAIN_PATH, CONFIG["output_path"], CONFIG["model_name"])
    net = torch.load(INMODEL, map_location=torch.device(DEVICE))
    print('Done.')
    t0_start = perf_counter()
    results_df = pd.DataFrame(columns=['RMSE PERTURBATED',
                                       'RMSE ORIGINAL',
                                       'RMSE DIFFERENCE PERTURBATED ORIGINAL'],
                              index=timesteps)
    for timestep in timesteps:
        t1_start = perf_counter()
        print('\n\ntimestep: ', timestep)
        datetime = pd.to_datetime(time_column.iloc[timestep:timestep+history_horizon+forecast_horizon])
        tensor_save_path = os.path.join(tensor_path, str(timestep) + '/')
        if not os.path.exists(tensor_save_path):
            os.mkdir(tensor_save_path)
        # get original inputs and predictions
        inputs1 = torch.unsqueeze(dataloader.dataset.inputs1[timestep], dim=0)
        inputs2 = torch.unsqueeze(dataloader.dataset.inputs2[timestep], dim=0)
        targets = torch.unsqueeze(dataloader.dataset.targets[timestep], dim=0)
        with torch.no_grad():
            predictions, _ = net(inputs1, inputs2)
        ## obtain reference input data
        features1_references, features2_references = create_reference(dataloader, timestep, MAX_BATCH_SIZE)
        ## create saliency map via hyperparameter search over the mask fit
        print('create saliency maps...')
        study = optuna.create_study()
        study.optimize(
            objective,
            n_trials=N_TRIALS)
        print('Done')
        # load best saliency map
        best_trial_id = study.best_trial.number
        saliency_map, perturbated_prediction = load_interpretation_tensors(tensor_save_path, best_trial_id)
        # save plot for best saliency map
        create_saliency_plot(timestep,
                             datetime,
                             saliency_map,
                             targets,
                             predictions,
                             perturbated_prediction,
                             inputs1,
                             inputs2,
                             interpretation_plot_path)
        t1_stop = perf_counter()
        print("Elapsed time: ", t1_stop-t1_start)
        # calculate rmse of perturbated prediction and original prediction in respect to target value
        rmse_perturbated = criterion(targets, torch.unsqueeze(torch.unsqueeze(torch.mean(perturbated_prediction[0],dim=0), dim=0),dim=0)).cpu().detach().numpy()
        rmse_original = criterion(targets, predictions).cpu().detach().numpy()
        rmse_diff = rmse_perturbated - rmse_original # difference in rmse scores between perturbated and original prediction
        data = {
            'RMSE PERTURBATED': rmse_perturbated,
            'RMSE ORIGINAL': rmse_original,
            'RMSE DIFFERENCE PERTURBATED ORIGINAL': rmse_diff}
        results_df.loc[timestep] = data
    save_path = model_interpretation_path
    results_df.to_csv(save_path+'rmse.csv', sep=';', index=True)
    t0_stop = perf_counter()
    print("Total elapsed time: ", t0_stop-t0_start)
| [
"torch.nn.Threshold",
"torch.mul",
"numpy.random.default_rng",
"torch.cuda.is_available",
"torch.squeeze",
"sys.path.append",
"numpy.arange",
"pandas.to_datetime",
"os.path.exists",
"pandas.array",
"torch.mean",
"torch.unsqueeze",
"time.perf_counter",
"matplotlib.pyplot.close",
"os.mkdir... | [((381, 403), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (396, 403), False, 'import sys\n'), ((478, 504), 'sys.path.append', 'sys.path.append', (['MAIN_PATH'], {}), '(MAIN_PATH)\n', (493, 504), False, 'import sys\n'), ((1226, 1277), 'torch.zeros', 'torch.zeros', (['length', 'history_horizon', 'num_features1'], {}), '(length, history_horizon, num_features1)\n', (1237, 1277), False, 'import torch\n'), ((1306, 1358), 'torch.zeros', 'torch.zeros', (['length', 'forecast_horizon', 'num_features2'], {}), '(length, forecast_horizon, num_features2)\n', (1317, 1358), False, 'import torch\n'), ((2309, 2316), 'random.seed', 'seed', (['(1)'], {}), '(1)\n', (2313, 2316), False, 'from random import seed\n'), ((2378, 2435), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_ref, history_horizon, num_features1)'}), '(shape=(num_ref, history_horizon, num_features1))\n', (2386, 2435), True, 'import numpy as np\n'), ((2467, 2525), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_ref, forecast_horizon, num_features2)'}), '(shape=(num_ref, forecast_horizon, num_features2))\n', (2475, 2525), True, 'import numpy as np\n'), ((4095, 4118), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(30)'}), "('font', size=30)\n", (4101, 4118), True, 'import matplotlib.pyplot as plt\n'), ((4142, 4170), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(30)'}), "('axes', labelsize=30)\n", (4148, 4170), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4237), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': '(30)'}), "('axes', titlesize=30)\n", (4215, 4237), True, 'import matplotlib.pyplot as plt\n'), ((4284, 4317), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(14, 14)'}), '(1, figsize=(14, 14))\n', (4296, 4317), True, 'import matplotlib.pyplot as plt\n'), ((4332, 4365), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(20, 14)'}), '(1, figsize=(20, 14))\n', (4344, 4365), 
True, 'import matplotlib.pyplot as plt\n'), ((5021, 5057), 'pandas.array', 'pd.array', (["([''] * feature_axis_length)"], {}), "([''] * feature_axis_length)\n", (5029, 5057), True, 'import pandas as pd\n'), ((5079, 5146), 'numpy.full', 'np.full', (['(time_axis_length, feature_axis_length)'], {'fill_value': 'np.nan'}), '((time_axis_length, feature_axis_length), fill_value=np.nan)\n', (5086, 5146), True, 'import numpy as np\n'), ((6451, 6481), 'numpy.transpose', 'np.transpose', (['saliency_heatmap'], {}), '(saliency_heatmap)\n', (6463, 6481), True, 'import numpy as np\n'), ((6697, 6730), 'pandas.array', 'pd.array', (["([''] * time_axis_length)"], {}), "([''] * time_axis_length)\n", (6705, 6730), True, 'import pandas as pd\n'), ((11614, 11632), 'torch.nn.Threshold', 'nn.Threshold', (['(1)', '(0)'], {}), '(1, 0)\n', (11626, 11632), True, 'import torch.nn as nn\n'), ((11667, 11685), 'torch.nn.Threshold', 'nn.Threshold', (['(0)', '(0)'], {}), '(0, 0)\n', (11679, 11685), True, 'import torch.nn as nn\n'), ((13059, 13098), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (13092, 13098), False, 'import torch\n'), ((13667, 13715), 'torch.optim.Adam', 'torch.optim.Adam', (['saliency_map'], {'lr': 'learning_rate'}), '(saliency_map, lr=learning_rate)\n', (13683, 13715), False, 'import torch\n'), ((17374, 17400), 'sys.path.append', 'sys.path.append', (['MAIN_PATH'], {}), '(MAIN_PATH)\n', (17389, 17400), False, 'import sys\n'), ((18008, 18044), 'os.path.join', 'os.path.join', (['MAIN_PATH', 'CONFIG_PATH'], {}), '(MAIN_PATH, CONFIG_PATH)\n', (18020, 18044), False, 'import os\n'), ((18058, 18115), 'utils.confighandler.read_config', 'read_config', ([], {'config_path': 'config_file', 'main_path': 'MAIN_PATH'}), '(config_path=config_file, main_path=MAIN_PATH)\n', (18069, 18115), False, 'from utils.confighandler import read_config, write_config\n'), ((18411, 18455), 'os.path.join', 'os.path.join', (['MAIN_PATH', 
'INTERPRETATION_PATH'], {}), '(MAIN_PATH, INTERPRETATION_PATH)\n', (18423, 18455), False, 'import os\n'), ((18582, 18618), 'os.path.join', 'os.path.join', (['path', "(model_name + '/')"], {}), "(path, model_name + '/')\n", (18594, 18618), False, 'import os\n'), ((18748, 18797), 'os.path.join', 'os.path.join', (['model_interpretation_path', '"""plots/"""'], {}), "(model_interpretation_path, 'plots/')\n", (18760, 18797), False, 'import os\n'), ((18912, 18963), 'os.path.join', 'os.path.join', (['model_interpretation_path', '"""Tensors/"""'], {}), "(model_interpretation_path, 'Tensors/')\n", (18924, 18963), False, 'import os\n'), ((19074, 19099), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (19097, 19099), False, 'import torch\n'), ((20762, 20830), 'os.path.join', 'os.path.join', (['MAIN_PATH', "CONFIG['output_path']", "CONFIG['model_name']"], {}), "(MAIN_PATH, CONFIG['output_path'], CONFIG['model_name'])\n", (20774, 20830), False, 'import os\n'), ((20928, 20942), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (20940, 20942), False, 'from time import perf_counter\n'), ((20965, 21085), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['RMSE PERTURBATED', 'RMSE ORIGINAL', 'RMSE DIFFERENCE PERTURBATED ORIGINAL']", 'index': 'timesteps'}), "(columns=['RMSE PERTURBATED', 'RMSE ORIGINAL',\n 'RMSE DIFFERENCE PERTURBATED ORIGINAL'], index=timesteps)\n", (20977, 21085), True, 'import pandas as pd\n'), ((23810, 23824), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (23822, 23824), False, 'from time import perf_counter\n'), ((449, 475), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (465, 475), False, 'import os\n'), ((1579, 1615), 'torch.mean', 'torch.mean', (['saliency_maps_tensor1', '(0)'], {}), '(saliency_maps_tensor1, 0)\n', (1589, 1615), False, 'import torch\n'), ((1617, 1653), 'torch.mean', 'torch.mean', (['saliency_maps_tensor2', '(0)'], {}), '(saliency_maps_tensor2, 0)\n', (1627, 1653), False, 
'import torch\n'), ((7854, 7881), 'numpy.arange', 'np.arange', (['forecast_horizon'], {}), '(forecast_horizon)\n', (7863, 7881), True, 'import numpy as np\n'), ((8488, 8502), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8500, 8502), True, 'import matplotlib.pyplot as plt\n'), ((9113, 9127), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9122, 9127), True, 'import matplotlib.pyplot as plt\n'), ((9198, 9212), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9210, 9212), True, 'import matplotlib.pyplot as plt\n'), ((9718, 9732), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9727, 9732), True, 'import matplotlib.pyplot as plt\n'), ((11058, 11088), 'torch.ones', 'torch.ones', (['mask_encoder.shape'], {}), '(mask_encoder.shape)\n', (11068, 11088), False, 'import torch\n'), ((11124, 11154), 'torch.ones', 'torch.ones', (['mask_decoder.shape'], {}), '(mask_decoder.shape)\n', (11134, 11154), False, 'import torch\n'), ((11187, 11211), 'torch.norm', 'torch.norm', (['mask_encoder'], {}), '(mask_encoder)\n', (11197, 11211), False, 'import torch\n'), ((11278, 11302), 'torch.norm', 'torch.norm', (['mask_decoder'], {}), '(mask_decoder)\n', (11288, 11302), False, 'import torch\n'), ((13412, 13523), 'torch.full', 'torch.full', (['(history_horizon, num_features1)'], {'fill_value': 'mask_init_value', 'device': 'DEVICE', 'requires_grad': '(True)'}), '((history_horizon, num_features1), fill_value=mask_init_value,\n device=DEVICE, requires_grad=True)\n', (13422, 13523), False, 'import torch\n'), ((13541, 13653), 'torch.full', 'torch.full', (['(forecast_horizon, num_features2)'], {'fill_value': 'mask_init_value', 'device': 'DEVICE', 'requires_grad': '(True)'}), '((forecast_horizon, num_features2), fill_value=mask_init_value,\n device=DEVICE, requires_grad=True)\n', (13551, 13653), False, 'import torch\n'), ((18467, 18487), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (18481, 18487), False, 
'import os\n'), ((18497, 18511), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (18505, 18511), False, 'import os\n'), ((18630, 18671), 'os.path.exists', 'os.path.exists', (['model_interpretation_path'], {}), '(model_interpretation_path)\n', (18644, 18671), False, 'import os\n'), ((18681, 18716), 'os.mkdir', 'os.mkdir', (['model_interpretation_path'], {}), '(model_interpretation_path)\n', (18689, 18716), False, 'import os\n'), ((18809, 18849), 'os.path.exists', 'os.path.exists', (['interpretation_plot_path'], {}), '(interpretation_plot_path)\n', (18823, 18849), False, 'import os\n'), ((18859, 18893), 'os.mkdir', 'os.mkdir', (['interpretation_plot_path'], {}), '(interpretation_plot_path)\n', (18867, 18893), False, 'import os\n'), ((18975, 19002), 'os.path.exists', 'os.path.exists', (['tensor_path'], {}), '(tensor_path)\n', (18989, 19002), False, 'import os\n'), ((19012, 19033), 'os.mkdir', 'os.mkdir', (['tensor_path'], {}), '(tensor_path)\n', (19020, 19033), False, 'import os\n'), ((19482, 19516), 'os.path.join', 'os.path.join', (['MAIN_PATH', 'data_path'], {}), '(MAIN_PATH, data_path)\n', (19494, 19516), False, 'import os\n'), ((21233, 21247), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (21245, 21247), False, 'from time import perf_counter\n'), ((21309, 21401), 'pandas.to_datetime', 'pd.to_datetime', (['time_column.iloc[timestep:timestep + history_horizon + forecast_horizon]'], {}), '(time_column.iloc[timestep:timestep + history_horizon +\n forecast_horizon])\n', (21323, 21401), True, 'import pandas as pd\n'), ((21620, 21680), 'torch.unsqueeze', 'torch.unsqueeze', (['dataloader.dataset.inputs1[timestep]'], {'dim': '(0)'}), '(dataloader.dataset.inputs1[timestep], dim=0)\n', (21635, 21680), False, 'import torch\n'), ((21699, 21759), 'torch.unsqueeze', 'torch.unsqueeze', (['dataloader.dataset.inputs2[timestep]'], {'dim': '(0)'}), '(dataloader.dataset.inputs2[timestep], dim=0)\n', (21714, 21759), False, 'import torch\n'), ((21778, 21838), 
'torch.unsqueeze', 'torch.unsqueeze', (['dataloader.dataset.targets[timestep]'], {'dim': '(0)'}), '(dataloader.dataset.targets[timestep], dim=0)\n', (21793, 21838), False, 'import torch\n'), ((22157, 22178), 'optuna.create_study', 'optuna.create_study', ([], {}), '()\n', (22176, 22178), False, 'import optuna\n'), ((22929, 22943), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (22941, 22943), False, 'from time import perf_counter\n'), ((2807, 2824), 'numpy.std', 'np.std', (['feature_x'], {}), '(feature_x)\n', (2813, 2824), True, 'import numpy as np\n'), ((3244, 3261), 'numpy.std', 'np.std', (['feature_x'], {}), '(feature_x)\n', (3250, 3261), True, 'import numpy as np\n'), ((3543, 3580), 'torch.Tensor', 'torch.Tensor', (['features1_references_np'], {}), '(features1_references_np)\n', (3555, 3580), False, 'import torch\n'), ((3619, 3656), 'torch.Tensor', 'torch.Tensor', (['features2_references_np'], {}), '(features2_references_np)\n', (3631, 3656), False, 'import torch\n'), ((8981, 9012), 'os.path.exists', 'os.path.exists', (['input_plot_path'], {}), '(input_plot_path)\n', (8995, 9012), False, 'import os\n'), ((9026, 9051), 'os.mkdir', 'os.mkdir', (['input_plot_path'], {}), '(input_plot_path)\n', (9034, 9051), False, 'import os\n'), ((9587, 9618), 'os.path.exists', 'os.path.exists', (['input_plot_path'], {}), '(input_plot_path)\n', (9601, 9618), False, 'import os\n'), ((9632, 9657), 'os.mkdir', 'os.mkdir', (['input_plot_path'], {}), '(input_plot_path)\n', (9640, 9657), False, 'import os\n'), ((12443, 12488), 'torch.zeros', 'torch.zeros', (['perturbated_predictions[0].shape'], {}), '(perturbated_predictions[0].shape)\n', (12454, 12488), False, 'import torch\n'), ((13290, 13319), 'torch.squeeze', 'torch.squeeze', (['inputs1'], {'dim': '(0)'}), '(inputs1, dim=0)\n', (13303, 13319), False, 'import torch\n'), ((13350, 13379), 'torch.squeeze', 'torch.squeeze', (['inputs2'], {'dim': '(0)'}), '(inputs2, dim=0)\n', (13363, 13379), False, 'import torch\n'), ((17287, 
17313), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (17303, 17313), False, 'import os\n'), ((19169, 19199), 'torch.cuda.set_device', 'torch.cuda.set_device', (['cuda_id'], {}), '(cuda_id)\n', (19190, 19199), False, 'import torch\n'), ((19269, 19296), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (19294, 19296), False, 'import torch\n'), ((19804, 19964), 'utils.tensorloader.make_dataloader', 'dl.make_dataloader', (['df', 'target_id', 'encoder_features', 'decoder_features'], {'history_horizon': 'history_horizon', 'forecast_horizon': 'forecast_horizon', 'shuffle': '(False)'}), '(df, target_id, encoder_features, decoder_features,\n history_horizon=history_horizon, forecast_horizon=forecast_horizon,\n shuffle=False)\n', (19822, 19964), True, 'import utils.tensorloader as dl\n'), ((20874, 20894), 'torch.device', 'torch.device', (['DEVICE'], {}), '(DEVICE)\n', (20886, 20894), False, 'import torch\n'), ((21483, 21515), 'os.path.exists', 'os.path.exists', (['tensor_save_path'], {}), '(tensor_save_path)\n', (21497, 21515), False, 'import os\n'), ((21529, 21555), 'os.mkdir', 'os.mkdir', (['tensor_save_path'], {}), '(tensor_save_path)\n', (21537, 21555), False, 'import os\n'), ((21852, 21867), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21865, 21867), False, 'import torch\n'), ((11919, 11946), 'torch.mul', 'torch.mul', (['mask_decoder', '(-1)'], {}), '(mask_decoder, -1)\n', (11928, 11946), False, 'import torch\n'), ((14233, 14273), 'torch.mul', 'torch.mul', (['inputs1_temp', 'saliency_map[0]'], {}), '(inputs1_temp, saliency_map[0])\n', (14242, 14273), False, 'import torch\n'), ((14341, 14381), 'torch.mul', 'torch.mul', (['inputs2_temp', 'saliency_map[1]'], {}), '(inputs2_temp, saliency_map[1])\n', (14350, 14381), False, 'import torch\n'), ((14504, 14558), 'torch.mul', 'torch.mul', (['features1_references', 'inverse_saliency_map1'], {}), '(features1_references, inverse_saliency_map1)\n', (14513, 14558), 
False, 'import torch\n'), ((14599, 14644), 'torch.add', 'torch.add', (['input_summand1', 'reference_summand1'], {}), '(input_summand1, reference_summand1)\n', (14608, 14644), False, 'import torch\n'), ((14685, 14739), 'torch.mul', 'torch.mul', (['features2_references', 'inverse_saliency_map2'], {}), '(features2_references, inverse_saliency_map2)\n', (14694, 14739), False, 'import torch\n'), ((14780, 14825), 'torch.add', 'torch.add', (['input_summand2', 'reference_summand2'], {}), '(input_summand2, reference_summand2)\n', (14789, 14825), False, 'import torch\n'), ((2938, 2961), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (2959, 2961), True, 'import numpy as np\n'), ((3354, 3377), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (3375, 3377), True, 'import numpy as np\n'), ((11853, 11880), 'torch.mul', 'torch.mul', (['mask_encoder', '(-1)'], {}), '(mask_encoder, -1)\n', (11862, 11880), False, 'import torch\n'), ((13896, 13941), 'torch.ones', 'torch.ones', (['inputs1_temp.shape'], {'device': 'DEVICE'}), '(inputs1_temp.shape, device=DEVICE)\n', (13906, 13941), False, 'import torch\n'), ((14073, 14118), 'torch.ones', 'torch.ones', (['inputs2_temp.shape'], {'device': 'DEVICE'}), '(inputs2_temp.shape, device=DEVICE)\n', (14083, 14118), False, 'import torch\n'), ((5961, 6025), 'torch.cat', 'torch.cat', (['(saliency_map[0][:, i], saliency_map[1][:, j])'], {'dim': '(0)'}), '((saliency_map[0][:, i], saliency_map[1][:, j]), dim=0)\n', (5970, 6025), False, 'import torch\n'), ((23182, 23226), 'torch.mean', 'torch.mean', (['perturbated_prediction[0]'], {'dim': '(0)'}), '(perturbated_prediction[0], dim=0)\n', (23192, 23226), False, 'import torch\n')] |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import json
import glob
import h5py
import numpy as np
import pickle
from PIL import Image, ImageOps
import torch
from torchmeta.utils.data.task import Task, ConcatTask, SubsetTask
from collections import OrderedDict
from torchmeta.utils.data import Dataset, ClassDataset, CombinationMetaDataset, MetaDataLoader
from torchmeta.utils.data.dataloader import batch_meta_collate
from torchvision.datasets.utils import list_dir, download_url, download_file_from_google_drive
from torchmeta.datasets.utils import get_asset
import warnings
from torchmeta.datasets.omniglot import OmniglotDataset
from torchmeta.datasets.miniimagenet import MiniImagenetDataset
from torchmeta.transforms import Categorical, ClassSplitter, Rotation, Splitter
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor
class OmniglotClassDataset(ClassDataset):
    """Class-level Omniglot dataset backed by a single HDF5 file.

    Indexing yields one character class (an `OmniglotDataset` over that
    character's images).  `download=True` fetches the raw zips and converts
    them into `data.hdf5`.
    """
    folder = 'omniglot'
    download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python'
    # MD5 checksums of the two raw archives, keyed by archive basename.
    zips_md5 = {
        'images_background': '68d2efa1b9178cc56df9314c21c6e718',
        'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811'
    }
    filename = 'data.hdf5'
    filename_labels = '{0}_labels.json'
    def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,
            meta_split=None, transform=None,
            class_augmentations=None, download=False):
        super(OmniglotClassDataset, self).__init__(meta_train=meta_train,
            meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
            class_augmentations=class_augmentations)
        self.root = os.path.join(os.path.expanduser(root), self.folder)
        self.transform = transform
        # All splits share one data file; labels are stored per split.
        self.split_filename = os.path.join(self.root, self.filename)
        self.split_filename_labels = os.path.join(self.root,
            self.filename_labels.format(self.meta_split))
        # Lazily-opened HDF5 handle and lazily-loaded label list.
        self._data = None
        self._labels = None
        if download:
            self.download()
        # NOTE(review): download() only writes data.hdf5 — the
        # `{split}_labels.json` files must already exist on disk or this
        # integrity check fails; confirm where those are produced.
        if not self._check_integrity():
            raise RuntimeError('Omniglot integrity check failed')
        self._num_classes = len(self.labels)
        print('# classes loaded for %s:' % self.meta_split, self._num_classes)
    def __getitem__(self, index):
        """Return the dataset of one character class.

        The modulo lets class-augmented indices wrap back onto the base
        class list.
        """
        # NOTE(review): labels entries are presumably path components joined
        # with '/'; download() stores datasets under the 'omniglot' group —
        # verify the joined label path resolves in the opened file.
        character_name = '/'.join(self.labels[index % self.num_classes])
        data = self.data[character_name]
        transform = self.get_transform(index, self.transform)
        target_transform = self.get_target_transform(index)
        return OmniglotDataset(data, character_name, transform=transform,
            target_transform=target_transform)
    @property
    def num_classes(self):
        # Number of base classes for the current split.
        return self._num_classes
    @property
    def data(self):
        # Open the HDF5 file on first access and cache the handle.
        if self._data is None:
            self._data = h5py.File(self.split_filename, 'r')
        return self._data
    @property
    def labels(self):
        # Load the per-split label list from JSON on first access.
        if self._labels is None:
            with open(self.split_filename_labels, 'r') as f:
                self._labels = json.load(f)
        return self._labels
    def _check_integrity(self):
        # Both the data file and the split's label file must exist.
        return (os.path.isfile(self.split_filename)
            and os.path.isfile(self.split_filename_labels))
    def close(self):
        # Release the HDF5 handle; safe to call multiple times.
        if self._data is not None:
            self._data.close()
            self._data = None
    def download(self):
        """Download the raw Omniglot zips and convert them to `data.hdf5`.

        Each character becomes an HDF5 dataset of shape (n_images, 105, 105),
        stored under '{alphabet}/{character}' inside the 'omniglot' group.
        The extracted image directories are removed afterwards.
        """
        import zipfile
        import shutil
        if self._check_integrity():
            return
        for name in self.zips_md5:
            zip_filename = '{0}.zip'.format(name)
            filename = os.path.join(self.root, zip_filename)
            if os.path.isfile(filename):
                continue
            url = '{0}/{1}'.format(self.download_url_prefix, zip_filename)
            download_url(url, self.root, zip_filename, self.zips_md5[name])
            with zipfile.ZipFile(filename, 'r') as f:
                f.extractall(self.root)
        filename = os.path.join(self.root, self.filename)
        with h5py.File(filename, 'w') as f:
            group = f.create_group('omniglot')
            for name in self.zips_md5:
                alphabets = list_dir(os.path.join(self.root, name))
                characters = [(name, alphabet, character) for alphabet in alphabets
                    for character in list_dir(os.path.join(self.root, name, alphabet))]
                for _, alphabet, character in characters:
                    filenames = glob.glob(os.path.join(self.root, name,
                        alphabet, character, '*.png'))
                    dataset = group.create_dataset('{0}/{1}'.format(alphabet,
                        character), (len(filenames), 105, 105), dtype='uint8')
                    for i, char_filename in enumerate(filenames):
                        # Greyscale + inversion: strokes become white on black.
                        image = Image.open(char_filename, mode='r').convert('L')
                        dataset[i] = ImageOps.invert(image)
                # Raw image tree is no longer needed once packed into HDF5.
                shutil.rmtree(os.path.join(self.root, name))
class MiniImagenetClassDataset(ClassDataset):
    """Class-level Mini-ImageNet dataset backed by per-split HDF5 files.

    Indexing yields one class (a `MiniImagenetDataset` over that class's
    images).  `download=True` fetches the raw archive from Google Drive and
    converts each split's pickle cache into HDF5 + a JSON label list.
    """
    folder = 'miniimagenet'
    # Google Drive ID from https://github.com/renmengye/few-shot-ssl-public
    gdrive_id = '16V_ZlkW4SsnNDtnGmaBRq2OoPmUOc5mY'
    gz_filename = 'mini-imagenet.tar.gz'
    gz_md5 = 'b38f1eb4251fb9459ecc8e7febf9b2eb'
    pkl_filename = 'mini-imagenet-cache-{0}.pkl'
    filename = '{0}_data.hdf5'
    filename_labels = '{0}_labels.json'
    def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,
            meta_split=None, transform=None, class_augmentations=None,
            download=False):
        super(MiniImagenetClassDataset, self).__init__(meta_train=meta_train,
            meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
            class_augmentations=class_augmentations)
        self.root = os.path.join(os.path.expanduser(root), self.folder)
        self.transform = transform
        self.split_filename = os.path.join(self.root,
            self.filename.format(self.meta_split))
        self.split_filename_labels = os.path.join(self.root,
            self.filename_labels.format(self.meta_split))
        # BUGFIX: initialize `_data_file` here.  `close()` reads it, and
        # previously raised AttributeError when called before the `data`
        # property had ever opened the HDF5 file.
        self._data_file = None
        self._data = None
        self._labels = None
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('MiniImagenet integrity check failed')
        self._num_classes = len(self.labels)
        print('# classes loaded for %s:' % self.meta_split, self._num_classes)
    def __getitem__(self, index):
        """Return the dataset of one class; the modulo lets class-augmented
        indices wrap back onto the base class list."""
        class_name = self.labels[index % self.num_classes]
        data = self.data[class_name]
        transform = self.get_transform(index, self.transform)
        target_transform = self.get_target_transform(index)
        return MiniImagenetDataset(data, class_name, transform=transform,
            target_transform=target_transform)
    @property
    def num_classes(self):
        # Number of base classes for the current split.
        return self._num_classes
    @property
    def data(self):
        # Open the HDF5 file on first access; keep both the file handle
        # (for close()) and the 'datasets' group (for indexing).
        if self._data is None:
            self._data_file = h5py.File(self.split_filename, 'r')
            self._data = self._data_file['datasets']
        return self._data
    @property
    def labels(self):
        # Load the per-split label list from JSON on first access.
        if self._labels is None:
            with open(self.split_filename_labels, 'r') as f:
                self._labels = json.load(f)
        return self._labels
    def _check_integrity(self):
        # Both the split's data file and label file must exist.
        return (os.path.isfile(self.split_filename)
            and os.path.isfile(self.split_filename_labels))
    def close(self):
        # Release the HDF5 handle; safe even if `data` was never accessed
        # (see the `_data_file = None` initialization in __init__).
        if self._data_file is not None:
            self._data_file.close()
            self._data_file = None
            self._data = None
    def download(self):
        """Download the raw archive and convert each split to HDF5 + JSON.

        The per-split pickle caches extracted from the tarball are deleted
        once converted.
        """
        import tarfile
        if self._check_integrity():
            return
        download_file_from_google_drive(self.gdrive_id, self.root,
            self.gz_filename, md5=self.gz_md5)
        filename = os.path.join(self.root, self.gz_filename)
        with tarfile.open(filename, 'r') as f:
            f.extractall(self.root)
        for split in ['train', 'val', 'test']:
            filename = os.path.join(self.root, self.filename.format(split))
            if os.path.isfile(filename):
                continue
            pkl_filename = os.path.join(self.root, self.pkl_filename.format(split))
            if not os.path.isfile(pkl_filename):
                raise IOError()
            with open(pkl_filename, 'rb') as f:
                data = pickle.load(f)
                images, classes = data['image_data'], data['class_dict']
            # One HDF5 dataset per class, holding that class's image rows.
            with h5py.File(filename, 'w') as f:
                group = f.create_group('datasets')
                for name, indices in classes.items():
                    group.create_dataset(name, data=images[indices])
            labels_filename = os.path.join(self.root, self.filename_labels.format(split))
            with open(labels_filename, 'w') as f:
                labels = sorted(list(classes.keys()))
                json.dump(labels, f)
            if os.path.isfile(pkl_filename):
                os.remove(pkl_filename)
class MiniImagenet(CombinationMetaDataset):
    """Mini-ImageNet meta-dataset: each task combines `num_classes_per_task`
    classes drawn from `MiniImagenetClassDataset`."""

    def __init__(self, root, num_classes_per_task=None, meta_train=False,
                 meta_val=False, meta_test=False, meta_split=None,
                 transform=None, target_transform=None, dataset_transform=None,
                 class_augmentations=None, download=False):
        # Build the class-level dataset first, then hand it to the
        # combination-meta-dataset machinery.
        class_dataset = MiniImagenetClassDataset(
            root,
            meta_train=meta_train,
            meta_val=meta_val,
            meta_test=meta_test,
            meta_split=meta_split,
            transform=transform,
            class_augmentations=class_augmentations,
            download=download)
        super(MiniImagenet, self).__init__(
            class_dataset,
            num_classes_per_task,
            target_transform=target_transform,
            dataset_transform=dataset_transform)
class Omniglot(CombinationMetaDataset):
    """Omniglot meta-dataset: each task combines `num_classes_per_task`
    character classes drawn from `OmniglotClassDataset`."""

    def __init__(self, root, num_classes_per_task=None, meta_train=False,
                 meta_val=False, meta_test=False, meta_split=None,
                 transform=None, target_transform=None,
                 dataset_transform=None, class_augmentations=None, download=False):
        # Build the class-level dataset first, then hand it to the
        # combination-meta-dataset machinery.
        class_dataset = OmniglotClassDataset(
            root,
            meta_train=meta_train,
            meta_val=meta_val,
            meta_test=meta_test,
            meta_split=meta_split,
            transform=transform,
            class_augmentations=class_augmentations,
            download=download)
        super(Omniglot, self).__init__(
            class_dataset,
            num_classes_per_task,
            target_transform=target_transform,
            dataset_transform=dataset_transform)
class RandClassSplitter(Splitter):
    """Task splitter with a *randomized* train-split size.

    For every task, the number of train samples per class is drawn uniformly
    from [min_train_per_class, max_train_per_class] (capped so that the
    smallest class can still provide the fixed `num_test_per_class` test
    samples on top).  The test split size is constant.
    """
    def __init__(self, min_train_per_class, max_train_per_class, num_test_per_class, shuffle=True):
        self.shuffle = shuffle
        num_samples_per_class = OrderedDict()
        # Each split maps to an inclusive (min, max) size range; 'test' is fixed.
        num_samples_per_class['train'] = (min_train_per_class, max_train_per_class)
        num_samples_per_class['test'] = (num_test_per_class, num_test_per_class)
        # Smallest number of samples every class must provide in total.
        self._min_samples_per_class = min_train_per_class + num_test_per_class
        super(RandClassSplitter, self).__init__(num_samples_per_class)
    def _rand_split_size(self, list_n_samples):
        """Draw one size per split, given the sample counts of all classes.

        The draw width `d` is capped by the smallest class so that
        train + test never exceeds any class's available samples.
        (`self.splits` is presumably populated by the `Splitter` base from
        the OrderedDict passed above — confirm against torchmeta.)
        """
        cur_size = OrderedDict()
        for split, range_split in self.splits.items():
            d = range_split[1] - range_split[0] + 1
            for num_samples in list_n_samples:
                d = min(d, num_samples - self._min_samples_per_class + 1)
            cur_size[split] = np.random.randint(d) + range_split[0]
        return cur_size
    def get_indices_task(self, task):
        """Split a single task's per-class indices into train/test lists."""
        all_class_indices = self._get_class_indices(task)
        indices = OrderedDict([(split, []) for split in self.splits])
        # One size per split, shared by all classes of this task.
        cur_size = self._rand_split_size([x[1] for x in all_class_indices.items()])
        for name, class_indices in all_class_indices.items():
            num_samples = len(class_indices)
            if num_samples < self._min_samples_per_class:
                raise ValueError('The number of samples for class `{0}` ({1}) '
                    'is smaller than the minimum number of samples per class '
                    'required by `ClassSplitter` ({2}).'.format(name,
                    num_samples, self._min_samples_per_class))
            if self.shuffle:
                # TODO: Replace torch.randperm with seed-friendly counterpart
                dataset_indices = torch.randperm(num_samples).tolist()
            # Consume consecutive chunks of the (possibly shuffled) order,
            # one chunk per split; `dataset_indices` is only read when shuffling.
            ptr = 0
            for split, num_split in cur_size.items():
                split_indices = (dataset_indices[ptr:ptr + num_split]
                    if self.shuffle else range(ptr, ptr + num_split))
                indices[split].extend([class_indices[idx] for idx in split_indices])
                ptr += num_split
        return indices
    def get_indices_concattask(self, task):
        """Split a ConcatTask: same logic, but indices are offset by the
        cumulative size of the preceding datasets."""
        indices = OrderedDict([(split, []) for split in self.splits])
        cum_size = 0
        cur_size = self._rand_split_size([len(x) for x in task.datasets])
        for dataset in task.datasets:
            num_samples = len(dataset)
            if num_samples < self._min_samples_per_class:
                raise ValueError('The number of samples for one class ({0}) '
                    'is smaller than the minimum number of samples per class '
                    'required by `ClassSplitter` ({1}).'.format(num_samples,
                    self._min_samples_per_class))
            if self.shuffle:
                # TODO: Replace torch.randperm with seed-friendly counterpart
                dataset_indices = torch.randperm(num_samples).tolist()
            ptr = 0
            for split, num_split in cur_size.items():
                split_indices = (dataset_indices[ptr:ptr + num_split]
                    if self.shuffle else range(ptr, ptr + num_split))
                # Offset into the concatenated index space.
                indices[split].extend([idx + cum_size for idx in split_indices])
                ptr += num_split
            cum_size += num_samples
        return indices
def _update_args(shots, ways, kwargs, shuffle=True, test_shots=None):
if 'num_classes_per_task' in kwargs:
assert ways == kwargs['num_classes_per_task']
del kwargs['num_classes_per_task']
if 'target_transform' not in kwargs:
kwargs['target_transform'] = Categorical(ways)
if 'class_augmentations' not in kwargs:
kwargs['class_augmentations'] = [Rotation([90, 180, 270])]
if isinstance(shots, int):
min_shot = max_shot = shots
else:
min_shot, max_shot = shots
if test_shots is None:
test_shots = min_shot
if 'dataset_transform' not in kwargs:
if min_shot == max_shot:
dataset_transform = ClassSplitter(shuffle=shuffle,
num_train_per_class=min_shot,
num_test_per_class=test_shots)
else:
dataset_transform = RandClassSplitter(shuffle=shuffle,
min_train_per_class=min_shot,
max_train_per_class=max_shot,
num_test_per_class=test_shots)
kwargs['dataset_transform'] = dataset_transform
return kwargs
def omniglot(folder, shots, ways, shuffle=True, test_shots=None,
        seed=None, **kwargs):
    """Build an Omniglot few-shot meta-dataset.

    `shots` may be an int or a (min, max) pair (randomized train shots).
    A 28x28 tensor transform is installed unless the caller supplies one.
    """
    if 'transform' not in kwargs:
        # Default preprocessing: resize to 28x28 and convert to tensor.
        kwargs['transform'] = Compose([Resize(28), ToTensor()])
    updated = _update_args(shots, ways, kwargs, shuffle, test_shots)
    meta_dataset = Omniglot(folder, num_classes_per_task=ways, **updated)
    meta_dataset.seed(seed)
    return meta_dataset
def miniimagenet(folder, shots, ways, shuffle=True, test_shots=None,
        seed=None, **kwargs):
    """Build a Mini-ImageNet few-shot meta-dataset.

    `shots` may be an int or a (min, max) pair (randomized train shots).
    An 84x84 tensor transform is installed unless the caller supplies one.
    """
    if 'transform' not in kwargs:
        # Default preprocessing: resize to 84x84 and convert to tensor.
        kwargs['transform'] = Compose([Resize(84), ToTensor()])
    updated = _update_args(shots, ways, kwargs, shuffle, test_shots)
    meta_dataset = MiniImagenet(folder, num_classes_per_task=ways, **updated)
    meta_dataset.seed(seed)
    return meta_dataset
from torch.utils.data.dataloader import default_collate
from torch.utils.data.dataset import Dataset as TorchDataset
def batch_list_collate(collate_fn):
    """Wrap `collate_fn` so a batch of tasks becomes an OrderedDict of lists.

    Each task is either a torch Dataset (collated directly) or an
    OrderedDict of sub-tasks, e.g. {'train': ..., 'test': ...} (collated
    recursively).  The returned function maps each split name to a list
    with one collated entry per task in the batch.
    """
    def _collate_single(task):
        # Leaf dataset: gather every sample, then collate them together.
        if isinstance(task, TorchDataset):
            samples = [task[i] for i in range(len(task))]
            return collate_fn(samples)
        # Dict of sub-tasks: recurse per split, preserving key order.
        if isinstance(task, OrderedDict):
            return OrderedDict(
                (name, _collate_single(sub)) for name, sub in task.items())
        raise NotImplementedError()

    def _collate_batch(batch):
        collated = [_collate_single(task) for task in batch]
        assert isinstance(collated[0], OrderedDict)
        # Regroup: one list per split key, ordered like the first task.
        return OrderedDict(
            (key, [item[key] for item in collated]) for key in collated[0])

    return _collate_batch
def no_collate(batch):
    """Identity collate function: return the list of tasks unchanged."""
    return batch
class ListMetaDataLoader(MetaDataLoader):
    """Meta data loader whose batches are OrderedDicts mapping each split
    name to a *list* of collated tasks (one entry per task in the batch),
    via `batch_list_collate`."""

    def __init__(self, dataset, batch_size=1, shuffle=True, num_workers=0,
            pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None):
        super(ListMetaDataLoader, self).__init__(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            sampler=None,
            batch_sampler=None,
            num_workers=num_workers,
            # Collate each task with the default collate function, then
            # regroup per split into lists.
            collate_fn=batch_list_collate(default_collate),
            pin_memory=pin_memory,
            drop_last=drop_last,
            timeout=timeout,
            worker_init_fn=worker_init_fn)
| [
"tarfile.open",
"torch.randperm",
"zipfile.ZipFile",
"torchmeta.transforms.Categorical",
"torchmeta.datasets.omniglot.OmniglotDataset",
"os.remove",
"torchvision.datasets.utils.download_url",
"PIL.ImageOps.invert",
"torchvision.transforms.ToTensor",
"torchvision.datasets.utils.download_file_from_g... | [((1835, 1873), 'os.path.join', 'os.path.join', (['self.root', 'self.filename'], {}), '(self.root, self.filename)\n', (1847, 1873), False, 'import os\n'), ((2615, 2713), 'torchmeta.datasets.omniglot.OmniglotDataset', 'OmniglotDataset', (['data', 'character_name'], {'transform': 'transform', 'target_transform': 'target_transform'}), '(data, character_name, transform=transform, target_transform\n =target_transform)\n', (2630, 2713), False, 'from torchmeta.datasets.omniglot import OmniglotDataset\n'), ((4021, 4059), 'os.path.join', 'os.path.join', (['self.root', 'self.filename'], {}), '(self.root, self.filename)\n', (4033, 4059), False, 'import os\n'), ((6805, 6903), 'torchmeta.datasets.miniimagenet.MiniImagenetDataset', 'MiniImagenetDataset', (['data', 'class_name'], {'transform': 'transform', 'target_transform': 'target_transform'}), '(data, class_name, transform=transform, target_transform\n =target_transform)\n', (6824, 6903), False, 'from torchmeta.datasets.miniimagenet import MiniImagenetDataset\n'), ((7821, 7918), 'torchvision.datasets.utils.download_file_from_google_drive', 'download_file_from_google_drive', (['self.gdrive_id', 'self.root', 'self.gz_filename'], {'md5': 'self.gz_md5'}), '(self.gdrive_id, self.root, self.gz_filename,\n md5=self.gz_md5)\n', (7852, 7918), False, 'from torchvision.datasets.utils import list_dir, download_url, download_file_from_google_drive\n'), ((7947, 7988), 'os.path.join', 'os.path.join', (['self.root', 'self.gz_filename'], {}), '(self.root, self.gz_filename)\n', (7959, 7988), False, 'import os\n'), ((10804, 10817), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10815, 10817), False, 'from collections import OrderedDict\n'), ((11203, 11216), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11214, 11216), False, 'from collections import OrderedDict\n'), ((11652, 11703), 'collections.OrderedDict', 'OrderedDict', (['[(split, []) for split in 
self.splits]'], {}), '([(split, []) for split in self.splits])\n', (11663, 11703), False, 'from collections import OrderedDict\n'), ((12845, 12896), 'collections.OrderedDict', 'OrderedDict', (['[(split, []) for split in self.splits]'], {}), '([(split, []) for split in self.splits])\n', (12856, 12896), False, 'from collections import OrderedDict\n'), ((14267, 14284), 'torchmeta.transforms.Categorical', 'Categorical', (['ways'], {}), '(ways)\n', (14278, 14284), False, 'from torchmeta.transforms import Categorical, ClassSplitter, Rotation, Splitter\n'), ((16716, 16729), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16727, 16729), False, 'from collections import OrderedDict\n'), ((1730, 1754), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (1748, 1754), False, 'import os\n'), ((2887, 2922), 'h5py.File', 'h5py.File', (['self.split_filename', '"""r"""'], {}), "(self.split_filename, 'r')\n", (2896, 2922), False, 'import h5py\n'), ((3201, 3236), 'os.path.isfile', 'os.path.isfile', (['self.split_filename'], {}), '(self.split_filename)\n', (3215, 3236), False, 'import os\n'), ((3253, 3295), 'os.path.isfile', 'os.path.isfile', (['self.split_filename_labels'], {}), '(self.split_filename_labels)\n', (3267, 3295), False, 'import os\n'), ((3650, 3687), 'os.path.join', 'os.path.join', (['self.root', 'zip_filename'], {}), '(self.root, zip_filename)\n', (3662, 3687), False, 'import os\n'), ((3703, 3727), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (3717, 3727), False, 'import os\n'), ((3842, 3905), 'torchvision.datasets.utils.download_url', 'download_url', (['url', 'self.root', 'zip_filename', 'self.zips_md5[name]'], {}), '(url, self.root, zip_filename, self.zips_md5[name])\n', (3854, 3905), False, 'from torchvision.datasets.utils import list_dir, download_url, download_file_from_google_drive\n'), ((4073, 4097), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (4082, 4097), False, 
'import h5py\n'), ((5897, 5921), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (5915, 5921), False, 'import os\n'), ((7082, 7117), 'h5py.File', 'h5py.File', (['self.split_filename', '"""r"""'], {}), "(self.split_filename, 'r')\n", (7091, 7117), False, 'import h5py\n'), ((7449, 7484), 'os.path.isfile', 'os.path.isfile', (['self.split_filename'], {}), '(self.split_filename)\n', (7463, 7484), False, 'import os\n'), ((7501, 7543), 'os.path.isfile', 'os.path.isfile', (['self.split_filename_labels'], {}), '(self.split_filename_labels)\n', (7515, 7543), False, 'import os\n'), ((8002, 8029), 'tarfile.open', 'tarfile.open', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (8014, 8029), False, 'import tarfile\n'), ((8211, 8235), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (8225, 8235), False, 'import os\n'), ((9058, 9086), 'os.path.isfile', 'os.path.isfile', (['pkl_filename'], {}), '(pkl_filename)\n', (9072, 9086), False, 'import os\n'), ((14370, 14394), 'torchmeta.transforms.Rotation', 'Rotation', (['[90, 180, 270]'], {}), '([90, 180, 270])\n', (14378, 14394), False, 'from torchmeta.transforms import Categorical, ClassSplitter, Rotation, Splitter\n'), ((14673, 14768), 'torchmeta.transforms.ClassSplitter', 'ClassSplitter', ([], {'shuffle': 'shuffle', 'num_train_per_class': 'min_shot', 'num_test_per_class': 'test_shots'}), '(shuffle=shuffle, num_train_per_class=min_shot,\n num_test_per_class=test_shots)\n', (14686, 14768), False, 'from torchmeta.transforms import Categorical, ClassSplitter, Rotation, Splitter\n'), ((3111, 3123), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3120, 3123), False, 'import json\n'), ((3924, 3954), 'zipfile.ZipFile', 'zipfile.ZipFile', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (3939, 3954), False, 'import zipfile\n'), ((7359, 7371), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7368, 7371), False, 'import json\n'), ((8366, 8394), 'os.path.isfile', 'os.path.isfile', 
(['pkl_filename'], {}), '(pkl_filename)\n', (8380, 8394), False, 'import os\n'), ((8499, 8513), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8510, 8513), False, 'import pickle\n'), ((8605, 8629), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (8614, 8629), False, 'import h5py\n'), ((9021, 9041), 'json.dump', 'json.dump', (['labels', 'f'], {}), '(labels, f)\n', (9030, 9041), False, 'import json\n'), ((9104, 9127), 'os.remove', 'os.remove', (['pkl_filename'], {}), '(pkl_filename)\n', (9113, 9127), False, 'import os\n'), ((11475, 11495), 'numpy.random.randint', 'np.random.randint', (['d'], {}), '(d)\n', (11492, 11495), True, 'import numpy as np\n'), ((15430, 15440), 'torchvision.transforms.Resize', 'Resize', (['(28)'], {}), '(28)\n', (15436, 15440), False, 'from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor\n'), ((15442, 15452), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (15450, 15452), False, 'from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor\n'), ((15811, 15821), 'torchvision.transforms.Resize', 'Resize', (['(84)'], {}), '(84)\n', (15817, 15821), False, 'from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor\n'), ((15823, 15833), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (15831, 15833), False, 'from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor\n'), ((4228, 4257), 'os.path.join', 'os.path.join', (['self.root', 'name'], {}), '(self.root, name)\n', (4240, 4257), False, 'import os\n'), ((5013, 5042), 'os.path.join', 'os.path.join', (['self.root', 'name'], {}), '(self.root, name)\n', (5025, 5042), False, 'import os\n'), ((4532, 4591), 'os.path.join', 'os.path.join', (['self.root', 'name', 'alphabet', 'character', '"""*.png"""'], {}), "(self.root, name, alphabet, character, '*.png')\n", (4544, 4591), False, 'import os\n'), ((4959, 4981), 'PIL.ImageOps.invert', 'ImageOps.invert', (['image'], {}), 
'(image)\n', (4974, 4981), False, 'from PIL import Image, ImageOps\n'), ((12388, 12415), 'torch.randperm', 'torch.randperm', (['num_samples'], {}), '(num_samples)\n', (12402, 12415), False, 'import torch\n'), ((13554, 13581), 'torch.randperm', 'torch.randperm', (['num_samples'], {}), '(num_samples)\n', (13568, 13581), False, 'import torch\n'), ((4389, 4428), 'os.path.join', 'os.path.join', (['self.root', 'name', 'alphabet'], {}), '(self.root, name, alphabet)\n', (4401, 4428), False, 'import os\n'), ((4873, 4908), 'PIL.Image.open', 'Image.open', (['char_filename'], {'mode': '"""r"""'}), "(char_filename, mode='r')\n", (4883, 4908), False, 'from PIL import Image, ImageOps\n')] |
import tensorflow as tf
import numpy as np
import tensorflow.keras.layers as tfkl
from tensorflow.keras import backend as K
import tensorflow_probability as tfp
# Short aliases for the TensorFlow Probability sub-modules used below.
tfd = tfp.distributions
tfpl = tfp.layers
tfb = tfp.bijectors
# Inverse softplus of 1: softplus(scale_shift) == 1, so shifting a raw scale
# parameter by this constant makes it start at unit scale.
scale_shift = np.log(np.exp(1) - 1).astype(np.float32)
def test_make_mvn_prior():
    """Exercise make_mvn_prior over every combination of trainable mean,
    trainable variance and off-diagonal covariance, checking that the
    trainability of the returned distribution matches the request."""
    from indl.model.tfp import make_mvn_prior

    def _check(latent_size=5, init_std=0.1, trainable_mean=True, trainable_var=True, offdiag=False):
        prior = make_mvn_prior(latent_size, init_std=init_std,
                               trainable_mean=trainable_mean, trainable_var=trainable_var,
                               offdiag=offdiag)
        # The location is a tf.Variable exactly when the mean is trainable.
        assert isinstance(prior.loc, tf.Variable) == trainable_mean
        # The scale exposes trainable variables exactly when the variance is trainable.
        if offdiag:
            assert hasattr(prior.scale_tril, 'trainable_variables') == trainable_var
        else:
            assert (len(prior.scale.trainable_variables) > 0) == trainable_var
        # Non-trainable parameters must keep their initialization values.
        if not trainable_var:
            assert np.all(prior.stddev().numpy() == init_std)
        if not trainable_mean:
            assert np.all(prior.mean().numpy() == 0.0)

    for mean_flag in (True, False):
        for var_flag in (True, False):
            for offd_flag in (True, False):
                _check(trainable_mean=mean_flag, trainable_var=var_flag, offdiag=offd_flag)
def _run_assertions_on_qdist(q_dist, inputs, input_dim, dist_dim, batch_size):
    """Shared checks for a variational posterior: q_dist must be a
    MultivariateNormalDiag which, when driven through a Keras model with a
    random batch, produces positive stddevs and NaN-free samples of the
    expected shape."""
    assert isinstance(q_dist, tfd.MultivariateNormalDiag)
    # Build a model around the symbolic distribution and feed a concrete batch.
    model = tf.keras.Model(inputs=inputs, outputs=q_dist)
    batch = tf.random.uniform((batch_size, input_dim))
    q_out = model(batch)
    assert isinstance(q_out, tfd.MultivariateNormalDiag)
    stddev_vals = q_out.stddev()
    assert stddev_vals.shape.as_list() == [batch_size, dist_dim]
    assert np.all(stddev_vals.numpy() > 0)
    assert q_out.sample().shape.as_list() == [batch_size, dist_dim]
    assert not np.any(np.isnan(q_out.sample().numpy()))
def test_make_mvn_dist_fn():
    """make_mvn_dist_fn must return a callable factory plus tensor parameters
    that can feed a DistributionLambda producing a valid MVN-diag posterior."""
    from indl.model.tfp import make_mvn_dist_fn

    n_features, n_latent, n_batch = 4, 3, 8
    # Symbolic (placeholder) input path.
    placeholder = tfkl.Input(shape=(n_features,))
    make_dist_fn, dist_params = make_mvn_dist_fn(placeholder, n_latent, shift_std=0.1)
    # The factory must be callable and its parameters symbolic tensors.
    assert hasattr(make_dist_fn, '__call__')
    assert isinstance(dist_params[0], tf.Tensor)
    assert isinstance(dist_params[1], tf.Tensor)
    # Feeding the parameters through a DistributionLambda yields the q-dist.
    q_dist = tfpl.DistributionLambda(make_distribution_fn=make_dist_fn)(dist_params)
    _run_assertions_on_qdist(q_dist, placeholder, n_features, n_latent, n_batch)
def test_make_variational():
    """make_variational on a Keras placeholder must produce a distribution
    that passes the shared q-distribution assertions."""
    from indl.model.tfp import make_variational

    n_features, n_latent, n_batch = 4, 3, 8
    placeholder = tfkl.Input(shape=(n_features,))
    variational = make_variational(placeholder, n_latent, init_std=0.1)
    _run_assertions_on_qdist(variational, placeholder, n_features, n_latent, n_batch)
| [
"tensorflow.random.uniform",
"tensorflow.keras.layers.Input",
"numpy.exp",
"indl.model.tfp.make_mvn_dist_fn",
"indl.model.tfp.make_variational",
"tensorflow.keras.Model",
"indl.model.tfp.make_mvn_prior"
] | [((1480, 1525), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'q_dist'}), '(inputs=inputs, outputs=q_dist)\n', (1494, 1525), True, 'import tensorflow as tf\n'), ((1545, 1587), 'tensorflow.random.uniform', 'tf.random.uniform', (['(batch_size, input_dim)'], {}), '((batch_size, input_dim))\n', (1562, 1587), True, 'import tensorflow as tf\n'), ((2100, 2130), 'tensorflow.keras.layers.Input', 'tfkl.Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (2110, 2130), True, 'import tensorflow.keras.layers as tfkl\n'), ((2188, 2237), 'indl.model.tfp.make_mvn_dist_fn', 'make_mvn_dist_fn', (['inputs', 'dist_dim'], {'shift_std': '(0.1)'}), '(inputs, dist_dim, shift_std=0.1)\n', (2204, 2237), False, 'from indl.model.tfp import make_mvn_dist_fn\n'), ((2915, 2945), 'tensorflow.keras.layers.Input', 'tfkl.Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (2925, 2945), True, 'import tensorflow.keras.layers as tfkl\n'), ((2959, 3007), 'indl.model.tfp.make_variational', 'make_variational', (['inputs', 'dist_dim'], {'init_std': '(0.1)'}), '(inputs, dist_dim, init_std=0.1)\n', (2975, 3007), False, 'from indl.model.tfp import make_variational\n'), ((470, 598), 'indl.model.tfp.make_mvn_prior', 'make_mvn_prior', (['latent_size'], {'init_std': 'init_std', 'trainable_mean': 'trainable_mean', 'trainable_var': 'trainable_var', 'offdiag': 'offdiag'}), '(latent_size, init_std=init_std, trainable_mean=\n trainable_mean, trainable_var=trainable_var, offdiag=offdiag)\n', (484, 598), False, 'from indl.model.tfp import make_mvn_prior\n'), ((244, 253), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (250, 253), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.linear_model import LinearRegression as LinR, Lasso as LasR
def sample_data(X_cov, M, noise_std, n_data, rng):
    """Draw a standardized Gaussian design matrix and noisy linear responses.

    Parameters
    ----------
    X_cov : ndarray
        Covariance matrix for the feature distribution.
    M : ndarray
        True coefficient vector; its length fixes the feature dimension.
    noise_std : float
        Standard deviation of the additive Gaussian observation noise.
    n_data : int
        Number of samples to draw.
    rng : numpy RandomState-like
        Random source providing ``multivariate_normal`` and ``randn``.

    Returns
    -------
    (X, Y) : tuple of ndarray
        Feature matrix with unit per-column std, and noisy targets X @ M + noise.
    """
    mean_vec = np.zeros_like(M)
    features = rng.multivariate_normal(mean_vec, X_cov, n_data)
    # Standardize each column to unit standard deviation.
    features = features / features.std(axis=0, keepdims=True)
    targets = features.dot(M) + noise_std * rng.randn(n_data)
    return features, targets
def fit_linear(X, Y):
    """Return ordinary least-squares coefficients with no intercept term."""
    ols = LinR(fit_intercept=False)
    ols.fit(X, Y)
    return ols.coef_
def fit_lasso(X, Y, alpha):
    """Return lasso coefficients (L1 strength ``alpha``) with no intercept term."""
    lasso = LasR(alpha=alpha, fit_intercept=False)
    lasso.fit(X, Y)
    return lasso.coef_
def fit_lasso_linear(X, Y, alpha):
    """Two-stage fit: use the lasso to select a support, then refit the
    selected coefficients by ordinary least squares.

    If the lasso keeps every feature, a plain OLS fit on all of X is
    returned; if it keeps none, the all-zero vector is returned.
    """
    sparse_coef = fit_lasso(X, Y, alpha)
    n_selected = np.count_nonzero(sparse_coef)
    # Dense solution: the lasso kept every feature, so just refit on all of X.
    if n_selected >= X.shape[1]:
        return fit_linear(X, Y)
    refit = np.zeros_like(sparse_coef)
    if n_selected == 0:
        return refit
    support = np.nonzero(sparse_coef)[0]
    # OLS refit restricted to the lasso-selected columns.
    refit[support] = fit_linear(X[:, support], Y)
    return refit
def lin_cost(X, Y, M):
    """Halved mean-squared residual of the linear model Y ~ X @ M.

    For a 1-D ``M`` a scalar is returned; for a 2-D ``M`` (one coefficient
    vector per row) an array with one cost value per row is returned.
    """
    def _single(coef):
        residual = Y - X.dot(coef)
        return np.mean(residual ** 2) / 2.0

    if M.ndim <= 1:
        return _single(M)
    return np.array([_single(row) for row in M])
def abs_cost(M, alpha):
    """Return the L1 (lasso) penalty ``alpha * sum(|M|)``.

    Parameters
    ----------
    M : ndarray
        Coefficient vector (1-D) or a stack of coefficient vectors
        (2-D, one vector per row).
    alpha : float
        L1 regularization strength.

    Returns
    -------
    float or ndarray
        A scalar for 1-D ``M``; for 2-D ``M``, one penalty value per row.
    """
    if M.ndim > 1:
        # One penalty per row; equivalent to the per-row loop but vectorized.
        return alpha * np.sum(np.abs(M), axis=1)
    # Bug fix: the original 1-D branch referenced the undefined name ``Mi``
    # (a leftover loop variable), raising NameError for any 1-D input.
    return alpha * np.sum(np.abs(M))
def las_cost(X, Y, M, alpha):
    """Full lasso objective: halved mean-squared residual plus the L1 penalty."""
    data_term = lin_cost(X, Y, M)
    penalty_term = abs_cost(M, alpha)
    return data_term + penalty_term
| [
"sklearn.linear_model.Lasso",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.nonzero",
"numpy.zeros_like",
"sklearn.linear_model.LinearRegression"
] | [((176, 192), 'numpy.zeros_like', 'np.zeros_like', (['M'], {}), '(M)\n', (189, 192), True, 'import numpy as np\n'), ((559, 580), 'numpy.count_nonzero', 'np.count_nonzero', (['las'], {}), '(las)\n', (575, 580), True, 'import numpy as np\n'), ((612, 630), 'numpy.zeros_like', 'np.zeros_like', (['las'], {}), '(las)\n', (625, 630), True, 'import numpy as np\n'), ((952, 963), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (960, 963), True, 'import numpy as np\n'), ((1215, 1226), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1223, 1226), True, 'import numpy as np\n'), ((642, 663), 'numpy.count_nonzero', 'np.count_nonzero', (['las'], {}), '(las)\n', (658, 663), True, 'import numpy as np\n'), ((709, 724), 'numpy.nonzero', 'np.nonzero', (['las'], {}), '(las)\n', (719, 724), True, 'import numpy as np\n'), ((346, 371), 'sklearn.linear_model.LinearRegression', 'LinR', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (350, 371), True, 'from sklearn.linear_model import LinearRegression as LinR, Lasso as LasR\n'), ((428, 466), 'sklearn.linear_model.Lasso', 'LasR', ([], {'alpha': 'alpha', 'fit_intercept': '(False)'}), '(alpha=alpha, fit_intercept=False)\n', (432, 466), True, 'from sklearn.linear_model import LinearRegression as LinR, Lasso as LasR\n')] |
"""
------------------------------------------------------
This file is part of RobustGaussianFittingLibrary,
a free library WITHOUT ANY WARRANTY
Copyright: 2017-2020 LaTrobe University Melbourne,
2019-2020 Deutsches Elektronen-Synchrotron
------------------------------------------------------
"""
import numpy as np
from multiprocessing import Process, Queue, cpu_count
from .basic import fitValueTensor, fitLineTensor, fitBackgroundTensor, fitBackgroundRadially
from .misc import textProgBar
def bigTensor2SmallsInds(inTensor_shape, numRowSegs, numClmSegs):
    """
    Compute index bounds that partition a big F x R x C tensor into a grid
    of numRowSegs x numClmSegs spatial patches (the frame axis is untouched).

    Output:
        rowClmInds: (numRowSegs*numClmSegs, 4) int array; each row holds
                    [row start, row end, column start, column end] of one
                    patch, ordered column-fastest (all column segments of
                    row segment 0 first, then row segment 1, ...).
        segInds:    (numRowSegs*numClmSegs, 2) int array with the
                    (row-segment, column-segment) grid position of each patch.
    Note: boundaries come from np.linspace, so when the sizes do not divide
    evenly one of the patches will be larger than the others.
    """
    rowBounds = np.linspace(0, inTensor_shape[1], numRowSegs + 1, dtype='int')
    clmBounds = np.linspace(0, inTensor_shape[2], numClmSegs + 1, dtype='int')
    rowClmInds = np.zeros((numRowSegs * numClmSegs, 4), dtype='int')
    segInds = np.zeros((numRowSegs * numClmSegs, 2), dtype='int')
    patch = 0
    for rSeg in range(numRowSegs):
        for cSeg in range(numClmSegs):
            rowClmInds[patch] = (rowBounds[rSeg], rowBounds[rSeg + 1],
                                 clmBounds[cSeg], clmBounds[cSeg + 1])
            segInds[patch] = (rSeg, cSeg)
            patch += 1
    return(rowClmInds, segInds)
################################################################################################
################################### Value fitting library #######################################
def fitValueTensor_MultiProcFunc(aQ,
                                 partCnt, inTensor, inWeights,
                                 topKthPerc, bottomKthPerc,
                                 MSSE_LAMBDA, optIters,
                                 minimumResidual, downSampledSize):
    """Worker for fitValueTensor_MultiProc: run fitValueTensor on one spatial
    patch of the input tensor and push [partCnt, result] onto the shared
    queue aQ, so the parent can place the patch back at the right location.
    All fitting parameters are forwarded unchanged to fitValueTensor.
    """
    patchResult = fitValueTensor(inTensor=inTensor,
                                 inWeights=inWeights,
                                 topKthPerc=topKthPerc,
                                 bottomKthPerc=bottomKthPerc,
                                 MSSE_LAMBDA=MSSE_LAMBDA,
                                 optIters=optIters,
                                 minimumResidual=minimumResidual,
                                 downSampledSize=downSampledSize)
    aQ.put([partCnt, patchResult])
def fitValueTensor_MultiProc(inTensor,
                             inWeights = None,
                             numRowSegs = 1,
                             numClmSegs = 1,
                             topKthPerc = 0.5,
                             bottomKthPerc = 0.4,
                             MSSE_LAMBDA = 3.0,
                             showProgress = False,
                             optIters = 12,
                             minimumResidual = 0,
                             downSampledSize = 400):
    """"Does fitValueTensor in RGFLib.py using multiprocessing
    Input arguments
    ~~~~~~~~~~~~~~~
        inTensor: n_F x n_R x n_C Tensor of n_R x n_C vectors, each with size n_F, float32
        inWeights: n_F x n_R x n_C Tensor of n_R x n_C vectors, each with size n_F, float32
        numRowSegs, numClmSegs: if you have 80 processors, and the image is 512x128, then set them to 7, 11. This way, the patches are almost equal and the processes are spread among the cores. It has no mathematical value.
        MSSE_LAMBDA : How far (normalized by STD of the Gaussian) from the
                        mean of the Gaussian, data is considered inlier.
        topKthPerc: A rough but certain guess of portion of inliers, between 0 and 1, e.g. 0.5.
                    Choose the topKthPerc to be as high as you are sure the portion of data is inlier.
                    if you are not sure at all, refer to the note above this code.
                    default : 0.5
        bottomKthPerc: We'd like to make a sample out of worst inliers from data points that are
                       between bottomKthPerc and topKthPerc of sorted residuals.
                       set it to 0.9*topKthPerc, if N is number of data points, then make sure that
                       (topKthPerc - bottomKthPerc)*N>4,
                       it is best if bottomKthPerc*N>12 then MSSE makes sense
                       otherwise the code may return non-robust results.
        optIters: number of iterations of FLKOS for this fitting
            value 0: returns total mean and total STD
            value 1: returns topKthPerc percentile and the scale by MSSE.
            value 8 and above is recommended for optimization according
                to Newton method
            default : 12
        minimumResidual : minimum fitting error to initialize MSSE (dtype = 'float32')
            default : 0
        downSampledSize: the data will be downsampled regualrly starting from
            position 0 to have this length. This is used for finding the
            parameter model and not to find the noise scale. The entire
            inVec will be used to find the noise scale. If you'd like to use
            less part of data for edtimation of the noise which is not recommended
            then down sample the whole thing before you send it to this function
            and set the downSampledSize to inf.
            default: np.iinfo('uint32').max
    Output
    ~~~~~~
        2 x n_R x n_C float32 values, out[0] is mean and out[1] is the STDs for each element
    """
    if(inWeights is None):
        # Unweighted fit: every sample counts equally.
        inWeights = np.ones(shape = inTensor.shape, dtype = 'float32')
    # Partition the spatial plane into patches; each patch is fitted by its
    # own worker process and copied back into place using these indices.
    rowClmInds, segInds = bigTensor2SmallsInds(inTensor.shape, numRowSegs, numClmSegs)
    numSegs = rowClmInds.shape[0]
    # Keep one core free for this dispatcher process.
    myCPUCount = cpu_count()-1
    aQ = Queue()
    numBusyCores = 0
    numProc = numSegs
    numWiating = numSegs    # (sic: "waiting") patches not yet submitted
    numDone = 0
    partCnt = 0
    firstProcessed = 0
    # out[0] -> per-pixel robust mean, out[1] -> per-pixel robust STD.
    modelParamsMap = np.zeros((2, inTensor.shape[1], inTensor.shape[2]), dtype='float32')
    # Dispatcher loop: alternately drain finished patches from the queue and
    # launch new workers until every patch has been collected.
    while(numDone<numProc):
        if (not aQ.empty()):
            # A worker finished: write its 2 x rows x cols result back into
            # the output map at the patch location it was tagged with.
            aQElement = aQ.get()
            _partCnt = aQElement[0]
            modelParamsMap[:,
                           rowClmInds[_partCnt, 0]: rowClmInds[_partCnt, 1],
                           rowClmInds[_partCnt, 2]: rowClmInds[_partCnt, 3] ] = aQElement[1]
            numDone += 1
            numBusyCores -= 1
            if(showProgress):
                if(firstProcessed==0):
                    # Progress bar is created lazily on the first result.
                    # NOTE(review): if numProc == 0 and showProgress is True,
                    # pBar is never created and pBar.end() below would raise.
                    pBar = textProgBar(numProc-1, title = 'Calculationg Values')
                    firstProcessed = 1
                else:
                    pBar.go(1)
        # Submit another patch while work remains and a core is available.
        # NOTE(review): '&' is a bitwise AND of two bools here; it works for
        # plain bools but 'and' would be the conventional choice.
        if((numWiating>0) & (numBusyCores < myCPUCount)):
            Process(target=fitValueTensor_MultiProcFunc,
                    args=(aQ, partCnt,
                          np.squeeze(inTensor[ :,
                                               rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1],
                                               rowClmInds[partCnt, 2]:rowClmInds[partCnt, 3] ]),
                          np.squeeze(inWeights[ :,
                                               rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1],
                                               rowClmInds[partCnt, 2]:rowClmInds[partCnt, 3] ]),
                          topKthPerc,
                          bottomKthPerc,
                          MSSE_LAMBDA,
                          optIters,
                          minimumResidual,
                          downSampledSize)).start()
            partCnt += 1
            numWiating -= 1
            numBusyCores += 1
    if(showProgress):
        pBar.end()
    return (modelParamsMap)
################################################################################################
################################### Line fitting library #######################################
def fitLineTensor_MultiProcFunc(aQ, partCnt,
                                inX, inY,
                                topKthPerc,
                                bottomKthPerc,
                                MSSE_LAMBDA):
    """Worker for fitLineTensor_MultiProc: run fitLineTensor on one spatial
    patch (x and y data) and push [partCnt, result] onto the shared queue aQ
    so the parent can place the patch at the correct location."""
    lineParams = fitLineTensor(inX, inY,
                               topKthPerc,
                               bottomKthPerc,
                               MSSE_LAMBDA)
    aQ.put([partCnt, lineParams])
def fitLineTensor_MultiProc(inTensorX, inTensorY,
                            numRowSegs = 1,
                            numClmSegs = 1,
                            topKthPerc = 0.5,
                            bottomKthPerc = 0.4,
                            MSSE_LAMBDA = 3.0,
                            showProgress = False):
    """"Does fitLineTensor in RGFLib.py using multiprocessing
    Input arguments
    ~~~~~~~~~~~~~~~
        inX: Tensor of data points x, n_F x n_R x n_C
        inY: vector of data points y, n_F x n_R x n_C
        numRowSegs, numClmSegs: if you have 80 processors, and the image is 512x128, then set them to 7, 11. This way, the patches are almost equal and the processes are spread among the cores. It has no mathematical value.
        MSSE_LAMBDA : How far (normalized by STD of the Gaussian) from the
                        mean of the Gaussian, data is considered inlier.
        topKthPerc: A rough but certain guess of portion of inliers, between 0 and 1, e.g. 0.5.
                    Choose the topKthPerc to be as high as you are sure the portion of data is inlier.
                    if you are not sure at all, refer to the note above this code.
                    default : 0.5
        bottomKthPerc: We'd like to make a sample out of worst inliers from data points that are
                       between bottomKthPerc and topKthPerc of sorted residuals.
                       set it to 0.9*topKthPerc, if N is number of data points, then make sure that
                       (topKthPerc - bottomKthPerc)*N>4,
                       it is best if bottomKthPerc*N>12 then MSSE makes sense
                       otherwise the code may return non-robust results.
    Output
    ~~~~~~
        3 x n_R x n_C, a, Rmean, RSTD fpr each pixel
    """
    # Partition the spatial plane into patches; each patch is fitted by its
    # own worker process (segInds is not needed here, hence the underscore).
    rowClmInds, _ = bigTensor2SmallsInds(inTensorX.shape, numRowSegs, numClmSegs)
    numSegs = rowClmInds.shape[0]
    # Keep one core free for this dispatcher process.
    myCPUCount = cpu_count()-1
    aQ = Queue()
    numBusyCores = 0
    numProc = numSegs
    numWiating = numSegs    # (sic: "waiting") patches not yet submitted
    numDone = 0
    partCnt = 0
    firstProcessed = 0
    # Three output planes per pixel (per the docstring: a, Rmean, RSTD).
    modelParamsMap = np.zeros((3, inTensorX.shape[1], inTensorX.shape[2]), dtype='float32')
    # Dispatcher loop: alternately drain finished patches from the queue and
    # launch new workers until every patch has been collected.
    while(numDone<numProc):
        if (not aQ.empty()):
            # A worker finished: write its result back into the output map at
            # the patch location it was tagged with.
            aQElement = aQ.get()
            _partCnt = aQElement[0]
            modelParamsMap[:,
                           rowClmInds[_partCnt, 0]: rowClmInds[_partCnt, 1],
                           rowClmInds[_partCnt, 2]: rowClmInds[_partCnt, 3] ] = aQElement[1]
            numDone += 1
            numBusyCores -= 1
            if(showProgress):
                if(firstProcessed==0):
                    # Progress bar is created lazily on the first result.
                    pBar = textProgBar(numProc-1, title = 'Calculationg line parameters')
                    firstProcessed = 1
                else:
                    pBar.go(1)
        # Submit another patch while work remains and a core is available.
        # NOTE(review): '&' is a bitwise AND of two bools; 'and' would be
        # the conventional choice.
        if((numWiating>0) & (numBusyCores < myCPUCount)):
            Process(target=fitLineTensor_MultiProcFunc,
                    args=(aQ, partCnt,
                          np.squeeze(inTensorX[ :,
                                               rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1],
                                               rowClmInds[partCnt, 2]:rowClmInds[partCnt, 3] ]),
                          np.squeeze(inTensorY[ :,
                                               rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1],
                                               rowClmInds[partCnt, 2]:rowClmInds[partCnt, 3] ]),
                          topKthPerc,
                          bottomKthPerc,
                          MSSE_LAMBDA)).start()
            partCnt += 1
            numWiating -= 1
            numBusyCores += 1
    if(showProgress):
        pBar.end()
    return (modelParamsMap)
############################################
###### background estimation library #######
def fitBackgroundTensor_multiprocFunc(aQ, imgCnt,
                                      inImage_Tensor,
                                      inMask_Tensor,
                                      winX,
                                      winY,
                                      topKthPerc,
                                      bottomKthPerc,
                                      MSSE_LAMBDA,
                                      stretch2CornersOpt,
                                      numModelParams,
                                      optIters,
                                      numStrides,
                                      minimumResidual):
    """Worker for fitBackgroundTensor_multiproc: estimate the background model
    for one chunk of frames via fitBackgroundTensor and push [imgCnt, result]
    onto the shared queue aQ, where imgCnt is the chunk's first frame index."""
    chunkResult = fitBackgroundTensor(inImage_Tensor, inMask_Tensor,
                                      winX, winY,
                                      topKthPerc, bottomKthPerc,
                                      MSSE_LAMBDA, stretch2CornersOpt,
                                      numModelParams, optIters,
                                      numStrides, minimumResidual)
    aQ.put([imgCnt, chunkResult])
def fitBackgroundTensor_multiproc(inDataSet, inMask = None,
                                  winX = None, winY = None,
                                  topKthPerc = 0.5,
                                  bottomKthPerc = 0.3,
                                  MSSE_LAMBDA = 3.0,
                                  stretch2CornersOpt = 0,
                                  numModelParams = 4,
                                  optIters = 12,
                                  showProgress = False,
                                  numStrides = 0,
                                  minimumResidual = 0,
                                  numProcesses = None):
    """"Does fitBackgroundTensor in RGFLib.py using multiprocessing
    Input arguments
    ~~~~~~~~~~~~~~~
        inImage_Tensor: n_F x n_R x n_C input Tensor, each image has size n_R x n_C
        inMask_Tensor: same size of inImage_Tensor
        MSSE_LAMBDA : How far (normalized by STD of the Gaussian) from the
                        mean of the Gaussian, data is considered inlier.
        optIters: number of iterations of FLKOS for this fitting
            value 0: returns total mean and total STD
            value 1: returns topKthPerc percentile and the scale by MSSE.
            value 8 and above is recommended for optimization according
                to Newton method
            default : 12
        numModelParams: takes either 1, which gives a horizontal plane or 4 which gives an algebraic plane.
        topKthPerc: A rough but certain guess of portion of inliers, between 0 and 1, e.g. 0.5.
                    Choose the topKthPerc to be as high as you are sure the portion of data is inlier.
                    if you are not sure at all, refer to the note above this code.
                    default : 0.5
        bottomKthPerc: We'd like to make a sample out of worst inliers from data points that are
                       between bottomKthPerc and topKthPerc of sorted residuals.
                       set it to 0.9*topKthPerc, if N is number of data points, then make sure that
                       (topKthPerc - bottomKthPerc)*N>4,
                       it is best if bottomKthPerc*N>12 then MSSE makes sense
                       otherwise the code may return non-robust results.
        numStrides: Convolve the filter this number of times. For example, if the image is 32 by 32
                    and winX and Y are 16 and numStrides is 1, from 0 to 15 and 15 to 31,
                    will be analysed. But if numStrides is 2, from 0 to 15, 10 to 25 and 15 to 31
                    will be analysed and averaged. This means that the method will run 7 times.
        minimumResidual : minimum fitting error if available
    Output
    ~~~~~~
        2 x n_F x n_R x n_C where out[0] would be background mean and out[1] would be STD for each pixel in the Tensor.
    """
    f_N = inDataSet.shape[0]
    r_N = inDataSet.shape[1]
    c_N = inDataSet.shape[2]
    if(inMask is None):
        # No mask given: treat every pixel of every frame as good.
        inMask = np.ones(inDataSet.shape, dtype='uint8')
    if(winX is None):
        winX = r_N
    if(winY is None):
        winY = c_N
    # out[0] -> background mean, out[1] -> background STD, per pixel per frame.
    modelParamsMapTensor = np.zeros((2, f_N, r_N, c_N), dtype='float32')
    aQ = Queue()
    # Keep one core free for this dispatcher process.
    mycpucount = cpu_count() - 1
    if(numProcesses is None):
        numProcesses = 2*mycpucount
    if(showProgress):
        print('Multiprocessing background ' + str(f_N) + ' frames...')
    numProc = f_N
    numSubmitted = 0
    numProcessed = 0
    numBusyCores = 0
    firstProcessed = 0
    # Each worker handles a contiguous chunk of frames of this size.
    default_stride = int(np.ceil(numProc/numProcesses))
    # Dispatcher loop: drain finished chunks from the queue and keep up to
    # mycpucount workers busy until every frame has been processed.
    while(numProcessed<numProc):
        if (not aQ.empty()):
            # A worker finished: copy its chunk back into the output tensor
            # at the frame offset it was tagged with.
            qElement = aQ.get()
            _imgCnt = qElement[0]
            _tmpResult = qElement[1]
            # The chunk size is recovered from the result's frame axis.
            _stride = _tmpResult.shape[1]
            modelParamsMapTensor[:, _imgCnt:_imgCnt+_stride, :, :] = _tmpResult
            numBusyCores -= 1
            numProcessed += _stride
            if(showProgress):
                if(firstProcessed==0):
                    # Progress bar is created lazily on the first result.
                    pBar = textProgBar(numProc-_stride, title = 'Calculationg background')
                    firstProcessed = 1
                else:
                    pBar.go(_stride)
        # Submit another chunk while frames remain and a core is available.
        # NOTE(review): '&' is a bitwise AND of two bools; 'and' would be
        # the conventional choice.
        if((numSubmitted<numProc) & (numBusyCores < mycpucount)):
            # The last chunk may be smaller than the default stride.
            stride = np.minimum(default_stride, numProc - numSubmitted)
            Process(target = fitBackgroundTensor_multiprocFunc,
                    args=(aQ, numSubmitted,
                            inDataSet[numSubmitted:numSubmitted+stride, :, :],
                            inMask[numSubmitted:numSubmitted+stride, :, :],
                            winX,
                            winY,
                            topKthPerc,
                            bottomKthPerc,
                            MSSE_LAMBDA,
                            stretch2CornersOpt,
                            numModelParams,
                            optIters,
                            numStrides,
                            minimumResidual)).start()
            numSubmitted += stride
            numBusyCores += 1
    if(showProgress):
        pBar.end()
    return(modelParamsMapTensor)
def fitBackgroundRadiallyTensor_multiprocFunc(aQ,
                                              inImg,
                                              inMask,
                                              minRes,
                                              includeCenter,
                                              maxRes,
                                              shellWidth,
                                              stride,
                                              x_Cent,
                                              y_Cent,
                                              finiteSampleBias,
                                              topKthPerc,
                                              bottomKthPerc,
                                              MSSE_LAMBDA,
                                              optIters,
                                              minimumResidual,
                                              return_vecMP,
                                              imgCnt):
    """Worker for the radial background fit: process one image with
    fitBackgroundRadially and push the result onto the queue aQ tagged with
    imgCnt; when return_vecMP is set, the radial profile vector is included
    as a third element of the queued list."""
    fitResult = fitBackgroundRadially(inImg,
                                      inMask,
                                      minRes = minRes,
                                      includeCenter = includeCenter,
                                      maxRes = maxRes,
                                      shellWidth = shellWidth,
                                      stride = stride,
                                      x_Cent = x_Cent,
                                      y_Cent = y_Cent,
                                      finiteSampleBias = finiteSampleBias,
                                      topKthPerc = topKthPerc,
                                      bottomKthPerc = bottomKthPerc,
                                      MSSE_LAMBDA = MSSE_LAMBDA,
                                      optIters = optIters,
                                      minimumResidual = minimumResidual,
                                      return_vecMP = return_vecMP)
    if return_vecMP:
        modelParams, profileVec = fitResult
        aQ.put([imgCnt, modelParams, profileVec])
    else:
        aQ.put([imgCnt, fitResult])
def fitBackgroundRadiallyTensor_multiproc(inImg_Tensor,
                                          inMask_Tensor = None,
                                          minRes = 3,
                                          includeCenter = 0,
                                          maxRes = None,
                                          shellWidth = 1,
                                          stride = 1,
                                          x_Cent = None,
                                          y_Cent = None,
                                          finiteSampleBias = 200,
                                          topKthPerc = 0.5,
                                          bottomKthPerc = 0.35,
                                          MSSE_LAMBDA = 3.0,
                                          optIters = 12,
                                          showProgress = False,
                                          minimumResidual = 0,
                                          return_vecMP = False):
    """ using Multiprocessing in python,
        fit a value to the ring around the image and fine tune it by convolving the resolution shells
        by number of strides and calculate the value of the background of the ring
        and STD at the location of each pixel.
        Input arguments
        ~~~~~~~~~~~~~~~
        inImg_Tensor: a 3D float32 numpy array as the tensor of n_F images: n_F x n_R x n_C
        inMask_Tensor: same size as inImg_Tensor, with data type 'uint8',
            where 0 is bad and 1 is good. The masked pixels have no effect
            in the calculation of the parameters of the plane fit to background.
            However, the value of the background at their location can be found.
        minRes: minimum distance to the center of the image
            default: 0
        includeCenter: if you'd like to set the minimum to a higher value and yet get the
            circle inside the minimum resolution as one area, set this to one.
            this is particularly useful when shellWidth=1, then the area within
            radius 6 will have size of more than 200, the finiteSampleBias of monteCarlo.
            So you can set the minRes to 6, set includeCenter to 1 and shellWidth to 1.
            default: 0
        maxRes: maximum distance to the center of the image
        shellWidth : the ring around the center can have a width and a value will be fitted to
            all values in the ring.
        finiteSampleBias : size of an area on a ring will be downsampled evenly to no more than finiteSampleBias
            default : twice monte carlo finite sample bias 2x200
        optIters: number of iterations of FLKOS for this fitting
            value 0: returns total mean and total STD
            value 1: returns topKthPerc percentile and the scale by MSSE.
            value 8 and above is recommended for optimization according
                to Newton method
            default : 12
        MSSE_LAMBDA : How far (normalized by STD of the Gaussian) from the
            mean of the Gaussian, data is considered inlier.
            default: 3.0
        topKthPerc: A rough but certain guess of portion of inliers, between 0 and 1, e.g. 0.5.
            Choose the topKthPerc to be as high as you are sure the portion of data is inlier.
            if you are not sure at all, refer to the note above this code.
            default : 0.5
        bottomKthPerc: We'd like to make a sample out of worst inliers from data points that are
            between bottomKthPerc and topKthPerc of sorted residuals.
            set it to 0.9*topKthPerc, if N is number of data points, then make sure that
            (topKthPerc - bottomKthPerc)*N>4,
            it is best if bottomKthPerc*N>12 then MSSE makes sense
            otherwise the code may return non-robust results.
        stride: by giving a shellWidth>1, one can desire convolving the shell over radius by
            number of strides.
        minimumResidual : minimum residual to initialize MSSE just like RANSAC
            default: 0
        showProgress: shows progress, default: False
        return_vecMP: return profile vectors for each frame
            default: False
        Output
        ~~~~~~
        if not return_vecMP:
            numpy array with 2 parameters for each pixel : 2 x n_F x n_R x n_C : Rmean and RSTD.
        else:
            2-tuple
                first is the above numpy array
                and second is the profile vector in size 2 x n_F x res
    """
    if(inMask_Tensor is None):
        inMask_Tensor = np.ones(inImg_Tensor.shape, dtype='uint8')
    n_F = inImg_Tensor.shape[0]
    n_R = inImg_Tensor.shape[1]
    n_C = inImg_Tensor.shape[2]
    # default center is the middle of the image
    if(x_Cent is None):
        x_Cent = int(n_R/2)
    if(y_Cent is None):
        y_Cent = int(n_C/2)
    if(showProgress):
        print('Getting radial profile of a ny image')
    radial_mP = np.zeros((2, n_F, n_R, n_C), dtype='float32')
    # distances from the chosen center to the four image corners; the largest
    # one bounds the radius that any pixel can have.
    maxDist = np.array([(x_Cent**2 + y_Cent**2)**0.5,
                        ((n_R - x_Cent)**2 + y_Cent**2)**0.5,
                        ((x_Cent)**2 + (n_C - y_Cent)**2)**0.5,
                        ((n_R - x_Cent)**2 + (n_C - y_Cent)**2)**0.5])
    if(showProgress):
        # BUGFIX: this debug print previously ran unconditionally; it is now
        # gated behind showProgress like every other status message here.
        print(maxDist)
    maxDist = int(np.ceil(maxDist.max()))
    if(maxRes is None):
        maxRes = maxDist
    if(maxRes > maxDist):
        maxRes = maxDist
    if(return_vecMP):
        radial_prof = np.zeros((2, n_F, maxDist), dtype='float32')
    if(showProgress):
        print('maximum distance from the given center is ' + str(maxDist),
              flush=True)
    if(showProgress):
        print('inImg_Tensor shape-->', inImg_Tensor.shape)
    myCPUCount = cpu_count()-1
    aQ = Queue()
    numProc = n_F
    procID = 0
    numProcessed = 0
    numBusyCores = 0
    if(showProgress):
        print('starting ' +str(numProc) + ' processes with ' + str(myCPUCount) + ' CPUs')
    # classic fork/join loop: drain finished results off the queue while
    # keeping up to myCPUCount worker processes in flight.
    while(numProcessed<numProc):
        if (not aQ.empty()):
            aQElement = aQ.get()
            _imgCnt = aQElement[0]
            radial_mP[:, _imgCnt, :, :] = aQElement[1]
            if(return_vecMP):
                radial_prof[:, _imgCnt, :] = aQElement[2]
            numProcessed += 1
            numBusyCores -= 1
            if(showProgress):
                if(numProcessed == 1):
                    pBar = textProgBar(numProc-1, title = 'Multiprocessing results progress bar')
                if(numProcessed > 1):
                    pBar.go()
        if((procID<numProc) & (numBusyCores < myCPUCount)):
            Process(target = fitBackgroundRadiallyTensor_multiprocFunc,
                    args = (aQ,
                           inImg_Tensor[procID],
                           inMask_Tensor[procID],
                           minRes,
                           includeCenter,
                           maxRes,
                           shellWidth,
                           stride,
                           x_Cent,
                           y_Cent,
                           finiteSampleBias,
                           topKthPerc,
                           bottomKthPerc,
                           MSSE_LAMBDA,
                           optIters,
                           minimumResidual,
                           return_vecMP,
                           procID)).start()
            procID += 1
            numBusyCores += 1
    if(showProgress):
        pBar.end()
    if(return_vecMP):
        return(radial_mP, radial_prof)
    else:
        return(radial_mP)
"numpy.ceil",
"numpy.ones",
"numpy.minimum",
"multiprocessing.Process",
"multiprocessing.cpu_count",
"numpy.squeeze",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"multiprocessing.Queue"
] | [((1529, 1580), 'numpy.zeros', 'np.zeros', (['(numRowSegs * numClmSegs, 4)'], {'dtype': '"""int"""'}), "((numRowSegs * numClmSegs, 4), dtype='int')\n", (1537, 1580), True, 'import numpy as np\n'), ((1595, 1657), 'numpy.linspace', 'np.linspace', (['(0)', 'inTensor_shape[1]', '(numRowSegs + 1)'], {'dtype': '"""int"""'}), "(0, inTensor_shape[1], numRowSegs + 1, dtype='int')\n", (1606, 1657), True, 'import numpy as np\n'), ((1753, 1815), 'numpy.linspace', 'np.linspace', (['(0)', 'inTensor_shape[2]', '(numClmSegs + 1)'], {'dtype': '"""int"""'}), "(0, inTensor_shape[2], numClmSegs + 1, dtype='int')\n", (1764, 1815), True, 'import numpy as np\n'), ((1924, 1975), 'numpy.zeros', 'np.zeros', (['(numRowSegs * numClmSegs, 2)'], {'dtype': '"""int"""'}), "((numRowSegs * numClmSegs, 2), dtype='int')\n", (1932, 1975), True, 'import numpy as np\n'), ((7198, 7205), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (7203, 7205), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((7350, 7418), 'numpy.zeros', 'np.zeros', (['(2, inTensor.shape[1], inTensor.shape[2])'], {'dtype': '"""float32"""'}), "((2, inTensor.shape[1], inTensor.shape[2]), dtype='float32')\n", (7358, 7418), True, 'import numpy as np\n'), ((11836, 11843), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (11841, 11843), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((11988, 12058), 'numpy.zeros', 'np.zeros', (['(3, inTensorX.shape[1], inTensorX.shape[2])'], {'dtype': '"""float32"""'}), "((3, inTensorX.shape[1], inTensorX.shape[2]), dtype='float32')\n", (11996, 12058), True, 'import numpy as np\n'), ((18326, 18371), 'numpy.zeros', 'np.zeros', (['(2, f_N, r_N, c_N)'], {'dtype': '"""float32"""'}), "((2, f_N, r_N, c_N), dtype='float32')\n", (18334, 18371), True, 'import numpy as np\n'), ((18382, 18389), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (18387, 18389), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((27607, 27652), 'numpy.zeros', 
'np.zeros', (['(2, n_F, n_R, n_C)'], {'dtype': '"""float32"""'}), "((2, n_F, n_R, n_C), dtype='float32')\n", (27615, 27652), True, 'import numpy as np\n'), ((27672, 27866), 'numpy.array', 'np.array', (['[(x_Cent ** 2 + y_Cent ** 2) ** 0.5, ((n_R - x_Cent) ** 2 + y_Cent ** 2) **\n 0.5, (x_Cent ** 2 + (n_C - y_Cent) ** 2) ** 0.5, ((n_R - x_Cent) ** 2 +\n (n_C - y_Cent) ** 2) ** 0.5]'], {}), '([(x_Cent ** 2 + y_Cent ** 2) ** 0.5, ((n_R - x_Cent) ** 2 + y_Cent **\n 2) ** 0.5, (x_Cent ** 2 + (n_C - y_Cent) ** 2) ** 0.5, ((n_R - x_Cent) **\n 2 + (n_C - y_Cent) ** 2) ** 0.5])\n', (27680, 27866), True, 'import numpy as np\n'), ((28423, 28430), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (28428, 28430), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((6979, 7025), 'numpy.ones', 'np.ones', ([], {'shape': 'inTensor.shape', 'dtype': '"""float32"""'}), "(shape=inTensor.shape, dtype='float32')\n", (6986, 7025), True, 'import numpy as np\n'), ((7175, 7186), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (7184, 7186), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((11813, 11824), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (11822, 11824), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((18172, 18211), 'numpy.ones', 'np.ones', (['inDataSet.shape'], {'dtype': '"""uint8"""'}), "(inDataSet.shape, dtype='uint8')\n", (18179, 18211), True, 'import numpy as np\n'), ((18407, 18418), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (18416, 18418), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((18712, 18743), 'numpy.ceil', 'np.ceil', (['(numProc / numProcesses)'], {}), '(numProc / numProcesses)\n', (18719, 18743), True, 'import numpy as np\n'), ((27264, 27306), 'numpy.ones', 'np.ones', (['inImg_Tensor.shape'], {'dtype': '"""uint8"""'}), "(inImg_Tensor.shape, dtype='uint8')\n", (27271, 27306), True, 'import numpy as np\n'), ((28115, 28159), 'numpy.zeros', 
'np.zeros', (['(2, n_F, maxDist)'], {'dtype': '"""float32"""'}), "((2, n_F, maxDist), dtype='float32')\n", (28123, 28159), True, 'import numpy as np\n'), ((28400, 28411), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (28409, 28411), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((19442, 19492), 'numpy.minimum', 'np.minimum', (['default_stride', '(numProc - numSubmitted)'], {}), '(default_stride, numProc - numSubmitted)\n', (19452, 19492), True, 'import numpy as np\n'), ((19505, 19826), 'multiprocessing.Process', 'Process', ([], {'target': 'fitBackgroundTensor_multiprocFunc', 'args': '(aQ, numSubmitted, inDataSet[numSubmitted:numSubmitted + stride, :, :],\n inMask[numSubmitted:numSubmitted + stride, :, :], winX, winY,\n topKthPerc, bottomKthPerc, MSSE_LAMBDA, stretch2CornersOpt,\n numModelParams, optIters, numStrides, minimumResidual)'}), '(target=fitBackgroundTensor_multiprocFunc, args=(aQ, numSubmitted,\n inDataSet[numSubmitted:numSubmitted + stride, :, :], inMask[\n numSubmitted:numSubmitted + stride, :, :], winX, winY, topKthPerc,\n bottomKthPerc, MSSE_LAMBDA, stretch2CornersOpt, numModelParams,\n optIters, numStrides, minimumResidual))\n', (19512, 19826), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((29259, 29562), 'multiprocessing.Process', 'Process', ([], {'target': 'fitBackgroundRadiallyTensor_multiprocFunc', 'args': '(aQ, inImg_Tensor[procID], inMask_Tensor[procID], minRes, includeCenter,\n maxRes, shellWidth, stride, x_Cent, y_Cent, finiteSampleBias,\n topKthPerc, bottomKthPerc, MSSE_LAMBDA, optIters, minimumResidual,\n return_vecMP, procID)'}), '(target=fitBackgroundRadiallyTensor_multiprocFunc, args=(aQ,\n inImg_Tensor[procID], inMask_Tensor[procID], minRes, includeCenter,\n maxRes, shellWidth, stride, x_Cent, y_Cent, finiteSampleBias,\n topKthPerc, bottomKthPerc, MSSE_LAMBDA, optIters, minimumResidual,\n return_vecMP, procID))\n', (29266, 29562), False, 'from multiprocessing import Process, 
Queue, cpu_count\n'), ((8246, 8367), 'numpy.squeeze', 'np.squeeze', (['inTensor[:, rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1], rowClmInds[\n partCnt, 2]:rowClmInds[partCnt, 3]]'], {}), '(inTensor[:, rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1],\n rowClmInds[partCnt, 2]:rowClmInds[partCnt, 3]])\n', (8256, 8367), True, 'import numpy as np\n'), ((8534, 8656), 'numpy.squeeze', 'np.squeeze', (['inWeights[:, rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1], rowClmInds[\n partCnt, 2]:rowClmInds[partCnt, 3]]'], {}), '(inWeights[:, rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1],\n rowClmInds[partCnt, 2]:rowClmInds[partCnt, 3]])\n', (8544, 8656), True, 'import numpy as np\n'), ((12895, 13017), 'numpy.squeeze', 'np.squeeze', (['inTensorX[:, rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1], rowClmInds[\n partCnt, 2]:rowClmInds[partCnt, 3]]'], {}), '(inTensorX[:, rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1],\n rowClmInds[partCnt, 2]:rowClmInds[partCnt, 3]])\n', (12905, 13017), True, 'import numpy as np\n'), ((13184, 13306), 'numpy.squeeze', 'np.squeeze', (['inTensorY[:, rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1], rowClmInds[\n partCnt, 2]:rowClmInds[partCnt, 3]]'], {}), '(inTensorY[:, rowClmInds[partCnt, 0]:rowClmInds[partCnt, 1],\n rowClmInds[partCnt, 2]:rowClmInds[partCnt, 3]])\n', (13194, 13306), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
:Author: <NAME>
Notes
-----
This is a script with all the components for running an investigation. I would
recommend making a copy of this for each successful investigation and storing it
with the data.
"""
#%% Import useful functions
import numpy as np
import collections
import simulation
#%% Set the model sets and task sets
# Probabilistic-selection task with six actions (A-F) and a single cue.
number_actions = 6
number_cues = 1
repetitions = 2  # each learning rate appears this many times in the sweep
# Parameter grids swept by the simulation: 5 learning rates (duplicated) and
# 9 inverse-temperature values, crossed by simulation.run below.
alphaSet = np.repeat(np.array([0.1, 0.3, 0.5, 0.7, 0.9]), repetitions)
betaSet = np.array([0.1, 0.3, 0.5, 0.7, 1, 2, 4, 8, 16])
# No task properties are varied across runs; all task settings are static.
task_parameters = {}
task_static_properties = {'number_actions': number_actions,
                          'learning_length': 200,
                          'test_length': 100,
                          'reward_size': 1,
                          # reward probability per action; actions are shown
                          # in the fixed pairs listed below during learning
                          'action_reward_probabilities': collections.OrderedDict([('A', 0.80),
                                                                                   ('B', 0.20),
                                                                                   ('C', 0.70),
                                                                                   ('D', 0.30),
                                                                                   ('E', 0.60),
                                                                                   ('F', 0.40)]),
                          'learning_action_pairs': [('A', 'B'), ('C', 'D'), ('E', 'F')]}
# QLearn model: alpha and beta are varied; everything else is held constant.
model_parameters = {'alpha': alphaSet,
                    'beta': betaSet}
model_static_properties = {'number_actions': number_actions,
                           'number_cues': number_cues,
                           'action_codes': {'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5},
                           # uniform initial expectations and action priors
                           'expect': np.ones((number_actions, number_cues)) / 2,
                           'prior': np.ones(number_actions) / number_actions,
                           'stimulus_shaper_name': 'StimulusProbSelectDirect',
                           'reward_shaper_name': 'RewardProbSelectDirect',
                           'decision_function_name': 'weightProb',
                           'task_responses': ['A', 'B', 'C', 'D', 'E', 'F']}
#%% For simulating tasks
# Run the full (alpha, beta) sweep; results are pickled under the given label
# and numpy floating-point errors are logged rather than raised.
simulation.run(task_name='ProbSelect',
               task_changing_properties=task_parameters,
               task_constant_properties=task_static_properties,
               model_name='QLearn',
               model_changing_properties=model_parameters,
               model_constant_properties=model_static_properties,
               label='qLearn_probSelectSimSet',
               pickle=True,
               numpy_error_level='log') # 'raise','log'
| [
"numpy.array",
"collections.OrderedDict",
"numpy.ones",
"simulation.run"
] | [((514, 560), 'numpy.array', 'np.array', (['[0.1, 0.3, 0.5, 0.7, 1, 2, 4, 8, 16]'], {}), '([0.1, 0.3, 0.5, 0.7, 1, 2, 4, 8, 16])\n', (522, 560), True, 'import numpy as np\n'), ((2241, 2575), 'simulation.run', 'simulation.run', ([], {'task_name': '"""ProbSelect"""', 'task_changing_properties': 'task_parameters', 'task_constant_properties': 'task_static_properties', 'model_name': '"""QLearn"""', 'model_changing_properties': 'model_parameters', 'model_constant_properties': 'model_static_properties', 'label': '"""qLearn_probSelectSimSet"""', 'pickle': '(True)', 'numpy_error_level': '"""log"""'}), "(task_name='ProbSelect', task_changing_properties=\n task_parameters, task_constant_properties=task_static_properties,\n model_name='QLearn', model_changing_properties=model_parameters,\n model_constant_properties=model_static_properties, label=\n 'qLearn_probSelectSimSet', pickle=True, numpy_error_level='log')\n", (2255, 2575), False, 'import simulation\n'), ((453, 488), 'numpy.array', 'np.array', (['[0.1, 0.3, 0.5, 0.7, 0.9]'], {}), '([0.1, 0.3, 0.5, 0.7, 0.9])\n', (461, 488), True, 'import numpy as np\n'), ((847, 949), 'collections.OrderedDict', 'collections.OrderedDict', (["[('A', 0.8), ('B', 0.2), ('C', 0.7), ('D', 0.3), ('E', 0.6), ('F', 0.4)]"], {}), "([('A', 0.8), ('B', 0.2), ('C', 0.7), ('D', 0.3), (\n 'E', 0.6), ('F', 0.4)])\n", (870, 949), False, 'import collections\n'), ((1787, 1825), 'numpy.ones', 'np.ones', (['(number_actions, number_cues)'], {}), '((number_actions, number_cues))\n', (1794, 1825), True, 'import numpy as np\n'), ((1868, 1891), 'numpy.ones', 'np.ones', (['number_actions'], {}), '(number_actions)\n', (1875, 1891), True, 'import numpy as np\n')] |
# =============================================================================
# SIMULATION-BASED ENGINEERING LAB (SBEL) - http://sbel.wisc.edu
#
# Copyright (c) 2019 SBEL
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# at https://opensource.org/licenses/BSD-3-Clause
#
# =============================================================================
# Contributors: <NAME>, <NAME>
# =============================================================================
#!/usr/bin/env python3
import numpy as np
import sys
import os
from integrate import integrate
from writefile import writeosprayfile
from writeforcefile import writeforcefile
from params import params
def main(friction, out_dir, top_mass, unique):
    """Simulate a sphere balanced on four support spheres over a fixed box.

    Four unit spheres are placed in a square on a fixed box, and a fifth
    ("top") sphere of mass ``top_mass`` is placed in the pocket they form.
    The scene is integrated for ``time_end`` seconds while frames are
    written to ``out_dir``.

    Parameters
    ----------
    friction : float
        Static friction coefficient used for all contacts.
    out_dir : str
        Output directory (created if missing); frames are written as
        ``<out_dir>/step<frame>.csv``.
    top_mass : float
        Mass of the top sphere.
    unique : bool
        Forwarded to the solver setup as the "unique" option.

    Returns
    -------
    bool
        True if the top sphere dropped below half of its initial height
        (the stack collapsed) before the simulation ended, else False.
    """
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    grav = np.array([0, 0, -980])  # gravity along -z (cgs-like units)
    setup = {"nb": 6,
             "gravity" : grav,
             "envelope" : 1e-7,
             "static_friction" : friction, # TODO allow combination of body friction at contact
             "mu_tilde" : 0.1,
             "eps_0" : 0.1,
             "mu_star" : 1e-9,
             "eps_star" : 1e-6,
             "tau_mu" : 0.2,
             "tau_eps" : 0.1,
             "tolerance" : 1e-3,
             "dt" : 1e-3,
             "time_end": 3,
             "max_iterations" : 300,
             "unique" : unique,
             "prefix" : out_dir + "/step",
             "suffix" : ".csv"}
    gran_params = params(setup)

    # Four unit support spheres arranged in a square at z = 1.
    sphere_id = 1  # was `id`, renamed to avoid shadowing the builtin
    sphere_mass = 1.0
    sphere_z = 1.0
    sphere_radius = 1.0
    for x in [-1.0, 1.0]:
        for y in [-1.0, 1.0]:
            pos = np.array([x, y, sphere_z])
            rot = np.array([1, 0, 0, 0])
            gran_params.add_sphere(pos, rot, sphere_mass, sphere_radius, sphere_id)
            sphere_id += 1

    # Top sphere resting in the pocket formed by the four support spheres.
    top_id = 0
    top_radius = 1.0
    top_z = 1 + np.sqrt(top_radius**2 + 2 * top_radius * sphere_radius + sphere_radius**2 - 2)
    pos = np.array([0, 0, top_z])
    rot = np.array([1, 0, 0, 0])
    gran_params.add_sphere(pos, rot, top_mass, top_radius, top_id)

    # Fixed box acting as the ground plane.
    box_id = 5
    box_mass = 4.0
    box_hdims = np.array([4, 4, 0.5])
    box_z = -0.5
    pos = np.array([0, 0, box_z])
    rot = np.array([1, 0, 0, 0])
    gran_params.add_box(pos, rot, box_hdims, box_mass, box_id, fixed=True)

    # Removed dead code: unused c_pos/f_contact initializers, t_settling,
    # pushing flag, and `filename` strings that were built but never used
    # (writeforcefile was imported but never called).
    step = 0
    t = 0.0
    out_fps = 100.0
    out_steps = 1.0 / (out_fps * gran_params.dt)  # sim steps per output frame
    frame = 0
    while t < gran_params.time_end:
        if step % out_steps == 0:
            frame_s = '%06d' % frame
            print('Rendering frame ' + frame_s)
            writeosprayfile(gran_params.q, gran_params.v, frame_s, gran_params)
            frame += 1

        # new_a, c_pos and f_contact are part of integrate()'s return tuple
        # but are not needed here.
        new_q, new_v, new_a, c_pos, f_contact = integrate(gran_params.q, gran_params.v, gran_params)
        gran_params.q = new_q
        gran_params.v = new_v

        # Collapse check: z of the top sphere (state layout: 7 values/body).
        if gran_params.q[7*top_id + 2] <= top_z / 2.0:
            return True

        t += gran_params.dt
        step += 1
    return False
if __name__ == '__main__':
    # CLI entry point: <friction> <out_dir> <top_mass> <unique?>
    argv = sys.argv
    if len(sys.argv) != 5:
        print("usage " + argv[0] + " <friction> <out_dir> <top_mass> <unique?>")
        exit(1)
    fric = float(argv[1])        # static friction coefficient
    out_dir = argv[2]            # directory for per-frame CSV output
    mass = float(argv[3])        # mass of the top sphere
    # argv[4] must be "0" or "1": bool(int(...)) is needed because
    # bool("0") would be True.
    unique = bool(int(argv[4]))
    print("fric: ", fric, " mass: ", mass, " unique: ", unique)
    print(main(fric, out_dir, mass, unique))
| [
"os.path.exists",
"numpy.sqrt",
"writefile.writeosprayfile",
"numpy.array",
"os.mkdir",
"params.params",
"integrate.integrate"
] | [((845, 867), 'numpy.array', 'np.array', (['[0, 0, -980]'], {}), '([0, 0, -980])\n', (853, 867), True, 'import numpy as np\n'), ((1398, 1411), 'params.params', 'params', (['setup'], {}), '(setup)\n', (1404, 1411), False, 'from params import params\n'), ((1863, 1886), 'numpy.array', 'np.array', (['[0, 0, top_z]'], {}), '([0, 0, top_z])\n', (1871, 1886), True, 'import numpy as np\n'), ((1895, 1917), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (1903, 1917), True, 'import numpy as np\n'), ((2033, 2054), 'numpy.array', 'np.array', (['[4, 4, 0.5]'], {}), '([4, 4, 0.5])\n', (2041, 2054), True, 'import numpy as np\n'), ((2080, 2103), 'numpy.array', 'np.array', (['[0, 0, box_z]'], {}), '([0, 0, box_z])\n', (2088, 2103), True, 'import numpy as np\n'), ((2112, 2134), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (2120, 2134), True, 'import numpy as np\n'), ((2221, 2233), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2229, 2233), True, 'import numpy as np\n'), ((2250, 2262), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2258, 2262), True, 'import numpy as np\n'), ((782, 805), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (796, 805), False, 'import os\n'), ((815, 832), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (823, 832), False, 'import os\n'), ((1774, 1860), 'numpy.sqrt', 'np.sqrt', (['(top_radius ** 2 + 2 * top_radius * sphere_radius + sphere_radius ** 2 - 2)'], {}), '(top_radius ** 2 + 2 * top_radius * sphere_radius + sphere_radius **\n 2 - 2)\n', (1781, 1860), True, 'import numpy as np\n'), ((2905, 2957), 'integrate.integrate', 'integrate', (['gran_params.q', 'gran_params.v', 'gran_params'], {}), '(gran_params.q, gran_params.v, gran_params)\n', (2914, 2957), False, 'from integrate import integrate\n'), ((1561, 1587), 'numpy.array', 'np.array', (['[x, y, sphere_z]'], {}), '([x, y, sphere_z])\n', (1569, 1587), True, 'import numpy as np\n'), ((1604, 1626), 'numpy.array', 
'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (1612, 1626), True, 'import numpy as np\n'), ((2680, 2747), 'writefile.writeosprayfile', 'writeosprayfile', (['gran_params.q', 'gran_params.v', 'frame_s', 'gran_params'], {}), '(gran_params.q, gran_params.v, frame_s, gran_params)\n', (2695, 2747), False, 'from writefile import writeosprayfile\n')] |
import sys
import os
import subprocess
import numpy
import pyigl as igl
from utils.iglhelpers import e2p, p2e
from utils import my_utils
current_frame = 0
def sample_more_encoded_displacements(encoded_displacements, num_extra_per_pose=10):
    """Jitter each encoded pose to produce extra nearby samples.

    The noise scale is derived from the largest per-dimension change between
    consecutive poses (divided by 6), so samples stay close to the training
    trajectory. Each input row is repeated ``num_extra_per_pose`` times and
    Gaussian noise of that scale is added.
    """
    # Largest per-dimension absolute difference between consecutive poses.
    per_dim_max = numpy.zeros(encoded_displacements.shape[1])
    for prev_pose, next_pose in zip(encoded_displacements[:-1],
                                    encoded_displacements[1:]):
        per_dim_max = numpy.max(numpy.stack((numpy.abs(prev_pose - next_pose),
                                              per_dim_max)), 0)
    noise_scale = max(per_dim_max / 6.0)  # single scalar sigma for all dims
    samples = numpy.repeat(encoded_displacements, num_extra_per_pose, axis=0)
    samples = samples + numpy.random.normal(0, noise_scale, samples.shape)
    return samples
def save_mat_with_prefix(path, prefix, mat):
    """Write *mat* as ``<path>/<prefix>.dmat`` and return the file path."""
    dmat_path = os.path.join(path, '{}.dmat'.format(prefix))
    my_utils.save_numpy_mat_to_dmat(dmat_path, mat)
    return dmat_path
def reencode_and_augment_training_data(model_root, num_extra_per_poses=0):
    """
    Loads existing training data and generates new encoding / energy vector pairs for
    1. Energy evaluated on decoded displacements of training data.
    2. Energy evaluated on poses sampled around the encoded training poses.

    Parameters:
        model_root: directory containing training_data/, pca_results/,
            keras_models/ and tets.mesh (layout implied by the paths below).
        num_extra_per_poses: number of jittered samples per training pose,
            forwarded to sample_more_encoded_displacements.

    Side effects: writes .dmat files under model_root/augmented_training_data/
    and invokes the external GenerateDataForPose binary twice.
    """
    training_data_path = os.path.join(model_root,'training_data/training')
    # Load the PCA basis used to project displacements for the autoencoder.
    U = igl.eigen.MatrixXd()
    igl.readDMAT(os.path.join(model_root, 'pca_results/ae_pca_components.dmat'), U)
    displacements = my_utils.load_displacement_dmats_to_numpy(training_data_path)
    flatten_displ, unflatten_displ = my_utils.get_flattners(displacements)
    # Imported lazily so merely importing this module does not pull in keras.
    from keras.models import Model, load_model
    encoder = load_model(os.path.join(model_root,'keras_models/encoder.hdf5'))
    decoder = load_model(os.path.join(model_root,'keras_models/decoder.hdf5'))
    # Round-trip the training poses: PCA-project, encode, decode, back-project.
    encoded_displacements = encoder.predict(flatten_displ(displacements) @ U)
    decoded_displacements = decoder.predict(encoded_displacements) @ U.transpose()
    print('Generating extra samples...')
    # Jittered samples in latent space, decoded back to full displacements.
    extra_encoded_displacements = sample_more_encoded_displacements(encoded_displacements, num_extra_per_poses)
    extra_decoded_displacements = decoder.predict(extra_encoded_displacements) @ U.transpose()
    sampled_training_data_path = os.path.join(model_root, 'augmented_training_data/sampled/')
    reencoded_training_data_path = os.path.join(model_root, 'augmented_training_data/reencoded/')
    my_utils.create_dir_if_not_exist(sampled_training_data_path)
    my_utils.create_dir_if_not_exist(reencoded_training_data_path)
    # Persist both the decoded displacements and their latent encodings.
    extra_displacements_path = save_mat_with_prefix(sampled_training_data_path, 'displacements', extra_decoded_displacements)
    save_mat_with_prefix(sampled_training_data_path, 'enc_displacements', extra_encoded_displacements)
    reencoded_displacements_path = save_mat_with_prefix(reencoded_training_data_path, 'displacements', decoded_displacements)
    save_mat_with_prefix(reencoded_training_data_path, 'enc_displacements', encoded_displacements)
    tet_mesh_path = os.path.join(model_root, 'tets.mesh')
    parameters_path = os.path.join(model_root, 'training_data/training/parameters.json')
    # External binary computes energies for each displacement file.
    print('Computing energies for reencoded poses...')
    subprocess.call(['./generate_data_for_pose/build/bin/GenerateDataForPose', reencoded_displacements_path, tet_mesh_path, parameters_path])
    print('Computing energies for samples...')
    subprocess.call(['./generate_data_for_pose/build/bin/GenerateDataForPose', extra_displacements_path, tet_mesh_path, parameters_path])
    # NOTE(review): the remainder of the original body was a large block of
    # commented-out, unfinished code — energy normalization experiments
    # (scaling/normalizing the energies matrices before use) and an igl
    # viewer loop for previewing extra_decoded_displacements. It referenced
    # variables (`energies`, `energies_test`) never defined in this function
    # and has been removed as dead commented-out code; recover it from
    # version control if the visualization is ever needed.
if __name__ == '__main__':
    # Entry point: expects the model root directory as the only CLI argument.
    model_root = sys.argv[1]
    # BUGFIX: this previously called `augment_training_data`, a name that is
    # not defined anywhere in the file, so running the script directly raised
    # NameError. The function defined above is reencode_and_augment_training_data.
    reencode_and_augment_training_data(model_root)
| [
"numpy.random.normal",
"numpy.abs",
"numpy.repeat",
"utils.my_utils.load_displacement_dmats_to_numpy",
"pyigl.eigen.MatrixXd",
"os.path.join",
"utils.my_utils.get_flattners",
"numpy.stack",
"numpy.zeros",
"utils.my_utils.save_numpy_mat_to_dmat",
"subprocess.call",
"utils.my_utils.create_dir_if... | [((259, 302), 'numpy.zeros', 'numpy.zeros', (['encoded_displacements.shape[1]'], {}), '(encoded_displacements.shape[1])\n', (270, 302), False, 'import numpy\n'), ((629, 692), 'numpy.repeat', 'numpy.repeat', (['encoded_displacements', 'num_extra_per_pose'], {'axis': '(0)'}), '(encoded_displacements, num_extra_per_pose, axis=0)\n', (641, 692), False, 'import numpy\n'), ((728, 793), 'numpy.random.normal', 'numpy.random.normal', (['mu', 'sigma', 'extra_encoded_displacements.shape'], {}), '(mu, sigma, extra_encoded_displacements.shape)\n', (747, 793), False, 'import numpy\n'), ((994, 1032), 'os.path.join', 'os.path.join', (['path', "('%s.dmat' % prefix)"], {}), "(path, '%s.dmat' % prefix)\n", (1006, 1032), False, 'import os\n'), ((1039, 1086), 'utils.my_utils.save_numpy_mat_to_dmat', 'my_utils.save_numpy_mat_to_dmat', (['dmat_path', 'mat'], {}), '(dmat_path, mat)\n', (1070, 1086), False, 'from utils import my_utils\n'), ((1455, 1505), 'os.path.join', 'os.path.join', (['model_root', '"""training_data/training"""'], {}), "(model_root, 'training_data/training')\n", (1467, 1505), False, 'import os\n'), ((1513, 1533), 'pyigl.eigen.MatrixXd', 'igl.eigen.MatrixXd', ([], {}), '()\n', (1531, 1533), True, 'import pyigl as igl\n'), ((1639, 1700), 'utils.my_utils.load_displacement_dmats_to_numpy', 'my_utils.load_displacement_dmats_to_numpy', (['training_data_path'], {}), '(training_data_path)\n', (1680, 1700), False, 'from utils import my_utils\n'), ((1738, 1775), 'utils.my_utils.get_flattners', 'my_utils.get_flattners', (['displacements'], {}), '(displacements)\n', (1760, 1775), False, 'from utils import my_utils\n'), ((2431, 2491), 'os.path.join', 'os.path.join', (['model_root', '"""augmented_training_data/sampled/"""'], {}), "(model_root, 'augmented_training_data/sampled/')\n", (2443, 2491), False, 'import os\n'), ((2527, 2589), 'os.path.join', 'os.path.join', (['model_root', '"""augmented_training_data/reencoded/"""'], {}), "(model_root, 
'augmented_training_data/reencoded/')\n", (2539, 2589), False, 'import os\n'), ((2594, 2654), 'utils.my_utils.create_dir_if_not_exist', 'my_utils.create_dir_if_not_exist', (['sampled_training_data_path'], {}), '(sampled_training_data_path)\n', (2626, 2654), False, 'from utils import my_utils\n'), ((2659, 2721), 'utils.my_utils.create_dir_if_not_exist', 'my_utils.create_dir_if_not_exist', (['reencoded_training_data_path'], {}), '(reencoded_training_data_path)\n', (2691, 2721), False, 'from utils import my_utils\n'), ((3198, 3235), 'os.path.join', 'os.path.join', (['model_root', '"""tets.mesh"""'], {}), "(model_root, 'tets.mesh')\n", (3210, 3235), False, 'import os\n'), ((3258, 3324), 'os.path.join', 'os.path.join', (['model_root', '"""training_data/training/parameters.json"""'], {}), "(model_root, 'training_data/training/parameters.json')\n", (3270, 3324), False, 'import os\n'), ((3385, 3526), 'subprocess.call', 'subprocess.call', (["['./generate_data_for_pose/build/bin/GenerateDataForPose',\n reencoded_displacements_path, tet_mesh_path, parameters_path]"], {}), "(['./generate_data_for_pose/build/bin/GenerateDataForPose',\n reencoded_displacements_path, tet_mesh_path, parameters_path])\n", (3400, 3526), False, 'import subprocess\n'), ((3575, 3712), 'subprocess.call', 'subprocess.call', (["['./generate_data_for_pose/build/bin/GenerateDataForPose',\n extra_displacements_path, tet_mesh_path, parameters_path]"], {}), "(['./generate_data_for_pose/build/bin/GenerateDataForPose',\n extra_displacements_path, tet_mesh_path, parameters_path])\n", (3590, 3712), False, 'import subprocess\n'), ((375, 441), 'numpy.abs', 'numpy.abs', (['(encoded_displacements[i] - encoded_displacements[i + 1])'], {}), '(encoded_displacements[i] - encoded_displacements[i + 1])\n', (384, 441), False, 'import numpy\n'), ((1551, 1613), 'os.path.join', 'os.path.join', (['model_root', '"""pca_results/ae_pca_components.dmat"""'], {}), "(model_root, 'pca_results/ae_pca_components.dmat')\n", (1563, 1613), 
False, 'import os\n'), ((1849, 1902), 'os.path.join', 'os.path.join', (['model_root', '"""keras_models/encoder.hdf5"""'], {}), "(model_root, 'keras_models/encoder.hdf5')\n", (1861, 1902), False, 'import os\n'), ((1928, 1981), 'os.path.join', 'os.path.join', (['model_root', '"""keras_models/decoder.hdf5"""'], {}), "(model_root, 'keras_models/decoder.hdf5')\n", (1940, 1981), False, 'import os\n'), ((469, 502), 'numpy.stack', 'numpy.stack', (['(abs_diff, max_diff)'], {}), '((abs_diff, max_diff))\n', (480, 502), False, 'import numpy\n')] |
## generate cues from grad-cam++
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import torch
from tqdm import tqdm
import torch.nn.functional as F
from gradcam import GradCAMPlusPlus
import numpy as np
from TTT_loader import myImageFloder_img_path
from model import ClassificationModel
from scipy import ndimage
import cv2
import tifffile as tif
import shutil
from PIL import Image
import pandas as pd
# Per-channel RGB mean / std (0-255 scale) used by preprocess() for normalisation.
IMG_MEAN = np.array([98.3519, 96.9567, 95.5713])
IMG_STD = np.array([52.7343, 45.8798, 44.3465])
def preprocess(img_path):
    """Read an image, normalise it channel-wise and return a 1xCxHxW float tensor."""
    rgb = Image.open(img_path).convert('RGB')
    # numpy's array protocol converts the PIL image during the subtraction,
    # yielding a float64 H x W x C array.
    normed = (rgb - IMG_MEAN) / IMG_STD
    tensor = torch.from_numpy(normed).permute(2, 0, 1).float()  # H W C ==> C H W
    return tensor.unsqueeze(0)
# generate grad-cam
def gen_cam_lvwang(model, dataloader, train_cues_dir, backbone='regnet',
                   device='cuda', aug_smooth = False, eigen_smooth=False):
    """Generate Grad-CAM++ localization cues for class 0 and save them to disk.

    For every image in the dataloader, a heat map named
    ``<name>_<class>_<softmax score>.tif`` plus a ``.png`` copy of the source
    image is written under ``train_cues_dir/<class sub-dir>/``.

    Parameters
    ----------
    model: torch.nn.Module
        Trained classification model whose ``encoder`` is a regnet or resnet.
    dataloader: iterable
        Yields ``(img, imgpath)`` batches: a float tensor and a list of paths.
    train_cues_dir: str
        Root output directory (created if missing).
    backbone: str, optional
        Either ``'regnet'`` or ``'resnet'``; selects the CAM target layer
        (default: ``'regnet'``).
    device: str, optional
        Device the batches are moved to (default: ``'cuda'``).
    aug_smooth, eigen_smooth: bool, optional
        Smoothing options forwarded to GradCAMPlusPlus (default: False).

    Raises
    ------
    Exception
        If `backbone` is neither ``'regnet'`` nor ``'resnet'``.
    """
    os.makedirs(train_cues_dir, exist_ok=True)
    os.makedirs(os.path.join(train_cues_dir, 'gpc'), exist_ok=True)
    # Use the last convolutional stage of the encoder as the CAM target layer.
    if backbone=="regnet":
        target_layer = [model.encoder.s4]
    elif backbone=="resnet":
        target_layer = [model.encoder.layer4]
    else:
        raise Exception("Error in set target_layer")
    cam_method = GradCAMPlusPlus(model=model, target_layers=target_layer,
                                  use_cuda=(device == "cuda"))
    # Process by batch.
    for img, imgpath in tqdm(dataloader):
        img = img.to(device, non_blocking=True)
        pred_scores = model(img)
        pred_scores = F.softmax(pred_scores, dim=1)  # N C
        pred_scores = pred_scores.cpu().detach().numpy()  # N C
        # Grad-CAM++ heat maps for target class 0 only; H has shape (N, H, W).
        H = cam_method(input_tensor=img, target_category=[0],
                       aug_smooth=aug_smooth, eigen_smooth=eigen_smooth)
        # Save the heat map and its prediction score for every image of the batch.
        for i, imp in enumerate(imgpath):
            iname = os.path.basename(imp)[:-4]  # file name without its extension
            idir = os.path.basename(os.path.dirname(os.path.dirname(imp)))  # class sub-dir, e.g. "gpc"
            icue = os.path.join(train_cues_dir, idir, iname)
            j = 0  # only class-0 cues are exported
            h = H[i, :, :]
            # Encode the class index and its softmax score in the file name.
            tif.imwrite(icue+'_%d_%.3f.tif'%(j, pred_scores[i,j]), h)
            shutil.copy(imp, icue+'.png')
def main():
    """Build the positive test-set dataloader, restore the trained classifier
    and dump Grad-CAM++ cues for class 0."""
    train_cues_dir = r'.\pred'
    nchannels = 3
    classes = 2
    device = 'cuda'
    trainlist = r'.\data\test_list_0.6_gpc_pos.txt'
    dataset = myImageFloder_img_path(trainlist, aug=False, channels=nchannels)
    traindataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1, shuffle=False, num_workers=0, pin_memory=True)
    # Read the file list as well (kept for parity with the pipeline; not used below).
    imgpathlist = pd.read_csv(trainlist, header=None, sep=',')
    imgpathlist = imgpathlist[0].values.tolist()
    # Class-balanced RegNetY-040 checkpoint.
    net = ClassificationModel(encoder_name="timm-regnety_040", encoder_weights="imagenet",
                              in_channels=nchannels, classes=classes).to(device)
    pretrainp = r'.\runs\regnet040_0.6_balance\model_best.tar'
    if not os.path.exists(pretrainp):
        return
    net.load_state_dict(torch.load(pretrainp)["state_dict"])
    net.eval()  # freeze the batch-norm statistics
    # target: 0 ==> gpc
    gen_cam_lvwang(model=net, dataloader=traindataloader, train_cues_dir=train_cues_dir,
                   backbone='regnet', device=device, aug_smooth=False, eigen_smooth=False)
if __name__=="__main__":
main() | [
"os.path.exists",
"PIL.Image.open",
"os.makedirs",
"pandas.read_csv",
"model.ClassificationModel",
"TTT_loader.myImageFloder_img_path",
"tqdm.tqdm",
"os.path.join",
"torch.load",
"torch.from_numpy",
"numpy.array",
"os.path.dirname",
"os.path.basename",
"shutil.copy",
"gradcam.GradCAMPlus... | [((417, 454), 'numpy.array', 'np.array', (['[98.3519, 96.9567, 95.5713]'], {}), '([98.3519, 96.9567, 95.5713])\n', (425, 454), True, 'import numpy as np\n'), ((465, 502), 'numpy.array', 'np.array', (['[52.7343, 45.8798, 44.3465]'], {}), '([52.7343, 45.8798, 44.3465])\n', (473, 502), True, 'import numpy as np\n'), ((1072, 1114), 'os.makedirs', 'os.makedirs', (['train_cues_dir'], {'exist_ok': '(True)'}), '(train_cues_dir, exist_ok=True)\n', (1083, 1114), False, 'import os\n'), ((1656, 1763), 'gradcam.GradCAMPlusPlus', 'GradCAMPlusPlus', ([], {'model': 'model', 'target_layers': 'target_layer', 'use_cuda': "(True if device == 'cuda' else False)"}), "(model=model, target_layers=target_layer, use_cuda=True if \n device == 'cuda' else False)\n", (1671, 1763), False, 'from gradcam import GradCAMPlusPlus\n'), ((1826, 1842), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (1830, 1842), False, 'from tqdm import tqdm\n'), ((3175, 3219), 'pandas.read_csv', 'pd.read_csv', (['trainlist'], {'header': 'None', 'sep': '""","""'}), "(trainlist, header=None, sep=',')\n", (3186, 3219), True, 'import pandas as pd\n'), ((1131, 1166), 'os.path.join', 'os.path.join', (['train_cues_dir', '"""gpc"""'], {}), "(train_cues_dir, 'gpc')\n", (1143, 1166), False, 'import os\n'), ((1983, 2012), 'torch.nn.functional.softmax', 'F.softmax', (['pred_scores'], {'dim': '(1)'}), '(pred_scores, dim=1)\n', (1992, 2012), True, 'import torch.nn.functional as F\n'), ((3022, 3086), 'TTT_loader.myImageFloder_img_path', 'myImageFloder_img_path', (['trainlist'], {'aug': '(False)', 'channels': 'nchannels'}), '(trainlist, aug=False, channels=nchannels)\n', (3044, 3086), False, 'from TTT_loader import myImageFloder_img_path\n'), ((3538, 3563), 'os.path.exists', 'os.path.exists', (['pretrainp'], {}), '(pretrainp)\n', (3552, 3563), False, 'import os\n'), ((540, 560), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (550, 560), False, 'from PIL import Image\n'), 
((2597, 2638), 'os.path.join', 'os.path.join', (['train_cues_dir', 'idir', 'iname'], {}), '(train_cues_dir, idir, iname)\n', (2609, 2638), False, 'import os\n'), ((2704, 2766), 'tifffile.imwrite', 'tif.imwrite', (["(icue + '_%d_%.3f.tif' % (j, pred_scores[i, j]))", 'h'], {}), "(icue + '_%d_%.3f.tif' % (j, pred_scores[i, j]), h)\n", (2715, 2766), True, 'import tifffile as tif\n'), ((2776, 2807), 'shutil.copy', 'shutil.copy', (['imp', "(icue + '.png')"], {}), "(imp, icue + '.png')\n", (2787, 2807), False, 'import shutil\n'), ((3303, 3428), 'model.ClassificationModel', 'ClassificationModel', ([], {'encoder_name': '"""timm-regnety_040"""', 'encoder_weights': '"""imagenet"""', 'in_channels': 'nchannels', 'classes': 'classes'}), "(encoder_name='timm-regnety_040', encoder_weights=\n 'imagenet', in_channels=nchannels, classes=classes)\n", (3322, 3428), False, 'from model import ClassificationModel\n'), ((3604, 3625), 'torch.load', 'torch.load', (['pretrainp'], {}), '(pretrainp)\n', (3614, 3625), False, 'import torch\n'), ((2467, 2488), 'os.path.basename', 'os.path.basename', (['imp'], {}), '(imp)\n', (2483, 2488), False, 'import os\n'), ((619, 640), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (635, 640), False, 'import torch\n'), ((2546, 2566), 'os.path.dirname', 'os.path.dirname', (['imp'], {}), '(imp)\n', (2561, 2566), False, 'import os\n')] |
import logging
import math
import os
import functions as F
import hydra
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams["font.size"] = 28
@hydra.main(config_name="config")
def plot(cfg):
    """Plot the coefficients b^{(N,p)}_l for (N, p) = (180, 3), colour the
    points around the critical index, and save the figure as N180p3.png."""
    m = 3
    nt = 60
    N, p = nt * m, m
    plt.figure(figsize=[20, 6])
    # Continuous reference curve of b in the background.
    theta = np.arange(0, m * 2 * np.pi, 0.01)
    plt.plot(theta, F.b(theta), color="gray", linewidth=1, zorder=1)
    plt.plot([0, m * 2 * np.pi], [0, 0], linestyle="dashed", color="gray", zorder=0)
    kc = F.critical_index(nt)
    colors = ["white"] * (N - 1)
    # Colour the points up to the critical index kc.
    for i in range(m):
        for j in range(kc - 1):
            colors[i * nt + j] = "tab:blue"
            colors[i * nt + nt - kc + j] = "tab:blue"
    # Colour the points sitting on multiples of nt.
    for i in range(1, m):
        colors[i * nt - 1] = "tab:orange"
    # For (N, p) = (60*3, 3) four additional points are highlighted.
    for extra in (kc - 1, N - kc - 1, nt - kc - 1, N - nt + kc - 1):
        colors[extra] = "tab:pink"
    xs = [2 * np.pi * l / nt for l in range(1, N)]
    plt.scatter(xs, F.b(xs), zorder=10, color=colors, edgecolors="tab:gray", linewidths=0.3)
    plt.xlim(0, m * 2 * np.pi)
    plt.xlabel(r"$2{\pi}pl/N$")
    plt.ylabel(r"$b^{(N,p)}_{l}$")
    # Ticks every pi/2 from 0 to 6*pi.
    xlocs = [
        0,
        0.5 * np.pi,
        np.pi,
        1.5 * np.pi,
        2 * np.pi,
        2.5 * np.pi,
        3 * np.pi,
        3.5 * np.pi,
        4 * np.pi,
        4.5 * np.pi,
        5 * np.pi,
        5.5 * np.pi,
        6 * np.pi,
    ]
    xlabs = [
        "$0$",
        r"$\frac{\pi}{2}$",
        r"$\pi$",
        r"$\frac{3\pi}{2}$",
        r"$2\pi$",
        r"$\frac{5\pi}{2}$",
        r"$3\pi$",
        r"$\frac{7\pi}{2}$",
        r"$4\pi$",
        r"$\frac{9\pi}{2}$",
        r"$5\pi$",
        r"$\frac{11\pi}{2}$",
        r"$6\pi$",
    ]
    plt.xticks(xlocs, xlabs)
    plt.tight_layout()
    # Hydra changes the working directory; resolve paths against the original cwd.
    original_cwd = hydra.utils.get_original_cwd()
    fig_dir = os.path.join(original_cwd, cfg.hp.fig_dir)
    os.makedirs(fig_dir, exist_ok=True)
    path = os.path.join(fig_dir, "N180p3.png")
    plt.savefig(path)
    logging.info(f"Save the figure {path}")
if __name__ == "__main__":
plot()
| [
"hydra.utils.get_original_cwd",
"functions.critical_index",
"matplotlib.pyplot.savefig",
"hydra.main",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"os.makedirs",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.sca... | [((177, 209), 'hydra.main', 'hydra.main', ([], {'config_name': '"""config"""'}), "(config_name='config')\n", (187, 209), False, 'import hydra\n'), ((273, 300), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[20, 6]'}), '(figsize=[20, 6])\n', (283, 300), True, 'import matplotlib.pyplot as plt\n'), ((312, 345), 'numpy.arange', 'np.arange', (['(0)', '(m * 2 * np.pi)', '(0.01)'], {}), '(0, m * 2 * np.pi, 0.01)\n', (321, 345), True, 'import numpy as np\n'), ((356, 364), 'functions.b', 'F.b', (['_xs'], {}), '(_xs)\n', (359, 364), True, 'import functions as F\n'), ((470, 525), 'matplotlib.pyplot.plot', 'plt.plot', (['_xs', '_ys'], {'color': '"""gray"""', 'linewidth': '(1)', 'zorder': '(1)'}), "(_xs, _ys, color='gray', linewidth=1, zorder=1)\n", (478, 525), True, 'import matplotlib.pyplot as plt\n'), ((530, 615), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, m * 2 * np.pi]', '[0, 0]'], {'linestyle': '"""dashed"""', 'color': '"""gray"""', 'zorder': '(0)'}), "([0, m * 2 * np.pi], [0, 0], linestyle='dashed', color='gray', zorder=0\n )\n", (538, 615), True, 'import matplotlib.pyplot as plt\n'), ((621, 641), 'functions.critical_index', 'F.critical_index', (['nt'], {}), '(nt)\n', (637, 641), True, 'import functions as F\n'), ((1188, 1195), 'functions.b', 'F.b', (['xs'], {}), '(xs)\n', (1191, 1195), True, 'import functions as F\n'), ((1200, 1287), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'zorder': '(10)', 'color': 'colors', 'edgecolors': '"""tab:gray"""', 'linewidths': '(0.3)'}), "(xs, ys, zorder=10, color=colors, edgecolors='tab:gray',\n linewidths=0.3)\n", (1211, 1287), True, 'import matplotlib.pyplot as plt\n'), ((1289, 1315), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(m * 2 * np.pi)'], {}), '(0, m * 2 * np.pi)\n', (1297, 1315), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1347), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$2{\\\\pi}pl/N$"""'], {}), "('$2{\\\\pi}pl/N$')\n", (1330, 1347), True, 'import 
matplotlib.pyplot as plt\n'), ((1352, 1381), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$b^{(N,p)}_{l}$"""'], {}), "('$b^{(N,p)}_{l}$')\n", (1362, 1381), True, 'import matplotlib.pyplot as plt\n'), ((1976, 2000), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xlocs', 'xlabs'], {}), '(xlocs, xlabs)\n', (1986, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2005, 2023), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2021, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2043, 2073), 'hydra.utils.get_original_cwd', 'hydra.utils.get_original_cwd', ([], {}), '()\n', (2071, 2073), False, 'import hydra\n'), ((2088, 2129), 'os.path.join', 'os.path.join', (['current_dir', 'cfg.hp.fig_dir'], {}), '(current_dir, cfg.hp.fig_dir)\n', (2100, 2129), False, 'import os\n'), ((2134, 2169), 'os.makedirs', 'os.makedirs', (['fig_dir'], {'exist_ok': '(True)'}), '(fig_dir, exist_ok=True)\n', (2145, 2169), False, 'import os\n'), ((2182, 2217), 'os.path.join', 'os.path.join', (['fig_dir', '"""N180p3.png"""'], {}), "(fig_dir, 'N180p3.png')\n", (2194, 2217), False, 'import os\n'), ((2222, 2239), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (2233, 2239), True, 'import matplotlib.pyplot as plt\n'), ((2245, 2284), 'logging.info', 'logging.info', (['f"""Save the figure {path}"""'], {}), "(f'Save the figure {path}')\n", (2257, 2284), False, 'import logging\n')] |
from astropy.coordinates import SkyCoord, Distance
import astropy.units as u
from astropy.time import Time
from astroquery.ned import Ned
from astroquery.simbad import Simbad
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import os
import numpy as np
from spectractor import parameters
from spectractor.config import set_logger
from spectractor.extractor.spectroscopy import (Lines, HGAR_LINES, HYDROGEN_LINES, ATMOSPHERIC_LINES,
ISM_LINES, STELLAR_LINES)
if os.getenv("PYSYN_CDBS"):
import pysynphot as S
Simbad.add_votable_fields('flux(U)', 'flux(B)', 'flux(V)', 'flux(R)', 'flux(I)', 'flux(J)', 'sptype')
def load_target(label, verbose=False):
    """Load the target properties according to the type set by parameters.OBS_OBJECT_TYPE.

    Currently, the type can be either "STAR", "HG-AR" or "MONOCHROMATOR". The label parameter gives the
    name of the source and allows to load its specific properties.

    Parameters
    ----------
    label: str
        The label of the target.
    verbose: bool, optional
        If True, more verbosity (default: False).

    Examples
    --------
    >>> parameters.OBS_OBJECT_TYPE = "STAR"
    >>> t = load_target("HD111980", verbose=False)
    >>> print(t.label)
    HD111980
    >>> print(t.radec_position.dec)
    -18d31m20.009s
    >>> parameters.OBS_OBJECT_TYPE = "MONOCHROMATOR"
    >>> t = load_target("XX", verbose=False)
    >>> print(t.label)
    XX
    >>> parameters.OBS_OBJECT_TYPE = "HG-AR"
    >>> t = load_target("XX", verbose=False)
    >>> print([line.wavelength for line in t.lines.lines][:5])
    [253.652, 296.728, 302.15, 313.155, 334.148]
    """
    # Dispatch the configured object type to the matching Target subclass.
    target_classes = {'STAR': Star, 'HG-AR': ArcLamp, 'MONOCHROMATOR': Monochromator}
    target_class = target_classes.get(parameters.OBS_OBJECT_TYPE)
    if target_class is None:
        raise ValueError(f'Unknown parameters.OBS_OBJECT_TYPE: {parameters.OBS_OBJECT_TYPE}')
    return target_class(label, verbose)
class Target:

    def __init__(self, label, verbose=False):
        """Common state shared by every observable target.

        Parameters
        ----------
        label: str
            String label to name the target
        verbose: bool, optional
            Set True to increase verbosity (default: False)
        """
        self.my_logger = set_logger(self.__class__.__name__)
        # Identity and verbosity.
        self.label = label
        self.type = None
        self.verbose = verbose
        # Reference spectra: parallel lists of wavelength and flux arrays,
        # plus the interpolated SED built from them.
        self.wavelengths = []
        self.spectra = []
        self.sed = None
        # Spectral-line bookkeeping.
        self.emission_spectrum = False
        self.hydrogen_only = False
        self.lines = None
        # Astrometry.
        self.radec_position = None
        self.radec_position_after_pm = None
        self.redshift = 0
        # Optional image stamp of the target and its centroid coordinates.
        self.image = None
        self.image_x0 = None
        self.image_y0 = None
class ArcLamp(Target):

    def __init__(self, label, verbose=False):
        """Calibration arc lamp emitting the Mercury-Argon line list.

        Parameters
        ----------
        label: str
            String label to name the lamp.
        verbose: bool, optional
            Set True to increase verbosity (default: False)

        Examples
        --------
        Mercury-Argon lamp:

        >>> t = ArcLamp("HG-AR", verbose=False)
        >>> print([line.wavelength for line in t.lines.lines][:5])
        [253.652, 296.728, 302.15, 313.155, 334.148]
        >>> print(t.emission_spectrum)
        True
        """
        super().__init__(label, verbose=verbose)
        self.my_logger = set_logger(self.__class__.__name__)
        self.emission_spectrum = True
        # HG-AR emission lines, observed in first and second diffraction orders.
        self.lines = Lines(HGAR_LINES, emission_spectrum=True, orders=[1, 2])

    def load(self):  # pragma: no cover
        """Nothing to load for an arc lamp."""
        pass
class Monochromator(Target):

    def __init__(self, label, verbose=False):
        """Monochromator source: an emission target with no predefined line list.

        Parameters
        ----------
        label: str
            String label to name the monochromator.
        verbose: bool, optional
            Set True to increase verbosity (default: False)

        Examples
        --------
        >>> t = Monochromator("XX", verbose=False)
        >>> print(t.label)
        XX
        >>> print(t.emission_spectrum)
        True
        """
        super().__init__(label, verbose=verbose)
        self.my_logger = set_logger(self.__class__.__name__)
        self.emission_spectrum = True
        # Empty line list: the monochromator wavelength is set externally.
        self.lines = Lines([], emission_spectrum=True, orders=[1, 2])

    def load(self):  # pragma: no cover
        """Nothing to load for a monochromator."""
        pass
class Star(Target):

    def __init__(self, label, verbose=False):
        """Initialize Star class.

        Parameters
        ----------
        label: str
            String label to name the target
        verbose: bool, optional
            Set True to increase verbosity (default: False)

        Examples
        --------
        Emission line object:

        >>> s = Star('3C273')
        >>> print(s.label)
        3C273
        >>> print(s.radec_position.dec)
        2d03m08.598s
        >>> print(s.emission_spectrum)
        True

        Standard star:

        >>> s = Star('HD111980')
        >>> print(s.label)
        HD111980
        >>> print(s.radec_position.dec)
        -18d31m20.009s
        >>> print(s.emission_spectrum)
        False
        """
        Target.__init__(self, label, verbose=verbose)
        self.my_logger = set_logger(self.__class__.__name__)
        self.simbad = None
        self.load()

    def load(self):
        """Load the coordinates of the target from Simbad, apply proper motion,
        read the redshift and load the reference spectra.

        Examples
        --------
        >>> s = Star('3C273')
        >>> print(s.radec_position.dec)
        2d03m08.598s
        """
        # Request astrometric fields (parallax, proper motion, redshift) on top
        # of the default photometry so proper motion can be applied afterwards.
        Simbad.add_votable_fields('flux(U)', 'flux(B)', 'flux(V)', 'flux(R)', 'flux(I)', 'flux(J)', 'sptype',
                                  'parallax', 'pm', 'z_value')
        simbad = Simbad.query_object(self.label)
        self.simbad = simbad
        if simbad is not None:
            # BUGFIX: was `if self.verbose or True:` (debug leftover) which
            # always logged regardless of the verbosity flag.
            if self.verbose:
                self.my_logger.info(f'\n\tSimbad:\n{simbad}')
            self.radec_position = SkyCoord(simbad['RA'][0] + ' ' + simbad['DEC'][0], unit=(u.hourangle, u.deg))
        else:
            self.my_logger.warning('Target {} not found in Simbad'.format(self.label))
        # NOTE(review): the statements below dereference `simbad` unconditionally
        # and will raise if the Simbad query returned None — confirm intended.
        self.get_radec_position_after_pm(date_obs="J2000")
        if not np.ma.is_masked(simbad['Z_VALUE']):
            self.redshift = float(simbad['Z_VALUE'])
        else:
            self.redshift = 0
        self.load_spectra()

    def load_spectra(self):
        """Load reference spectra from Pysynphot database or NED database.

        If the object redshift is >0.2, the LAMBDA_MIN and LAMBDA_MAX parameters
        are redshifted accordingly.

        Examples
        --------
        >>> s = Star('3C273')
        >>> print(s.spectra[0][:4])
        [0.0000000e+00 2.5048577e-14 2.4238061e-14 2.4088789e-14]
        >>> s = Star('HD111980')
        >>> print(s.spectra[0][:4])
        [2.16890002e-13 2.66480010e-13 2.03540011e-13 2.38780004e-13]
        >>> s = Star('PKS1510-089')
        >>> print(s.redshift)
        0.36
        >>> print(f'{parameters.LAMBDA_MIN:.1f}, {parameters.LAMBDA_MAX:.1f}')
        408.0, 1496.0
        >>> print(s.spectra[0][:4])
        [117.34012 139.27621  87.38032 143.0816 ]
        """
        self.wavelengths = []  # in nm
        self.spectra = []
        # First try the local CALSPEC database through pysynphot.
        file_names = []
        is_calspec = False
        if os.getenv("PYSYN_CDBS") is not None:
            dirname = os.path.expandvars('$PYSYN_CDBS/calspec/')
            for fname in os.listdir(dirname):
                if os.path.isfile(dirname + fname):
                    if self.label.lower() in fname.lower():
                        file_names.append(dirname + fname)
        if len(file_names) > 0:
            is_calspec = True
            self.emission_spectrum = False
            self.hydrogen_only = False
            self.lines = Lines(HYDROGEN_LINES + ATMOSPHERIC_LINES + STELLAR_LINES,
                               redshift=self.redshift, emission_spectrum=self.emission_spectrum,
                               hydrogen_only=self.hydrogen_only)
            for k, f in enumerate(file_names):
                if '_mod_' in f:  # skip model spectra, keep measured ones
                    continue
                if self.verbose:
                    self.my_logger.info('\n\tLoading %s' % f)
                data = S.FileSpectrum(f, keepneg=True)
                if isinstance(data.waveunits, S.units.Angstrom):
                    # Convert Angstrom -> nm and rescale the flux density accordingly.
                    self.wavelengths.append(data.wave / 10.)
                    self.spectra.append(data.flux * 10.)
                else:
                    self.wavelengths.append(data.wave)
                    self.spectra.append(data.flux)
        elif 'HD' in self.label:  # it is a star without a local CALSPEC spectrum
            self.emission_spectrum = False
            self.hydrogen_only = False
            self.lines = Lines(ATMOSPHERIC_LINES + HYDROGEN_LINES + STELLAR_LINES,
                               redshift=self.redshift, emission_spectrum=self.emission_spectrum,
                               hydrogen_only=self.hydrogen_only)
        else:
            if 'PNG' not in self.label:
                # Fall back to the NED extragalactic database.
                ned = Ned.query_object(self.label)
                hdulists = Ned.get_spectra(self.label, show_progress=False)
                self.redshift = ned['Redshift'][0]
                self.emission_spectrum = True
                self.hydrogen_only = False
                if self.redshift > 0.2:
                    self.hydrogen_only = True
                    # Shift the extraction window to follow the redshifted lines.
                    # NOTE(review): this mutates the global parameters module and
                    # accumulates if load_spectra() is called more than once.
                    parameters.LAMBDA_MIN *= 1 + self.redshift
                    parameters.LAMBDA_MAX *= 1 + self.redshift
                self.lines = Lines(ATMOSPHERIC_LINES+ISM_LINES+HYDROGEN_LINES,
                                   redshift=self.redshift, emission_spectrum=self.emission_spectrum,
                                   hydrogen_only=self.hydrogen_only)
                for k, h in enumerate(hdulists):
                    if h[0].header['NAXIS'] == 1:
                        self.spectra.append(h[0].data)
                    else:
                        for d in h[0].data:
                            self.spectra.append(d)
                    wave_n = len(h[0].data)
                    if h[0].header['NAXIS'] == 2:
                        wave_n = len(h[0].data.T)
                    # Rebuild the wavelength axis from the FITS WCS keywords.
                    wave_step = h[0].header['CDELT1']
                    wave_start = h[0].header['CRVAL1'] - (h[0].header['CRPIX1'] - 1) * wave_step
                    wave_end = wave_start + wave_n * wave_step
                    waves = np.linspace(wave_start, wave_end, wave_n)
                    is_angstrom = False
                    for key in list(h[0].header.keys()):
                        if 'angstrom' in str(h[0].header[key]).lower():
                            is_angstrom = True
                    if is_angstrom:  # convert Angstrom -> nm
                        waves *= 0.1
                    if h[0].header['NAXIS'] > 1:
                        for i in range(h[0].header['NAXIS'] + 1):
                            self.wavelengths.append(waves)
                    else:
                        self.wavelengths.append(waves)
            else:
                self.emission_spectrum = True
                self.lines = Lines(ATMOSPHERIC_LINES+ISM_LINES+HYDROGEN_LINES,
                                   redshift=self.redshift, emission_spectrum=self.emission_spectrum,
                                   hydrogen_only=self.hydrogen_only)
        self.build_sed()
        self.my_logger.debug(f"\n\tTarget label: {self.label}"
                             f"\n\tCalspec? {is_calspec}"
                             f"\n\tNumber of spectra: {len(self.spectra)}"
                             f"\n\tRedshift: {self.redshift}"
                             f"\n\tEmission spectrum ? {self.emission_spectrum}"
                             f"\n\tLines: {[l.label for l in self.lines.lines]}")

    def get_radec_position_after_pm(self, date_obs):
        """Propagate the Simbad coordinates to the observation date using the
        target proper motion and parallax.

        Parameters
        ----------
        date_obs: str
            The observation date (any string accepted by astropy.time.Time).

        Returns
        -------
        radec_position_after_pm: astropy.coordinates.SkyCoord
            The sky position at date_obs; also stored in self.radec_position_after_pm.
        """
        target_pmra = self.simbad[0]['PMRA'] * u.mas / u.yr
        if np.isnan(target_pmra):
            target_pmra = 0 * u.mas / u.yr
        target_pmdec = self.simbad[0]['PMDEC'] * u.mas / u.yr
        if np.isnan(target_pmdec):
            target_pmdec = 0 * u.mas / u.yr
        target_parallax = self.simbad[0]['PLX_VALUE'] * u.mas
        if target_parallax == 0 * u.mas:
            # Distance() rejects a null parallax: use a tiny positive value instead.
            target_parallax = 1e-4 * u.mas
        target_coord = SkyCoord(ra=self.radec_position.ra, dec=self.radec_position.dec,
                                distance=Distance(parallax=target_parallax),
                                pm_ra_cosdec=target_pmra, pm_dec=target_pmdec, frame='icrs', equinox="J2000",
                                obstime="J2000")
        self.radec_position_after_pm = target_coord.apply_space_motion(new_obstime=Time(date_obs))
        return self.radec_position_after_pm

    def build_sed(self, index=0):
        """Interpolate the database reference spectra and return self.sed as a function of the wavelength.

        Parameters
        ----------
        index: int
            Index of the spectrum stored in the self.spectra list

        Examples
        --------
        >>> s = Star('HD111980')
        >>> s.build_sed(index=0)
        >>> s.sed(550)
        array(1.67605113e-11)
        """
        if len(self.spectra) == 0:
            # No reference spectrum available: return a null SED.
            self.sed = lambda x: np.zeros_like(x)
        else:
            self.sed = interp1d(self.wavelengths[index], self.spectra[index], kind='linear', bounds_error=False,
                                fill_value=0.)

    def plot_spectra(self):
        """ Plot the spectra stored in the self.spectra list.

        Examples
        --------
        >>> s = Star('HD111980')
        >>> s.plot_spectra()
        """
        plt.figure()  # open a fresh figure instead of drawing over the previous one
        for isp, sp in enumerate(self.spectra):
            plt.plot(self.wavelengths[isp], sp, label='Spectrum %d' % isp)
        plt.xlim((300, 1100))
        plt.xlabel(r'$\lambda$ [nm]')
        plt.ylabel('Flux')
        plt.title(self.label)
        plt.legend()
        if parameters.DISPLAY:  # pragma: no cover
            plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"matplotlib.pyplot.ylabel",
"scipy.interpolate.interp1d",
"astroquery.ned.Ned.query_object",
"numpy.ma.is_masked",
"pysynphot.FileSpectrum",
"os.listdir",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"doctest.testmod",
"astropy.coordinates.Distance",
"os.path.isfile"... | [((534, 557), 'os.getenv', 'os.getenv', (['"""PYSYN_CDBS"""'], {}), "('PYSYN_CDBS')\n", (543, 557), False, 'import os\n'), ((586, 691), 'astroquery.simbad.Simbad.add_votable_fields', 'Simbad.add_votable_fields', (['"""flux(U)"""', '"""flux(B)"""', '"""flux(V)"""', '"""flux(R)"""', '"""flux(I)"""', '"""flux(J)"""', '"""sptype"""'], {}), "('flux(U)', 'flux(B)', 'flux(V)', 'flux(R)',\n 'flux(I)', 'flux(J)', 'sptype')\n", (611, 691), False, 'from astroquery.simbad import Simbad\n'), ((14434, 14451), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (14449, 14451), False, 'import doctest\n'), ((2404, 2439), 'spectractor.config.set_logger', 'set_logger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (2414, 2439), False, 'from spectractor.config import set_logger\n'), ((3571, 3606), 'spectractor.config.set_logger', 'set_logger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (3581, 3606), False, 'from spectractor.config import set_logger\n'), ((3666, 3722), 'spectractor.extractor.spectroscopy.Lines', 'Lines', (['HGAR_LINES'], {'emission_spectrum': '(True)', 'orders': '[1, 2]'}), '(HGAR_LINES, emission_spectrum=True, orders=[1, 2])\n', (3671, 3722), False, 'from spectractor.extractor.spectroscopy import Lines, HGAR_LINES, HYDROGEN_LINES, ATMOSPHERIC_LINES, ISM_LINES, STELLAR_LINES\n'), ((4369, 4404), 'spectractor.config.set_logger', 'set_logger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (4379, 4404), False, 'from spectractor.config import set_logger\n'), ((4464, 4512), 'spectractor.extractor.spectroscopy.Lines', 'Lines', (['[]'], {'emission_spectrum': '(True)', 'orders': '[1, 2]'}), '([], emission_spectrum=True, orders=[1, 2])\n', (4469, 4512), False, 'from spectractor.extractor.spectroscopy import Lines, HGAR_LINES, HYDROGEN_LINES, ATMOSPHERIC_LINES, ISM_LINES, STELLAR_LINES\n'), ((5425, 5460), 'spectractor.config.set_logger', 'set_logger', (['self.__class__.__name__'], {}), 
'(self.__class__.__name__)\n', (5435, 5460), False, 'from spectractor.config import set_logger\n'), ((5723, 5857), 'astroquery.simbad.Simbad.add_votable_fields', 'Simbad.add_votable_fields', (['"""flux(U)"""', '"""flux(B)"""', '"""flux(V)"""', '"""flux(R)"""', '"""flux(I)"""', '"""flux(J)"""', '"""sptype"""', '"""parallax"""', '"""pm"""', '"""z_value"""'], {}), "('flux(U)', 'flux(B)', 'flux(V)', 'flux(R)',\n 'flux(I)', 'flux(J)', 'sptype', 'parallax', 'pm', 'z_value')\n", (5748, 5857), False, 'from astroquery.simbad import Simbad\n'), ((5905, 5936), 'astroquery.simbad.Simbad.query_object', 'Simbad.query_object', (['self.label'], {}), '(self.label)\n', (5924, 5936), False, 'from astroquery.simbad import Simbad\n'), ((12184, 12205), 'numpy.isnan', 'np.isnan', (['target_pmra'], {}), '(target_pmra)\n', (12192, 12205), True, 'import numpy as np\n'), ((12323, 12345), 'numpy.isnan', 'np.isnan', (['target_pmdec'], {}), '(target_pmdec)\n', (12331, 12345), True, 'import numpy as np\n'), ((13985, 13997), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13995, 13997), True, 'import matplotlib.pyplot as plt\n'), ((14169, 14190), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(300, 1100)'], {}), '((300, 1100))\n', (14177, 14190), True, 'import matplotlib.pyplot as plt\n'), ((14199, 14228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda$ [nm]"""'], {}), "('$\\\\lambda$ [nm]')\n", (14209, 14228), True, 'import matplotlib.pyplot as plt\n'), ((14237, 14255), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (14247, 14255), True, 'import matplotlib.pyplot as plt\n'), ((14264, 14285), 'matplotlib.pyplot.title', 'plt.title', (['self.label'], {}), '(self.label)\n', (14273, 14285), True, 'import matplotlib.pyplot as plt\n'), ((14294, 14306), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14304, 14306), True, 'import matplotlib.pyplot as plt\n'), ((6130, 6207), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["(simbad['RA'][0] + ' 
' + simbad['DEC'][0])"], {'unit': '(u.hourangle, u.deg)'}), "(simbad['RA'][0] + ' ' + simbad['DEC'][0], unit=(u.hourangle, u.deg))\n", (6138, 6207), False, 'from astropy.coordinates import SkyCoord, Distance\n'), ((6382, 6416), 'numpy.ma.is_masked', 'np.ma.is_masked', (["simbad['Z_VALUE']"], {}), "(simbad['Z_VALUE'])\n", (6397, 6416), True, 'import numpy as np\n'), ((7511, 7534), 'os.getenv', 'os.getenv', (['"""PYSYN_CDBS"""'], {}), "('PYSYN_CDBS')\n", (7520, 7534), False, 'import os\n'), ((7570, 7612), 'os.path.expandvars', 'os.path.expandvars', (['"""$PYSYN_CDBS/calspec/"""'], {}), "('$PYSYN_CDBS/calspec/')\n", (7588, 7612), False, 'import os\n'), ((7638, 7657), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (7648, 7657), False, 'import os\n'), ((7999, 8166), 'spectractor.extractor.spectroscopy.Lines', 'Lines', (['(HYDROGEN_LINES + ATMOSPHERIC_LINES + STELLAR_LINES)'], {'redshift': 'self.redshift', 'emission_spectrum': 'self.emission_spectrum', 'hydrogen_only': 'self.hydrogen_only'}), '(HYDROGEN_LINES + ATMOSPHERIC_LINES + STELLAR_LINES, redshift=self.\n redshift, emission_spectrum=self.emission_spectrum, hydrogen_only=self.\n hydrogen_only)\n', (8004, 8166), False, 'from spectractor.extractor.spectroscopy import Lines, HGAR_LINES, HYDROGEN_LINES, ATMOSPHERIC_LINES, ISM_LINES, STELLAR_LINES\n'), ((13558, 13667), 'scipy.interpolate.interp1d', 'interp1d', (['self.wavelengths[index]', 'self.spectra[index]'], {'kind': '"""linear"""', 'bounds_error': '(False)', 'fill_value': '(0.0)'}), "(self.wavelengths[index], self.spectra[index], kind='linear',\n bounds_error=False, fill_value=0.0)\n", (13566, 13667), False, 'from scipy.interpolate import interp1d\n'), ((14098, 14160), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wavelengths[isp]', 'sp'], {'label': "('Spectrum %d' % isp)"}), "(self.wavelengths[isp], sp, label='Spectrum %d' % isp)\n", (14106, 14160), True, 'import matplotlib.pyplot as plt\n'), ((14370, 14380), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (14378, 14380), True, 'import matplotlib.pyplot as plt\n'), ((7678, 7709), 'os.path.isfile', 'os.path.isfile', (['(dirname + fname)'], {}), '(dirname + fname)\n', (7692, 7709), False, 'import os\n'), ((8446, 8477), 'pysynphot.FileSpectrum', 'S.FileSpectrum', (['f'], {'keepneg': '(True)'}), '(f, keepneg=True)\n', (8460, 8477), True, 'import pysynphot as S\n'), ((8945, 9112), 'spectractor.extractor.spectroscopy.Lines', 'Lines', (['(ATMOSPHERIC_LINES + HYDROGEN_LINES + STELLAR_LINES)'], {'redshift': 'self.redshift', 'emission_spectrum': 'self.emission_spectrum', 'hydrogen_only': 'self.hydrogen_only'}), '(ATMOSPHERIC_LINES + HYDROGEN_LINES + STELLAR_LINES, redshift=self.\n redshift, emission_spectrum=self.emission_spectrum, hydrogen_only=self.\n hydrogen_only)\n', (8950, 9112), False, 'from spectractor.extractor.spectroscopy import Lines, HGAR_LINES, HYDROGEN_LINES, ATMOSPHERIC_LINES, ISM_LINES, STELLAR_LINES\n'), ((12666, 12700), 'astropy.coordinates.Distance', 'Distance', ([], {'parallax': 'target_parallax'}), '(parallax=target_parallax)\n', (12674, 12700), False, 'from astropy.coordinates import SkyCoord, Distance\n'), ((12944, 12958), 'astropy.time.Time', 'Time', (['date_obs'], {}), '(date_obs)\n', (12948, 12958), False, 'from astropy.time import Time\n'), ((13504, 13520), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (13517, 13520), True, 'import numpy as np\n'), ((9347, 9375), 'astroquery.ned.Ned.query_object', 'Ned.query_object', (['self.label'], {}), '(self.label)\n', (9363, 9375), False, 'from astroquery.ned import Ned\n'), ((9403, 9451), 'astroquery.ned.Ned.get_spectra', 'Ned.get_spectra', (['self.label'], {'show_progress': '(False)'}), '(self.label, show_progress=False)\n', (9418, 9451), False, 'from astroquery.ned import Ned\n'), ((9833, 9996), 'spectractor.extractor.spectroscopy.Lines', 'Lines', (['(ATMOSPHERIC_LINES + ISM_LINES + HYDROGEN_LINES)'], {'redshift': 'self.redshift', 'emission_spectrum': 'self.emission_spectrum', 
'hydrogen_only': 'self.hydrogen_only'}), '(ATMOSPHERIC_LINES + ISM_LINES + HYDROGEN_LINES, redshift=self.\n redshift, emission_spectrum=self.emission_spectrum, hydrogen_only=self.\n hydrogen_only)\n', (9838, 9996), False, 'from spectractor.extractor.spectroscopy import Lines, HGAR_LINES, HYDROGEN_LINES, ATMOSPHERIC_LINES, ISM_LINES, STELLAR_LINES\n'), ((11393, 11556), 'spectractor.extractor.spectroscopy.Lines', 'Lines', (['(ATMOSPHERIC_LINES + ISM_LINES + HYDROGEN_LINES)'], {'redshift': 'self.redshift', 'emission_spectrum': 'self.emission_spectrum', 'hydrogen_only': 'self.hydrogen_only'}), '(ATMOSPHERIC_LINES + ISM_LINES + HYDROGEN_LINES, redshift=self.\n redshift, emission_spectrum=self.emission_spectrum, hydrogen_only=self.\n hydrogen_only)\n', (11398, 11556), False, 'from spectractor.extractor.spectroscopy import Lines, HGAR_LINES, HYDROGEN_LINES, ATMOSPHERIC_LINES, ISM_LINES, STELLAR_LINES\n'), ((10714, 10755), 'numpy.linspace', 'np.linspace', (['wave_start', 'wave_end', 'wave_n'], {}), '(wave_start, wave_end, wave_n)\n', (10725, 10755), True, 'import numpy as np\n')] |
import numpy as np
class her_sampler:
    """Hindsight Experience Replay (HER) transition sampler.

    With the 'future' strategy, a fraction of sampled transitions get their
    desired goal replaced by a goal actually achieved later in the same
    episode, and their reward is recomputed accordingly.

    Fix over the original: the debug ``print`` statements that polluted
    stdout on every call have been removed; behavior is otherwise unchanged.
    """

    def __init__(self, replay_strategy, replay_k, reward_func=None):
        """
        Args:
            replay_strategy: HER strategy name; only 'future' enables goal
                relabelling, any other value disables it (future_p = 0).
            replay_k: ratio of HER transitions to regular transitions (k : 1).
            reward_func: callable ``(achieved_goal, goal, info) -> reward``
                used to recompute rewards for relabelled goals.
        """
        self.replay_strategy = replay_strategy
        self.replay_k = replay_k
        # future_p is the probability a sampled transition is relabelled
        # with a future achieved goal: k / (k + 1).
        if self.replay_strategy == 'future':
            self.future_p = 1 - (1. / (1 + replay_k))
        else:
            self.future_p = 0
        self.reward_func = reward_func

    def sample_her_transitions(self, episode_batch, batch_size_in_transitions):
        """Sample a minibatch of transitions, relabelling some goals via HER.

        Args:
            episode_batch: dict of arrays shaped (num_episodes, T or T+1, dim).
                Must contain 'actions' (defines T), 'g', 'ag_next', and
                'ag' with T+1 timesteps (so a "future" index up to T is valid).
            batch_size_in_transitions: number of transitions to sample.

        Returns:
            dict of arrays with leading dimension batch_size, including the
            recomputed rewards under key 'r' with shape (batch_size, 1).
        """
        T = episode_batch['actions'].shape[1]
        rollout_batch_size = episode_batch['actions'].shape[0]
        batch_size = batch_size_in_transitions
        # Uniformly pick which episodes and which timesteps to sample.
        episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
        t_samples = np.random.randint(T, size=batch_size)
        transitions = {key: episode_batch[key][episode_idxs, t_samples].copy()
                       for key in episode_batch.keys()}
        # Decide which transitions get HER relabelling and, for each, pick a
        # future timestep in (t, T] from the same episode.
        her_indexes = np.where(np.random.uniform(size=batch_size) < self.future_p)
        future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
        future_offset = future_offset.astype(int)
        future_t = (t_samples + 1 + future_offset)[her_indexes]
        # Replace the desired goal with the achieved goal of the future step.
        future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
        transitions['g'][her_indexes] = future_ag
        # Recompute rewards against the (possibly relabelled) goals.
        transitions['r'] = np.expand_dims(self.reward_func(transitions['ag_next'], transitions['g'], None), 1)
        transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])
                       for k in transitions.keys()}
        return transitions
| [
"numpy.random.randint",
"numpy.random.uniform"
] | [((1948, 2000), 'numpy.random.randint', 'np.random.randint', (['(0)', 'rollout_batch_size', 'batch_size'], {}), '(0, rollout_batch_size, batch_size)\n', (1965, 2000), True, 'import numpy as np\n'), ((2493, 2530), 'numpy.random.randint', 'np.random.randint', (['T'], {'size': 'batch_size'}), '(T, size=batch_size)\n', (2510, 2530), True, 'import numpy as np\n'), ((3092, 3126), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'batch_size'}), '(size=batch_size)\n', (3109, 3126), True, 'import numpy as np\n'), ((3016, 3050), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'batch_size'}), '(size=batch_size)\n', (3033, 3050), True, 'import numpy as np\n')] |
# code-checked
# server-checked
import pickle
import numpy as np
import cv2
import os
from collections import namedtuple
import random
# (NOTE! this is taken from the official Cityscapes scripts:)
Label = namedtuple("Label", [
    "name",          # identifier of the class, e.g. 'car', 'person', ...
    "id",            # integer ID used in the raw ground-truth images (-1 = none)
    "trainId",       # ID used for training; 19 collects every ignored class here
    "category",      # name of the category this label belongs to
    "categoryId",    # ID of that category (category-level ground truth)
    "hasInstances",  # whether this label distinguishes single instances
    "ignoreInEval",  # whether pixels of this class are ignored in evaluation
    "color",         # RGB color used for visualization
])

# (NOTE! this is taken from the official Cityscapes scripts:)
#     name                     id trainId  category        catId inst? ignore color
labels = [
    Label("unlabeled",            0, 19, "void",         0, False, True,  (0, 0, 0)),
    Label("ego vehicle",          1, 19, "void",         0, False, True,  (0, 0, 0)),
    Label("rectification border", 2, 19, "void",         0, False, True,  (0, 0, 0)),
    Label("out of roi",           3, 19, "void",         0, False, True,  (0, 0, 0)),
    Label("static",               4, 19, "void",         0, False, True,  (0, 0, 0)),
    Label("dynamic",              5, 19, "void",         0, False, True,  (111, 74, 0)),
    Label("ground",               6, 19, "void",         0, False, True,  (81, 0, 81)),
    Label("road",                 7,  0, "flat",         1, False, False, (128, 64, 128)),
    Label("sidewalk",             8,  1, "flat",         1, False, False, (244, 35, 232)),
    Label("parking",              9, 19, "flat",         1, False, True,  (250, 170, 160)),
    Label("rail track",          10, 19, "flat",         1, False, True,  (230, 150, 140)),
    Label("building",            11,  2, "construction", 2, False, False, (70, 70, 70)),
    Label("wall",                12,  3, "construction", 2, False, False, (102, 102, 156)),
    Label("fence",               13,  4, "construction", 2, False, False, (190, 153, 153)),
    Label("guard rail",          14, 19, "construction", 2, False, True,  (180, 165, 180)),
    Label("bridge",              15, 19, "construction", 2, False, True,  (150, 100, 100)),
    Label("tunnel",              16, 19, "construction", 2, False, True,  (150, 120, 90)),
    Label("pole",                17,  5, "object",       3, False, False, (153, 153, 153)),
    Label("polegroup",           18, 19, "object",       3, False, True,  (153, 153, 153)),
    Label("traffic light",       19,  6, "object",       3, False, False, (250, 170, 30)),
    Label("traffic sign",        20,  7, "object",       3, False, False, (220, 220, 0)),
    Label("vegetation",          21,  8, "nature",       4, False, False, (107, 142, 35)),
    Label("terrain",             22,  9, "nature",       4, False, False, (152, 251, 152)),
    Label("sky",                 23, 10, "sky",          5, False, False, (70, 130, 180)),
    Label("person",              24, 11, "human",        6, True,  False, (220, 20, 60)),
    Label("rider",               25, 12, "human",        6, True,  False, (255, 0, 0)),
    Label("car",                 26, 13, "vehicle",      7, True,  False, (0, 0, 142)),
    Label("truck",               27, 14, "vehicle",      7, True,  False, (0, 0, 70)),
    Label("bus",                 28, 15, "vehicle",      7, True,  False, (0, 60, 100)),
    Label("caravan",             29, 19, "vehicle",      7, True,  True,  (0, 0, 90)),
    Label("trailer",             30, 19, "vehicle",      7, True,  True,  (0, 0, 110)),
    Label("train",               31, 16, "vehicle",      7, True,  False, (0, 80, 100)),
    Label("motorcycle",          32, 17, "vehicle",      7, True,  False, (0, 0, 230)),
    Label("bicycle",             33, 18, "vehicle",      7, True,  False, (119, 11, 32)),
    Label("license plate",       -1, 19, "vehicle",      7, False, True,  (0, 0, 142)),
]

# create a function which maps id to trainId:
id_to_trainId = {label.id: label.trainId for label in labels}
id_to_trainId_map_func = np.vectorize(id_to_trainId.get)
synscapes_path = "/home/data/synscapes"
synscapes_meta_path = "/home/data/synscapes_meta"

# Create the output directory layout (meta root, enlarged labels, trainId maps).
for sub_dir in ["", "/gtFine", "/label_imgs"]:
    if not os.path.exists(synscapes_meta_path + sub_dir):
        os.makedirs(synscapes_meta_path + sub_dir)

img_h = 720
img_w = 1440
new_img_h = 1024
new_img_w = 2048

################################################################################
# randomly select a subset of 2975 images as train and 500 images as val:
################################################################################
# Synscapes images are named "1.png" ... "25000.png".
img_ids = [str(i) for i in range(1, 25001)]
random.shuffle(img_ids)

train_img_ids = img_ids[0:2975]
print ("num train images: %d" % len(train_img_ids))
with open(synscapes_meta_path + "/train_img_ids.pkl", "wb") as file:
    pickle.dump(train_img_ids, file)

val_img_ids = img_ids[2975:(2975 + 500)]
print ("num val images: %d" % len(val_img_ids))
with open(synscapes_meta_path + "/val_img_ids.pkl", "wb") as file:
    pickle.dump(val_img_ids, file)

def enlarge_and_convert_labels(split_img_ids, split_name):
    """Enlarge each raw class-label image of a split to 1024x2048 and save it,
    plus a trainId-converted copy.

    (Refactor: this loop was previously duplicated verbatim for train and val.)
    """
    label_dir = synscapes_path + "/img/class/"
    for (step, img_id) in enumerate(split_img_ids):
        if (step % 100) == 0:
            print ("enlarging %s labels, step: %d/%d" % (split_name, step + 1, len(split_img_ids)))
        gtFine_img = cv2.imread(label_dir + img_id + ".png", -1)  # (shape: (720, 1440))
        # Nearest-neighbor resize so label values are never blended:
        gtFine_img = cv2.resize(gtFine_img, (new_img_w, new_img_h), interpolation=cv2.INTER_NEAREST)  # (shape: (1024, 2048))
        cv2.imwrite(synscapes_meta_path + "/gtFine/" + img_id + ".png", gtFine_img)
        # convert gtFine_img from id to trainId pixel values:
        label_img = id_to_trainId_map_func(gtFine_img).astype(np.uint8)  # (shape: (1024, 2048))
        cv2.imwrite(synscapes_meta_path + "/label_imgs/" + img_id + ".png", label_img)

enlarge_and_convert_labels(train_img_ids, "train")
enlarge_and_convert_labels(val_img_ids, "val")

################################################################################
# compute the class weigths:
################################################################################
# Class-balancing weights as in the ENet paper: w_c = 1 / ln(1.02 + p_c).
num_classes = 19
trainId_to_count = {trainId: 0 for trainId in range(num_classes)}

# get the total number of pixels in all train label_imgs that are of each object class:
for step, img_id in enumerate(train_img_ids):
    if (step % 100) == 0:
        print ("computing class weights, step: %d/%d" % (step + 1, len(train_img_ids)))
    label_img = cv2.imread(synscapes_meta_path + "/label_imgs/" + img_id + ".png", -1)
    for trainId in range(num_classes):
        # count how many pixels in label_img are of class trainId:
        trainId_to_count[trainId] += np.sum(np.equal(label_img, trainId))

# compute the class weights according to the ENet paper:
class_weights = []
total_count = sum(trainId_to_count.values())
for trainId, count in trainId_to_count.items():
    trainId_prob = float(count) / float(total_count)
    class_weights.append(1 / np.log(1.02 + trainId_prob))

print (class_weights)
with open(synscapes_meta_path + "/class_weights.pkl", "wb") as file:
    pickle.dump(class_weights, file, protocol=2)  # (protocol=2 is needed to be able to open this file with python2)
| [
"os.path.exists",
"cv2.imwrite",
"collections.namedtuple",
"pickle.dump",
"random.shuffle",
"os.makedirs",
"numpy.log",
"numpy.equal",
"numpy.sum",
"numpy.linspace",
"cv2.resize",
"numpy.vectorize",
"cv2.imread"
] | [((207, 324), 'collections.namedtuple', 'namedtuple', (['"""Label"""', "['name', 'id', 'trainId', 'category', 'categoryId', 'hasInstances',\n 'ignoreInEval', 'color']"], {}), "('Label', ['name', 'id', 'trainId', 'category', 'categoryId',\n 'hasInstances', 'ignoreInEval', 'color'])\n", (217, 324), False, 'from collections import namedtuple\n'), ((7004, 7035), 'numpy.vectorize', 'np.vectorize', (['id_to_trainId.get'], {}), '(id_to_trainId.get)\n', (7016, 7035), True, 'import numpy as np\n'), ((7741, 7769), 'numpy.linspace', 'np.linspace', (['(1)', '(25000)', '(25000)'], {}), '(1, 25000, 25000)\n', (7752, 7769), True, 'import numpy as np\n'), ((7890, 7913), 'random.shuffle', 'random.shuffle', (['img_ids'], {}), '(img_ids)\n', (7904, 7913), False, 'import random\n'), ((7914, 7937), 'random.shuffle', 'random.shuffle', (['img_ids'], {}), '(img_ids)\n', (7928, 7937), False, 'import random\n'), ((7938, 7961), 'random.shuffle', 'random.shuffle', (['img_ids'], {}), '(img_ids)\n', (7952, 7961), False, 'import random\n'), ((7962, 7985), 'random.shuffle', 'random.shuffle', (['img_ids'], {}), '(img_ids)\n', (7976, 7985), False, 'import random\n'), ((7135, 7170), 'os.path.exists', 'os.path.exists', (['synscapes_meta_path'], {}), '(synscapes_meta_path)\n', (7149, 7170), False, 'import os\n'), ((7176, 7208), 'os.makedirs', 'os.makedirs', (['synscapes_meta_path'], {}), '(synscapes_meta_path)\n', (7187, 7208), False, 'import os\n'), ((7216, 7263), 'os.path.exists', 'os.path.exists', (["(synscapes_meta_path + '/gtFine')"], {}), "(synscapes_meta_path + '/gtFine')\n", (7230, 7263), False, 'import os\n'), ((7269, 7313), 'os.makedirs', 'os.makedirs', (["(synscapes_meta_path + '/gtFine')"], {}), "(synscapes_meta_path + '/gtFine')\n", (7280, 7313), False, 'import os\n'), ((7321, 7372), 'os.path.exists', 'os.path.exists', (["(synscapes_meta_path + '/label_imgs')"], {}), "(synscapes_meta_path + '/label_imgs')\n", (7335, 7372), False, 'import os\n'), ((7378, 7426), 'os.makedirs', 
'os.makedirs', (["(synscapes_meta_path + '/label_imgs')"], {}), "(synscapes_meta_path + '/label_imgs')\n", (7389, 7426), False, 'import os\n'), ((8144, 8176), 'pickle.dump', 'pickle.dump', (['train_img_ids', 'file'], {}), '(train_img_ids, file)\n', (8155, 8176), False, 'import pickle\n'), ((8336, 8366), 'pickle.dump', 'pickle.dump', (['val_img_ids', 'file'], {}), '(val_img_ids, file)\n', (8347, 8366), False, 'import pickle\n'), ((8845, 8876), 'cv2.imread', 'cv2.imread', (['gtFine_img_path', '(-1)'], {}), '(gtFine_img_path, -1)\n', (8855, 8876), False, 'import cv2\n'), ((8965, 9044), 'cv2.resize', 'cv2.resize', (['gtFine_img', '(new_img_w, new_img_h)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(gtFine_img, (new_img_w, new_img_h), interpolation=cv2.INTER_NEAREST)\n', (8975, 9044), False, 'import cv2\n'), ((9074, 9149), 'cv2.imwrite', 'cv2.imwrite', (["(synscapes_meta_path + '/gtFine/' + img_id + '.png')", 'gtFine_img'], {}), "(synscapes_meta_path + '/gtFine/' + img_id + '.png', gtFine_img)\n", (9085, 9149), False, 'import cv2\n'), ((9332, 9410), 'cv2.imwrite', 'cv2.imwrite', (["(synscapes_meta_path + '/label_imgs/' + img_id + '.png')", 'label_img'], {}), "(synscapes_meta_path + '/label_imgs/' + img_id + '.png', label_img)\n", (9343, 9410), False, 'import cv2\n'), ((9881, 9912), 'cv2.imread', 'cv2.imread', (['gtFine_img_path', '(-1)'], {}), '(gtFine_img_path, -1)\n', (9891, 9912), False, 'import cv2\n'), ((10001, 10080), 'cv2.resize', 'cv2.resize', (['gtFine_img', '(new_img_w, new_img_h)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(gtFine_img, (new_img_w, new_img_h), interpolation=cv2.INTER_NEAREST)\n', (10011, 10080), False, 'import cv2\n'), ((10110, 10185), 'cv2.imwrite', 'cv2.imwrite', (["(synscapes_meta_path + '/gtFine/' + img_id + '.png')", 'gtFine_img'], {}), "(synscapes_meta_path + '/gtFine/' + img_id + '.png', gtFine_img)\n", (10121, 10185), False, 'import cv2\n'), ((10368, 10446), 'cv2.imwrite', 'cv2.imwrite', (["(synscapes_meta_path + '/label_imgs/' + 
img_id + '.png')", 'label_img'], {}), "(synscapes_meta_path + '/label_imgs/' + img_id + '.png', label_img)\n", (10379, 10446), False, 'import cv2\n'), ((11088, 11118), 'cv2.imread', 'cv2.imread', (['label_img_path', '(-1)'], {}), '(label_img_path, -1)\n', (11098, 11118), False, 'import cv2\n'), ((11832, 11876), 'pickle.dump', 'pickle.dump', (['class_weights', 'file'], {'protocol': '(2)'}), '(class_weights, file, protocol=2)\n', (11843, 11876), False, 'import pickle\n'), ((11262, 11290), 'numpy.equal', 'np.equal', (['label_img', 'trainId'], {}), '(label_img, trainId)\n', (11270, 11290), True, 'import numpy as np\n'), ((11315, 11335), 'numpy.sum', 'np.sum', (['trainId_mask'], {}), '(trainId_mask)\n', (11321, 11335), True, 'import numpy as np\n'), ((11666, 11693), 'numpy.log', 'np.log', (['(1.02 + trainId_prob)'], {}), '(1.02 + trainId_prob)\n', (11672, 11693), True, 'import numpy as np\n')] |
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

# Check if the webcam is opened correctly
if not cap.isOpened():
    raise IOError("Cannot open webcam")

# CLAHE (Contrast Limited Adaptive Histogram Equalization)
# https://en.wikipedia.org/wiki/Adaptive_histogram_equalization#Contrast_Limited_AHE
clahe = cv2.createCLAHE(clipLimit=3., tileGridSize=(8, 8))

while True:
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
    cv2.imshow('Input', frame)

    # Sharpen image by boosting local contrast on the L channel only
    # (working in LAB keeps the colors themselves untouched).
    lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    l2 = clahe.apply(l)
    lab = cv2.merge((l2, a, b))
    img2 = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    # Image Denoising (https://docs.opencv.org/trunk/d5/d69/tutorial_py_non_local_means.html)
    img2 = cv2.fastNlMeansDenoisingColored(img2, None, 10, 10, 7, 21)

    # Convert BGR to HSV
    hsv = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV)

    # Red wraps around the hue axis, so it needs two ranges:
    # lower mask (0-10) of RED
    mask0 = cv2.inRange(hsv, np.array([0, 50, 50]), np.array([10, 255, 255]))
    # upper mask (170-180) of RED
    mask1 = cv2.inRange(hsv, np.array([170, 50, 50]), np.array([180, 255, 255]))
    # BUG FIX: the joined mask (mask0 + mask1) was previously overwritten by a
    # second inRange call that kept only the upper red range. Use the union of
    # both ranges so low-hue reds are detected too.
    mask = cv2.bitwise_or(mask0, mask1)

    # apply the mask
    output = cv2.bitwise_and(frame, frame, mask=mask)
    median = cv2.medianBlur(output, 15)

    # show the images
    cv2.imshow("images", np.hstack([frame, img2, output, median]))

    c = cv2.waitKey(1)
    if c == 27:  # ESC quits
        break

cap.release()
cv2.destroyAllWindows()
| [
"cv2.merge",
"cv2.fastNlMeansDenoisingColored",
"numpy.hstack",
"cv2.inRange",
"cv2.bitwise_and",
"cv2.medianBlur",
"cv2.imshow",
"cv2.createCLAHE",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.split",
"cv2.resize",
"cv2.waitKey"
] | [((37, 56), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (53, 56), False, 'import cv2\n'), ((316, 367), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(3.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=3.0, tileGridSize=(8, 8))\n', (331, 367), False, 'import cv2\n'), ((2022, 2045), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2043, 2045), False, 'import cv2\n'), ((419, 488), 'cv2.resize', 'cv2.resize', (['frame', 'None'], {'fx': '(0.5)', 'fy': '(0.5)', 'interpolation': 'cv2.INTER_AREA'}), '(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)\n', (429, 488), False, 'import cv2\n'), ((493, 519), 'cv2.imshow', 'cv2.imshow', (['"""Input"""', 'frame'], {}), "('Input', frame)\n", (503, 519), False, 'import cv2\n'), ((577, 615), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2LAB'], {}), '(frame, cv2.COLOR_BGR2LAB)\n', (589, 615), False, 'import cv2\n'), ((685, 699), 'cv2.split', 'cv2.split', (['lab'], {}), '(lab)\n', (694, 699), False, 'import cv2\n'), ((878, 899), 'cv2.merge', 'cv2.merge', (['(l2, a, b)'], {}), '((l2, a, b))\n', (887, 899), False, 'import cv2\n'), ((962, 998), 'cv2.cvtColor', 'cv2.cvtColor', (['lab', 'cv2.COLOR_LAB2BGR'], {}), '(lab, cv2.COLOR_LAB2BGR)\n', (974, 998), False, 'import cv2\n'), ((1049, 1107), 'cv2.fastNlMeansDenoisingColored', 'cv2.fastNlMeansDenoisingColored', (['img2', 'None', '(10)', '(10)', '(7)', '(21)'], {}), '(img2, None, 10, 10, 7, 21)\n', (1080, 1107), False, 'import cv2\n'), ((1230, 1267), 'cv2.cvtColor', 'cv2.cvtColor', (['img2', 'cv2.COLOR_BGR2HSV'], {}), '(img2, cv2.COLOR_BGR2HSV)\n', (1242, 1267), False, 'import cv2\n'), ((1316, 1337), 'numpy.array', 'np.array', (['[0, 50, 50]'], {}), '([0, 50, 50])\n', (1324, 1337), True, 'import numpy as np\n'), ((1354, 1378), 'numpy.array', 'np.array', (['[10, 255, 255]'], {}), '([10, 255, 255])\n', (1362, 1378), True, 'import numpy as np\n'), ((1391, 1429), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_red', 
'upper_red'], {}), '(hsv, lower_red, upper_red)\n', (1402, 1429), False, 'import cv2\n'), ((1480, 1503), 'numpy.array', 'np.array', (['[170, 50, 50]'], {}), '([170, 50, 50])\n', (1488, 1503), True, 'import numpy as np\n'), ((1520, 1545), 'numpy.array', 'np.array', (['[180, 255, 255]'], {}), '([180, 255, 255])\n', (1528, 1545), True, 'import numpy as np\n'), ((1558, 1596), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_red', 'upper_red'], {}), '(hsv, lower_red, upper_red)\n', (1569, 1596), False, 'import cv2\n'), ((1707, 1745), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_red', 'upper_red'], {}), '(hsv, lower_red, upper_red)\n', (1718, 1745), False, 'import cv2\n'), ((1782, 1822), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (1797, 1822), False, 'import cv2\n'), ((1838, 1864), 'cv2.medianBlur', 'cv2.medianBlur', (['output', '(15)'], {}), '(output, 15)\n', (1852, 1864), False, 'import cv2\n'), ((1962, 1976), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1973, 1976), False, 'import cv2\n'), ((1911, 1951), 'numpy.hstack', 'np.hstack', (['[frame, img2, output, median]'], {}), '([frame, img2, output, median])\n', (1920, 1951), True, 'import numpy as np\n')] |
"""Perplexity Sampled mC4 dataset based on Common Crawl."""
import gzip
import json
import datasets
import kenlm # pip install https://github.com/kpu/kenlm/archive/master.zip
import numpy as np
from numpy.random import default_rng
# Module-level logger following the datasets-library convention.
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
A colossal, cleaned version of Common Crawl's web crawl corpus.
Based on Common Crawl dataset: "https://commoncrawl.org".
This is the processed version of Google's mC4 dataset by AllenAI.
"""

_CITATION = """
@article{2019t5,
author = {<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
journal = {arXiv e-prints},
year = {2019},
archivePrefix = {arXiv},
eprint = {1910.10683},
}
"""

# Homepage / discussion thread for the AllenAI release of mC4.
_URL = "https://github.com/allenai/allennlp/discussions/5056"

# Shard URL template. Placeholders: language code, optional "-validation"
# split suffix, zero-padded shard index, and the total shard count for that
# language/split (see _N_SHARDS_PER_SPLIT).
_DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/multilingual/c4-{language}{split_suffix}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
_LANGUAGES = [
"af",
"am",
"ar",
"az",
"be",
"bg",
"bg-Latn",
"bn",
"ca",
"ceb",
"co",
"cs",
"cy",
"da",
"de",
"el",
"el-Latn",
"en",
"eo",
"es",
"et",
"eu",
"fa",
"fi",
"fil",
"fr",
"fy",
"ga",
"gd",
"gl",
"gu",
"ha",
"haw",
"hi",
"hi-Latn",
"hmn",
"ht",
"hu",
"hy",
"id",
"ig",
"is",
"it",
"iw",
"ja",
"ja-Latn",
"jv",
"ka",
"kk",
"km",
"kn",
"ko",
"ku",
"ky",
"la",
"lb",
"lo",
"lt",
"lv",
"mg",
"mi",
"mk",
"ml",
"mn",
"mr",
"ms",
"mt",
"my",
"ne",
"nl",
"no",
"ny",
"pa",
"pl",
"ps",
"pt",
"ro",
"ru",
"ru-Latn",
"sd",
"si",
"sk",
"sl",
"sm",
"sn",
"so",
"sq",
"sr",
"st",
"su",
"sv",
"sw",
"ta",
"te",
"tg",
"th",
"tr",
"uk",
"und",
"ur",
"uz",
"vi",
"xh",
"yi",
"yo",
"zh",
"zh-Latn",
"zu",
]
_N_SHARDS_PER_SPLIT = {
"af": {"train": 64, "validation": 1},
"am": {"train": 16, "validation": 1},
"ar": {"train": 1024, "validation": 4},
"az": {"train": 256, "validation": 1},
"be": {"train": 128, "validation": 1},
"bg": {"train": 1024, "validation": 1},
"bg-Latn": {"train": 4, "validation": 1},
"bn": {"train": 512, "validation": 1},
"ca": {"train": 512, "validation": 1},
"ceb": {"train": 8, "validation": 1},
"co": {"train": 8, "validation": 1},
"cs": {"train": 1024, "validation": 2},
"cy": {"train": 256, "validation": 1},
"da": {"train": 1024, "validation": 1},
"de": {"train": 2048, "validation": 16},
"el": {"train": 1024, "validation": 2},
"el-Latn": {"train": 16, "validation": 1},
"en": {"train": 11264, "validation": 128},
"eo": {"train": 32, "validation": 1},
"es": {"train": 2048, "validation": 16},
"et": {"train": 256, "validation": 1},
"eu": {"train": 64, "validation": 1},
"fa": {"train": 1024, "validation": 2},
"fi": {"train": 1024, "validation": 1},
"fil": {"train": 64, "validation": 1},
"fr": {"train": 2048, "validation": 16},
"fy": {"train": 16, "validation": 1},
"ga": {"train": 16, "validation": 1},
"gd": {"train": 16, "validation": 1},
"gl": {"train": 128, "validation": 1},
"gu": {"train": 64, "validation": 1},
"ha": {"train": 8, "validation": 1},
"haw": {"train": 2, "validation": 1},
"hi": {"train": 1024, "validation": 2},
"hi-Latn": {"train": 16, "validation": 1},
"hmn": {"train": 8, "validation": 1},
"ht": {"train": 8, "validation": 1},
"hu": {"train": 1024, "validation": 2},
"hy": {"train": 128, "validation": 1},
"id": {"train": 1024, "validation": 4},
"ig": {"train": 4, "validation": 1},
"is": {"train": 128, "validation": 1},
"it": {"train": 1024, "validation": 8},
"iw": {"train": 1024, "validation": 1},
"ja": {"train": 1024, "validation": 8},
"ja-Latn": {"train": 8, "validation": 1},
"jv": {"train": 8, "validation": 1},
"ka": {"train": 256, "validation": 1},
"kk": {"train": 256, "validation": 1},
"km": {"train": 64, "validation": 1},
"kn": {"train": 64, "validation": 1},
"ko": {"train": 1024, "validation": 1},
"ku": {"train": 16, "validation": 1},
"ky": {"train": 64, "validation": 1},
"la": {"train": 64, "validation": 1},
"lb": {"train": 32, "validation": 1},
"lo": {"train": 8, "validation": 1},
"lt": {"train": 512, "validation": 1},
"lv": {"train": 256, "validation": 1},
"mg": {"train": 8, "validation": 1},
"mi": {"train": 4, "validation": 1},
"mk": {"train": 128, "validation": 1},
"ml": {"train": 128, "validation": 1},
"mn": {"train": 128, "validation": 1},
"mr": {"train": 1024, "validation": 1},
"ms": {"train": 512, "validation": 1},
"mt": {"train": 128, "validation": 1},
"my": {"train": 64, "validation": 1},
"ne": {"train": 256, "validation": 1},
"nl": {"train": 1024, "validation": 4},
"no": {"train": 1024, "validation": 1},
"ny": {"train": 4, "validation": 1},
"pa": {"train": 32, "validation": 1},
"pl": {"train": 1024, "validation": 4},
"ps": {"train": 16, "validation": 1},
"pt": {"train": 1024, "validation": 4},
"ro": {"train": 1024, "validation": 2},
"ru": {"train": 4096, "validation": 32},
"ru-Latn": {"train": 32, "validation": 1},
"sd": {"train": 64, "validation": 1},
"si": {"train": 64, "validation": 1},
"sk": {"train": 512, "validation": 1},
"sl": {"train": 256, "validation": 1},
"sm": {"train": 4, "validation": 1},
"sn": {"train": 8, "validation": 1},
"so": {"train": 64, "validation": 1},
"sq": {"train": 128, "validation": 1},
"sr": {"train": 256, "validation": 1},
"st": {"train": 2, "validation": 1},
"su": {"train": 4, "validation": 1},
"sv": {"train": 1024, "validation": 2},
"sw": {"train": 32, "validation": 1},
"ta": {"train": 256, "validation": 1},
"te": {"train": 128, "validation": 1},
"tg": {"train": 64, "validation": 1},
"th": {"train": 1024, "validation": 1},
"tr": {"train": 1024, "validation": 4},
"uk": {"train": 1024, "validation": 2},
"und": {"train": 3072, "validation": 32},
"ur": {"train": 128, "validation": 1},
"uz": {"train": 32, "validation": 1},
"vi": {"train": 1024, "validation": 4},
"xh": {"train": 2, "validation": 1},
"yi": {"train": 16, "validation": 1},
"yo": {"train": 2, "validation": 1},
"zh": {"train": 1024, "validation": 2},
"zh-Latn": {"train": 8, "validation": 1},
"zu": {"train": 8, "validation": 1},
}
class Mc4Config(datasets.BuilderConfig):
    """BuilderConfig for mC4."""

    def __init__(self, *args, languages, **kwargs):
        """BuilderConfig for mC4.

        Args:
            languages (:obj:`List[str]`): list of languages to load
            **kwargs: keyword arguments forwarded to super.
        """
        # The config is named after its languages, e.g. ["en", "es"] -> "en+es".
        config_name = "+".join(languages)
        super().__init__(*args, name=config_name, **kwargs)
        self.languages = languages
class Mc4(datasets.GeneratorBasedBuilder):
    """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

    # One single-language config per supported language; multi-language
    # configs can still be constructed directly via Mc4Config(languages=[...]).
    BUILDER_CONFIGS = [Mc4Config(languages=[lang]) for lang in _LANGUAGES]
    BUILDER_CONFIG_CLASS = Mc4Config
def __init__(self, *args, writer_batch_size=None, **kwargs):
self.data_files = kwargs.pop("data_files", {})
self.sampling_method = kwargs.pop("sampling_method", None)
self.perplexity_model = kwargs.pop("perplexity_model", None)
self.sampling_factor = kwargs.pop("sampling_factor", None)
self.boundaries = kwargs.pop("boundaries", None)
self.seed = kwargs.pop("seed", None)
self.kwargs = kwargs
if self.sampling_method:
if self.seed is not None:
self.rng = default_rng(self.seed)
else:
self.rng = default_rng()
if self.sampling_method == "random":
self.should_keep_doc = self._should_keep_doc_random
else:
# Loading 5-gram model
# http://dl.fbaipublicfiles.com/cc_net/lm/es.arpa.bin
logger.info("loading model = %s", self.perplexity_model)
self.pp_model = kenlm.Model(self.perplexity_model)
if self.sampling_method == "gaussian":
self.should_keep_doc = self._should_keep_doc_gaussian
else:
self.should_keep_doc = self._should_keep_doc_step
super().__init__(*args, writer_batch_size=writer_batch_size, **kwargs)
def get_perplexity(self, doc):
doc_log_score, doc_length = 0, 0
for line in doc.split("\n"):
log_score = self.pp_model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
return 10.0 ** (-doc_log_score / doc_length)
def _should_keep_doc_step(self, doc, factor=1.5e5, boundaries=None, **kwargs):
perplexity = self.get_perplexity(doc)
if boundaries is None:
boundaries = [536394.99320948, 662247.50212365, 919250.87225178]
if perplexity <= boundaries[0]:
quartile_range = boundaries[0]
elif boundaries[0] < perplexity < boundaries[1]:
quartile_range = boundaries[1] - boundaries[0]
elif boundaries[1] < perplexity < boundaries[2]:
quartile_range = boundaries[2] - boundaries[1]
elif perplexity >= boundaries[2]:
quartile_range = 10 * boundaries[2]
probability = factor / quartile_range
return self.rng.uniform() < probability
def _should_keep_doc_gaussian(self, doc, factor=0.78, boundaries=None, **kwargs):
width = kwargs.get("width", 9 / 2) # width (spread) of the exponential curve
perplexity = self.get_perplexity(doc)
if boundaries is not None:
m = boundaries[1]
else:
m = 662247.50212365
exponential = np.exp((-1 / width) * ((perplexity - m) / m) ** 2)
weighted_perplexity = factor * exponential
return self.rng.uniform() < weighted_perplexity
def _should_keep_doc_random(self, doc, factor=None, boundaries=None, **kwargs):
if factor is None:
factor = 0.5
return self.rng.uniform() <= factor
def _info(self):
    """Declare the dataset schema: ``text``, ``timestamp`` and ``url`` strings."""
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=datasets.Features(
            {
                "text": datasets.Value("string"),
                "timestamp": datasets.Value("string"),
                "url": datasets.Value("string"),
            }
        ),
        supervised_keys=None,
        homepage=_URL,
        citation=_CITATION,
    )
def _split_generators(self, dl_manager):
    """Build the train/validation split generators.

    Shard URLs are formatted from ``_DATA_URL`` for every configured
    language, unless local paths were supplied via ``data_files`` (in
    which case downloads for that split are skipped entirely).
    """
    data_urls = {}
    for split in ["train", "validation"]:
        data_urls[split] = [
            _DATA_URL.format(
                # NOTE(review): `language` comes from self.config.name
                # while shard counts use each `lang` in the loop —
                # confirm these agree when several languages are set.
                language=self.config.name,
                split_suffix="-validation" if split == "validation" else "",
                index=index,
                n_shards=_N_SHARDS_PER_SPLIT[lang][split],
            )
            for lang in self.config.languages
            for index in range(_N_SHARDS_PER_SPLIT[lang][split])
        ]
    if self.data_files and "train" in self.data_files:
        # Local override: normalize a single path to a list.
        train_downloaded_files = self.data_files["train"]
        if not isinstance(train_downloaded_files, (tuple, list)):
            train_downloaded_files = [train_downloaded_files]
    else:
        train_downloaded_files = dl_manager.download(data_urls["train"])
    if self.data_files and "validation" in self.data_files:
        validation_downloaded_files = self.data_files["validation"]
        if not isinstance(validation_downloaded_files, (tuple, list)):
            validation_downloaded_files = [validation_downloaded_files]
    else:
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepaths": train_downloaded_files},
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={"filepaths": validation_downloaded_files},
        ),
    ]
def _generate_examples(self, filepaths):
    """This function returns the examples in the raw (text) form by iterating on all the files.

    Yields ``(id_, example)`` pairs with a single running id across all
    files.  Plain ``*.jsonl`` files are read directly; anything else is
    assumed to be a gzipped JSON-lines shard.  When a sampling method is
    configured, only documents accepted by ``self.should_keep_doc`` are
    yielded (sampled path applies to gzipped shards only).
    """
    id_ = 0
    for filepath in filepaths:
        logger.info("generating examples from = %s", filepath)
        if filepath.endswith("jsonl"):
            # One JSON document per non-empty line.
            with open(filepath, "r", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
        else:
            # Gzipped JSON-lines shard (the downloaded mC4 format).
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                if self.sampling_method:
                    logger.info("sampling method = %s", self.sampling_method)
                    for line in f:
                        if line:
                            example = json.loads(line)
                            # Keep the doc only if the sampler accepts it.
                            if self.should_keep_doc(
                                example["text"],
                                factor=self.sampling_factor,
                                boundaries=self.boundaries,
                                **self.kwargs,
                            ):
                                yield id_, example
                                id_ += 1
                else:
                    for line in f:
                        if line:
                            example = json.loads(line)
                            yield id_, example
                            id_ += 1
| [
"datasets.SplitGenerator",
"kenlm.Model",
"json.loads",
"numpy.random.default_rng",
"numpy.exp",
"datasets.logging.get_logger",
"datasets.Value"
] | [((245, 282), 'datasets.logging.get_logger', 'datasets.logging.get_logger', (['__name__'], {}), '(__name__)\n', (272, 282), False, 'import datasets\n'), ((10363, 10411), 'numpy.exp', 'np.exp', (['(-1 / width * ((perplexity - m) / m) ** 2)'], {}), '(-1 / width * ((perplexity - m) / m) ** 2)\n', (10369, 10411), True, 'import numpy as np\n'), ((12481, 12585), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.TRAIN', 'gen_kwargs': "{'filepaths': train_downloaded_files}"}), "(name=datasets.Split.TRAIN, gen_kwargs={'filepaths':\n train_downloaded_files})\n", (12504, 12585), False, 'import datasets\n'), ((12642, 12757), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.VALIDATION', 'gen_kwargs': "{'filepaths': validation_downloaded_files}"}), "(name=datasets.Split.VALIDATION, gen_kwargs={\n 'filepaths': validation_downloaded_files})\n", (12665, 12757), False, 'import datasets\n'), ((8176, 8198), 'numpy.random.default_rng', 'default_rng', (['self.seed'], {}), '(self.seed)\n', (8187, 8198), False, 'from numpy.random import default_rng\n'), ((8244, 8257), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (8255, 8257), False, 'from numpy.random import default_rng\n'), ((8607, 8641), 'kenlm.Model', 'kenlm.Model', (['self.perplexity_model'], {}), '(self.perplexity_model)\n', (8618, 8641), False, 'import kenlm\n'), ((10885, 10909), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (10899, 10909), False, 'import datasets\n'), ((10944, 10968), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (10958, 10968), False, 'import datasets\n'), ((10997, 11021), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (11011, 11021), False, 'import datasets\n'), ((13292, 13308), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (13302, 13308), False, 'import json\n'), ((13739, 13755), 'json.loads', 'json.loads', (['line'], {}), 
'(line)\n', (13749, 13755), False, 'import json\n'), ((14325, 14341), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (14335, 14341), False, 'import json\n')] |
from typing import Tuple, Union
import numpy as np
# Implements https://en.wikipedia.org/wiki/Diamond-square_algorithm
#
# Some additional background on distribution of elevation in the real world:
# https://www.wolfram.com/language/12/new-in-geography/distribution-of-elevations.html
def diamond_square(
    rng: np.random.Generator,
    square_size: int,
    num_squares: Tuple[int, int],
    primary_scale: Union[float, np.ndarray],
    roughness: Union[float, np.ndarray],
    base_level: float = 0,
):
    """Generate a heightmap with the diamond-square algorithm.

    Returns an array of shape ``(num_squares[0] * square_size + 1,
    num_squares[1] * square_size + 1)``.  Corner heights are drawn from an
    exponential distribution (scaled by ``primary_scale`` and shifted by
    ``base_level``); interior displacements are normally distributed and
    scaled by ``primary_scale * roughness``.
    """
    sz = square_size
    h = np.zeros((num_squares[0] * sz + 1, num_squares[1] * sz + 1))
    # Cast primary_scale & roughness to 2D arrays
    if not isinstance(primary_scale, np.ndarray):
        primary_scale = np.full_like(h, primary_scale)
    else:
        assert primary_scale.shape == h.shape
    if not isinstance(roughness, np.ndarray):
        roughness = np.full_like(h, roughness)
    else:
        assert roughness.shape == h.shape
    # sample primary_scale at corner positions and use it to scale an exponential distribution
    corner_scale = primary_scale[
        0 : num_squares[0] * sz + 1 : sz, 0 : num_squares[1] * sz + 1 : sz
    ]
    corner_values = base_level + rng.exponential(scale=corner_scale)
    # for displacement, we go for normal distribution
    # (all randomness is pre-drawn here, one value per grid node)
    randoms = primary_scale * roughness * rng.normal(size=primary_scale.shape)
    # start with the corners
    for i, j in np.ndindex((num_squares[0] + 1, num_squares[1] + 1)):
        h[i * sz, j * sz] = corner_values[i, j]
    # the interpolation distance starts at sqrt(2) * sz (diagonal of one square)
    # and diminishes by a factor of sqrt(2) every half-step
    current_scale = np.sqrt(2)
    while sz >= 2:
        assert sz % 2 == 0
        # "diamond" step: center of each square from its 4 corners
        for i, j in np.ndindex(num_squares):
            # sample 4 corners
            c1 = h[i * sz, j * sz]
            c2 = h[i * sz, (j + 1) * sz]
            c3 = h[(i + 1) * sz, (j + 1) * sz]
            c4 = h[(i + 1) * sz, j * sz]
            c = np.mean([c1, c2, c3, c4])
            displacement = current_scale * randoms[i * sz + sz // 2, j * sz + sz // 2]
            h[i * sz + sz // 2, j * sz + sz // 2] = c + displacement
        # refine: each square splits into 4, step size halves
        num_squares = (num_squares[0] * 2, num_squares[1] * 2)
        sz //= 2
        current_scale /= np.sqrt(2)
        # "square" step: edge midpoints from up to 4 axis neighbors
        for j in range(0, num_squares[1] + 1):
            if j % 2 == 0:
                irange = range(1, num_squares[0], 2)
            else:
                irange = range(0, num_squares[0] + 1, 2)
            for i in irange:
                # sample 4 directions; off-grid neighbors become NaN and
                # are ignored by nanmean below
                nan = float("NaN")
                c1 = h[(i - 1) * sz, j * sz] if i > 0 else nan
                # NOTE(review): the upper-bound guards use
                # `< num_squares - 1` while the lower bounds use `> 0`;
                # this looks asymmetric (the neighbor at index
                # num_squares*sz exists) — confirm intended.
                c2 = h[i * sz, (j - 1) * sz] if j > 0 else nan
                c3 = h[(i + 1) * sz, j * sz] if i < num_squares[0] - 1 else nan
                c4 = h[i * sz, (j + 1) * sz] if j < num_squares[1] - 1 else nan
                c = np.nanmean([c1, c2, c3, c4])
                displacement = current_scale * randoms[i * sz, j * sz]
                h[i * sz, j * sz] = c + displacement
        current_scale /= np.sqrt(2)
    return h
| [
"numpy.mean",
"numpy.sqrt",
"numpy.full_like",
"numpy.ndindex",
"numpy.nanmean",
"numpy.zeros"
] | [((538, 598), 'numpy.zeros', 'np.zeros', (['(num_squares[0] * sz + 1, num_squares[1] * sz + 1)'], {}), '((num_squares[0] * sz + 1, num_squares[1] * sz + 1))\n', (546, 598), True, 'import numpy as np\n'), ((1417, 1469), 'numpy.ndindex', 'np.ndindex', (['(num_squares[0] + 1, num_squares[1] + 1)'], {}), '((num_squares[0] + 1, num_squares[1] + 1))\n', (1427, 1469), True, 'import numpy as np\n'), ((1681, 1691), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1688, 1691), True, 'import numpy as np\n'), ((724, 754), 'numpy.full_like', 'np.full_like', (['h', 'primary_scale'], {}), '(h, primary_scale)\n', (736, 754), True, 'import numpy as np\n'), ((878, 904), 'numpy.full_like', 'np.full_like', (['h', 'roughness'], {}), '(h, roughness)\n', (890, 904), True, 'import numpy as np\n'), ((1785, 1808), 'numpy.ndindex', 'np.ndindex', (['num_squares'], {}), '(num_squares)\n', (1795, 1808), True, 'import numpy as np\n'), ((2310, 2320), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2317, 2320), True, 'import numpy as np\n'), ((3137, 3147), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3144, 3147), True, 'import numpy as np\n'), ((2021, 2046), 'numpy.mean', 'np.mean', (['[c1, c2, c3, c4]'], {}), '([c1, c2, c3, c4])\n', (2028, 2046), True, 'import numpy as np\n'), ((2957, 2985), 'numpy.nanmean', 'np.nanmean', (['[c1, c2, c3, c4]'], {}), '([c1, c2, c3, c4])\n', (2967, 2985), True, 'import numpy as np\n')] |
import os
import cv2
import numpy as np
from PIL import Image
import pandas as pd
import torch
from torch.nn import functional as F
from .base_dataset import BaseDataset
# Lane-marking label table: (name, raw id, train id, category, catId,
# hasInstances, ignoreInEval, color).  Used to build the raw-id -> train-id
# mapping in the Baidu dataset class below.
matchLabels = [
    # name     id trainId category  catId hasInstances ignoreInEval color
    ( 'void'    ,  0 ,  0, 'void'        , 0 , False , False , (  0,   0,   0) ),
    ( 's_w_d'   , 200 ,  1 , 'dividing'   , 1 , False , False , ( 70, 130, 180) ),
    ( 's_y_d'   , 204 ,  1 , 'dividing'   , 1 , False , False , (220,  20,  60) ),
    ( 'ds_w_dn' , 213 ,  1 , 'dividing'   , 1 , False , True  , (128,   0, 128) ),
    ( 'ds_y_dn' , 209 ,  1 , 'dividing'   , 1 , False , False , (255,   0,   0) ),
    ( 'sb_w_do' , 206 ,  1 , 'dividing'   , 1 , False , True  , (  0,   0,  60) ),
    ( 'sb_y_do' , 207 ,  1 , 'dividing'   , 1 , False , True  , (  0,  60, 100) ),
    ( 'b_w_g'   , 201 ,  2 , 'guiding'    , 2 , False , False , (  0,   0, 142) ),
    ( 'b_y_g'   , 203 ,  2 , 'guiding'    , 2 , False , False , (119,  11,  32) ),
    ( 'db_w_g'  , 211 ,  2 , 'guiding'    , 2 , False , True  , (244,  35, 232) ),
    ( 'db_y_g'  , 208 ,  2 , 'guiding'    , 2 , False , True  , (  0,   0, 160) ),
    ( 'db_w_s'  , 216 ,  3 , 'stopping'   , 3 , False , True  , (153, 153, 153) ),
    ( 's_w_s'   , 217 ,  3 , 'stopping'   , 3 , False , False , (220, 220,   0) ),
    ( 'ds_w_s'  , 215 ,  3 , 'stopping'   , 3 , False , True  , (250, 170,  30) ),
    ( 's_w_c'   , 218 ,  4 , 'chevron'    , 4 , False , True  , (102, 102, 156) ),
    ( 's_y_c'   , 219 ,  4 , 'chevron'    , 4 , False , True  , (128,   0,   0) ),
    ( 's_w_p'   , 210 ,  5 , 'parking'    , 5 , False , False , (128,  64, 128) ),
    ( 's_n_p'   , 232 ,  5 , 'parking'    , 5 , False , True  , (238, 232, 170) ),
    ( 'c_wy_z'  , 214 ,  6 , 'zebra'      , 6 , False , False , (190, 153, 153) ),
    ( 'a_w_u'   , 202 ,  7 , 'thru/turn'  , 7 , False , True  , (  0,   0, 230) ),
    ( 'a_w_t'   , 220 ,  7 , 'thru/turn'  , 7 , False , False , (128, 128,   0) ),
    ( 'a_w_tl'  , 221 ,  7 , 'thru/turn'  , 7 , False , False , (128,  78, 160) ),
    ( 'a_w_tr'  , 222 ,  7 , 'thru/turn'  , 7 , False , False , (150, 100, 100) ),
    ( 'a_w_tlr' , 231 ,  7 , 'thru/turn'  , 7 , False , True  , (255, 165,   0) ),
    ( 'a_w_l'   , 224 ,  7 , 'thru/turn'  , 7 , False , False , (180, 165, 180) ),
    ( 'a_w_r'   , 225 ,  7 , 'thru/turn'  , 7 , False , False , (107, 142,  35) ),
    ( 'a_w_lr'  , 226 ,  7 , 'thru/turn'  , 7 , False , False , (201, 255, 229) ),
    ( 'a_n_lu'  , 230 ,  7 , 'thru/turn'  , 7 , False , True  , (  0, 191, 255) ),
    ( 'a_w_tu'  , 228 ,  7 , 'thru/turn'  , 7 , False , True  , ( 51, 255,  51) ),
    ( 'a_w_m'   , 229 ,  7 , 'thru/turn'  , 7 , False , True  , (250, 128, 114) ),
    ( 'a_y_t'   , 233 ,  7 , 'thru/turn'  , 7 , False , True  , (127, 255,   0) ),
    ( 'b_n_sr'  , 205 ,  8 , 'reduction'  , 8 , False , False , (255, 128,   0) ),
    ( 'd_wy_za' , 212 ,  8 , 'attention'  , 8 , False , True  , (  0, 255, 255) ),
    ( 'r_wy_np' , 227 ,  8 , 'no parking' , 8 , False , False , (178, 132, 190) ),
    ( 'vom_wy_n', 223 ,  8 , 'others'     , 8 , False , True  , (128, 128,  64) ),
    ( 'om_n_n'  , 250 ,  8 , 'others'     , 8 , False , False , (102,   0, 204) ),
    ( 'noise'   , 249 ,  0 , 'ignored'    , 0 , False , True  , (  0, 153, 153) ),
    ( 'ignored' , 255 ,  0 , 'ignored'    , 0 , False , True  , (255, 255, 255) ),
]
class Baidu(BaseDataset):
    """Baidu/ApolloScape lane-marking segmentation dataset (9 train classes).

    Expects a CSV list file with an ``image,label`` header whose rows give
    image and annotation paths relative to ``root``.  Raw annotation ids
    are remapped to train ids via the ``matchLabels`` table.

    Fixes: ``np.int`` (removed in NumPy 1.24) replaced with the builtin
    ``int``; dead commented-out code removed.
    """

    def __init__(self,
                 root,
                 list_path,
                 num_samples=None,
                 num_classes=9,
                 multi_scale=True,
                 flip=True,
                 ignore_label=-1,
                 base_size=2048,
                 crop_size=(512, 1024),
                 downsample_rate=1,
                 scale_factor=16,
                 mean=[0.485, 0.456, 0.406],
                 std=[0.229, 0.224, 0.225]):
        super(Baidu, self).__init__(ignore_label, base_size,
                                    crop_size, downsample_rate,
                                    scale_factor, mean, std,)
        self.root = root
        self.list_path = list_path
        self.num_classes = num_classes
        self.multi_scale = multi_scale
        self.flip = flip
        # The list file carries a header row; values[1:] skips it.
        self.data = pd.read_csv(os.path.join(root, list_path),
                                header=None, names=["image", "label"])
        self.img_list = list(zip(self.data["image"].values[1:],
                                 self.data["label"].values[1:]))
        self.files = self.read_files()
        if num_samples:
            self.files = self.files[:num_samples]
        # Raw annotation id -> train id, taken from the matchLabels table.
        self.label_mapping = {info[1]: info[2] for info in matchLabels}
        # Uniform class weights for the 9 train classes.
        self.class_weights = torch.ones(9, dtype=torch.float).cuda()

    def read_files(self):
        """Parse ``img_list`` into per-sample dicts (test lists carry no label)."""
        files = []
        if 'test' in self.list_path:
            for item in self.img_list:
                image_path = item
                name = os.path.splitext(os.path.basename(image_path[0]))[0]
                files.append({
                    "img": image_path[0],
                    "name": name,
                })
        else:
            for item in self.img_list:
                image_path, label_path = item
                name = os.path.splitext(os.path.basename(label_path))[0]
                files.append({
                    "img": image_path,
                    "label": label_path,
                    "name": name,
                    "weight": 1
                })
        return files

    def convert_label(self, label, inverse=False):
        """Map raw ids -> train ids in-place (or back, when ``inverse``)."""
        temp = label.copy()
        if inverse:
            for v, k in self.label_mapping.items():
                label[temp == k] = v
        else:
            for k, v in self.label_mapping.items():
                label[temp == k] = v
        return label

    def __getitem__(self, index):
        """Return ``(image, [label,] size, name)`` for the indexed sample."""
        item = self.files[index]
        name = item["name"]
        image = cv2.imread(os.path.join(self.root, item["img"]),
                           cv2.IMREAD_COLOR)
        size = image.shape
        if 'test' in self.list_path:
            # No annotation on the test split: normalized image only.
            image = self.input_transform(image)
            image = image.transpose((2, 0, 1))
            return image.copy(), np.array(size), name
        label = cv2.imread(os.path.join(self.root, item["label"]),
                           cv2.IMREAD_GRAYSCALE)
        label = self.convert_label(label)
        image, label = self.gen_sample(image, label,
                                       self.multi_scale, self.flip)
        return image.copy(), label.copy(), np.array(size), name

    def multi_scale_inference(self, config, model, image, scales=[1], flip=False):
        """Average model predictions over ``scales``.

        Scales > 1 use a sliding crop window whose overlapping predictions
        are averaged via a per-pixel count.
        """
        batch, _, ori_height, ori_width = image.size()
        assert batch == 1, "only supporting batchsize 1."
        image = image.numpy()[0].transpose((1, 2, 0)).copy()
        # np.int was removed in NumPy 1.24 — use the builtin int.
        stride_h = int(self.crop_size[0] * 1.0)
        stride_w = int(self.crop_size[1] * 1.0)
        final_pred = torch.zeros([1, self.num_classes,
                                  ori_height, ori_width]).cuda()
        for scale in scales:
            new_img = self.multi_scale_aug(image=image,
                                           rand_scale=scale,
                                           rand_crop=False)
            height, width = new_img.shape[:-1]
            if scale <= 1.0:
                # Whole image fits: single forward pass.
                new_img = new_img.transpose((2, 0, 1))
                new_img = np.expand_dims(new_img, axis=0)
                new_img = torch.from_numpy(new_img)
                preds = self.inference(config, model, new_img, flip)
                preds = preds[:, :, 0:height, 0:width]
            else:
                # Sliding window over the enlarged image.
                new_h, new_w = new_img.shape[:-1]
                rows = int(np.ceil(1.0 * (new_h -
                                           self.crop_size[0]) / stride_h)) + 1
                cols = int(np.ceil(1.0 * (new_w -
                                           self.crop_size[1]) / stride_w)) + 1
                preds = torch.zeros([1, self.num_classes,
                                     new_h, new_w]).cuda()
                count = torch.zeros([1, 1, new_h, new_w]).cuda()
                for r in range(rows):
                    for c in range(cols):
                        h0 = r * stride_h
                        w0 = c * stride_w
                        h1 = min(h0 + self.crop_size[0], new_h)
                        w1 = min(w0 + self.crop_size[1], new_w)
                        # Clamp so the crop always has full crop_size.
                        h0 = max(int(h1 - self.crop_size[0]), 0)
                        w0 = max(int(w1 - self.crop_size[1]), 0)
                        crop_img = new_img[h0:h1, w0:w1, :]
                        crop_img = crop_img.transpose((2, 0, 1))
                        crop_img = np.expand_dims(crop_img, axis=0)
                        crop_img = torch.from_numpy(crop_img)
                        pred = self.inference(config, model, crop_img, flip)
                        preds[:, :, h0:h1, w0:w1] += pred[:, :, 0:h1 - h0, 0:w1 - w0]
                        count[:, :, h0:h1, w0:w1] += 1
                preds = preds / count
                preds = preds[:, :, :height, :width]
            preds = F.interpolate(
                preds, (ori_height, ori_width),
                mode='bilinear', align_corners=config.MODEL.ALIGN_CORNERS
            )
            final_pred += preds
        return final_pred

    def get_palette(self, n):
        """Generate the standard bit-interleaved ``n``-entry PNG palette."""
        palette = [0] * (n * 3)
        for j in range(0, n):
            lab = j
            palette[j * 3 + 0] = 0
            palette[j * 3 + 1] = 0
            palette[j * 3 + 2] = 0
            i = 0
            while lab:
                palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
                palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
                palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
                i += 1
                lab >>= 3
        return palette

    def save_pred(self, preds, sv_path, name):
        """Save argmax predictions as palettized PNGs, one per sample name."""
        palette = self.get_palette(256)
        preds = np.asarray(np.argmax(preds.cpu(), axis=1), dtype=np.uint8)
        for i in range(preds.shape[0]):
            # Map train ids back to the raw annotation ids for saving.
            pred = self.convert_label(preds[i], inverse=True)
            save_img = Image.fromarray(pred)
            save_img.putpalette(palette)
            save_img.save(os.path.join(sv_path, name[i] + '.png'))
| [
"PIL.Image.fromarray",
"numpy.ceil",
"os.path.join",
"torch.from_numpy",
"numpy.array",
"os.path.basename",
"torch.nn.functional.interpolate",
"numpy.expand_dims",
"numpy.int",
"torch.zeros",
"torch.ones"
] | [((8065, 8096), 'numpy.int', 'np.int', (['(self.crop_size[0] * 1.0)'], {}), '(self.crop_size[0] * 1.0)\n', (8071, 8096), True, 'import numpy as np\n'), ((8116, 8147), 'numpy.int', 'np.int', (['(self.crop_size[1] * 1.0)'], {}), '(self.crop_size[1] * 1.0)\n', (8122, 8147), True, 'import numpy as np\n'), ((4029, 4058), 'os.path.join', 'os.path.join', (['root', 'list_path'], {}), '(root, list_path)\n', (4041, 4058), False, 'import os\n'), ((7020, 7056), 'os.path.join', 'os.path.join', (['self.root', "item['img']"], {}), "(self.root, item['img'])\n", (7032, 7056), False, 'import os\n'), ((7478, 7516), 'os.path.join', 'os.path.join', (['self.root', "item['label']"], {}), "(self.root, item['label'])\n", (7490, 7516), False, 'import os\n'), ((7769, 7783), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (7777, 7783), True, 'import numpy as np\n'), ((10349, 10457), 'torch.nn.functional.interpolate', 'F.interpolate', (['preds', '(ori_height, ori_width)'], {'mode': '"""bilinear"""', 'align_corners': 'config.MODEL.ALIGN_CORNERS'}), "(preds, (ori_height, ori_width), mode='bilinear',\n align_corners=config.MODEL.ALIGN_CORNERS)\n", (10362, 10457), True, 'from torch.nn import functional as F\n'), ((11394, 11415), 'PIL.Image.fromarray', 'Image.fromarray', (['pred'], {}), '(pred)\n', (11409, 11415), False, 'from PIL import Image\n'), ((5271, 5303), 'torch.ones', 'torch.ones', (['(9)'], {'dtype': 'torch.float'}), '(9, dtype=torch.float)\n', (5281, 5303), False, 'import torch\n'), ((7297, 7311), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (7305, 7311), True, 'import numpy as np\n'), ((8169, 8226), 'torch.zeros', 'torch.zeros', (['[1, self.num_classes, ori_height, ori_width]'], {}), '([1, self.num_classes, ori_height, ori_width])\n', (8180, 8226), False, 'import torch\n'), ((8649, 8680), 'numpy.expand_dims', 'np.expand_dims', (['new_img'], {'axis': '(0)'}), '(new_img, axis=0)\n', (8663, 8680), True, 'import numpy as np\n'), ((8707, 8732), 'torch.from_numpy', 
'torch.from_numpy', (['new_img'], {}), '(new_img)\n', (8723, 8732), False, 'import torch\n'), ((11483, 11522), 'os.path.join', 'os.path.join', (['sv_path', "(name[i] + '.png')"], {}), "(sv_path, name[i] + '.png')\n", (11495, 11522), False, 'import os\n'), ((5890, 5921), 'os.path.basename', 'os.path.basename', (['image_path[0]'], {}), '(image_path[0])\n', (5906, 5921), False, 'import os\n'), ((6191, 6219), 'os.path.basename', 'os.path.basename', (['label_path'], {}), '(label_path)\n', (6207, 6219), False, 'import os\n'), ((8955, 9008), 'numpy.ceil', 'np.ceil', (['(1.0 * (new_h - self.crop_size[0]) / stride_h)'], {}), '(1.0 * (new_h - self.crop_size[0]) / stride_h)\n', (8962, 9008), True, 'import numpy as np\n'), ((9077, 9130), 'numpy.ceil', 'np.ceil', (['(1.0 * (new_w - self.crop_size[1]) / stride_w)'], {}), '(1.0 * (new_w - self.crop_size[1]) / stride_w)\n', (9084, 9130), True, 'import numpy as np\n'), ((9193, 9241), 'torch.zeros', 'torch.zeros', (['[1, self.num_classes, new_h, new_w]'], {}), '([1, self.num_classes, new_h, new_w])\n', (9204, 9241), False, 'import torch\n'), ((9315, 9348), 'torch.zeros', 'torch.zeros', (['[1, 1, new_h, new_w]'], {}), '([1, 1, new_h, new_w])\n', (9326, 9348), False, 'import torch\n'), ((9938, 9970), 'numpy.expand_dims', 'np.expand_dims', (['crop_img'], {'axis': '(0)'}), '(crop_img, axis=0)\n', (9952, 9970), True, 'import numpy as np\n'), ((10006, 10032), 'torch.from_numpy', 'torch.from_numpy', (['crop_img'], {}), '(crop_img)\n', (10022, 10032), False, 'import torch\n')] |
"""Construct sparse matrix from a local stencil."""
# pylint: disable=redefined-builtin
import numpy as np
from scipy import sparse
def stencil_grid(S, grid, dtype=None, format=None):
    """Construct a sparse matrix from a local matrix stencil.

    Parameters
    ----------
    S : ndarray
        matrix stencil stored in N-d array
    grid : tuple
        tuple containing the N grid dimensions
    dtype :
        data type of the result
    format : string
        sparse matrix format to return, e.g. "csr", "coo", etc.

    Returns
    -------
    A : sparse matrix
        Sparse matrix which represents the operator given by applying
        stencil S at each vertex of a regular grid with given dimensions.

    Notes
    -----
    The grid vertices are enumerated as arange(prod(grid)).reshape(grid).
    This implies that the last grid dimension cycles fastest, while the
    first dimension cycles slowest.  For example, if grid=(2,3) then the
    grid vertices are ordered as (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).

    This coincides with the ordering used by the NumPy functions
    ndenumerate() and mgrid().

    Examples
    --------
    >>> from pyamg.gallery import stencil_grid
    >>> stencil = [-1,2,-1]  # 1D Poisson stencil
    >>> grid = (5,)          # 1D grid with 5 vertices
    >>> A = stencil_grid(stencil, grid, dtype=float, format='csr')
    >>> A.toarray()
    array([[ 2., -1.,  0.,  0.,  0.],
           [-1.,  2., -1.,  0.,  0.],
           [ 0., -1.,  2., -1.,  0.],
           [ 0.,  0., -1.,  2., -1.],
           [ 0.,  0.,  0., -1.,  2.]])

    >>> stencil = [[0,-1,0],[-1,4,-1],[0,-1,0]]  # 2D Poisson stencil
    >>> grid = (3,3)                             # 2D grid with shape 3x3
    >>> A = stencil_grid(stencil, grid, dtype=float, format='csr')
    >>> A.toarray()
    array([[ 4., -1.,  0., -1.,  0.,  0.,  0.,  0.,  0.],
           [-1.,  4., -1.,  0., -1.,  0.,  0.,  0.,  0.],
           [ 0., -1.,  4.,  0.,  0., -1.,  0.,  0.,  0.],
           [-1.,  0.,  0.,  4., -1.,  0., -1.,  0.,  0.],
           [ 0., -1.,  0., -1.,  4., -1.,  0., -1.,  0.],
           [ 0.,  0., -1.,  0., -1.,  4.,  0.,  0., -1.],
           [ 0.,  0.,  0., -1.,  0.,  0.,  4., -1.,  0.],
           [ 0.,  0.,  0.,  0., -1.,  0., -1.,  4., -1.],
           [ 0.,  0.,  0.,  0.,  0., -1.,  0., -1.,  4.]])

    """
    S = np.asarray(S, dtype=dtype)
    grid = tuple(grid)

    if not (np.asarray(S.shape) % 2 == 1).all():
        raise ValueError('all stencil dimensions must be odd')

    if len(grid) != np.ndim(S):
        raise ValueError('stencil dimension must equal number of grid dimensions')

    if min(grid) < 1:
        raise ValueError('grid dimensions must be positive')

    N_v = np.prod(grid)     # number of vertices in the mesh
    N_s = (S != 0).sum()  # number of nonzero stencil entries

    # diagonal offsets
    diags = np.zeros(N_s, dtype=int)

    # compute index offset of each dof within the stencil
    strides = np.cumprod([1] + list(reversed(grid)))[:-1]
    indices = tuple(i.copy() for i in S.nonzero())
    for i, s in zip(indices, S.shape):
        # center the stencil: offsets become relative to the middle entry
        i -= s // 2

    for stride, coords in zip(strides, reversed(indices)):
        diags += stride * coords

    data = S[S != 0].repeat(N_v).reshape(N_s, N_v)

    indices = np.vstack(indices).T

    # zero boundary connections (entries that would wrap around the grid)
    for index, diag in zip(indices, data):
        diag = diag.reshape(grid)
        for n, i in enumerate(index):
            if i > 0:
                s = [slice(None)] * len(grid)
                s[n] = slice(0, i)
                s = tuple(s)
                diag[s] = 0
            elif i < 0:
                s = [slice(None)] * len(grid)
                s[n] = slice(i, None)
                s = tuple(s)
                diag[s] = 0

    # remove diagonals that lie outside matrix
    mask = abs(diags) < N_v
    if not mask.all():
        diags = diags[mask]
        data = data[mask]

    # sum duplicate diagonals
    if len(np.unique(diags)) != len(diags):
        new_diags = np.unique(diags)
        new_data = np.zeros((len(new_diags), data.shape[1]),
                            dtype=data.dtype)

        for dia, dat in zip(diags, data):
            n = np.searchsorted(new_diags, dia)
            new_data[n, :] += dat

        diags = new_diags
        data = new_data

    return sparse.dia_matrix((data, diags),
                             shape=(N_v, N_v)).asformat(format)
| [
"numpy.prod",
"numpy.unique",
"numpy.searchsorted",
"scipy.sparse.dia_matrix",
"numpy.asarray",
"numpy.ndim",
"numpy.zeros",
"numpy.vstack"
] | [((2375, 2401), 'numpy.asarray', 'np.asarray', (['S'], {'dtype': 'dtype'}), '(S, dtype=dtype)\n', (2385, 2401), True, 'import numpy as np\n'), ((2776, 2789), 'numpy.prod', 'np.prod', (['grid'], {}), '(grid)\n', (2783, 2789), True, 'import numpy as np\n'), ((2924, 2948), 'numpy.zeros', 'np.zeros', (['N_s'], {'dtype': 'int'}), '(N_s, dtype=int)\n', (2932, 2948), True, 'import numpy as np\n'), ((2559, 2569), 'numpy.ndim', 'np.ndim', (['S'], {}), '(S)\n', (2566, 2569), True, 'import numpy as np\n'), ((3410, 3428), 'numpy.vstack', 'np.vstack', (['indices'], {}), '(indices)\n', (3419, 3428), True, 'import numpy as np\n'), ((4150, 4166), 'numpy.unique', 'np.unique', (['diags'], {}), '(diags)\n', (4159, 4166), True, 'import numpy as np\n'), ((4097, 4113), 'numpy.unique', 'np.unique', (['diags'], {}), '(diags)\n', (4106, 4113), True, 'import numpy as np\n'), ((4333, 4364), 'numpy.searchsorted', 'np.searchsorted', (['new_diags', 'dia'], {}), '(new_diags, dia)\n', (4348, 4364), True, 'import numpy as np\n'), ((4462, 4512), 'scipy.sparse.dia_matrix', 'sparse.dia_matrix', (['(data, diags)'], {'shape': '(N_v, N_v)'}), '((data, diags), shape=(N_v, N_v))\n', (4479, 4512), False, 'from scipy import sparse\n'), ((2438, 2457), 'numpy.asarray', 'np.asarray', (['S.shape'], {}), '(S.shape)\n', (2448, 2457), True, 'import numpy as np\n')] |
""" Functions to deal with vibrational frequencies
"""
import numpy
from phydat import phycon
def scale_frequencies_and_zpe(freqs, method, basis, scale_method='c3'):
    """Scale harmonic frequencies and compute the corresponding scaled ZPE.

    :param freqs: harmonic vibrational frequencies (wavenumbers, given the
        WAVEN2EH conversion below)
    :param method: electronic structure method used to obtain the frequencies
    :param basis: basis set used to obtain the frequencies
    :param scale_method: name of the scaling scheme (see SCALE_METHODS)
    :return: (scaled_freqs, scaled_zpe) with the ZPE in Hartree
    """
    scaled_freqs = scale_frequencies(
        freqs, method, basis, scale_method=scale_method)

    scaled_zpe = 0.0
    if 'harm' in scale_method:
        # Calculate harmonic zpe using scaled frequencies
        scaled_zpe = sum(scaled_freqs)/2.0 * phycon.WAVEN2EH
    else:
        # Calculate the anharmonic zpe using the scaled anharmonic freqs
        # but you have to get harmonic version of those freqs first
        harm_sfreqs = scale_frequencies(
            freqs, method, basis, scale_method=HARM_OF_SM[scale_method])
        for freq, scfreq in zip(harm_sfreqs, scaled_freqs):
            scaled_zpe += _anharm_zpve_from_scaling(freq, scfreq)
        scaled_zpe *= phycon.WAVEN2EH

    return scaled_freqs, scaled_zpe
def scale_frequencies(freqs, method, basis, scale_method='c3'):
    """Scale vibrational frequencies with the requested scaling scheme.

    Unknown scheme names leave the frequencies untouched.
    """
    scaler = SCALE_METHODS.get(scale_method)
    if scaler is None:
        return freqs
    return scaler(freqs, method, basis)
def _anharm_zpve_from_scaling(freq, scaled_freq):
""" Determine what the anharmonic ZPVE should be after scaling
"""
return (freq / 2.0) + (1.0 / 8.0) * (scaled_freq - freq)
def rotor_scale_factor_from_harmonics(rt_freqs, rth_freqs, tors_freqs):
    """Scaling factor mapping hindered-rotor potentials onto harmonic modes.

    :param rt_freqs: RRHO frequencies including the torsional modes
    :param rth_freqs: RRHO frequencies with the torsions projected out
    :param tors_freqs: 1-D torsional frequencies
    :return: (idx_remove, factor) where ``idx_remove`` lists indices (into
        ``tors_freqs``) of the lowest torsions dropped to balance RRHO
        frequencies below the threshold, and ``factor`` is
        exp(sum ln rt - sum ln rth - sum ln tors_kept).

    Fixes: removed a dead first assignment of the return tuple and the
    redundant ``tau_factor_mode`` alias from the original implementation.
    """
    # Keep only RRHO freqs above a threshold; count the discarded ones.
    freq_thresh = 20.
    log_rt_freq = 0.0
    nfreq_remove = 0
    for freq in rt_freqs:
        if freq > freq_thresh:
            log_rt_freq += numpy.log(freq)
        else:
            nfreq_remove += 1

    log_freq = sum(numpy.log(freq) for freq in rth_freqs)

    # Drop the nfreq_remove lowest torsional frequencies (ascending order)
    # and accumulate the log of the kept ones.
    log_tors_freq = 0.0
    idx_remove = []
    for idx, freq in enumerate(sorted(tors_freqs)):
        if idx + 1 > nfreq_remove:
            log_tors_freq += numpy.log(freq)
        else:
            idx_remove.append(tors_freqs.index(freq))

    # Generate the scaling factor
    factor = numpy.exp(log_rt_freq - log_freq - log_tors_freq)

    # Diagnostic print retained from the original implementation.
    tau_factor = numpy.exp(log_rt_freq - log_freq)
    tau_str = '-'.join(str(ridx) for ridx in idx_remove)
    print(f'TAU FACTOR {tau_factor:4.6f} \t '
          f'{len(tors_freqs):g} \t '
          f'{factor:3.6f} '
          f'{tau_str}')

    return (idx_remove, factor)
# Library of vibrational frequency scaling methods
# Three-coefficient fits (cf1, cf2, cf3) per (method, basis) pair;
# each frequency is scaled by cf1 - cf2 * freq**cf3.
M3_COEFFS_ANHARM = {
    # ('b2plypd3', 'cc-pvtz'): (1.066, 0.008045, 0.33),
    ('b2plypd3', 'cc-pvtz'): (1.045, 0.00851, 0.292),
    ('wb97xd', '6-31g*'): (1.657244, 0.56000691, 0.029624),
    ('wb97xd', 'cc-pvtz'): (1.053471, 0.01186224, 0.26174883)
}
# Harmonic counterparts of the anharmonic fits above.
M3_COEFFS_HARM = {
    ('wb97xd', '6-31g*'): (0.91, -0.058, 0.001)
}
def _three_coeff_anharm_scaling(freqs, method, basis):
    """Scale each frequency by cf1 - cf2 * freq**cf3 (anharmonic fit).

    Unknown (method, basis) pairs fall back to (1.0, 0.0, 0.0), which
    leaves the frequencies unchanged.
    """
    cf1, cf2, cf3 = M3_COEFFS_ANHARM.get((method, basis), (1.0, 0.0, 0.0))
    return tuple(freq * (cf1 - cf2 * freq**cf3) for freq in freqs)
def _three_coeff_harm_scaling(freqs, method, basis):
    """Scale each frequency by cf1 - cf2 * freq**cf3 (harmonic fit).

    Unknown (method, basis) pairs fall back to (1.0, 0.0, 0.0), which
    leaves the frequencies unchanged.

    Fixes: the original docstring wrongly claimed a single common factor
    (the factor is frequency-dependent); dead debug prints removed.
    """
    cf1, cf2, cf3 = M3_COEFFS_HARM.get((method, basis), (1.0, 0.0, 0.0))
    scaled_freqs = ()
    for freq in freqs:
        scale_factor = cf1 - (cf2 * freq**cf3)
        scaled_freqs += (freq * scale_factor,)
    return scaled_freqs
# Dispatch table: scale_method name -> scaling function.
SCALE_METHODS = {
    'c3': _three_coeff_anharm_scaling,
    'c3_harm': _three_coeff_harm_scaling
}
# Maps each anharmonic scaling scheme to its harmonic counterpart.
HARM_OF_SM = {
    'c3': 'c3_harm'
}
| [
"numpy.exp",
"numpy.log"
] | [((2448, 2497), 'numpy.exp', 'numpy.exp', (['(log_rt_freq - log_freq - log_tors_freq)'], {}), '(log_rt_freq - log_freq - log_tors_freq)\n', (2457, 2497), False, 'import numpy\n'), ((2626, 2659), 'numpy.exp', 'numpy.exp', (['(log_rt_freq - log_freq)'], {}), '(log_rt_freq - log_freq)\n', (2635, 2659), False, 'import numpy\n'), ((2092, 2107), 'numpy.log', 'numpy.log', (['freq'], {}), '(freq)\n', (2101, 2107), False, 'import numpy\n'), ((2015, 2030), 'numpy.log', 'numpy.log', (['freq'], {}), '(freq)\n', (2024, 2030), False, 'import numpy\n'), ((2316, 2331), 'numpy.log', 'numpy.log', (['freq'], {}), '(freq)\n', (2325, 2331), False, 'import numpy\n')] |
import numpy as np
class NashV:
    """Bandit-feedback learner for an N x M matrix game.

    Keeps exponentially-decayed, importance-weighted loss estimates
    ("logits") for each row and column action, derives the sampling
    strategies with exponential weights, and reports time-averaged
    strategies for evaluation.
    """

    def __init__(self, N, M, T, config):
        self._N = N
        self._M = M
        self._alpha = config.get("alpha", 1.0)
        self._gamma = config.get("gamma", 1.0)
        # Number of payoff samples drawn so far.
        self._sample_count = 0
        # Exponentially-averaged loss estimates per action.
        self._row_logits = np.zeros(N)
        self._column_logits = np.zeros(M)
        # Sampling strategies and their running averages start uniform.
        self._row_strategy = np.ones(N) / N
        self._column_strategy = np.ones(M) / M
        self._row_average_strategy = np.ones(N) / N
        self._column_average_strategy = np.ones(M) / M

    @staticmethod
    def _exp_weights_distribution(logits, scale):
        """Normalized exponential weights of -scale * (logits - max(logits))."""
        weights = np.exp(-scale * (logits - np.max(logits)))
        return weights / np.sum(weights)

    def sample(self, G):  # Generate samples from the game G - return the number of samples
        # Draw one joint action from the current sampling strategies.
        i = np.random.choice(self._N, p=self._row_strategy)
        j = np.random.choice(self._M, p=self._column_strategy)
        u, v = G.sample(i, j)
        # Step sizes: decaying averaging rate and exploration-dependent gammas.
        self._sample_count += 1
        alpha = self._alpha * 2 / (1 + self._sample_count)
        row_gamma = self._gamma * np.sqrt(np.log(self._N) / (self._N * self._sample_count))
        column_gamma = self._gamma * np.sqrt(np.log(self._M) / (self._M * self._sample_count))
        # Importance-weighted loss of the sampled action only.
        estimated_row_losses = np.zeros(self._N)
        estimated_row_losses[i] = (1 - u) / (self._row_strategy[i] + row_gamma)
        estimated_column_losses = np.zeros(self._M)
        estimated_column_losses[j] = (1 - v) / (self._column_strategy[j] + column_gamma)
        self._row_logits = (1 - alpha) * self._row_logits + alpha * estimated_row_losses
        self._column_logits = (1 - alpha) * self._column_logits + alpha * estimated_column_losses
        # New sampling strategies from exponential weights (max-shifted for stability).
        self._row_strategy = self._exp_weights_distribution(self._row_logits, row_gamma / alpha)
        self._column_strategy = self._exp_weights_distribution(self._column_logits, column_gamma / alpha)
        assert all(np.isfinite(self._row_logits)), "One or more row logits became infinite"
        assert all(np.isfinite(self._column_logits)), "One or more column logits became infinite"
        # Exponentially-decayed running averages of the sampling strategies.
        self._row_average_strategy = (1 - alpha) * self._row_average_strategy + alpha * self._row_strategy
        self._column_average_strategy = (1 - alpha) * self._column_average_strategy + alpha * self._column_strategy

    def strategies(self):  # Return the test strategies, may differ from the sampling strategies
        return self._row_average_strategy, self._column_average_strategy

    def __repr__(self):
        return f"nash_v_alpha_{self._alpha}_gamma_{self._gamma}"
| [
"numpy.ones",
"numpy.random.choice",
"numpy.log",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.isfinite"
] | [((322, 333), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (330, 333), True, 'import numpy as np\n'), ((364, 375), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (372, 375), True, 'import numpy as np\n'), ((735, 782), 'numpy.random.choice', 'np.random.choice', (['self._N'], {'p': 'self._row_strategy'}), '(self._N, p=self._row_strategy)\n', (751, 782), True, 'import numpy as np\n'), ((807, 857), 'numpy.random.choice', 'np.random.choice', (['self._M'], {'p': 'self._column_strategy'}), '(self._M, p=self._column_strategy)\n', (823, 857), True, 'import numpy as np\n'), ((1264, 1281), 'numpy.zeros', 'np.zeros', (['self._N'], {}), '(self._N)\n', (1272, 1281), True, 'import numpy as np\n'), ((1404, 1421), 'numpy.zeros', 'np.zeros', (['self._M'], {}), '(self._M)\n', (1412, 1421), True, 'import numpy as np\n'), ((1825, 1870), 'numpy.exp', 'np.exp', (['(-(row_gamma / alpha) * row_advantages)'], {}), '(-(row_gamma / alpha) * row_advantages)\n', (1831, 1870), True, 'import numpy as np\n'), ((1975, 2026), 'numpy.exp', 'np.exp', (['(-(column_gamma / alpha) * column_advantages)'], {}), '(-(column_gamma / alpha) * column_advantages)\n', (1981, 2026), True, 'import numpy as np\n'), ((427, 437), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (434, 437), True, 'import numpy as np\n'), ((474, 484), 'numpy.ones', 'np.ones', (['M'], {}), '(M)\n', (481, 484), True, 'import numpy as np\n'), ((527, 537), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (534, 537), True, 'import numpy as np\n'), ((582, 592), 'numpy.ones', 'np.ones', (['M'], {}), '(M)\n', (589, 592), True, 'import numpy as np\n'), ((1778, 1802), 'numpy.max', 'np.max', (['self._row_logits'], {}), '(self._row_logits)\n', (1784, 1802), True, 'import numpy as np\n'), ((1922, 1949), 'numpy.max', 'np.max', (['self._column_logits'], {}), '(self._column_logits)\n', (1928, 1949), True, 'import numpy as np\n'), ((2071, 2090), 'numpy.sum', 'np.sum', (['row_weights'], {}), '(row_weights)\n', (2077, 2090), True, 'import numpy as 
np\n'), ((2140, 2162), 'numpy.sum', 'np.sum', (['column_weights'], {}), '(column_weights)\n', (2146, 2162), True, 'import numpy as np\n'), ((2183, 2212), 'numpy.isfinite', 'np.isfinite', (['self._row_logits'], {}), '(self._row_logits)\n', (2194, 2212), True, 'import numpy as np\n'), ((2275, 2307), 'numpy.isfinite', 'np.isfinite', (['self._column_logits'], {}), '(self._column_logits)\n', (2286, 2307), True, 'import numpy as np\n'), ((1097, 1112), 'numpy.log', 'np.log', (['self._N'], {}), '(self._N)\n', (1103, 1112), True, 'import numpy as np\n'), ((1192, 1207), 'numpy.log', 'np.log', (['self._M'], {}), '(self._M)\n', (1198, 1207), True, 'import numpy as np\n')] |
import json
import xml.etree.ElementTree as ET
import copy
import numpy as np
# from skimage.measure import points_in_poly
# NOTE(review): this seeds NumPy's *global* RNG at import time — a side
# effect on every other user of np.random in the process; confirm intended.
np.random.seed(0)
class Polygon(object):
    """
    Polygon represented as [N, 2] array of vertices
    """
    def __init__(self, name, vertices):
        """
        Initialize the polygon.
        Arguments:
            name: string, name of the polygon
            vertices: [N, 2] 2D numpy array of int
        """
        self._name = name
        self._vertices = vertices

    def __str__(self):
        return self._name

    def inside(self, coord):
        """
        Determine if a given coordinate is inside the polygon or not.
        Arguments:
            coord: 2 element tuple of int, e.g. (x, y)
        Returns:
            bool, if the coord is inside the polygon.
        """
        # BUG FIX: the previous body called skimage.measure.points_in_poly,
        # whose import is commented out above, so this method raised NameError.
        # Use an even-odd (ray casting) test instead: count how many polygon
        # edges a horizontal ray from `coord` crosses.  Points exactly on the
        # boundary may be classified either way — same caveat as the original.
        x, y = float(coord[0]), float(coord[1])
        vertices = np.asarray(self._vertices, dtype=float)
        result = False
        j = len(vertices) - 1
        for i in range(len(vertices)):
            xi, yi = vertices[i]
            xj, yj = vertices[j]
            # Edge (j -> i) straddles the horizontal line through y and the
            # crossing point lies strictly to the right of x.
            if (yi > y) != (yj > y) and x < xi + (xj - xi) * (y - yi) / (yj - yi):
                result = not result
            j = i
        return result

    def vertices(self):
        return np.array(self._vertices)
class Annotation(object):
    """
    Annotation about the regions within WSI in terms of vertices of polygons.
    """
    def __init__(self):
        self._json_path = ''
        self._polygons_positive = []
        self._polygons_negative = []

    def __str__(self):
        return self._json_path

    def from_json(self, json_path):
        """
        Initialize the annotation from a json file.
        Arguments:
            json_path: string, path to the json annotation.
        """
        self._json_path = json_path
        with open(json_path) as f:
            annotations_json = json.load(f)

        for annotation in annotations_json['positive']:
            name = annotation['name']
            vertices = np.array(annotation['vertices'])
            self._polygons_positive.append(Polygon(name, vertices))
        for annotation in annotations_json['negative']:
            name = annotation['name']
            vertices = np.array(annotation['vertices'])
            self._polygons_negative.append(Polygon(name, vertices))

    def inside_polygons(self, coord, is_positive):
        """
        Determine if a given coordinate is inside the positive/negative
        polygons of the annotation.
        Arguments:
            coord: 2 element tuple of int, e.g. (x, y)
            is_positive: bool, inside positive or negative polygons.
        Returns:
            bool, if the coord is inside the positive/negative polygons of the
            annotation.
        """
        # PERF FIX: the previous implementation deep-copied every polygon on
        # each query; `Polygon.inside` does not mutate the polygon, so the
        # copies were pure overhead on a hot query path.
        polygons = self._polygons_positive if is_positive else self._polygons_negative
        return any(polygon.inside(coord) for polygon in polygons)

    def polygon_vertices(self, is_positive):
        """
        Return the polygon represented as [N, 2] array of vertices
        Arguments:
            is_positive: bool, return positive or negative polygons.
        Returns:
            [N, 2] 2D array of int
        """
        polygons = self._polygons_positive if is_positive else self._polygons_negative
        return [polygon.vertices() for polygon in polygons]
class Formatter(object):
    """
    Format converter e.g. CAMELYON16 to internal json
    """
    @staticmethod
    def _annotation_to_entry(annotation):
        """Convert one ASAP <Annotation> element to a {'name', 'vertices'} dict."""
        coordinates = annotation.findall('./Coordinates/Coordinate')
        X = [float(coord.get('X')) for coord in coordinates]
        Y = [float(coord.get('Y')) for coord in coordinates]
        # Round to integer pixel coordinates, one [x, y] pair per vertex.
        vertices = np.round([X, Y]).astype(int).transpose().tolist()
        return {'name': annotation.attrib['Name'], 'vertices': vertices}

    @staticmethod
    def camelyon16xml2json(inxml, outjson):
        """
        Convert an annotation of camelyon16 xml format into a json format.
        Arguments:
            inxml: string, path to the input camelyon16 xml format
            outjson: string, path to the output json format

        Note: the methods of this class are now decorated with @staticmethod
        (they take no self); the previous bare defs broke instance calls.
        """
        root = ET.parse(inxml).getroot()
        # Groups Tumor/_0/_1 are positive regions, _2 is negative (exclusion).
        annotations_positive = []
        for group in ('Tumor', '_0', '_1'):
            annotations_positive.extend(root.findall(
                './Annotations/Annotation[@PartOfGroup="{}"]'.format(group)))
        annotations_negative = root.findall(
            './Annotations/Annotation[@PartOfGroup="_2"]')

        json_dict = {
            'positive': [Formatter._annotation_to_entry(annotation)
                         for annotation in annotations_positive],
            'negative': [Formatter._annotation_to_entry(annotation)
                         for annotation in annotations_negative],
        }
        with open(outjson, 'w') as f:
            json.dump(json_dict, f, indent=1)

    @staticmethod
    def vertices2json(outjson, positive_vertices=(), negative_vertices=()):
        """Write positive/negative vertex arrays to the internal json format.
        Arguments:
            outjson: string, path to the output json file
            positive_vertices: iterable of [N, 2] numpy arrays
            negative_vertices: iterable of [N, 2] numpy arrays

        Note: the mutable-list default arguments were replaced with tuples
        (never mutated, but safer and equally iterable).
        """
        json_dict = {'positive': [], 'negative': []}
        for i, vertices in enumerate(positive_vertices):
            json_dict['positive'].append(
                {'name': 'Annotation {}'.format(i),
                 'vertices': vertices.astype(int).tolist()})
        for i, vertices in enumerate(negative_vertices):
            json_dict['negative'].append(
                {'name': 'Annotation {}'.format(i),
                 'vertices': vertices.astype(int).tolist()})
        with open(outjson, 'w') as f:
            json.dump(json_dict, f, indent=1)
| [
"xml.etree.ElementTree.parse",
"numpy.round",
"numpy.array",
"numpy.random.seed",
"copy.deepcopy",
"json.load",
"json.dump"
] | [((125, 142), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (139, 142), True, 'import numpy as np\n'), ((929, 953), 'numpy.array', 'np.array', (['self._vertices'], {}), '(self._vertices)\n', (937, 953), True, 'import numpy as np\n'), ((1553, 1565), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1562, 1565), False, 'import json\n'), ((1684, 1716), 'numpy.array', 'np.array', (["annotation['vertices']"], {}), "(annotation['vertices'])\n", (1692, 1716), True, 'import numpy as np\n'), ((1933, 1965), 'numpy.array', 'np.array', (["annotation['vertices']"], {}), "(annotation['vertices'])\n", (1941, 1965), True, 'import numpy as np\n'), ((2560, 2598), 'copy.deepcopy', 'copy.deepcopy', (['self._polygons_positive'], {}), '(self._polygons_positive)\n', (2573, 2598), False, 'import copy\n'), ((2636, 2674), 'copy.deepcopy', 'copy.deepcopy', (['self._polygons_negative'], {}), '(self._polygons_negative)\n', (2649, 2674), False, 'import copy\n'), ((5370, 5403), 'json.dump', 'json.dump', (['json_dict', 'f'], {'indent': '(1)'}), '(json_dict, f, indent=1)\n', (5379, 5403), False, 'import json\n'), ((6101, 6134), 'json.dump', 'json.dump', (['json_dict', 'f'], {'indent': '(1)'}), '(json_dict, f, indent=1)\n', (6110, 6134), False, 'import json\n'), ((3672, 3687), 'xml.etree.ElementTree.parse', 'ET.parse', (['inxml'], {}), '(inxml)\n', (3680, 3687), True, 'import xml.etree.ElementTree as ET\n'), ((4651, 4667), 'numpy.round', 'np.round', (['[X, Y]'], {}), '([X, Y])\n', (4659, 4667), True, 'import numpy as np\n'), ((5145, 5161), 'numpy.round', 'np.round', (['[X, Y]'], {}), '([X, Y])\n', (5153, 5161), True, 'import numpy as np\n')] |
"""
This file defines the WindowGenerator model.
"""
from typing import Callable, Union, Tuple, List, Optional
import pandas as pd
import numpy as np
import math
import tensorflow as tf
from keras.backend import floatx
from .. import AutopycoinBaseClass
from ..utils import features, date_features, convert_to_list
class WindowGenerator(AutopycoinBaseClass):
"""Transform a time serie into an usable format for tensorflow model.
It can be either a pandas dataframe, tensorflow tensor or numpy array.
Parameters
----------
input_width : int
The number of historical time steps to use during the forecasting.
label_width : int
the number of time steps to forecast.
shift : int
Compute the shift between input time steps (`input_width`) and
labels time steps (`label_width`).
Hence if `label_width` is higher than `shift` label input and label datasets
        will have some identical values.
valid_size : int
The number of examples in the validation set. Use a float between 0 and 1 to use proportion.
test_size : int
The number of examples in the test set. Use a float between 0 and 1 to use proportion.
flat : bool
Flatten the inputs and labels tensors.
batch_size : int
The number of examples per batch. If None, then all examples are stacked in one batch.
Default to None.
preprocessing : callable or None
Preprocessing function to use on the data.
This function needs to take input of shape ((inputs, ...), labels).
It is applied after the train, validation and test split.
Default to None.
Attributes
----------
input_width : int
label_width : int
shift : int
valid_size : int
test_size : int
flat : bool
batch_size : int or None
train : :literal:`dataset`
valid : :literal:`dataset`
test : :literal:`dataset`
data : DataFrame or ndarray or :literal:`Tensor`
Notes
-----
The dataset's shape depends on the columns defined in :literal:`from_array` method.
There are currently four input tensors which can be added inside the inputs dataset.
Output shape:
when all columns components are defined:
Tuple of shape ((inputs, known, date_inputs, date_labels), labels)
inputs tensor:
The input tensor of shape (batch_size, input_width, input_columns) or (batch_size, input_width * input_columns)
depending if flat is set to True. Basically, they are historical values.
known tensor:
The known tensor of shape (batch_size, input_width, known_columns) or (batch_size, input_width * known_columns)
depending if flat is set to True are the variables whose values
are known in advance or estimated.
For example: time dates or temperatures.
date_inputs tensor:
Dates of shape (batch_size, input_width) are the dates associated to the inputs tensor.
Default to a tensor generated by :literal:`tf.range`.
date_labels tensor:
Dates of shape (batch_size, input_width) are the dates associated to the inputs tensor.
Default to a tensor generated by :literal:`tf.range`.
labels tensor:
The Output variables of shape (batch_size, label_width, label_columns) or (batch_size, label_width * label_columns)
depending if flat is set to True. They are the values to predict.
Examples
--------
>>> import pandas as pd
>>> from autopycoin.data import random_ts
>>> from autopycoin.dataset import WindowGenerator
...
... # We generate data
>>> data = random_ts(n_steps=100,
... trend_degree=2,
... periods=[10],
... fourier_orders=[10],
... trend_mean=0,
... trend_std=1,
... seasonality_mean=0,
... seasonality_std=1,
... batch_size=1,
... n_variables=1,
... noise=True,
... seed=42)
...
>>> w_oneshot = WindowGenerator(input_width=3,
... label_width=2,
... shift=10,
... valid_size=2,
... test_size=3,
... flat=True,
... batch_size=None,
... preprocessing=None)
...
... # Here juste inputs and labels tensors are generated
>>> w_oneshot = w_oneshot.from_array(data[0],
... input_columns=[0],
... label_columns=[0])
"""
    def __init__(
        self,
        input_width: int,
        label_width: int,
        shift: Union[None, int] = None,
        valid_size: Union[int, float] = 0,
        test_size: Union[int, float] = 0,
        flat: bool = False,
        sequence_stride: int = 1,
        batch_size: int = None,
        preprocessing: Union[None, Callable] = None,
    ):
        """Store the window parameters; data is attached later via `from_array`."""
        self._input_width = input_width
        self._label_width = label_width
        # A missing shift defaults to the forecast horizon (no window overlap).
        self._shift = shift if shift is not None else label_width
        self._sequence_stride = sequence_stride
        self._valid_size = valid_size
        self._test_size = test_size
        self._batch_size = batch_size
        self._flat = flat
        # We separate init functions in order to perform validation.
        self._compute_window_parameters()
        # Preprocessing layers
        self._preprocessing = preprocessing
        # Flipped to True by `from_array`; guards data/column property access.
        self._initialized = False
def _compute_window_parameters(self) -> None:
"""Calculate the window parameters."""
self._total_window_size = self.input_width + self.shift
self._input_slice = slice(0, self.input_width)
self._input_indices = np.arange(self._total_window_size)[self._input_slice]
self._label_start = self._total_window_size - self.label_width
self._label_slice = slice(self._label_start, self._total_window_size)
self._label_indices = np.arange(self._total_window_size)[self._label_slice]
    def from_array(
        self,
        data: Union[pd.DataFrame, np.ndarray, tf.Tensor, pd.Series],
        input_columns: Union[None, List[Union[int, str]]] = None,
        label_columns: Union[None, List[Union[int, str]]] = None,
        known_columns: Union[None, List[Union[int, str]]] = None,
        date_columns: Union[None, List[Union[int, str]]] = None,
    ):
        """Feed :literal:`WindowGenerator` with a pandas dataframe or a numpy ndarray.
        This method has to be called before using the `train`, `test` or `valid`
        methods as it initializes the data.
        Parameters
        ----------
        data : :literal:`DataFrame, Serie, list, ndarray or Tensor of shape (timesteps, variables)`
            The time series dataframe on which train, valid and test datasets are built.
        input_columns : list[str or int]
            The input column names. Variables used to forecast target values.
        label_columns : list[str or int]
            The label column names. Target variables to forecast, default to None.
        known_columns : list[str or int]
            The known column names, default to None.
            Variables whose exact or strongly estimated values are known during
            the target period. Example: Dates or temperatures.
        date_columns : list[str or int]
            The date column names. Dates associated to each steps, default to None.
            Date columns will be cast to string and joined by a
            '-' delimiter to be used as xticks in the plot function.
        Returns
        -------
        self : :literal:`WindowGenerator`
            return the instance.
        """
        # A pandas Series is treated as a bare 1-D array.
        if isinstance(data, pd.Series):
            data = data.values
        # A 1-D input is promoted to single-variable (timesteps, 1) layout.
        if len(data.shape) == 1:
            data = tf.expand_dims(data, axis=-1)
        # By default every column acts both as input and as label.
        if input_columns is None:
            input_columns = [col for col in range(data.shape[-1])]
        if label_columns is None:
            label_columns = [col for col in range(data.shape[-1])]
        if isinstance(data, pd.DataFrame):
            self._from_dataframe(
                data, input_columns, label_columns, known_columns, date_columns
            )
        elif isinstance(data, (np.ndarray, tf.Tensor)):
            self._from_array(
                data, input_columns, label_columns, known_columns, date_columns
            )
        else:
            raise ValueError(
                f"{type(data)} is not handled, please provide a pandas dataframe, a numpy array or a tensor."
            )
        # Build the train/valid/test tf.data pipelines eagerly.
        self._split_train_valid_test()
        return self
    def _from_dataframe(
        self,
        data: pd.DataFrame,
        input_columns: Union[None, List[Union[int, str]]],
        label_columns: Union[None, List[Union[int, str]]] = None,
        known_columns: Union[None, List[Union[int, str]]] = None,
        date_columns: Union[None, List[Union[int, str]]] = None,
    ):
        """Initialize internal state from a pandas DataFrame.

        Column names are resolved to positional indices; the column layout is
        kept in ``_data_columns`` so `production` can validate future frames.

        Raises
        ------
        KeyError
            If any provided column name is missing from ``data``.
        """
        self._initialized = True
        # Avoid replacing original dataframe
        data = data.copy()
        # Convert dataframe into array
        self._data_columns = data.columns
        self._data = data.values
        # Get index for each columns
        # In case if columns are not defined
        try:
            self._input_columns = [
                self._data_columns.get_loc(col) for col in input_columns
            ]
            # Optional groups stay None when not provided.
            self._label_columns = (
                [self._data_columns.get_loc(col) for col in label_columns]
                if label_columns
                else None
            )
            self._known_columns = (
                [self._data_columns.get_loc(col) for col in known_columns]
                if known_columns
                else None
            )
            self._date_columns = (
                [self._data_columns.get_loc(col) for col in date_columns]
                if date_columns
                else None
            )
        except KeyError as error:
            raise KeyError(
                f"Columns are not found inside data, got input_columns: {input_columns},"
                f"label_columns: {label_columns}, known_columns: {known_columns} and date_columns: {date_columns}."
                f"Expected {self._data_columns}."
            ) from error
def _from_array(
self,
data: Union[np.ndarray, tf.Tensor],
input_columns: Union[None, List[Union[slice, int]]],
label_columns: Union[None, List[Union[slice, int]]] = None,
known_columns: Union[None, List[Union[slice, int]]] = None,
date_columns: Union[None, List[Union[slice, int]]] = None,
):
"""Handle array and tensor."""
self._initialized = True
# Converting data into array
data = np.array(data)
self._data = data
self._data_columns = None # Used in `production`
# In case if columns are not defined
self._input_columns = input_columns if input_columns else None
self._label_columns = label_columns if label_columns else None
self._known_columns = known_columns if known_columns else None
self._date_columns = date_columns if date_columns else None
    def _split_train_valid_test(self):
        """Create train, valid and test dataset."""
        self._dataset = self._make_dataset(self.data)
        n_train_examples, n_valid_examples, n_shift_examples = self._get_dataset_sizes(
            self._dataset
        )
        # n_shift_examples windows are skipped between consecutive splits —
        # presumably to avoid label overlap across splits; TODO(review) confirm.
        self._train = self._dataset.take(n_train_examples)
        self._valid = self._dataset.skip(n_train_examples + n_shift_examples).take(
            n_valid_examples
        )
        self._test = self._dataset.skip(
            n_train_examples + n_valid_examples + 2 * n_shift_examples
        )
        # Re-batch from the per-window batching (batch_size=1 in _make_dataset)
        # to the user-requested batch size.
        if self.batch_size:
            self._train = self._train.unbatch().batch(self.batch_size)
            self._valid = self._valid.unbatch().batch(self.batch_size)
            self._test = self._test.unbatch().batch(self.batch_size)
    def _get_dataset_sizes(self, dataset: tf.data.Dataset):
        """Calculate the sizes of train, valid and test dataset from the provided dataset and window parameters.

        Returns
        -------
        (n_train_examples, n_valid_examples, n_shift_examples) : tuple of int
            Number of train windows, valid windows, and windows skipped
            between consecutive splits.
        """
        cardinality = dataset.cardinality()
        # Number of windows dropped between splits, derived from how many
        # strides fit in one forecast horizon.
        n_shift_examples = math.floor(self.label_width / self.sequence_stride) - 1
        n_test_examples = self.test_size
        # A float in [0, 1] is interpreted as a fraction of all windows.
        if isinstance(self.test_size, float) and self.test_size <= 1:
            n_test_examples = int(cardinality.numpy() * n_test_examples)
        n_valid_examples = self.valid_size
        # The valid fraction applies to what remains after the test windows.
        if isinstance(self.valid_size, float) and self.valid_size <= 1:
            n_valid_examples = int(
                (cardinality.numpy() - n_test_examples) * self.valid_size
            )
        n_train_examples = (
            cardinality.numpy()
            - n_valid_examples
            - n_test_examples
            - 2 * n_shift_examples
        )
        return n_train_examples, n_valid_examples, n_shift_examples
    def _make_dataset(
        self, data: Union[pd.DataFrame, np.ndarray, tf.Tensor],
    ) -> tf.data.Dataset:
        """Compute a tensorflow dataset object.
        Parameters
        ----------
        data : :literal:`DataFrame`, ndarray or `Tensor of shape (timestep, variables)`
            The time series dataset.
        Returns
        -------
        ds : :literal:`PrefetchDataset`
            The dataset that can be used in keras model.
        """
        # Cast to the Keras global float dtype before windowing.
        data = data.astype(floatx())
        # One window per element (batch_size=1); re-batched by the caller.
        dataset = tf.keras.preprocessing.timeseries_dataset_from_array(
            data=data,
            targets=None,
            sequence_length=self._total_window_size,
            sequence_stride=self.sequence_stride,
            shuffle=False,
            batch_size=1,
        )
        # Split each raw window into ((inputs, ...), labels) components.
        dataset = dataset.map(self._split_window, num_parallel_calls=tf.data.AUTOTUNE)
        # Optional user preprocessing, applied after the train/valid/test split shape.
        if self._preprocessing is not None:
            dataset = dataset.map(
                self._preprocessing, num_parallel_calls=tf.data.AUTOTUNE
            )
        return dataset.prefetch(tf.data.experimental.AUTOTUNE)
    def _split_window(self, feature_tensor: tf.Tensor) -> Tuple[tf.Tensor]:
        """
        Compute the windows split.
        Parameters
        ----------
        feature_tensor : :literal:`tensor of shape (Batch_size, timestep, variables)`
            The window defined by `timeseries_dataset_from_array`.
        Returns
        -------
        inputs : :literal:`Tensor`
            The input tensor of shape (batch_size, input_width, input_columns)
            if `flat` is set to `False` else (batch_size, input_width * input_columns).
        known : :literal:`Tensor`
            The known tensor of shape (batch_size, input_width, known_columns)
            if :literal:`flat` is set to `False` else (batch_size, input_width * known_columns).
            Variables whose values are known.
            in advance or estimated. For example: time dates or temperatures.
        date_inputs : :literal:`Tensor`
            Input dates of shape (batch_size, input_width).
            Default to a tensor generated by :literal:`tf.range`.
        date_labels : :literal:`Tensor`
            label dates of shape (batch_size, label_width).
            Default to a tensor generated by `tf.range`.
        labels : :literal:`Tensor`
            The Output variables of shape (batch_size, label_width, label_columns)
            if :literal:`flat` is set to :literal:`False` else (batch_size, label_width * label_columns).
        """
        # function used to transform the shape of inputs and labels tensors
        if self.flat:
            func = tf.keras.layers.Flatten()
        else:
            func = tf.identity
        inputs = features(feature_tensor, self._input_slice, self._input_columns)
        output = func(inputs)
        # Optional components are appended in a fixed order:
        # inputs, [known], [date_inputs, date_labels] — see the class docstring.
        # TODO: unit testing
        if self.known_columns:
            known = features(feature_tensor, self._label_slice, self._known_columns)
            output = convert_to_list(output)
            output.append(func(known))
        if self.date_columns:
            date_inputs = date_features(
                feature_tensor, self._input_slice, self._date_columns
            )
            date_labels = date_features(
                feature_tensor, self._label_slice, self._date_columns
            )
            output = convert_to_list(output)
            output.append(func(date_inputs))
            output.append(func(date_labels))
        # tf.data expects structure as tuples, not lists.
        if isinstance(output, list):
            output = tuple(output)
        if self.label_columns:
            labels = features(feature_tensor, self._label_slice, self._label_columns)
            return output, func(labels)
        return output
def production(
self,
data: Union[pd.DataFrame, np.array, tf.Tensor],
batch_size: Optional[int] = None,
) -> tf.data.Dataset:
"""
Build the production dataset.
Parameters
----------
data : :literal:`DataFrame of shape (input_width + shift, variables)`
Data to forecast. inputs steps need to be inside data.
Returns
-------
data : :literal:`PrefetchDataset of shape (inputs, known, date_inputs, date_labels), labels`
MapDataset which returns data with shape
((inputs, known, date_inputs, date_labels), labels).
Raises
------
AssertionError
It raises an error if not all columns defined in the constructor method are inside data.
"""
# If a dataframe has been previously initialized then variables columns from the current
# dataframe doesn't need to perfectly match self._data_columns.
if isinstance(data, pd.DataFrame) and self._data_columns is not None:
assert (
data.shape[0] >= self._input_width
), f"The given dataframe doesn't contain enough values, got {data.shape[0]} values, expected at least {self._input_width} values."
# Columns may be none then w e have to translates into []
columns = self._data_columns[
self.input_columns + self.label_columns
if self.label_columns
else [] + self.known_columns
if self.known_columns
else [] + self.date_columns
if self.date_columns
else []
]
assert all(
columns.isin(data.columns)
), f"The given data columns doesn't match the expected columns, got {data.columns}. Expected at least {columns}"
data = data.loc[:, self._data_columns].values
else:
# If an array is provided or a dataframe but `from_dataframe` was not used previously then
# Data shape has to match the specs saved from the methods `from_array` or `from_dataframe`.
assert (
data.shape[0] >= self._input_width
and data.shape[1:] == self.data.shape[1:]
), f"""The given array doesn't contain enough data, got data of shape {data.shape}.
Expected at least shape {(self._input_width, *self.data.shape[1:])}."""
data = self._make_dataset(data)
if batch_size is not None:
data = data.unbatch().batch(batch_size)
return data
def get_config(self):
"""Return the config values."""
return {
"input_width": self.input_width,
"label_width": self.label_width,
"shift": self.shift,
"valid_size": self.valid_size,
"test_size": self.test_size,
"flat": self.flat,
"batch_size": self.batch_size,
"preprocessing": self._preprocessing,
}
    @property
    def train(self) -> tf.data.Dataset:
        """
        Return the train dataset.
        Returns
        -------
        dataset: :literal:`Dataset`
            Train dataset. It cannot be empty.
        """
        return self._train
    @property
    def valid(self) -> tf.data.Dataset:
        """
        Return the valid dataset.
        Returns
        -------
        dataset: :literal:`Dataset`
        """
        return self._valid
    @property
    def test(self) -> Union[tf.data.Dataset, None]:
        """
        Return the test dataset.
        Returns
        -------
        dataset: :literal:`Dataset`
        """
        return self._test
    @property
    def data(self) -> np.ndarray:
        """
        Return the original data.

        Raises AttributeError if `from_array` has not been called yet.
        """
        if self._initialized:
            return self._data
        raise AttributeError(
            """The instance is not initialized.
        Call :literal:`from_array` to initialize it."""
        )
    @data.setter
    def data(self, _) -> None:
        """
        Reject direct assignment; data is read-only.
        """
        raise AttributeError(
            "You cannot modify :literal:`data`, use :literal:`from_array` instead."
        )
    @property
    def input_width(self) -> int:
        """
        Return the input_width.
        """
        return self._input_width
    @property
    def label_width(self) -> int:
        """
        Return the label_width.
        """
        return self._label_width
    @property
    def shift(self) -> int:
        """
        Return the shift.
        """
        return self._shift
    @property
    def valid_size(self) -> int:
        """
        Return the valid_size.
        """
        return self._valid_size
    @property
    def test_size(self) -> int:
        """
        Return the test_size.
        """
        return self._test_size
    @property
    def flat(self) -> bool:
        """
        Return the attribute flat (whether windows are flattened to 2-D).
        """
        return self._flat
    @property
    def batch_size(self) -> Optional[int]:
        """
        Return the attribute batch_size (None means a single batch).
        """
        return self._batch_size
    @property
    def sequence_stride(self) -> int:
        """
        Return the attribute sequence_stride.
        """
        return self._sequence_stride
    @property
    def input_columns(self) -> List[Union[int, slice]]:
        """
        Return the input column indices.
        """
        if self._initialized:
            return self._input_columns
        raise AttributeError(
            """The instance is not initialized.
        Call `from_array` to initialize it."""
        )
    @input_columns.setter
    def input_columns(self, _) -> None:
        """
        Reject direct assignment; columns are set via `from_array`.
        """
        raise AttributeError(
            "You cannot modify `input_columns`, use `from_array` instead."
        )
    @property
    def label_columns(self) -> List[Union[int, slice]]:
        """
        Return the label_columns.
        """
        if self._initialized:
            return self._label_columns
        raise AttributeError(
            """The instance is not initialized.
        Call `from_array` to initialize it."""
        )
    @label_columns.setter
    def label_columns(self, _) -> None:
        """
        Reject direct assignment; columns are set via `from_array`.
        """
        raise AttributeError(
            "You cannot modify `label_columns`, use `from_array` instead."
        )
    @property
    def known_columns(self) -> List[Union[int, slice]]:
        """
        Return the known_columns.
        """
        if self._initialized:
            return self._known_columns
        raise AttributeError(
            """The instance is not initialized.
        Call `from_array` to initialize it."""
        )
    @known_columns.setter
    def known_columns(self, _) -> None:
        """
        Reject direct assignment; columns are set via `from_array`.
        """
        raise AttributeError(
            "You cannot modify `known_columns`, use `from_array` instead."
        )
    @property
    def date_columns(self) -> List[Union[int, slice]]:
        """
        Return date_columns.
        """
        if self._initialized:
            return self._date_columns
        raise AttributeError(
            """The instance is not initialized.
        Call `from_array` to initialize it."""
        )
    @date_columns.setter
    def date_columns(self, _) -> None:
        """
        Reject direct assignment; columns are set via `from_array`.
        """
        raise AttributeError(
            "You cannot modify `date_columns`, use `from_array` instead."
        )
def _val___init__(
self, output: None, *args: list, **kwargs: dict
) -> None: # pylint: disable=unused-argument
"""
Validates attributes and args of __init__ method.
"""
assert (
self.input_width > 0
), f"The input width has to be strictly positive, got {self.input_width}."
assert (
self.label_width > 0
), f"The label width has to be strictly positive, got {self.label_width}."
assert (
self.shift > 0
), f"The shift has to be strictly positive, got {self.shift}."
assert (
self.label_width < self._total_window_size
), f"The label width has to be equal or lower than {self._total_window_size}, got {self.label_width}"
assert (
self.test_size >= 0
), f"The test size has to be positive or null, got {self.test_size}."
assert (
self.valid_size >= 0
), f"The valid size has to be positive or null, got {self.valid_size}."
if self.batch_size:
assert (
self.batch_size > 0
), f"The batch size has to be strictly positive, got {self.batch_size}."
    def _val__from_dataframe(
        self, output: None, *args: list, **kwargs: dict
    ) -> None:  # pylint: disable=unused-argument
        """
        Validates attributes and args of :literal:`_from_dataframe` method.
        """
        assert len(self.input_columns) > 0, "The input columns list is empty."
        assert np.size(self.data), "The given parameter `data` is an empty DataFrame."
    def _val__from_array(
        self, output: None, *args: list, **kwargs: dict
    ) -> None:  # pylint: disable=unused-argument
        """
        Validates attributes and args of :literal:`_from_array` method.
        """
        assert len(self.input_columns) > 0, "The input columns list is empty."
        assert np.size(self.data), "The given parameter `data` is an empty DataFrame."
def _val__split_train_valid_test(
self, output: None, *args: list, **kwargs: dict
) -> None: # pylint: disable=unused-argument
"""
Validates attributes and args of :literal:`_compute_train_valid_test_split` method.
"""
n_train_examples, _, _ = self._get_dataset_sizes(self._dataset)
assert (
n_train_examples
) > 0, f"""The training dataset is empty, please redefine the test size or valid size."""
def _val_from_array(
self, output: None, *args: list, **kwargs: dict
) -> None: # pylint: disable=unused-argument
"""
Validates attributes and args of :literal:`_val_from_array` method.
"""
assert (
max(self.input_columns) < self.data.shape[1]
), f"""Indice {max(self.input_columns)} superior to data shape {self.data.shape}."""
if self.label_columns:
assert (
max(self.label_columns) < self.data.shape[1]
), f"""Indice {max(self.label_columns)} superior to data shape {self.data.shape}."""
if self.known_columns:
assert (
max(self.known_columns) < self.data.shape[1]
), f"""Indice {max(self.known_columns)} superior to data shape {self.data.shape}."""
if self.date_columns:
assert (
max(self.date_columns) < self.data.shape[1]
), f"""Indice {max(self.date_columns)} superior to data shape {self.data.shape}."""
| [
"math.floor",
"tensorflow.keras.layers.Flatten",
"numpy.size",
"keras.backend.floatx",
"numpy.array",
"tensorflow.keras.preprocessing.timeseries_dataset_from_array",
"tensorflow.expand_dims",
"numpy.arange"
] | [((10938, 10952), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (10946, 10952), True, 'import numpy as np\n'), ((13773, 13968), 'tensorflow.keras.preprocessing.timeseries_dataset_from_array', 'tf.keras.preprocessing.timeseries_dataset_from_array', ([], {'data': 'data', 'targets': 'None', 'sequence_length': 'self._total_window_size', 'sequence_stride': 'self.sequence_stride', 'shuffle': '(False)', 'batch_size': '(1)'}), '(data=data, targets=\n None, sequence_length=self._total_window_size, sequence_stride=self.\n sequence_stride, shuffle=False, batch_size=1)\n', (13825, 13968), True, 'import tensorflow as tf\n'), ((26162, 26180), 'numpy.size', 'np.size', (['self.data'], {}), '(self.data)\n', (26169, 26180), True, 'import numpy as np\n'), ((26558, 26576), 'numpy.size', 'np.size', (['self.data'], {}), '(self.data)\n', (26565, 26576), True, 'import numpy as np\n'), ((5863, 5897), 'numpy.arange', 'np.arange', (['self._total_window_size'], {}), '(self._total_window_size)\n', (5872, 5897), True, 'import numpy as np\n'), ((6097, 6131), 'numpy.arange', 'np.arange', (['self._total_window_size'], {}), '(self._total_window_size)\n', (6106, 6131), True, 'import numpy as np\n'), ((7952, 7981), 'tensorflow.expand_dims', 'tf.expand_dims', (['data'], {'axis': '(-1)'}), '(data, axis=-1)\n', (7966, 7981), True, 'import tensorflow as tf\n'), ((12431, 12482), 'math.floor', 'math.floor', (['(self.label_width / self.sequence_stride)'], {}), '(self.label_width / self.sequence_stride)\n', (12441, 12482), False, 'import math\n'), ((13744, 13752), 'keras.backend.floatx', 'floatx', ([], {}), '()\n', (13750, 13752), False, 'from keras.backend import floatx\n'), ((15924, 15949), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (15947, 15949), True, 'import tensorflow as tf\n')] |
import numpy
import os
import unittest
import medipy.io.dicom
import medipy.io.dicom.normalize
class TestDiffusion(unittest.TestCase):
    """ Checks the normalization of Siemens diffusion-weighted DICOM files.
    """

    def setUp(self):
        # The test data directory sits three levels above this file.
        here = os.path.dirname(__file__)
        self.data_directory = os.path.abspath(
            os.path.join(here, "..", "..", "..", "data"))

    def test_siemens_0(self):
        """ Normalization of a DWI Siemens image with a b-value of 0.
        """
        path = os.path.join(self.data_directory, "input", "siemens_dwi_0.dcm")
        raw = medipy.io.dicom.read(path)
        for dataset in medipy.io.dicom.normalize.normalize(raw):
            self.assertTrue("mr_diffusion_sequence" in dataset)
            diffusion = dataset.mr_diffusion_sequence.value[0]
            self.assertEqual(diffusion.diffusion_bvalue.value, 0)
            self.assertEqual(diffusion.diffusion_directionality.value, "DIRECTIONAL")
            self.assertTrue("diffusion_gradient_direction_sequence" in diffusion)
            gradient = diffusion.diffusion_gradient_direction_sequence.value[0].\
                diffusion_gradient_orientation
            # b=0 images carry a null gradient direction.
            numpy.testing.assert_array_equal(gradient.value, [0,0,0])

    def test_siemens_1000(self):
        """ Normalization of a DWI Siemens image with a b-value of 1000.
        """
        path = os.path.join(self.data_directory, "input", "siemens_dwi_1000.dcm")
        raw = medipy.io.dicom.read(path)
        for dataset in medipy.io.dicom.normalize.normalize(raw):
            self.assertTrue("mr_diffusion_sequence" in dataset)
            diffusion = dataset.mr_diffusion_sequence.value[0]
            self.assertEqual(diffusion.diffusion_bvalue.value, 1000)
            self.assertEqual(diffusion.diffusion_directionality.value, "DIRECTIONAL")
            self.assertTrue("diffusion_gradient_direction_sequence" in diffusion)
            gradient = diffusion.diffusion_gradient_direction_sequence.value[0].\
                diffusion_gradient_orientation
            # The gradient direction must be a unit vector.
            self.assertAlmostEqual(numpy.linalg.norm(gradient.value), 1)
# Run the tests when this module is executed directly.
if __name__ == "__main__" :
    unittest.main()
| [
"os.path.join",
"os.path.dirname",
"numpy.linalg.norm",
"unittest.main",
"numpy.testing.assert_array_equal"
] | [((2187, 2202), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2200, 2202), False, 'import unittest\n'), ((467, 530), 'os.path.join', 'os.path.join', (['self.data_directory', '"""input"""', '"""siemens_dwi_0.dcm"""'], {}), "(self.data_directory, 'input', 'siemens_dwi_0.dcm')\n", (479, 530), False, 'import os\n'), ((1155, 1215), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['direction.value', '[0, 0, 0]'], {}), '(direction.value, [0, 0, 0])\n', (1187, 1215), False, 'import numpy\n'), ((1398, 1464), 'os.path.join', 'os.path.join', (['self.data_directory', '"""input"""', '"""siemens_dwi_1000.dcm"""'], {}), "(self.data_directory, 'input', 'siemens_dwi_1000.dcm')\n", (1410, 1464), False, 'import os\n'), ((235, 260), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (250, 260), False, 'import os\n'), ((2115, 2149), 'numpy.linalg.norm', 'numpy.linalg.norm', (['direction.value'], {}), '(direction.value)\n', (2132, 2149), False, 'import numpy\n')] |
from sklearn import tree
import json
import pickle
import os
import base64
import numpy as np
from rafiki.model import BaseModel, InvalidModelParamsException, validate_model_class, load_dataset
from rafiki.constants import TaskType
class SkDt(BaseModel):
    '''
    Implements a decision tree classifier on scikit-learn
    '''

    def get_knob_config(self):
        """
        Describe the tunable hyperparameters (knobs) of this model.
        """
        return {
            'knobs': {
                'max_depth': {
                    'type': 'int',
                    'range': [2, 8]
                },
                'criterion': {
                    'type': 'string',
                    'values': ['gini', 'entropy']
                },
            }
        }

    def get_predict_label_mapping(self):
        """
        Return the mapping from predicted class indices to label names.
        """
        return self._predict_label_mapping

    def init(self, knobs):
        """
        Build the underlying classifier from the given knob values.
        """
        self._max_depth = knobs.get('max_depth')
        self._criterion = knobs.get('criterion')
        self._clf = self._build_classifier(
            self._max_depth,
            self._criterion
        )

    def train(self, dataset_uri, task):
        """
        Fit the decision tree on the dataset at `dataset_uri`.
        """
        (images, labels) = self._load_dataset(dataset_uri, task)
        class_names = np.unique(labels)
        # Predictions are reported as class indices; remember index -> name.
        self._predict_label_mapping = dict(enumerate(class_names))
        X = self._prepare_X(images)
        y = self._encode_labels(labels)
        self._clf.fit(X, y)

    def evaluate(self, dataset_uri, task):
        """
        Return the classification accuracy on the dataset at `dataset_uri`.
        """
        (images, labels) = self._load_dataset(dataset_uri, task)
        X = self._prepare_X(images)
        y = self._encode_labels(labels)
        preds = self._clf.predict(X)
        # Mean of the element-wise boolean comparison is the accuracy.
        return np.mean(y == preds)

    def predict(self, queries):
        """
        Return the class probabilities for each query image.
        """
        X = self._prepare_X(queries)
        probs = self._clf.predict_proba(X)
        return probs

    def destroy(self):
        """
        Nothing to clean up for this model.
        """
        pass

    def dump_parameters(self):
        """
        Serialize the fitted classifier and label mapping to a JSON-safe dict.
        """
        clf_bytes = pickle.dumps(self._clf)
        clf_base64 = base64.b64encode(clf_bytes).decode('utf-8')
        return {
            'clf_base64': clf_base64,
            'predict_label_mapping': self._predict_label_mapping
        }

    def load_parameters(self, params):
        """
        Restore the classifier and label mapping from `dump_parameters` output.
        """
        if 'clf_base64' in params:
            clf_bytes = base64.b64decode(params['clf_base64'].encode('utf-8'))
            # SECURITY: pickle.loads executes arbitrary code during
            # deserialization; only load parameters produced by a trusted
            # `dump_parameters` call.
            self._clf = pickle.loads(clf_bytes)
        if 'predict_label_mapping' in params:
            self._predict_label_mapping = params['predict_label_mapping']

    def _encode_labels(self, labels):
        """
        Map raw label values to the integer class indices used for training.
        (Shared by `train` and `evaluate`, which previously duplicated it.)
        """
        label_to_index = {v: k for k, v in self._predict_label_mapping.items()}
        return np.array([label_to_index[label] for label in labels])

    def _prepare_X(self, images):
        """
        Flatten each image into a 1-D feature vector.
        """
        return [np.array(image).flatten() for image in images]

    def _load_dataset(self, dataset_uri, task):
        """
        Load the dataset with Rafiki's in-built dataset loader.
        """
        return load_dataset(dataset_uri, task)

    def _build_classifier(self, max_depth, criterion):
        """
        Construct the scikit-learn decision tree with the given knobs.
        """
        return tree.DecisionTreeClassifier(
            max_depth=max_depth,
            criterion=criterion
        )
if __name__ == '__main__':
    # Smoke-test the model against the Fashion-MNIST datasets hosted on
    # GitHub. The single query below is one 28x28 grayscale image
    # (pixel values 0-255).
    validate_model_class(
        model_class=SkDt,
        train_dataset_uri='https://github.com/cadmusthefounder/mnist_data/blob/master/output/fashion_train.zip?raw=true',
        test_dataset_uri='https://github.com/cadmusthefounder/mnist_data/blob/master/output/fashion_test.zip?raw=true',
        task=TaskType.IMAGE_CLASSIFICATION,
        queries=[
            [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 0, 0, 7, 0, 37, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 27, 84, 11, 0, 0, 0, 0, 0, 0, 119, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 88, 143, 110, 0, 0, 0, 0, 22, 93, 106, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 53, 129, 120, 147, 175, 157, 166, 135, 154, 168, 140, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 11, 137, 130, 128, 160, 176, 159, 167, 178, 149, 151, 144, 0, 0],
            [0, 0, 0, 0, 0, 0, 1, 0, 2, 1, 0, 3, 0, 0, 115, 114, 106, 137, 168, 153, 156, 165, 167, 143, 157, 158, 11, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 89, 139, 90, 94, 153, 149, 131, 151, 169, 172, 143, 159, 169, 48, 0],
            [0, 0, 0, 0, 0, 0, 2, 4, 1, 0, 0, 0, 98, 136, 110, 109, 110, 162, 135, 144, 149, 159, 167, 144, 158, 169, 119, 0],
            [0, 0, 2, 2, 1, 2, 0, 0, 0, 0, 26, 108, 117, 99, 111, 117, 136, 156, 134, 154, 154, 156, 160, 141, 147, 156, 178, 0],
            [3, 0, 0, 0, 0, 0, 0, 21, 53, 92, 117, 111, 103, 115, 129, 134, 143, 154, 165, 170, 154, 151, 154, 143, 138, 150, 165, 43],
            [0, 0, 23, 54, 65, 76, 85, 118, 128, 123, 111, 113, 118, 127, 125, 139, 133, 136, 160, 140, 155, 161, 144, 155, 172, 161, 189, 62],
            [0, 68, 94, 90, 111, 114, 111, 114, 115, 127, 135, 136, 143, 126, 127, 151, 154, 143, 148, 125, 162, 162, 144, 138, 153, 162, 196, 58],
            [70, 169, 129, 104, 98, 100, 94, 97, 98, 102, 108, 106, 119, 120, 129, 149, 156, 167, 190, 190, 196, 198, 198, 187, 197, 189, 184, 36],
            [16, 126, 171, 188, 188, 184, 171, 153, 135, 120, 126, 127, 146, 185, 195, 209, 208, 255, 209, 177, 245, 252, 251, 251, 247, 220, 206, 49],
            [0, 0, 0, 12, 67, 106, 164, 185, 199, 210, 211, 210, 208, 190, 150, 82, 8, 0, 0, 0, 178, 208, 188, 175, 162, 158, 151, 11],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
        ]
    )
| [
"numpy.unique",
"pickle.dumps",
"base64.b64encode",
"sklearn.tree.DecisionTreeClassifier",
"numpy.array",
"rafiki.model.load_dataset",
"pickle.loads",
"rafiki.model.validate_model_class"
] | [((3270, 6640), 'rafiki.model.validate_model_class', 'validate_model_class', ([], {'model_class': 'SkDt', 'train_dataset_uri': '"""https://github.com/cadmusthefounder/mnist_data/blob/master/output/fashion_train.zip?raw=true"""', 'test_dataset_uri': '"""https://github.com/cadmusthefounder/mnist_data/blob/master/output/fashion_test.zip?raw=true"""', 'task': 'TaskType.IMAGE_CLASSIFICATION', 'queries': '[[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 3, 1, 0, 0, 7, 0, 37, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 1, 2, 0, 27, 84, 11, 0, 0, 0, 0, 0, 0, 119, 0, 0], [0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 88, 143, 110, 0, 0, 0, 0, 22, 93,\n 106, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 53, 129, 120,\n 147, 175, 157, 166, 135, 154, 168, 140, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 2, 0, 11, 137, 130, 128, 160, 176, 159, 167, 178, 149, 151,\n 144, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 2, 1, 0, 3, 0, 0, 115, 114, 106, \n 137, 168, 153, 156, 165, 167, 143, 157, 158, 11, 0], [0, 0, 0, 0, 1, 0,\n 0, 0, 0, 0, 3, 0, 0, 89, 139, 90, 94, 153, 149, 131, 151, 169, 172, 143,\n 159, 169, 48, 0], [0, 0, 0, 0, 0, 0, 2, 4, 1, 0, 0, 0, 98, 136, 110, \n 109, 110, 162, 135, 144, 149, 159, 167, 144, 158, 169, 119, 0], [0, 0, \n 2, 2, 1, 2, 0, 0, 0, 0, 26, 108, 117, 99, 111, 117, 136, 156, 134, 154,\n 154, 156, 160, 141, 147, 156, 
178, 0], [3, 0, 0, 0, 0, 0, 0, 21, 53, 92,\n 117, 111, 103, 115, 129, 134, 143, 154, 165, 170, 154, 151, 154, 143, \n 138, 150, 165, 43], [0, 0, 23, 54, 65, 76, 85, 118, 128, 123, 111, 113,\n 118, 127, 125, 139, 133, 136, 160, 140, 155, 161, 144, 155, 172, 161, \n 189, 62], [0, 68, 94, 90, 111, 114, 111, 114, 115, 127, 135, 136, 143, \n 126, 127, 151, 154, 143, 148, 125, 162, 162, 144, 138, 153, 162, 196, \n 58], [70, 169, 129, 104, 98, 100, 94, 97, 98, 102, 108, 106, 119, 120, \n 129, 149, 156, 167, 190, 190, 196, 198, 198, 187, 197, 189, 184, 36], [\n 16, 126, 171, 188, 188, 184, 171, 153, 135, 120, 126, 127, 146, 185, \n 195, 209, 208, 255, 209, 177, 245, 252, 251, 251, 247, 220, 206, 49], [\n 0, 0, 0, 12, 67, 106, 164, 185, 199, 210, 211, 210, 208, 190, 150, 82, \n 8, 0, 0, 0, 178, 208, 188, 175, 162, 158, 151, 11], [0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]]'}), "(model_class=SkDt, train_dataset_uri=\n 'https://github.com/cadmusthefounder/mnist_data/blob/master/output/fashion_train.zip?raw=true'\n , test_dataset_uri=\n 'https://github.com/cadmusthefounder/mnist_data/blob/master/output/fashion_test.zip?raw=true'\n , task=TaskType.IMAGE_CLASSIFICATION, queries=[[[0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, \n 0, 0, 7, 0, 37, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0,\n 27, 84, 11, 0, 0, 0, 0, 0, 0, 119, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1, 0, 0, 88, 143, 110, 0, 0, 0, 0, 22, 93, 106, 0, 0], [0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 53, 129, 120, 147, 175, 157, 166,\n 135, 154, 168, 140, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, \n 11, 137, 130, 128, 160, 176, 159, 167, 178, 149, 151, 144, 0, 0], [0, 0,\n 0, 0, 0, 0, 1, 0, 2, 1, 0, 3, 0, 0, 115, 114, 106, 137, 168, 153, 156, \n 165, 167, 143, 157, 158, 11, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0,\n 89, 139, 90, 94, 153, 149, 131, 151, 169, 172, 143, 159, 169, 48, 0], [\n 0, 0, 0, 0, 0, 0, 2, 4, 1, 0, 0, 0, 98, 136, 110, 109, 110, 162, 135, \n 144, 149, 159, 167, 144, 158, 169, 119, 0], [0, 0, 2, 2, 1, 2, 0, 0, 0,\n 0, 26, 108, 117, 99, 111, 117, 136, 156, 134, 154, 154, 156, 160, 141, \n 147, 156, 178, 0], [3, 0, 0, 0, 0, 0, 0, 21, 53, 92, 117, 111, 103, 115,\n 129, 134, 143, 154, 165, 170, 154, 151, 154, 143, 138, 150, 165, 43], [\n 0, 0, 23, 54, 65, 76, 85, 118, 128, 123, 111, 113, 118, 127, 125, 139, \n 133, 136, 160, 140, 155, 161, 144, 155, 172, 161, 189, 62], [0, 68, 94,\n 90, 111, 114, 111, 114, 115, 127, 135, 136, 143, 126, 127, 151, 154, \n 143, 148, 125, 162, 162, 144, 138, 153, 162, 196, 58], [70, 169, 129, \n 104, 98, 100, 94, 97, 98, 102, 108, 106, 119, 120, 129, 149, 156, 167, \n 190, 190, 196, 198, 198, 187, 197, 189, 184, 36], [16, 126, 171, 188, \n 188, 184, 171, 153, 135, 120, 126, 127, 146, 185, 195, 209, 208, 255, \n 209, 177, 245, 252, 251, 251, 247, 220, 206, 49], [0, 0, 0, 12, 67, 106,\n 
164, 185, 199, 210, 211, 210, 208, 190, 150, 82, 8, 0, 0, 0, 178, 208, \n 188, 175, 162, 158, 151, 11], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])\n", (3290, 6640), False, 'from rafiki.model import BaseModel, InvalidModelParamsException, validate_model_class, load_dataset\n'), ((1155, 1172), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1164, 1172), True, 'import numpy as np\n'), ((1411, 1483), 'numpy.array', 'np.array', (['[train_and_evalutate_label_mapping[label] for label in labels]'], {}), '([train_and_evalutate_label_mapping[label] for label in labels])\n', (1419, 1483), True, 'import numpy as np\n'), ((1794, 1866), 'numpy.array', 'np.array', (['[train_and_evalutate_label_mapping[label] for label in labels]'], {}), '([train_and_evalutate_label_mapping[label] for label in labels])\n', (1802, 1866), True, 'import numpy as np\n'), ((2251, 2274), 'pickle.dumps', 'pickle.dumps', (['self._clf'], {}), '(self._clf)\n', (2263, 2274), False, 'import pickle\n'), ((3011, 3042), 'rafiki.model.load_dataset', 'load_dataset', (['dataset_uri', 'task'], {}), '(dataset_uri, task)\n', (3023, 3042), False, 'from rafiki.model import BaseModel, InvalidModelParamsException, validate_model_class, load_dataset\n'), ((3114, 3183), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'max_depth': 'max_depth', 'criterion': 'criterion'}), '(max_depth=max_depth, criterion=criterion)\n', (3141, 3183), False, 'from sklearn import tree\n'), ((2648, 2671), 'pickle.loads', 
'pickle.loads', (['clf_bytes'], {}), '(clf_bytes)\n', (2660, 2671), False, 'import pickle\n'), ((2296, 2323), 'base64.b64encode', 'base64.b64encode', (['clf_bytes'], {}), '(clf_bytes)\n', (2312, 2323), False, 'import base64\n'), ((2844, 2859), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2852, 2859), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# A simple script to memorize an existing query split.
import argparse
import os
import sys
import numpy as np
sys.path.append('.')
from scripts.config import QUESTION_FILE_JSON, DOCID_FIELD
from scripts.data_convert.convert_common import readQueries
# Command-line interface: input data directory and output directory.
parser = argparse.ArgumentParser('Memorize the query splits')
parser.add_argument('--data_dir',
                    metavar='data directory',
                    help='data directory',
                    type=str, required=True)
parser.add_argument('--out_dir',
                    metavar='output directory',
                    help='output directory',
                    type=str, required=True)
args = parser.parse_args()
print(args)
out_dir = args.out_dir
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
# For every split sub-directory, store its query ids as a NumPy array.
for sub_dir in os.listdir(args.data_dir):
    query_file = os.path.join(args.data_dir, sub_dir, QUESTION_FILE_JSON)
    if not os.path.exists(query_file):
        continue
    print('Reading:', query_file)
    query_ids = [entry[DOCID_FIELD] for entry in readQueries(query_file)]
    print('Read', len(query_ids), 'queries')
    np.save(os.path.join(out_dir, sub_dir + '.npy'), np.array(query_ids))
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"numpy.array",
"sys.path.append",
"scripts.data_convert.convert_common.readQueries"
] | [((134, 154), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (149, 154), False, 'import sys\n'), ((285, 337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Memorize the query splits"""'], {}), "('Memorize the query splits')\n", (308, 337), False, 'import argparse\n'), ((815, 840), 'os.listdir', 'os.listdir', (['args.data_dir'], {}), '(args.data_dir)\n', (825, 840), False, 'import os\n'), ((750, 773), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (764, 773), False, 'import os\n'), ((779, 799), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (790, 799), False, 'import os\n'), ((851, 906), 'os.path.join', 'os.path.join', (['args.data_dir', 'subDir', 'QUESTION_FILE_JSON'], {}), '(args.data_dir, subDir, QUESTION_FILE_JSON)\n', (863, 906), False, 'import os\n'), ((914, 932), 'os.path.exists', 'os.path.exists', (['qf'], {}), '(qf)\n', (928, 932), False, 'import os\n'), ((998, 1013), 'scripts.data_convert.convert_common.readQueries', 'readQueries', (['qf'], {}), '(qf)\n', (1009, 1013), False, 'from scripts.data_convert.convert_common import readQueries\n'), ((1114, 1152), 'os.path.join', 'os.path.join', (['out_dir', "(subDir + '.npy')"], {}), "(out_dir, subDir + '.npy')\n", (1126, 1152), False, 'import os\n'), ((1154, 1167), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (1162, 1167), True, 'import numpy as np\n')] |
import os.path
import random
import cv2
import numpy as np
import dito.io
####
#%%% resource filenames
####
RESOURCES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "resources")


def _resource_path(*parts):
    """Return the absolute path of a resource file shipped with dito."""
    return os.path.join(RESOURCES_DIR, *parts)


def _build_resources_filenames():
    """Assemble the mapping from resource keys to resource file paths."""
    filenames = {}

    # colormaps (self-defined)
    for name in ("plot", "plot2"):
        filenames["colormap:{}".format(name)] = _resource_path("colormaps", "{}.png".format(name))

    # colorbrewer colormaps (note: this product includes color specifications and
    # designs developed by Cynthia Brewer (http://colorbrewer.org/).)
    colorbrewer_names = (
        "accent", "blues", "brbg", "bugn", "bupu", "dark2", "gnbu", "greens",
        "greys", "orrd", "oranges", "prgn", "paired", "pastel1", "pastel2",
        "piyg", "pubu", "pubugn", "puor", "purd", "purples", "rdbu", "rdgy",
        "rdpu", "rdylbu", "rdylgn", "reds", "set1", "set2", "set3", "spectral",
        "ylgn", "ylgnbu", "ylorbr", "ylorrd",
    )
    for name in colorbrewer_names:
        filenames["colormap:{}".format(name)] = _resource_path("colormaps", "colorbrewer", "{}.png".format(name))

    # fonts: Scientifica (distance-field rendering, hence the "_df2" suffix)
    filenames["font:scientifica-12"] = _resource_path("fonts", "scientifica", "scientifica_df2.png")

    # font: Source Code Pro
    for size in (10, 15, 20, 25, 30, 35, 40, 50, 70):
        filenames["font:source-{}".format(size)] = _resource_path("fonts", "source_code_pro", "{}_df2.png".format(size))

    # font: Terminus
    for size in (12, 14, 16, 18, 20, 22, 24, 28, 32):
        filenames["font:terminus-{}".format(size)] = _resource_path("fonts", "terminus", "ter-u{}_df2.png".format(size))

    # test images
    filenames["image:PM5544"] = _resource_path("images", "PM5544.png")
    filenames["image:USC-SIPI-4.1.07"] = _resource_path("images", "USC_SIPI_4.1.07.png")

    return filenames


RESOURCES_FILENAMES = _build_resources_filenames()
####
#%%% synthetic images
####
def constant_image(size=(512, 288), color=(0, 255, 0), dtype=np.uint8):
    """
    Returns an image where each color channel is constant (but the channel
    values may vary).

    `size` is (width, height); the number of channels equals `len(color)`.
    A single-channel `color` yields a 2-D (gray-scale) image.
    """
    channel_count = len(color)
    image = np.zeros(shape=(size[1], size[0], channel_count), dtype=dtype)
    for (channel_index, channel_value) in enumerate(color):
        image[:, :, channel_index] = channel_value
    # Collapse a single-channel image to a 2-D array.
    if channel_count == 1:
        return image[:, :, 0]
    return image
def grid(size=(512, 288), grid_size=16, background_color=(0,), grid_color=(255,), offset=None, dtype=np.uint8):
    """
    Returns a gray-scale image of the given `size` containing a grid with a
    pitch of size `grid_size`, optionally shifted by `offset`.
    """
    if offset is None:
        grid_offset = (0, 0)
    else:
        grid_offset = dito.utils.get_validated_tuple(x=offset, type_=int, count=2, min_value=0)
    image = constant_image(size=size, color=background_color, dtype=dtype)
    # Draw vertical lines first, then horizontal ones.
    for column in range(grid_offset[0] % grid_size, size[0], grid_size):
        image[:, column, ...] = grid_color
    for row in range(grid_offset[1] % grid_size, size[1], grid_size):
        image[row, :, ...] = grid_color
    return image
def checkerboard(size=(512, 288), block_size=16, low=0, high=255):
    """
    Returns a gray-scale image of the given `size` containing a checkerboard
    grid with squares of size `block_size`. The arguments `low` and `high`
    specify the gray scale values to be used for the squares.
    """
    image = np.zeros(shape=(size[1], size[0]), dtype=np.uint8) + low
    for (row_index, y_start) in enumerate(range(0, size[1], block_size)):
        # Even row-blocks start with a `low` square, odd ones with `high`.
        x_first = block_size if (row_index % 2) == 0 else 0
        for x_start in range(x_first, size[0], 2 * block_size):
            image[y_start:(y_start + block_size), x_start:(x_start + block_size)] = high
    return image
def background_checkerboard(size=(512, 288), block_size=16):
    """
    Returns a gray-scale image of the given `size` containing a checkerboard
    grid of light and dark gray squares of size `block_size`.
    """
    # Fixed gray values chosen to read as a neutral background.
    dark_gray = 80
    light_gray = 120
    return checkerboard(size=size, block_size=block_size, low=dark_gray, high=light_gray)
def xslope(height=32, width=256, dtype=np.uint8):
    """
    Return image containing values increasing from 0 to 255 along the x axis.

    The value range is derived from `dtype` (0..255 for uint8).
    """
    value_range = dito.core.dtype_range(dtype=dtype)
    row = np.linspace(start=value_range[0], stop=value_range[1], num=width, endpoint=True, dtype=dtype)
    # Stack the single row `height` times to form the full image.
    return np.tile(row.reshape(1, width), (height, 1))
def yslope(width=32, height=256, dtype=np.uint8):
    """
    Return image containing values increasing from 0 to 255 along the y axis.
    """
    # Build the transposed problem with xslope and flip the axes.
    horizontal = xslope(height=width, width=height, dtype=dtype)
    return horizontal.T
def random_image(size=(512, 288), color=True, dtype=np.uint8, use_standard_library=False):
    """
    Returns a random image of the given `size` and `dtype`.

    When `use_standard_library` is set, the values come from Python's
    `random` module (so `random.seed` controls them); otherwise NumPy's
    global RNG is used.
    """
    shape = (size[1], size[0], 3) if color else (size[1], size[0])
    if use_standard_library:
        value_count = np.prod(shape)
        values = [random.random() for _ in range(value_count)]
        image_random = np.array(values, dtype=np.float32).reshape(*shape)
    else:
        image_random = np.random.rand(*shape)
    return dito.core.convert(image=image_random, dtype=dtype)
def test_image_segments():
    """
    Return a binary (0/255) uint8 test image containing four series of filled
    shapes of exponentially increasing size: circles, squares, ellipses, and
    wide rectangles, each series laid out in its own column.
    """
    image = np.zeros(shape=(288, 512), dtype=np.uint8)
    margin = 8
    shape_count = 10
    # Radii grow exponentially: ~4, ~4.8, ..., doubling every four shapes.
    radii = [round(2**(2 + shape_index / 4)) for shape_index in range(shape_count)]
    max_radius = max(radii)
    fill_color = (255,)

    # series of circles
    column_x = margin + max_radius
    cursor_y = margin
    for radius in radii:
        cursor_y += radius
        cv2.circle(img=image, center=(column_x, cursor_y), radius=radius, color=fill_color, thickness=cv2.FILLED, lineType=cv2.LINE_8)
        cursor_y += radius + margin

    # series of squares
    column_x = 2 * margin + 3 * max_radius
    cursor_y = margin
    for radius in radii:
        cursor_y += radius
        cv2.rectangle(img=image, pt1=dito.core.tir(column_x - radius, cursor_y - radius), pt2=dito.core.tir(column_x + radius, cursor_y + radius), color=fill_color, thickness=cv2.FILLED, lineType=cv2.LINE_8)
        cursor_y += radius + margin

    # series of ellipses (twice as wide as high)
    column_x = 3 * margin + 6 * max_radius
    cursor_y = margin
    for radius in radii:
        cursor_y += radius
        cv2.ellipse(img=image, center=(column_x, cursor_y), axes=(radius * 2, radius), angle=0.0, startAngle=0.0, endAngle=360.0, color=fill_color, thickness=cv2.FILLED, lineType=cv2.LINE_8)
        cursor_y += radius + margin

    # series of wide rectangles
    column_x = 4 * margin + 10 * max_radius
    cursor_y = margin
    for radius in radii:
        cursor_y += radius
        cv2.rectangle(img=image, pt1=dito.core.tir(column_x - radius * 2, cursor_y - radius), pt2=dito.core.tir(column_x + radius * 2, cursor_y + radius), color=fill_color, thickness=cv2.FILLED, lineType=cv2.LINE_8)
        cursor_y += radius + margin

    return image
class DitoTestImageGeneratorV1():
    """
    Generates an image which is useful as a test input for processing functions.
    It features:
    * background color slope for absolute image position/crop assessment
    * grid for assessment of deformations
    * corner indicators for image flip/reflection assessment
    * pixel ruler for length measurements
    * crosshair for image center localization
    * gray slopes for gamma measurements
    * color areas for channel order assessment
    * random color areas for assessment of random seed values of the `random` module and for NumPy.
    * lines with different inclinations for rotation assessment
    * letters/numbers for text appearance assessment
    TODO:
    * checkerboard patterns with different resolutions
    * lines with different widths/separations for resolution measurements
    * OpenCV checkerboard pattern for possible automated detection
    * color wheel for color mapping assessment
    """
    def __init__(self, size, dtype):
        """
        Construct the test image for the given `size` ((width, height) in
        pixels) and `dtype` (uint8, uint16, or a float dtype). The finished
        image is available as `self.image` afterwards.
        """
        # settings
        self.grid_size = 16
        self.ruler_size = 16
        self.line_color = (240, 240, 240)
        # arguments
        self.size = size
        self.dtype = dtype
        # checks
        if min(self.size) < 2 * self.grid_size:
            raise RuntimeError("Size '{}' is too small".format(self.size))
        assert (self.dtype in (np.uint8, np.uint16)) or dito.core.is_float_dtype(dtype=self.dtype)
        # derived properties
        self.size_min = min(self.size)
        self.image_center = (self.size[0] // 2, self.size[1] // 2)
        self.dtype_range = dito.core.dtype_range(dtype=self.dtype)
        (self.grid_offset, self.grid_inner_offset, self.grid_inner_count) = self.calculate_grid_parameters()
        self.min_inner_count = min(self.grid_inner_count)
        self.max_inner_count = max(self.grid_inner_count)
        # image construction -- each feature is only drawn if the image is
        # large enough for it (measured in inner grid cells)
        self.image = self.generate_base_image()
        if self.min_inner_count >= 2:
            self.draw_corner_identifier_texts()
        if self.min_inner_count >= 2:
            self.draw_rulers()
        if self.max_inner_count >= 4:
            self.draw_center_crosshair()
        if self.min_inner_count >= 4:
            self.draw_gray_slopes()
        if self.min_inner_count >= 6:
            self.draw_color_areas()
        if self.min_inner_count >= 8:
            self.draw_rotation_indicators()
        #self.draw_checkerboard_patterns()
    def adapt_color_for_dtype(self, color):
        """
        Map a uint8 color (range [0, 255]) to the correct range of the dtype of
        this image.

        Accepts either a scalar or a vector-like color and returns the same
        kind it was given.
        """
        try:
            len(color)
        except TypeError:
            # color is a scalar
            return_scalar = True
            color = (color,)
        else:
            # color is vector-like
            return_scalar = False
        if self.dtype == np.uint8:
            pass
        elif self.dtype == np.uint16:
            # 255 * 257 == 65535, i.e. the full uint16 range is used
            color = tuple(value * 257 for value in color)
        elif dito.core.is_float_dtype(dtype=self.dtype):
            # map [0, 255] to [0.0, 1.0]
            color = tuple(value / 255.0 for value in color)
        else:
            raise TypeError("Invalid dtype '{}'".format(self.dtype))
        if return_scalar:
            assert len(color) == 1
            return color[0]
        else:
            return color
    def calculate_grid_parameters(self):
        """
        Return the tuple (grid_offset, grid_inner_offset, grid_inner_count),
        each a two-element list with one entry per image dimension.

        The grid is centered by splitting the size remainder evenly; if the
        ruler would overlap the outermost grid cells (ruler_size > offset),
        the "inner" grid region is shrunk by one cell on each side.
        """
        grid_offset = [(self.size[n_dim] % (2 * self.grid_size)) // 2 for n_dim in range(2)]
        grid_inner_offset = [grid_offset[n_dim] + self.grid_size if self.ruler_size > grid_offset[n_dim] else grid_offset[n_dim] for n_dim in range(2)]
        grid_inner_count = [(self.size[n_dim] - 2 * grid_offset[n_dim]) // self.grid_size - 2 if self.ruler_size > grid_offset[n_dim] else (self.size[n_dim] - 2 * grid_offset[n_dim]) // self.grid_size for n_dim in range(2)]
        return (grid_offset, grid_inner_offset, grid_inner_count)
    def get_grid_coords(self, index_x, index_y):
        """
        Return the pixel coordinates [x, y] of the inner grid cell with the
        given cell indices.
        """
        # negative values wrap around from the end, just like normal indexing
        if index_x < 0:
            index_x = index_x % self.grid_inner_count[0]
        if index_y < 0:
            index_y = index_y % self.grid_inner_count[1]
        return [self.grid_inner_offset[n_dim] + [index_x, index_y][n_dim] * self.grid_size for n_dim in range(2)]
    def generate_base_image(self):
        """
        Return the background image: the blue channel holds a vertical slope,
        the green channel a horizontal slope, and the red channel a
        multi-level grid where finer grid levels are drawn darker.
        """
        image_x = xslope(height=self.size[1], width=self.size[0], dtype=self.dtype)
        image_y = yslope(height=self.size[1], width=self.size[0], dtype=self.dtype)
        image_grid = None
        # combine four grid levels; for grid_size 16 the cell sizes are
        # 16/8/4/2 with grid colors 255/127/63/31 (before dtype adaptation)
        for n_grid_level in range(4):
            image_grid_level = grid(size=self.size, grid_size=self.grid_size // (2**n_grid_level), background_color=(0,), grid_color=self.adapt_color_for_dtype((2**(8 - n_grid_level) - 1,)), offset=self.grid_offset, dtype=self.dtype)
            if image_grid is None:
                image_grid = image_grid_level
            else:
                image_grid = np.maximum(image_grid, image_grid_level)
        image = dito.core.as_channels(b=image_y, g=image_x, r=image_grid)
        return image
    def draw_center_crosshair(self):
        """
        Draw concentric square markers around the image center.
        """
        for radius in (0, 2, 5, 9, 14):
            dito.draw.draw_symbol(image=self.image, symbol="square", position=self.image_center, radius=radius, color=self.adapt_color_for_dtype(self.line_color), thickness=1, line_type=cv2.LINE_8)
    def draw_rulers(self):
        """
        Draw pixel rulers along all four image borders.

        Every second row/column gets a tick whose length follows a triangular
        profile over each `ruler_size` period; the same tick is mirrored at
        the opposing border.
        """
        for n_dim in range(2):
            for n_index in range(0, self.image.shape[n_dim], 2):
                for n_channel in range(3):
                    indices = [None, None, n_channel]
                    indices[n_dim] = n_index
                    indices[2] = n_channel
                    # position within the current ruler period
                    n_index_corrected = (n_index // 2) % self.ruler_size
                    # odd tick length, triangular profile over the period
                    index = 2 * min(n_index_corrected, self.ruler_size - n_index_corrected) + 1
                    # tick at the near border ...
                    indices[1 - n_dim] = slice(None, index)
                    self.image[tuple(indices)] = self.adapt_color_for_dtype(self.line_color[n_channel])
                    # ... and the mirrored tick at the far border
                    indices[1 - n_dim] = slice(min(-1, -index + 1), None)
                    self.image[tuple(indices)] = self.adapt_color_for_dtype(self.line_color[n_channel])
    def draw_corner_identifier_texts(self):
        """
        Draw the labels 'TL', 'TR', 'BL', 'BR' into the corner cells of the
        inner grid so that flips/reflections can be detected.
        """
        text_kwargs = {"anchor": "lt", "font": "terminus-14", "style": "bold", "background_color": None, "background_as_outline": False}
        (text_x_left, text_y_top) = [int(coord + 1) for coord in self.get_grid_coords(0, 0)]
        (text_x_right, text_y_bottom) = [int(coord + 1) for coord in self.get_grid_coords(-1, -1)]
        self.image = dito.visual.text(image=self.image, message="TL", position=(text_x_left, text_y_top), **text_kwargs)
        self.image = dito.visual.text(image=self.image, message="TR", position=(text_x_right, text_y_top), **text_kwargs)
        self.image = dito.visual.text(image=self.image, message="BL", position=(text_x_left, text_y_bottom), **text_kwargs)
        self.image = dito.visual.text(image=self.image, message="BR", position=(text_x_right, text_y_bottom), **text_kwargs)
    def draw_gray_slopes(self):
        """
        Draw four gray slopes just inside the inner grid border (two
        horizontal with opposite directions, two vertical likewise) and
        label the cells along each slope (numbers for horizontal slopes,
        letters for vertical ones).

        Each slope is down- and re-upscaled by `grid_size`, so that every
        grid cell carries a single gray level (exact values depend on the
        interpolation used by `dito.core.resize`).
        """
        slopes = [
            {"coord_offset_from": (1, 0), "coord_offset_to": (-1, 1), "direction": "lr"},
            {"coord_offset_from": (1, -1), "coord_offset_to": (-1, self.grid_inner_count[1]), "direction": "rl"},
            {"coord_offset_from": (0, 1), "coord_offset_to": (1, -1), "direction": "ud"},
            {"coord_offset_from": (-1, 1), "coord_offset_to": (self.grid_inner_count[0], -1), "direction": "du"},
        ]
        for slope in slopes:
            (x_from, y_from) = self.get_grid_coords(*slope["coord_offset_from"])
            (x_to, y_to) = self.get_grid_coords(*slope["coord_offset_to"])
            if slope["direction"] in ("lr", "rl"):
                # horizontal slope, labeled with numbers 1..100 (wrapping)
                slope_image = xslope(height=self.grid_size - 1, width=abs(x_from - x_to), dtype=self.dtype)
                slope_image = dito.core.as_color(image=slope_image)
                if slope["direction"] == "lr":
                    slope_image = slope_image[:, ::-1, ...].copy()
                slope_image = dito.core.resize(dito.core.resize(slope_image, 1.0 / self.grid_size), dito.core.size(slope_image))
                for n_col in range(slope_image.shape[1] // self.grid_size):
                    (text_x, text_y) = dito.core.tir((n_col + 0.5) * self.grid_size, self.grid_size // 2)
                    slope_image = dito.visual.text(image=slope_image, message=str(n_col % 100 + 1), position=(text_x, text_y), anchor="cc", font="terminus-12", color=dito.visual.max_distant_color(color=slope_image[text_y, text_x, :]), background_color=None)
                self.image[(y_from + 1):y_to, (x_from + 1):(x_to + 1), :] = slope_image
            else:
                # vertical slope, labeled with letters A..Z (wrapping)
                slope_image = yslope(width=self.grid_size - 1, height=abs(y_from - y_to), dtype=self.dtype)
                slope_image = dito.core.as_color(image=slope_image)
                if slope["direction"] == "ud":
                    slope_image = slope_image[::-1, :, ...].copy()
                slope_image = dito.core.resize(dito.core.resize(slope_image, 1.0 / self.grid_size), dito.core.size(slope_image))
                for n_row in range(slope_image.shape[0] // self.grid_size):
                    (text_x, text_y) = dito.core.tir(self.grid_size // 2, (n_row + 0.5) * self.grid_size)
                    slope_image = dito.visual.text(image=slope_image, message=chr(ord("A") + n_row % 26), position=(text_x, text_y), anchor="cc", font="terminus-12", color=dito.visual.max_distant_color(color=slope_image[text_y, text_x, :]), background_color=None)
                self.image[(y_from + 1):(y_to + 1), (x_from + 1):x_to, :] = dito.core.as_color(image=slope_image)
    def draw_color_areas(self):
        """
        Draw labeled single-color cells (primaries, secondaries, white,
        black) around the image center for channel-order assessment, plus
        four random-noise cells generated via the `random` module ('S') and
        via NumPy ('N') for random-seed assessment.
        """
        areas = [
            {"color": (255, 0, 0), "text_color": (0, 0, 0), "text": "B", "coord_offset": (-1, -2)},
            {"color": (255, 255, 0), "text_color": (0, 0, 0), "text": "C", "coord_offset": (0, -2)},
            {"color": (0, 255, 0), "text_color": (0, 0, 0), "text": "G", "coord_offset": (1, -1)},
            {"color": (0, 255, 255), "text_color": (0, 0, 0), "text": "Y", "coord_offset": (1, 0)},
            {"color": (0, 0, 255), "text_color": (0, 0, 0), "text": "R", "coord_offset": (0, 1)},
            {"color": (255, 0, 255), "text_color": (0, 0, 0), "text": "M", "coord_offset": (-1, 1)},
            {"color": (255, 255, 255), "text_color": (0, 0, 0), "text": "W", "coord_offset": (-2, 0)},
            {"color": (0, 0, 0), "text_color": (255, 255, 255), "text": "K", "coord_offset": (-2, -1)},
        ]
        for area in areas:
            (x, y) = self.get_grid_coords(self.grid_inner_count[0] // 2 + area["coord_offset"][0], self.grid_inner_count[1] // 2 + area["coord_offset"][1])
            self.image[(y + 1):(y + self.grid_size), (x + 1):(x + self.grid_size), ...] = self.adapt_color_for_dtype(area["color"])
            # NOTE(review): this uses `dito.text` while every other method
            # uses `dito.visual.text` -- confirm both refer to the same helper
            self.image = dito.text(image=self.image, message=area["text"], position=(x + self.grid_size // 2 + 1, y + self.grid_size // 2 + 1), anchor="cc", font="terminus-14", style="bold", color=area["text_color"], background_color=None)
        # random areas
        text_kwargs = {"anchor": "lt", "font": "terminus-14", "style": "bold", "background_color": None, "background_as_outline": False}
        for coord_offset_y in (-2, 1):
            if coord_offset_y == -2:
                color = (0, 0, 0)
            else:
                color = (255, 255, 255)
            # generated using the random module
            (x, y) = self.get_grid_coords(self.grid_inner_count[0] // 2 + coord_offset_y, self.grid_inner_count[1] // 2 - 2)
            self.image[(y + 1):(y + self.grid_size), (x + 1):(x + self.grid_size), ...] = random_image(size=(self.grid_size - 1, self.grid_size - 1), color=True, dtype=self.dtype, use_standard_library=True)
            self.image = dito.visual.text(image=self.image, message="S", position=(x + 1 + self.grid_size // 4, y + 1), color=color, **text_kwargs)
            # generated using NumPy
            (x, y) = self.get_grid_coords(self.grid_inner_count[0] // 2 + coord_offset_y, self.grid_inner_count[1] // 2 + 1)
            self.image[(y + 1):(y + self.grid_size), (x + 1):(x + self.grid_size), ...] = random_image(size=(self.grid_size - 1, self.grid_size - 1), color=True, dtype=self.dtype, use_standard_library=False)
            self.image = dito.visual.text(image=self.image, message="N", position=(x + 1 + self.grid_size // 4, y + 2), color=color, **text_kwargs)
    def draw_rotation_indicators(self):
        """
        Draw two fans of near-vertical lines for rotation assessment: one
        with 5 degree and one with 1 degree angular resolution, with angles
        from -9 to +9 resolution steps.
        """
        for (n_resolution, resolution) in enumerate([5.0, 1.0]):
            # the second fan points in the opposite direction
            sign = (-1)**n_resolution
            (x0, y0) = self.get_grid_coords(self.grid_inner_count[0] // 2, self.grid_inner_count[1] // 2 - 2 + 4 * n_resolution)
            y0 += -1 + 2 * n_resolution
            radius = (self.min_inner_count // 2 - 3) * self.grid_size - 2
            for n_angle in range(-9, 10):
                # fan lines start slightly apart to keep them distinguishable
                x_from = x0 + 4 * n_angle
                y_from = y0
                angle_deg = sign * resolution * n_angle
                angle_rad = angle_deg * np.pi / 180.0
                x_to = x_from + sign * radius * np.cos(angle_rad - np.pi * 0.5)
                y_to = y_from + sign * radius * np.sin(angle_rad - np.pi * 0.5)
                cv2.line(img=self.image, pt1=dito.core.tir(x_from, y_from), pt2=dito.core.tir(x_to, y_to), color=self.adapt_color_for_dtype(self.line_color), thickness=1, lineType=cv2.LINE_AA)
    def draw_checkerboard_patterns(self):
        """
        Draw checkerboard cells with varying block sizes. Currently unused
        (see the commented-out call in `__init__`).
        """
        for side in (0, 1):
            for (n_resolution, resolution) in enumerate([1, 3, 5, 7]):
                (x, y) = self.get_grid_coords(self.grid_inner_count[0] // 2 - 3 + 5 * side, self.grid_inner_count[1] // 2 - 2 + n_resolution)
                self.image[(y + 1):(y + self.grid_size), (x + 1):(x + self.grid_size), ...] = dito.core.as_color(checkerboard(size=(15, 15), block_size=resolution + side))
def dito_test_image_v1(size=(384, 256), dtype=np.uint8):
    """
    Return the synthetic dito test image (version 1) for the given size and
    dtype.
    """
    generator = DitoTestImageGeneratorV1(size=size, dtype=dtype)
    return generator.image
####
#%%% real images
####
def pm5544():
    """
    Load and return the PM5544 test image registered in RESOURCES_FILENAMES.
    """
    filename = RESOURCES_FILENAMES["image:PM5544"]
    return dito.io.load(filename=filename)
def usc_sipi_beans():
    """
    Load and return the USC-SIPI image 4.1.07 registered in
    RESOURCES_FILENAMES.
    """
    filename = RESOURCES_FILENAMES["image:USC-SIPI-4.1.07"]
    return dito.io.load(filename=filename)
| [
"numpy.prod",
"numpy.repeat",
"numpy.random.rand",
"cv2.ellipse",
"cv2.circle",
"numpy.zeros",
"numpy.linspace",
"random.random",
"numpy.cos",
"numpy.sin",
"numpy.maximum"
] | [((6081, 6147), 'numpy.zeros', 'np.zeros', ([], {'shape': '((size[1], size[0]) + (channel_count,))', 'dtype': 'dtype'}), '(shape=(size[1], size[0]) + (channel_count,), dtype=dtype)\n', (6089, 6147), True, 'import numpy as np\n'), ((8137, 8235), 'numpy.linspace', 'np.linspace', ([], {'start': 'dtype_range[0]', 'stop': 'dtype_range[1]', 'num': 'width', 'endpoint': '(True)', 'dtype': 'dtype'}), '(start=dtype_range[0], stop=dtype_range[1], num=width, endpoint=\n True, dtype=dtype)\n', (8148, 8235), True, 'import numpy as np\n'), ((8280, 8322), 'numpy.repeat', 'np.repeat', ([], {'a': 'slope', 'repeats': 'height', 'axis': '(0)'}), '(a=slope, repeats=height, axis=0)\n', (8289, 8322), True, 'import numpy as np\n'), ((9096, 9138), 'numpy.zeros', 'np.zeros', ([], {'shape': '(288, 512)', 'dtype': 'np.uint8'}), '(shape=(288, 512), dtype=np.uint8)\n', (9104, 9138), True, 'import numpy as np\n'), ((7312, 7362), 'numpy.zeros', 'np.zeros', ([], {'shape': '(size[1], size[0])', 'dtype': 'np.uint8'}), '(shape=(size[1], size[0]), dtype=np.uint8)\n', (7320, 7362), True, 'import numpy as np\n'), ((8969, 8991), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (8983, 8991), True, 'import numpy as np\n'), ((9399, 9525), 'cv2.circle', 'cv2.circle', ([], {'img': 'image', 'center': '(center_x, center_y)', 'radius': 'radius', 'color': 'color', 'thickness': 'cv2.FILLED', 'lineType': 'cv2.LINE_8'}), '(img=image, center=(center_x, center_y), radius=radius, color=\n color, thickness=cv2.FILLED, lineType=cv2.LINE_8)\n', (9409, 9525), False, 'import cv2\n'), ((10081, 10266), 'cv2.ellipse', 'cv2.ellipse', ([], {'img': 'image', 'center': '(center_x, center_y)', 'axes': '(radius * 2, radius)', 'angle': '(0.0)', 'startAngle': '(0.0)', 'endAngle': '(360.0)', 'color': 'color', 'thickness': 'cv2.FILLED', 'lineType': 'cv2.LINE_8'}), '(img=image, center=(center_x, center_y), axes=(radius * 2,\n radius), angle=0.0, startAngle=0.0, endAngle=360.0, color=color,\n thickness=cv2.FILLED, 
lineType=cv2.LINE_8)\n', (10092, 10266), False, 'import cv2\n'), ((15657, 15697), 'numpy.maximum', 'np.maximum', (['image_grid', 'image_grid_level'], {}), '(image_grid, image_grid_level)\n', (15667, 15697), True, 'import numpy as np\n'), ((8853, 8868), 'random.random', 'random.random', ([], {}), '()\n', (8866, 8868), False, 'import random\n'), ((23895, 23926), 'numpy.cos', 'np.cos', (['(angle_rad - np.pi * 0.5)'], {}), '(angle_rad - np.pi * 0.5)\n', (23901, 23926), True, 'import numpy as np\n'), ((23975, 24006), 'numpy.sin', 'np.sin', (['(angle_rad - np.pi * 0.5)'], {}), '(angle_rad - np.pi * 0.5)\n', (23981, 24006), True, 'import numpy as np\n'), ((8884, 8898), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (8891, 8898), True, 'import numpy as np\n')] |
import time
import ctypes
from OpenGL.GL import *
from OpenGL.GL.shaders import *
import pygame
from pygame.locals import *
import numpy
# window dimensions in pixels (also used as the GL viewport size in draw())
width = 640
height = 480
def getFileContents(filename):
    """
    Return the entire contents of the text file at `filename` as a string.

    Uses a context manager so the file handle is closed deterministically
    (the original implementation left the handle open, relying on the
    garbage collector).
    """
    with open(filename, 'r') as f:
        return f.read()
def init():
    """
    Compile the vertex and fragment shaders, link them into a GL program,
    and set the clear color. Returns the linked program handle.
    """
    shaders = [
        compileShader(getFileContents("triangle.vert"), GL_VERTEX_SHADER),
        compileShader(getFileContents("triangle.frag"), GL_FRAGMENT_SHADER),
    ]
    program = glCreateProgram()
    for shader in shaders:
        glAttachShader(program, shader)
    glLinkProgram(program)
    # gray background
    glClearColor(0.3, 0.3, 0.3, 1.0)
    return program
def draw(program):
    """
    Upload a single red triangle (interleaved position + color vertex data)
    and render it to the window.
    """
    # interleaved layout per vertex: x, y, z, r, g, b (6 floats = 24 bytes)
    vertex_data = numpy.array(
        [0.0, 0.5, 0.0, 1.0, 0.0, 0.0,
         -0.5, -0.5, 0.0, 1.0, 0.0, 0.0,
         0.5, -0.5, 0.0, 1.0, 0.0, 0.0],
        numpy.float32)
    # NOTE(review): binding attribute locations after the program was linked
    # in init() has no effect until the next glLinkProgram call -- confirm
    # the driver happens to assign locations 0/1 anyway.
    glBindAttribLocation(program, 0, "vPosition")
    glBindAttribLocation(program, 1, "color")
    # create the buffer objects and record the layout in a VAO
    vbo = glGenBuffers(1)
    vao = glGenVertexArrays(1)
    glBindVertexArray(vao)
    glBindBuffer(GL_ARRAY_BUFFER, vbo)
    glBufferData(GL_ARRAY_BUFFER, vertex_data, GL_STATIC_DRAW)  # copy to GPU
    stride = 24  # bytes per vertex
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, ctypes.c_void_p(0))
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, stride, ctypes.c_void_p(12))
    glEnableVertexAttribArray(0)
    glEnableVertexAttribArray(1)
    # clear, draw, and present
    glViewport(0, 0, width, height)
    glClear(GL_COLOR_BUFFER_BIT)
    glUseProgram(program)
    glBindVertexArray(vao)
    glDrawArrays(GL_TRIANGLES, 0, 3)
    pygame.display.flip()
def main():
    """
    Open the OpenGL window and render in a loop until the user closes the
    window or presses ESC.

    Fix: the original loop only reacted to ESC; the window close button
    (pygame QUIT event) was silently ignored.
    """
    pygame.init()
    pygame.display.set_mode((width, height), HWSURFACE | OPENGL | DOUBLEBUF)
    program = init()
    running = True
    while running:
        draw(program)
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                running = False
    # shut down exactly once, after the render loop has finished
    pygame.quit()
# run the demo only when executed as a script (not on import)
if __name__ == '__main__':
    main()
| [
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"numpy.array",
"ctypes.c_void_p"
] | [((733, 859), 'numpy.array', 'numpy.array', (['[0.0, 0.5, 0.0, 1.0, 0.0, 0.0, -0.5, -0.5, 0.0, 1.0, 0.0, 0.0, 0.5, -0.5, \n 0.0, 1.0, 0.0, 0.0]', 'numpy.float32'], {}), '([0.0, 0.5, 0.0, 1.0, 0.0, 0.0, -0.5, -0.5, 0.0, 1.0, 0.0, 0.0, \n 0.5, -0.5, 0.0, 1.0, 0.0, 0.0], numpy.float32)\n', (744, 859), False, 'import numpy\n'), ((1686, 1707), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1705, 1707), False, 'import pygame\n'), ((1725, 1738), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1736, 1738), False, 'import pygame\n'), ((1743, 1815), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height)', '(HWSURFACE | OPENGL | DOUBLEBUF)'], {}), '((width, height), HWSURFACE | OPENGL | DOUBLEBUF)\n', (1766, 1815), False, 'import pygame\n'), ((1339, 1357), 'ctypes.c_void_p', 'ctypes.c_void_p', (['(0)'], {}), '(0)\n', (1354, 1357), False, 'import ctypes\n'), ((1415, 1434), 'ctypes.c_void_p', 'ctypes.c_void_p', (['(12)'], {}), '(12)\n', (1430, 1434), False, 'import ctypes\n'), ((1912, 1930), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1928, 1930), False, 'import pygame\n'), ((2085, 2098), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2096, 2098), False, 'import pygame\n')] |
#!/usr/bin/env python
# stdlib imports
import os.path
import sys
# third party imports
from openquake.hazardlib.geo.utils import OrthographicProjection
from openquake.hazardlib.gsim.abrahamson_2014 import AbrahamsonEtAl2014
from openquake.hazardlib.gsim.berge_thierry_2003 \
import BergeThierryEtAl2003SIGMA
import numpy as np
import pandas as pd
import pytest
import time
# local imports
from shakelib.distance import Distance
from shakelib.distance import get_distance
from shakelib.rupture.gc2 import _computeGC2
from shakelib.rupture.origin import Origin
from shakelib.rupture.point_rupture import PointRupture
from shakelib.rupture.quad_rupture import QuadRupture
from shakelib.sites import Sites
from impactutils.time.ancient_time import HistoricTime
# When False, the tests print the computed values instead of asserting
# against the stored reference arrays (useful for regenerating them).
do_tests = True
homedir = os.path.dirname(os.path.abspath(__file__))  # where is this script?
# repository root (two levels up); prepended so local imports resolve
shakedir = os.path.abspath(os.path.join(homedir, '..', '..'))
sys.path.insert(0, shakedir)
def test_san_fernando():
    """
    Check the GC2 coordinates U and T computed for a San-Fernando-like
    rupture (overlapping, discordant quad segments) against stored
    reference values, via both get_distance() and _computeGC2().
    """
    # This is a challenging rupture due to overlapping and discordant
    # segments, as brought up by <NAME>. Our initial
    # implementation put the origin on the wrong side of the rupture.
    # Quad corner coordinates in a local cartesian system (x0..x3/y0..y3
    # are the four corners of each of the two quads; z is depth in km).
    x0 = np.array([7.1845, 7.8693])
    y0 = np.array([-10.3793, -16.2096])
    z0 = np.array([3.0000, 0.0000])
    x1 = np.array([-7.8506, -7.5856])
    y1 = np.array([-4.9073, -12.0682])
    z1 = np.array([3.0000, 0.0000])
    x2 = np.array([-4.6129, -5.5149])
    y2 = np.array([3.9887, -4.3408])
    z2 = np.array([16.0300, 8.0000])
    x3 = np.array([10.4222, 9.9400])
    y3 = np.array([-1.4833, -8.4823])
    z3 = np.array([16.0300, 8.0000])
    epilat = 34.44000
    epilon = -118.41000
    # project the local cartesian corners back to lon/lat
    proj = OrthographicProjection(
        epilon - 1, epilon + 1, epilat + 1, epilat - 1)
    lon0, lat0 = proj(x0, y0, reverse=True)
    lon1, lat1 = proj(x1, y1, reverse=True)
    lon2, lat2 = proj(x2, y2, reverse=True)
    lon3, lat3 = proj(x3, y3, reverse=True)
    # Rupture requires an origin even when not used:
    origin = Origin({'id': 'test', 'lat': 0, 'lon': 0,
                     'depth': 5.0, 'mag': 7.0, 'netid': '',
                     'network': '', 'locstring': '',
                     'time': HistoricTime.utcfromtimestamp(int(time.time()))})
    rup = QuadRupture.fromVertices(
        lon0, lat0, z0, lon1, lat1, z1, lon2, lat2, z2, lon3, lat3, z3,
        origin)
    # Make a origin object; most of the 'event' values don't matter
    event = {'lat': 0, 'lon': 0, 'depth': 0, 'mag': 6.61,
             'id': '', 'locstring': '', 'type': 'ALL',
             'netid': '', 'network': '',
             'time': HistoricTime.utcfromtimestamp(int(time.time()))}
    origin = Origin(event)
    # Grid of sites (10x10, padded by 0.25 degrees around the rupture extent)
    buf = 0.25
    lat = np.linspace(np.nanmin(rup._lat) - buf, np.nanmax(rup._lat) + buf, 10)
    lon = np.linspace(np.nanmin(rup._lon) - buf, np.nanmax(rup._lon) + buf, 10)
    lons, lats = np.meshgrid(lon, lat)
    dep = np.zeros_like(lons)
    x, y = proj(lon, lat)
    rupx, rupy = proj(rup._lon[~np.isnan(rup._lon)],
                      rup._lat[~np.isnan(rup._lat)])
    # Calculate U and T
    dtypes = ['U', 'T']
    dists = get_distance(dtypes, lats, lons, dep, rup)
    # stored reference values for the GC2 'U' coordinate
    targetU = np.array(
        [[29.37395812, 22.56039569, 15.74545461, 8.92543078,
          2.09723735, -4.73938823, -11.58093887, -18.42177264,
          -25.25743913, -32.08635501],
         [31.84149137, 25.03129417, 18.22007124, 11.40292429,
          4.57583886, -2.26009972, -9.09790123, -15.92911065,
          -22.75071243, -29.56450963],
         [34.30623138, 27.49382948, 20.67774678, 13.85111535,
          7.0115472, 0.16942111, -6.65327488, -13.45181115,
          -20.24352643, -27.03530618],
         [36.78170249, 29.96380633, 23.1270492, 16.23906653,
          9.32934682, 2.41729624, -4.2732657, -10.94940844,
          -17.703852, -24.4792072],
         [39.29233805, 32.49155866, 25.68380903, 18.73823089,
          12.08780156, 5.99219619, -1.38387344, -8.28331275,
          -15.08759643, -21.87909368],
         [41.84662959, 35.09745097, 28.42432401, 21.98993679,
          15.2994003, 8.38037254, 1.3900846, -5.5601922,
          -12.4250749, -19.24690137],
         [44.41552101, 37.69652131, 31.0257236, 24.38573309,
          17.67059825, 10.84688716, 3.96604399, -2.920931,
          -9.78152208, -16.6132751],
         [46.97201328, 40.2558351, 33.55821495, 26.85923974,
          20.12416451, 13.33640001, 6.50905851, -0.33349597,
          -7.17138975, -13.99568321],
         [49.51154107, 42.79053584, 36.07536907, 29.35382731,
          22.61099757, 15.83894006, 9.04135415, 2.22928601,
          -4.58574545, -11.3959888],
         [52.03832734, 45.31289877, 38.58842009, 31.85764151,
          25.11309728, 18.35066231, 11.57145669, 4.78070229,
          -2.01505508, -8.81029694]])
    np.testing.assert_allclose(dists['U'], targetU, atol=0.01)
    # stored reference values for the GC2 'T' coordinate
    targetT = np.array(
        [[-40.32654805, -38.14066537, -35.95781299, -33.79265063,
          -31.65892948, -29.56075203, -27.48748112, -25.41823592,
          -23.33452174, -21.22822801],
         [-32.28894353, -30.06603457, -27.83163648, -25.61482279,
          -23.45367121, -21.36959238, -19.34738882, -17.33510593,
          -15.28949735, -13.20224592],
         [-24.30254163, -22.03532096, -19.70590091, -17.35907062,
          -15.10840929, -13.02682541, -11.13554925, -9.25705749,
          -7.26675455, -5.19396824],
         [-16.41306482, -14.1418547, -11.68888578, -8.9318195,
          -6.39939727, -4.10984325, -2.85061088, -1.29211846,
          0.68929792, 2.78115216],
         [-8.63784529, -6.5089946, -4.32108309, -1.44275161,
          -0.05102145, -0.20890633, 3.92700516, 6.36977183,
          8.55572399, 10.72128633],
         [-0.88135778, 1.06766314, 2.77955566, 3.8241835,
          5.99212478, 8.76823285, 11.54715599, 14.0961506,
          16.4200502, 18.65346494],
         [6.98140207, 8.91888936, 10.77724993, 12.6499521,
          14.79454638, 17.18482779, 19.63520498, 22.03525644,
          24.35152986, 26.60592498],
         [14.95635952, 16.95134069, 18.94768299, 20.99811237,
          23.15975573, 25.42700742, 27.74302905, 30.0547134,
          32.33583361, 34.58421221],
         [22.9921068, 25.0353212, 27.09829391, 29.20364631,
          31.3678744, 33.58684524, 35.8383652, 38.09736043,
          40.34713771, 42.58152772],
         [31.05186177, 33.1252095, 35.21960344, 37.34488267,
          39.50633206, 41.70076344, 43.91762786, 46.14415669,
          48.37021739, 50.59029205]])
    np.testing.assert_allclose(dists['T'], targetT, atol=0.01)
    # new method: the direct GC2 computation must agree with get_distance
    ddict = _computeGC2(rup, lons, lats, dep)
    np.testing.assert_allclose(ddict['T'], targetT, atol=0.01)
    np.testing.assert_allclose(ddict['U'], targetU, atol=0.01)
def test_exceptions():
    """
    Check that Distance/get_distance raise on invalid inputs:
    * a GMPE list entry that is not a GMPE instance
    * an unsupported distance type ('V')
    * site coordinate arrays with mismatching shapes
    * a first argument that is not a GMPE subclass
    """
    vs30file = os.path.join(homedir, 'distance_data/Vs30_test.grd')
    # small site grid centered near the event
    cx = -118.2
    cy = 34.1
    dx = 0.0083
    dy = 0.0083
    xspan = 0.0083 * 5
    yspan = 0.0083 * 5
    site = Sites.fromCenter(cx, cy, xspan, yspan, dx, dy,
                            vs30File=vs30file,
                            padding=True, resample=False)
    # Make souce instance
    lat0 = np.array([34.1])
    lon0 = np.array([-118.2])
    lat1 = np.array([34.2])
    lon1 = np.array([-118.15])
    z = np.array([1.0])
    W = np.array([3.0])
    dip = np.array([30.])
    # Rupture requires an origin even when not used:
    origin = Origin({'id': 'test', 'lat': 0, 'lon': 0,
                     'depth': 5.0, 'mag': 7.0, 'netid': '',
                     'network': '', 'locstring': '',
                     'time': HistoricTime.utcfromtimestamp(int(time.time()))})
    rup = QuadRupture.fromTrace(lon0, lat0, lon1, lat1, z, W, dip,
                                origin)
    event = {'lat': 34.1, 'lon': -118.2, 'depth': 1, 'mag': 6,
             'id': '', 'locstring': '', 'mech': 'RS',
             'rake': 90, 'netid': '', 'network': '',
             'time': HistoricTime.utcfromtimestamp(int(time.time()))}
    origin = Origin(event)
    # a plain string is not a valid GMPE
    gmpelist = ["Primate"]
    with pytest.raises(Exception) as e:  # noqa
        Distance.fromSites(gmpelist, origin, site, rup)
    gmpelist = [AbrahamsonEtAl2014()]
    sctx = site.getSitesContext()
    # 'V' is not a supported distance type
    dist_types = ['repi', 'rhypo', 'rjb', 'rrup', 'rx', 'ry', 'ry0', 'U', 'V']
    with pytest.raises(Exception) as e:  # noqa
        get_distance(dist_types, sctx.lats, sctx.lons,
                     np.zeros_like(sctx.lons), rup)
    # mismatching array shapes (lons sliced, lats full size)
    dist_types = ['repi', 'rhypo', 'rjb', 'rrup', 'rx', 'ry', 'ry0', 'U', 'T']
    with pytest.raises(Exception) as e:  # noqa
        get_distance(dist_types, sctx.lats, sctx.lons[0:4, ],
                     np.zeros_like(sctx.lons), rup)
    # Exception when not a GMPE subclass
    with pytest.raises(Exception) as e:  # noqa
        Distance([None], [-118.2], [34.1], [1], rupture=None)
def test_distance_no_rupture():
event = {'lat': 34.1, 'lon': -118.2, 'depth': 1, 'mag': 6,
'id': '', 'locstring': '', 'mech': 'RS',
'rake': 90, 'netid': '', 'network': '',
'time': HistoricTime.utcfromtimestamp(int(time.time()))}
origin = Origin(event)
origin.setMechanism('ALL')
# Make sites instance
vs30file = os.path.join(homedir, 'distance_data/Vs30_test.grd')
cx = -118.2
cy = 34.1
dx = 0.0083
dy = 0.0083
xspan = 0.0083 * 5
yspan = 0.0083 * 5
site = Sites.fromCenter(cx, cy, xspan, yspan, dx, dy,
vs30File=vs30file,
padding=True, resample=False)
# Make souce instance
# - Unknown/no tectonic region
# - Mech is ALL
gmpe = AbrahamsonEtAl2014()
rupture = PointRupture(origin)
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array([
[1.19350211e+00, 1.01453734e+00, 8.94306248e-01, 8.51431703e-01,
8.94306248e-01, 1.01453734e+00, 1.19350211e+00],
[9.23698454e-01, 6.97204114e-01, 5.32067867e-01, 4.69137288e-01,
5.32067867e-01, 6.97204114e-01, 9.23698454e-01],
[7.28251778e-01, 4.44114326e-01, 2.60572550e-01, 1.94977658e-01,
2.60572550e-01, 4.44114326e-01, 7.28251778e-01],
[6.54236979e-01, 3.39249542e-01, 1.57170497e-01, 1.98278110e-05,
1.57170497e-01, 3.39249542e-01, 6.54236979e-01],
[7.28338531e-01, 4.44167697e-01, 2.60583985e-01, 1.94977658e-01,
2.60583985e-01, 4.44167697e-01, 7.28338531e-01],
[9.23844143e-01, 6.97283640e-01, 5.32091716e-01, 4.69137288e-01,
5.32091716e-01, 6.97283640e-01, 9.23844143e-01],
[1.19368104e+00, 1.01462773e+00, 8.94331130e-01, 8.51431703e-01,
8.94331130e-01, 1.01462773e+00, 1.19368104e+00]])
if do_tests is True:
np.testing.assert_allclose(
rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array([
[4.0129619, 3.93137849, 3.87656959, 3.85702467, 3.87656959,
3.93137849, 4.0129619],
[3.88996841, 3.78671803, 3.71143853, 3.68275081, 3.71143853,
3.78671803, 3.88996841],
[3.80087151, 3.67134376, 3.60166506, 3.58311968, 3.60166506,
3.67134376, 3.80087151],
[3.7671309, 3.62390909, 3.57243062, 3.53580973, 3.57243062,
3.62390909, 3.7671309],
[3.80091105, 3.67136809, 3.60166829, 3.58311968, 3.60166829,
3.67136809, 3.80091105],
[3.89003482, 3.78675428, 3.7114494, 3.68275081, 3.7114494,
3.78675428, 3.89003482],
[4.01304347, 3.9314197, 3.87658093, 3.85702467, 3.87658093,
3.9314197, 4.01304347]])
if do_tests is True:
np.testing.assert_allclose(
rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
# Souce instance
# - Tectonic region unsupported
# - Mech is ALL
origin._tectonic_region = 'Volcano'
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjbt = np.array([
[1.19350211e+00, 1.01453734e+00, 8.94306248e-01, 8.51431703e-01,
8.94306248e-01, 1.01453734e+00, 1.19350211e+00],
[9.23698454e-01, 6.97204114e-01, 5.32067867e-01, 4.69137288e-01,
5.32067867e-01, 6.97204114e-01, 9.23698454e-01],
[7.28251778e-01, 4.44114326e-01, 2.60572550e-01, 1.94977658e-01,
2.60572550e-01, 4.44114326e-01, 7.28251778e-01],
[6.54236979e-01, 3.39249542e-01, 1.57170497e-01, 1.98278110e-05,
1.57170497e-01, 3.39249542e-01, 6.54236979e-01],
[7.28338531e-01, 4.44167697e-01, 2.60583985e-01, 1.94977658e-01,
2.60583985e-01, 4.44167697e-01, 7.28338531e-01],
[9.23844143e-01, 6.97283640e-01, 5.32091716e-01, 4.69137288e-01,
5.32091716e-01, 6.97283640e-01, 9.23844143e-01],
[1.19368104e+00, 1.01462773e+00, 8.94331130e-01, 8.51431703e-01,
8.94331130e-01, 1.01462773e+00, 1.19368104e+00]])
if do_tests is True:
np.testing.assert_allclose(
rjbt, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
# Souce instance
# - Tectonic region: active
# - Mech is ALL
origin.setMechanism('ALL')
origin._tectonic_region = 'Active Shallow Crust'
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array([
[1.19350211e+00, 1.01453734e+00, 8.94306248e-01, 8.51431703e-01,
8.94306248e-01, 1.01453734e+00, 1.19350211e+00],
[9.23698454e-01, 6.97204114e-01, 5.32067867e-01, 4.69137288e-01,
5.32067867e-01, 6.97204114e-01, 9.23698454e-01],
[7.28251778e-01, 4.44114326e-01, 2.60572550e-01, 1.94977658e-01,
2.60572550e-01, 4.44114326e-01, 7.28251778e-01],
[6.54236979e-01, 3.39249542e-01, 1.57170497e-01, 1.98278110e-05,
1.57170497e-01, 3.39249542e-01, 6.54236979e-01],
[7.28338531e-01, 4.44167697e-01, 2.60583985e-01, 1.94977658e-01,
2.60583985e-01, 4.44167697e-01, 7.28338531e-01],
[9.23844143e-01, 6.97283640e-01, 5.32091716e-01, 4.69137288e-01,
5.32091716e-01, 6.97283640e-01, 9.23844143e-01],
[1.19368104e+00, 1.01462773e+00, 8.94331130e-01, 8.51431703e-01,
8.94331130e-01, 1.01462773e+00, 1.19368104e+00]])
if do_tests is True:
np.testing.assert_allclose(
rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array([
[4.0129619, 3.93137849, 3.87656959, 3.85702467, 3.87656959,
3.93137849, 4.0129619],
[3.88996841, 3.78671803, 3.71143853, 3.68275081, 3.71143853,
3.78671803, 3.88996841],
[3.80087151, 3.67134376, 3.60166506, 3.58311968, 3.60166506,
3.67134376, 3.80087151],
[3.7671309, 3.62390909, 3.57243062, 3.53580973, 3.57243062,
3.62390909, 3.7671309],
[3.80091105, 3.67136809, 3.60166829, 3.58311968, 3.60166829,
3.67136809, 3.80091105],
[3.89003482, 3.78675428, 3.7114494, 3.68275081, 3.7114494,
3.78675428, 3.89003482],
[4.01304347, 3.9314197, 3.87658093, 3.85702467, 3.87658093,
3.9314197, 4.01304347]])
if do_tests is True:
np.testing.assert_allclose(
rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
# Souce instance
# - Tectonic region: active
# - Mech is RS
origin.setMechanism('RS')
origin._tectonic_region = 'Active Shallow Crust'
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array([
[7.76090807e-01, 6.49225734e-01, 5.63995966e-01, 5.33602932e-01,
5.63995966e-01, 6.49225734e-01, 7.76090807e-01],
[5.84831599e-01, 4.24273624e-01, 3.07211355e-01, 2.62600941e-01,
3.07211355e-01, 4.24273624e-01, 5.84831599e-01],
[4.46282784e-01, 2.44862590e-01, 1.32264468e-01, 9.99797788e-02,
1.32264468e-01, 2.44862590e-01, 4.46282784e-01],
[3.93814955e-01, 1.70987945e-01, 8.13717378e-02, 1.03958777e-05,
8.13717378e-02, 1.70987945e-01, 3.93814955e-01],
[4.46344282e-01, 2.44900424e-01, 1.32270097e-01, 9.99797788e-02,
1.32270097e-01, 2.44900424e-01, 4.46344282e-01],
[5.84934876e-01, 4.24329999e-01, 3.07228262e-01, 2.62600941e-01,
3.07228262e-01, 4.24329999e-01, 5.84934876e-01],
[7.76217650e-01, 6.49289812e-01, 5.64013604e-01, 5.33602932e-01,
5.64013604e-01, 6.49289812e-01, 7.76217650e-01]])
if do_tests is True:
np.testing.assert_allclose(
rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array([
[3.42235562, 3.338452, 3.28208435, 3.26198358, 3.28208435,
3.338452, 3.42235562],
[3.29586422, 3.18967743, 3.112257, 3.08275341, 3.112257,
3.18967743, 3.29586422],
[3.20423343, 3.07102195, 2.99912626, 2.97986242, 2.99912626,
3.07102195, 3.20423343],
[3.16953325, 3.02223204, 2.96875925, 2.92616469, 2.96875925,
3.02223204, 3.16953325],
[3.2042741, 3.07104698, 2.99912962, 2.97986242, 2.99912962,
3.07104698, 3.2042741],
[3.29593253, 3.18971471, 3.11226818, 3.08275341, 3.11226818,
3.18971471, 3.29593253],
[3.42243951, 3.33849438, 3.28209601, 3.26198358, 3.28209601,
3.33849438, 3.42243951]])
if do_tests is True:
np.testing.assert_allclose(
rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
    # Source instance
# - Tectonic region: active
# - Mech is NM
origin.setMechanism('NM')
origin._tectonic_region = 'Active Shallow Crust'
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array([
[8.32771820e-01, 6.96170087e-01, 6.04399092e-01, 5.71673449e-01,
6.04399092e-01, 6.96170087e-01, 8.32771820e-01],
[6.26833822e-01, 4.53953319e-01, 3.27906737e-01, 2.79872556e-01,
3.27906737e-01, 4.53953319e-01, 6.26833822e-01],
[4.77651641e-01, 2.60772819e-01, 1.38685718e-01, 1.03235484e-01,
1.38685718e-01, 2.60772819e-01, 4.77651641e-01],
[4.21157003e-01, 1.81206068e-01, 8.28029065e-02, 1.03958777e-05,
8.28029065e-02, 1.81206068e-01, 4.21157003e-01],
[4.77717859e-01, 2.60813557e-01, 1.38691898e-01, 1.03235484e-01,
1.38691898e-01, 2.60813557e-01, 4.77717859e-01],
[6.26945025e-01, 4.54014020e-01, 3.27924941e-01, 2.79872556e-01,
3.27924941e-01, 4.54014020e-01, 6.26945025e-01],
[8.32908398e-01, 6.96239083e-01, 6.04418084e-01, 5.71673449e-01,
6.04418084e-01, 6.96239083e-01, 8.32908398e-01]])
if do_tests is True:
np.testing.assert_allclose(
rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array([
[3.3192606, 3.22072248, 3.15452316, 3.13091641, 3.15452316,
3.22072248, 3.3192606],
[3.17070653, 3.0459986, 2.95507447, 2.92042485, 2.95507447,
3.0459986, 3.17070653],
[3.06309346, 2.90664719, 2.82107391, 2.79752673, 2.82107391,
2.90664719, 3.06309346],
[3.02234086, 2.84931729, 2.78395476, 2.73772697, 2.78395476,
2.84931729, 3.02234086],
[3.06314123, 2.90667658, 2.82107802, 2.79752673, 2.82107802,
2.90667658, 3.06314123],
[3.17078675, 3.04604238, 2.9550876, 2.92042485, 2.9550876,
3.04604238, 3.17078675],
[3.31935913, 3.22077225, 3.15453686, 3.13091641, 3.15453686,
3.22077225, 3.31935913]])
if do_tests is True:
np.testing.assert_allclose(
rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
    # Source instance
# - Tectonic region: active
# - Mech is SS
origin.setMechanism('SS')
origin._tectonic_region = 'Active Shallow Crust'
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array([
[1.95958776e+00, 1.66988434e+00, 1.47525745e+00, 1.40585328e+00,
1.47525745e+00, 1.66988434e+00, 1.95958776e+00],
[1.52283677e+00, 1.15619376e+00, 8.88875589e-01, 7.87005240e-01,
8.88875589e-01, 1.15619376e+00, 1.52283677e+00],
[1.20645289e+00, 7.46498734e-01, 4.23057706e-01, 2.95503135e-01,
4.23057706e-01, 7.46498734e-01, 1.20645289e+00],
[1.08663970e+00, 5.76051478e-01, 2.21984054e-01, 1.98278110e-05,
2.21984054e-01, 5.76051478e-01, 1.08663970e+00],
[1.20659332e+00, 7.46585130e-01, 4.23079943e-01, 2.95503135e-01,
4.23079943e-01, 7.46585130e-01, 1.20659332e+00],
[1.52307261e+00, 1.15632249e+00, 8.88914196e-01, 7.87005240e-01,
8.88914196e-01, 1.15632249e+00, 1.52307261e+00],
[1.95987741e+00, 1.67003067e+00, 1.47529773e+00, 1.40585328e+00,
1.47529773e+00, 1.67003067e+00, 1.95987741e+00]])
if do_tests is True:
np.testing.assert_allclose(
rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array([
[2.54969772, 2.27038241, 2.08273439, 2.01581889, 2.08273439,
2.27038241, 2.54969772],
[2.12860763, 1.77511159, 1.51737884, 1.41916133, 1.51737884,
1.77511159, 2.12860763],
[1.82356854, 1.38010729, 1.08693739, 0.97911408, 1.08693739,
1.38010729, 1.82356854],
[1.70805158, 1.21626476, 0.91696757, 0.78911491, 0.91696757,
1.21626476, 1.70805158],
[1.82370394, 1.38019059, 1.08695619, 0.97911408, 1.08695619,
1.38019059, 1.82370394],
[2.12883501, 1.77523571, 1.51741606, 1.41916133, 1.51741606,
1.77523571, 2.12883501],
[2.54997699, 2.27052349, 2.08277323, 2.01581889, 2.08277323,
2.27052349, 2.54997699]])
if do_tests is True:
np.testing.assert_allclose(
rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
    # Source instance
# - Tectonic region: stable
# - Mech is all
origin.setMechanism('ALL')
origin._tectonic_region = 'Stable Shallow Crust'
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array([
[1.49285078e+00, 1.26359361e+00, 1.10957536e+00, 1.05465228e+00,
1.10957536e+00, 1.26359361e+00, 1.49285078e+00],
[1.14722732e+00, 8.57083889e-01, 6.45541307e-01, 5.64926073e-01,
6.45541307e-01, 8.57083889e-01, 1.14722732e+00],
[8.96856520e-01, 5.32871196e-01, 2.99662245e-01, 2.17185537e-01,
2.99662245e-01, 5.32871196e-01, 8.96856520e-01],
[8.02042196e-01, 3.98587924e-01, 1.69648145e-01, 1.98278110e-05,
1.69648145e-01, 3.98587924e-01, 8.02042196e-01],
[8.96967653e-01, 5.32939565e-01, 2.99676623e-01, 2.17185537e-01,
2.99676623e-01, 5.32939565e-01, 8.96967653e-01],
[1.14741395e+00, 8.57185764e-01, 6.45571858e-01, 5.64926073e-01,
6.45571858e-01, 8.57185764e-01, 1.14741395e+00],
[1.49308000e+00, 1.26370940e+00, 1.10960724e+00, 1.05465228e+00,
1.10960724e+00, 1.26370940e+00, 1.49308000e+00]])
if do_tests is True:
np.testing.assert_allclose(
rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array([
[4.17967552, 4.07332411, 4.00187571, 3.97639713, 4.00187571,
4.07332411, 4.17967552],
[4.01934229, 3.88474601, 3.78661232, 3.74921526, 3.78661232,
3.88474601, 4.01934229],
[3.90319636, 3.73434515, 3.64558217, 3.62308648, 3.64558217,
3.73434515, 3.90319636],
[3.85921241, 3.67256434, 3.61012056, 3.57133422, 3.61012056,
3.67256434, 3.85921241],
[3.90324792, 3.73437686, 3.64558609, 3.62308648, 3.64558609,
3.73437686, 3.90324792],
[4.01942887, 3.88479327, 3.7866265, 3.74921526, 3.7866265,
3.88479327, 4.01942887],
[4.17978186, 4.07337783, 4.0018905, 3.97639713, 4.0018905,
4.07337783, 4.17978186]])
if do_tests is True:
np.testing.assert_allclose(
rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
    # Source instance
# - Tectonic region: stable
# - Mech is RS
origin.setMechanism('RS')
origin._tectonic_region = 'Stable Shallow Crust'
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array([
[1.11052523e+00, 9.25877479e-01, 8.01828481e-01, 7.57592465e-01,
8.01828481e-01, 9.25877479e-01, 1.11052523e+00],
[8.32154030e-01, 5.98467416e-01, 4.28087307e-01, 3.63158382e-01,
4.28087307e-01, 5.98467416e-01, 8.32154030e-01],
[6.30500991e-01, 3.37340822e-01, 1.69925286e-01, 1.20068361e-01,
1.69925286e-01, 3.37340822e-01, 6.30500991e-01],
[5.54135870e-01, 2.29725567e-01, 9.13321474e-02, 1.03958777e-05,
9.13321474e-02, 2.29725567e-01, 5.54135870e-01],
[6.30590499e-01, 3.37395888e-01, 1.69933978e-01, 1.20068361e-01,
1.69933978e-01, 3.37395888e-01, 6.30590499e-01],
[8.32304345e-01, 5.98549467e-01, 4.28111914e-01, 3.63158382e-01,
4.28111914e-01, 5.98549467e-01, 8.32304345e-01],
[1.11070985e+00, 9.25970743e-01, 8.01854154e-01, 7.57592465e-01,
8.01854154e-01, 9.25970743e-01, 1.11070985e+00]])
if do_tests is True:
np.testing.assert_allclose(
rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array([
[3.4885951, 3.37216961, 3.29395331, 3.26606128, 3.29395331,
3.37216961, 3.4885951],
[3.3130744, 3.16572856, 3.05829921, 3.01735974, 3.05829921,
3.16572856, 3.3130744],
[3.18592661, 3.00108105, 2.90341742, 2.87839095, 2.90341742,
3.00108105, 3.18592661],
[3.1377763, 2.9334351, 2.86396637, 2.81798622, 2.86396637,
2.9334351, 3.1377763],
[3.18598305, 3.00111577, 2.90342178, 2.87839095, 2.90342178,
3.00111577, 3.18598305],
[3.31316918, 3.16578029, 3.05831472, 3.01735974, 3.05831472,
3.16578029, 3.31316918],
[3.48871151, 3.37222842, 3.29396949, 3.26606128, 3.29396949,
3.37222842, 3.48871151]])
if do_tests is True:
np.testing.assert_allclose(
rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
    # Source instance
# - Tectonic region: stable
# - Mech is NM
origin.setMechanism('NM')
origin._tectonic_region = 'Stable Shallow Crust'
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array([
[1.12678662e+00, 9.39133949e-01, 8.13066202e-01, 7.68110298e-01,
8.13066202e-01, 9.39133949e-01, 1.12678662e+00],
[8.43885262e-01, 6.06395679e-01, 4.33242838e-01, 3.67257274e-01,
4.33242838e-01, 6.06395679e-01, 8.43885262e-01],
[6.38950562e-01, 3.41019564e-01, 1.70913434e-01, 1.20272659e-01,
1.70913434e-01, 3.41019564e-01, 6.38950562e-01],
[5.61342691e-01, 2.31653894e-01, 9.10846554e-02, 1.03958777e-05,
9.10846554e-02, 2.31653894e-01, 5.61342691e-01],
[6.39041527e-01, 3.41075526e-01, 1.70922263e-01, 1.20272659e-01,
1.70922263e-01, 3.41075526e-01, 6.39041527e-01],
[8.44038024e-01, 6.06479066e-01, 4.33267846e-01, 3.67257274e-01,
4.33267846e-01, 6.06479066e-01, 8.44038024e-01],
[1.12697424e+00, 9.39228730e-01, 8.13092292e-01, 7.68110298e-01,
8.13092292e-01, 9.39228730e-01, 1.12697424e+00]])
if do_tests is True:
np.testing.assert_allclose(
rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array([
[3.42781739, 3.30181908, 3.21717161, 3.18698623, 3.21717161,
3.30181908, 3.42781739],
[3.23786489, 3.07840387, 2.96214139, 2.91783576, 2.96214139,
3.07840387, 3.23786489],
[3.10026266, 2.9002186, 2.79362772, 2.76581535, 2.79362772,
2.9002186, 3.10026266],
[3.0481533, 2.82698693, 2.74978504, 2.70136713, 2.74978504,
2.82698693, 3.0481533],
[3.10032374, 2.90025617, 2.79363257, 2.76581535, 2.79363257,
2.90025617, 3.10032374],
[3.23796746, 3.07845986, 2.96215818, 2.91783576, 2.96215818,
3.07845986, 3.23796746],
[3.42794337, 3.30188272, 3.21718913, 3.18698623, 3.21718913,
3.30188272, 3.42794337]])
if do_tests is True:
np.testing.assert_allclose(
rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
    # Source instance
# - Tectonic region: stable
# - Mech is SS
origin.setMechanism('SS')
origin._tectonic_region = 'Stable Shallow Crust'
dists = Distance.fromSites(gmpe, site, rupture)
dctx = dists.getDistanceContext()
rjb = np.array([
[1.80104893e+00, 1.52092305e+00, 1.33273049e+00, 1.26562081e+00,
1.33273049e+00, 1.52092305e+00, 1.80104893e+00],
[1.37873685e+00, 1.02421498e+00, 7.65734302e-01, 6.67231768e-01,
7.65734302e-01, 1.02421498e+00, 1.37873685e+00],
[1.07281256e+00, 6.28064399e-01, 3.42919369e-01, 2.41987662e-01,
3.42919369e-01, 6.28064399e-01, 1.07281256e+00],
[9.56960370e-01, 4.63980672e-01, 1.83813296e-01, 1.98278110e-05,
1.83813296e-01, 4.63980672e-01, 9.56960370e-01],
[1.07294835e+00, 6.28147939e-01, 3.42936965e-01, 2.41987662e-01,
3.42936965e-01, 6.28147939e-01, 1.07294835e+00],
[1.37896489e+00, 1.02433946e+00, 7.65771633e-01, 6.67231768e-01,
7.65771633e-01, 1.02433946e+00, 1.37896489e+00],
[1.80132901e+00, 1.52106454e+00, 1.33276944e+00, 1.26562081e+00,
1.33276944e+00, 1.52106454e+00, 1.80132901e+00]])
if do_tests is True:
np.testing.assert_allclose(
rjb, dctx.rjb, rtol=0, atol=0.01)
else:
print(repr(dctx.rjb))
rrup = np.array([
[2.85894272, 2.62140075, 2.46181667, 2.4049088, 2.46181667,
2.62140075, 2.85894272],
[2.50082927, 2.20020077, 1.98101356, 1.89748509, 1.98101356,
2.20020077, 2.50082927],
[2.24141069, 1.86427183, 1.65402932, 1.59405522, 1.65402932,
1.86427183, 2.24141069],
[2.14317001, 1.72596453, 1.55948774, 1.48557451, 1.55948774,
1.72596453, 2.14317001],
[2.24152584, 1.86434267, 1.65403978, 1.59405522, 1.65403978,
1.86434267, 2.24152584],
[2.50102265, 2.20030633, 1.98104522, 1.89748509, 1.98104522,
2.20030633, 2.50102265],
[2.85918022, 2.62152073, 2.46184969, 2.4049088, 2.46184969,
2.62152073, 2.85918022]])
if do_tests is True:
np.testing.assert_allclose(
rrup, dctx.rrup, rtol=0, atol=0.01)
else:
print(repr(dctx.rrup))
def test_distance_from_sites_origin():
    """Regression test: distance context computed via Distance.fromSites().

    Builds a small Sites grid centered at (lon=-118.2, lat=34.1), a
    single-segment QuadRupture from a trace, and checks the rhypo, rx,
    rjb, ry0 and rrup arrays in the resulting distance context against
    hard-coded reference values (atol=0.01, presumably km — the units
    come from the project's Distance class; TODO confirm).
    """
    # Make sites instance
    vs30file = os.path.join(homedir, 'distance_data/Vs30_test.grd')
    cx = -118.2
    cy = 34.1
    dx = 0.0083
    dy = 0.0083
    # 5-cell span in each direction at the grid resolution
    xspan = 0.0083 * 5
    yspan = 0.0083 * 5
    site = Sites.fromCenter(cx, cy, xspan, yspan, dx, dy,
                           vs30File=vs30file,
                           padding=True, resample=False)
    # Make source instance (single quad from a surface trace)
    lat0 = np.array([34.1])
    lon0 = np.array([-118.2])
    lat1 = np.array([34.2])
    lon1 = np.array([-118.15])
    z = np.array([1.0])
    W = np.array([3.0])
    dip = np.array([30.])
    # Event details other than the hypocenter do not affect the distances;
    # 'time' just needs to be a valid HistoricTime.
    event = {'lat': 34.1, 'lon': -118.2, 'depth': 1, 'mag': 6,
             'id': '', 'locstring': '', 'mech': 'ALL',
             'netid': '', 'network': '',
             'time': HistoricTime.utcfromtimestamp(int(time.time()))}
    origin = Origin(event)
    rup = QuadRupture.fromTrace(lon0, lat0, lon1, lat1, z, W, dip, origin)
    gmpelist = [AbrahamsonEtAl2014(), BergeThierryEtAl2003SIGMA()]
    dists = Distance.fromSites(gmpelist, site, rup)
    dctx = dists.getDistanceContext()
    # Reference hypocentral distances for the 7x7 site grid
    rhypo = np.array([[3.74498133, 3.32896405, 3.05225679, 2.95426722,
                       3.05225679,
                       3.32896405, 3.74498133],
                      [3.11965436, 2.60558436, 2.24124201, 2.10583262,
                       2.24124201,
                       2.60558436, 3.11965436],
                      [2.67523213, 2.05265767, 1.564393, 1.36331682,
                       1.564393,
                       2.05265767, 2.67523213],
                      [2.50973226, 1.83166664, 1.26045653, 1., 1.26045653,
                       1.83166664, 2.50973226],
                      [2.67542717, 2.05277065, 1.56443006, 1.36331682,
                       1.56443006,
                       2.05277065, 2.67542717],
                      [3.11998886, 2.60576236, 2.24129374, 2.10583262,
                       2.24129374,
                       2.60576236, 3.11998886],
                      [3.74539929, 3.32917303, 3.05231378, 2.95426722,
                       3.05231378,
                       3.32917303, 3.74539929]])
    np.testing.assert_allclose(
        rhypo, dctx.rhypo, rtol=0, atol=0.01)
    # Reference Rx (signed perpendicular distance from the rupture strike)
    rx = np.array([[-3.18894050e+00, -2.48001769e+00, -1.77111874e+00,
                    -1.06224366e+00, -3.53392480e-01, 3.55434794e-01,
                    1.06423815e+00],
                   [-2.83506890e+00, -2.12607622e+00, -1.41710740e+00,
                    -7.08162466e-01, 7.58576362e-04, 7.09655709e-01,
                    1.41852892e+00],
                   [-2.48119723e+00, -1.77213470e+00, -1.06309603e+00,
                    -3.54081243e-01, 3.54909645e-01, 1.06387662e+00,
                    1.77281967e+00],
                   [-2.12732550e+00, -1.41819312e+00, -7.09084619e-01,
                    2.56774082e-12, 7.09060719e-01, 1.41809752e+00,
                    2.12711040e+00],
                   [-1.77345370e+00, -1.06425151e+00, -3.55073182e-01,
                    3.54081255e-01, 1.06321179e+00, 1.77231841e+00,
                    2.48140110e+00],
                   [-1.41958186e+00, -7.10309855e-01, -1.06172493e-03,
                    7.08162516e-01, 1.41736285e+00, 2.12653927e+00,
                    2.83569175e+00],
                   [-1.06570997e+00, -3.56368176e-01, 3.52949744e-01,
                    1.06224377e+00, 1.77151390e+00, 2.48076010e+00,
                    3.18998236e+00]])
    np.testing.assert_allclose(
        rx, dctx.rx, rtol=0, atol=0.01)
    # Reference Joyner-Boore distances
    rjb = np.array([[3.19372137e+00, 2.48373511e+00, 1.77377308e+00,
                     1.06383562e+00, 3.53925643e-01, 2.25816823e-03,
                     2.45009861e-03],
                    [2.83931844e+00, 2.12926243e+00, 1.41923064e+00,
                     7.09223517e-01, 1.57594916e-03, 1.86044244e-03,
                     2.05239165e-03],
                    [2.48510934e+00, 1.77479025e+00, 1.06468863e+00,
                     3.54611655e-01, 1.04375185e-03, 1.32827303e-03,
                     1.52024106e-03],
                    [2.30690967e+00, 1.53793979e+00, 7.68969896e-01,
                     5.88918451e-12, 3.77111295e-04, 6.61660373e-04,
                     8.53647223e-04],
                    [2.48531877e+00, 1.79442084e+00, 1.20242597e+00,
                     8.54793253e-01, 5.62052963e-01, 2.69254693e-01,
                     5.26105100e-05],
                    [2.95646628e+00, 2.40489915e+00, 2.00231070e+00,
                     1.70958533e+00, 1.41681634e+00, 1.12398937e+00,
                     8.63761551e-01],
                    [3.60741953e+00, 3.17112489e+00, 2.85711592e+00,
                     2.56437623e+00, 2.27157856e+00, 1.97872291e+00,
                     1.78518260e+00]])
    np.testing.assert_allclose(
        rjb, dctx.rjb, rtol=0, atol=0.01)
    # Reference Ry0 (zero off the ends of the rupture projection)
    ry0 = np.array([[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
                     0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
                     0.00000000e+00],
                    [0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
                     0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
                     0.00000000e+00],
                    [2.29490054e-02, 0.00000000e+00, 0.00000000e+00,
                     0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
                     0.00000000e+00],
                    [8.79341006e-01, 5.86285236e-01, 2.93171565e-01,
                     6.21003581e-12, 0.00000000e+00, 0.00000000e+00,
                     0.00000000e+00],
                    [1.73573289e+00, 1.44264826e+00, 1.14950573e+00,
                     8.56305300e-01, 5.63046975e-01, 2.69730762e-01,
                     0.00000000e+00],
                    [2.59212463e+00, 2.29901116e+00, 2.00583977e+00,
                     1.71261048e+00, 1.41932329e+00, 1.12597821e+00,
                     8.32575235e-01],
                    [3.44851622e+00, 3.15537391e+00, 2.86217367e+00,
                     2.56891553e+00, 2.27559947e+00, 1.98222553e+00,
                     1.68879368e+00]])
    np.testing.assert_allclose(
        ry0, dctx.ry0, rtol=0, atol=0.01)
    # Reference closest-distance-to-rupture values
    rrup = np.array([[3.34678672, 2.67788811, 2.03697073, 1.46129187,
                      1.06271102,
                      1.06352692, 1.40073832],
                     [3.01030105, 2.3526499, 1.73673635, 1.22706347,
                      1.00157564,
                      1.22283363, 1.57764099],
                     [2.67858182, 2.03712377, 1.46095502, 1.06170931,
                      1.06220616,
                      1.39958479, 1.75442695],
                     [2.51415965, 1.8343632, 1.26143652, 1., 1.2212501,
                      1.57621925, 1.9310962],
                     [2.67877609, 2.05412785, 1.56384179, 1.3617346,
                      1.50608502,
                      1.77308319, 2.10764873],
                     [3.12078859, 2.6043486, 2.23799413, 2.09885629,
                      2.11696797,
                      2.23191013, 2.4299612],
                     [3.74318473, 3.32482368, 3.04635272, 2.9183523,
                      2.86659485,
                      2.88815116, 2.98141559]])
    np.testing.assert_allclose(
        rrup, dctx.rrup, rtol=0, atol=0.01)
def test_chichi_with_get_distance():
    """Compare get_distance() output for Chi-Chi (EQID 137) to NGA-West2.

    Reads the multi-quad Chi-Chi rupture from a fixed-width polygon file,
    builds a QuadRupture from its vertices, then compares repi, rhypo,
    rjb, rrup, rx, U and T at the NGA-West2 station locations against the
    values in the NGAW2 distance flatfile (loose tolerances, atol=2..6).
    ry and ry0 are checked against locally precomputed reference arrays
    because the flatfile does not provide them.
    """
    # read in rupture file
    f = os.path.join(homedir, 'distance_data/0137A.POL')
    # Column specs: 9 quads x 3 coordinates, 11 characters per field
    i0 = np.arange(0, 9 * 11 * 3, 11)
    i1 = i0 + 10
    cs = list(zip(i0, i1))
    df = pd.read_fwf(f, cs, skiprows=2, nrows=5, header=None)
    mat = df.values
    # Index triplets selecting x, y, z columns for each quad
    ix = np.arange(0, 9 * 3, 3)
    iy = ix + 1
    iz = ix + 2
    # Rows 0..3 hold the four corners of each quad
    x0 = mat[0, ix]
    x1 = mat[1, ix]
    x2 = mat[2, ix]
    x3 = mat[3, ix]
    y0 = mat[0, iy]
    y1 = mat[1, iy]
    y2 = mat[2, iy]
    y3 = mat[3, iy]
    # Depth, positive down
    z0 = np.abs(mat[0, iz])
    z1 = np.abs(mat[1, iz])
    z2 = np.abs(mat[2, iz])
    z3 = np.abs(mat[3, iz])
    epilat = 23.85
    epilon = 120.82
    # The .POL file is in projected (x, y) km; convert back to lon/lat
    proj = OrthographicProjection(
        epilon - 1, epilon + 1, epilat + 1, epilat - 1)
    lon0, lat0 = proj(x0, y0, reverse=True)
    lon1, lat1 = proj(x1, y1, reverse=True)
    lon2, lat2 = proj(x2, y2, reverse=True)
    lon3, lat3 = proj(x3, y3, reverse=True)
    # event information doesn't matter except hypocenter
    event = {'lat': 23.85, 'lon': 120.82, 'depth': 8, 'mag': 7.62,
             'id': '', 'locstring': '', 'mech': 'ALL',
             'netid': '', 'network': '',
             'time': HistoricTime.utcfromtimestamp(int(time.time()))}
    origin = Origin(event)
    rup = QuadRupture.fromVertices(
        lon0, lat0, z0, lon1, lat1, z1, lon2, lat2, z2, lon3, lat3, z3,
        origin)
    # Get NGA distances
    distfile = os.path.join(homedir, 'distance_data/NGAW2_distances.csv')
    df = pd.read_csv(distfile)
    df2 = df.loc[df['EQID'] == 137]
    slat = df2['Station Latitude'].values
    slon = df2['Station Longitude'].values
    sdep = np.zeros(slat.shape)
    nga_repi = df2['EpiD (km)'].values
    nga_rhypo = df2['HypD (km)'].values
    nga_rrup = df2['ClstD (km)'].values
    nga_rjb = df2['Joyner-Boore Dist. (km)'].values
    # In the NGA-West2 flatfile the GC2 'T' coordinate serves as Rx
    nga_rx = df2['T'].values
    nga_T = df2['T'].values
    nga_U = df2['U'].values
    # Locally precomputed reference for 'ry' (not in the NGA flatfile)
    test_ry = np.array([
        -49.25445446, -76.26871272, -37.1288192, -53.47792996,
        -50.30711637, -63.96322125, -61.01988704, -81.2001781,
        -76.00646939, -74.39038054, -92.23617124, -90.66976945,
        -89.68551411, -102.98798328, -114.70036085, -29.83636082,
        -28.50133134, -27.86922916, -36.00619214, -44.68826209,
        -47.64580208, -53.92619079, -59.11962858, -55.90584822,
        -55.00772025, -48.81756715, -59.27542007, -62.13633659,
        -70.0673351, -75.96977638, -61.6959293, -60.34564074,
        -81.49792285, -78.75933138, -80.80533738, -85.24473008,
        -94.07519297, -93.75010471, -96.87089883, -100.06112271,
        -98.86980873, -95.92330113, -107.44086722, -119.1065369,
        -120.60405905, -113.42995442, -115.94930662, -115.2398216,
        -107.37840927, -49.25445446, -48.78386688, -108.49133002,
        -88.03303353, -44.66653428, -81.04476548, -38.26801619,
        -70.51178983, -69.15679931, -74.74562139, -86.51133446,
        -27.62153029, -48.33279375, -30.0808298, -113.98345018,
        -97.96609537, -87.9863122, -39.45970018, -80.1387617,
        -42.27121388, -82.05027834, -81.55987067, -81.55987067,
        -107.25255717, 67.62695516, -3.27797047, -197.98554369,
        82.30996151, 18.42180605, -22.88851072, -35.75245916,
        -19.54788146, -18.19780517, 19.85077702, 20.33310282,
        19.95448398, 20.55508903, 18.17428572, 17.87997374,
        16.97323804, 16.0025885, 13.88001846, 18.42180605,
        -3.27797047, 51.43098894, 28.97695533, -53.20579538,
        38.7537468, 33.48878882, 26.25189111, 22.54251612,
        13.37141837, -5.80928302, -6.68056794, -14.50860117,
        -15.23992093, -27.63281952, -11.66075049, -36.94595337,
        -40.97168031, -41.2814342, -48.64456898, -61.55777751,
        -11.15038984, -17.16482959, 55.84202839, 36.78540588,
        21.18550074, 19.14658833, 19.22680282, 5.76327358,
        -47.45309937, -44.33194991, -55.15852372, 37.33066096,
        37.64135657, 14.31598698, 4.60495737, 6.87107021,
        18.42180605, 113.59285783, 109.06420877, 104.23416509,
        99.21599973, 95.25204545, 90.29487934, 86.26977557,
        95.28705209, 87.12907925, 101.40561896, 96.68858152,
        92.90287952, 100.36659012, 97.19448577, 92.8627461,
        85.01448355, 93.36767736, 96.90824009, 86.48002825,
        88.71037964, 106.17282325, 102.56142319, 97.60004093,
        99.61798574, 97.36337239, 94.22000798, 86.99488734,
        90.05981676, 90.51189502, 100.7166391, 100.31931988,
        67.62695516, 94.15062409, 87.77053675, 124.21806013,
        99.23108884, 101.48199452, 92.63771423, 78.88723272,
        72.7261356, 80.58682246, 73.30258213, 70.20783518,
        60.57963211, -87.72265602, -148.10933308, -150.41334959,
        -144.12558375, -145.5625388, -132.09479688, -135.12980144,
        -121.10883695, -143.75755221, -117.73616176, -115.28563276,
        -138.79652905, -143.10405603, -151.78419035, -159.75299736,
        -149.69457229, -175.20332448, -181.00970647, -188.86536942,
        -176.88178468, -194.20978527, -204.54944453, -161.04413103,
        -197.98554369, -96.74089367, -133.49237232, -84.71198922,
        -164.97719097, -202.48241157, -74.54550169, -147.37402934,
        -144.64074441, -147.94282804, -122.80976842, -133.1671346,
        -136.3051809, -113.93174768, -151.02125407, -146.5198829,
        -156.19720713, -126.06138725, -131.44422964, -197.62591198,
        -204.42320856, -149.84576063, -121.56474664, -130.99947339,
        -148.41767074, -145.28448367, 104.58903799, 82.1649906,
        67.69977397, 39.46989193, -69.00949731, -133.49237232,
        -128.264754, -84.71198922, -108.49133002, 119.86128724,
        122.73556155, 126.28254009, 125.12436373, 123.32498578,
        123.8337768, 121.39931427, 121.48412837, 122.03669249,
        122.59675818, 119.54338365, 120.33961222, 120.69581745,
        116.96928355, 117.6687724, 116.62277942, 115.39650689,
        112.60751523, 109.82643069, 108.2500678, 130.9143614,
        126.50049543, 112.76229057, 132.76840098, 107.27099883,
        128.16063464, 123.83157143, 120.46711628, 112.55756637,
        135.59953867, 136.66138116, 136.98573162, 134.04528777,
        116.27744752, 129.2794577, 119.13550981, 124.67196321,
        130.9728774, 130.9039439, 128.70028371, 130.04592892,
        140.21819548, 140.60370422, 113.37585901, 123.21523323,
        123.88149248, 128.56894995, 128.45186255, 118.74080853,
        126.71189149, 119.79833338, 130.00866791, -160.01242472,
        13.55424709, 110.26938756, 97.71987778, 110.93671325,
        108.71965725, 105.03432063, 106.36049687, 99.27569343,
        115.06168146, 77.00378531, 81.50139192, 92.15605815,
        79.94311644, 83.16892433, 52.23389149, 50.97110177,
        67.95167063, 63.43930833, 40.20494692, 43.22936492,
        47.21513635, 38.94380012, 53.85489136, 56.69935207,
        48.07036522, 64.46887891, 14.98020647, 17.35046801,
        16.15236633, 14.41062231, 19.99605739, 18.31076661,
        15.07058247, 12.34339267, 13.57621451, 14.72685201,
        22.04539325, 20.47982142, 9.66768974, 8.05139052,
        29.22924869, 3.75876894, 7.8610467, 29.20272495,
        15.19325822, -2.38981899, 5.58349359, -0.62239018,
        -4.38178769, -11.43651893, -20.07048519, -16.0588668,
        82.30996151, 13.55424709, 104.49355303, -11.29628168,
        82.1649906, 34.22207039, 38.08490923, -10.15855131,
        111.0308369, 81.78397481, 73.56334665, 81.27164139,
        74.55979012, 16.08437955, 23.8203941, 24.68836209,
        28.73767914, 21.06714416, 19.44159522, 4.62135887,
        3.41771413, 5.051121, -6.81834189, 6.40341853,
        -0.35693923, -17.74409367, -8.91759817, -18.05278804,
        7.70695248, -5.52733835, -16.02924961, -4.54310111,
        -22.84234773, -1.71908199, 39.46989193, -14.74007542,
        23.59992543, -10.49966883, -11.47733869, -22.8200901,
        -9.72486483, 95.96997763, -115.36487081, -52.88924268,
        -90.2275069, -132.22657274, -100.52455976, -115.24052939,
        -113.84482359, -114.41088165, -114.63386688, -115.92829006,
        -117.52597227, -114.49770514, -114.46881502, -76.26871272,
        -115.36487081, -160.01242472, -110.6429636, -77.47722955,
        -80.24672646, -85.90422427, -94.92075147, -102.44309541,
        -106.23741455, -111.56110193, -115.13402727, -48.64043046,
        -60.86151946, -66.52137871, -110.04628212, -75.27694696,
        -78.87041369, -88.08700161, -90.18844188, -93.65776393,
        -92.58976279, -107.31364843, -115.04064471, -125.98500718,
        -75.9341032, -39.45970018, -14.74007542, -23.16835763])
    # Locally precomputed reference for 'ry0' (not in the NGA flatfile)
    test_ry0 = np.array([
        5.38783354, 32.4020918, 0., 9.61130904,
        6.44049545, 20.09660033, 17.15326613, 37.33355718,
        32.13984847, 30.52375962, 48.36955032, 46.80314854,
        45.81889319, 59.12136236, 70.83373993, 0.,
        0., 0., 0., 0.82164117,
        3.77918116, 10.05956987, 15.25300766, 12.0392273,
        11.14109933, 4.95094623, 15.40879915, 18.26971567,
        26.20071419, 32.10315546, 17.82930838, 16.47901983,
        37.63130193, 34.89271046, 36.93871646, 41.37810916,
        50.20857205, 49.88348379, 53.00427791, 56.19450179,
        55.00318781, 52.05668021, 63.5742463, 75.23991598,
        76.73743813, 69.5633335, 72.0826857, 71.37320068,
        63.51178836, 5.38783354, 4.91724596, 64.6247091,
        44.16641261, 0.79991336, 37.17814456, 0.,
        26.64516892, 25.2901784, 30.87900047, 42.64471355,
        0., 4.46617283, 0., 70.11682926,
        54.09947445, 44.11969128, 0., 36.27214079,
        0., 38.18365743, 37.69324975, 37.69324975,
        63.38593626, 31.95985109, 0., 154.11892278,
        46.64285745, 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        0., 15.76388487, 0., 9.33917446,
        3.08664273, 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 4.77794806, 17.69115659,
        0., 0., 20.17492433, 1.11830182,
        0., 0., 0., 0.,
        3.58647845, 0.46532899, 11.2919028, 1.6635569,
        1.97425251, 0., 0., 0.,
        0., 77.92575377, 73.39710471, 68.56706103,
        63.54889567, 59.58494138, 54.62777528, 50.6026715,
        59.61994802, 51.46197518, 65.7385149, 61.02147746,
        57.23577546, 64.69948606, 61.52738171, 57.19564204,
        49.34737949, 57.7005733, 61.24113602, 50.81292419,
        53.04327558, 70.50571919, 66.89431913, 61.93293686,
        63.95088168, 61.69626833, 58.55290391, 51.32778327,
        54.3927127, 54.84479095, 65.04953504, 64.65221582,
        31.95985109, 58.48352003, 52.10343269, 88.55095607,
        63.56398477, 65.81489046, 56.97061016, 43.22012866,
        37.05903154, 44.9197184, 37.63547806, 34.54073112,
        24.91252804, 43.85603511, 104.24271216, 106.54672867,
        100.25896283, 101.69591788, 88.22817597, 91.26318052,
        77.24221603, 99.89093129, 73.86954084, 71.41901185,
        94.92990813, 99.23743511, 107.91756944, 115.88637645,
        105.82795138, 131.33670356, 137.14308555, 144.9987485,
        133.01516376, 150.34316435, 160.68282361, 117.17751011,
        154.11892278, 52.87427275, 89.6257514, 40.8453683,
        121.11057005, 158.61579065, 30.67888078, 103.50740842,
        100.77412349, 104.07620713, 78.9431475, 89.30051368,
        92.43855998, 70.06512676, 107.15463315, 102.65326198,
        112.33058622, 82.19476634, 87.57760872, 153.75929106,
        160.55658764, 105.97913971, 77.69812572, 87.13285248,
        104.55104982, 101.41786275, 68.92193392, 46.49788654,
        32.0326699, 3.80278787, 25.14287639, 89.6257514,
        84.39813309, 40.8453683, 64.6247091, 84.19418317,
        87.06845748, 90.61543602, 89.45725966, 87.65788171,
        88.16667274, 85.73221021, 85.81702431, 86.36958842,
        86.92965411, 83.87627959, 84.67250815, 85.02871339,
        81.30217949, 82.00166833, 80.95567535, 79.72940282,
        76.94041117, 74.15932662, 72.58296373, 95.24725733,
        90.83339137, 77.0951865, 97.10129692, 71.60389476,
        92.49353057, 88.16446736, 84.80001222, 76.89046231,
        99.93243461, 100.9942771, 101.31862755, 98.37818371,
        80.61034346, 93.61235363, 83.46840575, 89.00485915,
        95.30577334, 95.23683984, 93.03317965, 94.37882485,
        104.55109142, 104.93660016, 77.70875494, 87.54812917,
        88.21438842, 92.90184589, 92.78475848, 83.07370447,
        91.04478743, 84.13122931, 94.34156384, 116.14580381,
        0., 74.60228349, 62.05277372, 75.26960919,
        73.05255319, 69.36721657, 70.69339281, 63.60858937,
        79.3945774, 41.33668124, 45.83428785, 56.48895409,
        44.27601238, 47.50182027, 16.56678743, 15.30399771,
        32.28456656, 27.77220427, 4.53784286, 7.56226086,
        11.54803229, 3.27669605, 18.1877873, 21.032248,
        12.40326116, 28.80177485, 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        46.64285745, 0., 68.82644897, 0.,
        46.49788654, 0., 2.41780516, 0.,
        75.36373283, 46.11687074, 37.89624258, 45.60453732,
        38.89268605, 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 0., 0.,
        0., 0., 3.80278787, 0.,
        0., 0., 0., 0.,
        0., 60.30287357, 71.49824989, 9.02262176,
        46.36088598, 88.35995182, 56.65793884, 71.37390848,
        69.97820268, 70.54426073, 70.76724596, 72.06166914,
        73.65935135, 70.63108422, 70.6021941, 32.4020918,
        71.49824989, 116.14580381, 66.77634268, 33.61060864,
        36.38010555, 42.03760335, 51.05413055, 58.57647449,
        62.37079364, 67.69448101, 71.26740635, 4.77380954,
        16.99489854, 22.65475779, 66.1796612, 31.41032604,
        35.00379277, 44.22038069, 46.32182096, 49.79114301,
        48.72314188, 63.44702751, 71.1740238, 82.11838626,
        32.06748228, 0., 0., 0.])
    dist_types = ['repi', 'rhypo', 'rjb', 'rrup', 'rx', 'ry', 'ry0', 'U', 'T']
    dists = get_distance(dist_types, slat, slon, sdep, rup)
    # Tolerances are loose (km) because the rupture geometry here is
    # reconstructed from the .POL file, not identical to the NGA model.
    np.testing.assert_allclose(
        nga_repi, dists['repi'], rtol=0, atol=2)
    np.testing.assert_allclose(
        nga_rhypo, dists['rhypo'], rtol=0, atol=2)
    np.testing.assert_allclose(
        nga_rjb, dists['rjb'], rtol=0, atol=2)
    np.testing.assert_allclose(
        nga_rrup, dists['rrup'], rtol=0, atol=2)
    np.testing.assert_allclose(
        nga_rx, dists['rx'], rtol=0, atol=2)
    np.testing.assert_allclose(
        test_ry, dists['ry'], rtol=0, atol=2)
    np.testing.assert_allclose(
        test_ry0, dists['ry0'], rtol=0, atol=2)
    # U differs more between implementations; widest tolerance here
    np.testing.assert_allclose(
        nga_U, dists['U'], rtol=0, atol=6)
    np.testing.assert_allclose(
        nga_T, dists['T'], rtol=0, atol=2)
# Allow running the whole distance test suite directly as a script.
if __name__ == "__main__":
    test_san_fernando()
    test_exceptions()
    test_distance_no_rupture()
    test_distance_from_sites_origin()
    test_chichi_with_get_distance()
| [
"shakelib.rupture.gc2._computeGC2",
"sys.path.insert",
"pandas.read_csv",
"numpy.array",
"openquake.hazardlib.gsim.abrahamson_2014.AbrahamsonEtAl2014",
"openquake.hazardlib.geo.utils.OrthographicProjection",
"numpy.nanmin",
"numpy.arange",
"shakelib.distance.get_distance",
"shakelib.distance.Dista... | [((923, 951), 'sys.path.insert', 'sys.path.insert', (['(0)', 'shakedir'], {}), '(0, shakedir)\n', (938, 951), False, 'import sys\n'), ((1181, 1207), 'numpy.array', 'np.array', (['[7.1845, 7.8693]'], {}), '([7.1845, 7.8693])\n', (1189, 1207), True, 'import numpy as np\n'), ((1217, 1247), 'numpy.array', 'np.array', (['[-10.3793, -16.2096]'], {}), '([-10.3793, -16.2096])\n', (1225, 1247), True, 'import numpy as np\n'), ((1257, 1277), 'numpy.array', 'np.array', (['[3.0, 0.0]'], {}), '([3.0, 0.0])\n', (1265, 1277), True, 'import numpy as np\n'), ((1293, 1321), 'numpy.array', 'np.array', (['[-7.8506, -7.5856]'], {}), '([-7.8506, -7.5856])\n', (1301, 1321), True, 'import numpy as np\n'), ((1331, 1360), 'numpy.array', 'np.array', (['[-4.9073, -12.0682]'], {}), '([-4.9073, -12.0682])\n', (1339, 1360), True, 'import numpy as np\n'), ((1370, 1390), 'numpy.array', 'np.array', (['[3.0, 0.0]'], {}), '([3.0, 0.0])\n', (1378, 1390), True, 'import numpy as np\n'), ((1406, 1434), 'numpy.array', 'np.array', (['[-4.6129, -5.5149]'], {}), '([-4.6129, -5.5149])\n', (1414, 1434), True, 'import numpy as np\n'), ((1444, 1471), 'numpy.array', 'np.array', (['[3.9887, -4.3408]'], {}), '([3.9887, -4.3408])\n', (1452, 1471), True, 'import numpy as np\n'), ((1481, 1503), 'numpy.array', 'np.array', (['[16.03, 8.0]'], {}), '([16.03, 8.0])\n', (1489, 1503), True, 'import numpy as np\n'), ((1518, 1543), 'numpy.array', 'np.array', (['[10.4222, 9.94]'], {}), '([10.4222, 9.94])\n', (1526, 1543), True, 'import numpy as np\n'), ((1555, 1583), 'numpy.array', 'np.array', (['[-1.4833, -8.4823]'], {}), '([-1.4833, -8.4823])\n', (1563, 1583), True, 'import numpy as np\n'), ((1593, 1615), 'numpy.array', 'np.array', (['[16.03, 8.0]'], {}), '([16.03, 8.0])\n', (1601, 1615), True, 'import numpy as np\n'), ((1679, 1749), 'openquake.hazardlib.geo.utils.OrthographicProjection', 'OrthographicProjection', (['(epilon - 1)', '(epilon + 1)', '(epilat + 1)', '(epilat - 1)'], {}), '(epilon - 
1, epilon + 1, epilat + 1, epilat - 1)\n', (1701, 1749), False, 'from openquake.hazardlib.geo.utils import OrthographicProjection\n'), ((2247, 2347), 'shakelib.rupture.quad_rupture.QuadRupture.fromVertices', 'QuadRupture.fromVertices', (['lon0', 'lat0', 'z0', 'lon1', 'lat1', 'z1', 'lon2', 'lat2', 'z2', 'lon3', 'lat3', 'z3', 'origin'], {}), '(lon0, lat0, z0, lon1, lat1, z1, lon2, lat2, z2,\n lon3, lat3, z3, origin)\n', (2271, 2347), False, 'from shakelib.rupture.quad_rupture import QuadRupture\n'), ((2667, 2680), 'shakelib.rupture.origin.Origin', 'Origin', (['event'], {}), '(event)\n', (2673, 2680), False, 'from shakelib.rupture.origin import Origin\n'), ((2894, 2915), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat'], {}), '(lon, lat)\n', (2905, 2915), True, 'import numpy as np\n'), ((2926, 2945), 'numpy.zeros_like', 'np.zeros_like', (['lons'], {}), '(lons)\n', (2939, 2945), True, 'import numpy as np\n'), ((3139, 3181), 'shakelib.distance.get_distance', 'get_distance', (['dtypes', 'lats', 'lons', 'dep', 'rup'], {}), '(dtypes, lats, lons, dep, rup)\n', (3151, 3181), False, 'from shakelib.distance import get_distance\n'), ((3197, 4611), 'numpy.array', 'np.array', (['[[29.37395812, 22.56039569, 15.74545461, 8.92543078, 2.09723735, -\n 4.73938823, -11.58093887, -18.42177264, -25.25743913, -32.08635501], [\n 31.84149137, 25.03129417, 18.22007124, 11.40292429, 4.57583886, -\n 2.26009972, -9.09790123, -15.92911065, -22.75071243, -29.56450963], [\n 34.30623138, 27.49382948, 20.67774678, 13.85111535, 7.0115472, \n 0.16942111, -6.65327488, -13.45181115, -20.24352643, -27.03530618], [\n 36.78170249, 29.96380633, 23.1270492, 16.23906653, 9.32934682, \n 2.41729624, -4.2732657, -10.94940844, -17.703852, -24.4792072], [\n 39.29233805, 32.49155866, 25.68380903, 18.73823089, 12.08780156, \n 5.99219619, -1.38387344, -8.28331275, -15.08759643, -21.87909368], [\n 41.84662959, 35.09745097, 28.42432401, 21.98993679, 15.2994003, \n 8.38037254, 1.3900846, -5.5601922, -12.4250749, 
-19.24690137], [\n 44.41552101, 37.69652131, 31.0257236, 24.38573309, 17.67059825, \n 10.84688716, 3.96604399, -2.920931, -9.78152208, -16.6132751], [\n 46.97201328, 40.2558351, 33.55821495, 26.85923974, 20.12416451, \n 13.33640001, 6.50905851, -0.33349597, -7.17138975, -13.99568321], [\n 49.51154107, 42.79053584, 36.07536907, 29.35382731, 22.61099757, \n 15.83894006, 9.04135415, 2.22928601, -4.58574545, -11.3959888], [\n 52.03832734, 45.31289877, 38.58842009, 31.85764151, 25.11309728, \n 18.35066231, 11.57145669, 4.78070229, -2.01505508, -8.81029694]]'], {}), '([[29.37395812, 22.56039569, 15.74545461, 8.92543078, 2.09723735, -\n 4.73938823, -11.58093887, -18.42177264, -25.25743913, -32.08635501], [\n 31.84149137, 25.03129417, 18.22007124, 11.40292429, 4.57583886, -\n 2.26009972, -9.09790123, -15.92911065, -22.75071243, -29.56450963], [\n 34.30623138, 27.49382948, 20.67774678, 13.85111535, 7.0115472, \n 0.16942111, -6.65327488, -13.45181115, -20.24352643, -27.03530618], [\n 36.78170249, 29.96380633, 23.1270492, 16.23906653, 9.32934682, \n 2.41729624, -4.2732657, -10.94940844, -17.703852, -24.4792072], [\n 39.29233805, 32.49155866, 25.68380903, 18.73823089, 12.08780156, \n 5.99219619, -1.38387344, -8.28331275, -15.08759643, -21.87909368], [\n 41.84662959, 35.09745097, 28.42432401, 21.98993679, 15.2994003, \n 8.38037254, 1.3900846, -5.5601922, -12.4250749, -19.24690137], [\n 44.41552101, 37.69652131, 31.0257236, 24.38573309, 17.67059825, \n 10.84688716, 3.96604399, -2.920931, -9.78152208, -16.6132751], [\n 46.97201328, 40.2558351, 33.55821495, 26.85923974, 20.12416451, \n 13.33640001, 6.50905851, -0.33349597, -7.17138975, -13.99568321], [\n 49.51154107, 42.79053584, 36.07536907, 29.35382731, 22.61099757, \n 15.83894006, 9.04135415, 2.22928601, -4.58574545, -11.3959888], [\n 52.03832734, 45.31289877, 38.58842009, 31.85764151, 25.11309728, \n 18.35066231, 11.57145669, 4.78070229, -2.01505508, -8.81029694]])\n', (3205, 4611), True, 'import numpy as np\n'), ((4952, 
5010), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["dists['U']", 'targetU'], {'atol': '(0.01)'}), "(dists['U'], targetU, atol=0.01)\n", (4978, 5010), True, 'import numpy as np\n'), ((5026, 6461), 'numpy.array', 'np.array', (['[[-40.32654805, -38.14066537, -35.95781299, -33.79265063, -31.65892948, -\n 29.56075203, -27.48748112, -25.41823592, -23.33452174, -21.22822801], [\n -32.28894353, -30.06603457, -27.83163648, -25.61482279, -23.45367121, -\n 21.36959238, -19.34738882, -17.33510593, -15.28949735, -13.20224592], [\n -24.30254163, -22.03532096, -19.70590091, -17.35907062, -15.10840929, -\n 13.02682541, -11.13554925, -9.25705749, -7.26675455, -5.19396824], [-\n 16.41306482, -14.1418547, -11.68888578, -8.9318195, -6.39939727, -\n 4.10984325, -2.85061088, -1.29211846, 0.68929792, 2.78115216], [-\n 8.63784529, -6.5089946, -4.32108309, -1.44275161, -0.05102145, -\n 0.20890633, 3.92700516, 6.36977183, 8.55572399, 10.72128633], [-\n 0.88135778, 1.06766314, 2.77955566, 3.8241835, 5.99212478, 8.76823285, \n 11.54715599, 14.0961506, 16.4200502, 18.65346494], [6.98140207, \n 8.91888936, 10.77724993, 12.6499521, 14.79454638, 17.18482779, \n 19.63520498, 22.03525644, 24.35152986, 26.60592498], [14.95635952, \n 16.95134069, 18.94768299, 20.99811237, 23.15975573, 25.42700742, \n 27.74302905, 30.0547134, 32.33583361, 34.58421221], [22.9921068, \n 25.0353212, 27.09829391, 29.20364631, 31.3678744, 33.58684524, \n 35.8383652, 38.09736043, 40.34713771, 42.58152772], [31.05186177, \n 33.1252095, 35.21960344, 37.34488267, 39.50633206, 41.70076344, \n 43.91762786, 46.14415669, 48.37021739, 50.59029205]]'], {}), '([[-40.32654805, -38.14066537, -35.95781299, -33.79265063, -\n 31.65892948, -29.56075203, -27.48748112, -25.41823592, -23.33452174, -\n 21.22822801], [-32.28894353, -30.06603457, -27.83163648, -25.61482279, \n -23.45367121, -21.36959238, -19.34738882, -17.33510593, -15.28949735, -\n 13.20224592], [-24.30254163, -22.03532096, -19.70590091, -17.35907062, \n 
-15.10840929, -13.02682541, -11.13554925, -9.25705749, -7.26675455, -\n 5.19396824], [-16.41306482, -14.1418547, -11.68888578, -8.9318195, -\n 6.39939727, -4.10984325, -2.85061088, -1.29211846, 0.68929792, \n 2.78115216], [-8.63784529, -6.5089946, -4.32108309, -1.44275161, -\n 0.05102145, -0.20890633, 3.92700516, 6.36977183, 8.55572399, \n 10.72128633], [-0.88135778, 1.06766314, 2.77955566, 3.8241835, \n 5.99212478, 8.76823285, 11.54715599, 14.0961506, 16.4200502, \n 18.65346494], [6.98140207, 8.91888936, 10.77724993, 12.6499521, \n 14.79454638, 17.18482779, 19.63520498, 22.03525644, 24.35152986, \n 26.60592498], [14.95635952, 16.95134069, 18.94768299, 20.99811237, \n 23.15975573, 25.42700742, 27.74302905, 30.0547134, 32.33583361, \n 34.58421221], [22.9921068, 25.0353212, 27.09829391, 29.20364631, \n 31.3678744, 33.58684524, 35.8383652, 38.09736043, 40.34713771, \n 42.58152772], [31.05186177, 33.1252095, 35.21960344, 37.34488267, \n 39.50633206, 41.70076344, 43.91762786, 46.14415669, 48.37021739, \n 50.59029205]])\n', (5034, 6461), True, 'import numpy as np\n'), ((6789, 6847), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["dists['T']", 'targetT'], {'atol': '(0.01)'}), "(dists['T'], targetT, atol=0.01)\n", (6815, 6847), True, 'import numpy as np\n'), ((6879, 6912), 'shakelib.rupture.gc2._computeGC2', '_computeGC2', (['rup', 'lons', 'lats', 'dep'], {}), '(rup, lons, lats, dep)\n', (6890, 6912), False, 'from shakelib.rupture.gc2 import _computeGC2\n'), ((6917, 6975), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["ddict['T']", 'targetT'], {'atol': '(0.01)'}), "(ddict['T'], targetT, atol=0.01)\n", (6943, 6975), True, 'import numpy as np\n'), ((6980, 7038), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["ddict['U']", 'targetU'], {'atol': '(0.01)'}), "(ddict['U'], targetU, atol=0.01)\n", (7006, 7038), True, 'import numpy as np\n'), ((7251, 7351), 'shakelib.sites.Sites.fromCenter', 'Sites.fromCenter', (['cx', 'cy', 
'xspan', 'yspan', 'dx', 'dy'], {'vs30File': 'vs30file', 'padding': '(True)', 'resample': '(False)'}), '(cx, cy, xspan, yspan, dx, dy, vs30File=vs30file, padding=\n True, resample=False)\n', (7267, 7351), False, 'from shakelib.sites import Sites\n'), ((7440, 7456), 'numpy.array', 'np.array', (['[34.1]'], {}), '([34.1])\n', (7448, 7456), True, 'import numpy as np\n'), ((7468, 7486), 'numpy.array', 'np.array', (['[-118.2]'], {}), '([-118.2])\n', (7476, 7486), True, 'import numpy as np\n'), ((7498, 7514), 'numpy.array', 'np.array', (['[34.2]'], {}), '([34.2])\n', (7506, 7514), True, 'import numpy as np\n'), ((7526, 7545), 'numpy.array', 'np.array', (['[-118.15]'], {}), '([-118.15])\n', (7534, 7545), True, 'import numpy as np\n'), ((7554, 7569), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (7562, 7569), True, 'import numpy as np\n'), ((7578, 7593), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (7586, 7593), True, 'import numpy as np\n'), ((7604, 7620), 'numpy.array', 'np.array', (['[30.0]'], {}), '([30.0])\n', (7612, 7620), True, 'import numpy as np\n'), ((7931, 7995), 'shakelib.rupture.quad_rupture.QuadRupture.fromTrace', 'QuadRupture.fromTrace', (['lon0', 'lat0', 'lon1', 'lat1', 'z', 'W', 'dip', 'origin'], {}), '(lon0, lat0, lon1, lat1, z, W, dip, origin)\n', (7952, 7995), False, 'from shakelib.rupture.quad_rupture import QuadRupture\n'), ((8282, 8295), 'shakelib.rupture.origin.Origin', 'Origin', (['event'], {}), '(event)\n', (8288, 8295), False, 'from shakelib.rupture.origin import Origin\n'), ((9415, 9428), 'shakelib.rupture.origin.Origin', 'Origin', (['event'], {}), '(event)\n', (9421, 9428), False, 'from shakelib.rupture.origin import Origin\n'), ((9673, 9773), 'shakelib.sites.Sites.fromCenter', 'Sites.fromCenter', (['cx', 'cy', 'xspan', 'yspan', 'dx', 'dy'], {'vs30File': 'vs30file', 'padding': '(True)', 'resample': '(False)'}), '(cx, cy, xspan, yspan, dx, dy, vs30File=vs30file, padding=\n True, resample=False)\n', (9689, 9773), False, 'from 
shakelib.sites import Sites\n'), ((9920, 9940), 'openquake.hazardlib.gsim.abrahamson_2014.AbrahamsonEtAl2014', 'AbrahamsonEtAl2014', ([], {}), '()\n', (9938, 9940), False, 'from openquake.hazardlib.gsim.abrahamson_2014 import AbrahamsonEtAl2014\n'), ((9955, 9975), 'shakelib.rupture.point_rupture.PointRupture', 'PointRupture', (['origin'], {}), '(origin)\n', (9967, 9975), False, 'from shakelib.rupture.point_rupture import PointRupture\n'), ((9988, 10027), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpe', 'site', 'rupture'], {}), '(gmpe, site, rupture)\n', (10006, 10027), False, 'from shakelib.distance import Distance\n'), ((10077, 10771), 'numpy.array', 'np.array', (['[[1.19350211, 1.01453734, 0.894306248, 0.851431703, 0.894306248, 1.01453734,\n 1.19350211], [0.923698454, 0.697204114, 0.532067867, 0.469137288, \n 0.532067867, 0.697204114, 0.923698454], [0.728251778, 0.444114326, \n 0.26057255, 0.194977658, 0.26057255, 0.444114326, 0.728251778], [\n 0.654236979, 0.339249542, 0.157170497, 1.9827811e-05, 0.157170497, \n 0.339249542, 0.654236979], [0.728338531, 0.444167697, 0.260583985, \n 0.194977658, 0.260583985, 0.444167697, 0.728338531], [0.923844143, \n 0.69728364, 0.532091716, 0.469137288, 0.532091716, 0.69728364, \n 0.923844143], [1.19368104, 1.01462773, 0.89433113, 0.851431703, \n 0.89433113, 1.01462773, 1.19368104]]'], {}), '([[1.19350211, 1.01453734, 0.894306248, 0.851431703, 0.894306248, \n 1.01453734, 1.19350211], [0.923698454, 0.697204114, 0.532067867, \n 0.469137288, 0.532067867, 0.697204114, 0.923698454], [0.728251778, \n 0.444114326, 0.26057255, 0.194977658, 0.26057255, 0.444114326, \n 0.728251778], [0.654236979, 0.339249542, 0.157170497, 1.9827811e-05, \n 0.157170497, 0.339249542, 0.654236979], [0.728338531, 0.444167697, \n 0.260583985, 0.194977658, 0.260583985, 0.444167697, 0.728338531], [\n 0.923844143, 0.69728364, 0.532091716, 0.469137288, 0.532091716, \n 0.69728364, 0.923844143], [1.19368104, 1.01462773, 0.89433113, \n 
0.851431703, 0.89433113, 1.01462773, 1.19368104]])\n', (10085, 10771), True, 'import numpy as np\n'), ((11152, 11790), 'numpy.array', 'np.array', (['[[4.0129619, 3.93137849, 3.87656959, 3.85702467, 3.87656959, 3.93137849, \n 4.0129619], [3.88996841, 3.78671803, 3.71143853, 3.68275081, 3.71143853,\n 3.78671803, 3.88996841], [3.80087151, 3.67134376, 3.60166506, \n 3.58311968, 3.60166506, 3.67134376, 3.80087151], [3.7671309, 3.62390909,\n 3.57243062, 3.53580973, 3.57243062, 3.62390909, 3.7671309], [3.80091105,\n 3.67136809, 3.60166829, 3.58311968, 3.60166829, 3.67136809, 3.80091105],\n [3.89003482, 3.78675428, 3.7114494, 3.68275081, 3.7114494, 3.78675428, \n 3.89003482], [4.01304347, 3.9314197, 3.87658093, 3.85702467, 3.87658093,\n 3.9314197, 4.01304347]]'], {}), '([[4.0129619, 3.93137849, 3.87656959, 3.85702467, 3.87656959, \n 3.93137849, 4.0129619], [3.88996841, 3.78671803, 3.71143853, 3.68275081,\n 3.71143853, 3.78671803, 3.88996841], [3.80087151, 3.67134376, \n 3.60166506, 3.58311968, 3.60166506, 3.67134376, 3.80087151], [3.7671309,\n 3.62390909, 3.57243062, 3.53580973, 3.57243062, 3.62390909, 3.7671309],\n [3.80091105, 3.67136809, 3.60166829, 3.58311968, 3.60166829, 3.67136809,\n 3.80091105], [3.89003482, 3.78675428, 3.7114494, 3.68275081, 3.7114494,\n 3.78675428, 3.89003482], [4.01304347, 3.9314197, 3.87658093, 3.85702467,\n 3.87658093, 3.9314197, 4.01304347]])\n', (11160, 11790), True, 'import numpy as np\n'), ((12146, 12185), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpe', 'site', 'rupture'], {}), '(gmpe, site, rupture)\n', (12164, 12185), False, 'from shakelib.distance import Distance\n'), ((12236, 12930), 'numpy.array', 'np.array', (['[[1.19350211, 1.01453734, 0.894306248, 0.851431703, 0.894306248, 1.01453734,\n 1.19350211], [0.923698454, 0.697204114, 0.532067867, 0.469137288, \n 0.532067867, 0.697204114, 0.923698454], [0.728251778, 0.444114326, \n 0.26057255, 0.194977658, 0.26057255, 0.444114326, 0.728251778], [\n 0.654236979, 
0.339249542, 0.157170497, 1.9827811e-05, 0.157170497, \n 0.339249542, 0.654236979], [0.728338531, 0.444167697, 0.260583985, \n 0.194977658, 0.260583985, 0.444167697, 0.728338531], [0.923844143, \n 0.69728364, 0.532091716, 0.469137288, 0.532091716, 0.69728364, \n 0.923844143], [1.19368104, 1.01462773, 0.89433113, 0.851431703, \n 0.89433113, 1.01462773, 1.19368104]]'], {}), '([[1.19350211, 1.01453734, 0.894306248, 0.851431703, 0.894306248, \n 1.01453734, 1.19350211], [0.923698454, 0.697204114, 0.532067867, \n 0.469137288, 0.532067867, 0.697204114, 0.923698454], [0.728251778, \n 0.444114326, 0.26057255, 0.194977658, 0.26057255, 0.444114326, \n 0.728251778], [0.654236979, 0.339249542, 0.157170497, 1.9827811e-05, \n 0.157170497, 0.339249542, 0.654236979], [0.728338531, 0.444167697, \n 0.260583985, 0.194977658, 0.260583985, 0.444167697, 0.728338531], [\n 0.923844143, 0.69728364, 0.532091716, 0.469137288, 0.532091716, \n 0.69728364, 0.923844143], [1.19368104, 1.01462773, 0.89433113, \n 0.851431703, 0.89433113, 1.01462773, 1.19368104]])\n', (12244, 12930), True, 'import numpy as np\n'), ((13473, 13512), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpe', 'site', 'rupture'], {}), '(gmpe, site, rupture)\n', (13491, 13512), False, 'from shakelib.distance import Distance\n'), ((13562, 14256), 'numpy.array', 'np.array', (['[[1.19350211, 1.01453734, 0.894306248, 0.851431703, 0.894306248, 1.01453734,\n 1.19350211], [0.923698454, 0.697204114, 0.532067867, 0.469137288, \n 0.532067867, 0.697204114, 0.923698454], [0.728251778, 0.444114326, \n 0.26057255, 0.194977658, 0.26057255, 0.444114326, 0.728251778], [\n 0.654236979, 0.339249542, 0.157170497, 1.9827811e-05, 0.157170497, \n 0.339249542, 0.654236979], [0.728338531, 0.444167697, 0.260583985, \n 0.194977658, 0.260583985, 0.444167697, 0.728338531], [0.923844143, \n 0.69728364, 0.532091716, 0.469137288, 0.532091716, 0.69728364, \n 0.923844143], [1.19368104, 1.01462773, 0.89433113, 0.851431703, \n 0.89433113, 
1.01462773, 1.19368104]]'], {}), '([[1.19350211, 1.01453734, 0.894306248, 0.851431703, 0.894306248, \n 1.01453734, 1.19350211], [0.923698454, 0.697204114, 0.532067867, \n 0.469137288, 0.532067867, 0.697204114, 0.923698454], [0.728251778, \n 0.444114326, 0.26057255, 0.194977658, 0.26057255, 0.444114326, \n 0.728251778], [0.654236979, 0.339249542, 0.157170497, 1.9827811e-05, \n 0.157170497, 0.339249542, 0.654236979], [0.728338531, 0.444167697, \n 0.260583985, 0.194977658, 0.260583985, 0.444167697, 0.728338531], [\n 0.923844143, 0.69728364, 0.532091716, 0.469137288, 0.532091716, \n 0.69728364, 0.923844143], [1.19368104, 1.01462773, 0.89433113, \n 0.851431703, 0.89433113, 1.01462773, 1.19368104]])\n', (13570, 14256), True, 'import numpy as np\n'), ((14637, 15275), 'numpy.array', 'np.array', (['[[4.0129619, 3.93137849, 3.87656959, 3.85702467, 3.87656959, 3.93137849, \n 4.0129619], [3.88996841, 3.78671803, 3.71143853, 3.68275081, 3.71143853,\n 3.78671803, 3.88996841], [3.80087151, 3.67134376, 3.60166506, \n 3.58311968, 3.60166506, 3.67134376, 3.80087151], [3.7671309, 3.62390909,\n 3.57243062, 3.53580973, 3.57243062, 3.62390909, 3.7671309], [3.80091105,\n 3.67136809, 3.60166829, 3.58311968, 3.60166829, 3.67136809, 3.80091105],\n [3.89003482, 3.78675428, 3.7114494, 3.68275081, 3.7114494, 3.78675428, \n 3.89003482], [4.01304347, 3.9314197, 3.87658093, 3.85702467, 3.87658093,\n 3.9314197, 4.01304347]]'], {}), '([[4.0129619, 3.93137849, 3.87656959, 3.85702467, 3.87656959, \n 3.93137849, 4.0129619], [3.88996841, 3.78671803, 3.71143853, 3.68275081,\n 3.71143853, 3.78671803, 3.88996841], [3.80087151, 3.67134376, \n 3.60166506, 3.58311968, 3.60166506, 3.67134376, 3.80087151], [3.7671309,\n 3.62390909, 3.57243062, 3.53580973, 3.57243062, 3.62390909, 3.7671309],\n [3.80091105, 3.67136809, 3.60166829, 3.58311968, 3.60166829, 3.67136809,\n 3.80091105], [3.89003482, 3.78675428, 3.7114494, 3.68275081, 3.7114494,\n 3.78675428, 3.89003482], [4.01304347, 3.9314197, 3.87658093, 
3.85702467,\n 3.87658093, 3.9314197, 4.01304347]])\n', (14645, 15275), True, 'import numpy as np\n'), ((15670, 15709), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpe', 'site', 'rupture'], {}), '(gmpe, site, rupture)\n', (15688, 15709), False, 'from shakelib.distance import Distance\n'), ((15759, 16467), 'numpy.array', 'np.array', (['[[0.776090807, 0.649225734, 0.563995966, 0.533602932, 0.563995966, \n 0.649225734, 0.776090807], [0.584831599, 0.424273624, 0.307211355, \n 0.262600941, 0.307211355, 0.424273624, 0.584831599], [0.446282784, \n 0.24486259, 0.132264468, 0.0999797788, 0.132264468, 0.24486259, \n 0.446282784], [0.393814955, 0.170987945, 0.0813717378, 1.03958777e-05, \n 0.0813717378, 0.170987945, 0.393814955], [0.446344282, 0.244900424, \n 0.132270097, 0.0999797788, 0.132270097, 0.244900424, 0.446344282], [\n 0.584934876, 0.424329999, 0.307228262, 0.262600941, 0.307228262, \n 0.424329999, 0.584934876], [0.77621765, 0.649289812, 0.564013604, \n 0.533602932, 0.564013604, 0.649289812, 0.77621765]]'], {}), '([[0.776090807, 0.649225734, 0.563995966, 0.533602932, 0.563995966,\n 0.649225734, 0.776090807], [0.584831599, 0.424273624, 0.307211355, \n 0.262600941, 0.307211355, 0.424273624, 0.584831599], [0.446282784, \n 0.24486259, 0.132264468, 0.0999797788, 0.132264468, 0.24486259, \n 0.446282784], [0.393814955, 0.170987945, 0.0813717378, 1.03958777e-05, \n 0.0813717378, 0.170987945, 0.393814955], [0.446344282, 0.244900424, \n 0.132270097, 0.0999797788, 0.132270097, 0.244900424, 0.446344282], [\n 0.584934876, 0.424329999, 0.307228262, 0.262600941, 0.307228262, \n 0.424329999, 0.584934876], [0.77621765, 0.649289812, 0.564013604, \n 0.533602932, 0.564013604, 0.649289812, 0.77621765]])\n', (15767, 16467), True, 'import numpy as np\n'), ((16834, 17472), 'numpy.array', 'np.array', (['[[3.42235562, 3.338452, 3.28208435, 3.26198358, 3.28208435, 3.338452, \n 3.42235562], [3.29586422, 3.18967743, 3.112257, 3.08275341, 3.112257, \n 3.18967743, 
3.29586422], [3.20423343, 3.07102195, 2.99912626, \n 2.97986242, 2.99912626, 3.07102195, 3.20423343], [3.16953325, \n 3.02223204, 2.96875925, 2.92616469, 2.96875925, 3.02223204, 3.16953325],\n [3.2042741, 3.07104698, 2.99912962, 2.97986242, 2.99912962, 3.07104698,\n 3.2042741], [3.29593253, 3.18971471, 3.11226818, 3.08275341, 3.11226818,\n 3.18971471, 3.29593253], [3.42243951, 3.33849438, 3.28209601, \n 3.26198358, 3.28209601, 3.33849438, 3.42243951]]'], {}), '([[3.42235562, 3.338452, 3.28208435, 3.26198358, 3.28208435, \n 3.338452, 3.42235562], [3.29586422, 3.18967743, 3.112257, 3.08275341, \n 3.112257, 3.18967743, 3.29586422], [3.20423343, 3.07102195, 2.99912626,\n 2.97986242, 2.99912626, 3.07102195, 3.20423343], [3.16953325, \n 3.02223204, 2.96875925, 2.92616469, 2.96875925, 3.02223204, 3.16953325],\n [3.2042741, 3.07104698, 2.99912962, 2.97986242, 2.99912962, 3.07104698,\n 3.2042741], [3.29593253, 3.18971471, 3.11226818, 3.08275341, 3.11226818,\n 3.18971471, 3.29593253], [3.42243951, 3.33849438, 3.28209601, \n 3.26198358, 3.28209601, 3.33849438, 3.42243951]])\n', (16842, 17472), True, 'import numpy as np\n'), ((17865, 17904), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpe', 'site', 'rupture'], {}), '(gmpe, site, rupture)\n', (17883, 17904), False, 'from shakelib.distance import Distance\n'), ((17954, 18661), 'numpy.array', 'np.array', (['[[0.83277182, 0.696170087, 0.604399092, 0.571673449, 0.604399092, \n 0.696170087, 0.83277182], [0.626833822, 0.453953319, 0.327906737, \n 0.279872556, 0.327906737, 0.453953319, 0.626833822], [0.477651641, \n 0.260772819, 0.138685718, 0.103235484, 0.138685718, 0.260772819, \n 0.477651641], [0.421157003, 0.181206068, 0.0828029065, 1.03958777e-05, \n 0.0828029065, 0.181206068, 0.421157003], [0.477717859, 0.260813557, \n 0.138691898, 0.103235484, 0.138691898, 0.260813557, 0.477717859], [\n 0.626945025, 0.45401402, 0.327924941, 0.279872556, 0.327924941, \n 0.45401402, 0.626945025], [0.832908398, 0.696239083, 
0.604418084, \n 0.571673449, 0.604418084, 0.696239083, 0.832908398]]'], {}), '([[0.83277182, 0.696170087, 0.604399092, 0.571673449, 0.604399092, \n 0.696170087, 0.83277182], [0.626833822, 0.453953319, 0.327906737, \n 0.279872556, 0.327906737, 0.453953319, 0.626833822], [0.477651641, \n 0.260772819, 0.138685718, 0.103235484, 0.138685718, 0.260772819, \n 0.477651641], [0.421157003, 0.181206068, 0.0828029065, 1.03958777e-05, \n 0.0828029065, 0.181206068, 0.421157003], [0.477717859, 0.260813557, \n 0.138691898, 0.103235484, 0.138691898, 0.260813557, 0.477717859], [\n 0.626945025, 0.45401402, 0.327924941, 0.279872556, 0.327924941, \n 0.45401402, 0.626945025], [0.832908398, 0.696239083, 0.604418084, \n 0.571673449, 0.604418084, 0.696239083, 0.832908398]])\n', (17962, 18661), True, 'import numpy as np\n'), ((19029, 19670), 'numpy.array', 'np.array', (['[[3.3192606, 3.22072248, 3.15452316, 3.13091641, 3.15452316, 3.22072248, \n 3.3192606], [3.17070653, 3.0459986, 2.95507447, 2.92042485, 2.95507447,\n 3.0459986, 3.17070653], [3.06309346, 2.90664719, 2.82107391, 2.79752673,\n 2.82107391, 2.90664719, 3.06309346], [3.02234086, 2.84931729, \n 2.78395476, 2.73772697, 2.78395476, 2.84931729, 3.02234086], [\n 3.06314123, 2.90667658, 2.82107802, 2.79752673, 2.82107802, 2.90667658,\n 3.06314123], [3.17078675, 3.04604238, 2.9550876, 2.92042485, 2.9550876,\n 3.04604238, 3.17078675], [3.31935913, 3.22077225, 3.15453686, \n 3.13091641, 3.15453686, 3.22077225, 3.31935913]]'], {}), '([[3.3192606, 3.22072248, 3.15452316, 3.13091641, 3.15452316, \n 3.22072248, 3.3192606], [3.17070653, 3.0459986, 2.95507447, 2.92042485,\n 2.95507447, 3.0459986, 3.17070653], [3.06309346, 2.90664719, 2.82107391,\n 2.79752673, 2.82107391, 2.90664719, 3.06309346], [3.02234086, \n 2.84931729, 2.78395476, 2.73772697, 2.78395476, 2.84931729, 3.02234086],\n [3.06314123, 2.90667658, 2.82107802, 2.79752673, 2.82107802, 2.90667658,\n 3.06314123], [3.17078675, 3.04604238, 2.9550876, 2.92042485, 2.9550876,\n 3.04604238, 
3.17078675], [3.31935913, 3.22077225, 3.15453686, \n 3.13091641, 3.15453686, 3.22077225, 3.31935913]])\n', (19037, 19670), True, 'import numpy as np\n'), ((20064, 20103), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpe', 'site', 'rupture'], {}), '(gmpe, site, rupture)\n', (20082, 20103), False, 'from shakelib.distance import Distance\n'), ((20153, 20827), 'numpy.array', 'np.array', (['[[1.95958776, 1.66988434, 1.47525745, 1.40585328, 1.47525745, 1.66988434, \n 1.95958776], [1.52283677, 1.15619376, 0.888875589, 0.78700524, \n 0.888875589, 1.15619376, 1.52283677], [1.20645289, 0.746498734, \n 0.423057706, 0.295503135, 0.423057706, 0.746498734, 1.20645289], [\n 1.0866397, 0.576051478, 0.221984054, 1.9827811e-05, 0.221984054, \n 0.576051478, 1.0866397], [1.20659332, 0.74658513, 0.423079943, \n 0.295503135, 0.423079943, 0.74658513, 1.20659332], [1.52307261, \n 1.15632249, 0.888914196, 0.78700524, 0.888914196, 1.15632249, \n 1.52307261], [1.95987741, 1.67003067, 1.47529773, 1.40585328, \n 1.47529773, 1.67003067, 1.95987741]]'], {}), '([[1.95958776, 1.66988434, 1.47525745, 1.40585328, 1.47525745, \n 1.66988434, 1.95958776], [1.52283677, 1.15619376, 0.888875589, \n 0.78700524, 0.888875589, 1.15619376, 1.52283677], [1.20645289, \n 0.746498734, 0.423057706, 0.295503135, 0.423057706, 0.746498734, \n 1.20645289], [1.0866397, 0.576051478, 0.221984054, 1.9827811e-05, \n 0.221984054, 0.576051478, 1.0866397], [1.20659332, 0.74658513, \n 0.423079943, 0.295503135, 0.423079943, 0.74658513, 1.20659332], [\n 1.52307261, 1.15632249, 0.888914196, 0.78700524, 0.888914196, \n 1.15632249, 1.52307261], [1.95987741, 1.67003067, 1.47529773, \n 1.40585328, 1.47529773, 1.67003067, 1.95987741]])\n', (20161, 20827), True, 'import numpy as np\n'), ((21228, 21882), 'numpy.array', 'np.array', (['[[2.54969772, 2.27038241, 2.08273439, 2.01581889, 2.08273439, 2.27038241, \n 2.54969772], [2.12860763, 1.77511159, 1.51737884, 1.41916133, \n 1.51737884, 1.77511159, 2.12860763], 
[1.82356854, 1.38010729, \n 1.08693739, 0.97911408, 1.08693739, 1.38010729, 1.82356854], [\n 1.70805158, 1.21626476, 0.91696757, 0.78911491, 0.91696757, 1.21626476,\n 1.70805158], [1.82370394, 1.38019059, 1.08695619, 0.97911408, \n 1.08695619, 1.38019059, 1.82370394], [2.12883501, 1.77523571, \n 1.51741606, 1.41916133, 1.51741606, 1.77523571, 2.12883501], [\n 2.54997699, 2.27052349, 2.08277323, 2.01581889, 2.08277323, 2.27052349,\n 2.54997699]]'], {}), '([[2.54969772, 2.27038241, 2.08273439, 2.01581889, 2.08273439, \n 2.27038241, 2.54969772], [2.12860763, 1.77511159, 1.51737884, \n 1.41916133, 1.51737884, 1.77511159, 2.12860763], [1.82356854, \n 1.38010729, 1.08693739, 0.97911408, 1.08693739, 1.38010729, 1.82356854],\n [1.70805158, 1.21626476, 0.91696757, 0.78911491, 0.91696757, 1.21626476,\n 1.70805158], [1.82370394, 1.38019059, 1.08695619, 0.97911408, \n 1.08695619, 1.38019059, 1.82370394], [2.12883501, 1.77523571, \n 1.51741606, 1.41916133, 1.51741606, 1.77523571, 2.12883501], [\n 2.54997699, 2.27052349, 2.08277323, 2.01581889, 2.08277323, 2.27052349,\n 2.54997699]])\n', (21236, 21882), True, 'import numpy as np\n'), ((22271, 22310), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpe', 'site', 'rupture'], {}), '(gmpe, site, rupture)\n', (22289, 22310), False, 'from shakelib.distance import Distance\n'), ((22360, 23040), 'numpy.array', 'np.array', (['[[1.49285078, 1.26359361, 1.10957536, 1.05465228, 1.10957536, 1.26359361, \n 1.49285078], [1.14722732, 0.857083889, 0.645541307, 0.564926073, \n 0.645541307, 0.857083889, 1.14722732], [0.89685652, 0.532871196, \n 0.299662245, 0.217185537, 0.299662245, 0.532871196, 0.89685652], [\n 0.802042196, 0.398587924, 0.169648145, 1.9827811e-05, 0.169648145, \n 0.398587924, 0.802042196], [0.896967653, 0.532939565, 0.299676623, \n 0.217185537, 0.299676623, 0.532939565, 0.896967653], [1.14741395, \n 0.857185764, 0.645571858, 0.564926073, 0.645571858, 0.857185764, \n 1.14741395], [1.49308, 1.2637094, 1.10960724, 
1.05465228, 1.10960724, \n 1.2637094, 1.49308]]'], {}), '([[1.49285078, 1.26359361, 1.10957536, 1.05465228, 1.10957536, \n 1.26359361, 1.49285078], [1.14722732, 0.857083889, 0.645541307, \n 0.564926073, 0.645541307, 0.857083889, 1.14722732], [0.89685652, \n 0.532871196, 0.299662245, 0.217185537, 0.299662245, 0.532871196, \n 0.89685652], [0.802042196, 0.398587924, 0.169648145, 1.9827811e-05, \n 0.169648145, 0.398587924, 0.802042196], [0.896967653, 0.532939565, \n 0.299676623, 0.217185537, 0.299676623, 0.532939565, 0.896967653], [\n 1.14741395, 0.857185764, 0.645571858, 0.564926073, 0.645571858, \n 0.857185764, 1.14741395], [1.49308, 1.2637094, 1.10960724, 1.05465228, \n 1.10960724, 1.2637094, 1.49308]])\n', (22368, 23040), True, 'import numpy as np\n'), ((23435, 24079), 'numpy.array', 'np.array', (['[[4.17967552, 4.07332411, 4.00187571, 3.97639713, 4.00187571, 4.07332411, \n 4.17967552], [4.01934229, 3.88474601, 3.78661232, 3.74921526, \n 3.78661232, 3.88474601, 4.01934229], [3.90319636, 3.73434515, \n 3.64558217, 3.62308648, 3.64558217, 3.73434515, 3.90319636], [\n 3.85921241, 3.67256434, 3.61012056, 3.57133422, 3.61012056, 3.67256434,\n 3.85921241], [3.90324792, 3.73437686, 3.64558609, 3.62308648, \n 3.64558609, 3.73437686, 3.90324792], [4.01942887, 3.88479327, 3.7866265,\n 3.74921526, 3.7866265, 3.88479327, 4.01942887], [4.17978186, 4.07337783,\n 4.0018905, 3.97639713, 4.0018905, 4.07337783, 4.17978186]]'], {}), '([[4.17967552, 4.07332411, 4.00187571, 3.97639713, 4.00187571, \n 4.07332411, 4.17967552], [4.01934229, 3.88474601, 3.78661232, \n 3.74921526, 3.78661232, 3.88474601, 4.01934229], [3.90319636, \n 3.73434515, 3.64558217, 3.62308648, 3.64558217, 3.73434515, 3.90319636],\n [3.85921241, 3.67256434, 3.61012056, 3.57133422, 3.61012056, 3.67256434,\n 3.85921241], [3.90324792, 3.73437686, 3.64558609, 3.62308648, \n 3.64558609, 3.73437686, 3.90324792], [4.01942887, 3.88479327, 3.7866265,\n 3.74921526, 3.7866265, 3.88479327, 4.01942887], [4.17978186, 4.07337783,\n 
4.0018905, 3.97639713, 4.0018905, 4.07337783, 4.17978186]])\n', (23443, 24079), True, 'import numpy as np\n'), ((24472, 24511), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpe', 'site', 'rupture'], {}), '(gmpe, site, rupture)\n', (24490, 24511), False, 'from shakelib.distance import Distance\n'), ((24561, 25264), 'numpy.array', 'np.array', (['[[1.11052523, 0.925877479, 0.801828481, 0.757592465, 0.801828481, \n 0.925877479, 1.11052523], [0.83215403, 0.598467416, 0.428087307, \n 0.363158382, 0.428087307, 0.598467416, 0.83215403], [0.630500991, \n 0.337340822, 0.169925286, 0.120068361, 0.169925286, 0.337340822, \n 0.630500991], [0.55413587, 0.229725567, 0.0913321474, 1.03958777e-05, \n 0.0913321474, 0.229725567, 0.55413587], [0.630590499, 0.337395888, \n 0.169933978, 0.120068361, 0.169933978, 0.337395888, 0.630590499], [\n 0.832304345, 0.598549467, 0.428111914, 0.363158382, 0.428111914, \n 0.598549467, 0.832304345], [1.11070985, 0.925970743, 0.801854154, \n 0.757592465, 0.801854154, 0.925970743, 1.11070985]]'], {}), '([[1.11052523, 0.925877479, 0.801828481, 0.757592465, 0.801828481, \n 0.925877479, 1.11052523], [0.83215403, 0.598467416, 0.428087307, \n 0.363158382, 0.428087307, 0.598467416, 0.83215403], [0.630500991, \n 0.337340822, 0.169925286, 0.120068361, 0.169925286, 0.337340822, \n 0.630500991], [0.55413587, 0.229725567, 0.0913321474, 1.03958777e-05, \n 0.0913321474, 0.229725567, 0.55413587], [0.630590499, 0.337395888, \n 0.169933978, 0.120068361, 0.169933978, 0.337395888, 0.630590499], [\n 0.832304345, 0.598549467, 0.428111914, 0.363158382, 0.428111914, \n 0.598549467, 0.832304345], [1.11070985, 0.925970743, 0.801854154, \n 0.757592465, 0.801854154, 0.925970743, 1.11070985]])\n', (24569, 25264), True, 'import numpy as np\n'), ((25636, 26274), 'numpy.array', 'np.array', (['[[3.4885951, 3.37216961, 3.29395331, 3.26606128, 3.29395331, 3.37216961, \n 3.4885951], [3.3130744, 3.16572856, 3.05829921, 3.01735974, 3.05829921,\n 3.16572856, 
3.3130744], [3.18592661, 3.00108105, 2.90341742, 2.87839095,\n 2.90341742, 3.00108105, 3.18592661], [3.1377763, 2.9334351, 2.86396637,\n 2.81798622, 2.86396637, 2.9334351, 3.1377763], [3.18598305, 3.00111577,\n 2.90342178, 2.87839095, 2.90342178, 3.00111577, 3.18598305], [\n 3.31316918, 3.16578029, 3.05831472, 3.01735974, 3.05831472, 3.16578029,\n 3.31316918], [3.48871151, 3.37222842, 3.29396949, 3.26606128, \n 3.29396949, 3.37222842, 3.48871151]]'], {}), '([[3.4885951, 3.37216961, 3.29395331, 3.26606128, 3.29395331, \n 3.37216961, 3.4885951], [3.3130744, 3.16572856, 3.05829921, 3.01735974,\n 3.05829921, 3.16572856, 3.3130744], [3.18592661, 3.00108105, 2.90341742,\n 2.87839095, 2.90341742, 3.00108105, 3.18592661], [3.1377763, 2.9334351,\n 2.86396637, 2.81798622, 2.86396637, 2.9334351, 3.1377763], [3.18598305,\n 3.00111577, 2.90342178, 2.87839095, 2.90342178, 3.00111577, 3.18598305],\n [3.31316918, 3.16578029, 3.05831472, 3.01735974, 3.05831472, 3.16578029,\n 3.31316918], [3.48871151, 3.37222842, 3.29396949, 3.26606128, \n 3.29396949, 3.37222842, 3.48871151]])\n', (25644, 26274), True, 'import numpy as np\n'), ((26669, 26708), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpe', 'site', 'rupture'], {}), '(gmpe, site, rupture)\n', (26687, 26708), False, 'from shakelib.distance import Distance\n'), ((26758, 27463), 'numpy.array', 'np.array', (['[[1.12678662, 0.939133949, 0.813066202, 0.768110298, 0.813066202, \n 0.939133949, 1.12678662], [0.843885262, 0.606395679, 0.433242838, \n 0.367257274, 0.433242838, 0.606395679, 0.843885262], [0.638950562, \n 0.341019564, 0.170913434, 0.120272659, 0.170913434, 0.341019564, \n 0.638950562], [0.561342691, 0.231653894, 0.0910846554, 1.03958777e-05, \n 0.0910846554, 0.231653894, 0.561342691], [0.639041527, 0.341075526, \n 0.170922263, 0.120272659, 0.170922263, 0.341075526, 0.639041527], [\n 0.844038024, 0.606479066, 0.433267846, 0.367257274, 0.433267846, \n 0.606479066, 0.844038024], [1.12697424, 0.93922873, 
0.813092292, \n 0.768110298, 0.813092292, 0.93922873, 1.12697424]]'], {}), '([[1.12678662, 0.939133949, 0.813066202, 0.768110298, 0.813066202, \n 0.939133949, 1.12678662], [0.843885262, 0.606395679, 0.433242838, \n 0.367257274, 0.433242838, 0.606395679, 0.843885262], [0.638950562, \n 0.341019564, 0.170913434, 0.120272659, 0.170913434, 0.341019564, \n 0.638950562], [0.561342691, 0.231653894, 0.0910846554, 1.03958777e-05, \n 0.0910846554, 0.231653894, 0.561342691], [0.639041527, 0.341075526, \n 0.170922263, 0.120272659, 0.170922263, 0.341075526, 0.639041527], [\n 0.844038024, 0.606479066, 0.433267846, 0.367257274, 0.433267846, \n 0.606479066, 0.844038024], [1.12697424, 0.93922873, 0.813092292, \n 0.768110298, 0.813092292, 0.93922873, 1.12697424]])\n', (26766, 27463), True, 'import numpy as np\n'), ((27833, 28477), 'numpy.array', 'np.array', (['[[3.42781739, 3.30181908, 3.21717161, 3.18698623, 3.21717161, 3.30181908, \n 3.42781739], [3.23786489, 3.07840387, 2.96214139, 2.91783576, \n 2.96214139, 3.07840387, 3.23786489], [3.10026266, 2.9002186, 2.79362772,\n 2.76581535, 2.79362772, 2.9002186, 3.10026266], [3.0481533, 2.82698693,\n 2.74978504, 2.70136713, 2.74978504, 2.82698693, 3.0481533], [3.10032374,\n 2.90025617, 2.79363257, 2.76581535, 2.79363257, 2.90025617, 3.10032374],\n [3.23796746, 3.07845986, 2.96215818, 2.91783576, 2.96215818, 3.07845986,\n 3.23796746], [3.42794337, 3.30188272, 3.21718913, 3.18698623, \n 3.21718913, 3.30188272, 3.42794337]]'], {}), '([[3.42781739, 3.30181908, 3.21717161, 3.18698623, 3.21717161, \n 3.30181908, 3.42781739], [3.23786489, 3.07840387, 2.96214139, \n 2.91783576, 2.96214139, 3.07840387, 3.23786489], [3.10026266, 2.9002186,\n 2.79362772, 2.76581535, 2.79362772, 2.9002186, 3.10026266], [3.0481533,\n 2.82698693, 2.74978504, 2.70136713, 2.74978504, 2.82698693, 3.0481533],\n [3.10032374, 2.90025617, 2.79363257, 2.76581535, 2.79363257, 2.90025617,\n 3.10032374], [3.23796746, 3.07845986, 2.96215818, 2.91783576, \n 2.96215818, 3.07845986, 
3.23796746], [3.42794337, 3.30188272, \n 3.21718913, 3.18698623, 3.21718913, 3.30188272, 3.42794337]])\n', (27841, 28477), True, 'import numpy as np\n'), ((28870, 28909), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpe', 'site', 'rupture'], {}), '(gmpe, site, rupture)\n', (28888, 28909), False, 'from shakelib.distance import Distance\n'), ((28959, 29639), 'numpy.array', 'np.array', (['[[1.80104893, 1.52092305, 1.33273049, 1.26562081, 1.33273049, 1.52092305, \n 1.80104893], [1.37873685, 1.02421498, 0.765734302, 0.667231768, \n 0.765734302, 1.02421498, 1.37873685], [1.07281256, 0.628064399, \n 0.342919369, 0.241987662, 0.342919369, 0.628064399, 1.07281256], [\n 0.95696037, 0.463980672, 0.183813296, 1.9827811e-05, 0.183813296, \n 0.463980672, 0.95696037], [1.07294835, 0.628147939, 0.342936965, \n 0.241987662, 0.342936965, 0.628147939, 1.07294835], [1.37896489, \n 1.02433946, 0.765771633, 0.667231768, 0.765771633, 1.02433946, \n 1.37896489], [1.80132901, 1.52106454, 1.33276944, 1.26562081, \n 1.33276944, 1.52106454, 1.80132901]]'], {}), '([[1.80104893, 1.52092305, 1.33273049, 1.26562081, 1.33273049, \n 1.52092305, 1.80104893], [1.37873685, 1.02421498, 0.765734302, \n 0.667231768, 0.765734302, 1.02421498, 1.37873685], [1.07281256, \n 0.628064399, 0.342919369, 0.241987662, 0.342919369, 0.628064399, \n 1.07281256], [0.95696037, 0.463980672, 0.183813296, 1.9827811e-05, \n 0.183813296, 0.463980672, 0.95696037], [1.07294835, 0.628147939, \n 0.342936965, 0.241987662, 0.342936965, 0.628147939, 1.07294835], [\n 1.37896489, 1.02433946, 0.765771633, 0.667231768, 0.765771633, \n 1.02433946, 1.37896489], [1.80132901, 1.52106454, 1.33276944, \n 1.26562081, 1.33276944, 1.52106454, 1.80132901]])\n', (28967, 29639), True, 'import numpy as np\n'), ((30034, 30687), 'numpy.array', 'np.array', (['[[2.85894272, 2.62140075, 2.46181667, 2.4049088, 2.46181667, 2.62140075, \n 2.85894272], [2.50082927, 2.20020077, 1.98101356, 1.89748509, \n 1.98101356, 2.20020077, 
2.50082927], [2.24141069, 1.86427183, \n 1.65402932, 1.59405522, 1.65402932, 1.86427183, 2.24141069], [\n 2.14317001, 1.72596453, 1.55948774, 1.48557451, 1.55948774, 1.72596453,\n 2.14317001], [2.24152584, 1.86434267, 1.65403978, 1.59405522, \n 1.65403978, 1.86434267, 2.24152584], [2.50102265, 2.20030633, \n 1.98104522, 1.89748509, 1.98104522, 2.20030633, 2.50102265], [\n 2.85918022, 2.62152073, 2.46184969, 2.4049088, 2.46184969, 2.62152073, \n 2.85918022]]'], {}), '([[2.85894272, 2.62140075, 2.46181667, 2.4049088, 2.46181667, \n 2.62140075, 2.85894272], [2.50082927, 2.20020077, 1.98101356, \n 1.89748509, 1.98101356, 2.20020077, 2.50082927], [2.24141069, \n 1.86427183, 1.65402932, 1.59405522, 1.65402932, 1.86427183, 2.24141069],\n [2.14317001, 1.72596453, 1.55948774, 1.48557451, 1.55948774, 1.72596453,\n 2.14317001], [2.24152584, 1.86434267, 1.65403978, 1.59405522, \n 1.65403978, 1.86434267, 2.24152584], [2.50102265, 2.20030633, \n 1.98104522, 1.89748509, 1.98104522, 2.20030633, 2.50102265], [\n 2.85918022, 2.62152073, 2.46184969, 2.4049088, 2.46184969, 2.62152073, \n 2.85918022]])\n', (30042, 30687), True, 'import numpy as np\n'), ((31156, 31256), 'shakelib.sites.Sites.fromCenter', 'Sites.fromCenter', (['cx', 'cy', 'xspan', 'yspan', 'dx', 'dy'], {'vs30File': 'vs30file', 'padding': '(True)', 'resample': '(False)'}), '(cx, cy, xspan, yspan, dx, dy, vs30File=vs30file, padding=\n True, resample=False)\n', (31172, 31256), False, 'from shakelib.sites import Sites\n'), ((31345, 31361), 'numpy.array', 'np.array', (['[34.1]'], {}), '([34.1])\n', (31353, 31361), True, 'import numpy as np\n'), ((31373, 31391), 'numpy.array', 'np.array', (['[-118.2]'], {}), '([-118.2])\n', (31381, 31391), True, 'import numpy as np\n'), ((31403, 31419), 'numpy.array', 'np.array', (['[34.2]'], {}), '([34.2])\n', (31411, 31419), True, 'import numpy as np\n'), ((31431, 31450), 'numpy.array', 'np.array', (['[-118.15]'], {}), '([-118.15])\n', (31439, 31450), True, 'import numpy as np\n'), ((31459, 
31474), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (31467, 31474), True, 'import numpy as np\n'), ((31483, 31498), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (31491, 31498), True, 'import numpy as np\n'), ((31509, 31525), 'numpy.array', 'np.array', (['[30.0]'], {}), '([30.0])\n', (31517, 31525), True, 'import numpy as np\n'), ((31768, 31781), 'shakelib.rupture.origin.Origin', 'Origin', (['event'], {}), '(event)\n', (31774, 31781), False, 'from shakelib.rupture.origin import Origin\n'), ((31793, 31857), 'shakelib.rupture.quad_rupture.QuadRupture.fromTrace', 'QuadRupture.fromTrace', (['lon0', 'lat0', 'lon1', 'lat1', 'z', 'W', 'dip', 'origin'], {}), '(lon0, lat0, lon1, lat1, z, W, dip, origin)\n', (31814, 31857), False, 'from shakelib.rupture.quad_rupture import QuadRupture\n'), ((31937, 31976), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpelist', 'site', 'rup'], {}), '(gmpelist, site, rup)\n', (31955, 31976), False, 'from shakelib.distance import Distance\n'), ((32028, 32673), 'numpy.array', 'np.array', (['[[3.74498133, 3.32896405, 3.05225679, 2.95426722, 3.05225679, 3.32896405, \n 3.74498133], [3.11965436, 2.60558436, 2.24124201, 2.10583262, \n 2.24124201, 2.60558436, 3.11965436], [2.67523213, 2.05265767, 1.564393,\n 1.36331682, 1.564393, 2.05265767, 2.67523213], [2.50973226, 1.83166664,\n 1.26045653, 1.0, 1.26045653, 1.83166664, 2.50973226], [2.67542717, \n 2.05277065, 1.56443006, 1.36331682, 1.56443006, 2.05277065, 2.67542717],\n [3.11998886, 2.60576236, 2.24129374, 2.10583262, 2.24129374, 2.60576236,\n 3.11998886], [3.74539929, 3.32917303, 3.05231378, 2.95426722, \n 3.05231378, 3.32917303, 3.74539929]]'], {}), '([[3.74498133, 3.32896405, 3.05225679, 2.95426722, 3.05225679, \n 3.32896405, 3.74498133], [3.11965436, 2.60558436, 2.24124201, \n 2.10583262, 2.24124201, 2.60558436, 3.11965436], [2.67523213, \n 2.05265767, 1.564393, 1.36331682, 1.564393, 2.05265767, 2.67523213], [\n 2.50973226, 1.83166664, 1.26045653, 
1.0, 1.26045653, 1.83166664, \n 2.50973226], [2.67542717, 2.05277065, 1.56443006, 1.36331682, \n 1.56443006, 2.05277065, 2.67542717], [3.11998886, 2.60576236, \n 2.24129374, 2.10583262, 2.24129374, 2.60576236, 3.11998886], [\n 3.74539929, 3.32917303, 3.05231378, 2.95426722, 3.05231378, 3.32917303,\n 3.74539929]])\n', (32036, 32673), True, 'import numpy as np\n'), ((33093, 33157), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rhypo', 'dctx.rhypo'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rhypo, dctx.rhypo, rtol=0, atol=0.01)\n', (33119, 33157), True, 'import numpy as np\n'), ((33177, 33872), 'numpy.array', 'np.array', (['[[-3.1889405, -2.48001769, -1.77111874, -1.06224366, -0.35339248, \n 0.355434794, 1.06423815], [-2.8350689, -2.12607622, -1.4171074, -\n 0.708162466, 0.000758576362, 0.709655709, 1.41852892], [-2.48119723, -\n 1.7721347, -1.06309603, -0.354081243, 0.354909645, 1.06387662, \n 1.77281967], [-2.1273255, -1.41819312, -0.709084619, 2.56774082e-12, \n 0.709060719, 1.41809752, 2.1271104], [-1.7734537, -1.06425151, -\n 0.355073182, 0.354081255, 1.06321179, 1.77231841, 2.4814011], [-\n 1.41958186, -0.710309855, -0.00106172493, 0.708162516, 1.41736285, \n 2.12653927, 2.83569175], [-1.06570997, -0.356368176, 0.352949744, \n 1.06224377, 1.7715139, 2.4807601, 3.18998236]]'], {}), '([[-3.1889405, -2.48001769, -1.77111874, -1.06224366, -0.35339248, \n 0.355434794, 1.06423815], [-2.8350689, -2.12607622, -1.4171074, -\n 0.708162466, 0.000758576362, 0.709655709, 1.41852892], [-2.48119723, -\n 1.7721347, -1.06309603, -0.354081243, 0.354909645, 1.06387662, \n 1.77281967], [-2.1273255, -1.41819312, -0.709084619, 2.56774082e-12, \n 0.709060719, 1.41809752, 2.1271104], [-1.7734537, -1.06425151, -\n 0.355073182, 0.354081255, 1.06321179, 1.77231841, 2.4814011], [-\n 1.41958186, -0.710309855, -0.00106172493, 0.708162516, 1.41736285, \n 2.12653927, 2.83569175], [-1.06570997, -0.356368176, 0.352949744, \n 1.06224377, 1.7715139, 2.4807601, 3.18998236]])\n', 
(33185, 33872), True, 'import numpy as np\n'), ((34451, 34509), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rx', 'dctx.rx'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rx, dctx.rx, rtol=0, atol=0.01)\n', (34477, 34509), True, 'import numpy as np\n'), ((34530, 35234), 'numpy.array', 'np.array', (['[[3.19372137, 2.48373511, 1.77377308, 1.06383562, 0.353925643, \n 0.00225816823, 0.00245009861], [2.83931844, 2.12926243, 1.41923064, \n 0.709223517, 0.00157594916, 0.00186044244, 0.00205239165], [2.48510934,\n 1.77479025, 1.06468863, 0.354611655, 0.00104375185, 0.00132827303, \n 0.00152024106], [2.30690967, 1.53793979, 0.768969896, 5.88918451e-12, \n 0.000377111295, 0.000661660373, 0.000853647223], [2.48531877, \n 1.79442084, 1.20242597, 0.854793253, 0.562052963, 0.269254693, \n 5.261051e-05], [2.95646628, 2.40489915, 2.0023107, 1.70958533, \n 1.41681634, 1.12398937, 0.863761551], [3.60741953, 3.17112489, \n 2.85711592, 2.56437623, 2.27157856, 1.97872291, 1.7851826]]'], {}), '([[3.19372137, 2.48373511, 1.77377308, 1.06383562, 0.353925643, \n 0.00225816823, 0.00245009861], [2.83931844, 2.12926243, 1.41923064, \n 0.709223517, 0.00157594916, 0.00186044244, 0.00205239165], [2.48510934,\n 1.77479025, 1.06468863, 0.354611655, 0.00104375185, 0.00132827303, \n 0.00152024106], [2.30690967, 1.53793979, 0.768969896, 5.88918451e-12, \n 0.000377111295, 0.000661660373, 0.000853647223], [2.48531877, \n 1.79442084, 1.20242597, 0.854793253, 0.562052963, 0.269254693, \n 5.261051e-05], [2.95646628, 2.40489915, 2.0023107, 1.70958533, \n 1.41681634, 1.12398937, 0.863761551], [3.60741953, 3.17112489, \n 2.85711592, 2.56437623, 2.27157856, 1.97872291, 1.7851826]])\n', (34538, 35234), True, 'import numpy as np\n'), ((35814, 35874), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjb', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjb, dctx.rjb, rtol=0, atol=0.01)\n', (35840, 35874), True, 'import numpy as np\n'), ((35895, 36379), 'numpy.array', 'np.array', 
(['[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0229490054, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.879341006, 0.586285236,\n 0.293171565, 6.21003581e-12, 0.0, 0.0, 0.0], [1.73573289, 1.44264826, \n 1.14950573, 0.8563053, 0.563046975, 0.269730762, 0.0], [2.59212463, \n 2.29901116, 2.00583977, 1.71261048, 1.41932329, 1.12597821, 0.832575235\n ], [3.44851622, 3.15537391, 2.86217367, 2.56891553, 2.27559947, \n 1.98222553, 1.68879368]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0], [0.0229490054, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.879341006, \n 0.586285236, 0.293171565, 6.21003581e-12, 0.0, 0.0, 0.0], [1.73573289, \n 1.44264826, 1.14950573, 0.8563053, 0.563046975, 0.269730762, 0.0], [\n 2.59212463, 2.29901116, 2.00583977, 1.71261048, 1.41932329, 1.12597821,\n 0.832575235], [3.44851622, 3.15537391, 2.86217367, 2.56891553, \n 2.27559947, 1.98222553, 1.68879368]])\n', (35903, 36379), True, 'import numpy as np\n'), ((37179, 37239), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ry0', 'dctx.ry0'], {'rtol': '(0)', 'atol': '(0.01)'}), '(ry0, dctx.ry0, rtol=0, atol=0.01)\n', (37205, 37239), True, 'import numpy as np\n'), ((37261, 37894), 'numpy.array', 'np.array', (['[[3.34678672, 2.67788811, 2.03697073, 1.46129187, 1.06271102, 1.06352692, \n 1.40073832], [3.01030105, 2.3526499, 1.73673635, 1.22706347, 1.00157564,\n 1.22283363, 1.57764099], [2.67858182, 2.03712377, 1.46095502, \n 1.06170931, 1.06220616, 1.39958479, 1.75442695], [2.51415965, 1.8343632,\n 1.26143652, 1.0, 1.2212501, 1.57621925, 1.9310962], [2.67877609, \n 2.05412785, 1.56384179, 1.3617346, 1.50608502, 1.77308319, 2.10764873],\n [3.12078859, 2.6043486, 2.23799413, 2.09885629, 2.11696797, 2.23191013,\n 2.4299612], [3.74318473, 3.32482368, 3.04635272, 2.9183523, 2.86659485,\n 2.88815116, 2.98141559]]'], {}), '([[3.34678672, 2.67788811, 2.03697073, 1.46129187, 1.06271102, \n 1.06352692, 1.40073832], [3.01030105, 2.3526499, 
1.73673635, 1.22706347,\n 1.00157564, 1.22283363, 1.57764099], [2.67858182, 2.03712377, \n 1.46095502, 1.06170931, 1.06220616, 1.39958479, 1.75442695], [\n 2.51415965, 1.8343632, 1.26143652, 1.0, 1.2212501, 1.57621925, \n 1.9310962], [2.67877609, 2.05412785, 1.56384179, 1.3617346, 1.50608502,\n 1.77308319, 2.10764873], [3.12078859, 2.6043486, 2.23799413, 2.09885629,\n 2.11696797, 2.23191013, 2.4299612], [3.74318473, 3.32482368, 3.04635272,\n 2.9183523, 2.86659485, 2.88815116, 2.98141559]])\n', (37269, 37894), True, 'import numpy as np\n'), ((38304, 38366), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rrup', 'dctx.rrup'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rrup, dctx.rrup, rtol=0, atol=0.01)\n', (38330, 38366), True, 'import numpy as np\n'), ((38508, 38536), 'numpy.arange', 'np.arange', (['(0)', '(9 * 11 * 3)', '(11)'], {}), '(0, 9 * 11 * 3, 11)\n', (38517, 38536), True, 'import numpy as np\n'), ((38590, 38642), 'pandas.read_fwf', 'pd.read_fwf', (['f', 'cs'], {'skiprows': '(2)', 'nrows': '(5)', 'header': 'None'}), '(f, cs, skiprows=2, nrows=5, header=None)\n', (38601, 38642), True, 'import pandas as pd\n'), ((38672, 38694), 'numpy.arange', 'np.arange', (['(0)', '(9 * 3)', '(3)'], {}), '(0, 9 * 3, 3)\n', (38681, 38694), True, 'import numpy as np\n'), ((38923, 38941), 'numpy.abs', 'np.abs', (['mat[0, iz]'], {}), '(mat[0, iz])\n', (38929, 38941), True, 'import numpy as np\n'), ((38951, 38969), 'numpy.abs', 'np.abs', (['mat[1, iz]'], {}), '(mat[1, iz])\n', (38957, 38969), True, 'import numpy as np\n'), ((38979, 38997), 'numpy.abs', 'np.abs', (['mat[2, iz]'], {}), '(mat[2, iz])\n', (38985, 38997), True, 'import numpy as np\n'), ((39007, 39025), 'numpy.abs', 'np.abs', (['mat[3, iz]'], {}), '(mat[3, iz])\n', (39013, 39025), True, 'import numpy as np\n'), ((39076, 39146), 'openquake.hazardlib.geo.utils.OrthographicProjection', 'OrthographicProjection', (['(epilon - 1)', '(epilon + 1)', '(epilat + 1)', '(epilat - 1)'], {}), '(epilon - 1, epilon + 1, 
epilat + 1, epilat - 1)\n', (39098, 39146), False, 'from openquake.hazardlib.geo.utils import OrthographicProjection\n'), ((39635, 39648), 'shakelib.rupture.origin.Origin', 'Origin', (['event'], {}), '(event)\n', (39641, 39648), False, 'from shakelib.rupture.origin import Origin\n'), ((39659, 39759), 'shakelib.rupture.quad_rupture.QuadRupture.fromVertices', 'QuadRupture.fromVertices', (['lon0', 'lat0', 'z0', 'lon1', 'lat1', 'z1', 'lon2', 'lat2', 'z2', 'lon3', 'lat3', 'z3', 'origin'], {}), '(lon0, lat0, z0, lon1, lat1, z1, lon2, lat2, z2,\n lon3, lat3, z3, origin)\n', (39683, 39759), False, 'from shakelib.rupture.quad_rupture import QuadRupture\n'), ((39881, 39902), 'pandas.read_csv', 'pd.read_csv', (['distfile'], {}), '(distfile)\n', (39892, 39902), True, 'import pandas as pd\n'), ((40035, 40055), 'numpy.zeros', 'np.zeros', (['slat.shape'], {}), '(slat.shape)\n', (40043, 40055), True, 'import numpy as np\n'), ((40326, 46518), 'numpy.array', 'np.array', (['[-49.25445446, -76.26871272, -37.1288192, -53.47792996, -50.30711637, -\n 63.96322125, -61.01988704, -81.2001781, -76.00646939, -74.39038054, -\n 92.23617124, -90.66976945, -89.68551411, -102.98798328, -114.70036085, \n -29.83636082, -28.50133134, -27.86922916, -36.00619214, -44.68826209, -\n 47.64580208, -53.92619079, -59.11962858, -55.90584822, -55.00772025, -\n 48.81756715, -59.27542007, -62.13633659, -70.0673351, -75.96977638, -\n 61.6959293, -60.34564074, -81.49792285, -78.75933138, -80.80533738, -\n 85.24473008, -94.07519297, -93.75010471, -96.87089883, -100.06112271, -\n 98.86980873, -95.92330113, -107.44086722, -119.1065369, -120.60405905, \n -113.42995442, -115.94930662, -115.2398216, -107.37840927, -49.25445446,\n -48.78386688, -108.49133002, -88.03303353, -44.66653428, -81.04476548, \n -38.26801619, -70.51178983, -69.15679931, -74.74562139, -86.51133446, -\n 27.62153029, -48.33279375, -30.0808298, -113.98345018, -97.96609537, -\n 87.9863122, -39.45970018, -80.1387617, -42.27121388, -82.05027834, -\n 
81.55987067, -81.55987067, -107.25255717, 67.62695516, -3.27797047, -\n 197.98554369, 82.30996151, 18.42180605, -22.88851072, -35.75245916, -\n 19.54788146, -18.19780517, 19.85077702, 20.33310282, 19.95448398, \n 20.55508903, 18.17428572, 17.87997374, 16.97323804, 16.0025885, \n 13.88001846, 18.42180605, -3.27797047, 51.43098894, 28.97695533, -\n 53.20579538, 38.7537468, 33.48878882, 26.25189111, 22.54251612, \n 13.37141837, -5.80928302, -6.68056794, -14.50860117, -15.23992093, -\n 27.63281952, -11.66075049, -36.94595337, -40.97168031, -41.2814342, -\n 48.64456898, -61.55777751, -11.15038984, -17.16482959, 55.84202839, \n 36.78540588, 21.18550074, 19.14658833, 19.22680282, 5.76327358, -\n 47.45309937, -44.33194991, -55.15852372, 37.33066096, 37.64135657, \n 14.31598698, 4.60495737, 6.87107021, 18.42180605, 113.59285783, \n 109.06420877, 104.23416509, 99.21599973, 95.25204545, 90.29487934, \n 86.26977557, 95.28705209, 87.12907925, 101.40561896, 96.68858152, \n 92.90287952, 100.36659012, 97.19448577, 92.8627461, 85.01448355, \n 93.36767736, 96.90824009, 86.48002825, 88.71037964, 106.17282325, \n 102.56142319, 97.60004093, 99.61798574, 97.36337239, 94.22000798, \n 86.99488734, 90.05981676, 90.51189502, 100.7166391, 100.31931988, \n 67.62695516, 94.15062409, 87.77053675, 124.21806013, 99.23108884, \n 101.48199452, 92.63771423, 78.88723272, 72.7261356, 80.58682246, \n 73.30258213, 70.20783518, 60.57963211, -87.72265602, -148.10933308, -\n 150.41334959, -144.12558375, -145.5625388, -132.09479688, -135.12980144,\n -121.10883695, -143.75755221, -117.73616176, -115.28563276, -\n 138.79652905, -143.10405603, -151.78419035, -159.75299736, -\n 149.69457229, -175.20332448, -181.00970647, -188.86536942, -\n 176.88178468, -194.20978527, -204.54944453, -161.04413103, -\n 197.98554369, -96.74089367, -133.49237232, -84.71198922, -164.97719097,\n -202.48241157, -74.54550169, -147.37402934, -144.64074441, -\n 147.94282804, -122.80976842, -133.1671346, -136.3051809, -113.93174768,\n 
-151.02125407, -146.5198829, -156.19720713, -126.06138725, -\n 131.44422964, -197.62591198, -204.42320856, -149.84576063, -\n 121.56474664, -130.99947339, -148.41767074, -145.28448367, 104.58903799,\n 82.1649906, 67.69977397, 39.46989193, -69.00949731, -133.49237232, -\n 128.264754, -84.71198922, -108.49133002, 119.86128724, 122.73556155, \n 126.28254009, 125.12436373, 123.32498578, 123.8337768, 121.39931427, \n 121.48412837, 122.03669249, 122.59675818, 119.54338365, 120.33961222, \n 120.69581745, 116.96928355, 117.6687724, 116.62277942, 115.39650689, \n 112.60751523, 109.82643069, 108.2500678, 130.9143614, 126.50049543, \n 112.76229057, 132.76840098, 107.27099883, 128.16063464, 123.83157143, \n 120.46711628, 112.55756637, 135.59953867, 136.66138116, 136.98573162, \n 134.04528777, 116.27744752, 129.2794577, 119.13550981, 124.67196321, \n 130.9728774, 130.9039439, 128.70028371, 130.04592892, 140.21819548, \n 140.60370422, 113.37585901, 123.21523323, 123.88149248, 128.56894995, \n 128.45186255, 118.74080853, 126.71189149, 119.79833338, 130.00866791, -\n 160.01242472, 13.55424709, 110.26938756, 97.71987778, 110.93671325, \n 108.71965725, 105.03432063, 106.36049687, 99.27569343, 115.06168146, \n 77.00378531, 81.50139192, 92.15605815, 79.94311644, 83.16892433, \n 52.23389149, 50.97110177, 67.95167063, 63.43930833, 40.20494692, \n 43.22936492, 47.21513635, 38.94380012, 53.85489136, 56.69935207, \n 48.07036522, 64.46887891, 14.98020647, 17.35046801, 16.15236633, \n 14.41062231, 19.99605739, 18.31076661, 15.07058247, 12.34339267, \n 13.57621451, 14.72685201, 22.04539325, 20.47982142, 9.66768974, \n 8.05139052, 29.22924869, 3.75876894, 7.8610467, 29.20272495, \n 15.19325822, -2.38981899, 5.58349359, -0.62239018, -4.38178769, -\n 11.43651893, -20.07048519, -16.0588668, 82.30996151, 13.55424709, \n 104.49355303, -11.29628168, 82.1649906, 34.22207039, 38.08490923, -\n 10.15855131, 111.0308369, 81.78397481, 73.56334665, 81.27164139, \n 74.55979012, 16.08437955, 23.8203941, 
24.68836209, 28.73767914, \n 21.06714416, 19.44159522, 4.62135887, 3.41771413, 5.051121, -6.81834189,\n 6.40341853, -0.35693923, -17.74409367, -8.91759817, -18.05278804, \n 7.70695248, -5.52733835, -16.02924961, -4.54310111, -22.84234773, -\n 1.71908199, 39.46989193, -14.74007542, 23.59992543, -10.49966883, -\n 11.47733869, -22.8200901, -9.72486483, 95.96997763, -115.36487081, -\n 52.88924268, -90.2275069, -132.22657274, -100.52455976, -115.24052939, \n -113.84482359, -114.41088165, -114.63386688, -115.92829006, -\n 117.52597227, -114.49770514, -114.46881502, -76.26871272, -115.36487081,\n -160.01242472, -110.6429636, -77.47722955, -80.24672646, -85.90422427, \n -94.92075147, -102.44309541, -106.23741455, -111.56110193, -\n 115.13402727, -48.64043046, -60.86151946, -66.52137871, -110.04628212, \n -75.27694696, -78.87041369, -88.08700161, -90.18844188, -93.65776393, -\n 92.58976279, -107.31364843, -115.04064471, -125.98500718, -75.9341032, \n -39.45970018, -14.74007542, -23.16835763]'], {}), '([-49.25445446, -76.26871272, -37.1288192, -53.47792996, -\n 50.30711637, -63.96322125, -61.01988704, -81.2001781, -76.00646939, -\n 74.39038054, -92.23617124, -90.66976945, -89.68551411, -102.98798328, -\n 114.70036085, -29.83636082, -28.50133134, -27.86922916, -36.00619214, -\n 44.68826209, -47.64580208, -53.92619079, -59.11962858, -55.90584822, -\n 55.00772025, -48.81756715, -59.27542007, -62.13633659, -70.0673351, -\n 75.96977638, -61.6959293, -60.34564074, -81.49792285, -78.75933138, -\n 80.80533738, -85.24473008, -94.07519297, -93.75010471, -96.87089883, -\n 100.06112271, -98.86980873, -95.92330113, -107.44086722, -119.1065369, \n -120.60405905, -113.42995442, -115.94930662, -115.2398216, -\n 107.37840927, -49.25445446, -48.78386688, -108.49133002, -88.03303353, \n -44.66653428, -81.04476548, -38.26801619, -70.51178983, -69.15679931, -\n 74.74562139, -86.51133446, -27.62153029, -48.33279375, -30.0808298, -\n 113.98345018, -97.96609537, -87.9863122, -39.45970018, 
-80.1387617, -\n 42.27121388, -82.05027834, -81.55987067, -81.55987067, -107.25255717, \n 67.62695516, -3.27797047, -197.98554369, 82.30996151, 18.42180605, -\n 22.88851072, -35.75245916, -19.54788146, -18.19780517, 19.85077702, \n 20.33310282, 19.95448398, 20.55508903, 18.17428572, 17.87997374, \n 16.97323804, 16.0025885, 13.88001846, 18.42180605, -3.27797047, \n 51.43098894, 28.97695533, -53.20579538, 38.7537468, 33.48878882, \n 26.25189111, 22.54251612, 13.37141837, -5.80928302, -6.68056794, -\n 14.50860117, -15.23992093, -27.63281952, -11.66075049, -36.94595337, -\n 40.97168031, -41.2814342, -48.64456898, -61.55777751, -11.15038984, -\n 17.16482959, 55.84202839, 36.78540588, 21.18550074, 19.14658833, \n 19.22680282, 5.76327358, -47.45309937, -44.33194991, -55.15852372, \n 37.33066096, 37.64135657, 14.31598698, 4.60495737, 6.87107021, \n 18.42180605, 113.59285783, 109.06420877, 104.23416509, 99.21599973, \n 95.25204545, 90.29487934, 86.26977557, 95.28705209, 87.12907925, \n 101.40561896, 96.68858152, 92.90287952, 100.36659012, 97.19448577, \n 92.8627461, 85.01448355, 93.36767736, 96.90824009, 86.48002825, \n 88.71037964, 106.17282325, 102.56142319, 97.60004093, 99.61798574, \n 97.36337239, 94.22000798, 86.99488734, 90.05981676, 90.51189502, \n 100.7166391, 100.31931988, 67.62695516, 94.15062409, 87.77053675, \n 124.21806013, 99.23108884, 101.48199452, 92.63771423, 78.88723272, \n 72.7261356, 80.58682246, 73.30258213, 70.20783518, 60.57963211, -\n 87.72265602, -148.10933308, -150.41334959, -144.12558375, -145.5625388,\n -132.09479688, -135.12980144, -121.10883695, -143.75755221, -\n 117.73616176, -115.28563276, -138.79652905, -143.10405603, -\n 151.78419035, -159.75299736, -149.69457229, -175.20332448, -\n 181.00970647, -188.86536942, -176.88178468, -194.20978527, -\n 204.54944453, -161.04413103, -197.98554369, -96.74089367, -133.49237232,\n -84.71198922, -164.97719097, -202.48241157, -74.54550169, -147.37402934,\n -144.64074441, -147.94282804, -122.80976842, 
-133.1671346, -136.3051809,\n -113.93174768, -151.02125407, -146.5198829, -156.19720713, -\n 126.06138725, -131.44422964, -197.62591198, -204.42320856, -\n 149.84576063, -121.56474664, -130.99947339, -148.41767074, -\n 145.28448367, 104.58903799, 82.1649906, 67.69977397, 39.46989193, -\n 69.00949731, -133.49237232, -128.264754, -84.71198922, -108.49133002, \n 119.86128724, 122.73556155, 126.28254009, 125.12436373, 123.32498578, \n 123.8337768, 121.39931427, 121.48412837, 122.03669249, 122.59675818, \n 119.54338365, 120.33961222, 120.69581745, 116.96928355, 117.6687724, \n 116.62277942, 115.39650689, 112.60751523, 109.82643069, 108.2500678, \n 130.9143614, 126.50049543, 112.76229057, 132.76840098, 107.27099883, \n 128.16063464, 123.83157143, 120.46711628, 112.55756637, 135.59953867, \n 136.66138116, 136.98573162, 134.04528777, 116.27744752, 129.2794577, \n 119.13550981, 124.67196321, 130.9728774, 130.9039439, 128.70028371, \n 130.04592892, 140.21819548, 140.60370422, 113.37585901, 123.21523323, \n 123.88149248, 128.56894995, 128.45186255, 118.74080853, 126.71189149, \n 119.79833338, 130.00866791, -160.01242472, 13.55424709, 110.26938756, \n 97.71987778, 110.93671325, 108.71965725, 105.03432063, 106.36049687, \n 99.27569343, 115.06168146, 77.00378531, 81.50139192, 92.15605815, \n 79.94311644, 83.16892433, 52.23389149, 50.97110177, 67.95167063, \n 63.43930833, 40.20494692, 43.22936492, 47.21513635, 38.94380012, \n 53.85489136, 56.69935207, 48.07036522, 64.46887891, 14.98020647, \n 17.35046801, 16.15236633, 14.41062231, 19.99605739, 18.31076661, \n 15.07058247, 12.34339267, 13.57621451, 14.72685201, 22.04539325, \n 20.47982142, 9.66768974, 8.05139052, 29.22924869, 3.75876894, 7.8610467,\n 29.20272495, 15.19325822, -2.38981899, 5.58349359, -0.62239018, -\n 4.38178769, -11.43651893, -20.07048519, -16.0588668, 82.30996151, \n 13.55424709, 104.49355303, -11.29628168, 82.1649906, 34.22207039, \n 38.08490923, -10.15855131, 111.0308369, 81.78397481, 73.56334665, \n 
81.27164139, 74.55979012, 16.08437955, 23.8203941, 24.68836209, \n 28.73767914, 21.06714416, 19.44159522, 4.62135887, 3.41771413, 5.051121,\n -6.81834189, 6.40341853, -0.35693923, -17.74409367, -8.91759817, -\n 18.05278804, 7.70695248, -5.52733835, -16.02924961, -4.54310111, -\n 22.84234773, -1.71908199, 39.46989193, -14.74007542, 23.59992543, -\n 10.49966883, -11.47733869, -22.8200901, -9.72486483, 95.96997763, -\n 115.36487081, -52.88924268, -90.2275069, -132.22657274, -100.52455976, \n -115.24052939, -113.84482359, -114.41088165, -114.63386688, -\n 115.92829006, -117.52597227, -114.49770514, -114.46881502, -76.26871272,\n -115.36487081, -160.01242472, -110.6429636, -77.47722955, -80.24672646,\n -85.90422427, -94.92075147, -102.44309541, -106.23741455, -111.56110193,\n -115.13402727, -48.64043046, -60.86151946, -66.52137871, -110.04628212,\n -75.27694696, -78.87041369, -88.08700161, -90.18844188, -93.65776393, -\n 92.58976279, -107.31364843, -115.04064471, -125.98500718, -75.9341032, \n -39.45970018, -14.74007542, -23.16835763])\n', (40334, 46518), True, 'import numpy as np\n'), ((46960, 51845), 'numpy.array', 'np.array', (['[5.38783354, 32.4020918, 0.0, 9.61130904, 6.44049545, 20.09660033, \n 17.15326613, 37.33355718, 32.13984847, 30.52375962, 48.36955032, \n 46.80314854, 45.81889319, 59.12136236, 70.83373993, 0.0, 0.0, 0.0, 0.0,\n 0.82164117, 3.77918116, 10.05956987, 15.25300766, 12.0392273, \n 11.14109933, 4.95094623, 15.40879915, 18.26971567, 26.20071419, \n 32.10315546, 17.82930838, 16.47901983, 37.63130193, 34.89271046, \n 36.93871646, 41.37810916, 50.20857205, 49.88348379, 53.00427791, \n 56.19450179, 55.00318781, 52.05668021, 63.5742463, 75.23991598, \n 76.73743813, 69.5633335, 72.0826857, 71.37320068, 63.51178836, \n 5.38783354, 4.91724596, 64.6247091, 44.16641261, 0.79991336, \n 37.17814456, 0.0, 26.64516892, 25.2901784, 30.87900047, 42.64471355, \n 0.0, 4.46617283, 0.0, 70.11682926, 54.09947445, 44.11969128, 0.0, \n 36.27214079, 0.0, 38.18365743, 
37.69324975, 37.69324975, 63.38593626, \n 31.95985109, 0.0, 154.11892278, 46.64285745, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 15.76388487, 0.0,\n 9.33917446, 3.08664273, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 4.77794806, 17.69115659, 0.0, 0.0, 20.17492433, \n 1.11830182, 0.0, 0.0, 0.0, 0.0, 3.58647845, 0.46532899, 11.2919028, \n 1.6635569, 1.97425251, 0.0, 0.0, 0.0, 0.0, 77.92575377, 73.39710471, \n 68.56706103, 63.54889567, 59.58494138, 54.62777528, 50.6026715, \n 59.61994802, 51.46197518, 65.7385149, 61.02147746, 57.23577546, \n 64.69948606, 61.52738171, 57.19564204, 49.34737949, 57.7005733, \n 61.24113602, 50.81292419, 53.04327558, 70.50571919, 66.89431913, \n 61.93293686, 63.95088168, 61.69626833, 58.55290391, 51.32778327, \n 54.3927127, 54.84479095, 65.04953504, 64.65221582, 31.95985109, \n 58.48352003, 52.10343269, 88.55095607, 63.56398477, 65.81489046, \n 56.97061016, 43.22012866, 37.05903154, 44.9197184, 37.63547806, \n 34.54073112, 24.91252804, 43.85603511, 104.24271216, 106.54672867, \n 100.25896283, 101.69591788, 88.22817597, 91.26318052, 77.24221603, \n 99.89093129, 73.86954084, 71.41901185, 94.92990813, 99.23743511, \n 107.91756944, 115.88637645, 105.82795138, 131.33670356, 137.14308555, \n 144.9987485, 133.01516376, 150.34316435, 160.68282361, 117.17751011, \n 154.11892278, 52.87427275, 89.6257514, 40.8453683, 121.11057005, \n 158.61579065, 30.67888078, 103.50740842, 100.77412349, 104.07620713, \n 78.9431475, 89.30051368, 92.43855998, 70.06512676, 107.15463315, \n 102.65326198, 112.33058622, 82.19476634, 87.57760872, 153.75929106, \n 160.55658764, 105.97913971, 77.69812572, 87.13285248, 104.55104982, \n 101.41786275, 68.92193392, 46.49788654, 32.0326699, 3.80278787, \n 25.14287639, 89.6257514, 84.39813309, 40.8453683, 64.6247091, \n 84.19418317, 87.06845748, 90.61543602, 89.45725966, 87.65788171, \n 88.16667274, 85.73221021, 85.81702431, 86.36958842, 86.92965411, \n 83.87627959, 
84.67250815, 85.02871339, 81.30217949, 82.00166833, \n 80.95567535, 79.72940282, 76.94041117, 74.15932662, 72.58296373, \n 95.24725733, 90.83339137, 77.0951865, 97.10129692, 71.60389476, \n 92.49353057, 88.16446736, 84.80001222, 76.89046231, 99.93243461, \n 100.9942771, 101.31862755, 98.37818371, 80.61034346, 93.61235363, \n 83.46840575, 89.00485915, 95.30577334, 95.23683984, 93.03317965, \n 94.37882485, 104.55109142, 104.93660016, 77.70875494, 87.54812917, \n 88.21438842, 92.90184589, 92.78475848, 83.07370447, 91.04478743, \n 84.13122931, 94.34156384, 116.14580381, 0.0, 74.60228349, 62.05277372, \n 75.26960919, 73.05255319, 69.36721657, 70.69339281, 63.60858937, \n 79.3945774, 41.33668124, 45.83428785, 56.48895409, 44.27601238, \n 47.50182027, 16.56678743, 15.30399771, 32.28456656, 27.77220427, \n 4.53784286, 7.56226086, 11.54803229, 3.27669605, 18.1877873, 21.032248,\n 12.40326116, 28.80177485, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 46.64285745, 0.0, 68.82644897, 0.0, 46.49788654, 0.0, \n 2.41780516, 0.0, 75.36373283, 46.11687074, 37.89624258, 45.60453732, \n 38.89268605, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.80278787, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 60.30287357, 71.49824989, 9.02262176, 46.36088598, \n 88.35995182, 56.65793884, 71.37390848, 69.97820268, 70.54426073, \n 70.76724596, 72.06166914, 73.65935135, 70.63108422, 70.6021941, \n 32.4020918, 71.49824989, 116.14580381, 66.77634268, 33.61060864, \n 36.38010555, 42.03760335, 51.05413055, 58.57647449, 62.37079364, \n 67.69448101, 71.26740635, 4.77380954, 16.99489854, 22.65475779, \n 66.1796612, 31.41032604, 35.00379277, 44.22038069, 46.32182096, \n 49.79114301, 48.72314188, 63.44702751, 71.1740238, 82.11838626, \n 32.06748228, 0.0, 0.0, 0.0]'], {}), '([5.38783354, 32.4020918, 0.0, 9.61130904, 6.44049545, 20.09660033,\n 17.15326613, 37.33355718, 
32.13984847, 30.52375962, 48.36955032, \n 46.80314854, 45.81889319, 59.12136236, 70.83373993, 0.0, 0.0, 0.0, 0.0,\n 0.82164117, 3.77918116, 10.05956987, 15.25300766, 12.0392273, \n 11.14109933, 4.95094623, 15.40879915, 18.26971567, 26.20071419, \n 32.10315546, 17.82930838, 16.47901983, 37.63130193, 34.89271046, \n 36.93871646, 41.37810916, 50.20857205, 49.88348379, 53.00427791, \n 56.19450179, 55.00318781, 52.05668021, 63.5742463, 75.23991598, \n 76.73743813, 69.5633335, 72.0826857, 71.37320068, 63.51178836, \n 5.38783354, 4.91724596, 64.6247091, 44.16641261, 0.79991336, \n 37.17814456, 0.0, 26.64516892, 25.2901784, 30.87900047, 42.64471355, \n 0.0, 4.46617283, 0.0, 70.11682926, 54.09947445, 44.11969128, 0.0, \n 36.27214079, 0.0, 38.18365743, 37.69324975, 37.69324975, 63.38593626, \n 31.95985109, 0.0, 154.11892278, 46.64285745, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 15.76388487, 0.0,\n 9.33917446, 3.08664273, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 4.77794806, 17.69115659, 0.0, 0.0, 20.17492433, \n 1.11830182, 0.0, 0.0, 0.0, 0.0, 3.58647845, 0.46532899, 11.2919028, \n 1.6635569, 1.97425251, 0.0, 0.0, 0.0, 0.0, 77.92575377, 73.39710471, \n 68.56706103, 63.54889567, 59.58494138, 54.62777528, 50.6026715, \n 59.61994802, 51.46197518, 65.7385149, 61.02147746, 57.23577546, \n 64.69948606, 61.52738171, 57.19564204, 49.34737949, 57.7005733, \n 61.24113602, 50.81292419, 53.04327558, 70.50571919, 66.89431913, \n 61.93293686, 63.95088168, 61.69626833, 58.55290391, 51.32778327, \n 54.3927127, 54.84479095, 65.04953504, 64.65221582, 31.95985109, \n 58.48352003, 52.10343269, 88.55095607, 63.56398477, 65.81489046, \n 56.97061016, 43.22012866, 37.05903154, 44.9197184, 37.63547806, \n 34.54073112, 24.91252804, 43.85603511, 104.24271216, 106.54672867, \n 100.25896283, 101.69591788, 88.22817597, 91.26318052, 77.24221603, \n 99.89093129, 73.86954084, 71.41901185, 94.92990813, 99.23743511, \n 107.91756944, 
115.88637645, 105.82795138, 131.33670356, 137.14308555, \n 144.9987485, 133.01516376, 150.34316435, 160.68282361, 117.17751011, \n 154.11892278, 52.87427275, 89.6257514, 40.8453683, 121.11057005, \n 158.61579065, 30.67888078, 103.50740842, 100.77412349, 104.07620713, \n 78.9431475, 89.30051368, 92.43855998, 70.06512676, 107.15463315, \n 102.65326198, 112.33058622, 82.19476634, 87.57760872, 153.75929106, \n 160.55658764, 105.97913971, 77.69812572, 87.13285248, 104.55104982, \n 101.41786275, 68.92193392, 46.49788654, 32.0326699, 3.80278787, \n 25.14287639, 89.6257514, 84.39813309, 40.8453683, 64.6247091, \n 84.19418317, 87.06845748, 90.61543602, 89.45725966, 87.65788171, \n 88.16667274, 85.73221021, 85.81702431, 86.36958842, 86.92965411, \n 83.87627959, 84.67250815, 85.02871339, 81.30217949, 82.00166833, \n 80.95567535, 79.72940282, 76.94041117, 74.15932662, 72.58296373, \n 95.24725733, 90.83339137, 77.0951865, 97.10129692, 71.60389476, \n 92.49353057, 88.16446736, 84.80001222, 76.89046231, 99.93243461, \n 100.9942771, 101.31862755, 98.37818371, 80.61034346, 93.61235363, \n 83.46840575, 89.00485915, 95.30577334, 95.23683984, 93.03317965, \n 94.37882485, 104.55109142, 104.93660016, 77.70875494, 87.54812917, \n 88.21438842, 92.90184589, 92.78475848, 83.07370447, 91.04478743, \n 84.13122931, 94.34156384, 116.14580381, 0.0, 74.60228349, 62.05277372, \n 75.26960919, 73.05255319, 69.36721657, 70.69339281, 63.60858937, \n 79.3945774, 41.33668124, 45.83428785, 56.48895409, 44.27601238, \n 47.50182027, 16.56678743, 15.30399771, 32.28456656, 27.77220427, \n 4.53784286, 7.56226086, 11.54803229, 3.27669605, 18.1877873, 21.032248,\n 12.40326116, 28.80177485, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 46.64285745, 0.0, 68.82644897, 0.0, 46.49788654, 0.0, \n 2.41780516, 0.0, 75.36373283, 46.11687074, 37.89624258, 45.60453732, \n 38.89268605, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.80278787, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 60.30287357, 71.49824989, 9.02262176, 46.36088598, \n 88.35995182, 56.65793884, 71.37390848, 69.97820268, 70.54426073, \n 70.76724596, 72.06166914, 73.65935135, 70.63108422, 70.6021941, \n 32.4020918, 71.49824989, 116.14580381, 66.77634268, 33.61060864, \n 36.38010555, 42.03760335, 51.05413055, 58.57647449, 62.37079364, \n 67.69448101, 71.26740635, 4.77380954, 16.99489854, 22.65475779, \n 66.1796612, 31.41032604, 35.00379277, 44.22038069, 46.32182096, \n 49.79114301, 48.72314188, 63.44702751, 71.1740238, 82.11838626, \n 32.06748228, 0.0, 0.0, 0.0])\n', (46968, 51845), True, 'import numpy as np\n'), ((52333, 52380), 'shakelib.distance.get_distance', 'get_distance', (['dist_types', 'slat', 'slon', 'sdep', 'rup'], {}), '(dist_types, slat, slon, sdep, rup)\n', (52345, 52380), False, 'from shakelib.distance import get_distance\n'), ((52386, 52453), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['nga_repi', "dists['repi']"], {'rtol': '(0)', 'atol': '(2)'}), "(nga_repi, dists['repi'], rtol=0, atol=2)\n", (52412, 52453), True, 'import numpy as np\n'), ((52468, 52537), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['nga_rhypo', "dists['rhypo']"], {'rtol': '(0)', 'atol': '(2)'}), "(nga_rhypo, dists['rhypo'], rtol=0, atol=2)\n", (52494, 52537), True, 'import numpy as np\n'), ((52552, 52617), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['nga_rjb', "dists['rjb']"], {'rtol': '(0)', 'atol': '(2)'}), "(nga_rjb, dists['rjb'], rtol=0, atol=2)\n", (52578, 52617), True, 'import numpy as np\n'), ((52632, 52699), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['nga_rrup', "dists['rrup']"], {'rtol': '(0)', 'atol': '(2)'}), "(nga_rrup, dists['rrup'], rtol=0, atol=2)\n", (52658, 52699), True, 'import numpy as np\n'), ((52714, 52777), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['nga_rx', 
"dists['rx']"], {'rtol': '(0)', 'atol': '(2)'}), "(nga_rx, dists['rx'], rtol=0, atol=2)\n", (52740, 52777), True, 'import numpy as np\n'), ((52792, 52856), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['test_ry', "dists['ry']"], {'rtol': '(0)', 'atol': '(2)'}), "(test_ry, dists['ry'], rtol=0, atol=2)\n", (52818, 52856), True, 'import numpy as np\n'), ((52871, 52937), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['test_ry0', "dists['ry0']"], {'rtol': '(0)', 'atol': '(2)'}), "(test_ry0, dists['ry0'], rtol=0, atol=2)\n", (52897, 52937), True, 'import numpy as np\n'), ((52952, 53013), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['nga_U', "dists['U']"], {'rtol': '(0)', 'atol': '(6)'}), "(nga_U, dists['U'], rtol=0, atol=6)\n", (52978, 53013), True, 'import numpy as np\n'), ((53028, 53089), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['nga_T', "dists['T']"], {'rtol': '(0)', 'atol': '(2)'}), "(nga_T, dists['T'], rtol=0, atol=2)\n", (53054, 53089), True, 'import numpy as np\n'), ((8333, 8357), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8346, 8357), False, 'import pytest\n'), ((8380, 8427), 'shakelib.distance.Distance.fromSites', 'Distance.fromSites', (['gmpelist', 'origin', 'site', 'rup'], {}), '(gmpelist, origin, site, rup)\n', (8398, 8427), False, 'from shakelib.distance import Distance\n'), ((8445, 8465), 'openquake.hazardlib.gsim.abrahamson_2014.AbrahamsonEtAl2014', 'AbrahamsonEtAl2014', ([], {}), '()\n', (8463, 8465), False, 'from openquake.hazardlib.gsim.abrahamson_2014 import AbrahamsonEtAl2014\n'), ((8589, 8613), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8602, 8613), False, 'import pytest\n'), ((8824, 8848), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8837, 8848), False, 'import pytest\n'), ((9027, 9051), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (9040, 9051), False, 
'import pytest\n'), ((9074, 9127), 'shakelib.distance.Distance', 'Distance', (['[None]', '[-118.2]', '[34.1]', '[1]'], {'rupture': 'None'}), '([None], [-118.2], [34.1], [1], rupture=None)\n', (9082, 9127), False, 'from shakelib.distance import Distance\n'), ((11026, 11086), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjb', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjb, dctx.rjb, rtol=0, atol=0.01)\n', (11052, 11086), True, 'import numpy as np\n'), ((11897, 11959), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rrup', 'dctx.rrup'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rrup, dctx.rrup, rtol=0, atol=0.01)\n', (11923, 11959), True, 'import numpy as np\n'), ((13185, 13246), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjbt', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjbt, dctx.rjb, rtol=0, atol=0.01)\n', (13211, 13246), True, 'import numpy as np\n'), ((14511, 14571), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjb', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjb, dctx.rjb, rtol=0, atol=0.01)\n', (14537, 14571), True, 'import numpy as np\n'), ((15382, 15444), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rrup', 'dctx.rrup'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rrup, dctx.rrup, rtol=0, atol=0.01)\n', (15408, 15444), True, 'import numpy as np\n'), ((16708, 16768), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjb', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjb, dctx.rjb, rtol=0, atol=0.01)\n', (16734, 16768), True, 'import numpy as np\n'), ((17577, 17639), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rrup', 'dctx.rrup'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rrup, dctx.rrup, rtol=0, atol=0.01)\n', (17603, 17639), True, 'import numpy as np\n'), ((18903, 18963), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjb', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjb, 
dctx.rjb, rtol=0, atol=0.01)\n', (18929, 18963), True, 'import numpy as np\n'), ((19776, 19838), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rrup', 'dctx.rrup'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rrup, dctx.rrup, rtol=0, atol=0.01)\n', (19802, 19838), True, 'import numpy as np\n'), ((21102, 21162), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjb', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjb, dctx.rjb, rtol=0, atol=0.01)\n', (21128, 21162), True, 'import numpy as np\n'), ((21981, 22043), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rrup', 'dctx.rrup'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rrup, dctx.rrup, rtol=0, atol=0.01)\n', (22007, 22043), True, 'import numpy as np\n'), ((23309, 23369), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjb', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjb, dctx.rjb, rtol=0, atol=0.01)\n', (23335, 23369), True, 'import numpy as np\n'), ((24184, 24246), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rrup', 'dctx.rrup'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rrup, dctx.rrup, rtol=0, atol=0.01)\n', (24210, 24246), True, 'import numpy as np\n'), ((25510, 25570), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjb', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjb, dctx.rjb, rtol=0, atol=0.01)\n', (25536, 25570), True, 'import numpy as np\n'), ((26381, 26443), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rrup', 'dctx.rrup'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rrup, dctx.rrup, rtol=0, atol=0.01)\n', (26407, 26443), True, 'import numpy as np\n'), ((27707, 27767), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjb', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjb, dctx.rjb, rtol=0, atol=0.01)\n', (27733, 27767), True, 'import numpy as np\n'), ((28582, 28644), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rrup', 'dctx.rrup'], 
{'rtol': '(0)', 'atol': '(0.01)'}), '(rrup, dctx.rrup, rtol=0, atol=0.01)\n', (28608, 28644), True, 'import numpy as np\n'), ((29908, 29968), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rjb', 'dctx.rjb'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rjb, dctx.rjb, rtol=0, atol=0.01)\n', (29934, 29968), True, 'import numpy as np\n'), ((30785, 30847), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rrup', 'dctx.rrup'], {'rtol': '(0)', 'atol': '(0.01)'}), '(rrup, dctx.rrup, rtol=0, atol=0.01)\n', (30811, 30847), True, 'import numpy as np\n'), ((31874, 31894), 'openquake.hazardlib.gsim.abrahamson_2014.AbrahamsonEtAl2014', 'AbrahamsonEtAl2014', ([], {}), '()\n', (31892, 31894), False, 'from openquake.hazardlib.gsim.abrahamson_2014 import AbrahamsonEtAl2014\n'), ((31896, 31923), 'openquake.hazardlib.gsim.berge_thierry_2003.BergeThierryEtAl2003SIGMA', 'BergeThierryEtAl2003SIGMA', ([], {}), '()\n', (31921, 31923), False, 'from openquake.hazardlib.gsim.berge_thierry_2003 import BergeThierryEtAl2003SIGMA\n'), ((2739, 2758), 'numpy.nanmin', 'np.nanmin', (['rup._lat'], {}), '(rup._lat)\n', (2748, 2758), True, 'import numpy as np\n'), ((2766, 2785), 'numpy.nanmax', 'np.nanmax', (['rup._lat'], {}), '(rup._lat)\n', (2775, 2785), True, 'import numpy as np\n'), ((2819, 2838), 'numpy.nanmin', 'np.nanmin', (['rup._lon'], {}), '(rup._lon)\n', (2828, 2838), True, 'import numpy as np\n'), ((2846, 2865), 'numpy.nanmax', 'np.nanmax', (['rup._lon'], {}), '(rup._lon)\n', (2855, 2865), True, 'import numpy as np\n'), ((8704, 8728), 'numpy.zeros_like', 'np.zeros_like', (['sctx.lons'], {}), '(sctx.lons)\n', (8717, 8728), True, 'import numpy as np\n'), ((8946, 8970), 'numpy.zeros_like', 'np.zeros_like', (['sctx.lons'], {}), '(sctx.lons)\n', (8959, 8970), True, 'import numpy as np\n'), ((2639, 2650), 'time.time', 'time.time', ([], {}), '()\n', (2648, 2650), False, 'import time\n'), ((3004, 3022), 'numpy.isnan', 'np.isnan', (['rup._lon'], {}), '(rup._lon)\n', 
(3012, 3022), True, 'import numpy as np\n'), ((3057, 3075), 'numpy.isnan', 'np.isnan', (['rup._lat'], {}), '(rup._lat)\n', (3065, 3075), True, 'import numpy as np\n'), ((8254, 8265), 'time.time', 'time.time', ([], {}), '()\n', (8263, 8265), False, 'import time\n'), ((9387, 9398), 'time.time', 'time.time', ([], {}), '()\n', (9396, 9398), False, 'import time\n'), ((31740, 31751), 'time.time', 'time.time', ([], {}), '()\n', (31749, 31751), False, 'import time\n'), ((39607, 39618), 'time.time', 'time.time', ([], {}), '()\n', (39616, 39618), False, 'import time\n'), ((2220, 2231), 'time.time', 'time.time', ([], {}), '()\n', (2229, 2231), False, 'import time\n'), ((7905, 7916), 'time.time', 'time.time', ([], {}), '()\n', (7914, 7916), False, 'import time\n')] |
import numpy as np
import os
import torch
import cv2
from torch.utils.data import Dataset
class OnTheFlySMPLTrainDataset(Dataset):
    """Synthetic SMPL training dataset assembled on the fly.

    Each item pairs a SMPL pose with a randomly drawn body texture and a
    randomly drawn LSUN background crop, so training renders can be generated
    on demand instead of being pre-baked to disk.
    """

    def __init__(self,
                 poses_path,
                 textures_path,
                 backgrounds_dir_path,
                 params_from='all',
                 grey_tex_prob=0.05,
                 img_wh=256):
        """
        Args:
            poses_path: .npz archive with 'fnames' and 'poses' arrays.
            textures_path: .npz archive with 'grey' and 'nongrey' texture stacks.
            backgrounds_dir_path: directory of LSUN .jpg background images.
            params_from: which source datasets to keep poses from — one of
                'all', 'h36m', 'up3d', '3dpw', 'amass', 'not_amass'.
            grey_tex_prob: probability of sampling a grey texture.
            img_wh: side length backgrounds are resized to (square).
        """
        assert params_from in ['all', 'h36m', 'up3d', '3dpw', 'amass', 'not_amass']

        # Load SMPL poses, optionally filtered by their source dataset.
        data = np.load(poses_path)
        self.fnames = data['fnames']
        self.poses = data['poses']
        if params_from != 'all':
            def is_real(fname):
                # h36m/up3d/3dpw entries, i.e. everything that is not AMASS.
                return (fname.startswith('h36m') or fname.startswith('up3d')
                        or fname.startswith('3dpw'))
            if params_from == 'not_amass':
                keep = is_real
            elif params_from == 'amass':
                keep = lambda fname: not is_real(fname)
            else:
                keep = lambda fname: fname.startswith(params_from)
            indices = [i for i, fname in enumerate(self.fnames) if keep(fname)]
            self.fnames = [self.fnames[i] for i in indices]
            self.poses = [self.poses[i] for i in indices]
        self.poses = np.stack(self.poses, axis=0)

        # Load SMPL textures.
        textures = np.load(textures_path)
        self.grey_textures = textures['grey']
        self.nongrey_textures = textures['nongrey']
        self.grey_tex_prob = grey_tex_prob

        # Load LSUN background image paths.
        self.backgrounds_paths = sorted([os.path.join(backgrounds_dir_path, f)
                                         for f in os.listdir(backgrounds_dir_path)
                                         if f.endswith('.jpg')])
        self.img_wh = img_wh

    def __len__(self):
        return len(self.poses)

    def __getitem__(self, index):
        """Return a dict with 'pose', 'texture' and 'background' float tensors.

        ``index`` may be an int, a list of ints, or a tensor thereof; with a
        list, texture/background stacks carry a leading num-samples axis.
        """
        if torch.is_tensor(index):
            index = index.tolist()
        num_samples = len(index) if isinstance(index, list) else 1

        pose = torch.from_numpy(self.poses[index].astype(np.float32))
        sample = {'pose': pose}

        # Randomly sample texture(s): grey with probability grey_tex_prob.
        texture_samples = []
        for _ in range(num_samples):
            if torch.rand(1).item() < self.grey_tex_prob:
                bank = self.grey_textures
            else:
                bank = self.nongrey_textures
            tex_idx = torch.randint(low=0, high=len(bank), size=(1,)).item()
            texture_samples.append(bank[tex_idx])
        texture_samples = np.stack(texture_samples, axis=0).squeeze()
        assert texture_samples.shape[-3:] == (1200, 800, 3), \
            "Texture shape is wrong: {}".format(texture_samples.shape)
        sample['texture'] = torch.from_numpy(texture_samples / 255.).float()  # (1200, 800, 3) or (num samples, 1200, 800, 3)

        # Randomly sample RGB background(s), resized to (img_wh, img_wh), CHW.
        bg_samples = []
        for _ in range(num_samples):
            bg_idx = torch.randint(low=0, high=len(self.backgrounds_paths), size=(1,)).item()
            bg_path = self.backgrounds_paths[bg_idx]
            background = cv2.cvtColor(cv2.imread(bg_path), cv2.COLOR_BGR2RGB)
            background = cv2.resize(background, (self.img_wh, self.img_wh),
                                    interpolation=cv2.INTER_LINEAR)
            bg_samples.append(background.transpose(2, 0, 1))
        bg_samples = np.stack(bg_samples, axis=0).squeeze()
        # Bug fix: the failure message previously formatted
        # sample['background'].shape, but that key is only assigned below, so
        # a failing assert raised KeyError instead of the intended message.
        assert bg_samples.shape[-3:] == (3, self.img_wh, self.img_wh), \
            "BG shape is wrong: {}".format(bg_samples.shape)
        sample['background'] = torch.from_numpy(bg_samples / 255.).float()  # (3, img_wh, img_wh) or (num samples, 3, img_wh, img_wh)
        return sample
| [
"os.listdir",
"os.path.join",
"torch.from_numpy",
"numpy.stack",
"torch.is_tensor",
"cv2.resize",
"numpy.load",
"cv2.imread",
"torch.rand"
] | [((486, 505), 'numpy.load', 'np.load', (['poses_path'], {}), '(poses_path)\n', (493, 505), True, 'import numpy as np\n'), ((1544, 1572), 'numpy.stack', 'np.stack', (['self.poses'], {'axis': '(0)'}), '(self.poses, axis=0)\n', (1552, 1572), True, 'import numpy as np\n'), ((1622, 1644), 'numpy.load', 'np.load', (['textures_path'], {}), '(textures_path)\n', (1629, 1644), True, 'import numpy as np\n'), ((2176, 2198), 'torch.is_tensor', 'torch.is_tensor', (['index'], {}), '(index)\n', (2191, 2198), False, 'import torch\n'), ((3680, 3767), 'cv2.resize', 'cv2.resize', (['background', '(self.img_wh, self.img_wh)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(background, (self.img_wh, self.img_wh), interpolation=cv2.\n INTER_LINEAR)\n', (3690, 3767), False, 'import cv2\n'), ((1860, 1897), 'os.path.join', 'os.path.join', (['backgrounds_dir_path', 'f'], {}), '(backgrounds_dir_path, f)\n', (1872, 1897), False, 'import os\n'), ((3024, 3057), 'numpy.stack', 'np.stack', (['texture_samples'], {'axis': '(0)'}), '(texture_samples, axis=0)\n', (3032, 3057), True, 'import numpy as np\n'), ((3216, 3257), 'torch.from_numpy', 'torch.from_numpy', (['(texture_samples / 255.0)'], {}), '(texture_samples / 255.0)\n', (3232, 3257), False, 'import torch\n'), ((3615, 3634), 'cv2.imread', 'cv2.imread', (['bg_path'], {}), '(bg_path)\n', (3625, 3634), False, 'import cv2\n'), ((3881, 3909), 'numpy.stack', 'np.stack', (['bg_samples'], {'axis': '(0)'}), '(bg_samples, axis=0)\n', (3889, 3909), True, 'import numpy as np\n'), ((4081, 4117), 'torch.from_numpy', 'torch.from_numpy', (['(bg_samples / 255.0)'], {}), '(bg_samples / 255.0)\n', (4097, 4117), False, 'import torch\n'), ((1948, 1980), 'os.listdir', 'os.listdir', (['backgrounds_dir_path'], {}), '(backgrounds_dir_path)\n', (1958, 1980), False, 'import os\n'), ((2589, 2602), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2599, 2602), False, 'import torch\n')] |
from .base import ComputationInterface
from progress.bar import ChargingBar
import numpy as np
import tensorflow as tf
import os
class TensorflowWrapper(ComputationInterface):
    """ComputationInterface backend built on TensorFlow 1.x graph sessions.

    Element groups (named tensors/variables) are persisted under
    ``<model_path>/<group_name>/saved.ckpt`` via ``tf.train.Saver``.
    """

    def load_elements(self, group_name, model_wrapper, experiment=None):
        """Restore a previously stored element group into a fresh session.

        NOTE(review): if the checkpoint directory does not exist yet it is
        created *empty* and the subsequent restore will fail; likewise
        ``tf.train.Saver()`` raises when the default graph holds no
        variables. Presumably this is only called after a matching
        ``store_elements`` in the same process — confirm with callers.
        """
        save_path = os.path.join(model_wrapper.model_path, group_name, "saved.ckpt")
        if not os.path.isdir(save_path):
            os.makedirs(save_path)
        with tf.Session() as session:
            saver = tf.train.Saver()
            saver.restore(session, save_path)

    def store_elements(self, elements, group_name, model_wrapper, use_saver=False, experiment=None):
        """Persist ``elements`` for ``group_name`` and return the dict
        (raw values are replaced in place by ``tf.Variable`` wrappers).

        NOTE(review): when ``use_saver`` is False the base-class store is
        invoked, but execution then *falls through* to the Saver path as
        well — a ``return`` after the super() call may be missing. Confirm
        the intended semantics before relying on ``use_saver=False``.
        """
        if not use_saver:
            super(TensorflowWrapper, self).store_elements(elements=elements, group_name=group_name,
                                                          model_wrapper=model_wrapper)
        save_path = os.path.join(model_wrapper.model_path, group_name, "saved.ckpt")
        if not os.path.isdir(save_path):
            os.makedirs(save_path)
        with tf.Session() as session:
            for name, var in elements.items():
                if not isinstance(var, tf.Variable):
                    # Wrap raw values so tf.train.Saver can checkpoint them.
                    elements[name] = tf.Variable(var)
                else:
                    # Existing variables only need initializing in this session.
                    var.initializer.run()
                    var.op.run()
            saver = tf.train.Saver(elements)
            saver.save(session, save_path)
        return elements

    def calc_inter_layer_covariance(self, model_wrapper, use_training_data=True, batch_size=-1, **options):
        """Compute per-layer activation covariance matrices and their
        eigenvalues, then persist them as group "inter_layer_covariance".

        Args:
            model_wrapper: wrapper exposing ``dataset``, ``layers()`` and
                ``get_layer_outputs``; ``model_type`` distinguishes chainer.
            use_training_data: use the train split instead of the test split.
            batch_size: if > 0, compute on a random subsample of that size.
        """
        is_chainer = model_wrapper.model_type == "chainer"
        train, test = model_wrapper.dataset
        data_x = train if use_training_data else test
        # Extract the input arrays; chainer datasets are laid out with the
        # sample axis last, hence the moveaxis before taking component 0.
        if is_chainer:
            data_x = np.moveaxis(data_x, -1, 0)[0]
        else:
            data_x = data_x[0]
            data_x = np.stack(data_x, axis=0)
        data_size = n = len(data_x)
        if batch_size > 0:
            # Random subsample without replacement.
            perm = np.random.permutation(data_size)
            data_x = data_x[perm[0:batch_size]]
            n = batch_size
        n_layers = len(model_wrapper.layers())
        # NOTE(review): bar.finish() is never called after the loop.
        bar = ChargingBar("Calculating inter layer covariance", max=n_layers)
        layer_outputs = model_wrapper.get_layer_outputs(data_x)
        to_save = {}
        for l, layer_output in enumerate(layer_outputs):
            if is_chainer:
                layer_output = layer_output.data
            flat_shape = layer_output[0].flatten().shape[0]
            # Accumulate the sum of outer products g g^T over all samples.
            sigma = tf.zeros(shape=(flat_shape, flat_shape), dtype=tf.float32, name="sigma%d" % l)
            for output in layer_output:
                g = tf.constant(output.flatten())
                sigma += tf.einsum('i,j->ij', g, g)
            # Unbiased normalization by (n - 1); stored as a Variable for the Saver.
            sigma = tf.Variable(1 / (n - 1) * sigma, name="sigma%d" % l)
            eigen_values = tf.self_adjoint_eigvals(sigma, name="eigen_values%d" % l)
            to_save["sigma%d" % l] = sigma
            to_save["eigen_values%d" % l] = tf.Variable(eigen_values)
            bar.next()
        self.store_elements(group_name="inter_layer_covariance",
                            elements=to_save, model_wrapper=model_wrapper)
| [
"progress.bar.ChargingBar",
"os.makedirs",
"tensorflow.Variable",
"tensorflow.Session",
"tensorflow.train.Saver",
"os.path.join",
"tensorflow.einsum",
"numpy.stack",
"os.path.isdir",
"numpy.moveaxis",
"tensorflow.self_adjoint_eigvals",
"tensorflow.zeros",
"numpy.random.permutation"
] | [((272, 336), 'os.path.join', 'os.path.join', (['model_wrapper.model_path', 'group_name', '"""saved.ckpt"""'], {}), "(model_wrapper.model_path, group_name, 'saved.ckpt')\n", (284, 336), False, 'import os\n'), ((873, 937), 'os.path.join', 'os.path.join', (['model_wrapper.model_path', 'group_name', '"""saved.ckpt"""'], {}), "(model_wrapper.model_path, group_name, 'saved.ckpt')\n", (885, 937), False, 'import os\n'), ((1823, 1847), 'numpy.stack', 'np.stack', (['data_x'], {'axis': '(0)'}), '(data_x, axis=0)\n', (1831, 1847), True, 'import numpy as np\n'), ((2101, 2164), 'progress.bar.ChargingBar', 'ChargingBar', (['"""Calculating inter layer covariance"""'], {'max': 'n_layers'}), "('Calculating inter layer covariance', max=n_layers)\n", (2112, 2164), False, 'from progress.bar import ChargingBar\n'), ((352, 376), 'os.path.isdir', 'os.path.isdir', (['save_path'], {}), '(save_path)\n', (365, 376), False, 'import os\n'), ((390, 412), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (401, 412), False, 'import os\n'), ((427, 439), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (437, 439), True, 'import tensorflow as tf\n'), ((473, 489), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (487, 489), True, 'import tensorflow as tf\n'), ((953, 977), 'os.path.isdir', 'os.path.isdir', (['save_path'], {}), '(save_path)\n', (966, 977), False, 'import os\n'), ((991, 1013), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (1002, 1013), False, 'import os\n'), ((1028, 1040), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1038, 1040), True, 'import tensorflow as tf\n'), ((1324, 1348), 'tensorflow.train.Saver', 'tf.train.Saver', (['elements'], {}), '(elements)\n', (1338, 1348), True, 'import tensorflow as tf\n'), ((1931, 1963), 'numpy.random.permutation', 'np.random.permutation', (['data_size'], {}), '(data_size)\n', (1952, 1963), True, 'import numpy as np\n'), ((2466, 2544), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': 
'(flat_shape, flat_shape)', 'dtype': 'tf.float32', 'name': "('sigma%d' % l)"}), "(shape=(flat_shape, flat_shape), dtype=tf.float32, name='sigma%d' % l)\n", (2474, 2544), True, 'import tensorflow as tf\n'), ((2709, 2761), 'tensorflow.Variable', 'tf.Variable', (['(1 / (n - 1) * sigma)'], {'name': "('sigma%d' % l)"}), "(1 / (n - 1) * sigma, name='sigma%d' % l)\n", (2720, 2761), True, 'import tensorflow as tf\n'), ((2790, 2847), 'tensorflow.self_adjoint_eigvals', 'tf.self_adjoint_eigvals', (['sigma'], {'name': "('eigen_values%d' % l)"}), "(sigma, name='eigen_values%d' % l)\n", (2813, 2847), True, 'import tensorflow as tf\n'), ((2935, 2960), 'tensorflow.Variable', 'tf.Variable', (['eigen_values'], {}), '(eigen_values)\n', (2946, 2960), True, 'import tensorflow as tf\n'), ((1730, 1756), 'numpy.moveaxis', 'np.moveaxis', (['data_x', '(-1)', '(0)'], {}), '(data_x, -1, 0)\n', (1741, 1756), True, 'import numpy as np\n'), ((2661, 2687), 'tensorflow.einsum', 'tf.einsum', (['"""i,j->ij"""', 'g', 'g'], {}), "('i,j->ij', g, g)\n", (2670, 2687), True, 'import tensorflow as tf\n'), ((1190, 1206), 'tensorflow.Variable', 'tf.Variable', (['var'], {}), '(var)\n', (1201, 1206), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Detection Training Script.
This scripts reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that are specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend you to use detectron2 as an library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""
import logging
import os
import time
from collections import OrderedDict
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
Kitti2cityscapesInstanceEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
DatasetEvaluator,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
verify_results,
print_csv_format,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
from detectron2.utils.logger import log_every_n_seconds
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2.structures.instances import Instances
from cityscapesscripts.helpers.labels import labels
import cv2
from collections import deque
from contextlib import contextmanager
import datetime
from PIL import Image
import numpy as np
import copy
import matplotlib.pyplot as plt
def tensor2disp(tensor, vmax=0.18, percentile=None, viewind=0):
    """Colour-map channel 0 of one batch element of a 4D tensor.

    Values are divided by ``vmax`` (or by the given percentile of the slice
    when ``percentile`` is set) and rendered with the 'magma' colormap.

    Returns:
        A PIL RGB image of the colour-mapped slice.
    """
    colormap = plt.get_cmap('magma')
    disp = tensor[viewind, 0, :, :].detach().cpu().numpy()
    scale = np.percentile(disp, percentile) if percentile is not None else vmax
    rgba = (colormap(disp / scale) * 255).astype(np.uint8)
    return Image.fromarray(rgba[:, :, 0:3])
def vls_ins(rgb, anno):
    """Visualise an instance annotation map on top of an RGB image.

    Every non-zero instance id in ``anno`` is tinted with a random colour,
    alpha-blended 25% colour / 75% original image over the pixels it covers;
    id 0 (background) is left untouched.

    Args:
        rgb: HxWx3 uint8 image.
        anno: HxW integer instance-id map (0 = background).

    Returns:
        HxWx3 uint8 image with instances tinted.
    """
    rgbc = copy.deepcopy(rgb)
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # np.float64 is the exact type the old alias resolved to.
    r = rgbc[:, :, 0].astype(np.float64)
    g = rgbc[:, :, 1].astype(np.float64)
    b = rgbc[:, :, 2].astype(np.float64)
    for ins_id in np.unique(anno):
        if ins_id > 0:
            rndc = np.random.randint(0, 255, 3).astype(np.float64)
            selector = anno == ins_id
            r[selector] = rndc[0] * 0.25 + r[selector] * 0.75
            g[selector] = rndc[1] * 0.25 + g[selector] * 0.75
            b[selector] = rndc[2] * 0.25 + b[selector] * 0.75
    rgbvls = np.stack([r, g, b], axis=2)
    rgbvls = np.clip(rgbvls, a_max=255, a_min=0).astype(np.uint8)
    return rgbvls
class VisualizationDemo(object):
def __init__(self, cfg, instance_mode=ColorMode.IMAGE):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.catconfbar = {0 : 0.9, 1 : 0.9, 2 : 0.9}
self.minpixel = {0: 50, 1: 100, 2: 100}
self.selfcontribbar = {0: 0.5, 1: 0.5, 2: 0.5}
def pp_predictions_simple(self, inspred):
selector = torch.ones_like(inspred.scores)
if inspred.scores.shape[0] == 0:
return inspred
for k in range(inspred.scores.shape[0]):
cat = inspred.pred_classes[k].item()
conf = inspred.scores[k].item()
numpixel = torch.sum(inspred.pred_masks).item()
if conf < self.catconfbar[cat]:
selector[k] = 0
if numpixel < self.minpixel[cat]:
selector[k] = 0
pp_inspred = Instances(image_size=inspred.image_size)
selector = selector == 1
pp_inspred.scores = inspred.scores[selector]
pp_inspred.pred_classes = inspred.pred_classes[selector]
pp_inspred.pred_boxes = inspred.pred_boxes[selector]
pp_inspred.pred_masks = inspred.pred_masks[selector]
return pp_inspred
    def erase_srhink(self, inspred, mask, cat):
        """Erase/shrink low-confidence masks of one category.

        Instances of class ``cat`` are processed in ascending score order:
        each one whose score is below ``self.catconfbar[cat]`` is handed to
        ``self.erase`` (which may zero its mask), and every instance mask is
        then intersected with the binary semantic ``mask``.

        Args:
            inspred: detectron2 ``Instances``; mutated in place and returned.
            mask: numpy array; pixels equal to 1 are valid for this category.
            cat: integer category id.
        """
        # Collect indices and scores of all predictions of this category.
        catidx = list()
        catscore = list()
        for k in range(len(inspred)):
            if inspred.pred_classes[k].item() == cat:
                catidx.append(k)
                catscore.append(inspred.scores[k].item())
        if len(catidx) == 0:
            return inspred
        else:
            catidx = np.array(catidx)
            catscore = np.array(catscore)
            # Ascending score order: least confident masks are erased first,
            # so their pixels no longer count when later masks are judged.
            sortedidx = np.argsort(catscore)
            catidx = catidx[sortedidx]
            catscore = catscore[sortedidx]
            # Binary reference mask derived from the semantic segmentation.
            refmask = np.copy(mask)
            refmask = torch.from_numpy(refmask) == 1
            for k in range(catidx.shape[0]):
                if catscore[k] < self.catconfbar[cat]:
                    # May zero this mask based on its sole-contribution ratio.
                    inspred = self.erase(inspred, catidx, catidx[k], cat)
                # Shrink the instance mask to the semantic-mask region.
                inspred.pred_masks[catidx[k]] = inspred.pred_masks[catidx[k]] * refmask
            return inspred
def erase(self, inspred, catidx, selfidx, cat):
mask_wos = torch.zeros_like(inspred.pred_masks[selfidx])
for k in catidx:
if k == selfidx:
continue
else:
mask_wos += inspred.pred_masks[k]
mask_ws = mask_wos + inspred.pred_masks[selfidx]
solcontrib = (mask_wos == 0) * (mask_ws == 1)
if torch.sum(solcontrib).float() / (torch.sum(inspred.pred_masks[selfidx]) + 1).float() < self.selfcontribbar[cat]:
# erase
inspred.pred_masks[selfidx] = inspred.pred_masks[selfidx] * 0
return inspred
def pp_predictions(self, inspred, carmask, pedmask, cyclistmask):
selector = torch.ones_like(inspred.scores)
if inspred.scores.shape[0] == 0:
return inspred
# Get indices sort by score
inspred = self.erase_srhink(inspred, carmask, cat=2)
inspred = self.erase_srhink(inspred, cyclistmask, cat=1)
inspred = self.erase_srhink(inspred, pedmask, cat=0)
for k in range(inspred.scores.shape[0]):
cat = inspred.pred_classes[k].item()
numpixel = torch.sum(inspred.pred_masks[k]).item()
if numpixel < self.minpixel[cat]:
selector[k] = 0
pp_inspred = Instances(image_size=inspred.image_size)
selector = selector == 1
pp_inspred.scores = inspred.scores[selector]
pp_inspred.pred_classes = inspred.pred_classes[selector]
pp_inspred.pred_boxes = inspred.pred_boxes[selector]
pp_inspred.pred_masks = inspred.pred_masks[selector]
return pp_inspred
def generate_instancemap(self, inspred, h, w):
    """Rasterize instance predictions into two int32 maps of shape (h, w).

    Instances are painted in descending-score order, so a lower-scored
    instance overlapping a higher-scored one overwrites it (it is painted
    later) — this preserves the original painting semantics.

    Args:
        inspred: instance predictions; an empty one yields all-zero maps.
        h, w: output map height and width.

    Returns:
        (insmap, semanmap): instance ids are 1-based paint order, semantic
        labels are `pred_classes + 1`; 0 means background in both maps.
    """
    insmap = torch.zeros([h, w], dtype=torch.int32)
    semanmap = torch.zeros([h, w], dtype=torch.int32)
    if len(inspred) == 0:
        return insmap, semanmap
    # argsort of the negated scores gives descending-score order.
    order = np.argsort(-inspred.scores.numpy())
    for rank, inst in enumerate(order):
        insmap[inspred.pred_masks[inst]] = rank + 1
        semanmap[inspred.pred_masks[inst]] = inspred.pred_classes[inst].item() + 1
    return insmap, semanmap
def run_on_image(self, image, predictions, carmask, pedmask, cyclistmask, entryname, args):
    """
    Export instance and semantic maps for one image, occasionally saving
    visualization images as well.

    Args:
        image (np.ndarray): an image of shape (H, W, C) (in BGR order).
            This is the format used by OpenCV.
        predictions (dict): the output of the model; key "instances" is
            moved to CPU here.
        carmask / pedmask / cyclistmask: per-category semantic masks; when
            `carmask` is None the simple post-processing path is used.
        entryname (str): "<sequence folder> <image name>" key of the sample.
        args: namespace providing `exportroot`, `vlsroot` and `vlsfreq`.
    Returns:
        None; all results are written to disk.
    """
    # entryname is "<raw KITTI folder> <image filename>".
    foldname, imgname = entryname.split(' ')
    dirmapping = {'left': 'image_02', 'right': 'image_03'}
    date = foldname[0:10]
    seq = foldname[0:26]
    # Map the left/right suffix of the folder name to the KITTI camera dir.
    foldname = dirmapping[foldname.split('_')[-1]]
    exportfold_ins = os.path.join(args.exportroot, date, seq, 'insmap', foldname)
    exportfold_seman = os.path.join(args.exportroot, date, seq, 'semanmap', foldname)
    ins_path = os.path.join(exportfold_ins, imgname)
    seman_path = os.path.join(exportfold_seman, imgname)
    # Skip samples whose outputs already exist on disk.
    if os.path.exists(ins_path) and os.path.exists(seman_path):
        print("%s generated, skip" % ins_path)
        return
    os.makedirs(exportfold_ins, exist_ok=True)
    os.makedirs(exportfold_seman, exist_ok=True)
    instances = predictions["instances"].to(self.cpu_device)
    # Without semantic masks, fall back to the simple post-processing path.
    if carmask is None:
        pp_inspred = self.pp_predictions_simple(copy.deepcopy(instances))
    else:
        pp_inspred = self.pp_predictions(copy.deepcopy(instances), carmask, pedmask, cyclistmask)
    insmap, semanmap = self.generate_instancemap(pp_inspred, h=image.shape[0], w=image.shape[1])
    # NOTE(review): uint8 caps instance ids at 255 — presumably safe for
    # KITTI scene instance counts; confirm if reused elsewhere.
    Image.fromarray(insmap.numpy().astype(np.uint8)).save(ins_path)
    Image.fromarray(semanmap.numpy().astype(np.uint8)).save(seman_path)
    # Save visualizations for roughly 1 of every args.vlsfreq images.
    if np.random.randint(0, args.vlsfreq) == 0:
        # Convert image from OpenCV BGR format to Matplotlib RGB format.
        image = image[:, :, ::-1]
        vlsfold1 = os.path.join(args.vlsroot, date, seq, 'vls_final')
        vlsfold2 = os.path.join(args.vlsroot, date, seq, 'vls_initial')
        vlsfold3 = os.path.join(args.vlsroot, date, seq, 'vls_cleaned')
        vlsname1 = os.path.join(vlsfold1, imgname)
        vlsname2 = os.path.join(vlsfold2, imgname)
        vlsname3 = os.path.join(vlsfold3, imgname)
        os.makedirs(vlsfold1, exist_ok=True)
        os.makedirs(vlsfold2, exist_ok=True)
        os.makedirs(vlsfold3, exist_ok=True)
        # Final instance map overlaid on the RGB image.
        Image.fromarray(vls_ins(rgb=image, anno=insmap.numpy())).save(vlsname1)
        # Raw (pre-post-processing) detections.
        visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
        vis_output = visualizer.draw_instance_predictions(predictions=instances)
        Image.fromarray(vis_output.get_image()).save(vlsname2)
        # Cleaned (post-processed) detections.
        visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
        vis_output = visualizer.draw_instance_predictions(predictions=pp_inspred)
        Image.fromarray(vis_output.get_image()).save(vlsname3)
    return
@contextmanager
def inference_context(model):
    """
    A context where the model is temporarily changed to eval mode,
    and restored to its previous mode afterwards.

    The restore now runs in a ``finally`` block, so the previous mode is
    recovered even when the ``with`` body raises an exception (the original
    left the model in eval mode in that case).

    Args:
        model: a torch Module (anything exposing `.training`, `.eval()`,
            `.train(mode)`).
    """
    training_mode = model.training
    model.eval()
    try:
        yield
    finally:
        # Restore the pre-entry mode no matter how the body exits.
        model.train(training_mode)
def get_semantics(args, entryname, h, w):
    """Load the semantic-segmentation prediction for one sample and split it
    into per-category binary masks (resized to (w, h), nearest neighbour).

    Args:
        args: namespace providing `semanticsroot`.
        entryname: "<sequence folder> <image name>" sample key.
        h, w: target mask height and width.

    Returns:
        (carmask, pedmask, cyclistmask) as float32 arrays, or
        (None, None, None) when no semantic prediction exists on disk.
    """
    lrmapping = {'left': 'image_02', 'right': 'image_03'}
    foldname, imgname = entryname.split(' ')
    date = foldname[0:10]
    seq = foldname[0:26]
    # Cityscapes train ids grouped into the three categories of interest.
    category_ids = {
        'car': [26, 27, 28, 29, 30, 31],
        'ped': [24],
        'cyclist': [25, 32, 33],
    }
    semanticspath = os.path.join(args.semanticsroot, date, seq, 'semantic_prediction', lrmapping[foldname[27:]], imgname)
    if not os.path.exists(semanticspath):
        return None, None, None
    semantics = np.array(Image.open(semanticspath).resize([w, h], Image.NEAREST))
    masks = {}
    for name, ids in category_ids.items():
        combined = np.zeros_like(semantics, dtype=np.float32)
        for train_id in ids:
            combined = combined + (semantics == train_id).astype(np.float32)
        masks[name] = combined
    return masks['car'], masks['ped'], masks['cyclist']
def inference_on_dataset(model, data_loader, vlstool, args):
    """
    Run model on the data_loader and export per-image results via `vlstool`.
    Also benchmark the inference speed of `model.forward` accurately.
    The model will be used in eval mode.

    Args:
        model (nn.Module): a module which accepts an object from
            `data_loader` and returns some outputs. It will be temporarily set to `eval` mode.
            If you wish to evaluate a model in `training` mode instead, you can
            wrap the given model and override its behavior of `.eval()` and `.train()`.
        data_loader: an iterable object with a length.
            The elements it generates will be the inputs to the model.
        vlstool: VisualizationDemo-like object; its `run_on_image` writes the
            instance/semantic maps (and occasional visualizations) to disk.
        args: forwarded to `get_semantics` and `vlstool.run_on_image`.
    Returns:
        None; all outputs are written to disk by `vlstool`.
    """
    total = len(data_loader)  # inference data loader must have a fixed length
    # The first few iterations are treated as warm-up and excluded from timing.
    num_warmup = min(5, total - 1)
    start_time = time.perf_counter()
    total_compute_time = 0
    with inference_context(model), torch.no_grad():
        for idx, inputs in enumerate(data_loader):
            if idx == num_warmup:
                # Warm-up finished: reset the clocks.
                start_time = time.perf_counter()
                total_compute_time = 0
            start_compute_time = time.perf_counter()
            outputs = model(inputs)
            if torch.cuda.is_available():
                # Ensure all CUDA kernels finished before stopping the timer.
                torch.cuda.synchronize()
            carmask, pedmask, cyclistmask = get_semantics(args, inputs[0]['entryname'], inputs[0]['height'], inputs[0]['width'])
            # NOTE: export time is counted as part of the compute time below.
            vlstool.run_on_image(inputs[0]['orgimage'].cpu().permute([1, 2, 0]).numpy(), outputs[0], carmask, pedmask, cyclistmask, inputs[0]['entryname'], args)
            total_compute_time += time.perf_counter() - start_compute_time
            # Iterations counted since the post-warm-up reset.
            iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
            seconds_per_img = total_compute_time / iters_after_start
            total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start
            eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))
            print("Inference done {}/{}. {:.4f} s / img. ETA={}".format(
                idx + 1, total, seconds_per_img, str(eta)
            ))
    return
class Trainer(DefaultTrainer):
    """
    Thin wrapper around detectron2's "DefaultTrainer" that only exposes
    inference entry points (optionally wrapped with test-time augmentation).
    The pre-defined default training logic is inherited unchanged; for a new
    research project you may prefer your own loop (see
    "tools/plain_train_net.py" for an example).
    """
    @classmethod
    def inference_with_TTA(cls, cfg, model, args):
        """Wrap `model` with test-time augmentation and run inference."""
        logger = logging.getLogger("detectron2.trainer")
        # In the end of training, run an evaluation with TTA.
        # Only some R-CNN models support the TTA wrapper.
        logger.info("Running inference with test-time augmentation ...")
        tta_model = GeneralizedRCNNWithTTA(cfg, model)
        cls.inference(cfg, tta_model, args)
        return
    @classmethod
    def inference(cls, cfg, model, args):
        """
        Run inference over every dataset listed in ``cfg.DATASETS.TEST``.

        Args:
            cfg (CfgNode):
            model (nn.Module):
            args: forwarded to `inference_on_dataset` (export/vls options).
        """
        for dataset_name in cfg.DATASETS.TEST:
            loader = cls.build_test_loader(cfg, dataset_name)
            demo = VisualizationDemo(cfg)
            inference_on_dataset(model, loader, demo, args)
        return
def setup(args):
    """
    Create configs and perform basic setups.

    Builds the detectron2 config from `args.config_file`, applies the
    command-line overrides in `args.opts`, freezes it, and runs the default
    environment setup (logging, output directory, seeding).

    Args:
        args: parsed namespace from `default_argument_parser`.

    Returns:
        The frozen CfgNode.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Freeze before use so downstream code cannot mutate the config.
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def main(args):
    """Entry point launched (possibly once per process) by detectron2's
    `launch`.

    Only evaluation mode is supported: builds the model, restores its
    weights from `cfg.MODEL.WEIGHTS`, and runs inference with TTA.

    Raises:
        Exception: when `args.eval_only` is not set.
    """
    cfg = setup(args)
    # Guard clause: this script supports evaluation only.
    if not args.eval_only:
        raise Exception("Only evaluation supported")
    model = Trainer.build_model(cfg)
    checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
    # res = Trainer.test_with_TTA(cfg, model)
    Trainer.inference_with_TTA(cfg, model, args)
    return
def build_inference_dataset(args, removeorg=True):
    """Assemble the KITTI-as-Cityscapes inference split.

    Copies the left/right raw-KITTI frames referenced by the eigen-full
    split files (plus the left frames of four odometry sequences) into
    ``<kitti2cityscaperoot>/leftImg8bit/test``.

    Fixes two defects of the original implementation:
    1. the odometry destination folder was derived from the stale ``seq``
       variable left over from the previous loop instead of ``odomseq``,
       so every odometry frame landed in one wrong folder;
    2. the final copy loop ranged over ``len(entries) * 2`` only, silently
       skipping all the odometry images appended after that point.

    Args:
        args: namespace providing rawkittiroot, kitti2cityscaperoot and
            kittiodomroot.
        removeorg: when True, delete the original Cityscapes test split first.
    """
    # Remove original test split
    from shutil import copyfile, rmtree
    from tqdm import tqdm
    import glob
    odomseqs = [
        '2011_10_03/2011_10_03_drive_0027_sync',
        '2011_09_30/2011_09_30_drive_0016_sync',
        '2011_09_30/2011_09_30_drive_0018_sync',
        '2011_09_30/2011_09_30_drive_0027_sync'
    ]
    if removeorg:
        orgTlr = os.path.join(args.kitti2cityscaperoot, 'gtFine/test')
        orgTir = os.path.join(args.kitti2cityscaperoot, 'leftImg8bit/test')
        for orgdir in (orgTlr, orgTir):
            if os.path.exists(orgdir) and os.path.isdir(orgdir):
                print("Removing: %s" % orgdir)
                rmtree(orgdir)
    # Collect the unique "<seq> <frame>" keys from all three split files.
    txts = ['test_files.txt', 'val_files.txt', 'train_files.txt']
    splitfolder = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'split_eigen_full', )
    entries = list()
    for txtname in txts:
        splittxtadd = os.path.join(splitfolder, txtname)
        with open(splittxtadd, 'r') as f:
            tmpentries = f.readlines()
        for entry in tmpentries:
            seq, frmidx, dir = entry.split(' ')
            key = "{} {}".format(seq, frmidx.zfill(10))
            entries.append(key)
    entries = list(set(entries))
    # Queue the left and right camera frames of every split entry.
    srcs = list()
    dsts = list()
    for entry in entries:
        seq, frmidx = entry.split(' ')
        srcl = os.path.join(args.rawkittiroot, seq, 'image_02/data', "{}.png".format(frmidx))
        dstfoldl = os.path.join(args.kitti2cityscaperoot, 'leftImg8bit/test', '{}_left'.format(seq.split('/')[-1]))
        dstl = os.path.join(dstfoldl, "{}.png".format(frmidx))
        srcs.append(srcl)
        dsts.append(dstl)
        srcr = os.path.join(args.rawkittiroot, seq, 'image_03/data', "{}.png".format(frmidx))
        dstfoldr = os.path.join(args.kitti2cityscaperoot, 'leftImg8bit/test', '{}_right'.format(seq.split('/')[-1]))
        dstr = os.path.join(dstfoldr, "{}.png".format(frmidx))
        os.makedirs(dstfoldl, exist_ok=True)
        os.makedirs(dstfoldr, exist_ok=True)
        srcs.append(srcr)
        dsts.append(dstr)
    assert len(entries) * 2 == len(srcs)
    # Queue the left frames of the odometry sequences.
    for odomseq in odomseqs:
        # BUGFIX: derive the destination folder from the odometry sequence
        # itself, not the stale `seq` left over from the loop above.
        dstfoldl = os.path.join(args.kitti2cityscaperoot, 'leftImg8bit/test', '{}_left'.format(odomseq.split('/')[-1]))
        leftimgs = glob.glob(os.path.join(args.kittiodomroot, odomseq, 'image_02/data', "*.png"))
        for leftimg in leftimgs:
            imgname = os.path.basename(leftimg)
            srcl = os.path.join(args.kittiodomroot, odomseq, 'image_02/data', imgname)
            dstl = os.path.join(dstfoldl, imgname)
            srcs.append(srcl)
            dsts.append(dstl)
        os.makedirs(dstfoldl, exist_ok=True)
    # BUGFIX: iterate over *all* queued copies; the original ranged over
    # len(entries) * 2 only and never copied the odometry images.
    for k in tqdm(range(len(srcs))):
        if not os.path.exists(dsts[k]):
            copyfile(srcs[k], dsts[k])
if __name__ == "__main__":
    # Standard detectron2 argument parser, extended with dataset/export paths.
    parser = default_argument_parser()
    parser.add_argument("--rawkittiroot", type=str)
    parser.add_argument("--kitti2cityscaperoot", type=str)
    parser.add_argument("--kittiodomroot", type=str)
    parser.add_argument("--semanticsroot", type=str)
    # When set, skip deleting the original Cityscapes test split.
    parser.add_argument("--banremove", action='store_true')
    parser.add_argument("--exportroot", type=str)
    parser.add_argument("--vlsroot", type=str)
    # Save visualizations for roughly 1 in `vlsfreq` images.
    parser.add_argument("--vlsfreq", type=int, default=100)
    args = parser.parse_args()
    # Stage the inference dataset before launching (possibly distributed) workers.
    build_inference_dataset(args, removeorg=not args.banremove)
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| [
"logging.getLogger",
"numpy.clip",
"detectron2.structures.instances.Instances",
"detectron2.modeling.GeneralizedRCNNWithTTA",
"torch.from_numpy",
"numpy.argsort",
"numpy.array",
"torch.cuda.synchronize",
"torch.cuda.is_available",
"torch.sum",
"copy.deepcopy",
"detectron2.engine.default_argume... | [((2126, 2147), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""magma"""'], {}), "('magma')\n", (2138, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2358, 2389), 'PIL.Image.fromarray', 'Image.fromarray', (['tnp[:, :, 0:3]'], {}), '(tnp[:, :, 0:3])\n', (2373, 2389), False, 'from PIL import Image\n'), ((2426, 2444), 'copy.deepcopy', 'copy.deepcopy', (['rgb'], {}), '(rgb)\n', (2439, 2444), False, 'import copy\n'), ((2575, 2590), 'numpy.unique', 'np.unique', (['anno'], {}), '(anno)\n', (2584, 2590), True, 'import numpy as np\n'), ((2907, 2934), 'numpy.stack', 'np.stack', (['[r, g, b]'], {'axis': '(2)'}), '([r, g, b], axis=2)\n', (2915, 2934), True, 'import numpy as np\n'), ((11559, 11664), 'os.path.join', 'os.path.join', (['args.semanticsroot', 'date', 'seq', '"""semantic_prediction"""', 'lrmapping[foldname[27:]]', 'imgname'], {}), "(args.semanticsroot, date, seq, 'semantic_prediction',\n lrmapping[foldname[27:]], imgname)\n", (11571, 11664), False, 'import os\n'), ((11669, 11698), 'os.path.exists', 'os.path.exists', (['semanticspath'], {}), '(semanticspath)\n', (11683, 11698), False, 'import os\n'), ((13430, 13449), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (13447, 13449), False, 'import time\n'), ((16263, 16272), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (16270, 16272), False, 'from detectron2.config import get_cfg\n'), ((16371, 16395), 'detectron2.engine.default_setup', 'default_setup', (['cfg', 'args'], {}), '(cfg, args)\n', (16384, 16395), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((19873, 19898), 'detectron2.engine.default_argument_parser', 'default_argument_parser', ([], {}), '()\n', (19896, 19898), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((20472, 20606), 'detectron2.engine.launch', 'launch', (['main', 'args.num_gpus'], {'num_machines': 
'args.num_machines', 'machine_rank': 'args.machine_rank', 'dist_url': 'args.dist_url', 'args': '(args,)'}), '(main, args.num_gpus, num_machines=args.num_machines, machine_rank=\n args.machine_rank, dist_url=args.dist_url, args=(args,))\n', (20478, 20606), False, 'from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((2252, 2282), 'numpy.percentile', 'np.percentile', (['tnp', 'percentile'], {}), '(tnp, percentile)\n', (2265, 2282), True, 'import numpy as np\n'), ((3536, 3555), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3548, 3555), False, 'import torch\n'), ((3823, 3854), 'torch.ones_like', 'torch.ones_like', (['inspred.scores'], {}), '(inspred.scores)\n', (3838, 3854), False, 'import torch\n'), ((4303, 4343), 'detectron2.structures.instances.Instances', 'Instances', ([], {'image_size': 'inspred.image_size'}), '(image_size=inspred.image_size)\n', (4312, 4343), False, 'from detectron2.structures.instances import Instances\n'), ((5745, 5790), 'torch.zeros_like', 'torch.zeros_like', (['inspred.pred_masks[selfidx]'], {}), '(inspred.pred_masks[selfidx])\n', (5761, 5790), False, 'import torch\n'), ((6381, 6412), 'torch.ones_like', 'torch.ones_like', (['inspred.scores'], {}), '(inspred.scores)\n', (6396, 6412), False, 'import torch\n'), ((6968, 7008), 'detectron2.structures.instances.Instances', 'Instances', ([], {'image_size': 'inspred.image_size'}), '(image_size=inspred.image_size)\n', (6977, 7008), False, 'from detectron2.structures.instances import Instances\n'), ((7379, 7417), 'torch.zeros', 'torch.zeros', (['[h, w]'], {'dtype': 'torch.int32'}), '([h, w], dtype=torch.int32)\n', (7390, 7417), False, 'import torch\n'), ((7437, 7475), 'torch.zeros', 'torch.zeros', (['[h, w]'], {'dtype': 'torch.int32'}), '([h, w], dtype=torch.int32)\n', (7448, 7475), False, 'import torch\n'), ((8511, 8571), 'os.path.join', 'os.path.join', (['args.exportroot', 'date', 'seq', '"""insmap"""', 'foldname'], {}), 
"(args.exportroot, date, seq, 'insmap', foldname)\n", (8523, 8571), False, 'import os\n'), ((8599, 8661), 'os.path.join', 'os.path.join', (['args.exportroot', 'date', 'seq', '"""semanmap"""', 'foldname'], {}), "(args.exportroot, date, seq, 'semanmap', foldname)\n", (8611, 8661), False, 'import os\n'), ((8682, 8719), 'os.path.join', 'os.path.join', (['exportfold_ins', 'imgname'], {}), '(exportfold_ins, imgname)\n', (8694, 8719), False, 'import os\n'), ((8741, 8780), 'os.path.join', 'os.path.join', (['exportfold_seman', 'imgname'], {}), '(exportfold_seman, imgname)\n', (8753, 8780), False, 'import os\n'), ((8929, 8971), 'os.makedirs', 'os.makedirs', (['exportfold_ins'], {'exist_ok': '(True)'}), '(exportfold_ins, exist_ok=True)\n', (8940, 8971), False, 'import os\n'), ((8980, 9024), 'os.makedirs', 'os.makedirs', (['exportfold_seman'], {'exist_ok': '(True)'}), '(exportfold_seman, exist_ok=True)\n', (8991, 9024), False, 'import os\n'), ((11796, 11815), 'numpy.array', 'np.array', (['semantics'], {}), '(semantics)\n', (11804, 11815), True, 'import numpy as np\n'), ((11835, 11877), 'numpy.zeros_like', 'np.zeros_like', (['semantics'], {'dtype': 'np.float32'}), '(semantics, dtype=np.float32)\n', (11848, 11877), True, 'import numpy as np\n'), ((11995, 12037), 'numpy.zeros_like', 'np.zeros_like', (['semantics'], {'dtype': 'np.float32'}), '(semantics, dtype=np.float32)\n', (12008, 12037), True, 'import numpy as np\n'), ((12159, 12201), 'numpy.zeros_like', 'np.zeros_like', (['semantics'], {'dtype': 'np.float32'}), '(semantics, dtype=np.float32)\n', (12172, 12201), True, 'import numpy as np\n'), ((13512, 13527), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13525, 13527), False, 'import torch\n'), ((15180, 15219), 'logging.getLogger', 'logging.getLogger', (['"""detectron2.trainer"""'], {}), "('detectron2.trainer')\n", (15197, 15219), False, 'import logging\n'), ((15412, 15446), 'detectron2.modeling.GeneralizedRCNNWithTTA', 'GeneralizedRCNNWithTTA', (['cfg', 'model'], {}), 
'(cfg, model)\n', (15434, 15446), False, 'from detectron2.modeling import GeneralizedRCNNWithTTA\n'), ((17257, 17310), 'os.path.join', 'os.path.join', (['args.kitti2cityscaperoot', '"""gtFine/test"""'], {}), "(args.kitti2cityscaperoot, 'gtFine/test')\n", (17269, 17310), False, 'import os\n'), ((17328, 17386), 'os.path.join', 'os.path.join', (['args.kitti2cityscaperoot', '"""leftImg8bit/test"""'], {}), "(args.kitti2cityscaperoot, 'leftImg8bit/test')\n", (17340, 17386), False, 'import os\n'), ((17883, 17917), 'os.path.join', 'os.path.join', (['splitfolder', 'txtname'], {}), '(splitfolder, txtname)\n', (17895, 17917), False, 'import os\n'), ((18931, 18967), 'os.makedirs', 'os.makedirs', (['dstfoldl'], {'exist_ok': '(True)'}), '(dstfoldl, exist_ok=True)\n', (18942, 18967), False, 'import os\n'), ((18976, 19012), 'os.makedirs', 'os.makedirs', (['dstfoldr'], {'exist_ok': '(True)'}), '(dstfoldr, exist_ok=True)\n', (18987, 19012), False, 'import os\n'), ((19640, 19676), 'os.makedirs', 'os.makedirs', (['dstfoldl'], {'exist_ok': '(True)'}), '(dstfoldl, exist_ok=True)\n', (19651, 19676), False, 'import os\n'), ((19733, 19756), 'os.path.exists', 'os.path.exists', (['dsts[k]'], {}), '(dsts[k])\n', (19747, 19756), False, 'import os\n'), ((2948, 2983), 'numpy.clip', 'np.clip', (['rgbvls'], {'a_max': '(255)', 'a_min': '(0)'}), '(rgbvls, a_max=255, a_min=0)\n', (2955, 2983), True, 'import numpy as np\n'), ((5019, 5035), 'numpy.array', 'np.array', (['catidx'], {}), '(catidx)\n', (5027, 5035), True, 'import numpy as np\n'), ((5059, 5077), 'numpy.array', 'np.array', (['catscore'], {}), '(catscore)\n', (5067, 5077), True, 'import numpy as np\n'), ((5102, 5122), 'numpy.argsort', 'np.argsort', (['catscore'], {}), '(catscore)\n', (5112, 5122), True, 'import numpy as np\n'), ((5229, 5242), 'numpy.copy', 'np.copy', (['mask'], {}), '(mask)\n', (5236, 5242), True, 'import numpy as np\n'), ((7618, 7637), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (7628, 7637), True, 
'import numpy as np\n'), ((8793, 8817), 'os.path.exists', 'os.path.exists', (['ins_path'], {}), '(ins_path)\n', (8807, 8817), False, 'import os\n'), ((8822, 8848), 'os.path.exists', 'os.path.exists', (['seman_path'], {}), '(seman_path)\n', (8836, 8848), False, 'import os\n'), ((9577, 9611), 'numpy.random.randint', 'np.random.randint', (['(0)', 'args.vlsfreq'], {}), '(0, args.vlsfreq)\n', (9594, 9611), True, 'import numpy as np\n'), ((9757, 9807), 'os.path.join', 'os.path.join', (['args.vlsroot', 'date', 'seq', '"""vls_final"""'], {}), "(args.vlsroot, date, seq, 'vls_final')\n", (9769, 9807), False, 'import os\n'), ((9831, 9883), 'os.path.join', 'os.path.join', (['args.vlsroot', 'date', 'seq', '"""vls_initial"""'], {}), "(args.vlsroot, date, seq, 'vls_initial')\n", (9843, 9883), False, 'import os\n'), ((9907, 9959), 'os.path.join', 'os.path.join', (['args.vlsroot', 'date', 'seq', '"""vls_cleaned"""'], {}), "(args.vlsroot, date, seq, 'vls_cleaned')\n", (9919, 9959), False, 'import os\n'), ((9984, 10015), 'os.path.join', 'os.path.join', (['vlsfold1', 'imgname'], {}), '(vlsfold1, imgname)\n', (9996, 10015), False, 'import os\n'), ((10039, 10070), 'os.path.join', 'os.path.join', (['vlsfold2', 'imgname'], {}), '(vlsfold2, imgname)\n', (10051, 10070), False, 'import os\n'), ((10094, 10125), 'os.path.join', 'os.path.join', (['vlsfold3', 'imgname'], {}), '(vlsfold3, imgname)\n', (10106, 10125), False, 'import os\n'), ((10139, 10175), 'os.makedirs', 'os.makedirs', (['vlsfold1'], {'exist_ok': '(True)'}), '(vlsfold1, exist_ok=True)\n', (10150, 10175), False, 'import os\n'), ((10188, 10224), 'os.makedirs', 'os.makedirs', (['vlsfold2'], {'exist_ok': '(True)'}), '(vlsfold2, exist_ok=True)\n', (10199, 10224), False, 'import os\n'), ((10237, 10273), 'os.makedirs', 'os.makedirs', (['vlsfold3'], {'exist_ok': '(True)'}), '(vlsfold3, exist_ok=True)\n', (10248, 10273), False, 'import os\n'), ((10385, 10451), 'detectron2.utils.visualizer.Visualizer', 'Visualizer', (['image', 
'self.metadata'], {'instance_mode': 'self.instance_mode'}), '(image, self.metadata, instance_mode=self.instance_mode)\n', (10395, 10451), False, 'from detectron2.utils.visualizer import ColorMode, Visualizer\n'), ((10630, 10696), 'detectron2.utils.visualizer.Visualizer', 'Visualizer', (['image', 'self.metadata'], {'instance_mode': 'self.instance_mode'}), '(image, self.metadata, instance_mode=self.instance_mode)\n', (10640, 10696), False, 'from detectron2.utils.visualizer import ColorMode, Visualizer\n'), ((13736, 13755), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (13753, 13755), False, 'import time\n'), ((13807, 13832), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13830, 13832), False, 'import torch\n'), ((17398, 17420), 'os.path.exists', 'os.path.exists', (['orgTlr'], {}), '(orgTlr)\n', (17412, 17420), False, 'import os\n'), ((17425, 17446), 'os.path.isdir', 'os.path.isdir', (['orgTlr'], {}), '(orgTlr)\n', (17438, 17446), False, 'import os\n'), ((17503, 17517), 'shutil.rmtree', 'rmtree', (['orgTlr'], {}), '(orgTlr)\n', (17509, 17517), False, 'from shutil import copyfile, rmtree\n'), ((17529, 17551), 'os.path.exists', 'os.path.exists', (['orgTir'], {}), '(orgTir)\n', (17543, 17551), False, 'import os\n'), ((17556, 17577), 'os.path.isdir', 'os.path.isdir', (['orgTir'], {}), '(orgTir)\n', (17569, 17577), False, 'import os\n'), ((17634, 17648), 'shutil.rmtree', 'rmtree', (['orgTir'], {}), '(orgTir)\n', (17640, 17648), False, 'from shutil import copyfile, rmtree\n'), ((17763, 17789), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (17779, 17789), False, 'import os\n'), ((19282, 19349), 'os.path.join', 'os.path.join', (['args.kittiodomroot', 'odomseq', '"""image_02/data"""', '"""*.png"""'], {}), "(args.kittiodomroot, odomseq, 'image_02/data', '*.png')\n", (19294, 19349), False, 'import os\n'), ((19406, 19431), 'os.path.basename', 'os.path.basename', (['leftimg'], {}), '(leftimg)\n', (19422, 19431), 
False, 'import os\n'), ((19452, 19519), 'os.path.join', 'os.path.join', (['args.kittiodomroot', 'odomseq', '"""image_02/data"""', 'imgname'], {}), "(args.kittiodomroot, odomseq, 'image_02/data', imgname)\n", (19464, 19519), False, 'import os\n'), ((19539, 19570), 'os.path.join', 'os.path.join', (['dstfoldl', 'imgname'], {}), '(dstfoldl, imgname)\n', (19551, 19570), False, 'import os\n'), ((19805, 19831), 'shutil.copyfile', 'copyfile', (['srcs[k]', 'dsts[k]'], {}), '(srcs[k], dsts[k])\n', (19813, 19831), False, 'from shutil import copyfile, rmtree\n'), ((5265, 5290), 'torch.from_numpy', 'torch.from_numpy', (['refmask'], {}), '(refmask)\n', (5281, 5290), False, 'import torch\n'), ((9172, 9196), 'copy.deepcopy', 'copy.deepcopy', (['instances'], {}), '(instances)\n', (9185, 9196), False, 'import copy\n'), ((9257, 9281), 'copy.deepcopy', 'copy.deepcopy', (['instances'], {}), '(instances)\n', (9270, 9281), False, 'import copy\n'), ((11720, 11745), 'PIL.Image.open', 'Image.open', (['semanticspath'], {}), '(semanticspath)\n', (11730, 11745), False, 'from PIL import Image\n'), ((13643, 13662), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (13660, 13662), False, 'import time\n'), ((13850, 13874), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (13872, 13874), False, 'import torch\n'), ((14201, 14220), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (14218, 14220), False, 'import time\n'), ((16524, 16577), 'detectron2.checkpoint.DetectionCheckpointer', 'DetectionCheckpointer', (['model'], {'save_dir': 'cfg.OUTPUT_DIR'}), '(model, save_dir=cfg.OUTPUT_DIR)\n', (16545, 16577), False, 'from detectron2.checkpoint import DetectionCheckpointer\n'), ((2629, 2657), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(3)'], {}), '(0, 255, 3)\n', (2646, 2657), True, 'import numpy as np\n'), ((4088, 4117), 'torch.sum', 'torch.sum', (['inspred.pred_masks'], {}), '(inspred.pred_masks)\n', (4097, 4117), False, 'import torch\n'), 
((6827, 6859), 'torch.sum', 'torch.sum', (['inspred.pred_masks[k]'], {}), '(inspred.pred_masks[k])\n', (6836, 6859), False, 'import torch\n'), ((14428, 14447), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (14445, 14447), False, 'import time\n'), ((6061, 6082), 'torch.sum', 'torch.sum', (['solcontrib'], {}), '(solcontrib)\n', (6070, 6082), False, 'import torch\n'), ((6094, 6132), 'torch.sum', 'torch.sum', (['inspred.pred_masks[selfidx]'], {}), '(inspred.pred_masks[selfidx])\n', (6103, 6132), False, 'import torch\n')] |
import unittest
from datetime import datetime
import pandas as pd
import numpy as np
from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd
from msticpy.analysis.anomalous_sequence import anomalous
class TestAnomalous(unittest.TestCase):
    """Exercise `anomalous.score_sessions` on the three supported session
    encodings: plain command strings, Cmd with set-valued params, and Cmd
    with dict-valued params."""

    def setUp(self) -> None:
        # Sessions as bare command-name strings.
        self.sessions1 = [
            ["Set-User", "Set-User"],
            ["Set-Mailbox", "Set-User", "Set-User"],
        ]
        # Sessions as Cmd objects whose params are sets.
        self.sessions2 = [
            [
                Cmd("Set-User", {"Identity"}),
                Cmd("Set-User", {"Identity", "City", "Name"}),
            ],
            [
                Cmd("Set-Mailbox", {"Identity"}),
                Cmd("Set-User", {"Identity", "City"}),
                Cmd("Set-User", {"Identity"}),
            ],
        ]
        # Sessions as Cmd objects whose params are value dicts.
        self.sessions3 = [
            [
                Cmd("Set-User", {"Identity": "blah"}),
                Cmd("Set-User", {"Identity": "haha", "City": "york", "Name": "bob"}),
            ],
            [
                Cmd("Set-Mailbox", {"Identity": "blah"}),
                Cmd("Set-User", {"Identity": "blah", "City": "london"}),
                Cmd("Set-User", {"Identity": "haha"}),
            ],
        ]
        self.times = [datetime(2019, 3, 1), datetime(2019, 5, 6)]
        self.data1 = pd.DataFrame({"session": self.sessions1, "time": self.times})
        self.data2 = pd.DataFrame({"session": self.sessions2, "time": self.times})
        self.data3 = pd.DataFrame({"session": self.sessions3, "time": self.times})

    def tearDown(self) -> None:
        # Drop every fixture so state never leaks between tests.
        for attr in ("sessions1", "sessions2", "sessions3",
                     "times", "data1", "data2", "data3"):
            setattr(self, attr, None)

    def test_score_sessions(self):
        # --- plain string sessions: rarest-window entries are strings -----
        scored = anomalous.score_sessions(
            data=self.data1, session_column="session", window_length=3
        )
        self.assertTrue(isinstance(scored, pd.DataFrame))
        # All original columns survive, plus exactly two score columns.
        for col in self.data1.columns:
            self.assertTrue(col in scored.columns)
        self.assertEqual(len(scored.columns), len(self.data1.columns) + 2)
        self.assertEqual(len(scored), len(self.data1))
        window = scored["rarest_window3"].iloc[0]
        self.assertTrue(isinstance(window, list))
        self.assertTrue(isinstance(window[0], str))
        # --- Cmd sessions with set-valued params --------------------------
        scored = anomalous.score_sessions(
            data=self.data2, session_column="session", window_length=3
        )
        window = scored["rarest_window3"].iloc[0]
        cmd = window[0]
        self.assertTrue(isinstance(window, list))
        self.assertTrue("name" in dir(cmd))
        self.assertTrue("params" in dir(cmd))
        self.assertTrue(isinstance(cmd.params, set))
        # --- Cmd sessions with dict-valued params -------------------------
        scored = anomalous.score_sessions(
            data=self.data3, session_column="session", window_length=3
        )
        window = scored["rarest_window3"].iloc[0]
        cmd = window[0]
        self.assertTrue(isinstance(window, list))
        self.assertTrue("name" in dir(cmd))
        self.assertTrue("params" in dir(cmd))
        self.assertTrue(isinstance(cmd.params, dict))
        # --- window longer than any session: empty window, NaN score ------
        scored = anomalous.score_sessions(
            data=self.data3, session_column="session", window_length=5
        )
        window = scored["rarest_window5"].iloc[0]
        likelihood = scored["rarest_window5_likelihood"].iloc[0]
        self.assertTrue(isinstance(window, list))
        self.assertEqual(len(window), 0)
        self.assertTrue(np.isnan(likelihood))
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| [
"datetime.datetime",
"pandas.DataFrame",
"msticpy.analysis.anomalous_sequence.anomalous.score_sessions",
"numpy.isnan",
"msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd",
"unittest.main"
] | [((3509, 3524), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3522, 3524), False, 'import unittest\n'), ((1283, 1344), 'pandas.DataFrame', 'pd.DataFrame', (["{'session': self.sessions1, 'time': self.times}"], {}), "({'session': self.sessions1, 'time': self.times})\n", (1295, 1344), True, 'import pandas as pd\n'), ((1366, 1427), 'pandas.DataFrame', 'pd.DataFrame', (["{'session': self.sessions2, 'time': self.times}"], {}), "({'session': self.sessions2, 'time': self.times})\n", (1378, 1427), True, 'import pandas as pd\n'), ((1449, 1510), 'pandas.DataFrame', 'pd.DataFrame', (["{'session': self.sessions3, 'time': self.times}"], {}), "({'session': self.sessions3, 'time': self.times})\n", (1461, 1510), True, 'import pandas as pd\n'), ((1791, 1879), 'msticpy.analysis.anomalous_sequence.anomalous.score_sessions', 'anomalous.score_sessions', ([], {'data': 'self.data1', 'session_column': '"""session"""', 'window_length': '(3)'}), "(data=self.data1, session_column='session',\n window_length=3)\n", (1815, 1879), False, 'from msticpy.analysis.anomalous_sequence import anomalous\n'), ((2346, 2434), 'msticpy.analysis.anomalous_sequence.anomalous.score_sessions', 'anomalous.score_sessions', ([], {'data': 'self.data2', 'session_column': '"""session"""', 'window_length': '(3)'}), "(data=self.data2, session_column='session',\n window_length=3)\n", (2370, 2434), False, 'from msticpy.analysis.anomalous_sequence import anomalous\n'), ((2738, 2826), 'msticpy.analysis.anomalous_sequence.anomalous.score_sessions', 'anomalous.score_sessions', ([], {'data': 'self.data3', 'session_column': '"""session"""', 'window_length': '(3)'}), "(data=self.data3, session_column='session',\n window_length=3)\n", (2762, 2826), False, 'from msticpy.analysis.anomalous_sequence import anomalous\n'), ((3131, 3219), 'msticpy.analysis.anomalous_sequence.anomalous.score_sessions', 'anomalous.score_sessions', ([], {'data': 'self.data3', 'session_column': '"""session"""', 'window_length': '(5)'}), 
"(data=self.data3, session_column='session',\n window_length=5)\n", (3155, 3219), False, 'from msticpy.analysis.anomalous_sequence import anomalous\n'), ((1218, 1238), 'datetime.datetime', 'datetime', (['(2019)', '(3)', '(1)'], {}), '(2019, 3, 1)\n', (1226, 1238), False, 'from datetime import datetime\n'), ((1240, 1260), 'datetime.datetime', 'datetime', (['(2019)', '(5)', '(6)'], {}), '(2019, 5, 6)\n', (1248, 1260), False, 'from datetime import datetime\n'), ((3461, 3474), 'numpy.isnan', 'np.isnan', (['lik'], {}), '(lik)\n', (3469, 3474), True, 'import numpy as np\n'), ((474, 503), 'msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd', 'Cmd', (['"""Set-User"""', "{'Identity'}"], {}), "('Set-User', {'Identity'})\n", (477, 503), False, 'from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd\n'), ((521, 566), 'msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd', 'Cmd', (['"""Set-User"""', "{'Identity', 'City', 'Name'}"], {}), "('Set-User', {'Identity', 'City', 'Name'})\n", (524, 566), False, 'from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd\n'), ((613, 645), 'msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd', 'Cmd', (['"""Set-Mailbox"""', "{'Identity'}"], {}), "('Set-Mailbox', {'Identity'})\n", (616, 645), False, 'from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd\n'), ((663, 700), 'msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd', 'Cmd', (['"""Set-User"""', "{'Identity', 'City'}"], {}), "('Set-User', {'Identity', 'City'})\n", (666, 700), False, 'from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd\n'), ((718, 747), 'msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd', 'Cmd', (['"""Set-User"""', "{'Identity'}"], {}), "('Set-User', {'Identity'})\n", (721, 747), False, 'from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd\n'), ((831, 868), 'msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd', 
'Cmd', (['"""Set-User"""', "{'Identity': 'blah'}"], {}), "('Set-User', {'Identity': 'blah'})\n", (834, 868), False, 'from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd\n'), ((886, 954), 'msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd', 'Cmd', (['"""Set-User"""', "{'Identity': 'haha', 'City': 'york', 'Name': 'bob'}"], {}), "('Set-User', {'Identity': 'haha', 'City': 'york', 'Name': 'bob'})\n", (889, 954), False, 'from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd\n'), ((1001, 1041), 'msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd', 'Cmd', (['"""Set-Mailbox"""', "{'Identity': 'blah'}"], {}), "('Set-Mailbox', {'Identity': 'blah'})\n", (1004, 1041), False, 'from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd\n'), ((1059, 1114), 'msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd', 'Cmd', (['"""Set-User"""', "{'Identity': 'blah', 'City': 'london'}"], {}), "('Set-User', {'Identity': 'blah', 'City': 'london'})\n", (1062, 1114), False, 'from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd\n'), ((1132, 1169), 'msticpy.analysis.anomalous_sequence.utils.data_structures.Cmd', 'Cmd', (['"""Set-User"""', "{'Identity': 'haha'}"], {}), "('Set-User', {'Identity': 'haha'})\n", (1135, 1169), False, 'from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd\n')] |
import os
import torch
import torch.nn.functional as F
import numpy as np
from glob import glob
from paths import WEIGHTS_PATH
def cosine_sim(x, z):
    """Pairwise cosine similarity between the rows of x (n, d) and z (m, d).

    Returns an (n, m) tensor where entry (i, j) is cos(x_i, z_j).
    """
    similarity = torch.nn.CosineSimilarity(dim=1)
    # broadcast (n, d, 1) against (1, d, m) so dim=1 is the feature axis
    return similarity(x.unsqueeze(-1), z.T.unsqueeze(0))
def cos_dist(x, z):
    """Pairwise cosine distance in [0, 1] between rows of x and rows of z.

    Defined as (1 - cosine_similarity) / 2, so identical directions give 0
    and opposite directions give 1.
    """
    similarity = torch.nn.CosineSimilarity(dim=1)(x[..., None], z.T[None, ...])
    return (1 - similarity) / 2
def linear_sim(x, z):
    """Dot-product (unnormalized) similarity matrix: rows of x vs rows of z."""
    z_transposed = z.T
    return x @ z_transposed
def l2_dist(x, z):
    """Pairwise Euclidean distance matrix between rows of x (n, d) and z (m, d).

    Returns an (n, m) tensor whose (i, j) entry is ||x_i - z_j||_2, via the
    identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b.

    Bug fix: the previous implementation summed the squared norms over the
    *entire* matrices (``(x ** 2).sum()``) instead of per row, inflating every
    entry whenever x or z had more than one row.  The sums are now kept
    per-row and broadcast across the distance matrix.
    """
    x_sq = (x ** 2).sum(dim=1, keepdim=True)   # (n, 1)
    z_sq = (z ** 2).sum(dim=1).unsqueeze(0)    # (1, m)
    dist_squared = x_sq + z_sq - 2 * (x @ z.T)
    # clamp guards against tiny negative values caused by floating-point error
    return torch.clamp(dist_squared, min=0).sqrt()
def cos_loglikelihood(x, z, gamma=0.1, z_dim=1):
    """Log-probabilities from temperature-scaled cosine similarities.

    Parameters
    ----------
    x, z : torch.Tensor
        Row-wise feature matrices compared with :func:`cosine_sim`.
    gamma : float
        Softmax temperature (smaller = sharper distribution).
    z_dim : int
        Dimension over which the softmax normalizes.

    Improvement: uses ``F.log_softmax`` instead of ``torch.log(F.softmax(...))``.
    The fused form is numerically stable — it avoids taking the log of
    probabilities that may have underflowed to zero.
    """
    cos_sim = cosine_sim(x, z)
    return F.log_softmax(cos_sim / gamma, dim=z_dim)
def unique_softmax(sim, labels, gamma=1, dim=0):
    """Softmax over similarities of *unique* labels, mapped back to all rows.

    Rows sharing a label receive the probability computed from the label's
    first occurrence, so duplicates do not distort the normalization.
    """
    assert sim.shape[0] == labels.shape[0]
    label_array = labels.detach().cpu().numpy()
    _, first_occurrence, inverse = np.unique(
        label_array, return_index=True, return_inverse=True)
    probs_unique = torch.nn.functional.softmax(sim[first_occurrence] / gamma, dim=dim)
    return probs_unique[inverse]
def compute_normalization_parameters(dataset):
    """Per-dimension mean and standard deviation of 512-d features.

    Streams over *dataset* (an iterable of dicts with 'frame_features' and
    'step_features' tensors of shape (k, 512)) accumulating first and second
    moments, then derives sigma = sqrt(E[f^2] - E[f]^2).

    Returns (mean_x, sigma_x, mean_z, sigma_z) for frame and step features.
    """
    sum_x, sum_x2 = torch.zeros(512), torch.zeros(512)
    sum_z, sum_z2 = torch.zeros(512), torch.zeros(512)
    n_frames = 0
    n_steps = 0
    for sample in dataset:
        frames = sample['frame_features']
        steps = sample['step_features']
        sum_x += frames.sum(0)
        sum_x2 += (frames ** 2).sum(0)
        n_frames += frames.shape[0]
        sum_z += steps.sum(0)
        sum_z2 += (steps ** 2).sum(0)
        n_steps += steps.shape[0]
    mean_x = sum_x / n_frames
    mean_z = sum_z / n_steps
    sigma_x = (sum_x2 / n_frames - mean_x ** 2).sqrt()
    sigma_z = (sum_z2 / n_steps - mean_z ** 2).sqrt()
    return mean_x, sigma_x, mean_z, sigma_z
def load_last_checkpoint(name, model, device='cuda', strict=True,
                         remove_name_preffix=None, remove_name_postfix=None):
    """Load the first matching checkpoint for *name* into *model*.

    Parameters
    ----------
    name : str
        Experiment directory under WEIGHTS_PATH.
    model : torch.nn.Module
        Model whose state dict is populated in place.
    device : str
        Map location for ``torch.load``.
    strict : bool
        Passed through to ``load_state_dict``.
    remove_name_preffix, remove_name_postfix : str, optional
        Strings stripped from the start / end of every state-dict key.

    Bug fix: stripping the postfix previously sliced with
    ``len(remove_name_preffix)`` — the wrong variable — which crashed when no
    prefix was given and removed the wrong number of characters otherwise.
    """
    weights_path = glob(os.path.join(WEIGHTS_PATH, name, "weights-epoch=*.ckpt"))[0]
    state_dict = torch.load(weights_path, map_location=device)['state_dict']
    print(f"Loading checkpoint at {weights_path}")
    # adjust names in state dict
    new_keys = list(state_dict.keys())
    if remove_name_preffix:
        new_keys = [k[len(remove_name_preffix):] for k in new_keys]
    if remove_name_postfix:
        new_keys = [k[:-len(remove_name_postfix)] for k in new_keys]
    # load state dict with new keys
    new_state_dict = dict(zip(new_keys, state_dict.values()))
    model.load_state_dict(new_state_dict, strict=strict)
    return None
| [
"torch.nn.functional.softmax",
"torch.nn.CosineSimilarity",
"torch.log",
"numpy.unique",
"torch.load",
"os.path.join",
"torch.zeros",
"torch.clamp"
] | [((169, 201), 'torch.nn.CosineSimilarity', 'torch.nn.CosineSimilarity', ([], {'dim': '(1)'}), '(dim=1)\n', (194, 201), False, 'import torch\n'), ((293, 325), 'torch.nn.CosineSimilarity', 'torch.nn.CosineSimilarity', ([], {'dim': '(1)'}), '(dim=1)\n', (318, 325), False, 'import torch\n'), ((671, 708), 'torch.nn.functional.softmax', 'F.softmax', (['(cos_sim / gamma)'], {'dim': 'z_dim'}), '(cos_sim / gamma, dim=z_dim)\n', (680, 708), True, 'import torch.nn.functional as F\n'), ((720, 736), 'torch.log', 'torch.log', (['probs'], {}), '(probs)\n', (729, 736), False, 'import torch\n'), ((930, 987), 'numpy.unique', 'np.unique', (['labels'], {'return_index': '(True)', 'return_inverse': '(True)'}), '(labels, return_index=True, return_inverse=True)\n', (939, 987), True, 'import numpy as np\n'), ((1057, 1113), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['(unique_sim / gamma)'], {'dim': 'dim'}), '(unique_sim / gamma, dim=dim)\n', (1084, 1113), False, 'import torch\n'), ((1266, 1282), 'torch.zeros', 'torch.zeros', (['(512)'], {}), '(512)\n', (1277, 1282), False, 'import torch\n'), ((1284, 1300), 'torch.zeros', 'torch.zeros', (['(512)'], {}), '(512)\n', (1295, 1300), False, 'import torch\n'), ((1324, 1340), 'torch.zeros', 'torch.zeros', (['(512)'], {}), '(512)\n', (1335, 1340), False, 'import torch\n'), ((1342, 1358), 'torch.zeros', 'torch.zeros', (['(512)'], {}), '(512)\n', (1353, 1358), False, 'import torch\n'), ((2178, 2223), 'torch.load', 'torch.load', (['weights_path'], {'map_location': 'device'}), '(weights_path, map_location=device)\n', (2188, 2223), False, 'import torch\n'), ((537, 569), 'torch.clamp', 'torch.clamp', (['dist_squared'], {'min': '(0)'}), '(dist_squared, min=0)\n', (548, 569), False, 'import torch\n'), ((2099, 2156), 'os.path.join', 'os.path.join', (['WEIGHTS_PATH', 'name', 'f"""weights-epoch=*.ckpt"""'], {}), "(WEIGHTS_PATH, name, f'weights-epoch=*.ckpt')\n", (2111, 2156), False, 'import os\n')] |
import os
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
DATASET_DIR: str = "data/"
# https://www.kaggle.com/rakannimer/air-passengers
def read_air_passengers() -> Tuple[pd.DataFrame, np.ndarray]:
    """Air-passengers series (Kaggle) with synthetic outliers injected.

    Returns the frame with a parsed 'date' column plus a binary ground-truth
    array marking the injected outlier rows.
    """
    outlier_indexes = [6, 33, 36, 51, 60, 100, 135]
    outlier_values = [205, 600, 150, 315, 150, 190, 620]
    df = pd.read_csv(f"{DATASET_DIR}air_passengers.csv")
    return _add_outliers_set_datetime(
        df, outlier_indexes, outlier_values, "date", "passengers"
    )
# https://www.kaggle.com/bulentsiyah/for-simple-exercises-time-series-forecasting?select=Alcohol_Sales.csv
def read_alcohol_sales() -> Tuple[pd.DataFrame, np.ndarray]:
    """Alcohol-sales series (Kaggle) with synthetic outliers injected.

    Returns the frame with a parsed 'date' column plus a binary ground-truth
    array marking the injected outlier rows.
    """
    outlier_indexes = [72, 128, 151, 208, 253, 315]
    outlier_values = [3000, 2000, 10000, 8300, 12180, 9000]
    df = pd.read_csv(f"{DATASET_DIR}alcohol_sales.csv")
    return _add_outliers_set_datetime(
        df, outlier_indexes, outlier_values, "date", "sales_number"
    )
# https://www.kaggle.com/arashnic/learn-time-series-forecasting-from-gold-price?select=gold_price_data.csv
def read_gold_price() -> Tuple[pd.DataFrame, np.ndarray]:
    """Monthly gold prices (Kaggle, preprocessed) with synthetic outliers.

    Triggers the daily-to-monthly preprocessing step if its cached CSV does
    not exist yet, then injects the outliers and parses the 'date' column.
    """
    raw_path = f"{DATASET_DIR}gold_price_data.csv"
    preprocessed_path = _add_suffix(raw_path)
    _gold_price_preprocessing(raw_path, preprocessed_path)
    outlier_indexes = [37, 140, 220, 306, 404, 441]
    outlier_values = [800, 250, 150, 500, 1350, 120]
    df = pd.read_csv(preprocessed_path)
    return _add_outliers_set_datetime(
        df, outlier_indexes, outlier_values, "date", "price"
    )
def _gold_price_preprocessing(path: str, path_preprocessing: str) -> None:
    """Downsample daily gold prices to monthly means and cache to CSV.

    Runs once: skipped entirely when the preprocessed file already exists.
    """
    if not os.path.exists(path_preprocessing):
        temp_file_path = f"{DATASET_DIR}df_temp.csv"
        df = pd.read_csv(path)
        df["date"] = pd.to_datetime(df["date"])
        # mean price per (year, month) group, rounded to 2 decimals
        df_temp = round(df.groupby([df["date"].dt.year, df["date"].dt.month]).mean(), 2)
        # round-trip through a temporary CSV to flatten the (year, month)
        # MultiIndex into ordinary positional columns
        df_temp.to_csv(temp_file_path)
        df_temp = pd.read_csv(temp_file_path)
        os.remove(temp_file_path)
        # rebuild a datetime column from the year/month columns; after the
        # round-trip column 0 holds the year and column 1 the month
        # (errors="coerce" turns unparsable rows into NaT rather than raising)
        df_temp["date"] = (
            df_temp.iloc[:, 0].astype(str) + "-" + df_temp.iloc[:, 1].astype(str)
        ).apply(pd.to_datetime, format="%Y-%m-%d", errors="coerce")
        df_temp.drop(df_temp.columns[1], axis=1, inplace=True)
        df_temp.to_csv(path_preprocessing, index=False)
def _add_outliers_set_datetime(
    df: pd.DataFrame, indexes: List[int], values: List[Union[int, float]],
    date_column_name: str, value_column_name: str
) -> Tuple[pd.DataFrame, np.ndarray]:
    """Overwrite rows of *df* at *indexes* with outlier *values* in place.

    Also parses the date column to datetime.  Returns the mutated frame and a
    binary ground-truth array (1 at outlier rows, 0 elsewhere).

    Raises
    ------
    ValueError
        If *indexes* and *values* differ in length.  (Previously a bare
        ``Exception`` was raised, which callers cannot catch selectively;
        ``ValueError`` is still caught by any existing ``except Exception``.)
    """
    if len(indexes) != len(values):
        raise ValueError("Arrays must have equal length!")
    df[date_column_name] = pd.to_datetime(df[date_column_name])
    for index, value in zip(indexes, values):
        df.loc[index, value_column_name] = value
    return df, _get_ground_truth_array(df, indexes)
def _get_ground_truth_array(df: pd.DataFrame, indexes: List[int]) -> np.ndarray:
y = np.zeros(len(df.index))
np.put(y, indexes, 1)
return y
def _add_suffix(path: str) -> str:
return path.replace(".csv", "_preprocessed.csv")
| [
"os.path.exists",
"pandas.read_csv",
"numpy.put",
"pandas.to_datetime",
"os.remove"
] | [((2546, 2582), 'pandas.to_datetime', 'pd.to_datetime', (['df[date_column_name]'], {}), '(df[date_column_name])\n', (2560, 2582), True, 'import pandas as pd\n'), ((2851, 2872), 'numpy.put', 'np.put', (['y', 'indexes', '(1)'], {}), '(y, indexes, 1)\n', (2857, 2872), True, 'import numpy as np\n'), ((372, 419), 'pandas.read_csv', 'pd.read_csv', (['f"""{DATASET_DIR}air_passengers.csv"""'], {}), "(f'{DATASET_DIR}air_passengers.csv')\n", (383, 419), True, 'import pandas as pd\n'), ((808, 854), 'pandas.read_csv', 'pd.read_csv', (['f"""{DATASET_DIR}alcohol_sales.csv"""'], {}), "(f'{DATASET_DIR}alcohol_sales.csv')\n", (819, 854), True, 'import pandas as pd\n'), ((1381, 1412), 'pandas.read_csv', 'pd.read_csv', (['path_preprocessing'], {}), '(path_preprocessing)\n', (1392, 1412), True, 'import pandas as pd\n'), ((1541, 1575), 'os.path.exists', 'os.path.exists', (['path_preprocessing'], {}), '(path_preprocessing)\n', (1555, 1575), False, 'import os\n'), ((1643, 1660), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (1654, 1660), True, 'import pandas as pd\n'), ((1682, 1708), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {}), "(df['date'])\n", (1696, 1708), True, 'import pandas as pd\n'), ((1855, 1882), 'pandas.read_csv', 'pd.read_csv', (['temp_file_path'], {}), '(temp_file_path)\n', (1866, 1882), True, 'import pandas as pd\n'), ((1891, 1916), 'os.remove', 'os.remove', (['temp_file_path'], {}), '(temp_file_path)\n', (1900, 1916), False, 'import os\n')] |
import numpy as np
import numpy.matlib as nm
from time import time
import matplotlib.pyplot as plt
from svgd import SVGD
class MVN:
    """
    Multivariate Normal score model for SVGD.

    `dlnprob` returns the gradient of the log-density of N(mu, A^{-1})
    evaluated at each particle row of theta.
    """
    def __init__(self, mu, A):
        self.mu = mu
        self.A = A

    def dlnprob(self, theta):
        # (theta - mu) broadcasts mu across the particle rows, matching the
        # explicit repmat tiling used previously.
        centered = theta - self.mu
        return -np.matmul(centered, self.A)
class GMM:
    """
    Gaussian Mixture Model score model for SVGD.

    `dlnprob` evaluates the gradient of the log mixture density at each
    particle row of theta.
    """
    def __init__(self, mu, A, gmprob):
        # mu: LxM with L peaks, M dims
        # A: LxMxM
        # gmprob: Lx1
        # All three arrays must describe the same number of components and
        # the mixture weights must sum exactly to 1.
        assert mu.shape[0]==A.shape[0]==gmprob.shape[0] and gmprob.sum()==1.0
        self.mu = mu
        self.A = A
        self.gmprob = gmprob
    def dlnprob(self, theta):
        # theta: NxM batch of particles.
        # dlnp1 accumulates the weighted gradients of each (unnormalized)
        # Gaussian component, dlnp2 the weighted densities; their ratio is
        # d/dtheta log sum_l pi_l exp(-0.5 (theta-mu_l)^T A_l (theta-mu_l)).
        # NOTE(review): component normalization constants (|A_l|^{1/2}) are
        # omitted, so the gradient is exact only when all components share the
        # same precision determinant (true for the identity A's used below) —
        # confirm before using with heterogeneous covariances.
        dlnp1, dlnp2 = 0, 0
        for l in range(self.gmprob.shape[0]):
            tmp1 = theta-nm.repmat(self.mu[l], theta.shape[0], 1)
            tmp2 = np.matmul(tmp1, self.A[l])
            dlnp1 += -1*np.exp(-0.5*(tmp1*tmp2).sum(axis=1)).reshape((theta.shape[0],1))*tmp2*self.gmprob[l]
            dlnp2 += 1*np.exp(-0.5*(tmp1*tmp2).sum(axis=1))*self.gmprob[l]
        return dlnp1/dlnp2.reshape((theta.shape[0],1))
if __name__ == '__main__':
    # Single-Gaussian target, kept for reference:
    # A = np.array([[0.2260,0.1652],[0.1652,0.6779]])
    # mu = np.array([-0.6871,0.8010])
    # Two-component isotropic mixture at (-5,-5) and (5,5), weights 0.2 / 0.8.
    A = np.array([[[1.0,0.0],[0.0,1.0]], [[1.0,0.0],[0.0,1.0]]])
    mu = np.array([[-5.0,-5.0], [5.0,5.0]])
    gmprob = np.array([0.2, 0.8])
    # model = MVN(mu, A)
    model = GMM(mu, A, gmprob)
    tik = time()
    # x0 = np.random.normal(0, 1, [1000, 2])
    # 1000 particles initialized uniformly in [-4, 4]^2
    x0 = np.random.uniform(-4.0, 4.0, [1000, 2])
    # run Stein variational gradient descent for 1000 iterations
    theta = SVGD().update(x0, model.dlnprob, n_iter=1000, stepsize=0.01)
    tok = time()
    print("ground truth: ", mu)
    print("svgd: ", np.mean(theta,axis=0))
    print("time: ", tok-tik)
    plt.scatter(theta.T[0], theta.T[1], s=1)
    plt.show()
| [
"numpy.mean",
"numpy.matlib.repmat",
"numpy.array",
"numpy.matmul",
"matplotlib.pyplot.scatter",
"numpy.random.uniform",
"svgd.SVGD",
"time.time",
"matplotlib.pyplot.show"
] | [((1271, 1333), 'numpy.array', 'np.array', (['[[[1.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 1.0]]]'], {}), '([[[1.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 1.0]]])\n', (1279, 1333), True, 'import numpy as np\n'), ((1337, 1373), 'numpy.array', 'np.array', (['[[-5.0, -5.0], [5.0, 5.0]]'], {}), '([[-5.0, -5.0], [5.0, 5.0]])\n', (1345, 1373), True, 'import numpy as np\n'), ((1385, 1405), 'numpy.array', 'np.array', (['[0.2, 0.8]'], {}), '([0.2, 0.8])\n', (1393, 1405), True, 'import numpy as np\n'), ((1482, 1488), 'time.time', 'time', ([], {}), '()\n', (1486, 1488), False, 'from time import time\n'), ((1543, 1582), 'numpy.random.uniform', 'np.random.uniform', (['(-4.0)', '(4.0)', '[1000, 2]'], {}), '(-4.0, 4.0, [1000, 2])\n', (1560, 1582), True, 'import numpy as np\n'), ((1666, 1672), 'time.time', 'time', ([], {}), '()\n', (1670, 1672), False, 'from time import time\n'), ((1787, 1827), 'matplotlib.pyplot.scatter', 'plt.scatter', (['theta.T[0]', 'theta.T[1]'], {'s': '(1)'}), '(theta.T[0], theta.T[1], s=1)\n', (1798, 1827), True, 'import matplotlib.pyplot as plt\n'), ((1832, 1842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1840, 1842), True, 'import matplotlib.pyplot as plt\n'), ((1730, 1752), 'numpy.mean', 'np.mean', (['theta'], {'axis': '(0)'}), '(theta, axis=0)\n', (1737, 1752), True, 'import numpy as np\n'), ((876, 902), 'numpy.matmul', 'np.matmul', (['tmp1', 'self.A[l]'], {}), '(tmp1, self.A[l])\n', (885, 902), True, 'import numpy as np\n'), ((1595, 1601), 'svgd.SVGD', 'SVGD', ([], {}), '()\n', (1599, 1601), False, 'from svgd import SVGD\n'), ((816, 856), 'numpy.matlib.repmat', 'nm.repmat', (['self.mu[l]', 'theta.shape[0]', '(1)'], {}), '(self.mu[l], theta.shape[0], 1)\n', (825, 856), True, 'import numpy.matlib as nm\n'), ((314, 351), 'numpy.matlib.repmat', 'nm.repmat', (['self.mu', 'theta.shape[0]', '(1)'], {}), '(self.mu, theta.shape[0], 1)\n', (323, 351), True, 'import numpy.matlib as nm\n')] |
import os
from typing import List, Union
import numpy as np
from ConfigSpace.configuration_space import Configuration
from smac.runhistory.runhistory import RunHistory
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.plot.scatter import plot_scatter_plot
from cave.utils.helpers import get_cost_dict_for_config, NotApplicable
from cave.utils.hpbandster_helpers import format_budgets
class PlotScatter(BaseAnalyzer):
    """
    Scatter plots show the costs of the default and optimized parameter configuration on each instance. Since this
    looses detailed information about the individual cost on each instance by looking at aggregated cost values in
    tables, scatter plots provide a more detailed picture. They provide insights whether overall performance
    improvements can be explained only by some outliers or whether they are due to improvements on the entire
    instance set. On the left side the training-data is scattered, on the right side the test-data is scattered.
    """
    def __init__(self,
                 runscontainer,
                 ):
        """
        Creates a scatterplot of the two configurations on the given set of instances.
        Saves plot to file.
        """
        super().__init__(runscontainer)
        formatted_budgets = format_budgets(self.runscontainer.get_budgets())
        # one scatter plot per budget, keyed by its formatted label
        for budget, run in zip(self.runscontainer.get_budgets(),
                               self.runscontainer.get_aggregated(keep_budgets=True, keep_folders=False)):
            self.result[formatted_budgets[budget]] = self._plot_scatter(
                default=run.default,
                incumbent=run.incumbent,
                rh=run.epm_runhistory,
                train=run.scenario.train_insts,
                test=run.scenario.test_insts,
                run_obj=run.scenario.run_obj,
                cutoff=run.scenario.cutoff,
                output_dir=run.output_dir,
            )

    def get_name(self):
        return "Scatter Plot"

    def _plot_scatter(self,
                      default: Configuration,
                      incumbent: Configuration,
                      rh: RunHistory,
                      train: List[str],
                      test: Union[List[str], None],
                      run_obj: str,
                      cutoff,
                      output_dir):
        """
        Parameters
        ----------
        default, incumbent: Configuration
            configurations to be compared
        rh: RunHistory
            runhistory to use for cost-estimations
        train[, test]: list(str)
            instance-names
        run_obj: str
            run-objective (time or quality)
        cutoff: float
            maximum runtime of ta
        output_dir: str
            output directory

        Returns
        -------
        dict
            {'figure': list of written plot paths, or None if nothing plotted}

        Raises
        ------
        NotApplicable
            if neither train nor test has more than one instance
        """
        out_fn_base = os.path.join(output_dir, 'scatter_')
        self.logger.info("... plotting scatter")
        metric = run_obj
        timeout = cutoff
        labels = ["default {}".format(run_obj), "incumbent {}".format(run_obj)]
        def_costs = get_cost_dict_for_config(rh, default).items()
        inc_costs = get_cost_dict_for_config(rh, incumbent).items()
        out_fns = []
        # Bug fix: `test` may be None (see signature); normalize it to an
        # empty list instead of crashing on len(None).
        test_insts = test if test is not None else []
        if len(train) <= 1 and len(test_insts) <= 1:
            raise NotApplicable("No instances, so no scatter-plot.")
        for insts, name in [(train, 'train'), (test_insts, 'test')]:
            if len(insts) <= 1:
                self.logger.debug("No %s instances, skipping scatter", name)
                continue
            # dedicated names so the Configuration arguments `default` /
            # `incumbent` are not shadowed by cost arrays on later iterations
            def_cost_arr = np.array([v for k, v in def_costs if k in insts])
            inc_cost_arr = np.array([v for k, v in inc_costs if k in insts])
            min_val = min(min(def_cost_arr), min(inc_cost_arr))
            out_fn = out_fn_base + name + '.png'
            out_fns.append(plot_scatter_plot((def_cost_arr,), (inc_cost_arr,), labels, metric=metric,
                                             min_val=min_val, max_val=timeout, out_fn=out_fn))
            self.logger.debug("Plotted scatter to %s", out_fn)
        return {'figure' : out_fns if len(out_fns) > 0 else None}
| [
"os.path.join",
"cave.plot.scatter.plot_scatter_plot",
"numpy.array",
"cave.utils.helpers.NotApplicable",
"cave.utils.helpers.get_cost_dict_for_config"
] | [((2863, 2899), 'os.path.join', 'os.path.join', (['output_dir', '"""scatter_"""'], {}), "(output_dir, 'scatter_')\n", (2875, 2899), False, 'import os\n'), ((3302, 3352), 'cave.utils.helpers.NotApplicable', 'NotApplicable', (['"""No instances, so no scatter-plot."""'], {}), "('No instances, so no scatter-plot.')\n", (3315, 3352), False, 'from cave.utils.helpers import get_cost_dict_for_config, NotApplicable\n'), ((3572, 3621), 'numpy.array', 'np.array', (['[v for k, v in def_costs if k in insts]'], {}), '([v for k, v in def_costs if k in insts])\n', (3580, 3621), True, 'import numpy as np\n'), ((3646, 3695), 'numpy.array', 'np.array', (['[v for k, v in inc_costs if k in insts]'], {}), '([v for k, v in inc_costs if k in insts])\n', (3654, 3695), True, 'import numpy as np\n'), ((3101, 3138), 'cave.utils.helpers.get_cost_dict_for_config', 'get_cost_dict_for_config', (['rh', 'default'], {}), '(rh, default)\n', (3125, 3138), False, 'from cave.utils.helpers import get_cost_dict_for_config, NotApplicable\n'), ((3167, 3206), 'cave.utils.helpers.get_cost_dict_for_config', 'get_cost_dict_for_config', (['rh', 'incumbent'], {}), '(rh, incumbent)\n', (3191, 3206), False, 'from cave.utils.helpers import get_cost_dict_for_config, NotApplicable\n'), ((3828, 3948), 'cave.plot.scatter.plot_scatter_plot', 'plot_scatter_plot', (['(default,)', '(incumbent,)', 'labels'], {'metric': 'metric', 'min_val': 'min_val', 'max_val': 'timeout', 'out_fn': 'out_fn'}), '((default,), (incumbent,), labels, metric=metric, min_val=\n min_val, max_val=timeout, out_fn=out_fn)\n', (3845, 3948), False, 'from cave.plot.scatter import plot_scatter_plot\n')] |
import numpy as np
"""
ref https://stackoverflow.com/questions/56207448/efficient-quaternions-to-euler-transformation
"""
def to_euler(w, x, y, z):
    """Convert a quaternion (w, x, y, z) to Euler angles in degrees.

    Returns (X, Y, Z) — rotations about the x, y and z axes respectively.
    """
    y_squared = y * y

    # rotation about X
    sin_x_term = 2.0 * (w * x + y * z)
    cos_x_term = 1.0 - 2.0 * (x * x + y_squared)
    X = np.degrees(np.arctan2(sin_x_term, cos_x_term))

    # rotation about Y; clamp keeps arcsin inside its [-1, 1] domain
    sin_y_term = np.clip(2.0 * (w * y - z * x), a_min=-1.0, a_max=1.0)
    Y = np.degrees(np.arcsin(sin_y_term))

    # rotation about Z
    sin_z_term = 2.0 * (w * z + x * y)
    cos_z_term = 1.0 - 2.0 * (y_squared + z * z)
    Z = np.degrees(np.arctan2(sin_z_term, cos_z_term))

    return X, Y, Z
def quaternion_to_euler(w=1.0, x=0.0, y=0.0, z=0.0):
    """Alias for :func:`to_euler`.

    Bug fix: the previous body returned names X, Y, Z that were never
    defined, so every call raised ``NameError``.  The quaternion components
    are now accepted as parameters — defaulting to the identity quaternion,
    so the old zero-argument call form still works — and delegated.
    """
    return to_euler(w, x, y, z)
if __name__ == '__main__':
    # Demo: the identity quaternion maps to zero Euler angles.
    # Bug fix: the previous call passed undefined names w, x, y, z and
    # crashed with NameError before producing any output.
    print(to_euler(1.0, 0.0, 0.0, 0.0))
| [
"numpy.clip",
"numpy.arcsin",
"numpy.arctan2"
] | [((321, 355), 'numpy.clip', 'np.clip', (['t2'], {'a_min': '(-1.0)', 'a_max': '(1.0)'}), '(t2, a_min=-1.0, a_max=1.0)\n', (328, 355), True, 'import numpy as np\n'), ((258, 276), 'numpy.arctan2', 'np.arctan2', (['t0', 't1'], {}), '(t0, t1)\n', (268, 276), True, 'import numpy as np\n'), ((375, 388), 'numpy.arcsin', 'np.arcsin', (['t2'], {}), '(t2)\n', (384, 388), True, 'import numpy as np\n'), ((479, 497), 'numpy.arctan2', 'np.arctan2', (['t3', 't4'], {}), '(t3, t4)\n', (489, 497), True, 'import numpy as np\n')] |
from tkinter import *
import tkinter.font as tkFont
import random
import numpy as np
import queue
import copy
import time
import threading
import sys
import os
# Build an access path for bundled resource files.
def resource_path(relative_path):
    """Resolve a resource path both in a dev checkout and a PyInstaller bundle."""
    # PyInstaller sets sys.frozen and unpacks data files under sys._MEIPASS;
    # otherwise resolve relative to the current working directory.
    frozen = getattr(sys, 'frozen', False)
    base = sys._MEIPASS if frozen else os.path.abspath(".")
    return os.path.join(base, relative_path)
from ctypes import windll, byref, create_unicode_buffer, create_string_buffer
FR_PRIVATE  = 0x10  # AddFontResourceEx flag: font visible only to this process
FR_NOT_ENUM = 0x20  # AddFontResourceEx flag: font not enumerable by other apps
# https://stackoverflow.com/questions/11993290/truly-custom-font-in-tkinter
def load_font(fontpath, private=True, enumerable=False):
    """Register a font file with Windows GDI for this process (Windows only).

    Parameters
    ----------
    fontpath : bytes | str
        Path to the font file; bytes selects the ANSI API, str the wide API.
    private : bool
        If True, the font is private to this process (FR_PRIVATE).
    enumerable : bool
        If False, other applications cannot enumerate the font (FR_NOT_ENUM).

    Returns
    -------
    bool
        True when Windows reports at least one font was added.

    Raises
    ------
    TypeError
        If fontpath is neither bytes nor str.
    """
    if isinstance(fontpath, bytes):
        pathbuf = create_string_buffer(fontpath)
        AddFontResourceEx = windll.gdi32.AddFontResourceExA
    # elif isinstance(fontpath, unicode):
    elif isinstance(fontpath, str):
        pathbuf = create_unicode_buffer(fontpath)
        AddFontResourceEx = windll.gdi32.AddFontResourceExW
    else:
        # raise TypeError('fontpath must be of type str or unicode')
        raise TypeError('fontpath must be of type str')
    flags = (FR_PRIVATE if private else 0) | (FR_NOT_ENUM if not enumerable else 0)
    numFontsAdded = AddFontResourceEx(byref(pathbuf), flags, 0)
    return bool(numFontsAdded)
class Mine:
    """Pure minesweeper board model (no UI): mine layout, adjacency counts
    and the revealed/hidden state of every cell."""
    def __init__(self, w, h, n):
        self.w, self.h = w, h
        # number of mines
        self.n = n
        # grids are addressed as [x][y] throughout this class
        # cell code:
        # -1   - mine
        # 1..8 - count of adjacent mines
        # 0    - safe / empty
        # NOTE(review): the grids are built with w columns per row and h rows,
        # yet indexed as [x < w][y < h]; this is only consistent when w == h.
        # MineUi compensates by swapping w and h on construction — confirm
        # before changing either side.
        self.map = [[0 for i in range(w)] for j in range(h)]
        self.dis = [[False for i in range(w)] for j in range(h)]
        self.init_mine()
        self.update_weights()
    def init_mine(self):
        # reset the grid, then scatter n mines over distinct random cells
        self.map = [[0 for i in range(self.w)] for j in range(self.h)]
        sample = [(i, j) for j in range(self.w) for i in range(self.h)]
        mlist = random.sample(sample, self.n)
        for li in mlist:
            self.map[li[0]][li[1]] = -1
    def update_weights(self):
        # np.array enables 2-D slicing over the 3x3 neighbourhood
        a = np.array(self.map)
        for x in range(self.w):
            for y in range(self.h):
                if a[x][y] == -1:
                    continue
                # slice bounds are clipped at the top/left edges; numpy
                # clips past-the-end bounds at the bottom/right automatically
                low_x = max(0, x - 1)
                low_y = max(0, y - 1)
                s = a[low_x:x + 2, low_y:y + 2]
                self.map[x][y] = list(s.reshape(s.size)).count(-1)
    # return value: True - dug up a mine; False - safe
    def dig(self, x, y):
        if not (0 <= x < self.w and 0 <= y < self.h):
            return False
        if self.map[x][y] == -1:
            return True
        # hit a number: reveal only that single cell
        if self.map[x][y] != 0:
            self.dis[x][y] = True
            return False
        self.digging(x, y)
        return False
    # flood-reveal from (x, y) using breadth-first search
    def digging(self, x, y):
        # never re-dig a revealed cell, never auto-dig a mine
        if self.dis[x][y] is True:
            return
        # reveal the starting cell first
        self.dis[x][y] = True
        if self.map[x][y] == -1:
            return
        searched = []
        q = queue.Queue()
        q.put((x, y))
        searched.append((x, y))
        # 4-neighbourhood: right, down, left, up offsets
        directions = ((0, 1, 0, -1), (-1, 0, 1, 0))
        while not q.empty():
            ix, iy = q.get()
            for k in range(4):
                dx = ix + directions[0][k]
                dy = iy + directions[1][k]
                if 0 <= dx < self.w and 0 <= dy < self.h and self.dis[dx][dy] is False:
                    # only empty cells keep expanding the frontier
                    if self.map[dx][dy] == 0:
                        q.put(copy.deepcopy((dx, dy)))
                    searched.append(copy.deepcopy((dx, dy)))
                    self.dis[dx][dy] = True
        # final pass: also reveal the numbers bordering the cleared region
        a = np.array(self.map)
        for search in searched:
            ix, iy = search
            low = max(0, ix - 1), max(0, iy - 1)
            s = a[low[0]:ix + 2, low[1]:iy + 2]
            for ii in range(s.shape[0]):
                for ij in range(s.shape[1]):
                    tx, ty = ii + ix - 1, ij + iy - 1
                    if 0 <= tx and tx < self.w and 0 <= ty and ty < self.h \
                            and self.map[tx][ty] != -1 and self.map[tx][ty] != 0:
                        self.dis[tx][ty] = True
    def win(self):
        # the game is won once every non-mine cell has been revealed
        for x in range(self.w):
            for y in range(self.h):
                if self.map[x][y] != -1 and self.dis[x][y] is False:
                    return False
        return True
class MineUi:
    """Tkinter front-end for the Mine model: grid of cell buttons plus the
    timer, remaining-mine counter and restart ("face") button."""
    def __init__(self, root, w=10, h=10, n=10):
        # NOTE(review): w and h are deliberately swapped here; this appears to
        # compensate for Mine's [x][y] indexing — confirm before "fixing".
        self.h, self.w, self.n = w, h, n
        self.mine = Mine(w, h, n)
        self.root = root
        self.root.resizable(width=False, height=False)
        # self.root.attributes("-toolwindow", 1)
        self.root.attributes('-alpha', 0.9)
        self.root.title("PyMine - 扫雷")
        # ensure the custom pixel font is available, installing it on demand
        if '5x5 Dots' not in tkFont.families():
            res = load_font(resource_path(os.path.join('font', '5x5dots.ttf')))
            if not res:
                print("字体安装失败")
                exit(1)
        # configure Tk variables and display constants
        self.var_time = StringVar()
        self.var_num = StringVar()
        self.var_face = StringVar()
        self.var_num.set("%03d" % self.n)
        self.var_time.set("000")
        self.var_face.set("K")
        # win: None = round running, True = won, False = lost
        self.win = None
        self.stated = False
        self.time = 0
        self.thread = None
        self.CODE_MINE = '۞'
        self.CODE_BLANK = ''
        self.CODE_CHECKED = ''
        # cell code -> glyph shown on the button (-2 means "still hidden")
        self.CODE = {
            -2: self.CODE_BLANK,
            -1: self.CODE_MINE,
            0: self.CODE_CHECKED,
        }
        for i in range(1, 10):
            self.CODE[i] = "%d" % i
        # cell code -> foreground colour
        self.COLORS = {
            -2: 'snow',
            -1: 'Black',
            0: 'LightGrey',
            1: 'Peru',
            2: 'DarkGoldenrod',
            3: 'OrangeRed',
            4: 'RosyBrown',
            5: 'LightSeaGreen',
            6: 'Aqua',
            7: 'SpringGreen',
            8: 'Lime',
        }
        # right-click flag markers
        self.signs = [[False for i in range(self.w)] for j in range(self.h)]
        # Wingdings glyphs: J = smiley, L = sad face, K = neutral face
        self.font_face = tkFont.Font(family='Wingdings', size=15, weight=tkFont.NORMAL)
        self.font_num = tkFont.Font(family='5x5 Dots', size=24, weight=tkFont.NORMAL)
        self.font_unit = tkFont.Font(family='Consolas', size=9, weight=tkFont.NORMAL)
        Label(self.root, textvariable=self.var_time, font=self.font_num).grid(row=0, column=0)
        Button(self.root, textvariable=self.var_face, font=self.font_face, command=self.restart, relief='groove',).grid(row=0, column=1)
        Label(self.root, textvariable=self.var_num, font=self.font_num).grid(row=0, column=2)
        self.frame = Frame(self.root)
        self.vars = [[StringVar() for i in range(self.w)] for j in range(self.h)]
        self.buttons = [[None for i in range(self.w)] for j in range(self.h)]
        self.units = [[MineUnit(self, j, i) for i in range(self.w)] for j in range(self.h)]
        for x in range(self.w):
            for y in range(self.h):
                self.buttons[x][y] = Button(self.frame,
                                            textvariable=self.vars[x][y],
                                            command=self.units[x][y].click,
                                            relief='groove',
                                            bd=1,
                                            font=self.font_unit,
                                            width=3, height=1,
                                            activebackground='gray',
                                            bg='snow')
                self.buttons[x][y].bind("<Button-3>", self.units[x][y].right_click)
                self.buttons[x][y].grid(row=y, column=x)
        self.frame.grid(row=1, columnspan=3)
        self.refresh()
    def init_data(self):
        # Reset all game state and rebuild the button grid for a new round.
        self.mine = Mine(self.w, self.h, self.n)
        self.var_num.set("%03d" % self.n)
        self.var_time.set("000")
        self.var_face.set("K")
        self.win = None
        self.stated = False
        self.time = 0
        self.thread = None
        # drop the old widgets before recreating them
        for x in self.buttons:
            for y in x:
                y.grid_forget()
        self.vars = [[StringVar() for i in range(self.w)] for j in range(self.h)]
        self.buttons = [[None for i in range(self.w)] for j in range(self.h)]
        self.units = [[MineUnit(self, j, i) for i in range(self.w)] for j in range(self.h)]
        self.signs = [[False for i in range(self.w)] for j in range(self.h)]
        for x in range(self.w):
            for y in range(self.h):
                self.buttons[x][y] = Button(self.frame,
                                            textvariable=self.vars[x][y],
                                            command=self.units[x][y].click,
                                            relief='groove',
                                            bd=1,
                                            font=self.font_unit,
                                            width=3, height=1,
                                            activebackground='gray',
                                            bg='snow')
                self.buttons[x][y].bind("<Button-3>", self.units[x][y].right_click)
                self.buttons[x][y].grid(row=y, column=x)
        self.refresh()
    def time_loop(self):
        # Timer thread body: ticks once per second while a round is running.
        while self.stated is True:
            try:
                time.sleep(1)
                self.time = self.time + 1
                self.var_time.set("%03d" % self.time)
            except Exception as e:
                print(e)
                continue
    def refresh(self):
        # Redraw every cell from the Mine model's current state.
        for x in range(self.w):
            for y in range(self.h):
                self.buttons[x][y].configure(fg=self.COLORS[self.mine.map[x][y]])
                if self.mine.dis[x][y] is True:
                    self.vars[x][y].set(self.CODE[self.mine.map[x][y]])
                    if self.mine.map[x][y] == 0:
                        self.buttons[x][y].configure(bg=self.COLORS[self.mine.map[x][y]])
                else:
                    self.vars[x][y].set(self.CODE[-2])
                # on a win: grade the player's flags and uncover the mines
                if self.win is True:
                    if self.signs[x][y] is True:
                        if self.mine.map[x][y] == -1:
                            self.buttons[x][y].configure(bg='green')
                        else:
                            self.buttons[x][y].configure(bg='red')
                    if self.mine.map[x][y] == -1:
                        self.vars[x][y].set(self.CODE[self.mine.map[x][y]])
    def restart(self):
        # Face-button handler: start a fresh round in the same window.
        # self.root.destroy()
        # self.__init__(Tk(), w=self.w, h=self.h, n=self.n)
        self.init_data()
class MineUnit:
    """Controller for a single grid cell: wires its button events to the
    Mine model and the MineUi display state."""
    def __init__(self, ui_: MineUi, x: int, y: int):
        self.ui = ui_
        self.x, self.y = x, y
    def click(self):
        # Left-click: dig this cell, then handle loss / win / redraw.
        # Ignore clicks once the round has ended.
        if self.ui.win is not None:
            return
        # the first dig of a round starts the timer
        if self.ui.win is None and self.ui.stated is False:
            self.start_timer()
        res = self.ui.mine.dig(self.x, self.y)
        # You lost
        if res is True:
            # self.ui.mine.dis = [[True for i in range(self.ui.w)] for j in range(self.ui.h)]
            # reveal every mine and paint it red
            for x in range(self.ui.w):
                for y in range(self.ui.h):
                    if self.ui.mine.map[x][y] == -1:
                        self.ui.mine.dis[x][y] = True
                        self.ui.buttons[x][y].configure(bg='red')
            self.ui.win = False
            self.ui.var_face.set("L")
            self.ui.stated = False
            self.ui.refresh()
            return
        if self.ui.mine.win():
            self.ui.win = True
            self.ui.var_face.set("J")
            self.ui.stated = False
            self.ui.refresh()
            return
        self.right_judge()
        self.ui.refresh()
    def right_click(self, argv):
        # Right-click: toggle a flag on this cell, then check for a flag-win.
        if self.ui.win is not None:
            return
        if self.ui.signs[self.x][self.y] is False:
            self.ui.signs[self.x][self.y] = True
            self.ui.buttons[self.x][self.y].configure(bg='blue')
        else:
            self.ui.signs[self.x][self.y] = False
            self.ui.buttons[self.x][self.y].configure(bg='snow')
        self.right_judge()
    def right_judge(self):
        # Declare a win when exactly n flags are placed and all sit on mines.
        sumi = 0
        for x in range(self.ui.w):
            sumi = sumi + self.ui.signs[x].count(True)
        if sumi == self.ui.n:
            flag = True
            for x in range(self.ui.w):
                if flag is True:
                    for y in range(self.ui.h):
                        if self.ui.signs[x][y] is True and self.ui.mine.map[x][y] != -1:
                            flag = False
                            break
            if flag is True:
                self.ui.win = True
                self.ui.var_face.set("J")
                self.ui.stated = False
                self.ui.refresh()
                return
    def start_timer(self):
        # Spawn the daemon timer thread (at most once per round).
        if self.ui.thread is not None:
            return
        self.ui.thread = threading.Thread(target=self.ui.time_loop)
        self.ui.thread.setDaemon(True)
        self.ui.stated = True
        self.ui.thread.start()
class ConfigUi:
    """Start-up dialog asking for board width, height and mine count."""
    def __init__(self, root):
        self.root = root
        self.frame = Frame(self.root)
        self.vars = [StringVar() for i in range(3)]
        self.w = Entry(self.frame, textvariable=self.vars[0])
        self.h = Entry(self.frame, textvariable=self.vars[1])
        self.n = Entry(self.frame, textvariable=self.vars[2])
        # defaults: 15x15 board with 10 mines
        self.w.insert(0, "15")
        self.h.insert(0, "15")
        self.n.insert(0, "10")
        Label(self.frame, text="宽度").grid(row=0, column=0)
        Label(self.frame, text="高度").grid(row=1, column=0)
        Label(self.frame, text="雷数").grid(row=2, column=0)
        self.w.grid(row=0, column=1)
        self.h.grid(row=1, column=1)
        self.n.grid(row=2, column=1)
        # the start button tears down this dialog and builds the game UI with
        # the entered dimensions
        Button(self.frame, text='开始',
               command=lambda: (self.frame.destroy(),
                                MineUi(self.root, w=int(self.vars[0].get()), h=int(self.vars[1].get()), n=int(self.vars[2].get()))))\
            .grid(row=3, columnspan=2, sticky=W+E)
        self.frame.grid()
if __name__ == '__main__':
    # ui = MineUi(Tk(), w=20, h=20, n=5)
    # ui.root.mainloop()
    # Show the configuration dialog first; it launches MineUi itself.
    _root = Tk()
    cui = ConfigUi(_root)
    cui.root.mainloop()
| [
"random.sample",
"ctypes.byref",
"ctypes.create_unicode_buffer",
"tkinter.font.families",
"os.path.join",
"ctypes.create_string_buffer",
"time.sleep",
"numpy.array",
"tkinter.font.Font",
"copy.deepcopy",
"os.path.abspath",
"threading.Thread",
"queue.Queue"
] | [((361, 399), 'os.path.join', 'os.path.join', (['base_path', 'relative_path'], {}), '(base_path, relative_path)\n', (373, 399), False, 'import os\n'), ((329, 349), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (344, 349), False, 'import os\n'), ((706, 736), 'ctypes.create_string_buffer', 'create_string_buffer', (['fontpath'], {}), '(fontpath)\n', (726, 736), False, 'from ctypes import windll, byref, create_unicode_buffer, create_string_buffer\n'), ((1243, 1257), 'ctypes.byref', 'byref', (['pathbuf'], {}), '(pathbuf)\n', (1248, 1257), False, 'from ctypes import windll, byref, create_unicode_buffer, create_string_buffer\n'), ((1893, 1922), 'random.sample', 'random.sample', (['sample', 'self.n'], {}), '(sample, self.n)\n', (1906, 1922), False, 'import random\n'), ((2063, 2081), 'numpy.array', 'np.array', (['self.map'], {}), '(self.map)\n', (2071, 2081), True, 'import numpy as np\n'), ((3023, 3036), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (3034, 3036), False, 'import queue\n'), ((3655, 3673), 'numpy.array', 'np.array', (['self.map'], {}), '(self.map)\n', (3663, 3673), True, 'import numpy as np\n'), ((6048, 6110), 'tkinter.font.Font', 'tkFont.Font', ([], {'family': '"""Wingdings"""', 'size': '(15)', 'weight': 'tkFont.NORMAL'}), "(family='Wingdings', size=15, weight=tkFont.NORMAL)\n", (6059, 6110), True, 'import tkinter.font as tkFont\n'), ((6135, 6196), 'tkinter.font.Font', 'tkFont.Font', ([], {'family': '"""5x5 Dots"""', 'size': '(24)', 'weight': 'tkFont.NORMAL'}), "(family='5x5 Dots', size=24, weight=tkFont.NORMAL)\n", (6146, 6196), True, 'import tkinter.font as tkFont\n'), ((6222, 6282), 'tkinter.font.Font', 'tkFont.Font', ([], {'family': '"""Consolas"""', 'size': '(9)', 'weight': 'tkFont.NORMAL'}), "(family='Consolas', size=9, weight=tkFont.NORMAL)\n", (6233, 6282), True, 'import tkinter.font as tkFont\n'), ((12945, 12987), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.ui.time_loop'}), '(target=self.ui.time_loop)\n', 
(12961, 12987), False, 'import threading\n'), ((893, 924), 'ctypes.create_unicode_buffer', 'create_unicode_buffer', (['fontpath'], {}), '(fontpath)\n', (914, 924), False, 'from ctypes import windll, byref, create_unicode_buffer, create_string_buffer\n'), ((4787, 4804), 'tkinter.font.families', 'tkFont.families', ([], {}), '()\n', (4802, 4804), True, 'import tkinter.font as tkFont\n'), ((9349, 9362), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (9359, 9362), False, 'import time\n'), ((4848, 4883), 'os.path.join', 'os.path.join', (['"""font"""', '"""5x5dots.ttf"""'], {}), "('font', '5x5dots.ttf')\n", (4860, 4883), False, 'import os\n'), ((3482, 3505), 'copy.deepcopy', 'copy.deepcopy', (['(dx, dy)'], {}), '((dx, dy))\n', (3495, 3505), False, 'import copy\n'), ((3547, 3570), 'copy.deepcopy', 'copy.deepcopy', (['(dx, dy)'], {}), '((dx, dy))\n', (3560, 3570), False, 'import copy\n')] |
import glob
import os
import random
import numpy as np
from PIL import Image
from datasets.BaseDataset import VideoDataset, INFO, IMAGES_, TARGETS
from utils.Resize import ResizeMode
class Davis(VideoDataset):
  """DAVIS video-segmentation dataset built on :class:`VideoDataset`.

  Creates one sample per frame of every sequence listed in the selected
  image-set file; each sample describes a temporal clip of ``tw`` frames.
  """

  def __init__(self, root, mode='train', resize_mode=None, resize_shape=None, tw=8, max_temporal_gap=8, num_classes=2,
               imset=None):
    # Per-video bookkeeping; filled by create_sample_list(), which the base
    # class constructor is expected to invoke.
    self.imset = imset
    self.videos = []
    self.num_frames = {}
    self.num_objects = {}
    self.shape = {}
    self.raw_samples = []
    super(Davis, self).__init__(root, mode, resize_mode, resize_shape, tw, max_temporal_gap, num_classes)

  def filter_samples(self, video):
    """Restrict ``self.samples`` to the samples belonging to `video`."""
    self.samples = [s for s in self.raw_samples if s[INFO]['video'] == video]

  def set_video_id(self, video):
    """Switch the dataset to serve clips from a single sequence."""
    self.current_video = video
    self.start_index = self.get_start_index(video)
    self.filter_samples(video)

  def get_video_ids(self):
    """Return all sequence names; shuffled during training for mixing."""
    return random.sample(self.videos, len(self.videos)) if self.is_train() else self.videos

  def get_support_indices(self, index, sequence):
    """Return the ``tw`` sorted frame indices of the clip starting at `index`.

    During training ``tw`` frames are sampled at random from a window of up
    to ``max(max_temporal_gap, tw)`` frames; otherwise the clip is simply
    the next ``tw`` consecutive frames.  When fewer than ``tw`` frames are
    available the start index is repeated to pad the clip.
    """
    if self.is_train():
      index_range = np.arange(index, min(self.num_frames[sequence],
                                           (index + max(self.max_temporal_gap, self.tw))))
    else:
      index_range = np.arange(index,
                              min(self.num_frames[sequence], (index + self.tw)))

    support_indices = np.random.choice(index_range, min(self.tw, len(index_range)), replace=False)
    # Pad with the start index so the clip always has exactly `tw` entries.
    support_indices = np.sort(np.append(support_indices, np.repeat([index],
                                                                 self.tw - len(support_indices))))
    return support_indices

  def create_sample_list(self):
    """Populate ``self.samples`` with one clip description per frame."""
    image_dir = os.path.join(self.root, 'JPEGImages', '480p')
    mask_dir = os.path.join(self.root, 'Annotations_unsupervised', '480p')
    if self.is_train():
      _imset_f = '2017/train.txt'
    elif self.imset:
      _imset_f = self.imset
    else:
      _imset_f = '2017/val.txt'
    with open(os.path.join(self.root, "ImageSets", _imset_f), "r") as lines:
      for line in lines:
        _video = line.rstrip('\n')
        self.videos += [_video]
        img_list = sorted(glob.glob(os.path.join(image_dir, _video, '*.jpg')))
        # Reuse the glob result instead of scanning the directory a second time.
        num_frames = len(img_list)
        self.num_frames[_video] = num_frames
        _mask_file = os.path.join(mask_dir, _video, '00000.png')
        # First-frame mask gives the object count and the frame shape.
        _mask = np.array(Image.open(_mask_file).convert("P"))
        num_objects = np.max(_mask)
        self.num_objects[_video] = num_objects
        self.shape[_video] = np.shape(_mask)
        for i, img in enumerate(img_list):
          sample = {INFO: {}, IMAGES_: [], TARGETS: []}
          support_indices = self.get_support_indices(i, _video)
          sample[INFO]['support_indices'] = support_indices
          # get_support_indices() already returns sorted indices; sort once
          # defensively and reuse for both image and target paths.
          ordered = np.sort(support_indices)
          sample[IMAGES_] = [os.path.join(image_dir, _video, '{:05d}.jpg'.format(s)) for s in ordered]
          sample[TARGETS] = [os.path.join(mask_dir, _video, '{:05d}.png'.format(s)) for s in ordered]
          sample[INFO]['video'] = _video
          sample[INFO]['num_frames'] = num_frames
          sample[INFO]['num_objects'] = num_objects
          sample[INFO]['shape'] = self.shape[_video]
          self.samples.append(sample)
    self.raw_samples = self.samples
if __name__ == '__main__':
  # Smoke test: build the dataset from a hard-coded path and iterate it,
  # printing per-sample info and image/target value ranges.
  davis = Davis(root="/globalwork/data/DAVIS-Unsupervised/DAVIS/",
              resize_shape=(480, 854), resize_mode=ResizeMode.FIXED_SIZE, mode="train", max_temporal_gap=32)
  # davis.set_video_id('cat-girl')
  print("Dataset size: {}".format(davis.__len__()))
  for i, _input in enumerate(davis):
    print(_input['info'])
    print("Image Max {}, Image Min {}".format(_input['images'].max(), _input['images'].min()),
          "Target max {}, Target Min {}".format(_input['target']['mask'].max(), _input['target']['mask'].min())) | [
"numpy.sort",
"numpy.shape",
"os.path.join",
"numpy.max"
] | [((1853, 1898), 'os.path.join', 'os.path.join', (['self.root', '"""JPEGImages"""', '"""480p"""'], {}), "(self.root, 'JPEGImages', '480p')\n", (1865, 1898), False, 'import os\n'), ((1914, 1973), 'os.path.join', 'os.path.join', (['self.root', '"""Annotations_unsupervised"""', '"""480p"""'], {}), "(self.root, 'Annotations_unsupervised', '480p')\n", (1926, 1973), False, 'import os\n'), ((2138, 2184), 'os.path.join', 'os.path.join', (['self.root', '"""ImageSets"""', '_imset_f'], {}), "(self.root, 'ImageSets', _imset_f)\n", (2150, 2184), False, 'import os\n'), ((2576, 2619), 'os.path.join', 'os.path.join', (['mask_dir', '_video', '"""00000.png"""'], {}), "(mask_dir, _video, '00000.png')\n", (2588, 2619), False, 'import os\n'), ((2737, 2750), 'numpy.max', 'np.max', (['_mask'], {}), '(_mask)\n', (2743, 2750), True, 'import numpy as np\n'), ((2827, 2842), 'numpy.shape', 'np.shape', (['_mask'], {}), '(_mask)\n', (2835, 2842), True, 'import numpy as np\n'), ((3538, 3553), 'numpy.shape', 'np.shape', (['_mask'], {}), '(_mask)\n', (3546, 3553), True, 'import numpy as np\n'), ((2326, 2366), 'os.path.join', 'os.path.join', (['image_dir', '_video', '"""*.jpg"""'], {}), "(image_dir, _video, '*.jpg')\n", (2338, 2366), False, 'import os\n'), ((2466, 2506), 'os.path.join', 'os.path.join', (['image_dir', '_video', '"""*.jpg"""'], {}), "(image_dir, _video, '*.jpg')\n", (2478, 2506), False, 'import os\n'), ((3152, 3176), 'numpy.sort', 'np.sort', (['support_indices'], {}), '(support_indices)\n', (3159, 3176), True, 'import numpy as np\n'), ((3263, 3287), 'numpy.sort', 'np.sort', (['support_indices'], {}), '(support_indices)\n', (3270, 3287), True, 'import numpy as np\n'), ((2656, 2699), 'os.path.join', 'os.path.join', (['mask_dir', '_video', '"""00000.png"""'], {}), "(mask_dir, _video, '00000.png')\n", (2668, 2699), False, 'import os\n')] |
import os
import sys
# Resolve this script's own location so the repository root (two levels up)
# can be put on the module search path before the RFEM imports below.
baseName = os.path.basename(__file__)
dirName = os.path.dirname(__file__)
print("basename: ", baseName)
print("dirname: ", dirName)
sys.path.append(dirName + r"/../..")
import pandas as pd
import numpy as np
# NOTE(review): the star import supplies Model, SetModelType, ModelType,
# FirstFreeIdNumber, ObjectTypes, ConvertStrToListOfInt, ... used below.
from RFEM.initModel import *
from RFEM.BasicObjects.node import Node
from RFEM.BasicObjects.material import Material
from RFEM.BasicObjects.section import Section
from RFEM.BasicObjects.member import Member
from RFEM.BasicObjects.line import Line
def util_num_to_ndarray(nodes: np.ndarray, nums):
    """Broadcast/convert `nums` to a flat int array matching `nodes`.

    A scalar is repeated once per row of `nodes`; a list or ndarray is
    flattened as given.  Any other input yields an empty int array.
    """
    if isinstance(nums, (int, float)):
        values = np.array([nums] * nodes.shape[0]).flatten()
    elif isinstance(nums, list):
        values = np.array(nums).flatten()
    elif isinstance(nums, np.ndarray):
        values = np.array([nums]).flatten()
    else:
        values = np.array([])
    return values.astype(int)
def load_object(clientModel, type="NODE"):
    """Fetch all objects of a given kind from an RFEM model.

    :param clientModel: RFEM client model whose objects are queried
    :param type: object kind, e.g. "NODE", "MEMBER" or "SECTION"
        (parameter name shadows the builtin ``type``; kept for compatibility)
    :return: list of SOAP object records, or None for unsupported kinds
    """
    numbers = ConvertStrToListOfInt(
        clientModel.service.get_all_object_numbers(
            type="E_OBJECT_TYPE_{}".format(type.upper())
        )
    )
    kind = type.upper()
    # Query through the *passed* clientModel for every kind; the original
    # mixed it with the global Model.clientModel, which breaks as soon as a
    # different model instance is supplied.
    if kind == "NODE":
        return [clientModel.service.get_node(int(n)) for n in numbers]
    elif kind == "MEMBER":
        return [clientModel.service.get_member(int(n)) for n in numbers]
    elif kind == "SECTION":
        return [clientModel.service.get_section(int(n)) for n in numbers]
    else:
        # todo: other object types are not handled yet -> implicitly None
        return None
def load_dataframe(clientModel, type="NODE"):
    """Collect all objects of `type` into a pandas DataFrame.

    One row per object, one column per SOAP record field.  Returns None
    when the model contains no objects of that kind.
    """
    records = load_object(clientModel, type)
    count = len(records)
    if count == 0:
        return None
    columns = dict(records[0]).keys()
    frame = pd.DataFrame(columns=columns, index=range(count), dtype=object)
    for row, record in enumerate(records):
        frame.iloc[row] = np.array(list(dict(record).values()), dtype=object)
    return frame
def generate_members_from_tuples(nodes: np.ndarray, section_no):
    """Create one RFEM member per (start, end) node pair.

    :param nodes: either a flat array of node numbers (consecutive nodes are
        chained into members) or an (n, 2) array of explicit start/end pairs
    :type nodes: np.ndarray
    :param section_no: section number(s) for the members; scalar, list or
        array (broadcast via util_num_to_ndarray)
    :return: numbers of the newly created members
    :rtype: np.ndarray
    """
    section_no = util_num_to_ndarray(nodes, section_no)
    if len(nodes.shape) == 1:
        node_tup = convert_node_array_to_tuple(nodes)
    elif len(nodes.shape) == 2:
        node_tup = nodes
    # NOTE(review): arrays with ndim > 2 leave node_tup undefined (NameError).
    member_numbers = np.zeros(nodes.shape[0]).astype(int)
    for i in range(node_tup.shape[0]):
        # Ask RFEM for the next free member id, then create the member with
        # the same section at both ends.
        member_numbers[i] = int(
            FirstFreeIdNumber(memType=ObjectTypes.E_OBJECT_TYPE_MEMBER)
        )
        Member(
            member_numbers[i],
            start_node_no=node_tup[i, 0],
            end_node_no=node_tup[i, 1],
            start_section_no=section_no[i],
            end_section_no=section_no[i],
        )
    return member_numbers
def place_nodes(coords: np.ndarray):
    """Create one RFEM node per coordinate row.

    :param coords: array whose columns are the x, y and z coordinates
    :type coords: np.ndarray
    :return: numbers of the created nodes
    :rtype: np.ndarray
    """
    count = coords.shape[0]
    node_numbers = np.zeros(count).astype(int)
    for row in range(count):
        # Ask RFEM for the next free node id, then create the node there.
        free_no = int(FirstFreeIdNumber(memType=ObjectTypes.E_OBJECT_TYPE_NODE))
        node_numbers[row] = free_no
        Node(free_no, coords[row, 0], coords[row, 1], coords[row, 2])
    return node_numbers
def convert_node_array_to_tuple(node_numbers: np.ndarray):
    """Pair consecutive node numbers into (start, end) member tuples.

    :param node_numbers: flat array of node numbers
    :type node_numbers: np.ndarray
    :return: (len - 1, 2) int array of consecutive pairs
    :rtype: np.ndarray
    """
    starts = node_numbers[:-1]
    ends = node_numbers[1:]
    return np.column_stack((starts, ends)).astype(int)
def place_top_bot_members(coords: np.ndarray, height: float, section_no: int = 1):
    """Build a top chord at `coords` plus a bottom chord offset in z.

    :param coords: node coordinates of the top chord
    :type coords: np.ndarray
    :param height: z offset applied to `coords` for the bottom chord
    :type height: float
    :param section_no: section for both chords, defaults to 1
    :type section_no: int, optional
    :return: (n, 2) array, top node numbers in column 0, bottom in column 1
    :rtype: np.ndarray
    """
    top_nodes = place_nodes(coords)
    generate_members_from_tuples(top_nodes, section_no=section_no)
    # Bottom chord: same x/y, shifted by `height` along z.
    offset = np.zeros(coords.shape)
    offset[:, 2] += height
    bot_nodes = place_nodes(coords + offset)
    generate_members_from_tuples(bot_nodes, section_no=section_no)
    paired = np.zeros((top_nodes.size, 2)).astype(int)
    paired[:, 0] = top_nodes
    paired[:, 1] = bot_nodes
    return paired
def place_top_bot_members_2(coord_bot: np.ndarray, coord_top: np.ndarray, section_no: int = 1):
    """Build top and bottom chords from two explicit coordinate arrays.

    :param coord_bot: node coordinates of the bottom chord
    :type coord_bot: np.ndarray
    :param coord_top: node coordinates of the top chord
    :type coord_top: np.ndarray
    :param section_no: section for both chords, defaults to 1
    :type section_no: int, optional
    :return: (n, 2) array, top node numbers in column 0, bottom in column 1
    :rtype: np.ndarray
    """
    top_nodes = place_nodes(coord_top)
    generate_members_from_tuples(top_nodes, section_no=section_no)
    bot_nodes = place_nodes(coord_bot)
    generate_members_from_tuples(bot_nodes, section_no=section_no)
    paired = np.zeros((top_nodes.size, 2)).astype(int)
    paired[:, 0] = top_nodes
    paired[:, 1] = bot_nodes
    return paired
def extract_truss_nodes(node_no: np.ndarray, num_fields: int):
    """Row indices of the chord nodes sitting on field boundaries.

    Returns every (len // num_fields)-th index; the very last node is NOT
    included and is appended separately by the callers.
    """
    step = int(node_no.shape[0] / num_fields)
    return np.arange(0, node_no.shape[0], step, dtype=int)
def inject_node_with_offset(node_1, node_2, distance, reverse=False):
    """Place a new node `distance` away from node_1 towards node_2.

    :param node_1: number of the node the offset is measured from
    :param node_2: number of the node that defines the direction
    :param distance: offset length along the unit direction vector
    :param reverse: unused; kept for backward compatibility
    :return: array holding the number of the newly created node
    :rtype: np.ndarray
    """
    start = Model.clientModel.service.get_node(node_1)
    target = Model.clientModel.service.get_node(node_2)
    start_xyz = np.array(
        [
            start.global_coordinates.x,
            start.global_coordinates.y,
            start.global_coordinates.z,
        ]
    )
    target_xyz = np.array(
        [
            target.global_coordinates.x,
            target.global_coordinates.y,
            target.global_coordinates.z,
        ]
    )
    direction = target_xyz - start_xyz
    unit = direction / np.linalg.norm(direction)
    new_coords = (start_xyz + unit * distance).reshape(-1, 3)
    return place_nodes(new_coords)
def place_verticals(node_no: np.ndarray, num_fields: int, section_no: int = 1):
    """Add a vertical member between the chords at each field boundary.

    :param node_no: (n, 2) array, top node numbers in column 0, bottom in 1
    :param num_fields: number of truss fields (controls boundary spacing)
    :param section_no: section of the verticals, defaults to 1
    """
    tops = node_no[:, 0]
    bots = node_no[:, 1]
    for idx in extract_truss_nodes(node_no=node_no, num_fields=num_fields):
        generate_members_from_tuples(
            np.array([tops[idx], bots[idx]]), section_no=section_no
        )
    # extract_truss_nodes() omits the last node, so close the truss here.
    generate_members_from_tuples(
        np.array([tops[-1], bots[-1]]), section_no=section_no
    )
def place_verticals_connection(
    node_no: np.ndarray,
    num_fields: int,
    section_no_vert=1,
    section_no_con=1,
    **cs_props
):
    """Add verticals at every field boundary, each split into three members:
    a short "connection" stub at the top chord, the vertical itself, and a
    stub at the bottom chord.

    :param node_no: (n, 2) array, top node numbers in column 0, bottom in 1
    :param num_fields: number of truss fields (controls boundary spacing)
    :param section_no_vert: section of the vertical member, defaults to 1
    :param section_no_con: section of the connection stubs, defaults to 1
    :key h_top: stub length at the top chord (default 0.1)
    :key h_bot: stub length at the bottom chord (default 0.1)
    """
    h_top = cs_props.get("h_top", 0.1)
    h_bot = cs_props.get("h_bot", 0.1)
    node_no_top = node_no[:, 0]
    node_no_bot = node_no[:, 1]
    indices = extract_truss_nodes(node_no=node_no, num_fields=num_fields)

    def _vertical_at(i):
        # One vertical: top stub, vertical, bottom stub.
        num_top = inject_node_with_offset(node_no_top[i], node_no_bot[i], h_top)[0]
        num_bot = inject_node_with_offset(node_no_bot[i], node_no_top[i], h_bot)[0]
        generate_members_from_tuples(
            np.array([node_no_top[i], num_top]), section_no=section_no_con
        )
        generate_members_from_tuples(
            np.array([num_top, num_bot]), section_no=section_no_vert
        )
        generate_members_from_tuples(
            np.array([num_bot, node_no_bot[i]]), section_no=section_no_con
        )

    # extract_truss_nodes() omits the final node, so append index -1; the
    # original duplicated the entire loop body for that case.
    for i in [*indices, -1]:
        _vertical_at(i)
def place_beams(
    node_no: np.ndarray,
    num_fields: int,
    pattern="\\",
    from_field=0,
    to_field=-1,
    section_no_vert=1,
    section_no_diag=2,
):
    """Place diagonal members directly between chord nodes per `pattern`.

    Patterns: "\\" all falling, "/" all rising, "/\\" and "\\/" alternating;
    a "|" anywhere in the pattern additionally adds verticals at every field
    boundary.  `from_field`/`to_field` are only honored by the "\\/" branch.
    NOTE(review): `section_no_vert` is never used — the verticals below
    reuse `section_no_diag`.
    """
    node_no_top = node_no[:, 0]
    node_no_bot = node_no[:, 1]
    indices = extract_truss_nodes(node_no=node_no, num_fields=num_fields)
    member_nodes = np.zeros((indices.shape[0] - 1, 2), dtype=int)
    section_no = util_num_to_ndarray(member_nodes[:, 0], section_no_diag)
    if pattern.replace("|", "") == "\\":
        # Falling diagonals: top of field i -> bottom of field i + 1.
        member_nodes[:, 0] = node_no_top[indices[:-1]]
        member_nodes[:, 1] = node_no_bot[indices[1:]]
    elif pattern.replace("|", "") == "/":
        # Rising diagonals: bottom of field i -> top of field i + 1.
        member_nodes[:, 0] = node_no_bot[indices[:-1]]
        member_nodes[:, 1] = node_no_top[indices[1:]]
    elif pattern.replace("|", "") == "/\\":
        # Alternating, starting with a rising diagonal.
        nodes_diag1_start = node_no_bot[indices[:-1:2]]
        nodes_diag1_end = node_no_top[indices[1::2]]
        member_nodes[: nodes_diag1_start.size, 0] = nodes_diag1_start
        member_nodes[: nodes_diag1_start.size, 1] = nodes_diag1_end
        nodes_diag2_start = node_no_top[indices[1:-1:2]]
        nodes_diag2_end = node_no_bot[indices[2::2]]
        member_nodes[nodes_diag1_start.size :, 0] = nodes_diag2_start
        member_nodes[nodes_diag1_start.size :, 1] = nodes_diag2_end
    elif pattern.replace("|", "") == "\\/":
        # Alternating, starting with a falling diagonal; honors from/to_field.
        nodes_diag1_start = node_no_bot[indices[from_field + 1 : to_field : 2]]
        nodes_diag1_end = node_no_top[indices[from_field + 2 :: 2]]
        member_nodes[: nodes_diag1_start.size, 0] = nodes_diag1_start
        member_nodes[: nodes_diag1_start.size, 1] = nodes_diag1_end
        nodes_diag2_start = node_no_top[indices[from_field:to_field:2]]
        nodes_diag2_end = node_no_bot[indices[from_field + 1 :: 2]]
        member_nodes[nodes_diag1_start.size :, 0] = nodes_diag2_start
        member_nodes[nodes_diag1_start.size :, 1] = nodes_diag2_end
    generate_members_from_tuples(
        member_nodes,
        section_no=section_no,
    )
    if "|" in pattern:
        # Verticals between the chords at every boundary node.
        member_nodes = np.zeros((node_no_top[indices].shape[0], 2)).astype(int)
        member_nodes[:, 0] = node_no_top[indices]
        member_nodes[:, 1] = node_no_bot[indices]
        section_no = util_num_to_ndarray(member_nodes[:, 0], section_no_diag)
        generate_members_from_tuples(
            member_nodes,
            section_no=section_no,
        )
    # place_verticals(node_no, num_fields)
def place_beams_connection(
    node_no: np.ndarray,
    num_fields: int,
    pattern="\\",
    from_field=0,
    to_field=-1,
    section_no_diag=1,
    section_no_vert=1,
    section_no_con=1,
    **cs_props
):
    """Place diagonal members (each with short connection stubs at both
    chords) between field boundaries, following an ASCII-art `pattern`.

    Supported patterns: "\\" (all falling), "/" (all rising), "/\\" and
    "\\/" (alternating); a "|" anywhere in the pattern additionally adds
    verticals via place_verticals_connection().

    :param node_no: (n, 2) array, top node numbers in column 0, bottom in 1
    :param num_fields: number of truss fields
    :param pattern: diagonal layout, defaults to "\\"
    :param from_field: first field to fill, defaults to 0
    :param to_field: offset from the last field (negative), defaults to -1
    :param section_no_diag: section of the diagonals, defaults to 1
    :param section_no_vert: section of the verticals, defaults to 1
    :param section_no_con: section of the connection stubs, defaults to 1
    :key h_top: stub length at the top chord (default 0.1)
    :key h_bot: stub length at the bottom chord (default 0.1)
    """
    h_top = cs_props.get("h_top", 0.1)
    h_bot = cs_props.get("h_bot", 0.1)
    node_no_top = node_no[:, 0]
    node_no_bot = node_no[:, 1]
    indices = extract_truss_nodes(node_no=node_no, num_fields=num_fields)

    def _falling(i):
        # "\" diagonal: top of field i -> bottom of field i + 1,
        # built as top stub / diagonal / bottom stub.
        node_num_top = inject_node_with_offset(
            node_no_top[indices[i]], node_no_bot[indices[i + 1]], h_top
        )[0]
        node_num_bot = inject_node_with_offset(
            node_no_bot[indices[i + 1]], node_no_top[indices[i]], h_bot
        )[0]
        generate_members_from_tuples(
            np.array([node_no_top[indices[i]], node_num_top]),
            section_no=section_no_con,
        )
        generate_members_from_tuples(
            np.array([node_num_top, node_num_bot]),
            section_no=section_no_diag,
        )
        generate_members_from_tuples(
            np.array([node_num_bot, node_no_bot[indices[i + 1]]]),
            section_no=section_no_con,
        )

    def _rising(i):
        # "/" diagonal: bottom of field i -> top of field i + 1.
        node_num_top = inject_node_with_offset(
            node_no_top[indices[i + 1]], node_no_bot[indices[i]], h_top
        )[0]
        node_num_bot = inject_node_with_offset(
            node_no_bot[indices[i]], node_no_top[indices[i + 1]], h_bot
        )[0]
        generate_members_from_tuples(
            np.array([node_no_bot[indices[i]], node_num_bot]),
            section_no=section_no_con,
        )
        generate_members_from_tuples(
            np.array([node_num_bot, node_num_top]),
            section_no=section_no_diag,
        )
        generate_members_from_tuples(
            np.array([node_num_top, node_no_top[indices[i + 1]]]),
            section_no=section_no_con,
        )

    # The original spelled out the identical stub/diagonal/stub sequence in
    # every branch; only the field ranges and the direction differ.
    diagonals = pattern.replace("|", "")
    if diagonals == "\\":
        for i in range(from_field, indices.size + to_field):
            _falling(i)
    elif diagonals == "/":
        for i in range(from_field, indices.size + to_field):
            _rising(i)
    elif diagonals == "/\\":
        for i in range(from_field, indices.size + to_field, 2):
            _rising(i)
        for i in range(from_field + 1, indices.size + to_field, 2):
            _falling(i)
    elif diagonals == "\\/":
        for i in range(from_field + 1, indices.size + to_field, 2):
            _rising(i)
        for i in range(from_field, indices.size + to_field, 2):
            _falling(i)
    if "|" in pattern:
        place_verticals_connection(
            node_no,
            num_fields,
            section_no_vert=section_no_vert,
            section_no_con=section_no_con,
            **cs_props
        )
def fachwerk_sinosoidal():
    """Build two stacked sine/cosine-shaped trusses in a fresh RFEM model."""
    Model(new_model=True, model_name="fachwerk_sinosoidal")
    # SetModelType(model_type=ModelType.E_MODEL_TYPE_2D_XZ_PLANE_STRESS)
    SetModelType(model_type=ModelType.E_MODEL_TYPE_3D)
    # Materials and sections: timber for chords/diagonals, steel for stubs.
    Material(1, "GL24h")
    Material(2, "S235JRH")
    Section(1, "R_M1 50/50")
    Section(2, "R_M1 20/20")
    Section(3, "ROUND 5/H", material_no=2)
    # Connection stub lengths: half the chord section depth.
    sec_top = dict(Model.clientModel.service.get_section(1))
    sec_bot = dict(Model.clientModel.service.get_section(1))
    stub_lengths = {
        "h_top": sec_top.get("depth_temperature_load") / 2,
        "h_bot": sec_bot.get("depth_temperature_load") / 2,
    }
    phase = np.linspace(0, 2 * np.pi, 51)
    num_fields = 10
    Model.clientModel.service.begin_modification()
    # Upper truss: sine-shaped chord over one period, stretched to 10 m.
    chord_xyz = np.zeros((phase.size, 3))
    chord_xyz[:, 0] = phase / (2 * np.pi) * 10
    chord_xyz[:, 2] = 0.3 * np.sin(phase)
    chord_nodes = place_top_bot_members(chord_xyz, height=1)
    place_beams_connection(
        chord_nodes,
        num_fields,
        pattern="\\/|",
        section_no_vert=1,
        section_no_diag=2,
        section_no_con=3,
        **stub_lengths
    )
    # Lower truss: cosine-shaped chord, shifted 2 units along z.
    chord_xyz = np.zeros((phase.size, 3))
    chord_xyz[:, 0] = phase / (2 * np.pi) * 10
    chord_xyz[:, 2] = 0.3 * np.cos(phase) - 2
    chord_nodes = place_top_bot_members(chord_xyz, height=1)
    place_beams_connection(
        chord_nodes,
        num_fields,
        pattern="/\\|",
        section_no_vert=1,
        section_no_diag=2,
        section_no_con=3,
        **stub_lengths
    )
    # place_beams(chord_nodes, num_fields, pattern="\\/|")
    Model.clientModel.service.finish_modification()
def fachwerk():
    """Build two stacked straight trusses in a fresh RFEM model."""
    Model(new_model=True, model_name="fachwerk")
    # SetModelType(model_type=ModelType.E_MODEL_TYPE_2D_XZ_PLANE_STRESS)
    SetModelType(model_type=ModelType.E_MODEL_TYPE_3D)
    # Materials and sections: timber for chords/diagonals, steel for stubs.
    Material(1, "GL24h")
    Material(2, "S235JRH")
    Section(1, "R_M1 50/50")
    Section(2, "R_M1 20/20")
    Section(3, "ROUND 5/H", material_no=2)
    # Connection stub lengths: half the chord section depth.
    sec_top = dict(Model.clientModel.service.get_section(1))
    sec_bot = dict(Model.clientModel.service.get_section(1))
    stub_lengths = {
        "h_top": sec_top.get("depth_temperature_load") / 2,
        "h_bot": sec_bot.get("depth_temperature_load") / 2,
    }
    samples = np.linspace(0, 2 * np.pi, 51)
    chord_xyz = np.zeros((samples.size, 3))
    chord_xyz[:, 0] = samples / (2 * np.pi) * 10
    chord_xyz[:, 2] = 0
    num_fields = 10
    Model.clientModel.service.begin_modification()
    # Upper truss at z = 0.
    chord_nodes = place_top_bot_members(chord_xyz, height=1)
    place_beams_connection(
        chord_nodes,
        num_fields,
        pattern="\\/|",
        section_no_vert=1,
        section_no_diag=2,
        section_no_con=3,
        **stub_lengths
    )
    # Lower truss, shifted 2 units along z.
    chord_xyz[:, 2] = chord_xyz[:, 2] - 2
    chord_nodes = place_top_bot_members(chord_xyz, height=1)
    place_beams_connection(
        chord_nodes,
        num_fields,
        pattern="/\\|",
        section_no_vert=1,
        section_no_diag=2,
        section_no_con=3,
        **stub_lengths
    )
    # place_beams(chord_nodes, num_fields, pattern="\\/|")
    Model.clientModel.service.finish_modification()
def fachwerk_half_circle():
    """Build a full-circle ring truss: constant-radius (6) top chord at z = 0
    and a bottom chord whose radius grows from 3 to 6 at z = 2.

    NOTE(review): the model is named "fachwerk_spiral" — apparently a
    copy-paste from fachwerk_spiral(); kept for backward compatibility.
    """
    Model(new_model=True, model_name="fachwerk_spiral")
    # SetModelType(model_type=ModelType.E_MODEL_TYPE_2D_XZ_PLANE_STRESS)
    SetModelType(model_type=ModelType.E_MODEL_TYPE_3D)
    Material(1, "GL24h")
    Material(2, "S235JRH")
    Section(1, "R_M1 50/50")
    Section(2, "R_M1 20/20")
    Section(3, "ROUND 5/H", material_no=2)
    # Connection stub lengths: half the chord section depth.
    section_OG = dict(Model.clientModel.service.get_section(1))
    section_UG = dict(Model.clientModel.service.get_section(1))
    height_dict = {
        "h_top": section_OG.get("depth_temperature_load") / 2,
        "h_bot": section_UG.get("depth_temperature_load") / 2,
    }
    periods = 1
    t = np.linspace(0, 2 * np.pi * periods, 60 * periods + 1)
    # Top chord: circle of constant radius 6 at z = 0.
    # (Dead code removed: R = np.linspace(4, 7, t.size) was immediately
    # overwritten, and a = 3 was never used.)
    R = 6
    coords_top = np.zeros((t.size, 3))
    coords_top[:, 0] = R * np.sin(t)  # x
    coords_top[:, 1] = R * np.cos(t)  # y
    coords_top[:, 2] = 0
    # Bottom chord: radius grows from 3 to 6, at z = 2.
    R = np.linspace(3, 6, t.size)
    coords_bot = np.zeros((t.size, 3))
    coords_bot[:, 0] = R * np.sin(t)  # x
    coords_bot[:, 1] = R * np.cos(t)  # y
    coords_bot[:, 2] = 2
    num_fields = 20 * periods
    Model.clientModel.service.begin_modification()
    node_no = place_top_bot_members_2(coord_bot=coords_bot, coord_top=coords_top)
    place_beams_connection(
        node_no,
        num_fields,
        pattern="/\\|",
        section_no_vert=1,
        section_no_diag=2,
        section_no_con=3,
        **height_dict
    )
    Model.clientModel.service.finish_modification()
def fachwerk_spiral():
    """Build a two-period helical (spiral) truss in a fresh RFEM model."""
    Model(new_model=True, model_name="fachwerk_spiral")
    # SetModelType(model_type=ModelType.E_MODEL_TYPE_2D_XZ_PLANE_STRESS)
    SetModelType(model_type=ModelType.E_MODEL_TYPE_3D)
    # Materials and sections: timber for chords/diagonals, steel for stubs.
    Material(1, "GL24h")
    Material(2, "S235JRH")
    Section(1, "R_M1 50/50")
    Section(2, "R_M1 20/20")
    Section(3, "ROUND 5/H", material_no=2)
    # Connection stub lengths: half the chord section depth.
    sec_top = dict(Model.clientModel.service.get_section(1))
    sec_bot = dict(Model.clientModel.service.get_section(1))
    stub_lengths = {
        "h_top": sec_top.get("depth_temperature_load") / 2,
        "h_bot": sec_bot.get("depth_temperature_load") / 2,
    }
    periods = 2
    theta = np.linspace(0, 2 * np.pi * periods, 60 * periods + 1)
    radii = np.linspace(4, 7, theta.size)  # radius widens along the spiral
    rise = 3  # z gained per full turn
    chord_xyz = np.zeros((theta.size, 3))
    chord_xyz[:, 0] = radii * np.sin(theta)  # x
    chord_xyz[:, 1] = radii * np.cos(theta)  # y
    chord_xyz[:, 2] = rise * theta / (2 * np.pi)  # z
    num_fields = 20 * periods
    Model.clientModel.service.begin_modification()
    chord_nodes = place_top_bot_members(chord_xyz, height=1)
    place_beams_connection(
        chord_nodes,
        num_fields,
        pattern="/\\|",
        section_no_vert=1,
        section_no_diag=2,
        section_no_con=3,
        **stub_lengths
    )
    Model.clientModel.service.finish_modification()
if __name__ == "__main__":
    # Each helper builds a standalone demo model; enable one at a time.
    # fachwerk_sinosoidal()
    # fachwerk_spiral()
    # fachwerk()
    fachwerk_half_circle()
| [
"RFEM.BasicObjects.material.Material",
"numpy.sin",
"RFEM.BasicObjects.node.Node",
"os.path.dirname",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"os.path.basename",
"numpy.cos",
"numpy.linalg.norm",
"RFEM.BasicObjects.section.Section",
"RFEM.BasicObjects.member.Member",
"sys.path.append... | [((33, 59), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (49, 59), False, 'import os\n'), ((70, 95), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (85, 95), False, 'import os\n'), ((161, 196), 'sys.path.append', 'sys.path.append', (["(dirName + '/../..')"], {}), "(dirName + '/../..')\n", (176, 196), False, 'import sys\n'), ((548, 560), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (556, 560), True, 'import numpy as np\n'), ((4422, 4444), 'numpy.zeros', 'np.zeros', (['coords.shape'], {}), '(coords.shape)\n', (4430, 4444), True, 'import numpy as np\n'), ((5976, 6075), 'numpy.array', 'np.array', (['[node1.global_coordinates.x, node1.global_coordinates.y, node1.\n global_coordinates.z]'], {}), '([node1.global_coordinates.x, node1.global_coordinates.y, node1.\n global_coordinates.z])\n', (5984, 6075), True, 'import numpy as np\n'), ((6152, 6251), 'numpy.array', 'np.array', (['[node2.global_coordinates.x, node2.global_coordinates.y, node2.\n global_coordinates.z]'], {}), '([node2.global_coordinates.x, node2.global_coordinates.y, node2.\n global_coordinates.z])\n', (6160, 6251), True, 'import numpy as np\n'), ((8883, 8929), 'numpy.zeros', 'np.zeros', (['(indices.shape[0] - 1, 2)'], {'dtype': 'int'}), '((indices.shape[0] - 1, 2), dtype=int)\n', (8891, 8929), True, 'import numpy as np\n'), ((18003, 18023), 'RFEM.BasicObjects.material.Material', 'Material', (['(1)', '"""GL24h"""'], {}), "(1, 'GL24h')\n", (18011, 18023), False, 'from RFEM.BasicObjects.material import Material\n'), ((18028, 18050), 'RFEM.BasicObjects.material.Material', 'Material', (['(2)', '"""S235JRH"""'], {}), "(2, 'S235JRH')\n", (18036, 18050), False, 'from RFEM.BasicObjects.material import Material\n'), ((18055, 18079), 'RFEM.BasicObjects.section.Section', 'Section', (['(1)', '"""R_M1 50/50"""'], {}), "(1, 'R_M1 50/50')\n", (18062, 18079), False, 'from RFEM.BasicObjects.section import Section\n'), ((18084, 18108), 
'RFEM.BasicObjects.section.Section', 'Section', (['(2)', '"""R_M1 20/20"""'], {}), "(2, 'R_M1 20/20')\n", (18091, 18108), False, 'from RFEM.BasicObjects.section import Section\n'), ((18113, 18151), 'RFEM.BasicObjects.section.Section', 'Section', (['(3)', '"""ROUND 5/H"""'], {'material_no': '(2)'}), "(3, 'ROUND 5/H', material_no=2)\n", (18120, 18151), False, 'from RFEM.BasicObjects.section import Section\n'), ((18442, 18471), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(51)'], {}), '(0, 2 * np.pi, 51)\n', (18453, 18471), True, 'import numpy as np\n'), ((18486, 18507), 'numpy.zeros', 'np.zeros', (['(x.size, 3)'], {}), '((x.size, 3))\n', (18494, 18507), True, 'import numpy as np\n'), ((18920, 18941), 'numpy.zeros', 'np.zeros', (['(x.size, 3)'], {}), '((x.size, 3))\n', (18928, 18941), True, 'import numpy as np\n'), ((19579, 19599), 'RFEM.BasicObjects.material.Material', 'Material', (['(1)', '"""GL24h"""'], {}), "(1, 'GL24h')\n", (19587, 19599), False, 'from RFEM.BasicObjects.material import Material\n'), ((19604, 19626), 'RFEM.BasicObjects.material.Material', 'Material', (['(2)', '"""S235JRH"""'], {}), "(2, 'S235JRH')\n", (19612, 19626), False, 'from RFEM.BasicObjects.material import Material\n'), ((19631, 19655), 'RFEM.BasicObjects.section.Section', 'Section', (['(1)', '"""R_M1 50/50"""'], {}), "(1, 'R_M1 50/50')\n", (19638, 19655), False, 'from RFEM.BasicObjects.section import Section\n'), ((19660, 19684), 'RFEM.BasicObjects.section.Section', 'Section', (['(2)', '"""R_M1 20/20"""'], {}), "(2, 'R_M1 20/20')\n", (19667, 19684), False, 'from RFEM.BasicObjects.section import Section\n'), ((19689, 19727), 'RFEM.BasicObjects.section.Section', 'Section', (['(3)', '"""ROUND 5/H"""'], {'material_no': '(2)'}), "(3, 'ROUND 5/H', material_no=2)\n", (19696, 19727), False, 'from RFEM.BasicObjects.section import Section\n'), ((20018, 20047), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(51)'], {}), '(0, 2 * np.pi, 51)\n', (20029, 20047), True, 'import 
numpy as np\n'), ((20062, 20083), 'numpy.zeros', 'np.zeros', (['(x.size, 3)'], {}), '((x.size, 3))\n', (20070, 20083), True, 'import numpy as np\n'), ((21082, 21102), 'RFEM.BasicObjects.material.Material', 'Material', (['(1)', '"""GL24h"""'], {}), "(1, 'GL24h')\n", (21090, 21102), False, 'from RFEM.BasicObjects.material import Material\n'), ((21107, 21129), 'RFEM.BasicObjects.material.Material', 'Material', (['(2)', '"""S235JRH"""'], {}), "(2, 'S235JRH')\n", (21115, 21129), False, 'from RFEM.BasicObjects.material import Material\n'), ((21134, 21158), 'RFEM.BasicObjects.section.Section', 'Section', (['(1)', '"""R_M1 50/50"""'], {}), "(1, 'R_M1 50/50')\n", (21141, 21158), False, 'from RFEM.BasicObjects.section import Section\n'), ((21163, 21187), 'RFEM.BasicObjects.section.Section', 'Section', (['(2)', '"""R_M1 20/20"""'], {}), "(2, 'R_M1 20/20')\n", (21170, 21187), False, 'from RFEM.BasicObjects.section import Section\n'), ((21192, 21230), 'RFEM.BasicObjects.section.Section', 'Section', (['(3)', '"""ROUND 5/H"""'], {'material_no': '(2)'}), "(3, 'ROUND 5/H', material_no=2)\n", (21199, 21230), False, 'from RFEM.BasicObjects.section import Section\n'), ((21537, 21590), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi * periods)', '(60 * periods + 1)'], {}), '(0, 2 * np.pi * periods, 60 * periods + 1)\n', (21548, 21590), True, 'import numpy as np\n'), ((21599, 21624), 'numpy.linspace', 'np.linspace', (['(4)', '(7)', 't.size'], {}), '(4, 7, t.size)\n', (21610, 21624), True, 'import numpy as np\n'), ((21663, 21684), 'numpy.zeros', 'np.zeros', (['(t.size, 3)'], {}), '((t.size, 3))\n', (21671, 21684), True, 'import numpy as np\n'), ((21803, 21828), 'numpy.linspace', 'np.linspace', (['(3)', '(6)', 't.size'], {}), '(3, 6, t.size)\n', (21814, 21828), True, 'import numpy as np\n'), ((21846, 21867), 'numpy.zeros', 'np.zeros', (['(t.size, 3)'], {}), '((t.size, 3))\n', (21854, 21867), True, 'import numpy as np\n'), ((22603, 22623), 'RFEM.BasicObjects.material.Material', 
'Material', (['(1)', '"""GL24h"""'], {}), "(1, 'GL24h')\n", (22611, 22623), False, 'from RFEM.BasicObjects.material import Material\n'), ((22628, 22650), 'RFEM.BasicObjects.material.Material', 'Material', (['(2)', '"""S235JRH"""'], {}), "(2, 'S235JRH')\n", (22636, 22650), False, 'from RFEM.BasicObjects.material import Material\n'), ((22655, 22679), 'RFEM.BasicObjects.section.Section', 'Section', (['(1)', '"""R_M1 50/50"""'], {}), "(1, 'R_M1 50/50')\n", (22662, 22679), False, 'from RFEM.BasicObjects.section import Section\n'), ((22684, 22708), 'RFEM.BasicObjects.section.Section', 'Section', (['(2)', '"""R_M1 20/20"""'], {}), "(2, 'R_M1 20/20')\n", (22691, 22708), False, 'from RFEM.BasicObjects.section import Section\n'), ((22713, 22751), 'RFEM.BasicObjects.section.Section', 'Section', (['(3)', '"""ROUND 5/H"""'], {'material_no': '(2)'}), "(3, 'ROUND 5/H', material_no=2)\n", (22720, 22751), False, 'from RFEM.BasicObjects.section import Section\n'), ((23058, 23111), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi * periods)', '(60 * periods + 1)'], {}), '(0, 2 * np.pi * periods, 60 * periods + 1)\n', (23069, 23111), True, 'import numpy as np\n'), ((23120, 23145), 'numpy.linspace', 'np.linspace', (['(4)', '(7)', 't.size'], {}), '(4, 7, t.size)\n', (23131, 23145), True, 'import numpy as np\n'), ((23170, 23191), 'numpy.zeros', 'np.zeros', (['(t.size, 3)'], {}), '((t.size, 3))\n', (23178, 23191), True, 'import numpy as np\n'), ((2702, 2857), 'RFEM.BasicObjects.member.Member', 'Member', (['member_numbers[i]'], {'start_node_no': 'node_tup[i, 0]', 'end_node_no': 'node_tup[i, 1]', 'start_section_no': 'section_no[i]', 'end_section_no': 'section_no[i]'}), '(member_numbers[i], start_node_no=node_tup[i, 0], end_node_no=\n node_tup[i, 1], start_section_no=section_no[i], end_section_no=\n section_no[i])\n', (2708, 2857), False, 'from RFEM.BasicObjects.member import Member\n'), ((3378, 3441), 'RFEM.BasicObjects.node.Node', 'Node', (['node_numbers[i]', 'coords[i, 0]', 
'coords[i, 1]', 'coords[i, 2]'], {}), '(node_numbers[i], coords[i, 0], coords[i, 1], coords[i, 2])\n', (3382, 3441), False, 'from RFEM.BasicObjects.node import Node\n'), ((6449, 6468), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (6463, 6468), True, 'import numpy as np\n'), ((7018, 7062), 'numpy.array', 'np.array', (['[node_no_top[-1], node_no_bot[-1]]'], {}), '([node_no_top[-1], node_no_bot[-1]])\n', (7026, 7062), True, 'import numpy as np\n'), ((8251, 8292), 'numpy.array', 'np.array', (['[node_no_top[-1], node_num_top]'], {}), '([node_no_top[-1], node_num_top])\n', (8259, 8292), True, 'import numpy as np\n'), ((8368, 8406), 'numpy.array', 'np.array', (['[node_num_top, node_num_bot]'], {}), '([node_num_top, node_num_bot])\n', (8376, 8406), True, 'import numpy as np\n'), ((8483, 8524), 'numpy.array', 'np.array', (['[node_num_bot, node_no_bot[-1]]'], {}), '([node_num_bot, node_no_bot[-1]])\n', (8491, 8524), True, 'import numpy as np\n'), ((18573, 18582), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (18579, 18582), True, 'import numpy as np\n'), ((21712, 21721), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (21718, 21721), True, 'import numpy as np\n'), ((21754, 21763), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (21760, 21763), True, 'import numpy as np\n'), ((21895, 21904), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (21901, 21904), True, 'import numpy as np\n'), ((21937, 21946), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (21943, 21946), True, 'import numpy as np\n'), ((23215, 23224), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (23221, 23224), True, 'import numpy as np\n'), ((23253, 23262), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (23259, 23262), True, 'import numpy as np\n'), ((1881, 1932), 'numpy.array', 'np.array', (['[value for value in values]'], {'dtype': 'object'}), '([value for value in values], dtype=object)\n', (1889, 1932), True, 'import numpy as np\n'), ((2503, 2527), 'numpy.zeros', 'np.zeros', (['nodes.shape[0]'], {}), 
'(nodes.shape[0])\n', (2511, 2527), True, 'import numpy as np\n'), ((3206, 3231), 'numpy.zeros', 'np.zeros', (['coords.shape[0]'], {}), '(coords.shape[0])\n', (3214, 3231), True, 'import numpy as np\n'), ((3748, 3784), 'numpy.zeros', 'np.zeros', (['(node_numbers.size - 1, 2)'], {}), '((node_numbers.size - 1, 2))\n', (3756, 3784), True, 'import numpy as np\n'), ((4637, 4668), 'numpy.zeros', 'np.zeros', (['(node_no_top.size, 2)'], {}), '((node_no_top.size, 2))\n', (4645, 4668), True, 'import numpy as np\n'), ((5395, 5426), 'numpy.zeros', 'np.zeros', (['(node_no_top.size, 2)'], {}), '((node_no_top.size, 2))\n', (5403, 5426), True, 'import numpy as np\n'), ((6900, 6942), 'numpy.array', 'np.array', (['[node_no_top[i], node_no_bot[i]]'], {}), '([node_no_top[i], node_no_bot[i]])\n', (6908, 6942), True, 'import numpy as np\n'), ((7701, 7741), 'numpy.array', 'np.array', (['[node_no_top[i], node_num_top]'], {}), '([node_no_top[i], node_num_top])\n', (7709, 7741), True, 'import numpy as np\n'), ((7829, 7867), 'numpy.array', 'np.array', (['[node_num_top, node_num_bot]'], {}), '([node_num_top, node_num_bot])\n', (7837, 7867), True, 'import numpy as np\n'), ((7956, 7996), 'numpy.array', 'np.array', (['[node_num_bot, node_no_bot[i]]'], {}), '([node_num_bot, node_no_bot[i]])\n', (7964, 7996), True, 'import numpy as np\n'), ((19007, 19016), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (19013, 19016), True, 'import numpy as np\n'), ((614, 647), 'numpy.array', 'np.array', (['([nums] * nodes.shape[0])'], {}), '([nums] * nodes.shape[0])\n', (622, 647), True, 'import numpy as np\n'), ((10596, 10640), 'numpy.zeros', 'np.zeros', (['(node_no_top[indices].shape[0], 2)'], {}), '((node_no_top[indices].shape[0], 2))\n', (10604, 10640), True, 'import numpy as np\n'), ((12571, 12620), 'numpy.array', 'np.array', (['[node_no_top[indices[i]], node_num_top]'], {}), '([node_no_top[indices[i]], node_num_top])\n', (12579, 12620), True, 'import numpy as np\n'), ((12737, 12775), 'numpy.array', 
'np.array', (['[node_num_top, node_num_bot]'], {}), '([node_num_top, node_num_bot])\n', (12745, 12775), True, 'import numpy as np\n'), ((12893, 12946), 'numpy.array', 'np.array', (['[node_num_bot, node_no_bot[indices[i + 1]]]'], {}), '([node_num_bot, node_no_bot[indices[i + 1]]])\n', (12901, 12946), True, 'import numpy as np\n'), ((705, 719), 'numpy.array', 'np.array', (['nums'], {}), '(nums)\n', (713, 719), True, 'import numpy as np\n'), ((13457, 13506), 'numpy.array', 'np.array', (['[node_no_bot[indices[i]], node_num_bot]'], {}), '([node_no_bot[indices[i]], node_num_bot])\n', (13465, 13506), True, 'import numpy as np\n'), ((13623, 13661), 'numpy.array', 'np.array', (['[node_num_bot, node_num_top]'], {}), '([node_num_bot, node_num_top])\n', (13631, 13661), True, 'import numpy as np\n'), ((13779, 13832), 'numpy.array', 'np.array', (['[node_num_top, node_no_top[indices[i + 1]]]'], {}), '([node_num_top, node_no_top[indices[i + 1]]])\n', (13787, 13832), True, 'import numpy as np\n'), ((783, 799), 'numpy.array', 'np.array', (['[nums]'], {}), '([nums])\n', (791, 799), True, 'import numpy as np\n'), ((14532, 14581), 'numpy.array', 'np.array', (['[node_no_bot[indices[i]], node_num_bot]'], {}), '([node_no_bot[indices[i]], node_num_bot])\n', (14540, 14581), True, 'import numpy as np\n'), ((14698, 14736), 'numpy.array', 'np.array', (['[node_num_bot, node_num_top]'], {}), '([node_num_bot, node_num_top])\n', (14706, 14736), True, 'import numpy as np\n'), ((14854, 14907), 'numpy.array', 'np.array', (['[node_num_top, node_no_top[indices[i + 1]]]'], {}), '([node_num_top, node_no_top[indices[i + 1]]])\n', (14862, 14907), True, 'import numpy as np\n'), ((15382, 15431), 'numpy.array', 'np.array', (['[node_no_top[indices[i]], node_num_top]'], {}), '([node_no_top[indices[i]], node_num_top])\n', (15390, 15431), True, 'import numpy as np\n'), ((15548, 15586), 'numpy.array', 'np.array', (['[node_num_top, node_num_bot]'], {}), '([node_num_top, node_num_bot])\n', (15556, 15586), True, 
'import numpy as np\n'), ((15704, 15757), 'numpy.array', 'np.array', (['[node_num_bot, node_no_bot[indices[i + 1]]]'], {}), '([node_num_bot, node_no_bot[indices[i + 1]]])\n', (15712, 15757), True, 'import numpy as np\n'), ((16276, 16325), 'numpy.array', 'np.array', (['[node_no_bot[indices[i]], node_num_bot]'], {}), '([node_no_bot[indices[i]], node_num_bot])\n', (16284, 16325), True, 'import numpy as np\n'), ((16442, 16480), 'numpy.array', 'np.array', (['[node_num_bot, node_num_top]'], {}), '([node_num_bot, node_num_top])\n', (16450, 16480), True, 'import numpy as np\n'), ((16598, 16651), 'numpy.array', 'np.array', (['[node_num_top, node_no_top[indices[i + 1]]]'], {}), '([node_num_top, node_no_top[indices[i + 1]]])\n', (16606, 16651), True, 'import numpy as np\n'), ((17122, 17171), 'numpy.array', 'np.array', (['[node_no_top[indices[i]], node_num_top]'], {}), '([node_no_top[indices[i]], node_num_top])\n', (17130, 17171), True, 'import numpy as np\n'), ((17288, 17326), 'numpy.array', 'np.array', (['[node_num_top, node_num_bot]'], {}), '([node_num_top, node_num_bot])\n', (17296, 17326), True, 'import numpy as np\n'), ((17444, 17497), 'numpy.array', 'np.array', (['[node_num_bot, node_no_bot[indices[i + 1]]]'], {}), '([node_num_bot, node_no_bot[indices[i + 1]]])\n', (17452, 17497), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""spacecraft.py
"""
import concurrent.futures
import datetime
import heliosat
import logging
import multiprocessing
import numpy as np
import os
import spiceypy
from .caching import cache_add_entry, cache_entry_exists, cache_generate_key, cache_get_entry
from .datafile import DataFile
from .smoothing import smooth_data
from .util import dt_utc, dt_utc_from_ts, fetch_url, load_json, sanitize_dt
from typing import Any, List, Optional, Sequence, Tuple, Union
class Body(object):
    """A solar-system body backed by SPICE kernels.

    Loading the "default" kernel group (and optionally a body-specific
    group) happens at construction time; positions are then queried
    through SPICE.
    """
    name: str
    name_naif: str

    def __init__(self, name: str, name_naif: str, kernel_group: Optional[str] = None, **kwargs: Any) -> None:
        """Store identifiers and load the required SPICE kernel groups.

        Parameters
        ----------
        name : human-readable body name.
        name_naif : NAIF identifier string used by SPICE.
        kernel_group : optional additional kernel group to load;
            extra kwargs are forwarded to the kernel manager.
        """
        self.name = name
        self.name_naif = name_naif

        # The default kernel group is always required; load it only once.
        if "default" not in heliosat._skm.group_list:
            heliosat._skm.load_group("default")

        if kernel_group:
            heliosat._skm.load_group(kernel_group, **kwargs)

    def trajectory(self, dt: Union[datetime.datetime, Sequence[datetime.datetime]],
                   reference_frame: str = "J2000", observer: str = "SUN", units: str = "AU") -> np.ndarray:
        """Return the body's position at the given datetime(s).

        Parameters
        ----------
        dt : a datetime or sequence of datetimes (sanitized to UTC).
        reference_frame : SPICE reference frame name.
        observer : SPICE observer body name.
        units : one of "AU", "m" or "km".

        Raises
        ------
        ValueError
            If an unsupported unit is requested.
        """
        logger = logging.getLogger(__name__)

        dt = sanitize_dt(dt)

        traj = np.array(
            spiceypy.spkpos(
                self.name_naif,
                spiceypy.datetime2et(dt),
                reference_frame,
                "NONE",
                observer
            )[0]
        )

        # spkpos returns kilometers; convert to the requested unit.
        if units == "AU":
            traj *= 6.68459e-9
        elif units == "m":
            traj *= 1e3
        elif units == "km":
            pass
        else:
            # BUGFIX: actually format the message (previously the raw format
            # string and the argument were passed to ValueError separately),
            # and use logger.error since we are not inside an except block.
            logger.error("unit \"%s\" is not supported", units)
            raise ValueError("unit \"%s\" is not supported" % units)

        return traj
class Spacecraft(Body):
    """Base class for spacecraft with downloadable in-situ data products.

    Subclasses are expected to define ``name``, ``name_naif``,
    ``kernel_group`` and the data catalogue ``_json``; measurement files
    are read through ``data_file_class``.
    """
    name: str
    name_naif: str
    kernel_group: str

    _json: dict

    data_file_class = DataFile

    def __init__(self, **kwargs: Any) -> None:
        """Load the spacecraft's SPICE kernels and set up legacy aliases."""
        super(Spacecraft, self).__init__(self.name, self.name_naif, self.kernel_group, **kwargs)

        # legacy support
        self.get_data = self.get

    def get(self, dt: Union[str, datetime.datetime, Sequence[str], Sequence[datetime.datetime]], data_key: str, **kwargs: Any) -> Tuple[np.ndarray, np.ndarray]:
        """Return measurements for the given datetime(s) and data key.

        Parameters
        ----------
        dt : datetime(s), or string representations thereof.
        data_key : data product identifier (alternate keys are resolved).

        Keyword Arguments
        -----------------
        remove_nans : bool, drop observations containing NaN components.
        return_datetimes : bool, convert returned timestamps to datetimes.
        sampling_freq : int, sampling frequency (s), used with as_endpoints.
        smoothing : smoothing method; any additional kwargs containing
            "smoothing" are forwarded to the smoothing routine.
        use_cache : bool, read/write results from/to the local cache.
        as_endpoints : bool, interpret dt as [start, end] and resample.

        Returns
        -------
        (timestamps, values) as numpy arrays.
        """
        logger = logging.getLogger(__name__)

        data_key = self.data_key_resolve(data_key)

        if isinstance(dt, datetime.datetime):
            dt = [dt]

        dt = sanitize_dt(dt)  # type: ignore

        # caching identifier: includes everything that affects the result
        identifiers = {
            "data_key": data_key,
            "spacecraft": self.name,
            "times": [_t.timestamp() for _t in dt],  # type: ignore
            "version": heliosat.__version__,
            **kwargs
        }

        # extract relevant kwargs
        remove_nans = kwargs.pop("remove_nans", False)
        return_datetimes = kwargs.pop("return_datetimes", False)
        sampling_freq = kwargs.pop("sampling_freq", 60)

        smoothing_kwargs = {"smoothing": kwargs.pop("smoothing", "closest")}

        # collect any additional smoothing-related kwargs
        for key in dict(kwargs):
            if "smoothing" in key:
                smoothing_kwargs[key] = kwargs.pop(key)

        use_cache = kwargs.pop("use_cache", False)

        if use_cache:
            cache_key = cache_generate_key(identifiers)

            if cache_entry_exists(cache_key):
                dt_r, dk_r = cache_get_entry(cache_key)
                return dt_r, dk_r
            else:
                logger.info("cache entry \"%s\" not found", cache_key)

        # use the dt list as interval endpoints and resample regularly
        if kwargs.pop("as_endpoints", False):
            if len(dt) < 2:  # type: ignore
                logger.error("datetime list must be of length larger of 2 to use endpoints")
                raise ValueError("datetime list must be of length larger of 2 to use endpoints")

            ts = np.linspace(dt[0].timestamp(), dt[-1].timestamp(), int((dt[-1].timestamp() - dt[0].timestamp()) // sampling_freq))  # type: ignore
            dt = [datetime.datetime.fromtimestamp(_t, datetime.timezone.utc) for _t in ts]

        dt_r, dk_r = self._get_data(dt[0], dt[-1], data_key, **kwargs)  # type: ignore

        if smoothing_kwargs["smoothing"]:
            dt_r, dk_r = smooth_data(dt, dt_r, dk_r, **smoothing_kwargs)  # type: ignore

        if return_datetimes:
            _dt = list(dt_r)

            for i in range(len(_dt)):
                # BUGFIX: the original check "_dt[i] != np.nan" is always
                # True (NaN compares unequal to everything), so NaN
                # timestamps were passed to dt_utc_from_ts. Skip them.
                if not np.isnan(_dt[i]):
                    _dt[i] = dt_utc_from_ts(dt_r[i])

            dt_r = np.array(_dt)

        if remove_nans:
            # keep only observations without any NaN component
            nanfilter = np.invert(np.any(np.isnan(dk_r[:, :]), axis=1))
            dt_r = dt_r[nanfilter]
            dk_r = dk_r[nanfilter]

        if use_cache:
            logger.info("generating cache entry \"%s\"", cache_key)
            cache_add_entry(cache_key, (dt_r, dk_r))

        return dt_r, dk_r

    def _get_data(self, dt_start: datetime.datetime, dt_end: datetime.datetime, data_key: str, **kwargs: Any) -> Tuple[np.ndarray, np.ndarray]:
        """Read raw measurements in [dt_start, dt_end] from the data files.

        Raises ValueError if dt_start is after dt_end.
        """
        logger = logging.getLogger(__name__)

        data_key = self.data_key_resolve(data_key)

        dt_start = sanitize_dt(dt_start)  # type: ignore
        dt_end = sanitize_dt(dt_end)  # type: ignore

        if dt_start > dt_end:
            logger.error("starting date must be before final date")
            raise ValueError("starting date must be before final date")

        force_download = kwargs.get("force_download", False)

        # get (and if necessary download) the files covering the interval
        files = self._get_files(dt_start, dt_end, data_key, force_download=force_download)

        logger.info("using %s files to generate "
                    "data in between %s - %s", len(files), dt_start, dt_end)

        # copy so that a caller-provided list is not mutated by extend()
        columns = list(kwargs.get("columns", ["~"]))
        columns.extend(kwargs.get("extra_columns", []))

        frame = kwargs.get("frame", kwargs.get("reference_frame", None))

        max_workers = min([multiprocessing.cpu_count(), len(files)])

        # files are read concurrently in separate processes
        with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
            futures = [executor.submit(file.read, dt_start, dt_end, data_key, columns, frame) for file in files]
            result = [_ for _ in [future.result() for future in futures] if _]

        dt_r = np.concatenate([_[0] for _ in result])
        dk_r = np.concatenate([_[1] for _ in result])

        return dt_r, dk_r

    @staticmethod
    def _expand_pattern(pattern: str, day: datetime.datetime) -> str:
        """Substitute the supported date placeholder tokens in a pattern.

        Supported tokens: {YYYY}, {YY}, {MM}, {MONTH}, {DD}, {DOY},
        {DOYM1} (day-of-year of the first day of the month) and
        {DOYM2} (day-of-year of the last day of the month).
        """
        pattern = pattern.replace("{YYYY}", str(day.year))
        pattern = pattern.replace("{YY}", "{0:02d}".format(day.year % 100))
        pattern = pattern.replace("{MM}", "{:02d}".format(day.month))
        pattern = pattern.replace("{MONTH}", day.strftime("%B")[:3].upper())
        pattern = pattern.replace("{DD}", "{:02d}".format(day.day))
        pattern = pattern.replace("{DOY}", "{:03d}".format(day.timetuple().tm_yday))

        doym1 = dt_utc(day.year, day.month, 1)

        if day.month == 12:
            doym2 = dt_utc(day.year + 1, 1, 1) - datetime.timedelta(days=1)
        else:
            doym2 = dt_utc(day.year, day.month + 1, 1) - datetime.timedelta(days=1)

        pattern = pattern.replace("{DOYM1}", "{:03d}".format(doym1.timetuple().tm_yday))
        pattern = pattern.replace("{DOYM2}", "{:03d}".format(doym2.timetuple().tm_yday))

        return pattern

    def _get_files(self, dt_start: datetime.datetime, dt_end: datetime.datetime, data_key: str, force_download: bool = False) -> List[DataFile]:
        """Build, prepare and return the data files covering the interval."""
        # adjust ranges slightly so that whole days are covered
        if (dt_end - dt_start).days > 1:
            dt_start -= datetime.timedelta(hours=dt_start.hour, minutes=dt_start.minute,
                                          seconds=dt_start.second)

            if dt_end.hour == 0 and dt_end.minute == 0 and dt_end.second == 0:
                dt_end -= datetime.timedelta(seconds=1)

        files = []

        # prepare candidate urls/filenames for each day in the interval
        # (the token substitution is shared between urls and filenames)
        for day in [dt_start + datetime.timedelta(days=i) for i in range((dt_end - dt_start).days + 1)]:
            base_urls = [self._expand_pattern(url, day)
                         for url in self._json["keys"][data_key]["base_urls"]]

            filename = self._json["keys"][data_key].get("filename", None)

            if filename:
                filename = self._expand_pattern(filename, day)

            files.append(self.data_file_class(base_urls, filename, data_key, self._json))

        # prepare (download/verify) files concurrently
        with concurrent.futures.ThreadPoolExecutor(max_workers=25) as executor:
            futures = [executor.submit(file.prepare, force_download) for file in files]

            for future in concurrent.futures.as_completed(futures):
                _ = future.result()

        # discard files that could not be prepared
        for file in list(files):
            if not file.ready:
                files.remove(file)

        return files

    @property
    def data_keys(self) -> List[str]:
        """All primary data keys defined in the catalogue."""
        return list(self._json["keys"].keys())

    def data_key_resolve(self, data_key: str) -> str:
        """Resolve a (possibly alternate) data key to its primary name.

        Raises KeyError if the key is neither a primary nor an alternate key.
        """
        logger = logging.getLogger(__name__)

        if data_key not in self._json["keys"]:
            resolved = False

            for key in self._json["keys"]:
                if data_key in self._json["keys"][key].get("alt_keys", []):
                    data_key = key
                    resolved = True
                    break

            if not resolved:
                # BUGFIX: actually format the message (previously the raw
                # format string and the argument were passed to KeyError).
                logger.error("data_key \"%s\" not found", data_key)
                raise KeyError("data_key \"%s\" not found" % data_key)

        return data_key
| [
"logging.getLogger",
"heliosat._skm.load_group",
"datetime.datetime.fromtimestamp",
"spiceypy.datetime2et",
"multiprocessing.cpu_count",
"numpy.array",
"numpy.isnan",
"numpy.concatenate",
"datetime.timedelta"
] | [((1141, 1168), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1158, 1168), False, 'import logging\n'), ((1990, 2017), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2007, 2017), False, 'import logging\n'), ((2354, 2381), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2371, 2381), False, 'import logging\n'), ((5138, 5165), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (5155, 5165), False, 'import logging\n'), ((6359, 6397), 'numpy.concatenate', 'np.concatenate', (['[_[0] for _ in result]'], {}), '([_[0] for _ in result])\n', (6373, 6397), True, 'import numpy as np\n'), ((6413, 6451), 'numpy.concatenate', 'np.concatenate', (['[_[1] for _ in result]'], {}), '([_[1] for _ in result])\n', (6427, 6451), True, 'import numpy as np\n'), ((6642, 6669), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6659, 6669), False, 'import logging\n'), ((9979, 10006), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (9996, 10006), False, 'import logging\n'), ((808, 843), 'heliosat._skm.load_group', 'heliosat._skm.load_group', (['"""default"""'], {}), "('default')\n", (832, 843), False, 'import heliosat\n'), ((882, 930), 'heliosat._skm.load_group', 'heliosat._skm.load_group', (['kernel_group'], {}), '(kernel_group, **kwargs)\n', (906, 930), False, 'import heliosat\n'), ((4623, 4636), 'numpy.array', 'np.array', (['_dt'], {}), '(_dt)\n', (4631, 4636), True, 'import numpy as np\n'), ((6777, 6871), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'dt_start.hour', 'minutes': 'dt_start.minute', 'seconds': 'dt_start.second'}), '(hours=dt_start.hour, minutes=dt_start.minute, seconds=\n dt_start.second)\n', (6795, 6871), False, 'import datetime\n'), ((4125, 4182), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['_', 'datetime.timezone.utc'], {}), '(_, 
datetime.timezone.utc)\n', (4156, 4182), False, 'import datetime\n'), ((6021, 6048), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (6046, 6048), False, 'import multiprocessing\n'), ((7016, 7045), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (7034, 7045), False, 'import datetime\n'), ((7121, 7147), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (7139, 7147), False, 'import datetime\n'), ((1302, 1326), 'spiceypy.datetime2et', 'spiceypy.datetime2et', (['dt'], {}), '(dt)\n', (1322, 1326), False, 'import spiceypy\n'), ((4703, 4723), 'numpy.isnan', 'np.isnan', (['dk_r[:, :]'], {}), '(dk_r[:, :])\n', (4711, 4723), True, 'import numpy as np\n'), ((7874, 7900), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (7892, 7900), False, 'import datetime\n'), ((7988, 8014), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8006, 8014), False, 'import datetime\n'), ((8980, 9006), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8998, 9006), False, 'import datetime\n'), ((9094, 9120), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (9112, 9120), False, 'import datetime\n')] |
import numpy as np
from vector import Vector, Ray
class Camera():
    """A thin-lens camera that generates viewing rays through a viewport."""

    def __init__(
            self, image_width: int, image_height: int,
            look_from: Vector, look_at: Vector, v_up=None,
            vfov=90, aperture_width=0, focus_dist=1.0) -> None:
        """Set up the viewport geometry and the camera's orthonormal basis."""
        self.image_width = image_width
        self.image_height = image_height
        self.lens_radius = aperture_width / 2
        self.vfov = vfov

        # Viewport size follows from the vertical field of view
        # (degrees converted to radians).
        half_angle = self.vfov * np.pi / 180 / 2
        self.viewport_height = 2.0 * np.tan(half_angle)
        self.viewport_width = self.viewport_height * self.aspect_ratio

        self.look_from = look_from
        self.look_at = look_at
        self.v_up = v_up or Vector(0, 1, 0)

        # Orthonormal basis: w points backwards, u to the right, v up.
        self.w = (self.look_from - self.look_at).unit()
        self.u = self.v_up.cross(self.w).unit()
        self.v = self.w.cross(self.u)

        # Viewport spans and lower-left corner on the focus plane.
        self.horizontal = self.viewport_width * self.u * focus_dist
        self.vertical = self.viewport_height * self.v * focus_dist
        self.lower_left = (self.look_from
                           - (self.horizontal / 2)
                           - (self.vertical / 2)
                           - (focus_dist * self.w))

    @property
    def aspect_ratio(self):
        """Width-to-height ratio of the output image."""
        return self.image_width / self.image_height

    def _get_offset(self):
        """Random point on the lens disc; zero vector for a pinhole camera."""
        if self.lens_radius == 0:
            return Vector(0, 0, 0)
        # Rejection-sample a point inside the unit disc, then scale it.
        while True:
            dx, dy = (np.random.random(2) * 2) - 1
            if dx**2 + dy**2 <= 1:
                return ((dx * self.u) + (dy * self.v)) * self.lens_radius

    def get_ray(self, horizontal_component, vertical_component):
        """Build the viewing ray through the given viewport coordinates."""
        origin = self.look_from + self._get_offset()
        target = self.lower_left +\
            (horizontal_component * self.horizontal) +\
            (vertical_component * self.vertical)
        return Ray(origin, target - origin)
| [
"numpy.random.random",
"numpy.tan",
"vector.Vector",
"vector.Ray"
] | [((1846, 1866), 'vector.Ray', 'Ray', (['base', 'direction'], {}), '(base, direction)\n', (1849, 1866), False, 'from vector import Vector, Ray\n'), ((493, 510), 'numpy.tan', 'np.tan', (['(theta / 2)'], {}), '(theta / 2)\n', (499, 510), True, 'import numpy as np\n'), ((677, 692), 'vector.Vector', 'Vector', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (683, 692), False, 'from vector import Vector, Ray\n'), ((1312, 1327), 'vector.Vector', 'Vector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1318, 1327), False, 'from vector import Vector, Ray\n'), ((1369, 1388), 'numpy.random.random', 'np.random.random', (['(2)'], {}), '(2)\n', (1385, 1388), True, 'import numpy as np\n')] |
#!/usr/bin/python
import logging
import numpy as np
import pandas as pd
from collections import defaultdict
def summarize_genomes(protein_abund, metadata):
    """From a set of protein abundances, summarize the genomes.

    Parameters
    ----------
    protein_abund : list of dict
        Per-protein alignment summaries (as produced by `parse_alignment`),
        each with at least the keys "protein", "coverage", "depth",
        "pctid", "bitscore", "alen" and "nreads".
    metadata : pandas.DataFrame
        Protein metadata with at least the columns "protein", "genome"
        and "length" (lengths must be positive).

    Returns
    -------
    tuple of (list of dict, list of dict)
        Per-protein records for genomes with any detected protein, and
        per-genome aggregate summaries.
    """
    # Format the protein data as a DataFrame
    protein_abund = pd.DataFrame(protein_abund).set_index("protein")

    # Add the detected protein information to the metadata table,
    # filling in zeroes for proteins without any alignments
    for k in [
        "coverage", "depth", "pctid", "bitscore", "alen", "nreads"
    ]:
        metadata[k] = metadata["protein"].apply(
            protein_abund[k].to_dict().get
        ).fillna(0)

    assert (metadata["length"] > 0).all()

    # Subset to the GENOMES that have _any_ proteins detected
    detected = [
        genome_dat
        for genome, genome_dat in metadata.groupby("genome")
        if (genome_dat["coverage"] > 0).any()
    ]
    # BUGFIX: pd.concat raises on an empty list -- return empty summaries
    # instead when no genome has any detected protein
    if len(detected) == 0:
        return [], []
    metadata = pd.concat(detected)

    # Save the protein summary for these genomes
    protein_abund = metadata.to_dict(orient="records")

    # Now make a summary on a per-genome basis
    genome_abund = []

    # Iterate over all of the genomes
    for genome, proteins in metadata.groupby("genome"):

        # Calculate the aggregate genome length
        agg_len = proteins["length"].sum()

        # Calculate the aggregate coverage, depth, number of proteins, etc.
        dat = {
            "total_length": int(agg_len),
            "total_proteins": proteins.shape[0],
            "detected_proteins": (proteins["coverage"] > 0).sum(),
            "genome": genome,
            "nreads": int(proteins["nreads"].sum()),
        }

        for k in ["coverage", "depth", "pctid", "bitscore", "alen"]:
            # Make a length-adjusted average
            dat[k] = (proteins[k] * proteins["length"]).sum() / agg_len

        # For all of the other columns, add them if they are unique.
        # BUGFIX: previously tested `== 0`, which is never true for a
        # non-empty group, so unique columns were always dropped.
        for k in proteins.columns:
            if k not in dat:
                if len(proteins[k].unique()) == 1:
                    dat[k] = proteins[k].values[0]

        genome_abund.append(dat)

    return protein_abund, genome_abund
def parse_alignment(align_fp,
                    subject_ix=1,
                    pctid_ix=2,
                    alen_ix=3,
                    sstart_ix=6,
                    send_ix=7,
                    bitscore_ix=9,
                    slen_ix=11):
    """
    Parse an alignment in BLAST6 format and calculate coverage per subject.

    Parameters
    ----------
    align_fp : str
        Path to a tab-delimited alignment file (BLAST outfmt-6 style).
    subject_ix, pctid_ix, alen_ix, sstart_ix, send_ix, bitscore_ix, slen_ix : int
        Zero-based column indices for the subject name, percent identity,
        alignment length, subject start/end (1-based, inclusive), bit score
        and subject length.

    Returns
    -------
    list of dict
        One record per aligned subject with coverage fraction, mean depth,
        mean percent identity, mean alignment length, mean bit score,
        number of reads and subject length.
    """
    # Keep track of a number of different metrics for each subject
    coverage = {}
    subject_len = {}
    pctid = defaultdict(list)
    alen = defaultdict(list)
    bitscore = defaultdict(list)

    logging.info("Reading from {}".format(align_fp))
    # BUGFIX: track the alignment count explicitly instead of referencing
    # the loop variable after the loop, which raised UnboundLocalError on
    # an empty input file.
    n_aligns = 0
    with open(align_fp, "rt") as f:
        for line in f:
            # Skip blank lines
            if len(line) == 1:
                continue

            fields = line.rstrip("\n").split("\t")
            s = fields[subject_ix]

            if s not in coverage:
                # First alignment against this subject: set up the
                # per-position depth vector.
                slen = int(fields[slen_ix])
                subject_len[s] = slen
                coverage[s] = np.zeros(slen, dtype=int)

            pctid[s].append(float(fields[pctid_ix]))
            alen[s].append(int(fields[alen_ix]))
            bitscore[s].append(float(fields[bitscore_ix]))

            # Subject coordinates are 1-based and inclusive
            coverage[s][
                (int(fields[sstart_ix]) - 1): int(fields[send_ix])
            ] += 1

            n_aligns += 1
            if n_aligns % 1e6 == 0:
                logging.info("Parsed {:,} alignments".format(n_aligns))

    logging.info("Parsed {:,} alignments".format(n_aligns))

    # Calculate the per-subject stats
    output = []
    for n_subjects, s in enumerate(coverage, 1):
        output.append({
            "protein": s,
            "coverage": (coverage[s] > 0).mean(),
            "depth": coverage[s].mean(),
            "pctid": np.mean(pctid[s]),
            "alen": np.mean(alen[s]),
            "bitscore": np.mean(bitscore[s]),
            "nreads": len(pctid[s]),
            "length": subject_len[s],
        })
        if n_subjects % 1e3 == 0:
            logging.info("Summarized coverage for {:,} subjects".format(n_subjects))

    logging.info("Summarized coverage for {:,} subjects".format(len(output)))

    return output
| [
"pandas.DataFrame",
"numpy.mean",
"numpy.zeros",
"collections.defaultdict"
] | [((2547, 2564), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2558, 2564), False, 'from collections import defaultdict\n'), ((2576, 2593), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2587, 2593), False, 'from collections import defaultdict\n'), ((2609, 2626), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2620, 2626), False, 'from collections import defaultdict\n'), ((292, 319), 'pandas.DataFrame', 'pd.DataFrame', (['protein_abund'], {}), '(protein_abund)\n', (304, 319), True, 'import pandas as pd\n'), ((3037, 3062), 'numpy.zeros', 'np.zeros', (['slen'], {'dtype': 'int'}), '(slen, dtype=int)\n', (3045, 3062), True, 'import numpy as np\n'), ((3745, 3762), 'numpy.mean', 'np.mean', (['pctid[s]'], {}), '(pctid[s])\n', (3752, 3762), True, 'import numpy as np\n'), ((3784, 3800), 'numpy.mean', 'np.mean', (['alen[s]'], {}), '(alen[s])\n', (3791, 3800), True, 'import numpy as np\n'), ((3826, 3846), 'numpy.mean', 'np.mean', (['bitscore[s]'], {}), '(bitscore[s])\n', (3833, 3846), True, 'import numpy as np\n')] |
import numpy as np
#import matplotlib.pyplot as plt
from math import *
# Accumulator for the right-hand side of the linearized system.
Q = []


def square(x):
    """Return x squared."""
    return x * x
# --- Trilateration from beacon positions and distances --------------------
# Reads beacon coordinates interactively, then solves the linearized
# trilateration system A @ [x, y] = C in a least-squares sense.
D=[]
beacons=int(input("ENter no of beacons"))
bcx=[]
bcy=[]
#bcx beacon coordinate x
#bcy beacon coordinate y
#maybe filter with MAC adress later
for i in range(beacons):
    bcx.append(float(input("Enter X"+str(i))))
    bcy.append(float(input("Enter Y"+str(i))))
    D.append(3) ############CHANGE THIS LATER (distances hard-coded to 3)

# Coefficient matrix: row i is [2*(x_n - x_i), 2*(y_n - y_i)],
# with n = last beacon (the reference equation that is subtracted).
A=np.array([2*(bcx[beacons-1]-bcx[0]),2*(bcy[beacons-1]-bcy[0])])
print (A)
for i in range(1,beacons-1):
    O=np.array([2*(bcx[beacons-1]-bcx[i]),2*(bcy[beacons-1]-bcy[i])])
    A=np.concatenate((A,O)) #concatenating the remaining rows of A
print (A)
B=A.reshape(beacons-1,2) #converts to a 2d array with beacons-1 rows

# Right-hand side of the linearized system:
#   C_i = d_i^2 - d_n^2 + x_n^2 + y_n^2 - x_i^2 - y_i^2
# BUGFIX: the -d_n^2 term was previously missing, which biases the
# least-squares solution whenever the distances are non-zero.
for i in range(beacons-1):
    a=square(D[i])-square(D[beacons-1])+square(bcx[beacons-1])+square(bcy[beacons-1])-(square(bcx[i])+square(bcy[i]))
    Q.append(a)
C=np.array(Q)
print (A)
print (B)
# Least-squares solution; rcond=None silences numpy's FutureWarning and
# selects the new default cutoff for small singular values.
m, c = np.linalg.lstsq(B, C, rcond=None)[0]  # m, c = estimated x, y position
print (m, c)
| [
"numpy.array",
"numpy.linalg.lstsq",
"numpy.concatenate"
] | [((432, 508), 'numpy.array', 'np.array', (['[2 * (bcx[beacons - 1] - bcx[0]), 2 * (bcy[beacons - 1] - bcy[0])]'], {}), '([2 * (bcx[beacons - 1] - bcx[0]), 2 * (bcy[beacons - 1] - bcy[0])])\n', (440, 508), True, 'import numpy as np\n'), ((1123, 1134), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (1131, 1134), True, 'import numpy as np\n'), ((541, 617), 'numpy.array', 'np.array', (['[2 * (bcx[beacons - 1] - bcx[i]), 2 * (bcy[beacons - 1] - bcy[i])]'], {}), '([2 * (bcx[beacons - 1] - bcx[i]), 2 * (bcy[beacons - 1] - bcy[i])])\n', (549, 617), True, 'import numpy as np\n'), ((609, 631), 'numpy.concatenate', 'np.concatenate', (['(A, O)'], {}), '((A, O))\n', (623, 631), True, 'import numpy as np\n'), ((1169, 1190), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['B', 'C'], {}), '(B, C)\n', (1184, 1190), True, 'import numpy as np\n')] |
from allennlp.common import Params
import numpy as np
from numpy.testing import assert_allclose
import pytest
from contexteval.common.custom_test_case import CustomTestCase
from contexteval.contextualizers import PrecomputedContextualizer
from contexteval.data.dataset_readers import ConstituencyAncestorPredictionDatasetReader
class TestConstituencyAncestorPredictionDatasetReader():
data_path = CustomTestCase.FIXTURES_ROOT / "data" / "syntactic_constituency" / "wsj.txt"
contextualizer_path = (CustomTestCase.FIXTURES_ROOT / "contextualizers" /
"precomputed_elmo" / "elmo_layers_all.hdf5")
    @pytest.mark.parametrize('lazy', (True, False))
    @pytest.mark.parametrize('use_contextualizer', (True, False))
    @pytest.mark.parametrize('ancestor', ('parent', 'grandparent', 'greatgrandparent'))
    def test_read_from_file(self, lazy, use_contextualizer, ancestor):
        """Read the WSJ fixture and verify the tokens, the ancestor labels
        for each ancestor setting, and -- when a contextualizer is used --
        the first two dimensions of the precomputed representations for
        the first two instances."""
        # Set up contextualizer, if using.
        contextualizer = None
        if use_contextualizer:
            contextualizer = PrecomputedContextualizer(self.contextualizer_path)
        reader = ConstituencyAncestorPredictionDatasetReader(ancestor=ancestor,
                                                             contextualizer=contextualizer,
                                                             lazy=lazy)
        instances = list(reader.read(str(self.data_path)))
        # First read instance
        instance = instances[0]
        fields = instance.fields
        assert [token.metadata for token in fields["raw_tokens"].field_list] == [
            'In', 'an', 'Oct.', '19', 'review', 'of', '``', 'The', 'Misanthrope', "''", 'at', 'Chicago',
            "'s", 'Goodman', 'Theatre', '(', '``', 'Revitalized', 'Classics', 'Take', 'the', 'Stage',
            'in', 'Windy', 'City', ',', "''", 'Leisure', '&', 'Arts', ')', ',', 'the', 'role', 'of',
            'Celimene', ',', 'played', 'by', 'Kim', 'Cattrall', ',', 'was', 'mistakenly', 'attributed',
            'to', 'Christina', 'Haag', '.']
        # Every token must receive exactly one ancestor label.
        assert len([token.metadata for token in fields["raw_tokens"].field_list]) == len(fields["labels"].labels)
        if ancestor == "parent":
            assert fields["labels"].labels == [
                'PP', 'NP', 'NP', 'NP', 'NP', 'PP', 'NP', 'NP', 'NP',
                'NP', 'PP', 'NP', 'NP', 'NP', 'NP', 'PRN', 'PRN', 'NP',
                'NP', 'VP', 'NP', 'NP', 'PP', 'NP', 'NP', 'PRN', 'PRN',
                'NP', 'NP', 'NP', 'PRN', 'S', 'NP', 'NP', 'PP', 'NP', 'NP', 'VP',
                'PP', 'NP', 'NP', 'NP', 'VP', 'ADVP', 'VP', 'PP', 'NP', 'NP', 'S']
        elif ancestor == "grandparent":
            assert fields["labels"].labels == [
                'S', 'NP', 'NP', 'NP', 'NP', 'NP', 'PP', 'NP', 'NP', 'PP', 'NP',
                'NP', 'NP', 'PP', 'PP', 'NP', 'NP', 'S', 'S', 'S', 'VP', 'VP',
                'VP', 'PP', 'PP', 'NP', 'NP', 'PRN', 'PRN', 'PRN', 'NP', 'None',
                'NP', 'NP', 'NP', 'PP', 'S', 'NP', 'VP', 'PP', 'PP', 'S', 'S',
                'VP', 'VP', 'VP', 'PP', 'PP', 'None']
        else:
            # ancestor is greatgrandparent
            assert fields["labels"].labels == [
                'None', 'PP', 'PP', 'PP', 'PP', 'PP', 'NP', 'PP', 'PP', 'NP', 'PP', 'PP',
                "PP", 'NP', 'NP', 'PP', 'PP', 'PRN', 'PRN', 'PRN', 'S', 'S',
                'S', 'VP', 'VP', 'PP', "PP", 'NP', 'NP', 'NP', 'PP', 'None', 'NP', 'NP', 'NP',
                'NP', 'None', 'S', 'NP', 'VP', 'VP', 'None', 'None', 'VP', 'S',
                'VP', 'VP', 'VP', 'None']
        if use_contextualizer:
            # Only spot-check the first two ELMo dimensions of each token.
            assert_allclose(
                fields["token_representations"].array[:, :2],
                np.array([[0.7541596, 0.36606207], [-0.3912218, 0.2728929],
                          [0.4532569, 0.59446496], [-0.034773, 0.6178972],
                          [0.05996126, -0.21075758], [-0.00675234, -0.19188942],
                          [-0.25371405, -0.98044276], [0.55180097, -1.3375797],
                          [-0.76439965, -0.8849516], [-0.1852389, -0.76670283],
                          [-0.6538293, -2.109323], [0.11706313, -0.14159685],
                          [-0.26565668, 0.08206904], [-1.0511935, -0.28469092],
                          [0.22915375, 0.2485466], [1.4214072, 0.02810444],
                          [0.7648947, -1.3637407], [-0.01231889, -0.02892348],
                          [-0.1330762, 0.0219465], [0.8961761, -1.2976432],
                          [0.83349395, -1.8242016], [0.15122458, -0.9597366],
                          [0.7570322, -0.73728824], [-0.04838032, -0.8663991],
                          [0.32632858, -0.5200325], [0.7823914, -1.020006],
                          [0.5874542, -1.020459], [-0.4918128, -0.85094],
                          [-0.24947, -0.20599724], [-1.4349735, 0.19630724],
                          [-0.49690107, -0.58586204], [0.06130999, -0.14850587],
                          [0.66610545, -0.06235093], [-0.29052478, 0.40215907],
                          [0.24728307, 0.23677489], [-0.05339833, 0.22958362],
                          [-0.44152835, -0.58153844], [0.4723678, -0.06656095],
                          [0.32210657, -0.03144099], [0.6663985, 0.39230958],
                          [0.57831913, 0.19480982], [-0.96823174, 0.00828598],
                          [-0.7640736, 0.00441009], [-0.5589211, 0.17509514],
                          [0.01523143, -0.7975017], [0.3268571, -0.1870772],
                          [1.4704096, 0.8472788], [0.23348817, -0.48313117],
                          [-0.57006484, -0.77375746]]),
                rtol=1e-3)
        # Second read instance
        instance = instances[1]
        fields = instance.fields
        assert [token.metadata for token in fields["raw_tokens"].field_list] == [
            'Ms.', 'Haag', 'plays', 'Elianti', '.']
        if ancestor == "parent":
            assert fields["labels"].labels == ['NP', 'NP', 'VP', 'NP', 'S']
        elif ancestor == "grandparent":
            assert fields["labels"].labels == ['S', 'S', 'S', 'VP', 'None']
        else:
            # ancestor is greatgrandparent
            assert fields["labels"].labels == ['None', 'None', 'None', 'S', 'None']
        if use_contextualizer:
            assert_allclose(
                fields["token_representations"].array[:, :2],
                np.array([[0.6757653, -0.80925614], [-1.9424553, -1.0854281],
                          [-0.09960067, 0.17525218], [0.09222834, -0.8534998],
                          [-0.66507375, -0.5633631]]),
                rtol=1e-3)
@pytest.mark.parametrize('lazy', (True, False))
@pytest.mark.parametrize('max_instances', (1, 2, 0.5, 0.75, 1.0))
def test_reproducible_with_and_without_contextualization(self, lazy, max_instances):
uncontextualized_params = Params({
"max_instances": max_instances,
"lazy": lazy})
uncontextualized_reader = ConstituencyAncestorPredictionDatasetReader.from_params(uncontextualized_params)
uncontextualized_instances = list(uncontextualized_reader.read(str(self.data_path)))
contextualized_params = Params({
"lazy": lazy,
"max_instances": max_instances,
"contextualizer": {
"type": "precomputed_contextualizer",
"representations_path": self.contextualizer_path
}})
contextualized_reader = ConstituencyAncestorPredictionDatasetReader.from_params(contextualized_params)
contextualized_instances = list(contextualized_reader.read(str(self.data_path)))
# Assert they are the same
for uncontextualized_instance, contextualized_instance in zip(uncontextualized_instances,
contextualized_instances):
assert ([token.metadata for token in uncontextualized_instance.fields["raw_tokens"].field_list] ==
[token.metadata for token in contextualized_instance.fields["raw_tokens"].field_list])
assert (uncontextualized_instance.fields["labels"].labels ==
contextualized_instance.fields["labels"].labels)
contextualized_extra_keys = list(set(contextualized_instance.fields.keys()) -
set(uncontextualized_instance.fields.keys()))
assert (set(contextualized_extra_keys) == set(["token_representations"]))
@pytest.mark.parametrize('lazy', (True, False))
@pytest.mark.parametrize('use_contextualizer', (True, False))
@pytest.mark.parametrize('max_instances', (1, 2, 0.5, 0.75, 1.0))
@pytest.mark.parametrize('ancestor', ('parent', 'grandparent', 'greatgrandparent'))
def test_truncation(self, lazy, use_contextualizer, max_instances, ancestor):
# Set up contextualizer, if using.
contextualizer = None
if use_contextualizer:
contextualizer = PrecomputedContextualizer(self.contextualizer_path)
reader = ConstituencyAncestorPredictionDatasetReader(
contextualizer=contextualizer,
ancestor=ancestor,
max_instances=max_instances,
lazy=lazy)
instances = list(reader.read(str(self.data_path)))
num_total_instances = 2
max_instances_to_num_instances = {
int(1): 1,
int(2): 2,
0.5: int(num_total_instances * 0.5),
0.75: int(num_total_instances * 0.75),
1.0: num_total_instances}
assert len(instances) == max_instances_to_num_instances[max_instances]
| [
"allennlp.common.Params",
"contexteval.data.dataset_readers.ConstituencyAncestorPredictionDatasetReader",
"contexteval.data.dataset_readers.ConstituencyAncestorPredictionDatasetReader.from_params",
"pytest.mark.parametrize",
"numpy.array",
"contexteval.contextualizers.PrecomputedContextualizer"
] | [((637, 683), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lazy"""', '(True, False)'], {}), "('lazy', (True, False))\n", (660, 683), False, 'import pytest\n'), ((689, 749), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_contextualizer"""', '(True, False)'], {}), "('use_contextualizer', (True, False))\n", (712, 749), False, 'import pytest\n'), ((755, 841), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ancestor"""', "('parent', 'grandparent', 'greatgrandparent')"], {}), "('ancestor', ('parent', 'grandparent',\n 'greatgrandparent'))\n", (778, 841), False, 'import pytest\n'), ((6599, 6645), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lazy"""', '(True, False)'], {}), "('lazy', (True, False))\n", (6622, 6645), False, 'import pytest\n'), ((6651, 6715), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""max_instances"""', '(1, 2, 0.5, 0.75, 1.0)'], {}), "('max_instances', (1, 2, 0.5, 0.75, 1.0))\n", (6674, 6715), False, 'import pytest\n'), ((8469, 8515), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lazy"""', '(True, False)'], {}), "('lazy', (True, False))\n", (8492, 8515), False, 'import pytest\n'), ((8521, 8581), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_contextualizer"""', '(True, False)'], {}), "('use_contextualizer', (True, False))\n", (8544, 8581), False, 'import pytest\n'), ((8587, 8651), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""max_instances"""', '(1, 2, 0.5, 0.75, 1.0)'], {}), "('max_instances', (1, 2, 0.5, 0.75, 1.0))\n", (8610, 8651), False, 'import pytest\n'), ((8657, 8743), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ancestor"""', "('parent', 'grandparent', 'greatgrandparent')"], {}), "('ancestor', ('parent', 'grandparent',\n 'greatgrandparent'))\n", (8680, 8743), False, 'import pytest\n'), ((1111, 1219), 'contexteval.data.dataset_readers.ConstituencyAncestorPredictionDatasetReader', 
'ConstituencyAncestorPredictionDatasetReader', ([], {'ancestor': 'ancestor', 'contextualizer': 'contextualizer', 'lazy': 'lazy'}), '(ancestor=ancestor,\n contextualizer=contextualizer, lazy=lazy)\n', (1154, 1219), False, 'from contexteval.data.dataset_readers import ConstituencyAncestorPredictionDatasetReader\n'), ((6839, 6893), 'allennlp.common.Params', 'Params', (["{'max_instances': max_instances, 'lazy': lazy}"], {}), "({'max_instances': max_instances, 'lazy': lazy})\n", (6845, 6893), False, 'from allennlp.common import Params\n'), ((6953, 7038), 'contexteval.data.dataset_readers.ConstituencyAncestorPredictionDatasetReader.from_params', 'ConstituencyAncestorPredictionDatasetReader.from_params', (['uncontextualized_params'], {}), '(uncontextualized_params\n )\n', (7008, 7038), False, 'from contexteval.data.dataset_readers import ConstituencyAncestorPredictionDatasetReader\n'), ((7160, 7332), 'allennlp.common.Params', 'Params', (["{'lazy': lazy, 'max_instances': max_instances, 'contextualizer': {'type':\n 'precomputed_contextualizer', 'representations_path': self.\n contextualizer_path}}"], {}), "({'lazy': lazy, 'max_instances': max_instances, 'contextualizer': {\n 'type': 'precomputed_contextualizer', 'representations_path': self.\n contextualizer_path}})\n", (7166, 7332), False, 'from allennlp.common import Params\n'), ((7438, 7516), 'contexteval.data.dataset_readers.ConstituencyAncestorPredictionDatasetReader.from_params', 'ConstituencyAncestorPredictionDatasetReader.from_params', (['contextualized_params'], {}), '(contextualized_params)\n', (7493, 7516), False, 'from contexteval.data.dataset_readers import ConstituencyAncestorPredictionDatasetReader\n'), ((9024, 9161), 'contexteval.data.dataset_readers.ConstituencyAncestorPredictionDatasetReader', 'ConstituencyAncestorPredictionDatasetReader', ([], {'contextualizer': 'contextualizer', 'ancestor': 'ancestor', 'max_instances': 'max_instances', 'lazy': 'lazy'}), '(contextualizer=contextualizer,\n 
ancestor=ancestor, max_instances=max_instances, lazy=lazy)\n', (9067, 9161), False, 'from contexteval.data.dataset_readers import ConstituencyAncestorPredictionDatasetReader\n'), ((1042, 1093), 'contexteval.contextualizers.PrecomputedContextualizer', 'PrecomputedContextualizer', (['self.contextualizer_path'], {}), '(self.contextualizer_path)\n', (1067, 1093), False, 'from contexteval.contextualizers import PrecomputedContextualizer\n'), ((8955, 9006), 'contexteval.contextualizers.PrecomputedContextualizer', 'PrecomputedContextualizer', (['self.contextualizer_path'], {}), '(self.contextualizer_path)\n', (8980, 9006), False, 'from contexteval.contextualizers import PrecomputedContextualizer\n'), ((3694, 5076), 'numpy.array', 'np.array', (['[[0.7541596, 0.36606207], [-0.3912218, 0.2728929], [0.4532569, 0.59446496],\n [-0.034773, 0.6178972], [0.05996126, -0.21075758], [-0.00675234, -\n 0.19188942], [-0.25371405, -0.98044276], [0.55180097, -1.3375797], [-\n 0.76439965, -0.8849516], [-0.1852389, -0.76670283], [-0.6538293, -\n 2.109323], [0.11706313, -0.14159685], [-0.26565668, 0.08206904], [-\n 1.0511935, -0.28469092], [0.22915375, 0.2485466], [1.4214072, \n 0.02810444], [0.7648947, -1.3637407], [-0.01231889, -0.02892348], [-\n 0.1330762, 0.0219465], [0.8961761, -1.2976432], [0.83349395, -1.8242016\n ], [0.15122458, -0.9597366], [0.7570322, -0.73728824], [-0.04838032, -\n 0.8663991], [0.32632858, -0.5200325], [0.7823914, -1.020006], [\n 0.5874542, -1.020459], [-0.4918128, -0.85094], [-0.24947, -0.20599724],\n [-1.4349735, 0.19630724], [-0.49690107, -0.58586204], [0.06130999, -\n 0.14850587], [0.66610545, -0.06235093], [-0.29052478, 0.40215907], [\n 0.24728307, 0.23677489], [-0.05339833, 0.22958362], [-0.44152835, -\n 0.58153844], [0.4723678, -0.06656095], [0.32210657, -0.03144099], [\n 0.6663985, 0.39230958], [0.57831913, 0.19480982], [-0.96823174, \n 0.00828598], [-0.7640736, 0.00441009], [-0.5589211, 0.17509514], [\n 0.01523143, -0.7975017], [0.3268571, -0.1870772], 
[1.4704096, 0.8472788\n ], [0.23348817, -0.48313117], [-0.57006484, -0.77375746]]'], {}), '([[0.7541596, 0.36606207], [-0.3912218, 0.2728929], [0.4532569, \n 0.59446496], [-0.034773, 0.6178972], [0.05996126, -0.21075758], [-\n 0.00675234, -0.19188942], [-0.25371405, -0.98044276], [0.55180097, -\n 1.3375797], [-0.76439965, -0.8849516], [-0.1852389, -0.76670283], [-\n 0.6538293, -2.109323], [0.11706313, -0.14159685], [-0.26565668, \n 0.08206904], [-1.0511935, -0.28469092], [0.22915375, 0.2485466], [\n 1.4214072, 0.02810444], [0.7648947, -1.3637407], [-0.01231889, -\n 0.02892348], [-0.1330762, 0.0219465], [0.8961761, -1.2976432], [\n 0.83349395, -1.8242016], [0.15122458, -0.9597366], [0.7570322, -\n 0.73728824], [-0.04838032, -0.8663991], [0.32632858, -0.5200325], [\n 0.7823914, -1.020006], [0.5874542, -1.020459], [-0.4918128, -0.85094],\n [-0.24947, -0.20599724], [-1.4349735, 0.19630724], [-0.49690107, -\n 0.58586204], [0.06130999, -0.14850587], [0.66610545, -0.06235093], [-\n 0.29052478, 0.40215907], [0.24728307, 0.23677489], [-0.05339833, \n 0.22958362], [-0.44152835, -0.58153844], [0.4723678, -0.06656095], [\n 0.32210657, -0.03144099], [0.6663985, 0.39230958], [0.57831913, \n 0.19480982], [-0.96823174, 0.00828598], [-0.7640736, 0.00441009], [-\n 0.5589211, 0.17509514], [0.01523143, -0.7975017], [0.3268571, -\n 0.1870772], [1.4704096, 0.8472788], [0.23348817, -0.48313117], [-\n 0.57006484, -0.77375746]])\n', (3702, 5076), True, 'import numpy as np\n'), ((6370, 6516), 'numpy.array', 'np.array', (['[[0.6757653, -0.80925614], [-1.9424553, -1.0854281], [-0.09960067, \n 0.17525218], [0.09222834, -0.8534998], [-0.66507375, -0.5633631]]'], {}), '([[0.6757653, -0.80925614], [-1.9424553, -1.0854281], [-0.09960067,\n 0.17525218], [0.09222834, -0.8534998], [-0.66507375, -0.5633631]])\n', (6378, 6516), True, 'import numpy as np\n')] |
# from labels import default_labeler
import numpy as np
from six import string_types
class unitsDict(dict):
"""
A dictionary sub-class for tracking units.
unitsDict instances support simple math operations (multiply,
divide, power)
The *key* of unitsDicts objects are the units, the values
represent the power of that unit. For example:
a unitsDict({'s':-1,'m':1}) object represents units of m/s.
"""
def copy(self,):
"""
Return a shallow copy of the present object.
"""
return unitsDict([(ky, val) for ky, val in list(self.items())])
def __mul__(self, other):
"""
Multiple the units in this instance by the units in the *other* object.
"""
out = self.copy()
if other.__class__ is unitsDict:
for u, vl in list(other.items()):
if u in list(out.keys()):
out[u] += vl
else:
out[u] = vl
return out
def __pow__(self, other):
"""
Raise the units in this object to the power of *other*.
"""
out = self.copy()
for u in self:
out[u] *= other
return out
def __div__(self, other):
"""
Divide the units in this instance by the units in the *other* object.
"""
out = self.copy()
if other.__class__ is unitsDict:
for u, vl in list(other.items()):
if u in list(out.keys()):
out[u] -= vl
else:
out[u] = -vl
return out
class varMeta(object):
"""
A class for variable metadata.
In particular, the units and name of the variable are stored here.
*units_style* specifies how to format the units.
0: no fractions (e.g. units of acceleration are: ms^{-2})
1: fractions (e.g. units of acceleration are: m/s^{2})
***Currently only units_style=0 is supported.***
"""
_units_style = 0
latex = True
_scale_place = 'top'
dim_names = []
def __eq__(self, other):
"""
Test for equivalence between varMeta objects.
"""
if (other.__class__ is varMeta and
self.name == other.name and
self._units == other._units):
return True
return False
def __mul__(self, other):
out = self.copy()
out.name = self.name + other.name
out._units = self._units * other._units
return out
def __pow__(self, other):
out = self.copy()
out.name = self.name + '^%d' % (other)
out._units = self._units ** other
return out
def __div__(self, other):
out = self.copy()
if other.name != '':
out.name = self.name + '/' + other.name
out._units = self._units / other._units
return out
def __init__(self, name, units=None, dim_names=[],
units_style=None, scale=0, vecnames={}):
self.vecnames = vecnames
self.dim_names = dim_names
if units.__class__ is not unitsDict:
self._units = unitsDict(units)
elif isinstance(units, string_types):
self._units = unitsDict({units: 1})
else:
self._units = units
self.name = name
self.xformat = r'$%s/[\mathrm{%s}]$'
self.scale = scale
if units_style is not None:
self._units_style = units_style
self.yformat = r'$%s/[\mathrm{%s}]$'
def _copy_rep(self, name=None):
"""
A copy method for use in constructing new varMeta objects from
a basic type.
It behaves as follows:
1) If self.name is None, it return None.
2) If the input is None, it returns a copy of itself.
3) Otherwise, it does a % replace of self.name with the input.
e.g. this is for use such as:
vm=varMeta(r"\overline{%s'%s'}",{'m':2,'s':-2})
vm._copy_rep(('u','u'))
"""
if self.name is None:
return None
if name is None:
name = self.name
else:
name = self.name % name
return varMeta(name,
(self._units and self._units.copy()),
list(self.dim_names),
self._units_style,
self.scale)
def copy(self, name=None):
"""
Return a copy of this varMeta object.
Optional variable *name* may be used to create a copy of these
units, with a new 'name'.
"""
if self.name is None and name is None:
return None
if name is None:
name = self.name
return varMeta(name,
(self._units and self._units.copy()),
list(self.dim_names),
self._units_style,
self.scale)
def __repr__(self,):
return "<varMeta for %s (%s)>" % (self.name, self.units)
def get_label(self, form=None, units_style=None):
"""
Get a formatted label for the variable.
"""
unit = self.get_units(units_style=units_style)
if unit is None:
return '$' + self.get_numer() + '$'
if form is None:
form = r'$%s/[\mathrm{%s}]$'
return form % (self.get_numer(), unit,)
def get_numer(self,):
if self.scale != 0 and self._scale_place == 'top':
return '10^{%d}%s' % (-self.scale, self.name)
else:
return self.name
@property
def units(self,):
"""
A shortcut to the units string.
"""
return self.get_units()
@property
def label(self,):
"""
A shortcut to the label.
"""
return self.get_label()
@property
def ylabel(self,):
"""
A shortcut to the ylabel.
"""
return self.get_label(form=self.yformat)
@property
def xlabel(self,):
"""
A shortcut to the xlabel.
"""
return self.label
def get_units(self, units_style=None,):
"""
Get the properly formatted units string.
"""
if self.scale != 0 and self._scale_place != 'top':
st = r'10^{%d}' % self.scale
else:
st = ''
if self._units is None:
return None
elif self._units.__class__ is str:
return self._units
elif None in self._units:
return self._units[None]
if units_style is None:
units_style = self._units_style
if units_style == 0:
ks = np.unique(np.array(self._units.values()))
ups = np.sort([ks[ks > 0]])[0][::-1]
dns = np.sort([ks[ks < 0]])[0]
st = r''
for ik in ups:
for ky, vl in list(self._units.items()):
if vl == ik:
st += ky
if ik != 1: # If the power is not 1, add an exponent:
st += '^{%d}' % ik
for ik in dns:
for ky, vl in list(self._units.items()):
if vl == ik:
st += '%s^{%d}' % (ky, ik)
return st
| [
"numpy.sort"
] | [((6816, 6837), 'numpy.sort', 'np.sort', (['[ks[ks < 0]]'], {}), '([ks[ks < 0]])\n', (6823, 6837), True, 'import numpy as np\n'), ((6767, 6788), 'numpy.sort', 'np.sort', (['[ks[ks > 0]]'], {}), '([ks[ks > 0]])\n', (6774, 6788), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 22 11:30:32 2018
@author: jkp
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data=pd.read_csv("/home/sysadm/Desktop/JKP BSPro/Used_startup_funding.csv")
#***Basic/General/Normal Information
data.head()
data.dtypes
data.info()
data.describe()
#Well this doesn't make any clear picture about this column, so simply we can ignore
#this feature for now
#One this we can notice that we have date column which canbe very useful in EDA so
#let's do feature of programming on it
##*** Data Modification
def temp(v):
try:
#pd.to_datetime(v)
return(pd.to_datetime(v.replace('.','/').replace('//','/')))
except:
print(v)
data["Date"].apply(lambda v: temp(v))
date=data["Date"].apply(lambda v: temp(v))
data["month_year"]=date.dt.strftime("%Y-%m")
data["Year"]=date.dt.strftime("%Y")
'''data['Month'] = data['Date'].dt.month
data['Year'] = data['Date'].dt.year
data['MY'] = (pd.to_datetime(data['Date'],format='%d/%m/%Y').dt.year*100)+(
pd.to_datetime(data['Date'],format='%d/%m/%Y').dt.month) # Works Fine'''
data['AmountInUSD']
data['amount']=data['AmountInUSD'].apply(lambda x:float(str(x).replace(",","")))
data['amount']
#data["amount"]=data["AmountInUSD"].str.replace(',', '').astype(float)
#print(data[["Date","month_year","Year","amount"]].head())
data[["Date","month_year","Year","amount"]]
#get list of numeric and categorical columns
get_numeric_cols= lambda df:list(df._get_numeric_data().columns)
num_cols=get_numeric_cols(data)
num_cols
cat_cols=np.setdiff1d(data.columns,num_cols)
cat_cols
#Check the data quality – Missing values, Outlier
pd.isnull(data).sum()
print(data['Remarks'])
print(data['Remarks'].unique())
data.isnull().any()
data['Remarks'].fillna(0, inplace=False)
ct=0
for i in data['Remarks']:
if i==0:
ct=ct+1
print('Total no. of NaN cells in Remarks column is ',ct)
print('Dimension of data is',data.shape)
#ct=data['Remarks'].isnull().sum()
RVac=(ct*100)/len(data)
print('Nan_cells_count_percentage in Remark column is ',RVac)
ct0=data['IndustryVertical'].isnull().sum()
RVac0=(ct0*100)/len(data)
print('Nan_cells_count_percentage in IndustryVertical column is ',RVac0)
#data=data.drop(['Remarks'], axis=1)
#data=data[data['Remarks'] != 0]
data['StartupName'].unique().shape
data['IndustryVertical'].unique().shape
data['SubVertical'].unique().shape
data['CityLocation'].unique().shape
data['InvestorsName'].unique().shape
data['InvestmentType'].unique().shape
data['AmountInUSD'].unique().shape
len(data['AmountInUSD'].unique().shape)*100/len(data)
# percentage of null values for all the columns.
pd.isnull(data).sum()/data.shape[0]*100
#So here we can see that 82.33% data has NaN values so we can ignore this
#Column for out prediction
#"Remarks" column has highest missing values, which useless for now
# We cannot analyse by tking null value out_of account
# as we have made lot of change so start from basic again
data.head()
data.dtypes
data.info()
data.describe()
data["amount"].plot.box()
#also anything above 98% and below 2% can be treated as outlier.
print(data["amount"].quantile(0.02))
print(data["amount"].quantile(0.98))
#Here anyting below 40000USD and anything above 100000000 USD is considered outliers
#*** Univariate, bivariate, multivariate
#Apply EDA techniques to identify what influences investment amount
#EDA(Effective Data Analysis)
# Univariate
yearfreq = data['Year'].value_counts().plot.bar()
month_year = data['month_year'].value_counts().plot.bar(figsize=(12,4))
data.groupby(["month_year"]).size().plot.bar(figsize=(12,5), color="steelblue")
x=data["InvestmentType"].value_counts()/data.shape[0]*100
x.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('Investment Type', fontsize=12)
plt.ylabel('Shaped Count', fontsize=12)
x0=data["IndustryVertical"].value_counts()/data.shape[0]*100
x0.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('Industry Vertical', fontsize=12)
plt.ylabel('Shaped Count', fontsize=12)
dt_amo=data['IndustryVertical'].groupby([data.IndustryVertical]).agg(
'count').nlargest(30)
dt_amo.plot(kind="bar",figsize=(16,9),grid=True,title="Industry wise distribution",
cmap='rainbow')
data["IndustryVertical"].value_counts().head(20)
data['IndustryVertical'].isnull().sum()
industryvertical = []
for indver in data['IndustryVertical']:
for inv in str(indver).split(","):
if inv != "":
industryvertical.append(inv.strip().lower())
StartUpIndvers = pd.Series(industryvertical).value_counts()#[:20]
StartUpIndvers
for i in range(len(industryvertical)):
if industryvertical[i] =='ECommerce':
industryvertical[i]='eCommerce'
if industryvertical[i] =='Ecommerce':
industryvertical[i]='eCommerce'
if industryvertical[i] =='ecommerce':
industryvertical[i]='eCommerce'
if industryvertical[i] =='Food & Beverages ':
industryvertical[i]='Food & Beverage '
if industryvertical[i] =='Food Delivery Platform':
industryvertical[i]='Online Food Delivery'
#Still we donot have covered all redudency
StartUpIndvers0 = pd.Series(industryvertical).value_counts()#[:20]
StartUpIndvers0.head(20)
StartUpIndvers0.head(20).plot(kind="bar",figsize=(16,9),grid=True,
title="Industry wise distribution",cmap='rainbow')
x1=data["SubVertical"].value_counts()/data.shape[0]*100
x1.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('Industry SubVertical', fontsize=12)
plt.ylabel('SubVerticalCount', fontsize=12)
x2=data["SubVertical"].value_counts()
x2.head(20).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('Industry SubVertical', fontsize=12)
plt.ylabel('Shaped Count', fontsize=12)
#online pharmacy has highest investments
data["SubVertical"].value_counts().head(20)
data['SubVertical'].isnull().sum()
industrysubvertical = []
for indsver in data['SubVertical']:
for insv in str(indsver).split(","):
if insv != "":
industrysubvertical.append(insv.strip().lower())
#else :
#investornames.append('unknown'.lower())
StartUpIndsvers = pd.Series(industrysubvertical).value_counts()#[:20]
StartUpIndsvers.isnull().sum()
#Still we donot have covered all redudency
StartUpIndsvers.head(20).plot(kind="bar",figsize=(16,9),grid=True,
title="Industry wise distribution",cmap='rainbow')
plt.xlabel('Industry SubVertical', fontsize=12)
plt.ylabel('Count', fontsize=12)
data['CityLocation'].value_counts().head(20)
data_ct=data['CityLocation'].groupby([data.CityLocation]).agg('count')
data_ct.plot(kind="bar",figsize=(16,9),grid=True,title="City wise distribution",
cmap='rainbow')
x3=data["CityLocation"].value_counts()/data.shape[0]*100
x3.head(20).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('City Location', fontsize=12)
plt.ylabel('Shaped Count For The City', fontsize=12)
x4=data["CityLocation"].value_counts()
x4.plot.bar(figsize=(12,5), color="steelblue") #x1.head(20)
plt.xlabel('City Location', fontsize=12)
plt.ylabel('Count For The City', fontsize=12)
x5=data["InvestorsName"].value_counts()#/data.shape[0]*100
x5.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('Investors Name', fontsize=12)
plt.ylabel('ShapedCount', fontsize=12)
dt_inv=data['InvestorsName'].groupby([data.InvestorsName]).agg('count').nlargest(10)
dt_inv.plot(kind="bar",figsize=(12,9),grid=True,title="Industry wise distribution",
cmap='rainbow')
data["InvestorsName"].value_counts().head(30)
data['InvestorsName'].isnull().sum()
investornames = []
for investor in data['InvestorsName']:
for inv in str(investor).split(","):
if inv != "":
investornames.append(inv.strip().lower())
else :
investornames.append('unknown'.lower())
StartUpInvestors = pd.Series(investornames).value_counts()[:20]
StartUpInvestors#.isnull().sum()
for i in range(len(investornames)):
if investornames[i] =='undisclosed investor':
investornames[i]='undisclosed investors'
if investornames[i] =='undisclosed':
investornames[i]='undisclosed investors'
#Still we donot have covered all undisclosed
StartUpInvestors0 = pd.Series(investornames).value_counts()#[:20]
StartUpInvestors0.head(20)
StartUpInvestors0.head(20).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('InvestorsName', fontsize=12)
plt.ylabel('Count', fontsize=12)
StartUpInvestors0.head(10).plot(kind="pie",figsize=(12,9),
title="Industry wise distribution",
autopct='%1.1f%%', startangle=90,cmap='rainbow')
plt.ylabel('Count/Freq', fontsize=12)
#Bivariet analysis
data.groupby(["Year"])["amount"].sum().plot(kind="pie",figsize=(12,9),
title="Industry wise distribution",
autopct='%1.1f%%', startangle=90,cmap='rainbow')
####shows key error but not in_regular
data.groupby(["month_year"])["amount"].mean().plot.bar(figsize=(12,5), color="steelblue")
plt.ylabel('Count Of Investment', fontsize=12)
#2 months have highest average investment.. March and May of 2017 have highest investements.
#Lowest investment was seen in the month of October 2017
X6=data.groupby('StartupName')['amount'].sum().sort_values(ascending=False)
X6.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('StatrUp Name', fontsize=12)
plt.ylabel('TotalAmountGotInvestedIn_c_USD ', fontsize=12)
##Paytm and Flipkart are the 2 startups with highest investments put in to them
X7=data.groupby('StartupName')['amount'].size().sort_values(ascending=False)
X7.head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('StatrUp Name', fontsize=12)
plt.ylabel('NumberOfInvestmentGot', fontsize=12)
##Swiggy is the comapany which received highest number of investments i.e,
#7 investments
x=data.groupby(["IndustryVertical"])["amount"].mean().sort_values(
ascending=False).head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('IndustryVertical', fontsize=12)
plt.ylabel('AverageAmountGotInvestedIn_c_USDperInvestor', fontsize=12)
##from the below graph we can see that average of people investing in online
#marketplace is more
x=data.groupby(["InvestorsName"])["amount"].sum().sort_values(
ascending=False).head(10).plot.bar(figsize=(12,5), color="steelblue")
plt.xlabel('InvestorsName', fontsize=12)
plt.ylabel('TotalAmountHvInvestedIn_c_USD', fontsize=12)
#Soft bank is the highest investor group in terms of sum invested
#***Hypothesis Testing
from scipy.stats import chi2_contingency
def df(df,cat_colS):
I = [0,2,6,8,10]
cat_col1=np.delete(cat_colS, I).tolist() # removed remarks,date,amountinusd(kept amount)
#columns
t=[]
t1=[]
for i in range(len(cat_col1)):
for j in range(i + 1, len(cat_col1)):
obsv=df.groupby([cat_col1[i],cat_col1[j]]).size()
obsv.name="Freq"
obsv=obsv.reset_index()
obsv=obsv.pivot_table(index=cat_col1[i],columns=cat_col1[j],values="Freq")
stat, p, dof, exp =chi2_contingency(obsv.fillna(0).values)
if p< 0.05:
t1= (cat_col1[i],cat_col1[j])
t.append(t1)
return(t)
a=df(data,cat_cols)
for b in a:
print( "%s is dependent on %s" %(b[0],b[1]))
####
#Summary:
###AmountinUSD has many missing values about 35% of data is missing.
## subvertical also has many missing values
#Remarks has lot of missing values-->we can ignore/drop remarks column fron analysis
#there are a lot of outliers in amountin USD column.
#Year 2016 had maximum number of investments
#Month July 2016 followed by January of 2016 has large number of funding.
##Seed Funding and Private Equity are the most preferable type of funding
#ConsumerInternet is the Industry vertical on which highest number of investement unlike
#Technology
##bangalore has highest number of investements
#Large number of the startup's funding are from undisclosed source
## ratan tata can be considered a special case, since all others are investment groups and he
#is an individual investing
##online pharmacy has highest investments
# 2 months have highest average investment.. March and May of 2017 have highest investements.
#Lowest investment was seen in the month of October 2017
##Paytm and Flipkart are the 2 startups with highest investments put in to them
##Swiggy is the comapany which received highest number if investments i.e, 7 investments
## from the graph we can see that average of people investing in online marketplace is more
#Soft bank is the highest investor group in terms of sum invested
#Investment type and the Year column influence the amount.
lstmsg=[10,20,10,10,20,10,20]
msg=['T','H','A','N','K','S','!']
plt.figure(figsize=(12,12))
colors=['red','green','orange']
plt.pie(lstmsg, labels=msg,autopct='THANKS!',startangle=310) #colors=colors,
plt.title('Thanks',color = 'blue',fontsize = 15)
plt.xlabel('The END', fontsize=12)
plt.show()
| [
"pandas.Series",
"pandas.isnull",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.figure",
"numpy.setdiff1d",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((182, 252), 'pandas.read_csv', 'pd.read_csv', (['"""/home/sysadm/Desktop/JKP BSPro/Used_startup_funding.csv"""'], {}), "('/home/sysadm/Desktop/JKP BSPro/Used_startup_funding.csv')\n", (193, 252), True, 'import pandas as pd\n'), ((1605, 1641), 'numpy.setdiff1d', 'np.setdiff1d', (['data.columns', 'num_cols'], {}), '(data.columns, num_cols)\n', (1617, 1641), True, 'import numpy as np\n'), ((3798, 3840), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Investment Type"""'], {'fontsize': '(12)'}), "('Investment Type', fontsize=12)\n", (3808, 3840), True, 'import matplotlib.pyplot as plt\n'), ((3841, 3880), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Shaped Count"""'], {'fontsize': '(12)'}), "('Shaped Count', fontsize=12)\n", (3851, 3880), True, 'import matplotlib.pyplot as plt\n'), ((3999, 4043), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Industry Vertical"""'], {'fontsize': '(12)'}), "('Industry Vertical', fontsize=12)\n", (4009, 4043), True, 'import matplotlib.pyplot as plt\n'), ((4044, 4083), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Shaped Count"""'], {'fontsize': '(12)'}), "('Shaped Count', fontsize=12)\n", (4054, 4083), True, 'import matplotlib.pyplot as plt\n'), ((5539, 5586), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Industry SubVertical"""'], {'fontsize': '(12)'}), "('Industry SubVertical', fontsize=12)\n", (5549, 5586), True, 'import matplotlib.pyplot as plt\n'), ((5587, 5630), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SubVerticalCount"""'], {'fontsize': '(12)'}), "('SubVerticalCount', fontsize=12)\n", (5597, 5630), True, 'import matplotlib.pyplot as plt\n'), ((5726, 5773), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Industry SubVertical"""'], {'fontsize': '(12)'}), "('Industry SubVertical', fontsize=12)\n", (5736, 5773), True, 'import matplotlib.pyplot as plt\n'), ((5774, 5813), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Shaped Count"""'], {'fontsize': '(12)'}), "('Shaped Count', fontsize=12)\n", (5784, 5813), True, 
'import matplotlib.pyplot as plt\n'), ((6478, 6525), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Industry SubVertical"""'], {'fontsize': '(12)'}), "('Industry SubVertical', fontsize=12)\n", (6488, 6525), True, 'import matplotlib.pyplot as plt\n'), ((6526, 6558), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {'fontsize': '(12)'}), "('Count', fontsize=12)\n", (6536, 6558), True, 'import matplotlib.pyplot as plt\n'), ((6901, 6941), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""City Location"""'], {'fontsize': '(12)'}), "('City Location', fontsize=12)\n", (6911, 6941), True, 'import matplotlib.pyplot as plt\n'), ((6942, 6994), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Shaped Count For The City"""'], {'fontsize': '(12)'}), "('Shaped Count For The City', fontsize=12)\n", (6952, 6994), True, 'import matplotlib.pyplot as plt\n'), ((7096, 7136), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""City Location"""'], {'fontsize': '(12)'}), "('City Location', fontsize=12)\n", (7106, 7136), True, 'import matplotlib.pyplot as plt\n'), ((7137, 7182), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count For The City"""'], {'fontsize': '(12)'}), "('Count For The City', fontsize=12)\n", (7147, 7182), True, 'import matplotlib.pyplot as plt\n'), ((7299, 7340), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Investors Name"""'], {'fontsize': '(12)'}), "('Investors Name', fontsize=12)\n", (7309, 7340), True, 'import matplotlib.pyplot as plt\n'), ((7341, 7379), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ShapedCount"""'], {'fontsize': '(12)'}), "('ShapedCount', fontsize=12)\n", (7351, 7379), True, 'import matplotlib.pyplot as plt\n'), ((8454, 8494), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""InvestorsName"""'], {'fontsize': '(12)'}), "('InvestorsName', fontsize=12)\n", (8464, 8494), True, 'import matplotlib.pyplot as plt\n'), ((8495, 8527), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {'fontsize': '(12)'}), "('Count', fontsize=12)\n", 
(8505, 8527), True, 'import matplotlib.pyplot as plt\n'), ((8738, 8775), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count/Freq"""'], {'fontsize': '(12)'}), "('Count/Freq', fontsize=12)\n", (8748, 8775), True, 'import matplotlib.pyplot as plt\n'), ((9147, 9193), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count Of Investment"""'], {'fontsize': '(12)'}), "('Count Of Investment', fontsize=12)\n", (9157, 9193), True, 'import matplotlib.pyplot as plt\n'), ((9478, 9517), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""StatrUp Name"""'], {'fontsize': '(12)'}), "('StatrUp Name', fontsize=12)\n", (9488, 9517), True, 'import matplotlib.pyplot as plt\n'), ((9518, 9576), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TotalAmountGotInvestedIn_c_USD """'], {'fontsize': '(12)'}), "('TotalAmountGotInvestedIn_c_USD ', fontsize=12)\n", (9528, 9576), True, 'import matplotlib.pyplot as plt\n'), ((9793, 9832), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""StatrUp Name"""'], {'fontsize': '(12)'}), "('StatrUp Name', fontsize=12)\n", (9803, 9832), True, 'import matplotlib.pyplot as plt\n'), ((9833, 9881), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""NumberOfInvestmentGot"""'], {'fontsize': '(12)'}), "('NumberOfInvestmentGot', fontsize=12)\n", (9843, 9881), True, 'import matplotlib.pyplot as plt\n'), ((10127, 10170), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""IndustryVertical"""'], {'fontsize': '(12)'}), "('IndustryVertical', fontsize=12)\n", (10137, 10170), True, 'import matplotlib.pyplot as plt\n'), ((10171, 10241), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""AverageAmountGotInvestedIn_c_USDperInvestor"""'], {'fontsize': '(12)'}), "('AverageAmountGotInvestedIn_c_USDperInvestor', fontsize=12)\n", (10181, 10241), True, 'import matplotlib.pyplot as plt\n'), ((10493, 10533), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""InvestorsName"""'], {'fontsize': '(12)'}), "('InvestorsName', fontsize=12)\n", (10503, 10533), True, 'import matplotlib.pyplot as plt\n'), ((10534, 
10590), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TotalAmountHvInvestedIn_c_USD"""'], {'fontsize': '(12)'}), "('TotalAmountHvInvestedIn_c_USD', fontsize=12)\n", (10544, 10590), True, 'import matplotlib.pyplot as plt\n'), ((12982, 13010), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (12992, 13010), True, 'import matplotlib.pyplot as plt\n'), ((13042, 13104), 'matplotlib.pyplot.pie', 'plt.pie', (['lstmsg'], {'labels': 'msg', 'autopct': '"""THANKS!"""', 'startangle': '(310)'}), "(lstmsg, labels=msg, autopct='THANKS!', startangle=310)\n", (13049, 13104), True, 'import matplotlib.pyplot as plt\n'), ((13119, 13165), 'matplotlib.pyplot.title', 'plt.title', (['"""Thanks"""'], {'color': '"""blue"""', 'fontsize': '(15)'}), "('Thanks', color='blue', fontsize=15)\n", (13128, 13165), True, 'import matplotlib.pyplot as plt\n'), ((13168, 13202), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""The END"""'], {'fontsize': '(12)'}), "('The END', fontsize=12)\n", (13178, 13202), True, 'import matplotlib.pyplot as plt\n'), ((13203, 13213), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13211, 13213), True, 'import matplotlib.pyplot as plt\n'), ((1701, 1716), 'pandas.isnull', 'pd.isnull', (['data'], {}), '(data)\n', (1710, 1716), True, 'import pandas as pd\n'), ((4585, 4612), 'pandas.Series', 'pd.Series', (['industryvertical'], {}), '(industryvertical)\n', (4594, 4612), True, 'import pandas as pd\n'), ((5212, 5239), 'pandas.Series', 'pd.Series', (['industryvertical'], {}), '(industryvertical)\n', (5221, 5239), True, 'import pandas as pd\n'), ((6212, 6242), 'pandas.Series', 'pd.Series', (['industrysubvertical'], {}), '(industrysubvertical)\n', (6221, 6242), True, 'import pandas as pd\n'), ((8309, 8333), 'pandas.Series', 'pd.Series', (['investornames'], {}), '(investornames)\n', (8318, 8333), True, 'import pandas as pd\n'), ((7933, 7957), 'pandas.Series', 'pd.Series', (['investornames'], {}), '(investornames)\n', (7942, 
7957), True, 'import pandas as pd\n'), ((10778, 10800), 'numpy.delete', 'np.delete', (['cat_colS', 'I'], {}), '(cat_colS, I)\n', (10787, 10800), True, 'import numpy as np\n'), ((2697, 2712), 'pandas.isnull', 'pd.isnull', (['data'], {}), '(data)\n', (2706, 2712), True, 'import pandas as pd\n')] |
import numpy as np
import cv2
class StrokeWidthDistanceTransform(object):
    """Stroke-width estimation via a distance transform on a binarized image.

    Pipeline: grayscale load -> optional inversion -> Otsu binarization ->
    distance transform -> connected components -> size/aspect-ratio cleaning.
    """

    def __init__(self,
                 dark_on_bright=True,
                 clean_ccs=2):
        # clean_ccs levels: 0 = keep all, 1 = size filter, 2 = size + aspect-ratio filter
        self._dark_on_bright = dark_on_bright
        self._clean_ccs = clean_ccs

    def apply_swt_dist_trafo(self, img_file):
        """Run the full pipeline on an image file; return (transform, clean boxes)."""
        trafo = self.distance_transform(img_file)
        raw_boxes = self.connected_components_cv(trafo)
        return trafo, self.clean_connected_components(raw_boxes)

    def distance_transform(self, img_file, norm=cv2.DIST_L2, mask=cv2.DIST_MASK_PRECISE):
        """Binarize the image (Otsu) and return its distance transform as uint8."""
        gray = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
        if self._dark_on_bright:
            # invert so that dark text becomes the foreground
            gray = -gray + 255
        _, binary = self.otsu_threshold(gray)
        return cv2.distanceTransform(binary, norm, mask).astype(np.uint8)

    def otsu_threshold(self, image):
        """Gaussian-smooth then Otsu-binarize; return (threshold, binary image)."""
        smoothed = cv2.GaussianBlur(image, (5, 5), 0)
        return cv2.threshold(smoothed, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    def connected_components_cv(self, image, connectivity=8):
        """Return bounding boxes (x, y, w, h) of all non-background components."""
        assert connectivity in (4, 8), f"Connectivity has to be 4 or 8 (was {connectivity})."
        num_labels, _, stats, _ = cv2.connectedComponentsWithStats(image, connectivity=connectivity)
        # label 0 is the background, so iteration starts at 1
        return [
            (stats[lbl, cv2.CC_STAT_LEFT],
             stats[lbl, cv2.CC_STAT_TOP],
             stats[lbl, cv2.CC_STAT_WIDTH],
             stats[lbl, cv2.CC_STAT_HEIGHT])
            for lbl in range(1, num_labels)
        ]

    def clean_connected_components(self, components):
        """Filter component boxes by size and aspect ratio per self._clean_ccs."""
        kept = []
        for box in components:
            # box is a 4-tuple (x, y, width, height)
            width, height = box[2], box[3]
            # test 1: reject components that are too small or too large
            if self._clean_ccs > 0 and (width < 3 or height < 3
                                        or height > 500 or width > 500):
                continue
            # test 2: reject long narrow components (extreme aspect ratio)
            if self._clean_ccs > 1 and (width / height > 8 or height / width > 8):
                continue
            kept.append(box)
        return kept
if __name__ == '__main__':
    # NOTE(review): placeholder paths — must be replaced with real files to run.
    img_path = "abc"
    page_path = "xyz"
    from python_util.parser.xml.page.page import Page
    # Load page and textlines
    page = Page(page_path)
    text_lines = page.get_textlines()
    # initialize SWT and textline labeling
    SWT = StrokeWidthDistanceTransform(dark_on_bright=True)
    swt_img = SWT.distance_transform(img_path)
    textline_stroke_widths = dict()  # stroke widths for every text line
    textline_heights = dict()  # text height for every text line
    for text_line in text_lines:
        # build surrounding polygons over text lines
        bounding_box = text_line.surr_p.to_polygon().get_bounding_box()
        xa, xb = bounding_box.x, bounding_box.x + bounding_box.width
        ya, yb = bounding_box.y, bounding_box.y + bounding_box.height
        # get swt for text line (crop of the full-page distance transform)
        text_line_swt = swt_img[ya:yb + 1, xa:xb + 1]
        # get connected components in text line
        text_line_ccs = SWT.connected_components_cv(text_line_swt)
        text_line_ccs = SWT.clean_connected_components(text_line_ccs)
        # go over connected components to estimate stroke width and text height of the text line
        swt_cc_values = []
        text_line_height = 0
        for cc in text_line_ccs:
            # component is a 4-tuple (x, y, width, height)
            # take max value in distance_transform as stroke_width for current CC (can be 0)
            swt_cc_values.append(np.max(text_line_swt[cc[1]: cc[1] + cc[3], cc[0]: cc[0] + cc[2]]))
            # new text height: tallest component seen so far in this line
            if cc[3] > text_line_height:
                text_line_height = cc[3]
        # median over component maxima; 0.0 when the line produced no components
        textline_stroke_widths[text_line.id] = np.median(swt_cc_values) if swt_cc_values else 0.0
textline_heights[text_line.id] = text_line_height | [
"numpy.median",
"python_util.parser.xml.page.page.Page",
"cv2.threshold",
"numpy.max",
"cv2.connectedComponentsWithStats",
"cv2.distanceTransform",
"cv2.GaussianBlur",
"cv2.imread"
] | [((3104, 3119), 'python_util.parser.xml.page.page.Page', 'Page', (['page_path'], {}), '(page_path)\n', (3108, 3119), False, 'from python_util.parser.xml.page.page import Page\n'), ((628, 670), 'cv2.imread', 'cv2.imread', (['img_file', 'cv2.IMREAD_GRAYSCALE'], {}), '(img_file, cv2.IMREAD_GRAYSCALE)\n', (638, 670), False, 'import cv2\n'), ((853, 893), 'cv2.distanceTransform', 'cv2.distanceTransform', (['image', 'norm', 'mask'], {}), '(image, norm, mask)\n', (874, 893), False, 'import cv2\n'), ((990, 1024), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(5, 5)', '(0)'], {}), '(image, (5, 5), 0)\n', (1006, 1024), False, 'import cv2\n'), ((1054, 1118), 'cv2.threshold', 'cv2.threshold', (['blur', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (1067, 1118), False, 'import cv2\n'), ((1357, 1423), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['image'], {'connectivity': 'connectivity'}), '(image, connectivity=connectivity)\n', (1389, 1423), False, 'import cv2\n'), ((4613, 4637), 'numpy.median', 'np.median', (['swt_cc_values'], {}), '(swt_cc_values)\n', (4622, 4637), True, 'import numpy as np\n'), ((4386, 4449), 'numpy.max', 'np.max', (['text_line_swt[cc[1]:cc[1] + cc[3], cc[0]:cc[0] + cc[2]]'], {}), '(text_line_swt[cc[1]:cc[1] + cc[3], cc[0]:cc[0] + cc[2]])\n', (4392, 4449), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
import matplotlib
matplotlib.use('Agg')
from math import ceil
import matplotlib.pyplot as plt
from sys import exit
from scipy.stats import gaussian_kde
import sklearn
import pandas as pd
from scipy.integrate import simps
# Pick the KernelDensity import path based on the installed sklearn version.
# NOTE(review): this parses the SECOND version component ('0.23.1' -> 23.0,
# '1.2.0' -> 2.0), so on sklearn >= 1.0 the minor number is compared against
# 24 — confirm this still selects the intended import path on modern sklearn.
sklearn_major_version = float(sklearn.__version__.split('.')[1])
if sklearn_major_version < 24 :
    from sklearn.neighbors.kde import KernelDensity
else :
    from sklearn.neighbors import KernelDensity
def kde(z, cdf=False, bandwidth=0.3):
    """Score samples of z by a Gaussian kernel-density estimate of their spread.

    The data is NaN-cleaned, standardized, and collapsed to one value per
    sample (row mean for 2-D input). A KDE is fit on those values and, per
    sample, a numerically integrated tail mass is returned:
    - cdf=False: mass of the density at points farther from 0 than the sample
      (a two-sided tail weight);
    - cdf=True: mass of the density below the sample (an empirical CDF).
    Returns an (n, 1) array aligned with the input samples.
    """
    #print(z)
    z = np.array(z)
    #Set NaN values to 0
    z[np.isnan(z)]=0
    # guard against zero/NaN std before standardizing
    # NOTE(review): for 2-D z, std is an array and `std == 0` is elementwise —
    # this `if` would raise on multi-column input; confirm intended usage is
    # per-column-identical or 1-D data.
    std = z.std(axis=0)
    if std == 0 or np.isnan(std) :
        std = 1
    z= (z - z.mean(axis=0)) / std
    factor=1
    #euc_dist = np.array([np.sqrt(np.sum((p-np.min(z,axis=0))**2)) for p in z] ).reshape(-1,1)
    # collapse each sample to a single score (row mean for 2-D input)
    if len(z.shape) == 2 :
        euc_dist = np.mean(z, axis=1).reshape(-1,1)
    else :
        euc_dist = z.reshape(-1,1)
    #print(euc_dist)
    # fit KDE on the per-sample scores and evaluate the density at each score
    kde = KernelDensity(bandwidth=bandwidth).fit(euc_dist)
    density = np.exp(kde.score_samples(euc_dist)).reshape(-1,1)
    min_euc_dist = min(euc_dist) #* -factor #0
    max_euc_dist = max(euc_dist) #* factor
    # evaluation grid over the observed score range; ddx is the grid step
    dd = np.linspace(min_euc_dist,max_euc_dist).reshape(-1,1)
    n=int(len(dd))
    ddx=(max_euc_dist-min_euc_dist)/n
    lin_density=np.exp(kde.score_samples(dd)).reshape(-1,1)
    n=len(density)
    cum_dense=np.zeros(n).reshape(-1,1)
    dd_range = range(len(dd))
    # Riemann-sum the grid density over the relevant region for every sample
    if not cdf :
        # two-sided tail: grid points farther from zero than the sample
        for ed,i in zip(euc_dist,range(n)):
            cum_dense[i] = np.sum([ lin_density[j] for j in dd_range if abs(dd[j]) > abs(ed) ]) * ddx
        return (cum_dense)
    # CDF: grid points strictly below the sample
    for ed,i in zip(euc_dist,range(n)):
        cum_dense[i] = np.sum([ lin_density[j] for j in dd_range if dd[j] < ed]) * ddx
    return(cum_dense)
def MAD(z):
    """Inverse-MAD outlier score per sample.

    Standardizes columns, reduces each row to its Euclidean norm, computes the
    median-absolute-deviation score of those norms, and returns 1/(0.1 + score)
    — so typical samples score high (~10) and outliers score near zero.
    """
    arr = np.array(z)
    if len(arr.shape) == 1:
        arr = arr.reshape(-1, 1)
    # column-wise standardization
    arr = (arr - arr.mean(axis=0)) / arr.std(axis=0)
    # per-row Euclidean norm
    norms = np.sqrt(np.sum(arr ** 2, axis=1))
    center = np.median(norms)
    spread = np.median(np.abs(norms - center))
    # robust z-score; 0.001 keeps the division finite when spread is zero
    scores = np.abs((norms - center) / (0.001 + spread))
    return 1 / (0.1 + scores)
| [
"numpy.mean",
"numpy.median",
"matplotlib.use",
"sklearn.__version__.split",
"sklearn.neighbors.KernelDensity",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.isnan",
"numpy.sum"
] | [((56, 77), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (70, 77), False, 'import matplotlib\n'), ((524, 535), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (532, 535), True, 'import numpy as np\n'), ((1797, 1808), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (1805, 1808), True, 'import numpy as np\n'), ((289, 319), 'sklearn.__version__.split', 'sklearn.__version__.split', (['"""."""'], {}), "('.')\n", (314, 319), False, 'import sklearn\n'), ((567, 578), 'numpy.isnan', 'np.isnan', (['z'], {}), '(z)\n', (575, 578), True, 'import numpy as np\n'), ((627, 640), 'numpy.isnan', 'np.isnan', (['std'], {}), '(std)\n', (635, 640), True, 'import numpy as np\n'), ((960, 994), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'bandwidth': 'bandwidth'}), '(bandwidth=bandwidth)\n', (973, 994), False, 'from sklearn.neighbors import KernelDensity\n'), ((1172, 1211), 'numpy.linspace', 'np.linspace', (['min_euc_dist', 'max_euc_dist'], {}), '(min_euc_dist, max_euc_dist)\n', (1183, 1211), True, 'import numpy as np\n'), ((1376, 1387), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1384, 1387), True, 'import numpy as np\n'), ((1687, 1743), 'numpy.sum', 'np.sum', (['[lin_density[j] for j in dd_range if dd[j] < ed]'], {}), '([lin_density[j] for j in dd_range if dd[j] < ed])\n', (1693, 1743), True, 'import numpy as np\n'), ((849, 867), 'numpy.mean', 'np.mean', (['z'], {'axis': '(1)'}), '(z, axis=1)\n', (856, 867), True, 'import numpy as np\n'), ((1949, 1963), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (1955, 1963), True, 'import numpy as np\n'), ((1986, 1998), 'numpy.median', 'np.median', (['z'], {}), '(z)\n', (1995, 1998), True, 'import numpy as np\n'), ((2030, 2042), 'numpy.median', 'np.median', (['z'], {}), '(z)\n', (2039, 2042), True, 'import numpy as np\n')] |
# Endianness conversion tools from https://github.com/Qiskit/qiskit-terra/issues/1148#issuecomment-438574708
import numpy as np
def state_num2str(basis_state_as_num, nqubits):
    """Render a basis-state index as a binary string, zero-padded to nqubits bits."""
    bits = format(basis_state_as_num, 'b')
    return bits.zfill(nqubits)
def state_str2num(basis_state_as_str):
    """Parse a binary basis-state string back into its integer index."""
    return int(basis_state_as_str, base=2)
def state_reverse(basis_state_as_num, nqubits):
    """Return the basis-state index with its nqubits-bit string reversed.

    This flips qubit ordering (endianness) for a single basis state.
    """
    bits = format(basis_state_as_num, 'b').zfill(nqubits)
    return int(bits[::-1], 2)
def get_adjusted_state(state):
    """Reorder statevector amplitudes to the opposite qubit-endianness.

    Each amplitude at basis index b is moved to the index whose nqubits-bit
    binary representation is the reverse of b's. Raises ValueError when the
    vector length is not a power of two.
    """
    nqubits = np.log2(state.shape[0])
    if nqubits % 1:
        raise ValueError("Input vector is not a valid statevector for qubits.")
    nqubits = int(nqubits)
    adjusted = np.zeros(2 ** nqubits, dtype=complex)
    for idx in range(2 ** nqubits):
        # bit-reversed destination index for this basis state
        mirrored = int(format(idx, 'b').zfill(nqubits)[::-1], 2)
        adjusted[mirrored] = state[idx]
    return adjusted
| [
"numpy.log2",
"numpy.zeros"
] | [((553, 576), 'numpy.log2', 'np.log2', (['state.shape[0]'], {}), '(state.shape[0])\n', (560, 576), True, 'import numpy as np\n'), ((726, 763), 'numpy.zeros', 'np.zeros', (['(2 ** nqubits)'], {'dtype': 'complex'}), '(2 ** nqubits, dtype=complex)\n', (734, 763), True, 'import numpy as np\n')] |
import signatures.fisherVector as fv
import numpy as np
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import Normalizer
def teste():
    """Smoke-test driver: load CNN feature/label CSVs and build Fisher vectors.

    Reads hard-coded pyCBIR CSV exports (training/test feature vectors and
    labels) and feeds the feature matrices to fv.fisher. Side effects only;
    returns None. Fixes over the original: file handles are now closed via
    context managers, the unused `import itertools` and dead debug variables
    were removed, and the repeated CSV-loading code is factored into a helper.
    """
    import csv

    # NOTE(review): developer-specific absolute path; parameterize to reuse.
    path = '/Users/romuere/Dropbox/Berkeley/workspace/pycbir 2/files/'

    def _load_csv(file_name, dtype):
        # Read a whole CSV into a numpy array of the requested dtype.
        with open(path + file_name) as handle:
            rows = list(csv.reader(handle, delimiter=','))
        return np.array(rows).astype(dtype=dtype)

    train_features = _load_csv('feature_vectors_cnn_training.csv', np.float64)
    test_features = _load_csv('feature_vectors_cnn_test.csv', np.float64)
    # labels are loaded to mirror the original flow, though fv.fisher below
    # does not consume them
    train_labels = _load_csv('labels_cnn_training.csv', np.uint16).reshape(-1)
    test_labels = _load_csv('labels_cnn_test.csv', np.uint16).reshape(-1)

    feature_size = 192
    n_comp = 2
    fv.fisher(train_features, test_features, n_comp, feature_size)
teste() | [
"numpy.array",
"signatures.fisherVector.fisher"
] | [((1912, 1974), 'signatures.fisherVector.fisher', 'fv.fisher', (['train_features', 'test_features', 'n_comp', 'feature_size'], {}), '(train_features, test_features, n_comp, feature_size)\n', (1921, 1974), True, 'import signatures.fisherVector as fv\n'), ((625, 636), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (633, 636), True, 'import numpy as np\n'), ((783, 794), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (791, 794), True, 'import numpy as np\n'), ((939, 950), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (947, 950), True, 'import numpy as np\n'), ((1137, 1148), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1145, 1148), True, 'import numpy as np\n')] |
#This file writes OpenDSS circuit from CYME reader.
#Written for ASU by <NAME>, <NAME>
# This version contains the script to generate csv files of the system under study
# This version contains the code to generate the PV generators in the system
# This version contains the generation of LineCodes according to cable type and geometry
# If changes are needed, please make them with discretion
###############################################################################################################
#Usage
#Read from cyme version 8.2
# Change Line Definition
###############################################################################################################
#imports following modules, make sure to install them.
#from collections import namedtuple
import os
#import Cyme_Reader as reader
import math
import numpy as np
import cmath
###############################################################################################################
#function definitions:
#convert functions
def convert_master(Substationlist, HeadNodeslist, Sourcelist, SourceEquivalentlist, Feederlist, Transformerlist, casename):
    """Build the OpenDSS master-file lines (circuit, redirects, solve commands).

    Returns a list of strings forming the Master.dss contents: the 'New
    Circuit.' definition from the first source/source-equivalent records,
    Redirect lines for the companion .dss files, voltage-base setup, and the
    snapshot solve commands. Substationlist, HeadNodeslist and Feederlist are
    accepted for the shared converter signature but not read here.
    """
    DSSMaster = []
    DSSMaster.append('!Master File for case {}\n'.format(casename))
    # NOTE(review): .format on the next two literals without a matching
    # placeholder for casename is a no-op / drops the extra argument.
    DSSMaster.append('Clear\n'.format(casename))
    DSSMaster.append('Set Datapath = "{}"\n'.format(os.getcwd(),casename))
    # Circuit definition from the first source record (assumes index 0 is the
    # feeder head source).
    Line = 'New Circuit.'+casename
    Line = Line + ' bus1='+Sourcelist[0].NodeID.replace(".","_")+'.1.2.3'
    Line = Line + ' BasekV='+SourceEquivalentlist[0].Voltage
    # Per-unit source voltage: LN operating voltage * sqrt(3) over a 12.47 kV
    # base. NOTE(review): the 12.47 base and the fallback record index [7]
    # are hard-coded for this study case — confirm for other systems.
    if (float(Sourcelist[0].OperatingVoltageA)*(math.sqrt(3)))/12.47 > 0:
        Line = Line + ' pu=' + str((float(Sourcelist[0].OperatingVoltageA)*(math.sqrt(3)))/12.47)
    elif (float(SourceEquivalentlist[7].OperatingVoltage1)*(math.sqrt(3)))/12.47 > 0:
        Line = Line + ' pu=' + str((float(SourceEquivalentlist[7].OperatingVoltage1)*(math.sqrt(3)))/12.47)
    else:
        print (' no operating voltage for circuit')
    # First-level Thevenin impedance of the source equivalent.
    Line = Line + ' r1='+SourceEquivalentlist[0].FirstLevelR1
    Line = Line + ' r0='+SourceEquivalentlist[0].FirstLevelR0
    Line = Line + ' x1='+SourceEquivalentlist[0].FirstLevelX1
    Line = Line + ' x0='+SourceEquivalentlist[0].FirstLevelX0
    Line = Line + '\n'
    #objects: redirect
    #TODO: Maybe do something for definitions/impedances change.
    DSSMaster.append(Line)
    DSSMaster.append('Redirect linespacing.dss')
    DSSMaster.append('Redirect wiredata.dss')
    DSSMaster.append('Redirect cablemodel.dss')
    DSSMaster.append('Redirect linegeometryover.dss')
    DSSMaster.append('Redirect linegeometryunder.dss')
    DSSMaster.append('Redirect protection.dss')
    DSSMaster.append('Redirect linecodes.dss')
    DSSMaster.append('Redirect linesover.dss')
    DSSMaster.append('Redirect linesunder.dss')
    DSSMaster.append('!Redirect loadshapes.dss')
    DSSMaster.append('Redirect loads.dss')
    DSSMaster.append('Redirect transformerscodes.dss')
    DSSMaster.append('Redirect transformers.dss')
    DSSMaster.append('Redirect capacitors.dss')
    DSSMaster.append('Redirect generators.dss')
    DSSMaster.append('Redirect loadmismatch.dss')
    DSSMaster.append('Redirect switchcontrol.dss')
    DSSMaster.append('Buscoords buscoords.dss')
    DSSMaster.append('')
    #energymeters and monitors
    DSSMaster.append('')
    #solve etc
    # Voltage bases: source LL, 2x and sqrt(3)x the first transformer
    # secondary, plus transformer record [6]'s secondary.
    # NOTE(review): indices [0] and [6] are case-specific — verify per system.
    Line = 'set Voltagebases = ['
    Line = Line + SourceEquivalentlist[0].Voltage + ','+ str(2*float(Transformerlist[0].KVLLsec))+ ','+ str(float(Transformerlist[0].KVLLsec)*(math.sqrt(3)))+ ',' + Transformerlist[6].KVLLsec
    # for 7.2 kV, str(float(SourceEquivalentlist[0].Voltage)/(math.sqrt(3)))
    Line = Line + ']'
    DSSMaster.append(Line)
    DSSMaster.append('CalcVoltagebases')
    DSSMaster.append('')
    DSSMaster.append('!New Energymeter.CW13 Element= Line.8638910 Terminal=1 option=(r,e,v)')
    DSSMaster.append('set mode = snap')
    DSSMaster.append('solve')
    DSSMaster.append('')
    #outputs
    DSSMaster.append('!plot profile')
    return DSSMaster
def convert_linespacing(Cablelist, CableConcentricNeutrallist, CableInsulationlist, CableConductorlist, Conductorlist, SpacingTableForLinelist, OverheadByphaseSettinglist, UndergroundlineSettinglist, Sectionlist):
    """Emit OpenDSS 'New Linespacing.' definitions from CYME spacing records.

    Only SpacingTableForLinelist is consumed; the other arguments keep the
    shared converter signature. Records with an empty per-circuit phase
    count are skipped. Positions are emitted in meters.
    """
    dss_spacings = []
    for record in SpacingTableForLinelist:
        # OpenDSS object names cannot contain spaces or dots
        name = record.ID.replace(" ", "_").replace(".", "_")
        if record.NBPhasesPerCircuit == '':
            continue
        # total conductors = phases * conductors-per-phase + neutrals
        n_conds = (int(record.NBPhasesPerCircuit)
                   * int(record.NBConductorsPerPhase)
                   + int(record.NBNeutrals))
        xs = record.PosOfCond1_X + " " + record.PosOfCond2_X + " " + record.PosOfCond3_X + " "
        ys = record.PosOfCond1_Y + " " + record.PosOfCond2_Y + " " + record.PosOfCond3_Y + " "
        if record.NBNeutrals == '1':
            xs += record.PosOfNeutralCond_X
            ys += record.PosOfNeutralCond_Y
        dss_spacings.append('New Linespacing.' + name
                            + ' nconds=' + str(n_conds)
                            + ' nphases=' + record.NBPhasesPerCircuit
                            + ' x=[' + xs + ']'
                            + ' h=[' + ys + ']'
                            + ' units=m')
    return dss_spacings
def convert_wiredata(Cablelist, CableConcentricNeutrallist, CableInsulationlist, CableConductorlist, Conductorlist, SpacingTableForLinelist, OverheadByphaseSettinglist, UndergroundlineSettinglist, Sectionlist):
    """Emit OpenDSS 'New Wiredata.' definitions for every overhead conductor.

    Only Conductorlist is consumed; the other arguments keep the shared
    converter signature. Conductors named 'NONE' or with a zero diameter
    are skipped.
    """
    dss_wires = []
    for cond in Conductorlist:
        if cond.ID == 'NONE':
            continue
        # equivalent solid-conductor radius from the cross-section area (mm^2)
        cap_radius = math.sqrt(float(cond.Size_mm2) / math.pi)
        header = ('New Wiredata.' + cond.ID
                  + ' Capradius=' + str(cap_radius)
                  + ' GMR=' + cond.GMR)
        if float(cond.Diameter) == 0:
            continue
        dss_wires.append(header
                         + ' DIAM=' + cond.Diameter
                         + ' RAC=' + cond.R25
                         + ' RDC=' + cond.FirstResistanceDC
                         + ' NormAmps=' + cond.Amps
                         + ' Runits=km radunits=cm gmrunits=cm')
    return dss_wires
def convert_cablemodel(Cablelist, CableConcentricNeutrallist, CableInsulationlist, CableConductorlist, Conductorlist, SpacingTableForLinelist, OverheadByphaseSettinglist, UndergroundlineSettinglist, Sectionlist):
    """Emit OpenDSS 'New CNData.' concentric-neutral cable definitions.

    For every cable, its insulation, phase-conductor, and concentric-neutral
    records are matched by ID; cables whose neutral record is missing are
    skipped (with a printed warning). Resistances are derived from material
    resistivity constants (ohm*mm^2/m for aluminum/copper); all dimensions
    are emitted in millimeters.
    """
    DSSCablemodel = []
    for cables in Cablelist:
        Line = 'New CNData.'+ cables.ID
        Line = Line + ' Runits=mm Radunits=mm GMRunits=mm'
        # locate the matching insulation record (last match wins)
        cablename = cables.ID
        insulationindex = -1
        for i in range(len(CableInsulationlist)):
            if cablename == CableInsulationlist[i].ID:
                insulationindex = i
        if insulationindex == -1 :
            print('Insulation name not found')
        # locate the matching phase-conductor record
        cablename = cables.ID
        conductorindex = -1
        for i in range(len(CableConductorlist)):
            if cablename == CableConductorlist[i].ID:
                conductorindex = i
        if conductorindex == -1 :
            print('Conductor name not found')
        # locate the matching concentric-neutral record; 'DEFAULT' cables get
        # sentinel -2 (i.e. the second-to-last record is used below)
        cablename = cables.ID
        ccneutralindex = -1
        for i in range(len(CableConcentricNeutrallist)):
            if cablename == CableConcentricNeutrallist[i].ID:
                ccneutralindex = i
            elif cablename == 'DEFAULT':
                ccneutralindex = -2
        if ccneutralindex == -1 :
            print('cablemodel_No neutral name, {} not found'.format(cables.ID))
            continue
        #cable
        # insulation geometry: DiaIns = overall diameter minus neutral layer
        Line = Line + ' InsLayer='+ CableInsulationlist[insulationindex].Thickness
        Line = Line + ' DiaIns='+ str(float(cables.OverallDiameter) - 2*float(CableConcentricNeutrallist[ccneutralindex].Thickness))
        Line = Line + ' DiaCable='+ cables.OverallDiameter
        # relative permittivity by insulation material
        Line = Line + ' EpsR='
        if CableInsulationlist[insulationindex].InsulationMaterialID == 'XLPE_FILLED':
            Line = Line + '2.3'
        elif CableInsulationlist[insulationindex].InsulationMaterialID == 'EPR':
            Line = Line + '3.0'
        elif CableInsulationlist[insulationindex].InsulationMaterialID == 'XLPE_UNFILLED':
            Line = Line + '2.5'
        else:
            print('Insulation material, {} not fount'.format(CableInsulationlist[insulationindex].InsulationMaterialID))
        #Phase Conductor
        # RDC = resistivity / cross-section area
        if CableConductorlist[conductorindex].MaterialID == 'ALUMINUM':
            Elecresis = 0.0000283
            Area = float(CableConductorlist[conductorindex].Size_mm2)
            Line = Line + ' RDC='+ str(Elecresis/Area)
        elif CableConductorlist[conductorindex].MaterialID == 'COPPER':
            Elecresis = 0.000017241
            Area = float(CableConductorlist[conductorindex].Size_mm2)
            Line = Line + ' RDC='+ str(Elecresis/Area)
        else:
            print('No matterial found')
        Line = Line + ' diam='+ CableConductorlist[conductorindex].Diameter
        # Neutral
        # strand resistance from strand diameter (= neutral layer thickness);
        # the 1.02 factor accounts for the spiraling of the strands
        if CableConcentricNeutrallist[ccneutralindex].MaterialID == 'ALUMINUM':
            Elecresis = 0.0000283
            Area = math.pi * (float(CableConcentricNeutrallist[ccneutralindex].Thickness)/2)**2
            Line = Line + ' Rstrand='+ str(1.02*Elecresis/Area)
        elif CableConcentricNeutrallist[ccneutralindex].MaterialID == 'COPPER':
            Elecresis = 0.000017241
            Area = math.pi * (float(CableConcentricNeutrallist[ccneutralindex].Thickness)/2)**2
            Line = Line + ' Rstrand='+ str(1.02*Elecresis/Area)
        else:
            print('No matterial found')
        Line = Line + ' DiaStrand='+ CableConcentricNeutrallist[ccneutralindex].Thickness
        Line = Line + ' K='+ CableConcentricNeutrallist[ccneutralindex].NumberOfWires
        DSSCablemodel.append(Line)
    return DSSCablemodel
def convert_linegeometryover(Cablelist, CableConcentricNeutrallist, CableInsulationlist, CableConductorlist, Conductorlist, SpacingTableForLinelist, OverheadByphaseSettinglist, UndergroundlineSettinglist, Sectionlist):
    """Emit OpenDSS 'New LineGeometry.' definitions for overhead by-phase lines.

    For each overhead setting, the matching section (by SectionID) supplies
    the phase string; the entry references the spacing table and the three
    phase conductors. Fixes over the original: the always-False
    `len(Phase) > len(Phase)` self-comparison is replaced with the intended
    nconds > nphases test, repeated section lookups are hoisted, and the
    double-negative warning message is corrected.
    """
    DSSLinegeometryOver = []
    for linegeo in OverheadByphaseSettinglist:
        # find this device's section (last match wins, as before)
        overheadindex = -1
        for i, section in enumerate(Sectionlist):
            if linegeo.SectionID == section.SectionID:
                overheadindex = i
        if overheadindex == -1:
            # NOTE(review): on a miss this still falls through and indexes
            # Sectionlist[-1] (the last section) — consider skipping instead.
            print('No conductors found')
        n_phases = len(Sectionlist[overheadindex].Phase)
        n_conds = n_phases  # one conductor per phase for overhead by-phase lines
        Line = ('New LineGeometry.' + linegeo.DeviceNumber
                + ' nconds=' + str(n_conds)
                + ' nphases=' + str(n_phases)
                + ' spacing=' + linegeo.SpacingID.replace(" ", "_").replace(".", "_")
                + ' wires=[' + linegeo.CondID_A + " " + linegeo.CondID_B + " " + linegeo.CondID_C + "]")
        # Kron-reduce extra conductors down to the phase count; never true
        # here since n_conds == n_phases (the original compared len(Phase)
        # with itself, which was also always False).
        if n_conds > n_phases:
            Line = Line + ' reduce=yes'
        DSSLinegeometryOver.append(Line)
    return DSSLinegeometryOver
def convert_linegeometryunder(Cablelist, CableConcentricNeutrallist, CableInsulationlist, CableConductorlist, Conductorlist, SpacingTableForLinelist, OverheadByphaseSettinglist, UndergroundlineSettinglist, Sectionlist):
    """Emit OpenDSS 'New LineGeometry.' definitions for underground cables.

    For each underground setting, the matching section supplies the phase
    string; conductor positions are taken from a hard-coded table keyed on
    the cable type and the number of parallel cables (currently only
    'UA1/0T_UG' in 1- or 3-phase trefoil arrangements is supported).
    """
    DSSLinegeometryUnder = []
    for linegeound in UndergroundlineSettinglist:
        Line = 'New LineGeometry.'+ linegeound.DeviceNumber
        # find this device's section (last match wins)
        undergeo= linegeound.SectionID
        undergeoindex = -1
        for i in range(len(Sectionlist)):
            if undergeo == Sectionlist[i].SectionID:
                undergeoindex = i
        if undergeoindex == -1 :
            # NOTE(review): execution continues and Sectionlist[-1] is used below
            print('No conductors not found')
        # one conductor per phase, times the number of parallel cables
        Line = Line + ' nconds=' + str(int(linegeound.NumberOfCableInParallel)*len(Sectionlist[undergeoindex].Phase))
        Line = Line + ' nphases=' + str(len(Sectionlist[undergeoindex].Phase))
        #TODO: find a way to get default values for cables
        # hard-coded trefoil positions (ft) for the known cable type
        if linegeound.LineCableID == 'UA1/0T_UG':
            if str(len(Sectionlist[undergeoindex].Phase)) in ['1']:
                Line = Line + ' CNcables=' + linegeound.LineCableID
                Line = Line + ' h=0.05'
                Line = Line + ' x=0'
                Line = Line + ' units=ft'
            elif str(len(Sectionlist[undergeoindex].Phase)) in ['3']:
                if str(int(linegeound.NumberOfCableInParallel)) in ['1']:
                    Line = Line + ' CNcables=' + "[" + linegeound.LineCableID +" " + linegeound.LineCableID +" " + linegeound.LineCableID + "]"
                    Line = Line + ' cond=1 h=0.05 x=-0.05'
                    Line = Line + ' cond=2 h=0.05 x=0.05'
                    Line = Line + ' cond=3 h=0.14 x=0'
                    Line = Line + ' units=ft'
                elif str(int(linegeound.NumberOfCableInParallel)) in ['2']:
                    # two parallel trefoils, the second offset in x
                    Line = Line + ' cond=1' + ' CNcable=' + linegeound.LineCableID + ' h=0.08' + " x=-0.08"
                    Line = Line + ' cond=2' + ' CNcable=' + linegeound.LineCableID + ' h=0.08' + " x=0.08"
                    Line = Line + ' cond=3' + ' CNcable=' + linegeound.LineCableID + ' h=0.2' + " x=0"
                    Line = Line + ' cond=4' + ' CNcable=' + linegeound.LineCableID + ' h=0.08' + " x=0.25"
                    Line = Line + ' cond=5' + ' CNcable=' + linegeound.LineCableID + ' h=0.08' + " x=0.4"
                    Line = Line + ' cond=6' + ' CNcable=' + linegeound.LineCableID + ' h=0.2' + " x=0.3"
                    Line = Line + ' units=ft'
            else:
                print ('do not identified phase')
            #TODO: Will need to add new cables for any new case.
        else:
            print ('No cable type identified for distances')
        # Kron-reduce parallel conductors down to the phase count
        if int(linegeound.NumberOfCableInParallel)*len(Sectionlist[undergeoindex].Phase) > len(Sectionlist[undergeoindex].Phase):
            Line = Line + ' reduce=yes'
        DSSLinegeometryUnder.append(Line)
    return DSSLinegeometryUnder
def convert_lineover(Cablelist, CableConcentricNeutrallist, CableInsulationlist, CableConductorlist, Conductorlist, SpacingTableForLinelist, OverheadByphaseSettinglist, UndergroundlineSettinglist, Sectionlist, switchlist):
    """Emit OpenDSS Line definitions for overhead by-phase sections.

    Returns (DSSLinesOver, CSVLinesOver): the .dss 'New/Edit Line.' strings
    and a CSV summary. Devices already defined as switches (listed in
    switchlist) are emitted as 'Edit' instead of 'New'. Bus terminals are
    suffixed per phase (A->.1.0, B->.2.0, C->.3.0, ABC->.1.2.3).
    """
    DSSLinesOver = []
    CSVLinesOver = ['DeviceID, From bus, To bus, Phase, geocode, length']
    for linesys in OverheadByphaseSettinglist:
        # switches were created elsewhere, so only edit them here
        if linesys.DeviceNumber in switchlist:
            Line = 'Edit'
        else:
            Line = 'New'
        Line = Line + ' Line.'+ linesys.DeviceNumber
        # find this device's section (last match wins)
        linebus = linesys.SectionID
        linebusindex = -1
        for i in range(len(Sectionlist)):
            if linebus == Sectionlist[i].SectionID:
                linebusindex = i
        if linebusindex == -1 :
            # NOTE(review): execution continues and Sectionlist[-1] is used below
            print('Line bus not found')
        # phase-dependent bus terminal suffixes
        if Sectionlist[linebusindex].Phase in ['A']:
            Line = Line + ' bus1='+ Sectionlist[linebusindex].FromNodeID.replace(".","_")+'.1.0'
            Line = Line + ' bus2='+ Sectionlist[linebusindex].ToNodeID.replace(".","_")+'.1.0'
        elif Sectionlist[linebusindex].Phase in ['B']:
            Line = Line + ' bus1='+ Sectionlist[linebusindex].FromNodeID.replace(".","_")+'.2.0'
            Line = Line + ' bus2='+ Sectionlist[linebusindex].ToNodeID.replace(".","_")+'.2.0'
        elif Sectionlist[linebusindex].Phase in ['C']:
            Line = Line + ' bus1='+ Sectionlist[linebusindex].FromNodeID.replace(".","_")+'.3.0'
            Line = Line + ' bus2='+ Sectionlist[linebusindex].ToNodeID.replace(".","_")+'.3.0'
        elif Sectionlist[linebusindex].Phase in ['ABC']:
            Line = Line + ' bus1='+ Sectionlist[linebusindex].FromNodeID.replace(".","_")+'.1.2.3'
            Line = Line + ' bus2='+ Sectionlist[linebusindex].ToNodeID.replace(".","_")+'.1.2.3'
        else:
            print ('do not identified phase')
        # geometry name matches the device's LineGeometry emitted earlier
        Line = Line + ' Geometry='+linesys.DeviceNumber
        Line = Line + ' length='+ linesys.Length
        Line = Line + ' units='+'m'
        DSSLinesOver.append(Line)
        csvline = linesys.DeviceNumber+ "," + Sectionlist[linebusindex].FromNodeID.replace(".","_") + "," + Sectionlist[linebusindex].ToNodeID.replace(".","_") +","+Sectionlist[linebusindex].Phase+","+linesys.DeviceNumber+","+linesys.Length
        CSVLinesOver.append(csvline)
    return DSSLinesOver,CSVLinesOver
def convert_lineunder(Cablelist, CableConcentricNeutrallist, CableInsulationlist, CableConductorlist, Conductorlist, SpacingTableForLinelist, OverheadByphaseSettinglist, UndergroundlineSettinglist, Sectionlist, switchlist):
    """Build OpenDSS ``Line`` statements for underground cables.

    Reads ``UndergroundlineSettinglist``, ``Sectionlist`` and ``switchlist``;
    the other parameters exist only for signature parity with the other
    converters.  Returns ``(DSSLinesUnder, CSVLinesUnder)``.
    """
    _SINGLE_PHASE = {'A': '.1.0', 'B': '.2.0', 'C': '.3.0'}
    DSSLinesUnder = []
    CSVLinesUnder = ['Switch, DeviceID, From bus, To bus, Phase, geocode, length']
    for lineund in UndergroundlineSettinglist:
        if lineund.DeviceNumber in switchlist:
            Line = 'Edit'
            Switchline = 'Switch'
        else:
            Line = 'New'
            Switchline = 'No'
        Line += ' Line.' + lineund.DeviceNumber
        # Find the section record; the last matching entry wins.
        linebusindex = -1
        for idx, candidate in enumerate(Sectionlist):
            if candidate.SectionID == lineund.SectionID:
                linebusindex = idx
        if linebusindex == -1:
            print('Line bus not found')
        section = Sectionlist[linebusindex]
        frombus = section.FromNodeID.replace(".", "_")
        tobus = section.ToNodeID.replace(".", "_")
        if section.Phase in _SINGLE_PHASE:
            suffix = _SINGLE_PHASE[section.Phase]
            Line += ' bus1=' + frombus + suffix
            Line += ' bus2=' + tobus + suffix
            Line += ' phases=1'
        elif section.Phase == 'ABC':
            # Parallel cables repeat the terminal list once per cable.
            parallel = str(int(lineund.NumberOfCableInParallel))
            if parallel == '1':
                Line += ' bus1=' + frombus + '.1.2.3'
                Line += ' bus2=' + tobus + '.1.2.3'
                Line += ' phases=3'
            elif parallel == '2':
                Line += ' bus1=' + frombus + '.1.2.3.1.2.3'
                Line += ' bus2=' + tobus + '.1.2.3.1.2.3'
                Line += ' phases=3'
        else:
            print('do not identified phase')
        #TODO: change if cable type has 0 R1
        if lineund.LineCableID in ['CableWith0R1', 'Cable2With0R1']:
            Line += ' R1=1.00E-07 R0=0 X1=0 X0=0 B1=0 B0=0 length=1'
        else:
            Line += ' geometry=' + lineund.DeviceNumber
            Line += ' length=' + lineund.Length
        Line += ' units=' + 'm'
        Line += ' basefreq=' + '60'
        DSSLinesUnder.append(Line)
        CSVLinesUnder.append(Switchline + "," + lineund.DeviceNumber + ","
                             + frombus + "," + tobus + "," + section.Phase
                             + "," + lineund.DeviceNumber + "," + lineund.Length)
    return DSSLinesUnder, CSVLinesUnder
def convert_transformercodes(Transformerlist):
    """Build OpenDSS ``XfmrCode`` definitions from CYME transformer records.

    The percent sequence impedances Z1/Z0 (on the transformer kVA base) are
    split into R and X via the X/R ratios, assembled into a sequence matrix,
    transformed to phase quantities, and the (0, 0) self term is used for the
    %R / XHL fields.  Returns ``(DSSTransformerscodes, CSVTransformers)``
    where the CSV list starts with a header row.
    """
    DSSTransformerscodes = []
    CSVTransformers = ['Code ID, phases, R, X, Vprimary, Vsecondary, KVA']
    for transformercus in Transformerlist:
        Line = 'New XfmrCode.'+transformercus.ID
        # Type '1' denotes a single-phase unit; everything else is 3-phase.
        if transformercus.Type in ['1']:
            Line = Line + ' Phases='+'1'
        else:
            Line = Line + ' Phases='+'3'
        # Scale percent impedances by the kVA base.
        Z1 = float(transformercus.Z1) * float (transformercus.KVA) / 100
        Z0 = float(transformercus.Z0) * float (transformercus.KVA) / 100
        XR = float(transformercus.XR)
        XR0 = float(transformercus.XR0)
        # Split |Z| into R and X using the X/R ratio: R = |Z| / sqrt(1 + (X/R)^2).
        R1 = Z1 / math.sqrt(1 + XR * XR)
        R0 = Z0 / math.sqrt(1 + XR0 * XR0)
        X1 = Z1 / math.sqrt(1 + 1 / (XR * XR))
        X0 = Z0 / math.sqrt(1 + 1 / (XR0 * XR0))
        complex0 = complex(R0, X0)
        complex1 = complex(R1, X1)
        # Sequence-impedance matrix diag(Z0, Z1, Z1).
        matrix = np.matrix(
            [[complex0, 0, 0], [0, complex1, 0], [0, 0, complex1]]
        )
        # a = exp(j*120deg), the symmetrical-components rotation operator.
        a = 1 * cmath.exp(2 * math.pi * 1j / 3)
        T = np.matrix([[1., 1., 1.], [1., a * a, a], [1., a, a * a]])
        T_inv = T.I
        # Phase-domain impedance; only the (0, 0) self term is used below.
        Zabc = T * matrix * T_inv
        Z_perc = ((Zabc.item((0, 0))) / float (transformercus.KVA)) * 100
        R_perc = Z_perc.real/2  # total R split evenly between the two windings
        x12 = Z_perc.imag
        Line = Line + ' Windings='+'2' #both cases
        Line = Line + ' Wdg='+'1'
        if transformercus.Type in ['1']:
            Line = Line + ' kV='+transformercus.KVLLprim#'7.2'
        else:
            Line = Line + ' kV='+transformercus.KVLLprim#'12.47'
        Line = Line + ' kVA='+transformercus.KVA
        Line = Line + ' %R='+str(R_perc)
        Line = Line + ' Wdg='+'2'
        if transformercus.Type in ['1']:
            Line = Line + ' kV='+transformercus.KVLLsec#'0.207'
        else:
            Line = Line + ' kV='+transformercus.KVLLsec#'0.480'
        Line = Line + ' kVA='+transformercus.KVA
        Line = Line + ' %R='+str(R_perc)
        Line = Line + ' XHL='+str(x12)
        Line = Line + ' %NoLoadLoss='+str((float(transformercus.NoLoadLosses)/(float(transformercus.KVA)))*100)
        DSSTransformerscodes.append(Line)
        csvtransformers = transformercus.ID + "," + transformercus.Type+ "," + str(2* R_perc) + "," + str(x12)+ "," +transformercus.KVLLprim+ "," +transformercus.KVLLsec+ "," +transformercus.KVA
        CSVTransformers.append(csvtransformers)
    return DSSTransformerscodes,CSVTransformers
def convert_transformer(Transformerlist, TransformerSettinglist, Sectionlist):
    """Build OpenDSS ``New Transformer`` statements plus a CSV summary.

    ``Transformerlist`` is unused here (kept for signature parity); the
    per-device settings and the section topology drive the output.
    Returns ``(DSSTransformers, CSVTransformers2)``.
    """
    _PHASE_SUFFIX = {'A': '.1.0', 'B': '.2.0', 'C': '.3.0', 'ABC': '.1.2.3'}
    DSSTransformers = []
    CSVTransformers2 = ['Code ID, Bus 1, Bus 2, Connection']
    for trafos in TransformerSettinglist:
        Line = 'New Transformer.' + trafos.DeviceNumber
        Line += ' XfmrCode=' + trafos.EqID
        Line += ' Wdg=' + '1'
        # Locate the section this transformer sits on (last match wins).
        trafoprimaryindex = -1
        for idx, candidate in enumerate(Sectionlist):
            if candidate.SectionID == trafos.SectionID:
                trafoprimaryindex = idx
        if trafoprimaryindex == -1:
            print('Trafo bus primary not found')
        section = Sectionlist[trafoprimaryindex]
        frombus = section.FromNodeID.replace(".", "_")
        tobus = section.ToNodeID.replace(".", "_")
        suffix = _PHASE_SUFFIX.get(section.Phase)
        # Winding 1 (primary side: From node).
        Line += ' Bus=' + frombus
        if suffix is None:
            print('do not identified phase')
        else:
            Line += suffix
        Line += ' Tap=' + str(float(trafos.PrimTap) / 100)
        if trafos.Conn in ('6', '0'):
            Line += ' Conn=wye'
        else:
            print('connection is different')
        # Winding 2 (secondary side: To node).
        Line += ' Wdg=' + '2'
        Line += ' Bus=' + tobus
        if suffix is None:
            print('do not identified phase')
        else:
            Line += suffix
        Line += ' Tap=' + str(float(trafos.SecondaryTap) / 100)
        if trafos.Conn == '6':
            Line += ' Conn=delta'
        elif trafos.Conn == '0':
            Line += ' Conn=wye'
        else:
            print('connection is different')
        Line += ' core=shell'
        Line += ' Basefreq=' + '60'
        DSSTransformers.append(Line)
        CSVTransformers2.append(trafos.DeviceNumber + "," + frombus + ","
                                + tobus + "," + trafos.Conn)
    return DSSTransformers, CSVTransformers2
def convert_load(CustomerClasslist, Loadslist, CustomerLoadslist, LoadModelInformationlist, LoadEquivalentlist, Sectionlist, Transformerlist, TransformerSettinglist):
    """Build OpenDSS ``New Load`` statements from CYME customer loads.

    Only loads whose ``LoadModelID`` equals ``LoadModel`` ('1') and whose
    ``Value1`` is not the literal string "0.000000" are emitted.  Returns
    ``(DSSLoads, CSVLoads)``; the CSV list starts with a header row.
    """
    DSSLoads = []
    CSVLoads = ['Customer Number, Bus, Phase, Active Power [kW], Reactive Power [kVar], PF, Connection, ValueType']
    LoadModel = '1'
    #TODO: see what loadmodel is proper.
    for Loadcust in CustomerLoadslist:
        if Loadcust.LoadModelID == LoadModel:
            Line = 'New Load.'+Loadcust.CustomerNumber+'_'+Loadcust.LoadModelID
        else:
            continue  # other load models are skipped entirely
        # Single-letter phase codes are 1-phase loads; anything else 3-phase.
        if Loadcust.LoadPhase in ['A','B','C']:
            Line = Line + ' Phases='+'1'
        else:
            Line = Line + ' Phases='+'3'
        loadsection = Loadcust.SectionID
        loadsectionindex = -1
        # Linear scan; the last matching section wins.
        for i in range(len(Sectionlist)):
            if loadsection == Sectionlist[i].SectionID:
                loadsectionindex = i
        if loadsectionindex == -1 :
            # NOTE(review): falls through with index -1 (the last section).
            print('Load section not found')
        Line = Line + ' Bus1='+Sectionlist[loadsectionindex].FromNodeID.replace(".","_")
        if Loadcust.LoadPhase in ['A']:
            Line = Line +'.1.0'
        elif Loadcust.LoadPhase in ['B']:
            Line = Line +'.2.0'
        elif Loadcust.LoadPhase in ['C']:
            Line = Line +'.3.0'
        elif Loadcust.LoadPhase in ['ABC']:
            Line = Line +'.1.2.3'
        else:
            print ('do not identified phase')
        if (Loadcust.Value1) == "0.000000":
            continue  # zero-power loads are dropped
        # ValueType '2': Value2 is a percent power factor; '0': Value2 is kVAr.
        if Loadcust.ValueType == '2':
            Line = Line + ' kW='+Loadcust.Value1
            Line = Line + ' pf='+str(float(Loadcust.Value2)/100)
        elif Loadcust.ValueType == '0':
            Line = Line + ' kW='+Loadcust.Value1
            Line = Line + ' kVAr='+Loadcust.Value2
        else:
            print('A load with ValueType = {}'.format(Loadcust.ValueType))
        if Loadcust.LoadPhase in ['A','B','C']:
            Line = Line + ' kV='+'0.240'
        else:
            Line = Line + ' kV='+'0.208'
        Line = Line + ' Basefreq='+'60'
        Line = Line + ' Model='+'1'
        Line = Line + ' Vminpu='+'0.95'
        Line = Line + ' Vmaxpu='+'1.05'
        DSSLoads.append(Line)
        # NOTE(review): the PF column below is Value2/100 even when
        # ValueType == '0' (where Value2 is kVAr, not a percent PF) — confirm.
        csvline = Loadcust.CustomerNumber+","+ Sectionlist[loadsectionindex].FromNodeID.replace(".","_")+","+Loadcust.LoadPhase+","+Loadcust.Value1+","+Loadcust.Value2+","+str(float(Loadcust.Value2)/100)+","+"wye"+","+Loadcust.ValueType
        CSVLoads.append(csvline)
    return DSSLoads,CSVLoads
def convert_generators(Sectionlist, Electronicconvertergeneratorlist, Electronicconvertergeneratorsettinglist, Converterlist, Convertercontrolsettinglist, Longtermdynamicscurveextlist, Dggenerationmodellist, Controlleddevicelist):
    """Build OpenDSS ``New generator`` statements for converter-based DG (PV).

    The kW/pf rating is looked up in ``Dggenerationmodellist`` first under
    the 'DEFAULT' load model and, when that rating is >= 100, retried under
    the 'LoadModelName' model; generators without a usable rating are
    skipped.  Returns ``(DSSGenerators, CSVPVs)``.
    """
    DSSGenerators = []
    CSVPVs = ['Name, Bus, Phase, Voltage Level, Power Rating [kVA], PF']
    for genpv in Electronicconvertergeneratorsettinglist:
        Line = 'New generator.'+genpv.DeviceNumber
        pvsection = genpv.SectionID
        pvsectionindex = -1
        for i in range(len(Sectionlist)):
            if pvsection == Sectionlist[i].SectionID:
                pvsectionindex = i
        if pvsectionindex == -1 :
            # NOTE(review): continues with index -1 (the last section).
            print('PV section not found')
        Line = Line + ' Bus1='+Sectionlist[pvsectionindex].FromNodeID.replace(".","_")
        if genpv.EqPhase in ['A']:
            Line = Line +'.1.0'
            Line = Line +' phases=1'
        elif genpv.EqPhase in ['B']:
            Line = Line +'.2.0'
            Line = Line +' phases=1'
        elif genpv.EqPhase in ['C']:
            Line = Line +'.3.0'
            Line = Line +' phases=1'
        elif genpv.EqPhase in ['ABC']:
            Line = Line +'.1.2.3'
            Line = Line +' phases=3'
        else:
            print ('do not identified PV phase')
        # First rating lookup: the 'DEFAULT' generation model.
        pvrating = genpv.DeviceNumber
        pvratingindex = -1
        for i in range(len(Dggenerationmodellist)):
            if pvrating == Dggenerationmodellist[i].DeviceNumber and Dggenerationmodellist[i].LoadModelName == 'DEFAULT':
                pvratingindex = i
        if pvratingindex == -1 :
            print('PV rating not found')
        if float(Dggenerationmodellist[pvratingindex].ActiveGeneration) < 100:
            Line = Line + ' kW='+Dggenerationmodellist[pvratingindex].ActiveGeneration
            # pf == 1.0 is copied verbatim; otherwise it is a percent value.
            if float(Dggenerationmodellist[pvratingindex].PowerFactor) == 1.000000:
                Line = Line + ' pf='+Dggenerationmodellist[pvratingindex].PowerFactor
            else:
                Line = Line + ' pf='+str(float(Dggenerationmodellist[pvratingindex].PowerFactor)/100)
        else:
            # Fallback lookup under the 'LoadModelName' generation model.
            pvratingindex = -1
            for i in range(len(Dggenerationmodellist)):
                if pvrating == Dggenerationmodellist[i].DeviceNumber and Dggenerationmodellist[i].LoadModelName == 'LoadModelName':
                    #TODO: figure the proper load model out
                    pvratingindex = i
            if pvratingindex == -1 :
                print('PV rating not found')
            if float(Dggenerationmodellist[pvratingindex].ActiveGeneration) < 100:
                Line = Line + ' kW='+Dggenerationmodellist[pvratingindex].ActiveGeneration
                if float(Dggenerationmodellist[pvratingindex].PowerFactor) == 1.000000:
                    Line = Line + ' pf='+Dggenerationmodellist[pvratingindex].PowerFactor
                else:
                    Line = Line + ' pf='+str(float(Dggenerationmodellist[pvratingindex].PowerFactor)/100)
            else:
                #print ('generator {} is not used'.format(genpv.DeviceNumber))
                continue
        if genpv.EqPhase in ['A','B','C']:
            Line = Line + ' kV=0.240'
        else:
            print('three phase PV found')
        Line = Line + ' Basefreq='+'60'
        Line = Line + ' Model='+'7'
        Line = Line + ' Vminpu='+'0.95'
        Line = Line + ' Vmaxpu='+'1.05'
        DSSGenerators.append(Line)
        csvline = genpv.DeviceNumber+','+Sectionlist[pvsectionindex].FromNodeID+','+genpv.EqPhase+','+'7.2'+','+Dggenerationmodellist[pvratingindex].ActiveGeneration+','+'1.0'
        CSVPVs.append(csvline)
    return DSSGenerators,CSVPVs
def convert_capacitors(ShuntCapacitorSettinglist, CapacitorExtltdlist, Sectionlist):
    """Build OpenDSS ``New Capacitor`` statements plus a CSV summary.

    ``CapacitorExtltdlist`` is kept only for signature parity.  The bank
    rating is the sum of the three per-phase switched kVAr values.
    Returns ``(DSSCapacitors, CSVCapacitors)``.
    """
    _PHASE_INFO = {'A': ('.1.0', '1'), 'B': ('.2.0', '1'),
                   'C': ('.3.0', '1'), 'ABC': ('.1.2.3', '3')}
    DSSCapacitors = []
    CSVCapacitors = ['Rating (kVAR), Bus, Phases, Configuration']
    for capacitors in ShuntCapacitorSettinglist:
        Line = 'New Capacitor.' + capacitors.DeviceNumber
        # Find the section record; the last matching entry wins.
        capssectionindex = -1
        for idx, candidate in enumerate(Sectionlist):
            if candidate.SectionID == capacitors.SectionID:
                capssectionindex = idx
        if capssectionindex == -1:
            print('Capacitor section not found')
        section = Sectionlist[capssectionindex]
        frombus = section.FromNodeID.replace(".", "_")
        if capacitors.Location == '2':
            Line += ' Bus1=' + frombus
        else:
            print('Capacitor location is not 2/To bus. Please check and update the node.')
        info = _PHASE_INFO.get(section.Phase)
        if info is None:
            print('do not identified phase')
        else:
            suffix, nphases = info
            Line += suffix + ' Phases=' + nphases
        total_kvar = str(float(capacitors.SwitchedKVARA)
                         + float(capacitors.SwitchedKVARB)
                         + float(capacitors.SwitchedKVARC))
        Line += ' kVAr=' + total_kvar
        Line += ' kV=12.47'
        Line += ' Conn=' + ('wye' if capacitors.Connection == 'Y' else 'delta')
        Line += ' Basefreq=' + '60'
        DSSCapacitors.append(Line)
        CSVCapacitors.append(total_kvar + "," + frombus + ","
                             + section.Phase + "," + capacitors.Connection)
    return DSSCapacitors, CSVCapacitors
def convert_protection(Switchlist, Breakerlist, Fuselist, SwitchSettinglist, BreakerSettinglist, FuseSettinglist, OvercurrentRelayInstrumentlist, CurrentTransformerInstrumentlist,OverheadByphaseSettinglist,UndergroundlineSettinglist, Sectionlist):
    """Convert switches, breakers and fuses into OpenDSS Line/SwtControl edits.

    Each protective device is mapped onto the overhead or underground line
    occupying its section: the line is (re)emitted with ``switch=yes`` and,
    for switches only, a ``SwtControl`` is added.  Returns
    ``(DSSProtecction, DSSSwitchcontrol, switchlist, CSVSW, CSVBR, CSVFUSE)``
    where ``switchlist`` holds the device numbers of all lines turned into
    switches (consumed by the line converters to emit 'Edit' instead of 'New').

    NOTE(review): if a device's section matches neither line list, ``Line``
    (and ``SWLine``) may be unbound or stale from the previous iteration, and
    the CSV rows index the lists with -1 (the last element) — confirm intent.
    """
    DSSProtecction = []
    DSSSwitchcontrol = []
    switchlist = []
    CSVSW = ['Device Number, Location - Overhead, Location - Underhead, SW Location, Phase, Type, Cabinate/Transformer Location ']
    for protectssw in SwitchSettinglist:
        switch = protectssw.SectionID
        swindexover = -1
        swindexunder = -1
        # Try the overhead lines first; fall back to underground lines.
        for i in range(len(OverheadByphaseSettinglist)):
            if switch == OverheadByphaseSettinglist[i].SectionID:
                swindexover = i
        if swindexover == -1 :
            for i in range(len(UndergroundlineSettinglist)):
                if switch == UndergroundlineSettinglist[i].SectionID:
                    swindexunder = i
            if swindexunder == -1 :
                print("no switch")
            else:
                if UndergroundlineSettinglist[swindexunder].DeviceNumber in switchlist:
                    Line = "Edit Line."
                else:
                    Line = "New Line."
                Line = Line +UndergroundlineSettinglist[swindexunder].DeviceNumber
                switchlist.append(UndergroundlineSettinglist[swindexunder].DeviceNumber)
                SWLine = 'New SwtControl.'+UndergroundlineSettinglist[swindexunder].DeviceNumber
                SWLine = SWLine + ' SwitchedObj=Line.'+UndergroundlineSettinglist[swindexunder].DeviceNumber
        else:
            if OverheadByphaseSettinglist[swindexover].DeviceNumber in switchlist:
                Line = "Edit Line."
            else:
                Line = "New Line."
            Line = Line +OverheadByphaseSettinglist[swindexover].DeviceNumber
            switchlist.append(OverheadByphaseSettinglist[swindexover].DeviceNumber)
            SWLine = 'New SwtControl.'+OverheadByphaseSettinglist[swindexover].DeviceNumber
            SWLine = SWLine + ' SwitchedObj=Line.'+OverheadByphaseSettinglist[swindexover].DeviceNumber
        Line = Line + ' switch=yes'
        SWLine = SWLine + ' SwitchedTerm=1'
        # NStatus '1' means normally open.
        if protectssw.NStatus == '1':
            print('Switch {} is open'.format(protectssw.SectionID))
            SWLine = SWLine + ' Normal=Open Action=Open'
        else:
            SWLine = SWLine + ' Normal=Close Action=Close'
        SWLine = SWLine + ' Delay=0'
        DSSProtecction.append(Line)
        DSSSwitchcontrol.append(SWLine)
        csvsw = protectssw.DeviceNumber + "," + OverheadByphaseSettinglist[swindexover].DeviceNumber+ "," +UndergroundlineSettinglist[swindexunder].DeviceNumber + "," + str(swindexover)
        swsectionindex = -1
        for i in range(len(Sectionlist)):
            if protectssw.SectionID == Sectionlist[i].SectionID:
                swsectionindex = i
        if swsectionindex == -1 :
            print('Switch section not found for {}'.format(switch))
        csvsw = csvsw + "," + Sectionlist[swsectionindex].Phase
        CabinateNum=''
        SwType='-1'
        #TODO: switchtype and cabinate number does not work.
        csvsw = csvsw + "," + SwType + "," + CabinateNum
        CSVSW.append(csvsw)
    CSVBR = ['Device Number, Location - Overhead, Location - Underhead, BR Location ']
    for protectsbr in BreakerSettinglist:
        breaker = protectsbr.SectionID
        brindexover = -1
        brindexunder = -1
        for i in range(len(OverheadByphaseSettinglist)):
            if breaker == OverheadByphaseSettinglist[i].SectionID:
                brindexover = i
        if brindexover == -1 :
            for i in range(len(UndergroundlineSettinglist)):
                if breaker == UndergroundlineSettinglist[i].SectionID:
                    brindexunder = i
            if brindexunder == -1 :
                print("no switch")
            else:
                if UndergroundlineSettinglist[brindexunder].DeviceNumber in switchlist:
                    Line = "Edit Line."
                else:
                    Line = "New Line."
                Line = Line +UndergroundlineSettinglist[brindexunder].DeviceNumber
                switchlist.append(UndergroundlineSettinglist[brindexunder].DeviceNumber)
        else:
            if OverheadByphaseSettinglist[brindexover].DeviceNumber in switchlist:
                Line = "Edit Line."
            else:
                Line = "New Line."
            Line = Line +OverheadByphaseSettinglist[brindexover].DeviceNumber
            switchlist.append(OverheadByphaseSettinglist[brindexover].DeviceNumber)
        Line = Line + ' switch=yes'
        # Normally-open breakers disable the line entirely.
        if protectsbr.NStatus == '1':
            print('Breaker {} is open'.format(protectsbr.SectionID))
            Line = Line + ' enabled=no'
        DSSProtecction.append(Line)
        csvbr = protectsbr.DeviceNumber + "," + OverheadByphaseSettinglist[brindexover].DeviceNumber+ "," +UndergroundlineSettinglist[brindexunder].DeviceNumber + "," + str(brindexover)
        CSVBR.append(csvbr)
    CSVFUSE = ['Device Number, Location - Overhead, Location - Underhead, BR Location, Phase, Type, CabinateNum ']
    for protectsfuse in FuseSettinglist:
        Line = "New Line."
        fuse = protectsfuse.SectionID
        fsindexover = -1
        fsindexunder = -1
        for i in range(len(OverheadByphaseSettinglist)):
            if fuse == OverheadByphaseSettinglist[i].SectionID:
                fsindexover = i
        if fsindexover == -1 :
            for i in range(len(UndergroundlineSettinglist)):
                if fuse == UndergroundlineSettinglist[i].SectionID:
                    fsindexunder = i
            if fsindexunder == -1 :
                print("no switch")
            else:
                if UndergroundlineSettinglist[fsindexunder].DeviceNumber in switchlist:
                    Line = "Edit Line."
                else:
                    Line = "New Line."
                Line = Line +UndergroundlineSettinglist[fsindexunder].DeviceNumber
                switchlist.append(UndergroundlineSettinglist[fsindexunder].DeviceNumber)
        else:
            if OverheadByphaseSettinglist[fsindexover].DeviceNumber in switchlist:
                Line = "Edit Line."
            else:
                Line = "New Line."
            Line = Line +OverheadByphaseSettinglist[fsindexover].DeviceNumber
            switchlist.append(OverheadByphaseSettinglist[fsindexover].DeviceNumber)
        Line = Line + ' switch=yes'
        if protectsfuse.NStatus == '1':
            print('Fuse {} is open'.format(protectsfuse.SectionID))
            Line = Line + ' enabled=no'
        DSSProtecction.append(Line)
        csvfuse = protectsfuse.SectionID + "," + OverheadByphaseSettinglist[fsindexover].DeviceNumber+ "," +UndergroundlineSettinglist[fsindexunder].DeviceNumber + "," + str(fsindexover)
        fusesectionindex = -1
        for i in range(len(Sectionlist)):
            if protectsfuse.SectionID == Sectionlist[i].SectionID:
                fusesectionindex = i
        if fusesectionindex == -1 :
            print('Switch section not found for {}'.format(fuse))
        csvfuse = csvfuse + "," + Sectionlist[fusesectionindex].Phase
        CabinateNum=''
        FsType='-1'
        #TODO: Fstype and cabinate number does not work.
        csvfuse = csvfuse + "," + FsType + "," + CabinateNum
        CSVFUSE.append(csvfuse)
    return DSSProtecction, DSSSwitchcontrol, switchlist, CSVSW, CSVBR, CSVFUSE
def ProperCableName(UndergroundlineSettinglist):
    """Split underground-line settings into two parallel lists.

    Returns ``(CableNamelist, CableSectionID)`` — the device numbers and
    section IDs in the same order as the input records.
    """
    CableNamelist = [record.DeviceNumber for record in UndergroundlineSettinglist]
    CableSectionID = [record.SectionID for record in UndergroundlineSettinglist]
    return CableNamelist, CableSectionID
def FindProtection(SwitchSettinglist, BreakerSettinglist, FuseSettinglist):
    """Collect the unique SectionIDs carrying a protective device.

    Scans switches, then breakers, then fuses, deduplicating while keeping
    first-seen order (same result as the original).  A set backs the
    membership test so the scan is O(n) instead of O(n^2).
    """
    Isswitchlist = []
    seen = set()  # O(1) membership instead of scanning the list each time
    for device in (*SwitchSettinglist, *BreakerSettinglist, *FuseSettinglist):
        if device.SectionID not in seen:
            seen.add(device.SectionID)
            Isswitchlist.append(device.SectionID)
    return Isswitchlist
def LineImpedances(csvname, UndergroundlineSettinglist, SwitchSettinglist, BreakerSettinglist, FuseSettinglist):
    """Read the per-cable impedance CSV and emit one OpenDSS ``Line`` per row.

    Rows are joined to the underground-line records by section ID (column 1);
    sections that carry protection are re-emitted as 'Edit' statements.
    Blank lines in the CSV are skipped.  Returns the list of DSS statements.

    Fix: the ``phases=`` field was accidentally appended twice per line
    (two identical consecutive statements in the original); it is now
    written once.
    """
    CableNamelist, CableSectionID = ProperCableName(UndergroundlineSettinglist)
    Isswitchlist = FindProtection(SwitchSettinglist, BreakerSettinglist, FuseSettinglist)
    OpenDSSlist = []
    with open(csvname, 'r') as ipfile:
        Lineset = ipfile.readlines()
    for raw in Lineset:
        row = raw.rstrip()
        if row.strip() == '':
            continue  # skip blank lines
        Line = row.split(',')
        linesection = Line[1]
        # Raises ValueError if the section has no underground record,
        # matching the original behaviour.
        devicenumindex = CableSectionID.index(linesection)
        # Protection sections were already emitted; edit instead of redefining.
        DSSLine = 'Edit' if linesection in Isswitchlist else 'New'
        DSSLine = DSSLine + ' Line.' + CableNamelist[devicenumindex]
        phaseword = '-1'
        if Line[5] == 'ABC':
            phaseword = '.1.2.3'
        elif Line[5] == 'A':
            phaseword = '.1.0'
        elif Line[5] == 'B':
            phaseword = '.2.0'
        elif Line[5] == 'C':
            phaseword = '.3.0'
        else:
            print('phase not found')
        DSSLine = DSSLine + ' bus1=' + Line[6].replace(".", "_") + phaseword
        DSSLine = DSSLine + ' bus2=' + Line[7].replace(".", "_") + phaseword
        DSSLine = DSSLine + ' length=' + Line[8]
        DSSLine = DSSLine + ' units=m'
        # BUG FIX: previously this field was appended twice.
        DSSLine = DSSLine + ' phases=' + str(len(Line[5]))
        DSSLine = DSSLine + ' LineCode=' + Line[2].replace(".", "_") + "_" + str(len(Line[5])) + "_" + Line[9] + "Cond"
        DSSLine = DSSLine + ' NormAmps=' + Line[13]
        OpenDSSlist.append(DSSLine)
    return OpenDSSlist
def LineCodes(csvname, UndergroundlineSettinglist, SwitchSettinglist, BreakerSettinglist, FuseSettinglist):
    """Read the cable-impedance CSV and emit deduplicated OpenDSS LineCodes.

    Zero R1/R0/X1 values are clamped to 1.00E-07 so OpenDSS does not see a
    singular impedance.  Identical LineCode statements are emitted only once.
    Returns the list of ``New LineCode`` statements.
    """
    # NOTE(review): CableNamelist and Isswitchlist are never read below, and
    # devicenumindex is unused — but .index() also validates that every CSV
    # row has a matching underground record, so the calls are kept.
    CableNamelist, CableSectionID = ProperCableName(UndergroundlineSettinglist)
    Isswitchlist = FindProtection(SwitchSettinglist, BreakerSettinglist, FuseSettinglist)
    DSSLineCodes = []
    CSVLineCodes = ['Cable Type, Phases, Number of Conductors, R1[ohm/m], R0[ohm/m], X1[ohm/m], X0[ohm/m], B1[uS/m], B0[uS/m]']
    with open(csvname,'r') as ipfile:
        Lineset = ipfile.readlines()
        for i in range(0,len(Lineset)):
            Line = Lineset[i].rstrip()
            if Line.strip() == '':
                continue  # skip blank lines
            Line = Line.split(',')
            linesection = Line[1]
            devicenumindex = CableSectionID.index(linesection)
            # Code name: cableID_phaseCount_conductorCount + "Cond".
            DSSLine = 'New LineCode.'+Line[2].replace(".","_")+"_"+str(len(Line[5]))+"_"+Line[9]+"Cond"
            DSSLine = DSSLine + ' NPhases='+str(len(Line[5]))
            if float(Line[21]) == 0:
                DSSLine = DSSLine + ' R1=1.00E-07'
            else:
                DSSLine = DSSLine + ' R1='+Line[21]
            if float(Line[24]) == 0:
                DSSLine = DSSLine + ' R0=1.00E-07'
            else:
                DSSLine = DSSLine + ' R0='+Line[24]
            if float(Line[22])==0:
                DSSLine = DSSLine + ' X1='+'1.00E-07'
            else:
                DSSLine = DSSLine + ' X1='+Line[22]
            DSSLine = DSSLine + ' X0='+Line[25]
            DSSLine = DSSLine + ' B1='+Line[23]
            DSSLine = DSSLine + ' B0='+Line[26]
            DSSLine = DSSLine + ' Units=m'
            # Deduplicate identical codes.
            if DSSLine in DSSLineCodes:
                continue
            else:
                DSSLineCodes.append(DSSLine)
    return DSSLineCodes
def convert_buses(Nodelist, IntermediateNodeslist):
    """Format ``NodeID, X, Y`` bus-coordinate rows for an OpenDSS BusCoords file.

    Dots in node IDs become underscores to match the bus names emitted by the
    other converters.  ``IntermediateNodeslist`` is accepted for signature
    parity but is not read here.
    """
    return [
        '{}, {}, {}'.format(node.NodeID.replace(".", "_"), node.CoordX, node.CoordY)
        for node in Nodelist
    ]
# File-writing helpers.
def writefileopendss (circuitName, codeList, openfile) :
    """Write one OpenDSS statement per line into *circuitName*.

    When ``openfile == 1`` each statement is echoed to stdout as it is
    written.  An empty ``codeList`` still truncates/creates the file and
    prints a warning.
    """
    with open(circuitName, 'w') as out:
        if not codeList:
            print('Empty file {}'.format(circuitName))
        else:
            for statement in codeList:
                out.write(statement + '\n')
                if openfile == 1:
                    print(statement)
    return
if __name__ == "__main__":
    # This module only provides conversion helpers; there is no CLI here.
    print('Please do not run this file, run the main file!')
| [
"numpy.matrix",
"cmath.exp",
"math.sqrt",
"os.getcwd"
] | [((18718, 18783), 'numpy.matrix', 'np.matrix', (['[[complex0, 0, 0], [0, complex1, 0], [0, 0, complex1]]'], {}), '([[complex0, 0, 0], [0, complex1, 0], [0, 0, complex1]])\n', (18727, 18783), True, 'import numpy as np\n'), ((18843, 18905), 'numpy.matrix', 'np.matrix', (['[[1.0, 1.0, 1.0], [1.0, a * a, a], [1.0, a, a * a]]'], {}), '([[1.0, 1.0, 1.0], [1.0, a * a, a], [1.0, a, a * a]])\n', (18852, 18905), True, 'import numpy as np\n'), ((1337, 1348), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1346, 1348), False, 'import os\n'), ((18499, 18521), 'math.sqrt', 'math.sqrt', (['(1 + XR * XR)'], {}), '(1 + XR * XR)\n', (18508, 18521), False, 'import math\n'), ((18535, 18559), 'math.sqrt', 'math.sqrt', (['(1 + XR0 * XR0)'], {}), '(1 + XR0 * XR0)\n', (18544, 18559), False, 'import math\n'), ((18573, 18601), 'math.sqrt', 'math.sqrt', (['(1 + 1 / (XR * XR))'], {}), '(1 + 1 / (XR * XR))\n', (18582, 18601), False, 'import math\n'), ((18615, 18645), 'math.sqrt', 'math.sqrt', (['(1 + 1 / (XR0 * XR0))'], {}), '(1 + 1 / (XR0 * XR0))\n', (18624, 18645), False, 'import math\n'), ((18804, 18837), 'cmath.exp', 'cmath.exp', (['(2 * math.pi * 1.0j / 3)'], {}), '(2 * math.pi * 1.0j / 3)\n', (18813, 18837), False, 'import cmath\n'), ((1570, 1582), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (1579, 1582), False, 'import math\n'), ((1747, 1759), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (1756, 1759), False, 'import math\n'), ((1667, 1679), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (1676, 1679), False, 'import math\n'), ((3417, 3429), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (3426, 3429), False, 'import math\n'), ((1854, 1866), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (1863, 1866), False, 'import math\n')] |
"""Implementations for the debug VM."""
from copy import copy
from functools import reduce
from typing import Callable
import numpy as np
import math
from .. import dtype as types
from ..dtype import Number, Float, Bool
from ..utils import Registry, overload
from . import ops as primops
# Maps each primitive to its pure-Python implementation.
py_registry: Registry[primops.Primitive, Callable] = Registry()
# Maps each primitive to its VM implementation (first argument is the VM).
vm_registry: Registry[primops.Primitive, Callable] = Registry()
py_register = py_registry.register  # decorator: register a pure-Python impl
vm_register = vm_registry.register  # decorator: register a VM impl
def register(prim):
    """Register an implementation for this primitive.

    The same function is installed in both registries: directly as the pure
    Python implementation, and wrapped (dropping the VM argument) as the VM
    implementation.
    """
    def deco(fn):
        def vm_impl(vm, *args):
            return fn(*args)
        vm_register(prim)(vm_impl)
        return py_register(prim)(fn)
    return deco
def _assert_scalar(*args):
# TODO: These checks should be stricter, e.g. require that all args
# have exactly the same type, but right now there is some mixing between
# numpy types and int/float.
for x in args:
if isinstance(x, np.ndarray):
if x.shape != ():
msg = f'Expected scalar, not array with shape {x.shape}'
raise TypeError(msg)
elif not isinstance(x, (int, float, np.number)):
raise TypeError(f'Expected scalar, not {type(x)}')
@register(primops.scalar_add)
def scalar_add(x: Number, y: Number) -> Number:
    """Implement `scalar_add`: the sum of two scalars."""
    _assert_scalar(x, y)
    total = x + y
    return total
@register(primops.scalar_sub)
def scalar_sub(x: Number, y: Number) -> Number:
    """Implement `scalar_sub`: the difference x - y."""
    _assert_scalar(x, y)
    difference = x - y
    return difference
@register(primops.scalar_mul)
def scalar_mul(x: Number, y: Number) -> Number:
    """Implement `scalar_mul`: the product of two scalars."""
    _assert_scalar(x, y)
    product = x * y
    return product
@register(primops.scalar_div)
def scalar_div(x: Number, y: Number) -> Number:
    """Implement `scalar_div`.

    True division for floats; for other scalars the quotient is truncated
    toward zero via int().
    """
    _assert_scalar(x, y)
    quotient = x / y
    if isinstance(x, (float, np.floating)):
        return quotient
    return int(quotient)
@register(primops.scalar_mod)
def scalar_mod(x: Number, y: Number) -> Number:
    """Implement `scalar_mod`: x modulo y (Python semantics)."""
    _assert_scalar(x, y)
    remainder = x % y
    return remainder
@register(primops.scalar_pow)
def scalar_pow(x: Number, y: Number) -> Number:
    """Implement `scalar_pow`: x raised to the power y."""
    _assert_scalar(x, y)
    power = x ** y
    return power
@register(primops.scalar_trunc)
def scalar_trunc(x: Number) -> Number:
    """Implement `scalar_trunc`: round toward zero."""
    _assert_scalar(x)
    truncated = np.trunc(x)
    return truncated
@register(primops.scalar_floor)
def scalar_floor(x: Number) -> Number:
    """Implement `scalar_floor`: round toward negative infinity."""
    _assert_scalar(x)
    floored = np.floor(x)
    return floored
@register(primops.scalar_uadd)
def scalar_uadd(x: Number) -> Number:
    """Implement `scalar_uadd` (unary plus): the identity on scalars."""
    _assert_scalar(x)
    return x
@register(primops.scalar_usub)
def scalar_usub(x: Number) -> Number:
    """Implement `scalar_usub` (unary minus): the negation of x."""
    _assert_scalar(x)
    negated = -x
    return negated
@register(primops.scalar_exp)
def scalar_exp(x: Number) -> Number:
    """Implement `scalar_exp`: e raised to x."""
    _assert_scalar(x)
    value = math.exp(x)
    return value
@register(primops.scalar_log)
def scalar_log(x: Float) -> Float:
    """Implement `scalar_log`: the natural logarithm of x."""
    _assert_scalar(x)
    value = math.log(x)
    return value
@register(primops.scalar_sin)
def scalar_sin(x: Number) -> Number:
    """Implement `scalar_sin`: the sine of x (radians)."""
    _assert_scalar(x)
    value = math.sin(x)
    return value
@register(primops.scalar_cos)
def scalar_cos(x: Number) -> Number:
    """Implement `scalar_cos`: the cosine of x (radians)."""
    _assert_scalar(x)
    value = math.cos(x)
    return value
@register(primops.scalar_tan)
def scalar_tan(x: Number) -> Number:
    """Implement `scalar_tan`: the tangent of x (radians)."""
    _assert_scalar(x)
    value = math.tan(x)
    return value
@register(primops.scalar_eq)
def scalar_eq(x: Number, y: Number) -> Bool:
    """Implement `scalar_eq`: True iff x equals y."""
    _assert_scalar(x, y)
    result = x == y
    return result
@register(primops.scalar_lt)
def scalar_lt(x: Number, y: Number) -> Bool:
    """Implement `scalar_lt`: True iff x is strictly less than y."""
    _assert_scalar(x, y)
    result = x < y
    return result
@register(primops.scalar_gt)
def scalar_gt(x: Number, y: Number) -> Bool:
    """Implement `scalar_gt`: True iff x is strictly greater than y."""
    _assert_scalar(x, y)
    result = x > y
    return result
@register(primops.scalar_ne)
def scalar_ne(x: Number, y: Number) -> Bool:
    """Implement `scalar_ne`: True iff x differs from y."""
    _assert_scalar(x, y)
    result = x != y
    return result
@register(primops.scalar_le)
def scalar_le(x: Number, y: Number) -> Bool:
    """Implement `scalar_le`: True iff x is less than or equal to y."""
    _assert_scalar(x, y)
    result = x <= y
    return result
@register(primops.scalar_ge)
def scalar_ge(x: Number, y: Number) -> Bool:
    """Implement `scalar_ge`: True iff x is greater than or equal to y."""
    _assert_scalar(x, y)
    result = x >= y
    return result
@register(primops.bool_not)
def bool_not(x: Bool) -> Bool:
    """Implement `bool_not`: logical negation of a genuine bool."""
    # Require the actual singletons, not merely truthy/falsy values.
    assert x is True or x is False
    return x is False
@register(primops.bool_and)
def bool_and(x: Bool, y: Bool) -> Bool:
    """Implement `bool_and`: logical conjunction of two genuine bools."""
    for operand in (x, y):
        assert operand is True or operand is False
    return x and y
@register(primops.bool_or)
def bool_or(x: Bool, y: Bool) -> Bool:
    """Implement `bool_or`: logical disjunction of two genuine bools."""
    for operand in (x, y):
        assert operand is True or operand is False
    return x or y
@register(primops.bool_eq)
def bool_eq(x: Bool, y: Bool) -> Bool:
    """Implement `bool_eq`: equality of two genuine bools."""
    for operand in (x, y):
        assert operand is True or operand is False
    # True/False are singletons, so identity equals equality here.
    return x is y
@register(primops.typeof)
def typeof(x):
    """Implement typeof: map a value to its Myia type."""
    # Types themselves (Myia or Python) have type TypeType.
    if isinstance(x, (types.Type, type)):
        return types.TypeType
    return types.pytype_to_myiatype(type(x), x)
@overload
def _issubtype_helper(t: types.Array, model):
    # Arrays are covariant in their element type.
    return issubtype(t.elements, model.elements)
@overload  # noqa: F811
def _issubtype_helper(t: types.Tuple, model):
    # Tuples match element-wise and must have the same arity.
    if len(t.elements) != len(model.elements):
        return False
    return all(issubtype(t1, t2)
               for t1, t2 in zip(t.elements, model.elements))
@overload  # noqa: F811
def _issubtype_helper(t: types.Class, model):
    # Classes must share a tag; matching tags imply matching attribute sets.
    if t.tag != model.tag:
        return False
    if tuple(t.attributes.keys()) != tuple(model.attributes.keys()):
        raise AssertionError(
            'Identical Class tags should imply identical attributes.'
        )
    # Attributes are checked pairwise, in declaration order.
    return all(issubtype(t1, t2)
               for t1, t2 in zip(t.attributes.values(),
                                 model.attributes.values()))
@overload  # noqa: F811
def _issubtype_helper(t: object, model):
    # Fallback: any other combination is not a subtype.
    return False
def issubtype(t, model):
    """Check that type t is represented by model."""
    # Exact match first; then generic models; then structural recursion.
    if t == model:
        return True
    if types.ismyiatype(model, generic=True):
        return types.ismyiatype(t, model)
    if types.get_generic(t, model):
        return _issubtype_helper[t.generic](t, model)
    return False
@register(primops.hastype)
def hastype(x, t):
    """Implement `hastype`: is x's Myia type a subtype of t?"""
    actual = typeof(x)
    return issubtype(actual, t)
@register(primops.make_tuple)
def make_tuple(*args):
    """Implement `make_tuple`: pack the arguments into a tuple."""
    return args
@py_register(primops.tuple_getitem)
@py_register(primops.list_getitem)
@py_register(primops.array_getitem)
def getitem(data, item):
    """Implement `getitem`: plain Python indexing on the container."""
    return data[item]
@vm_register(primops.tuple_getitem)
@vm_register(primops.list_getitem)
@vm_register(primops.array_getitem)
def _vm_getitem(vm, data, item):
    """Implement `getitem` for the VM, converting the result."""
    element = data[item]
    return vm.convert(element)
@register(primops.tuple_setitem)
def tuple_setitem(data, item, value):
    """Implement `tuple_setitem`: a new tuple with position `item` replaced."""
    updated = []
    for position, element in enumerate(data):
        updated.append(value if position == item else element)
    return tuple(updated)
@register(primops.list_setitem)
@register(primops.array_setitem)
def list_setitem(data, item, value):
    """Implement `list/array_setitem`: out-of-place single-item update."""
    updated = copy(data)  # shallow copy; works for lists and arrays alike
    updated[item] = value
    return updated
@register(primops.list_append)
def list_append(data, value):
"""Implement `list_append`."""
data2 = copy(data)
data2.append(value)
return data2
@vm_register(primops.getattr)
def _vm_getattr(vm, data, attr):
"""Implement `getattr`."""
from types import MethodType, BuiltinMethodType
from ..vm import Partial
# I don't know how else to get a reference to this type
method_wrapper_type = type((0).__add__)
try:
x = getattr(data, attr)
except AttributeError:
mmap = vm.convert.resources.method_map[typeof(data)]
if attr in mmap:
return Partial(vm.convert(mmap[attr]), [data], vm)
else:
raise # pragma: no cover
if isinstance(x, method_wrapper_type):
# This is returned by <int>.__add__ and the like.
# Don't know how else to retrieve the unwrapped method
unwrapped = getattr(x.__objclass__, x.__name__)
return Partial(vm.convert(unwrapped), [x.__self__], vm)
elif isinstance(x, BuiltinMethodType) and hasattr(type(data), attr):
# This is returned by <list>.__getitem__ and maybe others.
x = getattr(type(data), attr)
return Partial(vm.convert(x), [data], vm)
elif isinstance(x, MethodType):
# This is a method made from a user function
return Partial(vm.convert(x.__func__), [x.__self__], vm)
else:
return vm.convert(x)
py_setattr = setattr
@register(primops.setattr)
def setattr(data, attr, value):
"""Implement `setattr`."""
data2 = copy(data)
py_setattr(data2, attr, value)
return data2
@register(primops.shape)
def shape(array):
"""Implement `shape`."""
return array.shape
@py_register(primops.array_map)
def array_map(fn, *arrays):
"""Implement `array_map`."""
return np.vectorize(fn)(*arrays)
@vm_register(primops.array_map)
def _array_map_vm(vm, fn, *arrays):
def fn_(*args):
return vm.call(fn, args)
return array_map(fn_, *arrays)
@py_register(primops.array_scan)
def array_scan(fn, init, array, axis):
"""Implement `array_scan`."""
# This is inclusive scan because it's easier to implement
# We will have to discuss what semantics we want later
def f(ary):
val = init
it = np.nditer([ary, None])
for x, y in it:
val = fn(val, x)
y[...] = val
return it.operands[1]
return np.apply_along_axis(f, axis, array)
@vm_register(primops.array_scan)
def _array_scan_vm(vm, fn, init, array, axis):
def fn_(a, b):
return vm.call(fn, [a, b])
return array_scan(fn_, init, array, axis)
@py_register(primops.array_reduce)
def array_reduce(fn, array, shp):
"""Implement `array_reduce`."""
idtype = array.dtype
ufn = np.frompyfunc(fn, 2, 1)
delta = len(array.shape) - len(shp)
if delta < 0:
raise ValueError('Shape to reduce to cannot be larger than original')
def is_reduction(ishp, tshp):
if tshp == 1 and ishp > 1:
return True
elif tshp != ishp:
raise ValueError('Dimension mismatch for reduce')
else:
return False
reduction = [(delta + idx if is_reduction(ishp, tshp) else None, True)
for idx, (ishp, tshp)
in enumerate(zip(array.shape[delta:], shp))]
reduction = [(i, False) for i in range(delta)] + reduction
for idx, keep in reversed(reduction):
if idx is not None:
array = ufn.reduce(array, axis=idx, keepdims=keep)
if not isinstance(array, np.ndarray):
# Force result to be ndarray, even if it's 0d
array = np.array(array)
array = array.astype(idtype)
return array
@vm_register(primops.array_reduce)
def _array_reduce_vm(vm, fn, array, shp):
def fn_(a, b):
return vm.call(fn, [a, b])
return array_reduce(fn_, array, shp)
@register(primops.distribute)
def distribute(v, shape):
"""Implement `distribute`."""
return np.broadcast_to(v, shape)
@register(primops.reshape)
def reshape(v, shape):
"""Implement `reshape`."""
return np.reshape(v, shape)
@register(primops.transpose)
def transpose(v, permutation):
"""Implement `transpose`."""
return np.transpose(v, permutation)
@register(primops.dot)
def dot(a, b):
"""Implement `dot`."""
return np.dot(a, b)
@register(primops.return_)
def return_(x):
"""Implement `return_`."""
return x
@register(primops.scalar_cast)
def scalar_cast(x, t):
"""Implement `scalar_cast`."""
assert types.ismyiatype(t, types.Number)
dtype = types.type_to_np_dtype(t)
return getattr(np, dtype)(x)
@py_register(primops.list_map)
def list_map(f, *lsts):
"""Implement `list_map` in pure Python."""
assert len(set(len(l) for l in lsts)) == 1
def f_(args):
return f(*args)
return list(map(f_, zip(*lsts)))
@vm_register(primops.list_map)
def _list_map_vm(vm, f, *lsts):
"""Implement `list_map` for Myia's VM."""
assert len(set(len(l) for l in lsts)) == 1
def f_(args):
return vm.call(f, args)
return list(map(f_, zip(*lsts)))
@register(primops.identity)
def identity(x):
"""Implement `identity`."""
return x
@vm_register(primops.resolve)
def _resolve_vm(vm, data, item):
"""Implement `resolve` for the VM."""
# There is no Python implementation for this one.
value = data[item]
return vm.convert(value)
@py_register(primops.partial)
def partial(f, *args):
"""Implement `partial`."""
def res(*others):
return f(*(args + others))
return res
@register(primops.switch)
def switch(c, x, y):
"""Implement `switch`."""
return x if c else y
@register(primops.scalar_to_array)
def scalar_to_array(x):
"""Implement `scalar_to_array`."""
return np.array(x)
@register(primops.array_to_scalar)
def array_to_scalar(x):
"""Implement `array_to_scalar`."""
assert isinstance(x, np.ndarray)
return x.item()
@register(primops.broadcast_shape)
def broadcast_shape(shpx, shpy):
"""Implement `broadcast_shape`."""
from ..abstract.data import ANYTHING
orig_shpx = shpx
orig_shpy = shpy
dlen = len(shpx) - len(shpy)
if dlen < 0:
shpx = (1,) * -dlen + shpx
elif dlen > 0:
shpy = (1,) * dlen + shpy
assert len(shpx) == len(shpy)
shp = []
for a, b in zip(shpx, shpy):
if a == 1:
shp.append(b)
elif b == 1:
shp.append(a)
elif a == ANYTHING:
shp.append(b)
elif b == ANYTHING:
shp.append(a)
elif a == b:
shp.append(a)
else:
raise ValueError(
f'Cannot broadcast shapes {orig_shpx} and {orig_shpy}.'
)
return tuple(shp)
@register(primops.invert_permutation)
def invert_permutation(perm):
"""Implement `invert_permutation`."""
return tuple(perm.index(i) for i in range(len(perm)))
@register(primops.make_record)
def make_record(typ, *args):
"""Implement `make_record`."""
dataclass = types.tag_to_dataclass[typ.tag]
return dataclass(*args)
@register(primops.tuple_len)
@register(primops.list_len)
@register(primops.array_len)
def _len(x):
"""Implement `len`."""
return len(x)
@register(primops.make_list)
def make_list(*xs):
"""Implement `make_list`."""
return list(xs)
@py_register(primops.list_reduce)
def list_reduce(fn, lst, dflt):
"""Implement `list_reduce`."""
return reduce(fn, lst, dflt)
@vm_register(primops.list_reduce)
def _list_reduce_vm(vm, fn, lst, dflt):
def fn_(a, b):
return vm.call(fn, [a, b])
return list_reduce(fn_, lst, dflt)
@py_register(primops.J)
def J(x):
"""Implement `J`."""
raise NotImplementedError()
@py_register(primops.Jinv)
def Jinv(x):
"""Implement `Jinv`."""
raise NotImplementedError()
@register(primops.embed)
def embed(node):
"""Placeholder for the implementation of `embed`."""
raise NotImplementedError()
@register(primops.env_setitem)
def env_setitem(env, key, x):
"""Implement `env_setitem`."""
return env.set(key, x)
@register(primops.env_getitem)
def env_getitem(env, key, default):
"""Implement `env_getitem`."""
return env.get(key, default)
@register(primops.env_add)
def env_add(env1, env2):
"""Implement `env_add`."""
return env1.add(env2)
| [
"numpy.transpose",
"numpy.reshape",
"math.tan",
"numpy.trunc",
"functools.reduce",
"numpy.nditer",
"numpy.floor",
"math.log",
"math.cos",
"copy.copy",
"numpy.dot",
"numpy.apply_along_axis",
"numpy.array",
"numpy.frompyfunc",
"math.exp",
"math.sin",
"numpy.broadcast_to",
"numpy.vect... | [((2487, 2498), 'numpy.trunc', 'np.trunc', (['x'], {}), '(x)\n', (2495, 2498), True, 'import numpy as np\n'), ((2641, 2652), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (2649, 2652), True, 'import numpy as np\n'), ((3072, 3083), 'math.exp', 'math.exp', (['x'], {}), '(x)\n', (3080, 3083), False, 'import math\n'), ((3218, 3229), 'math.log', 'math.log', (['x'], {}), '(x)\n', (3226, 3229), False, 'import math\n'), ((3366, 3377), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (3374, 3377), False, 'import math\n'), ((3514, 3525), 'math.cos', 'math.cos', (['x'], {}), '(x)\n', (3522, 3525), False, 'import math\n'), ((3662, 3673), 'math.tan', 'math.tan', (['x'], {}), '(x)\n', (3670, 3673), False, 'import math\n'), ((7691, 7701), 'copy.copy', 'copy', (['data'], {}), '(data)\n', (7695, 7701), False, 'from copy import copy\n'), ((7853, 7863), 'copy.copy', 'copy', (['data'], {}), '(data)\n', (7857, 7863), False, 'from copy import copy\n'), ((9287, 9297), 'copy.copy', 'copy', (['data'], {}), '(data)\n', (9291, 9297), False, 'from copy import copy\n'), ((10156, 10191), 'numpy.apply_along_axis', 'np.apply_along_axis', (['f', 'axis', 'array'], {}), '(f, axis, array)\n', (10175, 10191), True, 'import numpy as np\n'), ((10516, 10539), 'numpy.frompyfunc', 'np.frompyfunc', (['fn', '(2)', '(1)'], {}), '(fn, 2, 1)\n', (10529, 10539), True, 'import numpy as np\n'), ((11731, 11756), 'numpy.broadcast_to', 'np.broadcast_to', (['v', 'shape'], {}), '(v, shape)\n', (11746, 11756), True, 'import numpy as np\n'), ((11851, 11871), 'numpy.reshape', 'np.reshape', (['v', 'shape'], {}), '(v, shape)\n', (11861, 11871), True, 'import numpy as np\n'), ((11978, 12006), 'numpy.transpose', 'np.transpose', (['v', 'permutation'], {}), '(v, permutation)\n', (11990, 12006), True, 'import numpy as np\n'), ((12085, 12097), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (12091, 12097), True, 'import numpy as np\n'), ((13549, 13560), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', 
(13557, 13560), True, 'import numpy as np\n'), ((15234, 15255), 'functools.reduce', 'reduce', (['fn', 'lst', 'dflt'], {}), '(fn, lst, dflt)\n', (15240, 15255), False, 'from functools import reduce\n'), ((9553, 9569), 'numpy.vectorize', 'np.vectorize', (['fn'], {}), '(fn)\n', (9565, 9569), True, 'import numpy as np\n'), ((10014, 10036), 'numpy.nditer', 'np.nditer', (['[ary, None]'], {}), '([ary, None])\n', (10023, 10036), True, 'import numpy as np\n'), ((11386, 11401), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (11394, 11401), True, 'import numpy as np\n')] |
# simple_fuse: simple fusion from acoustic and txt
# run in MPC as : exec(open('simple_fuse_dev.py').read())
# 2019-12-20: initial work, it works!
# 2019-12-21: update to use silence feature from speech network
# new data splitting (6000/2000/2039)
import numpy as np
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from calc_scores import calc_scores
# option: ser, ser_hfs, ser_ws, ter, ter_w2v, ter_glove, is saved change the name
speech = 'ser' # ser, ser_hfs, ser_ws
text = 'ter' # ter, ter_w2v, ter_glove'
ser = np.load('result_iemocap_sd/result_' + speech + '.npy')
ter = np.load('result_iemocap_sd/result_' + text + '.npy')
# split dev and test
split = 1600
ser_dev = ser[:split]
ser_test = ser[split:]
ter_dev = ter[:split]
ter_test = ter[split:]
# load label
vad = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')
# remove outlier
outlier = np.array([1674, 3427, 5086, 5093, 5096, 5104, 7821])
mask = np.ones(len(vad), np.bool)
mask[outlier] = 0
vad = vad[mask]
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad)
scaled_vad = scaler.transform(vad)
vad = scaled_vad
else:
vad = vad
y_dev = vad[6400:8000]
y_test = vad[8000:]
# SVR model
svr_rbf = SVR(kernel='rbf', C=200, gamma=0.1, epsilon=0.01)
# predicting valence
valence_model = svr_rbf.fit(np.array([ter_dev[:,0], ser_dev[:,0]]).T, y_dev[:,0])
valence_pred = valence_model.predict(np.array([ter_test[:,0], ser_test[:,0]]).T)
ccc_v, pcc_v, rmse_v = calc_scores(valence_pred, y_test[:,0])
# predicting arousal
arousal_model = svr_rbf.fit(np.array([ter_dev[:,1], ser_dev[:,1]]).T, y_dev[:,1])
arousal_pred = valence_model.predict(np.array([ter_test[:,1], ser_test[:,1]]).T)
ccc_a, pcc_a, rmse_a = calc_scores(arousal_pred, y_test[:,1])
# predicting dominance
dominance_model = svr_rbf.fit(np.array([ter_dev[:,2], ser_dev[:,2]]).T, y_dev[:,2])
dominance_pred = valence_model.predict(np.array([ter_test[:,2], ser_test[:,2]]).T)
ccc_d, pcc_d, rmse_d = calc_scores(dominance_pred, y_test[:,2])
print('CCC', ccc_v, ccc_a, ccc_d)
print('PCC', pcc_v, pcc_a, pcc_d)
print('RMSE', rmse_v, rmse_a, rmse_d)
vad_pred = np.vstack((valence_pred, arousal_pred, dominance_pred)).T
filename = 'svm_iemocap_loso/' + speech + '_' + text + '.npy'
np.save(filename, vad_pred)
| [
"calc_scores.calc_scores",
"numpy.array",
"numpy.vstack",
"sklearn.svm.SVR",
"sklearn.preprocessing.MinMaxScaler",
"numpy.save",
"numpy.load"
] | [((608, 662), 'numpy.load', 'np.load', (["('result_iemocap_sd/result_' + speech + '.npy')"], {}), "('result_iemocap_sd/result_' + speech + '.npy')\n", (615, 662), True, 'import numpy as np\n'), ((669, 721), 'numpy.load', 'np.load', (["('result_iemocap_sd/result_' + text + '.npy')"], {}), "('result_iemocap_sd/result_' + text + '.npy')\n", (676, 721), True, 'import numpy as np\n'), ((869, 934), 'numpy.load', 'np.load', (['"""/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy"""'], {}), "('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')\n", (876, 934), True, 'import numpy as np\n'), ((963, 1015), 'numpy.array', 'np.array', (['[1674, 3427, 5086, 5093, 5096, 5104, 7821]'], {}), '([1674, 3427, 5086, 5093, 5096, 5104, 7821])\n', (971, 1015), True, 'import numpy as np\n'), ((1366, 1415), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'C': '(200)', 'gamma': '(0.1)', 'epsilon': '(0.01)'}), "(kernel='rbf', C=200, gamma=0.1, epsilon=0.01)\n", (1369, 1415), False, 'from sklearn.svm import SVR\n'), ((1624, 1663), 'calc_scores.calc_scores', 'calc_scores', (['valence_pred', 'y_test[:, 0]'], {}), '(valence_pred, y_test[:, 0])\n', (1635, 1663), False, 'from calc_scores import calc_scores\n'), ((1871, 1910), 'calc_scores.calc_scores', 'calc_scores', (['arousal_pred', 'y_test[:, 1]'], {}), '(arousal_pred, y_test[:, 1])\n', (1882, 1910), False, 'from calc_scores import calc_scores\n'), ((2124, 2165), 'calc_scores.calc_scores', 'calc_scores', (['dominance_pred', 'y_test[:, 2]'], {}), '(dominance_pred, y_test[:, 2])\n', (2135, 2165), False, 'from calc_scores import calc_scores\n'), ((2405, 2432), 'numpy.save', 'np.save', (['filename', 'vad_pred'], {}), '(filename, vad_pred)\n', (2412, 2432), True, 'import numpy as np\n'), ((1150, 1185), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (1162, 1185), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler\n'), ((2284, 
2339), 'numpy.vstack', 'np.vstack', (['(valence_pred, arousal_pred, dominance_pred)'], {}), '((valence_pred, arousal_pred, dominance_pred))\n', (2293, 2339), True, 'import numpy as np\n'), ((1466, 1506), 'numpy.array', 'np.array', (['[ter_dev[:, 0], ser_dev[:, 0]]'], {}), '([ter_dev[:, 0], ser_dev[:, 0]])\n', (1474, 1506), True, 'import numpy as np\n'), ((1557, 1599), 'numpy.array', 'np.array', (['[ter_test[:, 0], ser_test[:, 0]]'], {}), '([ter_test[:, 0], ser_test[:, 0]])\n', (1565, 1599), True, 'import numpy as np\n'), ((1713, 1753), 'numpy.array', 'np.array', (['[ter_dev[:, 1], ser_dev[:, 1]]'], {}), '([ter_dev[:, 1], ser_dev[:, 1]])\n', (1721, 1753), True, 'import numpy as np\n'), ((1804, 1846), 'numpy.array', 'np.array', (['[ter_test[:, 1], ser_test[:, 1]]'], {}), '([ter_test[:, 1], ser_test[:, 1]])\n', (1812, 1846), True, 'import numpy as np\n'), ((1964, 2004), 'numpy.array', 'np.array', (['[ter_dev[:, 2], ser_dev[:, 2]]'], {}), '([ter_dev[:, 2], ser_dev[:, 2]])\n', (1972, 2004), True, 'import numpy as np\n'), ((2057, 2099), 'numpy.array', 'np.array', (['[ter_test[:, 2], ser_test[:, 2]]'], {}), '([ter_test[:, 2], ser_test[:, 2]])\n', (2065, 2099), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from mpl_toolkits.mplot3d import Axes3D
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import xarray as xr
def flight_path(
ax,
ds,
vmin=0,
vmax=3,
cmap_steps=12,
cmap="jet",
transform=ccrs.PlateCarree(),
add_cmap=True,
mark_end_points=True,
**kwargs
):
lc = colored_line_plot(
ax,
ds.LON_OXTS,
ds.LAT_OXTS,
ds.ALT_OXTS / 1000,
vmin=vmin,
vmax=vmax,
cmap_steps=cmap_steps,
cmap=cmap,
transform=transform,
**kwargs
)
if add_cmap:
cbar = plt.colorbar(lc, ax=ax)
cbar.set_label("Altitude (km)")
ax.set_xlabel(xr.plot.utils.label_from_attrs(ds.LON_OXTS))
ax.set_ylabel(xr.plot.utils.label_from_attrs(ds.LAT_OXTS))
# Add marker for start and end positions
if mark_end_points:
ax.text(ds.LON_OXTS[0], ds.LAT_OXTS[0], "S", transform=ccrs.PlateCarree())
ax.text(ds.LON_OXTS[-1], ds.LAT_OXTS[-1], "F", transform=ccrs.PlateCarree())
return
def flight_path_3d(ds, ax=None):
if ax == None:
ax = plt.gca(projection="3d")
ax.plot(ds.LON_OXTS, ds.LAT_OXTS, zs=0, zdir="z", color="grey")
ax.plot(
ds.LON_OXTS, ds.ALT_OXTS, zs=ds.LAT_OXTS.max() + 0.1, zdir="y", color="grey"
)
ax.plot(
ds.LAT_OXTS, ds.ALT_OXTS, zs=ds.LON_OXTS.max() + 0.1, zdir="x", color="grey"
)
ax.plot(ds.LON_OXTS, ds.LAT_OXTS, ds.ALT_OXTS, color="k", lw=3, zorder=10)
def colored_line_plot(
ax, x, y, color, vmin=None, vmax=None, cmap="gray", cmap_steps=0, **kwargs
):
"""Add a multicolored line to an existing plot
Args:
x (np.array): The x points of the plot
y (np.array): The y points of the plot
color (np.array): The color of the line at the xy points
vmin (scalar, optional): The minimum of the colorscale. Defaults to the
minimum of the color array.
vmax (scalar, optional): The maximum of the colorscale. Defaults to the
maximum of the color array.
cmap (str, optional): Colormap to plot. Default is grey.
cmap_steps (int, optional): Number of discrete steps in the colorscale.
Defaults is zero for a continuous colorscale.
kwargs: Other keyword arguments to pass to LineCollection
returns:
matplotlib.collections.LineCollection:
The plotted LineCollection. Required as argument to
:py:func:`matplotlib.pyplot.colorbar`
"""
# Set the color scalings
if vmin is None:
vmin = color.min()
if vmax is None:
vmax = color.max()
# Break the xy points up in to line segments
segments = np.array([(x[:-1].values, x[1:].values), (y[:-1].values, y[1:].values)])
segments = np.transpose(segments, axes=(2, 1, 0))
# Create discretised colourmap
cmap = plt.get_cmap(cmap)
if cmap_steps != 0:
cmap = mpl.colors.ListedColormap(
[cmap(n / (cmap_steps - 1)) for n in range(cmap_steps)]
)
# Collect the line segments
lc = LineCollection(segments, cmap=cmap, norm=plt.Normalize(vmin, vmax), **kwargs)
# Set the line color to the specified array
lc.set_array(color)
# Add the colored line to the existing plot
ax.add_collection(lc)
# autoscale if limits haven't already been set so that the linecollection
# is visible
if ax.get_xlim() == (0, 1) and ax.get_ylim() == (0, 1):
ax.autoscale()
return lc
def add_land_and_sea(ax):
# Shade land and sea
ax.imshow(
np.tile(
np.array([[cfeature.COLORS["water"] * 255]], dtype=np.uint8), [2, 2, 1]
),
origin="upper",
transform=ccrs.PlateCarree(),
extent=[-180, 180, -180, 180],
)
ax.add_feature(
cfeature.NaturalEarthFeature(
"physical",
"land",
"10m",
edgecolor="black",
facecolor=cfeature.COLORS["land"],
)
)
ax.gridlines(linestyle="--", color="black", draw_labels=True)
return
def add_flight_position(ax, dataset):
ax.plot(
dataset.LON_OXTS,
dataset.LAT_OXTS,
marker=(2, 0, -float(dataset.HDG_OXTS)),
color="red",
)
ax.plot(
dataset.LON_OXTS,
dataset.LAT_OXTS,
marker=(3, 0, -float(dataset.HDG_OXTS)),
color="red",
)
return
| [
"matplotlib.pyplot.gca",
"matplotlib.pyplot.Normalize",
"matplotlib.pyplot.colorbar",
"cartopy.crs.PlateCarree",
"numpy.array",
"numpy.transpose",
"xarray.plot.utils.label_from_attrs",
"cartopy.feature.NaturalEarthFeature",
"matplotlib.pyplot.get_cmap"
] | [((356, 374), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (372, 374), True, 'import cartopy.crs as ccrs\n'), ((2829, 2901), 'numpy.array', 'np.array', (['[(x[:-1].values, x[1:].values), (y[:-1].values, y[1:].values)]'], {}), '([(x[:-1].values, x[1:].values), (y[:-1].values, y[1:].values)])\n', (2837, 2901), True, 'import numpy as np\n'), ((2917, 2955), 'numpy.transpose', 'np.transpose', (['segments'], {'axes': '(2, 1, 0)'}), '(segments, axes=(2, 1, 0))\n', (2929, 2955), True, 'import numpy as np\n'), ((3003, 3021), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (3015, 3021), True, 'import matplotlib.pyplot as plt\n'), ((721, 744), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['lc'], {'ax': 'ax'}), '(lc, ax=ax)\n', (733, 744), True, 'import matplotlib.pyplot as plt\n'), ((804, 847), 'xarray.plot.utils.label_from_attrs', 'xr.plot.utils.label_from_attrs', (['ds.LON_OXTS'], {}), '(ds.LON_OXTS)\n', (834, 847), True, 'import xarray as xr\n'), ((867, 910), 'xarray.plot.utils.label_from_attrs', 'xr.plot.utils.label_from_attrs', (['ds.LAT_OXTS'], {}), '(ds.LAT_OXTS)\n', (897, 910), True, 'import xarray as xr\n'), ((1229, 1253), 'matplotlib.pyplot.gca', 'plt.gca', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1236, 1253), True, 'import matplotlib.pyplot as plt\n'), ((3944, 4057), 'cartopy.feature.NaturalEarthFeature', 'cfeature.NaturalEarthFeature', (['"""physical"""', '"""land"""', '"""10m"""'], {'edgecolor': '"""black"""', 'facecolor': "cfeature.COLORS['land']"}), "('physical', 'land', '10m', edgecolor='black',\n facecolor=cfeature.COLORS['land'])\n", (3972, 4057), True, 'import cartopy.feature as cfeature\n'), ((3249, 3274), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (3262, 3274), True, 'import matplotlib.pyplot as plt\n'), ((3725, 3785), 'numpy.array', 'np.array', (["[[cfeature.COLORS['water'] * 255]]"], {'dtype': 'np.uint8'}), "([[cfeature.COLORS['water'] * 
255]], dtype=np.uint8)\n", (3733, 3785), True, 'import numpy as np\n'), ((3850, 3868), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3866, 3868), True, 'import cartopy.crs as ccrs\n'), ((1045, 1063), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1061, 1063), True, 'import cartopy.crs as ccrs\n'), ((1130, 1148), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1146, 1148), True, 'import cartopy.crs as ccrs\n')] |
# Training on non Augmented data only
# Works only on combination of eGEMAPS and MSF features. Can be altered if necessary
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
import sys
sys.path.insert(1, '../')
import json
import pandas as pd
import numpy as np
from sklearn import preprocessing
from tqdm import tqdm
from STFE import Models, DataPreparer
from tensorflow.keras import optimizers
from pickle import dump
emotion_key = {
'anger' : 0,
'sad' : 1
}
def get_df(gmap_dir, msf_dir, txt_dir, class_name):
print('Processing', gmap_dir, msf_dir, txt_dir)
gmap_list = [x for x in os.listdir(gmap_dir)]
msf_list = [x for x in os.listdir(msf_dir)]
common_list = list(set(gmap_list).intersection(msf_list))
txt_list = [x for x in os.listdir(txt_dir)]
common_list = list(set(common_list).intersection(txt_list))
gmap = [gmap_dir + x for x in common_list]
msf = [msf_dir + x for x in common_list]
txt = [txt_dir + x for x in common_list]
# print(gmap[0])
gmap_len = len(pd.read_csv(gmap[0], sep = ';', header = None, skiprows = [0]).columns)
text_len = 768
# print(pd.read_csv(gmap[0], sep = ';', header = None, skiprows = [0]).columns)
# print(gmap_len)
# print('# 223', gmap_len, text_len)
full = pd.DataFrame(columns = list(range(223 + gmap_len + text_len - 2)) + ['class'])
# print(len(full.columns))
i = 0
for f in tqdm(common_list):
gmap_curr = gmap_dir + f
msf_curr = msf_dir + f
txt_curr = txt_dir + f
gmap_df = pd.read_csv(gmap_curr, sep = ';', header = None, skiprows = [0], index_col = False)
gmap_df.drop([0, 1], axis = 1, inplace = True)
# print('gmap', list(gmap_df.loc[0]))
msf_df = pd.read_csv(msf_curr, sep = ',', header = None).mean(axis = 0)
# print('msf', msf_df)
txt_df = pd.read_csv(txt_curr)
# print(txt_df)
# print('txt', list(txt_df['0']))
# print(msf_df.mean(axis = 0), msf_df.shape)
# print(len(list(gmap_df.loc[0])), len(list(msf_df)), len(list(txt_df['0'])))
full.loc[i] = list(gmap_df.loc[0]) + list(msf_df) + list(txt_df['0']) + [class_name]
# break
i += 1
return full
def wrapper(gmap_grand, msf_grand, txt_grand, set_name, aud_noise, txt_noise):
gmap_anger = gmap_grand + set_name + '_anger_' + aud_noise + '/'
msf_anger = msf_grand + set_name + '_anger_' + aud_noise + '/msf/'
txt_anger = txt_grand + set_name + '_anger_' + txt_noise + '/'
gmap_sad = gmap_grand + set_name + '_sad_' + aud_noise + '/'
msf_sad = msf_grand + set_name + '_sad_' + aud_noise + '/msf/'
txt_sad = txt_grand + set_name + '_sad_' + txt_noise + '/'
df_anger = get_df(gmap_anger, msf_anger, txt_anger, 'anger')
df_sad = get_df(gmap_sad, msf_sad, txt_sad, 'sad')
df_csv = pd.concat([df_anger, df_sad], ignore_index = True)
df_csv = df_csv.sample(frac = 1)
return df_csv
def splitXY(df):
X = df.drop('class', axis = 1)
y = [emotion_key[x] for x in df['class']]
def f(x):
return np.float(x)
f2 = np.vectorize(f)
X = np.array(X)
# print(X)
y = np.array(y)
# print(X.shape, y.shape)
# print(X.dtype, y.dtype)
return f2(X), y
gmap_grand = '/home/amrgaballah/Desktop/exp_1/Human_enh_eGEMAPS/'
msf_grand = '/home/amrgaballah/Desktop/exp_1/Human_enh_MSF/'
txt_grand = '/home/amrgaballah/Desktop/exp_1/Human_enh_tran/MELD_human_enh_BERTtext_feat/'
noise = 'clean'
noise2 = ['_0dB']
#noise_types = [] for using only clean data
noise_types = ['airport']
aud_noise = noise
txt_noise = noise
name = 'text_' + txt_noise + '_aud_' + aud_noise + '_training'
# collecting clean data
train_csv = wrapper(gmap_grand, msf_grand, txt_grand, 'train', noise, noise)
test_csv = wrapper(gmap_grand, msf_grand, txt_grand, 'test', noise, noise)
dev_csv = wrapper(gmap_grand, msf_grand, txt_grand, 'dev', noise, noise)
X_train, y_train = splitXY(train_csv)
X_test, y_test = splitXY(test_csv)
X_dev, y_dev = splitXY(dev_csv)
print(X_train.shape)
# print("Saved files")
# loading noisy data for training
for n in noise2:
for noise_type in noise_types:
n1 = noise_type + n
print('\n', n1, '\n')
dset_csv = wrapper(gmap_grand, msf_grand, txt_grand, 'train', n1, n1)
dx_train, dy_train = splitXY(dset_csv)
# print(dx_train.shape)
X_train = np.concatenate([X_train, dx_train], axis = 0)
y_train = np.concatenate([y_train, dy_train], axis = 0)
# print(X_train.shape)
scaler = preprocessing.StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_dev = scaler.transform(X_dev)
print("X_train shape", X_train.shape)
# TODO: Change scaler name
dump(scaler, open('/home/amrgaballah/Desktop/model/c_ab_scaler.pkl', 'wb'))
print("Done scaling data")
class_names = ['anger', 'sad']
cws = [1, 1.8]
class_weights = {}
for i in range(len(class_names)):
class_weights[i] = cws[i]
sgd = optimizers.SGD(lr=1e-4, decay=1e-6, momentum=0.95, nesterov=False)
nnn = Models.NormalNeuralNetwork(0.5, class_names, (X_train.shape[1], ))
nnn.model_compile(sgd)
nnn.model_fit(class_weights, 850, X_train, y_train, X_dev, y_dev, fig_name = name)
nnn_metrics = nnn.get_metrics(X_test, y_test)
print(nnn_metrics)
model = nnn.get_model()
# TODO change model name
model.save('/home/amrgaballah/Desktop/model/clean_ab.h5', include_optimizer = False)
| [
"sys.path.insert",
"numpy.float",
"os.listdir",
"pandas.read_csv",
"tqdm.tqdm",
"tensorflow.keras.optimizers.SGD",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.vectorize",
"numpy.concatenate",
"pandas.concat",
"STFE.Models.NormalNeuralNetwork"
] | [((216, 241), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../"""'], {}), "(1, '../')\n", (231, 241), False, 'import sys\n'), ((4613, 4643), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (4641, 4643), False, 'from sklearn import preprocessing\n'), ((5075, 5144), 'tensorflow.keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.0001)', 'decay': '(1e-06)', 'momentum': '(0.95)', 'nesterov': '(False)'}), '(lr=0.0001, decay=1e-06, momentum=0.95, nesterov=False)\n', (5089, 5144), False, 'from tensorflow.keras import optimizers\n'), ((5149, 5214), 'STFE.Models.NormalNeuralNetwork', 'Models.NormalNeuralNetwork', (['(0.5)', 'class_names', '(X_train.shape[1],)'], {}), '(0.5, class_names, (X_train.shape[1],))\n', (5175, 5214), False, 'from STFE import Models, DataPreparer\n'), ((1443, 1460), 'tqdm.tqdm', 'tqdm', (['common_list'], {}), '(common_list)\n', (1447, 1460), False, 'from tqdm import tqdm\n'), ((2891, 2939), 'pandas.concat', 'pd.concat', (['[df_anger, df_sad]'], {'ignore_index': '(True)'}), '([df_anger, df_sad], ignore_index=True)\n', (2900, 2939), True, 'import pandas as pd\n'), ((3153, 3168), 'numpy.vectorize', 'np.vectorize', (['f'], {}), '(f)\n', (3165, 3168), True, 'import numpy as np\n'), ((3182, 3193), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3190, 3193), True, 'import numpy as np\n'), ((3217, 3228), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3225, 3228), True, 'import numpy as np\n'), ((1576, 1651), 'pandas.read_csv', 'pd.read_csv', (['gmap_curr'], {'sep': '""";"""', 'header': 'None', 'skiprows': '[0]', 'index_col': '(False)'}), "(gmap_curr, sep=';', header=None, skiprows=[0], index_col=False)\n", (1587, 1651), True, 'import pandas as pd\n'), ((1889, 1910), 'pandas.read_csv', 'pd.read_csv', (['txt_curr'], {}), '(txt_curr)\n', (1900, 1910), True, 'import pandas as pd\n'), ((3132, 3143), 'numpy.float', 'np.float', (['x'], {}), '(x)\n', (3140, 3143), True, 'import numpy as np\n'), ((4460, 
4503), 'numpy.concatenate', 'np.concatenate', (['[X_train, dx_train]'], {'axis': '(0)'}), '([X_train, dx_train], axis=0)\n', (4474, 4503), True, 'import numpy as np\n'), ((4524, 4567), 'numpy.concatenate', 'np.concatenate', (['[y_train, dy_train]'], {'axis': '(0)'}), '([y_train, dy_train], axis=0)\n', (4538, 4567), True, 'import numpy as np\n'), ((636, 656), 'os.listdir', 'os.listdir', (['gmap_dir'], {}), '(gmap_dir)\n', (646, 656), False, 'import os\n'), ((685, 704), 'os.listdir', 'os.listdir', (['msf_dir'], {}), '(msf_dir)\n', (695, 704), False, 'import os\n'), ((795, 814), 'os.listdir', 'os.listdir', (['txt_dir'], {}), '(txt_dir)\n', (805, 814), False, 'import os\n'), ((1060, 1116), 'pandas.read_csv', 'pd.read_csv', (['gmap[0]'], {'sep': '""";"""', 'header': 'None', 'skiprows': '[0]'}), "(gmap[0], sep=';', header=None, skiprows=[0])\n", (1071, 1116), True, 'import pandas as pd\n'), ((1778, 1821), 'pandas.read_csv', 'pd.read_csv', (['msf_curr'], {'sep': '""","""', 'header': 'None'}), "(msf_curr, sep=',', header=None)\n", (1789, 1821), True, 'import pandas as pd\n')] |
"""
This module contains the Detector class, which defines the detector and it's response to a plasma. This
modules also contains classes and methods related to the response function (S-curves, absorprion from Si,
transmission through Be, and charge-sharing).
"""
from __future__ import division
import os
import cPickle as pickle
import multiprocessing as mp
import numpy as np
import scipy as sp
import scipy.io
import scipy.special
import pilatus.configuration as cfg
# Other modules in this library
import geometry
import plasma
import physical_profiles as prof
# Get module path
MODULE_PATH = os.path.dirname(__file__)
LoS_FNAME = os.path.join(MODULE_PATH, 'mesxr_mst_los.csv')
# Load the mu coefficients for filter transmission calculations
MU_DICT = pickle.load(open(os.path.join(MODULE_PATH, 'filter_mu.pkl'), 'rb'))
# Common material densities - g/cm^3
DENS = {'Si':2.330,
'Be':1.848,
'mylar':1.39}
# Constants for the ME-SXR detector - cm
BE_THICK = 0.0025
SI_THICK = 0.045
MYLAR_THICK = 0.0012 + 0.0050
#MYLAR_THICK = 0.0012 # Old configuration
# Detector geometry constants
NUM_PIX_Y = 195
NUM_PIX_X = 487
PIXEL_DIM = 0.172 # Pixel dimension in mm
DET_DIST = 30.5 # Distance from detector screen to pinhole, in mm
AREA_PIX = PIXEL_DIM*PIXEL_DIM # Area of the pixel, in mm^2
AREA_PIN = 4 # Area of the pinhole opening, in mm^2
mm_2_to_m_2 = 1.0e-6 # Convert mm^2 to m^2, i.e. for etendue
def take_data_mp(pixel):
    """
    Module-level wrapper around Pixel.get_counts so the call can be dispatched
    through multiprocessing.Pool.map (which requires a picklable top-level callable).
    """
    return pixel.get_counts()
# --------------------------------------- Detector Classes --------------------------------------- #
class Detector(object):
    """
    A detector is a collection of pixels exposed to a plasma.
    This model fundamentally assumes the plasma is symmetric in the toroidal
    (second index) direction.

    Parameters:
        pixel_array [array of Pixel] the pixels making up the detector
        etendue [float or ndarray] etendue factor(s) applied to the measured counts
        num_cores [int] number of worker processes used by take_data
    """
    def __init__(self, pixel_array, etendue=1.012e-11, num_cores=16):
        self.pixel_array = pixel_array
        self.etendue = etendue
        self.cores = num_cores
        # Get the los evaluation points: (x, y) for every ell sample of every pixel chord
        self.eval_points = []
        for pix in self.pixel_array:
            for ell in pix.ell_array:
                self.eval_points.append(pix.los.get_xy(ell))
    def look_at_plasma(self, plasma_obj):
        """Point every pixel at plasma_obj and precompute the line-emission lookup profiles."""
        # Preload emission at the evaluation points
        self.plasma_in_view = plasma_obj
        self.plasma_in_view.preload_points(self.eval_points)
        for x_index in range(self.pixel_array.shape[0]):
            self.pixel_array[x_index].look_at_plasma(self.plasma_in_view)
        # Set up the relative line emission profiles
        if self.plasma_in_view.include_excit:
            coords = plasma.sunflower_points(180)
            self.plasma_in_view.preload_points(coords)
            # Break the calculation into two parts since line_emission seems to crash with 100+ points
            coords1 = coords[:90]
            coords2 = coords[90:]
            amp_set1, en_set, mz_set = self.plasma_in_view.line_emission(coords1)
            amp_set2, en_set, mz_set = self.plasma_in_view.line_emission(coords2)
            amp_set = []
            for index in range(len(amp_set1)):
                amp_set.append(np.vstack([amp_set1[index], amp_set2[index]]))
            # Build a profile for each response function; Ec == 0 marks inactive pixels
            Ec_map = [pix.response.scurve.E_c for pix in self.pixel_array]
            thresholds = np.array([x for x in np.unique(Ec_map) if x !=0])
            self.observed_line_profiles = {}
            for Ec in thresholds:
                # The first pixel using this threshold supplies the representative response
                index = np.where(Ec_map == Ec)[0][0]
                self.observed_line_profiles[Ec] = prof.Observed_Lines(coords, amp_set, en_set, self.pixel_array[index].response)
            # Preload the eval points
            for Ec in thresholds:
                self.observed_line_profiles[Ec].build_lookup_table(self.eval_points)
            # Add the profiles to the pixels
            for x_index in range(self.pixel_array.shape[0]):
                Ec = Ec_map[x_index]
                if Ec != 0:
                    self.pixel_array[x_index].include_lines(self.observed_line_profiles[Ec])
    def change_pixel(self, x_index, y_index, new_pixel):
        # Swap in a replacement pixel and point it at the current plasma.
        # NOTE(review): this indexes pixel_array with two indices while the rest of
        # this class iterates it as 1-D -- confirm the intended array shape.
        new_pixel.look_at_plasma(self.plasma_in_view)
        self.pixel_array[x_index, y_index] = new_pixel
    def take_data(self, exp_time):
        """
        Given a plasma take data along all lines of sight - now with multiprocessing.
        The input exp_time is the exposure time in ms. If only one core is selected,
        the multiprocessing library will not be used at all.
        """
        if self.cores == 1:
            measurements = [pix.get_counts() for pix in self.pixel_array]
        else:
            pool = mp.Pool(self.cores)
            measurements = pool.map(take_data_mp, self.pixel_array)
            pool.close()
        # Replicate the 1-D chord measurements across the Y direction, then scale by
        # etendue and exposure time and zero out dead inter-chip regions.
        return np.vstack([measurements]*NUM_PIX_Y).T*self.etendue*exp_time*get_boundary_mask()
class Pilatus3_Detector(Detector):
    """
    Build a default PILATUS 3 detector based on spatial calibration results. For now
    we will consider only a single row of pixels.
    The "include" array allows the user to specify which pixels are active for a given
    computation. All inactive pixels will measure zero and require no computational
    resources.

    Parameters:
        Ec_map, Ew_map [arrays] per-column S-curve threshold centers and widths (eV)
        cs_slope, cs_en [arrays] charge-sharing slope samples and the energies they are defined at
        num_cores [int] worker count forwarded to the Detector base class
        mlyar_thick [float] mylar filter thickness in cm (parameter-name typo kept
            for backward compatibility with existing callers)
    """
    def __init__(self, Ec_map, Ew_map, cs_slope=np.zeros(10), cs_en=np.linspace(0, 30000, num=10), num_cores=16, mlyar_thick=MYLAR_THICK):
        self.Ec_map = Ec_map
        self.Ew_map = Ew_map
        # Containers to hold various pixel properties
        self.los_set = np.empty(NUM_PIX_X, dtype=object)
        self.response_set = np.empty(NUM_PIX_X, dtype=object)
        pixel_array = np.empty(NUM_PIX_X, dtype=object)
        etendue = np.zeros([NUM_PIX_X, NUM_PIX_Y])
        # Build the filter responses, which are the same for all pixels
        self.impact_params = self.get_impact_params()
        for x_index in xrange(NUM_PIX_X):
            # Only need to calculate the plasma emission in 1D
            self.los_set[x_index] = geometry.line_of_sight(*self.impact_params[x_index])
            # Effective Si depth grows as 1/cos(theta) with incidence angle at the row center
            self.response_set[x_index] = Pilatus_Response(self.Ec_map[x_index], self.Ew_map[x_index],
                Si_thickness=SI_THICK/np.cos(self.theta_i(x_index, NUM_PIX_Y/2.)),
                Be_thickness=BE_THICK, mylar_thickness=mlyar_thick, cs_slope=cs_slope, cs_en=cs_en)
            pixel_array[x_index] = Pixel(self.los_set[x_index], self.response_set[x_index])
            for y_index in xrange(NUM_PIX_Y):
                # Must evaluate the etendue factors in 2D
                etendue[x_index, y_index] = self.get_etendue(x_index, y_index)
        super(Pilatus3_Detector, self).__init__(pixel_array, etendue=etendue, num_cores=num_cores)
    def get_impact_params(self):
        """
        Get the impact parameters (p, zeta) for every chord from the line-of-sight CSV file.
        """
        impact_params = np.loadtxt(LoS_FNAME, delimiter=',')
        impact_phi = impact_params[0, :]
        impact_p = impact_params[1, :]
        return zip(impact_p, impact_phi)
    def theta_i(self, x_index, y_index):
        """
        Returns the incidence angle used to calculate the etendue of each pixel.
        """
        # Angle subtended between the pinhole axis and the pixel center offset
        return np.arctan2(PIXEL_DIM*np.sqrt((x_index + 0.5 - NUM_PIX_X/2.)**2 + (y_index + 0.5 - NUM_PIX_Y/2.)**2), DET_DIST)
    def get_etendue(self, x_index, y_index):
        """
        Return the appropriate etendue for the specified pixel.
        """
        # Standard pinhole-camera etendue with a cos^4 obliquity falloff
        return AREA_PIX*AREA_PIN*mm_2_to_m_2/(4*np.pi*DET_DIST**2)*np.cos(self.theta_i(x_index, y_index))**4
class Pixel(object):
    """
    A pixel is defined by a single line-of-sight and a detector response function. Given a plasma
    it will produce a synthetic measurement.

    Parameters:
        los [line_of_sight] geometry object providing get_xy(ell) and intercept_with_circle(r)
        response [Response] spectral response function of this pixel
        en_lower, en_upper, delta_en [float] energy integration bounds and step
        delta_ell [float] step size for the sample points along the line of sight
    """
    def __init__(self, los, response, en_lower=1000, en_upper=15000, delta_en=100, delta_ell=0.05):
        self.los = los
        self.response = response
        self.plasma_in_view = None
        self.en_lower = en_lower
        self.en_upper = en_upper
        self.delta_en = delta_en
        # Generate the ell array - points along the line of sight to integrate over;
        # the chord is truncated where it exits a circle of radius 0.52.
        self.delta_ell = delta_ell
        self.ell_max = self.los.intercept_with_circle(0.52)
        self.ell_array = np.arange(-self.ell_max, self.ell_max, self.delta_ell)
    def look_at_plasma(self, plasma_obj):
        """
        Designate the Plasma object that the Pixel is looking at and create the observed
        profile of the plasma emissivity spectrum convolved with this pixel's response.
        """
        self.plasma_in_view = plasma_obj
        self.observed_emiss = prof.Observed_Emissivity(self.plasma_in_view.continuum, self.response)
        # Containers so that emission can be stored later
        self.emiss_cont = np.zeros(len(self.ell_array))
        self.emiss_lines = np.zeros(len(self.ell_array))
    def include_lines(self, observed_prof):
        """
        Pulls out line emissions at the evaluation points and stores them for later. This is a more
        lightweight approach than trying to save the emission profile to every pixel object,
        especially when multiprocessing is considered.
        """
        self.emiss_lines = np.zeros(len(self.ell_array))
        for index, ell in enumerate(self.ell_array):
            self.emiss_lines[index] = observed_prof(*self.los.get_xy(ell))
    def get_counts(self):
        """
        Return the line-integrated counts for this pixel, or None (with a warning message)
        when no plasma has been designated via look_at_plasma.
        """
        # BUGFIX: identity check with `is None` instead of `== None`; equality can be
        # hijacked by __eq__ overloads (or broadcast element-wise by array-like objects).
        if self.plasma_in_view is None:
            print('No plasma is currently in view.')
            return None
        for index, ell in enumerate(self.ell_array):
            self.emiss_cont[index] = self.observed_emiss.integrate(*self.los.get_xy(ell), en_lower=self.en_lower,
                en_upper=self.en_upper, delta_en=self.delta_en)
        return np.trapz(self.emiss_cont + self.emiss_lines, x=self.ell_array)
# -------------------------------------- Response Classes -------------------------------------- #
class Response(object):
    """
    Base class for spectral response objects. Subclasses override value(); see the
    S_Curve, Filter, and Absorber classes for concrete implementations. Responses
    support '+' and '*' composition, which build Composite_Response objects.
    """
    def __init__(self, label, en_lims=[0,30000], units='eV'):
        self.label = label
        self.lims = en_lims
        self.units = units
    def __str__(self):
        return self.label
    def __call__(self, en):
        return self.evaluate(en)
    def __add__(self, other_resp):
        return Composite_Response(self, other_resp, operation='add')
    def __mul__(self, other_resp):
        return Composite_Response(self, other_resp, operation='multiply')
    def __radd__(self, other):
        # Makes sum(...) over responses work by absorbing the integer seed
        return self
    def __rmul__(self, other):
        return self
    def domain(self, en):
        # True when every supplied energy lies within the configured limits
        above_lower = np.amin(en) >= self.lims[0]
        below_upper = np.amax(en) <= self.lims[1]
        return above_lower and below_upper
    def value(self, en):
        # Trivial flat response; subclasses supply the physics
        return 1
    def evaluate(self, en):
        # Out-of-domain energies produce zero response
        return self.value(en) if self.domain(en) else np.zeros(en.shape)
class Composite_Response(Response):
    """
    This class permits multiple response objects to be combined together into a single
    composite, either additively ('add') or multiplicatively ('multiply').

    Parameters:
        resp1, resp2 [Response] the component responses
        operation [str] 'add' or 'multiply'
    Raises:
        ValueError when the units differ or the operation is unrecognized.
    """
    def __init__(self, resp1, resp2, operation='multiply'):
        self.resp1 = resp1
        self.resp2 = resp2
        self.operation = operation
        # Set the limits to be the intersection of the two supplied profiles.
        # BUGFIX: the original computed the union (amin of lower bounds, amax of upper
        # bounds), contradicting its own comment; the composite is only defined where
        # BOTH components are defined.
        en_lims = [np.amax([self.resp1.lims[0], self.resp2.lims[0]]),
                   np.amin([self.resp1.lims[1], self.resp2.lims[1]])]
        # Check for unit compatibility
        if self.resp1.units == self.resp2.units:
            units = resp1.units
        else:
            raise ValueError('Profiles have incompatible units.')
        # Generate the appropriate label
        if self.operation == 'add':
            label = '{0:} + {1:}'.format(str(self.resp1), str(self.resp2))
        elif self.operation == 'multiply':
            label = '({0:} x {1:})'.format(str(self.resp1), str(self.resp2))
        else:
            raise ValueError('Operation not recognized.')
        super(Composite_Response, self).__init__(label, en_lims=en_lims, units=units)
    def value(self, en):
        if self.operation == 'add':
            return self.resp1(en) + self.resp2(en)
        elif self.operation == 'multiply':
            return self.resp1(en) * self.resp2(en)
        else:
            raise ValueError('Operation not recognized.')
class S_Curve(Response):
    """
    Pixel threshold (S-curve) response: a complementary-error-function step
    centered at E_c with width E_w.
    """
    def __init__(self, E_c, E_w, en_lims=[0,30000], units='eV'):
        self.E_c = E_c
        self.E_w = E_w
        super(S_Curve, self).__init__('S-curve', en_lims=en_lims, units=units)
    def value(self, en):
        # Gaussian-broadened step expressed via erfc
        scaled = (en - self.E_c)/(np.sqrt(2)*self.E_w)
        return 0.5*sp.special.erfc(-1.*scaled)
class Filter(Response):
    """
    Transmission through a solid filter layer (i.e. Be or mylar), computed from the
    tabulated mass-attenuation coefficients in MU_DICT and the density table DENS.
    """
    def __init__(self, element, thickness, en_lims=[0,30000], units='eV'):
        self.element = element
        self.thickness = thickness
        self.density = DENS[element]
        self.mu = MU_DICT['mu'][self.element]
        self.en_mu = MU_DICT['energy']
        super(Filter, self).__init__('{0:} Filter'.format(self.element), en_lims=en_lims, units=units)
    def value(self, en):
        # Beer-Lambert attenuation exp(-mu*rho*t) with mu interpolated in energy
        mu_at_en = np.interp(en, self.en_mu, self.mu)
        return np.exp(-mu_at_en*self.density*self.thickness)
class Absorber(Response):
    """
    Absorption in a solid layer (i.e. an Si photodiode): one minus the Beer-Lambert
    transmission through the layer.
    """
    def __init__(self, element, thickness, en_lims=[0,30000], units='eV'):
        self.element = element
        self.thickness = thickness
        self.density = DENS[element]
        self.mu = MU_DICT['mu'][self.element]
        self.en_mu = MU_DICT['energy']
        super(Absorber, self).__init__('{0:} Filter'.format(self.element), en_lims=en_lims, units=units)
    def value(self, en):
        # Fraction stopped in the layer = 1 - transmission
        mu_at_en = np.interp(en, self.en_mu, self.mu)
        return 1.0 - np.exp(-mu_at_en*self.density*self.thickness)
class Charge_Sharing(Response):
    """
    Implements charge sharing in the detector response. The slope k_cs is specified
    at a set of sample energies and interpolated in between. Setting the slope to
    zero everywhere within the energy range disables the effect entirely.
    """
    def __init__(self, slope_data, slope_energy, E_c, en_lims=[0,30000], units='eV'):
        self.data = slope_data
        self.energy = slope_energy
        self.E_c = E_c
        super(Charge_Sharing, self).__init__('Charge-sharing', en_lims=en_lims, units=units)
    def value(self, en):
        # Linear correction about the threshold center E_c with interpolated slope
        local_slope = np.interp(en, self.energy, self.data)
        return 1 + local_slope*(en - self.E_c)
class Pilatus_Response(Response):
    """
    Total response for the Pilatus 3 detector: a one-line wrapper composing the Si
    absorber, Be and mylar filters, threshold S-curve, and charge-sharing correction.
    Convenient because the filter and Si layer properties are generally not mutable.
    """
    def __init__(self, E_c, E_w, Si_thickness=SI_THICK, Be_thickness=BE_THICK, mylar_thickness=MYLAR_THICK, en_lims=[0,30000],
            cs_slope=np.zeros(10), cs_en=np.linspace(0, 30000, num=10)):
        # Individual physical layers of the response chain
        self.scurve = S_Curve(E_c, E_w, en_lims=en_lims, units='eV')
        self.charge_share = Charge_Sharing(cs_slope, cs_en, E_c, en_lims=en_lims)
        self.Si = Absorber('Si', Si_thickness, en_lims=en_lims, units='eV')
        self.Be = Filter('Be', Be_thickness, en_lims=en_lims, units='eV')
        self.mylar = Filter('mylar', mylar_thickness, en_lims=en_lims, units='eV')
        # Response multiplication builds a Composite_Response chain
        self.total = self.Si * self.Be * self.scurve * self.charge_share * self.mylar
        super(Pilatus_Response, self).__init__('Total Response', en_lims=en_lims, units='eV')
    def value(self, en):
        # Delegate to the composed chain
        return self.total(en)
# -------------------------------------- Analysis ------------------------------------- #
def prfoile_by_threshold(measurements, Ec_map, center_only=True):
    """
    Sort data by threshold according to the supplied threshold map. Works for real
    data but is included here for use with synthetic data.
    (Function name typo is preserved for backward compatibility with callers;
    center_only is accepted but currently unused.)

    Parameters:
        measurements [2-D ndarray] per-pixel counts; summed over axis 1 first
        Ec_map [sequence] threshold assigned to each x index; 0 marks inactive columns
    Output:
        data [dict] threshold -> ndarray of summed measurements
        x_indices [dict] threshold -> ndarray of contributing x indices
    """
    summed = np.sum(measurements, axis=1)
    data = {}
    x_indices = {}
    for Ec in np.unique(Ec_map):
        data[Ec] = []
        x_indices[Ec] = []
    for x_index, Ec in enumerate(Ec_map):
        if Ec != 0:
            data[Ec].append(summed[x_index])
            x_indices[Ec].append(x_index)
    for Ec in data.keys():
        data[Ec] = np.array(data[Ec])
        x_indices[Ec] = np.array(x_indices[Ec])
    return data, x_indices
def get_boundary_mask():
    """
    Returns a boundary mask which simulates the regions between pixels with no
    response: 1 for live pixels, 0 where cfg.get_chip_coords flags a dead zone.
    """
    mask = np.ones([NUM_PIX_X, NUM_PIX_Y])
    for x_pix in range(NUM_PIX_X):
        for y_pix in range(NUM_PIX_Y):
            # A first chip coordinate of -1 marks an inter-chip gap
            if cfg.get_chip_coords(x_pix, y_pix)[0] == -1:
                mask[x_pix, y_pix] = 0
    return mask
"numpy.sqrt",
"numpy.array",
"physical_profiles.Observed_Lines",
"physical_profiles.Observed_Emissivity",
"numpy.arange",
"numpy.where",
"numpy.linspace",
"numpy.empty",
"numpy.vstack",
"numpy.trapz",
"numpy.ones",
"numpy.amin",
"geometry.line_of_sight",
"os.path.dirname",
"numpy.interp"... | [((600, 625), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (615, 625), False, 'import os\n'), ((638, 684), 'os.path.join', 'os.path.join', (['MODULE_PATH', '"""mesxr_mst_los.csv"""'], {}), "(MODULE_PATH, 'mesxr_mst_los.csv')\n", (650, 684), False, 'import os\n'), ((16815, 16843), 'numpy.sum', 'np.sum', (['measurements'], {'axis': '(1)'}), '(measurements, axis=1)\n', (16821, 16843), True, 'import numpy as np\n'), ((17290, 17321), 'numpy.ones', 'np.ones', (['[NUM_PIX_X, NUM_PIX_Y]'], {}), '([NUM_PIX_X, NUM_PIX_Y])\n', (17297, 17321), True, 'import numpy as np\n'), ((777, 819), 'os.path.join', 'os.path.join', (['MODULE_PATH', '"""filter_mu.pkl"""'], {}), "(MODULE_PATH, 'filter_mu.pkl')\n", (789, 819), False, 'import os\n'), ((5488, 5500), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (5496, 5500), True, 'import numpy as np\n'), ((5508, 5537), 'numpy.linspace', 'np.linspace', (['(0)', '(30000)'], {'num': '(10)'}), '(0, 30000, num=10)\n', (5519, 5537), True, 'import numpy as np\n'), ((5722, 5755), 'numpy.empty', 'np.empty', (['NUM_PIX_X'], {'dtype': 'object'}), '(NUM_PIX_X, dtype=object)\n', (5730, 5755), True, 'import numpy as np\n'), ((5784, 5817), 'numpy.empty', 'np.empty', (['NUM_PIX_X'], {'dtype': 'object'}), '(NUM_PIX_X, dtype=object)\n', (5792, 5817), True, 'import numpy as np\n'), ((5840, 5873), 'numpy.empty', 'np.empty', (['NUM_PIX_X'], {'dtype': 'object'}), '(NUM_PIX_X, dtype=object)\n', (5848, 5873), True, 'import numpy as np\n'), ((5892, 5924), 'numpy.zeros', 'np.zeros', (['[NUM_PIX_X, NUM_PIX_Y]'], {}), '([NUM_PIX_X, NUM_PIX_Y])\n', (5900, 5924), True, 'import numpy as np\n'), ((7170, 7206), 'numpy.loadtxt', 'np.loadtxt', (['LoS_FNAME'], {'delimiter': '""","""'}), "(LoS_FNAME, delimiter=',')\n", (7180, 7206), True, 'import numpy as np\n'), ((8552, 8606), 'numpy.arange', 'np.arange', (['(-self.ell_max)', 'self.ell_max', 'self.delta_ell'], {}), '(-self.ell_max, self.ell_max, self.delta_ell)\n', (8561, 
8606), True, 'import numpy as np\n'), ((8956, 9026), 'physical_profiles.Observed_Emissivity', 'prof.Observed_Emissivity', (['self.plasma_in_view.continuum', 'self.response'], {}), '(self.plasma_in_view.continuum, self.response)\n', (8980, 9026), True, 'import physical_profiles as prof\n'), ((10173, 10235), 'numpy.trapz', 'np.trapz', (['(self.emiss_cont + self.emiss_lines)'], {'x': 'self.ell_array'}), '(self.emiss_cont + self.emiss_lines, x=self.ell_array)\n', (10181, 10235), True, 'import numpy as np\n'), ((15682, 15694), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (15690, 15694), True, 'import numpy as np\n'), ((15702, 15731), 'numpy.linspace', 'np.linspace', (['(0)', '(30000)'], {'num': '(10)'}), '(0, 30000, num=10)\n', (15713, 15731), True, 'import numpy as np\n'), ((17049, 17067), 'numpy.array', 'np.array', (['data[Ec]'], {}), '(data[Ec])\n', (17057, 17067), True, 'import numpy as np\n'), ((17092, 17115), 'numpy.array', 'np.array', (['x_indices[Ec]'], {}), '(x_indices[Ec])\n', (17100, 17115), True, 'import numpy as np\n'), ((2772, 2800), 'plasma.sunflower_points', 'plasma.sunflower_points', (['(180)'], {}), '(180)\n', (2795, 2800), False, 'import plasma\n'), ((4857, 4876), 'multiprocessing.Pool', 'mp.Pool', (['self.cores'], {}), '(self.cores)\n', (4864, 4876), True, 'import multiprocessing as mp\n'), ((6201, 6253), 'geometry.line_of_sight', 'geometry.line_of_sight', (['*self.impact_params[x_index]'], {}), '(*self.impact_params[x_index])\n', (6223, 6253), False, 'import geometry\n'), ((11358, 11376), 'numpy.zeros', 'np.zeros', (['en.shape'], {}), '(en.shape)\n', (11366, 11376), True, 'import numpy as np\n'), ((11775, 11824), 'numpy.amin', 'np.amin', (['[self.resp1.lims[0], self.resp2.lims[0]]'], {}), '([self.resp1.lims[0], self.resp2.lims[0]])\n', (11782, 11824), True, 'import numpy as np\n'), ((11826, 11875), 'numpy.amax', 'np.amax', (['[self.resp1.lims[1], self.resp2.lims[1]]'], {}), '([self.resp1.lims[1], self.resp2.lims[1]])\n', (11833, 11875), 
True, 'import numpy as np\n'), ((16731, 16748), 'numpy.unique', 'np.unique', (['Ec_map'], {}), '(Ec_map)\n', (16740, 16748), True, 'import numpy as np\n'), ((3746, 3824), 'physical_profiles.Observed_Lines', 'prof.Observed_Lines', (['coords', 'amp_set', 'en_set', 'self.pixel_array[index].response'], {}), '(coords, amp_set, en_set, self.pixel_array[index].response)\n', (3765, 3824), True, 'import physical_profiles as prof\n'), ((7515, 7607), 'numpy.sqrt', 'np.sqrt', (['((x_index + 0.5 - NUM_PIX_X / 2.0) ** 2 + (y_index + 0.5 - NUM_PIX_Y / 2.0) **\n 2)'], {}), '((x_index + 0.5 - NUM_PIX_X / 2.0) ** 2 + (y_index + 0.5 - NUM_PIX_Y /\n 2.0) ** 2)\n', (7522, 7607), True, 'import numpy as np\n'), ((11131, 11142), 'numpy.amin', 'np.amin', (['en'], {}), '(en)\n', (11138, 11142), True, 'import numpy as np\n'), ((11163, 11174), 'numpy.amax', 'np.amax', (['en'], {}), '(en)\n', (11170, 11174), True, 'import numpy as np\n'), ((15188, 15225), 'numpy.interp', 'np.interp', (['en', 'self.energy', 'self.data'], {}), '(en, self.energy, self.data)\n', (15197, 15225), True, 'import numpy as np\n'), ((3296, 3341), 'numpy.vstack', 'np.vstack', (['[amp_set1[index], amp_set2[index]]'], {}), '([amp_set1[index], amp_set2[index]])\n', (3305, 3341), True, 'import numpy as np\n'), ((17403, 17428), 'pilatus.configuration.get_chip_coords', 'cfg.get_chip_coords', (['x', 'y'], {}), '(x, y)\n', (17422, 17428), True, 'import pilatus.configuration as cfg\n'), ((3522, 3539), 'numpy.unique', 'np.unique', (['Ec_map'], {}), '(Ec_map)\n', (3531, 3539), True, 'import numpy as np\n'), ((3667, 3689), 'numpy.where', 'np.where', (['(Ec_map == Ec)'], {}), '(Ec_map == Ec)\n', (3675, 3689), True, 'import numpy as np\n'), ((4986, 5023), 'numpy.vstack', 'np.vstack', (['([measurements] * NUM_PIX_Y)'], {}), '([measurements] * NUM_PIX_Y)\n', (4995, 5023), True, 'import numpy as np\n'), ((13192, 13202), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13199, 13202), True, 'import numpy as np\n'), ((13795, 13829), 
'numpy.interp', 'np.interp', (['en', 'self.en_mu', 'self.mu'], {}), '(en, self.en_mu, self.mu)\n', (13804, 13829), True, 'import numpy as np\n'), ((14447, 14481), 'numpy.interp', 'np.interp', (['en', 'self.en_mu', 'self.mu'], {}), '(en, self.en_mu, self.mu)\n', (14456, 14481), True, 'import numpy as np\n')] |
import os
import sys
root_path = os.path.dirname(os.path.dirname(os.getcwd()))
if root_path not in sys.path: sys.path.append(root_path)
import numpy as np
import tensorflow as tf
from DeepSparseCoding.tf1x.data.dataset import Dataset
import DeepSparseCoding.tf1x.utils.data_processing as dp
import DeepSparseCoding.tf1x.analysis.analysis_picker as ap
import response_contour_analysis.utils.model_handling as model_handling
import response_contour_analysis.utils.dataset_generation as iso_data
import response_contour_analysis.utils.histogram_analysis as hist_funcs
def get_dsc_activations_cell(analyzer, images, neuron, batch_size=10, activation_operation=None):
    """
    Returns the activations from a model for given input images
    Parameters:
        analyzer [DSC analyzer object] an object from the DeepSparseCoding library
        images [np.ndarray] of size NumImages x W x H
        neuron [int or vector of ints] that points to the neuron index
        batch_size [int] specifying the batch size to use for getting the neuron activations
        activation_operation [function] to be used if the DSC model has a unique function handle
            for getting neuron activations (e.g. in the case of lca_subspace)
    Output:
        activations [np.ndarray] vector of length len(neuron)
    """
    flatten = analyzer.model.params.vectorize_data
    model_inputs = dp.reshape_data(images[..., None], flatten=flatten)[0]
    all_activations = analyzer.compute_activations(model_inputs, batch_size, activation_operation)
    return all_activations[:, neuron]
def load_analyzer(params):
    """
    Construct the analyzer named by params.model_type, run its setup, and load the
    saved analysis identified by params.save_info.
    """
    loaded = ap.get_analyzer(params.model_type)
    loaded.setup(params)
    loaded.model.setup(loaded.model_params)
    loaded.load_analysis(save_info=params.save_info)
    return loaded
class lca_512_vh_params(object):
    """Analysis configuration: 512-unit LCA (sparse coding) model, van Hateren data."""
    def __init__(self):
        self.model_type = "lca"
        self.model_name = "lca_512_vh"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "Sparse Coding 512"
        self.version = "0.0"
        self.save_info = "analysis_train_kurakin_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class lca_768_vh_params(object):
    """Analysis configuration: 768-unit LCA (sparse coding) model, van Hateren data."""
    def __init__(self):
        self.model_type = "lca"
        self.model_name = "lca_768_vh"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "Sparse Coding 768"
        self.version = "0.0"
        self.save_info = "analysis_train_kurakin_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class lca_1024_vh_params(object):
    """Analysis configuration: 1024-unit LCA (sparse coding) model, van Hateren data."""
    def __init__(self):
        self.model_type = "lca"
        self.model_name = "lca_1024_vh"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "Sparse Coding 1024"
        self.version = "0.0"
        self.save_info = "analysis_train_kurakin_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class lca_2560_vh_params(object):
    """Analysis configuration: 2560-unit LCA (sparse coding) model, van Hateren data."""
    def __init__(self):
        self.model_type = "lca"
        self.model_name = "lca_2560_vh"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "Sparse Coding 2560"
        self.version = "0.0"
        self.save_info = "analysis_train_kurakin_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class ae_768_vh_params(object):
    """Analysis configuration: 768-unit ReLU autoencoder, van Hateren data."""
    def __init__(self):
        self.model_type = "ae"
        self.model_name = "ae_768_vh"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "ReLU Autoencoder 768"
        self.version = "1.0"
        self.save_info = "analysis_train_kurakin_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class sae_768_vh_params(object):
    """Analysis configuration: 768-unit sparse autoencoder, van Hateren data."""
    def __init__(self):
        self.model_type = "sae"
        self.model_name = "sae_768_vh"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "Sparse Autoencoder 768"
        self.version = "1.0"
        self.save_info = "analysis_train_kurakin_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class rica_768_vh_params(object):
    """Analysis configuration: 768-unit linear autoencoder (RICA), van Hateren data."""
    def __init__(self):
        self.model_type = "rica"
        self.model_name = "rica_768_vh"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "Linear Autoencoder 768"
        self.version = "0.0"
        self.save_info = "analysis_train_kurakin_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class lca_768_mnist_params(object):
    """Analysis configuration: 768-unit LCA (sparse coding) model, MNIST data."""
    def __init__(self):
        self.model_type = "lca"
        self.model_name = "lca_768_mnist"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "Sparse Coding 768"
        self.version = "0.0"
        self.save_info = "analysis_train_kurakin_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class lca_1536_mnist_params(object):
    """Analysis configuration: 1536-unit LCA (sparse coding) model, MNIST data."""
    def __init__(self):
        self.model_type = "lca"
        self.model_name = "lca_1536_mnist"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "Sparse Coding 1536"
        self.version = "0.0"
        self.save_info = "analysis_test_carlini_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class ae_768_mnist_params(object):
    """Analysis configuration: 768-unit ReLU autoencoder, MNIST data."""
    def __init__(self):
        self.model_type = "ae"
        self.model_name = "ae_768_mnist"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "ReLU Autoencoder 768"
        self.version = "0.0"
        self.save_info = "analysis_test_carlini_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class sae_768_mnist_params(object):
    """Analysis configuration: 768-unit sparse autoencoder, MNIST data."""
    def __init__(self):
        self.model_type = "sae"
        self.model_name = "sae_768_mnist"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "Sparse Autoencoder 768"
        self.version = "0.0"
        self.save_info = "analysis_test_carlini_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class rica_768_mnist_params(object):
    """Analysis configuration: 768-unit linear autoencoder (RICA), MNIST data."""
    def __init__(self):
        self.model_type = "rica"
        self.model_name = "rica_768_mnist"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "Linear Autoencoder 768"
        self.version = "0.0"
        self.save_info = "analysis_train_kurakin_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class ae_deep_mnist_params(object):
    """Analysis configuration: deep ReLU autoencoder, MNIST data."""
    def __init__(self):
        self.model_type = "ae"
        self.model_name = "ae_deep_mnist"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "ReLU Autoencoder 768"
        self.version = "0.0"
        self.save_info = "analysis_test_carlini_targeted"
        self.use_group_activations = False
        self.overwrite_analysis_log = False
class lca_subspace_params(object):
    """Analysis configuration: subspace LCA (SSC) model, van Hateren data.
    The only configuration in this file that uses grouped activations."""
    def __init__(self):
        self.model_type = "lca_subspace"
        self.model_name = "lca_subspace_vh"
        self.model_dir = root_path + '/Projects/' + self.model_name
        self.display_name = "SSC"
        self.version = "3.0"
        self.save_info = "analysis_train_kurakin_targeted"
        self.use_group_activations = True
        self.overwrite_analysis_log = False
if __name__ == "__main__":
    # Script entry point: build iso-response contour datasets for the configured
    # model(s), compute normalized activations on random-orthogonal and
    # comparison-vector planes, fit iso-response curvatures, and save everything
    # under each analyzer's savefiles directory.
    print("Loading models...")
    cont_analysis = {}
    cont_analysis['min_angle'] = 15
    cont_analysis['batch_size'] = 100
    cont_analysis['vh_image_scale'] = 31.773287 # Mean of the l2 norm of the training set
    cont_analysis['comparison_method'] = 'closest' # rand or closest
    cont_analysis['num_neurons'] = 100 # How many neurons to plot
    cont_analysis['num_comparisons'] = 300 # How many planes to construct (None is all of them)
    cont_analysis['x_range'] = [-2.0, 2.0]
    cont_analysis['y_range'] = [-2.0, 2.0]
    cont_analysis['num_images'] = int(30**2)
    cont_analysis['params_list'] = [lca_512_vh_params()]
    #cont_analysis['params_list'] = [lca_768_vh_params()]
    #cont_analysis['params_list'] = [lca_1024_vh_params()]
    #cont_analysis['params_list'] = [lca_2560_vh_params()]
    #cont_analysis['iso_save_name'] = "iso_curvature_xrange1.3_yrange-2.2_"
    #cont_analysis['iso_save_name'] = "iso_curvature_ryan_"
    cont_analysis['iso_save_name'] = "rescaled_closecomp_"
    #cont_analysis['iso_save_name'] = ''
    # NOTE(review): 'save_root' and 'params' are not defined until inside the loops
    # below, so this first np.savez raises NameError as written -- it likely needs
    # to move after save_root/params exist (or be built from a params_list entry).
    np.savez(save_root+'iso_params_'+cont_analysis['iso_save_name']+params.save_info+".npz",
        data=cont_analysis)
    analyzer_list = [load_analyzer(params) for params in cont_analysis['params_list']]
    for analyzer, params in zip(analyzer_list, cont_analysis['params_list']):
        print(analyzer.analysis_params.display_name)
        print("Computing the iso-response vectors...")
        # Randomly select which neurons to analyze and collect their basis functions
        cont_analysis['target_neuron_ids'] = iso_data.get_rand_target_neuron_ids(
            cont_analysis['num_neurons'], analyzer.model.params.num_neurons)
        neuron_weights = [analyzer.bf_stats["basis_functions"][idx]
            for idx in range(len(analyzer.bf_stats["basis_functions"]))]
        analyzer.target_neuron_ids = cont_analysis['target_neuron_ids']
        # Random orthogonal plane vectors for the control condition
        rand_outputs = iso_data.compute_rand_vectors(
            neuron_weights,
            cont_analysis["num_comparisons"])
        analyzer.rand_target_vectors = rand_outputs[0]
        analyzer.rand_orth_vectors = rand_outputs[1]
        # Comparison vectors chosen from other neurons' weights (method: rand/closest)
        comp_outputs = iso_data.compute_comp_vectors(
            neuron_weights,
            cont_analysis['target_neuron_ids'],
            cont_analysis['min_angle'],
            cont_analysis['num_comparisons'],
            cont_analysis['comparison_method'])
        analyzer.comparison_neuron_ids = comp_outputs[0]
        analyzer.comparison_target_vectors = comp_outputs[1]
        analyzer.comparison_vectors = comp_outputs[2]
        analyzer.target_vectors = analyzer.comparison_target_vectors
        assert len(analyzer.comparison_neuron_ids) == cont_analysis['num_neurons'], (
            "Incorrect number of comparison vectors")
        for comparison_ids_list in analyzer.comparison_neuron_ids:
            assert len(comparison_ids_list) >= cont_analysis['num_comparisons'], (
                "Not enough comparison vectors.")
        # Save the vector sets so they can be reloaded without recomputation
        key_list = ["target_neuron_ids", "comparison_neuron_ids", "target_vectors",
            "rand_orth_vectors", "comparison_vectors"]
        val_list = [analyzer.target_neuron_ids, analyzer.comparison_neuron_ids, analyzer.target_vectors,
            analyzer.rand_orth_vectors, analyzer.comparison_vectors]
        iso_vectors = dict(zip(key_list, val_list))
        np.savez(analyzer.analysis_out_dir+"savefiles/iso_vectors_"+cont_analysis['iso_save_name']+params.save_info+".npz",
            data=iso_vectors)
        # Run both plane types: random-orthogonal control and neuron-comparison planes
        for use_rand_orth_vects, rand_str in zip([True, False], ["rand", "comparison"]):
            print("Generating "+rand_str+" dataset...")
            comp_vects = analyzer.rand_orth_vectors if use_rand_orth_vects else analyzer.comparison_vectors
            contour_dataset, datapoints = iso_data.get_contour_dataset(
                analyzer.target_vectors, comp_vects, cont_analysis['x_range'], cont_analysis['y_range'],
                cont_analysis['num_images'], cont_analysis['vh_image_scale'])
            print("Computing network activations for "+rand_str+" dataset...")
            # lca_subspace exposes a dedicated op for group activations
            if params.use_group_activations:
                activation_operation = analyzer.model.get_reshaped_group_activity
            else:
                activation_operation = None
            activation_function_kwargs = {
                'activation_operation': activation_operation,
                'batch_size': cont_analysis['batch_size']
            }
            activations = model_handling.get_normalized_activations(
                analyzer,
                cont_analysis["target_neuron_ids"],
                datapoints,
                get_dsc_activations_cell,
                activation_function_kwargs)
            save_root=analyzer.analysis_out_dir+'savefiles/'
            if use_rand_orth_vects:
                np.savez(save_root+'iso_rand_activations_'+cont_analysis['iso_save_name']+params.save_info+'.npz',
                    data=activations)
                np.savez(save_root+'iso_rand_contour_dataset_'+cont_analysis['iso_save_name']+params.save_info+'.npz',
                    data=contour_dataset)
            else:
                np.savez(save_root+'iso_comp_activations_'+cont_analysis['iso_save_name']+params.save_info+'.npz',
                    data=activations)
                np.savez(save_root+'iso_comp_contour_dataset_'+cont_analysis['iso_save_name']+params.save_info+'.npz',
                    data=contour_dataset)
        cont_analysis['comparison_neuron_ids'] = analyzer.comparison_neuron_ids
        cont_analysis['contour_dataset'] = contour_dataset
        # NOTE(review): cont_analysis['activations'] and cont_analysis['target_act']
        # are never assigned above (the activations are only written to .npz files),
        # so this call raises KeyError as written -- probably intended to use the
        # 'activations' variable from the loop and a configured target activation.
        curvatures, fits = hist_funcs.iso_response_curvature_poly_fits(
            cont_analysis['activations'],
            target_act=cont_analysis['target_act'],
            measure_upper_right=False
        )
        cont_analysis['curvatures'] = np.stack(np.stack(curvatures, axis=0), axis=0)
        np.savez(save_root+'group_iso_vectors_'+cont_analysis['iso_save_name']+params.save_info+'.npz',
            data=cont_analysis)
| [
"numpy.savez",
"response_contour_analysis.utils.dataset_generation.get_contour_dataset",
"response_contour_analysis.utils.dataset_generation.compute_comp_vectors",
"response_contour_analysis.utils.model_handling.get_normalized_activations",
"response_contour_analysis.utils.histogram_analysis.iso_response_cu... | [((110, 136), 'sys.path.append', 'sys.path.append', (['root_path'], {}), '(root_path)\n', (125, 136), False, 'import sys\n'), ((1571, 1605), 'DeepSparseCoding.tf1x.analysis.analysis_picker.get_analyzer', 'ap.get_analyzer', (['params.model_type'], {}), '(params.model_type)\n', (1586, 1605), True, 'import DeepSparseCoding.tf1x.analysis.analysis_picker as ap\n'), ((8526, 8646), 'numpy.savez', 'np.savez', (["(save_root + 'iso_params_' + cont_analysis['iso_save_name'] + params.\n save_info + '.npz')"], {'data': 'cont_analysis'}), "(save_root + 'iso_params_' + cont_analysis['iso_save_name'] +\n params.save_info + '.npz', data=cont_analysis)\n", (8534, 8646), True, 'import numpy as np\n'), ((66, 77), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (75, 77), False, 'import os\n'), ((1320, 1405), 'DeepSparseCoding.tf1x.utils.data_processing.reshape_data', 'dp.reshape_data', (['images[..., None]'], {'flatten': 'analyzer.model.params.vectorize_data'}), '(images[..., None], flatten=analyzer.model.params.vectorize_data\n )\n', (1335, 1405), True, 'import DeepSparseCoding.tf1x.utils.data_processing as dp\n'), ((8956, 9061), 'response_contour_analysis.utils.dataset_generation.get_rand_target_neuron_ids', 'iso_data.get_rand_target_neuron_ids', (["cont_analysis['num_neurons']", 'analyzer.model.params.num_neurons'], {}), "(cont_analysis['num_neurons'], analyzer.\n model.params.num_neurons)\n", (8991, 9061), True, 'import response_contour_analysis.utils.dataset_generation as iso_data\n'), ((9296, 9375), 'response_contour_analysis.utils.dataset_generation.compute_rand_vectors', 'iso_data.compute_rand_vectors', (['neuron_weights', "cont_analysis['num_comparisons']"], {}), "(neuron_weights, cont_analysis['num_comparisons'])\n", (9325, 9375), True, 'import response_contour_analysis.utils.dataset_generation as iso_data\n'), ((9522, 9711), 'response_contour_analysis.utils.dataset_generation.compute_comp_vectors', 
'iso_data.compute_comp_vectors', (['neuron_weights', "cont_analysis['target_neuron_ids']", "cont_analysis['min_angle']", "cont_analysis['num_comparisons']", "cont_analysis['comparison_method']"], {}), "(neuron_weights, cont_analysis[\n 'target_neuron_ids'], cont_analysis['min_angle'], cont_analysis[\n 'num_comparisons'], cont_analysis['comparison_method'])\n", (9551, 9711), True, 'import response_contour_analysis.utils.dataset_generation as iso_data\n'), ((10677, 10827), 'numpy.savez', 'np.savez', (["(analyzer.analysis_out_dir + 'savefiles/iso_vectors_' + cont_analysis[\n 'iso_save_name'] + params.save_info + '.npz')"], {'data': 'iso_vectors'}), "(analyzer.analysis_out_dir + 'savefiles/iso_vectors_' +\n cont_analysis['iso_save_name'] + params.save_info + '.npz', data=\n iso_vectors)\n", (10685, 10827), True, 'import numpy as np\n'), ((11108, 11296), 'response_contour_analysis.utils.dataset_generation.get_contour_dataset', 'iso_data.get_contour_dataset', (['analyzer.target_vectors', 'comp_vects', "cont_analysis['x_range']", "cont_analysis['y_range']", "cont_analysis['num_images']", "cont_analysis['vh_image_scale']"], {}), "(analyzer.target_vectors, comp_vects,\n cont_analysis['x_range'], cont_analysis['y_range'], cont_analysis[\n 'num_images'], cont_analysis['vh_image_scale'])\n", (11136, 11296), True, 'import response_contour_analysis.utils.dataset_generation as iso_data\n'), ((11768, 11930), 'response_contour_analysis.utils.model_handling.get_normalized_activations', 'model_handling.get_normalized_activations', (['analyzer', "cont_analysis['target_neuron_ids']", 'datapoints', 'get_dsc_activations_cell', 'activation_function_kwargs'], {}), "(analyzer, cont_analysis[\n 'target_neuron_ids'], datapoints, get_dsc_activations_cell,\n activation_function_kwargs)\n", (11809, 11930), True, 'import response_contour_analysis.utils.model_handling as model_handling\n'), ((12100, 12229), 'numpy.savez', 'np.savez', (["(save_root + 'iso_rand_activations_' + 
cont_analysis['iso_save_name'] +\n params.save_info + '.npz')"], {'data': 'activations'}), "(save_root + 'iso_rand_activations_' + cont_analysis[\n 'iso_save_name'] + params.save_info + '.npz', data=activations)\n", (12108, 12229), True, 'import numpy as np\n'), ((12249, 12386), 'numpy.savez', 'np.savez', (["(save_root + 'iso_rand_contour_dataset_' + cont_analysis['iso_save_name'] +\n params.save_info + '.npz')"], {'data': 'contour_dataset'}), "(save_root + 'iso_rand_contour_dataset_' + cont_analysis[\n 'iso_save_name'] + params.save_info + '.npz', data=contour_dataset)\n", (12257, 12386), True, 'import numpy as np\n'), ((12422, 12551), 'numpy.savez', 'np.savez', (["(save_root + 'iso_comp_activations_' + cont_analysis['iso_save_name'] +\n params.save_info + '.npz')"], {'data': 'activations'}), "(save_root + 'iso_comp_activations_' + cont_analysis[\n 'iso_save_name'] + params.save_info + '.npz', data=activations)\n", (12430, 12551), True, 'import numpy as np\n'), ((12571, 12708), 'numpy.savez', 'np.savez', (["(save_root + 'iso_comp_contour_dataset_' + cont_analysis['iso_save_name'] +\n params.save_info + '.npz')"], {'data': 'contour_dataset'}), "(save_root + 'iso_comp_contour_dataset_' + cont_analysis[\n 'iso_save_name'] + params.save_info + '.npz', data=contour_dataset)\n", (12579, 12708), True, 'import numpy as np\n'), ((12898, 13042), 'response_contour_analysis.utils.histogram_analysis.iso_response_curvature_poly_fits', 'hist_funcs.iso_response_curvature_poly_fits', (["cont_analysis['activations']"], {'target_act': "cont_analysis['target_act']", 'measure_upper_right': '(False)'}), "(cont_analysis['activations'],\n target_act=cont_analysis['target_act'], measure_upper_right=False)\n", (12941, 13042), True, 'import response_contour_analysis.utils.histogram_analysis as hist_funcs\n'), ((13208, 13335), 'numpy.savez', 'np.savez', (["(save_root + 'group_iso_vectors_' + cont_analysis['iso_save_name'] + params\n .save_info + '.npz')"], {'data': 'cont_analysis'}), 
"(save_root + 'group_iso_vectors_' + cont_analysis['iso_save_name'] +\n params.save_info + '.npz', data=cont_analysis)\n", (13216, 13335), True, 'import numpy as np\n'), ((13156, 13184), 'numpy.stack', 'np.stack', (['curvatures'], {'axis': '(0)'}), '(curvatures, axis=0)\n', (13164, 13184), True, 'import numpy as np\n')] |
"""This module contains a class, Model, that represents the agent-based models.
It interfaces the C++ simulation code with the rest of the software application
which is in Python. """
from collections import OrderedDict
import numpy as np
from common.parameters import CORE_RADIUS, FIELD_SIZE, N_GLOBAL_STATS
from common.tools import counts2slices
try:
import c_code as c_model
except ImportError:
from model.weave_compile import weave_compile
weave_compile()
import c_code as c_model
class Model(object):
    """A model for a system of self-propelled particles.

    The simulation itself runs in the compiled extension ``c_code``
    (imported as ``c_model``); this class converts user-facing parameters,
    holds the particle state as numpy arrays, and accumulates per-step
    global statistics across calls to ``tick``.

    Methods:
        gen_internal_params: Convert user-input parameters to internal format
            convenient for the C++ program.
        init_particles_state: Initialize the state of a system.
        tick: Run the simulation for a given number of steps.
        set: Set the model to a given state, used when loading saved genes or
            sessions.
    Attributes:
        state (tuple): Positions and directions of all particles.
        global_stats (numpy.ndarray): Global properties (group angular
            momentum, segregation, etc.) of the system over time.
        user_params (dict): The parameters of the system as seen and written
            by users.
        internal_params (OrderedDict): The parameters of the system in an
            internal format.
    """
    def __init__(self, params, scale_factor=1., periodic_boundary=False):
        """
        Parameters:
            params (dict): The parameters of the model as seen by the users.
            scale_factor (float): Specifies the scale of the simulation. The
                default arena size is 10x10. For a scale factor (zoom) of 2.0,
                the arena size is effectively 20x20.
            periodic_boundary (bool): Whether to use periodic boundary
                conditions.
        """
        # Empty (N_GLOBAL_STATS x 0) array; each tick() hstacks one column
        # of statistics per simulated step.
        self.global_stats = np.zeros([N_GLOBAL_STATS, 0])
        self.user_params = params
        # Bind ``tick`` once, at construction time, to either the
        # fixed-boundary or the periodic-boundary implementation.
        if periodic_boundary is False:
            self.tick = self.fb_tick
        else:
            self.tick = self.pb_tick
        # Generate internal parameters from user input
        self.internal_params = self.gen_internal_params(scale_factor)
        self.periodic_boundary = periodic_boundary
    @property
    def state(self):
        """Return a tuple of four arrays, representing the state of the
        particle system: positions and directions. """
        return self.pos_x, self.pos_y, self.dir_x, self.dir_y
    def gen_internal_params(self, scale_factor):
        """Format user-provided parameters into internal parameters accepted
        by the C++ program. """
        # Name shortening for frequent variables
        uprm = self.user_params
        # FORMATTING INTERNAL PARAMS
        # Particle core diameter (2 * core radius)
        r0_x_2 = CORE_RADIUS * 2
        # Actual (square) field size; larger scale_factor = smaller arena.
        xlim = ylim = FIELD_SIZE / float(scale_factor)
        # Number of particles: 'Cell Density' is a fraction of the arena's
        # hexagonal close-packing capacity (sqrt(3)/6 factor).
        max_num_particles = (np.sqrt(3)/6.) * (xlim*ylim) / (CORE_RADIUS**2)
        nop = int(uprm['Cell Density'] * max_num_particles)
        # Force and alignment interaction radii, in units of the core radius
        r1 = uprm['Interaction Range'] * CORE_RADIUS
        ra = uprm['Alignment Range'] * CORE_RADIUS
        # Copied through unchanged
        iner_coef = uprm['Angular Inertia']
        f0 = uprm['Interaction Force']
        fa = uprm['Alignment Force']
        noise_coef = uprm['Noise Intensity']
        # Convert to numpy arrays for the C++ side
        v0 = np.array(uprm["Velocity"])
        beta = np.array(uprm["Adhesion"])
        # Pinned flag per species: 0 (none) or 1 (fixed in place)
        pinned = np.array([0 if x == "none" else 1
                           for x in uprm["Pinned Cells"]]).astype(np.int32)
        # Convert ratios to particle counts per species; only the first two
        # ratios are taken from the user, the remainder fills species 3.
        ratios = uprm["Cell Ratio"][:2]
        cumu_ratios = [sum(ratios[:i+1]) for i in range(len(ratios))] + [1.0]
        cumu_n_per_species = [0] + [int(nop*each) for each in cumu_ratios]
        n_per_species = np.array(
            [cumu_n_per_species[i] - cumu_n_per_species[i - 1]
             for i in range(1, len(cumu_n_per_species))]).astype(np.int32)
        # Gradient converted from polar (direction in units of pi, intensity)
        # to cartesian components.
        grad_x = np.array(
            [np.cos(d*np.pi) * i for d, i in zip(
                uprm["Gradient Direction"], uprm["Gradient Intensity"])])
        grad_y = np.array(
            [np.sin(d*np.pi) * i for d, i in zip(
                uprm["Gradient Direction"], uprm["Gradient Intensity"])])
        # Effective number of particles (excluding pinned species)
        eff_nop = float(
            np.sum([n_per_species[i] for i in range(len(n_per_species))
                    if pinned[i] == 0]))
        # Ordered names; the C++ entry points expect exactly this argument
        # order (see fb_tick/pb_tick below).
        names = [
            'nop', 'eff_nop', 'xlim', 'ylim',
            'r0_x_2', 'r1', 'ra', # radii
            'iner_coef', 'f0', 'fa', 'noise_coef', # strengths
            'v0', 'pinned', 'n_per_species', 'beta', 'grad_x', 'grad_y' # arr
        ]
        # NOTE(review): ``locals()[x]`` inside a list comprehension only sees
        # the function's locals under Python 2; in Python 3 the comprehension
        # has its own scope and this raises KeyError. The weave-based compile
        # fallback suggests this module targets Python 2 -- confirm before
        # porting.
        internal_params = OrderedDict([(x, locals()[x]) for x in names])
        return internal_params
    def init_particles_state(self):
        """Initialize a system of particles given params."""
        iprm, uprm = self.internal_params, self.user_params
        nop, xlim, ylim = iprm['nop'], iprm['xlim'], iprm['ylim']
        n_per_species = iprm['n_per_species']
        # Positions uniform over the arena
        pos_x = np.random.random(nop) * xlim
        pos_y = np.random.random(nop) * ylim
        # Directions: unit vectors at uniformly random angles
        theta = np.random.random(nop) * 2 * np.pi
        dir_x = np.cos(theta)
        dir_y = np.sin(theta)
        # Re-place pinned species according to their requested shape;
        # counts2slices yields one index slice per species.
        types = counts2slices(n_per_species)
        for type_, pinned_shape in zip(types, uprm["Pinned Cells"]):
            if pinned_shape != "none":
                n = type_.stop - type_.start  # length of the slice
                # Pinned cells do not move: zero out their directions.
                dir_x[type_] = 0.
                dir_y[type_] = 0.
                if pinned_shape == "ring":
                    # Radius = 30%-40% of field size
                    radius = xlim * (0.3+np.random.random(n)*0.1)
                    theta = 2 * np.random.random(n) * np.pi
                    pos_x[type_] = xlim/2. + np.cos(theta)*radius
                    pos_y[type_] = ylim/2. + np.sin(theta)*radius
                elif pinned_shape == "circle":
                    # 0-20% of field size; power(2, n) biases samples toward
                    # the rim, filling the disc roughly uniformly by area.
                    radius = xlim * 0.2 * np.random.power(2, n)
                    theta = 2 * np.random.random(n) * np.pi
                    pos_x[type_] = xlim/2. + np.cos(theta)*radius
                    pos_y[type_] = ylim/2. + np.sin(theta)*radius
                elif pinned_shape == "square":
                    # radius ~ 40-50% of field size.  Each cell is placed
                    # near one of the four walls: ``side`` picks the wall,
                    # ``coord`` the position along it, ``depth`` the inset
                    # from the wall (at most 10% of the field).
                    side = np.random.randint(0, 4, n)
                    coord = np.random.random(n) * 0.9 * xlim
                    depth = np.random.random(n) * 0.1 * xlim
                    temp_x, temp_y = np.empty(n), np.empty(n)
                    is_0 = side == 0
                    temp_x[is_0], temp_y[is_0] = depth[is_0], coord[is_0]
                    is_1 = side == 1
                    temp_x[is_1], temp_y[is_1] = (xlim - depth[is_1],
                                                  coord[is_1] + 0.1 * ylim)
                    is_2 = side == 2
                    temp_x[is_2], temp_y[is_2] = (coord[is_2] + 0.1 * xlim,
                                                  depth[is_2])
                    is_3 = side == 3
                    temp_x[is_3], temp_y[is_3] = (coord[is_3],
                                                  ylim - depth[is_3])
                    pos_x[type_] = temp_x
                    pos_y[type_] = temp_y
        self.pos_x, self.pos_y, self.dir_x, self.dir_y = (
            pos_x, pos_y, dir_x, dir_y)
    def fb_tick(self, steps):
        """Run the simulation for a given number of steps under fixed
        boundary conditions."""
        # Flat buffer filled by the C++ code: N_GLOBAL_STATS values per step.
        global_stats_slice = np.zeros(N_GLOBAL_STATS * steps)
        # NOTE(review): ``.values() + [...]`` requires dict.values() to
        # return a list (Python 2); Python 3 view objects do not support
        # ``+`` -- confirm the target interpreter before porting.
        c_model.fb_tick(
            *self.internal_params.values()
            + [self.pos_x, self.pos_y, self.dir_x, self.dir_y,
               global_stats_slice, steps])
        # Append this run's statistics as new columns.
        self.global_stats = np.hstack(
            [self.global_stats,
             global_stats_slice.reshape(N_GLOBAL_STATS, steps)])
    def pb_tick(self, steps):
        """Run the simulation for a given number of steps under periodic
        boundary conditions."""
        # Flat buffer filled by the C++ code: N_GLOBAL_STATS values per step.
        global_stats_slice = np.zeros(N_GLOBAL_STATS * steps)
        # NOTE(review): same Python-2 ``.values() + list`` caveat as fb_tick.
        c_model.pb_tick(
            *self.internal_params.values()
            + [self.pos_x, self.pos_y, self.dir_x, self.dir_y,
               global_stats_slice, steps])
        # Append this run's statistics as new columns.
        self.global_stats = np.hstack(
            [self.global_stats,
             global_stats_slice.reshape(N_GLOBAL_STATS, steps)])
    def set(self, state, global_stats):
        """Load given global properties and state into the Model."""
        self.global_stats = np.array(global_stats)
        # state is an iterable of four array-likes: pos_x, pos_y, dir_x,
        # dir_y; copy each into a fresh numpy array.
        self.pos_x, self.pos_y, self.dir_x, self.dir_y = [
            np.array(_) for _ in state]
def main():
    """Demo driver: run two example parameter sets and plot the resulting
    particle configurations (first an alignment-only set, then an
    adhesion-driven two-species set)."""
    # TEST: python -m model.DA
    import time
    import matplotlib.pyplot as plt
    from matplotlib.collections import LineCollection
    def test_model(params, scale_factor, velocity_trace,
                   periodic_boundary, steps):
        """Plot the results of the simulation given parameters."""
        # Track time it takes to run the model
        start_time = time.time()
        alpha = 0.8
        m = Model(params, scale_factor=scale_factor,
                  periodic_boundary=periodic_boundary)
        m.init_particles_state()
        # Run model
        m.tick(steps)
        print("--- %s seconds ---" % (time.time() - start_time))
        # Prepare for making plots
        x, y, dir_x, dir_y = m.state
        species_velocity = m.user_params["Velocity"]
        n_per_species = m.internal_params["n_per_species"]
        colors = ["blue", "red", "green"]
        # Set up plot parameters
        figsize = (5,5)
        dpi = 100
        fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
        # Marker area scaled with figure resolution and arena zoom so a
        # particle's drawn size stays proportional to its physical size.
        dots = (figsize[0]*dpi)**2
        circle_size = dots/100. * 3.14 * (0.08 * scale_factor)**2
        multiplier = velocity_trace
        # Plot particle system, one species (index slice) at a time
        for k, s in enumerate(counts2slices(n_per_species)):
            if multiplier > 0:
                # Draw a trailing segment behind each particle whose length
                # is proportional to its speed (the "velocity trace").
                segs = []
                for j in range(s.start, s.stop):
                    segs.append(
                        ((x[j], y[j]),
                         (x[j]-dir_x[j]*multiplier*species_velocity[k],
                          y[j]-dir_y[j]*multiplier*species_velocity[k])))
                ln_coll = LineCollection(segs, colors=colors[k],
                                        linewidths=1, alpha=alpha)
                ax.add_collection(ln_coll)
            ax.scatter(x[s], y[s], s=circle_size, color=colors[k],
                       linewidths=0, alpha=alpha)
        # Set plot limits to the (zoom-adjusted) arena size
        adjusted_limit = 10. / scale_factor
        plt.xlim([0, adjusted_limit])
        plt.ylim([0, adjusted_limit])
        plt.show()
    # ----------------------SVM----------------------
    # First demo: alignment force only (Interaction Force = 0), single
    # species, periodic boundary.
    params = {
        "Alignment Range": 10.0,
        "Pinned Cells": [
            "none",
            "none",
            "none"
        ],
        "Interaction Force": 0.0,
        "Gradient Intensity": [
            0.0,
            0.0,
            0.0
        ],
        "Cell Ratio": [
            1.0,
            0.0,
            0.0
        ],
        "Alignment Force": 1.0,
        "Noise Intensity": 0.016,
        "Angular Inertia": 0.01,
        "Adhesion": [
            [
                0.01,
                0.01,
                0.01
            ],
            [
                0.01,
                0.01,
                0.01
            ],
            [
                0.01,
                0.01,
                0.01
            ]
        ],
        "Gradient Direction": [
            0.0,
            0.0,
            0.0
        ],
        "Cell Density": 0.42,
        "Velocity": [
            0.03,
            0.03,
            0.03
        ],
        "Interaction Range": 10.0
    }
    scale_factor = 1.0
    velocity_trace = 15.
    periodic_boundary = True
    steps = 100
    test_model(params, scale_factor, velocity_trace, periodic_boundary, steps)
    # ----------------------SDA----------------------
    # Second demo: no alignment, two equal species with asymmetric adhesion,
    # fixed boundary.
    params = {
        "Alignment Range": 2.01,
        "Alignment Force": 0.0,
        "Interaction Force": 0.005,
        "Gradient Intensity": [
            0.0,
            0.0,
            0.0
        ],
        "Cell Ratio": [
            0.5,
            0.5,
            0.0
        ],
        "Pinned Cells": [
            "none",
            "none",
            "none"
        ],
        "Noise Intensity": 0.3,
        "Angular Inertia": 0.05,
        "Adhesion": [
            [
                1.2,
                1.4,
                0.01
            ],
            [
                1.4,
                1.8,
                0.01
            ],
            [
                0.01,
                0.01,
                0.01
            ]
        ],
        "Velocity": [
            0.05,
            0.05,
            0.05
        ],
        "Cell Density": 0.07,
        "Gradient Direction": [
            0.0,
            0.0,
            0.0
        ],
        "Interaction Range": 10.0
    }
    scale_factor = 0.5
    velocity_trace = 0.
    periodic_boundary = False
    steps = 500
    test_model(params, scale_factor, velocity_trace, periodic_boundary, steps)
# Run the demo simulations only when executed as a script.
if __name__ == "__main__":
    main()
| [
"numpy.sqrt",
"numpy.random.random",
"numpy.sin",
"numpy.random.power",
"matplotlib.collections.LineCollection",
"common.tools.counts2slices",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.empty",
"numpy.cos",
"model.weave_compile.weave_compile",
"matplotlib.pyplot.ylim",
"m... | [((459, 474), 'model.weave_compile.weave_compile', 'weave_compile', ([], {}), '()\n', (472, 474), False, 'from model.weave_compile import weave_compile\n'), ((1966, 1995), 'numpy.zeros', 'np.zeros', (['[N_GLOBAL_STATS, 0]'], {}), '([N_GLOBAL_STATS, 0])\n', (1974, 1995), True, 'import numpy as np\n'), ((3588, 3614), 'numpy.array', 'np.array', (["uprm['Velocity']"], {}), "(uprm['Velocity'])\n", (3596, 3614), True, 'import numpy as np\n'), ((3630, 3656), 'numpy.array', 'np.array', (["uprm['Adhesion']"], {}), "(uprm['Adhesion'])\n", (3638, 3656), True, 'import numpy as np\n'), ((5647, 5660), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5653, 5660), True, 'import numpy as np\n'), ((5677, 5690), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5683, 5690), True, 'import numpy as np\n'), ((5771, 5799), 'common.tools.counts2slices', 'counts2slices', (['n_per_species'], {}), '(n_per_species)\n', (5784, 5799), False, 'from common.tools import counts2slices\n'), ((8068, 8100), 'numpy.zeros', 'np.zeros', (['(N_GLOBAL_STATS * steps)'], {}), '(N_GLOBAL_STATS * steps)\n', (8076, 8100), True, 'import numpy as np\n'), ((8576, 8608), 'numpy.zeros', 'np.zeros', (['(N_GLOBAL_STATS * steps)'], {}), '(N_GLOBAL_STATS * steps)\n', (8584, 8608), True, 'import numpy as np\n'), ((9057, 9079), 'numpy.array', 'np.array', (['global_stats'], {}), '(global_stats)\n', (9065, 9079), True, 'import numpy as np\n'), ((9568, 9579), 'time.time', 'time.time', ([], {}), '()\n', (9577, 9579), False, 'import time\n'), ((10167, 10205), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (10179, 10205), True, 'import matplotlib.pyplot as plt\n'), ((11130, 11159), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, adjusted_limit]'], {}), '([0, adjusted_limit])\n', (11138, 11159), True, 'import matplotlib.pyplot as plt\n'), ((11168, 11197), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, adjusted_limit]'], {}), '([0, 
adjusted_limit])\n', (11176, 11197), True, 'import matplotlib.pyplot as plt\n'), ((11206, 11216), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11214, 11216), True, 'import matplotlib.pyplot as plt\n'), ((5477, 5498), 'numpy.random.random', 'np.random.random', (['nop'], {}), '(nop)\n', (5493, 5498), True, 'import numpy as np\n'), ((5522, 5543), 'numpy.random.random', 'np.random.random', (['nop'], {}), '(nop)\n', (5538, 5543), True, 'import numpy as np\n'), ((9151, 9162), 'numpy.array', 'np.array', (['_'], {}), '(_)\n', (9159, 9162), True, 'import numpy as np\n'), ((10404, 10432), 'common.tools.counts2slices', 'counts2slices', (['n_per_species'], {}), '(n_per_species)\n', (10417, 10432), False, 'from common.tools import counts2slices\n'), ((3715, 3782), 'numpy.array', 'np.array', (["[(0 if x == 'none' else 1) for x in uprm['Pinned Cells']]"], {}), "([(0 if x == 'none' else 1) for x in uprm['Pinned Cells']])\n", (3723, 3782), True, 'import numpy as np\n'), ((5597, 5618), 'numpy.random.random', 'np.random.random', (['nop'], {}), '(nop)\n', (5613, 5618), True, 'import numpy as np\n'), ((10785, 10850), 'matplotlib.collections.LineCollection', 'LineCollection', (['segs'], {'colors': 'colors[k]', 'linewidths': '(1)', 'alpha': 'alpha'}), '(segs, colors=colors[k], linewidths=1, alpha=alpha)\n', (10799, 10850), False, 'from matplotlib.collections import LineCollection\n'), ((3107, 3117), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3114, 3117), True, 'import numpy as np\n'), ((4332, 4349), 'numpy.cos', 'np.cos', (['(d * np.pi)'], {}), '(d * np.pi)\n', (4338, 4349), True, 'import numpy as np\n'), ((4483, 4500), 'numpy.sin', 'np.sin', (['(d * np.pi)'], {}), '(d * np.pi)\n', (4489, 4500), True, 'import numpy as np\n'), ((9821, 9832), 'time.time', 'time.time', ([], {}), '()\n', (9830, 9832), False, 'import time\n'), ((6238, 6257), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (6254, 6257), True, 'import numpy as np\n'), ((6311, 6324), 
'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6317, 6324), True, 'import numpy as np\n'), ((6377, 6390), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6383, 6390), True, 'import numpy as np\n'), ((6530, 6551), 'numpy.random.power', 'np.random.power', (['(2)', 'n'], {}), '(2, n)\n', (6545, 6551), True, 'import numpy as np\n'), ((6871, 6897), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)', 'n'], {}), '(0, 4, n)\n', (6888, 6897), True, 'import numpy as np\n'), ((6181, 6200), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (6197, 6200), True, 'import numpy as np\n'), ((6584, 6603), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (6600, 6603), True, 'import numpy as np\n'), ((6657, 6670), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6663, 6670), True, 'import numpy as np\n'), ((6723, 6736), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6729, 6736), True, 'import numpy as np\n'), ((7057, 7068), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (7065, 7068), True, 'import numpy as np\n'), ((7070, 7081), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (7078, 7081), True, 'import numpy as np\n'), ((6926, 6945), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (6942, 6945), True, 'import numpy as np\n'), ((6987, 7006), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (7003, 7006), True, 'import numpy as np\n')] |
import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper
import scipy.stats as stats
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
def plot_behavioral_graphs(NWBFilePath):
    """
    Behavioral analysis, plotting six graphs:
    1. Probability of responses
    2. ROC curves for different sessions
    3. Overall performance
    4. Histogram of AUC
    5. Accuracy over confidence high low
    6. Confidence level over correctness of responses

    Parameters:
        NWBFilePath (str): directory containing the NWB session files.
    """
    # Get NWB files
    allNWBfiles = os.listdir(NWBFilePath)
    # Get NWB file paths, append them
    filenames = []
    for singleNWBfile in allNWBfiles:
        NWBfile = os.path.join(NWBFilePath, singleNWBfile)
        if not os.path.exists((NWBfile)):
            print('This file does not exist: {}'.format(NWBfile))
        else:
            filenames.append(str(NWBfile))
    # Get all the nwb file names from the data folder
    #path_analysis = os.path.abspath('analysis')
    #path_data = (os.path.dirname(('{}').format(os.path.dirname(path_analysis))))
    #path_data = ('{}' + '/' + 'data').format(path_data)
    #filenames = helper.get_nwbfile_names(path_data)
    # n counts the sessions that pass the sanity checks below.
    n = 0
    # make the subplots: a 2x3 grid, one panel per graph listed above
    fig, axs = plt.subplots(nrows=2, ncols=3, sharex=False, sharey=False, figsize=(20, 10))
    # Place holder ready to store separate the new and old response
    # (one list per confidence level 1-6, per stimulus class)
    response_1_old = []
    response_2_old = []
    response_3_old = []
    response_4_old = []
    response_5_old = []
    response_6_old = []
    response_1_new = []
    response_2_new = []
    response_3_new = []
    response_4_new = []
    response_5_new = []
    response_6_new = []
    # Placeholder for overall performance
    all_performances = []
    # Placeholder for aucs
    all_auc = []
    # Placeholder for accuracies for different confidence level
    accuracies_high = []
    accuracies_low = []
    accuracies_all = []
    # Placeholder for mean confidences over correctness
    m_conf_all = []
    for filename in filenames:
        # Skip (and log) any session file that cannot be opened.
        try:
            print('processing file: ' + filename)
            nwbfile = helper.read(filename)
        except ValueError as e:
            print('Problem opening the file: ' + str(e))
            logging.warning('Error File: ' + filename + ':' + str(e))
            continue
        except OSError as e:
            print('Problem opening the file:' + str(e))
            logging.warning('Error File ' + filename + ':' + str(e))
            continue
        recog_response = helper.extract_recog_responses(nwbfile)
        ground_truth = helper.extract_new_old_label(nwbfile)
        if len(recog_response) != len(ground_truth):
            print('response length not equal to ground truth, skipped this session')
            continue
        else:
            # ground_truth == 1 marks "old" stimuli, == 0 marks "new" ones.
            recog_response_old = recog_response[ground_truth == 1]
            n = n + 1
            # Calculate the percentage of each responses
            response_1_old.append(np.sum(recog_response_old == 1) / len(recog_response_old))
            response_2_old.append(np.sum(recog_response_old == 2) / len(recog_response_old))
            response_3_old.append(np.sum(recog_response_old == 3) / len(recog_response_old))
            response_4_old.append(np.sum(recog_response_old == 4) / len(recog_response_old))
            response_5_old.append(np.sum(recog_response_old == 5) / len(recog_response_old))
            response_6_old.append(np.sum(recog_response_old == 6) / len(recog_response_old))
            recog_response_new = recog_response[ground_truth == 0]
            response_1_new.append(np.sum(recog_response_new == 1) / len(recog_response_new))
            response_2_new.append(np.sum(recog_response_new == 2) / len(recog_response_new))
            response_3_new.append(np.sum(recog_response_new == 3) / len(recog_response_new))
            response_4_new.append(np.sum(recog_response_new == 4) / len(recog_response_new))
            response_5_new.append(np.sum(recog_response_new == 5) / len(recog_response_new))
            response_6_new.append(np.sum(recog_response_new == 6) / len(recog_response_new))
        # Calculate the cumulative d and plot the cumulative ROC curve.
        # Columns 4 and 3 of the first five rows are the per-confidence
        # false-alarm and hit rates (see helper.cal_cumulative_d).
        stats_all = helper.cal_cumulative_d(nwbfile)
        x = stats_all[0:5, 4]
        y = stats_all[0:5, 3]
        axs[0, 1].plot(x, y, marker='.', color='grey', alpha=0.5)
        axs[0, 1].set_ylim(0, 1)
        axs[0, 1].set_xlim(0, 1)
        # Get the overall performance (row 2: overall FA and hit rates)
        all_performances.append([stats_all[2, 4], stats_all[2, 3]])
        # Calculate the auc
        auc = helper.cal_auc(stats_all)
        all_auc.append(auc)
        # Check if this session should be included in the accuracies over high low section
        is_included = helper.check_inclusion(recog_response, auc)
        # Calculate the accuracies for high low confidence
        if is_included:
            split_status, split_mode, ind_TP_high, ind_TP_low, ind_FP_high, ind_FP_low, ind_TN_high, \
            ind_TN_low, ind_FN_high, ind_FN_low, n_response = helper.dynamic_split(recog_response, ground_truth)
            nr_TN_high = len(ind_TN_high[0])
            nr_TP_high = len(ind_TP_high[0])
            nr_TN_all = len(ind_TN_high[0]) + len(ind_TN_low[0])
            # NOTE(review): ``nr_TN_low`` is assigned twice; this first value
            # (TP_high + TP_low) is dead -- it is overwritten two lines below
            # before any use.  It looks like it was meant to be a
            # ``nr_TP_all`` counter; confirm the intended statistic.
            nr_TN_low = len(ind_TP_high[0]) + len(ind_TP_low[0])
            nr_TP_low = len(ind_TP_low[0])
            nr_TN_low = len(ind_TN_low[0])
            # Total responses made at each confidence level (TP+TN+FN+FP)
            nr_high_response = len(ind_TN_high[0]) + len(ind_TP_high[0]) + len(ind_FN_high[0]) + len(ind_FP_high[0])
            nr_low_response = len(ind_TN_low[0]) + len(ind_TP_low[0]) + len(ind_FN_low[0]) + len(ind_FP_low[0])
            # print(nr_low_response)
            # print(len(ind_TN_low[0]))
            # print(len(ind_TP_low[0]))
            # print(len(ind_FN_low[0]))
            # print(len(ind_FP_low[0]))
            per_accuracy_high = (nr_TN_high + nr_TP_high) / nr_high_response
            per_accuracy_low = (nr_TN_low + nr_TP_low) / nr_low_response
            # NOTE(review): mixes low-confidence TN with high-confidence TP;
            # an overall accuracy would normally use all TN + all TP --
            # confirm the intended metric.
            per_accuracy_all = (nr_TN_low + nr_TP_high) / n_response
            accuracies_high.append(per_accuracy_high * 100)
            accuracies_low.append(per_accuracy_low * 100)
            accuracies_all.append(per_accuracy_all * 100)
        # get correct/incorrect indexes
        correct_inds, incorrect_inds = helper.correct_incorrect_indexes(recog_response, ground_truth)
        # remap response onto a confidence scale (see helper.remap_response)
        remapped_response = helper.remap_response(recog_response)
        # Get the mean confidence for correct vs. incorrect trials
        m_conf_all.append([np.mean(remapped_response[correct_inds]), np.mean(remapped_response[incorrect_inds])])
    # Panel (0,0): probability of each response, old vs. new stimuli,
    # mean +/- SEM across the n included sessions.
    response_old = np.asarray([response_1_old, response_2_old, response_3_old, response_4_old,
                               response_5_old, response_6_old])
    response_new = np.asarray([response_1_new, response_2_new, response_3_new, response_4_new,
                               response_5_new, response_6_new])
    response_percentage_old = np.mean(response_old, axis=1)
    std_old = np.std(response_old, axis=1)
    se_old = std_old/np.sqrt(n)
    response_percentage_new = np.mean(response_new, axis=1)
    std_new = np.std(response_new, axis=1)
    se_new = std_new/np.sqrt(n)
    x = [i for i in range(1, 7, 1)]
    axs[0, 0].errorbar(x, response_percentage_old, yerr=se_old, color='blue', label='old stimuli')
    axs[0, 0].errorbar(x, response_percentage_new, yerr=se_new, color='red', label='new stimuli')
    axs[0, 0].legend()
    axs[0, 0].set_xlabel('Confidence')
    axs[0, 0].set_ylabel('Probability of Response')
    axs[0, 0].set_title('n=' + str(len(filenames)) + ' sessions')
    # Panel (0,1): finish the cumulative ROC plot (chance diagonal, labels)
    axs[0, 1].plot([0, 1], [0, 1], color='black', alpha=0.7)
    axs[0, 1].set_xlabel('false alarm rate')
    axs[0, 1].set_ylabel('hit rate')
    axs[0, 1].set_title('average roc')
    # Calculate the average and overall performance across sessions
    avg_performance = np.average(all_performances, axis=0)
    std_performance = np.std(all_performances, axis=0)
    # Panel (0,2): overall performance, one grey dot per session plus the
    # mean with std error bars in both directions.
    for performance in all_performances:
        axs[0, 2].plot(performance[0], performance[1], marker='.', color='grey', alpha=0.6)
    axs[0, 2].set_ylim(0, 1)
    axs[0, 2].set_xlim(0, 1)
    axs[0, 2].plot([0, 1], [0, 1], color='black', alpha=0.7)
    axs[0, 2].errorbar(avg_performance[0], avg_performance[1], std_performance[1], std_performance[0])
    axs[0, 2].set_xlabel('false alarm rate')
    axs[0, 2].set_ylabel('hit rate')
    axs[0, 2].set_title('Overall Performance mTP=' + str(avg_performance[0]) + ' mFP=' + str(avg_performance[1]))
    # Panel (1,0): AUC histogram across sessions
    m_auc = np.mean(all_auc)
    axs[1, 0].hist(all_auc, 15, histtype='bar')
    axs[1, 0].set_xlim(0.5, 1)
    axs[1, 0].set_xlabel('AUC')
    axs[1, 0].set_ylabel('nr of subjects')
    axs[1, 0].set_title('AUC m=' + str(m_auc))
    # Panel (1,1): accuracy split by confidence; one-sample t-tests against
    # 50% chance label the x axis.
    p1 = stats.ttest_1samp(accuracies_high, 50)[1]
    p2 = stats.ttest_1samp(accuracies_low, 50)[1]
    x_axis_label_high = 'high p=' + str(p1)
    x_axis_label_low = 'low p=' + str(p2)
    x_axis = [x_axis_label_high, x_axis_label_low]
    for i in range(len(accuracies_high)):
        axs[1, 1].plot(x_axis, [accuracies_high[i], accuracies_low[i]], marker='o', alpha=0.5)
    axs[1, 1].plot(x_axis, [50, 50], color='black')
    axs[1, 1].set_ylim([0, 100])
    # Welch's t-test (unequal variances) between the two confidence groups
    tstat, p_val = stats.ttest_ind(accuracies_high, accuracies_low, equal_var=False)
    axs[1, 1].set_title('p=' + str(p_val))
    axs[1, 1].set_xlabel('confidence p vs. 50%')
    axs[1, 1].set_ylabel('accuracy % correct')
    # Panel (1,2): mean confidence for correct vs. incorrect responses,
    # mean +/- SEM across sessions, with a Welch's t-test in the title.
    m_conf_all = np.asarray(m_conf_all)
    m_conf = np.mean(m_conf_all, axis=0)
    std_conf = np.std(m_conf_all, axis=0)
    n = m_conf_all.shape[0]
    se_conf = std_conf/np.sqrt(n)
    tstat, p_val = stats.ttest_ind(m_conf_all[:, 0], m_conf_all[:, 1], equal_var=False)
    axs[1, 2].bar(['correct', 'incorrect'], m_conf, yerr=se_conf)
    axs[1, 2].set_ylabel('confidence 1=high, 3=guess')
    axs[1, 2].set_title('pT2=' + str(p_val) + ' n=' + str(n))
    plt.show()
# Functions that plot the graphs separately.
def plot_prob_response():
    """Draw the probability-of-response curves for old vs. new stimuli.

    Loads every NWB session under ``../data``, lets the helper module
    compute the averaged response distributions, and plots one error-bar
    curve per stimulus class across the six confidence levels.
    """
    filenames = helper.get_nwbfile_names("../data")
    confidence_levels = list(range(1, 7))
    (mean_old, err_old,
     mean_new, err_new) = helper.extract_probability_response(filenames)
    plt.errorbar(confidence_levels, mean_old, yerr=err_old, color='blue', label='old stimuli')
    plt.errorbar(confidence_levels, mean_new, yerr=err_new, color='red', label='new stimuli')
    plt.legend(bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
    plt.xlabel('Confidence')
    plt.ylabel('Probability of Response')
    plt.title('n=' + str(len(filenames)) + ' sessions')
    plt.show()
def plot_cumulative_roc():
    """Plot each session's cumulative ROC curve plus the chance diagonal.

    Reads every NWB session file under ``../data``, computes the cumulative
    d-prime statistics per session, and overlays one grey ROC curve each.

    Fix: the helper functions were called without the ``helper.`` module
    prefix (this module imports ``helper`` as an alias, as the sibling
    functions do), which raised NameError at runtime.
    """
    filenames = helper.get_nwbfile_names("../data")
    for filename in filenames:
        nwbfile = helper.read(filename)
        stats_all = helper.cal_cumulative_d(nwbfile)
        # Columns 4 and 3 of the first five rows are the per-confidence
        # false-alarm and hit rates (same layout as in
        # plot_behavioral_graphs).
        x = stats_all[0:5, 4]
        y = stats_all[0:5, 3]
        plt.plot(x, y, marker='.', color='grey', alpha=0.5)
    plt.ylim(0, 1)
    plt.xlim(0, 1)
    plt.plot([0, 1], [0, 1], color='black', alpha=0.7)
    plt.xlabel('false alarm rate')
    plt.ylabel('hit rate')
    plt.title('average roc')
    plt.show()
def plot_overall_performance():
    """Plot each session's overall (false-alarm rate, hit rate) point.

    Every session is drawn as a grey dot in ROC space together with the
    chance diagonal; the across-session mean is overlaid with std error
    bars in both directions.

    Fix: the loop variable shadowed the iterated ``filenames`` list
    (``for filenames in filenames``); it now uses a distinct name.
    """
    filenames = helper.get_nwbfile_names("../data")
    all_performances = []
    for filename in filenames:
        nwbfile = helper.read(filename)
        stats_all = helper.cal_cumulative_d(nwbfile)
        # Row 2 holds the overall rates: column 4 = false alarms,
        # column 3 = hits (same layout as in plot_behavioral_graphs).
        all_performances.append([stats_all[2, 4], stats_all[2, 3]])
    avg_performance = np.average(all_performances, axis=0)
    std_performance = np.std(all_performances, axis=0)
    for performance in all_performances:
        plt.plot(performance[0], performance[1], marker='.', color='grey', alpha=0.6)
    plt.ylim(0, 1)
    plt.xlim(0, 1)
    plt.plot([0, 1], [0, 1], color='black', alpha=0.7)
    plt.errorbar(avg_performance[0], avg_performance[1], std_performance[1], std_performance[0])
    plt.xlabel('false alarm rate')
    plt.ylabel('hit rate')
    plt.title('Overall Performance mTP=' + str(avg_performance[0]) + ' mFP=' + str(avg_performance[1]))
    plt.show()
def plot_auc():
    """Plot a histogram of the per-session AUC values.

    Fixes: the helper functions were called without the ``helper.`` module
    prefix (NameError at runtime, unlike the sibling functions), and the
    loop variable shadowed the iterated ``filenames`` list.
    """
    filenames = helper.get_nwbfile_names("../data")
    all_auc = []
    for filename in filenames:
        nwbfile = helper.read(filename)
        stats_all = helper.cal_cumulative_d(nwbfile)
        auc = helper.cal_auc(stats_all)
        all_auc.append(auc)
    m_auc = np.mean(all_auc)
    plt.hist(all_auc, 15, histtype='bar')
    plt.xlim(0, 1)
    plt.xlabel('AUC')
    plt.ylabel('nr of subjects')
    plt.title('AUC m=' + str(m_auc))
    plt.show()
def plot_confidence_accuracy():
    """
    Plot accuracy (% correct) for high- vs. low-confidence responses, one
    line per session, with a 50% chance reference line. The x-axis labels
    carry one-sample t-test p-values against 50%; the title carries the
    p-value of a Welch t-test between the two confidence groups.
    """
    filenames = get_nwbfile_names("../data")
    accuracies_high = []
    accuracies_low = []
    accuracies_all = []
    for filename in filenames:
        nwbfile = read(filename)
        recog_response = extract_recog_responses(nwbfile)
        ground_truth = extract_new_old_label(nwbfile)
        split_status, split_mode, ind_TP_high, ind_TP_low, ind_FP_high, ind_FP_low, ind_TN_high, \
        ind_TN_low, ind_FN_high, ind_FN_low, n_response = dynamic_split(recog_response, ground_truth)
        nr_TN_high = len(ind_TN_high[0])
        nr_TP_high = len(ind_TP_high[0])
        nr_TN_all = len(ind_TN_high[0]) + len(ind_TN_low[0])
        # BUG FIX: this TP total was originally assigned to nr_TN_low and
        # then immediately clobbered two lines below; keep it separately.
        nr_TP_all = len(ind_TP_high[0]) + len(ind_TP_low[0])
        nr_TP_low = len(ind_TP_low[0])
        nr_TN_low = len(ind_TN_low[0])
        nr_high_response = len(ind_TN_high[0]) + len(ind_TP_high[0]) + len(ind_FN_high[0]) + len(ind_FP_high[0])
        nr_low_response = len(ind_TN_low[0]) + len(ind_TP_low[0]) + len(ind_FN_low[0]) + len(ind_FP_low[0])
        per_accuracy_high = (nr_TN_high + nr_TP_high) / nr_high_response
        per_accuracy_low = (nr_TN_low + nr_TP_low) / nr_low_response
        # BUG FIX: overall accuracy now counts all correct responses;
        # previously it mixed low-confidence TNs with high-confidence TPs.
        per_accuracy_all = (nr_TN_all + nr_TP_all) / n_response
        accuracies_high.append(per_accuracy_high*100)
        accuracies_low.append(per_accuracy_low*100)
        accuracies_all.append(per_accuracy_all*100)
    p1 = stats.ttest_1samp(accuracies_high, 50)[1]
    p2 = stats.ttest_1samp(accuracies_low, 50)[1]
    x_axis_label_high = 'high p=' + str(p1)
    x_axis_label_low = 'low p=' + str(p2)
    x_axis = [x_axis_label_high, x_axis_label_low]
    # One connected high->low line per session.
    for i in range(len(accuracies_high)):
        plt.plot(x_axis, [accuracies_high[i], accuracies_low[i]], marker='o')
    plt.plot(x_axis, [50, 50], color='black')
    plt.ylim([0, 100])
    tstat, p_val = stats.ttest_ind(accuracies_high, accuracies_low, equal_var=False)
    plt.title('p=' + str(p_val))
    plt.xlabel('confidence p vs. 50%')
    plt.ylabel('accuracy % correct')
    plt.show()
| [
"matplotlib.pyplot.hist",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.get_nwbfile_names",
"scipy.stats.ttest_ind",
"RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.dynamic_split",
"RutishauserLabtoNWB.events.newolddelay.python.an... | [((544, 567), 'os.listdir', 'os.listdir', (['NWBFilePath'], {}), '(NWBFilePath)\n', (554, 567), False, 'import os\n'), ((1242, 1318), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'sharex': '(False)', 'sharey': '(False)', 'figsize': '(20, 10)'}), '(nrows=2, ncols=3, sharex=False, sharey=False, figsize=(20, 10))\n', (1254, 1318), True, 'import matplotlib.pyplot as plt\n'), ((6666, 6778), 'numpy.asarray', 'np.asarray', (['[response_1_old, response_2_old, response_3_old, response_4_old,\n response_5_old, response_6_old]'], {}), '([response_1_old, response_2_old, response_3_old, response_4_old,\n response_5_old, response_6_old])\n', (6676, 6778), True, 'import numpy as np\n'), ((6825, 6937), 'numpy.asarray', 'np.asarray', (['[response_1_new, response_2_new, response_3_new, response_4_new,\n response_5_new, response_6_new]'], {}), '([response_1_new, response_2_new, response_3_new, response_4_new,\n response_5_new, response_6_new])\n', (6835, 6937), True, 'import numpy as np\n'), ((6996, 7025), 'numpy.mean', 'np.mean', (['response_old'], {'axis': '(1)'}), '(response_old, axis=1)\n', (7003, 7025), True, 'import numpy as np\n'), ((7040, 7068), 'numpy.std', 'np.std', (['response_old'], {'axis': '(1)'}), '(response_old, axis=1)\n', (7046, 7068), True, 'import numpy as np\n'), ((7131, 7160), 'numpy.mean', 'np.mean', (['response_new'], {'axis': '(1)'}), '(response_new, axis=1)\n', (7138, 7160), True, 'import numpy as np\n'), ((7175, 7203), 'numpy.std', 'np.std', (['response_new'], {'axis': '(1)'}), '(response_new, axis=1)\n', (7181, 7203), True, 'import numpy as np\n'), ((7948, 7984), 'numpy.average', 'np.average', (['all_performances'], {'axis': '(0)'}), '(all_performances, axis=0)\n', (7958, 7984), True, 'import numpy as np\n'), ((8007, 8039), 'numpy.std', 'np.std', (['all_performances'], {'axis': '(0)'}), '(all_performances, axis=0)\n', (8013, 8039), True, 'import numpy as np\n'), 
((8673, 8689), 'numpy.mean', 'np.mean', (['all_auc'], {}), '(all_auc)\n', (8680, 8689), True, 'import numpy as np\n'), ((9429, 9494), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['accuracies_high', 'accuracies_low'], {'equal_var': '(False)'}), '(accuracies_high, accuracies_low, equal_var=False)\n', (9444, 9494), True, 'import scipy.stats as stats\n'), ((9741, 9763), 'numpy.asarray', 'np.asarray', (['m_conf_all'], {}), '(m_conf_all)\n', (9751, 9763), True, 'import numpy as np\n'), ((9777, 9804), 'numpy.mean', 'np.mean', (['m_conf_all'], {'axis': '(0)'}), '(m_conf_all, axis=0)\n', (9784, 9804), True, 'import numpy as np\n'), ((9820, 9846), 'numpy.std', 'np.std', (['m_conf_all'], {'axis': '(0)'}), '(m_conf_all, axis=0)\n', (9826, 9846), True, 'import numpy as np\n'), ((9930, 9998), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['m_conf_all[:, 0]', 'm_conf_all[:, 1]'], {'equal_var': '(False)'}), '(m_conf_all[:, 0], m_conf_all[:, 1], equal_var=False)\n', (9945, 9998), True, 'import scipy.stats as stats\n'), ((10188, 10198), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10196, 10198), True, 'import matplotlib.pyplot as plt\n'), ((10354, 10389), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.get_nwbfile_names', 'helper.get_nwbfile_names', (['"""../data"""'], {}), "('../data')\n", (10378, 10389), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((10499, 10545), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.extract_probability_response', 'helper.extract_probability_response', (['filenames'], {}), '(filenames)\n', (10534, 10545), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((10665, 10759), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'response_percentage_old'], {'yerr': 'std_old', 'color': '"""blue"""', 'label': '"""old stimuli"""'}), "(x, response_percentage_old, yerr=std_old, color='blue', label=\n 'old stimuli')\n", (10677, 10759), 
True, 'import matplotlib.pyplot as plt\n'), ((10759, 10852), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'response_percentage_new'], {'yerr': 'std_new', 'color': '"""red"""', 'label': '"""new stimuli"""'}), "(x, response_percentage_new, yerr=std_new, color='red', label=\n 'new stimuli')\n", (10771, 10852), True, 'import matplotlib.pyplot as plt\n'), ((10928, 10952), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Confidence"""'], {}), "('Confidence')\n", (10938, 10952), True, 'import matplotlib.pyplot as plt\n'), ((10957, 10994), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability of Response"""'], {}), "('Probability of Response')\n", (10967, 10994), True, 'import matplotlib.pyplot as plt\n'), ((11055, 11065), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11063, 11065), True, 'import matplotlib.pyplot as plt\n'), ((11464, 11514), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""black"""', 'alpha': '(0.7)'}), "([0, 1], [0, 1], color='black', alpha=0.7)\n", (11472, 11514), True, 'import matplotlib.pyplot as plt\n'), ((11519, 11549), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""false alarm rate"""'], {}), "('false alarm rate')\n", (11529, 11549), True, 'import matplotlib.pyplot as plt\n'), ((11554, 11576), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""hit rate"""'], {}), "('hit rate')\n", (11564, 11576), True, 'import matplotlib.pyplot as plt\n'), ((11581, 11605), 'matplotlib.pyplot.title', 'plt.title', (['"""average roc"""'], {}), "('average roc')\n", (11590, 11605), True, 'import matplotlib.pyplot as plt\n'), ((11610, 11620), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11618, 11620), True, 'import matplotlib.pyplot as plt\n'), ((11716, 11751), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.get_nwbfile_names', 'helper.get_nwbfile_names', (['"""../data"""'], {}), "('../data')\n", (11740, 11751), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as 
helper\n'), ((11995, 12031), 'numpy.average', 'np.average', (['all_performances'], {'axis': '(0)'}), '(all_performances, axis=0)\n', (12005, 12031), True, 'import numpy as np\n'), ((12054, 12086), 'numpy.std', 'np.std', (['all_performances'], {'axis': '(0)'}), '(all_performances, axis=0)\n', (12060, 12086), True, 'import numpy as np\n'), ((12265, 12315), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""black"""', 'alpha': '(0.7)'}), "([0, 1], [0, 1], color='black', alpha=0.7)\n", (12273, 12315), True, 'import matplotlib.pyplot as plt\n'), ((12320, 12416), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['avg_performance[0]', 'avg_performance[1]', 'std_performance[1]', 'std_performance[0]'], {}), '(avg_performance[0], avg_performance[1], std_performance[1],\n std_performance[0])\n', (12332, 12416), True, 'import matplotlib.pyplot as plt\n'), ((12417, 12447), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""false alarm rate"""'], {}), "('false alarm rate')\n", (12427, 12447), True, 'import matplotlib.pyplot as plt\n'), ((12452, 12474), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""hit rate"""'], {}), "('hit rate')\n", (12462, 12474), True, 'import matplotlib.pyplot as plt\n'), ((12583, 12593), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12591, 12593), True, 'import matplotlib.pyplot as plt\n'), ((12901, 12917), 'numpy.mean', 'np.mean', (['all_auc'], {}), '(all_auc)\n', (12908, 12917), True, 'import numpy as np\n'), ((12922, 12959), 'matplotlib.pyplot.hist', 'plt.hist', (['all_auc', '(15)'], {'histtype': '"""bar"""'}), "(all_auc, 15, histtype='bar')\n", (12930, 12959), True, 'import matplotlib.pyplot as plt\n'), ((12964, 12978), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (12972, 12978), True, 'import matplotlib.pyplot as plt\n'), ((12983, 13000), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""AUC"""'], {}), "('AUC')\n", (12993, 13000), True, 'import matplotlib.pyplot as plt\n'), ((13005, 13033), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""nr of subjects"""'], {}), "('nr of subjects')\n", (13015, 13033), True, 'import matplotlib.pyplot as plt\n'), ((13075, 13085), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13083, 13085), True, 'import matplotlib.pyplot as plt\n'), ((14913, 14954), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', '[50, 50]'], {'color': '"""black"""'}), "(x_axis, [50, 50], color='black')\n", (14921, 14954), True, 'import matplotlib.pyplot as plt\n'), ((14959, 14977), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 100]'], {}), '([0, 100])\n', (14967, 14977), True, 'import matplotlib.pyplot as plt\n'), ((14998, 15063), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['accuracies_high', 'accuracies_low'], {'equal_var': '(False)'}), '(accuracies_high, accuracies_low, equal_var=False)\n', (15013, 15063), True, 'import scipy.stats as stats\n'), ((15101, 15135), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""confidence p vs. 50%"""'], {}), "('confidence p vs. 
50%')\n", (15111, 15135), True, 'import matplotlib.pyplot as plt\n'), ((15140, 15172), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy % correct"""'], {}), "('accuracy % correct')\n", (15150, 15172), True, 'import matplotlib.pyplot as plt\n'), ((15177, 15187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15185, 15187), True, 'import matplotlib.pyplot as plt\n'), ((684, 724), 'os.path.join', 'os.path.join', (['NWBFilePath', 'singleNWBfile'], {}), '(NWBFilePath, singleNWBfile)\n', (696, 724), False, 'import os\n'), ((2526, 2565), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.extract_recog_responses', 'helper.extract_recog_responses', (['nwbfile'], {}), '(nwbfile)\n', (2556, 2565), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((2589, 2626), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.extract_new_old_label', 'helper.extract_new_old_label', (['nwbfile'], {}), '(nwbfile)\n', (2617, 2626), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((4168, 4200), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.cal_cumulative_d', 'helper.cal_cumulative_d', (['nwbfile'], {}), '(nwbfile)\n', (4191, 4200), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((4543, 4568), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.cal_auc', 'helper.cal_auc', (['stats_all'], {}), '(stats_all)\n', (4557, 4568), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((4711, 4754), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.check_inclusion', 'helper.check_inclusion', (['recog_response', 'auc'], {}), '(recog_response, auc)\n', (4733, 4754), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((7090, 7100), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (7097, 7100), True, 'import numpy as np\n'), 
((7225, 7235), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (7232, 7235), True, 'import numpy as np\n'), ((8957, 8995), 'scipy.stats.ttest_1samp', 'stats.ttest_1samp', (['accuracies_high', '(50)'], {}), '(accuracies_high, 50)\n', (8974, 8995), True, 'import scipy.stats as stats\n'), ((9008, 9045), 'scipy.stats.ttest_1samp', 'stats.ttest_1samp', (['accuracies_low', '(50)'], {}), '(accuracies_low, 50)\n', (9025, 9045), True, 'import scipy.stats as stats\n'), ((9900, 9910), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (9907, 9910), True, 'import numpy as np\n'), ((11362, 11413), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'marker': '"""."""', 'color': '"""grey"""', 'alpha': '(0.5)'}), "(x, y, marker='.', color='grey', alpha=0.5)\n", (11370, 11413), True, 'import matplotlib.pyplot as plt\n'), ((11422, 11436), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (11430, 11436), True, 'import matplotlib.pyplot as plt\n'), ((11445, 11459), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (11453, 11459), True, 'import matplotlib.pyplot as plt\n'), ((11828, 11850), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.read', 'helper.read', (['filenames'], {}), '(filenames)\n', (11839, 11850), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((11871, 11903), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.cal_cumulative_d', 'helper.cal_cumulative_d', (['nwbfile'], {}), '(nwbfile)\n', (11894, 11903), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((12137, 12214), 'matplotlib.pyplot.plot', 'plt.plot', (['performance[0]', 'performance[1]'], {'marker': '"""."""', 'color': '"""grey"""', 'alpha': '(0.6)'}), "(performance[0], performance[1], marker='.', color='grey', alpha=0.6)\n", (12145, 12214), True, 'import matplotlib.pyplot as plt\n'), ((12223, 12237), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 
1)\n', (12231, 12237), True, 'import matplotlib.pyplot as plt\n'), ((12246, 12260), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (12254, 12260), True, 'import matplotlib.pyplot as plt\n'), ((14559, 14597), 'scipy.stats.ttest_1samp', 'stats.ttest_1samp', (['accuracies_high', '(50)'], {}), '(accuracies_high, 50)\n', (14576, 14597), True, 'import scipy.stats as stats\n'), ((14610, 14647), 'scipy.stats.ttest_1samp', 'stats.ttest_1samp', (['accuracies_low', '(50)'], {}), '(accuracies_low, 50)\n', (14627, 14647), True, 'import scipy.stats as stats\n'), ((14839, 14908), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', '[accuracies_high[i], accuracies_low[i]]'], {'marker': '"""o"""'}), "(x_axis, [accuracies_high[i], accuracies_low[i]], marker='o')\n", (14847, 14908), True, 'import matplotlib.pyplot as plt\n'), ((740, 763), 'os.path.exists', 'os.path.exists', (['NWBfile'], {}), '(NWBfile)\n', (754, 763), False, 'import os\n'), ((2123, 2144), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.read', 'helper.read', (['filename'], {}), '(filename)\n', (2134, 2144), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((5003, 5053), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.dynamic_split', 'helper.dynamic_split', (['recog_response', 'ground_truth'], {}), '(recog_response, ground_truth)\n', (5023, 5053), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((6274, 6336), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.correct_incorrect_indexes', 'helper.correct_incorrect_indexes', (['recog_response', 'ground_truth'], {}), '(recog_response, ground_truth)\n', (6306, 6336), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((6399, 6436), 'RutishauserLabtoNWB.events.newolddelay.python.analysis.helper.remap_response', 'helper.remap_response', (['recog_response'], {}), '(recog_response)\n', 
(6420, 6436), True, 'import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper\n'), ((2974, 3005), 'numpy.sum', 'np.sum', (['(recog_response_old == 1)'], {}), '(recog_response_old == 1)\n', (2980, 3005), True, 'import numpy as np\n'), ((3063, 3094), 'numpy.sum', 'np.sum', (['(recog_response_old == 2)'], {}), '(recog_response_old == 2)\n', (3069, 3094), True, 'import numpy as np\n'), ((3152, 3183), 'numpy.sum', 'np.sum', (['(recog_response_old == 3)'], {}), '(recog_response_old == 3)\n', (3158, 3183), True, 'import numpy as np\n'), ((3241, 3272), 'numpy.sum', 'np.sum', (['(recog_response_old == 4)'], {}), '(recog_response_old == 4)\n', (3247, 3272), True, 'import numpy as np\n'), ((3330, 3361), 'numpy.sum', 'np.sum', (['(recog_response_old == 5)'], {}), '(recog_response_old == 5)\n', (3336, 3361), True, 'import numpy as np\n'), ((3419, 3450), 'numpy.sum', 'np.sum', (['(recog_response_old == 6)'], {}), '(recog_response_old == 6)\n', (3425, 3450), True, 'import numpy as np\n'), ((3572, 3603), 'numpy.sum', 'np.sum', (['(recog_response_new == 1)'], {}), '(recog_response_new == 1)\n', (3578, 3603), True, 'import numpy as np\n'), ((3661, 3692), 'numpy.sum', 'np.sum', (['(recog_response_new == 2)'], {}), '(recog_response_new == 2)\n', (3667, 3692), True, 'import numpy as np\n'), ((3750, 3781), 'numpy.sum', 'np.sum', (['(recog_response_new == 3)'], {}), '(recog_response_new == 3)\n', (3756, 3781), True, 'import numpy as np\n'), ((3839, 3870), 'numpy.sum', 'np.sum', (['(recog_response_new == 4)'], {}), '(recog_response_new == 4)\n', (3845, 3870), True, 'import numpy as np\n'), ((3928, 3959), 'numpy.sum', 'np.sum', (['(recog_response_new == 5)'], {}), '(recog_response_new == 5)\n', (3934, 3959), True, 'import numpy as np\n'), ((4017, 4048), 'numpy.sum', 'np.sum', (['(recog_response_new == 6)'], {}), '(recog_response_new == 6)\n', (4023, 4048), True, 'import numpy as np\n'), ((10901, 10910), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (10908, 
10910), True, 'import matplotlib.pyplot as plt\n'), ((6523, 6563), 'numpy.mean', 'np.mean', (['remapped_response[correct_inds]'], {}), '(remapped_response[correct_inds])\n', (6530, 6563), True, 'import numpy as np\n'), ((6565, 6607), 'numpy.mean', 'np.mean', (['remapped_response[incorrect_inds]'], {}), '(remapped_response[incorrect_inds])\n', (6572, 6607), True, 'import numpy as np\n')] |
import numpy as np

def simulate(initial_fish, days):
    """Simulate lanternfish population growth for `days` days.

    Instead of tracking individual fish, keep a 9-slot histogram of how
    many fish have each internal-timer value (0-8). Each day every timer
    decreases by one; fish at timer 0 spawn a newborn (timer 8) and reset
    themselves to timer 6.

    Parameters
    ----------
    initial_fish : iterable of int
        Starting timer value (0-8) of every fish.
    days : int
        Number of days to simulate.

    Returns
    -------
    numpy.ndarray
        Length-9 array of fish counts per timer value after `days` days.
    """
    counts = np.zeros(9)
    for timer in initial_fish:
        counts[timer] += 1
    for _ in range(days):
        spawning = counts[0]
        # Shift every count down one timer slot; timer-0 counts wrap to
        # slot 8 (the newborns).
        counts = np.roll(counts, -1)
        # The spawning fish themselves restart at timer 6.
        counts[6] += spawning
    return counts

num_of_days = 257
initial_fish = [5,1,1,5,4,2,1,2,1,2,2,1,1,1,4,2,2,4,1,1,1,1,1,4,1,1,1,1,1,5,3,1,4,1,1,1,1,1,4,1,5,1,1,1,4,1,2,2,3,1,5,1,1,5,1,1,5,4,1,1,1,4,3,1,1,1,3,1,5,5,1,1,1,1,5,3,2,1,2,3,1,5,1,1,4,1,1,2,1,5,1,1,1,1,5,4,5,1,3,1,3,3,5,5,1,3,1,5,3,1,1,4,2,3,3,1,2,4,1,1,1,1,1,1,1,2,1,1,4,1,3,2,5,2,1,1,1,4,2,1,1,1,4,2,4,1,1,1,1,4,1,3,5,5,1,2,1,3,1,1,4,1,1,1,1,2,1,1,4,2,3,1,1,1,1,1,1,1,4,5,1,1,3,1,1,2,1,1,1,5,1,1,1,1,1,3,2,1,2,4,5,1,5,4,1,1,3,1,1,5,5,1,3,1,1,1,1,4,4,2,1,2,1,1,5,1,1,4,5,1,1,1,1,1,1,1,1,1,1,3,1,1,1,1,1,4,2,1,1,1,2,5,1,4,1,1,1,4,1,1,5,4,4,3,1,1,4,5,1,1,3,5,3,1,2,5,3,4,1,3,5,4,1,3,1,5,1,4,1,1,4,2,1,1,1,3,2,1,1,4]
# The original loop ran `for day in range(1, num_of_days)`, i.e. 256
# iterations for num_of_days = 257; preserve that exact behavior.
fish_numbers = simulate(initial_fish, num_of_days - 1)
print(sum(fish_numbers)) | [
"numpy.zeros"
] | [((53, 64), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (61, 64), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import time
import busio
import board
import adafruit_amg88xx
import matplotlib.pyplot as plt
import numpy as np

# Set up the I2C bus and the AMG88xx thermal camera attached to it.
i2c = busio.I2C(board.SCL, board.SDA)
thermal_cam = adafruit_amg88xx.AMG88XX(i2c)

# Give the sensor a moment to finish initializing.
time.sleep(0.1)

# One figure for the live 8x8 temperature view.
plt.subplots(figsize=(5, 5))

# Refresh the heat map until the user interrupts with Ctrl-C.
try:
    while True:
        # Grab the current 8x8 temperature frame.
        frame = np.array(thermal_cam.pixels)
        # Mirror the frame left/right so the display matches the scene.
        mirrored = frame[:, ::-1]
        plt.subplot(1, 1, 1)
        plt.imshow(mirrored, cmap="inferno")
        plt.colorbar()
        plt.pause(0.01)
        plt.clf()
except KeyboardInterrupt:
    print("stop")
| [
"matplotlib.pyplot.imshow",
"busio.I2C",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.clf",
"time.sleep",
"adafruit_amg88xx.AMG88XX",
"numpy.array",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots"
] | [((167, 198), 'busio.I2C', 'busio.I2C', (['board.SCL', 'board.SDA'], {}), '(board.SCL, board.SDA)\n', (176, 198), False, 'import busio\n'), ((223, 256), 'adafruit_amg88xx.AMG88XX', 'adafruit_amg88xx.AMG88XX', (['i2c_bus'], {}), '(i2c_bus)\n', (247, 256), False, 'import adafruit_amg88xx\n'), ((281, 296), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (291, 296), False, 'import time\n'), ((321, 349), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (333, 349), True, 'import matplotlib.pyplot as plt\n'), ((431, 454), 'numpy.array', 'np.array', (['sensor.pixels'], {}), '(sensor.pixels)\n', (439, 454), True, 'import numpy as np\n'), ((786, 806), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (797, 806), True, 'import matplotlib.pyplot as plt\n'), ((821, 855), 'matplotlib.pyplot.imshow', 'plt.imshow', (['flip_h'], {'cmap': '"""inferno"""'}), "(flip_h, cmap='inferno')\n", (831, 855), True, 'import matplotlib.pyplot as plt\n'), ((864, 878), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (876, 878), True, 'import matplotlib.pyplot as plt\n'), ((1046, 1061), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (1055, 1061), True, 'import matplotlib.pyplot as plt\n'), ((1069, 1078), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1076, 1078), True, 'import matplotlib.pyplot as plt\n')] |
import cv2
import numpy as np
# we will use linear_assignment to quickly write experiments,
# later a customerized KM algorithms with various optimization in c++ is employed
# see https://github.com/berhane/LAP-solvers
# This is used for "Complete Matching" and we can remove unreasonable "workers" first and then apply it
import scipy.optimize as Optimizer
# This is used for "Maximum Matching". There is a desired algorithm implementation for our references
import scipy.sparse.csgraph as Graph
from pysvso.lib.maths.nputil import IoU_numeric, UIoU_numeric, cosine_dist
from pysvso.config import Settings
settings = Settings()
# setting debug variable
DEBUG = settings.DEBUG
# Linear Assignment Problems Solver Wrapper
class ROIMatcher:
from enum import Enum
class Algorithm(Enum):
COMPLETE_MATCHING = 0
MAXIMUM_MATCHING = 1
def __init__(self):
self.algorithm = ROIMatcher.Algorithm.COMPLETE_MATCHING
self.THR = 0.85 # 0.75
pass
def mtch(self, trackList, detected_objects, product="composite"):
N = len(trackList)
M = len(detected_objects)
weights = np.zeros((N, M))
distance = np.zeros((N, M))
corr = np.zeros((N, M))
def make_standard_tf_box(box):
y1, x1, y2, x2 = box
return np.array([x1, y1, x2, y2])
def compose_feat_vec(roi_feats, encodedId, score):
new_feats = np.concatenate([roi_feats, encodedId, np.array([score])], axis=0)
return new_feats
INF = float("inf")
EPILON = 1e-9
column_names = list(map(lambda detection: str(detection), detected_objects))
row_names = list(map(lambda landmark: str(landmark), trackList))
for i in range(N):
for j in range(M):
obj1 = trackList[i]
obj2 = detected_objects[j]
# deep feature score
ext_feat1 = compose_feat_vec(obj1.roi_features['roi_feature'],
obj1.roi_features['class_id'],
obj1.roi_features['score'])
ext_feat2 = compose_feat_vec(obj2.roi_features['roi_feature'],
obj2.roi_features['class_id'],
obj2.roi_features['score'])
# compute cosine distance
score = cosine_dist(ext_feat1, ext_feat2)
if np.isinf(score) or np.isnan(score):
raise Exception("Wrong Value!")
corr[i, j] = score
# must hold same semantic meaning if we belive our detectron
if obj1.roi_features['label'] != obj2.roi_features['label']:
weights[i, j] = 1000
continue
box1 = obj1.predicted_states
box2 = make_standard_tf_box(obj2.projected_pos)
left_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
right_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
# 0 ~ 1
# iou = IoU_numeric(box1, box2, left_area, right_area)
# distance[i,j] = 1. - iou
uiou = UIoU_numeric(box1, box2, left_area, right_area)
distance[i, j] = (1 + uiou) / 2.0
if np.isinf(uiou) or np.isnan(uiou):
raise Exception("Wrong Value!")
# assign IoU distance
# weights[i,j] = 1. - iou
# assign UIoU distance
if product == "composite":
weights[i, j] = (1 + uiou) / 2.0
# compute total score
weights[i, j] *= score
weights[i, j] = 1 - weights[i, j]
elif product == "feature_only":
weights[i, j] = 1 - score
else:
raise Exception("Not Implemented Yet!")
mtched, unmtched_landmarks, unmtched_detections = ([], [], [])
row_indice, col_indice = [], []
np.set_printoptions(precision=3)
if self.algorithm is ROIMatcher.Algorithm.COMPLETE_MATCHING:
if DEBUG:
# print weight matrix
# print("%d landmarks, %d detections, forms %d x %d cost matrix :" % (N, M, N, M))
# print(weights)
pass
# remove rows if there are no reasonable matches from cols so that we could
# apply maximum match here. I have to say that this is very important!
# @todo : TODO
# see http://csclab.murraystate.edu/~bob.pilgrim/445/munkres.html,
# also see https://www.kaggle.com/c/santa-workshop-tour-2019/discussion/120020
try:
row_indice, col_indice = Optimizer.linear_sum_assignment(weights)
except Exception as e:
print(e)
import pandas as pd
# iou scores
df = pd.DataFrame(distance, index=row_names, columns=column_names)
# print("UIoUs:")
# print(df)
# entropy scores
df = pd.DataFrame(corr, index=row_names, columns=column_names)
# print("Corr:")
# print(df)
raise (e)
else:
raise Exception("Not Implemented Yet!")
# use maximum matching strategy
assignment = np.zeros((N, M))
for i, col in enumerate(col_indice):
row = row_indice[i]
print("landmark %s +--> Observation %s : score %f, uiou %f, corr %f" % (
row_names[row], column_names[col], weights[row, col], distance[row, col], corr[row, col]
))
# the solver has probability to produce unmatched pairs with different labels
if trackList[row].roi_features['label'] != detected_objects[col].roi_features['label']:
unmtched_landmarks.append(row)
unmtched_detections.append(col)
continue
if weights[row, col] > self.THR or (product == "composite" and distance[row, col] < 0.5): # 0.5 !important
unmtched_landmarks.append(row)
unmtched_detections.append(col)
continue
mtched.append((row, col, weights[row, col], distance[row, col]))
assignment[row, col] = 1
for i in range(N):
if i not in row_indice:
unmtched_landmarks.append(i)
for j in range(M):
if j not in col_indice:
unmtched_detections.append(j)
import pandas as pd
# iou scores
df = pd.DataFrame(distance, index=row_names, columns=column_names)
# print("UIoUs:")
# print(df)
# entropy scores
df = pd.DataFrame(corr, index=row_names, columns=column_names)
# print("Corr:")
# print(df)
# draw matches
df = pd.DataFrame(np.array(assignment), index=row_names, columns=column_names)
# print("assignment:")
# print(df)
return mtched, unmtched_landmarks, unmtched_detections | [
"scipy.optimize.linear_sum_assignment",
"pysvso.config.Settings",
"pysvso.lib.maths.nputil.cosine_dist",
"pysvso.lib.maths.nputil.UIoU_numeric",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"pandas.DataFrame",
"numpy.isinf",
"numpy.set_printoptions"
] | [((623, 633), 'pysvso.config.Settings', 'Settings', ([], {}), '()\n', (631, 633), False, 'from pysvso.config import Settings\n'), ((1142, 1158), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (1150, 1158), True, 'import numpy as np\n'), ((1179, 1195), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (1187, 1195), True, 'import numpy as np\n'), ((1211, 1227), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (1219, 1227), True, 'import numpy as np\n'), ((4105, 4137), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (4124, 4137), True, 'import numpy as np\n'), ((5490, 5506), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (5498, 5506), True, 'import numpy as np\n'), ((6734, 6795), 'pandas.DataFrame', 'pd.DataFrame', (['distance'], {'index': 'row_names', 'columns': 'column_names'}), '(distance, index=row_names, columns=column_names)\n', (6746, 6795), True, 'import pandas as pd\n'), ((6881, 6938), 'pandas.DataFrame', 'pd.DataFrame', (['corr'], {'index': 'row_names', 'columns': 'column_names'}), '(corr, index=row_names, columns=column_names)\n', (6893, 6938), True, 'import pandas as pd\n'), ((1320, 1346), 'numpy.array', 'np.array', (['[x1, y1, x2, y2]'], {}), '([x1, y1, x2, y2])\n', (1328, 1346), True, 'import numpy as np\n'), ((7035, 7055), 'numpy.array', 'np.array', (['assignment'], {}), '(assignment)\n', (7043, 7055), True, 'import numpy as np\n'), ((2434, 2467), 'pysvso.lib.maths.nputil.cosine_dist', 'cosine_dist', (['ext_feat1', 'ext_feat2'], {}), '(ext_feat1, ext_feat2)\n', (2445, 2467), False, 'from pysvso.lib.maths.nputil import IoU_numeric, UIoU_numeric, cosine_dist\n'), ((3250, 3297), 'pysvso.lib.maths.nputil.UIoU_numeric', 'UIoU_numeric', (['box1', 'box2', 'left_area', 'right_area'], {}), '(box1, box2, left_area, right_area)\n', (3262, 3297), False, 'from pysvso.lib.maths.nputil import IoU_numeric, UIoU_numeric, cosine_dist\n'), ((4849, 4889), 
'scipy.optimize.linear_sum_assignment', 'Optimizer.linear_sum_assignment', (['weights'], {}), '(weights)\n', (4880, 4889), True, 'import scipy.optimize as Optimizer\n'), ((1469, 1486), 'numpy.array', 'np.array', (['[score]'], {}), '([score])\n', (1477, 1486), True, 'import numpy as np\n'), ((2487, 2502), 'numpy.isinf', 'np.isinf', (['score'], {}), '(score)\n', (2495, 2502), True, 'import numpy as np\n'), ((2506, 2521), 'numpy.isnan', 'np.isnan', (['score'], {}), '(score)\n', (2514, 2521), True, 'import numpy as np\n'), ((3367, 3381), 'numpy.isinf', 'np.isinf', (['uiou'], {}), '(uiou)\n', (3375, 3381), True, 'import numpy as np\n'), ((3385, 3399), 'numpy.isnan', 'np.isnan', (['uiou'], {}), '(uiou)\n', (3393, 3399), True, 'import numpy as np\n'), ((5036, 5097), 'pandas.DataFrame', 'pd.DataFrame', (['distance'], {'index': 'row_names', 'columns': 'column_names'}), '(distance, index=row_names, columns=column_names)\n', (5048, 5097), True, 'import pandas as pd\n'), ((5215, 5272), 'pandas.DataFrame', 'pd.DataFrame', (['corr'], {'index': 'row_names', 'columns': 'column_names'}), '(corr, index=row_names, columns=column_names)\n', (5227, 5272), True, 'import pandas as pd\n')] |
# Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import refs
from nbla_test_utils import list_context
from nbla_test_utils import function_tester
# All backend contexts (e.g. CPU/CUDA) that implement 'DepthwiseDeconvolution';
# each test below is parametrized over these.
ctxs = list_context('DepthwiseDeconvolution')
def ref_depthwise_deconvolution_1d(x, w, b, base_axis, pad, stride, dilation,
                                   divisor):
    """Reference 1D depthwise deconvolution.

    Emulates the operation with an ordinary grouped deconvolution where
    the number of groups is ``inmaps // divisor``.
    """
    # Give the weights the extra output-channel dimension expected by the
    # grouped deconvolution reference.
    weights = np.expand_dims(w, axis=1)
    samples = x.reshape((-1,) + x.shape[base_axis:])
    outputs = [
        refs.deconvolution_1d(sample, weights, b, pad, stride, dilation,
                              sample.shape[0] // divisor)[np.newaxis]
        for sample in samples
    ]
    stacked = np.vstack(outputs)
    return stacked.reshape(x.shape[:base_axis] + stacked.shape[1:])
def ref_depthwise_deconvolution_2d(x, w, b, base_axis, pad, stride, dilation,
                                   divisor):
    """Reference 2D depthwise deconvolution.

    Emulates the operation with an ordinary grouped deconvolution using
    ``group = inmaps // divisor`` — matching the 1D variant above.  (The
    previous docstring said ``group = inmaps``, but the code has always
    divided the channel count by ``divisor``.)
    """
    # insert second (output-channel) dimension into the weights
    w = np.expand_dims(w, axis=1)
    y = []
    for xx in x.reshape((-1,) + x.shape[base_axis:]):
        groups = xx.shape[0] // divisor
        yy = refs.deconvolution_2d(xx, w, b, pad, stride, dilation, groups)
        y.append(yy[np.newaxis])
    y = np.vstack(y)
    return y.reshape(x.shape[:base_axis] + y.shape[1:])
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("inshape, kernel, pad, stride, dilation, divisor", [
    ((2, 2, 10, 10), (3, 2), (3, 0), (1, 2), (2, 1), 1),
    ((2, 3, 10, 10), (3, 2), (3, 0), (1, 2), (2, 1), 3),
    ((2, 4, 10, 10), (3, 2), (0, 0), (1, 1), (1, 1), 1),
    ((2, 6, 10, 10), (3, 2), (0, 0), (1, 1), (1, 1), 2),
    ((3, 2, 10, 10), (3, 3), (3, 0), (1, 2), (2, 1), 1),
    ((3, 2, 10, 10), (3, 3), (3, 0), (1, 2), (2, 1), 2),
])
@pytest.mark.parametrize("with_bias", [True, False])
def test_depthwise_deconvolution_2d_forward_backward(inshape, kernel, pad,
                                                     stride, dilation, divisor,
                                                     with_bias, seed, ctx,
                                                     func_name):
    """Check F.depthwise_deconvolution (2D) against the numpy reference."""
    base_axis = len(inshape) - 3
    channels = inshape[base_axis]
    rng = np.random.RandomState(seed)
    x = rng.randn(*inshape).astype(np.float32)
    w = rng.randn(*((channels,) + kernel)).astype(np.float32)
    if with_bias:
        b = rng.randn(channels // divisor).astype(np.float32)
    else:
        b = None
    function_tester(rng, F.depthwise_deconvolution,
                    ref_depthwise_deconvolution_2d, [x, w, b],
                    [base_axis, pad, stride, dilation, divisor],
                    atol_f=1e-4, atol_b=4e-3, dstep=1e-2,
                    ctx=ctx, func_name=func_name)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("inshape, kernel, pad, stride, dilation, divisor", [
    ((2, 2, 10), (3,), (3,), (1,), (2,), 1),
    ((2, 3, 10), (3,), (3,), (1,), (2,), 3),
    ((2, 4, 10), (3,), (0,), (2,), (1,), 1),
    ((2, 4, 10), (3,), (0,), (2,), (1,), 2),
    ((3, 2, 10), (4,), (3,), (1,), (2,), 1),
    ((3, 9, 10), (4,), (3,), (1,), (2,), 3),
])
@pytest.mark.parametrize("with_bias", [True, False])
def test_depthwise_deconvolution_1d_forward_backward(inshape, kernel, pad,
                                                     stride, dilation, divisor,
                                                     with_bias, seed, ctx,
                                                     func_name):
    """Check F.depthwise_deconvolution (1D) against the numpy reference.

    Forward outputs and backward gradients are both compared by
    function_tester on every backend context in ``ctxs``.
    """
    # Channel axis sits two positions before the end for 1D data (N, C, L).
    base_axis = len(inshape) - 2
    sample_channels = inshape[base_axis]
    outmap_channels = sample_channels // divisor
    rng = np.random.RandomState(seed)
    x = rng.randn(*inshape).astype(np.float32)
    w = rng.randn(*((sample_channels,) + kernel)).astype(np.float32)
    b = rng.randn(outmap_channels).astype(np.float32) if with_bias else None
    inputs = [x, w, b]
    func_args = [base_axis, pad, stride, dilation, divisor]
    reference = ref_depthwise_deconvolution_1d
    function_tester(rng, F.depthwise_deconvolution, reference, inputs,
                    func_args, atol_f=1e-4, atol_b=3e-3, dstep=1e-2,
                    ctx=ctx, func_name=func_name)
@pytest.mark.parametrize("inshape, kernel, divisor, outshape", [
    ((2, 4, 10), (3,), 1, (2, 4, 12)),
    ((2, 4, 10), (3,), 2, (2, 2, 12)),
])
def test_parametric_function_1d(inshape, kernel, divisor, outshape):
    """Check output/parameter shapes of PF.depthwise_deconvolution in 1D."""
    base_axis = len(inshape) - 2
    in_channels = inshape[base_axis]
    y = PF.depthwise_deconvolution(nn.Variable(inshape), kernel, divisor=divisor)
    params = nn.get_parameters()
    assert y.shape == outshape
    assert params['depthwise_deconv/W'].shape == (in_channels,) + kernel
    assert params['depthwise_deconv/b'].shape == (in_channels // divisor,)
    # Drop the registered parameters so later tests start from a clean slate.
    nn.clear_parameters()
@pytest.mark.parametrize("inshape, kernel, divisor, outshape", [
    ((2, 4, 10, 10), (3, 2), 1, (2, 4, 12, 11)),
    ((2, 4, 10, 10), (3, 2), 2, (2, 2, 12, 11)),
])
def test_parametric_function_2d(inshape, kernel, divisor, outshape):
    """Check output/parameter shapes of PF.depthwise_deconvolution in 2D."""
    # Channel axis sits three positions before the end for 2D data (N, C, H, W).
    base_axis = len(inshape) - 3
    sample_channels = inshape[base_axis]
    outmap_channels = sample_channels // divisor
    x = nn.Variable(inshape)
    y = PF.depthwise_deconvolution(x, kernel, divisor=divisor)
    p = nn.get_parameters()
    assert y.shape == outshape
    assert p['depthwise_deconv/W'].shape == (sample_channels,) + kernel
    assert p['depthwise_deconv/b'].shape == (outmap_channels,)
    # Drop the registered parameters so later tests start from a clean slate.
    nn.clear_parameters()
| [
"nnabla.parametric_functions.depthwise_deconvolution",
"nbla_test_utils.function_tester",
"nbla_test_utils.list_context",
"nnabla.clear_parameters",
"refs.deconvolution_2d",
"nnabla.get_parameters",
"refs.deconvolution_1d",
"pytest.mark.parametrize",
"numpy.vstack",
"numpy.expand_dims",
"nnabla.... | [((858, 896), 'nbla_test_utils.list_context', 'list_context', (['"""DepthwiseDeconvolution"""'], {}), "('DepthwiseDeconvolution')\n", (870, 896), False, 'from nbla_test_utils import list_context\n'), ((2096, 2143), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ctx, func_name"""', 'ctxs'], {}), "('ctx, func_name', ctxs)\n", (2119, 2143), False, 'import pytest\n'), ((2145, 2183), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed"""', '[313]'], {}), "('seed', [313])\n", (2168, 2183), False, 'import pytest\n'), ((2185, 2600), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inshape, kernel, pad, stride, dilation, divisor"""', '[((2, 2, 10, 10), (3, 2), (3, 0), (1, 2), (2, 1), 1), ((2, 3, 10, 10), (3, \n 2), (3, 0), (1, 2), (2, 1), 3), ((2, 4, 10, 10), (3, 2), (0, 0), (1, 1),\n (1, 1), 1), ((2, 6, 10, 10), (3, 2), (0, 0), (1, 1), (1, 1), 2), ((3, 2,\n 10, 10), (3, 3), (3, 0), (1, 2), (2, 1), 1), ((3, 2, 10, 10), (3, 3), (\n 3, 0), (1, 2), (2, 1), 2)]'], {}), "('inshape, kernel, pad, stride, dilation, divisor',\n [((2, 2, 10, 10), (3, 2), (3, 0), (1, 2), (2, 1), 1), ((2, 3, 10, 10),\n (3, 2), (3, 0), (1, 2), (2, 1), 3), ((2, 4, 10, 10), (3, 2), (0, 0), (1,\n 1), (1, 1), 1), ((2, 6, 10, 10), (3, 2), (0, 0), (1, 1), (1, 1), 2), ((\n 3, 2, 10, 10), (3, 3), (3, 0), (1, 2), (2, 1), 1), ((3, 2, 10, 10), (3,\n 3), (3, 0), (1, 2), (2, 1), 2)])\n", (2208, 2600), False, 'import pytest\n'), ((2608, 2659), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_bias"""', '[True, False]'], {}), "('with_bias', [True, False])\n", (2631, 2659), False, 'import pytest\n'), ((3632, 3679), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ctx, func_name"""', 'ctxs'], {}), "('ctx, func_name', ctxs)\n", (3655, 3679), False, 'import pytest\n'), ((3681, 3719), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed"""', '[313]'], {}), "('seed', [313])\n", (3704, 3719), False, 'import pytest\n'), ((3721, 4061), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inshape, kernel, pad, stride, dilation, divisor"""', '[((2, 2, 10), (3,), (3,), (1,), (2,), 1), ((2, 3, 10), (3,), (3,), (1,), (2\n ,), 3), ((2, 4, 10), (3,), (0,), (2,), (1,), 1), ((2, 4, 10), (3,), (0,\n ), (2,), (1,), 2), ((3, 2, 10), (4,), (3,), (1,), (2,), 1), ((3, 9, 10),\n (4,), (3,), (1,), (2,), 3)]'], {}), "('inshape, kernel, pad, stride, dilation, divisor',\n [((2, 2, 10), (3,), (3,), (1,), (2,), 1), ((2, 3, 10), (3,), (3,), (1,),\n (2,), 3), ((2, 4, 10), (3,), (0,), (2,), (1,), 1), ((2, 4, 10), (3,), (\n 0,), (2,), (1,), 2), ((3, 2, 10), (4,), (3,), (1,), (2,), 1), ((3, 9, \n 10), (4,), (3,), (1,), (2,), 3)])\n", (3744, 4061), False, 'import pytest\n'), ((4072, 4123), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_bias"""', '[True, False]'], {}), "('with_bias', [True, False])\n", (4095, 4123), False, 'import pytest\n'), ((5096, 5233), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inshape, kernel, divisor, outshape"""', '[((2, 4, 10), (3,), 1, (2, 4, 12)), ((2, 4, 10), (3,), 2, (2, 2, 12))]'], {}), "('inshape, kernel, divisor, outshape', [((2, 4, 10),\n (3,), 1, (2, 4, 12)), ((2, 4, 10), (3,), 2, (2, 2, 12))])\n", (5119, 5233), False, 'import pytest\n'), ((5748, 5910), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inshape, kernel, divisor, outshape"""', '[((2, 4, 10, 10), (3, 2), 1, (2, 4, 12, 11)), ((2, 4, 10, 10), (3, 2), 2, (\n 2, 2, 12, 11))]'], {}), "('inshape, kernel, divisor, outshape', [((2, 4, 10, \n 10), (3, 2), 1, (2, 4, 12, 11)), ((2, 4, 10, 10), (3, 2), 2, (2, 2, 12,\n 11))])\n", (5771, 5910), False, 'import pytest\n'), ((1181, 1206), 'numpy.expand_dims', 'np.expand_dims', (['w'], {'axis': '(1)'}), '(w, axis=1)\n', (1195, 1206), True, 'import numpy as np\n'), ((1431, 1443), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (1440, 1443), True, 'import numpy as np\n'), ((1774, 1799), 'numpy.expand_dims', 'np.expand_dims', (['w'], {'axis': 
'(1)'}), '(w, axis=1)\n', (1788, 1799), True, 'import numpy as np\n'), ((2024, 2036), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (2033, 2036), True, 'import numpy as np\n'), ((3088, 3115), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (3109, 3115), True, 'import numpy as np\n'), ((3443, 3600), 'nbla_test_utils.function_tester', 'function_tester', (['rng', 'F.depthwise_deconvolution', 'reference', 'inputs', 'func_args'], {'atol_f': '(0.0001)', 'atol_b': '(0.004)', 'dstep': '(0.01)', 'ctx': 'ctx', 'func_name': 'func_name'}), '(rng, F.depthwise_deconvolution, reference, inputs,\n func_args, atol_f=0.0001, atol_b=0.004, dstep=0.01, ctx=ctx, func_name=\n func_name)\n', (3458, 3600), False, 'from nbla_test_utils import function_tester\n'), ((4552, 4579), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4573, 4579), True, 'import numpy as np\n'), ((4907, 5064), 'nbla_test_utils.function_tester', 'function_tester', (['rng', 'F.depthwise_deconvolution', 'reference', 'inputs', 'func_args'], {'atol_f': '(0.0001)', 'atol_b': '(0.003)', 'dstep': '(0.01)', 'ctx': 'ctx', 'func_name': 'func_name'}), '(rng, F.depthwise_deconvolution, reference, inputs,\n func_args, atol_f=0.0001, atol_b=0.003, dstep=0.01, ctx=ctx, func_name=\n func_name)\n', (4922, 5064), False, 'from nbla_test_utils import function_tester\n'), ((5441, 5461), 'nnabla.Variable', 'nn.Variable', (['inshape'], {}), '(inshape)\n', (5452, 5461), True, 'import nnabla as nn\n'), ((5470, 5524), 'nnabla.parametric_functions.depthwise_deconvolution', 'PF.depthwise_deconvolution', (['x', 'kernel'], {'divisor': 'divisor'}), '(x, kernel, divisor=divisor)\n', (5496, 5524), True, 'import nnabla.parametric_functions as PF\n'), ((5533, 5552), 'nnabla.get_parameters', 'nn.get_parameters', ([], {}), '()\n', (5550, 5552), True, 'import nnabla as nn\n'), ((5723, 5744), 'nnabla.clear_parameters', 'nn.clear_parameters', ([], {}), '()\n', (5742, 5744), True, 
'import nnabla as nn\n'), ((6113, 6133), 'nnabla.Variable', 'nn.Variable', (['inshape'], {}), '(inshape)\n', (6124, 6133), True, 'import nnabla as nn\n'), ((6142, 6196), 'nnabla.parametric_functions.depthwise_deconvolution', 'PF.depthwise_deconvolution', (['x', 'kernel'], {'divisor': 'divisor'}), '(x, kernel, divisor=divisor)\n', (6168, 6196), True, 'import nnabla.parametric_functions as PF\n'), ((6205, 6224), 'nnabla.get_parameters', 'nn.get_parameters', ([], {}), '()\n', (6222, 6224), True, 'import nnabla as nn\n'), ((6395, 6416), 'nnabla.clear_parameters', 'nn.clear_parameters', ([], {}), '()\n', (6414, 6416), True, 'import nnabla as nn\n'), ((1326, 1388), 'refs.deconvolution_1d', 'refs.deconvolution_1d', (['xx', 'w', 'b', 'pad', 'stride', 'dilation', 'groups'], {}), '(xx, w, b, pad, stride, dilation, groups)\n', (1347, 1388), False, 'import refs\n'), ((1919, 1981), 'refs.deconvolution_2d', 'refs.deconvolution_2d', (['xx', 'w', 'b', 'pad', 'stride', 'dilation', 'groups'], {}), '(xx, w, b, pad, stride, dilation, groups)\n', (1940, 1981), False, 'import refs\n')] |
import cv2 as cv
import numpy as np
from Step_2_normalize_data import normalize_data
from Step_6_load_model import load_model
from Step_7_predict import predict
# 400x400 single-channel canvas the user draws digits on with the mouse.
frame = np.zeros((400, 400, 1))
# Trained classifier and its label set, loaded once at startup.
model, labels = load_model("model")
def do_predict():
    """Classify the current drawing, print the prediction, then clear the canvas."""
    global frame, model, labels
    # Downscale the drawing to the model's 28x28 input size.
    small = np.reshape(cv.resize(frame, (28, 28)), (28, 28, 1))
    cv.imwrite('temp.jpg', small)  # keep a snapshot of what the model sees
    batch = normalize_data(np.array([small]) / 255)
    prediction = np.argmax(predict(batch, model)[0])
    print(prediction)
    cv.waitKey(1000)  # give the user a moment before the canvas resets
    frame = np.zeros((400, 400, 1))
def mouse_callback(event, x, y, flags, param):
    """Paint a filled white circle at the cursor while the left button is held.

    Tests the left-button bit with a bitwise AND so drawing keeps working
    when modifier flags (Ctrl/Shift/Alt) are combined into ``flags``; the
    previous equality check silently dropped those drag events.
    """
    global frame
    if flags & cv.EVENT_FLAG_LBUTTON:
        cv.circle(frame, (x, y), 15, (255, 255, 255), -1)
# Create the window and register the mouse handler once, before the event
# loop -- the original re-ran both calls on every frame, which is redundant
# work (namedWindow/setMouseCallback are idempotent setup calls).
cv.namedWindow("frame")
cv.setMouseCallback("frame", mouse_callback)
while True:
    cv.imshow('frame', frame)
    k = cv.waitKey(1)
    if k in [ord('q'), 27]:  # 'q' or Esc quits
        break
    if k in [32]:            # spacebar runs a prediction
        do_predict()
| [
"cv2.setMouseCallback",
"cv2.imwrite",
"Step_2_normalize_data.normalize_data",
"numpy.reshape",
"cv2.imshow",
"numpy.array",
"numpy.zeros",
"cv2.circle",
"Step_6_load_model.load_model",
"Step_7_predict.predict",
"cv2.resize",
"cv2.waitKey",
"cv2.namedWindow"
] | [((172, 195), 'numpy.zeros', 'np.zeros', (['(400, 400, 1)'], {}), '((400, 400, 1))\n', (180, 195), True, 'import numpy as np\n'), ((212, 231), 'Step_6_load_model.load_model', 'load_model', (['"""model"""'], {}), "('model')\n", (222, 231), False, 'from Step_6_load_model import load_model\n'), ((314, 340), 'cv2.resize', 'cv.resize', (['image', '(28, 28)'], {}), '(image, (28, 28))\n', (323, 340), True, 'import cv2 as cv\n'), ((353, 383), 'numpy.reshape', 'np.reshape', (['image', '(28, 28, 1)'], {}), '(image, (28, 28, 1))\n', (363, 383), True, 'import numpy as np\n'), ((388, 417), 'cv2.imwrite', 'cv.imwrite', (['"""temp.jpg"""', 'image'], {}), "('temp.jpg', image)\n", (398, 417), True, 'import cv2 as cv\n'), ((427, 444), 'numpy.array', 'np.array', (['[image]'], {}), '([image])\n', (435, 444), True, 'import numpy as np\n'), ((469, 486), 'Step_2_normalize_data.normalize_data', 'normalize_data', (['X'], {}), '(X)\n', (483, 486), False, 'from Step_2_normalize_data import normalize_data\n'), ((544, 560), 'cv2.waitKey', 'cv.waitKey', (['(1000)'], {}), '(1000)\n', (554, 560), True, 'import cv2 as cv\n'), ((573, 596), 'numpy.zeros', 'np.zeros', (['(400, 400, 1)'], {}), '((400, 400, 1))\n', (581, 596), True, 'import numpy as np\n'), ((778, 801), 'cv2.namedWindow', 'cv.namedWindow', (['"""frame"""'], {}), "('frame')\n", (792, 801), True, 'import cv2 as cv\n'), ((806, 850), 'cv2.setMouseCallback', 'cv.setMouseCallback', (['"""frame"""', 'mouse_callback'], {}), "('frame', mouse_callback)\n", (825, 850), True, 'import cv2 as cv\n'), ((855, 880), 'cv2.imshow', 'cv.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (864, 880), True, 'import cv2 as cv\n'), ((890, 903), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (900, 903), True, 'import cv2 as cv\n'), ((710, 759), 'cv2.circle', 'cv.circle', (['frame', '(x, y)', '(15)', '(255, 255, 255)', '(-1)'], {}), '(frame, (x, y), 15, (255, 255, 255), -1)\n', (719, 759), True, 'import cv2 as cv\n'), ((505, 522), 
'Step_7_predict.predict', 'predict', (['X', 'model'], {}), '(X, model)\n', (512, 522), False, 'from Step_7_predict import predict\n')] |
from collections import defaultdict, deque, Counter
from pprint import pprint
from day import Day, util
import numpy as np
# Grid cell symbols used in the seat-layout puzzle input.
OCCUPIED = '#'
EMPTY = 'L'
FLOOR = '.'
class Day11Part1(Day):
    """Advent of Code day 11 part 1: seat-occupancy cellular automaton.

    Repeatedly applies the seating rules until the grid stops changing,
    then reports the number of occupied seats.
    """

    day = 11
    part = 1

    # Relative coordinates of the eight adjacent cells.
    OFFSETS = (
        (0, -1), (0, 1),                     # left/right
        (-1, 0), (1, 0),                     # up/down
        (1, 1), (-1, -1), (-1, 1), (1, -1),  # diagonals
    )

    def get_sample_input(self):
        return ('L.LL.LL.LL\n'
                'LLLLLLL.LL\n'
                'L.L.L..L..\n'
                'LLLL.LL.LL\n'
                'L.LL.LL.LL\n'
                'L.LLLLL.LL\n'
                '..L.L.....\n'
                'LLLLLLLLLL\n'
                'L.LLLLLL.L\n'
                'L.LLLLL.LL')

    def get_neighbors(self, grid: np.ndarray, pos: tuple):
        """Return the values of every in-bounds neighbour of *pos*."""
        rows, cols = grid.shape
        cells = []
        for dy, dx in self.OFFSETS:
            ny, nx = pos[0] + dy, pos[1] + dx
            if 0 <= nx < cols and 0 <= ny < rows:
                cells.append(grid[ny, nx])
        return tuple(cells)

    def parse_input(self):
        """Parse the puzzle text into a 2D array of single characters."""
        rows = self.input_text.splitlines()
        return np.array(tuple(map(list, rows)), dtype=str)

    def solve(self):
        grid = self.parse_input()
        nxt = grid.copy()
        while True:
            # Apply both rules simultaneously by writing into a second array.
            for pos, seat in np.ndenumerate(grid):
                around = self.get_neighbors(grid, pos)
                if seat == EMPTY and OCCUPIED not in around:
                    nxt[pos] = OCCUPIED
                elif seat == OCCUPIED and around.count(OCCUPIED) >= 4:
                    nxt[pos] = EMPTY
            stable = (grid == nxt).all()
            grid = nxt.copy()
            if stable:
                break
        print('day 11 part 1 answer:', (grid == OCCUPIED).sum())
| [
"numpy.ndenumerate"
] | [((1205, 1225), 'numpy.ndenumerate', 'np.ndenumerate', (['grid'], {}), '(grid)\n', (1219, 1225), True, 'import numpy as np\n')] |
from os import listdir
from os.path import isfile, join
import pickle
import numpy as np
# Maps GLUE-style task names to the naming convention used in cache file names.
TASK_DICT = {'MRPC': 'mrpc', 'STS-B': 'STSBenchmark', 'SST-2': 'SST2'}
class BaseEncoder():
    """Abstract base for sentence encoders with an on-disk embedding cache.

    Subclasses implement construct_encoder / convert_sentences_to_features /
    encode; this base handles cache file naming, loading and saving.
    """

    def __init__(self, model_name, encode_capacity, path_cache):
        self.model_name = model_name          # model identifier or checkpoint path
        self.encode_capacity = encode_capacity  # token budget per mini-batch
        self.path_cache = path_cache          # directory holding pickle caches
        self.model = None
        self.tokenizer = None
        self.count = 0

    def parse_model_name_to_cache_name(self, model_name, task, location):
        """Build the cache file name for (model, task, location).

        A '/' in model_name is treated as a checkpoint path whose components
        at depth 5+ are (task, model, exp_name, seed, ckpt) -- this assumes a
        fixed directory layout (NOTE(review): temp[5:] is fragile; confirm the
        expected path depth against the training scripts).
        """
        if '/' in model_name:
            temp = model_name.split('/')
            task, model, exp_name, seed, ckpt = temp[5:]
            task = TASK_DICT[task]
            return "{}_{}_{}_{}_{}.pickle".format(task, model, exp_name, seed, ckpt)
        else:
            return "{}_{}_{}.pickle".format(model_name, task, location)

    def load_cache(self, task, location):
        """Return the pickled cache for (task, location), or {} if absent.

        Side effect: sets self.flag_cache_save so save_cache() knows whether
        writing a new cache file is needed.
        """
        cache_name = self.parse_model_name_to_cache_name(self.model_name, task, location)
        onlyfiles = [f for f in listdir(self.path_cache) if isfile(join(self.path_cache, f))]
        # ====== Look Up existing cache ====== #
        if cache_name in onlyfiles:
            print("cache Found {}".format(cache_name))
            with open(join(self.path_cache, cache_name), 'rb') as f:
                cache = pickle.load(f)
            print("cache Loaded")
            self.flag_cache_save = False
            return cache
        else:
            print("cache not Found {}".format(cache_name))
            self.flag_cache_save = True
            return {}

    def save_cache(self, task, location):
        """Write self.cache to disk, unless it was loaded from an existing file."""
        if self.flag_cache_save:
            print("Start saving cache")
            cache_name = self.parse_model_name_to_cache_name(self.model_name, task, location)
            with open(join(self.path_cache, cache_name), 'wb') as f:
                pickle.dump(self.cache, f, pickle.HIGHEST_PROTOCOL)
            print("Saved cache {}".format(cache_name))
        else:
            print("Skipping saving cache")

    def prepare(self, task, location):
        """Load the cache; only build the actual encoder when no cache exists."""
        self.cache = self.load_cache(task, location)
        if bool(self.cache):
            # Cache hit: embeddings are served from disk, no model needed.
            self.model = None
            self.tokenizer = None
            self.count = 0
        else:
            self.model, self.tokenizer = self.construct_encoder()

    def get_mini_batch_size(self, sentences):
        """Choose a batch size so batch_size * max_seq_length stays within capacity."""
        seq_length = max([len(tokens) for tokens in sentences])
        mini_batch_size = self.encode_capacity // seq_length + 1
        return mini_batch_size

    def get_head_embedding(self, output, layer, head, head_size):
        """Slice one attention head (or the full layer when head == -1)."""
        if head == -1:
            embedding = output[:, layer, :]
        else:
            embedding = output[:, layer, head * head_size:(head + 1) * head_size]
        return embedding

    def get_multi_head_embedding(self, output, heads, head_size):
        """Concatenate the embeddings of the requested (layer, head) pairs."""
        if len(heads) == 1:  # If single attention head is probed
            layer, head = heads[0]
            embedding = self.get_head_embedding(output, layer, head, head_size)
        else:  # If multiple attention head is selected
            list_embedding = []
            for layer, head in heads:
                embedding = self.get_head_embedding(output, layer, head, head_size)
                list_embedding.append(embedding)
            embedding = np.concatenate(list_embedding, axis=1)
        return embedding

    def construct_encoder(self):
        raise NotImplementedError

    def convert_sentences_to_features(self, sentences, seq_length):
        raise NotImplementedError

    def encode(self, sentences, heads, head_size, location):
        raise NotImplementedError
if __name__ == '__main__':
    # NOTE(review): BERTEncoder is not defined in this module, so running the
    # file directly raises NameError; prepare() here also takes (task,
    # location), not a single 'Length' argument. Presumably this smoke test
    # targets a subclass module -- confirm before relying on it.
    model = BERTEncoder('bert-base-uncased')
    model.prepare('Length')
    model.construct_encoder()
| [
"os.listdir",
"pickle.dump",
"os.path.join",
"pickle.load",
"numpy.concatenate"
] | [((3327, 3365), 'numpy.concatenate', 'np.concatenate', (['list_embedding'], {'axis': '(1)'}), '(list_embedding, axis=1)\n', (3341, 3365), True, 'import numpy as np\n'), ((1026, 1050), 'os.listdir', 'listdir', (['self.path_cache'], {}), '(self.path_cache)\n', (1033, 1050), False, 'from os import listdir\n'), ((1322, 1336), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1333, 1336), False, 'import pickle\n'), ((1879, 1930), 'pickle.dump', 'pickle.dump', (['self.cache', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(self.cache, f, pickle.HIGHEST_PROTOCOL)\n', (1890, 1930), False, 'import pickle\n'), ((1061, 1085), 'os.path.join', 'join', (['self.path_cache', 'f'], {}), '(self.path_cache, f)\n', (1065, 1085), False, 'from os.path import isfile, join\n'), ((1251, 1284), 'os.path.join', 'join', (['self.path_cache', 'cache_name'], {}), '(self.path_cache, cache_name)\n', (1255, 1284), False, 'from os.path import isfile, join\n'), ((1816, 1849), 'os.path.join', 'join', (['self.path_cache', 'cache_name'], {}), '(self.path_cache, cache_name)\n', (1820, 1849), False, 'from os.path import isfile, join\n')] |
import numpy as np
import matplotlib.pyplot as plt
import simpy
import math
STATES = 25  # state in ((,-1], [0, 1], [2, 3], ..., [46,))
ACTIONS = 10  # action in (0, 3, 6, 9, 12, 15, 18, 21, 24, 25)
ORDER_COST = 1       # fixed cost per order placed
STOCK_COST = 0.025   # holding cost per unit on hand per period
PENALTY = 0.1        # backorder penalty per unit short per period
DISCOUNT = 0.9       # Q-learning discount factor (gamma)
LEARNING = 0.2       # Q-learning step size (alpha)
class CustomContainer(simpy.Container):
    """simpy Container extended with back-order tracking and RL helpers."""

    def __init__(self, env, capacity=float('inf'), init=0):
        self.env = env
        self.orders = []  # amounts ordered but not yet delivered
        super(CustomContainer, self).__init__(env, capacity, init)

    @property
    def shortage(self):
        """Total amount requested by customers still waiting in the queue."""
        return sum(waiting.amount for waiting in self.get_queue)

    def get_state(self):
        """Encode (stock + on-order - shortage) into a discrete state index."""
        position = self.level + sum(self.orders) - self.shortage
        bucket = math.floor(position / 2) + 1
        return max(0, min(bucket, STATES - 1))

    def get_reward(self, action):
        """Cost this period: holding + backorder penalty, plus order cost if ordering."""
        cost = self.level * STOCK_COST + self.shortage * PENALTY
        return cost + ORDER_COST if action > 0 else cost
class Agent:
    """Tabular epsilon-greedy Q-learning agent minimizing inventory cost."""

    def __init__(self, env):
        self.env = env
        self.epsilon = 0.1
        self.recent_rewards = [0] * 100  # sliding window of the last 100 costs
        # Random initial Q-table; impossible state/action pairs are pinned to
        # +inf so argmin (cost minimization) can never pick them.
        self.Q = np.random.rand(STATES * ACTIONS).reshape(STATES, ACTIONS) * 10
        self.Q[0][0] = math.inf  # at state 0 some order is mandatory
        for a in range(1, ACTIONS):
            self.Q[STATES - 1][a] = math.inf  # at the top state ordering is forbidden

    @property
    def average_reward(self):
        """Mean cost over the recent-rewards window."""
        return sum(self.recent_rewards) / len(self.recent_rewards)

    def e_greedy(self, state):
        """Pick an action: greedy (min Q) with probability 1-epsilon, else random."""
        if state == 0:  # must order at least some amount
            if self.epsilon <= np.random.rand():
                return self.Q[state].argmin()
            return np.random.randint(1, ACTIONS)
        if state == STATES - 1:  # inventory position maxed out: cannot order
            return 0
        if self.epsilon <= np.random.rand():
            return self.Q[state].argmin()
        return np.random.randint(0, ACTIONS)

    def move(self):
        """Periodic review loop: act, wait one period, observe, learn."""
        state_to = self.env.model.get_state()
        while True:
            state_from = state_to
            action = self.e_greedy(state_from)
            if action > 0:
                self.env.model.orders.append(action * 3)  # order size = 3 * action
                self.env.process(deliverer(self.env))     # schedule the delivery
                if not self.env.record.triggered:
                    self.env.record.succeed()             # record log
            yield self.env.timeout(1)                     # periodic inventory check
            state_to = self.env.model.get_state()
            reward = self.env.model.get_reward(action)
            self.recent_rewards.append(reward)
            self.recent_rewards.pop(0)
            self.update_Q(state_from, action, state_to, reward)

    def update_Q(self, state_from, action, state_to, reward):
        """Q-learning update toward reward + gamma * min_a' Q(state_to, a')."""
        target = reward + DISCOUNT * self.Q[state_to].min()
        self.Q[state_from][action] += LEARNING * (target - self.Q[state_from][action])

    def update_epsilon(self):
        """Decay the exploration rate."""
        self.epsilon *= 0.9
def deliverer(env):
    """Deliver the oldest outstanding order after the fixed lead time of 3."""
    yield env.timeout(3)  # delivery lead time
    pending = env.model.orders.pop(0)
    env.model.put(pending)
def customer(env):
    """Customer arrivals: exponential inter-arrival times, uniform demand in 1..7."""
    while True:
        delay = np.random.exponential(1)  # mean inter-arrival time = 1
        yield env.timeout(delay)
        demand = np.random.randint(1, 8)  # uniform on {1,...,7}, mean 4
        env.model.get(demand)
        if not env.record.triggered:
            env.record.succeed()  # record log
def recorder(env):  # process for recording log for visualization
    """Append a sample to the plot buffers each time env.record is triggered.

    Keeps only the last 200 time units of inventory data (env.y11/y12/y13)
    and rescales their x-axis to the [0, 200] window; the cost curve
    (env.t/env.z) is kept in full.
    """
    _t = []  # event timestamps for the inventory window (trimmed below)
    env.record = env.event()
    while True:
        yield env.record  # wait until agent/customer triggers a log event
        _t.append(env.now)
        env.y11.append(env.model.level)
        env.y12.append(sum(env.model.orders))
        env.y13.append(env.model.shortage)
        env.t.append(env.now)
        env.z.append(env.agent.average_reward)
        if env.now > 200:
            # Drop samples older than 200 time units from the window plots.
            t_min = env.now -200
            env.y11 = [
                env.y11[i] for i in range(len(_t)) if _t[i] > t_min
            ]
            env.y12 = [
                env.y12[i] for i in range(len(_t)) if _t[i] > t_min
            ]
            env.y13 = [
                env.y13[i] for i in range(len(_t)) if _t[i] > t_min
            ]
            _t = [_t[i] for i in range(len(_t)) if _t[i] > t_min]
            # Shift timestamps so the newest sample lands at x = 200.
            env.x = [_t[i] -max(_t) +200 for i in range(len(_t))]
        else:
            env.x = _t
        env.record = env.event()  # re-arm for the next log event
def main():
    """Run the inventory simulation with live matplotlib visualization.

    Sets up the simpy environment, the RL agent and its processes, then
    advances the simulation in steps of 50 time units while redrawing the
    cost curve, the inventory window and a heatmap of the Q-table.
    """
    env = simpy.Environment()
    env.model = CustomContainer(env)
    env.agent = Agent(env)
    env.process(recorder(env))
    env.process(customer(env))
    env.process(env.agent.move())
    # ---------- code for visualization ----------
    # Shared plot buffers filled by recorder().
    env.x = []
    env.y11 = []
    env.y12 = []
    env.y13 = []
    env.t = []
    env.z = []
    fig = plt.figure(1, figsize=(12, 8))
    # Top-left: running average cost over the whole run.
    ax1 = fig.add_subplot(221)
    ax1.set_xlabel('time')
    ax1.set_ylabel('cost')
    ax1.set_xlim(0, 50000)
    ax1.set_ylim(0, 2)
    line1, = ax1.plot(env.t, env.z, label='average cost')
    ax1.legend()
    ax1.grid()
    # Top-right: last-200-time-units inventory window.
    ax2 = fig.add_subplot(222)
    ax2.set_xlabel('time')
    ax2.set_ylabel('number')
    ax2.set_xlim(0, 200)
    ax2.set_ylim(0, 60)
    line21, = ax2.plot(env.x, env.y11, label='at hand')
    line22, = ax2.plot(env.x, env.y12, label='ordered')
    line23, = ax2.plot(env.x, env.y13, label='shortage')
    ax2.legend()
    ax2.grid()
    # Bottom-left: heatmap of the Q-table.
    ax3 = fig.add_subplot(223)
    for t in range(1, 1000):
        env.run(until=t*50)  # stepwise execution
        if t % 50 == 0:
            # Decay exploration every 50 steps (2500 time units).
            env.agent.update_epsilon()
            print('epsilon = {}'.format(env.agent.epsilon))
        line1.set_data(env.t, env.z)
        line21.set_data(env.x, env.y11)
        line22.set_data(env.x, env.y12)
        line23.set_data(env.x, env.y13)
        heatmap = ax3.imshow(env.agent.Q, vmin=2, vmax=10, cmap='jet', aspect=0.25)
        bar = plt.colorbar(heatmap, ax=ax3)
        plt.pause(0.1)
        bar.remove()  # avoid stacking a new colorbar every frame
    plt.show()
| [
"numpy.random.rand",
"math.floor",
"matplotlib.pyplot.colorbar",
"simpy.Environment",
"numpy.random.exponential",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show"
] | [((4499, 4518), 'simpy.Environment', 'simpy.Environment', ([], {}), '()\n', (4516, 4518), False, 'import simpy\n'), ((4833, 4863), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(12, 8)'}), '(1, figsize=(12, 8))\n', (4843, 4863), True, 'import matplotlib.pyplot as plt\n'), ((5996, 6006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6004, 6006), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3296), 'numpy.random.exponential', 'np.random.exponential', (['(1)'], {}), '(1)\n', (3293, 3296), True, 'import numpy as np\n'), ((3351, 3374), 'numpy.random.randint', 'np.random.randint', (['(1)', '(8)'], {}), '(1, 8)\n', (3368, 3374), True, 'import numpy as np\n'), ((5918, 5947), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['heatmap'], {'ax': 'ax3'}), '(heatmap, ax=ax3)\n', (5930, 5947), True, 'import matplotlib.pyplot as plt\n'), ((5956, 5970), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (5965, 5970), True, 'import matplotlib.pyplot as plt\n'), ((1657, 1673), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1671, 1673), True, 'import numpy as np\n'), ((1772, 1801), 'numpy.random.randint', 'np.random.randint', (['(1)', 'ACTIONS'], {}), '(1, ACTIONS)\n', (1789, 1801), True, 'import numpy as np\n'), ((856, 877), 'math.floor', 'math.floor', (['(total / 2)'], {}), '(total / 2)\n', (866, 877), False, 'import math\n'), ((1246, 1278), 'numpy.random.rand', 'np.random.rand', (['(STATES * ACTIONS)'], {}), '(STATES * ACTIONS)\n', (1260, 1278), True, 'import numpy as np\n'), ((1915, 1931), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1929, 1931), True, 'import numpy as np\n'), ((2018, 2047), 'numpy.random.randint', 'np.random.randint', (['(0)', 'ACTIONS'], {}), '(0, ACTIONS)\n', (2035, 2047), True, 'import numpy as np\n')] |
import os
import numpy as np
# Source and destination directories for the per-utterance feature files.
visual_features_path = '../features/IEMOCAP/lexical_features'
new_visual_features_path = '../features/IEMOCAP/lexical_features_normalized'
dimensions = 768  # feature dimensionality per utterance

# Per-dimension statistics, recomputed for every session below.
male_mean_list = [0]*dimensions
female_mean_list = [0]*dimensions
# NOTE(review): despite the names, these hold standard deviations (np.std).
male_variance_list = [0]*dimensions
female_variance_list = [0]*dimensions

# Per-dimension value accumulators, split by speaker gender.
male_dim_list = {}
female_dim_list = {}
for d in range(0, dimensions):
    male_dim_list[d] = []
    female_dim_list[d] = []
# Two-pass, per-session gender-dependent z-normalization:
#   pass 1 gathers every value per dimension, split by speaker gender
#   (gender is read from the utterance file name at position -8);
#   pass 2 rewrites each feature as (x - mean) / (std + 1e-6) and saves it.
for session in os.listdir(visual_features_path):
    for dialog in os.listdir(os.path.join(visual_features_path, session)):
        print(dialog)
        for sentence in os.listdir(os.path.join(visual_features_path, session, dialog)):
            feature = np.load(os.path.join(visual_features_path, session, dialog, sentence))
            gender = sentence[-8]  # 'M' or (presumably) 'F' -- TODO confirm naming scheme
            if gender == 'M':
                for d in range(0, dimensions):
                    male_dim_list[d].append(feature[d].flatten().tolist())
            else:
                for d in range(0, dimensions):
                    female_dim_list[d].append(feature[d].flatten().tolist())
    # Flatten the nested lists and compute per-dimension mean / std (male).
    for d in range(0, dimensions):
        male_dim_list[d] = [item for sublist in male_dim_list[d] for item in sublist]
        m = np.mean(male_dim_list[d])
        v = np.std(male_dim_list[d])  # standard deviation, not variance
        male_mean_list[d] = m
        male_variance_list[d] = v
    # Same statistics for the female utterances.
    for d in range(0, dimensions):
        female_dim_list[d] = [item for sublist in female_dim_list[d] for item in sublist]
        m = np.mean(female_dim_list[d])
        v = np.std(female_dim_list[d])  # standard deviation, not variance
        female_mean_list[d] = m
        female_variance_list[d] = v
    # Pass 2: normalize each file with the matching gender statistics and save.
    for dialog in os.listdir(os.path.join(visual_features_path, session)):
        for sentence in os.listdir(os.path.join(visual_features_path, session, dialog)):
            feature = np.load(os.path.join(visual_features_path, session, dialog, sentence))
            gender = sentence[-8]
            for d in range(0, dimensions):
                if gender == 'M':
                    feature[d] = (feature[d] - male_mean_list[d]) / (male_variance_list[d]+1e-6)
                else:
                    feature[d] = (feature[d] - female_mean_list[d]) / (female_variance_list[d]+1e-6)
            output_file = os.path.join(new_visual_features_path, session, dialog)
            os.makedirs(output_file, exist_ok=True)
            np.save(os.path.join(output_file, sentence[:-4]), feature)
    # Reset the accumulators so the next session starts fresh.
    for d in range(0, dimensions):
        male_dim_list[d] = []
        female_dim_list[d] = []
| [
"numpy.mean",
"os.listdir",
"os.makedirs",
"os.path.join",
"numpy.std"
] | [((463, 495), 'os.listdir', 'os.listdir', (['visual_features_path'], {}), '(visual_features_path)\n', (473, 495), False, 'import os\n'), ((523, 566), 'os.path.join', 'os.path.join', (['visual_features_path', 'session'], {}), '(visual_features_path, session)\n', (535, 566), False, 'import os\n'), ((1119, 1144), 'numpy.mean', 'np.mean', (['male_dim_list[d]'], {}), '(male_dim_list[d])\n', (1126, 1144), True, 'import numpy as np\n'), ((1151, 1175), 'numpy.std', 'np.std', (['male_dim_list[d]'], {}), '(male_dim_list[d])\n', (1157, 1175), True, 'import numpy as np\n'), ((1351, 1378), 'numpy.mean', 'np.mean', (['female_dim_list[d]'], {}), '(female_dim_list[d])\n', (1358, 1378), True, 'import numpy as np\n'), ((1385, 1411), 'numpy.std', 'np.std', (['female_dim_list[d]'], {}), '(female_dim_list[d])\n', (1391, 1411), True, 'import numpy as np\n'), ((1495, 1538), 'os.path.join', 'os.path.join', (['visual_features_path', 'session'], {}), '(visual_features_path, session)\n', (1507, 1538), False, 'import os\n'), ((614, 665), 'os.path.join', 'os.path.join', (['visual_features_path', 'session', 'dialog'], {}), '(visual_features_path, session, dialog)\n', (626, 665), False, 'import os\n'), ((1570, 1621), 'os.path.join', 'os.path.join', (['visual_features_path', 'session', 'dialog'], {}), '(visual_features_path, session, dialog)\n', (1582, 1621), False, 'import os\n'), ((1985, 2040), 'os.path.join', 'os.path.join', (['new_visual_features_path', 'session', 'dialog'], {}), '(new_visual_features_path, session, dialog)\n', (1997, 2040), False, 'import os\n'), ((2044, 2083), 'os.makedirs', 'os.makedirs', (['output_file'], {'exist_ok': '(True)'}), '(output_file, exist_ok=True)\n', (2055, 2083), False, 'import os\n'), ((689, 750), 'os.path.join', 'os.path.join', (['visual_features_path', 'session', 'dialog', 'sentence'], {}), '(visual_features_path, session, dialog, sentence)\n', (701, 750), False, 'import os\n'), ((1645, 1706), 'os.path.join', 'os.path.join', (['visual_features_path', 
'session', 'dialog', 'sentence'], {}), '(visual_features_path, session, dialog, sentence)\n', (1657, 1706), False, 'import os\n'), ((2095, 2135), 'os.path.join', 'os.path.join', (['output_file', 'sentence[:-4]'], {}), '(output_file, sentence[:-4])\n', (2107, 2135), False, 'import os\n')] |
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.dummy import DummyClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, learning_curve
from sklearn.metrics import get_scorer
from sklearn.model_selection import ParameterGrid
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import os
# Seaborn plotting style used by all figures in this script.
sns.set(style="ticks")
# NOTE(review): hard-coded, machine-specific working directory -- breaks on
# any other machine; consider a relative path or configuration value.
os.chdir("C:/Users/AE250016/Desktop/ACA_DS/Untitled Folder")
# Load the Kaggle Titanic training data and keep the target ('Survived')
# plus the candidate feature columns.
titanic = pd.read_csv('train.csv')
titanic = titanic[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
class ModelingStage:
    """Model-selection stage for sklearn classifiers.

    Tunes a set of candidate models (decision tree, logistic regression,
    k-nearest-neighbour by default) either via k-fold cross-validation or
    via a hold-out validation set, keeps the best estimator and compares it
    against a dummy-classifier baseline.

    Attributes set in ``__init__``:
        best_accuracy_per_model (dict): best score per model -> fitted estimator
        cv_results (pd.DataFrame): per-candidate tuning results
        best_model: estimator with the overall best score
        dummy_accuracy (float): test-set accuracy of the dummy baseline
        performance_metric (str): scoring metric used for tuning
    """

    @staticmethod
    def dict_model(inputs):
        """Build the dictionary of candidate models and parameter grids.

        Args:
            inputs (str or list or dict or None):
                1) name of a single required model
                2) list of required model names
                3) dict of tuples (model(), {parameter grid}) used verbatim
                4) falsy -> the full default dictionary
        Returns:
            dict of tuples (model(), {parameter grid}); ``None`` for a truthy
            ``inputs`` of an unsupported type (behaviour kept from original).
        """
        dictionary = {"Trees": (DecisionTreeClassifier(), {'max_depth': np.arange(3, 10)}),
                      "Logistic": (LogisticRegression(), {'C': [0.001, 0.01, 0.05, 0.1, 10, 100]}),
                      'K-nearest-neighbour': (KNeighborsClassifier(),
                                              {'n_neighbors': [5, 6, 7, 8, 9],
                                               'metric': ['minkowski', 'euclidean', 'manhattan'],
                                               'weights': ['uniform', 'distance']})}
        if not inputs:
            return dictionary
        if isinstance(inputs, dict):
            return inputs
        if isinstance(inputs, str):
            return {inputs: dictionary[inputs]}
        if isinstance(inputs, list):
            return {name: dictionary[name] for name in inputs}
        return None

    def plot_learning_curve(self, loading_eda, scores='neg_log_loss'):
        """Plot train / cross-validation learning curves for the best model.

        Args:
            loading_eda: object exposing ``x_train`` / ``y_train`` arrays
            scores (str): sklearn scoring name (also used as the y-axis label)
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            plt.figure()
            plt.title('title')
            plt.xlabel("Training examples")
            plt.ylabel(scores)
            # BUG FIX: was ``loading_eda.X_train`` (capital X) -- every other
            # use in this class spells the attribute ``x_train``.
            model = self.best_model.fit(loading_eda.x_train, loading_eda.y_train)
            train_sizes, train_scores, test_scores = learning_curve(model, loading_eda.x_train, loading_eda.y_train,
                                                                    cv=5, scoring=scores)
            train_scores_mean = np.mean(train_scores, axis=1)
            test_scores_mean = np.mean(test_scores, axis=1)
            plt.grid()
            plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
                     label="Training score")
            plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
                     label="Cross-validation score")
            plt.legend(loc="best")

    @staticmethod
    def dummy_model(x_train, y_train, x_test, y_test, dummy_strategy):
        """Accuracy of a DummyClassifier baseline on the test data.

        Args:
            x_train (numpy array): training data
            y_train (numpy array): training target labels
            x_test (numpy array): testing data
            y_test (numpy array): testing target labels
            dummy_strategy (str): DummyClassifier strategy (e.g. 'stratified')
        Returns:
            float: accuracy score of the dummy model on the test data
        """
        baseline = DummyClassifier(strategy=dummy_strategy).fit(x_train, y_train)
        y_dummy = baseline.predict(x_test)
        return accuracy_score(y_test, y_dummy)

    @staticmethod
    def modeling_stage_k_folds(model_dictionary, x_train, y_train, k_folds, performance_metric):
        """Choose the best model via k-fold cross-validated grid search.

        Args:
            model_dictionary (dict): tuples (model(), {parameter grid})
            x_train (numpy array): training data
            y_train (numpy array): training target labels
            k_folds (int): number of cross-validation folds
            performance_metric (str): sklearn scoring name
        Returns:
            model_dicts (dict): best score per model -> best estimator
            cross_val_results (pd.DataFrame): results of each validation run
            best_model: estimator with the overall best score
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cross_val_results = pd.DataFrame()
            model_dicts = {}
            for name in model_dictionary.keys():
                # GridSearchCV clones and refits the estimator itself, so the
                # previous pre-``fit`` of the raw estimator was redundant work.
                grid_clf_acc = GridSearchCV(model_dictionary[name][0],
                                            param_grid=model_dictionary[name][1],
                                            scoring=performance_metric,
                                            cv=k_folds)
                grid_clf_acc = grid_clf_acc.fit(x_train, y_train)
                temp = pd.DataFrame(grid_clf_acc.cv_results_)
                temp['model'] = name
                cross_val_results = cross_val_results.append(temp)
                model_dicts[grid_clf_acc.best_score_] = grid_clf_acc.best_estimator_
            cross_val_results = cross_val_results.set_index(['model'], append=True)
            best_model = model_dicts[max(model_dicts.keys())]
            return model_dicts, cross_val_results, best_model

    @staticmethod
    def modeling_validation(dictionary, x_train, y_train, x_val, y_val, scoring='accuracy'):
        """Choose the best model via an explicit hold-out validation set.

        Args:
            dictionary (dict): tuples (model(), {parameter grid})
            x_train (numpy array): training data
            y_train (numpy array): training target labels
            x_val (numpy array): validation data
            y_val (numpy array): validation target labels
            scoring (str): sklearn scoring name
        Returns:
            model_dicts (dict): best score per model -> best estimator
            val_results (pd.DataFrame): per-candidate validation results
            best_model: estimator with the overall best score
        """
        def score_best_param_per_model(rf, grid, model_type, scoring):
            # Evaluate every parameter combination on the validation set and
            # keep the best-scoring one for this model type.
            best_score = 0
            # Guard: stays empty if no grid beats a score of 0 (the original
            # raised UnboundLocalError in that case); set_params(**{}) is a no-op.
            best_grid = {}
            classifier_results = pd.DataFrame()
            scorer = get_scorer(scoring)
            for g in ParameterGrid(grid):
                rf.set_params(**g)
                rf.fit(x_train, y_train)
                # save if best
                if scorer(X=x_val, estimator=rf, y_true=y_val) > best_score:
                    best_score = scorer(X=x_val, estimator=rf, y_true=y_val)
                    best_grid = g
                temp = pd.DataFrame({model_type: g}).transpose()
                temp['val_score'] = scorer(X=x_val, estimator=rf, y_true=y_val)
                temp['train_score'] = scorer(X=x_train, estimator=rf, y_true=y_train)
                classifier_results = classifier_results.append(temp)
            best_estimator = rf.set_params(**best_grid)
            return best_score, best_grid, classifier_results.sort_values('val_score', ascending=False), best_estimator

        # ``warnings`` is already imported at module level; the method-local
        # re-import from the original was redundant and has been removed.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            val_results = pd.DataFrame()
            model_dicts = {}
            for name in dictionary.keys():
                best_score, best_grid, classifier_results, best_estimator = score_best_param_per_model(dictionary[name][0],
                                                                                                       dictionary[name][1],
                                                                                                       name, scoring)
                val_results = val_results.append(classifier_results)
                model_dicts[best_score] = best_estimator
            best_model = model_dicts[max(model_dicts.keys())]
            return model_dicts, val_results.sort_values('val_score', ascending=False), best_model

    def __init__(self,
                 loading_eda,
                 k_folds=10,
                 performance_metric='accuracy',
                 dummy_strategy='stratified',
                 inputs=None):
        """Run the full model-selection stage.

        Args:
            loading_eda: object exposing x_train/y_train/x_val/y_val/x_test/y_test
            k_folds (int or float): int -> k-fold CV; float -> hold-out validation
            performance_metric (str): sklearn scoring name for tuning
            dummy_strategy (str): DummyClassifier strategy for the baseline
            inputs: model selection spec, see ``dict_model``
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            dictionary = ModelingStage.dict_model(inputs)
            # A float ``k_folds`` signals hold-out validation instead of CV.
            if isinstance(k_folds, float):
                self.best_accuracy_per_model,\
                self.cv_results,\
                self.best_model = self.modeling_validation(dictionary,
                                                           loading_eda.x_train,
                                                           loading_eda.y_train,
                                                           loading_eda.x_val,
                                                           loading_eda.y_val,
                                                           performance_metric)
            else:
                self.best_accuracy_per_model,\
                self.cv_results,\
                self.best_model = self.modeling_stage_k_folds(dictionary,
                                                              loading_eda.x_train,
                                                              loading_eda.y_train,
                                                              k_folds,
                                                              performance_metric)
            self.dummy_accuracy = self.dummy_model(loading_eda.x_train, loading_eda.y_train, loading_eda.x_test,
                                                   loading_eda.y_test, dummy_strategy)
            self.performance_metric = performance_metric
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.arange",
"sklearn.model_selection.ParameterGrid",
"numpy.mean",
"seaborn.set",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.tree.DecisionTreeClassifier",
... | [((511, 533), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""'}), "(style='ticks')\n", (518, 533), True, 'import seaborn as sns\n'), ((535, 595), 'os.chdir', 'os.chdir', (['"""C:/Users/AE250016/Desktop/ACA_DS/Untitled Folder"""'], {}), "('C:/Users/AE250016/Desktop/ACA_DS/Untitled Folder')\n", (543, 595), False, 'import os\n'), ((607, 631), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (618, 631), True, 'import pandas as pd\n'), ((4081, 4112), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_dummy'], {}), '(y_test, y_dummy)\n', (4095, 4112), False, 'from sklearn.metrics import accuracy_score\n'), ((2488, 2513), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2511, 2513), False, 'import warnings\n'), ((2527, 2558), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2548, 2558), False, 'import warnings\n'), ((2571, 2583), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2581, 2583), True, 'import matplotlib.pyplot as plt\n'), ((2596, 2614), 'matplotlib.pyplot.title', 'plt.title', (['"""title"""'], {}), "('title')\n", (2605, 2614), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2658), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Training examples"""'], {}), "('Training examples')\n", (2637, 2658), True, 'import matplotlib.pyplot as plt\n'), ((2671, 2689), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['scores'], {}), '(scores)\n', (2681, 2689), True, 'import matplotlib.pyplot as plt\n'), ((2825, 2914), 'sklearn.model_selection.learning_curve', 'learning_curve', (['model', 'loading_eda.x_train', 'loading_eda.y_train'], {'cv': '(5)', 'scoring': 'scores'}), '(model, loading_eda.x_train, loading_eda.y_train, cv=5,\n scoring=scores)\n', (2839, 2914), False, 'from sklearn.model_selection import GridSearchCV, learning_curve\n'), ((3011, 3040), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', 
(3018, 3040), True, 'import numpy as np\n'), ((3072, 3100), 'numpy.mean', 'np.mean', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (3079, 3100), True, 'import numpy as np\n'), ((3113, 3123), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3121, 3123), True, 'import matplotlib.pyplot as plt\n'), ((3136, 3222), 'matplotlib.pyplot.plot', 'plt.plot', (['train_sizes', 'train_scores_mean', '"""o-"""'], {'color': '"""r"""', 'label': '"""Training score"""'}), "(train_sizes, train_scores_mean, 'o-', color='r', label=\n 'Training score')\n", (3144, 3222), True, 'import matplotlib.pyplot as plt\n'), ((3251, 3344), 'matplotlib.pyplot.plot', 'plt.plot', (['train_sizes', 'test_scores_mean', '"""o-"""'], {'color': '"""g"""', 'label': '"""Cross-validation score"""'}), "(train_sizes, test_scores_mean, 'o-', color='g', label=\n 'Cross-validation score')\n", (3259, 3344), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3395), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3383, 3395), True, 'import matplotlib.pyplot as plt\n'), ((4917, 4942), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4940, 4942), False, 'import warnings\n'), ((4956, 4987), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4977, 4987), False, 'import warnings\n'), ((5021, 5035), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5033, 5035), True, 'import pandas as pd\n'), ((6812, 6826), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6824, 6826), True, 'import pandas as pd\n'), ((6848, 6867), 'sklearn.metrics.get_scorer', 'get_scorer', (['scoring'], {}), '(scoring)\n', (6858, 6867), False, 'from sklearn.metrics import get_scorer\n'), ((6889, 6908), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['grid'], {}), '(grid)\n', (6902, 6908), False, 'from sklearn.model_selection import ParameterGrid\n'), ((7722, 7747), 'warnings.catch_warnings', 
'warnings.catch_warnings', ([], {}), '()\n', (7745, 7747), False, 'import warnings\n'), ((7761, 7792), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (7782, 7792), False, 'import warnings\n'), ((7819, 7833), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7831, 7833), True, 'import pandas as pd\n'), ((8763, 8788), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (8786, 8788), False, 'import warnings\n'), ((8802, 8833), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (8823, 8833), False, 'import warnings\n'), ((1189, 1213), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1211, 1213), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1284, 1304), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1302, 1304), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1395, 1417), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (1415, 1417), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3957, 3997), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': 'dummy_strategy'}), '(strategy=dummy_strategy)\n', (3972, 3997), False, 'from sklearn.dummy import DummyClassifier\n'), ((5500, 5538), 'pandas.DataFrame', 'pd.DataFrame', (['grid_clf_acc.cv_results_'], {}), '(grid_clf_acc.cv_results_)\n', (5512, 5538), True, 'import pandas as pd\n'), ((1229, 1245), 'numpy.arange', 'np.arange', (['(3)', '(10)'], {}), '(3, 10)\n', (1238, 1245), True, 'import numpy as np\n'), ((7228, 7257), 'pandas.DataFrame', 'pd.DataFrame', (['{model_type: g}'], {}), '({model_type: g})\n', (7240, 7257), True, 'import pandas as pd\n')] |
import os
import numpy as np
import pandas as pd
import pickle
import statsmodels.api as sm
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from sklearn.linear_model import LogisticRegression, LinearRegression
# NOTE(review): hard-coded, user-specific absolute paths -- this script only
# runs on the author's machine; consider environment variables or a config.
input_path = "/Users/christianhilscher/Desktop/dynsim/input/"
model_path = "/Users/christianhilscher/desktop/dynsim/src/estimation/models/"
os.chdir("/Users/christianhilscher/desktop/dynsim/src/estimation/")
from standard import getdf, get_dependent_var
###############################################################################
def data_general(dataf, dep_var, estimate=1):
    """Reduce *dataf* to the feature set used to model *dep_var*.

    Args:
        dataf (pd.DataFrame): input data (a copy is taken, not modified)
        dep_var (str): name of the dependent variable
        estimate (int): 1 -> keep 'dep_var' and 'personweight' (estimation
            mode; the ``_prepare_*`` helpers consume them later); anything
            else -> drop both columns as well
    Returns:
        pd.DataFrame: copy with identifier / outcome columns removed
    """
    dataf = dataf.copy()
    # Both branches of the original ``if estimate == 1`` started with this
    # identical call; only the extra drops depend on ``estimate``.
    dataf = get_dependent_var(dataf, dep_var)
    if estimate != 1:
        dataf.drop('dep_var', axis=1, inplace=True)
        dataf.drop('personweight', axis=1, inplace=True)
    vars_drop = ["pid",
                 "hid",
                 "orighid",
                 "age_max",
                 "predicted",
                 "lfs",
                 "working",
                 "fulltime",
                 "lfs_t1",
                 "working_t1",
                 "fulltime_t1"]
    # Drop whichever identifier / lagged-outcome columns are present.
    for var in vars_drop:
        if var in dataf.columns:
            dataf.drop(var, axis=1, inplace=True)
    return dataf
def _prepare_classifier(dataf):
    """Split *dataf* into train/test pieces for a LightGBM classifier.

    Separates the target ('dep_var'), pulls the person weights out of the
    feature matrix, standardises the features and packages everything into
    one dictionary: scaled arrays, statsmodels-ready matrices, lgb.Dataset
    objects, feature names and the training weights.
    """
    dataf = dataf.copy()

    features = dataf.drop('dep_var', axis=1)
    labels = dataf['dep_var']
    X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.05)

    # The person weight is a sample weight, not a predictor.
    weights_train = X_train['personweight']
    X_train.drop('personweight', axis=1, inplace=True)
    weights_test = X_test['personweight']
    X_test.drop('personweight', axis=1, inplace=True)

    if "personweight_interacted" in features.columns.tolist():
        X_train.drop('personweight_interacted', axis=1, inplace=True)
        X_test.drop('personweight_interacted', axis=1, inplace=True)

    # NOTE(review): train and test are standardised with independent scalers,
    # mirroring the original implementation.
    X_train_scaled = StandardScaler().fit_transform(np.asarray(X_train))
    X_test_scaled = StandardScaler().fit_transform(np.asarray(X_test))

    feature_names = X_train.columns.tolist()

    # Statsmodels variant keeps an explicit intercept column.
    X_train = sm.add_constant(X_train)
    X_test = sm.add_constant(X_test)

    lgb_train = lgb.Dataset(X_train_scaled, y_train, weight=weights_train)
    lgb_test = lgb.Dataset(X_test_scaled, y_test, weight=weights_test)

    return {'X_train': X_train_scaled,
            'X_test': X_test_scaled,
            'y_train': y_train,
            'y_test': y_test,
            'lgb_train': lgb_train,
            'lgb_test': lgb_test,
            'features': feature_names,
            'weights': weights_train}
def _prepare_regressor(dataf):
    """Split *dataf* into train/test pieces for a LightGBM regressor.

    Like ``_prepare_classifier``, but additionally standardises the target
    and stores the scaler fitted on the test target so predictions can be
    transformed back to the original scale.
    """
    dataf = dataf.copy()

    features = dataf.drop('dep_var', axis=1)
    labels = dataf['dep_var']
    X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.05)

    # The person weight is a sample weight, not a predictor.
    weights_train = X_train['personweight']
    X_train.drop('personweight', axis=1, inplace=True)
    weights_test = X_test['personweight']
    X_test.drop('personweight', axis=1, inplace=True)

    # NOTE(review): separate scalers per split, as in the original.
    X_train_scaled = StandardScaler().fit_transform(np.asarray(X_train))
    X_test_scaled = StandardScaler().fit_transform(np.asarray(X_test))
    y_train_scaled = StandardScaler().fit_transform(np.asarray(y_train).reshape(-1, 1))

    # Keep the scaler of the test target so predicted values can be mapped
    # back to the original scale later.
    y_test_scaler = StandardScaler().fit(np.asarray(y_test).reshape(-1, 1))
    y_test_scaled = y_test_scaler.transform(np.asarray(y_test).reshape(-1, 1))

    feature_names = X_train.columns.tolist()
    y_test_scaled = np.ravel(y_test_scaled)
    y_train_scaled = np.ravel(y_train_scaled)

    # Statsmodels variant keeps an explicit intercept column.
    X_train = sm.add_constant(X_train)
    X_test = sm.add_constant(X_test)

    lgb_train = lgb.Dataset(X_train_scaled, y_train, weight=weights_train)
    lgb_test = lgb.Dataset(X_test_scaled, y_test, weight=weights_test)

    # NOTE(review): 'X_test' intentionally holds the unscaled matrix (with
    # constant), unlike the classifier variant -- kept from the original.
    return {'X_train': X_train_scaled,
            'X_test': X_test,
            'y_train': y_train_scaled,
            'y_test': y_test,
            'scaler': y_test_scaler,
            'lgb_train': lgb_train,
            'lgb_test': lgb_test,
            'features': feature_names,
            'weights': weights_train}
def _estimate(dataf, dep_var, type):
    """Fit a LightGBM model for *dep_var* and persist it to disk.

    Args:
        dataf (pd.DataFrame): raw input data
        dep_var (str): dependent variable to estimate
        type (str): 'regression', 'binary', or anything else for multiclass.
            (NOTE: the parameter name shadows the builtin ``type``; kept
            unchanged for backward compatibility with existing callers.)
    Side effects:
        writes the trained model to ``model_path + dep_var + "_extended.txt"``
    """
    dataf = dataf.copy()
    dataf = data_general(dataf, dep_var)
    dataf.dropna(inplace=True)
    # ``prep`` was previously a local named ``dict``, shadowing the builtin.
    if type == 'regression':
        prep = _prepare_regressor(dataf)
        params = {'boosting_type' : 'gbdt',
                  'n_estimators': 350,
                  'objective' : 'l2',
                  'metric' : 'l2',
                  'num_leaves' : 31,
                  'learning_rate' : 0.15,
                  'feature_fraction': [0.9],
                  'bagging_fraction': [0.8],
                  'bagging_freq': [5],
                  'verbose' : 5,
                  'early_stopping_rounds': 5}
    elif type == 'binary':
        prep = _prepare_classifier(dataf)
        params = {'task' : 'train',
                  'boosting_type' : 'gbdt',
                  'n_estimators': 350,
                  'objective': 'binary',
                  'eval_metric': 'logloss',
                  'learning_rate': 0.05,
                  'feature_fraction': [0.9],
                  'num_leaves': 31,
                  'verbose': 0,
                  'early_stopping_rounds': 5}
    else:
        prep = _prepare_classifier(dataf)
        params = {'task' : 'train',
                  'boosting_type' : 'gbdt',
                  'n_estimators': 350,
                  'objective': 'multiclass',
                  'num_class': len(prep['y_train'].unique()),
                  'eval_metric': 'multi_logloss',
                  'learning_rate': 0.05,
                  'feature_fraction': [0.9],
                  'num_leaves': 31,
                  'verbose': 0,
                  'early_stopping_rounds': 5}
    modl = lgb.train(params,
                     train_set = prep['lgb_train'],
                     valid_sets = prep['lgb_test'],
                     feature_name = prep['features'])
    modl.save_model(model_path + dep_var + "_extended.txt")
#
# df = pd.read_pickle(input_path + 'illmitz10_reduced').dropna()
# df1 = getdf(df)
#
# _estimate(df1, "employment_status", "multiclass")
# _estimate(df1, "hours", "regression")
# _estimate(df1, "gross_earnings", "regression")
| [
"sklearn.model_selection.train_test_split",
"lightgbm.train",
"numpy.asarray",
"os.chdir",
"sklearn.preprocessing.StandardScaler",
"lightgbm.Dataset",
"statsmodels.api.add_constant",
"standard.get_dependent_var",
"numpy.ravel"
] | [((432, 499), 'os.chdir', 'os.chdir', (['"""/Users/christianhilscher/desktop/dynsim/src/estimation/"""'], {}), "('/Users/christianhilscher/desktop/dynsim/src/estimation/')\n", (440, 499), False, 'import os\n'), ((1578, 1616), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.05)'}), '(X, y, test_size=0.05)\n', (1594, 1616), False, 'from sklearn.model_selection import train_test_split\n'), ((2330, 2354), 'statsmodels.api.add_constant', 'sm.add_constant', (['X_train'], {}), '(X_train)\n', (2345, 2354), True, 'import statsmodels.api as sm\n'), ((2368, 2391), 'statsmodels.api.add_constant', 'sm.add_constant', (['X_test'], {}), '(X_test)\n', (2383, 2391), True, 'import statsmodels.api as sm\n'), ((2428, 2486), 'lightgbm.Dataset', 'lgb.Dataset', (['X_train_scaled', 'y_train'], {'weight': 'weights_train'}), '(X_train_scaled, y_train, weight=weights_train)\n', (2439, 2486), True, 'import lightgbm as lgb\n'), ((2532, 2587), 'lightgbm.Dataset', 'lgb.Dataset', (['X_test_scaled', 'y_test'], {'weight': 'weights_test'}), '(X_test_scaled, y_test, weight=weights_test)\n', (2543, 2587), True, 'import lightgbm as lgb\n'), ((3115, 3153), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.05)'}), '(X, y, test_size=0.05)\n', (3131, 3153), False, 'from sklearn.model_selection import train_test_split\n'), ((3919, 3942), 'numpy.ravel', 'np.ravel', (['y_test_scaled'], {}), '(y_test_scaled)\n', (3927, 3942), True, 'import numpy as np\n'), ((3964, 3988), 'numpy.ravel', 'np.ravel', (['y_train_scaled'], {}), '(y_train_scaled)\n', (3972, 3988), True, 'import numpy as np\n'), ((4029, 4053), 'statsmodels.api.add_constant', 'sm.add_constant', (['X_train'], {}), '(X_train)\n', (4044, 4053), True, 'import statsmodels.api as sm\n'), ((4067, 4090), 'statsmodels.api.add_constant', 'sm.add_constant', (['X_test'], {}), '(X_test)\n', (4082, 4090), True, 'import statsmodels.api as sm\n'), ((4127, 4185), 
'lightgbm.Dataset', 'lgb.Dataset', (['X_train_scaled', 'y_train'], {'weight': 'weights_train'}), '(X_train_scaled, y_train, weight=weights_train)\n', (4138, 4185), True, 'import lightgbm as lgb\n'), ((4231, 4286), 'lightgbm.Dataset', 'lgb.Dataset', (['X_test_scaled', 'y_test'], {'weight': 'weights_test'}), '(X_test_scaled, y_test, weight=weights_test)\n', (4242, 4286), True, 'import lightgbm as lgb\n'), ((6349, 6459), 'lightgbm.train', 'lgb.train', (['params'], {'train_set': "dict['lgb_train']", 'valid_sets': "dict['lgb_test']", 'feature_name': "dict['features']"}), "(params, train_set=dict['lgb_train'], valid_sets=dict['lgb_test'],\n feature_name=dict['features'])\n", (6358, 6459), True, 'import lightgbm as lgb\n'), ((739, 772), 'standard.get_dependent_var', 'get_dependent_var', (['dataf', 'dep_var'], {}), '(dataf, dep_var)\n', (756, 772), False, 'from standard import getdf, get_dependent_var\n'), ((799, 832), 'standard.get_dependent_var', 'get_dependent_var', (['dataf', 'dep_var'], {}), '(dataf, dep_var)\n', (816, 832), False, 'from standard import getdf, get_dependent_var\n'), ((2125, 2144), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (2135, 2144), True, 'import numpy as np\n'), ((2197, 2215), 'numpy.asarray', 'np.asarray', (['X_test'], {}), '(X_test)\n', (2207, 2215), True, 'import numpy as np\n'), ((3441, 3460), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (3451, 3460), True, 'import numpy as np\n'), ((3513, 3531), 'numpy.asarray', 'np.asarray', (['X_test'], {}), '(X_test)\n', (3523, 3531), True, 'import numpy as np\n'), ((2094, 2110), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2108, 2110), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2166, 2182), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2180, 2182), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3410, 3426), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', 
([], {}), '()\n', (3424, 3426), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3482, 3498), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3496, 3498), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3554, 3570), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3568, 3570), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3720, 3736), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3734, 3736), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3585, 3604), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (3595, 3604), True, 'import numpy as np\n'), ((3741, 3759), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (3751, 3759), True, 'import numpy as np\n'), ((3819, 3837), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (3829, 3837), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from psyneulink.core.components.functions.distributionfunctions import NormalDist
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.core.components.process import Process
from psyneulink.core.components.system import System
from psyneulink.core.globals.environment import RunError
from psyneulink.core.globals.keywords import ENABLED
class TestSimpleLearningPathway:
    """Target specifications for a two-mechanism learning process."""

    def test_dict_target_spec(self):
        """Dict targets given as 1d and as 2d lists."""
        src = TransferMechanism(name="learning-process-mech-A")
        dst = TransferMechanism(name="learning-process-mech-B")
        proc = Process(name="learning-process", pathway=[src, dst], learning=ENABLED)
        system = System(name="learning-system", processes=[proc])
        system.run(inputs={src: 1.0}, targets={dst: [2.0]})
        system.run(inputs={src: 1.0}, targets={dst: [[2.0]]})

    def test_dict_target_spec_length2(self):
        """Dict targets for a length-2 output mechanism."""
        src = TransferMechanism(name="learning-process-mech-A")
        dst = TransferMechanism(name="learning-process-mech-B",
                                default_variable=[[0.0, 0.0]])
        proc = Process(name="learning-process", pathway=[src, dst], learning=ENABLED)
        system = System(name="learning-system", processes=[proc])
        system.run(inputs={src: 1.0}, targets={dst: [2.0, 3.0]})
        system.run(inputs={src: 1.0}, targets={dst: [[2.0, 3.0]]})

    def test_list_target_spec(self):
        """Targets as bare lists, including multi-trial specifications."""
        src = TransferMechanism(name="learning-process-mech-A")
        dst = TransferMechanism(name="learning-process-mech-B")
        proc = Process(name="learning-process", pathway=[src, dst], learning=ENABLED)
        system = System(name="learning-system", processes=[proc])
        system.run(inputs={src: 1.0}, targets=[2.0])
        system.run(inputs={src: 1.0}, targets=[[2.0]])
        multi_trial_inputs = {src: [[[1.0]], [[2.0]], [[3.0]], [[4.0]], [[5.0]]]}
        system.run(inputs=multi_trial_inputs,
                   targets={dst: [[1.0], [2.0], [3.0], [4.0], [5.0]]})
        system.run(inputs=multi_trial_inputs,
                   targets=[[1.0], [2.0], [3.0], [4.0], [5.0]])

    def test_list_target_spec_length2(self):
        """Bare-list targets for a length-2 output mechanism."""
        src = TransferMechanism(name="learning-process-mech-A")
        dst = TransferMechanism(name="learning-process-mech-B",
                                default_variable=[[0.0, 0.0]])
        proc = Process(name="learning-process", pathway=[src, dst], learning=ENABLED)
        system = System(name="learning-system", processes=[proc])
        system.run(inputs={src: 1.0}, targets=[2.0, 3.0])
        system.run(inputs={src: 1.0}, targets=[[2.0, 3.0]])

    def test_function_target_spec(self):
        """Targets supplied by a callable returning a length-2 array."""
        src = TransferMechanism(name="learning-process-mech-A")
        dst = TransferMechanism(name="learning-process-mech-B",
                                default_variable=np.array([[0.0, 0.0]]))
        proc = Process(name="learning-process", pathway=[src, dst], learning=ENABLED)
        system = System(name="learning-system", processes=[proc])

        def target_function():
            # Draw two values from N(3, 1) per trial.
            first = NormalDist(mean=3.0)()
            second = NormalDist(mean=3.0)()
            return np.array([first, second])

        system.run(inputs={src: [[[1.0]], [[2.0]], [[3.0]]]},
                   targets={dst: target_function})
class TestMultilayerLearning:
    """Target specifications for a three-mechanism (multilayer) process."""

    def test_dict_target_spec(self):
        """Dict targets as scalar, 1d and 2d values."""
        layer_in = TransferMechanism(name="multilayer-mech-A")
        layer_hidden = TransferMechanism(name="multilayer-mech-B")
        layer_out = TransferMechanism(name="multilayer-mech-C")
        proc = Process(name="multilayer-process",
                       pathway=[layer_in, layer_hidden, layer_out],
                       learning=ENABLED)
        system = System(name="learning-system", processes=[proc])
        system.run(inputs={layer_in: 1.0}, targets={layer_out: 2.0})
        system.run(inputs={layer_in: 1.0}, targets={layer_out: [2.0]})
        system.run(inputs={layer_in: 1.0}, targets={layer_out: [[2.0]]})

    def test_dict_target_spec_length2(self):
        """Dict targets for a length-2 output layer."""
        layer_in = TransferMechanism(name="multilayer-mech-A")
        layer_hidden = TransferMechanism(name="multilayer-mech-B")
        layer_out = TransferMechanism(name="multilayer-mech-C",
                                      default_variable=[[0.0, 0.0]])
        proc = Process(name="multilayer-process",
                       pathway=[layer_in, layer_hidden, layer_out],
                       learning=ENABLED)
        system = System(name="learning-system", processes=[proc])
        system.run(inputs={layer_in: 1.0}, targets={layer_out: [2.0, 3.0]})
        system.run(inputs={layer_in: 1.0}, targets={layer_out: [[2.0, 3.0]]})

    def test_function_target_spec(self):
        """Targets supplied by a callable."""
        layer_in = TransferMechanism(name="multilayer-mech-A")
        layer_hidden = TransferMechanism(name="multilayer-mech-B")
        layer_out = TransferMechanism(name="multilayer-mech-C")
        proc = Process(name="multilayer-process",
                       pathway=[layer_in, layer_hidden, layer_out],
                       learning=ENABLED)
        system = System(name="learning-system", processes=[proc])

        def target_function():
            # A single draw from N(3, 1) per trial.
            return NormalDist(mean=3.0)()

        system.run(inputs={layer_in: 1.0}, targets={layer_out: target_function})
class TestDivergingLearningPathways:
    """Target specs when one input mechanism feeds two learning pathways."""

    def test_dict_target_spec(self):
        """Dict targets (scalar, 1d, 2d) for both terminal mechanisms."""
        shared_in = TransferMechanism(name="diverging-learning-pathways-mech-A")
        hidden_1 = TransferMechanism(name="diverging-learning-pathways-mech-B")
        out_1 = TransferMechanism(name="diverging-learning-pathways-mech-C")
        hidden_2 = TransferMechanism(name="diverging-learning-pathways-mech-D")
        out_2 = TransferMechanism(name="diverging-learning-pathways-mech-E")
        path_1 = Process(name="learning-pathway-1",
                         pathway=[shared_in, hidden_1, out_1],
                         learning=ENABLED)
        path_2 = Process(name="learning-pathway-2",
                         pathway=[shared_in, hidden_2, out_2],
                         learning=ENABLED)
        system = System(name="learning-system", processes=[path_1, path_2])
        system.run(inputs={shared_in: 1.0}, targets={out_1: 2.0, out_2: 4.0})
        system.run(inputs={shared_in: 1.0}, targets={out_1: [2.0], out_2: [4.0]})
        system.run(inputs={shared_in: 1.0}, targets={out_1: [[2.0]], out_2: [[4.0]]})

    def test_dict_target_spec_length2(self):
        """Dict targets for two length-2 terminal mechanisms."""
        shared_in = TransferMechanism(name="diverging-learning-pathways-mech-A")
        hidden_1 = TransferMechanism(name="diverging-learning-pathways-mech-B")
        out_1 = TransferMechanism(name="diverging-learning-pathways-mech-C",
                                  default_variable=[[0.0, 0.0]])
        hidden_2 = TransferMechanism(name="diverging-learning-pathways-mech-D")
        out_2 = TransferMechanism(name="diverging-learning-pathways-mech-E",
                                  default_variable=[[0.0, 0.0]])
        path_1 = Process(name="learning-pathway-1",
                         pathway=[shared_in, hidden_1, out_1],
                         learning=ENABLED)
        path_2 = Process(name="learning-pathway-2",
                         pathway=[shared_in, hidden_2, out_2],
                         learning=ENABLED)
        system = System(name="learning-system", processes=[path_1, path_2])
        system.run(inputs={shared_in: 1.0},
                   targets={out_1: [2.0, 3.0], out_2: [4.0, 5.0]})
        system.run(inputs={shared_in: 1.0},
                   targets={out_1: [[2.0, 3.0]], out_2: [[4.0, 5.0]]})

    def test_dict_list_and_function(self):
        """Mixing list/scalar targets with a callable target in one dict."""
        shared_in = TransferMechanism(name="diverging-learning-pathways-mech-A")
        hidden_1 = TransferMechanism(name="diverging-learning-pathways-mech-B")
        out_1 = TransferMechanism(name="diverging-learning-pathways-mech-C")
        hidden_2 = TransferMechanism(name="diverging-learning-pathways-mech-D")
        out_2 = TransferMechanism(name="diverging-learning-pathways-mech-E")
        path_1 = Process(name="learning-pathway-1",
                         pathway=[shared_in, hidden_1, out_1],
                         learning=ENABLED)
        path_2 = Process(name="learning-pathway-2",
                         pathway=[shared_in, hidden_2, out_2],
                         learning=ENABLED)
        system = System(name="learning-system", processes=[path_1, path_2])

        def target_function():
            # A single draw from N(3, 1) per trial.
            return NormalDist(mean=3.0)()

        system.run(inputs={shared_in: 1.0},
                   targets={out_1: 2.0, out_2: target_function})
        system.run(inputs={shared_in: 1.0},
                   targets={out_1: [2.0], out_2: target_function})
        system.run(inputs={shared_in: 1.0},
                   targets={out_1: [[2.0]], out_2: target_function})
class TestConvergingLearningPathways:
    """Target specs when two learning pathways share one terminal mechanism."""

    def test_dict_target_spec(self):
        """Dict targets (scalar, 1d, 2d) for the shared terminal mechanism."""
        in_1 = TransferMechanism(name="converging-learning-pathways-mech-A")
        hidden_1 = TransferMechanism(name="converging-learning-pathways-mech-B")
        shared_out = TransferMechanism(name="converging-learning-pathways-mech-C")
        in_2 = TransferMechanism(name="converging-learning-pathways-mech-D")
        hidden_2 = TransferMechanism(name="converging-learning-pathways-mech-E")
        path_1 = Process(name="learning-pathway-1",
                         pathway=[in_1, hidden_1, shared_out],
                         learning=ENABLED)
        path_2 = Process(name="learning-pathway-2",
                         pathway=[in_2, hidden_2, shared_out],
                         learning=ENABLED)
        system = System(name="learning-system", processes=[path_1, path_2])
        system.run(inputs={in_1: 1.0, in_2: 1.0}, targets={shared_out: 2.0})
        system.run(inputs={in_1: 1.0, in_2: 1.0}, targets={shared_out: [2.0]})
        system.run(inputs={in_1: 1.0, in_2: 1.0}, targets={shared_out: [[2.0]]})

    def test_dict_target_spec_length2(self):
        """Dict targets for a shared length-2 terminal mechanism."""
        in_1 = TransferMechanism(name="converging-learning-pathways-mech-A")
        hidden_1 = TransferMechanism(name="converging-learning-pathways-mech-B")
        shared_out = TransferMechanism(name="converging-learning-pathways-mech-C",
                                       default_variable=[[0.0, 0.0]])
        in_2 = TransferMechanism(name="converging-learning-pathways-mech-D")
        hidden_2 = TransferMechanism(name="converging-learning-pathways-mech-E")
        path_1 = Process(name="learning-pathway-1",
                         pathway=[in_1, hidden_1, shared_out],
                         learning=ENABLED)
        path_2 = Process(name="learning-pathway-2",
                         pathway=[in_2, hidden_2, shared_out],
                         learning=ENABLED)
        system = System(name="learning-system", processes=[path_1, path_2])
        system.run(inputs={in_1: 1.0, in_2: 1.0},
                   targets={shared_out: [2.0, 3.0]})
        system.run(inputs={in_1: 1.0, in_2: 1.0},
                   targets={shared_out: [[2.0, 3.0]]})
class TestInvalidTargetSpecs:
    """Malformed target specifications must raise RunError with informative messages."""

    def test_3_targets_4_inputs(self):
        """Mismatched counts of targets (3) and inputs (4) are rejected."""
        mech_a = TransferMechanism(name="learning-process-mech-A")
        mech_b = TransferMechanism(name="learning-process-mech-B")
        learning_proc = Process(name="learning-process",
                                pathway=[mech_a, mech_b],
                                learning=ENABLED)
        system = System(name="learning-system",
                        processes=[learning_proc],
                        )
        with pytest.raises(RunError) as error_text:
            system.run(inputs={mech_a: [[[1.0]], [[2.0]], [[3.0]], [[4.0]]]},
                       targets={mech_b: [[1.0], [2.0], [3.0]]})
        message = str(error_text.value)
        assert 'Number of target values specified (3) for each learning sequence' in message
        assert 'must equal the number of input values specified (4)' in message

    def test_2_target_mechanisms_1_dict_entry(self):
        """A targets dict covering only one of two target mechanisms is rejected."""
        mech_a = TransferMechanism(name="learning-process-mech-A")
        mech_b = TransferMechanism(name="learning-process-mech-B")
        mech_c = TransferMechanism(name="learning-process-mech-C")
        learning_proc = Process(name="learning-process",
                                pathway=[mech_a, mech_b],
                                learning=ENABLED)
        learning_proc_2 = Process(name="learning-process2",
                                  pathway=[mech_a, mech_c],
                                  learning=ENABLED)
        system = System(name="learning-system",
                        processes=[learning_proc, learning_proc_2],
                        )
        with pytest.raises(RunError) as error_text:
            # mech_c's target is missing from the dict.
            system.run(inputs={mech_a: [[[1.0]]]},
                       targets={mech_b: [[1.0]]})
        assert 'missing from specification of targets for run' in str(error_text.value)

    def test_1_target_mechanisms_2_dict_entries(self):
        """A target entry for a non-target (interior) mechanism is rejected."""
        mech_a = TransferMechanism(name="learning-process-mech-A")
        mech_b = TransferMechanism(name="learning-process-mech-B")
        mech_c = TransferMechanism(name="learning-process-mech-C")
        learning_proc = Process(name="learning-process",
                                pathway=[mech_a, mech_b, mech_c],
                                learning=ENABLED)
        system = System(name="learning-system",
                        processes=[learning_proc],
                        )
        with pytest.raises(RunError) as error_text:
            # mech_b is interior to the pathway, not a valid target.
            system.run(inputs={mech_a: [[[1.0]]]},
                       targets={mech_b: [[1.0]],
                                mech_c: [[1.0]]})
        assert 'does not project to a target Mechanism in' in str(error_text.value)

    def test_2_target_mechanisms_list_spec(self):
        """With multiple target mechanisms, a bare list target spec is rejected."""
        mech_a = TransferMechanism(name="learning-process-mech-A")
        mech_b = TransferMechanism(name="learning-process-mech-B")
        mech_c = TransferMechanism(name="learning-process-mech-C")
        learning_proc = Process(name="learning-process",
                                pathway=[mech_a, mech_b],
                                learning=ENABLED)
        learning_proc_2 = Process(name="learning-process2",
                                  pathway=[mech_a, mech_c],
                                  learning=ENABLED)
        system = System(name="learning-system",
                        processes=[learning_proc, learning_proc_2],
                        )
        with pytest.raises(RunError) as error_text:
            system.run(inputs={mech_a: [[[1.0]]]},
                       targets=[[1.0]])
        message = str(error_text.value)
        assert 'Target values for' in message
        assert 'must be specified in a dictionary' in message

    def test_2_target_mechanisms_fn_spec(self):
        """With multiple target mechanisms, a bare function target spec is rejected."""
        mech_a = TransferMechanism(name="learning-process-mech-A")
        mech_b = TransferMechanism(name="learning-process-mech-B")
        mech_c = TransferMechanism(name="learning-process-mech-C")
        learning_proc = Process(name="learning-process",
                                pathway=[mech_a, mech_b],
                                learning=ENABLED)
        learning_proc_2 = Process(name="learning-process2",
                                  pathway=[mech_a, mech_c],
                                  learning=ENABLED)
        system = System(name="learning-system",
                        processes=[learning_proc, learning_proc_2],
                        )

        def target_function():
            val_1 = NormalDist(mean=3.0)()
            val_2 = NormalDist(mean=3.0)()
            return [val_1, val_2]

        with pytest.raises(RunError) as error_text:
            system.run(inputs={mech_a: [[[1.0]]]},
                       targets=target_function)
        message = str(error_text.value)
        assert 'Target values for' in message
        assert 'must be specified in a dictionary' in message
| [
"psyneulink.core.components.process.Process",
"psyneulink.core.components.functions.distributionfunctions.NormalDist",
"psyneulink.core.components.system.System",
"numpy.array",
"psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism",
"pytest.raises"
] | [((515, 564), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-A"""'}), "(name='learning-process-mech-A')\n", (532, 564), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((577, 626), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-B"""'}), "(name='learning-process-mech-B')\n", (594, 626), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((641, 707), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process"""', 'pathway': '[A, B]', 'learning': 'ENABLED'}), "(name='learning-process', pathway=[A, B], learning=ENABLED)\n", (648, 707), False, 'from psyneulink.core.components.process import Process\n'), ((763, 809), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[LP]'}), "(name='learning-system', processes=[LP])\n", (769, 809), False, 'from psyneulink.core.components.system import System\n'), ((1110, 1159), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-A"""'}), "(name='learning-process-mech-A')\n", (1127, 1159), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1172, 1257), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-B"""', 'default_variable': '[[0.0, 0.0]]'}), "(name='learning-process-mech-B', default_variable=[[0.0, 0.0]]\n )\n", (1189, 1257), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1297, 1363), 
'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process"""', 'pathway': '[A, B]', 'learning': 'ENABLED'}), "(name='learning-process', pathway=[A, B], learning=ENABLED)\n", (1304, 1363), False, 'from psyneulink.core.components.process import Process\n'), ((1419, 1465), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[LP]'}), "(name='learning-system', processes=[LP])\n", (1425, 1465), False, 'from psyneulink.core.components.system import System\n'), ((1679, 1728), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-A"""'}), "(name='learning-process-mech-A')\n", (1696, 1728), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1741, 1790), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-B"""'}), "(name='learning-process-mech-B')\n", (1758, 1790), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1805, 1871), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process"""', 'pathway': '[A, B]', 'learning': 'ENABLED'}), "(name='learning-process', pathway=[A, B], learning=ENABLED)\n", (1812, 1871), False, 'from psyneulink.core.components.process import Process\n'), ((1927, 1973), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[LP]'}), "(name='learning-system', processes=[LP])\n", (1933, 1973), False, 'from psyneulink.core.components.system import System\n'), ((2601, 2650), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-A"""'}), "(name='learning-process-mech-A')\n", (2618, 
2650), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((2663, 2748), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-B"""', 'default_variable': '[[0.0, 0.0]]'}), "(name='learning-process-mech-B', default_variable=[[0.0, 0.0]]\n )\n", (2680, 2748), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((2788, 2854), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process"""', 'pathway': '[A, B]', 'learning': 'ENABLED'}), "(name='learning-process', pathway=[A, B], learning=ENABLED)\n", (2795, 2854), False, 'from psyneulink.core.components.process import Process\n'), ((2910, 2956), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[LP]'}), "(name='learning-system', processes=[LP])\n", (2916, 2956), False, 'from psyneulink.core.components.system import System\n'), ((3185, 3234), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-A"""'}), "(name='learning-process-mech-A')\n", (3202, 3234), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((3382, 3448), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process"""', 'pathway': '[A, B]', 'learning': 'ENABLED'}), "(name='learning-process', pathway=[A, B], learning=ENABLED)\n", (3389, 3448), False, 'from psyneulink.core.components.process import Process\n'), ((3504, 3550), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[LP]'}), "(name='learning-system', processes=[LP])\n", (3510, 3550), False, 'from psyneulink.core.components.system import System\n'), ((3953, 3996), 
'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""multilayer-mech-A"""'}), "(name='multilayer-mech-A')\n", (3970, 3996), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((4009, 4052), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""multilayer-mech-B"""'}), "(name='multilayer-mech-B')\n", (4026, 4052), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((4065, 4108), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""multilayer-mech-C"""'}), "(name='multilayer-mech-C')\n", (4082, 4108), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((4121, 4192), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""multilayer-process"""', 'pathway': '[A, B, C]', 'learning': 'ENABLED'}), "(name='multilayer-process', pathway=[A, B, C], learning=ENABLED)\n", (4128, 4192), False, 'from psyneulink.core.components.process import Process\n'), ((4248, 4293), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[P]'}), "(name='learning-system', processes=[P])\n", (4254, 4293), False, 'from psyneulink.core.components.system import System\n'), ((4589, 4632), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""multilayer-mech-A"""'}), "(name='multilayer-mech-A')\n", (4606, 4632), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((4645, 4688), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': 
'"""multilayer-mech-B"""'}), "(name='multilayer-mech-B')\n", (4662, 4688), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((4701, 4775), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""multilayer-mech-C"""', 'default_variable': '[[0.0, 0.0]]'}), "(name='multilayer-mech-C', default_variable=[[0.0, 0.0]])\n", (4718, 4775), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((4818, 4889), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""multilayer-process"""', 'pathway': '[A, B, C]', 'learning': 'ENABLED'}), "(name='multilayer-process', pathway=[A, B, C], learning=ENABLED)\n", (4825, 4889), False, 'from psyneulink.core.components.process import Process\n'), ((4945, 4990), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[P]'}), "(name='learning-system', processes=[P])\n", (4951, 4990), False, 'from psyneulink.core.components.system import System\n'), ((5208, 5251), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""multilayer-mech-A"""'}), "(name='multilayer-mech-A')\n", (5225, 5251), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((5264, 5307), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""multilayer-mech-B"""'}), "(name='multilayer-mech-B')\n", (5281, 5307), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((5320, 5363), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""multilayer-mech-C"""'}), "(name='multilayer-mech-C')\n", (5337, 
5363), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((5376, 5447), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""multilayer-process"""', 'pathway': '[A, B, C]', 'learning': 'ENABLED'}), "(name='multilayer-process', pathway=[A, B, C], learning=ENABLED)\n", (5383, 5447), False, 'from psyneulink.core.components.process import Process\n'), ((5501, 5546), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[P]'}), "(name='learning-system', processes=[P])\n", (5507, 5546), False, 'from psyneulink.core.components.system import System\n'), ((5830, 5890), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-A"""'}), "(name='diverging-learning-pathways-mech-A')\n", (5847, 5890), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((5903, 5963), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-B"""'}), "(name='diverging-learning-pathways-mech-B')\n", (5920, 5963), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((5976, 6036), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-C"""'}), "(name='diverging-learning-pathways-mech-C')\n", (5993, 6036), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((6049, 6109), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-D"""'}), "(name='diverging-learning-pathways-mech-D')\n", (6066, 
6109), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((6122, 6182), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-E"""'}), "(name='diverging-learning-pathways-mech-E')\n", (6139, 6182), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((6197, 6268), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-pathway-1"""', 'pathway': '[A, B, C]', 'learning': 'ENABLED'}), "(name='learning-pathway-1', pathway=[A, B, C], learning=ENABLED)\n", (6204, 6268), False, 'from psyneulink.core.components.process import Process\n'), ((6324, 6395), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-pathway-2"""', 'pathway': '[A, D, E]', 'learning': 'ENABLED'}), "(name='learning-pathway-2', pathway=[A, D, E], learning=ENABLED)\n", (6331, 6395), False, 'from psyneulink.core.components.process import Process\n'), ((6449, 6499), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[P1, P2]'}), "(name='learning-system', processes=[P1, P2])\n", (6455, 6499), False, 'from psyneulink.core.components.system import System\n'), ((6894, 6954), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-A"""'}), "(name='diverging-learning-pathways-mech-A')\n", (6911, 6954), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((6967, 7027), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-B"""'}), "(name='diverging-learning-pathways-mech-B')\n", (6984, 7027), False, 'from 
psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((7040, 7135), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-C"""', 'default_variable': '[[0.0, 0.0]]'}), "(name='diverging-learning-pathways-mech-C',\n default_variable=[[0.0, 0.0]])\n", (7057, 7135), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((7174, 7234), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-D"""'}), "(name='diverging-learning-pathways-mech-D')\n", (7191, 7234), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((7247, 7342), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-E"""', 'default_variable': '[[0.0, 0.0]]'}), "(name='diverging-learning-pathways-mech-E',\n default_variable=[[0.0, 0.0]])\n", (7264, 7342), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((7383, 7454), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-pathway-1"""', 'pathway': '[A, B, C]', 'learning': 'ENABLED'}), "(name='learning-pathway-1', pathway=[A, B, C], learning=ENABLED)\n", (7390, 7454), False, 'from psyneulink.core.components.process import Process\n'), ((7510, 7581), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-pathway-2"""', 'pathway': '[A, D, E]', 'learning': 'ENABLED'}), "(name='learning-pathway-2', pathway=[A, D, E], learning=ENABLED)\n", (7517, 7581), False, 'from psyneulink.core.components.process import Process\n'), ((7635, 7685), 'psyneulink.core.components.system.System', 
'System', ([], {'name': '"""learning-system"""', 'processes': '[P1, P2]'}), "(name='learning-system', processes=[P1, P2])\n", (7641, 7685), False, 'from psyneulink.core.components.system import System\n'), ((8003, 8063), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-A"""'}), "(name='diverging-learning-pathways-mech-A')\n", (8020, 8063), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((8076, 8136), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-B"""'}), "(name='diverging-learning-pathways-mech-B')\n", (8093, 8136), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((8149, 8209), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-C"""'}), "(name='diverging-learning-pathways-mech-C')\n", (8166, 8209), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((8222, 8282), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-D"""'}), "(name='diverging-learning-pathways-mech-D')\n", (8239, 8282), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((8295, 8355), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""diverging-learning-pathways-mech-E"""'}), "(name='diverging-learning-pathways-mech-E')\n", (8312, 8355), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((8370, 
8441), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-pathway-1"""', 'pathway': '[A, B, C]', 'learning': 'ENABLED'}), "(name='learning-pathway-1', pathway=[A, B, C], learning=ENABLED)\n", (8377, 8441), False, 'from psyneulink.core.components.process import Process\n'), ((8497, 8568), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-pathway-2"""', 'pathway': '[A, D, E]', 'learning': 'ENABLED'}), "(name='learning-pathway-2', pathway=[A, D, E], learning=ENABLED)\n", (8504, 8568), False, 'from psyneulink.core.components.process import Process\n'), ((8622, 8672), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[P1, P2]'}), "(name='learning-system', processes=[P1, P2])\n", (8628, 8672), False, 'from psyneulink.core.components.system import System\n'), ((9228, 9289), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""converging-learning-pathways-mech-A"""'}), "(name='converging-learning-pathways-mech-A')\n", (9245, 9289), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((9302, 9363), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""converging-learning-pathways-mech-B"""'}), "(name='converging-learning-pathways-mech-B')\n", (9319, 9363), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((9376, 9437), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""converging-learning-pathways-mech-C"""'}), "(name='converging-learning-pathways-mech-C')\n", (9393, 9437), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((9450, 9511), 
'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""converging-learning-pathways-mech-D"""'}), "(name='converging-learning-pathways-mech-D')\n", (9467, 9511), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((9524, 9585), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""converging-learning-pathways-mech-E"""'}), "(name='converging-learning-pathways-mech-E')\n", (9541, 9585), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((9600, 9671), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-pathway-1"""', 'pathway': '[A, B, C]', 'learning': 'ENABLED'}), "(name='learning-pathway-1', pathway=[A, B, C], learning=ENABLED)\n", (9607, 9671), False, 'from psyneulink.core.components.process import Process\n'), ((9727, 9798), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-pathway-2"""', 'pathway': '[D, E, C]', 'learning': 'ENABLED'}), "(name='learning-pathway-2', pathway=[D, E, C], learning=ENABLED)\n", (9734, 9798), False, 'from psyneulink.core.components.process import Process\n'), ((9852, 9902), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[P1, P2]'}), "(name='learning-system', processes=[P1, P2])\n", (9858, 9902), False, 'from psyneulink.core.components.system import System\n'), ((10288, 10349), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""converging-learning-pathways-mech-A"""'}), "(name='converging-learning-pathways-mech-A')\n", (10305, 10349), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((10362, 10423), 
'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""converging-learning-pathways-mech-B"""'}), "(name='converging-learning-pathways-mech-B')\n", (10379, 10423), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((10436, 10532), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""converging-learning-pathways-mech-C"""', 'default_variable': '[[0.0, 0.0]]'}), "(name='converging-learning-pathways-mech-C',\n default_variable=[[0.0, 0.0]])\n", (10453, 10532), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((10571, 10632), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""converging-learning-pathways-mech-D"""'}), "(name='converging-learning-pathways-mech-D')\n", (10588, 10632), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((10645, 10706), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""converging-learning-pathways-mech-E"""'}), "(name='converging-learning-pathways-mech-E')\n", (10662, 10706), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((10721, 10792), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-pathway-1"""', 'pathway': '[A, B, C]', 'learning': 'ENABLED'}), "(name='learning-pathway-1', pathway=[A, B, C], learning=ENABLED)\n", (10728, 10792), False, 'from psyneulink.core.components.process import Process\n'), ((10848, 10919), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-pathway-2"""', 'pathway': '[D, E, C]', 'learning': 'ENABLED'}), 
"(name='learning-pathway-2', pathway=[D, E, C], learning=ENABLED)\n", (10855, 10919), False, 'from psyneulink.core.components.process import Process\n'), ((10973, 11023), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[P1, P2]'}), "(name='learning-system', processes=[P1, P2])\n", (10979, 11023), False, 'from psyneulink.core.components.system import System\n'), ((11350, 11399), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-A"""'}), "(name='learning-process-mech-A')\n", (11367, 11399), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((11412, 11461), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-B"""'}), "(name='learning-process-mech-B')\n", (11429, 11461), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((11476, 11542), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process"""', 'pathway': '[A, B]', 'learning': 'ENABLED'}), "(name='learning-process', pathway=[A, B], learning=ENABLED)\n", (11483, 11542), False, 'from psyneulink.core.components.process import Process\n'), ((11598, 11644), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[LP]'}), "(name='learning-system', processes=[LP])\n", (11604, 11644), False, 'from psyneulink.core.components.system import System\n'), ((12133, 12182), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-A"""'}), "(name='learning-process-mech-A')\n", (12150, 12182), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import 
TransferMechanism\n'), ((12195, 12244), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-B"""'}), "(name='learning-process-mech-B')\n", (12212, 12244), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((12257, 12306), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-C"""'}), "(name='learning-process-mech-C')\n", (12274, 12306), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((12321, 12387), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process"""', 'pathway': '[A, B]', 'learning': 'ENABLED'}), "(name='learning-process', pathway=[A, B], learning=ENABLED)\n", (12328, 12387), False, 'from psyneulink.core.components.process import Process\n'), ((12444, 12511), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process2"""', 'pathway': '[A, C]', 'learning': 'ENABLED'}), "(name='learning-process2', pathway=[A, C], learning=ENABLED)\n", (12451, 12511), False, 'from psyneulink.core.components.process import Process\n'), ((12567, 12618), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[LP, LP2]'}), "(name='learning-system', processes=[LP, LP2])\n", (12573, 12618), False, 'from psyneulink.core.components.system import System\n'), ((12950, 12999), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-A"""'}), "(name='learning-process-mech-A')\n", (12967, 12999), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((13012, 13061), 
'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-B"""'}), "(name='learning-process-mech-B')\n", (13029, 13061), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((13074, 13123), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-C"""'}), "(name='learning-process-mech-C')\n", (13091, 13123), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((13138, 13207), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process"""', 'pathway': '[A, B, C]', 'learning': 'ENABLED'}), "(name='learning-process', pathway=[A, B, C], learning=ENABLED)\n", (13145, 13207), False, 'from psyneulink.core.components.process import Process\n'), ((13263, 13309), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[LP]'}), "(name='learning-system', processes=[LP])\n", (13269, 13309), False, 'from psyneulink.core.components.system import System\n'), ((13671, 13720), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-A"""'}), "(name='learning-process-mech-A')\n", (13688, 13720), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((13733, 13782), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-B"""'}), "(name='learning-process-mech-B')\n", (13750, 13782), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((13795, 13844), 
'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-C"""'}), "(name='learning-process-mech-C')\n", (13812, 13844), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((13859, 13925), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process"""', 'pathway': '[A, B]', 'learning': 'ENABLED'}), "(name='learning-process', pathway=[A, B], learning=ENABLED)\n", (13866, 13925), False, 'from psyneulink.core.components.process import Process\n'), ((13982, 14049), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process2"""', 'pathway': '[A, C]', 'learning': 'ENABLED'}), "(name='learning-process2', pathway=[A, C], learning=ENABLED)\n", (13989, 14049), False, 'from psyneulink.core.components.process import Process\n'), ((14105, 14156), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[LP, LP2]'}), "(name='learning-system', processes=[LP, LP2])\n", (14111, 14156), False, 'from psyneulink.core.components.system import System\n'), ((14530, 14579), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-A"""'}), "(name='learning-process-mech-A')\n", (14547, 14579), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((14592, 14641), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""learning-process-mech-B"""'}), "(name='learning-process-mech-B')\n", (14609, 14641), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((14654, 14703), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 
'TransferMechanism', ([], {'name': '"""learning-process-mech-C"""'}), "(name='learning-process-mech-C')\n", (14671, 14703), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((14718, 14784), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process"""', 'pathway': '[A, B]', 'learning': 'ENABLED'}), "(name='learning-process', pathway=[A, B], learning=ENABLED)\n", (14725, 14784), False, 'from psyneulink.core.components.process import Process\n'), ((14841, 14908), 'psyneulink.core.components.process.Process', 'Process', ([], {'name': '"""learning-process2"""', 'pathway': '[A, C]', 'learning': 'ENABLED'}), "(name='learning-process2', pathway=[A, C], learning=ENABLED)\n", (14848, 14908), False, 'from psyneulink.core.components.process import Process\n'), ((14964, 15015), 'psyneulink.core.components.system.System', 'System', ([], {'name': '"""learning-system"""', 'processes': '[LP, LP2]'}), "(name='learning-system', processes=[LP, LP2])\n", (14970, 15015), False, 'from psyneulink.core.components.system import System\n'), ((3715, 3739), 'numpy.array', 'np.array', (['[val_1, val_2]'], {}), '([val_1, val_2])\n', (3723, 3739), True, 'import numpy as np\n'), ((11698, 11721), 'pytest.raises', 'pytest.raises', (['RunError'], {}), '(RunError)\n', (11711, 11721), False, 'import pytest\n'), ((12672, 12695), 'pytest.raises', 'pytest.raises', (['RunError'], {}), '(RunError)\n', (12685, 12695), False, 'import pytest\n'), ((13364, 13387), 'pytest.raises', 'pytest.raises', (['RunError'], {}), '(RunError)\n', (13377, 13387), False, 'import pytest\n'), ((14210, 14233), 'pytest.raises', 'pytest.raises', (['RunError'], {}), '(RunError)\n', (14223, 14233), False, 'import pytest\n'), ((15222, 15245), 'pytest.raises', 'pytest.raises', (['RunError'], {}), '(RunError)\n', (15235, 15245), False, 'import pytest\n'), ((3344, 3366), 'numpy.array', 'np.array', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', 
(3352, 3366), True, 'import numpy as np\n'), ((3622, 3642), 'psyneulink.core.components.functions.distributionfunctions.NormalDist', 'NormalDist', ([], {'mean': '(3.0)'}), '(mean=3.0)\n', (3632, 3642), False, 'from psyneulink.core.components.functions.distributionfunctions import NormalDist\n'), ((3665, 3685), 'psyneulink.core.components.functions.distributionfunctions.NormalDist', 'NormalDist', ([], {'mean': '(3.0)'}), '(mean=3.0)\n', (3675, 3685), False, 'from psyneulink.core.components.functions.distributionfunctions import NormalDist\n'), ((5618, 5638), 'psyneulink.core.components.functions.distributionfunctions.NormalDist', 'NormalDist', ([], {'mean': '(3.0)'}), '(mean=3.0)\n', (5628, 5638), False, 'from psyneulink.core.components.functions.distributionfunctions import NormalDist\n'), ((8764, 8784), 'psyneulink.core.components.functions.distributionfunctions.NormalDist', 'NormalDist', ([], {'mean': '(3.0)'}), '(mean=3.0)\n', (8774, 8784), False, 'from psyneulink.core.components.functions.distributionfunctions import NormalDist\n'), ((15108, 15128), 'psyneulink.core.components.functions.distributionfunctions.NormalDist', 'NormalDist', ([], {'mean': '(3.0)'}), '(mean=3.0)\n', (15118, 15128), False, 'from psyneulink.core.components.functions.distributionfunctions import NormalDist\n'), ((15151, 15171), 'psyneulink.core.components.functions.distributionfunctions.NormalDist', 'NormalDist', ([], {'mean': '(3.0)'}), '(mean=3.0)\n', (15161, 15171), False, 'from psyneulink.core.components.functions.distributionfunctions import NormalDist\n')] |
import collections
import json
import math
import os
import random
import time
from bert import modeling
from bert import tokenization
import optimization
import six
import tensorflow as tf
import numpy as np
# Number of documents packed into a single training feature; each document
# gets its own "[CLS] query [SEP] passage [SEP]" window of max_seq_length.
NUM_DOCS = 2
# Maximum number of (start, end) answer-span label slots stored per feature.
NUM_ANSWER_SPANS = 20
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
    "train_file", None,
    "json file path for training. E.g., train_output.json")
flags.DEFINE_string(
    "eval_file", None,
    # Help text fixed: this flag points at the evaluation data, not training.
    "json file path for evaluation. E.g., dev_output.json")
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")
flags.DEFINE_integer(
    "max_seq_length", 512,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")
flags.DEFINE_integer(
    "max_query_length", 64,
    "The maximum number of tokens for the question. Questions longer than "
    "this will be truncated to this length.")
flags.DEFINE_integer("train_batch_size", 3, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 2,
                     "Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("num_train_epochs", 30,
                     "Total number of training epochs to perform.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")
flags.DEFINE_integer("eval_steps", 1000,
                     "How often to run evaluation.")
flags.DEFINE_integer("log_steps", 200,
                     # Help text fixed: this flag controls console logging
                     # frequency (see main), not evaluation.
                     "How often to log training loss and timing.")
flags.DEFINE_integer("num_gpus", 1,
                     "How many gpus to use.")
flags.DEFINE_bool(
    "verbose_logging", False,
    "If true, all of the warnings related to data processing will be printed. "
    "A number of warnings are expected for a normal SQuAD evaluation.")
class OpenQAExample(object):
  """A single training/test example for simple sequence classification.

  Attributes:
    qid: unique question id taken from the input json.
    question_text: question string ("< Query >" marker replaced by "%q").
    doc_tokens: per-document lists of word tokens (one list per document).
    orig_answer_text: reference answer string, if available.
    start_positions: per-document lists of answer-span start word indices.
    end_positions: per-document lists of answer-span end word indices.
  """
  def __init__(self,
               qid,
               question_text,
               doc_tokens,
               orig_answer_text=None,
               start_positions=None,
               end_positions=None):
    self.qid = qid
    self.question_text = question_text
    self.doc_tokens = doc_tokens
    self.orig_answer_text = orig_answer_text
    self.start_positions = start_positions
    self.end_positions = end_positions
  def __str__(self):
    return self.__repr__()
  def __repr__(self):
    s = ""
    s += "id: %s" % (self.qid)
    s += ", question_text: %s" % (
        tokenization.printable_text(self.question_text))
    s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
    # Bug fix: the attributes are plural (`start_positions`/`end_positions`);
    # the old code read `self.start_position`, which does not exist, so any
    # repr() raised AttributeError. The second guard also tested the start
    # attribute while printing the end positions.
    if self.start_positions:
      s += ", start_positions: %s" % (self.start_positions)
    if self.end_positions:
      s += ", end_positions: %s" % (self.end_positions)
    return s
class InputFeatures(object):
  """A single set of features of data.

  Holds the NUM_DOCS concatenated token windows for one example: token
  strings, WordPiece ids, attention masks and segment ids, plus (optionally)
  the per-document answer-span positions used as training labels.
  """
  def __init__(self,
               unique_id,
               example_index,
               tokens_list,
               input_ids_list,
               input_mask_list,
               segment_ids_list,
               start_positions=None,
               end_positions=None):
    # Every constructor argument is stored verbatim as an instance attribute.
    self.__dict__.update(
        unique_id=unique_id,
        example_index=example_index,
        tokens_list=tokens_list,
        input_ids_list=input_ids_list,
        input_mask_list=input_mask_list,
        segment_ids_list=segment_ids_list,
        start_positions=start_positions,
        end_positions=end_positions)
def read_open_qa_examples(inputfile, is_training):
  """Read a json file from DocumentQA into a list of OpenQAExample.

  Each line of `inputfile` is a standalone json object with question_id,
  question (token list), context (per-document token lists), answer_text
  and answer_spans (per-document lists of [start, end] word index pairs).
  `is_training` is accepted for interface symmetry but not used here.
  """
  examples = []
  with open(inputfile, "r") as fin:
    for line in fin:
      item = json.loads(line.strip())
      # The "< Query >" placeholder is collapsed to the "%q" marker token.
      question = " ".join(item["question"]).replace("< Query >", "%q")
      spans = item["answer_spans"]
      examples.append(
          OpenQAExample(
              item["question_id"],
              question,
              item["context"],
              item["answer_text"],
              [[pair[0] for pair in doc_spans] for doc_spans in spans],
              [[pair[1] for pair in doc_spans] for doc_spans in spans]))
  return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 max_query_length, is_training, output_fn):
  """Loads a data file into a list of `InputBatch`s.

  For every example this builds one "[CLS] query [SEP] document [SEP]"
  window of max_seq_length WordPiece tokens per document (NUM_DOCS windows),
  remaps the word-level answer spans onto WordPiece token indices,
  concatenates the windows into flat lists and passes the resulting
  InputFeatures to `output_fn`. Examples whose answer spans all fall outside
  the truncated windows are skipped entirely.

  Args:
    examples: list of OpenQAExample.
    tokenizer: tokenization.FullTokenizer for WordPiece tokenization.
    max_seq_length: per-document window length, including [CLS]/[SEP].
    max_query_length: queries longer than this are truncated.
    is_training: only controls logging of the gold answer text here.
    output_fn: callback invoked once per kept feature
      (e.g. FeatureWriter.process_feature).
  """
  unique_id = 1000000000
  # c1/c2 count truncated queries and truncated documents for final logging.
  c1, c2 = 0, 0
  for (example_index, example) in enumerate(examples):
    query_tokens = tokenizer.tokenize(example.question_text)
    if len(query_tokens) > max_query_length:
      query_tokens = query_tokens[0:max_query_length]
      c1 += 1
    # The -3 accounts for [CLS], [SEP] and [SEP]
    # NOTE(review): max_tokens_for_doc is computed but never used; documents
    # are instead truncated after tokenization (see `c2` below).
    max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
    if example_index < 20:
      tf.logging.info("*** Example ***")
      tf.logging.info("unique_id: %s" % (unique_id))
      if is_training:
        tf.logging.info("answer: %s" % (example.orig_answer_text))
    elif example_index % 100 == 0:
      tf.logging.info("example_index: %s" % (example_index))
    # Per-example accumulators: the NUM_DOCS windows are concatenated into
    # single flat lists of length NUM_DOCS * max_seq_length.
    tokens_list = []
    input_ids_list = []
    segment_ids_list = []
    input_mask_list = []
    start_positions = []
    end_positions = []
    for i in range(NUM_DOCS):
      # orig_to_tok_index[j] = index of the first WordPiece of word j.
      # tok_to_orig_index is filled but not used afterwards.
      tok_to_orig_index = []
      orig_to_tok_index = []
      all_doc_tokens = []
      if i < len(example.doc_tokens):
        for (j, token) in enumerate(example.doc_tokens[i]):
          orig_to_tok_index.append(len(all_doc_tokens))
          # Map the corpus' structural marker tokens to short placeholders
          # so they survive WordPiece tokenization as single units.
          token = token.replace("%%DOCUMENT%%", "%d")
          token = token.replace("%%PARAGRAPH%%", "%p")
          token = token.replace("%%PARAGRAPH_GROUP%%", "%g")
          sub_tokens = tokenizer.tokenize(token)
          for sub_token in sub_tokens:
            tok_to_orig_index.append(j)
            all_doc_tokens.append(sub_token)
      # Assemble "[CLS] query [SEP] doc [SEP]"; segment id 0 marks the query
      # part and 1 marks the document part.
      tokens = []
      segment_ids = []
      tokens.append("[CLS]")
      segment_ids.append(0)
      for token in query_tokens:
        tokens.append(token)
        segment_ids.append(0)
      tokens.append("[SEP]")
      segment_ids.append(0)
      for token in all_doc_tokens:
        tokens.append(token)
        segment_ids.append(1)
      tokens.append("[SEP]")
      segment_ids.append(1)
      input_ids = tokenizer.convert_tokens_to_ids(tokens)
      # The mask has 1 for real tokens and 0 for padding tokens. Only real
      # tokens are attended to.
      input_mask = [1] * len(input_ids)
      # Truncate over long sequence
      if len(input_ids) > max_seq_length:
        input_ids = input_ids[:max_seq_length]
        input_mask = input_mask[:max_seq_length]
        segment_ids = segment_ids[:max_seq_length]
        c2 += 1
      start_positions.append([])
      end_positions.append([])
      if i < len(example.doc_tokens):
        # Remap word-level answer spans to WordPiece indices, offset by the
        # query prefix ([CLS] + query + [SEP] = len(query_tokens) + 2).
        for j in range(len(example.start_positions[i])):
          sp = example.start_positions[i][j]
          sp = len(query_tokens) + 2 + orig_to_tok_index[sp]
          ep = example.end_positions[i][j]
          if ep != len(orig_to_tok_index) - 1:
            # Span end = last subtoken of word `ep`, i.e. one before the
            # first subtoken of word `ep + 1`.
            ep = len(query_tokens) + 2 + orig_to_tok_index[ep + 1] - 1
          else:
            # NOTE(review): unlike the branch above, this omits the
            # `len(query_tokens) + 2` offset, so a span ending at the last
            # document word gets a document-relative index — suspected bug;
            # confirm the intended behavior before changing.
            ep = len(all_doc_tokens) - 1
          # Drop spans cut off by the truncation above.
          if sp < len(input_ids) and ep < len(input_ids):
            start_positions[-1].append(sp)
            end_positions[-1].append(ep)
      # Zero-pad up to the sequence length.
      while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
      assert len(input_ids) == max_seq_length
      assert len(input_mask) == max_seq_length
      assert len(segment_ids) == max_seq_length
      if example_index < 20:
        tf.logging.info("#%d" % i)
        tf.logging.info("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info(
            "input_mask: %s" % " ".join([str(x) for x in input_mask]))
        tf.logging.info(
            "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        tf.logging.info(
            "start_positions: %s" % start_positions[-1])
        tf.logging.info(
            "end_positions: %s" % end_positions[-1])
      tokens_list.extend(tokens)
      input_ids_list.extend(input_ids)
      segment_ids_list.extend(segment_ids)
      input_mask_list.extend(input_mask)
    # Skip examples that kept no answer span in any document.
    if all([len(sp) == 0 for sp in start_positions]):
      continue
    feature = InputFeatures(
        unique_id=unique_id,
        example_index=example_index,
        tokens_list=tokens_list,
        input_ids_list=input_ids_list,
        input_mask_list=input_mask_list,
        segment_ids_list=segment_ids_list,
        start_positions=start_positions,
        end_positions=end_positions)
    # Run callback
    output_fn(feature)
    unique_id += 1
  tf.logging.info("Num of overlong querys: %d" % c1)
  tf.logging.info("Num of overlong documents : %d" % c2)
def create_model(bert_config, is_training, input_ids_list, input_mask_list,
                 segment_ids_list, use_one_hot_embeddings):
  """Creates a classification model.

  Runs one weight-shared BERT encoder over each of the NUM_DOCS documents
  packed into every input row and produces per-token answer-span logits.

  Args:
    bert_config: modeling.BertConfig describing the encoder.
    is_training: bool; enables dropout inside BERT when True.
    input_ids_list: int Tensor [batch, NUM_DOCS * seq_length] of token ids.
    input_mask_list: same shape; 1 marks real tokens, 0 padding.
    segment_ids_list: same shape; 0 = query part, 1 = document part.
    use_one_hot_embeddings: forwarded to modeling.BertModel.

  Returns:
    (start_logits, end_logits), each [batch, NUM_DOCS * seq_length]: the
    per-document logits concatenated back along the last axis.
  """
  all_logits = []  # NOTE(review): unused.
  input_ids_shape = modeling.get_shape_list(input_ids_list, expected_rank=2)
  batch_size = input_ids_shape[0]
  seq_length = input_ids_shape[1]
  # The NUM_DOCS windows arrive concatenated; recover the per-document length.
  seq_length = seq_length // NUM_DOCS
  def reshape_and_unstack_inputs(inputs, batch_size):
    # [batch, NUM_DOCS * seq_length] -> NUM_DOCS tensors of [batch, seq_length].
    inputs = tf.reshape(inputs, [batch_size, NUM_DOCS, seq_length])
    return tf.unstack(inputs, axis=1)
  input_ids_list = reshape_and_unstack_inputs(input_ids_list, batch_size)
  input_mask_list = reshape_and_unstack_inputs(input_mask_list, batch_size)
  segment_ids_list = reshape_and_unstack_inputs(segment_ids_list, batch_size)
  start_logits, end_logits = [], []
  # AUTO_REUSE makes every loop iteration share the "bert" encoder weights
  # and the cls/open_qa output layer across documents.
  with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope:
    for i in range(len(input_ids_list)):
      model = modeling.BertModel(
          config=bert_config,
          is_training=is_training,
          input_ids=input_ids_list[i],
          input_mask=input_mask_list[i],
          token_type_ids=segment_ids_list[i],
          use_one_hot_embeddings=use_one_hot_embeddings,
          scope="bert")
      final_hidden = model.get_sequence_output()
      final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
      hidden_size = final_hidden_shape[2]
      # Linear span classifier: 2 outputs per token (start logit, end logit).
      output_weights = tf.get_variable(
          "cls/open_qa/output_weights", [2, hidden_size],
          initializer=tf.truncated_normal_initializer(stddev=0.02))
      output_bias = tf.get_variable(
          "cls/open_qa/output_bias", [2], initializer=tf.zeros_initializer())
      final_hidden_matrix = tf.reshape(final_hidden,
                                       [batch_size * seq_length, hidden_size])
      logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
      logits = tf.nn.bias_add(logits, output_bias)
      logits = tf.reshape(logits, [batch_size, seq_length, 2])
      # [batch, seq, 2] -> [2, batch, seq] so start/end can be unstacked.
      logits = tf.transpose(logits, [2, 0, 1])
      unstacked_logits = tf.unstack(logits, axis=0)
      (s_logits, e_logits) = (unstacked_logits[0], unstacked_logits[1])
      start_logits.append(s_logits)
      end_logits.append(e_logits)
  # Re-concatenate per-document logits into [batch, NUM_DOCS * seq_length].
  start_logits = tf.concat(start_logits, axis=-1)
  end_logits = tf.concat(end_logits, axis=-1)
  return (start_logits, end_logits)
def average_gradients(tower_grads):
  """Average the gradients of shared variables across all GPU towers.

  Args:
    tower_grads: list (one entry per tower) of lists of (gradient, variable)
      pairs, with every tower listing the same variables in the same order.

  Returns:
    (average_grads, tvars): the averaged gradient for each variable (None
    when the first tower reported no gradient for it), and the variables
    taken from the first tower.
  """
  average_grads = []
  tvars = []
  for grad_and_vars in zip(*tower_grads):
    # Note that each grad_and_vars looks like the following:
    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
    grads = []
    # Only the first tower is inspected: a variable with no gradient on one
    # tower is assumed to have no gradient on any tower.
    # Fix: use identity (`is None`) instead of `== None` — equality against
    # None on a Tensor is not a reliable truth test and violates PEP 8.
    if grad_and_vars[0][0] is None:
      print(grad_and_vars[0][1], "grads: None")
      grad = None
    else:
      for g, _ in grad_and_vars:
        # Add 0 dimension to the gradients to represent the tower.
        expanded_g = tf.expand_dims(g, 0)
        # Append on a 'tower' dimension which we will average over below.
        grads.append(expanded_g)
      # Average over the 'tower' dimension.
      grad = tf.concat(axis=0, values=grads)
      grad = tf.reduce_mean(grad, 0)
    # Keep in mind that the Variables are redundant because they are shared
    # across towers. So .. we will just return the first tower's pointer to
    # the Variable.
    v = grad_and_vars[0][1]
    average_grads.append(grad)
    tvars.append(v)
  return average_grads, tvars
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, num_gpus, is_training):
  """Returns `model_fn` closure for TPUEstimator.

  Despite the name, the returned closure builds an in-graph multi-GPU
  (tower-replicated) graph directly from a dataset iterator and returns
  (train_op, mean_loss, global_step, learning_rate) tensors rather than an
  EstimatorSpec.
  """
  def model_fn(input_data):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    optimizer, global_step, current_lr = optimization.create_optimizer(
        learning_rate, num_train_steps, num_warmup_steps)
    tower_grads = []
    losses = []
    # AUTO_REUSE shares all model variables across the per-GPU towers.
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
      for i in range(num_gpus):
        with tf.device('/gpu:%d' % i):
          with tf.name_scope('%s_%d' % ("tower", i)) as scope:
            # Each tower pulls its own batch from the shared iterator.
            features = input_data.get_next()
            tf.logging.info("*** Features ***")
            for name in sorted(features.keys()):
              tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
            unique_ids = features["unique_ids"]  # NOTE(review): unused.
            input_ids_list = features["input_ids_list"]
            input_mask_list = features["input_mask_list"]
            segment_ids_list = features["segment_ids_list"]
            (start_logits, end_logits) = create_model(
                bert_config=bert_config,
                is_training=is_training,
                input_ids_list=input_ids_list,
                input_mask_list=input_mask_list,
                segment_ids_list=segment_ids_list,
                use_one_hot_embeddings=False)
            tvars = tf.trainable_variables()
            initialized_variable_names = {}
            # Warm-start from a pre-trained BERT checkpoint when given.
            if init_checkpoint:
              (assignment_map, initialized_variable_names) = \
                modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
              tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
            tf.logging.info("**** Trainable Variables ****")
            for var in tvars:
              init_string = ""
              if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
              tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                              init_string)
            # Full (concatenated) sequence length = NUM_DOCS * per-doc length.
            seq_length = modeling.get_shape_list(input_ids_list)[1]
            def compute_loss(logits, positions, weights):
              # positions: [batch, NUM_ANSWER_SPANS] flat token indices into
              # the concatenated sequence; weights zero out padded span slots.
              # The target is the (multi-hot) sum of one-hot span positions.
              a = tf.one_hot(
                  positions, depth=seq_length, dtype=tf.float32)
              b = tf.expand_dims(weights, -1)
              c = tf.multiply(a, b)
              d = tf.reduce_sum(c, 1) # / tf.expand_dims(tf.reduce_sum(weights, -1), -1) # TODO:
              log_probs = tf.nn.log_softmax(logits, axis=-1)
              loss = -tf.reduce_mean(
                  tf.reduce_sum(d * log_probs, axis=-1))
              return loss
            start_positions = features["start_positions"]
            end_positions = features["end_positions"]
            weights = tf.cast(features["weights"], tf.float32)
            start_loss = compute_loss(start_logits, start_positions, weights)
            end_loss = compute_loss(end_logits, end_positions, weights)
            total_loss = (start_loss + end_loss) / 2.0
            losses.append(total_loss)
            # Calculate the gradients for this tower's batch (pattern follows
            # the TF multi-GPU CIFAR tutorial).
            tvars = tf.trainable_variables()
            grads = tf.gradients(total_loss, tvars)
            grads_and_tvars = [(g, v) for g, v in zip(grads, tvars)]
            # Keep track of the gradients across all towers.
            tower_grads.append(grads_and_tvars)
    # average gradients
    average_grads, tvars = average_gradients(tower_grads)
    (grads, _) = tf.clip_by_global_norm(average_grads, clip_norm=1.0)
    train_op = optimizer.apply_gradients(
        zip(grads, tvars), global_step=global_step)
    # NOTE(review): global_step is also passed to apply_gradients above;
    # whether this manual assign double-counts depends on the optimizer in
    # `optimization` — confirm before relying on exact step numbers.
    new_global_step = global_step + 1
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
    return train_op, tf.reduce_mean(losses), global_step, current_lr
  return model_fn
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  The returned input_fn reads the TFRecord file written by FeatureWriter and
  yields batched, int32-cast feature dicts. Note the dataset repeats forever
  for both training and evaluation; callers bound how many batches they draw.
  """
  name_to_features = {
      "unique_ids": tf.FixedLenFeature([], tf.int64),
      # Each record holds NUM_DOCS concatenated windows of seq_length tokens.
      "input_ids_list": tf.FixedLenFeature([NUM_DOCS * seq_length], tf.int64),
      "input_mask_list": tf.FixedLenFeature([NUM_DOCS * seq_length], tf.int64),
      "segment_ids_list": tf.FixedLenFeature([NUM_DOCS * seq_length], tf.int64),
  }
  # Label features: flat answer-span indices plus 0/1 validity weights
  # (see FeatureWriter.process_feature); parsed unconditionally because
  # main() writes both train and eval records with is_training=True.
  name_to_features["start_positions"] = tf.FixedLenFeature([NUM_ANSWER_SPANS], tf.int64)
  name_to_features["end_positions"] = tf.FixedLenFeature([NUM_ANSWER_SPANS], tf.int64)
  name_to_features["weights"] = tf.FixedLenFeature([NUM_ANSWER_SPANS], tf.int64)
  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)
    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t
    return example
  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    num_gpus = params["num_gpus"]
    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    d = d.repeat()
    if is_training:
      d = d.shuffle(buffer_size=100)
    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))
    # Prefetch one batch per GPU tower.
    d = d.prefetch(num_gpus)
    return d
  return input_fn
class FeatureWriter(object):
  """Writes InputFeature to TF example file.

  Serializes InputFeatures to a TFRecord file; when is_training is True the
  answer-span labels are flattened and written alongside the inputs.
  """
  def __init__(self, filename, is_training, max_seq_length):
    # filename: destination TFRecord path.
    # is_training: when True, span labels and weights are serialized too.
    # max_seq_length: per-document window size, used to flatten span indices.
    self.filename = filename
    self.is_training = is_training
    self.num_features = 0
    self._writer = tf.python_io.TFRecordWriter(filename)
    self.max_seq_length = max_seq_length
  def process_feature(self, feature):
    """Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
    self.num_features += 1
    def create_int_feature(values):
      # NOTE: the local `feature` shadows the outer argument inside here.
      feature = tf.train.Feature(
          int64_list=tf.train.Int64List(value=list(values)))
      return feature
    features = collections.OrderedDict()
    features["unique_ids"] = create_int_feature([feature.unique_id])
    features["input_ids_list"] = create_int_feature(feature.input_ids_list)
    features["input_mask_list"] = create_int_feature(feature.input_mask_list)
    features["segment_ids_list"] = create_int_feature(feature.segment_ids_list)
    if self.is_training:
      # Flatten per-document span indices into positions within the
      # concatenated NUM_DOCS * max_seq_length token sequence.
      start_positions = []
      for i in range(len(feature.start_positions)):
        for sp in feature.start_positions[i]:
          start_positions.append(i * self.max_seq_length + sp)
      end_positions = []
      for i in range(len(feature.end_positions)):
        for ep in feature.end_positions[i]:
          end_positions.append(i * self.max_seq_length + ep)
      # weights mark which of the NUM_ANSWER_SPANS label slots hold a real
      # span; all three lists are padded/truncated to NUM_ANSWER_SPANS.
      weights = [1] * len(start_positions) + [0] * (NUM_ANSWER_SPANS - len(start_positions))
      start_positions = start_positions + [0] * (NUM_ANSWER_SPANS - len(start_positions))
      end_positions = end_positions + [0] * (NUM_ANSWER_SPANS - len(end_positions))
      features["start_positions"] = create_int_feature(start_positions[:NUM_ANSWER_SPANS])
      features["end_positions"] = create_int_feature(end_positions[:NUM_ANSWER_SPANS])
      features["weights"] = create_int_feature(weights[:NUM_ANSWER_SPANS])
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    self._writer.write(tf_example.SerializeToString())
  def close(self):
    self._writer.close()
def validate_flags_or_throw(bert_config):
  """Validate the input FLAGS or throw an exception.

  Checks that the data-file flags are set and that max_seq_length is
  compatible with both the BERT config and max_query_length.

  Raises:
    ValueError: on the first flag combination found to be invalid.
  """
  if not FLAGS.train_file:
    raise ValueError("`train_file` must be specified.")
  if not FLAGS.eval_file:
    raise ValueError("`eval_file` must be specified.")
  max_positions = bert_config.max_position_embeddings
  if FLAGS.max_seq_length > max_positions:
    message = ("Cannot use sequence length %d because the BERT model "
               "was only trained up to sequence length %d" %
               (FLAGS.max_seq_length, max_positions))
    raise ValueError(message)
  # Need room for [CLS], two [SEP] markers and at least one document token.
  if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
    message = ("The max_seq_length (%d) must be greater than max_query_length "
               "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
    raise ValueError(message)
def main(_):
  """End-to-end driver: featurize the data, build the graph, train, evaluate."""
  tf.logging.set_verbosity(tf.logging.INFO)
  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
  validate_flags_or_throw(bert_config)
  tf.gfile.MakeDirs(FLAGS.output_dir)
  # "best_model" holds only the checkpoint with the lowest eval loss.
  tf.gfile.MakeDirs(os.path.join(FLAGS.output_dir, "best_model"))
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  train_examples = read_open_qa_examples(
      inputfile=FLAGS.train_file, is_training=True)
  # Steps per epoch: the per-step batch is train_batch_size on each GPU.
  num_train_steps = int(
      len(train_examples) / FLAGS.num_gpus / FLAGS.train_batch_size)
  num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
  eval_examples = read_open_qa_examples(
      inputfile=FLAGS.eval_file, is_training=True)
  # NOTE(review): evaluation reuses train_batch_size (not predict_batch_size)
  # both here and when building eval_dataset below.
  num_eval_steps = int(
      len(eval_examples) / FLAGS.num_gpus / FLAGS.train_batch_size)
  # Pre-shuffle the input to avoid having to make a very large shuffle
  # buffer in the `input_fn`. Fixed seed keeps runs reproducible.
  rng = random.Random(12345)
  rng.shuffle(train_examples)
  model_fn = model_fn_builder(
      bert_config=bert_config,
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      num_gpus=FLAGS.num_gpus,
      is_training=True)
  # Featurize once and cache as TFRecords; existing files are reused as-is
  # (delete them if the flags affecting featurization change).
  train_filename = os.path.join(FLAGS.output_dir, "train.tf_record")
  eval_filename = os.path.join(FLAGS.output_dir, "dev.tf_record")
  if not os.path.exists(train_filename):
    # We write to a temporary file to avoid storing very large constant tensors
    # in memory.
    train_writer = FeatureWriter(
        filename=train_filename,
        is_training=True,
        max_seq_length=FLAGS.max_seq_length)
    convert_examples_to_features(
        examples=train_examples,
        tokenizer=tokenizer,
        max_seq_length=FLAGS.max_seq_length,
        max_query_length=FLAGS.max_query_length,
        is_training=True,
        output_fn=train_writer.process_feature)
    train_writer.close()
  if not os.path.exists(eval_filename):
    # Eval records are written with is_training=True so eval loss can be
    # computed from the same labeled features.
    eval_writer = FeatureWriter(
        filename=eval_filename,
        is_training=True,
        max_seq_length=FLAGS.max_seq_length)
    convert_examples_to_features(
        examples=eval_examples,
        tokenizer=tokenizer,
        max_seq_length=FLAGS.max_seq_length,
        max_query_length=FLAGS.max_query_length,
        is_training=True,
        output_fn=eval_writer.process_feature)
    eval_writer.close()
  tf.logging.info("***** Running training *****")
  tf.logging.info("  Num orig train examples = %d", len(train_examples))
  tf.logging.info("  Num orig eval examples = %d", len(eval_examples))
  tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
  tf.logging.info("  Num steps = %d", num_train_steps)
  del train_examples, eval_examples
  # Build the training graph; model_fn pulls batches from the iterator.
  train_input_fn = input_fn_builder(
      input_file=train_filename,
      seq_length=FLAGS.max_seq_length,
      is_training=True,
      drop_remainder=True)
  train_dataset = train_input_fn({"batch_size":FLAGS.train_batch_size, "num_gpus":FLAGS.num_gpus})
  input_data = train_dataset.make_one_shot_iterator()
  train_op, loss, global_step, lr = model_fn(input_data)
  # The eval graph reuses the same variables (model_fn uses AUTO_REUSE);
  # only its mean loss tensor is fetched.
  eval_input_fn = input_fn_builder(
      input_file=eval_filename,
      seq_length=FLAGS.max_seq_length,
      is_training=False,
      drop_remainder=True)
  eval_dataset = eval_input_fn({"batch_size":FLAGS.train_batch_size, "num_gpus":FLAGS.num_gpus})
  eval_input_data = eval_dataset.make_one_shot_iterator()
  _, eval_loss, _, _ = model_fn(eval_input_data)
  saver = tf.train.Saver(max_to_keep=5)
  best_saver = tf.train.Saver(max_to_keep=1)
  best_eval_loss = float("inf")
  current_batch_losses = []
  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    batch_time = 0
    on_step = 0
    for epoch in range(FLAGS.num_train_epochs):
      for i in range(num_train_steps):
        t0 = time.perf_counter()
        _, batch_loss, on_step, current_lr = sess.run([train_op, loss, global_step, lr])
        # NOTE(review): whether the fetched global_step reflects this run's
        # increment is unspecified; the +1 normalizes it to a 1-based count.
        on_step = on_step + 1
        current_batch_losses.append(batch_loss)
        if np.isnan(batch_loss):
          raise RuntimeError("NaN loss!")
        batch_time += time.perf_counter() - t0
        # Periodic console logging of mean loss and wall time per window.
        if on_step % FLAGS.log_steps == 0:
          print("on epoch=%d batch=%d step=%d time=%.3f loss=%.3f lr=%.6f" %
                (epoch, i + 1, on_step, batch_time, np.mean(current_batch_losses), current_lr))
          current_batch_losses = []
          batch_time = 0
        # occasional saving
        if on_step % FLAGS.save_checkpoints_steps == 0:
          print("Checkpointing:", on_step)
          saver.save(sess, os.path.join(FLAGS.output_dir, "checkpoint-" + str(on_step)))
        # Occasional evaluation
        if on_step % FLAGS.eval_steps == 0:
          print("Running evaluation...")
          all_eval_losses = []
          t0 = time.perf_counter()
          # Draw num_eval_steps batches from the (infinitely repeating)
          # eval dataset; each sess.run returns a one-element list.
          for j in range(num_eval_steps):
            batch_loss = sess.run([eval_loss])
            all_eval_losses.append(batch_loss)
          print("Evaluation took: %.3f seconds" % (time.perf_counter() - t0))
          average_eval_loss = np.mean(all_eval_losses)
          print("Eval loss: %f, Best eval loss: %f" % (average_eval_loss, best_eval_loss))
          # Keep a separate single-slot checkpoint for the best eval loss.
          if average_eval_loss < best_eval_loss:
            print("Best model ever since")
            best_eval_loss = average_eval_loss
            best_saver.save(sess, os.path.join(FLAGS.output_dir, "best_model", "best"))
    # Redundant inside the `with` block (the context manager closes the
    # session), but harmless.
    sess.close()
if __name__ == "__main__":
  # These flags have no usable defaults; abort at startup if they are missing.
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  tf.app.run()
| [
"tensorflow.unstack",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.logging.set_verbosity",
"tensorflow.multiply",
"bert.tokenization.FullTokenizer",
"tensorflow.truncated_normal_initializer",
"tensorflow.get_variable_scope",
"tensorflow.gradients",
"tensorflow.gfile.MakeDirs",
"b... | [((9877, 9927), 'tensorflow.logging.info', 'tf.logging.info', (["('Num of overlong querys: %d' % c1)"], {}), "('Num of overlong querys: %d' % c1)\n", (9892, 9927), True, 'import tensorflow as tf\n'), ((9930, 9984), 'tensorflow.logging.info', 'tf.logging.info', (["('Num of overlong documents : %d' % c2)"], {}), "('Num of overlong documents : %d' % c2)\n", (9945, 9984), True, 'import tensorflow as tf\n'), ((10201, 10257), 'bert.modeling.get_shape_list', 'modeling.get_shape_list', (['input_ids_list'], {'expected_rank': '(2)'}), '(input_ids_list, expected_rank=2)\n', (10224, 10257), False, 'from bert import modeling\n'), ((12266, 12298), 'tensorflow.concat', 'tf.concat', (['start_logits'], {'axis': '(-1)'}), '(start_logits, axis=-1)\n', (12275, 12298), True, 'import tensorflow as tf\n'), ((12314, 12344), 'tensorflow.concat', 'tf.concat', (['end_logits'], {'axis': '(-1)'}), '(end_logits, axis=-1)\n', (12323, 12344), True, 'import tensorflow as tf\n'), ((17918, 17966), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[NUM_ANSWER_SPANS]', 'tf.int64'], {}), '([NUM_ANSWER_SPANS], tf.int64)\n', (17936, 17966), True, 'import tensorflow as tf\n'), ((18005, 18053), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[NUM_ANSWER_SPANS]', 'tf.int64'], {}), '([NUM_ANSWER_SPANS], tf.int64)\n', (18023, 18053), True, 'import tensorflow as tf\n'), ((18086, 18134), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[NUM_ANSWER_SPANS]', 'tf.int64'], {}), '([NUM_ANSWER_SPANS], tf.int64)\n', (18104, 18134), True, 'import tensorflow as tf\n'), ((22101, 22142), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (22125, 22142), True, 'import tensorflow as tf\n'), ((22160, 22218), 'bert.modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), '(FLAGS.bert_config_file)\n', (22194, 22218), False, 'from bert import modeling\n'), ((22262, 22297), 
'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (22279, 22297), True, 'import tensorflow as tf\n'), ((22379, 22474), 'bert.tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'FLAGS.vocab_file', 'do_lower_case': 'FLAGS.do_lower_case'}), '(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS\n .do_lower_case)\n', (22405, 22474), False, 'from bert import tokenization\n'), ((23109, 23129), 'random.Random', 'random.Random', (['(12345)'], {}), '(12345)\n', (23122, 23129), False, 'import random\n'), ((23464, 23513), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""train.tf_record"""'], {}), "(FLAGS.output_dir, 'train.tf_record')\n", (23476, 23513), False, 'import os\n'), ((23532, 23579), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""dev.tf_record"""'], {}), "(FLAGS.output_dir, 'dev.tf_record')\n", (23544, 23579), False, 'import os\n'), ((24611, 24658), 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), "('***** Running training *****')\n", (24626, 24658), True, 'import tensorflow as tf\n'), ((24805, 24865), 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), "(' Batch size = %d', FLAGS.train_batch_size)\n", (24820, 24865), True, 'import tensorflow as tf\n'), ((24868, 24920), 'tensorflow.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), "(' Num steps = %d', num_train_steps)\n", (24883, 24920), True, 'import tensorflow as tf\n'), ((25703, 25732), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(5)'}), '(max_to_keep=5)\n', (25717, 25732), True, 'import tensorflow as tf\n'), ((25748, 25777), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1)'}), '(max_to_keep=1)\n', (25762, 25777), True, 'import tensorflow as tf\n'), ((27886, 27898), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (27896, 27898), True, 
'import tensorflow as tf\n'), ((10434, 10488), 'tensorflow.reshape', 'tf.reshape', (['inputs', '[batch_size, NUM_DOCS, seq_length]'], {}), '(inputs, [batch_size, NUM_DOCS, seq_length])\n', (10444, 10488), True, 'import tensorflow as tf\n'), ((10502, 10528), 'tensorflow.unstack', 'tf.unstack', (['inputs'], {'axis': '(1)'}), '(inputs, axis=1)\n', (10512, 10528), True, 'import tensorflow as tf\n'), ((13759, 13838), 'optimization.create_optimizer', 'optimization.create_optimizer', (['learning_rate', 'num_train_steps', 'num_warmup_steps'], {}), '(learning_rate, num_train_steps, num_warmup_steps)\n', (13788, 13838), False, 'import optimization\n'), ((17599, 17631), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (17617, 17631), True, 'import tensorflow as tf\n'), ((17657, 17710), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[NUM_DOCS * seq_length]', 'tf.int64'], {}), '([NUM_DOCS * seq_length], tf.int64)\n', (17675, 17710), True, 'import tensorflow as tf\n'), ((17737, 17790), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[NUM_DOCS * seq_length]', 'tf.int64'], {}), '([NUM_DOCS * seq_length], tf.int64)\n', (17755, 17790), True, 'import tensorflow as tf\n'), ((17818, 17871), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[NUM_DOCS * seq_length]', 'tf.int64'], {}), '([NUM_DOCS * seq_length], tf.int64)\n', (17836, 17871), True, 'import tensorflow as tf\n'), ((18250, 18299), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), '(record, name_to_features)\n', (18273, 18299), True, 'import tensorflow as tf\n'), ((18861, 18896), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_file'], {}), '(input_file)\n', (18884, 18896), True, 'import tensorflow as tf\n'), ((19490, 19527), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['filename'], {}), '(filename)\n', (19517, 19527), True, 'import tensorflow as tf\n'), 
((19880, 19905), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (19903, 19905), False, 'import collections\n'), ((22318, 22362), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""best_model"""'], {}), "(FLAGS.output_dir, 'best_model')\n", (22330, 22362), False, 'import os\n'), ((23589, 23619), 'os.path.exists', 'os.path.exists', (['train_filename'], {}), '(train_filename)\n', (23603, 23619), False, 'import os\n'), ((24155, 24184), 'os.path.exists', 'os.path.exists', (['eval_filename'], {}), '(eval_filename)\n', (24169, 24184), False, 'import os\n'), ((25926, 25959), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (25957, 25959), True, 'import tensorflow as tf\n'), ((3457, 3504), 'bert.tokenization.printable_text', 'tokenization.printable_text', (['self.question_text'], {}), '(self.question_text)\n', (3484, 3504), False, 'from bert import tokenization\n'), ((5819, 5853), 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Example ***"""'], {}), "('*** Example ***')\n", (5834, 5853), True, 'import tensorflow as tf\n'), ((5860, 5904), 'tensorflow.logging.info', 'tf.logging.info', (["('unique_id: %s' % unique_id)"], {}), "('unique_id: %s' % unique_id)\n", (5875, 5904), True, 'import tensorflow as tf\n'), ((10820, 10843), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (10841, 10843), True, 'import tensorflow as tf\n'), ((10931, 11165), 'bert.modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids_list[i]', 'input_mask': 'input_mask_list[i]', 'token_type_ids': 'segment_ids_list[i]', 'use_one_hot_embeddings': 'use_one_hot_embeddings', 'scope': '"""bert"""'}), "(config=bert_config, is_training=is_training, input_ids=\n input_ids_list[i], input_mask=input_mask_list[i], token_type_ids=\n segment_ids_list[i], use_one_hot_embeddings=use_one_hot_embeddings,\n scope='bert')\n", (10949, 11165), 
False, 'from bert import modeling\n'), ((11300, 11354), 'bert.modeling.get_shape_list', 'modeling.get_shape_list', (['final_hidden'], {'expected_rank': '(3)'}), '(final_hidden, expected_rank=3)\n', (11323, 11354), False, 'from bert import modeling\n'), ((11708, 11772), 'tensorflow.reshape', 'tf.reshape', (['final_hidden', '[batch_size * seq_length, hidden_size]'], {}), '(final_hidden, [batch_size * seq_length, hidden_size])\n', (11718, 11772), True, 'import tensorflow as tf\n'), ((11827, 11891), 'tensorflow.matmul', 'tf.matmul', (['final_hidden_matrix', 'output_weights'], {'transpose_b': '(True)'}), '(final_hidden_matrix, output_weights, transpose_b=True)\n', (11836, 11891), True, 'import tensorflow as tf\n'), ((11907, 11942), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), '(logits, output_bias)\n', (11921, 11942), True, 'import tensorflow as tf\n'), ((11959, 12006), 'tensorflow.reshape', 'tf.reshape', (['logits', '[batch_size, seq_length, 2]'], {}), '(logits, [batch_size, seq_length, 2])\n', (11969, 12006), True, 'import tensorflow as tf\n'), ((12022, 12053), 'tensorflow.transpose', 'tf.transpose', (['logits', '[2, 0, 1]'], {}), '(logits, [2, 0, 1])\n', (12034, 12053), True, 'import tensorflow as tf\n'), ((12079, 12105), 'tensorflow.unstack', 'tf.unstack', (['logits'], {'axis': '(0)'}), '(logits, axis=0)\n', (12089, 12105), True, 'import tensorflow as tf\n'), ((13058, 13089), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': 'grads'}), '(axis=0, values=grads)\n', (13067, 13089), True, 'import tensorflow as tf\n'), ((13103, 13126), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grad', '(0)'], {}), '(grad, 0)\n', (13117, 13126), True, 'import tensorflow as tf\n'), ((17054, 17106), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['average_grads'], {'clip_norm': '(1.0)'}), '(average_grads, clip_norm=1.0)\n', (17076, 17106), True, 'import tensorflow as tf\n'), ((17343, 17365), 'tensorflow.reduce_mean', 
'tf.reduce_mean', (['losses'], {}), '(losses)\n', (17357, 17365), True, 'import tensorflow as tf\n'), ((5937, 5993), 'tensorflow.logging.info', 'tf.logging.info', (["('answer: %s' % example.orig_answer_text)"], {}), "('answer: %s' % example.orig_answer_text)\n", (5952, 5993), True, 'import tensorflow as tf\n'), ((6037, 6089), 'tensorflow.logging.info', 'tf.logging.info', (["('example_index: %s' % example_index)"], {}), "('example_index: %s' % example_index)\n", (6052, 6089), True, 'import tensorflow as tf\n'), ((8678, 8704), 'tensorflow.logging.info', 'tf.logging.info', (["('#%d' % i)"], {}), "('#%d' % i)\n", (8693, 8704), True, 'import tensorflow as tf\n'), ((9100, 9160), 'tensorflow.logging.info', 'tf.logging.info', (["('start_positions: %s' % start_positions[-1])"], {}), "('start_positions: %s' % start_positions[-1])\n", (9115, 9160), True, 'import tensorflow as tf\n'), ((9182, 9238), 'tensorflow.logging.info', 'tf.logging.info', (["('end_positions: %s' % end_positions[-1])"], {}), "('end_positions: %s' % end_positions[-1])\n", (9197, 9238), True, 'import tensorflow as tf\n'), ((12871, 12891), 'tensorflow.expand_dims', 'tf.expand_dims', (['g', '(0)'], {}), '(g, 0)\n', (12885, 12891), True, 'import tensorflow as tf\n'), ((13913, 13936), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (13934, 13936), True, 'import tensorflow as tf\n'), ((18516, 18530), 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), '(t)\n', (18527, 18530), True, 'import tensorflow as tf\n'), ((21167, 21202), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), '(feature=features)\n', (21184, 21202), True, 'import tensorflow as tf\n'), ((25863, 25904), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (25877, 25904), True, 'import tensorflow as tf\n'), ((26115, 26134), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (26132, 26134), False, 'import time\n'), 
((26314, 26334), 'numpy.isnan', 'np.isnan', (['batch_loss'], {}), '(batch_loss)\n', (26322, 26334), True, 'import numpy as np\n'), ((11518, 11562), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (11549, 11562), True, 'import tensorflow as tf\n'), ((11655, 11677), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (11675, 11677), True, 'import tensorflow as tf\n'), ((14005, 14029), 'tensorflow.device', 'tf.device', (["('/gpu:%d' % i)"], {}), "('/gpu:%d' % i)\n", (14014, 14029), True, 'import tensorflow as tf\n'), ((26401, 26420), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (26418, 26420), False, 'import time\n'), ((27091, 27110), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (27108, 27110), False, 'import time\n'), ((27355, 27379), 'numpy.mean', 'np.mean', (['all_eval_losses'], {}), '(all_eval_losses)\n', (27362, 27379), True, 'import numpy as np\n'), ((14046, 14083), 'tensorflow.name_scope', 'tf.name_scope', (["('%s_%d' % ('tower', i))"], {}), "('%s_%d' % ('tower', i))\n", (14059, 14083), True, 'import tensorflow as tf\n'), ((14152, 14187), 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), "('*** Features ***')\n", (14167, 14187), True, 'import tensorflow as tf\n'), ((14900, 14924), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (14922, 14924), True, 'import tensorflow as tf\n'), ((15241, 15289), 'tensorflow.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), "('**** Trainable Variables ****')\n", (15256, 15289), True, 'import tensorflow as tf\n'), ((16301, 16341), 'tensorflow.cast', 'tf.cast', (["features['weights']", 'tf.float32'], {}), "(features['weights'], tf.float32)\n", (16308, 16341), True, 'import tensorflow as tf\n'), ((16690, 16714), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (16712, 16714), True, 
'import tensorflow as tf\n'), ((16735, 16766), 'tensorflow.gradients', 'tf.gradients', (['total_loss', 'tvars'], {}), '(total_loss, tvars)\n', (16747, 16766), True, 'import tensorflow as tf\n'), ((14251, 14324), 'tensorflow.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), "(' name = %s, shape = %s' % (name, features[name].shape))\n", (14266, 14324), True, 'import tensorflow as tf\n'), ((15083, 15150), 'bert.modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), '(tvars, init_checkpoint)\n', (15126, 15150), False, 'from bert import modeling\n'), ((15165, 15227), 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), '(init_checkpoint, assignment_map)\n', (15194, 15227), True, 'import tensorflow as tf\n'), ((15473, 15551), 'tensorflow.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), "(' name = %s, shape = %s%s', var.name, var.shape, init_string)\n", (15488, 15551), True, 'import tensorflow as tf\n'), ((15608, 15647), 'bert.modeling.get_shape_list', 'modeling.get_shape_list', (['input_ids_list'], {}), '(input_ids_list)\n', (15631, 15647), False, 'from bert import modeling\n'), ((15728, 15785), 'tensorflow.one_hot', 'tf.one_hot', (['positions'], {'depth': 'seq_length', 'dtype': 'tf.float32'}), '(positions, depth=seq_length, dtype=tf.float32)\n', (15738, 15785), True, 'import tensorflow as tf\n'), ((15823, 15850), 'tensorflow.expand_dims', 'tf.expand_dims', (['weights', '(-1)'], {}), '(weights, -1)\n', (15837, 15850), True, 'import tensorflow as tf\n'), ((15869, 15886), 'tensorflow.multiply', 'tf.multiply', (['a', 'b'], {}), '(a, b)\n', (15880, 15886), True, 'import tensorflow as tf\n'), ((15905, 15924), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['c', '(1)'], {}), '(c, 1)\n', (15918, 15924), True, 'import 
tensorflow as tf\n'), ((16010, 16044), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (16027, 16044), True, 'import tensorflow as tf\n'), ((27644, 27696), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""best_model"""', '"""best"""'], {}), "(FLAGS.output_dir, 'best_model', 'best')\n", (27656, 27696), False, 'import os\n'), ((8767, 8797), 'bert.tokenization.printable_text', 'tokenization.printable_text', (['x'], {}), '(x)\n', (8794, 8797), False, 'from bert import tokenization\n'), ((26601, 26630), 'numpy.mean', 'np.mean', (['current_batch_losses'], {}), '(current_batch_losses)\n', (26608, 26630), True, 'import numpy as np\n'), ((27298, 27317), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (27315, 27317), False, 'import time\n'), ((16101, 16138), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(d * log_probs)'], {'axis': '(-1)'}), '(d * log_probs, axis=-1)\n', (16114, 16138), True, 'import tensorflow as tf\n')] |
import unittest
import sys
from contextlib import contextmanager
from io import StringIO
import pandas as pd
import numpy as np
from context import DataFrameLabeler as dfl
@contextmanager
def capture_stdout():
new_out = StringIO()
old_out = sys.stdout
try:
sys.stdout = new_out
yield sys.stdout
finally:
sys.stdout = old_out
class DataFrameLabelerTestSuite(unittest.TestCase):
def test_ctor(self):
df = pd.DataFrame(np.arange(0, 10))
# capture output to not spoil test output
with capture_stdout():
# ctor needs target_col and either label_col or labels as argument
self.assertRaises(ValueError, dfl.DataFrameLabeler, df)
self.assertRaises(ValueError, dfl.DataFrameLabeler, df, target_col='target')
self.assertRaises(ValueError, dfl.DataFrameLabeler, df, labels=['1', '2', '3'])
# label column does not exists in df
self.assertRaises(ValueError, dfl.DataFrameLabeler, df, label_col='label')
labels=['1', '2', '3']
lbl = dfl.DataFrameLabeler(df, target_col='target', labels=labels)
# check that labels are set correctly
self.assertEqual(lbl.options, labels)
# check that target column is created
self.assertIn('target', lbl.data.columns)
# check that labels are extracted correctly from label_col
df['label'] = np.random.choice(labels, df.shape[0])
lbl = dfl.DataFrameLabeler(df, target_col='target', label_col='label')
self.assertEqual(sorted(lbl.options), sorted(labels))
if __name__ == '__main__':
unittest.main()
| [
"context.DataFrameLabeler.DataFrameLabeler",
"numpy.random.choice",
"unittest.main",
"io.StringIO",
"numpy.arange"
] | [((228, 238), 'io.StringIO', 'StringIO', ([], {}), '()\n', (236, 238), False, 'from io import StringIO\n'), ((1679, 1694), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1692, 1694), False, 'import unittest\n'), ((474, 490), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (483, 490), True, 'import numpy as np\n'), ((1093, 1153), 'context.DataFrameLabeler.DataFrameLabeler', 'dfl.DataFrameLabeler', (['df'], {'target_col': '"""target"""', 'labels': 'labels'}), "(df, target_col='target', labels=labels)\n", (1113, 1153), True, 'from context import DataFrameLabeler as dfl\n'), ((1459, 1496), 'numpy.random.choice', 'np.random.choice', (['labels', 'df.shape[0]'], {}), '(labels, df.shape[0])\n', (1475, 1496), True, 'import numpy as np\n'), ((1515, 1579), 'context.DataFrameLabeler.DataFrameLabeler', 'dfl.DataFrameLabeler', (['df'], {'target_col': '"""target"""', 'label_col': '"""label"""'}), "(df, target_col='target', label_col='label')\n", (1535, 1579), True, 'from context import DataFrameLabeler as dfl\n')] |
import numpy as np
from scipy.integrate import quad
# physical constants
h = 6.626 * 10**-34 # m^2 kg / s
c = 3 * 10**8 # m / s
k = 1.38 * 10**-23 # m^2 kg s^-2 K^-1
# CMB parameters
nu_max = 5.8 * 10**10 * 2.7 # Hz, from Wien's law
T_cmb = 2.715
a_f = (370)**(1/3) # from (current photon density)^3 / a_f^3 = 1,
# this is the estimate of a_f for dropping photo density to 1 photon/cm^3
maxwell_eq = lambda nu, T : 2 * h * nu**3 / c**3 /( np.exp( h * nu / k / T ) - 1)
E_over_the_time = quad(
lambda a : maxwell_eq( nu_max * a / a_f, T_cmb * a / a_f ), 1, a_f )[0]
E_null = maxwell_eq(nu_max, T_cmb)
print('''percentage of the energy at the current CMB frequency
would in fact be from photons created over this time period and
redshifted from higher frequencies is {} > 1.'''.format(E_over_the_time / E_null)) | [
"numpy.exp"
] | [((512, 534), 'numpy.exp', 'np.exp', (['(h * nu / k / T)'], {}), '(h * nu / k / T)\n', (518, 534), True, 'import numpy as np\n')] |
import pytest
import copy
import numpy as np
from numpy.testing import assert_allclose
import os
from astropy import units as u
import setigen as stg
import blimpy as bl
@pytest.fixture()
def antenna_setup():
sample_rate = 3e9
antenna = stg.voltage.Antenna(sample_rate=sample_rate,
fch1=6*u.GHz,
ascending=True,
num_pols=2)
return antenna
@pytest.fixture()
def antenna_array_setup():
sample_rate = 3e9
delays = np.array([0, 100, 200])
antenna_array = stg.voltage.MultiAntennaArray(num_antennas=3,
sample_rate=sample_rate,
fch1=6*u.GHz,
ascending=False,
num_pols=2,
delays=delays)
return antenna_array
@pytest.fixture()
def elements_setup():
num_taps = 8
num_branches = 1024
digitizer = stg.voltage.RealQuantizer(target_fwhm=32,
num_bits=8)
filterbank = stg.voltage.PolyphaseFilterbank(num_taps=num_taps,
num_branches=num_branches)
requantizer = stg.voltage.ComplexQuantizer(target_fwhm=32,
num_bits=8)
return digitizer, filterbank, requantizer
def test_noise_injection(antenna_setup):
antenna = copy.deepcopy(antenna_setup)
for stream in antenna.streams:
stream.add_noise(0, 1)
assert stream.noise_std == 1
samples = antenna.get_samples(10000)
for pol_samples in samples[0]:
assert abs(np.mean(pol_samples)) < 0.1
assert abs(np.std(pol_samples) - 1) < 0.1
def test_noise_injection_array(antenna_array_setup):
antenna_array = copy.deepcopy(antenna_array_setup)
for stream in antenna_array.bg_streams:
stream.add_noise(0, 1)
for antenna in antenna_array.antennas:
for stream in antenna.streams:
assert stream.bg_noise_std == 1
stream.add_noise(0, 1)
assert stream.noise_std == 1
assert stream.get_total_noise_std() == pytest.approx(2**0.5)
samples = antenna_array.get_samples(10000)
for antenna_samples in samples:
for pol_samples in antenna_samples:
assert abs(np.mean(pol_samples)) < 0.1
assert abs(np.std(pol_samples) - 2**0.5) < 0.1
def test_raw_creation(antenna_setup,
elements_setup):
antenna = copy.deepcopy(antenna_setup)
digitizer, filterbank, requantizer = copy.deepcopy(elements_setup)
num_taps = 8
num_branches = 1024
num_chans = 64
num_pols = 2
block_size = num_taps * num_chans * 2 * num_pols
rvb = stg.voltage.RawVoltageBackend(antenna,
digitizer=digitizer,
filterbank=filterbank,
requantizer=requantizer,
start_chan=0,
num_chans=num_chans,
block_size=block_size,
blocks_per_file=128,
num_subblocks=32)
antenna.x.add_noise(v_mean=0,
v_std=1)
antenna.y.add_noise(v_mean=0,
v_std=1)
antenna.x.add_constant_signal(f_start=6002.2e6,
drift_rate=-2*u.Hz/u.s,
level=0.002)
antenna.y.add_constant_signal(f_start=6002.2e6,
drift_rate=-2*u.Hz/u.s,
level=0.002,
phase=np.pi/2)
rvb.record(raw_file_stem='example_1block',
num_blocks=1,
length_mode='num_blocks',
header_dict={'HELLO': 'test_value',
'TELESCOP': 'GBT'},
verbose=False)
# Check out header
header_dict = {}
with open('example_1block.0000.raw', "rb") as f:
i = 1
chunk = f.read(80)
while True:
key, item = chunk.decode().split('=')
header_dict[key.strip()] = item.strip().strip("''")
chunk = f.read(80)
if f"{'END':<80}".encode() in chunk:
break
i += 1
assert header_dict['CHAN_BW'] == '2.9296875'
assert header_dict['OBSBW'] == '187.5'
assert header_dict['OBSFREQ'] == '6092.28515625'
assert header_dict['TBIN'] == '3.41333333333333E-07'
assert header_dict['BLOCSIZE'] == '2048'
assert header_dict['HELLO'] == 'test_value'
# Reduce data
wf_data = stg.voltage.get_waterfall_from_raw('example_1block.0000.raw',
block_size=block_size,
num_chans=num_chans,
int_factor=1,
fftlength=1)
assert wf_data.shape == (8, 64)
os.remove('example_1block.0000.raw') | [
"setigen.voltage.PolyphaseFilterbank",
"numpy.mean",
"pytest.approx",
"setigen.voltage.RealQuantizer",
"setigen.voltage.get_waterfall_from_raw",
"setigen.voltage.RawVoltageBackend",
"numpy.std",
"setigen.voltage.Antenna",
"setigen.voltage.ComplexQuantizer",
"numpy.array",
"copy.deepcopy",
"pyt... | [((174, 190), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (188, 190), False, 'import pytest\n'), ((460, 476), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (474, 476), False, 'import pytest\n'), ((991, 1007), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1005, 1007), False, 'import pytest\n'), ((248, 340), 'setigen.voltage.Antenna', 'stg.voltage.Antenna', ([], {'sample_rate': 'sample_rate', 'fch1': '(6 * u.GHz)', 'ascending': '(True)', 'num_pols': '(2)'}), '(sample_rate=sample_rate, fch1=6 * u.GHz, ascending=True,\n num_pols=2)\n', (267, 340), True, 'import setigen as stg\n'), ((540, 563), 'numpy.array', 'np.array', (['[0, 100, 200]'], {}), '([0, 100, 200])\n', (548, 563), True, 'import numpy as np\n'), ((584, 719), 'setigen.voltage.MultiAntennaArray', 'stg.voltage.MultiAntennaArray', ([], {'num_antennas': '(3)', 'sample_rate': 'sample_rate', 'fch1': '(6 * u.GHz)', 'ascending': '(False)', 'num_pols': '(2)', 'delays': 'delays'}), '(num_antennas=3, sample_rate=sample_rate, fch1\n =6 * u.GHz, ascending=False, num_pols=2, delays=delays)\n', (613, 719), True, 'import setigen as stg\n'), ((1092, 1145), 'setigen.voltage.RealQuantizer', 'stg.voltage.RealQuantizer', ([], {'target_fwhm': '(32)', 'num_bits': '(8)'}), '(target_fwhm=32, num_bits=8)\n', (1117, 1145), True, 'import setigen as stg\n'), ((1206, 1283), 'setigen.voltage.PolyphaseFilterbank', 'stg.voltage.PolyphaseFilterbank', ([], {'num_taps': 'num_taps', 'num_branches': 'num_branches'}), '(num_taps=num_taps, num_branches=num_branches)\n', (1237, 1283), True, 'import setigen as stg\n'), ((1353, 1409), 'setigen.voltage.ComplexQuantizer', 'stg.voltage.ComplexQuantizer', ([], {'target_fwhm': '(32)', 'num_bits': '(8)'}), '(target_fwhm=32, num_bits=8)\n', (1381, 1409), True, 'import setigen as stg\n'), ((1565, 1593), 'copy.deepcopy', 'copy.deepcopy', (['antenna_setup'], {}), '(antenna_setup)\n', (1578, 1593), False, 'import copy\n'), ((1966, 2000), 'copy.deepcopy', 'copy.deepcopy', 
(['antenna_array_setup'], {}), '(antenna_array_setup)\n', (1979, 2000), False, 'import copy\n'), ((2698, 2726), 'copy.deepcopy', 'copy.deepcopy', (['antenna_setup'], {}), '(antenna_setup)\n', (2711, 2726), False, 'import copy\n'), ((2768, 2797), 'copy.deepcopy', 'copy.deepcopy', (['elements_setup'], {}), '(elements_setup)\n', (2781, 2797), False, 'import copy\n'), ((2943, 3156), 'setigen.voltage.RawVoltageBackend', 'stg.voltage.RawVoltageBackend', (['antenna'], {'digitizer': 'digitizer', 'filterbank': 'filterbank', 'requantizer': 'requantizer', 'start_chan': '(0)', 'num_chans': 'num_chans', 'block_size': 'block_size', 'blocks_per_file': '(128)', 'num_subblocks': '(32)'}), '(antenna, digitizer=digitizer, filterbank=\n filterbank, requantizer=requantizer, start_chan=0, num_chans=num_chans,\n block_size=block_size, blocks_per_file=128, num_subblocks=32)\n', (2972, 3156), True, 'import setigen as stg\n'), ((4959, 5096), 'setigen.voltage.get_waterfall_from_raw', 'stg.voltage.get_waterfall_from_raw', (['"""example_1block.0000.raw"""'], {'block_size': 'block_size', 'num_chans': 'num_chans', 'int_factor': '(1)', 'fftlength': '(1)'}), "('example_1block.0000.raw', block_size=\n block_size, num_chans=num_chans, int_factor=1, fftlength=1)\n", (4993, 5096), True, 'import setigen as stg\n'), ((5334, 5370), 'os.remove', 'os.remove', (['"""example_1block.0000.raw"""'], {}), "('example_1block.0000.raw')\n", (5343, 5370), False, 'import os\n'), ((1797, 1817), 'numpy.mean', 'np.mean', (['pol_samples'], {}), '(pol_samples)\n', (1804, 1817), True, 'import numpy as np\n'), ((2329, 2352), 'pytest.approx', 'pytest.approx', (['(2 ** 0.5)'], {}), '(2 ** 0.5)\n', (2342, 2352), False, 'import pytest\n'), ((1844, 1863), 'numpy.std', 'np.std', (['pol_samples'], {}), '(pol_samples)\n', (1850, 1863), True, 'import numpy as np\n'), ((2514, 2534), 'numpy.mean', 'np.mean', (['pol_samples'], {}), '(pol_samples)\n', (2521, 2534), True, 'import numpy as np\n'), ((2565, 2584), 'numpy.std', 'np.std', 
(['pol_samples'], {}), '(pol_samples)\n', (2571, 2584), True, 'import numpy as np\n')] |
# implement k-nearest neighbours classification algorithm
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# calculate distance between two points
def dist(a, b):
diff = a - b
diff = diff * diff
d = np.sqrt(sum(diff))
return d
# determining the class of testing point
def classify(train, target, a, k):
distance = []
# calculating distance from all the points in the training dataset
for i, t in enumerate(train):
d = dist(t, a)
distance.append((i, d))
# sorting the distances maintaining the order
distance = sorted(distance, key=lambda x: x[1])
# separating k nearest neighbors
knn = distance[:k]
# maximum distance in k-nearest neighbors
max_dist = knn[-1][1]
neighbors = []
for i in knn:
neighbors.append(target.iloc[i[0]])
# determining class
u, c = np.unique(neighbors, return_counts=True)
unique, count = zip(*sorted(zip(u, c), key=lambda x:x[1], reverse=True))
return unique[0], max_dist
# read data from the input file
df = pd.read_csv("classify.csv")
# separating the features and target
features = np.array(df[["a", "b"]])
target = df[["t"]]
# testing data
a = np.array([1, 3])
b = np.array([2, 2])
c = np.array([4, 4])
# determination of the classes of these testing data
pred_a, radius_a = classify(features, target, a, 3)
print(a, "is associated with class ", pred_a)
pred_b, radius_b = classify(features, target, b, 7)
print(b, "is associated with class ", pred_b)
pred_c, radius_c = classify(features, target, c, 5)
print(c, "is associated with class ", pred_c)
# colors and markers for the plot
colors = ["r", "b"]
markers = ["^", "s"]
# plotting the data points
fig = plt.figure(figsize=(6, 6))
for i, k in enumerate(features):
v = int(target.iloc[i])
plt.scatter(k[0], k[1], c=colors[v], marker=markers[v])
plt.scatter(a[0], a[1], c="g")
plt.scatter(b[0], b[1], c="g")
plt.scatter(c[0], c[1], c="g")
# plotting the circles around the testing data points
circle1 = plt.Circle((a[0], a[1]), radius_a, fill=False)
plt.gca().add_artist(circle1)
circle2 = plt.Circle((b[0], b[1]), radius_b, fill=False)
plt.gca().add_artist(circle2)
circle3 = plt.Circle((c[0], c[1]), radius_c, fill=False)
plt.gca().add_artist(circle3)
plt.xlim((-0.5, 5.5))
plt.ylim((-0.5, 5.5))
plt.xlabel("x-axis")
plt.ylabel("y-axis")
plt.show()
| [
"matplotlib.pyplot.Circle",
"numpy.unique",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show"
] | [((1118, 1145), 'pandas.read_csv', 'pd.read_csv', (['"""classify.csv"""'], {}), "('classify.csv')\n", (1129, 1145), True, 'import pandas as pd\n'), ((1196, 1220), 'numpy.array', 'np.array', (["df[['a', 'b']]"], {}), "(df[['a', 'b']])\n", (1204, 1220), True, 'import numpy as np\n'), ((1262, 1278), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (1270, 1278), True, 'import numpy as np\n'), ((1284, 1300), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (1292, 1300), True, 'import numpy as np\n'), ((1306, 1322), 'numpy.array', 'np.array', (['[4, 4]'], {}), '([4, 4])\n', (1314, 1322), True, 'import numpy as np\n'), ((1790, 1816), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1800, 1816), True, 'import matplotlib.pyplot as plt\n'), ((1942, 1972), 'matplotlib.pyplot.scatter', 'plt.scatter', (['a[0]', 'a[1]'], {'c': '"""g"""'}), "(a[0], a[1], c='g')\n", (1953, 1972), True, 'import matplotlib.pyplot as plt\n'), ((1974, 2004), 'matplotlib.pyplot.scatter', 'plt.scatter', (['b[0]', 'b[1]'], {'c': '"""g"""'}), "(b[0], b[1], c='g')\n", (1985, 2004), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2036), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c[0]', 'c[1]'], {'c': '"""g"""'}), "(c[0], c[1], c='g')\n", (2017, 2036), True, 'import matplotlib.pyplot as plt\n'), ((2103, 2149), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(a[0], a[1])', 'radius_a'], {'fill': '(False)'}), '((a[0], a[1]), radius_a, fill=False)\n', (2113, 2149), True, 'import matplotlib.pyplot as plt\n'), ((2192, 2238), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(b[0], b[1])', 'radius_b'], {'fill': '(False)'}), '((b[0], b[1]), radius_b, fill=False)\n', (2202, 2238), True, 'import matplotlib.pyplot as plt\n'), ((2281, 2327), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(c[0], c[1])', 'radius_c'], {'fill': '(False)'}), '((c[0], c[1]), radius_c, fill=False)\n', (2291, 2327), True, 'import matplotlib.pyplot as plt\n'), ((2360, 2381), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.5, 5.5)'], {}), '((-0.5, 5.5))\n', (2368, 2381), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2404), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.5, 5.5)'], {}), '((-0.5, 5.5))\n', (2391, 2404), True, 'import matplotlib.pyplot as plt\n'), ((2406, 2426), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-axis"""'], {}), "('x-axis')\n", (2416, 2426), True, 'import matplotlib.pyplot as plt\n'), ((2428, 2448), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-axis"""'], {}), "('y-axis')\n", (2438, 2448), True, 'import matplotlib.pyplot as plt\n'), ((2450, 2460), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2458, 2460), True, 'import matplotlib.pyplot as plt\n'), ((924, 964), 'numpy.unique', 'np.unique', (['neighbors'], {'return_counts': '(True)'}), '(neighbors, return_counts=True)\n', (933, 964), True, 'import numpy as np\n'), ((1885, 1940), 'matplotlib.pyplot.scatter', 'plt.scatter', (['k[0]', 'k[1]'], {'c': 'colors[v]', 'marker': 'markers[v]'}), '(k[0], k[1], c=colors[v], marker=markers[v])\n', (1896, 1940), True, 'import matplotlib.pyplot as plt\n'), ((2151, 2160), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2158, 2160), True, 'import matplotlib.pyplot as plt\n'), ((2240, 2249), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2247, 2249), True, 'import matplotlib.pyplot as plt\n'), ((2329, 2338), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2336, 2338), True, 'import matplotlib.pyplot as plt\n')] |
from typing import List, Dict
from Data import Data
from Point import Point
from Logging import Logging
from KnnClassifier import KnnClassifier, KernelType, CoordinateSystem
from Stat import Stat, StatSvm
from Metrics import Metrics
import numpy as np
import copy
import random
from SvmClassifier import *
class DataAnalyzer:
    """Static benchmarking helpers for SVM and KNN classifiers on 2-D labelled
    points.

    All methods expect a list of ``Point`` objects carrying ``x``, ``y`` and a
    binary ``label`` (0/1), split the data with k-fold cross validation, and
    print summary statistics (f-score, p-value, Wilcoxon test) to stdout.
    """
    @staticmethod
    def analyze_svm(data:List[Point]):
        """Grid-search SVM kernels over several fold counts; print the best
        configurations by f-score and p-value.

        NOTE(review): the folds are built once with ``numberOfFolds`` (10);
        the outer ``numFolds`` loop only labels the stats, it never re-splits
        the data -- confirm this is intended.
        """
        # x = np.array([[item.x, item.y] for item in data])
        # y = np.array([-1 if item.label == 0 else 1 for item in data])
        # x_train = x[:100]
        # y_train = y[:100]
        # x_test = x[100:]
        # y_test = y[100:]
        # svm = SvmClassifier(kernel=polynomial_kernel)
        # svm.fit(x_train,y_train)
        # res = svm.predict(x_test)
        stats = []
        kernels = [polynomial_kernel, linear_kernel, gaussian_kernel]
        numberOfFolds = 10
        # Deep copy so the cross-validation split cannot mutate the caller's list.
        dataCopy = copy.deepcopy(data)
        trainData, testData = DataAnalyzer.make_cross_validation(dataCopy, numberOfFolds)
        for numFolds in range(4, numberOfFolds):
            for kernel in kernels:
                f_scores = []
                p_values = []
                t_test = ""
                for i in range(len(trainData)):
                    # SVM works with {-1, +1} targets; map label 0 -> -1.
                    x_train = np.array([[item.x, item.y] for item in trainData[i]])
                    y_train = np.array([-1 if item.label == 0 else 1 for item in trainData[i]])
                    x_test = np.array([[item.x, item.y] for item in testData[i]])
                    y_test = np.array([-1 if item.label == 0 else 1 for item in testData[i]])
                    classifier = SvmClassifier(kernel=kernel)
                    classifier.fit(x_train,y_train)
                    predictions = classifier.predict(x_test)
                    predictions = list(predictions)
                    # Map back to {0, 1} before computing the metrics.
                    predictions = np.array([0 if item == -1 else 1 for item in predictions])
                    y_test = np.array([0 if item == -1 else 1 for item in y_test])
                    f_score = Metrics.f_score(y_test, predictions)
                    f_scores.append(f_score)
                    p_value = Metrics.p_value(y_test, predictions, 2)
                    p_values.append(p_value[1])
                    # Only the last fold's Wilcoxon verdict is kept.
                    t_test = Metrics.t_test(y_test, predictions)
                avFscore = DataAnalyzer.calculateAverage(f_scores)
                avPvalue = DataAnalyzer.calculateAverage(p_values)
                stat = StatSvm(numFolds, kernel, avFscore, avPvalue, t_test)
                #print(stat)
                stats.append(stat)
        print("SVM ---------------------------------------------------------")
        print("")
        print("Max f_score general")
        print(max(stats, key=lambda x: x.f_score))
        print("Max p value general")
        print(max(stats, key=lambda x: x.p_value))
        print("Max t Wilcoxon general")
        # NOTE(review): this reuses the p_value key; t_test is a string, so
        # there is no numeric Wilcoxon key here -- confirm this is intended.
        print(max(stats, key=lambda x: x.p_value))
        print("")
        print("SVM ---------------------------------------------------------")
    @staticmethod
    def analyze_svm_one(data:List[Point], kernel = polynomial_kernel, numFolds:int = 4):
        """Run one SVM configuration with ``numFolds``-fold cross validation,
        print its stats and plot a confusion matrix.

        Returns a tuple ``(f_scores, globalPredict)``: per-fold f-scores and
        the concatenated predictions over all test folds.
        NOTE(review): unlike analyze_svm, ``data`` is split in place without a
        deepcopy -- confirm callers do not rely on its order.
        """
        trainData, testData = DataAnalyzer.make_cross_validation(data, numFolds)
        stats = []
        globalReal= []
        globalPredict = []
        f_scores = []
        p_values = []
        t_test = ""
        for i in range(len(trainData)):
            # SVM works with {-1, +1} targets; map label 0 -> -1.
            x_train = np.array([[item.x, item.y] for item in trainData[i]])
            y_train = np.array([-1 if item.label == 0 else 1 for item in trainData[i]])
            x_test = np.array([[item.x, item.y] for item in testData[i]])
            y_test = np.array([-1 if item.label == 0 else 1 for item in testData[i]])
            classifier = SvmClassifier(kernel=kernel)
            classifier.fit(x_train,y_train)
            predictions = classifier.predict(x_test)
            predictions = list(predictions)
            # Back to {0, 1} labels for the metrics and the confusion matrix.
            predictions = [0 if item == -1 else 1 for item in predictions]
            y_test = [0 if item == -1 else 1 for item in y_test]
            f_score = Metrics.f_score(y_test, predictions)
            f_scores.append(f_score)
            t_test = Metrics.t_test(y_test, predictions)
            for item in y_test:
                globalReal.append(item)
            for item in predictions:
                globalPredict.append(item)
        avFscore = DataAnalyzer.calculateAverage(f_scores)
        stat = StatSvm(numFolds, kernel, avFscore, 0, t_test)
        #print(stat)
        stats.append(stat)
        print("SVM ---------------------------------------------------------")
        print("")
        print("Max f_score general")
        print(max(stats, key=lambda x: x.f_score))
        print("Max p value general")
        print(max(stats, key=lambda x: x.p_value))
        print("Max t Wilcoxon general")
        print(max(stats, key=lambda x: x.p_value))
        print("")
        Metrics.plot_confusion_matrix(globalReal, globalPredict)
        print("SVM ---------------------------------------------------------")
        return f_scores, globalPredict
    @staticmethod
    def analyze_knn_one(data:List[Point], kNeighbors:int = 6, kernel:KernelType = KernelType.E, numFolds:int = 4, coordinateSystem:CoordinateSystem = CoordinateSystem.Cartesian):
        """Run one KNN configuration with ``numFolds``-fold cross validation,
        print its stats and plot a confusion matrix.

        Returns a tuple ``(f_scores, globalPredict)`` like analyze_svm_one.
        The Minkowski power is fixed at 2 (Euclidean distance).
        """
        trainData, testData = DataAnalyzer.make_cross_validation(data, numFolds)
        stats = []
        globalReal= []
        globalPredict = []
        f_scores = []
        p_values = []
        t_test = ""
        for i in range(len(trainData)):
            classifier = KnnClassifier()
            classifier.train(trainData[i], [0,1],kNeighbors,2,kernel,coordinateSystem)
            test_item = testData[i]
            predictions = classifier.predict(test_item)
            real_data = [item.label for item in test_item]
            f_score = Metrics.f_score(real_data, predictions)
            f_scores.append(f_score)
            p_value = Metrics.p_value(real_data, predictions, 2)
            p_values.append(p_value[1])
            # Only the last fold's Wilcoxon verdict is kept.
            t_test = Metrics.t_test(real_data, predictions)
            for item in real_data:
                globalReal.append(item)
            for item in predictions:
                globalPredict.append(item)
        avFscore = DataAnalyzer.calculateAverage(f_scores)
        stat = Stat(numFolds, kNeighbors, 2, kernel, coordinateSystem, avFscore, 0, t_test)
        #print(stat)
        stats.append(stat)
        #print(np.array([str(i) for i in stats]).T)
        print("KNN ---------------------------------------------------------")
        print("")
        print("Max f_score general")
        print(max(stats, key=lambda x: x.f_score))
        print("Max p value general")
        print(max(stats, key=lambda x: x.p_value))
        print("Max t Wilcoxon general")
        print(max(stats, key=lambda x: x.p_value))
        print("")
        Metrics.plot_confusion_matrix(globalReal, globalPredict)
        print("KNN ---------------------------------------------------------")
        return f_scores, globalPredict
    @staticmethod
    def make_cross_validation(points:List[Point], folds_count: int):
        """Split ``points`` into ``folds_count`` (train, test) pairs.

        Fold i uses one contiguous slice of length ``len(points)//folds_count``
        (rounded) as the test set and everything else as the training set.
        Returns ``(train_data, test_data)``, two lists of ``folds_count`` lists.
        """
        fold_length = round(len(points) / folds_count)
        train_data = list()
        test_data = list()
        for i in range(folds_count):
            # n is the (exclusive) end index of fold i, counted from the back.
            n = (folds_count-i) * fold_length
            train_fold = points[: n - fold_length] + points[n:]
            test_fold = points[n - fold_length : n]
            train_data.append(train_fold)
            test_data.append(test_fold)
        return (train_data, test_data)
    @staticmethod
    def analyze_knn(data:List[Point]):
        """Grid-search KNN hyper-parameters (fold count, neighbour count,
        kernel, coordinate system) and print the best stats.

        NOTE(review): like analyze_svm, the split is built once with
        ``numberOfFolds`` (10); ``numFolds`` only labels the stats. ``N``,
        ``numberOfLabels`` and ``mumberOfPowers`` are computed but unused.
        """
        N= np.sqrt(len(data)) + 1
        numberOfLabels = 2
        numberOfNeighbors = 10
        numberOfFolds = 10
        mumberOfPowers = 3
        kernels = KernelType.as_list()
        coordinateSystems = CoordinateSystem.as_list()
        stats = []
        # Minkowski power is fixed at 2; the power sweep below is disabled.
        power = 2
        dataCopy = copy.deepcopy(data)
        trainData, testData = DataAnalyzer.make_cross_validation(dataCopy, numberOfFolds)
        for numFolds in range(4, numberOfFolds):
            for numNeighbor in range(3, numberOfNeighbors):
                #for power in range(2, mumberOfPowers):
                for coordinateSystem in coordinateSystems:
                    for kernel in kernels:
                        f_scores = []
                        p_values = []
                        t_test = ""
                        for i in range(len(trainData)):
                            classifier = KnnClassifier()
                            classifier.train(trainData[i], [0,1],numNeighbor,power,kernel,coordinateSystem)
                            test_item = testData[i]
                            predictions = classifier.predict(test_item)
                            f_score = Metrics.f_score([item.label for item in test_item], predictions)
                            f_scores.append(f_score)
                            p_value = Metrics.p_value([item.label for item in test_item], predictions, 2)
                            p_values.append(p_value[1])
                            t_test = Metrics.t_test([item.label for item in test_item], predictions)
                        avFscore = DataAnalyzer.calculateAverage(f_scores)
                        stat = Stat(numFolds, numNeighbor, power, kernel, coordinateSystem, avFscore, 0, t_test)
                        #print(stat)
                        stats.append(stat)
        #print(np.array([str(i) for i in stats]).T)
        print("KNN ---------------------------------------------------------")
        print("")
        print("Max f_score general")
        print(max(stats, key=lambda x: x.f_score))
        print("Max p value general")
        print(max(stats, key=lambda x: x.p_value))
        print("Max t Wilcoxon general")
        print(max(stats, key=lambda x: x.p_value))
        print("")
        print("KNN ---------------------------------------------------------")
    @staticmethod
    def calculateAverage(paramsArray):
        """Return the arithmetic mean of ``paramsArray``.

        Raises ZeroDivisionError when the sequence is empty.
        """
        s = 0
        for item in paramsArray:
            s += item
        return s / len(paramsArray)
| [
"copy.deepcopy",
"KnnClassifier.KnnClassifier",
"KnnClassifier.CoordinateSystem.as_list",
"Metrics.Metrics.f_score",
"Metrics.Metrics.t_test",
"Metrics.Metrics.p_value",
"KnnClassifier.KernelType.as_list",
"Stat.StatSvm",
"numpy.array",
"Stat.Stat",
"Metrics.Metrics.plot_confusion_matrix"
] | [((912, 931), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (925, 931), False, 'import copy\n'), ((4449, 4495), 'Stat.StatSvm', 'StatSvm', (['numFolds', 'kernel', 'avFscore', '(0)', 't_test'], {}), '(numFolds, kernel, avFscore, 0, t_test)\n', (4456, 4495), False, 'from Stat import Stat, StatSvm\n'), ((4937, 4993), 'Metrics.Metrics.plot_confusion_matrix', 'Metrics.plot_confusion_matrix', (['globalReal', 'globalPredict'], {}), '(globalReal, globalPredict)\n', (4966, 4993), False, 'from Metrics import Metrics\n'), ((6340, 6416), 'Stat.Stat', 'Stat', (['numFolds', 'kNeighbors', '(2)', 'kernel', 'coordinateSystem', 'avFscore', '(0)', 't_test'], {}), '(numFolds, kNeighbors, 2, kernel, coordinateSystem, avFscore, 0, t_test)\n', (6344, 6416), False, 'from Stat import Stat, StatSvm\n'), ((6938, 6994), 'Metrics.Metrics.plot_confusion_matrix', 'Metrics.plot_confusion_matrix', (['globalReal', 'globalPredict'], {}), '(globalReal, globalPredict)\n', (6967, 6994), False, 'from Metrics import Metrics\n'), ((7866, 7886), 'KnnClassifier.KernelType.as_list', 'KernelType.as_list', ([], {}), '()\n', (7884, 7886), False, 'from KnnClassifier import KnnClassifier, KernelType, CoordinateSystem\n'), ((7915, 7941), 'KnnClassifier.CoordinateSystem.as_list', 'CoordinateSystem.as_list', ([], {}), '()\n', (7939, 7941), False, 'from KnnClassifier import KnnClassifier, KernelType, CoordinateSystem\n'), ((8002, 8021), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (8015, 8021), False, 'import copy\n'), ((3429, 3482), 'numpy.array', 'np.array', (['[[item.x, item.y] for item in trainData[i]]'], {}), '([[item.x, item.y] for item in trainData[i]])\n', (3437, 3482), True, 'import numpy as np\n'), ((3505, 3572), 'numpy.array', 'np.array', (['[(-1 if item.label == 0 else 1) for item in trainData[i]]'], {}), '([(-1 if item.label == 0 else 1) for item in trainData[i]])\n', (3513, 3572), True, 'import numpy as np\n'), ((3593, 3645), 'numpy.array', 'np.array', (['[[item.x, 
item.y] for item in testData[i]]'], {}), '([[item.x, item.y] for item in testData[i]])\n', (3601, 3645), True, 'import numpy as np\n'), ((3667, 3733), 'numpy.array', 'np.array', (['[(-1 if item.label == 0 else 1) for item in testData[i]]'], {}), '([(-1 if item.label == 0 else 1) for item in testData[i]])\n', (3675, 3733), True, 'import numpy as np\n'), ((4092, 4128), 'Metrics.Metrics.f_score', 'Metrics.f_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4107, 4128), False, 'from Metrics import Metrics\n'), ((4187, 4222), 'Metrics.Metrics.t_test', 'Metrics.t_test', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4201, 4222), False, 'from Metrics import Metrics\n'), ((5591, 5606), 'KnnClassifier.KnnClassifier', 'KnnClassifier', ([], {}), '()\n', (5604, 5606), False, 'from KnnClassifier import KnnClassifier, KernelType, CoordinateSystem\n'), ((5869, 5908), 'Metrics.Metrics.f_score', 'Metrics.f_score', (['real_data', 'predictions'], {}), '(real_data, predictions)\n', (5884, 5908), False, 'from Metrics import Metrics\n'), ((5968, 6010), 'Metrics.Metrics.p_value', 'Metrics.p_value', (['real_data', 'predictions', '(2)'], {}), '(real_data, predictions, 2)\n', (5983, 6010), False, 'from Metrics import Metrics\n'), ((6072, 6110), 'Metrics.Metrics.t_test', 'Metrics.t_test', (['real_data', 'predictions'], {}), '(real_data, predictions)\n', (6086, 6110), False, 'from Metrics import Metrics\n'), ((2460, 2513), 'Stat.StatSvm', 'StatSvm', (['numFolds', 'kernel', 'avFscore', 'avPvalue', 't_test'], {}), '(numFolds, kernel, avFscore, avPvalue, t_test)\n', (2467, 2513), False, 'from Stat import Stat, StatSvm\n'), ((1275, 1328), 'numpy.array', 'np.array', (['[[item.x, item.y] for item in trainData[i]]'], {}), '([[item.x, item.y] for item in trainData[i]])\n', (1283, 1328), True, 'import numpy as np\n'), ((1359, 1426), 'numpy.array', 'np.array', (['[(-1 if item.label == 0 else 1) for item in trainData[i]]'], {}), '([(-1 if item.label == 0 else 1) for item 
in trainData[i]])\n', (1367, 1426), True, 'import numpy as np\n'), ((1455, 1507), 'numpy.array', 'np.array', (['[[item.x, item.y] for item in testData[i]]'], {}), '([[item.x, item.y] for item in testData[i]])\n', (1463, 1507), True, 'import numpy as np\n'), ((1537, 1603), 'numpy.array', 'np.array', (['[(-1 if item.label == 0 else 1) for item in testData[i]]'], {}), '([(-1 if item.label == 0 else 1) for item in testData[i]])\n', (1545, 1603), True, 'import numpy as np\n'), ((1865, 1925), 'numpy.array', 'np.array', (['[(0 if item == -1 else 1) for item in predictions]'], {}), '([(0 if item == -1 else 1) for item in predictions])\n', (1873, 1925), True, 'import numpy as np\n'), ((1953, 2008), 'numpy.array', 'np.array', (['[(0 if item == -1 else 1) for item in y_test]'], {}), '([(0 if item == -1 else 1) for item in y_test])\n', (1961, 2008), True, 'import numpy as np\n'), ((2038, 2074), 'Metrics.Metrics.f_score', 'Metrics.f_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (2053, 2074), False, 'from Metrics import Metrics\n'), ((2150, 2189), 'Metrics.Metrics.p_value', 'Metrics.p_value', (['y_test', 'predictions', '(2)'], {}), '(y_test, predictions, 2)\n', (2165, 2189), False, 'from Metrics import Metrics\n'), ((2267, 2302), 'Metrics.Metrics.t_test', 'Metrics.t_test', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (2281, 2302), False, 'from Metrics import Metrics\n'), ((9363, 9448), 'Stat.Stat', 'Stat', (['numFolds', 'numNeighbor', 'power', 'kernel', 'coordinateSystem', 'avFscore', '(0)', 't_test'], {}), '(numFolds, numNeighbor, power, kernel, coordinateSystem, avFscore, 0,\n t_test)\n', (9367, 9448), False, 'from Stat import Stat, StatSvm\n'), ((8590, 8605), 'KnnClassifier.KnnClassifier', 'KnnClassifier', ([], {}), '()\n', (8603, 8605), False, 'from KnnClassifier import KnnClassifier, KernelType, CoordinateSystem\n'), ((8876, 8940), 'Metrics.Metrics.f_score', 'Metrics.f_score', (['[item.label for item in test_item]', 'predictions'], {}), 
'([item.label for item in test_item], predictions)\n', (8891, 8940), False, 'from Metrics import Metrics\n'), ((9032, 9099), 'Metrics.Metrics.p_value', 'Metrics.p_value', (['[item.label for item in test_item]', 'predictions', '(2)'], {}), '([item.label for item in test_item], predictions, 2)\n', (9047, 9099), False, 'from Metrics import Metrics\n'), ((9193, 9256), 'Metrics.Metrics.t_test', 'Metrics.t_test', (['[item.label for item in test_item]', 'predictions'], {}), '([item.label for item in test_item], predictions)\n', (9207, 9256), False, 'from Metrics import Metrics\n')] |
import torch
import numpy as np
from skimage import io
from .fit import forward_pass
from .notebook_utils import draw_pcd_bg, show_nb
from utils.common import tti, to_sigm, itt, dict2device
def infer_pid(dataloader, outfit_codes_dict, image_paths_dict,
              draping_network, converter, ndesc_stack, renderer,
              pid, device='cuda:0'):
    '''
    Infer the draping network and renderer (rasterizer) to predict the clothing point cloud with visible points.
    For every batch, projects both the source (body) point cloud and the
    visible part of the predicted outfit point cloud into image space and
    shows them next to the RGB photo of person ``pid``.
    '''

    def _project(points, intrinsics):
        # Project camera-space points with intrinsics K, then apply the
        # perspective divide (x/z, y/z) in place on the projected copy.
        screen = points @ intrinsics.T
        screen[:, :2] /= screen[:, 2:]
        return screen

    # > predict: fetch this person's outfit style code.
    outfit_code = torch.from_numpy(outfit_codes_dict[pid]).to(device)
    print(f'Current style: pid={pid}, shape={outfit_code.shape}')

    for data_dict, target_dict in dataloader:
        data_dict = dict2device(data_dict, device)
        data_dict['zrotMatrix_c3d'] = None
        body_pcd = data_dict['source_pcd'][0]

        out_dict = forward_pass(data_dict, draping_network, converter, ndesc_stack, renderer,
                                device=device, outfit_code=outfit_code)

        # > visualize: project body and (visible) cloth points to the screen.
        K = data_dict['K'].squeeze(0).to(device).float()
        body_screen = _project(body_pcd, K)

        visible_cloth = out_dict['cloth_pcd'][0][out_dict['visibilty_mask'][0]]
        cloth_screen = _project(visible_cloth, K)

        # Replicate the predicted segmentation to 3 channels as the backdrop.
        backdrop = tti(to_sigm(target_dict['real_segm']))
        backdrop = np.tile(backdrop[:, :, None], (1, 1, 3))

        smpl_img = draw_pcd_bg(backdrop, body_screen[:, :2])
        cloth_img = draw_pcd_bg(backdrop, cloth_screen[:, :2])
        rgb_img = io.imread(image_paths_dict[pid])

        show_nb([rgb_img, smpl_img, cloth_img],
               title=f'Outfit point cloud fitted to a single image',
               titles=['rgb', 'source pcd (cutted smpl)', 'outfit pcd (visible points only)'], n_cols=3)
| [
"numpy.tile",
"torch.from_numpy",
"skimage.io.imread",
"utils.common.to_sigm",
"utils.common.dict2device"
] | [((1420, 1462), 'numpy.tile', 'np.tile', (['cloth_mask[:, :, None]', '(1, 1, 3)'], {}), '(cloth_mask[:, :, None], (1, 1, 3))\n', (1427, 1462), True, 'import numpy as np\n'), ((1597, 1629), 'skimage.io.imread', 'io.imread', (['image_paths_dict[pid]'], {}), '(image_paths_dict[pid])\n', (1606, 1629), False, 'from skimage import io\n'), ((733, 763), 'utils.common.dict2device', 'dict2device', (['data_dict', 'device'], {}), '(data_dict, device)\n', (744, 763), False, 'from utils.common import tti, to_sigm, itt, dict2device\n'), ((1368, 1401), 'utils.common.to_sigm', 'to_sigm', (["target_dict['real_segm']"], {}), "(target_dict['real_segm'])\n", (1375, 1401), False, 'from utils.common import tti, to_sigm, itt, dict2device\n'), ((528, 568), 'torch.from_numpy', 'torch.from_numpy', (['outfit_codes_dict[pid]'], {}), '(outfit_codes_dict[pid])\n', (544, 568), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 30/11/18
@author: XXX
"""
import numpy as np
from RecSysFramework.DataManager import Dataset
from .DatasetPostprocessing import DatasetPostprocessing
class ImplicitURM(DatasetPostprocessing):
    """
    This class transforms the URM from explicit (or whatever data content it had) to implicit.

    Every rating >= ``min_rating_threshold`` becomes 1.0; everything below the
    threshold is removed from the sparse matrix.
    """

    def __init__(self, min_rating_threshold=0):
        """
        :param min_rating_threshold: ratings strictly below this value are
            dropped; the remaining interactions are binarized to 1.0.
        """
        super(ImplicitURM, self).__init__()
        self.min_rating_threshold = min_rating_threshold

    def get_name(self):
        """Return a short identifier embedding the threshold, e.g. ``implicit_3``."""
        return "implicit_{}".format(self.min_rating_threshold)

    def apply(self, dataset):
        """Return a new Dataset whose URMs are binarized copies of ``dataset``'s.

        ICMs, UCMs and all mappers are carried over unchanged; this
        postprocessing is appended to the dataset's postprocessing list.
        """
        new_URM_dict = {}
        for URM_name in dataset.get_URM_names():
            new_URM_dict[URM_name] = dataset.get_URM(URM_name)
            # Mark entries below the threshold for removal.
            # Fix: ``np.bool`` was removed in NumPy 1.24; use the builtin bool dtype.
            mask = np.ones(new_URM_dict[URM_name].data.size, dtype=bool)
            mask[new_URM_dict[URM_name].data >= self.min_rating_threshold] = False
            new_URM_dict[URM_name].data[mask] = 0.0
            new_URM_dict[URM_name].eliminate_zeros()
            # Every surviving interaction becomes an implicit 1.0.
            new_URM_dict[URM_name].data[:] = 1.0

        return Dataset(dataset.get_name(), base_folder=dataset.get_base_folder(),
                       postprocessings=dataset.get_postprocessings() + [self],
                       URM_dict=new_URM_dict, URM_mappers_dict=dataset.get_URM_mappers_dict(),
                       ICM_dict=dataset.get_ICM_dict(), ICM_mappers_dict=dataset.get_ICM_mappers_dict(),
                       UCM_dict=dataset.get_UCM_dict(), UCM_mappers_dict=dataset.get_UCM_mappers_dict())
| [
"numpy.ones"
] | [((800, 856), 'numpy.ones', 'np.ones', (['new_URM_dict[URM_name].data.size'], {'dtype': 'np.bool'}), '(new_URM_dict[URM_name].data.size, dtype=np.bool)\n', (807, 856), True, 'import numpy as np\n')] |
## THIS IS A MODIFIED EXAMPLE FROM THE CIRQ WEBSITE ##
##
import cirq
import numpy as np
import matplotlib.pylab as plt
##
"""Plot the probability of measuring a qubit in the ground state."""
# A single named qubit driven by chains of small rotations.
a = cirq.NamedQubit('a')
# Number of identical pi/50 rotation gates chained in each circuit.
num_angles = 200
# Number of times to sample at every simulation step.
repetitions = 100
simulator = cirq.Simulator()
##
# Rotation axis -> (gate constructor, matplotlib marker style).
_styles = {
    'Rx': (cirq.Rx, 'xr'),
    'Ry': (cirq.Ry, 'oy'),
    'Rz': (cirq.Rz, 'bs'),
}
for rot in ['Rx','Ry','Rz']:
    gate, mark = _styles[rot]
    circuit = cirq.Circuit([gate(rads=np.pi / 50.0)(a) for _ in range(num_angles)])
    # Estimated probability of |0> after each moment of the circuit.
    sampled_probs = []
    for step in simulator.simulate_moment_steps(circuit):
        samples = step.sample([a], repetitions=repetitions)
        sampled_probs.append(np.sum(samples, axis=0)[0] / repetitions)
    # Plot this rotation axis' ground-state probability curve.
    plt.style.use('seaborn-whitegrid')
    plt.plot(sampled_probs, mark)
##
plt.xlabel("Step")
plt.ylabel("Probability of ground state")
plt.legend(['Rx','Ry','Rz'])
plt.show()
| [
"cirq.Ry",
"cirq.Rx",
"cirq.NamedQubit",
"matplotlib.pylab.legend",
"matplotlib.pylab.xlabel",
"cirq.Simulator",
"numpy.sum",
"cirq.Rz",
"matplotlib.pylab.show",
"matplotlib.pylab.style.use",
"matplotlib.pylab.plot",
"matplotlib.pylab.ylabel"
] | [((223, 243), 'cirq.NamedQubit', 'cirq.NamedQubit', (['"""a"""'], {}), "('a')\n", (238, 243), False, 'import cirq\n'), ((372, 388), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (386, 388), False, 'import cirq\n'), ((1304, 1322), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Step"""'], {}), "('Step')\n", (1314, 1322), True, 'import matplotlib.pylab as plt\n'), ((1324, 1365), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Probability of ground state"""'], {}), "('Probability of ground state')\n", (1334, 1365), True, 'import matplotlib.pylab as plt\n'), ((1368, 1398), 'matplotlib.pylab.legend', 'plt.legend', (["['Rx', 'Ry', 'Rz']"], {}), "(['Rx', 'Ry', 'Rz'])\n", (1378, 1398), True, 'import matplotlib.pylab as plt\n'), ((1398, 1408), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (1406, 1408), True, 'import matplotlib.pylab as plt\n'), ((1225, 1259), 'matplotlib.pylab.style.use', 'plt.style.use', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (1238, 1259), True, 'import matplotlib.pylab as plt\n'), ((1265, 1294), 'matplotlib.pylab.plot', 'plt.plot', (['sampled_probs', 'mark'], {}), '(sampled_probs, mark)\n', (1273, 1294), True, 'import matplotlib.pylab as plt\n'), ((1066, 1089), 'numpy.sum', 'np.sum', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (1072, 1089), True, 'import numpy as np\n'), ((475, 501), 'cirq.Rx', 'cirq.Rx', ([], {'rads': '(np.pi / 50.0)'}), '(rads=np.pi / 50.0)\n', (482, 501), False, 'import cirq\n'), ((611, 637), 'cirq.Ry', 'cirq.Ry', ([], {'rads': '(np.pi / 50.0)'}), '(rads=np.pi / 50.0)\n', (618, 637), False, 'import cirq\n'), ((747, 773), 'cirq.Rz', 'cirq.Rz', ([], {'rads': '(np.pi / 50.0)'}), '(rads=np.pi / 50.0)\n', (754, 773), False, 'import cirq\n')] |
"""
Licensed under the Unlicense License;
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://unlicense.org
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cv2
# Sigmoid activation function
def sigmoid(x):
    """Logistic function: squash a scalar or ndarray into the open interval (0, 1)."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
# Define training array: one flattened binary bitmap per digit image.
training_inputs = []
# Expected class for each of the ten training bitmaps 0.bmp .. 9.bmp.
training_outputs = [1, 7, 3, 2, 5, 4, 8, 6, 9, 0]
for i in range(0, 10):
    # Invert the bitmap so ink pixels become 255, then flatten to a vector.
    train_binary = cv2.bitwise_not(cv2.imread('train_values/' + str(i) + '.bmp', cv2.IMREAD_UNCHANGED)).flatten()
    # Map inked pixels to 1; assumes the bitmaps are strictly black/white -- TODO confirm.
    train_binary[train_binary == 255] = 1
    training_inputs.append(train_binary)
training_inputs = np.array(training_inputs)
training_outputs = np.array([training_outputs])
# Scale targets from 0..9 into 0..1 so they fit the sigmoid's output range.
training_outputs = training_outputs / 9
training_outputs = training_outputs.T
print('Training inputs: ')
print(training_inputs) # 0 - 1
print()
print('Training outputs: ')
print(training_outputs) # 0 - 1
print()
# Define random weights (seeded for reproducibility), uniform in [-1, 1).
np.random.seed(1)
synaptic_weights = 2 * np.random.random((10, 1)) - 1
print('Synaptic weights: ')
print(synaptic_weights)
print()
# NOTE(review): debug leftover -- this exit() stops the script here, so the
# training loop below never runs. Remove it to actually train the network.
exit()
# Predict value
def think(inputs):
    """Forward pass of the single-layer network.

    Casts *inputs* to float, multiplies by the global ``synaptic_weights``
    and applies the sigmoid activation, yielding outputs in (0, 1).
    """
    global synaptic_weights
    weighted_sum = np.dot(inputs.astype(float), synaptic_weights)
    return sigmoid(weighted_sum)
# Training function: plain gradient descent on the single-layer network.
# NOTE(review): unreachable as written -- the exit() call earlier in the
# script terminates the process before this loop starts.
for i in range(3000):
    output = think(training_inputs)
    error = training_outputs - output
    # Backpropagation: delta = error * sigmoid'(output), applied per input.
    adjustments = np.dot(training_inputs.T, error * (output * (1 - output)))
    synaptic_weights += adjustments
    # One-off debug dump near the end of training.
    if i == 2000:
        print('Output: ')
        print(output)
        print('Error: ')
        print(error)
        print('Adjustments : ')
        print(adjustments)
        print('Synaptic_weights: ')
        print(synaptic_weights)
    # Progress report: rescale the (0..1) outputs back to digit values 0..9.
    if i % 10 == 0:
        print('Iteration ' + str(i) + ' Predicted values: ' + str((np.around(think(training_inputs).flatten() * 9))
                                                                   .astype(int)))
print()
print('-------------')
print('Predicted values: ')
print((np.around(think(training_inputs).flatten() * 9)).astype(int))
print()
print('Synaptic weights: ')
print(synaptic_weights)
print('-------------')
| [
"numpy.random.random",
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.random.seed"
] | [((971, 996), 'numpy.array', 'np.array', (['training_inputs'], {}), '(training_inputs)\n', (979, 996), True, 'import numpy as np\n'), ((1017, 1045), 'numpy.array', 'np.array', (['[training_outputs]'], {}), '([training_outputs])\n', (1025, 1045), True, 'import numpy as np\n'), ((1298, 1315), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1312, 1315), True, 'import numpy as np\n'), ((2344, 2402), 'numpy.dot', 'np.dot', (['training_inputs.T', '(error * (output * (1 - output)))'], {}), '(training_inputs.T, error * (output * (1 - output)))\n', (2350, 2402), True, 'import numpy as np\n'), ((1340, 1365), 'numpy.random.random', 'np.random.random', (['(10, 1)'], {}), '((10, 1))\n', (1356, 1365), True, 'import numpy as np\n'), ((2122, 2154), 'numpy.dot', 'np.dot', (['inputs', 'synaptic_weights'], {}), '(inputs, synaptic_weights)\n', (2128, 2154), True, 'import numpy as np\n'), ((614, 624), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (620, 624), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# ##########################################################################
# info: This script fetches the screen-data from Karl Deutsch Echograph 1090
#
# date: 2017-03-23
# version: 0.2.1
# author: pluschris
#
# history: V0.2: cleanup code
# V0.1: First version
#
# ##########################################################################
# Import solution :-)
import serial
import numpy
import csv
import time
import os
import sys
import matplotlib.pyplot as plt
# ##########################################################################
# config:
SERIAL_PORT = "/dev/ttyUSB0"  # serial device node of the Echograph 1090
FILE_NAME = "file_name"       # output basename for the .svg plot and .csv dump
############################################################################
def read_device(dev):
    """Drain all bytes currently being sent by *dev* and return them.

    Bytes are consumed one at a time. When the input buffer runs dry we
    wait 100 ms and re-check, so a short pause in the transmission is not
    mistaken for the end of the device's answer.
    """
    received = b''
    while True:
        received += dev.read()
        if dev.inWaiting() == 0:
            time.sleep(0.1)
            if dev.inWaiting() == 0:
                return received
############################################################################
#Start Measurement:
print('Opening Device...')
dev = serial.Serial(SERIAL_PORT,57600)
# send commands to init measurement: STX 'AG' ETX asks for the amplitude data.
command_read_amplitude=b'\x02AG\x03'
dev.write(command_read_amplitude)
############################################################################
# read data, select the right bytes for screen data:
# the fixed offsets cut away the header (6576 bytes) and trailer (24 bytes).
data=read_device(dev)[6576:-24]
############################################################################
# convert bits to something readable:
i=0
x=1
x_list=[]
y_list=[]
# Each sample is 4 ASCII-hex characters: two bytes (diff, fix) per column.
while i<len(data):
    #maximum value is 0xE0=224 when Amplitude is 0, min value is 0 when Amplitude is 100%:
    #First Byte is difference to second byte, second byte is fix value:
    #Devices sends HEX as ASCII
    diff = int(data[i:i+2], 16)
    fix=224-int(data[i+2:i+4], 16)
    diff=fix-diff
    #let's scale to 10 divisions
    fix=float(fix)/224*10
    diff=float(diff)/224*10
    # Two y-values share one x-column, drawing a vertical segment per sample.
    x_list.append(x)
    x_list.append(x)
    if i>0:
        #calc in which order we have to add the points to get a smooth drawing:
        if y_list[-1] <= fix and y_list[-1] <= diff:
            y_list.append(diff) # small value first: diff is always smaller than fix
            y_list.append(fix)
        else:
            y_list.append(fix) #large value first
            y_list.append(diff)
    else:
        y_list.append(diff)
        y_list.append(fix)
    x+=1
    i+=4
    #we just need the grid-area of the screen: 10 divisions is x=350 so let's cut off:
    if x>350:
        break
#scale x-axis to 10 divisions:
x_list=[float(i)/350*10 for i in x_list]
############################################################################
#plot/print: recreate the instrument's 10x10-division screen grid.
print(y_list)
plt.figure(figsize=(10,5))
plt.gcf().patch.set_facecolor('white')
plt.plot(x_list, y_list, ls='-', marker=None,color='darkcyan')
plt.xlim(0,10)
plt.ylim(0,10)
plt.xticks(numpy.arange(0,11,1))
plt.gcf().axes[0].yaxis.grid(True, which='major')
plt.gcf().axes[0].xaxis.grid(True, which='major')
# Hide tick labels and marks -- only the grid itself should be visible.
plt.gcf().axes[0].set_xticklabels([])
plt.gcf().axes[0].set_yticklabels([])
plt.gcf().axes[0].xaxis.set_ticks_position('none')
plt.gcf().axes[0].yaxis.set_ticks_position('none')
plt.tight_layout()# adjust figure to fit to labels
############################################################################
#save: ask before overwriting existing output files ('n' aborts the script).
if os.path.isfile(FILE_NAME + ".svg") or os.path.isfile(FILE_NAME + ".csv"):
    if "n"==input("Do you want to replace the output-file(s)?[y/n]"):
        sys.exit()
plt.savefig(FILE_NAME + ".svg")
with open(FILE_NAME+".csv", "w") as file:
    writer = csv.writer(file)
    i=0
    while i<len(x_list):
        writer.writerow([x_list[i],y_list[i]])
        i += 1
plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"csv.writer",
"time.sleep",
"os.path.isfile",
"matplotlib.pyplot.figure",
"serial.Serial",
"matplotlib.pyplot.tight_layout",
"sys.exit",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.arange",
"matp... | [((1102, 1135), 'serial.Serial', 'serial.Serial', (['SERIAL_PORT', '(57600)'], {}), '(SERIAL_PORT, 57600)\n', (1115, 1135), False, 'import serial\n'), ((2748, 2775), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (2758, 2775), True, 'import matplotlib.pyplot as plt\n'), ((2814, 2877), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {'ls': '"""-"""', 'marker': 'None', 'color': '"""darkcyan"""'}), "(x_list, y_list, ls='-', marker=None, color='darkcyan')\n", (2822, 2877), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2894), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(10)'], {}), '(0, 10)\n', (2887, 2894), True, 'import matplotlib.pyplot as plt\n'), ((2894, 2909), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(10)'], {}), '(0, 10)\n', (2902, 2909), True, 'import matplotlib.pyplot as plt\n'), ((3222, 3240), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3238, 3240), True, 'import matplotlib.pyplot as plt\n'), ((3525, 3556), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(FILE_NAME + '.svg')"], {}), "(FILE_NAME + '.svg')\n", (3536, 3556), True, 'import matplotlib.pyplot as plt\n'), ((3734, 3744), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3742, 3744), True, 'import matplotlib.pyplot as plt\n'), ((2920, 2942), 'numpy.arange', 'numpy.arange', (['(0)', '(11)', '(1)'], {}), '(0, 11, 1)\n', (2932, 2942), False, 'import numpy\n'), ((3362, 3396), 'os.path.isfile', 'os.path.isfile', (["(FILE_NAME + '.svg')"], {}), "(FILE_NAME + '.svg')\n", (3376, 3396), False, 'import os\n'), ((3400, 3434), 'os.path.isfile', 'os.path.isfile', (["(FILE_NAME + '.csv')"], {}), "(FILE_NAME + '.csv')\n", (3414, 3434), False, 'import os\n'), ((3613, 3629), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (3623, 3629), False, 'import csv\n'), ((3514, 3524), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3522, 3524), False, 'import sys\n'), ((868, 883), 'time.sleep', 
'time.sleep', (['(0.1)'], {}), '(0.1)\n', (878, 883), False, 'import time\n'), ((2775, 2784), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2782, 2784), True, 'import matplotlib.pyplot as plt\n'), ((3044, 3053), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3051, 3053), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3091), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3089, 3091), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2951), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2949, 2951), True, 'import matplotlib.pyplot as plt\n'), ((2993, 3002), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3000, 3002), True, 'import matplotlib.pyplot as plt\n'), ((3120, 3129), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3127, 3129), True, 'import matplotlib.pyplot as plt\n'), ((3171, 3180), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3178, 3180), True, 'import matplotlib.pyplot as plt\n')] |
import os
import math
import pandas as pd
import sys
import numpy as np
src = sys.argv[1]
dest = sys.argv[2]

# Walk the source tree and print per-file coverage statistics from
# depth tables (tab-separated columns: id, position, depth), one
# Markdown-style table row per file: | name | % covered | mean depth |.
for subdir, dirs, files in os.walk(src):
    for file in files:
        # Build the real path inside the walked directory.
        # Fix: ``src + file`` broke for files in nested directories and for a
        # ``src`` argument given without a trailing slash.
        path = os.path.join(subdir, file)
        try:
            df = pd.read_table(path, sep='\t', names=['id', 'position', 'depth'])
        except Exception:
            # Unreadable/malformed file: report it and move on.
            # (A bare ``except:`` would also swallow KeyboardInterrupt.)
            print(path)
            continue
        if df.empty:
            continue
        # Natural log of the depth, with log(0) clamped to 0; kept for the
        # commented-out plotting code below.
        df['log(depth)'] = [math.log(i) if i != 0 else 0 for i in df['depth']]
        depth = np.mean(df['depth'])
        # Percentage of positions with coverage of at least 1x.
        per_cov = (len(df[df['depth'] >= 1]) / len(df['depth'])) * 100
        print('|' + file + ' | ' + str(per_cov) + '|' + str(depth) + '|')
        # plt.clf()
        # df['log(depth)'].plot()
        # plt.xlabel('Position', fontsize=16)
        # plt.ylabel('ln(depth)', fontsize=16)
        # plt.savefig(dest+file.replace(".tsv", ".png"))
| [
"numpy.mean",
"math.log",
"pandas.read_table",
"pandas.DataFrame",
"os.walk"
] | [((134, 146), 'os.walk', 'os.walk', (['src'], {}), '(src)\n', (141, 146), False, 'import os\n'), ((184, 198), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (196, 198), True, 'import pandas as pd\n'), ((634, 654), 'numpy.mean', 'np.mean', (["df['depth']"], {}), "(df['depth'])\n", (641, 654), True, 'import numpy as np\n'), ((229, 299), 'pandas.read_table', 'pd.read_table', (['(src + file)'], {'sep': '"""\t"""', 'names': "['id', 'position', 'depth']"}), "(src + file, sep='\\t', names=['id', 'position', 'depth'])\n", (242, 299), True, 'import pandas as pd\n'), ((514, 525), 'math.log', 'math.log', (['i'], {}), '(i)\n', (522, 525), False, 'import math\n')] |
from __future__ import division
from sklearn.metrics import confusion_matrix
import numpy as np
import sys
def ReadDataset():
# # FOR TESTING WITH REAL DATA.
import pandas as pd
# # Load CSV and columns
df = pd.read_csv("iris.csv")
data = df[list(df.columns.values)]
ind = np.arange(len(df))
np.random.shuffle(ind)
y = df['species']
X = df[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']]
for c,ik in enumerate(y.unique()):
y[y==ik] = c
X = X.get_values()[ind]
y = y.get_values()[ind]
# # Split the data into training/testing sets
X_train = X[:-75]
X_test = X[-75:]
# # Split the targets into training/testing sets
y_train = y[:-75]
y_test = y[-75:]
np.savetxt("y_train.csv", y_train, delimiter=",") # write output to file
np.savetxt("X_train.csv", X_train, delimiter=",") # write output to file
np.savetxt("X_test.csv", X_test, delimiter=",") # write output to file
np.savetxt("y_test.csv", y_test, delimiter=",") # write output to file
def CreateDataset():
from sklearn.datasets import make_moons, make_circles, make_classification
X, y = make_classification(n_samples=8000, n_classes=10, n_features=20, n_redundant=0, n_informative=8,
random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 0.1 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
ind = np.arange(len(y))
np.random.shuffle(ind)
X = X[ind]
y = y[ind]
# # Split the data into training/testing sets
X_train = X[:-1000]
X_test = X[-1000:]
# # Split the targets into training/testing sets
y_train = y[:-1000]
y_test = y[-1000:]
np.savetxt("y_train.csv", y_train, delimiter=",") # write output to file
np.savetxt("X_train.csv", X_train, delimiter=",") # write output to file
np.savetxt("X_test.csv", X_test, delimiter=",") # write output to file
np.savetxt("y_test.csv", y_test, delimiter=",") # write output to file
############# CODE FOR CLASSIFICATION AFTER THIS POINT #############
# The code before this point is not necessary to run this file.
# X_train = np.genfromtxt(sys.argv[1], delimiter=",")
# y_train = np.genfromtxt(sys.argv[2])
# X_test = np.genfromtxt(sys.argv[3], delimiter=",")
X_train = np.genfromtxt("X_train.csv", delimiter=",")
y_train = np.genfromtxt("y_train.csv", delimiter=",")
X_test = np.genfromtxt("X_test.csv", delimiter=",")
y_test = np.genfromtxt("y_test.csv", delimiter=",")
## can make more functions if required
def Countclasses(y_train): # just a counter per class
Prior = []
total = len(y_train)
K_classes = np.unique(y_train)
for i in K_classes:
Prior.append(np.uint8(y_train==i).sum()/total)
return Prior
def Probability(x, u, D): # Gaussian Distribution for MLE
exponential_term = np.exp(-0.5 * (np.matmul((x-u) , np.linalg.pinv(D)) * (x-u)).sum(-1) )
return ( exponential_term / np.sqrt(np.linalg.det(D)) ).squeeze()
def ClassConditionalDensity(X_train, y_train): #
K_classes = np.unique(y_train)
mean_y = []
cov_y = []
for i in K_classes:
mask = y_train==i
mean_y.append( X_train[mask].sum(0)/len(X_train[mask]) )
cov_y.append( np.matmul( (X_train[mask]-mean_y[-1]).T , (X_train[mask]-mean_y[-1]) )/len(X_train[mask] ) )
return mean_y, cov_y
## can make more functions if required
def pluginClassifier(X_train, y_train, X_test):
# this function returns the required output
Prior = Countclasses(y_train) # Prior Distribution
mean_y, cov_y = ClassConditionalDensity(X_train, y_train) # u and Cov parameters
Likelihood = np.zeros([X_test.shape[0], len(Prior)])
for k in range(len(Prior)):
Likelihood[:,k] = Prior[k] * Probability(X_test, mean_y[k], cov_y[k]) # computing the Likelihood for Bayes Classifier
Prob = Likelihood/Likelihood.sum(1)[:,None]
return Prob
final_outputs = pluginClassifier(X_train, y_train, X_test) # assuming final_outputs is returned from function
y_ = final_outputs.argmax(1)
m = confusion_matrix(y_test,y_)
print('Bayes Classifier')
print(m)
np.savetxt("probs_test.csv", final_outputs, delimiter=",") # write output to file
| [
"numpy.uint8",
"sklearn.metrics.confusion_matrix",
"pandas.read_csv",
"numpy.unique",
"numpy.linalg.pinv",
"numpy.linalg.det",
"sklearn.datasets.make_classification",
"numpy.matmul",
"numpy.savetxt",
"numpy.genfromtxt",
"numpy.random.RandomState",
"numpy.random.shuffle"
] | [((2317, 2360), 'numpy.genfromtxt', 'np.genfromtxt', (['"""X_train.csv"""'], {'delimiter': '""","""'}), "('X_train.csv', delimiter=',')\n", (2330, 2360), True, 'import numpy as np\n'), ((2371, 2414), 'numpy.genfromtxt', 'np.genfromtxt', (['"""y_train.csv"""'], {'delimiter': '""","""'}), "('y_train.csv', delimiter=',')\n", (2384, 2414), True, 'import numpy as np\n'), ((2424, 2466), 'numpy.genfromtxt', 'np.genfromtxt', (['"""X_test.csv"""'], {'delimiter': '""","""'}), "('X_test.csv', delimiter=',')\n", (2437, 2466), True, 'import numpy as np\n'), ((2476, 2518), 'numpy.genfromtxt', 'np.genfromtxt', (['"""y_test.csv"""'], {'delimiter': '""","""'}), "('y_test.csv', delimiter=',')\n", (2489, 2518), True, 'import numpy as np\n'), ((4100, 4128), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_'], {}), '(y_test, y_)\n', (4116, 4128), False, 'from sklearn.metrics import confusion_matrix\n'), ((4164, 4222), 'numpy.savetxt', 'np.savetxt', (['"""probs_test.csv"""', 'final_outputs'], {'delimiter': '""","""'}), "('probs_test.csv', final_outputs, delimiter=',')\n", (4174, 4222), True, 'import numpy as np\n'), ((225, 248), 'pandas.read_csv', 'pd.read_csv', (['"""iris.csv"""'], {}), "('iris.csv')\n", (236, 248), True, 'import pandas as pd\n'), ((321, 343), 'numpy.random.shuffle', 'np.random.shuffle', (['ind'], {}), '(ind)\n', (338, 343), True, 'import numpy as np\n'), ((750, 799), 'numpy.savetxt', 'np.savetxt', (['"""y_train.csv"""', 'y_train'], {'delimiter': '""","""'}), "('y_train.csv', y_train, delimiter=',')\n", (760, 799), True, 'import numpy as np\n'), ((827, 876), 'numpy.savetxt', 'np.savetxt', (['"""X_train.csv"""', 'X_train'], {'delimiter': '""","""'}), "('X_train.csv', X_train, delimiter=',')\n", (837, 876), True, 'import numpy as np\n'), ((904, 951), 'numpy.savetxt', 'np.savetxt', (['"""X_test.csv"""', 'X_test'], {'delimiter': '""","""'}), "('X_test.csv', X_test, delimiter=',')\n", (914, 951), True, 'import numpy as np\n'), ((979, 1026), 
'numpy.savetxt', 'np.savetxt', (['"""y_test.csv"""', 'y_test'], {'delimiter': '""","""'}), "('y_test.csv', y_test, delimiter=',')\n", (989, 1026), True, 'import numpy as np\n'), ((1162, 1302), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(8000)', 'n_classes': '(10)', 'n_features': '(20)', 'n_redundant': '(0)', 'n_informative': '(8)', 'random_state': '(0)', 'n_clusters_per_class': '(1)'}), '(n_samples=8000, n_classes=10, n_features=20,\n n_redundant=0, n_informative=8, random_state=0, n_clusters_per_class=1)\n', (1181, 1302), False, 'from sklearn.datasets import make_moons, make_circles, make_classification\n'), ((1340, 1364), 'numpy.random.RandomState', 'np.random.RandomState', (['(2)'], {}), '(2)\n', (1361, 1364), True, 'import numpy as np\n'), ((1470, 1492), 'numpy.random.shuffle', 'np.random.shuffle', (['ind'], {}), '(ind)\n', (1487, 1492), True, 'import numpy as np\n'), ((1724, 1773), 'numpy.savetxt', 'np.savetxt', (['"""y_train.csv"""', 'y_train'], {'delimiter': '""","""'}), "('y_train.csv', y_train, delimiter=',')\n", (1734, 1773), True, 'import numpy as np\n'), ((1801, 1850), 'numpy.savetxt', 'np.savetxt', (['"""X_train.csv"""', 'X_train'], {'delimiter': '""","""'}), "('X_train.csv', X_train, delimiter=',')\n", (1811, 1850), True, 'import numpy as np\n'), ((1878, 1925), 'numpy.savetxt', 'np.savetxt', (['"""X_test.csv"""', 'X_test'], {'delimiter': '""","""'}), "('X_test.csv', X_test, delimiter=',')\n", (1888, 1925), True, 'import numpy as np\n'), ((1953, 2000), 'numpy.savetxt', 'np.savetxt', (['"""y_test.csv"""', 'y_test'], {'delimiter': '""","""'}), "('y_test.csv', y_test, delimiter=',')\n", (1963, 2000), True, 'import numpy as np\n'), ((2669, 2687), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (2678, 2687), True, 'import numpy as np\n'), ((3082, 3100), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (3091, 3100), True, 'import numpy as np\n'), ((3274, 3343), 'numpy.matmul', 'np.matmul', 
(['(X_train[mask] - mean_y[-1]).T', '(X_train[mask] - mean_y[-1])'], {}), '((X_train[mask] - mean_y[-1]).T, X_train[mask] - mean_y[-1])\n', (3283, 3343), True, 'import numpy as np\n'), ((2984, 3000), 'numpy.linalg.det', 'np.linalg.det', (['D'], {}), '(D)\n', (2997, 3000), True, 'import numpy as np\n'), ((2733, 2755), 'numpy.uint8', 'np.uint8', (['(y_train == i)'], {}), '(y_train == i)\n', (2741, 2755), True, 'import numpy as np\n'), ((2902, 2919), 'numpy.linalg.pinv', 'np.linalg.pinv', (['D'], {}), '(D)\n', (2916, 2919), True, 'import numpy as np\n')] |
from .arc_nd_interpolator import ArcNDInterpolator
import numpy as np
class Gaze3DInterpolator:
def __init__(self, camera_pose, gaze_points, num_cache=21):
"""
Camera_pos is (a iterable of numpy array with shape (3,)) or (2D numpy array of shape (N, 3)).
Gaze_points can be either (a single numpy array with shape (3,)), or (a iterable of numpy
array with shape (3,)) or (2D numpy array of shape (N, 3)). When the input is a single numpy
array with shape (3,), the camera should always be looking at the same point.
"""
self.camera_pose = camera_pose
# For testing
n = len(camera_pose)
self.gaze_points = gaze_points
self.camera_spline = ArcNDInterpolator(camera_pose, num_cache=num_cache)
self.gaze_spline = None
if len(self.gaze_points.shape) != 1:
self.gaze_spline = ArcNDInterpolator(gaze_points, num_cache=num_cache)
def length(self):
"""
Return the total arc_length of camera position spline
"""
return self.camera_spline.length()
def at(self, arc_length):
"""
Returns a numpy array with shape (4, 4), representing a homogeneous transform matrix. Evaluates
camera position at arc length s, and evaluate gaze point at the same ratio of s/total_length
as camera position.
"""
camera_pose = self.camera_spline.at(arc_length)
if len(self.gaze_points.shape) == 1:
gaze_point = self.gaze_points
else:
gaze_spline_length = self.gaze_spline.length()
gaze_arc_length = arc_length / self.length() * gaze_spline_length
gaze_point = self.gaze_spline.at(gaze_arc_length)
return self._get_transform_matrix(camera_pose, gaze_point)
def generate(self, s_list):
"""
Batch version of self.at()
"""
return [self.at(s) for s in s_list]
def _get_transform_matrix(self, camera_pose, gaze_point):
gaze_point_local = gaze_point - camera_pose
z_local = gaze_point_local / np.linalg.norm(gaze_point_local)
z_global = np.array([0, 0, 1])
x_local = np.cross(z_local, z_global)
x_local /= np.linalg.norm(x_local)
y_local = np.cross(z_local, x_local)
trans = np.eye(4)
trans[:3, 0] = x_local
trans[:3, 1] = y_local
trans[:3, 2] = z_local
trans[:3, 3] = camera_pose
return trans | [
"numpy.array",
"numpy.eye",
"numpy.cross",
"numpy.linalg.norm"
] | [((2149, 2168), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2157, 2168), True, 'import numpy as np\n'), ((2187, 2214), 'numpy.cross', 'np.cross', (['z_local', 'z_global'], {}), '(z_local, z_global)\n', (2195, 2214), True, 'import numpy as np\n'), ((2234, 2257), 'numpy.linalg.norm', 'np.linalg.norm', (['x_local'], {}), '(x_local)\n', (2248, 2257), True, 'import numpy as np\n'), ((2277, 2303), 'numpy.cross', 'np.cross', (['z_local', 'x_local'], {}), '(z_local, x_local)\n', (2285, 2303), True, 'import numpy as np\n'), ((2321, 2330), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2327, 2330), True, 'import numpy as np\n'), ((2097, 2129), 'numpy.linalg.norm', 'np.linalg.norm', (['gaze_point_local'], {}), '(gaze_point_local)\n', (2111, 2129), True, 'import numpy as np\n')] |
from openalea.deploy.shared_data import shared_data
import rsml
from rsml import misc
from rsml import measurements
import numpy as np
from matplotlib import pyplot as plt
from glob import glob
import os
# load rsml
rsml_dir = shared_data(rsml.__path__)#,'AR570/2012_11_25_09h00_chl110_1_1.rsml')
rsml_files = sorted(glob(rsml_dir/"AR570/*.rsml"))
def plot(x,y, label):
l = plt.plot(x,y, '+')[0]
# fit and plot linear regression
# add constraint on (0,0)
x = np.hstack(([0],x))
y = np.hstack(([0],y))
w = np.ones_like(x)
w[0]=2*x.size
# fitting
c = np.polyfit(x,y,2, w=w)
# plot
X = np.linspace(0,x.max(),5)
def poly(x,c):
return sum([ci*(X**i) for i,ci in enumerate(c)])
plt.plot(X,poly(X,c[::-1]),l.get_color(), label=label, lw=2)
ax.set_xlabel("Distance from ramification to primary tip")
ax.set_ylabel("Lateral root length")
def plot_from_file(rsml_file, ax, split_plant, label=""):
g = rsml.rsml2mtg(rsml_file)
# extract properties & measurments for all roots
root = measurements.root_order(g) # ids of lateral roots
root = [r for r,o in root.iteritems() if o==2]
length = measurements.root_length(g,root) # length of roots
ppos = measurements.parent_position(g,roots=root,distance2tip=True) # branching position on parent
plant = dict((r,g.complex(r)) for r in root) # plant id of all roots
if split_plant:
# plot root length w.r.t parent_position, for each plant
for i,pid in enumerate(sorted(set(plant.values()))):
rids = [r for r,p in plant.iteritems() if p==pid]
x = np.array([ppos[r] for r in rids])
y = np.array([length[r] for r in rids])
plot(x,y,"plant %d"%(i+1))
else:
x = np.array([ppos[r] for r in root])
y = np.array([length[r] for r in root])
plot(x,y, label)
plt.ion()
plt.clf()
# plot last time step, for each plant
#ax = plt.subplot(1,1,1)
#filename = os.path.split(rsml_files[-1])[-1]
#plot_from_file(rsml_files[-1], ax, split_plant=True)
#ax.set_title("Individual plants at day 12")
# plot for each time step, no plant distinction
ax = plt.subplot(1,1,1)#, sharex=ax, sharey=ax)
days = [6,7,8,10,11,12]
for i,rsml_file in enumerate(rsml_files):
plot_from_file(rsml_file, ax, split_plant=False, label="day %2d"%days[i])
ax.set_title("All plants from day 6 to 12")
| [
"rsml.measurements.parent_position",
"numpy.ones_like",
"openalea.deploy.shared_data.shared_data",
"rsml.measurements.root_order",
"numpy.hstack",
"numpy.polyfit",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.ion",
"rsml.measurements.root_length",
"matpl... | [((230, 256), 'openalea.deploy.shared_data.shared_data', 'shared_data', (['rsml.__path__'], {}), '(rsml.__path__)\n', (241, 256), False, 'from openalea.deploy.shared_data import shared_data\n'), ((1965, 1974), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1972, 1974), True, 'from matplotlib import pyplot as plt\n'), ((1975, 1984), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1982, 1984), True, 'from matplotlib import pyplot as plt\n'), ((2298, 2318), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (2309, 2318), True, 'from matplotlib import pyplot as plt\n'), ((320, 351), 'glob.glob', 'glob', (["(rsml_dir / 'AR570/*.rsml')"], {}), "(rsml_dir / 'AR570/*.rsml')\n", (324, 351), False, 'from glob import glob\n'), ((490, 509), 'numpy.hstack', 'np.hstack', (['([0], x)'], {}), '(([0], x))\n', (499, 509), True, 'import numpy as np\n'), ((517, 536), 'numpy.hstack', 'np.hstack', (['([0], y)'], {}), '(([0], y))\n', (526, 536), True, 'import numpy as np\n'), ((544, 559), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (556, 559), True, 'import numpy as np\n'), ((606, 630), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(2)'], {'w': 'w'}), '(x, y, 2, w=w)\n', (616, 630), True, 'import numpy as np\n'), ((1004, 1028), 'rsml.rsml2mtg', 'rsml.rsml2mtg', (['rsml_file'], {}), '(rsml_file)\n', (1017, 1028), False, 'import rsml\n'), ((1094, 1120), 'rsml.measurements.root_order', 'measurements.root_order', (['g'], {}), '(g)\n', (1117, 1120), False, 'from rsml import measurements\n'), ((1222, 1255), 'rsml.measurements.root_length', 'measurements.root_length', (['g', 'root'], {}), '(g, root)\n', (1246, 1255), False, 'from rsml import measurements\n'), ((1290, 1352), 'rsml.measurements.parent_position', 'measurements.parent_position', (['g'], {'roots': 'root', 'distance2tip': '(True)'}), '(g, roots=root, distance2tip=True)\n', (1318, 1352), False, 'from rsml import measurements\n'), ((382, 401), 
'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""+"""'], {}), "(x, y, '+')\n", (390, 401), True, 'from matplotlib import pyplot as plt\n'), ((1854, 1887), 'numpy.array', 'np.array', (['[ppos[r] for r in root]'], {}), '([ppos[r] for r in root])\n', (1862, 1887), True, 'import numpy as np\n'), ((1902, 1937), 'numpy.array', 'np.array', (['[length[r] for r in root]'], {}), '([length[r] for r in root])\n', (1910, 1937), True, 'import numpy as np\n'), ((1704, 1737), 'numpy.array', 'np.array', (['[ppos[r] for r in rids]'], {}), '([ppos[r] for r in rids])\n', (1712, 1737), True, 'import numpy as np\n'), ((1756, 1791), 'numpy.array', 'np.array', (['[length[r] for r in rids]'], {}), '([length[r] for r in rids])\n', (1764, 1791), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn import datasets, naive_bayes
import math
import scipy.stats as stats
np.random.seed(1345)
# Load the wine dataset (description here http://scikit-learn.org/stable/datasets/index.html#diabetes-dataset)
wine = datasets.load_wine()
data = wine.data.copy()
target = wine.target.copy()
# Split the data into training/testing sets
total_samples = wine.target.shape[0]
exclude = round(total_samples/3)
indices = np.arange(0, total_samples)
np.random.shuffle(indices)
idx_train = indices[:-exclude]
idx_test = indices[-exclude:]
assert not np.intersect1d(idx_test, idx_train).size
X_train = data[idx_train]
X_test = data[idx_test]
# Split the targets into training/testing sets
y_train = target[idx_train]
y_test = target[idx_test]
class myGaussianNB:
"""
Naive Bayes for continous variables -> Gaussian Naive Bayes
Bayes Theorem
P(y|X) = P(X|y) * P(y) / P(X)
"""
def __init__(self):
# initialise the attributes of this class
self.classes = []
self.features_number = 0
# self.class_prior = dict()
# self.class_mean = dict()
# self.class_std = dict()
self.class_likelihood = dict()
self.posteriors = []
self.predictions = []
def class_mean(self, features, target):
"""
Compute the mean for each feature
Args:
features (ndarray): 2D features array
target (ndarray): 1D target array
Returns:
ndarray: mean array for each feature
"""
self.class_mean = {}
for c in self.classes:
self.class_mean[c] = features[target == c].mean(
0) # np.mean() on 0 axis
return self.class_mean
def class_std(self, features, target, ddof=1):
"""
Compute corrected sample standard deviation.
To copute uncorrected sample standard deviation change ddof to 0
Args:
features (ndarray): 2D features array
target (ndarray): 1D target array
ddof (int): Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements.
Returns:
ndarray: array of the standard deviation for each feature
"""
self.class_std = {}
for c in self.classes:
self.class_std[c] = features[target == c].std(
0, ddof=ddof) # np.std() on 0 axis
return self.class_std
def class_prior(self, target):
"""
In Bayesian statistical inference, a prior probability distribution, often simply called the prior,
of an uncertain quantity is the probability distribution that would express one's beliefs about
this quantity before some evidence is taken into account. (Wikipedia, 2021)
Thus we will evaluate the prior as the propability distribuition of encountering either class 0,1 or 2.
Args:
target (ndarray): 1D target array
Returns:
ndarray: array of length # of calsses
"""
self.class_prior = {}
for c in self.classes:
self.class_prior[c] = np.sum(target == c) / len(target)
return self.class_prior
def fit(self, features, target):
self.classes = np.unique(target.astype(int))
self.features_number = features.shape[1]
self.class_mean(features, target)
self.class_std(features, target)
self.class_prior(target)
def predict(self, X_test):
# 1. evaluate (log) likelihoods of test data for each class
for c in self.classes:
# there will be multiple gaussians that need to be combined using the naive assumption
likelihood = 1
for obs in np.arange(0, self.features_number).astype(int):
likelihood = likelihood * \
stats.norm.pdf(
X_test[:, obs], self.class_mean[c][obs], self.class_std[c][obs])
#likelihood = likelihood * stats.norm.pdf(X_test[:,obs], self.class_mean[c][obs], self.class_std[c][obs])
self.class_likelihood[c] = likelihood
# 2. approximate the posterior using P(X|Y)P(Y)
self.posteriors.append(
self.class_prior[c] * self.class_likelihood[c])
# 3. take the maximum posterior probability as our final class
self.predictions = np.argmax(self.posteriors, 0)
return self.predictions
| [
"numpy.intersect1d",
"numpy.argmax",
"numpy.sum",
"sklearn.datasets.load_wine",
"numpy.random.seed",
"scipy.stats.norm.pdf",
"numpy.arange",
"numpy.random.shuffle"
] | [((124, 144), 'numpy.random.seed', 'np.random.seed', (['(1345)'], {}), '(1345)\n', (138, 144), True, 'import numpy as np\n'), ((264, 284), 'sklearn.datasets.load_wine', 'datasets.load_wine', ([], {}), '()\n', (282, 284), False, 'from sklearn import datasets, naive_bayes\n'), ((462, 489), 'numpy.arange', 'np.arange', (['(0)', 'total_samples'], {}), '(0, total_samples)\n', (471, 489), True, 'import numpy as np\n'), ((490, 516), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (507, 516), True, 'import numpy as np\n'), ((591, 626), 'numpy.intersect1d', 'np.intersect1d', (['idx_test', 'idx_train'], {}), '(idx_test, idx_train)\n', (605, 626), True, 'import numpy as np\n'), ((4454, 4483), 'numpy.argmax', 'np.argmax', (['self.posteriors', '(0)'], {}), '(self.posteriors, 0)\n', (4463, 4483), True, 'import numpy as np\n'), ((3200, 3219), 'numpy.sum', 'np.sum', (['(target == c)'], {}), '(target == c)\n', (3206, 3219), True, 'import numpy as np\n'), ((3805, 3839), 'numpy.arange', 'np.arange', (['(0)', 'self.features_number'], {}), '(0, self.features_number)\n', (3814, 3839), True, 'import numpy as np\n'), ((3917, 3996), 'scipy.stats.norm.pdf', 'stats.norm.pdf', (['X_test[:, obs]', 'self.class_mean[c][obs]', 'self.class_std[c][obs]'], {}), '(X_test[:, obs], self.class_mean[c][obs], self.class_std[c][obs])\n', (3931, 3996), True, 'import scipy.stats as stats\n')] |
"""
Copyright 2021 Max-Planck-Gesellschaft
Code author: <NAME>, <EMAIL>
Embodied Vision Group, Max Planck Institute for Intelligent Systems, Tübingen
This source code is licensed under the MIT license found in the
LICENSE.md file in the root directory of this source tree or at
https://opensource.org/licenses/MIT.
"""
import json
import os
import pickle as pkl
import uuid
import warnings
from collections import namedtuple
from io import BytesIO
from os.path import join
import gym
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from context_exploration.utils import QuadraticResize
class LazyWrapper(gym.Wrapper):
def __init__(self, env):
# super().__init__ is not called here on purpose
self.env = env
@property
def action_space(self):
return self.env.action_space
@property
def observation_space(self):
return self.env.observation_space
@property
def reward_range(self):
return self.env.reward_range
@property
def metadata(self):
return self.env.metadata
class ActionRepeatWrapper(gym.Wrapper):
def __init__(self, env, action_repeat):
super(ActionRepeatWrapper, self).__init__(env)
self.action_repeat = action_repeat
@property
def dt(self):
return self.env.dt * self.action_repeat
def step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < self.action_repeat and not done:
observ, reward, done, info = self.env.step(action)
total_reward += reward
current_step += 1
return observ, total_reward, done, info
class MaxDurationWrapper(gym.Wrapper):
def __init__(self, env, max_duration):
super(MaxDurationWrapper, self).__init__(env)
self.max_duration = max_duration
self._step = None
def step(self, action):
if self._step is None:
raise RuntimeError("Must reset environment.")
observ, reward, done, info = self.env.step(action)
self._step += 1
if done or self._step >= self.max_duration:
done = True
self._step = None
return observ, reward, done, info
def reset(self, **kwargs):
self._step = 0
return self.env.reset(**kwargs)
def process_rendering(env, rendering_size, as_png):
screen = env.render(mode="rgb_array")
pil_image = Image.fromarray(screen)
pil_image = QuadraticResize(rendering_size)(pil_image)
if as_png:
with BytesIO() as byte_io:
pil_image.save(byte_io, "PNG")
return byte_io.getvalue()
else:
return np.array(pil_image)
RolloutFilenames = namedtuple(
"RolloutFilenames", ["rollout_filename", "rendering_filename", "metadata_filename"]
)
class CaptureWrapper(LazyWrapper):
def __init__(
self,
env,
rollout_uuid=None,
save_renderings=False,
rendering_size=(64, 64),
rendering_as_png=False,
process_rendering_fcn=None,
):
super(CaptureWrapper, self).__init__(env)
self.save_renderings = save_renderings
self.rendering_size = rendering_size
self.rendering_as_png = rendering_as_png
if process_rendering_fcn is None:
self._process_rendering = lambda: process_rendering(
env, rendering_size, rendering_as_png
)
else:
self._process_rendering = lambda: process_rendering_fcn(
env.render(mode="rgb_array")
)
self.rollout_uuid = rollout_uuid
self._in_capture_mode = False
def reset(self, **kwargs):
if self._in_capture_mode:
raise RuntimeError("env.reset() must only be called once.")
obs = self.env.reset()
self._action = []
self._reward = [np.nan]
self._done = [False]
self._info = [{}]
self._observation = [obs]
if self.save_renderings:
self._rendering = [self._process_rendering()]
self._in_capture_mode = True
return obs
def step(self, action):
if not self._in_capture_mode:
raise RuntimeError("Must reset environment.")
obs, reward, done, info = self.env.step(action)
self._action.append(action)
self._reward.append(reward)
self._done.append(done)
self._info.append(info)
self._observation.append(obs)
if self.save_renderings:
self._rendering.append(self._process_rendering())
return obs, reward, done, info
def close(self):
if not self._in_capture_mode:
raise RuntimeError("Cannot close environment; env.reset() was not called.")
# append 0 action to last step
self._action.append(self._action[-1] * 0)
# bring first reward to right shape
self._reward[0] = np.ones_like(self._reward[1]) * np.nan
rollout = {
"action": np.stack(self._action),
"reward": np.stack(self._reward),
"info": self._info,
"done": self._done,
"observation": self._observation,
}
data_dict = {
"id": self.rollout_uuid,
"meta": {"rollout_length": len(self._done)},
"rollout": rollout,
}
# save renderings
if self.save_renderings:
data_dict["rendering"] = {
"rendering": self._rendering,
"rendering_as_png": self.rendering_as_png,
}
self.env.close()
return data_dict
| [
"numpy.ones_like",
"PIL.Image.fromarray",
"collections.namedtuple",
"io.BytesIO",
"numpy.array",
"numpy.stack",
"context_exploration.utils.QuadraticResize"
] | [((2739, 2838), 'collections.namedtuple', 'namedtuple', (['"""RolloutFilenames"""', "['rollout_filename', 'rendering_filename', 'metadata_filename']"], {}), "('RolloutFilenames', ['rollout_filename', 'rendering_filename',\n 'metadata_filename'])\n", (2749, 2838), False, 'from collections import namedtuple\n'), ((2459, 2482), 'PIL.Image.fromarray', 'Image.fromarray', (['screen'], {}), '(screen)\n', (2474, 2482), False, 'from PIL import Image\n'), ((2499, 2530), 'context_exploration.utils.QuadraticResize', 'QuadraticResize', (['rendering_size'], {}), '(rendering_size)\n', (2514, 2530), False, 'from context_exploration.utils import QuadraticResize\n'), ((2698, 2717), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (2706, 2717), True, 'import numpy as np\n'), ((2570, 2579), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (2577, 2579), False, 'from io import BytesIO\n'), ((4935, 4964), 'numpy.ones_like', 'np.ones_like', (['self._reward[1]'], {}), '(self._reward[1])\n', (4947, 4964), True, 'import numpy as np\n'), ((5016, 5038), 'numpy.stack', 'np.stack', (['self._action'], {}), '(self._action)\n', (5024, 5038), True, 'import numpy as np\n'), ((5062, 5084), 'numpy.stack', 'np.stack', (['self._reward'], {}), '(self._reward)\n', (5070, 5084), True, 'import numpy as np\n')] |
""" @package forcebalance.interaction Interaction energy fitting module.
@author <NAME>
@date 05/2012
"""
from __future__ import division
from builtins import str
from builtins import range
import os
import shutil
import numpy as np
from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, bohr2ang, commadash, uncommadash, printcool_dictionary
from forcebalance.target import Target
from forcebalance.molecule import Molecule, format_xyz_coord
from re import match, sub
import subprocess
from subprocess import PIPE
from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd
from collections import OrderedDict
from forcebalance.output import getLogger
logger = getLogger(__name__)
class Interaction(Target):
""" Subclass of Target for fitting force fields to interaction energies.
Currently TINKER is supported.
We introduce the following concepts:
- The number of snapshots
- The reference interaction energies and the file they belong in (qdata.txt)
This subclass contains the 'get' method for building the objective
function from any simulation software (a driver to run the program and
read output is still required)."""
def __init__(self,options,tgt_opts,forcefield):
# Initialize the SuperClass!
super(Interaction,self).__init__(options,tgt_opts,forcefield)
#======================================#
# Options that are given by the parser #
#======================================#
## Number of snapshots
self.set_option(tgt_opts,'shots','ns')
## Do we call Q-Chem for dielectric energies? (Currently needs to be fixed)
self.set_option(tgt_opts,'do_cosmo','do_cosmo')
## Do we put the reference energy into the denominator?
self.set_option(tgt_opts,'cauchy','cauchy')
## Do we put the reference energy into the denominator?
self.set_option(tgt_opts,'attenuate','attenuate')
## Divide by the number of snapshots?
self.set_option(tgt_opts, 'normalize')
## What is the energy denominator?
self.set_option(tgt_opts,'energy_denom','energy_denom')
## Set fragment 1
self.set_option(tgt_opts,'fragment1','fragment1')
if len(self.fragment1) == 0:
logger.error('You need to define the first fragment using the fragment1 keyword\n')
raise RuntimeError
self.select1 = np.array(uncommadash(self.fragment1))
## Set fragment 2
self.set_option(tgt_opts,'fragment2','fragment2')
if len(self.fragment2) != 0:
self.select2 = np.array(uncommadash(self.fragment2))
else:
self.select2 = None
## Set upper cutoff energy
self.set_option(tgt_opts,'energy_upper','energy_upper')
## Option for how much data to write to disk.
self.set_option(tgt_opts,'writelevel','writelevel')
#======================================#
# Variables which are set here #
#======================================#
## LPW 2018-02-11: This is set to True if the target calculates
## a single-point property over several existing snapshots.
self.loop_over_snapshots = True
## Reference (QM) interaction energies
self.eqm = []
## Snapshot label, useful for graphing
self.label = []
## The qdata.txt file that contains the QM energies and forces
self.qfnm = os.path.join(self.tgtdir,"qdata.txt")
self.e_err = 0.0
self.e_err_pct = None
## Read in the trajectory file
self.mol = Molecule(os.path.join(self.root,self.tgtdir,self.coords),
top=(os.path.join(self.root,self.tgtdir,self.pdb) if hasattr(self, 'pdb') else None), build_topology=False if self.coords.endswith('.pdb') else True)
if self.ns != -1:
self.mol = self.mol[:self.ns]
self.ns = len(self.mol)
if self.select2 is None:
self.select2 = [i for i in range(self.mol.na) if i not in self.select1]
logger.info('Fragment 2 is the complement of fragment 1 : %s\n' % (commadash(self.select2)))
## Build keyword dictionaries to pass to engine.
engine_args = OrderedDict(list(self.OptionDict.items()) + list(options.items()))
engine_args.pop('name', None)
self.engine = self.engine_(target=self, mol=self.mol, **engine_args)
## Read in the reference data
self.read_reference_data()
logger.info("The energy denominator is: %s kcal/mol\n" % str(self.energy_denom))
denom = self.energy_denom
# Create the denominator.
if self.cauchy:
self.divisor = np.sqrt(self.eqm**2 + denom**2)
if self.attenuate:
logger.error('attenuate and cauchy are mutually exclusive\n')
raise RuntimeError
elif self.attenuate:
# Attenuate only large repulsions.
self.divisor = np.zeros(len(self.eqm))
for i in range(len(self.eqm)):
if self.eqm[i] < denom:
self.divisor[i] = denom
else:
self.divisor[i] = np.sqrt(denom**2 + (self.eqm[i]-denom)**2)
else:
self.divisor = np.ones(len(self.eqm)) * denom
if self.cauchy:
logger.info("Each contribution to the interaction energy objective function will be scaled by 1.0 / ( energy_denom**2 + reference**2 )\n")
if self.energy_upper > 0:
ecut = self.energy_upper
self.prefactor = 1.0 * (self.eqm < ecut)
logger.info("Interactions more repulsive than %s will not be fitted (%i/%i excluded) \n" % (str(self.energy_upper), sum(self.eqm > ecut), len(self.eqm)))
else:
self.prefactor = np.ones(len(self.eqm))
if self.normalize:
self.prefactor /= len(self.prefactor)
def read_reference_data(self):
""" Read the reference ab initio data from a file such as qdata.txt.
After reading in the information from qdata.txt, it is converted
into kcal/mol.
"""
# Parse the qdata.txt file
for line in open(os.path.join(self.root,self.qfnm)):
sline = line.split()
if len(sline) == 0: continue
elif sline[0] == 'INTERACTION':
self.eqm.append(float(sline[1]))
elif sline[0] == 'LABEL':
self.label.append(sline[1])
if all(len(i) in [self.ns, 0] for i in [self.eqm]) and len(self.eqm) == self.ns:
break
self.ns = len(self.eqm)
# Turn everything into arrays, convert to kcal/mol
self.eqm = np.array(self.eqm)
self.eqm *= (eqcgmx / 4.184)
    def indicate(self):
        """Print a summary comparing calculated (MM) and reference (QM) energies.

        If one LABEL was read per snapshot, prints a per-snapshot table;
        otherwise prints a compact aggregate table.
        """
        # Residuals and their weighted, normalized contributions to the objective.
        delta = (self.emm-self.eqm)
        deltanrm = self.prefactor*(delta/self.divisor)**2
        if len(self.label) == self.ns:
            # One labeled row per snapshot: Calc, Ref, Delta, Divisor, Term.
            PrintDict = OrderedDict()
            for i,label in enumerate(self.label):
                PrintDict[label] = "% 9.3f % 9.3f % 9.3f % 9.3f % 11.5f" % (self.emm[i], self.eqm[i], delta[i], self.divisor[i], deltanrm[i])
            printcool_dictionary(PrintDict,title="Target: %s\nInteraction Energies (kcal/mol), Objective = % .5e\n %-10s %9s %9s %9s %9s %11s" %
                                 (self.name, self.objective, "Label", "Calc.", "Ref.", "Delta", "Divisor", "Term"),keywidth=15)
        else:
            # logger.info("Target: %s Objective: % .5e (add LABEL keywords in qdata.txt for full printout)\n" % (self.name,self.objective))
            Headings = ["Observable", "Difference\nRMS (Calc-Ref)", "Denominator\n(Specified)", " Percent \nDifference"]
            Data = OrderedDict([])
            # NOTE(review): np.sqrt(np.mean(delta/self.divisor)**2) equals
            # abs(mean(delta/divisor)), not the RMS of the ratio; confirm
            # whether np.sqrt(np.mean((delta/self.divisor)**2)) was intended.
            Data['Energy (kcal/mol)'] = ["%8.4f" % np.sqrt(np.mean(delta**2)),
                                         "%8.4f" % np.mean(self.divisor),
                                         "%.4f%%" % (np.sqrt(np.mean(delta/self.divisor)**2)*100)]
            self.printcool_table(data=Data, headings=Headings, color=0)
            logger.info("add LABEL keywords in qdata.txt to print out each snapshot\n")
        # if len(self.RMSDDict) > 0:x
        # printcool_dictionary(self.RMSDDict,title="Geometry Optimized Systems (Angstrom), Objective = %.5e\n %-38s %11s %11s" % (self.rmsd_part, "System", "RMSD", "Term"), keywidth=45)
def get(self, mvals, AGrad=False, AHess=False):
""" Evaluate objective function. """
Answer = {'X':0.0, 'G':np.zeros(self.FF.np), 'H':np.zeros((self.FF.np, self.FF.np))}
# If the weight is zero, turn all derivatives off.
if (self.weight == 0.0):
AGrad = False
AHess = False
def callM(mvals_, dielectric=False):
logger.info("\r")
pvals = self.FF.make(mvals_)
return self.engine.interaction_energy(self.select1, self.select2)
logger.info("Executing\r")
emm = callM(mvals)
D = emm - self.eqm
dV = np.zeros((self.FF.np,len(emm)))
if self.writelevel > 0:
# Dump interaction energies to disk.
np.savetxt('M.txt',emm)
np.savetxt('Q.txt',self.eqm)
import pickle
pickle.dump((self.name, self.label, self.prefactor, self.eqm, emm), open("qm_vs_mm.p",'w'))
# select the qm and mm data that has >0 weight to plot
qm_data, mm_data = [], []
for i in range(len(self.eqm)):
if self.prefactor[i] != 0:
qm_data.append(self.eqm[i])
mm_data.append(emm[i])
plot_interaction_qm_vs_mm(qm_data, mm_data, title="Interaction Energy "+self.name)
# Do the finite difference derivative.
if AGrad or AHess:
for p in self.pgrad:
dV[p,:], _ = f12d3p(fdwrap(callM, mvals, p), h = self.h, f0 = emm)
# Create the force field one last time.
pvals = self.FF.make(mvals)
Answer['X'] = np.dot(self.prefactor*D/self.divisor,D/self.divisor)
for p in self.pgrad:
Answer['G'][p] = 2*np.dot(self.prefactor*D/self.divisor, dV[p,:]/self.divisor)
for q in self.pgrad:
Answer['H'][p,q] = 2*np.dot(self.prefactor*dV[p,:]/self.divisor, dV[q,:]/self.divisor)
if not in_fd():
self.emm = emm
self.objective = Answer['X']
## QYD: try to clean up OpenMM engine.simulation objects to free up GPU memory
try:
if self.engine.name == 'openmm':
if hasattr(self.engine, 'simulation'): del self.engine.simulation
if hasattr(self.engine, 'A'): del self.engine.A
if hasattr(self.engine, 'B'): del self.engine.B
except:
pass
return Answer
def plot_interaction_qm_vs_mm(eqm, emm, title=''):
    """Plot QM and MM interaction energies per snapshot and save to PDF.

    Writes the figure to 'e_qm_vs_mm.pdf' in the working directory and
    closes it afterwards so repeated calls do not accumulate figures.
    """
    import matplotlib.pyplot as plt
    # Draw both series with distinct markers so they are easy to tell apart.
    for series, name, mark in ((eqm, 'QM Data', '^'), (emm, 'MM Data', 'o')):
        plt.plot(series, label=name, marker=mark)
    plt.legend()
    plt.xlabel('Snapshots')
    plt.ylabel('Interaction Energy (kcal/mol)')
    plt.title(title)
    plt.savefig("e_qm_vs_mm.pdf")
    plt.close()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"forcebalance.output.getLogger",
"builtins.str",
"numpy.array",
"builtins.range",
"forcebalance.nifty.commadash",
"forcebalance.finite_difference.in_fd",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"... | [((719, 738), 'forcebalance.output.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (728, 738), False, 'from forcebalance.output import getLogger\n'), ((10986, 11028), 'matplotlib.pyplot.plot', 'plt.plot', (['eqm'], {'label': '"""QM Data"""', 'marker': '"""^"""'}), "(eqm, label='QM Data', marker='^')\n", (10994, 11028), True, 'import matplotlib.pyplot as plt\n'), ((11033, 11075), 'matplotlib.pyplot.plot', 'plt.plot', (['emm'], {'label': '"""MM Data"""', 'marker': '"""o"""'}), "(emm, label='MM Data', marker='o')\n", (11041, 11075), True, 'import matplotlib.pyplot as plt\n'), ((11080, 11092), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11090, 11092), True, 'import matplotlib.pyplot as plt\n'), ((11097, 11120), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Snapshots"""'], {}), "('Snapshots')\n", (11107, 11120), True, 'import matplotlib.pyplot as plt\n'), ((11125, 11168), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Interaction Energy (kcal/mol)"""'], {}), "('Interaction Energy (kcal/mol)')\n", (11135, 11168), True, 'import matplotlib.pyplot as plt\n'), ((11173, 11189), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (11182, 11189), True, 'import matplotlib.pyplot as plt\n'), ((11194, 11223), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""e_qm_vs_mm.pdf"""'], {}), "('e_qm_vs_mm.pdf')\n", (11205, 11223), True, 'import matplotlib.pyplot as plt\n'), ((11228, 11239), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11237, 11239), True, 'import matplotlib.pyplot as plt\n'), ((3515, 3553), 'os.path.join', 'os.path.join', (['self.tgtdir', '"""qdata.txt"""'], {}), "(self.tgtdir, 'qdata.txt')\n", (3527, 3553), False, 'import os\n'), ((6775, 6793), 'numpy.array', 'np.array', (['self.eqm'], {}), '(self.eqm)\n', (6783, 6793), True, 'import numpy as np\n'), ((10080, 10139), 'numpy.dot', 'np.dot', (['(self.prefactor * D / self.divisor)', '(D / self.divisor)'], {}), '(self.prefactor * D / self.divisor, D / 
self.divisor)\n', (10086, 10139), True, 'import numpy as np\n'), ((2465, 2492), 'forcebalance.nifty.uncommadash', 'uncommadash', (['self.fragment1'], {}), '(self.fragment1)\n', (2476, 2492), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, bohr2ang, commadash, uncommadash, printcool_dictionary\n'), ((3675, 3724), 'os.path.join', 'os.path.join', (['self.root', 'self.tgtdir', 'self.coords'], {}), '(self.root, self.tgtdir, self.coords)\n', (3687, 3724), False, 'import os\n'), ((4767, 4802), 'numpy.sqrt', 'np.sqrt', (['(self.eqm ** 2 + denom ** 2)'], {}), '(self.eqm ** 2 + denom ** 2)\n', (4774, 4802), True, 'import numpy as np\n'), ((6265, 6299), 'os.path.join', 'os.path.join', (['self.root', 'self.qfnm'], {}), '(self.root, self.qfnm)\n', (6277, 6299), False, 'import os\n'), ((7013, 7026), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7024, 7026), False, 'from collections import OrderedDict\n'), ((7231, 7476), 'forcebalance.nifty.printcool_dictionary', 'printcool_dictionary', (['PrintDict'], {'title': '("""Target: %s\nInteraction Energies (kcal/mol), Objective = % .5e\n %-10s %9s %9s %9s %9s %11s"""\n % (self.name, self.objective, \'Label\', \'Calc.\', \'Ref.\', \'Delta\',\n \'Divisor\', \'Term\'))', 'keywidth': '(15)'}), '(PrintDict, title=\n """Target: %s\nInteraction Energies (kcal/mol), Objective = % .5e\n %-10s %9s %9s %9s %9s %11s"""\n % (self.name, self.objective, \'Label\', \'Calc.\', \'Ref.\', \'Delta\',\n \'Divisor\', \'Term\'), keywidth=15)\n', (7251, 7476), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, bohr2ang, commadash, uncommadash, printcool_dictionary\n'), ((7786, 7801), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (7797, 7801), False, 'from collections import OrderedDict\n'), ((8569, 8589), 'numpy.zeros', 'np.zeros', (['self.FF.np'], {}), '(self.FF.np)\n', (8577, 8589), True, 'import numpy as np\n'), 
((8595, 8629), 'numpy.zeros', 'np.zeros', (['(self.FF.np, self.FF.np)'], {}), '((self.FF.np, self.FF.np))\n', (8603, 8629), True, 'import numpy as np\n'), ((9201, 9225), 'numpy.savetxt', 'np.savetxt', (['"""M.txt"""', 'emm'], {}), "('M.txt', emm)\n", (9211, 9225), True, 'import numpy as np\n'), ((9237, 9266), 'numpy.savetxt', 'np.savetxt', (['"""Q.txt"""', 'self.eqm'], {}), "('Q.txt', self.eqm)\n", (9247, 9266), True, 'import numpy as np\n'), ((10405, 10412), 'forcebalance.finite_difference.in_fd', 'in_fd', ([], {}), '()\n', (10410, 10412), False, 'from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd\n'), ((2651, 2678), 'forcebalance.nifty.uncommadash', 'uncommadash', (['self.fragment2'], {}), '(self.fragment2)\n', (2662, 2678), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, bohr2ang, commadash, uncommadash, printcool_dictionary\n'), ((4624, 4646), 'builtins.str', 'str', (['self.energy_denom'], {}), '(self.energy_denom)\n', (4627, 4646), False, 'from builtins import str\n'), ((10193, 10259), 'numpy.dot', 'np.dot', (['(self.prefactor * D / self.divisor)', '(dV[p, :] / self.divisor)'], {}), '(self.prefactor * D / self.divisor, dV[p, :] / self.divisor)\n', (10199, 10259), True, 'import numpy as np\n'), ((3757, 3803), 'os.path.join', 'os.path.join', (['self.root', 'self.tgtdir', 'self.pdb'], {}), '(self.root, self.tgtdir, self.pdb)\n', (3769, 3803), False, 'import os\n'), ((4074, 4092), 'builtins.range', 'range', (['self.mol.na'], {}), '(self.mol.na)\n', (4079, 4092), False, 'from builtins import range\n'), ((4198, 4221), 'forcebalance.nifty.commadash', 'commadash', (['self.select2'], {}), '(self.select2)\n', (4207, 4221), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, bohr2ang, commadash, uncommadash, printcool_dictionary\n'), ((7930, 7951), 'numpy.mean', 'np.mean', (['self.divisor'], {}), '(self.divisor)\n', (7937, 7951), True, 
'import numpy as np\n'), ((9917, 9940), 'forcebalance.finite_difference.fdwrap', 'fdwrap', (['callM', 'mvals', 'p'], {}), '(callM, mvals, p)\n', (9923, 9940), False, 'from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd\n'), ((10323, 10396), 'numpy.dot', 'np.dot', (['(self.prefactor * dV[p, :] / self.divisor)', '(dV[q, :] / self.divisor)'], {}), '(self.prefactor * dV[p, :] / self.divisor, dV[q, :] / self.divisor)\n', (10329, 10396), True, 'import numpy as np\n'), ((5257, 5305), 'numpy.sqrt', 'np.sqrt', (['(denom ** 2 + (self.eqm[i] - denom) ** 2)'], {}), '(denom ** 2 + (self.eqm[i] - denom) ** 2)\n', (5264, 5305), True, 'import numpy as np\n'), ((5776, 5798), 'builtins.str', 'str', (['self.energy_upper'], {}), '(self.energy_upper)\n', (5779, 5798), False, 'from builtins import str\n'), ((7861, 7880), 'numpy.mean', 'np.mean', (['(delta ** 2)'], {}), '(delta ** 2)\n', (7868, 7880), True, 'import numpy as np\n'), ((8012, 8041), 'numpy.mean', 'np.mean', (['(delta / self.divisor)'], {}), '(delta / self.divisor)\n', (8019, 8041), True, 'import numpy as np\n')] |
# Copyright (c) 2022 <NAME>.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Classes for decoding various GEMPAK file formats."""
import bisect
from collections import namedtuple
from collections.abc import Iterable
import contextlib
import ctypes
from datetime import datetime, timedelta
from enum import Enum
from itertools import product
import logging
import math
from pathlib import Path
import struct
import sys
import numpy as np
import pyproj
import xarray as xr
from .gemcalc import (interp_logp_height, interp_logp_pressure,
interp_missing_data, interp_moist_height)
from .tools import IOBuffer, NamedStruct
logger = logging.getLogger(__name__)
# Expected sizes of the analysis (ANLB) and navigation (NAVB) file headers.
ANLB_SIZE = 128
# GEMPAK files address data in 4-byte words.
BYTES_PER_WORD = 4
NAVB_SIZE = 256
# Attribute layout for packed parameters: 4-char name plus integer scale,
# offset, and bit width.
PARAM_ATTR = [('name', (4, 's')), ('scale', (1, 'i')),
              ('offset', (1, 'i')), ('bits', (1, 'i'))]
# Row/column header flag words: 9999 marks a header slot as in use.
USED_FLAG = 9999
UNUSED_FLAG = -9999
# Magic string at the start of every GEMPAK data management file.
GEMPAK_HEADER = 'GEMPAK DATA MANAGEMENT FILE '
# Map GEMPAK projection mnemonics to (pyproj projection name, GEMPAK class),
# where the class is azimuthal ('azm'), cylindrical ('cyl'), conic ('con'),
# or oblique ('obq').
GEMPROJ_TO_PROJ = {
    'MER': ('merc', 'cyl'),
    'NPS': ('stere', 'azm'),
    'SPS': ('stere', 'azm'),
    'LCC': ('lcc', 'con'),
    'SCC': ('lcc', 'con'),
    'CED': ('eqc', 'cyl'),
    'MCD': ('eqc', 'cyl'),
    'NOR': ('ortho', 'azm'),
    'SOR': ('ortho', 'azm'),
    'STR': ('stere', 'azm'),
    'AED': ('aeqd', 'azm'),
    'ORT': ('ortho', 'azm'),
    'LEA': ('laea', 'azm'),
    'GNO': ('gnom', 'azm'),
    'TVM': ('tmerc', 'obq'),
    'UTM': ('utm', 'obq'),
}
# Map GEMPAK vertical coordinate mnemonics to short variable names.
GVCORD_TO_VAR = {
    'PRES': 'p',
    'HGHT': 'z',
    'THTA': 'theta',
}
class FileTypes(Enum):
    """GEMPAK file type."""
    # Values match the file_type word of the product description block.
    surface = 1
    sounding = 2
    grid = 3
class DataTypes(Enum):
    """Data management library data types."""
    # Storage formats a GEMPAK part may use for its data block.
    real = 1
    integer = 2
    character = 3
    realpack = 4
    grid = 5
class VerticalCoordinates(Enum):
    """Veritical coordinates."""
    # Member names are upper-cased elsewhere to recover the GEMPAK mnemonic
    # (e.g. PRES, THTA, HGHT).
    none = 0
    pres = 1
    thta = 2
    hght = 3
    sgma = 4
    dpth = 5
    hybd = 6
    pvab = 7
    pvbl = 8
class PackingType(Enum):
    """GRIB packing type."""
    # How grid data are packed in the file's data blocks.
    none = 0
    grib = 1
    nmc = 2
    diff = 3
    dec = 4
    grib2 = 5
class ForecastType(Enum):
    """Forecast type."""
    # Encoded as the leading digit of the GEMPAK forecast-time integer.
    analysis = 0
    forecast = 1
    guess = 2
    initial = 3
class DataSource(Enum):
    """Data source."""
    # Known data-source codes; unrecognized codes map to `unknown` (99)
    # via the _data_source helper.
    model = 0
    airway_surface = 1
    metar = 2
    ship = 3
    raob_buoy = 4
    synop_raob_vas = 5
    grid = 6
    watch_by_county = 7
    unknown = 99
    text = 100
    metar2 = 102
    ship2 = 103
    raob_buoy2 = 104
    synop_raob_vas2 = 105
# Metadata record for one grid in a gridded file (see GempakGrid._gdinfo).
Grid = namedtuple('Grid', [
    'GRIDNO',
    'TYPE',
    'DATTIM1',
    'DATTIM2',
    'PARM',
    'LEVEL1',
    'LEVEL2',
    'COORD',
])
# Metadata record for one sounding (station identity, position, and time).
Sounding = namedtuple('Sounding', [
    'DTNO',
    'SNDNO',
    'DATTIM',
    'ID',
    'NUMBER',
    'LAT',
    'LON',
    'ELEV',
    'STATE',
    'COUNTRY',
])
# Metadata record for one surface observation (station identity and position).
Surface = namedtuple('Surface', [
    'ROW',
    'COL',
    'DATTIM',
    'ID',
    'NUMBER',
    'LAT',
    'LON',
    'ELEV',
    'STATE',
    'COUNTRY',
])
def _data_source(source):
    """Map a stored integer to a DataSource enum member.

    Unrecognized integers are logged and mapped to DataSource.unknown (99)
    rather than raising, since files in the wild may carry codes outside
    the known set.
    """
    # EAFP: the original constructed DataSource(source) twice (once in the
    # try body and again in the else clause); one construction suffices.
    try:
        return DataSource(source)
    except ValueError:
        logger.warning('Could not interpret data source `%s`. '
                       'Setting to `Unknown`.', source)
        return DataSource(99)
def _word_to_position(word, bytes_per_word=BYTES_PER_WORD):
    """Convert a 1-based word index into a 0-based byte offset."""
    return bytes_per_word * (word - 1)
class GempakFile():
    """Base class for GEMPAK files.
    Reads ubiquitous GEMPAK file headers (i.e., the data management portion of
    each file).
    """
    # Word-by-word layout of the product description block: file-level
    # pointers, table sizes, and missing-data sentinels.
    prod_desc_fmt = [('version', 'i'), ('file_headers', 'i'),
                     ('file_keys_ptr', 'i'), ('rows', 'i'),
                     ('row_keys', 'i'), ('row_keys_ptr', 'i'),
                     ('row_headers_ptr', 'i'), ('columns', 'i'),
                     ('column_keys', 'i'), ('column_keys_ptr', 'i'),
                     ('column_headers_ptr', 'i'), ('parts', 'i'),
                     ('parts_ptr', 'i'), ('data_mgmt_ptr', 'i'),
                     ('data_mgmt_length', 'i'), ('data_block_ptr', 'i'),
                     ('file_type', 'i', FileTypes),
                     ('data_source', 'i', _data_source),
                     ('machine_type', 'i'), ('missing_int', 'i'),
                     (None, '12x'), ('missing_float', 'f')]
    # Navigation block (NAVB): projection definition and grid corners.
    grid_nav_fmt = [('grid_definition_type', 'f'),
                    ('projection', '3sx', bytes.decode),
                    ('left_grid_number', 'f'), ('bottom_grid_number', 'f'),
                    ('right_grid_number', 'f'), ('top_grid_number', 'f'),
                    ('lower_left_lat', 'f'), ('lower_left_lon', 'f'),
                    ('upper_right_lat', 'f'), ('upper_right_lon', 'f'),
                    ('proj_angle1', 'f'), ('proj_angle2', 'f'),
                    ('proj_angle3', 'f'), (None, '972x')]
    # Analysis block (ANLB) layout for analysis_type == 1.
    grid_anl_fmt1 = [('analysis_type', 'f'), ('delta_n', 'f'),
                     ('delta_x', 'f'), ('delta_y', 'f'),
                     (None, '4x'), ('garea_llcr_lat', 'f'),
                     ('garea_llcr_lon', 'f'), ('garea_urcr_lat', 'f'),
                     ('garea_urcr_lon', 'f'), ('extarea_llcr_lat', 'f'),
                     ('extarea_llcr_lon', 'f'), ('extarea_urcr_lat', 'f'),
                     ('extarea_urcr_lon', 'f'), ('datarea_llcr_lat', 'f'),
                     ('datarea_llcr_lon', 'f'), ('datarea_urcr_lat', 'f'),
                     ('datarea_urcrn_lon', 'f'), (None, '444x')]
    # Analysis block (ANLB) layout for analysis_type == 2.
    grid_anl_fmt2 = [('analysis_type', 'f'), ('delta_n', 'f'),
                     ('grid_ext_left', 'f'), ('grid_ext_down', 'f'),
                     ('grid_ext_right', 'f'), ('grid_ext_up', 'f'),
                     ('garea_llcr_lat', 'f'), ('garea_llcr_lon', 'f'),
                     ('garea_urcr_lat', 'f'), ('garea_urcr_lon', 'f'),
                     ('extarea_llcr_lat', 'f'), ('extarea_llcr_lon', 'f'),
                     ('extarea_urcr_lat', 'f'), ('extarea_urcr_lon', 'f'),
                     ('datarea_llcr_lat', 'f'), ('datarea_llcr_lon', 'f'),
                     ('datarea_urcr_lat', 'f'), ('datarea_urcrn_lon', 'f'),
                     (None, '440x')]
    # Data-management record: free-space bookkeeping (28 free-word slots).
    data_management_fmt = ([('next_free_word', 'i'), ('max_free_pairs', 'i'),
                            ('actual_free_pairs', 'i'), ('last_word', 'i')]
                           + [('free_word{:d}'.format(n), 'i') for n in range(1, 29)])
    def __init__(self, file):
        """Instantiate GempakFile object from file."""
        if isinstance(file, Path):
            file = str(file)
        with contextlib.closing(open(file, 'rb')) as fobj: # noqa: SIM115
            self._buffer = IOBuffer.fromfile(fobj)
        # Save file start position as pointers use this as reference
        self._start = self._buffer.set_mark()
        # Process the main GEMPAK header to verify file format
        self._process_gempak_header()
        meta = self._buffer.set_mark()
        # # Check for byte swapping
        self._swap_bytes(bytes(self._buffer.read_binary(4)))
        self._buffer.jump_to(meta)
        # Process main metadata header
        self.prod_desc = self._buffer.read_struct(NamedStruct(self.prod_desc_fmt,
                                                              self.prefmt,
                                                              'ProductDescription'))
        # File Keys
        # Surface and upper-air files will not have the file headers, so we need to check.
        if self.prod_desc.file_headers > 0:
            # This would grab any file headers, but NAVB and ANLB are the only ones used.
            fkey_prod = product(['header_name', 'header_length', 'header_type'],
                                range(1, self.prod_desc.file_headers + 1))
            fkey_names = ['{}{}'.format(*x) for x in fkey_prod]
            fkey_info = list(zip(fkey_names, np.repeat(('4s', 'i', 'i'),
                                                       self.prod_desc.file_headers)))
            self.file_keys_format = NamedStruct(fkey_info, self.prefmt, 'FileKeys')
            self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.file_keys_ptr))
            self.file_keys = self._buffer.read_struct(self.file_keys_format)
            # file_key_blocks = self._buffer.set_mark()
            # Navigation Block
            navb_size = self._buffer.read_int(4, self.endian, False)
            if navb_size != NAVB_SIZE:
                raise ValueError('Navigation block size does not match GEMPAK specification')
            else:
                self.navigation_block = (
                    self._buffer.read_struct(NamedStruct(self.grid_nav_fmt,
                                                         self.prefmt,
                                                         'NavigationBlock'))
                )
            # Grid dimensions come from the navigation block corner numbers.
            self.kx = int(self.navigation_block.right_grid_number)
            self.ky = int(self.navigation_block.top_grid_number)
            # Analysis Block
            anlb_size = self._buffer.read_int(4, self.endian, False)
            anlb_start = self._buffer.set_mark()
            if anlb_size != ANLB_SIZE:
                raise ValueError('Analysis block size does not match GEMPAK specification')
            else:
                # Peek at the first word to select the layout variant, then
                # rewind and read the whole block with the right format.
                anlb_type = self._buffer.read_struct(struct.Struct(self.prefmt + 'f'))[0]
                self._buffer.jump_to(anlb_start)
                if anlb_type == 1:
                    self.analysis_block = (
                        self._buffer.read_struct(NamedStruct(self.grid_anl_fmt1,
                                                             self.prefmt,
                                                             'AnalysisBlock'))
                    )
                elif anlb_type == 2:
                    self.analysis_block = (
                        self._buffer.read_struct(NamedStruct(self.grid_anl_fmt2,
                                                             self.prefmt,
                                                             'AnalysisBlock'))
                    )
                else:
                    self.analysis_block = None
        else:
            self.analysis_block = None
            self.navigation_block = None
        # Data Management
        self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.data_mgmt_ptr))
        self.data_management = self._buffer.read_struct(NamedStruct(self.data_management_fmt,
                                                                    self.prefmt,
                                                                    'DataManagement'))
        # Row Keys
        self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.row_keys_ptr))
        row_key_info = [('row_key{:d}'.format(n), '4s', self._decode_strip)
                        for n in range(1, self.prod_desc.row_keys + 1)]
        row_key_info.extend([(None, None)])
        row_keys_fmt = NamedStruct(row_key_info, self.prefmt, 'RowKeys')
        self.row_keys = self._buffer.read_struct(row_keys_fmt)
        # Column Keys
        self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.column_keys_ptr))
        column_key_info = [('column_key{:d}'.format(n), '4s', self._decode_strip)
                           for n in range(1, self.prod_desc.column_keys + 1)]
        column_key_info.extend([(None, None)])
        column_keys_fmt = NamedStruct(column_key_info, self.prefmt, 'ColumnKeys')
        self.column_keys = self._buffer.read_struct(column_keys_fmt)
        # Parts
        self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.parts_ptr))
        # parts = self._buffer.set_mark()
        self.parts = []
        # Part records are stored column-major across all parts, hence the
        # (parts - 1) * BYTES_PER_WORD padding between fields.
        parts_info = [('name', '4s', self._decode_strip),
                      (None, '{:d}x'.format((self.prod_desc.parts - 1) * BYTES_PER_WORD)),
                      ('header_length', 'i'),
                      (None, '{:d}x'.format((self.prod_desc.parts - 1) * BYTES_PER_WORD)),
                      ('data_type', 'i', DataTypes),
                      (None, '{:d}x'.format((self.prod_desc.parts - 1) * BYTES_PER_WORD)),
                      ('parameter_count', 'i')]
        parts_info.extend([(None, None)])
        parts_fmt = NamedStruct(parts_info, self.prefmt, 'Parts')
        for n in range(1, self.prod_desc.parts + 1):
            self.parts.append(self._buffer.read_struct(parts_fmt))
            self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.parts_ptr + n))
        # Parameters
        # No need to jump to any position as this follows parts information
        self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.parts_ptr
                                                            + self.prod_desc.parts * 4))
        self.parameters = [{key: [] for key, _ in PARAM_ATTR}
                           for n in range(self.prod_desc.parts)]
        for attr, fmt in PARAM_ATTR:
            # String fields never take a byte-order prefix.
            fmt = (fmt[0], self.prefmt + fmt[1] if fmt[1] != 's' else fmt[1])
            for n, part in enumerate(self.parts):
                for _ in range(part.parameter_count):
                    if 's' in fmt[1]:
                        self.parameters[n][attr] += [
                            self._decode_strip(self._buffer.read_binary(*fmt)[0])
                        ]
                    else:
                        self.parameters[n][attr] += self._buffer.read_binary(*fmt)
    def _swap_bytes(self, binary):
        """Swap between little and big endian."""
        # If the first word does not match the native encoding of 1, the file
        # was written with the opposite byte order; set struct prefix/endian.
        self.swaped_bytes = (struct.pack('@i', 1) != binary)
        if self.swaped_bytes:
            if sys.byteorder == 'little':
                self.prefmt = '>'
                self.endian = 'big'
            elif sys.byteorder == 'big':
                self.prefmt = '<'
                self.endian = 'little'
        else:
            self.prefmt = ''
            self.endian = sys.byteorder
    def _process_gempak_header(self):
        """Read the GEMPAK header from the file."""
        fmt = [('text', '28s', bytes.decode), (None, None)]
        header = self._buffer.read_struct(NamedStruct(fmt, '', 'GempakHeader'))
        if header.text != GEMPAK_HEADER:
            raise TypeError('Unknown file format or invalid GEMPAK file')
    @staticmethod
    def _convert_dattim(dattim):
        """Convert GEMPAK DATTIM integer to datetime object."""
        if dattim:
            if dattim < 100000000:
                # Six digits: date only (YYMMDD).
                dt = datetime.strptime(f'{dattim:06d}', '%y%m%d')
            else:
                # Ten digits: date and time (MMDDYYHHMM).
                dt = datetime.strptime('{:010d}'.format(dattim), '%m%d%y%H%M')
        else:
            dt = None
        return dt
    @staticmethod
    def _convert_ftime(ftime):
        """Convert GEMPAK forecast time and type integer."""
        if ftime >= 0:
            # Leading digit is the ForecastType; the remainder encodes HHMM.
            iftype = ForecastType(ftime // 100000)
            iftime = ftime - iftype.value * 100000
            hours = iftime // 100
            minutes = iftime - hours * 100
            out = (iftype.name, timedelta(hours=hours, minutes=minutes))
        else:
            out = None
        return out
    @staticmethod
    def _convert_level(level):
        """Convert levels."""
        # Negative levels are treated as unset.
        if isinstance(level, (int, float)) and level >= 0:
            return level
        else:
            return None
    @staticmethod
    def _convert_vertical_coord(coord):
        """Convert integer vertical coordinate to name."""
        if coord <= 8:
            return VerticalCoordinates(coord).name.upper()
        else:
            # Larger values hold a packed 4-character coordinate name.
            return struct.pack('i', coord).decode()
    @staticmethod
    def _fortran_ishift(i, shift):
        """Python-friendly bit shifting."""
        # Emulates Fortran ISHFT on 32-bit words: positive shift is left
        # (truncated to 32 bits), negative is a logical right shift.
        mask = 0xffffffff
        if shift > 0:
            shifted = ctypes.c_int32(i << shift).value
        elif shift < 0:
            if i < 0:
                # Mask first so negative values shift in zeros, not sign bits.
                shifted = (i & mask) >> abs(shift)
            else:
                shifted = i >> abs(shift)
        elif shift == 0:
            shifted = i
        else:
            raise ValueError('Bad shift value {}.'.format(shift))
        return shifted
    @staticmethod
    def _decode_strip(b):
        """Decode bytes to string and strip whitespace."""
        return b.decode().strip()
    @staticmethod
    def _make_date(dattim):
        """Make a date object from GEMPAK DATTIM integer."""
        return GempakFile._convert_dattim(dattim).date()
    @staticmethod
    def _make_time(t):
        """Make a time object from GEMPAK FTIME integer."""
        string = '{:04d}'.format(t)
        return datetime.strptime(string, '%H%M').time()
    def _unpack_real(self, buffer, parameters, length):
        """Unpack floating point data packed in integers.
        Similar to DP_UNPK subroutine in GEMPAK.
        """
        nparms = len(parameters['name'])
        mskpat = 0xffffffff
        # Words per packed record and number of records in this buffer.
        pwords = (sum(parameters['bits']) - 1) // 32 + 1
        npack = (length - 1) // pwords + 1
        unpacked = np.ones(npack * nparms, dtype=np.float32) * self.prod_desc.missing_float
        if npack * pwords != length:
            raise ValueError('Unpacking length mismatch.')
        ir = 0
        ii = 0
        for _i in range(npack):
            pdat = buffer[ii:(ii + pwords)]
            rdat = unpacked[ir:(ir + nparms)]
            itotal = 0
            for idata in range(nparms):
                # Per-parameter packing info: decimal scale, integer offset,
                # and bit width of the stored field.
                scale = 10**parameters['scale'][idata]
                offset = parameters['offset'][idata]
                bits = parameters['bits'][idata]
                isbitc = (itotal % 32) + 1
                iswrdc = (itotal // 32)
                # All-ones field of `bits` width marks a missing value.
                imissc = self._fortran_ishift(mskpat, bits - 32)
                jbit = bits
                jsbit = isbitc
                jshift = 1 - jsbit
                jsword = iswrdc
                jword = pdat[jsword]
                mask = self._fortran_ishift(mskpat, jbit - 32)
                ifield = self._fortran_ishift(jword, jshift)
                ifield &= mask
                if (jsbit + jbit - 1) > 32:
                    # Field straddles a word boundary; merge bits from the
                    # next word.
                    jword = pdat[jsword + 1]
                    jshift += 32
                    iword = self._fortran_ishift(jword, jshift)
                    iword &= mask
                    ifield |= iword
                if ifield == imissc:
                    rdat[idata] = self.prod_desc.missing_float
                else:
                    rdat[idata] = (ifield + offset) * scale
                itotal += bits
            unpacked[ir:(ir + nparms)] = rdat
            ir += nparms
            ii += pwords
        return unpacked.tolist()
class GempakGrid(GempakFile):
"""Subclass of GempakFile specific to GEMPAK gridded data."""
def __init__(self, file, *args, **kwargs):
"""Instantiate GempakGrid object from file."""
super().__init__(file)
datetime_names = ['GDT1', 'GDT2']
level_names = ['GLV1', 'GLV2']
ftime_names = ['GTM1', 'GTM2']
string_names = ['GPM1', 'GPM2', 'GPM3']
# Row Headers
# Based on GEMPAK source, row/col headers have a 0th element in their Fortran arrays.
# This appears to be a flag value to say a header is used or not. 9999
# means its in use, otherwise -9999. GEMPAK allows empty grids, etc., but
# no real need to keep track of that in Python.
self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.row_headers_ptr))
self.row_headers = []
row_headers_info = [(key, 'i') for key in self.row_keys]
row_headers_info.extend([(None, None)])
row_headers_fmt = NamedStruct(row_headers_info, self.prefmt, 'RowHeaders')
for _ in range(1, self.prod_desc.rows + 1):
if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
self.row_headers.append(self._buffer.read_struct(row_headers_fmt))
# Column Headers
self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.column_headers_ptr))
self.column_headers = []
column_headers_info = [(key, 'i', self._convert_level) if key in level_names
else (key, 'i', self._convert_vertical_coord) if key == 'GVCD'
else (key, 'i', self._convert_dattim) if key in datetime_names
else (key, 'i', self._convert_ftime) if key in ftime_names
else (key, '4s', self._decode_strip) if key in string_names
else (key, 'i')
for key in self.column_keys]
column_headers_info.extend([(None, None)])
column_headers_fmt = NamedStruct(column_headers_info, self.prefmt, 'ColumnHeaders')
for _ in range(1, self.prod_desc.columns + 1):
if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
self.column_headers.append(self._buffer.read_struct(column_headers_fmt))
self._gdinfo = []
for n, head in enumerate(self.column_headers):
self._gdinfo.append(
Grid(
n,
head.GTM1[0],
head.GDT1 + head.GTM1[1],
head.GDT2 + head.GTM2[1] if head.GDT2 and head.GDTM2 else None,
head.GPM1 + head.GPM2 + head.GPM3,
head.GLV1,
head.GLV2,
head.GVCD,
)
)
# Coordinates
if self.navigation_block is not None:
self._get_crs()
self._set_coordinates()
def gdinfo(self):
"""Return grid information."""
return self._gdinfo
def project_point(self, lon, lat):
"""Project geographic corrdinates.
Parameters
----------
lon : float or array-like of float
Longitude of point(s).
lat : float or array-like of float
Latitude of point(s).
Returns
-------
tuple
Tuple containing lists of x and y projected
coordinate values.
"""
return self._transform(lon, lat)
    def _get_crs(self):
        """Create CRS from GEMPAK navigation block."""
        gemproj = self.navigation_block.projection
        if gemproj not in GEMPROJ_TO_PROJ:
            raise NotImplementedError('{} projection not implemented.'
                                      .format(gemproj))
        proj, ptype = GEMPROJ_TO_PROJ[gemproj]
        ellps = 'sphere'  # Kept for posterity
        earth_radius = 6371200.0  # R takes precedence over ellps
        # Branch on the GEMPAK projection class (azimuthal, cylindrical,
        # conic, oblique) since each stores its angles differently.
        if ptype == 'azm':
            lat_0 = self.navigation_block.proj_angle1
            lon_0 = self.navigation_block.proj_angle2
            rot = self.navigation_block.proj_angle3
            if rot != 0:
                logger.warning('Rotated projections currently '
                               'not supported. Angle3 (%7.2f) ignored.', rot)
            self.crs = pyproj.CRS.from_dict({'proj': proj,
                                             'lat_0': lat_0,
                                             'lon_0': lon_0,
                                             'ellps': ellps,
                                             'R': earth_radius})
        elif ptype == 'cyl':
            if gemproj != 'MCD':
                lat_0 = self.navigation_block.proj_angle1
                lon_0 = self.navigation_block.proj_angle2
                rot = self.navigation_block.proj_angle3
                if rot != 0:
                    logger.warning('Rotated projections currently '
                                   'not supported. Angle3 (%7.2f) ignored.', rot)
                self.crs = pyproj.CRS.from_dict({'proj': proj,
                                                 'lat_0': lat_0,
                                                 'lon_0': lon_0,
                                                 'ellps': ellps,
                                                 'R': earth_radius})
            else:
                # MCD: scale factor defaults to 1/cos(average latitude) when
                # angle1 is zero, otherwise angle1 is used directly.
                # NOTE(review): avglat appears to be in degrees while
                # math.cos expects radians — confirm the intended units.
                avglat = (self.navigation_block.upper_right_lat
                          + self.navigation_block.lower_left_lat) * 0.5
                k_0 = (1 / math.cos(avglat)
                       if self.navigation_block.proj_angle1 == 0
                       else self.navigation_block.proj_angle1
                       )
                lon_0 = self.navigation_block.proj_angle2
                self.crs = pyproj.CRS.from_dict({'proj': proj,
                                                 'lat_0': avglat,
                                                 'lon_0': lon_0,
                                                 'k_0': k_0,
                                                 'ellps': ellps,
                                                 'R': earth_radius})
        elif ptype == 'con':
            lat_1 = self.navigation_block.proj_angle1
            lon_0 = self.navigation_block.proj_angle2
            lat_2 = self.navigation_block.proj_angle3
            self.crs = pyproj.CRS.from_dict({'proj': proj,
                                             'lon_0': lon_0,
                                             'lat_1': lat_1,
                                             'lat_2': lat_2,
                                             'ellps': ellps,
                                             'R': earth_radius})
        elif ptype == 'obq':
            lon_0 = self.navigation_block.proj_angle1
            if gemproj == 'UTM':
                # Derive the UTM zone (1-60) from the central longitude.
                zone = np.digitize((lon_0 % 360) / 6 + 1, range(1, 61), right=True)
                self.crs = pyproj.CRS.from_dict({'proj': proj,
                                                 'zone': zone,
                                                 'ellps': ellps,
                                                 'R': earth_radius})
            else:
                self.crs = pyproj.CRS.from_dict({'proj': proj,
                                                 'lon_0': lon_0,
                                                 'ellps': ellps,
                                                 'R': earth_radius})
def _set_coordinates(self):
"""Use GEMPAK navigation block to define coordinates.
Defines geographic and projection coordinates for the object.
"""
transform = pyproj.Proj(self.crs)
self._transform = transform
llx, lly = transform(self.navigation_block.lower_left_lon,
self.navigation_block.lower_left_lat)
urx, ury = transform(self.navigation_block.upper_right_lon,
self.navigation_block.upper_right_lat)
self.x = np.linspace(llx, urx, self.kx, dtype=np.float32)
self.y = np.linspace(lly, ury, self.ky, dtype=np.float32)
xx, yy = np.meshgrid(self.x, self.y, copy=False)
self.lon, self.lat = transform(xx, yy, inverse=True)
self.lon = self.lon.astype(np.float32)
self.lat = self.lat.astype(np.float32)
    def _unpack_grid(self, packing_type, part):
        """Read raw GEMPAK grid integers and unpack into floats.

        Parameters
        ----------
        packing_type : PackingType
            Packing scheme used to encode this grid's data.
        part : namedtuple
            File part whose header precedes the packed grid values.

        Returns
        -------
        numpy.ndarray or None
            Unpacked grid values; None when the data block is empty.

        Raises
        ------
        NotImplementedError
            For NMC, GRIB2, or unrecognized packing schemes.
        """
        if packing_type == PackingType.none:
            # Unpacked data: values are stored directly as floats.
            lendat = self.data_header_length - part.header_length - 1
            if lendat > 1:
                buffer_fmt = '{}{}f'.format(self.prefmt, lendat)
                buffer = self._buffer.read_struct(struct.Struct(buffer_fmt))
                grid = np.zeros(self.ky * self.kx, dtype=np.float32)
                grid[...] = buffer
            else:
                grid = None
            return grid
        elif packing_type == PackingType.nmc:
            raise NotImplementedError('NMC unpacking not supported.')
            # integer_meta_fmt = [('bits', 'i'), ('missing_flag', 'i'), ('kxky', 'i')]
            # real_meta_fmt = [('reference', 'f'), ('scale', 'f')]
            # self.grid_meta_int = self._buffer.read_struct(NamedStruct(integer_meta_fmt,
            #                                                           self.prefmt,
            #                                                           'GridMetaInt'))
            # self.grid_meta_real = self._buffer.read_struct(NamedStruct(real_meta_fmt,
            #                                                            self.prefmt,
            #                                                            'GridMetaReal'))
            # grid_start = self._buffer.set_mark()
        elif packing_type == PackingType.diff:
            # Difference packing: each value is stored as a scaled offset
            # from the previous value (row start uses the previous row).
            integer_meta_fmt = [('bits', 'i'), ('missing_flag', 'i'),
                                ('kxky', 'i'), ('kx', 'i')]
            real_meta_fmt = [('reference', 'f'), ('scale', 'f'), ('diffmin', 'f')]
            self.grid_meta_int = self._buffer.read_struct(NamedStruct(integer_meta_fmt,
                                                                      self.prefmt,
                                                                      'GridMetaInt'))
            self.grid_meta_real = self._buffer.read_struct(NamedStruct(real_meta_fmt,
                                                                       self.prefmt,
                                                                       'GridMetaReal'))
            # grid_start = self._buffer.set_mark()
            # All bits set marks a missing value.
            imiss = 2**self.grid_meta_int.bits - 1
            lendat = self.data_header_length - part.header_length - 8
            packed_buffer_fmt = '{}{}i'.format(self.prefmt, lendat)
            packed_buffer = self._buffer.read_struct(struct.Struct(packed_buffer_fmt))
            grid = np.zeros((self.ky, self.kx), dtype=np.float32)
            if lendat > 1:
                # Walk the packed 32-bit words, extracting bits-wide fields
                # that may straddle a word boundary.
                iword = 0
                ibit = 1
                first = True
                for j in range(self.ky):
                    line = False
                    for i in range(self.kx):
                        # Shift so the field of interest lands in the low bits.
                        jshft = self.grid_meta_int.bits + ibit - 33
                        idat = self._fortran_ishift(packed_buffer[iword], jshft)
                        idat &= imiss
                        if jshft > 0:
                            # Field crosses into the next word; merge the remainder.
                            jshft -= 32
                            idat2 = self._fortran_ishift(packed_buffer[iword + 1], jshft)
                            idat |= idat2
                        ibit += self.grid_meta_int.bits
                        if ibit > 32:
                            ibit -= 32
                            iword += 1
                        if (self.grid_meta_int.missing_flag and idat == imiss):
                            grid[j, i] = self.prod_desc.missing_float
                        else:
                            if first:
                                # First valid point seeds both row (plin) and
                                # point (psav) accumulators with the reference.
                                grid[j, i] = self.grid_meta_real.reference
                                psav = self.grid_meta_real.reference
                                plin = self.grid_meta_real.reference
                                line = True
                                first = False
                            else:
                                if not line:
                                    # First valid point of a row: difference
                                    # from the previous row's first value.
                                    grid[j, i] = plin + (self.grid_meta_real.diffmin
                                                         + idat * self.grid_meta_real.scale)
                                    line = True
                                    plin = grid[j, i]
                                else:
                                    # Difference from the previous point in the row.
                                    grid[j, i] = psav + (self.grid_meta_real.diffmin
                                                         + idat * self.grid_meta_real.scale)
                                    psav = grid[j, i]
            else:
                grid = None
            return grid
        elif packing_type in [PackingType.grib, PackingType.dec]:
            # GRIB-style packing: value = reference + idat * scale.
            integer_meta_fmt = [('bits', 'i'), ('missing_flag', 'i'), ('kxky', 'i')]
            real_meta_fmt = [('reference', 'f'), ('scale', 'f')]
            self.grid_meta_int = self._buffer.read_struct(NamedStruct(integer_meta_fmt,
                                                                      self.prefmt,
                                                                      'GridMetaInt'))
            self.grid_meta_real = self._buffer.read_struct(NamedStruct(real_meta_fmt,
                                                                       self.prefmt,
                                                                       'GridMetaReal'))
            # grid_start = self._buffer.set_mark()
            lendat = self.data_header_length - part.header_length - 6
            packed_buffer_fmt = '{}{}i'.format(self.prefmt, lendat)
            grid = np.zeros(self.grid_meta_int.kxky, dtype=np.float32)
            packed_buffer = self._buffer.read_struct(struct.Struct(packed_buffer_fmt))
            if lendat > 1:
                # All bits set marks a missing value (when missing_flag set).
                imax = 2**self.grid_meta_int.bits - 1
                ibit = 1
                iword = 0
                for cell in range(self.grid_meta_int.kxky):
                    # Extract the next bits-wide field, which may straddle
                    # a 32-bit word boundary.
                    jshft = self.grid_meta_int.bits + ibit - 33
                    idat = self._fortran_ishift(packed_buffer[iword], jshft)
                    idat &= imax
                    if jshft > 0:
                        jshft -= 32
                        idat2 = self._fortran_ishift(packed_buffer[iword + 1], jshft)
                        idat |= idat2
                    if (idat == imax) and self.grid_meta_int.missing_flag:
                        grid[cell] = self.prod_desc.missing_float
                    else:
                        grid[cell] = (self.grid_meta_real.reference
                                      + (idat * self.grid_meta_real.scale))
                    ibit += self.grid_meta_int.bits
                    if ibit > 32:
                        ibit -= 32
                        iword += 1
            else:
                grid = None
            return grid
        elif packing_type == PackingType.grib2:
            raise NotImplementedError('GRIB2 unpacking not supported.')
            # integer_meta_fmt = [('iuscal', 'i'), ('kx', 'i'),
            #                     ('ky', 'i'), ('iscan_mode', 'i')]
            # real_meta_fmt = [('rmsval', 'f')]
            # self.grid_meta_int = self._buffer.read_struct(NamedStruct(integer_meta_fmt,
            #                                                           self.prefmt,
            #                                                           'GridMetaInt'))
            # self.grid_meta_real = self._buffer.read_struct(NamedStruct(real_meta_fmt,
            #                                                            self.prefmt,
            #                                                            'GridMetaReal'))
            # grid_start = self._buffer.set_mark()
        else:
            raise NotImplementedError('No method for unknown grid packing {}'
                                      .format(packing_type.name))
def gdxarray(self, parameter=None, date_time=None, coordinate=None,
level=None, date_time2=None, level2=None):
"""Select grids and output as list of xarray DataArrays.
Subset the data by parameter values. The default is to not
subset and return the entire dataset.
Parameters
----------
parameter : str or array-like of str
Name of GEMPAK parameter.
date_time : datetime or array-like of datetime
Valid datetime of the grid. Alternatively
can be a string with the format YYYYmmddHHMM.
coordinate : str or array-like of str
Vertical coordinate.
level : float or array-like of float
Vertical level.
date_time2 : datetime or array-like of datetime
Secondary valid datetime of the grid. Alternatively
can be a string with the format YYYYmmddHHMM.
level2: float or array_like of float
Secondary vertical level. Typically used for layers.
Returns
-------
list
List of xarray.DataArray objects for each grid.
"""
if parameter is not None:
if (not isinstance(parameter, Iterable)
or isinstance(parameter, str)):
parameter = [parameter]
parameter = [p.upper() for p in parameter]
if date_time is not None:
if (not isinstance(date_time, Iterable)
or isinstance(date_time, str)):
date_time = [date_time]
for i, dt in enumerate(date_time):
if isinstance(dt, str):
date_time[i] = datetime.strptime(dt, '%Y%m%d%H%M')
if coordinate is not None:
if (not isinstance(coordinate, Iterable)
or isinstance(coordinate, str)):
coordinate = [coordinate]
coordinate = [c.upper() for c in coordinate]
if level is not None and not isinstance(level, Iterable):
level = [level]
if date_time2 is not None:
if (not isinstance(date_time2, Iterable)
or isinstance(date_time2, str)):
date_time2 = [date_time2]
for i, dt in enumerate(date_time2):
if isinstance(dt, str):
date_time2[i] = datetime.strptime(dt, '%Y%m%d%H%M')
if level2 is not None and not isinstance(level2, Iterable):
level2 = [level2]
# Figure out which columns to extract from the file
matched = self._gdinfo.copy()
if parameter is not None:
matched = filter(
lambda grid: grid if grid.PARM in parameter else False,
matched
)
if date_time is not None:
matched = filter(
lambda grid: grid if grid.DATTIM1 in date_time else False,
matched
)
if coordinate is not None:
matched = filter(
lambda grid: grid if grid.COORD in coordinate else False,
matched
)
if level is not None:
matched = filter(
lambda grid: grid if grid.LEVEL1 in level else False,
matched
)
if date_time2 is not None:
matched = filter(
lambda grid: grid if grid.DATTIM2 in date_time2 else False,
matched
)
if level2 is not None:
matched = filter(
lambda grid: grid if grid.LEVEL2 in level2 else False,
matched
)
matched = list(matched)
if len(matched) < 1:
raise KeyError('No grids were matched with given parameters.')
gridno = [g.GRIDNO for g in matched]
grids = []
irow = 0 # Only one row for grids
for icol, col_head in enumerate(self.column_headers):
if icol not in gridno:
continue
for iprt, part in enumerate(self.parts):
pointer = (self.prod_desc.data_block_ptr
+ (irow * self.prod_desc.columns * self.prod_desc.parts)
+ (icol * self.prod_desc.parts + iprt))
self._buffer.jump_to(self._start, _word_to_position(pointer))
self.data_ptr = self._buffer.read_int(4, self.endian, False)
self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
self.data_header_length = self._buffer.read_int(4, self.endian, False)
data_header = self._buffer.set_mark()
self._buffer.jump_to(data_header,
_word_to_position(part.header_length + 1))
packing_type = PackingType(self._buffer.read_int(4, self.endian, False))
full_name = col_head.GPM1 + col_head.GPM2 + col_head.GPM3
ftype, ftime = col_head.GTM1
valid = col_head.GDT1 + ftime
gvcord = col_head.GVCD.lower() if col_head.GVCD is not None else 'none'
var = (GVCORD_TO_VAR[full_name]
if full_name in GVCORD_TO_VAR
else full_name.lower()
)
data = self._unpack_grid(packing_type, part)
if data is not None:
if data.ndim < 2:
data = np.ma.array(data.reshape((self.ky, self.kx)),
mask=data == self.prod_desc.missing_float,
dtype=np.float32)
else:
data = np.ma.array(data, mask=data == self.prod_desc.missing_float,
dtype=np.float32)
xrda = xr.DataArray(
data=data[np.newaxis, np.newaxis, ...],
coords={
'time': [valid],
gvcord: [col_head.GLV1],
'x': self.x,
'y': self.y,
'lat': (['y', 'x'], self.lat),
'lon': (['y', 'x'], self.lon),
},
dims=['time', gvcord, 'y', 'x'],
name=var,
attrs={
**self.crs.to_cf(),
'grid_type': ftype,
}
)
grids.append(xrda)
else:
logger.warning('Unable to read grid for %s', col_head.GPM1)
return grids
class GempakSounding(GempakFile):
"""Subclass of GempakFile specific to GEMPAK sounding data."""
    def __init__(self, file, *args, **kwargs):
        """Instantiate GempakSounding object from file.

        Reads the row and column headers from the file, determines
        whether the data are merged (a 'SNDT' part is present), and
        builds the list of available soundings.
        """
        super().__init__(file)
        # Row Headers
        self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.row_headers_ptr))
        self.row_headers = []
        # DATE/TIME keys get decoded into date/time objects; all other
        # row keys are read as plain integers.
        row_headers_info = [(key, 'i', self._make_date) if key == 'DATE'
                            else (key, 'i', self._make_time) if key == 'TIME'
                            else (key, 'i')
                            for key in self.row_keys]
        row_headers_info.extend([(None, None)])
        row_headers_fmt = NamedStruct(row_headers_info, self.prefmt, 'RowHeaders')
        for _ in range(1, self.prod_desc.rows + 1):
            # Only rows flagged as used contain a valid header.
            if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
                self.row_headers.append(self._buffer.read_struct(row_headers_fmt))
        # Column Headers
        self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.column_headers_ptr))
        self.column_headers = []
        # Station strings are 4-byte fields; SLAT/SLON are stored as
        # integer hundredths of a degree and scaled back to degrees.
        column_headers_info = [(key, '4s', self._decode_strip) if key == 'STID'
                               else (key, 'i') if key == 'STNM'
                               else (key, 'i', lambda x: x / 100) if key == 'SLAT'
                               else (key, 'i', lambda x: x / 100) if key == 'SLON'
                               else (key, 'i') if key == 'SELV'
                               else (key, '4s', self._decode_strip) if key == 'STAT'
                               else (key, '4s', self._decode_strip) if key == 'COUN'
                               else (key, '4s', self._decode_strip) if key == 'STD2'
                               else (key, 'i')
                               for key in self.column_keys]
        column_headers_info.extend([(None, None)])
        column_headers_fmt = NamedStruct(column_headers_info, self.prefmt, 'ColumnHeaders')
        for _ in range(1, self.prod_desc.columns + 1):
            if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
                self.column_headers.append(self._buffer.read_struct(column_headers_fmt))
        # A 'SNDT' part means the file holds merged sounding data.
        self.merged = 'SNDT' in (part.name for part in self.parts)
        self._sninfo = []
        # Record a Sounding entry for every (row, column) cell that has
        # a non-zero data pointer (i.e. actually contains data).
        for irow, row_head in enumerate(self.row_headers):
            for icol, col_head in enumerate(self.column_headers):
                pointer = (self.prod_desc.data_block_ptr
                           + (irow * self.prod_desc.columns * self.prod_desc.parts)
                           + (icol * self.prod_desc.parts))
                self._buffer.jump_to(self._start, _word_to_position(pointer))
                data_ptr = self._buffer.read_int(4, self.endian, False)
                if data_ptr:
                    self._sninfo.append(
                        Sounding(
                            irow,
                            icol,
                            datetime.combine(row_head.DATE, row_head.TIME),
                            col_head.STID,
                            col_head.STNM,
                            col_head.SLAT,
                            col_head.SLON,
                            col_head.SELV,
                            col_head.STAT,
                            col_head.COUN,
                        )
                    )
def sninfo(self):
"""Return sounding information."""
return self._sninfo
    def _unpack_merged(self, sndno):
        """Unpack merged sounding data.

        Parameters
        ----------
        sndno : collection of (int, int)
            (row, column) index pairs of the soundings to unpack.

        Returns
        -------
        list of dict
            One dict per sounding with station metadata plus one
            array per parameter.
        """
        soundings = []
        for irow, row_head in enumerate(self.row_headers):
            for icol, col_head in enumerate(self.column_headers):
                if (irow, icol) not in sndno:
                    continue
                # Seed the sounding dict with station/time metadata.
                sounding = {'STID': col_head.STID,
                            'STNM': col_head.STNM,
                            'SLAT': col_head.SLAT,
                            'SLON': col_head.SLON,
                            'SELV': col_head.SELV,
                            'STAT': col_head.STAT,
                            'COUN': col_head.COUN,
                            'DATE': row_head.DATE,
                            'TIME': row_head.TIME,
                            }
                for iprt, part in enumerate(self.parts):
                    # Word offset of this (row, column, part) data block.
                    pointer = (self.prod_desc.data_block_ptr
                               + (irow * self.prod_desc.columns * self.prod_desc.parts)
                               + (icol * self.prod_desc.parts + iprt))
                    self._buffer.jump_to(self._start, _word_to_position(pointer))
                    self.data_ptr = self._buffer.read_int(4, self.endian, False)
                    if not self.data_ptr:
                        # No data stored for this part.
                        continue
                    self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
                    self.data_header_length = self._buffer.read_int(4, self.endian, False)
                    data_header = self._buffer.set_mark()
                    self._buffer.jump_to(data_header,
                                         _word_to_position(part.header_length + 1))
                    lendat = self.data_header_length - part.header_length
                    fmt_code = {
                        DataTypes.real: 'f',
                        DataTypes.realpack: 'i',
                        DataTypes.character: 's',
                    }.get(part.data_type)
                    if fmt_code is None:
                        raise NotImplementedError('No methods for data type {}'
                                                  .format(part.data_type))
                    if fmt_code == 's':
                        # Character data length is in words; convert to bytes.
                        lendat *= BYTES_PER_WORD
                    packed_buffer = (
                        self._buffer.read_struct(
                            struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
                        )
                    )
                    parameters = self.parameters[iprt]
                    nparms = len(parameters['name'])
                    # Values are interleaved per level; stride by nparms to
                    # pull out each parameter's column.
                    if part.data_type == DataTypes.realpack:
                        unpacked = self._unpack_real(packed_buffer, parameters, lendat)
                        for iprm, param in enumerate(parameters['name']):
                            sounding[param] = unpacked[iprm::nparms]
                    else:
                        for iprm, param in enumerate(parameters['name']):
                            sounding[param] = np.array(
                                packed_buffer[iprm::nparms], dtype=np.float32
                            )
                soundings.append(sounding)
        return soundings
    def _unpack_unmerged(self, sndno):
        """Unpack unmerged sounding data.

        Parameters
        ----------
        sndno : collection of (int, int)
            (row, column) index pairs of the soundings to unpack.

        Returns
        -------
        list
            Merged soundings, one per requested (row, column) pair,
            as produced by ``_merge_sounding``.
        """
        soundings = []
        for irow, row_head in enumerate(self.row_headers):
            for icol, col_head in enumerate(self.column_headers):
                if (irow, icol) not in sndno:
                    continue
                # Seed the sounding dict with station/time metadata.
                sounding = {'STID': col_head.STID,
                            'STNM': col_head.STNM,
                            'SLAT': col_head.SLAT,
                            'SLON': col_head.SLON,
                            'SELV': col_head.SELV,
                            'STAT': col_head.STAT,
                            'COUN': col_head.COUN,
                            'DATE': row_head.DATE,
                            'TIME': row_head.TIME,
                            }
                for iprt, part in enumerate(self.parts):
                    # Word offset of this (row, column, part) data block.
                    pointer = (self.prod_desc.data_block_ptr
                               + (irow * self.prod_desc.columns * self.prod_desc.parts)
                               + (icol * self.prod_desc.parts + iprt))
                    self._buffer.jump_to(self._start, _word_to_position(pointer))
                    self.data_ptr = self._buffer.read_int(4, self.endian, False)
                    if not self.data_ptr:
                        # No data stored for this part.
                        continue
                    self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
                    self.data_header_length = self._buffer.read_int(4, self.endian, False)
                    data_header = self._buffer.set_mark()
                    self._buffer.jump_to(data_header,
                                         _word_to_position(part.header_length + 1))
                    lendat = self.data_header_length - part.header_length
                    fmt_code = {
                        DataTypes.real: 'f',
                        DataTypes.realpack: 'i',
                        DataTypes.character: 's',
                    }.get(part.data_type)
                    if fmt_code is None:
                        raise NotImplementedError('No methods for data type {}'
                                                  .format(part.data_type))
                    if fmt_code == 's':
                        # Character data length is in words; convert to bytes.
                        lendat *= BYTES_PER_WORD
                    packed_buffer = (
                        self._buffer.read_struct(
                            struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
                        )
                    )
                    parameters = self.parameters[iprt]
                    nparms = len(parameters['name'])
                    # Unmerged data are grouped under each part name
                    # (e.g. TTAA, PPBB) for later merging.
                    sounding[part.name] = {}
                    # Values are interleaved per level; stride by nparms to
                    # pull out each parameter's column.
                    if part.data_type == DataTypes.realpack:
                        unpacked = self._unpack_real(packed_buffer, parameters, lendat)
                        for iprm, param in enumerate(parameters['name']):
                            sounding[part.name][param] = unpacked[iprm::nparms]
                    elif part.data_type == DataTypes.character:
                        for iprm, param in enumerate(parameters['name']):
                            sounding[part.name][param] = (
                                self._decode_strip(packed_buffer[iprm])
                            )
                    else:
                        for iprm, param in enumerate(parameters['name']):
                            sounding[part.name][param] = (
                                np.array(packed_buffer[iprm::nparms], dtype=np.float32)
                            )
                soundings.append(self._merge_sounding(sounding))
        return soundings
def _merge_sounding(self, parts):
"""Merge unmerged sounding data."""
merged = {'STID': parts['STID'],
'STNM': parts['STNM'],
'SLAT': parts['SLAT'],
'SLON': parts['SLON'],
'SELV': parts['SELV'],
'STAT': parts['STAT'],
'COUN': parts['COUN'],
'DATE': parts['DATE'],
'TIME': parts['TIME'],
'PRES': [],
'HGHT': [],
'TEMP': [],
'DWPT': [],
'DRCT': [],
'SPED': [],
}
# Number of parameter levels
num_man_levels = len(parts['TTAA']['PRES']) if 'TTAA' in parts else 0
num_man_wind_levels = len(parts['PPAA']['PRES']) if 'PPAA' in parts else 0
num_trop_levels = len(parts['TRPA']['PRES']) if 'TRPA' in parts else 0
num_max_wind_levels = len(parts['MXWA']['PRES']) if 'MXWA' in parts else 0
num_sigt_levels = len(parts['TTBB']['PRES']) if 'TTBB' in parts else 0
num_sigw_levels = len(parts['PPBB']['SPED']) if 'PPBB' in parts else 0
num_above_man_levels = len(parts['TTCC']['PRES']) if 'TTCC' in parts else 0
num_above_trop_levels = len(parts['TRPC']['PRES']) if 'TRPC' in parts else 0
num_above_max_wind_levels = len(parts['MXWC']['SPED']) if 'MXWC' in parts else 0
num_above_sigt_levels = len(parts['TTDD']['PRES']) if 'TTDD' in parts else 0
num_above_sigw_levels = len(parts['PPDD']['SPED']) if 'PPDD' in parts else 0
num_above_man_wind_levels = len(parts['PPCC']['SPED']) if 'PPCC' in parts else 0
total_data = (num_man_levels
+ num_man_wind_levels
+ num_trop_levels
+ num_max_wind_levels
+ num_sigt_levels
+ num_sigw_levels
+ num_above_man_levels
+ num_above_trop_levels
+ num_above_max_wind_levels
+ num_above_sigt_levels
+ num_above_sigw_levels
+ num_above_man_wind_levels
)
if total_data == 0:
return None
# Check SIG wind vertical coordinate
# For some reason, the pressure data can get put into the
# height array. Perhaps this is just a artifact of Python,
# as GEMPAK itself just uses array indices without any
# names involved. Since the first valid pressure of the
# array will be negative in the case of pressure coordinates,
# we can check for it and place data in the appropriate array.
ppbb_is_z = True
if num_sigw_levels:
if 'PRES' in parts['PPBB']:
ppbb_is_z = False
else:
for z in parts['PPBB']['HGHT']:
if z != self.prod_desc.missing_float and z < 0:
ppbb_is_z = False
parts['PPBB']['PRES'] = parts['PPBB']['HGHT']
break
ppdd_is_z = True
if num_above_sigw_levels:
if 'PRES' in parts['PPDD']:
ppdd_is_z = False
else:
for z in parts['PPDD']['HGHT']:
if z != self.prod_desc.missing_float and z < 0:
ppdd_is_z = False
parts['PPDD']['PRES'] = parts['PPDD']['HGHT']
break
# Process surface data
if num_man_levels < 1:
merged['PRES'].append(self.prod_desc.missing_float)
merged['HGHT'].append(self.prod_desc.missing_float)
merged['TEMP'].append(self.prod_desc.missing_float)
merged['DWPT'].append(self.prod_desc.missing_float)
merged['DRCT'].append(self.prod_desc.missing_float)
merged['SPED'].append(self.prod_desc.missing_float)
else:
merged['PRES'].append(parts['TTAA']['PRES'][0])
merged['HGHT'].append(parts['TTAA']['HGHT'][0])
merged['TEMP'].append(parts['TTAA']['TEMP'][0])
merged['DWPT'].append(parts['TTAA']['DWPT'][0])
merged['DRCT'].append(parts['TTAA']['DRCT'][0])
merged['SPED'].append(parts['TTAA']['SPED'][0])
merged['HGHT'][0] = merged['SELV']
first_man_p = self.prod_desc.missing_float
if num_man_levels >= 1:
for mp, mt, mz in zip(parts['TTAA']['PRES'],
parts['TTAA']['TEMP'],
parts['TTAA']['HGHT']):
if (mp != self.prod_desc.missing_float
and mt != self.prod_desc.missing_float
and mz != self.prod_desc.missing_float):
first_man_p = mp
break
surface_p = merged['PRES'][0]
if surface_p > 1060:
surface_p = self.prod_desc.missing_float
if (surface_p == self.prod_desc.missing_float
or (surface_p < first_man_p
and surface_p != self.prod_desc.missing_float)):
merged['PRES'][0] = self.prod_desc.missing_float
merged['HGHT'][0] = self.prod_desc.missing_float
merged['TEMP'][0] = self.prod_desc.missing_float
merged['DWPT'][0] = self.prod_desc.missing_float
merged['DRCT'][0] = self.prod_desc.missing_float
merged['SPED'][0] = self.prod_desc.missing_float
if (num_sigt_levels >= 1
and parts['TTBB']['PRES'][0] != self.prod_desc.missing_float
and parts['TTBB']['TEMP'][0] != self.prod_desc.missing_float):
first_man_p = merged['PRES'][0]
first_sig_p = parts['TTBB']['PRES'][0]
if (first_man_p == self.prod_desc.missing_float
or np.isclose(first_man_p, first_sig_p)):
merged['PRES'][0] = parts['TTBB']['PRES'][0]
merged['DWPT'][0] = parts['TTBB']['DWPT'][0]
merged['TEMP'][0] = parts['TTBB']['TEMP'][0]
if num_sigw_levels >= 1:
if ppbb_is_z:
if (parts['PPBB']['HGHT'][0] == 0
and parts['PPBB']['DRCT'][0] != self.prod_desc.missing_float):
merged['DRCT'][0] = parts['PPBB']['DRCT'][0]
merged['SPED'][0] = parts['PPBB']['SPED'][0]
else:
if (parts['PPBB']['PRES'][0] != self.prod_desc.missing_float
and parts['PPBB']['DRCT'][0] != self.prod_desc.missing_float):
first_man_p = merged['PRES'][0]
first_sig_p = abs(parts['PPBB']['PRES'][0])
if (first_man_p == self.prod_desc.missing_float
or np.isclose(first_man_p, first_sig_p)):
merged['PRES'][0] = abs(parts['PPBB']['PRES'][0])
merged['DRCT'][0] = parts['PPBB']['DRCT'][0]
merged['SPED'][0] = parts['PPBB']['SPED'][0]
# Merge MAN temperature
bgl = 0
qcman = []
if num_man_levels >= 2 or num_above_man_levels >= 1:
if merged['PRES'][0] == self.prod_desc.missing_float:
plast = 2000
else:
plast = merged['PRES'][0]
if num_man_levels >= 2:
for i in range(1, num_man_levels):
if (parts['TTAA']['PRES'][i] < plast
and parts['TTAA']['PRES'][i] != self.prod_desc.missing_float
and parts['TTAA']['TEMP'][i] != self.prod_desc.missing_float
and parts['TTAA']['HGHT'][i] != self.prod_desc.missing_float):
for pname, pval in parts['TTAA'].items():
merged[pname].append(pval[i])
plast = merged['PRES'][-1]
else:
if parts['TTAA']['PRES'][i] > merged['PRES'][0]:
bgl += 1
else:
# GEMPAK ignores MAN data with missing TEMP/HGHT and does not
# interpolate for them.
if parts['TTAA']['PRES'][i] != self.prod_desc.missing_float:
qcman.append(parts['TTAA']['PRES'][i])
if num_above_man_levels >= 1:
for i in range(num_above_man_levels):
if (parts['TTCC']['PRES'][i] < plast
and parts['TTCC']['PRES'][i] != self.prod_desc.missing_float
and parts['TTCC']['TEMP'][i] != self.prod_desc.missing_float
and parts['TTCC']['HGHT'][i] != self.prod_desc.missing_float):
for pname, pval in parts['TTCC'].items():
merged[pname].append(pval[i])
plast = merged['PRES'][-1]
# Merge MAN wind
if num_man_wind_levels >= 1 and num_man_levels >= 1 and len(merged['PRES']) >= 2:
for iwind, pres in enumerate(parts['PPAA']['PRES']):
if pres in merged['PRES'][1:]:
loc = merged['PRES'].index(pres)
if merged['DRCT'][loc] == self.prod_desc.missing_float:
merged['DRCT'][loc] = parts['PPAA']['DRCT'][iwind]
merged['SPED'][loc] = parts['PPAA']['SPED'][iwind]
else:
if pres not in qcman:
size = len(merged['PRES'])
loc = size - bisect.bisect_left(merged['PRES'][1:][::-1], pres)
if loc >= size + 1:
loc = -1
merged['PRES'].insert(loc, pres)
merged['TEMP'].insert(loc, self.prod_desc.missing_float)
merged['DWPT'].insert(loc, self.prod_desc.missing_float)
merged['DRCT'].insert(loc, parts['PPAA']['DRCT'][iwind])
merged['SPED'].insert(loc, parts['PPAA']['SPED'][iwind])
merged['HGHT'].insert(loc, self.prod_desc.missing_float)
if num_above_man_wind_levels >= 1 and num_man_levels >= 1 and len(merged['PRES']) >= 2:
for iwind, pres in enumerate(parts['PPCC']['PRES']):
if pres in merged['PRES'][1:]:
loc = merged['PRES'].index(pres)
if merged['DRCT'][loc] == self.prod_desc.missing_float:
merged['DRCT'][loc] = parts['PPCC']['DRCT'][iwind]
merged['SPED'][loc] = parts['PPCC']['SPED'][iwind]
else:
if pres not in qcman:
size = len(merged['PRES'])
loc = size - bisect.bisect_left(merged['PRES'][1:][::-1], pres)
if loc >= size + 1:
loc = -1
merged['PRES'].insert(loc, pres)
merged['TEMP'].insert(loc, self.prod_desc.missing_float)
merged['DWPT'].insert(loc, self.prod_desc.missing_float)
merged['DRCT'].insert(loc, parts['PPCC']['DRCT'][iwind])
merged['SPED'].insert(loc, parts['PPCC']['SPED'][iwind])
merged['HGHT'].insert(loc, self.prod_desc.missing_float)
# Merge TROP
if num_trop_levels >= 1 or num_above_trop_levels >= 1:
if merged['PRES'][0] != self.prod_desc.missing_float:
pbot = merged['PRES'][0]
elif len(merged['PRES']) > 1:
pbot = merged['PRES'][1]
if pbot < parts['TRPA']['PRES'][1]:
pbot = 1050
else:
pbot = 1050
if num_trop_levels >= 1:
for itrp, pres in enumerate(parts['TRPA']['PRES']):
pres = abs(pres)
if (pres != self.prod_desc.missing_float
and parts['TRPA']['TEMP'][itrp] != self.prod_desc.missing_float
and pres != 0):
if pres > pbot:
continue
elif pres in merged['PRES']:
ploc = merged['PRES'].index(pres)
if merged['TEMP'][ploc] == self.prod_desc.missing_float:
merged['TEMP'][ploc] = parts['TRPA']['TEMP'][itrp]
merged['DWPT'][ploc] = parts['TRPA']['DWPT'][itrp]
if merged['DRCT'][ploc] == self.prod_desc.missing_float:
merged['DRCT'][ploc] = parts['TRPA']['DRCT'][itrp]
merged['SPED'][ploc] = parts['TRPA']['SPED'][itrp]
merged['HGHT'][ploc] = self.prod_desc.missing_float
else:
size = len(merged['PRES'])
loc = size - bisect.bisect_left(merged['PRES'][::-1], pres)
merged['PRES'].insert(loc, pres)
merged['TEMP'].insert(loc, parts['TRPA']['TEMP'][itrp])
merged['DWPT'].insert(loc, parts['TRPA']['DWPT'][itrp])
merged['DRCT'].insert(loc, parts['TRPA']['DRCT'][itrp])
merged['SPED'].insert(loc, parts['TRPA']['SPED'][itrp])
merged['HGHT'].insert(loc, self.prod_desc.missing_float)
pbot = pres
if num_above_trop_levels >= 1:
for itrp, pres in enumerate(parts['TRPC']['PRES']):
pres = abs(pres)
if (pres != self.prod_desc.missing_float
and parts['TRPC']['TEMP'][itrp] != self.prod_desc.missing_float
and pres != 0):
if pres > pbot:
continue
elif pres in merged['PRES']:
ploc = merged['PRES'].index(pres)
if merged['TEMP'][ploc] == self.prod_desc.missing_float:
merged['TEMP'][ploc] = parts['TRPC']['TEMP'][itrp]
merged['DWPT'][ploc] = parts['TRPC']['DWPT'][itrp]
if merged['DRCT'][ploc] == self.prod_desc.missing_float:
merged['DRCT'][ploc] = parts['TRPC']['DRCT'][itrp]
merged['SPED'][ploc] = parts['TRPC']['SPED'][itrp]
merged['HGHT'][ploc] = self.prod_desc.missing_float
else:
size = len(merged['PRES'])
loc = size - bisect.bisect_left(merged['PRES'][::-1], pres)
merged['PRES'].insert(loc, pres)
merged['TEMP'].insert(loc, parts['TRPC']['TEMP'][itrp])
merged['DWPT'].insert(loc, parts['TRPC']['DWPT'][itrp])
merged['DRCT'].insert(loc, parts['TRPC']['DRCT'][itrp])
merged['SPED'].insert(loc, parts['TRPC']['SPED'][itrp])
merged['HGHT'].insert(loc, self.prod_desc.missing_float)
pbot = pres
# Merge SIG temperature
if num_sigt_levels >= 1 or num_above_sigt_levels >= 1:
if merged['PRES'][0] != self.prod_desc.missing_float:
pbot = merged['PRES'][0]
elif len(merged['PRES']) > 1:
pbot = merged['PRES'][1]
if pbot < parts['TTBB']['PRES'][1]:
pbot = 1050
else:
pbot = 1050
if num_sigt_levels >= 1:
for isigt, pres in enumerate(parts['TTBB']['PRES']):
pres = abs(pres)
if (pres != self.prod_desc.missing_float
and parts['TTBB']['TEMP'][isigt] != self.prod_desc.missing_float
and pres != 0):
if pres > pbot:
continue
elif pres in merged['PRES']:
ploc = merged['PRES'].index(pres)
if merged['TEMP'][ploc] == self.prod_desc.missing_float:
merged['TEMP'][ploc] = parts['TTBB']['TEMP'][isigt]
merged['DWPT'][ploc] = parts['TTBB']['DWPT'][isigt]
else:
size = len(merged['PRES'])
loc = size - bisect.bisect_left(merged['PRES'][::-1], pres)
merged['PRES'].insert(loc, pres)
merged['TEMP'].insert(loc, parts['TTBB']['TEMP'][isigt])
merged['DWPT'].insert(loc, parts['TTBB']['DWPT'][isigt])
merged['DRCT'].insert(loc, self.prod_desc.missing_float)
merged['SPED'].insert(loc, self.prod_desc.missing_float)
merged['HGHT'].insert(loc, self.prod_desc.missing_float)
pbot = pres
if num_above_sigt_levels >= 1:
for isigt, pres in enumerate(parts['TTDD']['PRES']):
pres = abs(pres)
if (pres != self.prod_desc.missing_float
and parts['TTDD']['TEMP'][isigt] != self.prod_desc.missing_float
and pres != 0):
if pres > pbot:
continue
elif pres in merged['PRES']:
ploc = merged['PRES'].index(pres)
if merged['TEMP'][ploc] == self.prod_desc.missing_float:
merged['TEMP'][ploc] = parts['TTDD']['TEMP'][isigt]
merged['DWPT'][ploc] = parts['TTDD']['DWPT'][isigt]
else:
size = len(merged['PRES'])
loc = size - bisect.bisect_left(merged['PRES'][::-1], pres)
merged['PRES'].insert(loc, pres)
merged['TEMP'].insert(loc, parts['TTDD']['TEMP'][isigt])
merged['DWPT'].insert(loc, parts['TTDD']['DWPT'][isigt])
merged['DRCT'].insert(loc, self.prod_desc.missing_float)
merged['SPED'].insert(loc, self.prod_desc.missing_float)
merged['HGHT'].insert(loc, self.prod_desc.missing_float)
pbot = pres
# Interpolate heights
interp_moist_height(merged, self.prod_desc.missing_float)
# Merge SIG winds on pressure surfaces
if not ppbb_is_z or not ppdd_is_z:
if num_sigw_levels >= 1 or num_above_sigw_levels >= 1:
if merged['PRES'][0] != self.prod_desc.missing_float:
pbot = merged['PRES'][0]
elif len(merged['PRES']) > 1:
pbot = merged['PRES'][1]
else:
pbot = 0
if num_sigw_levels >= 1 and not ppbb_is_z:
for isigw, pres in enumerate(parts['PPBB']['PRES']):
pres = abs(pres)
if (pres != self.prod_desc.missing_float
and parts['PPBB']['DRCT'][isigw] != self.prod_desc.missing_float
and parts['PPBB']['SPED'][isigw] != self.prod_desc.missing_float
and pres != 0):
if pres > pbot:
continue
elif pres in merged['PRES']:
ploc = merged['PRES'].index(pres)
if (merged['DRCT'][ploc] == self.prod_desc.missing_float
or merged['SPED'][ploc] == self.prod_desc.missing_float):
merged['DRCT'][ploc] = parts['PPBB']['DRCT'][isigw]
merged['SPED'][ploc] = parts['PPBB']['SPED'][isigw]
else:
size = len(merged['PRES'])
loc = size - bisect.bisect_left(merged['PRES'][::-1], pres)
merged['PRES'].insert(loc, pres)
merged['DRCT'].insert(loc, parts['PPBB']['DRCT'][isigw])
merged['SPED'].insert(loc, parts['PPBB']['SPED'][isigw])
merged['TEMP'].insert(loc, self.prod_desc.missing_float)
merged['DWPT'].insert(loc, self.prod_desc.missing_float)
merged['HGHT'].insert(loc, self.prod_desc.missing_float)
pbot = pres
if num_above_sigw_levels >= 1 and not ppdd_is_z:
for isigw, pres in enumerate(parts['PPDD']['PRES']):
pres = abs(pres)
if (pres != self.prod_desc.missing_float
and parts['PPDD']['DRCT'][isigw] != self.prod_desc.missing_float
and parts['PPDD']['SPED'][isigw] != self.prod_desc.missing_float
and pres != 0):
if pres > pbot:
continue
elif pres in merged['PRES']:
ploc = merged['PRES'].index(pres)
if (merged['DRCT'][ploc] == self.prod_desc.missing_float
or merged['SPED'][ploc] == self.prod_desc.missing_float):
merged['DRCT'][ploc] = parts['PPDD']['DRCT'][isigw]
merged['SPED'][ploc] = parts['PPDD']['SPED'][isigw]
else:
size = len(merged['PRES'])
loc = size - bisect.bisect_left(merged['PRES'][::-1], pres)
merged['PRES'].insert(loc, pres)
merged['DRCT'].insert(loc, parts['PPDD']['DRCT'][isigw])
merged['SPED'].insert(loc, parts['PPDD']['SPED'][isigw])
merged['TEMP'].insert(loc, self.prod_desc.missing_float)
merged['DWPT'].insert(loc, self.prod_desc.missing_float)
merged['HGHT'].insert(loc, self.prod_desc.missing_float)
pbot = pres
# Merge max winds on pressure surfaces
if num_max_wind_levels >= 1 or num_above_max_wind_levels >= 1:
if merged['PRES'][0] != self.prod_desc.missing_float:
pbot = merged['PRES'][0]
elif len(merged['PRES']) > 1:
pbot = merged['PRES'][1]
else:
pbot = 0
if num_max_wind_levels >= 1:
for imxw, pres in enumerate(parts['MXWA']['PRES']):
pres = abs(pres)
if (pres != self.prod_desc.missing_float
and parts['MXWA']['DRCT'][imxw] != self.prod_desc.missing_float
and parts['MXWA']['SPED'][imxw] != self.prod_desc.missing_float
and pres != 0):
if pres > pbot:
continue
elif pres in merged['PRES']:
ploc = merged['PRES'].index(pres)
if (merged['DRCT'][ploc] == self.prod_desc.missing_float
or merged['SPED'][ploc] == self.prod_desc.missing_float):
merged['DRCT'][ploc] = parts['MXWA']['DRCT'][imxw]
merged['SPED'][ploc] = parts['MXWA']['SPED'][imxw]
else:
size = len(merged['PRES'])
loc = size - bisect.bisect_left(merged['PRES'][::-1], pres)
merged['PRES'].insert(loc, pres)
merged['DRCT'].insert(loc, parts['MXWA']['DRCT'][imxw])
merged['SPED'].insert(loc, parts['MXWA']['SPED'][imxw])
merged['TEMP'].insert(loc, self.prod_desc.missing_float)
merged['DWPT'].insert(loc, self.prod_desc.missing_float)
merged['HGHT'].insert(loc, self.prod_desc.missing_float)
pbot = pres
if num_above_max_wind_levels >= 1:
for imxw, pres in enumerate(parts['MXWC']['PRES']):
pres = abs(pres)
if (pres != self.prod_desc.missing_float
and parts['MXWC']['DRCT'][imxw] != self.prod_desc.missing_float
and parts['MXWC']['SPED'][imxw] != self.prod_desc.missing_float
and pres != 0):
if pres > pbot:
continue
elif pres in merged['PRES']:
ploc = merged['PRES'].index(pres)
if (merged['DRCT'][ploc] == self.prod_desc.missing_float
or merged['SPED'][ploc] == self.prod_desc.missing_float):
merged['DRCT'][ploc] = parts['MXWC']['DRCT'][imxw]
merged['SPED'][ploc] = parts['MXWC']['SPED'][imxw]
else:
size = len(merged['PRES'])
loc = size - bisect.bisect_left(merged['PRES'][::-1], pres)
merged['PRES'].insert(loc, pres)
merged['DRCT'].insert(loc, parts['MXWC']['DRCT'][imxw])
merged['SPED'].insert(loc, parts['MXWC']['SPED'][imxw])
merged['TEMP'].insert(loc, self.prod_desc.missing_float)
merged['DWPT'].insert(loc, self.prod_desc.missing_float)
merged['HGHT'].insert(loc, self.prod_desc.missing_float)
pbot = pres
# Interpolate height for SIG/MAX winds
interp_logp_height(merged, self.prod_desc.missing_float)
# Merge SIG winds on height surfaces
if ppbb_is_z or ppdd_is_z:
nsgw = num_sigw_levels if ppbb_is_z else 0
nasw = num_above_sigw_levels if ppdd_is_z else 0
if (nsgw >= 1 and (parts['PPBB']['HGHT'][0] == 0
or parts['PPBB']['HGHT'][0] == merged['HGHT'][0])):
istart = 1
else:
istart = 0
size = len(merged['HGHT'])
psfc = merged['PRES'][0]
zsfc = merged['HGHT'][0]
if (size >= 2 and psfc != self.prod_desc.missing_float
and zsfc != self.prod_desc.missing_float):
more = True
zold = merged['HGHT'][0]
znxt = merged['HGHT'][1]
ilev = 1
elif size >= 3:
more = True
zold = merged['HGHT'][1]
znxt = merged['HGHT'][2]
ilev = 2
else:
zold = self.prod_desc.missing_float
znxt = self.prod_desc.missing_float
if (zold == self.prod_desc.missing_float
or znxt == self.prod_desc.missing_float):
more = False
if istart <= nsgw:
above = False
i = istart
iend = nsgw
else:
above = True
i = 0
iend = nasw
while more and i < iend:
if not above:
hght = parts['PPBB']['HGHT'][i]
drct = parts['PPBB']['DRCT'][i]
sped = parts['PPBB']['SPED'][i]
else:
hght = parts['PPDD']['HGHT'][i]
drct = parts['PPDD']['DRCT'][i]
sped = parts['PPDD']['SPED'][i]
skip = False
if ((hght == self.prod_desc.missing_float
and drct == self.prod_desc.missing_float
and sped == self.prod_desc.missing_float)
or hght <= zold):
skip = True
elif abs(zold - hght) < 1:
skip = True
if (merged['DRCT'][ilev - 1] == self.prod_desc.missing_float
or merged['SPED'][ilev - 1] == self.prod_desc.missing_float):
merged['DRCT'][ilev - 1] = drct
merged['SPED'][ilev - 1] = sped
elif hght >= znxt:
while more and hght > znxt:
zold = znxt
ilev += 1
if ilev >= size:
more = False
else:
znxt = merged['HGHT'][ilev]
if znxt == self.prod_desc.missing_float:
more = False
if more and not skip:
if abs(znxt - hght) < 1:
if (merged['DRCT'][ilev - 1] == self.prod_desc.missing_float
or merged['SPED'][ilev - 1] == self.prod_desc.missing_float):
merged['DRCT'][ilev] = drct
merged['SPED'][ilev] = sped
else:
loc = bisect.bisect_left(merged['HGHT'], hght)
merged['HGHT'].insert(loc, hght)
merged['DRCT'].insert(loc, drct)
merged['SPED'].insert(loc, sped)
merged['PRES'].insert(loc, self.prod_desc.missing_float)
merged['TEMP'].insert(loc, self.prod_desc.missing_float)
merged['DWPT'].insert(loc, self.prod_desc.missing_float)
size += 1
ilev += 1
zold = hght
if not above and i == nsgw - 1:
above = True
i = 0
iend = nasw
else:
i += 1
# Interpolate misssing pressure with height
interp_logp_pressure(merged, self.prod_desc.missing_float)
# Interpolate missing data
interp_missing_data(merged, self.prod_desc.missing_float)
# Add below ground MAN data
if merged['PRES'][0] != self.prod_desc.missing_float and bgl > 0:
for ibgl in range(1, num_man_levels):
pres = parts['TTAA']['PRES'][ibgl]
if pres > merged['PRES'][0]:
loc = size - bisect.bisect_left(merged['PRES'][1:][::-1], pres)
merged['PRES'].insert(loc, pres)
merged['TEMP'].insert(loc, parts['TTAA']['TEMP'][ibgl])
merged['DWPT'].insert(loc, parts['TTAA']['DWPT'][ibgl])
merged['DRCT'].insert(loc, parts['TTAA']['DRCT'][ibgl])
merged['SPED'].insert(loc, parts['TTAA']['SPED'][ibgl])
merged['HGHT'].insert(loc, parts['TTAA']['HGHT'][ibgl])
size += 1
# Add text data, if it is included
if 'TXTA' in parts:
merged['TXTA'] = parts['TXTA']['TEXT']
if 'TXTB' in parts:
merged['TXTB'] = parts['TXTB']['TEXT']
if 'TXTC' in parts:
merged['TXTC'] = parts['TXTC']['TEXT']
if 'TXPB' in parts:
merged['TXPB'] = parts['TXPB']['TEXT']
return merged
def snxarray(self, station_id=None, station_number=None,
             date_time=None, state=None, country=None):
    """Select soundings and output as list of xarray Datasets.

    Subset the data by parameter values. The default is to not
    subset and return the entire dataset.

    Parameters
    ----------
    station_id : str or array-like of str
        Station ID of sounding site.
    station_number : int or array-like of int
        Station number of sounding site.
    date_time : datetime or array-like of datetime
        Valid/observed datetime of the sounding. Alternatively
        can be a string with the format YYYYmmddHHMM.
    state : str or array-like of str
        State where sounding site is located.
    country : str or array-like of str
        Country where sounding site is located.

    Returns
    -------
    list
        List of xarray.Dataset objects for each sounding.
    """
    # Normalize each filter argument to a list so the membership tests
    # below can treat scalar and array-like inputs uniformly.
    if station_id is not None:
        if (not isinstance(station_id, Iterable)
                or isinstance(station_id, str)):
            station_id = [station_id]
        station_id = [c.upper() for c in station_id]
    if station_number is not None:
        if not isinstance(station_number, Iterable):
            station_number = [station_number]
        station_number = [int(sn) for sn in station_number]
    if date_time is not None:
        if (not isinstance(date_time, Iterable)
                or isinstance(date_time, str)):
            date_time = [date_time]
        # Accept YYYYmmddHHMM strings as well as datetime objects.
        for i, dt in enumerate(date_time):
            if isinstance(dt, str):
                date_time[i] = datetime.strptime(dt, '%Y%m%d%H%M')
    # NOTE(review): for state/country (unlike station_id above) the
    # upper-casing only happens when a scalar was passed; a pre-built
    # list is used as-is — confirm this asymmetry is intended.
    if (state is not None
            and (not isinstance(state, Iterable)
                 or isinstance(state, str))):
        state = [state]
        state = [s.upper() for s in state]
    if (country is not None
            and (not isinstance(country, Iterable)
                 or isinstance(country, str))):
        country = [country]
        country = [c.upper() for c in country]
    # Figure out which columns to extract from the file.
    # Each filter keeps a sounding record only when its matching
    # attribute is in the requested subset.
    matched = self._sninfo.copy()
    if station_id is not None:
        matched = filter(
            lambda snd: snd if snd.ID in station_id else False,
            matched
        )
    if station_number is not None:
        matched = filter(
            lambda snd: snd if snd.NUMBER in station_number else False,
            matched
        )
    if date_time is not None:
        matched = filter(
            lambda snd: snd if snd.DATTIM in date_time else False,
            matched
        )
    if state is not None:
        matched = filter(
            lambda snd: snd if snd.STATE in state else False,
            matched
        )
    if country is not None:
        matched = filter(
            lambda snd: snd if snd.COUNTRY in country else False,
            matched
        )
    matched = list(matched)
    if len(matched) < 1:
        raise KeyError('No stations were matched with given parameters.')
    # (DTNO, SNDNO) pairs locate each matched sounding within the file.
    sndno = [(s.DTNO, s.SNDNO) for s in matched]
    if self.merged:
        data = self._unpack_merged(sndno)
    else:
        data = self._unpack_unmerged(sndno)
    soundings = []
    for snd in data:
        # Skip empty records and records with no pressure data.
        if snd is None or 'PRES' not in snd:
            continue
        station_pressure = snd['PRES'][0]
        radat_text = {}
        # Station metadata becomes Dataset attributes. pop() is used so
        # that only data variables remain in snd afterwards — the loop
        # over snd.items() below relies on this.
        attrs = {
            'station_id': snd.pop('STID'),
            'station_number': snd.pop('STNM'),
            'lat': snd.pop('SLAT'),
            'lon': snd.pop('SLON'),
            'elevation': snd.pop('SELV'),
            'station_pressure': station_pressure,
            'state': snd.pop('STAT'),
            'country': snd.pop('COUN'),
        }
        # Collect any RADAT text sections into a single attribute.
        if 'TXTA' in snd:
            radat_text['txta'] = snd.pop('TXTA')
        if 'TXTB' in snd:
            radat_text['txtb'] = snd.pop('TXTB')
        if 'TXTC' in snd:
            radat_text['txtc'] = snd.pop('TXTC')
        if 'TXPB' in snd:
            radat_text['txpb'] = snd.pop('TXPB')
        if radat_text:
            attrs['RADAT'] = radat_text
        dt = datetime.combine(snd.pop('DATE'), snd.pop('TIME'))
        pres = np.array(snd.pop('PRES'))
        var = {}
        for param, values in snd.items():
            # Add a leading time dimension and mask missing values.
            values = np.array(values)[np.newaxis, ...]
            maskval = np.ma.array(values, mask=values == self.prod_desc.missing_float,
                                  dtype=np.float32)
            var[param.lower()] = (['time', 'pres'], maskval)
        xrds = xr.Dataset(var,
                          coords={'time': np.atleast_1d(dt), 'pres': pres},
                          attrs=attrs)
        # Sort to fix GEMPAK surface data at first level
        xrds = xrds.sortby('pres', ascending=False)
        soundings.append(xrds)
    return soundings
class GempakSurface(GempakFile):
    """Subclass of GempakFile specific to GEMPAK surface data.

    On construction, the row and column headers are read from the file,
    the surface file layout ('standard', 'ship', or 'climate') is
    detected, and an index of all stations/reports (``self._sfinfo``)
    is built for later subsetting via `sfjson`/`nearest_time`.
    """

    def __init__(self, file, *args, **kwargs):
        """Instantiate GempakSurface object from file."""
        super().__init__(file)

        # Row Headers
        self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.row_headers_ptr))
        self.row_headers = []
        row_headers_info = self._key_types(self.row_keys)
        row_headers_info.extend([(None, None)])
        row_headers_fmt = NamedStruct(row_headers_info, self.prefmt, 'RowHeaders')
        for _ in range(1, self.prod_desc.rows + 1):
            # Only header slots flagged as used contain valid data.
            if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
                self.row_headers.append(self._buffer.read_struct(row_headers_fmt))

        # Column Headers
        self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.column_headers_ptr))
        self.column_headers = []
        column_headers_info = self._key_types(self.column_keys)
        column_headers_info.extend([(None, None)])
        column_headers_fmt = NamedStruct(column_headers_info, self.prefmt, 'ColumnHeaders')
        for _ in range(1, self.prod_desc.columns + 1):
            if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
                self.column_headers.append(self._buffer.read_struct(column_headers_fmt))

        self._get_surface_type()

        # Build an index of every report present in the file. The three
        # layouts differ in whether station metadata lives in the row or
        # column headers (and ship files have a single row).
        self._sfinfo = []
        if self.surface_type == 'standard':
            for irow, row_head in enumerate(self.row_headers):
                for icol, col_head in enumerate(self.column_headers):
                    for iprt in range(len(self.parts)):
                        pointer = (self.prod_desc.data_block_ptr
                                   + (irow * self.prod_desc.columns * self.prod_desc.parts)
                                   + (icol * self.prod_desc.parts + iprt))
                        self._buffer.jump_to(self._start, _word_to_position(pointer))
                        data_ptr = self._buffer.read_int(4, self.endian, False)
                        # A non-zero pointer means data exists for this report.
                        if data_ptr:
                            self._sfinfo.append(
                                Surface(
                                    irow,
                                    icol,
                                    datetime.combine(row_head.DATE, row_head.TIME),
                                    col_head.STID + col_head.STD2,
                                    col_head.STNM,
                                    col_head.SLAT,
                                    col_head.SLON,
                                    col_head.SELV,
                                    col_head.STAT,
                                    col_head.COUN,
                                )
                            )
        elif self.surface_type == 'ship':
            # Ship files store everything (station and time) in the
            # column headers and have exactly one row.
            irow = 0
            for icol, col_head in enumerate(self.column_headers):
                for iprt in range(len(self.parts)):
                    pointer = (self.prod_desc.data_block_ptr
                               + (irow * self.prod_desc.columns * self.prod_desc.parts)
                               + (icol * self.prod_desc.parts + iprt))
                    self._buffer.jump_to(self._start, _word_to_position(pointer))
                    data_ptr = self._buffer.read_int(4, self.endian, False)
                    if data_ptr:
                        self._sfinfo.append(
                            Surface(
                                irow,
                                icol,
                                datetime.combine(col_head.DATE, col_head.TIME),
                                col_head.STID + col_head.STD2,
                                col_head.STNM,
                                col_head.SLAT,
                                col_head.SLON,
                                col_head.SELV,
                                col_head.STAT,
                                col_head.COUN,
                            )
                        )
        elif self.surface_type == 'climate':
            # Climate files swap the roles: stations in row headers,
            # dates/times in column headers.
            for icol, col_head in enumerate(self.column_headers):
                for irow, row_head in enumerate(self.row_headers):
                    for iprt in range(len(self.parts)):
                        pointer = (self.prod_desc.data_block_ptr
                                   + (irow * self.prod_desc.columns * self.prod_desc.parts)
                                   + (icol * self.prod_desc.parts + iprt))
                        self._buffer.jump_to(self._start, _word_to_position(pointer))
                        data_ptr = self._buffer.read_int(4, self.endian, False)
                        if data_ptr:
                            self._sfinfo.append(
                                Surface(
                                    irow,
                                    icol,
                                    datetime.combine(col_head.DATE, col_head.TIME),
                                    row_head.STID + row_head.STD2,
                                    row_head.STNM,
                                    row_head.SLAT,
                                    row_head.SLON,
                                    row_head.SELV,
                                    row_head.STAT,
                                    row_head.COUN,
                                )
                            )
        else:
            raise TypeError('Unknown surface type {}'.format(self.surface_type))

    def sfinfo(self):
        """Return station information."""
        return self._sfinfo

    def _get_surface_type(self):
        """Determine type of surface file.

        Sets ``self.surface_type`` to 'ship', 'standard', or 'climate'
        based on where the DATE key lives and the number of rows.

        Raises
        ------
        TypeError
            If the layout cannot be identified.
        """
        if len(self.row_headers) == 1:
            self.surface_type = 'ship'
        elif 'DATE' in self.row_keys:
            self.surface_type = 'standard'
        elif 'DATE' in self.column_keys:
            self.surface_type = 'climate'
        else:
            raise TypeError('Unknown surface data type')

    def _key_types(self, keys):
        """Determine header information from a set of keys.

        Maps each known GEMPAK header key to a (name, struct-format[,
        decoder]) tuple for NamedStruct; unrecognized keys default to a
        plain integer. SLAT/SLON are stored as hundredths of a degree,
        hence the /100 decoder.
        """
        header_info = [(key, '4s', self._decode_strip) if key == 'STID'
                       else (key, 'i') if key == 'STNM'
                       else (key, 'i', lambda x: x / 100) if key == 'SLAT'
                       else (key, 'i', lambda x: x / 100) if key == 'SLON'
                       else (key, 'i') if key == 'SELV'
                       else (key, '4s', self._decode_strip) if key == 'STAT'
                       else (key, '4s', self._decode_strip) if key == 'COUN'
                       else (key, '4s', self._decode_strip) if key == 'STD2'
                       else (key, 'i', self._make_date) if key == 'DATE'
                       else (key, 'i', self._make_time) if key == 'TIME'
                       else (key, 'i')
                       for key in keys]

        return header_info

    def _unpack_climate(self, sfcno):
        """Unpack a climate surface data file.

        Parameters
        ----------
        sfcno : iterable of tuple
            (row, column) indices of the reports to unpack.

        Returns
        -------
        list of dict
            One dict per report, mapping station metadata keys and
            parameter names to values.
        """
        stations = []
        for icol, col_head in enumerate(self.column_headers):
            for irow, row_head in enumerate(self.row_headers):
                if (irow, icol) not in sfcno:
                    continue
                # Climate layout: station info from the row header,
                # date/time from the column header.
                station = {'STID': row_head.STID,
                           'STNM': row_head.STNM,
                           'SLAT': row_head.SLAT,
                           'SLON': row_head.SLON,
                           'SELV': row_head.SELV,
                           'STAT': row_head.STAT,
                           'COUN': row_head.COUN,
                           'STD2': row_head.STD2,
                           'SPRI': row_head.SPRI,
                           'DATE': col_head.DATE,
                           'TIME': col_head.TIME,
                           }
                for iprt, part in enumerate(self.parts):
                    pointer = (self.prod_desc.data_block_ptr
                               + (irow * self.prod_desc.columns * self.prod_desc.parts)
                               + (icol * self.prod_desc.parts + iprt))
                    self._buffer.jump_to(self._start, _word_to_position(pointer))
                    self.data_ptr = self._buffer.read_int(4, self.endian, False)
                    if not self.data_ptr:
                        continue
                    self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
                    self.data_header_length = self._buffer.read_int(4, self.endian, False)
                    data_header = self._buffer.set_mark()
                    self._buffer.jump_to(data_header,
                                         _word_to_position(part.header_length + 1))
                    lendat = self.data_header_length - part.header_length

                    fmt_code = {
                        DataTypes.real: 'f',
                        DataTypes.realpack: 'i',
                        DataTypes.character: 's',
                    }.get(part.data_type)

                    if fmt_code is None:
                        raise NotImplementedError('No methods for data type {}'
                                                  .format(part.data_type))
                    if fmt_code == 's':
                        # Character data length is in words; convert to bytes.
                        lendat *= BYTES_PER_WORD

                    packed_buffer = (
                        self._buffer.read_struct(
                            struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
                        )
                    )

                    parameters = self.parameters[iprt]

                    if part.data_type == DataTypes.realpack:
                        unpacked = self._unpack_real(packed_buffer, parameters, lendat)
                        for iprm, param in enumerate(parameters['name']):
                            station[param] = unpacked[iprm]
                    elif part.data_type == DataTypes.character:
                        for iprm, param in enumerate(parameters['name']):
                            station[param] = self._decode_strip(packed_buffer[iprm])
                    else:
                        for iprm, param in enumerate(parameters['name']):
                            station[param] = np.array(
                                packed_buffer[iprm], dtype=np.float32
                            )

                stations.append(station)
        return stations

    def _unpack_ship(self, sfcno):
        """Unpack ship (moving observation) surface data file.

        Parameters
        ----------
        sfcno : iterable of tuple
            (row, column) indices of the reports to unpack. Ship files
            have a single row, so the row index is always 0.

        Returns
        -------
        list of dict
            One dict per report, mapping station metadata keys and
            parameter names to values.
        """
        stations = []
        irow = 0
        for icol, col_head in enumerate(self.column_headers):
            if (irow, icol) not in sfcno:
                continue
            # Ship layout: everything comes from the column header.
            station = {'STID': col_head.STID,
                       'STNM': col_head.STNM,
                       'SLAT': col_head.SLAT,
                       'SLON': col_head.SLON,
                       'SELV': col_head.SELV,
                       'STAT': col_head.STAT,
                       'COUN': col_head.COUN,
                       'STD2': col_head.STD2,
                       'SPRI': col_head.SPRI,
                       'DATE': col_head.DATE,
                       'TIME': col_head.TIME,
                       }
            for iprt, part in enumerate(self.parts):
                pointer = (self.prod_desc.data_block_ptr
                           + (irow * self.prod_desc.columns * self.prod_desc.parts)
                           + (icol * self.prod_desc.parts + iprt))
                self._buffer.jump_to(self._start, _word_to_position(pointer))
                self.data_ptr = self._buffer.read_int(4, self.endian, False)
                if not self.data_ptr:
                    continue
                self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
                self.data_header_length = self._buffer.read_int(4, self.endian, False)
                data_header = self._buffer.set_mark()
                self._buffer.jump_to(data_header,
                                     _word_to_position(part.header_length + 1))
                lendat = self.data_header_length - part.header_length

                fmt_code = {
                    DataTypes.real: 'f',
                    DataTypes.realpack: 'i',
                    DataTypes.character: 's',
                }.get(part.data_type)

                if fmt_code is None:
                    raise NotImplementedError('No methods for data type {}'
                                              .format(part.data_type))
                if fmt_code == 's':
                    # Character data length is in words; convert to bytes.
                    lendat *= BYTES_PER_WORD

                packed_buffer = (
                    self._buffer.read_struct(
                        struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
                    )
                )

                parameters = self.parameters[iprt]

                if part.data_type == DataTypes.realpack:
                    unpacked = self._unpack_real(packed_buffer, parameters, lendat)
                    for iprm, param in enumerate(parameters['name']):
                        station[param] = unpacked[iprm]
                elif part.data_type == DataTypes.character:
                    for iprm, param in enumerate(parameters['name']):
                        station[param] = self._decode_strip(packed_buffer[iprm])
                else:
                    for iprm, param in enumerate(parameters['name']):
                        station[param] = np.array(
                            packed_buffer[iprm], dtype=np.float32
                        )

            stations.append(station)
        return stations

    def _unpack_standard(self, sfcno):
        """Unpack a standard surface data file.

        Parameters
        ----------
        sfcno : iterable of tuple
            (row, column) indices of the reports to unpack.

        Returns
        -------
        list of dict
            One dict per report, mapping station metadata keys and
            parameter names to values.
        """
        stations = []
        for irow, row_head in enumerate(self.row_headers):
            for icol, col_head in enumerate(self.column_headers):
                if (irow, icol) not in sfcno:
                    continue
                # Standard layout: station info from the column header,
                # date/time from the row header.
                station = {'STID': col_head.STID,
                           'STNM': col_head.STNM,
                           'SLAT': col_head.SLAT,
                           'SLON': col_head.SLON,
                           'SELV': col_head.SELV,
                           'STAT': col_head.STAT,
                           'COUN': col_head.COUN,
                           'STD2': col_head.STD2,
                           'SPRI': col_head.SPRI,
                           'DATE': row_head.DATE,
                           'TIME': row_head.TIME,
                           }
                for iprt, part in enumerate(self.parts):
                    pointer = (self.prod_desc.data_block_ptr
                               + (irow * self.prod_desc.columns * self.prod_desc.parts)
                               + (icol * self.prod_desc.parts + iprt))
                    self._buffer.jump_to(self._start, _word_to_position(pointer))
                    self.data_ptr = self._buffer.read_int(4, self.endian, False)
                    if not self.data_ptr:
                        continue
                    self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
                    self.data_header_length = self._buffer.read_int(4, self.endian, False)
                    data_header = self._buffer.set_mark()
                    # Part headers of length 1 hold an observation time
                    # (ihhmm); length 2 additionally holds a report count
                    # (nreps). Neither is currently used, so we skip past
                    # the header words below.
                    self._buffer.jump_to(data_header,
                                         _word_to_position(part.header_length + 1))
                    lendat = self.data_header_length - part.header_length

                    fmt_code = {
                        DataTypes.real: 'f',
                        DataTypes.realpack: 'i',
                        DataTypes.character: 's',
                    }.get(part.data_type)

                    if fmt_code is None:
                        raise NotImplementedError('No methods for data type {}'
                                                  .format(part.data_type))
                    if fmt_code == 's':
                        # Character data length is in words; convert to bytes.
                        lendat *= BYTES_PER_WORD

                    packed_buffer = (
                        self._buffer.read_struct(
                            struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
                        )
                    )

                    parameters = self.parameters[iprt]

                    if part.data_type == DataTypes.realpack:
                        unpacked = self._unpack_real(packed_buffer, parameters, lendat)
                        for iprm, param in enumerate(parameters['name']):
                            station[param] = unpacked[iprm]
                    elif part.data_type == DataTypes.character:
                        for iprm, param in enumerate(parameters['name']):
                            station[param] = self._decode_strip(packed_buffer[iprm])
                    else:
                        for iprm, param in enumerate(parameters['name']):
                            station[param] = packed_buffer[iprm]

                stations.append(station)
        return stations

    def nearest_time(self, date_time, station_id=None, station_number=None):
        """Get nearest observation to given time for selected stations.

        Parameters
        ----------
        date_time : datetime or array-like of datetime
            Valid/observed datetime of the surface station. Alternatively
            object or a string with the format YYYYmmddHHMM.
        station_id : str or array-like of str
            Station ID of the surface station.
        station_number : int or array-like of int
            Station number of the surface station.

        Returns
        -------
        list
            List of dicts/JSONs for each surface station.

        Notes
        -----
        One of either station_id or station_number must be used. If both
        are present, station_id will take precedence.
        """
        if isinstance(date_time, str):
            date_time = datetime.strptime(date_time, '%Y%m%d%H%M')

        if station_id is None and station_number is None:
            raise ValueError('Must have either station_id or station_number')

        # station_id takes precedence when both are given.
        if station_id is not None and station_number is not None:
            station_number = None

        if (station_id is not None
                and (not isinstance(station_id, Iterable)
                     or isinstance(station_id, str))):
            station_id = [station_id]
            station_id = [c.upper() for c in station_id]

        if station_number is not None and not isinstance(station_number, Iterable):
            station_number = [station_number]
            station_number = [int(sn) for sn in station_number]

        time_matched = []
        if station_id:
            for stn in station_id:
                matched = self.sfjson(station_id=stn)
                # Pick the observation whose time is closest to date_time.
                nearest = min(
                    matched,
                    key=lambda d: abs(d['properties']['date_time'] - date_time)
                )
                time_matched.append(nearest)

        if station_number:
            # BUGFIX: previously iterated over station_id here, which is
            # None whenever only station_number is supplied, raising
            # TypeError. Iterate the station numbers instead.
            for stn in station_number:
                matched = self.sfjson(station_number=stn)
                nearest = min(
                    matched,
                    key=lambda d: abs(d['properties']['date_time'] - date_time)
                )
                time_matched.append(nearest)

        return time_matched

    def sfjson(self, station_id=None, station_number=None,
               date_time=None, state=None, country=None):
        """Select surface stations and output as list of JSON objects.

        Subset the data by parameter values. The default is to not
        subset and return the entire dataset.

        Parameters
        ----------
        station_id : str or array-like of str
            Station ID of the surface station.
        station_number : int or array-like of int
            Station number of the surface station.
        date_time : datetime or array-like of datetime
            Valid/observed datetime of the surface station. Alternatively
            object or a string with the format YYYYmmddHHMM.
        state : str or array-like of str
            State where surface station is located.
        country : str or array-like of str
            Country where surface station is located.

        Returns
        -------
        list
            List of dicts/JSONs for each surface station.
        """
        # NOTE(review): the upper-casing/int-conversion below only runs
        # when a scalar is passed; a pre-built list is used as-is —
        # confirm callers always pass normalized lists.
        if (station_id is not None
                and (not isinstance(station_id, Iterable)
                     or isinstance(station_id, str))):
            station_id = [station_id]
            station_id = [c.upper() for c in station_id]
        if station_number is not None and not isinstance(station_number, Iterable):
            station_number = [station_number]
            station_number = [int(sn) for sn in station_number]
        if date_time is not None:
            if (not isinstance(date_time, Iterable)
                    or isinstance(date_time, str)):
                date_time = [date_time]
            # Accept YYYYmmddHHMM strings as well as datetime objects.
            for i, dt in enumerate(date_time):
                if isinstance(dt, str):
                    date_time[i] = datetime.strptime(dt, '%Y%m%d%H%M')
        if (state is not None
                and (not isinstance(state, Iterable)
                     or isinstance(state, str))):
            state = [state]
            state = [s.upper() for s in state]
        if (country is not None
                and (not isinstance(country, Iterable)
                     or isinstance(country, str))):
            country = [country]
            country = [c.upper() for c in country]

        # Figure out which columns to extract from the file
        matched = self._sfinfo.copy()
        if station_id is not None:
            matched = filter(
                lambda sfc: sfc if sfc.ID in station_id else False,
                matched
            )
        if station_number is not None:
            matched = filter(
                lambda sfc: sfc if sfc.NUMBER in station_number else False,
                matched
            )
        if date_time is not None:
            matched = filter(
                lambda sfc: sfc if sfc.DATTIM in date_time else False,
                matched
            )
        if state is not None:
            matched = filter(
                lambda sfc: sfc if sfc.STATE in state else False,
                matched
            )
        if country is not None:
            matched = filter(
                lambda sfc: sfc if sfc.COUNTRY in country else False,
                matched
            )
        matched = list(matched)

        if len(matched) < 1:
            raise KeyError('No stations were matched with given parameters.')

        sfcno = [(s.ROW, s.COL) for s in matched]

        # surface_type is guaranteed by _get_surface_type() to be one of
        # these three values, so data is always bound here.
        if self.surface_type == 'standard':
            data = self._unpack_standard(sfcno)
        elif self.surface_type == 'ship':
            data = self._unpack_ship(sfcno)
        elif self.surface_type == 'climate':
            data = self._unpack_climate(sfcno)

        stnarr = []
        for stn in data:
            if stn:
                # Split station metadata (popped) from observed values
                # (whatever remains in stn afterwards).
                stnobj = {
                    'properties': {
                        'date_time': datetime.combine(stn.pop('DATE'),
                                                      stn.pop('TIME')),
                        'station_id': stn.pop('STID') + stn.pop('STD2'),
                        'station_number': stn.pop('STNM'),
                        'longitude': stn.pop('SLON'),
                        'latitude': stn.pop('SLAT'),
                        'elevation': stn.pop('SELV'),
                        'state': stn.pop('STAT'),
                        'country': stn.pop('COUN'),
                        'priority': stn.pop('SPRI'),
                    },
                    'values': {name.lower(): ob for name, ob in stn.items()}
                }
                stnarr.append(stnobj)
        return stnarr
| [
"logging.getLogger",
"ctypes.c_int32",
"pyproj.CRS.from_dict",
"math.cos",
"numpy.array",
"datetime.timedelta",
"numpy.repeat",
"numpy.linspace",
"numpy.meshgrid",
"datetime.datetime.combine",
"collections.namedtuple",
"numpy.ones",
"numpy.ma.array",
"struct.pack",
"struct.Struct",
"bi... | [((707, 734), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (724, 734), False, 'import logging\n'), ((2529, 2630), 'collections.namedtuple', 'namedtuple', (['"""Grid"""', "['GRIDNO', 'TYPE', 'DATTIM1', 'DATTIM2', 'PARM', 'LEVEL1', 'LEVEL2', 'COORD']"], {}), "('Grid', ['GRIDNO', 'TYPE', 'DATTIM1', 'DATTIM2', 'PARM',\n 'LEVEL1', 'LEVEL2', 'COORD'])\n", (2539, 2630), False, 'from collections import namedtuple\n'), ((2674, 2787), 'collections.namedtuple', 'namedtuple', (['"""Sounding"""', "['DTNO', 'SNDNO', 'DATTIM', 'ID', 'NUMBER', 'LAT', 'LON', 'ELEV', 'STATE',\n 'COUNTRY']"], {}), "('Sounding', ['DTNO', 'SNDNO', 'DATTIM', 'ID', 'NUMBER', 'LAT',\n 'LON', 'ELEV', 'STATE', 'COUNTRY'])\n", (2684, 2787), False, 'from collections import namedtuple\n'), ((2838, 2947), 'collections.namedtuple', 'namedtuple', (['"""Surface"""', "['ROW', 'COL', 'DATTIM', 'ID', 'NUMBER', 'LAT', 'LON', 'ELEV', 'STATE',\n 'COUNTRY']"], {}), "('Surface', ['ROW', 'COL', 'DATTIM', 'ID', 'NUMBER', 'LAT', 'LON',\n 'ELEV', 'STATE', 'COUNTRY'])\n", (2848, 2947), False, 'from collections import namedtuple\n'), ((26284, 26305), 'pyproj.Proj', 'pyproj.Proj', (['self.crs'], {}), '(self.crs)\n', (26295, 26305), False, 'import pyproj\n'), ((26629, 26677), 'numpy.linspace', 'np.linspace', (['llx', 'urx', 'self.kx'], {'dtype': 'np.float32'}), '(llx, urx, self.kx, dtype=np.float32)\n', (26640, 26677), True, 'import numpy as np\n'), ((26695, 26743), 'numpy.linspace', 'np.linspace', (['lly', 'ury', 'self.ky'], {'dtype': 'np.float32'}), '(lly, ury, self.ky, dtype=np.float32)\n', (26706, 26743), True, 'import numpy as np\n'), ((26761, 26800), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {'copy': '(False)'}), '(self.x, self.y, copy=False)\n', (26772, 26800), True, 'import numpy as np\n'), ((13595, 13615), 'struct.pack', 'struct.pack', (['"""@i"""', '(1)'], {}), "('@i', 1)\n", (13606, 13615), False, 'import struct\n'), ((16984, 17025), 'numpy.ones', 'np.ones', 
(['(npack * nparms)'], {'dtype': 'np.float32'}), '(npack * nparms, dtype=np.float32)\n', (16991, 17025), True, 'import numpy as np\n'), ((22965, 23072), 'pyproj.CRS.from_dict', 'pyproj.CRS.from_dict', (["{'proj': proj, 'lat_0': lat_0, 'lon_0': lon_0, 'ellps': ellps, 'R':\n earth_radius}"], {}), "({'proj': proj, 'lat_0': lat_0, 'lon_0': lon_0, 'ellps':\n ellps, 'R': earth_radius})\n", (22985, 23072), False, 'import pyproj\n'), ((106476, 106518), 'datetime.datetime.strptime', 'datetime.strptime', (['date_time', '"""%Y%m%d%H%M"""'], {}), "(date_time, '%Y%m%d%H%M')\n", (106493, 106518), False, 'from datetime import datetime, timedelta\n'), ((14505, 14549), 'datetime.datetime.strptime', 'datetime.strptime', (['f"""{dattim:06d}"""', '"""%y%m%d"""'], {}), "(f'{dattim:06d}', '%y%m%d')\n", (14522, 14549), False, 'from datetime import datetime, timedelta\n'), ((15046, 15085), 'datetime.timedelta', 'timedelta', ([], {'hours': 'hours', 'minutes': 'minutes'}), '(hours=hours, minutes=minutes)\n', (15055, 15085), False, 'from datetime import datetime, timedelta\n'), ((15779, 15805), 'ctypes.c_int32', 'ctypes.c_int32', (['(i << shift)'], {}), '(i << shift)\n', (15793, 15805), False, 'import ctypes\n'), ((16577, 16610), 'datetime.datetime.strptime', 'datetime.strptime', (['string', '"""%H%M"""'], {}), "(string, '%H%M')\n", (16594, 16610), False, 'from datetime import datetime, timedelta\n'), ((27381, 27426), 'numpy.zeros', 'np.zeros', (['(self.ky * self.kx)'], {'dtype': 'np.float32'}), '(self.ky * self.kx, dtype=np.float32)\n', (27389, 27426), True, 'import numpy as np\n'), ((57736, 57772), 'numpy.isclose', 'np.isclose', (['first_man_p', 'first_sig_p'], {}), '(first_man_p, first_sig_p)\n', (57746, 57772), True, 'import numpy as np\n'), ((87795, 87882), 'numpy.ma.array', 'np.ma.array', (['values'], {'mask': '(values == self.prod_desc.missing_float)', 'dtype': 'np.float32'}), '(values, mask=values == self.prod_desc.missing_float, dtype=np.\n float32)\n', (87806, 87882), True, 'import 
numpy as np\n'), ((7925, 7981), 'numpy.repeat', 'np.repeat', (["('4s', 'i', 'i')", 'self.prod_desc.file_headers'], {}), "(('4s', 'i', 'i'), self.prod_desc.file_headers)\n", (7934, 7981), True, 'import numpy as np\n'), ((15578, 15601), 'struct.pack', 'struct.pack', (['"""i"""', 'coord'], {}), "('i', coord)\n", (15589, 15601), False, 'import struct\n'), ((23689, 23796), 'pyproj.CRS.from_dict', 'pyproj.CRS.from_dict', (["{'proj': proj, 'lat_0': lat_0, 'lon_0': lon_0, 'ellps': ellps, 'R':\n earth_radius}"], {}), "({'proj': proj, 'lat_0': lat_0, 'lon_0': lon_0, 'ellps':\n ellps, 'R': earth_radius})\n", (23709, 23796), False, 'import pyproj\n'), ((24424, 24544), 'pyproj.CRS.from_dict', 'pyproj.CRS.from_dict', (["{'proj': proj, 'lat_0': avglat, 'lon_0': lon_0, 'k_0': k_0, 'ellps': ellps,\n 'R': earth_radius}"], {}), "({'proj': proj, 'lat_0': avglat, 'lon_0': lon_0, 'k_0':\n k_0, 'ellps': ellps, 'R': earth_radius})\n", (24444, 24544), False, 'import pyproj\n'), ((25000, 25123), 'pyproj.CRS.from_dict', 'pyproj.CRS.from_dict', (["{'proj': proj, 'lon_0': lon_0, 'lat_1': lat_1, 'lat_2': lat_2, 'ellps':\n ellps, 'R': earth_radius}"], {}), "({'proj': proj, 'lon_0': lon_0, 'lat_1': lat_1, 'lat_2':\n lat_2, 'ellps': ellps, 'R': earth_radius})\n", (25020, 25123), False, 'import pyproj\n'), ((27331, 27356), 'struct.Struct', 'struct.Struct', (['buffer_fmt'], {}), '(buffer_fmt)\n', (27344, 27356), False, 'import struct\n'), ((29504, 29550), 'numpy.zeros', 'np.zeros', (['(self.ky, self.kx)'], {'dtype': 'np.float32'}), '((self.ky, self.kx), dtype=np.float32)\n', (29512, 29550), True, 'import numpy as np\n'), ((36473, 36508), 'datetime.datetime.strptime', 'datetime.strptime', (['dt', '"""%Y%m%d%H%M"""'], {}), "(dt, '%Y%m%d%H%M')\n", (36490, 36508), False, 'from datetime import datetime, timedelta\n'), ((37143, 37178), 'datetime.datetime.strptime', 'datetime.strptime', (['dt', '"""%Y%m%d%H%M"""'], {}), "(dt, '%Y%m%d%H%M')\n", (37160, 37178), False, 'from datetime import datetime, 
timedelta\n'), ((84786, 84821), 'datetime.datetime.strptime', 'datetime.strptime', (['dt', '"""%Y%m%d%H%M"""'], {}), "(dt, '%Y%m%d%H%M')\n", (84803, 84821), False, 'from datetime import datetime, timedelta\n'), ((87735, 87751), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (87743, 87751), True, 'import numpy as np\n'), ((100940, 100989), 'struct.Struct', 'struct.Struct', (['f"""{self.prefmt}{lendat}{fmt_code}"""'], {}), "(f'{self.prefmt}{lendat}{fmt_code}')\n", (100953, 100989), False, 'import struct\n'), ((109645, 109680), 'datetime.datetime.strptime', 'datetime.strptime', (['dt', '"""%Y%m%d%H%M"""'], {}), "(dt, '%Y%m%d%H%M')\n", (109662, 109680), False, 'from datetime import datetime, timedelta\n'), ((9369, 9401), 'struct.Struct', 'struct.Struct', (["(self.prefmt + 'f')"], {}), "(self.prefmt + 'f')\n", (9382, 9401), False, 'import struct\n'), ((29451, 29483), 'struct.Struct', 'struct.Struct', (['packed_buffer_fmt'], {}), '(packed_buffer_fmt)\n', (29464, 29483), False, 'import struct\n'), ((32509, 32560), 'numpy.zeros', 'np.zeros', (['self.grid_meta_int.kxky'], {'dtype': 'np.float32'}), '(self.grid_meta_int.kxky, dtype=np.float32)\n', (32517, 32560), True, 'import numpy as np\n'), ((40500, 40578), 'numpy.ma.array', 'np.ma.array', (['data'], {'mask': '(data == self.prod_desc.missing_float)', 'dtype': 'np.float32'}), '(data, mask=data == self.prod_desc.missing_float, dtype=np.float32)\n', (40511, 40578), True, 'import numpy as np\n'), ((47388, 47437), 'struct.Struct', 'struct.Struct', (['f"""{self.prefmt}{lendat}{fmt_code}"""'], {}), "(f'{self.prefmt}{lendat}{fmt_code}')\n", (47401, 47437), False, 'import struct\n'), ((50605, 50654), 'struct.Struct', 'struct.Struct', (['f"""{self.prefmt}{lendat}{fmt_code}"""'], {}), "(f'{self.prefmt}{lendat}{fmt_code}')\n", (50618, 50654), False, 'import struct\n'), ((58667, 58703), 'numpy.isclose', 'np.isclose', (['first_man_p', 'first_sig_p'], {}), '(first_man_p, first_sig_p)\n', (58677, 58703), True, 'import numpy as 
np\n'), ((80865, 80905), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['HGHT']", 'hght'], {}), "(merged['HGHT'], hght)\n", (80883, 80905), False, 'import bisect\n'), ((82133, 82183), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][1:][::-1]", 'pres'], {}), "(merged['PRES'][1:][::-1], pres)\n", (82151, 82183), False, 'import bisect\n'), ((88063, 88080), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (88076, 88080), True, 'import numpy as np\n'), ((97678, 97727), 'struct.Struct', 'struct.Struct', (['f"""{self.prefmt}{lendat}{fmt_code}"""'], {}), "(f'{self.prefmt}{lendat}{fmt_code}')\n", (97691, 97727), False, 'import struct\n'), ((104674, 104723), 'struct.Struct', 'struct.Struct', (['f"""{self.prefmt}{lendat}{fmt_code}"""'], {}), "(f'{self.prefmt}{lendat}{fmt_code}')\n", (104687, 104723), False, 'import struct\n'), ((24170, 24186), 'math.cos', 'math.cos', (['avglat'], {}), '(avglat)\n', (24178, 24186), False, 'import math\n'), ((25573, 25662), 'pyproj.CRS.from_dict', 'pyproj.CRS.from_dict', (["{'proj': proj, 'zone': zone, 'ellps': ellps, 'R': earth_radius}"], {}), "({'proj': proj, 'zone': zone, 'ellps': ellps, 'R':\n earth_radius})\n", (25593, 25662), False, 'import pyproj\n'), ((25851, 25942), 'pyproj.CRS.from_dict', 'pyproj.CRS.from_dict', (["{'proj': proj, 'lon_0': lon_0, 'ellps': ellps, 'R': earth_radius}"], {}), "({'proj': proj, 'lon_0': lon_0, 'ellps': ellps, 'R':\n earth_radius})\n", (25871, 25942), False, 'import pyproj\n'), ((32614, 32646), 'struct.Struct', 'struct.Struct', (['packed_buffer_fmt'], {}), '(packed_buffer_fmt)\n', (32627, 32646), False, 'import struct\n'), ((44517, 44563), 'datetime.datetime.combine', 'datetime.combine', (['row_head.DATE', 'row_head.TIME'], {}), '(row_head.DATE, row_head.TIME)\n', (44533, 44563), False, 'from datetime import datetime, timedelta\n'), ((48034, 48089), 'numpy.array', 'np.array', (['packed_buffer[iprm::nparms]'], {'dtype': 'np.float32'}), '(packed_buffer[iprm::nparms], 
dtype=np.float32)\n', (48042, 48089), True, 'import numpy as np\n'), ((61382, 61432), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][1:][::-1]", 'pres'], {}), "(merged['PRES'][1:][::-1], pres)\n", (61400, 61432), False, 'import bisect\n'), ((62616, 62666), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][1:][::-1]", 'pres'], {}), "(merged['PRES'][1:][::-1], pres)\n", (62634, 62666), False, 'import bisect\n'), ((101694, 101741), 'numpy.array', 'np.array', (['packed_buffer[iprm]'], {'dtype': 'np.float32'}), '(packed_buffer[iprm], dtype=np.float32)\n', (101702, 101741), True, 'import numpy as np\n'), ((51651, 51706), 'numpy.array', 'np.array', (['packed_buffer[iprm::nparms]'], {'dtype': 'np.float32'}), '(packed_buffer[iprm::nparms], dtype=np.float32)\n', (51659, 51706), True, 'import numpy as np\n'), ((64765, 64811), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][::-1]", 'pres'], {}), "(merged['PRES'][::-1], pres)\n", (64783, 64811), False, 'import bisect\n'), ((66454, 66500), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][::-1]", 'pres'], {}), "(merged['PRES'][::-1], pres)\n", (66472, 66500), False, 'import bisect\n'), ((68242, 68288), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][::-1]", 'pres'], {}), "(merged['PRES'][::-1], pres)\n", (68260, 68288), False, 'import bisect\n'), ((69624, 69670), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][::-1]", 'pres'], {}), "(merged['PRES'][::-1], pres)\n", (69642, 69670), False, 'import bisect\n'), ((75364, 75410), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][::-1]", 'pres'], {}), "(merged['PRES'][::-1], pres)\n", (75382, 75410), False, 'import bisect\n'), ((76912, 76958), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][::-1]", 'pres'], {}), "(merged['PRES'][::-1], pres)\n", (76930, 76958), False, 'import bisect\n'), ((98484, 98531), 'numpy.array', 'np.array', (['packed_buffer[iprm]'], {'dtype': 
'np.float32'}), '(packed_buffer[iprm], dtype=np.float32)\n', (98492, 98531), True, 'import numpy as np\n'), ((71771, 71817), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][::-1]", 'pres'], {}), "(merged['PRES'][::-1], pres)\n", (71789, 71817), False, 'import bisect\n'), ((73440, 73486), 'bisect.bisect_left', 'bisect.bisect_left', (["merged['PRES'][::-1]", 'pres'], {}), "(merged['PRES'][::-1], pres)\n", (73458, 73486), False, 'import bisect\n'), ((90622, 90668), 'datetime.datetime.combine', 'datetime.combine', (['row_head.DATE', 'row_head.TIME'], {}), '(row_head.DATE, row_head.TIME)\n', (90638, 90668), False, 'from datetime import datetime, timedelta\n'), ((91891, 91937), 'datetime.datetime.combine', 'datetime.combine', (['col_head.DATE', 'col_head.TIME'], {}), '(col_head.DATE, col_head.TIME)\n', (91907, 91937), False, 'from datetime import datetime, timedelta\n'), ((93221, 93267), 'datetime.datetime.combine', 'datetime.combine', (['col_head.DATE', 'col_head.TIME'], {}), '(col_head.DATE, col_head.TIME)\n', (93237, 93267), False, 'from datetime import datetime, timedelta\n')] |
#!/usr/bin/python3
# coding=utf-8
from PIL import Image
import numpy
import sys
import os
# Character ramp ordered from visually dense ('$') to sparse (' ');
# brighter grayscale pixels map to characters further right in the list.
# NOTE: "/\|" holds a literal backslash between '/' and '|' (no escape intended).
gray_num = list(
    "$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\|()1{}[]?-_+~<>i!lI;:,\"^`'. ")
def main(filepath: str, width: int=80, height: int=40):
    """Render an image file as ASCII art on stdout.

    Args:
        filepath: path of the image to render (any format PIL can open).
        width: output width in characters.
        height: output height in characters.
    """
    img = Image.open(filepath)  # read the image
    # Fix: Image.ANTIALIAS was removed in Pillow 10; LANCZOS has been the
    # canonical name for the same filter since Pillow 2.7, so behavior is
    # unchanged on old versions and the call keeps working on new ones.
    img = img.resize((width, height), Image.LANCZOS)
    img_gray = img.convert("L")  # convert to grayscale
    img_array = numpy.array(img_gray, "f")  # grayscale matrix
    numlen = len(gray_num)
    for line in img_array:
        # Map each pixel value (0..255) onto an index into the ramp; the
        # scale by (numlen - 1) keeps the max pixel inside the list bounds.
        char_line = [gray_num[int((pf / 255) * (numlen - 1))] for pf in line]
        print("".join(char_line))
if __name__ == "__main__":
    args = sys.argv
    if len(args) > 1 and os.path.exists(args[1]):
        term = os.get_terminal_size()
        # Optional width/height arguments fall back to the terminal size.
        width = int(args[2]) if len(args) >= 3 else term.columns
        height = int(args[3]) if len(args) >= 4 else term.lines
        main(filepath=args[1], width=width, height=height)
    else:
        print("ERROR: 缺少参数\n")
        print(f"完整命令: {args[0]} filepath [width] [height]")
        print("filepath: 图片文件路径,支持多数图片格式")
        print("width: 输出宽度 (可选,终端尺寸)")
        print("height: 输出高度 (可选,终端尺寸)")
        print()
| [
"numpy.array",
"PIL.Image.open",
"os.path.exists",
"os.get_terminal_size"
] | [((256, 276), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (266, 276), False, 'from PIL import Image\n'), ((398, 424), 'numpy.array', 'numpy.array', (['img_gray', '"""f"""'], {}), "(img_gray, 'f')\n", (409, 424), False, 'import numpy\n'), ((740, 767), 'os.path.exists', 'os.path.exists', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (754, 767), False, 'import os\n'), ((820, 842), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (840, 842), False, 'import os\n')] |
# The followings are the DenseNets module, the training was actually taken place in the `run_dense_net.py` file.
# Sorry, I really like Pycharm (and to be fair, Pytorch is so much an easier language to debug)
import os
# Pin the process to GPU index 2; must happen before tensorflow is imported below.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
from models import DenseNet
from data_providers.utils import get_data_provider_by_name
import tensorflow as tf
import numpy as np
import json
import pandas as pd
from tqdm import tqdm
import random
import time
from matplotlib import pyplot as plt
# Visualizations will be shown in the notebook.
# % matplotlib inline
from matplotlib import gridspec
# Load pickled data
import pickle
# Paths to the pickled train/validation/test splits.
training_file = './data/train.p'
validation_file = './data/valid.p'
testing_file = './data/test.p'
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
# Each pickle holds a dict with 'features' (images) and 'labels' arrays.
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test_origin = test['features'], test['labels']
# Training hyper-parameters (CIFAR-style naming kept from the upstream repo).
train_params_cifar = {
    'batch_size': 64,
    'n_epochs': 500,
    'initial_learning_rate': 0.05,
    'reduce_lr_epoch_1': 50,  # epochs * 0.5
    'reduce_lr_epoch_2': 75,  # epochs * 0.75
    'validation_set': True,
    'validation_split': None,  # None or float
    'shuffle': 'every_epoch',  # None, once_prior_train, every_epoch
    'normalization': 'by_chanels',  # None, divide_256, divide_255, by_chanels
    'use_YUV': True,
    'use_Y': True,  # use only Y channel
    'data_augmentation': 0,  # [0, 1]
}
# We save this model params.json from the trained model
with open('model_params.json', 'r') as fp:
    model_params = json.load(fp)
# some default params dataset/architecture related
train_params = train_params_cifar
print("Params:")
for k, v in model_params.items():
    print("\t%s: %s" % (k, v))
print("Train params:")
for k, v in train_params.items():
    print("\t%s: %s" % (k, v))
# Force single-channel (Y of YUV) input regardless of the saved params.
model_params['use_Y'] = True
print("Prepare training data...")
data_provider = get_data_provider_by_name(model_params['dataset'], train_params)
print("Initialize the model..")
tf.reset_default_graph()
model = DenseNet(data_provider=data_provider, **model_params)
print("Loading trained model")
model.load_model()
print("Data provider test images: ", data_provider.test.num_examples)
print("Testing...")
# Evaluate the restored checkpoint, then collect raw per-sample predictions.
loss, accuracy = model.test(data_provider.test, batch_size=30)
print("mean cross_entropy: %f, mean accuracy: %f" % (loss, accuracy))
total_prediction, y_test = model.predictions_test(data_provider.test, batch_size=100)
# Plotting incorrect examples
# Collect every test sample whose argmax prediction differs from its label.
incorrectlist = []
for i in range(len(total_prediction)):
    #if not correctness(y_test[i],total_prediction[i]):
    for j in range(len(y_test[i])):
        if not np.argmax(y_test[i][j]) == np.argmax(total_prediction[i][j]):
            correct_classId = np.argmax(y_test[i][j])
            predict_classId = np.argmax(total_prediction[i][j])
            # i indexes the prediction batch, j the sample inside the
            # batch of 100 (matches batch_size=100 above).
            incorrectlist.append({'index':i*100+j, 'correct':correct_classId, 'predicted':predict_classId})
incorrectmatrix = {}
modeCount = 0
# get the label description from the CSV file.
classLabelList = pd.read_csv('signnames.csv')
# Group errors into buckets keyed by "correct+predicted" class pair and
# track the bucket with the highest count (the mode).
for i in range(len(incorrectlist)):
    predicted = incorrectlist[i]['predicted']
    correct = incorrectlist[i]['correct']
    index = incorrectlist[i]['index']
    bucket = str(correct) + "+" + str(predicted)
    incorrectinstance = incorrectmatrix.get(bucket, {'count': 0, 'samples': []})
    # add to the count
    count = incorrectinstance['count'] + 1
    # add to samples of this correct to predicted condition
    samples = incorrectinstance['samples']
    samples.append(index)
    # put back in the list
    incorrectmatrix[bucket] = {'count': count, 'correct': correct, 'predicted': predicted, 'samples': samples}
    # update most common error
    if count > modeCount:
        modeCount = count
        modeBucket = bucket
# get the list of buckets and sort them
def compare_bucket_count(bucket):
    """Sort key for buckets: larger error counts produce smaller keys, so an
    ascending sort puts the most frequent confusions first.

    Reads the module-level ``modeCount`` and ``incorrectmatrix``.
    """
    bucket_errors = incorrectmatrix[bucket]['count']
    return modeCount - bucket_errors
sortedBuckets = list(incorrectmatrix.keys())
# Ascending by (modeCount - count), i.e. highest-count buckets first.
sortedBuckets.sort(key=compare_bucket_count)
# get the unique number of original picture sizes and the min and max last instance
n_buckets = len(sortedBuckets)
# print the stats
print("\nNumber of unique buckets in incorrect set: ", n_buckets, "\n")
print("Mode Bucket: ", modeBucket, "with count: ", modeCount)
print("\nTop Twenty Distribution of buckets with incorrect predicted test dataset labels:")
for n in range(20):
    bucket = sortedBuckets[n]
    cclassId = incorrectmatrix[bucket]['correct']
    pclassId = incorrectmatrix[bucket]['predicted']
    count = incorrectmatrix[bucket]['count']
    # Human-readable sign names for the correct and predicted class ids.
    cdescription = classLabelList[classLabelList.ClassId == cclassId].SignName.to_string(header=False, index=False)
    pdescription = classLabelList[classLabelList.ClassId == pclassId].SignName.to_string(header=False, index=False)
    print(
        "incorrect set count: {0:4d} CClassId: {1:02d} Description: {2}\n            PClassId: {3:02d} Description: {4}".format(
            count, cclassId, cdescription, pclassId, pdescription))
def draw_sample_incorrectmatrix(datasettxt, sortedBuckets, incorrectmatrix, dataset, cmap=None):
    """Plot the ten worst misclassification buckets with sample images.

    Each row shows one (correct, predicted) class pair: an annotation cell,
    ten randomly drawn misclassified images from ``dataset``, and in the
    right-most column a random training image of the *predicted* class for
    visual comparison.

    Args:
        datasettxt: progress-bar caption describing what is being drawn.
        sortedBuckets: bucket keys sorted with the worst buckets last-first
            (indexed from the end here).
        incorrectmatrix: bucket -> {'count', 'correct', 'predicted', 'samples'}.
            Fix: the original signature misspelled this parameter
            ("incorrectmatix") and the body silently read the module-level
            global instead; the parameter is now actually used.
        dataset: image array indexed by the sample indices in each bucket.
        cmap: optional matplotlib colormap for single-channel images.

    Note: still reads the module-level ``classLabelList``, ``X_train`` and
    ``y_train`` for label descriptions and ground-truth samples.
    """
    n_samples = 11
    n_labels = 10
    # size of each sample
    fig = plt.figure(figsize=(n_samples * 1.8, n_labels))
    w_ratios = [1 for n in range(n_samples)]
    w_ratios[:0] = [int(n_samples * 0.8)]  # wide first column for the annotation text
    h_ratios = [1 for n in range(n_labels)]
    # gridspec
    time.sleep(1)  # wait for 1 second for the previous print to appear!
    grid = gridspec.GridSpec(n_labels, n_samples + 1, wspace=0.0, hspace=0.0, width_ratios=w_ratios,
                             height_ratios=h_ratios)
    labelset_pbar = tqdm(range(n_labels), desc=datasettxt, unit='labels')
    for a in labelset_pbar:
        # Walk the buckets from the highest error count downwards.
        bucket = sortedBuckets[n_labels - a - 1]
        cclassId = incorrectmatrix[bucket]['correct']
        pclassId = incorrectmatrix[bucket]['predicted']
        cdescription = classLabelList[classLabelList.ClassId == cclassId].SignName.to_string(header=False, index=False)
        pdescription = classLabelList[classLabelList.ClassId == pclassId].SignName.to_string(header=False, index=False)
        count = incorrectmatrix[bucket]['count']
        for b in range(n_samples):
            i = a * (n_samples + 1) + b
            ax = plt.Subplot(fig, grid[i])
            if b == 0:
                ax.annotate(
                    'CClassId %d (%d): %s\nPClassId %d: %s' % (cclassId, count, cdescription, pclassId, pdescription),
                    xy=(0, 0), xytext=(0.0, 0.3))
                ax.set_xticks([])
                ax.set_yticks([])
                fig.add_subplot(ax)
            else:
                random_i = random.choice(incorrectmatrix[bucket]['samples'])
                image = dataset[random_i]
                if cmap is None:  # fix: identity check instead of '== None'
                    ax.imshow(image)
                else:
                    ax.imshow(image, cmap=cmap)
                ax.set_xticks([])
                ax.set_yticks([])
                fig.add_subplot(ax)
        # We also plot the GT image on the right
        i = a * (n_samples + 1) + n_samples
        ax = plt.Subplot(fig, grid[i])
        img_idx = np.where(y_train == pclassId)
        random_i = random.choice(img_idx[0])
        image = X_train[random_i]
        if cmap is None:
            ax.imshow(image)
        else:
            ax.imshow(image, cmap=cmap)
        ax.set_xticks([])
        ax.set_yticks([])
        fig.add_subplot(ax)
        # hide the borders on the last row pass
        if a == (n_labels - 1):
            all_axes = fig.get_axes()
            for ax in all_axes:
                for sp in ax.spines.values():
                    sp.set_visible(False)
    plt.show()
draw_sample_incorrectmatrix('Test set 10 ten incorrect sample images using RGB as input, right most is the predicted image in the training set', sortedBuckets, incorrectmatrix, test['features'])
| [
"random.choice",
"tensorflow.reset_default_graph",
"pandas.read_csv",
"models.DenseNet",
"numpy.where",
"pickle.load",
"numpy.argmax",
"time.sleep",
"matplotlib.pyplot.Subplot",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"data_providers.utils.get_data_provider_by_name",
"jso... | [((2107, 2171), 'data_providers.utils.get_data_provider_by_name', 'get_data_provider_by_name', (["model_params['dataset']", 'train_params'], {}), "(model_params['dataset'], train_params)\n", (2132, 2171), False, 'from data_providers.utils import get_data_provider_by_name\n'), ((2204, 2228), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2226, 2228), True, 'import tensorflow as tf\n'), ((2237, 2290), 'models.DenseNet', 'DenseNet', ([], {'data_provider': 'data_provider'}), '(data_provider=data_provider, **model_params)\n', (2245, 2290), False, 'from models import DenseNet\n'), ((3237, 3265), 'pandas.read_csv', 'pd.read_csv', (['"""signnames.csv"""'], {}), "('signnames.csv')\n", (3248, 3265), True, 'import pandas as pd\n'), ((798, 812), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (809, 812), False, 'import pickle\n'), ((869, 883), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (880, 883), False, 'import pickle\n'), ((936, 950), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (947, 950), False, 'import pickle\n'), ((1756, 1769), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1765, 1769), False, 'import json\n'), ((5417, 5464), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(n_samples * 1.8, n_labels)'}), '(figsize=(n_samples * 1.8, n_labels))\n', (5427, 5464), True, 'from matplotlib import pyplot as plt\n'), ((5616, 5629), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5626, 5629), False, 'import time\n'), ((5696, 5813), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['n_labels', '(n_samples + 1)'], {'wspace': '(0.0)', 'hspace': '(0.0)', 'width_ratios': 'w_ratios', 'height_ratios': 'h_ratios'}), '(n_labels, n_samples + 1, wspace=0.0, hspace=0.0,\n width_ratios=w_ratios, height_ratios=h_ratios)\n', (5713, 5813), False, 'from matplotlib import gridspec\n'), ((8033, 8043), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8041, 8043), True, 'from matplotlib import pyplot as 
plt\n'), ((7467, 7492), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'grid[i]'], {}), '(fig, grid[i])\n', (7478, 7492), True, 'from matplotlib import pyplot as plt\n'), ((7512, 7541), 'numpy.where', 'np.where', (['(y_train == pclassId)'], {}), '(y_train == pclassId)\n', (7520, 7541), True, 'import numpy as np\n'), ((7561, 7586), 'random.choice', 'random.choice', (['img_idx[0]'], {}), '(img_idx[0])\n', (7574, 7586), False, 'import random\n'), ((2940, 2963), 'numpy.argmax', 'np.argmax', (['y_test[i][j]'], {}), '(y_test[i][j])\n', (2949, 2963), True, 'import numpy as np\n'), ((2994, 3027), 'numpy.argmax', 'np.argmax', (['total_prediction[i][j]'], {}), '(total_prediction[i][j])\n', (3003, 3027), True, 'import numpy as np\n'), ((6507, 6532), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'grid[i]'], {}), '(fig, grid[i])\n', (6518, 6532), True, 'from matplotlib import pyplot as plt\n'), ((2848, 2871), 'numpy.argmax', 'np.argmax', (['y_test[i][j]'], {}), '(y_test[i][j])\n', (2857, 2871), True, 'import numpy as np\n'), ((2875, 2908), 'numpy.argmax', 'np.argmax', (['total_prediction[i][j]'], {}), '(total_prediction[i][j])\n', (2884, 2908), True, 'import numpy as np\n'), ((6903, 6977), 'random.choice', 'random.choice', (["incorrectmatrix[sortedBuckets[n_labels - a - 1]]['samples']"], {}), "(incorrectmatrix[sortedBuckets[n_labels - a - 1]]['samples'])\n", (6916, 6977), False, 'import random\n')] |
from collections import deque
from gym import spaces
import numpy as np
from pfrl.env import VectorEnv
from pfrl.wrappers.atari_wrappers import LazyFrames
class VectorEnvWrapper(VectorEnv):
    """VectorEnv analog to gym.Wrapper.

    Delegates every call to the wrapped ``env``; subclasses override only
    the methods they need to change.
    """
    def __init__(self, env):
        # Mirror the wrapped env's spaces so callers can inspect them directly.
        self.env = env
        self.action_space = self.env.action_space
        self.observation_space = self.env.observation_space
    def __getattr__(self, name):
        # Forward unknown attribute lookups to the wrapped env, but refuse
        # private names so the wrapper's own internals are never resolved
        # on the wrapped env by accident.
        if name.startswith("_"):
            raise AttributeError(
                "attempted to get missing private attribute '{}'".format(name)
            )
        return getattr(self.env, name)
    def step(self, action):
        return self.env.step(action)
    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
    def render(self, mode="human", **kwargs):
        return self.env.render(mode, **kwargs)
    def close(self):
        return self.env.close()
    def seed(self, seed=None):
        return self.env.seed(seed)
    def compute_reward(self, achieved_goal, desired_goal, info):
        return self.env.compute_reward(achieved_goal, desired_goal, info)
    def __str__(self):
        return "<{}{}>".format(type(self).__name__, self.env)
    def __repr__(self):
        return str(self)
    @property
    def unwrapped(self):
        # Unwrap through the whole chain of wrappers.
        return self.env.unwrapped
class VectorFrameStack(VectorEnvWrapper):
    """VectorEnv analog to pfrl.wrappers.atari_wrappers.FrameStack.

    The original `pfrl.wrappers.atari_wrappers.FrameStack` does not work
    properly with `pfrl.envs.MultiprocessVectorEnv` because LazyFrames
    becomes not lazy when passed between processes, unnecessarily increasing
    memory usage. To avoid the issue, use this wrapper instead of `FrameStack`
    so that LazyFrames are not passed between processes.
    Args:
        env (VectorEnv): Env to wrap.
        k (int): How many frames to stack.
        stack_axis (int): Axis along which frames are concatenated.
    """

    def __init__(self, env, k, stack_axis=0):
        """Stack k last frames."""
        VectorEnvWrapper.__init__(self, env)
        self.k = k
        self.stack_axis = stack_axis
        # One bounded buffer holding the k most recent observations per sub-env.
        self.frames = [deque([], maxlen=k) for _ in range(env.num_envs)]
        base_space = env.observation_space
        assert isinstance(base_space, spaces.Box)
        stacked_low = np.repeat(base_space.low, k, axis=self.stack_axis)
        stacked_high = np.repeat(base_space.high, k, axis=self.stack_axis)
        self.observation_space = spaces.Box(
            low=stacked_low, high=stacked_high, dtype=base_space.dtype
        )

    def reset(self, mask=None):
        batch_ob = self.env.reset(mask=mask)
        if mask is None:
            mask = np.zeros(self.env.num_envs)
        for skipped, frame_buf, ob in zip(mask, self.frames, batch_ob):
            if skipped:
                continue
            # A freshly reset env repeats its first observation k times so
            # the stacked observation is immediately full.
            for _ in range(self.k):
                frame_buf.append(ob)
        return self._get_ob()

    def step(self, action):
        batch_ob, reward, done, info = self.env.step(action)
        for frame_buf, ob in zip(self.frames, batch_ob):
            frame_buf.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.env.num_envs
        assert len(self.frames[0]) == self.k
        return [
            LazyFrames(list(frame_buf), stack_axis=self.stack_axis)
            for frame_buf in self.frames
        ]
| [
"collections.deque",
"numpy.repeat",
"numpy.zeros",
"gym.spaces.Box"
] | [((2345, 2399), 'numpy.repeat', 'np.repeat', (['orig_obs_space.low', 'k'], {'axis': 'self.stack_axis'}), '(orig_obs_space.low, k, axis=self.stack_axis)\n', (2354, 2399), True, 'import numpy as np\n'), ((2415, 2470), 'numpy.repeat', 'np.repeat', (['orig_obs_space.high', 'k'], {'axis': 'self.stack_axis'}), '(orig_obs_space.high, k, axis=self.stack_axis)\n', (2424, 2470), True, 'import numpy as np\n'), ((2504, 2562), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'low', 'high': 'high', 'dtype': 'orig_obs_space.dtype'}), '(low=low, high=high, dtype=orig_obs_space.dtype)\n', (2514, 2562), False, 'from gym import spaces\n'), ((2180, 2199), 'collections.deque', 'deque', (['[]'], {'maxlen': 'k'}), '([], maxlen=k)\n', (2185, 2199), False, 'from collections import deque\n'), ((2707, 2734), 'numpy.zeros', 'np.zeros', (['self.env.num_envs'], {}), '(self.env.num_envs)\n', (2715, 2734), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from skimage.draw import polygon
from spatial_biofilm_sorting_package.positional_measures import (
create_position_maps
)
# Fixture: a 4x4 foreground square centred in an 8x8 boolean image.
# polygon() yields the row/column indices of the pixels inside the square
# with vertices (2,2), (2,6), (6,6), (6,2).
rr, cc = polygon([2, 2, 6, 6], [2, 6, 6, 2])
test_img = np.zeros((8, 8), dtype=bool)
test_img[rr, cc] = True
test_shape = test_img.shape
def test_shapes_and_len():
    """Shapes of the flat map, the image map and the info list must agree."""
    flat, img, info = create_position_maps(test_img)
    n_measures = len(info)
    assert img.ndim == 3
    assert flat.shape[1] == n_measures
    assert img.shape[2] == flat.shape[1]
    # Image map keeps the spatial dimensions of the input mask.
    assert (img.shape[0], img.shape[1]) == test_shape
    # One flat row per foreground pixel.
    assert flat.shape[0] == test_img.sum()
def test_square():
    """Known extrema for the centred-square fixture."""
    flat, img, info = create_position_maps(test_img)
    # Per-measure maxima must match between the flat and image forms.
    for idx in range(len(info)):
        assert img[..., idx].max() == flat[:, idx].max()
    assert img.min() == 0.
    assert img[..., 0].max() == 2.
    # Distance from the square's centre (3.5, 3.5) to its farthest pixel (2, 2).
    assert img[..., 1].max() == np.sqrt((3.5-2.)**2. + (3.5-2.)**2.)
| [
"numpy.zeros",
"spatial_biofilm_sorting_package.positional_measures.create_position_maps",
"numpy.sqrt",
"skimage.draw.polygon"
] | [((218, 253), 'skimage.draw.polygon', 'polygon', (['[2, 2, 6, 6]', '[2, 6, 6, 2]'], {}), '([2, 2, 6, 6], [2, 6, 6, 2])\n', (225, 253), False, 'from skimage.draw import polygon\n'), ((265, 293), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {'dtype': 'bool'}), '((8, 8), dtype=bool)\n', (273, 293), True, 'import numpy as np\n'), ((397, 427), 'spatial_biofilm_sorting_package.positional_measures.create_position_maps', 'create_position_maps', (['test_img'], {}), '(test_img)\n', (417, 427), False, 'from spatial_biofilm_sorting_package.positional_measures import create_position_maps\n'), ((693, 723), 'spatial_biofilm_sorting_package.positional_measures.create_position_maps', 'create_position_maps', (['test_img'], {}), '(test_img)\n', (713, 723), False, 'from spatial_biofilm_sorting_package.positional_measures import create_position_maps\n'), ((906, 954), 'numpy.sqrt', 'np.sqrt', (['((3.5 - 2.0) ** 2.0 + (3.5 - 2.0) ** 2.0)'], {}), '((3.5 - 2.0) ** 2.0 + (3.5 - 2.0) ** 2.0)\n', (913, 954), True, 'import numpy as np\n')] |
import os
import sys
from pprint import pprint
from altair.vegalite.v4.schema.core import UtcSingleTimeUnit
import ccxt
#import ccxt.async_support as ccxt
from pyti.exponential_moving_average import exponential_moving_average as ema
import pandas as pd
import datetime
import time
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from IPython.display import clear_output
import numpy as np
import datetime as dt
import pytz
# import mplcursors
import streamlit as st
# from compute2d import compute_2d_histogram
import numpy as np
import pandas as pd
import altair as at
from copy import copy
import plotly.graph_objects as go
# from paracoords import create_paracoords
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import plotly
import plotly.graph_objs as go
import warnings
warnings.filterwarnings('ignore')
# root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# sys.path.append(root + '/python')
# print('CCXT Version:', ccxt.__version__)
def get_last_n_kline_closes(n=50, interval='1h', symbol='BTCUSDT', exchange=None):
    """Fetch recent kline close prices as a DataFrame.

    Returns a DataFrame with 'datetime' and 'closes' columns holding the
    n-1 most recent candles: the [-n:-1] slice drops the newest candle,
    presumably because it is still open -- TODO confirm.
    Prints a warning and returns None when no exchange is given.
    """
    if exchange is None:
        print('Exchange not initiated')
        return None
    klines = exchange.fapiPublic_get_klines({'symbol': symbol, 'interval': interval})
    rows = []
    for kline in klines:
        # kline[0] is the open time in milliseconds; kline[4] the close price.
        stamp = dt.datetime.utcfromtimestamp(float(kline[0]) / 1000.)
        rows.append((stamp, float(kline[4])))
    return pd.DataFrame(rows[-n:-1], columns=['datetime', 'closes'])
def generate_signal(candles=50, interval='1h', symbol='BTCUSDT', strat='ema_cross_over_under', strat_params=None, exchange=None):
    """Produce a trading signal ('LONG', 'SHORT' or 'NONE') for one symbol.

    Args:
        candles: nominal history length (note: both strategies override it
            with slow_ema + 10 internally, as the original code did).
        interval: kline interval string.
        symbol: futures market symbol.
        strat: one of 'ema_diff_peak_trough', 'ema_cross_over_under',
            'ema_oscillator_peak_trough' (the last is not implemented).
        strat_params: dict with 'fast_ema' and 'slow_ema' periods; defaults
            to {'fast_ema': 10, 'slow_ema': 40}. (Fix: the original used a
            mutable dict as the default argument.)
        exchange: initialized ccxt binance exchange object.

    Returns:
        Signal string, or an error string on bad input.
    """
    if exchange is None:
        return 'Exchange not Initiated'
    if strat_params is None:
        strat_params = {'fast_ema': 10, 'slow_ema': 40}
    allowed_strats = ['ema_diff_peak_trough', 'ema_cross_over_under', 'ema_oscillator_peak_trough']
    if strat not in allowed_strats:
        print('INVALID STRATEGY')
        return "NONE"
    if strat == 'ema_oscillator_peak_trough':
        # under development
        return "NONE"
    # Both remaining strategies evaluate the same fast-minus-slow EMA series;
    # the duplicated setup from the original is factored into one helper.
    tail = _ema_diff_tail(interval, symbol, strat_params, exchange)
    if tail is None:
        return "INPUT HAS TOO FEW ELEMENTS"
    last, second_last, third_last = tail
    if strat == 'ema_diff_peak_trough':
        # short if local peak
        if last < second_last and third_last < second_last:
            return 'SHORT'
        # long if local trough
        if last > second_last and third_last > second_last:
            return 'LONG'
        return "NONE"
    if strat == 'ema_cross_over_under':
        # long if diff cross over 0
        if last > 0 and second_last < 0:
            return "LONG"
        # short if diff cross under 0
        if last < 0 and second_last > 0:
            return "SHORT"
        return "NONE"


def _ema_diff_tail(interval, symbol, strat_params, exchange):
    """Return the last three (fast EMA - slow EMA) values, newest first,
    or None when there is not enough close-price history."""
    # Fetch just enough history for the slow EMA warm-up plus a margin.
    candles = strat_params['slow_ema'] + 10
    current_input = get_last_n_kline_closes(symbol=symbol, n=candles, interval=interval, exchange=exchange)
    min_input_length = np.max([float(strat_params['fast_ema']), float(strat_params['slow_ema'])])
    if len(list(current_input['closes'].values)) < min_input_length:
        return None
    closes = current_input['closes'].astype(float)
    ema_diff = pd.DataFrame(
        ema(closes.tolist(), strat_params['fast_ema']) - ema(closes.tolist(), strat_params['slow_ema']),
        columns=['ema_diff'])
    # Drop the EMA warm-up region where the slow EMA is not yet defined.
    p = strat_params['slow_ema'] + 1
    ema_diff = ema_diff[p:].reset_index(drop=True)
    return ema_diff.values[-1], ema_diff.values[-2], ema_diff.values[-3]
def get_open_positions(mode='live', exchange=None):
    """Return the currently open futures position(s).

    live mode: list of position dicts with non-zero positionAmt, or 0 when flat.
    paper mode: the last row of paper_trades.csv, or 0 when no open trade.
    Returns an error string when exchange is missing or mode is unknown.
    """
    if exchange is None:
        return 'Exchange Not Initiated'
    if mode not in ('live', 'paper'):
        return "INVALID MODE"
    if mode == 'live':
        open_positions = [
            position
            for position in exchange.fapiPrivate_get_positionrisk()
            if float(position['positionAmt']) != 0
        ]
        # Keep the original convention: 0 (not an empty list) means "flat".
        return open_positions if open_positions else 0
    # paper mode: the trade log's last row describes the open trade, if any.
    paper_trades = pd.read_csv('paper_trades.csv', index_col=0)
    if paper_trades.position_type.iloc[-1] == '-':
        return 0
    return paper_trades.iloc[-1]
def get_balance(mode='live', asset='USDT', exchange=None):
    """Return {'wallet_balance': ..., 'unrealized_pnl_percent': ...}.

    live mode: wallet balance for `asset` plus the summed unrealized PnL of
    positive-amount positions, both formatted as strings.
    paper mode: equity from paper_trades.csv plus the open trade's PnL
    percentage against the current ticker price (a float).

    Bug fix: the original detected the position direction with two
    independent `if` statements, so a 'LONG' row always fell through to the
    `else` of the 'SHORT' check and was treated as flat (PnL reported as 0).
    Now uses if/elif/else.
    """
    if exchange is None:
        return 'Exchange not initiated'
    allowed_modes = ['paper', 'live']
    if mode not in allowed_modes:
        print("INVALID MODE")
        return None
    if mode == 'live':
        live_balance = str(float([float(elem['balance']) for elem in exchange.fapiPrivate_get_balance() if elem['asset'] == asset][0]))
        # NOTE(review): only positions with positive positionAmt are summed,
        # so unrealized PnL of short positions is ignored -- confirm intended.
        unrealized_pnl = str(sum([float(elem['unRealizedProfit']) for elem in exchange.fapiPrivateV2_get_positionrisk() if float(elem['positionAmt']) > 0]))
        unrealized_pnl_percent = str(float(unrealized_pnl) / float(live_balance))
        balance = {'wallet_balance': live_balance,
                   'unrealized_pnl_percent': unrealized_pnl_percent}
        return balance
    if mode == 'paper':
        paper_trades = pd.read_csv('paper_trades.csv', index_col=0)
        if paper_trades.paper_equity.iloc[-1] == '-':
            # Last row is an open trade: realized equity lives one row up.
            paper_balance = paper_trades.paper_equity.iloc[-2]
        else:
            paper_balance = paper_trades.paper_equity.iloc[-1]
        if paper_trades.entry_price.iloc[-1] == '-':
            entry = None
        else:
            entry = paper_trades.entry_price.iloc[-1]
        # Fixed: elif so a 'LONG' row is not clobbered by the SHORT check's else.
        if paper_trades.position_type.iloc[-1] == 'LONG':
            position = 1
        elif paper_trades.position_type.iloc[-1] == 'SHORT':
            position = -1
        else:
            position = 0
        if entry is not None and position != 0:
            if paper_trades.exit_price.iloc[-1] == '-':
                # Trade still open: mark to the current ticker price.
                symbol = paper_trades.market_name.iloc[-1]
                last_price = float(exchange.fapiPublic_get_ticker_price({'symbol': symbol})['price'])
                pnl = (last_price - float(entry)) / float(entry) * 100 * float(position)
            else:
                pnl = 0
        else:
            pnl = 0
        balance = {'wallet_balance': paper_balance, 'unrealized_pnl_percent': pnl}
        return balance
def close_all_open_positions(mode='live',exchange=None):
    """Close the currently open position (if any) at market.

    Args:
        mode: 'live' closes the real futures position; 'paper' closes the
            simulated trade recorded in paper_trades.csv.
        exchange: initialized ccxt binance futures exchange object.

    Returns:
        live: the exchange order response, or None when flat.
        paper: the updated (closed) trade row, or None when nothing is open.
        An error string when exchange/mode is invalid.
    """
    if exchange is None:
        return 'Exchange not Initiated'
    allowed_modes = ['paper','live']
    if mode not in allowed_modes:
        return "INVALID MODE"
    if mode == 'live':
        open_positions = get_open_positions(mode=mode,exchange=exchange)
        if open_positions == 0:
            # Nothing to close.
            return None
        # Flatten by sending a market order on the opposite side of the
        # position's sign. NOTE(review): despite the name, only the first
        # position in the list is closed -- confirm intended.
        if np.sign(float(open_positions[0]['positionAmt'])) == -1.0:
            opp_side = "BUY"
        if np.sign(float(open_positions[0]['positionAmt'])) == 1.0:
            opp_side = "SELL"
        baseqty= abs(float(open_positions[0]['positionAmt']))
        symbol = open_positions[0]['symbol']
        positionSide = open_positions[0]['positionSide']
        order = exchange.fapiPrivatePostOrder({'symbol':symbol, 'type':"MARKET", 'side':opp_side,'positionSide':positionSide ,'quantity':baseqty})
        return order
    if mode == 'paper':
        paper_trades = pd.read_csv('paper_trades.csv',index_col=0)
        if paper_trades.position_type.iloc[-1] == '-':
            # No trade has ever been opened.
            return None
        if paper_trades.exit_price.iloc[-1] != '-':
            # Last trade is already closed.
            return None
        symbol = paper_trades.market_name.iloc[-1]
        entry_price = paper_trades.entry_price.iloc[-1]
        leverage= paper_trades.leverage.iloc[-1]
        leverage = int(leverage)
        # Simulated fills apply 0.1% adverse slippage on the last price.
        if paper_trades.position_type.iloc[-1] == 'LONG':
            position = 1
            exit_price = 0.999*float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
            exit_time = datetime.datetime.utcnow()
        if paper_trades.position_type.iloc[-1] == 'SHORT':
            position = -1
            exit_price = 1.001*float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
            exit_time = datetime.datetime.utcnow()
        # Leveraged percentage PnL of the trade being closed.
        trade_pnl_pct = float(position)*float(leverage)*(float(exit_price)-float(entry_price))/float(entry_price)*100
        balance = float(paper_trades.paper_equity.iloc[-2])*(1+trade_pnl_pct/100)
        # NOTE(review): chained assignment through the column attribute plus
        # .iloc relies on the column being a view; under newer pandas
        # (copy-on-write) this may not write back -- verify before upgrading.
        paper_trades.exit_time.iloc[-1] = exit_time
        paper_trades.exit_price.iloc[-1] = exit_price
        paper_trades.trade_pnl_pct.iloc[-1] = trade_pnl_pct
        paper_trades.paper_equity.iloc[-1] = balance
        paper_trades.to_csv('paper_trades.csv')
        trade = paper_trades.iloc[-1]
        return trade
def open_market_order(mode='live',balance=1,symbol='BTCUSDT',leverage="5",side="BUY",hedge_mode="BOTH",exchange=None):
    """Close any existing position, then open a fresh market order.

    Args:
        mode: 'live' places a real Binance futures order; 'paper' appends a
            simulated trade row to paper_trades.csv.
        balance: fraction of the available USDT balance to commit (live mode).
        symbol: futures symbol, e.g. 'BTCUSDT'.
        leverage: leverage as a *string* (passed straight to the exchange API).
        side: 'BUY' or 'SELL'.
        hedge_mode: positionSide value sent to the exchange (e.g. "BOTH").
        exchange: ccxt binance exchange object; required.

    Returns:
        (order, closed_position) in live mode,
        (last paper-trade row, closed_position) in paper mode,
        or an error string for missing exchange / invalid mode.
    """
    if exchange is None:
        return 'Exchange not initiated'
    allowed_modes = ['paper','live']
    if mode not in allowed_modes:
        return "INVALID MODE"
    if mode == 'live':
        # Flatten any currently open position before sizing the new one.
        closed_position = close_all_open_positions(mode=mode,exchange=exchange)
        # Quote-currency budget: free USDT balance scaled by the requested fraction.
        quoteqty = float([elem for elem in exchange.fapiPrivate_get_balance({'asset':"USDT"}) if elem['asset']=='USDT'][0]['balance']) * balance
        price = float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
        # Base quantity at the requested leverage, rounded to 3 decimals, then
        # shaved by the symbol's minQty to stay within the available margin.
        # NOTE(review): assumes filters[2] is the LOT_SIZE filter -- verify
        # against exchangeInfo for each symbol.
        baseqty = "{:.3f}".format(quoteqty*float(leverage)/price)
        baseqty = float(baseqty)-float([elem for elem in exchange.fapiPublic_get_exchangeinfo()['symbols'] if elem['symbol']==symbol][0]['filters'][2]['minQty'])
        baseqty = "{:.3f}".format(baseqty)
        baseqty = str(baseqty)
        lev_req = exchange.fapiPrivate_post_leverage({'symbol':symbol,'leverage':leverage})
        order = exchange.fapiPrivatePostOrder({'symbol':symbol, 'type':"MARKET", 'side':side,'positionSide':hedge_mode ,'quantity':baseqty})
        return order,closed_position
    if mode == 'paper':
        closed_position = close_all_open_positions(mode=mode,exchange=exchange)
        paper_trades = pd.read_csv('paper_trades.csv',index_col=0)
        # Simulated fills include ~0.1% slippage against the trader
        # (pay up on buys, receive less on sells).
        if side == 'BUY':
            position_type = 'LONG'
            entry_price = 1.001*float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
            entry_time = datetime.datetime.utcnow()
        if side == 'SELL':
            position_type = 'SHORT'
            entry_price = 0.999*float(exchange.fapiPublic_get_ticker_price({'symbol':symbol})['price'])
            entry_time = datetime.datetime.utcnow()
        # '-' marks fields that are filled in when the position is closed.
        trade = pd.DataFrame([[symbol,position_type,entry_time,'-', entry_price,'-',leverage,'-','-']],columns=['market_name','position_type','entry_time','exit_time','entry_price','exit_price','leverage','trade_pnl_pct','paper_equity'])
        paper_trades = paper_trades.append(trade,ignore_index=True)
        paper_trades.to_csv('paper_trades.csv')
        return paper_trades.iloc[-1],closed_position
# def get_strat_performance(strat='ema_cross_over_under',leverage=1,strat_params={'fast_ema':4,'slow_ema':20},interval='4h',symbol='BTCUSDT',candles=50,input=None,exchange=None):
# if exchange is None:
# return 'Exchange not initiated'
# allowed_strats = ['ema_cross_over_under','ema_diff_peak_trough']
# if strat not in allowed_strats:
# print("INVALID STRATEGY")
# return None
# if input == None:
# current_input=get_last_n_kline_closes(symbol=symbol,n=candles,interval=interval,exchange=exchange)
# else:
# current_input = input
# closes = pd.DataFrame(current_input,columns=['close'])
# closes = closes[:-1]
# closes['close'] = closes['close'].astype(float)
# ema_diff = pd.DataFrame(ema(closes['close'].tolist(),strat_params['fast_ema']) - ema(closes['close'].tolist(),strat_params['slow_ema']),columns=['ema_diff'])
# p = strat_params['slow_ema']+1
# closes = closes[p:].reset_index(drop=True)
# ema_diff = ema_diff[p:].reset_index(drop=True)
# if strat == 'ema_cross_over_under':
# signal = [0]+[1 if float(ema_diff.loc[index]) > 0 and float(ema_diff.loc[index-1]) < 0 else -1 if float(ema_diff.loc[index]) < 0 and float(ema_diff.loc[index-1]) > 0 else 0 for index in ema_diff.index[1:]]
# if strat == 'ema_diff_peak_trough':
# signal = [0,0]+ [-1 if float(ema_diff.loc[index]) < float(ema_diff.loc[index-1]) and float(ema_diff.loc[index-1]) > float(ema_diff.loc[index-2]) else 1 if float(ema_diff.loc[index]) > float(ema_diff.loc[index-1]) and float(ema_diff.loc[index-1]) < float(ema_diff.loc[index-2]) else 0 for index in ema_diff.index[2:]]
# trades = list()
# for idx in range(len(signal)):
# if signal[idx] != 0:
# trades.append([closes.loc[idx],signal[idx]])
# result = list()
# for idx in range(len(trades)):
# if idx > 0:
# position = trades[idx-1][1] * leverage
# performance = position * ((trades[idx][0]['close'] - trades[idx-1][0]['close']) / trades[idx-1][0]['close'])*100
# trade = [position,performance]
# result.append(trade)
# equity_curve = list()
# principal = 1
# for elem in result:
# principal = principal * (1 + elem[1]/100)
# # print(principal)
# equity_curve.append(principal)
# pd.DataFrame(equity_curve).plot()
# trade_pnl = list()
# principal = 1
# for elem in result:
# pnl=elem[1]
# # print(principal)
# trade_pnl.append(pnl)
# pd.DataFrame(trade_pnl).plot(kind='bar')
def get_strat_price_ti_plot(strat='ema_cross_over_under',strat_params={'fast_ema':4,'slow_ema':20},symbol='DEFIUSDT',leverage=1,decay=0.995,interval='1d',candles=50,exchange=None,animate=False,data=None):
    """Backtest an EMA strategy and render a 4-panel matplotlib figure:
    price, trend indicator, strategy ROI (NAV), and buy-and-hold ROI.

    Args:
        strat: 'ema_cross_over_under', 'ema_diff_peak_trough', or
            'ema_oscillator_peak_trough' (the last is unimplemented).
        strat_params: dict with 'fast_ema'/'slow_ema' EMA periods.
            NOTE(review): mutable default argument -- shared across calls.
        symbol, interval, candles: kline request parameters (used when
            data is None).
        leverage: NOTE(review): accepted but never used in this body.
        decay: per-tick multiplier applied to the stored entry price in the
            NAV simulation (a carry/fee-like drift on open positions).
        exchange: ccxt exchange object; required.
        animate: when False, plotting is skipped until the final tick;
            NOTE(review): when True the function returns the figure on the
            first loop iteration (the `return fig` sits inside the loop).
        data: None fetches the last `candles` klines via
            get_last_n_kline_closes; 'complete' loads local CSV history.

    Returns:
        The matplotlib figure, an error string for short input, or None
        for invalid/unimplemented strategies.
    """
    if exchange is None:
        return 'Exchange not initiated'
    allowed_strats = ['ema_cross_over_under','ema_diff_peak_trough','ema_oscillator_peak_trough']
    if strat not in allowed_strats:
        print("INVALID STRATEGY")
        return None
    if strat == 'ema_oscillator_peak_trough':
        '''in devlopment'''
        # current_input=list(get_last_n_kline_closes(symbol=symbol,n=candles,interval=interval,exchange=exchange)['closes'].values)
        # closes = pd.DataFrame(current_input,columns=['close'])
        # # closes = closes[:-1]
        # closes['close'] = closes['close'].astype(float)
        # ema_oscillator = list()
        return None
    if data is None:
        current_input=get_last_n_kline_closes(symbol=symbol,n=candles,interval=interval,exchange=exchange)
    if data == 'complete':
        # NOTE(review): the parquet read below is immediately overwritten by
        # the CSV read -- dead code. Both paths depend on local files.
        current_input = pd.read_parquet('C:\\Users\\ZankarSS\\Downloads\\BTC-USDT.parquet')['close'].astype(float).values
        current_input = pd.read_csv('Binance_BTCUSDT_d.csv').iloc[::-1][['date','close']]
        # CSV is newest-first, hence the [::-1] reversal above; rebuild the
        # 'datetime'/'closes' columns the strategy code expects.
        current_input['datetime'] = [pd.Timestamp(elem) for elem in current_input['date'].values]
        current_input['closes'] = [np.float64(elem) for elem in current_input['close'].values]
        del current_input['date']
        del current_input['close']
        # NOTE(review): result of reset_index is not assigned -- this line is
        # a no-op as written.
        current_input.reset_index(drop=True)
        # dates = True
    if strat == 'ema_cross_over_under':
        min_input_length = np.max([float(strat_params['fast_ema']),float(strat_params['slow_ema'])])
        if len(list(current_input['closes'].values))<min_input_length:
            return "INPUT HAS TOO FEW ELEMENTS"
        closes = current_input['closes'].astype(float)
        datetimes = current_input['datetime']
        # closes = closes[:-1]
        # closes['close'] = closes['close'].astype(float)
        # Trend indicator: fast EMA minus slow EMA of the close series.
        indicator = pd.DataFrame(ema(closes.tolist(),strat_params['fast_ema']) - ema(closes.tolist(),strat_params['slow_ema']),columns=['ema_diff'])
        # Drop the EMA warm-up period so indicator values are meaningful.
        p = strat_params['slow_ema']+1
        closes = closes[p:].reset_index(drop=True)
        indicator = indicator[p:].reset_index(drop=True)
        datetimes = datetimes[p:].reset_index(drop=True)
        # +1 on a cross above zero, -1 on a cross below, else 0.
        # NOTE(review): this branch never builds `navs`, so the assert below
        # will raise NameError for strat == 'ema_cross_over_under'.
        signal = [0] + [1 if float(indicator.loc[index]) > 0 and float(indicator.loc[index-1]) < 0 else -1 if float(indicator.loc[index]) < 0 and float(indicator.loc[index-1]) > 0 else 0 for index in indicator.index[1:]]
    if strat == 'ema_diff_peak_trough':
        # current_input=get_last_n_kline_closes(symbol=symbol,n=candles,interval=interval,exchange=exchange)
        min_input_length = np.max([float(strat_params['fast_ema']),float(strat_params['slow_ema'])])
        if len(list(current_input['closes'].values))<min_input_length:
            return "INPUT HAS TOO FEW ELEMENTS"
        closes = current_input['closes'].astype(float)
        datetimes = current_input['datetime']
        # closes = closes[:-1]
        # closes['close'] = closes['close'].astype(float)
        indicator = pd.DataFrame(ema(closes.tolist(),strat_params['fast_ema']) - ema(closes.tolist(),strat_params['slow_ema']),columns=['ema_diff'])
        p = strat_params['slow_ema']+1
        closes = closes[p:].reset_index(drop=True)
        indicator = indicator[p:].reset_index(drop=True)
        datetimes = datetimes[p:].reset_index(drop=True)
        # -1 at a local peak of the indicator, +1 at a local trough, else 0.
        signal = [0,0]+ [-1 if float(indicator.loc[index]) < float(indicator.loc[index-1]) and float(indicator.loc[index-1]) > float(indicator.loc[index-2]) else 1 if float(indicator.loc[index]) > float(indicator.loc[index-1]) and float(indicator.loc[index-1]) < float(indicator.loc[index-2]) else 0 for index in indicator.index[2:]]
        # NAV simulation: track the running strategy equity. `decay` drifts
        # the stored entry price each tick (against longs, for shorts).
        navs = list()
        current_position = 0
        current_nav = 1
        current_position_entry = 0
        cumulative_nav = 1
        for idx in indicator.index[2:]:
            # if idx == 0 or idx == 1:
            #     navs.append(current_nav)
            #     continue
            if current_position == 1:
                current_position_entry = current_position_entry * float(1/float(decay))
                current_nav = (float(closes[idx]) / float(current_position_entry)) * float(cumulative_nav)
                navs.append(current_nav)
            if current_position == -1:
                current_position_entry = current_position_entry * float(decay)
                current_nav = (1 + ((float(current_position_entry) - float(closes[idx])) / float(current_position_entry))) * float(cumulative_nav)
                navs.append(current_nav)
            if current_position == 0:
                navs.append(current_nav)
            # Flip/open the position on a non-zero signal that differs from
            # the current stance; lock in the NAV as the new baseline.
            if signal[idx] != current_position and signal[idx] != 0:
                current_position = signal[idx]
                current_position_entry = closes[idx]
                cumulative_nav = current_nav
        # Pad to align with closes/signal (which keep the first two ticks).
        navs = [1,1] + navs
    # for elem in zip(closes.values,signal):
    #     if elem[1] == 0:
    #         if last_position == 0:
    #             last_nav =
    #             navs.append(last_nav)
    #         if last_position != 0:
    #             last_nav =
    #             navs.append(last_nav)
    #     if elem[1] == 1:
    #         last_position =1
    #         last_nav =
    #         navs.append(last_nav)
    #     if elem[1] == -1:
    #         last_position = -1
    #         last_nav =
    #         navs.append(last_nav)
    # Replay the series tick by tick so the plot can be animated; the
    # dynamic_* lists grow by one element per iteration.
    dynamic_closes = list()
    dynamic_signal = list()
    dynamic_indicator = list()
    dynamic_dates = list()
    dynamic_nav = list()
    assert len(closes) == len(signal) == len(indicator) == len(datetimes) == len(navs)
    for elem in zip(closes.values,signal,indicator.values,datetimes.values,navs):
        dynamic_closes.append(elem[0])
        dynamic_signal.append(elem[1])
        dynamic_indicator.append(elem[2])
        dynamic_dates.append(elem[3])
        dynamic_nav.append(elem[4])
        clear_output(wait=True)
        # Without animation, skip all drawing until the final element.
        # NOTE(review): rebuilding the full zip list here each iteration is
        # O(n^2) over the whole loop.
        if animate is False and elem != [elem for elem in zip(closes.values,signal,indicator.values,datetimes.values,navs)][-1]:
            continue
        fig, (ax1,ax2,ax3,ax4) = plt.subplots(nrows=4, sharex=True, subplot_kw=dict(frameon=True),figsize=(20,20)) # frameon=False removes frames
        # x = range(len(dynamic_signal))
        # plt.subplots_adjust(hspace=.0)
        # Panel 1: price with green/red vertical lines at buy/sell signals.
        ax1.grid()
        ax1.plot(dynamic_dates, dynamic_closes, color='green',linewidth=2)
        for i in range(len(dynamic_signal)):
            if dynamic_signal[i] == 1:
                ax1.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='g')
            if dynamic_signal[i] == -1:
                ax1.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='r')
        # closes.plot()
        ax1.axhline(dynamic_closes[-1],color='k')
        ax1.legend([str(symbol)+': '+ str(float(dynamic_closes[-1]))] )
        # ax1.set_xlabel('Days')
        ax1.set_ylabel('Price')
        # Panel 2: the EMA-difference trend indicator with the same markers.
        ax2.grid()
        ax2.plot(dynamic_dates, dynamic_indicator, color='lightgreen', linestyle='--',linewidth=2)
        # indicator.plot()
        for i in range(len(dynamic_signal)):
            if dynamic_signal[i] == 1:
                ax2.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='g')
            if dynamic_signal[i] == -1:
                ax2.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='r')
        ax2.axhline(0,color='k',linestyle='--')
        ax2.legend([float(dynamic_indicator[-1])])
        ax2.set_ylabel('Trend Indicator')
        # Panel 3: simulated strategy NAV.
        ax3.grid()
        ax3.plot(dynamic_dates, dynamic_nav, color='darkmagenta',linewidth=2)
        # leg_strat = ax3.legend(strat_plot,'Strategy: '+ str(dynamic_nav[-1]))
        # bh = [dynamic_closes[idx]/dynamic_closes[0] for idx in range(len(dynamic_closes))]
        # buy_hold_plot = ax3.plot(dynamic_dates, bh, color='y',linewidth=2)
        # leg_bh = ax3.legend(buy_hold_plot,'Strategy: '+ str(bh[-1]))
        for i in range(len(dynamic_signal)):
            if dynamic_signal[i] == 1:
                ax3.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='g')
            if dynamic_signal[i] == -1:
                ax3.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='r')
        # ax3.axhline(dynamic_nav[-1],color='b')
        # ax3.axhline(1,color='k',linestyle='--')
        ax3.legend([round(float(dynamic_nav[-1]),2)])
        ax3.set_ylabel('Strategy ROI')
        # Panel 4: buy-and-hold benchmark (price normalized to the first tick).
        ax4.grid()
        bh = [dynamic_closes[idx]/dynamic_closes[0] for idx in range(len(dynamic_closes))]
        ax4.plot(dynamic_dates, bh, color='hotpink',linewidth=2)
        for i in range(len(dynamic_signal)):
            if dynamic_signal[i] == 1:
                ax4.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='g')
            if dynamic_signal[i] == -1:
                ax4.axvline(pd.DataFrame(dynamic_dates).iloc[pd.DataFrame(dynamic_dates).index.values[i]],color='r')
        ax4.legend([round(float(bh[-1]),2)])
        ax4.set_ylabel('Buy and Hold ROI')
        plt.xlabel('1 Tick = ' + interval)
        plt.xticks(rotation=45)
        # plt.figure(figsize=(10,70))
        plt.show()
        return fig
# --- Streamlit page: interactive backtester front-end -------------------
st.set_page_config(page_title="Manne",
                   page_icon=":money:",
                   layout='wide')
st.title('Backtester')
# st.write("This interactive webapp allows you to explore and visualize how 3 differently trained machine learning models try to predict the mileage of a car, given various numerical attributes about it, including: cylinders, displacement, horsepower, weight, and acceleration. The models used in creating the second visualization can be interactively customized by changing their parameters in the first visualization.")
# '''instantiate exchange object'''
# NOTE(review): API credentials are placeholders; supply real keys via a
# secrets mechanism, never hard-coded in source.
exchange_nmn = ccxt.binance({
    'apiKey': '<KEY>',
    'secret': '<KEY>',
    'enableRateLimit': True,
    'options': {
        'defaultType': 'delivery',
    },
    'hedgeMode':True
})
# '''load markets in exchange object'''
markets = exchange_nmn.load_markets()
# Sidebar controls: symbol, candle interval, and number of candles.
symbol_selection_slider = st.sidebar.select_slider('Symbol',['BTCUSDT','ADAUSDT','BNBUSDT','ETHUSDT','DOGEUSDT','1000SHIBUSDT'],'BTCUSDT')
interval_selection_slider = st.sidebar.select_slider('Interval',['1d','4h','1h','15m'],'1d')
candles_selection_slider = st.sidebar.select_slider('# Candles',list(np.arange(500)+1),200)
# Run the backtest with the selected parameters and render the figure.
fig = get_strat_price_ti_plot(strat='ema_cross_over_under',strat_params={'fast_ema':10,'slow_ema':40},symbol=symbol_selection_slider,interval=interval_selection_slider,candles=candles_selection_slider,exchange=exchange_nmn,decay=0.9995,animate=False)
st.write(fig)
| [
"pandas.read_parquet",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"datetime.datetime.utcnow",
"matplotlib.pyplot.xlabel",
"pandas.Timestamp",
"streamlit.write",
"numpy.float64",
"IPython.display.clear_output",
"ccxt.binance",
"streamlit.set_page_config",
"pandas... | [((826, 859), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (849, 859), False, 'import warnings\n'), ((26022, 26096), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Manne"""', 'page_icon': '""":money:"""', 'layout': '"""wide"""'}), "(page_title='Manne', page_icon=':money:', layout='wide')\n", (26040, 26096), True, 'import streamlit as st\n'), ((26106, 26128), 'streamlit.title', 'st.title', (['"""Backtester"""'], {}), "('Backtester')\n", (26114, 26128), True, 'import streamlit as st\n'), ((26604, 26744), 'ccxt.binance', 'ccxt.binance', (["{'apiKey': '<KEY>', 'secret': '<KEY>', 'enableRateLimit': True, 'options':\n {'defaultType': 'delivery'}, 'hedgeMode': True}"], {}), "({'apiKey': '<KEY>', 'secret': '<KEY>', 'enableRateLimit': True,\n 'options': {'defaultType': 'delivery'}, 'hedgeMode': True})\n", (26616, 26744), False, 'import ccxt\n'), ((26856, 26979), 'streamlit.sidebar.select_slider', 'st.sidebar.select_slider', (['"""Symbol"""', "['BTCUSDT', 'ADAUSDT', 'BNBUSDT', 'ETHUSDT', 'DOGEUSDT', '1000SHIBUSDT']", '"""BTCUSDT"""'], {}), "('Symbol', ['BTCUSDT', 'ADAUSDT', 'BNBUSDT',\n 'ETHUSDT', 'DOGEUSDT', '1000SHIBUSDT'], 'BTCUSDT')\n", (26880, 26979), True, 'import streamlit as st\n'), ((26997, 27066), 'streamlit.sidebar.select_slider', 'st.sidebar.select_slider', (['"""Interval"""', "['1d', '4h', '1h', '15m']", '"""1d"""'], {}), "('Interval', ['1d', '4h', '1h', '15m'], '1d')\n", (27021, 27066), True, 'import streamlit as st\n'), ((27407, 27420), 'streamlit.write', 'st.write', (['fig'], {}), '(fig)\n', (27415, 27420), True, 'import streamlit as st\n'), ((5786, 5830), 'pandas.read_csv', 'pd.read_csv', (['"""paper_trades.csv"""'], {'index_col': '(0)'}), "('paper_trades.csv', index_col=0)\n", (5797, 5830), True, 'import pandas as pd\n'), ((7146, 7190), 'pandas.read_csv', 'pd.read_csv', (['"""paper_trades.csv"""'], {'index_col': '(0)'}), "('paper_trades.csv', index_col=0)\n", (7157, 
7190), True, 'import pandas as pd\n'), ((9728, 9772), 'pandas.read_csv', 'pd.read_csv', (['"""paper_trades.csv"""'], {'index_col': '(0)'}), "('paper_trades.csv', index_col=0)\n", (9739, 9772), True, 'import pandas as pd\n'), ((12829, 12873), 'pandas.read_csv', 'pd.read_csv', (['"""paper_trades.csv"""'], {'index_col': '(0)'}), "('paper_trades.csv', index_col=0)\n", (12840, 12873), True, 'import pandas as pd\n'), ((13336, 13585), 'pandas.DataFrame', 'pd.DataFrame', (["[[symbol, position_type, entry_time, '-', entry_price, '-', leverage, '-', '-']\n ]"], {'columns': "['market_name', 'position_type', 'entry_time', 'exit_time', 'entry_price',\n 'exit_price', 'leverage', 'trade_pnl_pct', 'paper_equity']"}), "([[symbol, position_type, entry_time, '-', entry_price, '-',\n leverage, '-', '-']], columns=['market_name', 'position_type',\n 'entry_time', 'exit_time', 'entry_price', 'exit_price', 'leverage',\n 'trade_pnl_pct', 'paper_equity'])\n", (13348, 13585), True, 'import pandas as pd\n'), ((22517, 22540), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (22529, 22540), False, 'from IPython.display import clear_output\n'), ((25876, 25910), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('1 Tick = ' + interval)"], {}), "('1 Tick = ' + interval)\n", (25886, 25910), True, 'from matplotlib import pyplot as plt\n'), ((25920, 25943), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (25930, 25943), True, 'from matplotlib import pyplot as plt\n'), ((25990, 26000), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25998, 26000), True, 'from matplotlib import pyplot as plt\n'), ((10665, 10691), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (10689, 10691), False, 'import datetime\n'), ((10910, 10936), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (10934, 10936), False, 'import datetime\n'), ((13073, 13099), 'datetime.datetime.utcnow', 
'datetime.datetime.utcnow', ([], {}), '()\n', (13097, 13099), False, 'import datetime\n'), ((13292, 13318), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (13316, 13318), False, 'import datetime\n'), ((17637, 17655), 'pandas.Timestamp', 'pd.Timestamp', (['elem'], {}), '(elem)\n', (17649, 17655), True, 'import pandas as pd\n'), ((17733, 17749), 'numpy.float64', 'np.float64', (['elem'], {}), '(elem)\n', (17743, 17749), True, 'import numpy as np\n'), ((27131, 27145), 'numpy.arange', 'np.arange', (['(500)'], {}), '(500)\n', (27140, 27145), True, 'import numpy as np\n'), ((17534, 17570), 'pandas.read_csv', 'pd.read_csv', (['"""Binance_BTCUSDT_d.csv"""'], {}), "('Binance_BTCUSDT_d.csv')\n", (17545, 17570), True, 'import pandas as pd\n'), ((17412, 17479), 'pandas.read_parquet', 'pd.read_parquet', (['"""C:\\\\Users\\\\ZankarSS\\\\Downloads\\\\BTC-USDT.parquet"""'], {}), "('C:\\\\Users\\\\ZankarSS\\\\Downloads\\\\BTC-USDT.parquet')\n", (17427, 17479), True, 'import pandas as pd\n'), ((23135, 23162), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (23147, 23162), True, 'import pandas as pd\n'), ((23292, 23319), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (23304, 23319), True, 'import pandas as pd\n'), ((23852, 23879), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (23864, 23879), True, 'import pandas as pd\n'), ((24009, 24036), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (24021, 24036), True, 'import pandas as pd\n'), ((24781, 24808), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (24793, 24808), True, 'import pandas as pd\n'), ((24938, 24965), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (24950, 24965), True, 'import pandas as pd\n'), ((25516, 25543), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', 
(25528, 25543), True, 'import pandas as pd\n'), ((25673, 25700), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (25685, 25700), True, 'import pandas as pd\n'), ((23168, 23195), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (23180, 23195), True, 'import pandas as pd\n'), ((23325, 23352), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (23337, 23352), True, 'import pandas as pd\n'), ((23885, 23912), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (23897, 23912), True, 'import pandas as pd\n'), ((24042, 24069), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (24054, 24069), True, 'import pandas as pd\n'), ((24814, 24841), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (24826, 24841), True, 'import pandas as pd\n'), ((24971, 24998), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (24983, 24998), True, 'import pandas as pd\n'), ((25549, 25576), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (25561, 25576), True, 'import pandas as pd\n'), ((25706, 25733), 'pandas.DataFrame', 'pd.DataFrame', (['dynamic_dates'], {}), '(dynamic_dates)\n', (25718, 25733), True, 'import pandas as pd\n')] |
from nose.tools import (assert_true, assert_false, assert_equal,
assert_almost_equal, assert_is, assert_is_not)
from numpy.testing import assert_array_equal, assert_array_almost_equal
import numpy as np
from landlab.graph.sort.sort import remap
def test_remap():
    """remap looks each source value up in the mapping and returns a new array."""
    source = np.array([1, 2, 3, 4])
    lookup = np.array([-1, 10, 20, 30, 40])
    result = remap(source, lookup)
    assert_array_equal(result, [10, 20, 30, 40])
    assert_is_not(result, source)
def test_remap_inplace():
    """With inplace=True, remap writes into (and returns) the source array."""
    source = np.array([1, 2, 3, 4])
    lookup = np.array([-1, 10, 20, 30, 40])
    result = remap(source, lookup, inplace=True)
    assert_array_equal(result, [10, 20, 30, 40])
    assert_is(result, source)
def test_remap_out():
    """With out=..., remap fills the provided destination array and returns it."""
    source = np.array([1, 2, 3, 4])
    destination = np.empty_like(source)
    lookup = np.array([-1, 10, 20, 30, 40])
    result = remap(source, lookup, out=destination)
    assert_array_equal(result, [10, 20, 30, 40])
    assert_is(result, destination)
| [
"landlab.graph.sort.sort.remap",
"numpy.array",
"numpy.empty_like",
"nose.tools.assert_is",
"nose.tools.assert_is_not",
"numpy.testing.assert_array_equal"
] | [((300, 322), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (308, 322), True, 'import numpy as np\n'), ((337, 367), 'numpy.array', 'np.array', (['[-1, 10, 20, 30, 40]'], {}), '([-1, 10, 20, 30, 40])\n', (345, 367), True, 'import numpy as np\n'), ((379, 398), 'landlab.graph.sort.sort.remap', 'remap', (['src', 'mapping'], {}), '(src, mapping)\n', (384, 398), False, 'from landlab.graph.sort.sort import remap\n'), ((403, 444), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['rtn', '[10, 20, 30, 40]'], {}), '(rtn, [10, 20, 30, 40])\n', (421, 444), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((449, 472), 'nose.tools.assert_is_not', 'assert_is_not', (['rtn', 'src'], {}), '(rtn, src)\n', (462, 472), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_almost_equal, assert_is, assert_is_not\n'), ((511, 533), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (519, 533), True, 'import numpy as np\n'), ((548, 578), 'numpy.array', 'np.array', (['[-1, 10, 20, 30, 40]'], {}), '([-1, 10, 20, 30, 40])\n', (556, 578), True, 'import numpy as np\n'), ((590, 623), 'landlab.graph.sort.sort.remap', 'remap', (['src', 'mapping'], {'inplace': '(True)'}), '(src, mapping, inplace=True)\n', (595, 623), False, 'from landlab.graph.sort.sort import remap\n'), ((629, 670), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['rtn', '[10, 20, 30, 40]'], {}), '(rtn, [10, 20, 30, 40])\n', (647, 670), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((675, 694), 'nose.tools.assert_is', 'assert_is', (['rtn', 'src'], {}), '(rtn, src)\n', (684, 694), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_almost_equal, assert_is, assert_is_not\n'), ((729, 751), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (737, 751), True, 'import numpy as np\n'), ((762, 780), 'numpy.empty_like', 
'np.empty_like', (['src'], {}), '(src)\n', (775, 780), True, 'import numpy as np\n'), ((795, 825), 'numpy.array', 'np.array', (['[-1, 10, 20, 30, 40]'], {}), '([-1, 10, 20, 30, 40])\n', (803, 825), True, 'import numpy as np\n'), ((837, 865), 'landlab.graph.sort.sort.remap', 'remap', (['src', 'mapping'], {'out': 'dst'}), '(src, mapping, out=dst)\n', (842, 865), False, 'from landlab.graph.sort.sort import remap\n'), ((871, 912), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['rtn', '[10, 20, 30, 40]'], {}), '(rtn, [10, 20, 30, 40])\n', (889, 912), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((917, 936), 'nose.tools.assert_is', 'assert_is', (['rtn', 'dst'], {}), '(rtn, dst)\n', (926, 936), False, 'from nose.tools import assert_true, assert_false, assert_equal, assert_almost_equal, assert_is, assert_is_not\n')] |
import numpy as np
from . import _librootnumpy
__all__ = [
'array',
]
def array(arr, copy=True):
    """Convert a ROOT TArray into a NumPy array.

    Parameters
    ----------
    arr : ROOT TArray
        One of TArrayD, TArrayF, TArrayL, TArrayI, TArrayS or TArrayC.
    copy : bool, optional (default=True)
        When True return an owning copy of the data; when False the NumPy
        array is a view onto (and does not own) the ROOT array's memory.

    Returns
    -------
    arr : NumPy array
        A NumPy array

    Examples
    --------
    >>> from root_numpy import array
    >>> from ROOT import TArrayD
    >>> a = TArrayD(5)
    >>> a[3] = 3.141
    >>> array(a)
    array([ 0.   ,  0.   ,  0.   ,  3.141,  0.   ])
    """
    import ROOT
    # Ordered (ROOT type, converter) pairs; checked with isinstance in the
    # same order as the original if/elif chain.
    dispatch = (
        (ROOT.TArrayD, _librootnumpy.array_d),
        (ROOT.TArrayF, _librootnumpy.array_f),
        (ROOT.TArrayL, _librootnumpy.array_l),
        (ROOT.TArrayI, _librootnumpy.array_i),
        (ROOT.TArrayS, _librootnumpy.array_s),
        (ROOT.TArrayC, _librootnumpy.array_c),
    )
    for root_type, convert in dispatch:
        if isinstance(arr, root_type):
            result = convert(ROOT.AsCObject(arr))
            break
    else:
        raise TypeError(
            "unable to convert object of type {0} "
            "into a numpy array".format(type(arr)))
    if copy:
        return np.copy(result)
    return result
| [
"numpy.copy",
"ROOT.AsCObject"
] | [((1515, 1527), 'numpy.copy', 'np.copy', (['arr'], {}), '(arr)\n', (1522, 1527), True, 'import numpy as np\n'), ((842, 861), 'ROOT.AsCObject', 'ROOT.AsCObject', (['arr'], {}), '(arr)\n', (856, 861), False, 'import ROOT\n'), ((939, 958), 'ROOT.AsCObject', 'ROOT.AsCObject', (['arr'], {}), '(arr)\n', (953, 958), False, 'import ROOT\n'), ((1036, 1055), 'ROOT.AsCObject', 'ROOT.AsCObject', (['arr'], {}), '(arr)\n', (1050, 1055), False, 'import ROOT\n'), ((1133, 1152), 'ROOT.AsCObject', 'ROOT.AsCObject', (['arr'], {}), '(arr)\n', (1147, 1152), False, 'import ROOT\n'), ((1230, 1249), 'ROOT.AsCObject', 'ROOT.AsCObject', (['arr'], {}), '(arr)\n', (1244, 1249), False, 'import ROOT\n'), ((1327, 1346), 'ROOT.AsCObject', 'ROOT.AsCObject', (['arr'], {}), '(arr)\n', (1341, 1346), False, 'import ROOT\n')] |
from itertools import zip_longest
import itertools
import tensorflow as tf
from functools import reduce
from operator import mul
import numpy as np
import re
VERY_BIG_NUMBER = 1e30
VERY_SMALL_NUMBER = 1e-30
VERY_POSITIVE_NUMBER = VERY_BIG_NUMBER
VERY_NEGATIVE_NUMBER = -VERY_BIG_NUMBER
def add_summary_zero_fraction(t, threshold=0.0):
    """Log a scalar summary named '<t>/sparsity'.

    The value is the zero-fraction of the 0/1 indicator of |t| > threshold,
    i.e. the fraction of entries whose magnitude is at or below threshold.
    """
    above = tf.greater(tf.abs(t), threshold)
    fraction = tf.nn.zero_fraction(tf.cast(above, tf.int8))
    tf.summary.scalar(t.op.name + '/sparsity', fraction)
def get_initializer(matrix):
    """Wrap a constant value in a TF-style initializer callable.

    The returned function has the (shape, dtype, partition_info, **kwargs)
    signature expected by tf.get_variable, but ignores every argument and
    always returns ``matrix``.
    """
    def _initializer(shape, dtype=None, partition_info=None, **kwargs):
        return matrix
    return _initializer
def variable_on_cpu(name, shape, initializer):
    """Create (or fetch) a variable pinned to host (CPU) memory.

    Args:
      name: name of the variable.
      shape: list of ints giving the variable's shape.
      initializer: initializer used if the variable is created.

    Returns:
      The Variable tensor placed on '/cpu:0'.
    """
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer)
def variable_with_weight_decay(name, shape, stddev, wd):
    """Create a truncated-normal-initialized CPU variable, optionally with
    an L2 weight-decay term registered in the 'losses' collection.

    Args:
      name: name of the variable.
      shape: list of ints.
      stddev: standard deviation of the truncated normal initializer.
      wd: L2 loss multiplier; when falsy (None or 0) no decay is added.

    Returns:
      The created Variable tensor.
    """
    init = tf.truncated_normal_initializer(stddev=stddev)
    var = variable_on_cpu(name, shape, init)
    if wd:
        decay_term = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', decay_term)
    return var
def average_gradients(tower_grads):
    """Average per-tower gradients for each shared variable.

    This is a synchronization point across all towers.

    Args:
      tower_grads: list (over towers) of lists of (gradient, variable)
        tuples, one inner list per tower.

    Returns:
      List of (gradient, variable) pairs where each gradient is the mean
      over all towers.
    """
    averaged = []
    # zip(*...) groups the same variable's (grad, var) pair from every tower.
    for per_tower in zip(*tower_grads):
        expanded = []
        for g, var in per_tower:
            # Every tower must have produced a gradient for this variable.
            assert g is not None, var.name
            # Add a leading 'tower' axis so the grads can be stacked.
            expanded.append(tf.expand_dims(g, 0))
        stacked = tf.concat(axis=0, values=expanded)
        mean_grad = tf.reduce_mean(stacked, 0)
        # The variable is shared across towers, so the first tower's
        # pointer stands in for all of them.
        averaged.append((mean_grad, per_tower[0][1]))
    return averaged
def mask(val, mask, name=None):
    """Zero out entries of ``val`` wherever ``mask`` is False/0.

    Multiplies ``val`` elementwise by the mask cast to float.
    """
    return tf.multiply(val, tf.cast(mask, 'float'),
                       name='mask' if name is None else name)
def exp_mask(val, mask, name=None):
    """Add a very large negative number to the masked-out entries of ``val``.

    For example, [-3, -2, 10], [True, True, False] -> [-3, -2, ~-1e30], so a
    subsequent softmax/exp effectively zeroes the masked positions.

    Args:
      val: values to be masked.
      mask: boolean masking tensor, same shape as ``val``.
      name: optional name for the output tensor (default "exp_mask").

    Returns:
      Tensor of the same shape as ``val`` with unmasked entries unchanged
      and masked entries shifted toward negative infinity.
    """
    if name is None:
        name = "exp_mask"
    penalty = (1 - tf.cast(mask, 'float')) * VERY_NEGATIVE_NUMBER
    return tf.add(val, penalty, name=name)
def flatten(tensor, keep):
    """Collapse all but the last ``keep`` dimensions of ``tensor`` into one.

    Unknown static dimensions fall back to the dynamic shape.
    """
    static = tensor.get_shape().as_list()
    split = len(static) - keep
    # Per-dimension size: static where known, dynamic otherwise.
    dims = [static[i] or tf.shape(tensor)[i] for i in range(len(static))]
    merged = reduce(mul, dims[:split])
    return tf.reshape(tensor, [merged] + dims[split:])
return flat
def reconstruct(tensor, ref, keep):
    """Inverse of flatten: reshape ``tensor`` so its leading dimensions
    match ``ref`` while keeping the last ``keep`` dimensions of ``tensor``.

    Unknown static dimensions fall back to the dynamic shape.
    """
    ref_dims = ref.get_shape().as_list()
    own_dims = tensor.get_shape().as_list()
    lead_count = len(ref_dims) - keep
    tail_start = len(own_dims) - keep
    leading = [ref_dims[i] or tf.shape(ref)[i] for i in range(lead_count)]
    trailing = [own_dims[i] or tf.shape(tensor)[i]
                for i in range(tail_start, len(own_dims))]
    return tf.reshape(tensor, leading + trailing)
def add_wd(wd, scope=None):
    """Register an L2 weight-decay loss (scaled by ``wd``) in the 'losses'
    collection for every trainable variable under ``scope``.

    A falsy ``scope`` (None or '') defaults to the current variable scope.
    """
    if not scope:
        scope = tf.get_variable_scope().name
    trainables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
    with tf.name_scope("weight_decay"):
        for var in trainables:
            decay_term = tf.multiply(tf.nn.l2_loss(var), wd, name="{}/wd".format(var.op.name))
            tf.add_to_collection('losses', decay_term)
def _excluded_var_pattern():
#return "(main/logits)|(main/p0/bi_attention)|(prepro/u1)"
return "(thisisapatternwetrytoexcludenothing)"
def add_sparsity_regularization(wd, collection_name=None, scope=None):
    """Add an L1 sparsity penalty over trainable variables to 'losses'.

    Variables whose op name matches `_excluded_var_pattern()` are skipped.
    Every regularized variable is also recorded in `collection_name`
    (defaulting to 'sparse_vars') for later inspection.

    Args:
        wd: L1 regularization scale.
        collection_name: collection to record regularized variables in.
        scope: variable scope to search for trainable variables.
    """
    candidates = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
    variables = [v for v in candidates
                 if not re.match(_excluded_var_pattern(), v.op.name)]
    with tf.name_scope("sparsity_regular"):
        if len(variables):
            l1 = tf.contrib.layers.l1_regularizer(scale=wd, scope=scope)
            reg_loss = tf.contrib.layers.apply_regularization(l1, variables)
            tf.add_to_collection('losses', reg_loss)
        # Remember which variables were regularized.
        collection_name = collection_name or 'sparse_vars'
        for v in variables:
            tf.add_to_collection(collection_name, v)
def reduce_square_sum(var, start=0, end=0, axis=0):
    """Sum of squares of a 2-D variable along `axis`, sliced to [start, end).

    Args:
        var: a 2-D tensor/variable; other ranks are not supported.
        start: first index (inclusive) of the reduced vector to keep.
        end: last index (exclusive); must be > start.
        axis: axis to reduce over; must be 0 or 1.
    Returns:
        1-D tensor of length end-start containing the selected square-sums.
    Raises:
        NotImplementedError: if `var` is not rank 2.
        AssertionError: if end <= start or axis >= 2.
    """
    the_shape = var.get_shape().as_list()
    if len(the_shape) != 2:
        raise NotImplementedError('variables with shapes != 2 is not implemented.')
    # Fix: validate arguments BEFORE building any graph ops (the original
    # asserted only after tf.square/tf.reduce_sum had already been added).
    assert end > start and axis < 2
    t = tf.square(var)
    t = tf.reduce_sum(t, axis=axis)
    return tf.gather(t, tf.range(start, end))
def add_mixedlasso(groupwd, l1wd, coef_scaling=False, collection_name=None, scope=None):
    """Add a mixed group-lasso / L1 regularization loss over trainable variables.

    For each trainable variable under `scope` (excluding those matching
    `_excluded_var_pattern()`):
      - rank <= 1: plain L1 penalty scaled by `l1wd` (L1 is group lasso
        with group size 1);
      - rank == 2: a group-lasso term per non-degenerate axis — the sqrt of
        the per-row/column sum of squares, scaled by `groupwd` (and by
        sqrt(group size) when `coef_scaling` is True); a size-1 axis falls
        back to L1;
      - rank > 2: not implemented.
    Each resulting term is added to the 'losses' collection, and the
    variable is recorded in `collection_name` (default 'sparse_vars').
    """
    orig_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
    variables = []
    for eachvar in orig_variables:
        # Skip variables matching the exclusion regex (currently matches nothing).
        if not re.match(_excluded_var_pattern(), eachvar.op.name):
            variables.append(eachvar)
    with tf.name_scope("DimenGroupLasso"):
        collection_name = collection_name or 'sparse_vars'
        for eachvar in variables:
            the_shape = eachvar.get_shape().as_list()
            if len(the_shape)<=1: # l1 is group lasso when the group size is 1
                the_regularizer = tf.contrib.layers.l1_regularizer(scale=l1wd, scope=scope)
                reg = tf.contrib.layers.apply_regularization(the_regularizer, [eachvar])
            elif len(the_shape)==2:
                reg = 0.0
                for s, axis in zip(the_shape, range(len(the_shape))):
                    # Skip the degenerate case where this axis spans the whole
                    # variable (i.e. the other axis has size 1).
                    if s != np.prod(the_shape):
                        if s == 1:
                            the_regularizer = tf.contrib.layers.l1_regularizer(scale=l1wd, scope=scope)
                            reg = reg + tf.contrib.layers.apply_regularization(the_regularizer, [eachvar])
                        else:
                            # Group lasso along `axis`: sqrt(sum of squares)
                            # per group; epsilon keeps sqrt differentiable at 0.
                            t = tf.square(eachvar)
                            t = tf.reduce_sum(t, axis=axis) + tf.constant(1.0e-8)
                            t = tf.sqrt(t)
                            if coef_scaling:
                                # Scale by sqrt(group size), the standard
                                # group-lasso weighting.
                                reg = reg + tf.reduce_sum(t) * groupwd * np.sqrt(s)
                            else:
                                reg = reg + tf.reduce_sum(t) * groupwd
            else:
                raise NotImplementedError('variables with shapes > 2 is not implemented.')
            tf.add_to_collection('losses', reg)
            tf.add_to_collection(collection_name, eachvar)
def grouper(iterable, n, fillvalue=None, shorten=False, num_groups=None):
args = [iter(iterable)] * n
out = zip_longest(*args, fillvalue=fillvalue)
out = list(out)
if num_groups is not None:
default = (fillvalue, ) * n
assert isinstance(num_groups, int)
out = list(each for each, _ in zip_longest(out, range(num_groups), fillvalue=default))
if shorten:
assert fillvalue is None
out = (tuple(e for e in each if e is not None) for each in out)
return out
def padded_reshape(tensor, shape, mode='CONSTANT', name=None):
    """Pad `tensor` at the trailing end of each dimension to reach `shape`.

    Args:
        tensor: input tensor whose rank equals len(shape).
        shape: target per-dimension sizes (each >= the runtime size).
        mode: padding mode forwarded to `tf.pad`.
        name: optional name for the output op.
    Returns:
        `tensor` padded up to `shape`.
    """
    pad_spec = []
    for axis in range(len(shape)):
        # Pad only after the existing data, never before.
        pad_spec.append([0, shape[axis] - tf.shape(tensor)[axis]])
    return tf.pad(tensor, pad_spec, mode=mode, name=name)
def get_num_params():
    """Return the total number of scalar parameters in all trainable variables."""
    total = 0
    for v in tf.trainable_variables():
        count = 1
        for dim in v.get_shape():
            count = count * dim.value
        total += count
    return total
def zerout_gradients_for_zero_weights(grads_and_vars, zero_threshold=0.0, mode='element'):
    """ zerout gradients for weights with zero values, so as to freeze zero weights.
    (make sure the history gradients are zeros too, otherwise, zero weights can still be updated in adam etc)
    Args:
        grads_and_vars: Lists of (gradient, variable).
        zero_threshold: weights with |value| <= this threshold count as zero.
        mode: the mode to freeze weights.
            'element': freeze all zero weights
            'group': freeze rows/columns that are fully zeros (not implemented)
    """
    gradients, variables = zip(*grads_and_vars)
    zerout_gradients = []
    for gradient, variable in zip(gradients, variables):
        # Variables with no gradient pass through untouched.
        if gradient is None:
            zerout_gradients.append(None)
            continue
        if mode=='element':
            # True wherever the weight is (near-)zero, i.e. should stay frozen.
            where_cond = tf.less_equal(tf.abs(variable), zero_threshold)
        elif mode=='group':
            raise NotImplementedError('Group wise freezing is not implemented yet.')
        else:
            raise ValueError('Unsupported mode == %s' % mode)
        # Replace the gradient with zero where the weight itself is zero.
        zerout_gradient = tf.where(where_cond,
                        tf.zeros_like(gradient),
                        gradient)
        zerout_gradients.append(zerout_gradient)
return list(zip(zerout_gradients, variables)) | [
"tensorflow.contrib.layers.apply_regularization",
"numpy.prod",
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.get_variable",
"numpy.sqrt",
"tensorflow.reduce_sum",
"tensorflow.truncated_normal_initializer",
"tensorflow.get_variable_scope",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tens... | [((4437, 4466), 'tensorflow.reshape', 'tf.reshape', (['tensor', 'out_shape'], {}), '(tensor, out_shape)\n', (4447, 4466), True, 'import tensorflow as tf\n'), ((5074, 5106), 'tensorflow.reshape', 'tf.reshape', (['tensor', 'target_shape'], {}), '(tensor, target_shape)\n', (5084, 5106), True, 'import tensorflow as tf\n'), ((5218, 5282), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': 'scope'}), '(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)\n', (5235, 5282), True, 'import tensorflow as tf\n'), ((5744, 5808), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': 'scope'}), '(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)\n', (5761, 5808), True, 'import tensorflow as tf\n'), ((6945, 7009), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': 'scope'}), '(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)\n', (6962, 7009), True, 'import tensorflow as tf\n'), ((8819, 8858), 'itertools.zip_longest', 'zip_longest', (['*args'], {'fillvalue': 'fillvalue'}), '(*args, fillvalue=fillvalue)\n', (8830, 8858), False, 'from itertools import zip_longest\n'), ((9375, 9421), 'tensorflow.pad', 'tf.pad', (['tensor', 'paddings'], {'mode': 'mode', 'name': 'name'}), '(tensor, paddings, mode=mode, name=name)\n', (9381, 9421), True, 'import tensorflow as tf\n'), ((9485, 9509), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (9507, 9509), True, 'import tensorflow as tf\n'), ((914, 933), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (923, 933), True, 'import tensorflow as tf\n'), ((949, 1002), 'tensorflow.get_variable', 'tf.get_variable', (['name', 'shape'], {'initializer': 'initializer'}), '(name, shape, initializer=initializer)\n', (964, 1002), True, 'import tensorflow as tf\n'), ((1640, 1686), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 
'stddev'}), '(stddev=stddev)\n', (1671, 1686), True, 'import tensorflow as tf\n'), ((1786, 1830), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""losses"""', 'weight_decay'], {}), "('losses', weight_decay)\n", (1806, 1830), True, 'import tensorflow as tf\n'), ((2972, 3003), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': 'grads'}), '(axis=0, values=grads)\n', (2981, 3003), True, 'import tensorflow as tf\n'), ((3019, 3042), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grad', '(0)'], {}), '(grad, 0)\n', (3033, 3042), True, 'import tensorflow as tf\n'), ((3466, 3488), 'tensorflow.cast', 'tf.cast', (['mask', '"""float"""'], {}), "(mask, 'float')\n", (3473, 3488), True, 'import tensorflow as tf\n'), ((5292, 5321), 'tensorflow.name_scope', 'tf.name_scope', (['"""weight_decay"""'], {}), "('weight_decay')\n", (5305, 5321), True, 'import tensorflow as tf\n'), ((5977, 6010), 'tensorflow.name_scope', 'tf.name_scope', (['"""sparsity_regular"""'], {}), "('sparsity_regular')\n", (5990, 6010), True, 'import tensorflow as tf\n'), ((6584, 6598), 'tensorflow.square', 'tf.square', (['var'], {}), '(var)\n', (6593, 6598), True, 'import tensorflow as tf\n'), ((6611, 6638), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['t'], {'axis': 'axis'}), '(t, axis=axis)\n', (6624, 6638), True, 'import tensorflow as tf\n'), ((7178, 7210), 'tensorflow.name_scope', 'tf.name_scope', (['"""DimenGroupLasso"""'], {}), "('DimenGroupLasso')\n", (7191, 7210), True, 'import tensorflow as tf\n'), ((9570, 9614), 'functools.reduce', 'reduce', (['mul', '[dim.value for dim in shape]', '(1)'], {}), '(mul, [dim.value for dim in shape], 1)\n', (9576, 9614), False, 'from functools import reduce\n'), ((1734, 1752), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), '(var)\n', (1747, 1752), True, 'import tensorflow as tf\n'), ((2773, 2793), 'tensorflow.expand_dims', 'tf.expand_dims', (['g', '(0)'], {}), '(g, 0)\n', (2787, 2793), True, 'import tensorflow as tf\n'), ((5173, 5196), 
'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (5194, 5196), True, 'import tensorflow as tf\n'), ((5462, 5506), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""losses"""', 'weight_decay'], {}), "('losses', weight_decay)\n", (5482, 5506), True, 'import tensorflow as tf\n'), ((6069, 6124), 'tensorflow.contrib.layers.l1_regularizer', 'tf.contrib.layers.l1_regularizer', ([], {'scale': 'wd', 'scope': 'scope'}), '(scale=wd, scope=scope)\n', (6101, 6124), True, 'import tensorflow as tf\n'), ((6148, 6214), 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', (['the_regularizer', 'variables'], {}), '(the_regularizer, variables)\n', (6186, 6214), True, 'import tensorflow as tf\n'), ((6227, 6267), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""losses"""', 'reg_loss'], {}), "('losses', reg_loss)\n", (6247, 6267), True, 'import tensorflow as tf\n'), ((6402, 6448), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['collection_name', 'eachvar'], {}), '(collection_name, eachvar)\n', (6422, 6448), True, 'import tensorflow as tf\n'), ((6700, 6720), 'tensorflow.range', 'tf.range', (['start', 'end'], {}), '(start, end)\n', (6708, 6720), True, 'import tensorflow as tf\n'), ((8607, 8642), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""losses"""', 'reg'], {}), "('losses', reg)\n", (8627, 8642), True, 'import tensorflow as tf\n'), ((8655, 8701), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['collection_name', 'eachvar'], {}), '(collection_name, eachvar)\n', (8675, 8701), True, 'import tensorflow as tf\n'), ((10647, 10670), 'tensorflow.zeros_like', 'tf.zeros_like', (['gradient'], {}), '(gradient)\n', (10660, 10670), True, 'import tensorflow as tf\n'), ((4064, 4086), 'tensorflow.cast', 'tf.cast', (['mask', '"""float"""'], {}), "(mask, 'float')\n", (4071, 4086), True, 'import tensorflow as tf\n'), ((4725, 4738), 'tensorflow.shape', 'tf.shape', (['ref'], {}), 
'(ref)\n', (4733, 4738), True, 'import tensorflow as tf\n'), ((4805, 4821), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (4813, 4821), True, 'import tensorflow as tf\n'), ((5392, 5410), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), '(var)\n', (5405, 5410), True, 'import tensorflow as tf\n'), ((7472, 7529), 'tensorflow.contrib.layers.l1_regularizer', 'tf.contrib.layers.l1_regularizer', ([], {'scale': 'l1wd', 'scope': 'scope'}), '(scale=l1wd, scope=scope)\n', (7504, 7529), True, 'import tensorflow as tf\n'), ((7552, 7618), 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', (['the_regularizer', '[eachvar]'], {}), '(the_regularizer, [eachvar])\n', (7590, 7618), True, 'import tensorflow as tf\n'), ((10387, 10403), 'tensorflow.abs', 'tf.abs', (['variable'], {}), '(variable)\n', (10393, 10403), True, 'import tensorflow as tf\n'), ((444, 453), 'tensorflow.abs', 'tf.abs', (['t'], {}), '(t)\n', (450, 453), True, 'import tensorflow as tf\n'), ((4277, 4293), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (4285, 4293), True, 'import tensorflow as tf\n'), ((4365, 4381), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (4373, 4381), True, 'import tensorflow as tf\n'), ((9315, 9331), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (9323, 9331), True, 'import tensorflow as tf\n'), ((7779, 7797), 'numpy.prod', 'np.prod', (['the_shape'], {}), '(the_shape)\n', (7786, 7797), True, 'import numpy as np\n'), ((7880, 7937), 'tensorflow.contrib.layers.l1_regularizer', 'tf.contrib.layers.l1_regularizer', ([], {'scale': 'l1wd', 'scope': 'scope'}), '(scale=l1wd, scope=scope)\n', (7912, 7937), True, 'import tensorflow as tf\n'), ((8107, 8125), 'tensorflow.square', 'tf.square', (['eachvar'], {}), '(eachvar)\n', (8116, 8125), True, 'import tensorflow as tf\n'), ((8240, 8250), 'tensorflow.sqrt', 'tf.sqrt', (['t'], {}), '(t)\n', (8247, 8250), True, 'import tensorflow as tf\n'), 
((7978, 8044), 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', (['the_regularizer', '[eachvar]'], {}), '(the_regularizer, [eachvar])\n', (8016, 8044), True, 'import tensorflow as tf\n'), ((8158, 8185), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['t'], {'axis': 'axis'}), '(t, axis=axis)\n', (8171, 8185), True, 'import tensorflow as tf\n'), ((8188, 8206), 'tensorflow.constant', 'tf.constant', (['(1e-08)'], {}), '(1e-08)\n', (8199, 8206), True, 'import tensorflow as tf\n'), ((8369, 8379), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (8376, 8379), True, 'import numpy as np\n'), ((8458, 8474), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['t'], {}), '(t)\n', (8471, 8474), True, 'import tensorflow as tf\n'), ((8340, 8356), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['t'], {}), '(t)\n', (8353, 8356), True, 'import tensorflow as tf\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.