code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Tests for fractopo_subsampling.utils.
"""
import numpy as np
import pandas as pd
import pytest
import fractopo_subsampling.utils as utils
import tests
def test_random_sample_of_circles():
    """
    Test random_sample_of_circles.
    """
    rows = (
        [{"key": "a", "area": 1}] * 2
        + [{"key": "b", "area": 2}] * 6
        + [{"key": "b", "area": 1000}]
    )
    df = pd.DataFrame(rows).astype({"key": "category"})
    df["radius"] = np.sqrt(df["area"] / np.pi)
    grouped = df.groupby(by="key")
    circle_names_with_diameter = {"a": 50, "b": 1}
    single_result = utils.random_sample_of_circles(grouped, circle_names_with_diameter)
    assert isinstance(single_result, list)
    assert all(isinstance(val, pd.Series) for val in single_result)
    results = [
        utils.random_sample_of_circles(grouped, circle_names_with_diameter)
        for _ in range(100)
    ]
    name_counts = dict()
    collect_results = []
    for result in results:
        for srs in result:
            srs_key = srs["key"]
            name_counts[srs_key] = name_counts.get(srs_key, 0) + 1
        collect_results.append(result)
    # "a" has a much larger diameter so it should be drawn more often.
    assert name_counts["a"] > name_counts["b"]
    return collect_results
def test_aggregate_chosen_manual():
    """
    Test aggregate_chosen.
    """
    chosen = [
        pd.Series({"area": 1, "intensity": 5}),
        pd.Series({"area": 10, "intensity": 5}),
    ]
    result = utils.aggregate_chosen(chosen, {"intensity": "mean"})
    assert isinstance(result, dict)
    assert all(isinstance(key, str) for key in result)
    assert all(isinstance(val, (float, int)) for val in result.values())
    # Both samples have intensity 5 so the mean must be 5.
    assert np.isclose(result["intensity"], 5)
@pytest.mark.parametrize(
    "chosen,params_with_func,assume_result", tests.test_aggregate_chosen_params()
)
def test_aggregate_chosen(chosen, params_with_func, assume_result):
    """
    Test aggregate_chosen with pytest params.
    """
    result = utils.aggregate_chosen(chosen, params_with_func)
    assert isinstance(result, dict)
    # Only compare keys that the expectation dict actually specifies.
    for key, value in result.items():
        if key in assume_result:
            assert np.isclose(value, assume_result[key])
| [
"pandas.Series",
"numpy.sqrt",
"numpy.isclose",
"fractopo_subsampling.utils.random_sample_of_circles",
"fractopo_subsampling.utils.aggregate_chosen",
"tests.test_aggregate_chosen_params",
"pandas.DataFrame"
] | [((678, 705), 'numpy.sqrt', 'np.sqrt', (["(df['area'] / np.pi)"], {}), "(df['area'] / np.pi)\n", (685, 705), True, 'import numpy as np\n'), ((812, 879), 'fractopo_subsampling.utils.random_sample_of_circles', 'utils.random_sample_of_circles', (['grouped', 'circle_names_with_diameter'], {}), '(grouped, circle_names_with_diameter)\n', (842, 879), True, 'import fractopo_subsampling.utils as utils\n'), ((1797, 1845), 'fractopo_subsampling.utils.aggregate_chosen', 'utils.aggregate_chosen', (['chosen', 'params_with_func'], {}), '(chosen, params_with_func)\n', (1819, 1845), True, 'import fractopo_subsampling.utils as utils\n'), ((2025, 2059), 'numpy.isclose', 'np.isclose', (["result['intensity']", '(5)'], {}), "(result['intensity'], 5)\n", (2035, 2059), True, 'import numpy as np\n'), ((2315, 2363), 'fractopo_subsampling.utils.aggregate_chosen', 'utils.aggregate_chosen', (['chosen', 'params_with_func'], {}), '(chosen, params_with_func)\n', (2337, 2363), True, 'import fractopo_subsampling.utils as utils\n'), ((2133, 2169), 'tests.test_aggregate_chosen_params', 'tests.test_aggregate_chosen_params', ([], {}), '()\n', (2167, 2169), False, 'import tests\n'), ((1019, 1086), 'fractopo_subsampling.utils.random_sample_of_circles', 'utils.random_sample_of_circles', (['grouped', 'circle_names_with_diameter'], {}), '(grouped, circle_names_with_diameter)\n', (1049, 1086), True, 'import fractopo_subsampling.utils as utils\n'), ((1701, 1714), 'pandas.Series', 'pd.Series', (['cd'], {}), '(cd)\n', (1710, 1714), True, 'import pandas as pd\n'), ((2496, 2539), 'numpy.isclose', 'np.isclose', (['result[key]', 'assume_result[key]'], {}), '(result[key], assume_result[key])\n', (2506, 2539), True, 'import numpy as np\n'), ((255, 510), 'pandas.DataFrame', 'pd.DataFrame', (["[{'key': 'a', 'area': 1}, {'key': 'a', 'area': 1}, {'key': 'b', 'area': 2},\n {'key': 'b', 'area': 2}, {'key': 'b', 'area': 2}, {'key': 'b', 'area': \n 2}, {'key': 'b', 'area': 2}, {'key': 'b', 'area': 2}, {'key': 'b',\n 
'area': 1000}]"], {}), "([{'key': 'a', 'area': 1}, {'key': 'a', 'area': 1}, {'key': 'b',\n 'area': 2}, {'key': 'b', 'area': 2}, {'key': 'b', 'area': 2}, {'key':\n 'b', 'area': 2}, {'key': 'b', 'area': 2}, {'key': 'b', 'area': 2}, {\n 'key': 'b', 'area': 1000}])\n", (267, 510), True, 'import pandas as pd\n')] |
import pickle
import numpy as np
from matplotlib import pyplot as plt
def plot_stat_mean(stat_key='mean', methods=None, enemy=2, seeds=None, prefix=None, fancy=False, savepath=''):
    """Plot the mean +/- std of a logged statistic across seeds and methods.

    Args:
        stat_key: key of the per-step statistic inside each log entry.
        methods: method directory names to aggregate; defaults to
            ['method_1', 'method2'] (a mutable default argument was a bug).
        enemy: enemy id used in the results path.
        seeds: run seeds to aggregate; defaults to the ten standard seeds.
        prefix: log-file prefix; overwritten per known method name.
        fancy: if True, draw dotted error bars plus a shaded std band.
        savepath: if non-empty, save the figure there instead of showing it.
    """
    runs = []
    # Avoid mutable default arguments: resolve defaults inside the body.
    if methods is None:
        methods = ['method_1', 'method2']
    if seeds is None:
        seeds = [111, 222, 333, 444, 555, 666, 777, 888, 999, 1010]
    for method in methods:
        if method == 'method_1':
            prefix = 'roundrobin'
        elif method == 'method2':
            prefix = 'diversity_roundrobin'
        for seed in seeds:
            log_path = 'results/{}/{}_enemy{}_seed_{}/logs_iter_30'.format(method, prefix, enemy, seed)
            # Use a context manager so the file handle is not leaked.
            with open(log_path, "rb") as f:
                logs = pickle.load(f)
            runs.append([step[stat_key] for step in logs[0]])
    np_runs = np.asarray(runs)
    if stat_key == 'diversity':
        # Normalize diversity by population size * genome length.
        np_runs /= 100 * 265
    mean = np_runs.mean(axis=0)
    std = np_runs.std(axis=0)
    if fancy:
        plt.errorbar(np.arange(len(mean)), mean, std, alpha=.75, fmt=':', capsize=3, capthick=1)
        plt.fill_between(np.arange(len(mean)), mean-std, mean+std, alpha=.25)
    else:
        plt.errorbar(np.arange(len(mean)), mean, std, linestyle='-', marker='o')
    #plt.plot(range(len(mean)), mean, '-o')
    if savepath == '':
        plt.show()
    else:
        plt.savefig(savepath)
def plot_stat(logs, stat_key='mean', savepath=''):
    """Plot one run's per-step statistic; show it, or save when savepath is set."""
    values = [entry[stat_key] for entry in logs[0]]
    plt.plot(range(len(values)), values)
    if savepath == '':
        plt.show()
    else:
        plt.savefig(savepath)
if __name__ == "__main__":
    #log_path = "results/method2/diversity_roundrobin_enemy2_seed_222/logs_iter_30"
    #logs = pickle.load(open(log_path, "rb"))
    #plot_stat(logs, stat_key='max')
    # Plot mean +/- std of per-step diversity across all default seeds for
    # enemy 7, with the fancy error-band style; savepath='' shows interactively.
    plot_stat_mean(stat_key='diversity', enemy=7, fancy=True)
    #plot_diversity(logs)
| [
"matplotlib.pyplot.savefig",
"numpy.asarray",
"matplotlib.pyplot.show"
] | [((742, 758), 'numpy.asarray', 'np.asarray', (['runs'], {}), '(runs)\n', (752, 758), True, 'import numpy as np\n'), ((1273, 1283), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1281, 1283), True, 'from matplotlib import pyplot as plt\n'), ((1302, 1323), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savepath'], {}), '(savepath)\n', (1313, 1323), True, 'from matplotlib import pyplot as plt\n'), ((1514, 1524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1522, 1524), True, 'from matplotlib import pyplot as plt\n'), ((1543, 1564), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savepath'], {}), '(savepath)\n', (1554, 1564), True, 'from matplotlib import pyplot as plt\n')] |
from .signal import *
from .strategy import *
import talib
import numpy as np
class ADX_DI(Strategy):
    """
    Directional-movement strategy. Trading in the direction of a strong
    trend reduces risk and increases profit potential; the +DI/-DI pair
    from the average directional index (ADX) family is used to decide the
    trend direction.
    Source:
    https://www.investopedia.com/articles/trading/07/adx-trend-indicator.asp
    """
    params = dict(
        ADX_timeperiod=5,
        candle_size='5T',
        trading_window=3
    )

    def __init__(self, **kwargs):
        """
        Args:
            ADX_length (int) The MA length of ADX
        """
        super(ADX_DI, self).__init__(**kwargs)

    def check(self):
        pass

    def eval(self, market, context, data):
        # Pull recent candles; bail out quietly when history is unavailable.
        try:
            prices = data.history(
                market,
                fields=['low', 'high', 'close'],
                bar_count=20,
                frequency=self.p.candle_size
            )
        except Exception as e:
            self.logger.warn('historical data not available: {}'.format(e))
            return
        return self.signal(prices)

    def signal(self, prices):
        highs = prices['high'].values
        lows = prices['low'].values
        closes = prices['close'].values
        period = self.p.ADX_timeperiod
        window = self.p.trading_window
        di_plus = talib.PLUS_DI(highs, lows, closes, timeperiod=period)
        di_minus = talib.MINUS_DI(highs, lows, closes, timeperiod=period)
        # True where +DI leads -DI, over the last `window` bars only.
        crosscall = di_plus[-window:] > di_minus[-window:]
        recent = crosscall[-window:]
        if np.all(recent):
            # +DI above -DI for the whole window: long signal.
            return SIGNAL_LONG, 'DIPcross'
        if not np.any(recent):
            # +DI below -DI for the whole window: short signal.
            return SIGNAL_SHORT, 'DIPput'
        return SIGNAL_NONE, ''
| [
"talib.PLUS_DI",
"numpy.all",
"talib.MINUS_DI",
"numpy.any"
] | [((1206, 1327), 'talib.PLUS_DI', 'talib.PLUS_DI', (["prices['high'].values", "prices['low'].values", "prices['close'].values"], {'timeperiod': 'self.p.ADX_timeperiod'}), "(prices['high'].values, prices['low'].values, prices['close'].\n values, timeperiod=self.p.ADX_timeperiod)\n", (1219, 1327), False, 'import talib\n'), ((1375, 1497), 'talib.MINUS_DI', 'talib.MINUS_DI', (["prices['high'].values", "prices['low'].values", "prices['close'].values"], {'timeperiod': 'self.p.ADX_timeperiod'}), "(prices['high'].values, prices['low'].values, prices['close']\n .values, timeperiod=self.p.ADX_timeperiod)\n", (1389, 1497), False, 'import talib\n'), ((1643, 1685), 'numpy.all', 'np.all', (['crosscall[-self.p.trading_window:]'], {}), '(crosscall[-self.p.trading_window:])\n', (1649, 1685), True, 'import numpy as np\n'), ((1769, 1811), 'numpy.any', 'np.any', (['crosscall[-self.p.trading_window:]'], {}), '(crosscall[-self.p.trading_window:])\n', (1775, 1811), True, 'import numpy as np\n')] |
import random
import Performance
from NeuralNetwork import NeuralNetwork
import DataUtility
import numpy as np
import copy
import matplotlib.pyplot as plt
import DE
import GA
import PSO
import VideoNN
import time
def main():
    """Run the DE / GA / PSO neural-network training demo over six datasets.

    For every dataset, each of ten cross-validation folds, and each of three
    tuned topologies (0, 1 and 2 hidden layers), a network is built, DE, GA
    and PSO are each stepped briefly, and the best DE solution is scored on
    the held-out fold, with losses piped to a CSV results file.
    """
    print("Program Start")
    # CSV header for the results file written through Performance.Results.
    headers = ["Data set", "layers", "pop", "Beta", "CR", "generations", "loss1", "loss2"]
    filename = 'DE_experimental_resultsFINAL.csv'
    Per = Performance.Results()
    Per.PipeToFile([], headers, filename)
    data_sets = ["soybean", "glass", "abalone","Cancer","forestfires", "machine"]
    # True -> regression task, False -> classification task.
    regression_data_set = {
        "soybean": False,
        "Cancer": False,
        "glass": False,
        "forestfires": True,
        "machine": True,
        "abalone": True
    }
    # Indices of categorical attributes per dataset (all empty here).
    categorical_attribute_indices = {
        "soybean": [],
        "Cancer": [],
        "glass": [],
        "forestfires": [],
        "machine": [],
        "abalone": []
    }
    # Per-dataset tuned PSO hyperparameters for a topology with no hidden layers.
    tuned_0_hl = {
        "soybean": {
            "omega": .5,
            "c1": .1,
            "c2": 5,
            "hidden_layer": []
        },
        "Cancer": {
            "omega": .5,
            "c1": .5,
            "c2": 5,
            "hidden_layer": []
        },
        "glass": {
            "omega": .2,
            "c1": .9,
            "c2": 5,
            "hidden_layer": []
        },
        "forestfires": {
            "omega": .2,
            "c1": 5,
            "c2": .5,
            "hidden_layer": []
        },
        "machine": {
            "omega": .5,
            "c1": .9,
            "c2": 5,
            "hidden_layer": []
        },
        "abalone": {
            "omega": .2,
            "c1": 5,
            "c2": .9,
            "hidden_layer": []
        }
    }
    # Per-dataset tuned hyperparameters for one hidden layer.
    tuned_1_hl = {
        "soybean": {
            "omega": .5,
            "c1": .5,
            "c2": 1,
            "hidden_layer": [7]
        },
        "Cancer": {
            "omega": .2,
            "c1": .5,
            "c2": 5,
            "hidden_layer": [4]
        },
        "glass": {
            "omega": .2,
            "c1": .9,
            "c2": 5,
            "hidden_layer": [8]
        },
        "forestfires": {
            "omega": .2,
            "c1": 5,
            "c2": 5,
            "hidden_layer": [8]
        },
        "machine": {
            "omega": .5,
            "c1": 5,
            "c2": .5,
            "hidden_layer": [4]
        },
        "abalone": {
            "omega": .2,
            "c1": .1,
            "c2": 5,
            "hidden_layer": [8]
        }
    }
    # Per-dataset tuned hyperparameters for two hidden layers.
    tuned_2_hl = {
        "soybean": {
            "omega": .5,
            "c1": .9,
            "c2": .1,
            "hidden_layer": [7,12]
        },
        "Cancer": {
            "omega": .2,
            "c1": .5,
            "c2": 5,
            "hidden_layer": [4,4]
        },
        "glass": {
            "omega": .2,
            "c1": .9,
            "c2": 5,
            "hidden_layer": [8,6]
        },
        "forestfires": {
            "omega": .2,
            "c1": .9,
            "c2": 5,
            "hidden_layer": [8,8]
        },
        "machine": {
            "omega": .2,
            "c1": .9,
            "c2": .1,
            "hidden_layer": [7,2]
        },
        "abalone": {
            "omega": .2,
            "c1": 5,
            "c2": 5,
            "hidden_layer": [6,8]
        }
    }
    du = DataUtility.DataUtility(categorical_attribute_indices, regression_data_set)
    total_counter = 0
    for data_set in data_sets:
        data_set_counter = 0
        # ten fold data and labels is a list of [data, labels] pairs, where
        # data and labels are numpy arrays:
        tenfold_data_and_labels = du.Dataset_and_Labels(data_set)
        for j in range(10):
            # Fold j is the held-out test fold for this iteration.
            test_data, test_labels = copy.deepcopy(tenfold_data_and_labels[j])
            #Append all data folds to the training data set
            remaining_data = [x[0] for i, x in enumerate(tenfold_data_and_labels) if i!=j]
            remaining_labels = [y[1] for i, y in enumerate(tenfold_data_and_labels) if i!=j]
            #Store off a set of the remaining dataset
            X = np.concatenate(remaining_data, axis=1)
            #Store the remaining data set labels
            labels = np.concatenate(remaining_labels, axis=1)
            print(data_set, "training data prepared")
            regression = regression_data_set[data_set]
            #If the data set is a regression dataset
            if regression == True:
                #The number of output nodes is 1
                output_size = 1
            #else it is a classification data set
            else:
                #Count the number of classes in the label data set
                output_size = du.CountClasses(labels)
                #Get the test data labels in one hot encoding
                test_labels = du.ConvertLabels(test_labels, output_size)
                #Get the Labels into a One hot encoding
                labels = du.ConvertLabels(labels, output_size)
            input_size = X.shape[0]
            data_set_size = X.shape[1] + test_data.shape[1]
            tuned_parameters = [tuned_0_hl[data_set], tuned_1_hl[data_set], tuned_2_hl[data_set]]
            # z indexes the three tuned topologies: 0, 1 and 2 hidden layers.
            for z in range(3):
                hidden_layers = tuned_parameters[z]["hidden_layer"]
                layers = [input_size] + hidden_layers + [output_size]
                nn = NeuralNetwork(input_size, hidden_layers, regression, output_size)
                nn.set_input_data(X,labels)
                # Total weight count = sum of products of adjacent layer sizes.
                total_weights = 0
                for i in range(len(layers)-1):
                    total_weights += layers[i] * layers[i+1]
                # Differential-evolution hyperparameters.
                hyperparameters = {
                    "population_size": 10*total_weights,
                    "beta": .5,
                    "crossover_rate": .6,
                    "max_gen": 100
                }
                # Genetic-algorithm hyperparameters.
                hyperparameterss = {
                    "maxGen":100,
                    "pop_size":100,
                    "mutation_rate": .5,
                    "mutation_range": 10,
                    "crossover_rate": .5
                }
                # Particle-swarm hyperparameters (tuned values left commented out).
                hyperparametersss = {
                    "position_range": 10,
                    "velocity_range": 1,
                    "omega": .1,
                    # tuned_parameters[z]["omega"],
                    "c1": .9,
                    # tuned_parameters[z]["c1"],
                    "c2": .1,
                    # tuned_parameters[z]["c2"],
                    "vmax": 1,
                    "pop_size": 1000,
                    "max_t": 50
                }
                de = DE.DE(hyperparameters,total_weights, nn)
                ga = GA.GA(hyperparameterss, total_weights, nn)
                pso = PSO.PSO(layers, hyperparametersss, nn)
                learning_rate = 3
                momentum = 0
                # NOTE(review): VNN is constructed but never used below — confirm intent.
                VNN = VideoNN.NeuralNetworks(input_size, hidden_layers, regression, output_size,learning_rate,momentum)
                # NOTE(review): each optimizer below is stepped exactly once
                # (counter == 1 breaks immediately) and then the process pauses
                # for 200 s — this looks like demo throttling; confirm before
                # using this script for real benchmarking.
                counter = 0
                print("DE OPERATIONS ")
                for gen in range(de.maxgens):
                    if counter == 1:
                        break
                    print("MUTATE AND CROSS OVER ")
                    de.Pmutate_and_crossover()
                    counter = counter+1
                time.sleep(200)
                counter = 0
                print("GA OPERATIONS")
                for gen in range(ga.maxGen):
                    if counter == 1:
                        break
                    print()
                    ga.pfitness()
                    ga.Pselection()
                    ga.Pcrossover()
                    counter = counter + 1
                time.sleep(200)
                counter = 0
                print("PSO OPERATIONS")
                for epoch in range(pso.max_t):
                    if counter == 1:
                        break
                    pso.Pupdate_fitness()
                    pso.Pupdate_position_and_velocity()
                    counter = counter + 1
                time.sleep(200)
                # plt.plot(list(range(len(de.globalbest))), de.globalbest)
                # plt.draw()
                # plt.pause(0.00001)
                #plt.clf()
                # get the best overall solution and set the NN to those weights
                #DE
                bestSolution = de.bestChromie.getchromie()
                bestWeights = de.nn.weight_transform(bestSolution)
                de.nn.weights = bestWeights
                #GA
                #PS
                # ################################ new code for de end ###################################
                # plt.ioff()
                # plt.plot(list(range(len(de.globalbest))), de.globalbest)
                # plt.show()
                # img_name = data_set + '_l' + str(len(hidden_layers)) + '_pr' + str(a) + '_vr' + str(b) + '_w' + str(c) + '_c' + str(d) + '_cc' + str(e) + '_v' + str(f) + '_ps' + str(g) + '.png'
                # plt.savefig('tuning_plots/' + img_name)
                # plt.clf()
                # Score the best DE network on the held-out fold.
                Estimation_Values = de.nn.classify(test_data,test_labels)
                if regression == False:
                    #Decode the One Hot encoding Value
                    Estimation_Values = de.nn.PickLargest(Estimation_Values)
                    test_labels_list = de.nn.PickLargest(test_labels)
                    # print("ESTiMATION VALUES BY GIVEN INDEX (CLASS GUESS) ")
                    # print(Estimation_Values)
                else:
                    Estimation_Values = Estimation_Values.tolist()
                    test_labels_list = test_labels.tolist()[0]
                    Estimation_Values = Estimation_Values[0]
                Estimat = Estimation_Values
                groun = test_labels_list
                Nice = Per.ConvertResultsDataStructure(groun, Estimat)
                # print("THE GROUND VERSUS ESTIMATION:")
                # print(Nice)
                # headers = ["Data set", "layers", "pop", "Beta", "CR", "generations", "loss1", "loss2"]
                Meta = [
                    data_set,
                    len(hidden_layers),
                    hyperparameters["population_size"],
                    hyperparameters["beta"],
                    hyperparameters["crossover_rate"],
                    hyperparameters["max_gen"]
                ]
                Per.StartLossFunction(regression, Nice, Meta, filename)
                print(f"{data_set_counter}/30 {data_set}. {total_counter}/180")
                data_set_counter += 1
                total_counter += 1
    print("DEMO FINISHED")
    time.sleep(10000)
    print("Program End ")
main() | [
"DataUtility.DataUtility",
"Performance.Results",
"time.sleep",
"VideoNN.NeuralNetworks",
"numpy.concatenate",
"copy.deepcopy",
"DE.DE",
"PSO.PSO",
"NeuralNetwork.NeuralNetwork",
"GA.GA"
] | [((412, 433), 'Performance.Results', 'Performance.Results', ([], {}), '()\n', (431, 433), False, 'import Performance\n'), ((3400, 3475), 'DataUtility.DataUtility', 'DataUtility.DataUtility', (['categorical_attribute_indices', 'regression_data_set'], {}), '(categorical_attribute_indices, regression_data_set)\n', (3423, 3475), False, 'import DataUtility\n'), ((3811, 3852), 'copy.deepcopy', 'copy.deepcopy', (['tenfold_data_and_labels[j]'], {}), '(tenfold_data_and_labels[j])\n', (3824, 3852), False, 'import copy\n'), ((4168, 4206), 'numpy.concatenate', 'np.concatenate', (['remaining_data'], {'axis': '(1)'}), '(remaining_data, axis=1)\n', (4182, 4206), True, 'import numpy as np\n'), ((4279, 4319), 'numpy.concatenate', 'np.concatenate', (['remaining_labels'], {'axis': '(1)'}), '(remaining_labels, axis=1)\n', (4293, 4319), True, 'import numpy as np\n'), ((5435, 5500), 'NeuralNetwork.NeuralNetwork', 'NeuralNetwork', (['input_size', 'hidden_layers', 'regression', 'output_size'], {}), '(input_size, hidden_layers, regression, output_size)\n', (5448, 5500), False, 'from NeuralNetwork import NeuralNetwork\n'), ((6945, 6986), 'DE.DE', 'DE.DE', (['hyperparameters', 'total_weights', 'nn'], {}), '(hyperparameters, total_weights, nn)\n', (6950, 6986), False, 'import DE\n'), ((7007, 7049), 'GA.GA', 'GA.GA', (['hyperparameterss', 'total_weights', 'nn'], {}), '(hyperparameterss, total_weights, nn)\n', (7012, 7049), False, 'import GA\n'), ((7072, 7110), 'PSO.PSO', 'PSO.PSO', (['layers', 'hyperparametersss', 'nn'], {}), '(layers, hyperparametersss, nn)\n', (7079, 7110), False, 'import PSO\n'), ((7197, 7300), 'VideoNN.NeuralNetworks', 'VideoNN.NeuralNetworks', (['input_size', 'hidden_layers', 'regression', 'output_size', 'learning_rate', 'momentum'], {}), '(input_size, hidden_layers, regression, output_size,\n learning_rate, momentum)\n', (7219, 7300), False, 'import VideoNN\n'), ((7637, 7652), 'time.sleep', 'time.sleep', (['(200)'], {}), '(200)\n', (7647, 7652), False, 'import 
time\n'), ((8029, 8044), 'time.sleep', 'time.sleep', (['(200)'], {}), '(200)\n', (8039, 8044), False, 'import time\n'), ((8387, 8402), 'time.sleep', 'time.sleep', (['(200)'], {}), '(200)\n', (8397, 8402), False, 'import time\n'), ((11082, 11099), 'time.sleep', 'time.sleep', (['(10000)'], {}), '(10000)\n', (11092, 11099), False, 'import time\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import argparse
import json
import base64
import os
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", required=True, help="directory containing exported model")
a = parser.parse_args()
def main():
    """Run the exported TensorFlow model on every image in input/.

    Each qualifying image is base64-encoded, fed through the restored
    graph, and the decoded result is written to ./output/<name>.png.
    """
    for file in os.listdir('input/'):
        # BUG FIX: the original `file.endswidth('.png')` was a typo that
        # raised AttributeError for any file that was not .jpg/.jpeg.
        # endswith() accepts a tuple; lower() also tolerates e.g. ".JPG".
        if file.lower().endswith(('.jpg', '.jpeg', '.png')):
            print(file)
            with open('input/' + file, "rb") as f:
                input_data = f.read()
            input_instance = dict(input=base64.urlsafe_b64encode(input_data).decode("ascii"), key="0")
            input_instance = json.loads(json.dumps(input_instance))
            with tf.Session() as sess:
                saver = tf.train.import_meta_graph(a.model_dir + "/export.meta")
                saver.restore(sess, a.model_dir + "/export")
                input_vars = json.loads(tf.get_collection("inputs")[0].decode())
                output_vars = json.loads(tf.get_collection("outputs")[0].decode())
                # Renamed from `input`/`output` to avoid shadowing builtins.
                input_tensor = tf.get_default_graph().get_tensor_by_name(input_vars["input"])
                output_tensor = tf.get_default_graph().get_tensor_by_name(output_vars["output"])
                input_value = np.array(input_instance["input"])
                output_value = sess.run(output_tensor, feed_dict={input_tensor: np.expand_dims(input_value, axis=0)})[0]
            output_instance = dict(output=output_value.decode("ascii"), key="0")
            b64data = output_instance["output"]
            # Restore base64 padding stripped by urlsafe encoding.
            b64data += "=" * (-len(b64data) % 4)
            output_data = base64.urlsafe_b64decode(b64data.encode("ascii"))
            with open('./output/' + os.path.splitext(file)[0] + '.png', "wb") as f:
                f.write(output_data)
main()
| [
"os.listdir",
"argparse.ArgumentParser",
"base64.urlsafe_b64encode",
"tensorflow.Session",
"json.dumps",
"os.path.splitext",
"numpy.array",
"tensorflow.train.import_meta_graph",
"numpy.expand_dims",
"tensorflow.get_default_graph",
"tensorflow.get_collection"
] | [((215, 240), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (238, 240), False, 'import argparse\n'), ((385, 405), 'os.listdir', 'os.listdir', (['"""input/"""'], {}), "('input/')\n", (395, 405), False, 'import os\n'), ((696, 722), 'json.dumps', 'json.dumps', (['input_instance'], {}), '(input_instance)\n', (706, 722), False, 'import json\n'), ((733, 745), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (743, 745), True, 'import tensorflow as tf\n'), ((767, 823), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(a.model_dir + '/export.meta')"], {}), "(a.model_dir + '/export.meta')\n", (793, 823), True, 'import tensorflow as tf\n'), ((1185, 1218), 'numpy.array', 'np.array', (["input_instance['input']"], {}), "(input_instance['input'])\n", (1193, 1218), True, 'import numpy as np\n'), ((1025, 1047), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1045, 1047), True, 'import tensorflow as tf\n'), ((1101, 1123), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1121, 1123), True, 'import tensorflow as tf\n'), ((602, 638), 'base64.urlsafe_b64encode', 'base64.urlsafe_b64encode', (['input_data'], {}), '(input_data)\n', (626, 638), False, 'import base64\n'), ((901, 928), 'tensorflow.get_collection', 'tf.get_collection', (['"""inputs"""'], {}), "('inputs')\n", (918, 928), True, 'import tensorflow as tf\n'), ((971, 999), 'tensorflow.get_collection', 'tf.get_collection', (['"""outputs"""'], {}), "('outputs')\n", (988, 999), True, 'import tensorflow as tf\n'), ((1273, 1308), 'numpy.expand_dims', 'np.expand_dims', (['input_value'], {'axis': '(0)'}), '(input_value, axis=0)\n', (1287, 1308), True, 'import numpy as np\n'), ((1562, 1584), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1578, 1584), False, 'import os\n')] |
import cv2
import numpy as np
# Load the source image and split it into its colour channels
# (cv2.split returns copies, so `before` itself is never modified).
before = cv2.imread('2.png')
blue, green, red = cv2.split(before)
# Scale each channel in place; "unsafe" casting keeps the uint8 dtype.
for channel, factor in ((blue, 1.5), (green, .75), (red, 1.25)):
    np.multiply(channel, factor, out=channel, casting="unsafe")
after = cv2.merge([blue, green, red])
cv2.imshow('before', before)
cv2.imshow('after', after)
cv2.waitKey()
| [
"numpy.multiply",
"cv2.merge",
"cv2.imshow",
"cv2.waitKey",
"cv2.split",
"cv2.imread"
] | [((40, 59), 'cv2.imread', 'cv2.imread', (['"""2.png"""'], {}), "('2.png')\n", (50, 59), False, 'import cv2\n'), ((70, 87), 'cv2.split', 'cv2.split', (['before'], {}), '(before)\n', (79, 87), False, 'import cv2\n'), ((89, 133), 'numpy.multiply', 'np.multiply', (['b', '(1.5)'], {'out': 'b', 'casting': '"""unsafe"""'}), "(b, 1.5, out=b, casting='unsafe')\n", (100, 133), True, 'import numpy as np\n'), ((134, 179), 'numpy.multiply', 'np.multiply', (['g', '(0.75)'], {'out': 'g', 'casting': '"""unsafe"""'}), "(g, 0.75, out=g, casting='unsafe')\n", (145, 179), True, 'import numpy as np\n'), ((179, 224), 'numpy.multiply', 'np.multiply', (['r', '(1.25)'], {'out': 'r', 'casting': '"""unsafe"""'}), "(r, 1.25, out=r, casting='unsafe')\n", (190, 224), True, 'import numpy as np\n'), ((234, 254), 'cv2.merge', 'cv2.merge', (['[b, g, r]'], {}), '([b, g, r])\n', (243, 254), False, 'import cv2\n'), ((256, 284), 'cv2.imshow', 'cv2.imshow', (['"""before"""', 'before'], {}), "('before', before)\n", (266, 284), False, 'import cv2\n'), ((285, 311), 'cv2.imshow', 'cv2.imshow', (['"""after"""', 'after'], {}), "('after', after)\n", (295, 311), False, 'import cv2\n'), ((312, 325), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (323, 325), False, 'import cv2\n')] |
import random
import numpy as np
from sum_tree import SumTree
class Memory:
    """Proportional prioritized experience replay (PER) buffer on a SumTree.

    Samples are stored with priority (|error| + error_multiplier) ** alpha;
    sampling draws one transition per equal-probability segment of the tree
    and returns importance-sampling weights normalized by their maximum.
    """

    def __init__(self, tree_memory_length, error_multiplier=0.01, alpha=0.6, beta=0.4, beta_increment_per_sample=0.001):
        # error_multiplier is the small epsilon keeping zero-error samples drawable.
        self.tree = SumTree(tree_memory_length)
        self.tree_memory_length = tree_memory_length
        self.error_multiplier = error_multiplier
        self.per_alpha = alpha
        self.per_beta_init = beta
        self.beta_increment_per_sample = beta_increment_per_sample

    def _get_priority(self, error):
        """Map a TD error to a sampling priority: (|error| + eps) ** alpha."""
        return (np.abs(error) + self.error_multiplier) ** self.per_alpha

    def add_sample_to_tree(self, error, sample):
        """Insert `sample` into the tree with a priority derived from `error`."""
        priority = self._get_priority(error)
        self.tree.add(priority, sample)

    def sample_tree(self, num_samples):
        """Draw `num_samples` transitions, one per equal tree segment.

        Returns:
            (batch, tree_indices, importance_sampling_weights)
        """
        batch = []
        idxs = []
        priorities = []
        segment = self.tree.sum_of_tree() / num_samples
        # NOTE(review): beta is recomputed from per_beta_init on every call, so
        # it never actually anneals toward 1.0 — confirm whether annealing was
        # intended before changing it.
        self.beta = np.min([1.0, self.per_beta_init + self.beta_increment_per_sample])
        for i in range(num_samples):
            a = segment * i
            b = segment * (i + 1)
            sample = random.uniform(a, b)
            idx, priority, data = self.tree.get_sample(sample)
            priorities.append(priority)
            batch.append(data)
            idxs.append(idx)
        # BUG FIX: `priorities` is a plain Python list; dividing it by a float
        # raised TypeError. Convert to an ndarray for elementwise division.
        sampling_prob = np.asarray(priorities) / self.tree.sum_of_tree()
        is_weight = np.power(self.tree.num_entries * sampling_prob, -self.beta)
        is_weight /= is_weight.max()
        return batch, idxs, is_weight

    def update_tree(self, idx, error):
        """Re-prioritize the tree entry at `idx` after its error changed."""
        priority = self._get_priority(error)
        self.tree.update_priority(idx, priority)
| [
"numpy.abs",
"random.uniform",
"numpy.power",
"sum_tree.SumTree",
"numpy.min"
] | [((219, 246), 'sum_tree.SumTree', 'SumTree', (['tree_memory_length'], {}), '(tree_memory_length)\n', (226, 246), False, 'from sum_tree import SumTree\n'), ((909, 975), 'numpy.min', 'np.min', (['[1.0, self.per_beta_init + self.beta_increment_per_sample]'], {}), '([1.0, self.per_beta_init + self.beta_increment_per_sample])\n', (915, 975), True, 'import numpy as np\n'), ((1373, 1432), 'numpy.power', 'np.power', (['(self.tree.num_entries * sampling_prob)', '(-self.beta)'], {}), '(self.tree.num_entries * sampling_prob, -self.beta)\n', (1381, 1432), True, 'import numpy as np\n'), ((1098, 1118), 'random.uniform', 'random.uniform', (['a', 'b'], {}), '(a, b)\n', (1112, 1118), False, 'import random\n'), ((534, 547), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (540, 547), True, 'import numpy as np\n')] |
"""WhiteStripe (normal-appearing white matter mean & standard deviation) normalization
Author: <NAME> <<EMAIL>>
Created on: 01 Jun 2021
"""
from __future__ import annotations
__all__ = ["WhiteStripeNormalize"]
import argparse
import builtins
import typing
import numpy as np
import numpy.typing as npt
import intensity_normalization.normalize.base as intnormb
import intensity_normalization.typing as intnormt
import intensity_normalization.util.histogram_tools as intnormhisttool
class WhiteStripeNormalize(
    intnormb.LocationScaleCLIMixin, intnormb.SingleImageNormalizeCLI
):
    """WhiteStripe intensity normalization.

    Locates the normal-appearing white matter (the 'white stripe') of an
    input MR image and standardizes the image using the mean and standard
    deviation of the intensities inside that region (subtract the mean,
    divide by the std).
    """

    def __init__(
        self,
        *,
        norm_value: builtins.float = 1.0,
        width: builtins.float = 0.05,
        width_l: builtins.float | None = None,
        width_u: builtins.float | None = None,
        **kwargs: typing.Any,
    ):
        super().__init__(norm_value=norm_value, **kwargs)
        # Side-specific quantile widths fall back to the symmetric `width`.
        self.width_l = width_l or width
        self.width_u = width_u or width
        self.whitestripe: npt.NDArray | None = None

    def calculate_location(
        self,
        image: intnormt.ImageLike,
        /,
        mask: intnormt.ImageLike | None = None,
        *,
        modality: intnormt.Modalities = intnormt.Modalities.T1,
    ) -> builtins.float:
        """Mean intensity inside the white stripe computed by ``setup``."""
        location: builtins.float = image[self.whitestripe].mean()
        return location

    def calculate_scale(
        self,
        image: intnormt.ImageLike,
        /,
        mask: intnormt.ImageLike | None = None,
        *,
        modality: intnormt.Modalities = intnormt.Modalities.T1,
    ) -> builtins.float:
        """Standard deviation of intensities inside the white stripe."""
        scale: builtins.float = image[self.whitestripe].std()
        return scale

    def setup(
        self,
        image: intnormt.ImageLike,
        /,
        mask: intnormt.ImageLike | None = None,
        *,
        modality: intnormt.Modalities = intnormt.Modalities.T1,
    ) -> None:
        """Locate the white-stripe voxels and cache them in ``self.whitestripe``."""
        if modality is None:
            modality = "t1"
        mask = self._get_mask(image, mask, modality=modality)
        masked_image = image * mask
        foreground = image[mask]
        # Histogram mode of white matter for this modality, then its quantile.
        wm_peak = intnormhisttool.get_tissue_mode(foreground, modality=modality)
        wm_peak_quantile: builtins.float = np.mean(foreground < wm_peak).item()
        # Clamp the quantile window around the WM peak to [0, 1].
        lower_bound = max(wm_peak_quantile - self.width_l, 0.0)
        upper_bound = min(wm_peak_quantile + self.width_u, 1.0)
        ws_l: builtins.float
        ws_u: builtins.float
        ws_l, ws_u = np.quantile(foreground, (lower_bound, upper_bound))  # type: ignore[misc]
        self.whitestripe = (masked_image > ws_l) & (masked_image < ws_u)

    def teardown(self) -> None:
        """Release the cached white-stripe mask."""
        del self.whitestripe

    @staticmethod
    def name() -> builtins.str:
        return "ws"

    @staticmethod
    def fullname() -> builtins.str:
        return "WhiteStripe"

    @staticmethod
    def description() -> builtins.str:
        return "Standardize the normal appearing WM of a MR image."

    @staticmethod
    def add_method_specific_arguments(
        parent_parser: argparse.ArgumentParser,
    ) -> argparse.ArgumentParser:
        group = parent_parser.add_argument_group("method-specific arguments")
        group.add_argument(
            "--width",
            default=0.05,
            type=float,
            help="Width of the 'white stripe'. (See original paper.)",
        )
        return parent_parser

    @classmethod
    def from_argparse_args(cls, args: argparse.Namespace, /) -> WhiteStripeNormalize:
        return cls(width=args.width)

    def plot_histogram_from_args(
        self,
        args: argparse.Namespace,
        /,
        normalized: intnormt.ImageLike,
        mask: intnormt.ImageLike | None = None,
    ) -> None:
        mask = self.estimate_foreground(normalized) if mask is None else mask
        super().plot_histogram_from_args(args, normalized, mask)
| [
"numpy.mean",
"numpy.quantile",
"intensity_normalization.util.histogram_tools.get_tissue_mode"
] | [((2301, 2356), 'intensity_normalization.util.histogram_tools.get_tissue_mode', 'intnormhisttool.get_tissue_mode', (['voi'], {'modality': 'modality'}), '(voi, modality=modality)\n', (2332, 2356), True, 'import intensity_normalization.util.histogram_tools as intnormhisttool\n'), ((2637, 2681), 'numpy.quantile', 'np.quantile', (['voi', '(lower_bound, upper_bound)'], {}), '(voi, (lower_bound, upper_bound))\n', (2648, 2681), True, 'import numpy as np\n'), ((2400, 2422), 'numpy.mean', 'np.mean', (['(voi < wm_mode)'], {}), '(voi < wm_mode)\n', (2407, 2422), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import torch
import shutil
import pickle
from models.model import Shared_Langage_Model
def Out_Wordemb(id2vocab, lm, vocab_size = 0):
    """Extract per-language {word: embedding} dicts from the shared lookup table.

    Args:
        id2vocab: list (one entry per language) of {token_id: word} dicts.
        lm: model exposing an `emb` embedding layer shared by all languages.
        vocab_size: cap on how many ids to export per language; 0 means all.
    """
    embedding_layer = getattr(lm, "emb")  # lookup table for all languages
    weight_rows = embedding_layer.weight.data.tolist()  # Vocab_size x emb_size
    if vocab_size == 0:
        vocab_size = len(weight_rows)
    vocab2emb_list = []
    for lang in range(len(id2vocab)):
        id_map = id2vocab[lang]
        vocab2emb = {
            id_map[token_id]: weight_rows[token_id]
            for token_id in list(id_map.keys())[:vocab_size]
        }
        vocab2emb_list.append(vocab2emb)
    return vocab2emb_list
def Save_Emb(vocab2emb_list, N_dim, filename):
    """Write each language's embeddings to '<filename>.lang<k>.vec' (word2vec text format).

    The first line is "<word_count> <N_dim>"; each following line is
    "word v1 v2 ...". The "<PAD>" token is skipped (the header count still
    includes it, matching the original behavior).

    Fixes vs. original: the file was re-opened in append mode once per word
    (O(n) opens), and `vocab.remove("<PAD>")` raised ValueError when a
    vocabulary had no "<PAD>" entry.
    """
    for lang, vocab2emb in enumerate(vocab2emb_list):
        first_line = str(len(vocab2emb)) + " " + str(N_dim)
        out_path = filename + '.lang' + str(lang) + '.vec'
        # Open once and stream all lines through a single handle.
        with open(out_path, "w") as word_output:
            word_output.write(first_line + "\n")
            for word in vocab2emb:
                if word == "<PAD>":
                    continue
                out = word + " " + " ".join(map(str, vocab2emb[word])) + "\n"
                word_output.write(out)
def PAD_Sentences(model, lengths, lines_id_input, lines_id_output, index):
    """Pad a minibatch (selected by ``index``) to its longest sentence.

    Builds forward and backward (reversed) input/output id sequences:
    inputs get a BOS id prepended and PAD ids appended; outputs get an
    EOS id appended followed by ignore-index ids.

    Args:
        model: object exposing BOS_fwd_index, BOS_bkw_index, EOS_index,
            PAD_index and ignore_index attributes.
        lengths: array of sentence lengths, indexable by ``index``
            (assumed numpy-like — fancy indexing and vector subtraction
            are used).
        lines_id_input: list of input id sequences.
        lines_id_output: list of output id sequences.
        index: indices of the sentences forming the batch.

    Returns:
        Tuple of (batch lengths, padded fwd inputs, padded fwd outputs,
        padded bkw inputs, padded bkw outputs).
    """
    s_lengths = lengths[index]
    pad_counts = max(s_lengths) - s_lengths
    fwd_in, fwd_out, bkw_in, bkw_out = [], [], [], []
    for pos, j in enumerate(index):
        src = lines_id_input[j]
        tgt = lines_id_output[j]
        pad = pad_counts[pos] * [model.PAD_index]
        ignore = pad_counts[pos] * [model.ignore_index]
        fwd_in.append([model.BOS_fwd_index] + src + pad)
        fwd_out.append(tgt + [model.EOS_index] + ignore)
        bkw_in.append([model.BOS_bkw_index] + src[::-1] + pad)
        bkw_out.append(tgt[::-1] + [model.EOS_index] + ignore)
    return s_lengths, fwd_in, fwd_out, bkw_in, bkw_out
def check_options(opt):
    """Log key options and archive the run configuration.

    Seeds numpy's RNG with ``opt.seed``, prints the embedding / hidden
    sizes, copies the input description file into ``opt.save_dir`` and
    appends every option name/value pair to that copy.

    Args:
        opt: parsed option namespace with at least ``seed``, ``emb_size``,
            ``h_size``, ``data`` and ``save_dir`` attributes.
    """
    np.random.seed(opt.seed)
    print("emb_size", opt.emb_size)
    print("hidden_size", opt.h_size)
    ######write option to a file######
    params_path = opt.save_dir + '/' + opt.data + "_params.txt"
    shutil.copy("data/" + opt.data + "_inputs.txt", params_path)
    with open(params_path, "a") as f:
        for variable, value in vars(opt).items():
            f.write(str(variable) + ": " + str(value) + "\n")
    # Fix: removed the redundant f.close() that followed the with-block;
    # the context manager already closes the file.
def Setup_model(model, gpuid, vocab_dict):
    """Initialise a freshly built model and move it to the right device.

    Every parameter is drawn uniformly from [-0.1, 0.1], the <PAD> row of
    the shared embedding table is zeroed, the vocabulary mappings are
    registered on the model, and the model is moved to the GPU when
    ``gpuid`` is non-negative.

    Args:
        model: model exposing parameters(), lm.emb, PAD_index,
            Register_vocab() and set_device().
        gpuid: CUDA device id; a negative value keeps the model on CPU.
        vocab_dict: container with vocab2id_input/output and
            id2vocab_input/output attributes.

    Returns:
        The same ``model`` instance, initialised in place.
    """
    # Uniform initialisation for every trainable parameter.
    for p in model.parameters():
        p.data.uniform_(-0.1, 0.1)
    # The padding token must embed to the zero vector.
    model.lm.emb.weight.data[model.PAD_index] *= 0
    model.Register_vocab(
        vocab_dict.vocab2id_input,
        vocab_dict.vocab2id_output,
        vocab_dict.id2vocab_input,
        vocab_dict.id2vocab_output,
    )
    model.set_device(gpuid)
    if gpuid >= 0:
        torch.cuda.set_device(gpuid)
        model.to('cuda')
    return model
def load_data(data):
    """Load a pickled dataset and its vocabulary dictionary.

    Reads ``data/<data>.data`` and ``data/<data>.vocab_dict`` and prints,
    per language, the vocabulary size and number of training sentences.

    Args:
        data: dataset name (basename of the files under ``data/``).

    Returns:
        Tuple of (dataset, vocab_dict).
    """
    # Fix: use context managers so the file handles are closed
    # deterministically (the original left both files open).
    with open("data/" + data + ".data", 'rb') as f:
        dataset = pickle.load(f)
    with open("data/" + data + ".vocab_dict", 'rb') as f:
        vocab_dict = pickle.load(f)
    for i in range(dataset.lang_size):
        print("lang: ", i)
        print("V_size: ", vocab_dict.V_size[i])
        print("train sents: ", len(dataset.lines_id_input[i]))
    return dataset, vocab_dict
| [
"pickle.load",
"numpy.random.seed",
"shutil.copy",
"torch.cuda.set_device"
] | [((2336, 2360), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (2350, 2360), True, 'import numpy as np\n'), ((2477, 2576), 'shutil.copy', 'shutil.copy', (["('data/' + opt.data + '_inputs.txt')", "(opt.save_dir + '/' + opt.data + '_params.txt')"], {}), "('data/' + opt.data + '_inputs.txt', opt.save_dir + '/' + opt.\n data + '_params.txt')\n", (2488, 2576), False, 'import shutil\n'), ((3413, 3430), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (3424, 3430), False, 'import pickle\n'), ((3503, 3520), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (3514, 3520), False, 'import pickle\n'), ((3257, 3285), 'torch.cuda.set_device', 'torch.cuda.set_device', (['gpuid'], {}), '(gpuid)\n', (3278, 3285), False, 'import torch\n')] |
import numpy as np
import healpy as hp
from rubin_sim.utils import _hpid2RaDec, Site, _angularSeparation, _xyz_from_ra_dec
import matplotlib.pylab as plt
from rubin_sim.scheduler.basis_functions import Base_basis_function
from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded
__all__ = ['Zenith_mask_basis_function', 'Zenith_shadow_mask_basis_function',
'Moon_avoidance_basis_function', 'Map_cloud_basis_function',
'Planet_mask_basis_function', 'Mask_azimuth_basis_function',
'Solar_elongation_mask_basis_function', 'Area_check_mask_basis_function']
class Area_check_mask_basis_function(Base_basis_function):
    """Take a list of other mask basis functions, and do an additional check for area available
    """
    def __init__(self, bf_list, nside=32, min_area=1000.):
        # bf_list: component mask basis functions whose product defines availability.
        # min_area: minimum area (square degrees) required for feasibility.
        super(Area_check_mask_basis_function, self).__init__(nside=nside)
        self.bf_list = bf_list
        # NOTE(review): the template map starts as all zeros, so the products
        # below yield 0 (or NaN where a component mask is NaN); the area test
        # then counts the ==0 pixels as "good". Presumably 0 means available
        # here — confirm against the component masks' conventions.
        self.result = np.zeros(hp.nside2npix(self.nside), dtype=float)
        self.min_area = min_area
    def check_feasibility(self, conditions):
        # Feasible only if every component is feasible AND enough area remains.
        result = True
        for bf in self.bf_list:
            if not bf.check_feasibility(conditions):
                return False
        # Multiply all component masks together; NaNs propagate and mark
        # masked-out pixels.
        area_map = self.result.copy()
        for bf in self.bf_list:
            area_map *= bf(conditions)
        good_pix = np.where(area_map == 0)[0]
        # Total unmasked area = per-pixel area (sq deg) times pixel count.
        if hp.nside2pixarea(self.nside, degrees=True)*good_pix.size < self.min_area:
            result = False
        return result
    def _calc_value(self, conditions, **kwargs):
        # Combined mask: product of all component basis-function maps.
        result = self.result.copy()
        for bf in self.bf_list:
            result *= bf(conditions)
        return result
class Solar_elongation_mask_basis_function(Base_basis_function):
    """Mask things at various solar elongations.

    Pixels whose solar elongation lies inside [min_elong, max_elong] get 1;
    everything else keeps the penalty value.

    Parameters
    ----------
    min_elong : float (0)
        The minimum solar elongation to consider (degrees).
    max_elong : float (60.)
        The maximum solar elongation to consider (degrees).
    """
    def __init__(self, min_elong=0., max_elong=60., nside=None, penalty=np.nan):
        super(Solar_elongation_mask_basis_function, self).__init__(nside=nside)
        # Limits stored in radians for comparison with conditions.solar_elongation.
        self.min_elong = np.radians(min_elong)
        self.max_elong = np.radians(max_elong)
        self.penalty = penalty
        # Template map pre-filled with the penalty value.
        self.result = np.empty(hp.nside2npix(self.nside), dtype=float)
        self.result.fill(self.penalty)

    def _calc_value(self, conditions, indx=None):
        result = self.result.copy()
        elong = conditions.solar_elongation
        above_min = int_rounded(elong) >= int_rounded(self.min_elong)
        below_max = int_rounded(elong) <= int_rounded(self.max_elong)
        in_range = np.where(above_min & below_max)[0]
        result[in_range] = 1
        return result
class Zenith_mask_basis_function(Base_basis_function):
    """Just remove the area near zenith.

    Parameters
    ----------
    min_alt : float (20.)
        The minimum possible altitude (degrees)
    max_alt : float (82.)
        The maximum allowed altitude (degrees)
    penalty : float (np.nan)
        Value assigned to masked healpixels (new optional parameter,
        defaulting to the NaN the sibling mask classes use).
    """
    def __init__(self, min_alt=20., max_alt=82., nside=None, penalty=np.nan):
        super(Zenith_mask_basis_function, self).__init__(nside=nside)
        self.update_on_newobs = False
        self.min_alt = np.radians(min_alt)
        self.max_alt = np.radians(max_alt)
        # Bug fix: the original did
        #     self.result = np.empty(...).fill(self.penalty)
        # ndarray.fill() returns None, so self.result became None, and
        # self.penalty was never assigned (AttributeError on first use).
        # Build the template map the way the other mask classes do.
        self.penalty = penalty
        self.result = np.empty(hp.nside2npix(self.nside), dtype=float)
        self.result.fill(self.penalty)
    def _calc_value(self, conditions, indx=None):
        # Pixels strictly inside (min_alt, max_alt) are unmasked (value 1).
        result = self.result.copy()
        alt_limit = np.where((int_rounded(conditions.alt) > int_rounded(self.min_alt)) &
                             (int_rounded(conditions.alt) < int_rounded(self.max_alt)))[0]
        result[alt_limit] = 1
        return result
class Planet_mask_basis_function(Base_basis_function):
    """Mask the bright planets.

    Parameters
    ----------
    mask_radius : float (3.5)
        The radius to mask around a planet (degrees).
    planets : list of str (None)
        A list of planet names to mask. Defaults to ['venus', 'mars', 'jupiter']. Not including
        Saturn because it moves really slow and has average apparent mag of ~0.4, so fainter than Vega.
    """
    def __init__(self, mask_radius=3.5, planets=None, nside=None, scale=1e5):
        super(Planet_mask_basis_function, self).__init__(nside=nside)
        # Default set: fast movers bright enough to matter.
        self.planets = ['venus', 'mars', 'jupiter'] if planets is None else planets
        self.mask_radius = np.radians(mask_radius)
        self.result = np.zeros(hp.nside2npix(nside))
        # Pixel lookup within the mask radius (kdtree based; healpy.query_disc
        # could be an alternative).
        self.in_fov = hp_in_lsst_fov(nside=nside, fov_radius=mask_radius, scale=scale)

    def _calc_value(self, conditions, indx=None):
        result = self.result.copy()
        positions = conditions.planet_positions
        for name in self.planets:
            hit = self.in_fov(positions[name + '_RA'], positions[name + '_dec'])
            result[hit] = np.nan
        return result
class Zenith_shadow_mask_basis_function(Base_basis_function):
    """Mask the zenith, and things that will soon pass near zenith. Useful for making sure
    observations will not be too close to zenith when they need to be observed again (e.g. for a pair).
    Parameters
    ----------
    min_alt : float (20.)
        The minimum alititude to alow. Everything lower is masked. (degrees)
    max_alt : float (82.)
        The maximum altitude to alow. Everything higher is masked. (degrees)
    shadow_minutes : float (40.)
        Mask anything that will pass through the max alt in the next shadow_minutes time. (minutes)
    """
    def __init__(self, nside=None, min_alt=20., max_alt=82.,
                 shadow_minutes=40., penalty=np.nan, site='LSST'):
        super(Zenith_shadow_mask_basis_function, self).__init__(nside=nside)
        self.update_on_newobs = False
        self.penalty = penalty
        self.min_alt = np.radians(min_alt)
        self.max_alt = np.radians(max_alt)
        # RA/dec of every healpixel at this resolution.
        self.ra, self.dec = _hpid2RaDec(nside, np.arange(hp.nside2npix(nside)))
        # Convert shadow_minutes (minutes of time) to radians of hour angle:
        # minutes/60 -> hours, *360/24 -> degrees, then to radians.
        self.shadow_minutes = np.radians(shadow_minutes/60. * 360./24.)
        # Compute the declination band where things could drift into zenith
        self.decband = np.zeros(self.dec.size, dtype=float)
        # NOTE(review): half the zenith-avoidance cone (90 - max_alt degrees,
        # halved) — presumably to center the band on the site latitude;
        # confirm the intended geometry.
        self.zenith_radius = np.radians(90.-max_alt)/2.
        site = Site(name=site)
        self.lat_rad = site.latitude_rad
        self.lon_rad = site.longitude_rad
        # Mark pixels whose declination lies within zenith_radius of the
        # site latitude (the band that can pass near zenith).
        self.decband[np.where((int_rounded(self.dec) < int_rounded(self.lat_rad+self.zenith_radius)) &
                              (int_rounded(self.dec) > int_rounded(self.lat_rad-self.zenith_radius)))] = 1
        # Template map pre-filled with the penalty value.
        self.result = np.empty(hp.nside2npix(self.nside), dtype=float)
        self.result.fill(self.penalty)
    def _calc_value(self, conditions, indx=None):
        result = self.result.copy()
        # Unmask pixels strictly inside the allowed altitude range.
        alt_limit = np.where((int_rounded(conditions.alt) > int_rounded(self.min_alt)) &
                             (int_rounded(conditions.alt) < int_rounded(self.max_alt)))[0]
        result[alt_limit] = 1
        # Re-mask pixels in the zenith declination band whose hour angle puts
        # them within shadow_minutes (+ zenith_radius) of transiting zenith.
        to_mask = np.where((int_rounded(conditions.HA) > int_rounded(2.*np.pi-self.shadow_minutes-self.zenith_radius)) &
                           (self.decband == 1))
        result[to_mask] = np.nan
        return result
class Moon_avoidance_basis_function(Base_basis_function):
    """Avoid looking too close to the moon.

    Parameters
    ----------
    moon_distance: float (30.)
        Minimum allowed moon distance. (degrees)
    XXX--TODO: This could be a more complicated function of filter and moon phase.
    """
    def __init__(self, nside=None, moon_distance=30.):
        super(Moon_avoidance_basis_function, self).__init__(nside=nside)
        self.update_on_newobs = False
        # Stored as int_rounded radians so the comparison below is stable.
        self.moon_distance = int_rounded(np.radians(moon_distance))
        self.result = np.ones(hp.nside2npix(self.nside), dtype=float)

    def _calc_value(self, conditions, indx=None):
        result = self.result.copy()
        moon_sep = _angularSeparation(conditions.az, conditions.alt,
                                      conditions.moonAz,
                                      conditions.moonAlt)
        # NaN out everything closer to the moon than the allowed distance.
        too_close = int_rounded(moon_sep) < self.moon_distance
        result[too_close] = np.nan
        return result
class Bulk_cloud_basis_function(Base_basis_function):
    """Mark healpixels on a map if their cloud values are greater than
    the same healpixels on a maximum cloud map.

    Parameters
    ----------
    nside: int (default_nside)
        The healpix resolution.
    max_cloud_map : numpy array (None)
        A healpix map showing the maximum allowed cloud values for all points on the sky
    out_of_bounds_val : float (10.)
        Point value to give regions where there are no observations requested
    """
    def __init__(self, nside=None, max_cloud_map=None, max_val=0.7,
                 out_of_bounds_val=np.nan):
        super(Bulk_cloud_basis_function, self).__init__(nside=nside)
        self.update_on_newobs = False
        if max_cloud_map is None:
            # Uniform cloud limit when no per-pixel map is supplied.
            self.max_cloud_map = max_val + np.zeros(hp.nside2npix(nside), dtype=float)
        else:
            self.max_cloud_map = max_cloud_map
        # Pixels whose limit exceeds 1 are recorded as out of bounds.
        self.out_of_bounds_area = np.where(self.max_cloud_map > 1.)[0]
        self.out_of_bounds_val = out_of_bounds_val
        self.result = np.ones(hp.nside2npix(self.nside))

    def _calc_value(self, conditions, indx=None):
        """
        Parameters
        ----------
        indx : list (None)
            Index values to compute, if None, full map is computed
        Returns
        -------
        Healpix map where pixels with a cloud value greater than the max_cloud_map
        value are marked as unseen.
        """
        result = self.result.copy()
        over_limit = np.where(self.max_cloud_map <= conditions.bulk_cloud)
        result[over_limit] = self.out_of_bounds_val
        return result
class Map_cloud_basis_function(Base_basis_function):
    """Mark healpixels on a map if their cloud values are greater than
    the same healpixels on a maximum cloud map. Currently a placeholder for
    when the telemetry stream can include a full sky cloud map.

    Parameters
    ----------
    nside: int (default_nside)
        The healpix resolution.
    max_cloud_map : numpy array (None)
        A healpix map showing the maximum allowed cloud values for all points on the sky
    out_of_bounds_val : float (10.)
        Point value to give regions where there are no observations requested
    """
    def __init__(self, nside=None, max_cloud_map=None, max_val=0.7,
                 out_of_bounds_val=np.nan):
        # Bug fix: the original called super(Bulk_cloud_basis_function, self),
        # which raises TypeError because this class does not inherit from
        # Bulk_cloud_basis_function (copy/paste slip from that class).
        super(Map_cloud_basis_function, self).__init__(nside=nside)
        self.update_on_newobs = False
        if max_cloud_map is None:
            # Uniform cloud limit when no per-pixel map is supplied.
            self.max_cloud_map = np.zeros(hp.nside2npix(nside), dtype=float) + max_val
        else:
            self.max_cloud_map = max_cloud_map
        # Pixels whose limit exceeds 1 are recorded as out of bounds.
        self.out_of_bounds_area = np.where(self.max_cloud_map > 1.)[0]
        self.out_of_bounds_val = out_of_bounds_val
        self.result = np.ones(hp.nside2npix(self.nside))
    def _calc_value(self, conditions, indx=None):
        """
        Parameters
        ----------
        indx : list (None)
            Index values to compute, if None, full map is computed
        Returns
        -------
        Healpix map where pixels with a cloud value greater than the max_cloud_map
        value are marked as unseen.
        """
        result = self.result.copy()
        clouded = np.where(self.max_cloud_map <= conditions.bulk_cloud)
        result[clouded] = self.out_of_bounds_val
        return result
class Mask_azimuth_basis_function(Base_basis_function):
    """Mask pixels based on azimuth.

    Pixels whose azimuth falls strictly between az_min and az_max (degrees)
    receive out_of_bounds_val; all others keep the value 1.
    """
    def __init__(self, nside=None, out_of_bounds_val=np.nan, az_min=0., az_max=180.):
        super(Mask_azimuth_basis_function, self).__init__(nside=nside)
        # Azimuth window limits stored as int_rounded radians.
        self.az_min = int_rounded(np.radians(az_min))
        self.az_max = int_rounded(np.radians(az_max))
        self.out_of_bounds_val = out_of_bounds_val
        self.result = np.ones(hp.nside2npix(self.nside))

    def _calc_value(self, conditions, indx=None):
        az = int_rounded(conditions.az)
        inside_window = np.where((az > self.az_min) & (az < self.az_max))[0]
        result = self.result.copy()
        result[inside_window] = self.out_of_bounds_val
        return result
| [
"numpy.radians",
"numpy.where",
"rubin_sim.utils._angularSeparation",
"healpy.nside2pixarea",
"rubin_sim.utils.Site",
"rubin_sim.scheduler.utils.hp_in_lsst_fov",
"numpy.zeros",
"healpy.nside2npix",
"rubin_sim.scheduler.utils.int_rounded"
] | [((2195, 2216), 'numpy.radians', 'np.radians', (['min_elong'], {}), '(min_elong)\n', (2205, 2216), True, 'import numpy as np\n'), ((2242, 2263), 'numpy.radians', 'np.radians', (['max_elong'], {}), '(max_elong)\n', (2252, 2263), True, 'import numpy as np\n'), ((3230, 3249), 'numpy.radians', 'np.radians', (['min_alt'], {}), '(min_alt)\n', (3240, 3249), True, 'import numpy as np\n'), ((3273, 3292), 'numpy.radians', 'np.radians', (['max_alt'], {}), '(max_alt)\n', (3283, 3292), True, 'import numpy as np\n'), ((4433, 4456), 'numpy.radians', 'np.radians', (['mask_radius'], {}), '(mask_radius)\n', (4443, 4456), True, 'import numpy as np\n'), ((4602, 4666), 'rubin_sim.scheduler.utils.hp_in_lsst_fov', 'hp_in_lsst_fov', ([], {'nside': 'nside', 'fov_radius': 'mask_radius', 'scale': 'scale'}), '(nside=nside, fov_radius=mask_radius, scale=scale)\n', (4616, 4666), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((5895, 5914), 'numpy.radians', 'np.radians', (['min_alt'], {}), '(min_alt)\n', (5905, 5914), True, 'import numpy as np\n'), ((5938, 5957), 'numpy.radians', 'np.radians', (['max_alt'], {}), '(max_alt)\n', (5948, 5957), True, 'import numpy as np\n'), ((6068, 6116), 'numpy.radians', 'np.radians', (['(shadow_minutes / 60.0 * 360.0 / 24.0)'], {}), '(shadow_minutes / 60.0 * 360.0 / 24.0)\n', (6078, 6116), True, 'import numpy as np\n'), ((6209, 6245), 'numpy.zeros', 'np.zeros', (['self.dec.size'], {'dtype': 'float'}), '(self.dec.size, dtype=float)\n', (6217, 6245), True, 'import numpy as np\n'), ((6317, 6332), 'rubin_sim.utils.Site', 'Site', ([], {'name': 'site'}), '(name=site)\n', (6321, 6332), False, 'from rubin_sim.utils import _hpid2RaDec, Site, _angularSeparation, _xyz_from_ra_dec\n'), ((7987, 8079), 'rubin_sim.utils._angularSeparation', '_angularSeparation', (['conditions.az', 'conditions.alt', 'conditions.moonAz', 'conditions.moonAlt'], {}), '(conditions.az, conditions.alt, conditions.moonAz,\n conditions.moonAlt)\n', (8005, 8079), 
False, 'from rubin_sim.utils import _hpid2RaDec, Site, _angularSeparation, _xyz_from_ra_dec\n'), ((9783, 9836), 'numpy.where', 'np.where', (['(self.max_cloud_map <= conditions.bulk_cloud)'], {}), '(self.max_cloud_map <= conditions.bulk_cloud)\n', (9791, 9836), True, 'import numpy as np\n'), ((11515, 11568), 'numpy.where', 'np.where', (['(self.max_cloud_map <= conditions.bulk_cloud)'], {}), '(self.max_cloud_map <= conditions.bulk_cloud)\n', (11523, 11568), True, 'import numpy as np\n'), ((957, 982), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (970, 982), True, 'import healpy as hp\n'), ((1342, 1365), 'numpy.where', 'np.where', (['(area_map == 0)'], {}), '(area_map == 0)\n', (1350, 1365), True, 'import numpy as np\n'), ((2326, 2351), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (2339, 2351), True, 'import healpy as hp\n'), ((4488, 4508), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (4501, 4508), True, 'import healpy as hp\n'), ((6275, 6301), 'numpy.radians', 'np.radians', (['(90.0 - max_alt)'], {}), '(90.0 - max_alt)\n', (6285, 6301), True, 'import numpy as np\n'), ((6658, 6683), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (6671, 6683), True, 'import healpy as hp\n'), ((7775, 7800), 'numpy.radians', 'np.radians', (['moon_distance'], {}), '(moon_distance)\n', (7785, 7800), True, 'import numpy as np\n'), ((7832, 7857), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (7845, 7857), True, 'import healpy as hp\n'), ((9224, 9258), 'numpy.where', 'np.where', (['(self.max_cloud_map > 1.0)'], {}), '(self.max_cloud_map > 1.0)\n', (9232, 9258), True, 'import numpy as np\n'), ((9342, 9367), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (9355, 9367), True, 'import healpy as hp\n'), ((10956, 10990), 'numpy.where', 'np.where', (['(self.max_cloud_map > 1.0)'], {}), '(self.max_cloud_map > 1.0)\n', 
(10964, 10990), True, 'import numpy as np\n'), ((11074, 11099), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (11087, 11099), True, 'import healpy as hp\n'), ((11934, 11952), 'numpy.radians', 'np.radians', (['az_min'], {}), '(az_min)\n', (11944, 11952), True, 'import numpy as np\n'), ((11988, 12006), 'numpy.radians', 'np.radians', (['az_max'], {}), '(az_max)\n', (11998, 12006), True, 'import numpy as np\n'), ((12089, 12114), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (12102, 12114), True, 'import healpy as hp\n'), ((1380, 1422), 'healpy.nside2pixarea', 'hp.nside2pixarea', (['self.nside'], {'degrees': '(True)'}), '(self.nside, degrees=True)\n', (1396, 1422), True, 'import healpy as hp\n'), ((6015, 6035), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (6028, 6035), True, 'import healpy as hp\n'), ((8184, 8213), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['angular_distance'], {}), '(angular_distance)\n', (8195, 8213), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((3324, 3349), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (3337, 3349), True, 'import healpy as hp\n'), ((7063, 7089), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['conditions.HA'], {}), '(conditions.HA)\n', (7074, 7089), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((7092, 7159), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['(2.0 * np.pi - self.shadow_minutes - self.zenith_radius)'], {}), '(2.0 * np.pi - self.shadow_minutes - self.zenith_radius)\n', (7103, 7159), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((9084, 9104), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (9097, 9104), True, 'import healpy as hp\n'), ((10816, 10836), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (10829, 10836), True, 'import 
healpy as hp\n'), ((2521, 2561), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['conditions.solar_elongation'], {}), '(conditions.solar_elongation)\n', (2532, 2561), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((2565, 2592), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['self.min_elong'], {}), '(self.min_elong)\n', (2576, 2592), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((2625, 2665), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['conditions.solar_elongation'], {}), '(conditions.solar_elongation)\n', (2636, 2665), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((2669, 2696), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['self.max_elong'], {}), '(self.max_elong)\n', (2680, 2696), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((3501, 3528), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['conditions.alt'], {}), '(conditions.alt)\n', (3512, 3528), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((3531, 3556), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['self.min_alt'], {}), '(self.min_alt)\n', (3542, 3556), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((3590, 3617), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['conditions.alt'], {}), '(conditions.alt)\n', (3601, 3617), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((3620, 3645), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['self.max_alt'], {}), '(self.max_alt)\n', (3631, 3645), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((6447, 6468), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['self.dec'], {}), '(self.dec)\n', (6458, 6468), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((6471, 6517), 
'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['(self.lat_rad + self.zenith_radius)'], {}), '(self.lat_rad + self.zenith_radius)\n', (6482, 6517), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((6550, 6571), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['self.dec'], {}), '(self.dec)\n', (6561, 6571), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((6574, 6620), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['(self.lat_rad - self.zenith_radius)'], {}), '(self.lat_rad - self.zenith_radius)\n', (6585, 6620), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((6855, 6882), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['conditions.alt'], {}), '(conditions.alt)\n', (6866, 6882), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((6885, 6910), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['self.min_alt'], {}), '(self.min_alt)\n', (6896, 6910), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((6944, 6971), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['conditions.alt'], {}), '(conditions.alt)\n', (6955, 6971), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((6974, 6999), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['self.max_alt'], {}), '(self.max_alt)\n', (6985, 6999), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((12195, 12221), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['conditions.az'], {}), '(conditions.az)\n', (12206, 12221), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n'), ((12240, 12266), 'rubin_sim.scheduler.utils.int_rounded', 'int_rounded', (['conditions.az'], {}), '(conditions.az)\n', (12251, 12266), False, 'from rubin_sim.scheduler.utils import hp_in_lsst_fov, int_rounded\n')] |
"""
Tests for weave featurizer.
"""
import numpy as np
import deepchem as dc
from deepchem.feat.graph_features import max_pair_distance_pairs
def test_max_pair_distance_pairs():
    """Test that max pair distance pairs are computed properly."""
    from rdkit import Chem
    # Single carbon atom: the only pair is the self pair (0, 0) at either distance.
    mol = Chem.MolFromSmiles('C')
    for distance in (1, 2):
        pair_edges = max_pair_distance_pairs(mol, distance)
        assert pair_edges.shape == (2, 1)
        assert np.all(pair_edges.flatten() == np.array([0, 0]))
    # Propane: three atoms in a chain.
    mol = Chem.MolFromSmiles('CCC')
    # Distance 1: 3 self connections + 2 bonds, each counted twice
    # because of symmetry, for 7 total.
    assert max_pair_distance_pairs(mol, 1).shape == (2, 7)
    # Distance 2: every atom pair is connected.
    assert max_pair_distance_pairs(mol, 2).shape == (2, 9)
def test_max_pair_distance_infinity():
    """Test that max pair distance pairs are computed properly with infinity distance."""
    from rdkit import Chem
    # With max_pair_distance=None every atom pair is connected, so the
    # number of pairs is n_atoms ** 2.
    for smiles, expected_pairs in (('CCC', 9), ('CCCCC', 25)):
        mol = Chem.MolFromSmiles(smiles)
        pair_edges = max_pair_distance_pairs(mol, None)
        assert pair_edges.shape == (2, expected_pairs)
def test_weave_single_carbon():
    """Test that single carbon atom is featurized properly."""
    featurizer = dc.feat.WeaveFeaturizer()
    mol = featurizer.featurize(['C'])[0]
    # A single carbon atom with the default 75 atom features.
    assert mol.get_num_atoms() == 1
    assert mol.get_num_features() == 75
    # No bonds, so the only pair feature is the self interaction.
    assert mol.get_pair_features().shape == (1 * 1, 14)
def test_chiral_weave():
    """Test weave features on a molecule with chiral structure."""
    featurizer = dc.feat.WeaveFeaturizer(use_chirality=True)
    mol = featurizer.featurize(["F\C=C\F"])[0]  # noqa: W605
    # trans-difluoroethene has 4 atoms; chirality brings the feature
    # count from 75 to 78.
    assert mol.get_num_atoms() == 4
    assert mol.get_num_features() == 78
def test_weave_alkane():
    """Test on simple alkane"""
    featurizer = dc.feat.WeaveFeaturizer()
    mol = featurizer.featurize(['CCC'])[0]
    # Propane: 3 atoms, default 75 features, full 3x3 pair grid.
    assert mol.get_num_atoms() == 3
    assert mol.get_num_features() == 75
    assert mol.get_pair_features().shape == (3 * 3, 14)
def test_weave_alkane_max_pairs():
    """Test on simple alkane with max pairs distance cutoff"""
    from rdkit import Chem
    featurizer = dc.feat.WeaveFeaturizer(max_pair_distance=1)
    mol = featurizer._featurize(Chem.MolFromSmiles('CCC'))
    assert mol.get_num_atoms() == 3
    assert mol.get_num_features() == 75
    # Only 7 pairs lie within graph distance 1: 3 self interactions plus
    # 2 bonds, each counted twice because of symmetry.
    assert mol.get_pair_features().shape == (7, 14)
def test_carbon_nitrogen():
    """Test on carbon nitrogen molecule"""
    # Tetramethylammonium: a central nitrogen of degree 4 bonded to four
    # degree-1 carbons.
    featurizer = dc.feat.WeaveFeaturizer()
    mol = featurizer.featurize(['C[N+](C)(C)C'])[0]
    assert mol.get_num_atoms() == 5
    assert mol.get_num_features() == 75
    assert mol.get_pair_features().shape == (5 * 5, 14)
| [
"deepchem.feat.graph_features.max_pair_distance_pairs",
"numpy.array",
"rdkit.Chem.MolFromSmiles",
"deepchem.feat.WeaveFeaturizer"
] | [((289, 312), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""C"""'], {}), "('C')\n", (307, 312), False, 'from rdkit import Chem\n'), ((348, 379), 'deepchem.feat.graph_features.max_pair_distance_pairs', 'max_pair_distance_pairs', (['mol', '(1)'], {}), '(mol, 1)\n', (371, 379), False, 'from deepchem.feat.graph_features import max_pair_distance_pairs\n'), ((509, 540), 'deepchem.feat.graph_features.max_pair_distance_pairs', 'max_pair_distance_pairs', (['mol', '(2)'], {}), '(mol, 2)\n', (532, 540), False, 'from deepchem.feat.graph_features import max_pair_distance_pairs\n'), ((660, 685), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""CCC"""'], {}), "('CCC')\n", (678, 685), False, 'from rdkit import Chem\n'), ((721, 752), 'deepchem.feat.graph_features.max_pair_distance_pairs', 'max_pair_distance_pairs', (['mol', '(1)'], {}), '(mol, 1)\n', (744, 752), False, 'from deepchem.feat.graph_features import max_pair_distance_pairs\n'), ((924, 955), 'deepchem.feat.graph_features.max_pair_distance_pairs', 'max_pair_distance_pairs', (['mol', '(2)'], {}), '(mol, 2)\n', (947, 955), False, 'from deepchem.feat.graph_features import max_pair_distance_pairs\n'), ((1215, 1240), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""CCC"""'], {}), "('CCC')\n", (1233, 1240), False, 'from rdkit import Chem\n'), ((1283, 1317), 'deepchem.feat.graph_features.max_pair_distance_pairs', 'max_pair_distance_pairs', (['mol', 'None'], {}), '(mol, None)\n', (1306, 1317), False, 'from deepchem.feat.graph_features import max_pair_distance_pairs\n'), ((1425, 1452), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""CCCCC"""'], {}), "('CCCCC')\n", (1443, 1452), False, 'from rdkit import Chem\n'), ((1495, 1529), 'deepchem.feat.graph_features.max_pair_distance_pairs', 'max_pair_distance_pairs', (['mol', 'None'], {}), '(mol, None)\n', (1518, 1529), False, 'from deepchem.feat.graph_features import max_pair_distance_pairs\n'), ((1737, 1762), 'deepchem.feat.WeaveFeaturizer', 
'dc.feat.WeaveFeaturizer', ([], {}), '()\n', (1760, 1762), True, 'import deepchem as dc\n'), ((2201, 2244), 'deepchem.feat.WeaveFeaturizer', 'dc.feat.WeaveFeaturizer', ([], {'use_chirality': '(True)'}), '(use_chirality=True)\n', (2224, 2244), True, 'import deepchem as dc\n'), ((2522, 2547), 'deepchem.feat.WeaveFeaturizer', 'dc.feat.WeaveFeaturizer', ([], {}), '()\n', (2545, 2547), True, 'import deepchem as dc\n'), ((2952, 2996), 'deepchem.feat.WeaveFeaturizer', 'dc.feat.WeaveFeaturizer', ([], {'max_pair_distance': '(1)'}), '(max_pair_distance=1)\n', (2975, 2996), True, 'import deepchem as dc\n'), ((3795, 3820), 'deepchem.feat.WeaveFeaturizer', 'dc.feat.WeaveFeaturizer', ([], {}), '()\n', (3818, 3820), True, 'import deepchem as dc\n'), ((3116, 3143), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['mols[0]'], {}), '(mols[0])\n', (3134, 3143), False, 'from rdkit import Chem\n'), ((456, 472), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (464, 472), True, 'import numpy as np\n'), ((617, 633), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (625, 633), True, 'import numpy as np\n')] |
from taskinit import *
import time
import os
import re
import json
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import partitionhelper as ph
from mpi4casa.MPICommandClient import MPICommandClient
# pieflag is released under a BSD 3-Clause License
# See LICENSE for details
# HISTORY:
# 1.0 2005 Initial version by <NAME>, designed
# for use with customized UVLIST in MIRIAD
# 1.1 Jan2006 Various upgrades by Enno Middelberg
# 2.0 31Oct2014 Release of updated and CASA-compatible version
# written by <NAME>
# 2.1 26Nov2014 Fixed subscan bug (only operate on '0') and
# logger default value printout
# 2.2 25Mar2015 Updated handling for pre-flagged baselines and
# hid unimportant runtime display messages
# 3.0 28Mar2015 Enabled parallel processing
# 3.1 10Jun2015 Added error messages for SEFD extrapolation
# and integration time rounding problem, and
# fixed default numthreads
# 3.2 19Feb2016 Fixed parallel processing bug, enabled
# operation using DATA column, and removed
# lock file deletion
# 4.0 4Aug2016 Upgraded to use MPI parallelism in CASA 4.6.0+
# 4.1 13Oct2016 Fixed license, no changes to code
# 4.2 24Oct2016 Updated code category, no changes to code
# 4.3 25Oct2016 Fixed version number (affects 4.1, 4.2)
# 4.4 26Oct2016 Removed flag_row check, CASA does not
# currently respect this column properly
#
# See additional information in pieflag function below
def pieflag_getflagstats(vis,field,spw,npol,feedbasis):
#casalog.filter('WARN')
af.open(msname=vis)
af.selectdata(field=str(field),spw=str(spw))
ag0={'mode':'summary','action':'calculate'}
af.parseagentparameters(ag0)
af.init()
temp=af.run(writeflags=False)
af.done()
#casalog.filter('INFO')
if feedbasis:
RRf=temp['report0']['correlation']['RR']['flagged']
RRt=temp['report0']['correlation']['RR']['total']
LLf=temp['report0']['correlation']['LL']['flagged']
LLt=temp['report0']['correlation']['LL']['total']
else:
RRf=temp['report0']['correlation']['XX']['flagged']
RRt=temp['report0']['correlation']['XX']['total']
LLf=temp['report0']['correlation']['YY']['flagged']
LLt=temp['report0']['correlation']['YY']['total']
TOTf=temp['report0']['flagged']
TOTt=temp['report0']['total']
flagstats=np.array([RRf,RRt,LLf,LLt,TOTf,TOTt])
if npol == 4:
if feedbasis:
RLf=temp['report0']['correlation']['RL']['flagged']
RLt=temp['report0']['correlation']['RL']['total']
LRf=temp['report0']['correlation']['LR']['flagged']
LRt=temp['report0']['correlation']['LR']['total']
else:
RLf=temp['report0']['correlation']['XY']['flagged']
RLt=temp['report0']['correlation']['XY']['total']
LRf=temp['report0']['correlation']['YX']['flagged']
LRt=temp['report0']['correlation']['YX']['total']
flagstats=np.append(flagstats,[RLf,RLt,LRf,LRt])
return flagstats
def pieflag_flag(vis,datacol,nthreads,field,
vtbleLIST,inttime,nant,
ddid,spw,refchan,nchan,npol,feedbasis,
fitorderLIST,sefdLIST,
staticflag,madmax,binsamples,
dynamicflag,chunktime,stdmax,maxoffset,
extendflag,boxtime,boxthresh):
# Go through each baseline, spw, channel, and polarization and compare to reference channel
# while accounting for a spectral fit and the SEFD.
# Perform static, dynamic, and extend operations if requested
casalog.filter('INFO')
if nthreads > 1:
threadID = MPIEnvironment.mpi_processor_rank
casalog.post(' thread '+str(threadID)+'/'+str(nthreads)+' status: 0% complete (updates delivered every 10%)')
else:
casalog.post(' status: 0% complete (updates delivered every 10%)')
vtble = np.array(vtbleLIST)
sefd = np.array(sefdLIST)
fitorder = np.array(fitorderLIST)
tb.open(vis)
temp_ant1 = tb.getcol('ANTENNA1')
temp_ant2 = tb.getcol('ANTENNA2')
tb.close()
ant1 = temp_ant1[0]
ant2 = temp_ant2[0]
# get number of baselines from unique antenna combinations
nb = np.vstack(set(map(tuple, np.transpose(np.array([temp_ant1,temp_ant2])) ))).shape[0]
nspw=len(spw)
if feedbasis:
pSTR = ['RR']
if npol == 2:
pSTR.append('LL')
elif npol == 4:
pSTR.append('RL')
pSTR.append('LR')
pSTR.append('LL')
else:
pSTR = ['XX']
if npol == 2:
pSTR.append('YY')
elif npol == 4:
pSTR.append('XY')
pSTR.append('YX')
pSTR.append('YY')
# dim0 --> npol=2: 0=RR, 1=LL
# npol=4: 0=RR, 1=RL, 2=LR, 3=LL
specfitcoeffS=np.zeros((npol,max(fitorder)+1))
# rc = reference channel
# rcx = frequency in Hz for static flagging
rcx=np.zeros(nspw)
for i in range(nspw):
rcx[i] = vtble[refchan[i]][spw[i]]
# S = static
# Srcy: dim2=(median visibility amplitude, median absolute deviation)
Srcy=np.zeros((nspw,npol,2))
if extendflag:
kernellen = int(boxtime/inttime)
#kernel = np.ones(kernellen)
tb.open(vis)
ms.open(vis,nomodify=False)
printupdate=np.ones(9).astype(bool)
printcounter=1
checkprint=True
for b in range(nb):
if checkprint:
if printupdate[printcounter-1] and b+1>nb/10*printcounter:
if nthreads > 1:
casalog.post(' thread '+str(threadID)+'/'+str(nthreads)+' status: '+str(10*printcounter)+'% complete')
else:
casalog.post(' status: '+str(10*printcounter)+'% complete')
printupdate[printcounter-1]=False
printcounter+=1
if printcounter > 9:
checkprint=False
# get reference channel median and MAD for static flagging
validspw = np.zeros((npol,nspw))
for s in range(nspw):
for p in range(npol):
tempstr1 = '([select from '+vis+' where ANTENNA1=='+str(ant1)+' && ANTENNA2=='+str(ant2)+\
' && FIELD_ID=='+str(field)+' && DATA_DESC_ID=='+str(ddid[s])+\
' && FLAG['+str(p)+','+str(refchan[s])+']==False giving '
# ' && WEIGHT['+str(p)+']>0 giving '
tempstr2 = '[abs('+datacol.upper()+'['+str(p)+','+str(refchan[s])+'])]])'
tempval = tb.calc('count'+tempstr1+tempstr2)[0]
if tempval > 0:
validspw[p][s] = 1
if staticflag:
Srcy[s][p][0] = tb.calc('median'+tempstr1+tempstr2)[0]
tempstr3 = '[abs(abs('+datacol.upper()+'['+str(p)+','+str(refchan[s])+'])-'+\
str(Srcy[s][p][0])+')]])'
Srcy[s][p][1] = tb.calc('median'+tempstr1+tempstr3)[0]
else:
# If the reference channel for any one polarization isn't present,
# flag all data on this baseline in this spw.
# You won't be able to do static or dynamic flagging (nor extend flagging as a result).
# This part of the loop shouldn't get activated much on unflagged data because the
# user should have picked a suitable reference channel in each spw.
validspw[0][s] = 0
casalog.filter('WARN')
ms.reset()
try:
ms.msselect({'field':str(field),'baseline':str(ant1)+'&&'+str(ant2),'spw':str(spw[s])})
tempflag = ms.getdata('flag')
tempflag['flag'][:]=True
ms.putdata(tempflag)
casalog.filter('INFO')
except:
# this gets triggered if the entire baseline is already flagged
casalog.filter('INFO')
break
# get static spectral fits for each polarization
if staticflag:
tempfitorderS = np.copy(fitorder)
for p in range(npol):
# check that there are enough spw's to fit the requested spectral order
if sum(validspw[p]) > 0:
if tempfitorderS[p] > sum(validspw[p])-1:
if sum(validspw[p]) == 2:
tempfitorderS[p] = 1
else:
tempfitorderS[p] = 0
casalog.post('*** WARNING: staticflag fitorder for baseline ant1='+str(ant1)+' ant2='+str(ant2)+\
' pol='+pSTR[p]+' has been reduced to '+str(int(tempfitorderS[p])),'WARN')
# use MAD to weight the points
# (not mathematically correct, should be standard error, but OK approximation)
specfitcoeffS[p,0:tempfitorderS[p]+1] = np.polyfit(np.log10(rcx[validspw[p]>0]),\
np.log10(Srcy[0:,p,0][validspw[p]>0]),\
tempfitorderS[p],w=1.0/np.log10(Srcy[0:,p,1][validspw[p]>0]))
if dynamicflag and sum(validspw[0]) > 0:
# Don't assume that the same number of integrations (dump times) are present in each spw.
# This requirement makes the code messy
casalog.filter('WARN')
ms.reset()
ms.msselect({'field':str(field),'baseline':str(ant1)+'&&'+str(ant2)})
ms.iterinit(interval=chunktime,columns=['TIME'],adddefaultsortcolumns=False)
# get number of chunks and initialize arrays
ms.iterorigin()
moretodo=True
nchunks = 0
while moretodo:
nchunks += 1
moretodo = ms.iternext()
# start and end timestamp for each chunk
timestamps = np.zeros((nchunks,2))
# D = dynamic
# dim3 (per chunk) --> 0=reference channel median, 1=reference channel standard deviation
Drcy=np.zeros((nspw,npol,nchunks,2))
validspwD = np.zeros((npol,nchunks,nspw))
ms.iterorigin()
moretodo=True
chunk = 0
while moretodo:
tempflagD = ms.getdata('flag')['flag']
tempdataD = abs(ms.getdata(datacol.lower())[datacol.lower()])
tempddidD = ms.getdata('data_desc_id')['data_desc_id']
for s in range(nspw):
for p in range(npol):
# messy...
messydata1 = tempdataD[p,refchan[s]][tempflagD[p,refchan[s]]==False]
if len(messydata1) > 0:
messyddid = tempddidD[tempflagD[p,refchan[s]]==False]
messydata2 = messydata1[messyddid==ddid[s]]
if len(messydata2) > 0:
validspwD[p,chunk,s] = 1
Drcy[s,p,chunk,0] = np.median(messydata2)
Drcy[s,p,chunk,1] = np.std(messydata2)
# Get start and end timestamps so the data can be matched up later.
# The overall timespan reported here will be equal to or greater
# than the timespan reported below when ms.getdata is run on an
# individual spw, because we need to account for the possible
# presence of some spw's with less integrations. Messy...
temptimeD = ms.getdata('time')['time']
timestamps[chunk,0] = min(temptimeD)
timestamps[chunk,1] = max(temptimeD)
chunk += 1
moretodo = ms.iternext()
# get dynamic spectral fits for each polarization
tempfitorderD = np.zeros((nchunks,len(fitorder)))
for i in range(len(fitorder)):
tempfitorderD[:,i] = fitorder[i]
# dim0 --> npol=2: 0=RR, 1=LL
# npol=4: 0=RR, 1=RL, 2=LR, 3=LL
specfitcoeffD=np.zeros((npol,nchunks,max(fitorder)+1))
ms.iterorigin()
moretodo=True
chunk = 0
while moretodo:
for p in range(npol):
# check that there are enough spw's to fit the requested spectral order
if sum(validspwD[p,chunk]) > 0:
if tempfitorderD[chunk,p] > sum(validspwD[p,chunk])-1:
if sum(validspwD[p,chunk]) == 2:
tempfitorderD[chunk,p] = 1
else:
tempfitorderD[chunk,p] = 0
# native time is MJD seconds
t1=qa.time(qa.quantity(timestamps[chunk,0],'s'),form='ymd')[0]
t2=qa.time(qa.quantity(timestamps[chunk,1],'s'),form='d')[0]
casalog.post('*** WARNING: dynamicflag fitorder for baseline ant1='+str(ant1)+' ant2='+str(ant2)+\
' pol='+pSTR[p]+' time='+t1+'-'+t2+\
' has been reduced to '+str(int(tempfitorderD[chunk,p])),'WARN')
# prevent numerical warning when MAD=0 (ie single sample)
tempDrcy = Drcy[0:,p,chunk,1][validspwD[p,chunk]>0]
tempDrcy[tempDrcy==0] = 1e-200
specfitcoeffD[p,chunk,0:tempfitorderD[chunk,p]+1] = \
np.polyfit(np.log10(rcx[validspwD[p,chunk]>0]),np.log10(Drcy[0:,p,chunk,0][validspwD[p,chunk]>0]),\
tempfitorderD[chunk,p],w=1.0/np.log10(tempDrcy))
chunk += 1
moretodo = ms.iternext()
casalog.filter('INFO')
for s in range(nspw):
if validspw[0,s] > 0:
casalog.filter('WARN')
ms.reset()
ms.msselect({'field':str(field),'baseline':str(ant1)+'&&'+str(ant2),'spw':str(spw[s])})
# get data for this spw, accounting for existing flags
tempflag = ms.getdata('flag')
tempdata = abs(ms.getdata(datacol.lower())[datacol.lower()])
tempflagpf = np.zeros(tempdata.shape)
temptime = ms.getdata('time')['time']
casalog.filter('INFO')
if staticflag:
windowtime = binsamples * inttime
window = []
casalog.filter('WARN')
ms.iterinit(interval=windowtime)
ms.iterorigin()
# get number of time steps
moretodo=True
while moretodo:
# select from dummy column with small data size, eg int 'antenna1'
# (could also have used float 'time'...)
window.append(len(ms.getdata('antenna1')['antenna1']))
moretodo = ms.iternext()
casalog.filter('INFO')
for f in range(nchan):
# this shouldn't matter, but enforce that flagging
# doesn't take place on the reference channel
if f == refchan[s]:
continue
for p in range(npol):
if tempfitorderS[p] > 0:
specfit = 10.0**(np.poly1d(specfitcoeffS[p,0:tempfitorderS[p]+1])(np.log10(vtble[f][spw[s]])))
else:
specfit = Srcy[s][p][0]
# difference to median of reference channel, accounting for spectrum and sefd
tempdatachan = np.multiply(abs((tempdata[p][f]-specfit)/sefd[s][f]),np.invert(tempflag['flag'][p][f]))
tempbad = np.zeros(tempdatachan.shape)
tempbad[tempdatachan>=Srcy[s,p,1]*madmax] = 1
tempbad[tempdatachan>=Srcy[s,p,1]*madmax*2] += 1
# iterate in units of binsamples*inttime
# flag entire window if sum of badness values >=2
# if flagging needs to take place in one polarization, just flag them all
j=0
for w in window:
if sum(tempbad[j:j+w]) >= 2:
tempflagpf[0:npol,f,j:j+w] = 1
tempflag['flag'][0:npol,f,j:j+w] = True
j+=w
if dynamicflag:
for chunk in range(nchunks):
# calculate index range that matches up with timestamps
tL = np.where(temptime==timestamps[chunk,0])[0][0]
tU = np.where(temptime==timestamps[chunk,1])[0][0]
for p in range(npol):
if validspwD[p,chunk,s] == 1:
for f in range(nchan):
# this shouldn't matter, but enforce that flagging
# doesn't take place on the reference channel
if f == refchan[s]:
continue
if tempfitorderD[chunk,p] > 0:
specfit = 10.0**(np.poly1d(specfitcoeffD[p,chunk,0:tempfitorderD[chunk,p]+1])(np.log10(vtble[f][spw[s]])))
else:
specfit = Drcy[s,p,chunk,0]
# get channel data
tempdatachan = np.multiply(tempdata[p,f,tL:tU+1],np.invert(tempflag['flag'][p,f,tL:tU+1]))
# prevent display of runtime warnings when tempdatachan is empty or all-zero
if len(tempdatachan[tempdatachan>0]) > 0:
tempstd = np.std(tempdatachan[tempdatachan>0])/sefd[s][f]
if (tempstd >= stdmax*Drcy[s,p,chunk,1]) or \
(abs(np.median(tempdatachan[tempdatachan>0])-specfit) >= maxoffset*tempstd):
# if flagging needs to take place in one polarization, just flag them all
tempflagpf[0:npol,f,tL:tU+1] = 2
tempflag['flag'][0:npol,f,tL:tU+1] = True
else:
# If the reference channel for any one polarization isn't present,
# flag all data in this chunk on this baseline in this spw.
# This part of the loop shouldn't get activated much on unflagged data because the
# user should have picked a suitable reference channel in each spw.
tempflag['flag'][0:npol,0:nchan,tL:tU+1]=True
break
if extendflag:
tempscanfull = ms.getscansummary()
tempscankeys = map(int,tempscanfull.keys())
tempscankeys.sort()
tempscan = []
for j in tempscankeys:
tempscan.append(tempscanfull[str(j)]['0']['nRow'])
# only consider flags that have been set by pieflag, not pre-existing flags
j=0
for w in tempscan:
for f in range(nchan):
if f == refchan[s]:
continue
for p in range(npol):
# convolve if kernel is smaller than scan length
# otherwise, just use fraction of flagged values in scan
if w > kernellen:
#tempcon = np.convolve(tempflag['flag'][p][f][j:j+w],kernel,'valid')
#tempcon = np.convolve(tempflagchan[j:j+w],kernel,'valid')
for k in range(w-kernellen+1):
#tempfrac = float(sum(tempflag['flag'][p][f][j+k:j+k+kernellen]))/float(kernellen)
tempfrac = float(sum(tempflagpf[p,f,j+k:j+k+kernellen]))/float(kernellen)
if tempfrac > boxthresh/100.0:
tempflag['flag'][0:npol,f,j+k:j+k+kernellen] = True
else:
#tempfrac=float(sum(tempflag['flag'][p][f][j:j+w]))/float(w)
tempfrac=float(sum(tempflagpf[p,f,j:j+w]))/float(w)
if tempfrac > boxthresh/100.0:
tempflag['flag'][0:npol,f,j:j+w] = True
j+=w
ms.putdata(tempflag)
ant2 += 1
if ant2 > nant-1:
ant1 += 1
ant2 = ant1 + 1
ms.close()
tb.close()
if nthreads > 1:
casalog.post(' thread '+str(threadID)+'/'+str(nthreads)+' status: 100% complete')
casalog.filter('WARN')
else:
casalog.post(' status: 100% complete')
return
def pieflag(vis,
field, # data selection parameters
refchanfile,
fitorder_RR_LL,
fitorder_RL_LR,
scalethresh,
SEFDfile, # scalethresh parameter
plotSEFD,
dynamicflag,
chunktime, # dynamicflag parameters
stdmax,
maxoffset,
staticflag,
madmax, # staticflag parameter
binsamples,
extendflag,
boxtime, # extendflag parameters
boxthresh):
#
# Task pieflag
# Flags bad data by comparing with clean channels in bandpass-calibrated data.
#
# Original reference: <NAME>, 2006, PASA, 23, 64
# Rewritten for use in CASA and updated to account for wideband
# and SEFD effects by <NAME> 2014.
#
# Thanks to <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, and of course Enno Middelberg
# for expert advice. Thanks to <NAME> for providing
# Jansky VLA SEFD data for L and X bands (EVLA Memos 152 and 166)
# and to Bryan Butler for providing access to all other bands
# from the Jansky VLA Exposure Calculator.
#
# Version 4.4 released 26 October 2016
# Tested with CASA 4.7.0 using Jansky VLA data
# Available at: http://github.com/chrishales/pieflag
#
# Reference for this version:
# <NAME>, <NAME>, 2014, Astrophysics Source Code Library, 1408.014
# http://adsabs.harvard.edu/abs/2014ascl.soft08014H
#
startTime = time.time()
casalog.origin('pieflag')
casalog.post('--> pieflag version 4.4')
if (not staticflag) and (not dynamicflag):
casalog.post('*** ERROR: You need to select static or dynamic flagging.', 'ERROR')
casalog.post('*** ERROR: Exiting pieflag.', 'ERROR')
return
ms.open(vis)
vis=ms.name()
ms.close()
useMPI = MPIEnvironment.is_mpi_enabled
if useMPI:
if vis.lower().endswith('.ms'):
useMPI=False
casalog.post('--> MS will be processed in serial mode.')
elif ph.axisType(vis) == 'baseline':
# client is ID 0 and will not perform parallel processing, servers start from ID 1
nthreads = MPIEnvironment.rank
subms_path = vis+'/SUBMSS/'
subms = filter(lambda x: os.path.isdir(os.path.join(subms_path, x)), os.listdir(subms_path))
if len(subms) != nthreads:
casalog.post('*** ERROR: Mismatch, MMS tailored for '+str(len(subms))+' engines but '+\
'CASA session tailored for '+str(nthreads)+' engines.', 'ERROR')
casalog.post('*** ERROR: Exiting pieflag.', 'ERROR')
return
server_list = MPIEnvironment.mpi_server_rank_list()
casalog.post('--> Initializing MPI parallel cluster with '+str(nthreads)+' engines.')
client = MPICommandClient()
client.start_services()
# do some detective work to find appropriate path to push to clients
syspaths = sys.path
n = 0
for k in range(len(syspaths)):
if os.path.isfile(syspaths[k]+'/mytasks.py'):
for line in open(syspaths[k]+'/mytasks.py','r'):
if re.search("task_location\['pieflag'\]",line):
if n==0:
n += 1
addpath = syspaths[k]
elif syspaths[k] != addpath:
n += 1
if n == 1:
casalog.filter('WARN')
#client.set_log_level('WARN')
client.push_command_request("casalog.filter('WARN')",True,server_list)
client.push_command_request("sys.path.append('"+addpath+"')",True,server_list)
client.push_command_request('from task_pieflag import pieflag_getflagstats',True,server_list)
client.push_command_request('from task_pieflag import pieflag_flag',True,server_list)
casalog.filter('INFO')
else:
if n == 0:
casalog.post('*** ERROR: pieflag mytasks.py installation not found in sys.path', 'ERROR')
else:
casalog.post('*** ERROR: Ambiguity, sys.path contains more than 1 pieflag installation', 'ERROR')
casalog.post('*** (pieflag referenced in '+str(n)+' unique path/mytasks.py)', 'ERROR')
casalog.post('*** ERROR: Exiting pieflag.', 'ERROR')
return
fcall1 = 'pieflag_getflagstats(vis,field,spw,npol,feedbasis)'
fcall2 = 'pieflag_flag(vis,datacol,nthreads,field,vtbleLIST,inttime,nant,ddid,spw,refchan,nchan,npol,'+\
'feedbasis,fitorderLIST,sefdLIST,staticflag,madmax,binsamples,dynamicflag,chunktime,stdmax,'+\
'maxoffset,extendflag,boxtime,boxthresh)'
else:
casalog.post('*** ERROR: MMS is not partitioned by baseline. Cannot process.', 'ERROR')
casalog.post('*** Use partition() to revert to MS then create baseline MMS.', 'ERROR')
casalog.post('*** ERROR: Exiting pieflag.', 'ERROR')
return
else:
if vis.lower().endswith('.mms'):
casalog.post('*** ERROR: pieflag cannot handle MMS in non-MPI-enabled CASA session.', 'ERROR')
casalog.post('*** ERROR: Exiting pieflag.', 'ERROR')
return
else:
casalog.post('--> MS will be processed in serial mode.')
tb.open(vis)
if any('CORRECTED_DATA' in colnames for colnames in tb.colnames()):
datacol='CORRECTED_DATA'
else:
datacol='DATA'
tb.close()
# load in reference channel details
# OK, there are probably more elegant ways
# of implementing the following code...meh
refchandict=json.load(open(refchanfile))
spw=[]
for i in refchandict.keys():
spw.append(int(i))
nspw=len(spw)
# json doesn't seem to load in the spw order properly
# The user might not have entered spw's in order either
# so perform sort just in case
# note: no need to perform sort on the string versions
spw.sort()
# now get reference channels in corresponding sorted order
refchan=[]
for i in range(nspw):
refchan.append(refchandict[str(spw[i])])
# open MS and select relevant data
ms.open(vis)
ms.msselect({'field':str(field)})
# get integration time
scan_summary = ms.getscansummary()
ms.close()
scan_list = []
for scan in scan_summary:
if scan_summary[scan]['0']['FieldId'] == field:
scan_list.append(int(scan))
inttime=scan_summary[str(scan_list[0])]['0']['IntegrationTime']
# get around potential floating point issues by rounding to nearest 1e-5 seconds
if inttime != round(inttime,5):
casalog.post('*** WARNING: It seems your integration time is specified to finer than 1e-5 seconds.','WARN')
casalog.post('*** pieflag will assume this is a rounding error and carry on.','WARN')
for i in range(len(scan_list)):
if round(inttime,5) != round(scan_summary[str(scan_list[i])]['0']['IntegrationTime'],5):
casalog.post('*** ERROR: Bummer, pieflag is not set up to handle '+\
'changing integration times throughout your MS.', 'ERROR')
casalog.post('*** ERROR: Exiting pieflag.','ERROR')
return
# get number of baselines
tb.open(vis+'/ANTENNA')
atble=tb.getcol('NAME')
tb.close()
nant=atble.shape[0]
nbaselines=nant*(nant-1)/2
# channel to frequency (Hz) conversion
tb.open(vis+'/SPECTRAL_WINDOW')
vtble=tb.getcol('CHAN_FREQ')
tb.close()
# vtble format is vtble[channel][spw]
# assume each spw has the same number of channels
nchan=vtble.shape[0]
# check that spw frequencies increase monotonically
spwcheck=vtble[0,0]
for s in range(1,len(vtble[0,:])):
if vtble[0,s]<spwcheck:
casalog.post("*** ERROR: Your spw's are not ordered with increasing frequency.",'ERROR')
casalog.post('*** ERROR: Consider splitting your data and restarting pieflag. Exiting','ERROR')
return
spwcheck=vtble[0,s]
# get number of polarizations, assume they don't change throughout observation
# get details from the first user-selected spw within the first scan on target field
# note: I won't assume that spw specifies data_desc_id in the main table, even
# though in most cases it probably does. Probably overkill given the lack
# of checks done elsewhere in this code...
tb.open(vis+'/DATA_DESCRIPTION')
temptb=tb.query('SPECTRAL_WINDOW_ID='+str(spw[0]))
# while here, get the data_desc_id values that pair with spw number
tempddid=tb.getcol('SPECTRAL_WINDOW_ID').tolist()
ddid=[]
for s in range(nspw):
ddid.append(tempddid.index(spw[s]))
tb.close()
polid=temptb.getcell('POLARIZATION_ID')
tb.open(vis+'/POLARIZATION')
npol=tb.getcell('NUM_CORR',polid)
poltype=tb.getcell('CORR_TYPE',polid)
tb.close()
if not (npol == 2 or npol == 4):
casalog.post('*** ERROR: Your data contains '+str(npol)+' polarization products.','ERROR')
casalog.post('*** ERROR: pieflag can only handle 2 (eg RR/LL) or 4 (eg RR/RL/LR/LL). Exiting.','ERROR')
return
# see stokes.h for details
if poltype[0] == 5:
# circular
feedbasis = 1
elif poltype[0] == 9:
#linear
feedbasis = 0
else:
casalog.post('*** ERROR: Your data uses an unsupported feed basis. Exiting','ERROR')
return
casalog.post('--> Some details about your data:')
casalog.post(' data column to process = '+datacol)
casalog.post(' integration time = '+str(inttime)+' sec')
casalog.post(' number of baselines = '+str(nbaselines))
casalog.post(' spectral windows to process = '+str(spw))
casalog.post(' number of channels per spectral window = '+str(nchan))
if feedbasis:
casalog.post(' feed basis = circular')
else:
casalog.post(' feed basis = linear')
casalog.post(' number of polarization products to process = '+str(npol))
casalog.post('--> Statistics of pre-existing flags:')
flag0 = np.zeros((nspw,2*npol+2))
for i in range(nspw):
casalog.filter('WARN')
if useMPI:
for k in range(nthreads):
param = {'vis':vis+'/SUBMSS/'+subms[k],'field':field,\
'spw':spw[i],'npol':npol,'feedbasis':feedbasis}
if k == 0:
pid = client.push_command_request(fcall1,False,None,param)
else:
pid.append((client.push_command_request(fcall1,False,None,param))[0])
presults = client.get_command_response(pid,True)
for k in range(nthreads):
flag0[i] += presults[k]['ret']
else:
flag0[i] = pieflag_getflagstats(vis,field,spw[i],npol,feedbasis)
casalog.filter('INFO')
RRs="{:.1f}".format(flag0[i][0]/flag0[i][1]*100.)
LLs="{:.1f}".format(flag0[i][2]/flag0[i][3]*100.)
TOTs="{:.1f}".format(flag0[i][4]/flag0[i][5]*100.)
if npol == 2:
if feedbasis:
outstr=' flagged data in spw='+str(spw[i])+': RR='+RRs+'% LL='+LLs+'% total='+TOTs+'%'
else:
outstr=' flagged data in spw='+str(spw[i])+': XX='+RRs+'% YY='+LLs+'% total='+TOTs+'%'
else:
RLs="{:.1f}".format(flag0[i][6]/flag0[i][7]*100.)
LRs="{:.1f}".format(flag0[i][8]/flag0[i][9]*100.)
if feedbasis:
outstr=' flagged data in spw='+str(spw[i])+': RR='+RRs+'% RL='+RLs+'% LR='+LRs+'% LL='+LLs+'% total='+TOTs+'%'
else:
outstr=' flagged data in spw='+str(spw[i])+': XX='+RRs+'% XY='+RLs+'% YX='+LRs+'% YY='+LLs+'% total='+TOTs+'%'
casalog.post(outstr)
# Check there are enough spectral windows to perform the fitting later on. If not, lower the order.
if fitorder_RR_LL > nspw-1:
if fitorder_RR_LL == 2:
if feedbasis:
casalog.post('*** WARNING: pieflag needs at least 3 spectral windows to fit for RR or LL spectral curvature.','WARN')
else:
casalog.post('*** WARNING: pieflag needs at least 3 spectral windows to fit for XX or YY spectral curvature.','WARN')
else:
if feedbasis:
casalog.post('*** WARNING: pieflag needs at least 2 spectral windows to fit for RR or LL spectral index.','WARN')
else:
casalog.post('*** WARNING: pieflag needs at least 2 spectral windows to fit for XX or YY spectral index.','WARN')
if nspw == 2:
fitorder_RR_LL=1
else:
fitorder_RR_LL=0
casalog.post('*** WARNING: fitorder_RR_LL has been reduced to '+str(int(fitorder_RR_LL))+ ' and','WARN')
casalog.post('*** may be reduced further for some baselines if the','WARN')
casalog.post('*** reference channel isn\'t available in all selected spw\'s.','WARN')
if npol == 2:
fitorder = np.zeros(2)
fitorder[0] = fitorder_RR_LL
fitorder[1] = fitorder_RR_LL
elif npol == 4:
if fitorder_RL_LR > nspw-1:
if fitorder_RL_LR == 2:
casalog.post('*** WARNING: pieflag needs at least 3 spectral windows to fit for RL or LR spectral curvature.','WARN')
else:
casalog.post('*** WARNING: pieflag needs at least 2 spectral windows to fit for RL or LR spectral index.','WARN')
if nspw == 2:
fitorder_RL_LR=1
else:
fitorder_RL_LR=0
casalog.post('*** WARNING: fitorder_RL_LR has been reduced to '+str(int(fitorder_RL_LR))+' and','WARN')
casalog.post('*** may be reduced further for some baselines if the','WARN')
casalog.post('*** reference channel isn\'t available in all selected spw\'s.','WARN')
fitorder = np.zeros(4)
fitorder[0] = fitorder_RR_LL
fitorder[1] = fitorder_RL_LR
fitorder[2] = fitorder_RL_LR
fitorder[3] = fitorder_RR_LL
if scalethresh:
# read in SEFD data and interpolate to get values at our channel frequencies
casalog.post('--> Reading in SEFD and interpolating at channel frequencies...')
sefdRAW=np.loadtxt(SEFDfile)
sefd=np.zeros((nspw,nchan))
if not np.all(np.diff(sefdRAW[:,0]) >= 0):
casalog.post('*** ERROR: Your SEFD file must be in order of increasing frequency.','ERROR')
casalog.post('*** ERROR: Exiting pieflag.','ERROR')
return
for i in range(nspw):
if (vtble[:,spw[i]].min() < sefdRAW[:,0].min()) or (vtble[:,spw[i]].max() > sefdRAW[:,0].max()):
casalog.post('*** ERROR: pieflag cannot extrapolate your SEFD.','ERROR')
casalog.post('*** ERROR: Provide new SEFD covering your entire frequency range.','ERROR')
casalog.post('*** ERROR: Exiting pieflag.','ERROR')
return
sefdINTERP = interp1d(sefdRAW[:,0],sefdRAW[:,1])
for i in range(nspw):
sefdREFCHAN = sefdINTERP(vtble[refchan[i]][spw[i]])
for j in range(nchan):
# values in each spectral window will be relative to the reference channel value
sefd[i][j] = sefdINTERP(vtble[j][spw[i]]) / sefdREFCHAN
if plotSEFD:
# clunky, but works, meh...
sefdPLOT=np.zeros((nspw*nchan,3))
k=0
for i in range(nspw):
sefdREFCHAN = sefdINTERP(vtble[refchan[i]][spw[i]])
for j in range(nchan):
sefdPLOT[k][0] = vtble[j][spw[i]]/1.0e9
sefdPLOT[k][1] = sefd[i][j] * sefdREFCHAN
sefdPLOT[k][2] = sefd[i][j]
k += 1
f, (ax1, ax2) = plt.subplots(2,sharex=True)
ax1.plot(sefdRAW[:,0]/1.0e9,sefdRAW[:,1],'b-',sefdPLOT[:,0],sefdPLOT[:,1],'r.',markersize=10)
ax2.plot([sefdRAW[0,0]/1.0e9,sefdRAW[len(sefdRAW[:,0])-1,0]/1.0e9],[1.,1.],'c-',sefdPLOT[:,0],sefdPLOT[:,2],'r.',markersize=10)
f.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
ax1.set_title('relative sensitivity assumed across your band,\nnormalized to the reference channel in each spw')
ax1.legend(['raw input','interpolated'])
ax1.set_ylabel('SEFD (arbitrary units)')
ax2.set_xlabel('frequency (GHz)')
ax2.set_ylabel('SEFD (normalized units per spw)')
else:
sefd=np.ones((nspw,nchan))
if not staticflag:
madmax = 0
binsamples = 0
if not dynamicflag:
chunktime = 0
stdmax = 0
maxoffset = 0
if not extendflag:
boxtime = 0
boxthresh = 0
# forcibly remove all lock files
#os.system('find '+vis+' -name "*lock" -print | xargs rm')
if useMPI:
casalog.post('--> pieflag will now flag your data using '+str(nthreads)+' parallel threads.')
casalog.filter('WARN')
for k in range(nthreads):
param = {'vis':vis+'/SUBMSS/'+subms[k],'datacol':datacol,'nthreads':nthreads,'field':field,
'vtbleLIST':vtble.tolist(),'inttime':inttime,'nant':nant,
'ddid':ddid,'spw':spw,'refchan':refchan,'nchan':nchan,'npol':npol,'feedbasis':feedbasis,
'fitorderLIST':fitorder.tolist(),'sefdLIST':sefd.tolist(),
'staticflag':staticflag,'madmax':madmax,'binsamples':binsamples,
'dynamicflag':dynamicflag,'chunktime':chunktime,'stdmax':stdmax,'maxoffset':maxoffset,
'extendflag':extendflag,'boxtime':boxtime,'boxthresh':boxthresh}
if k == 0:
pid = client.push_command_request(fcall2,False,None,param)
else:
pid.append((client.push_command_request(fcall2,False,None,param))[0])
presults = client.get_command_response(pid,True)
casalog.filter('INFO')
else:
casalog.post('--> pieflag will now flag your data in serial mode.')
pieflag_flag(vis,datacol,1,field,
vtble.tolist(),inttime,nant,
ddid,spw,refchan,nchan,npol,feedbasis,
fitorder.tolist(),sefd.tolist(),
staticflag,madmax,binsamples,
dynamicflag,chunktime,stdmax,maxoffset,
extendflag,boxtime,boxthresh)
# show updated flagging statistics
casalog.post('--> Statistics of final flags (including pre-existing):')
flag1 = np.zeros((nspw,2*npol+2))
for i in range(nspw):
casalog.filter('WARN')
if useMPI:
for k in range(nthreads):
param = {'vis':vis+'/SUBMSS/'+subms[k],'field':field,\
'spw':spw[i],'npol':npol,'feedbasis':feedbasis}
if k == 0:
pid = client.push_command_request(fcall1,False,None,param)
else:
pid.append((client.push_command_request(fcall1,False,None,param))[0])
presults = client.get_command_response(pid,True)
for k in range(nthreads):
flag1[i] += presults[k]['ret']
else:
flag1[i] = pieflag_getflagstats(vis,field,spw[i],npol,feedbasis)
casalog.filter('INFO')
RRs="{:.1f}".format(flag1[i][0]/flag1[i][1]*100.)
LLs="{:.1f}".format(flag1[i][2]/flag1[i][3]*100.)
TOTs="{:.1f}".format(flag1[i][4]/flag1[i][5]*100.)
if npol == 2:
if feedbasis:
outstr=' flagged data in spw='+str(spw[i])+': RR='+RRs+'% LL='+LLs+'% total='+TOTs+'%'
else:
outstr=' flagged data in spw='+str(spw[i])+': XX='+RRs+'% YY='+LLs+'% total='+TOTs+'%'
else:
RLs="{:.1f}".format(flag1[i][6]/flag1[i][7]*100.)
LRs="{:.1f}".format(flag1[i][8]/flag1[i][9]*100.)
if feedbasis:
outstr=' flagged data in spw='+str(spw[i])+': RR='+RRs+'% RL='+RLs+'% LR='+LRs+'% LL='+LLs+'% total='+TOTs+'%'
else:
outstr=' flagged data in spw='+str(spw[i])+': XX='+RRs+'% XY='+RLs+'% YX='+LRs+'% YY='+LLs+'% total='+TOTs+'%'
casalog.post(outstr)
casalog.post('--> Statistics of pieflag flags (excluding pre-existing):')
for i in range(nspw):
RRs="{:.1f}".format((flag1[i][0]-flag0[i][0])/flag0[i][1]*100.)
LLs="{:.1f}".format((flag1[i][2]-flag0[i][2])/flag0[i][3]*100.)
TOTs="{:.1f}".format((flag1[i][4]-flag0[i][4])/flag0[i][5]*100.)
if npol == 2:
if feedbasis:
outstr=' data flagged in spw='+str(spw[i])+': RR='+RRs+'% LL='+LLs+'% total='+TOTs+'%'
else:
outstr=' data flagged in spw='+str(spw[i])+': XX='+RRs+'% YY='+LLs+'% total='+TOTs+'%'
else:
RLs="{:.1f}".format((flag1[i][6]-flag0[i][6])/flag0[i][7]*100.)
LRs="{:.1f}".format((flag1[i][8]-flag0[i][8])/flag0[i][9]*100.)
if feedbasis:
outstr=' data flagged in spw='+str(spw[i])+': RR='+RRs+'% RL='+RLs+'% LR='+LRs+'% LL='+LLs+'% total='+TOTs+'%'
else:
outstr=' data flagged in spw='+str(spw[i])+': XX='+RRs+'% XY='+RLs+'% YX='+LRs+'% YY='+LLs+'% total='+TOTs+'%'
casalog.post(outstr)
# forcibly remove all lock files
#os.system('find '+vis+' -name "*lock" -print | xargs rm')
if useMPI:
#client.set_log_level('INFO')
client.push_command_request("casalog.filter('INFO')",True,server_list)
t=time.time()-startTime
casalog.post('--> pieflag run time: '+str(int(t//3600))+' hours '+\
str(int(t%3600//60))+' minutes '+str(int(t%60))+' seconds')
| [
"numpy.log10",
"scipy.interpolate.interp1d",
"numpy.invert",
"numpy.array",
"numpy.poly1d",
"re.search",
"os.listdir",
"numpy.where",
"mpi4casa.MPICommandClient.MPICommandClient",
"numpy.diff",
"numpy.ones",
"os.path.isfile",
"numpy.std",
"time.time",
"numpy.copy",
"numpy.median",
"o... | [((2566, 2608), 'numpy.array', 'np.array', (['[RRf, RRt, LLf, LLt, TOTf, TOTt]'], {}), '([RRf, RRt, LLf, LLt, TOTf, TOTt])\n', (2574, 2608), True, 'import numpy as np\n'), ((4149, 4168), 'numpy.array', 'np.array', (['vtbleLIST'], {}), '(vtbleLIST)\n', (4157, 4168), True, 'import numpy as np\n'), ((4180, 4198), 'numpy.array', 'np.array', (['sefdLIST'], {}), '(sefdLIST)\n', (4188, 4198), True, 'import numpy as np\n'), ((4214, 4236), 'numpy.array', 'np.array', (['fitorderLIST'], {}), '(fitorderLIST)\n', (4222, 4236), True, 'import numpy as np\n'), ((5208, 5222), 'numpy.zeros', 'np.zeros', (['nspw'], {}), '(nspw)\n', (5216, 5222), True, 'import numpy as np\n'), ((5397, 5422), 'numpy.zeros', 'np.zeros', (['(nspw, npol, 2)'], {}), '((nspw, npol, 2))\n', (5405, 5422), True, 'import numpy as np\n'), ((24555, 24566), 'time.time', 'time.time', ([], {}), '()\n', (24564, 24566), False, 'import time\n'), ((33633, 33663), 'numpy.zeros', 'np.zeros', (['(nspw, 2 * npol + 2)'], {}), '((nspw, 2 * npol + 2))\n', (33641, 33663), True, 'import numpy as np\n'), ((42422, 42452), 'numpy.zeros', 'np.zeros', (['(nspw, 2 * npol + 2)'], {}), '((nspw, 2 * npol + 2))\n', (42430, 42452), True, 'import numpy as np\n'), ((3189, 3231), 'numpy.append', 'np.append', (['flagstats', '[RLf, RLt, LRf, LRt]'], {}), '(flagstats, [RLf, RLt, LRf, LRt])\n', (3198, 3231), True, 'import numpy as np\n'), ((6306, 6328), 'numpy.zeros', 'np.zeros', (['(npol, nspw)'], {}), '((npol, nspw))\n', (6314, 6328), True, 'import numpy as np\n'), ((36656, 36667), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (36664, 36667), True, 'import numpy as np\n'), ((37977, 37997), 'numpy.loadtxt', 'np.loadtxt', (['SEFDfile'], {}), '(SEFDfile)\n', (37987, 37997), True, 'import numpy as np\n'), ((38011, 38034), 'numpy.zeros', 'np.zeros', (['(nspw, nchan)'], {}), '((nspw, nchan))\n', (38019, 38034), True, 'import numpy as np\n'), ((38736, 38774), 'scipy.interpolate.interp1d', 'interp1d', (['sefdRAW[:, 0]', 'sefdRAW[:, 1]'], 
{}), '(sefdRAW[:, 0], sefdRAW[:, 1])\n', (38744, 38774), False, 'from scipy.interpolate import interp1d\n'), ((40337, 40359), 'numpy.ones', 'np.ones', (['(nspw, nchan)'], {}), '((nspw, nchan))\n', (40344, 40359), True, 'import numpy as np\n'), ((45557, 45568), 'time.time', 'time.time', ([], {}), '()\n', (45566, 45568), False, 'import time\n'), ((5593, 5603), 'numpy.ones', 'np.ones', (['(9)'], {}), '(9)\n', (5600, 5603), True, 'import numpy as np\n'), ((8582, 8599), 'numpy.copy', 'np.copy', (['fitorder'], {}), '(fitorder)\n', (8589, 8599), True, 'import numpy as np\n'), ((10522, 10544), 'numpy.zeros', 'np.zeros', (['(nchunks, 2)'], {}), '((nchunks, 2))\n', (10530, 10544), True, 'import numpy as np\n'), ((10702, 10736), 'numpy.zeros', 'np.zeros', (['(nspw, npol, nchunks, 2)'], {}), '((nspw, npol, nchunks, 2))\n', (10710, 10736), True, 'import numpy as np\n'), ((10771, 10802), 'numpy.zeros', 'np.zeros', (['(npol, nchunks, nspw)'], {}), '((npol, nchunks, nspw))\n', (10779, 10802), True, 'import numpy as np\n'), ((37603, 37614), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (37611, 37614), True, 'import numpy as np\n'), ((39161, 39188), 'numpy.zeros', 'np.zeros', (['(nspw * nchan, 3)'], {}), '((nspw * nchan, 3))\n', (39169, 39188), True, 'import numpy as np\n'), ((39581, 39609), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)'}), '(2, sharex=True)\n', (39593, 39609), True, 'import matplotlib.pyplot as plt\n'), ((15136, 15160), 'numpy.zeros', 'np.zeros', (['tempdata.shape'], {}), '(tempdata.shape)\n', (15144, 15160), True, 'import numpy as np\n'), ((25125, 25141), 'partitionhelper.axisType', 'ph.axisType', (['vis'], {}), '(vis)\n', (25136, 25141), True, 'import partitionhelper as ph\n'), ((25977, 25995), 'mpi4casa.MPICommandClient.MPICommandClient', 'MPICommandClient', ([], {}), '()\n', (25993, 25995), False, 'from mpi4casa.MPICommandClient import MPICommandClient\n'), ((25416, 25438), 'os.listdir', 'os.listdir', (['subms_path'], {}), 
'(subms_path)\n', (25426, 25438), False, 'import os\n'), ((26225, 26268), 'os.path.isfile', 'os.path.isfile', (["(syspaths[k] + '/mytasks.py')"], {}), "(syspaths[k] + '/mytasks.py')\n", (26239, 26268), False, 'import os\n'), ((38056, 38078), 'numpy.diff', 'np.diff', (['sefdRAW[:, 0]'], {}), '(sefdRAW[:, 0])\n', (38063, 38078), True, 'import numpy as np\n'), ((9504, 9534), 'numpy.log10', 'np.log10', (['rcx[validspw[p] > 0]'], {}), '(rcx[validspw[p] > 0])\n', (9512, 9534), True, 'import numpy as np\n'), ((9595, 9636), 'numpy.log10', 'np.log10', (['Srcy[0:, p, 0][validspw[p] > 0]'], {}), '(Srcy[0:, p, 0][validspw[p] > 0])\n', (9603, 9636), True, 'import numpy as np\n'), ((4508, 4540), 'numpy.array', 'np.array', (['[temp_ant1, temp_ant2]'], {}), '([temp_ant1, temp_ant2])\n', (4516, 4540), True, 'import numpy as np\n'), ((14373, 14411), 'numpy.log10', 'np.log10', (['rcx[validspwD[p, chunk] > 0]'], {}), '(rcx[validspwD[p, chunk] > 0])\n', (14381, 14411), True, 'import numpy as np\n'), ((14409, 14465), 'numpy.log10', 'np.log10', (['Drcy[0:, p, chunk, 0][validspwD[p, chunk] > 0]'], {}), '(Drcy[0:, p, chunk, 0][validspwD[p, chunk] > 0])\n', (14417, 14465), True, 'import numpy as np\n'), ((16928, 16956), 'numpy.zeros', 'np.zeros', (['tempdatachan.shape'], {}), '(tempdatachan.shape)\n', (16936, 16956), True, 'import numpy as np\n'), ((25386, 25413), 'os.path.join', 'os.path.join', (['subms_path', 'x'], {}), '(subms_path, x)\n', (25398, 25413), False, 'import os\n'), ((26364, 26411), 're.search', 're.search', (['"""task_location\\\\[\'pieflag\'\\\\]"""', 'line'], {}), '("task_location\\\\[\'pieflag\'\\\\]", line)\n', (26373, 26411), False, 'import re\n'), ((9718, 9759), 'numpy.log10', 'np.log10', (['Srcy[0:, p, 1][validspw[p] > 0]'], {}), '(Srcy[0:, p, 1][validspw[p] > 0])\n', (9726, 9759), True, 'import numpy as np\n'), ((11694, 11715), 'numpy.median', 'np.median', (['messydata2'], {}), '(messydata2)\n', (11703, 11715), True, 'import numpy as np\n'), ((11768, 11786), 
'numpy.std', 'np.std', (['messydata2'], {}), '(messydata2)\n', (11774, 11786), True, 'import numpy as np\n'), ((16826, 16859), 'numpy.invert', 'np.invert', (["tempflag['flag'][p][f]"], {}), "(tempflag['flag'][p][f])\n", (16835, 16859), True, 'import numpy as np\n'), ((17944, 17986), 'numpy.where', 'np.where', (['(temptime == timestamps[chunk, 0])'], {}), '(temptime == timestamps[chunk, 0])\n', (17952, 17986), True, 'import numpy as np\n'), ((18019, 18061), 'numpy.where', 'np.where', (['(temptime == timestamps[chunk, 1])'], {}), '(temptime == timestamps[chunk, 1])\n', (18027, 18061), True, 'import numpy as np\n'), ((14517, 14535), 'numpy.log10', 'np.log10', (['tempDrcy'], {}), '(tempDrcy)\n', (14525, 14535), True, 'import numpy as np\n'), ((16427, 16478), 'numpy.poly1d', 'np.poly1d', (['specfitcoeffS[p, 0:tempfitorderS[p] + 1]'], {}), '(specfitcoeffS[p, 0:tempfitorderS[p] + 1])\n', (16436, 16478), True, 'import numpy as np\n'), ((16476, 16502), 'numpy.log10', 'np.log10', (['vtble[f][spw[s]]'], {}), '(vtble[f][spw[s]])\n', (16484, 16502), True, 'import numpy as np\n'), ((19036, 19080), 'numpy.invert', 'np.invert', (["tempflag['flag'][p, f, tL:tU + 1]"], {}), "(tempflag['flag'][p, f, tL:tU + 1])\n", (19045, 19080), True, 'import numpy as np\n'), ((19356, 19394), 'numpy.std', 'np.std', (['tempdatachan[tempdatachan > 0]'], {}), '(tempdatachan[tempdatachan > 0])\n', (19362, 19394), True, 'import numpy as np\n'), ((18659, 18724), 'numpy.poly1d', 'np.poly1d', (['specfitcoeffD[p, chunk, 0:tempfitorderD[chunk, p] + 1]'], {}), '(specfitcoeffD[p, chunk, 0:tempfitorderD[chunk, p] + 1])\n', (18668, 18724), True, 'import numpy as np\n'), ((18720, 18746), 'numpy.log10', 'np.log10', (['vtble[f][spw[s]]'], {}), '(vtble[f][spw[s]])\n', (18728, 18746), True, 'import numpy as np\n'), ((19538, 19579), 'numpy.median', 'np.median', (['tempdatachan[tempdatachan > 0]'], {}), '(tempdatachan[tempdatachan > 0])\n', (19547, 19579), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Create the TODO list which is used by the pipeline to keep track of the
targets that needs to be processed.
"""
import os
import numpy as np
import logging
import sqlite3
import h5py
import re
import itertools
import functools
import contextlib
import multiprocessing
from scipy.ndimage.morphology import distance_transform_edt
from scipy.interpolate import RectBivariateSpline
from astropy.table import Table, vstack, Column
from astropy.io import fits
from astropy.wcs import WCS
from timeit import default_timer
from .utilities import find_tpf_files, find_hdf5_files, find_catalog_files, sphere_distance
from .catalog import catalog_sqlite_search_footprint, download_catalogs
#--------------------------------------------------------------------------------------------------
def calc_cbv_area(catalog_row, settings):
	"""
	CBV area that a given target falls within.

	The area code is built as ``camera*100 + ccd*10 + zone``, where the
	zone (1-4) is determined by how far the target lies from the camera
	centre, in quarters of the centre-to-corner distance.

	Parameters:
		catalog_row (dict): Target catalog entry.
		settings (dict): Catalog settings.

	Returns:
		int: CBV area that the star falls within.
	"""
	# The distance from the camera centre to the corner furthest away:
	camera_radius = np.sqrt( 12**2 + 12**2 )

	# Angular distance from the target to the centre of the camera in degrees:
	dist_to_centre = sphere_distance(
		catalog_row['ra'],
		catalog_row['decl'],
		settings['camera_centre_ra'],
		settings['camera_centre_dec'])

	# Camera and CCD make up the leading digits of the area code:
	area_code = 100*settings['camera'] + 10*settings['ccd']

	# Radial zone: innermost quarter gives 1, outermost gives 4:
	for zone in (1, 2, 3):
		if dist_to_centre < 0.25*zone*camera_radius:
			return area_code + zone
	return area_code + 4
#--------------------------------------------------------------------------------------------------
def edge_distance(row, column, aperture=None, image_shape=None):
	"""
	Distance to nearest edge.

	Parameters:
		row (ndarray): Array of row positions to calculate distance of.
		column (ndarray): Array of column positions to calculate distance of.
		aperture (ndarray, optional): Boolean array indicating pixels to be
			considered "holes" (False) and good (True).
		image_shape (tuple, optional): Shape of aperture image.

	Returns:
		float: Distance in pixels to the nearest edge (outer or internal).

	Raises:
		ValueError: If neither ``aperture`` nor ``image_shape`` is provided.
	"""
	# Basic check of input:
	if image_shape is None and aperture is None:
		# Use ValueError (subclass of Exception) so existing handlers still match:
		raise ValueError("Please provide either aperture or image_shape.")
	if image_shape is None:
		image_shape = aperture.shape

	# Distance from position to outer edges of image.
	# The 0.5 offsets account for pixel centres sitting at integer coordinates:
	EdgeDistOuter = np.minimum.reduce([
		column+0.5,
		row+0.5,
		image_shape[1]-(column+0.5),
		image_shape[0]-(row+0.5)
	])

	# If we have been provided with an aperture and it contains "holes",
	# we should include the distance to these holes:
	if aperture is not None and np.any(~aperture):
		# TODO: This doesn't return the correct answer near internal corners.
		# Euclidean distance from each good (True) pixel to the nearest hole:
		aperture_dist = distance_transform_edt(aperture)
		# Bilinear (kx=ky=1) interpolation of the distance map, so sub-pixel
		# positions can be evaluated; clip at zero after the half-pixel shift:
		EdgeDistFunc = RectBivariateSpline(
			np.arange(image_shape[0]),
			np.arange(image_shape[1]),
			np.clip(aperture_dist-0.5, 0, None),
			kx=1, ky=1)
		return np.minimum(EdgeDistFunc(row, column), EdgeDistOuter)

	return EdgeDistOuter
#--------------------------------------------------------------------------------------------------
def _ffi_todo_wrapper(args):
	"""
	Unpack an argument tuple and forward it to :func:`_ffi_todo`.

	``multiprocessing.Pool.imap_unordered`` passes a single argument to the
	worker, so the ``(input_folder, sector, camera, ccd)`` tuples built in
	:func:`make_todo` are expanded here.
	"""
	input_folder, sector, camera, ccd = args
	return _ffi_todo(input_folder, sector, camera, ccd)
def _ffi_todo(input_folder, sector, camera, ccd):
	"""
	Build TODO-list entries for all catalog targets observed in the
	Full Frame Images (FFIs) of a single sector/camera/CCD.

	The WCS stored in the HDF5 file for this CCD is used to project every
	catalog star brighter than Tmag=15 onto the detector; stars falling on
	silicon are added to the returned table.

	Parameters:
		input_folder (str): Directory holding HDF5 and catalog files.
		sector (int): TESS observing sector.
		camera (int): TESS camera (1-4).
		ccd (int): TESS CCD (1-4).

	Returns:
		astropy.table.Table: Targets to be processed from FFIs, with columns
			starid, sector, camera, ccd, datasource, tmag, cbv_area, edge_dist.

	Raises:
		FileNotFoundError: If the HDF5 or catalog file for this camera/CCD
			could not be uniquely located.
	"""
	logger = logging.getLogger(__name__)
	cat_tmp = []
	# See if there are any FFIs for this camera and ccd.
	# We just check if an HDF5 file exist.
	hdf5_file = find_hdf5_files(input_folder, sector=sector, camera=camera, ccd=ccd)
	if len(hdf5_file) != 1:
		raise FileNotFoundError("Could not find HDF5 file")
	# Load the relevant information from the HDF5 file for this camera and ccd:
	with h5py.File(hdf5_file[0], 'r') as hdf:
		# The WCS is stored either as a group of per-frame headers (in which
		# case the designated reference frame is used) or a single dataset:
		if isinstance(hdf['wcs'], h5py.Group):
			refindx = hdf['wcs'].attrs['ref_frame']
			hdr_string = hdf['wcs']['%04d' % refindx][0]
		else:
			hdr_string = hdf['wcs'][0]
		if not isinstance(hdr_string, str): hdr_string = hdr_string.decode("utf-8") # For Python 3
		wcs = WCS(header=fits.Header().fromstring(hdr_string))
		# Pixel offsets between stored image coordinates and CCD coordinates:
		offset_rows = hdf['images'].attrs.get('PIXEL_OFFSET_ROW', 0)
		offset_cols = hdf['images'].attrs.get('PIXEL_OFFSET_COLUMN', 0)
		image_shape = hdf['images']['0000'].shape
	# Load the corresponding catalog:
	catalog_file = find_catalog_files(input_folder, sector=sector, camera=camera, ccd=ccd)
	if len(catalog_file) != 1:
		raise FileNotFoundError("Catalog file not found: SECTOR=%s, CAMERA=%s, CCD=%s" % (sector, camera, ccd))
	with contextlib.closing(sqlite3.connect(catalog_file[0])) as conn:
		conn.row_factory = sqlite3.Row
		cursor = conn.cursor()
		# Load the settings:
		cursor.execute("SELECT * FROM settings WHERE camera=? AND ccd=? LIMIT 1;", (camera, ccd))
		settings = cursor.fetchone()
		# Find all the stars in the catalog brighter than a certain limit:
		cursor.execute("SELECT starid,tmag,ra,decl FROM catalog WHERE tmag < 15 ORDER BY tmag;")
		for row in cursor.fetchall():
			logger.debug("%011d - %.3f", row['starid'], row['tmag'])
			# Calculate the position of this star on the CCD using the WCS:
			ra_dec = np.atleast_2d([row['ra'], row['decl']])
			x, y = wcs.all_world2pix(ra_dec, 0)[0]
			# Subtract the pixel offset if there is one:
			x -= offset_cols
			y -= offset_rows
			# If the target falls outside silicon, do not add it to the todo list:
			# The reason for the strange 0.5's is that pixel centers are at integers.
			if x < -0.5 or y < -0.5 or x > image_shape[1]-0.5 or y > image_shape[0]-0.5:
				continue
			# Calculate distance from target to edge of image:
			EdgeDist = edge_distance(y, x, image_shape=image_shape)
			# Calculate the Cotrending Basis Vector area the star falls in:
			cbv_area = calc_cbv_area(row, settings)
			# The target is on silicon, so add it to the todo list:
			cat_tmp.append({
				'starid': row['starid'],
				'sector': sector,
				'camera': camera,
				'ccd': ccd,
				'datasource': 'ffi',
				'tmag': row['tmag'],
				'cbv_area': cbv_area,
				'edge_dist': EdgeDist
			})
		cursor.close()
	# Create the TODO list as a table which we will fill with targets:
	return Table(
		rows=cat_tmp,
		names=('starid', 'sector', 'camera', 'ccd', 'datasource', 'tmag', 'cbv_area', 'edge_dist'),
		dtype=('int64', 'int32', 'int32', 'int32', 'S256', 'float32', 'int32', 'float32')
	)
#--------------------------------------------------------------------------------------------------
def _tpf_todo(fname, input_folder=None, cameras=None, ccds=None,
	find_secondary_targets=True, exclude=()):
	"""
	Build TODO-list entries from a single Target Pixel File (TPF).

	The primary target of the file is added if it falls on one of the
	requested cameras/CCDs and is not in the exclude list. If
	``find_secondary_targets`` is enabled, other catalog stars brighter than
	Tmag=15 that fall on observed pixels of the postage stamp are added as
	well, with ``datasource='tpf:<primary starid>'``.

	Parameters:
		fname (str): Path to the target pixel file.
		input_folder (str): Directory holding the catalog files.
		cameras (iterable): TESS cameras (1-4) to process.
		ccds (iterable): TESS CCDs (1-4) to process.
		find_secondary_targets (bool): Include other targets in the stamp?
		exclude (collection): ``(starid, sector, datasource)`` tuples to skip.
			Default changed from a mutable list to an empty tuple; the
			argument is only ever tested for membership.

	Returns:
		astropy.table.Table: Targets found in this TPF (possibly empty).

	Raises:
		FileNotFoundError: If the catalog file could not be uniquely located.
		ValueError: If catalog settings could not be loaded.
	"""
	logger = logging.getLogger(__name__)
	# Create the TODO list as a table which we will fill with targets:
	cat_tmp = []
	empty_table = Table(
		names=('starid', 'sector', 'camera', 'ccd', 'datasource', 'tmag', 'cbv_area', 'edge_dist'),
		dtype=('int64', 'int32', 'int32', 'int32', 'S256', 'float32', 'int32', 'float32')
	)
	logger.debug("Processing TPF file: '%s'", fname)
	with fits.open(fname, memmap=True, mode='readonly') as hdu:
		starid = hdu[0].header['TICID']
		sector = hdu[0].header['SECTOR']
		camera = hdu[0].header['CAMERA']
		ccd = hdu[0].header['CCD']
		# Bit 1 of the APERTURE image flags pixels that were actually collected:
		aperture_observed_pixels = (hdu['APERTURE'].data & 1 != 0)
		if (starid, sector, 'tpf') in exclude or (starid, sector, 'all') in exclude:
			logger.debug("Target excluded: STARID=%d, SECTOR=%d, DATASOURCE=tpf", starid, sector)
			return empty_table
		if camera in cameras and ccd in ccds:
			# Load the corresponding catalog:
			catalog_file = find_catalog_files(input_folder, sector=sector, camera=camera, ccd=ccd)
			if len(catalog_file) != 1:
				raise FileNotFoundError("Catalog file not found: SECTOR=%s, CAMERA=%s, CCD=%s" % (sector, camera, ccd))
			with contextlib.closing(sqlite3.connect(catalog_file[0])) as conn:
				conn.row_factory = sqlite3.Row
				cursor = conn.cursor()
				cursor.execute("SELECT * FROM settings WHERE camera=? AND ccd=? LIMIT 1;", (camera, ccd))
				settings = cursor.fetchone()
				if settings is None:
					logger.error("Settings could not be loaded for camera=%d, ccd=%d.", camera, ccd)
					raise ValueError("Settings could not be loaded for camera=%d, ccd=%d." % (camera, ccd))
				# Get information about star:
				cursor.execute("SELECT * FROM catalog WHERE starid=? LIMIT 1;", (starid, ))
				row = cursor.fetchone()
				if row is None:
					logger.error("Starid %d was not found in catalog (camera=%d, ccd=%d).", starid, camera, ccd)
					return empty_table
				# Calculate CBV area that target falls in:
				cbv_area = calc_cbv_area(row, settings)
				# Add the main target to the list (edge distance is not defined
				# for the primary target; np.nan is the canonical NaN alias):
				cat_tmp.append({
					'starid': starid,
					'sector': sector,
					'camera': camera,
					'ccd': ccd,
					'datasource': 'tpf',
					'tmag': row['tmag'],
					'cbv_area': cbv_area,
					'edge_dist': np.nan
				})
				if find_secondary_targets:
					# Load all other targets in this stamp:
					# Use the WCS of the stamp to find all stars that fall within
					# the footprint of the stamp.
					image_shape = hdu[2].shape
					wcs = WCS(header=hdu[2].header)
					footprint = wcs.calc_footprint(center=False)
					secondary_targets = catalog_sqlite_search_footprint(cursor, footprint, constraints='starid != %d AND tmag < 15' % starid, buffer_size=2)
					for row in secondary_targets:
						# Calculate the position of this star on the CCD using the WCS:
						ra_dec = np.atleast_2d([row['ra'], row['decl']])
						x, y = wcs.all_world2pix(ra_dec, 0)[0]
						# If the target falls outside silicon, do not add it to the todo list:
						# The reason for the strange 0.5's is that pixel centers are at integers.
						if x < -0.5 or y < -0.5 or x > image_shape[1]-0.5 or y > image_shape[0]-0.5:
							continue
						# Make sure that the pixel that the target falls on has actually been
						# collected by the spacecraft:
						if not aperture_observed_pixels[int(np.round(y)), int(np.round(x))]:
							logger.debug("Secondary target rejected. Falls on non-observed pixel. (primary=%d, secondary=%d)", starid, row['starid'])
							continue
						# Calculate distance from target to edge of image:
						EdgeDist = edge_distance(y, x, aperture=aperture_observed_pixels)
						# Add this secondary target to the list:
						# Note that we are storing the starid of the target
						# in which target pixel file the target can be found.
						logger.debug("Adding extra target: TIC %d", row['starid'])
						cat_tmp.append({
							'starid': row['starid'],
							'sector': sector,
							'camera': camera,
							'ccd': ccd,
							'datasource': 'tpf:' + str(starid),
							'tmag': row['tmag'],
							'cbv_area': cbv_area,
							'edge_dist': EdgeDist
						})
				# Close the connection to the catalog SQLite database:
				cursor.close()
		else:
			logger.debug("Target not on requested CAMERA and CCD")
			return empty_table
	# TODO: Could we avoid fixed-size strings in datasource column?
	return Table(
		rows=cat_tmp,
		names=('starid', 'sector', 'camera', 'ccd', 'datasource', 'tmag', 'cbv_area', 'edge_dist'),
		dtype=('int64', 'int32', 'int32', 'int32', 'S256', 'float32', 'int32', 'float32')
	)
#--------------------------------------------------------------------------------------------------
def _to_tuple(value, default):
	"""
	Normalize a camera/ccd specification to a tuple.

	``None`` maps to *default*; a list/tuple/set/array is converted to a
	tuple; a single integer is wrapped in a one-element tuple.
	"""
	if value is None:
		return tuple(default)
	if isinstance(value, (list, tuple, set, np.ndarray)):
		return tuple(value)
	return (value,)

#--------------------------------------------------------------------------------------------------
def make_todo(input_folder=None, cameras=None, ccds=None, overwrite=False,
	find_secondary_targets=True, output_file=None):
	"""
	Create the TODO list which is used by the pipeline to keep track of the
	targets that needs to be processed.

	Will create the file `todo.sqlite` in the directory.

	Parameters:
		input_folder (string, optional): Input folder to create TODO list for.
			If ``None``, the input directory in the environment variable
			``TESSPHOT_INPUT`` is used.
		cameras (iterable of integers, optional): TESS camera number (1-4). If ``None``,
			all cameras will be included.
		ccds (iterable of integers, optional): TESS CCD number (1-4). If ``None``,
			all cameras will be included.
		overwrite (boolean): Overwrite existing TODO file. Default=``False``.
		find_secondary_targets (boolean): Should secondary targets from TPFs be included?
			Default=True.
		output_file (string, optional): The file path where the output file should be saved.
			If not specified, the file will be saved into the input directory.
			Should only be used for testing, since the file would (probably) otherwise end up with
			a wrong file name for running with the rest of the pipeline.

	Raises:
		NotADirectoryError: If the specified ``input_folder`` is not an existing directory.

	.. codeauthor:: <NAME> <<EMAIL>>
	"""
	logger = logging.getLogger(__name__)

	# Check the input folder, and load the default if not provided:
	if input_folder is None:
		input_folder = os.environ.get('TESSPHOT_INPUT', os.path.join(os.path.dirname(__file__), 'tests', 'input'))

	# Check that the given input directory is indeed a directory:
	if not os.path.isdir(input_folder):
		raise NotADirectoryError("The given path does not exist or is not a directory")

	# Make sure cameras and ccds are tuples.
	# BUGFIX: previously an iterable argument was wrapped in an extra tuple
	# ("(cameras, )"), so despite the documented interface only None or a
	# single integer actually worked:
	cameras = _to_tuple(cameras, (1, 2, 3, 4))
	ccds = _to_tuple(ccds, (1, 2, 3, 4))

	# The TODO file that we want to create. Delete it if it already exits:
	if output_file is None:
		todo_file = os.path.join(input_folder, 'todo.sqlite')
	else:
		output_file = os.path.abspath(output_file)
		if not output_file.endswith('.sqlite'):
			output_file = output_file + '.sqlite'
		todo_file = output_file

	if os.path.exists(todo_file):
		if overwrite:
			os.remove(todo_file)
		else:
			logger.info("TODO file already exists")
			return

	# Number of threads available for parallel processing:
	threads_max = int(os.environ.get('SLURM_CPUS_PER_TASK', multiprocessing.cpu_count()))

	# Load file with targets to be excluded from processing for some reason:
	exclude_file = os.path.join(os.path.dirname(__file__), 'data', 'todolist-exclude.dat')
	exclude = np.genfromtxt(exclude_file, usecols=(0,1,2), dtype=None, encoding='utf-8')
	exclude = set([tuple(e) for e in exclude])

	# Create the TODO list as a table which we will fill with targets:
	cat = Table(
		names=('starid', 'sector', 'camera', 'ccd', 'datasource', 'tmag', 'cbv_area', 'edge_dist'),
		dtype=('int64', 'int32', 'int32', 'int32', 'S256', 'float32', 'int32', 'float32')
	)

	sectors = set()

	# Load list of all Target Pixel files in the directory:
	tpf_files = find_tpf_files(input_folder)
	logger.info("Number of TPF files: %d", len(tpf_files))

	# TODO: Could we change this so we dont have to parse the filename?
	regex_tpf = re.compile(r'-s(\d+)[-_]')
	for fname in tpf_files:
		m = regex_tpf.search(os.path.basename(fname))
		sectors.add(int(m.group(1)))

	# Find list of all HDF5 files:
	hdf_files = find_hdf5_files(input_folder, camera=cameras, ccd=ccds)
	logger.info("Number of HDF5 files: %d", len(hdf_files))

	# TODO: Could we change this so we dont have to parse the filename?
	hdf_inputs = []
	regex_hdf = re.compile(r'^sector(\d+)_camera(\d)_ccd(\d)\.hdf5$')
	for fname in hdf_files:
		m = regex_hdf.match(os.path.basename(fname))
		sectors.add(int(m.group(1)))
		hdf_inputs.append( (input_folder, int(m.group(1)), int(m.group(2)), int(m.group(3))) )

	# Make sure that catalog files are available in the input directory.
	# If they are not already, they will be downloaded from the cache:
	for sector, camera, ccd in itertools.product(sectors, cameras, ccds):
		download_catalogs(input_folder, sector, camera=camera, ccd=ccd)

	# Add the target pixel files to the TODO list:
	if len(tpf_files) > 0:
		# Open a pool of workers:
		logger.info("Starting pool of workers for TPFs...")
		threads = min(threads_max, len(tpf_files)) # No reason to use more than the number of jobs
		logger.info("Using %d processes.", threads)
		if threads > 1:
			pool = multiprocessing.Pool(threads)
			m = pool.imap_unordered
		else:
			m = map

		# Run the TPF files in parallel:
		tic = default_timer()
		_tpf_todo_wrapper = functools.partial(_tpf_todo,
			input_folder=input_folder,
			cameras=cameras,
			ccds=ccds,
			find_secondary_targets=find_secondary_targets,
			exclude=exclude)
		for cat2 in m(_tpf_todo_wrapper, tpf_files):
			cat = vstack([cat, cat2], join_type='exact')
		if threads > 1:
			pool.close()
			pool.join()

		# Amount of time it took to process TPF files:
		toc = default_timer()
		logger.info("Elaspsed time: %f seconds (%f per file)", toc-tic, (toc-tic)/len(tpf_files))

		# Remove secondary TPF targets if they are also the primary target:
		indx_remove = np.zeros(len(cat), dtype='bool')
		cat.add_index('starid')
		for k, row in enumerate(cat):
			if row['datasource'].startswith('tpf:'):
				indx = cat.loc['starid', row['starid']]['datasource'] == 'tpf'
				if np.any(indx):
					indx_remove[k] = True
		cat.remove_indices('starid')
		logger.info("Removing %d secondary TPF files as they are also primary", np.sum(indx_remove))
		cat = cat[~indx_remove]

	if len(hdf_files) > 0:
		# Open a pool of workers:
		logger.info("Starting pool of workers for FFIs...")
		threads = min(threads_max, len(hdf_inputs)) # No reason to use more than the number of jobs
		logger.info("Using %d processes.", threads)
		if threads > 1:
			pool = multiprocessing.Pool(threads)
			m = pool.imap_unordered
		else:
			m = map

		tic = default_timer()
		ccds_done = 0
		for cat2 in m(_ffi_todo_wrapper, hdf_inputs):
			cat = vstack([cat, cat2], join_type='exact')
			ccds_done += 1
			logger.info("CCDs done: %d/%d", ccds_done, len(hdf_inputs))

		# Amount of time it took to process FFI files:
		toc = default_timer()
		logger.info("Elaspsed time: %f seconds (%f per file)", toc-tic, (toc-tic)/len(hdf_inputs))
		if threads > 1:
			pool.close()
			pool.join()

	# Check if any targets were found:
	if len(cat) == 0:
		logger.error("No targets found")
		return

	# Remove duplicates!
	logger.info("Removing duplicate entries...")
	_, idx = np.unique(cat[('starid', 'sector', 'camera', 'ccd', 'datasource')], return_index=True, axis=0)
	cat = cat[np.sort(idx)]

	# If the target is present in more than one TPF file, pick the one
	# where the target is the furthest from the edge of the image
	# and discard the target in all the other TPFs:
	if find_secondary_targets:
		# Add an index column to the table for later use:
		cat.add_column(Column(name='priority', data=np.arange(len(cat))))

		# Create index that will only find secondary targets:
		indx = [row['datasource'].strip().startswith('tpf:') for row in cat]

		# Group the table on the starids and find groups with more than 1 target:
		# Equivalent to the SQL code "GROUP BY starid HAVING COUNT(*) > 1"
		remove_indx = []
		for g in cat[indx].group_by('starid').groups:
			if len(g) > 1:
				# Find the target farthest from the edge and mark the rest
				# for removal:
				logger.debug(g)
				im = np.argmax(g['edge_dist'])
				ir = np.ones(len(g), dtype='bool')
				ir[im] = False
				remove_indx += list(g[ir]['priority'])

		# Remove the list of duplicate secondary targets:
		logger.info("Removing %d secondary targets as duplicates.", len(remove_indx))
		logger.debug(remove_indx)
		cat.remove_rows(remove_indx)

	# Exclude targets from exclude list:
	# Add an index and use that to search for starid, and then further check sector and datasource:
	cat.add_index('starid')
	remove_indx = []
	for ex in exclude:
		try:
			indx = np.atleast_1d(cat.loc_indices['starid', ex[0]])
		except KeyError:
			indx = []
		for i in indx:
			if cat[i]['sector'] == ex[1] and cat[i]['datasource'] == ex[2]:
				remove_indx.append(i)
	if remove_indx:
		del cat[remove_indx]
	cat.remove_indices('starid')

	# Load file with specific method settings and create lookup-table of them:
	methods_file = os.path.join(os.path.dirname(__file__), 'data', 'todolist-methods.dat')
	methods_file = np.genfromtxt(methods_file, usecols=(0,1,2,3), dtype=None, encoding='utf-8')
	methods = {}
	for m in methods_file:
		methods[(m[0], m[1], m[2])] = m[3].strip().lower()

	# Sort the final list:
	cat.sort('tmag')

	# Write the TODO list to the SQLite database file:
	logger.info("Writing TODO file...")
	with contextlib.closing(sqlite3.connect(todo_file)) as conn:
		cursor = conn.cursor()

		# Change settings of SQLite file:
		cursor.execute("PRAGMA page_size=4096;")
		cursor.execute("PRAGMA foreign_keys=ON;")
		cursor.execute("PRAGMA locking_mode=EXCLUSIVE;")
		cursor.execute("PRAGMA journal_mode=TRUNCATE;")

		# Create TODO-list table:
		cursor.execute("""CREATE TABLE todolist (
			priority INTEGER PRIMARY KEY ASC NOT NULL,
			starid INTEGER NOT NULL,
			sector INTEGER NOT NULL,
			datasource TEXT NOT NULL DEFAULT 'ffi',
			camera INTEGER NOT NULL,
			ccd INTEGER NOT NULL,
			method TEXT DEFAULT NULL,
			tmag REAL,
			status INTEGER DEFAULT NULL,
			cbv_area INTEGER NOT NULL
		);""")

		for pri, row in enumerate(cat):
			# Find if there is a specific method defined for this target:
			method = methods.get((int(row['starid']), int(row['sector']), row['datasource'].strip()), None)

			# For very bright stars, we might as well just use Halo photometry right away:
			if method is None and row['tmag'] <= 2.0 and row['datasource'] == 'ffi':
				method = 'halo'

			# Add target to TODO-list:
			cursor.execute("INSERT INTO todolist (priority,starid,sector,camera,ccd,datasource,tmag,cbv_area,method) VALUES (?,?,?,?,?,?,?,?,?);", (
				pri+1,
				int(row['starid']),
				int(row['sector']),
				int(row['camera']),
				int(row['ccd']),
				row['datasource'].strip(),
				float(row['tmag']),
				int(row['cbv_area']),
				method
			))
		conn.commit()

		cursor.execute("CREATE UNIQUE INDEX unique_target_idx ON todolist (starid, datasource, sector, camera, ccd);")
		cursor.execute("CREATE INDEX status_idx ON todolist (status);")
		cursor.execute("CREATE INDEX starid_idx ON todolist (starid);")
		conn.commit()

		# Analyze the tables for better query planning:
		cursor.execute("ANALYZE;")
		conn.commit()

		# Run a VACUUM of the table which will force a recreation of the
		# underlying "pages" of the file.
		# Please note that we are changing the "isolation_level" of the
		# connection here, but since we are closing the connection just
		# after, we are not changing it back:
		conn.isolation_level = None
		cursor.execute("VACUUM;")

		# Close connection:
		cursor.close()

	logger.info("TODO done.")
| [
"logging.getLogger",
"numpy.clip",
"numpy.sqrt",
"astropy.table.Table",
"re.compile",
"multiprocessing.cpu_count",
"astropy.io.fits.open",
"astropy.table.vstack",
"numpy.genfromtxt",
"numpy.arange",
"os.remove",
"os.path.exists",
"numpy.atleast_2d",
"numpy.sort",
"itertools.product",
"... | [((1162, 1188), 'numpy.sqrt', 'np.sqrt', (['(12 ** 2 + 12 ** 2)'], {}), '(12 ** 2 + 12 ** 2)\n', (1169, 1188), True, 'import numpy as np\n'), ((2569, 2680), 'numpy.minimum.reduce', 'np.minimum.reduce', (['[column + 0.5, row + 0.5, image_shape[1] - (column + 0.5), image_shape[0] -\n (row + 0.5)]'], {}), '([column + 0.5, row + 0.5, image_shape[1] - (column + 0.5),\n image_shape[0] - (row + 0.5)])\n', (2586, 2680), True, 'import numpy as np\n'), ((3424, 3451), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3441, 3451), False, 'import logging\n'), ((6215, 6417), 'astropy.table.Table', 'Table', ([], {'rows': 'cat_tmp', 'names': "('starid', 'sector', 'camera', 'ccd', 'datasource', 'tmag', 'cbv_area',\n 'edge_dist')", 'dtype': "('int64', 'int32', 'int32', 'int32', 'S256', 'float32', 'int32', 'float32')"}), "(rows=cat_tmp, names=('starid', 'sector', 'camera', 'ccd',\n 'datasource', 'tmag', 'cbv_area', 'edge_dist'), dtype=('int64', 'int32',\n 'int32', 'int32', 'S256', 'float32', 'int32', 'float32'))\n", (6220, 6417), False, 'from astropy.table import Table, vstack, Column\n'), ((6639, 6666), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6656, 6666), False, 'import logging\n'), ((6765, 6953), 'astropy.table.Table', 'Table', ([], {'names': "('starid', 'sector', 'camera', 'ccd', 'datasource', 'tmag', 'cbv_area',\n 'edge_dist')", 'dtype': "('int64', 'int32', 'int32', 'int32', 'S256', 'float32', 'int32', 'float32')"}), "(names=('starid', 'sector', 'camera', 'ccd', 'datasource', 'tmag',\n 'cbv_area', 'edge_dist'), dtype=('int64', 'int32', 'int32', 'int32',\n 'S256', 'float32', 'int32', 'float32'))\n", (6770, 6953), False, 'from astropy.table import Table, vstack, Column\n'), ((10957, 11159), 'astropy.table.Table', 'Table', ([], {'rows': 'cat_tmp', 'names': "('starid', 'sector', 'camera', 'ccd', 'datasource', 'tmag', 'cbv_area',\n 'edge_dist')", 'dtype': "('int64', 'int32', 'int32', 'int32', 'S256', 'float32', 
'int32', 'float32')"}), "(rows=cat_tmp, names=('starid', 'sector', 'camera', 'ccd',\n 'datasource', 'tmag', 'cbv_area', 'edge_dist'), dtype=('int64', 'int32',\n 'int32', 'int32', 'S256', 'float32', 'int32', 'float32'))\n", (10962, 11159), False, 'from astropy.table import Table, vstack, Column\n'), ((12595, 12622), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (12612, 12622), False, 'import logging\n'), ((13483, 13508), 'os.path.exists', 'os.path.exists', (['todo_file'], {}), '(todo_file)\n', (13497, 13508), False, 'import os\n'), ((13929, 14005), 'numpy.genfromtxt', 'np.genfromtxt', (['exclude_file'], {'usecols': '(0, 1, 2)', 'dtype': 'None', 'encoding': '"""utf-8"""'}), "(exclude_file, usecols=(0, 1, 2), dtype=None, encoding='utf-8')\n", (13942, 14005), True, 'import numpy as np\n'), ((14124, 14312), 'astropy.table.Table', 'Table', ([], {'names': "('starid', 'sector', 'camera', 'ccd', 'datasource', 'tmag', 'cbv_area',\n 'edge_dist')", 'dtype': "('int64', 'int32', 'int32', 'int32', 'S256', 'float32', 'int32', 'float32')"}), "(names=('starid', 'sector', 'camera', 'ccd', 'datasource', 'tmag',\n 'cbv_area', 'edge_dist'), dtype=('int64', 'int32', 'int32', 'int32',\n 'S256', 'float32', 'int32', 'float32'))\n", (14129, 14312), False, 'from astropy.table import Table, vstack, Column\n'), ((14568, 14594), 're.compile', 're.compile', (['"""-s(\\\\d+)[-_]"""'], {}), "('-s(\\\\d+)[-_]')\n", (14578, 14594), False, 'import re\n'), ((14958, 15014), 're.compile', 're.compile', (['"""^sector(\\\\d+)_camera(\\\\d)_ccd(\\\\d)\\\\.hdf5$"""'], {}), "('^sector(\\\\d+)_camera(\\\\d)_ccd(\\\\d)\\\\.hdf5$')\n", (14968, 15014), False, 'import re\n'), ((15371, 15412), 'itertools.product', 'itertools.product', (['sectors', 'cameras', 'ccds'], {}), '(sectors, cameras, ccds)\n', (15388, 15412), False, 'import itertools\n'), ((17891, 17987), 'numpy.unique', 'np.unique', (["cat['starid', 'sector', 'camera', 'ccd', 'datasource']"], {'return_index': '(True)', 
'axis': '(0)'}), "(cat['starid', 'sector', 'camera', 'ccd', 'datasource'],\n return_index=True, axis=0)\n", (17900, 17987), True, 'import numpy as np\n'), ((19786, 19865), 'numpy.genfromtxt', 'np.genfromtxt', (['methods_file'], {'usecols': '(0, 1, 2, 3)', 'dtype': 'None', 'encoding': '"""utf-8"""'}), "(methods_file, usecols=(0, 1, 2, 3), dtype=None, encoding='utf-8')\n", (19799, 19865), True, 'import numpy as np\n'), ((2826, 2843), 'numpy.any', 'np.any', (['(~aperture)'], {}), '(~aperture)\n', (2832, 2843), True, 'import numpy as np\n'), ((2935, 2967), 'scipy.ndimage.morphology.distance_transform_edt', 'distance_transform_edt', (['aperture'], {}), '(aperture)\n', (2957, 2967), False, 'from scipy.ndimage.morphology import distance_transform_edt\n'), ((3807, 3835), 'h5py.File', 'h5py.File', (['hdf5_file[0]', '"""r"""'], {}), "(hdf5_file[0], 'r')\n", (3816, 3835), False, 'import h5py\n'), ((7010, 7056), 'astropy.io.fits.open', 'fits.open', (['fname'], {'memmap': '(True)', 'mode': '"""readonly"""'}), "(fname, memmap=True, mode='readonly')\n", (7019, 7056), False, 'from astropy.io import fits\n'), ((12896, 12923), 'os.path.isdir', 'os.path.isdir', (['input_folder'], {}), '(input_folder)\n', (12909, 12923), False, 'import os\n'), ((13275, 13316), 'os.path.join', 'os.path.join', (['input_folder', '"""todo.sqlite"""'], {}), "(input_folder, 'todo.sqlite')\n", (13287, 13316), False, 'import os\n'), ((13340, 13368), 'os.path.abspath', 'os.path.abspath', (['output_file'], {}), '(output_file)\n', (13355, 13368), False, 'import os\n'), ((13859, 13884), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (13874, 13884), False, 'import os\n'), ((15923, 15938), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (15936, 15938), False, 'from timeit import default_timer\n'), ((15961, 16112), 'functools.partial', 'functools.partial', (['_tpf_todo'], {'input_folder': 'input_folder', 'cameras': 'cameras', 'ccds': 'ccds', 'find_secondary_targets': 
'find_secondary_targets', 'exclude': 'exclude'}), '(_tpf_todo, input_folder=input_folder, cameras=cameras,\n ccds=ccds, find_secondary_targets=find_secondary_targets, exclude=exclude)\n', (15978, 16112), False, 'import functools\n'), ((16328, 16343), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (16341, 16343), False, 'from timeit import default_timer\n'), ((17286, 17301), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (17299, 17301), False, 'from timeit import default_timer\n'), ((17553, 17568), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (17566, 17568), False, 'from timeit import default_timer\n'), ((17997, 18009), 'numpy.sort', 'np.sort', (['idx'], {}), '(idx)\n', (18004, 18009), True, 'import numpy as np\n'), ((19711, 19736), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (19726, 19736), False, 'import os\n'), ((3009, 3034), 'numpy.arange', 'np.arange', (['image_shape[0]'], {}), '(image_shape[0])\n', (3018, 3034), True, 'import numpy as np\n'), ((3039, 3064), 'numpy.arange', 'np.arange', (['image_shape[1]'], {}), '(image_shape[1])\n', (3048, 3064), True, 'import numpy as np\n'), ((3069, 3106), 'numpy.clip', 'np.clip', (['(aperture_dist - 0.5)', '(0)', 'None'], {}), '(aperture_dist - 0.5, 0, None)\n', (3076, 3106), True, 'import numpy as np\n'), ((4621, 4653), 'sqlite3.connect', 'sqlite3.connect', (['catalog_file[0]'], {}), '(catalog_file[0])\n', (4636, 4653), False, 'import sqlite3\n'), ((5201, 5240), 'numpy.atleast_2d', 'np.atleast_2d', (["[row['ra'], row['decl']]"], {}), "([row['ra'], row['decl']])\n", (5214, 5240), True, 'import numpy as np\n'), ((13529, 13549), 'os.remove', 'os.remove', (['todo_file'], {}), '(todo_file)\n', (13538, 13549), False, 'import os\n'), ((13725, 13752), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (13750, 13752), False, 'import multiprocessing\n'), ((14643, 14666), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', 
(14659, 14666), False, 'import os\n'), ((15059, 15082), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (15075, 15082), False, 'import os\n'), ((15803, 15832), 'multiprocessing.Pool', 'multiprocessing.Pool', (['threads'], {}), '(threads)\n', (15823, 15832), False, 'import multiprocessing\n'), ((16181, 16219), 'astropy.table.vstack', 'vstack', (['[cat, cat2]'], {'join_type': '"""exact"""'}), "([cat, cat2], join_type='exact')\n", (16187, 16219), False, 'from astropy.table import Table, vstack, Column\n'), ((16878, 16897), 'numpy.sum', 'np.sum', (['indx_remove'], {}), '(indx_remove)\n', (16884, 16897), True, 'import numpy as np\n'), ((17201, 17230), 'multiprocessing.Pool', 'multiprocessing.Pool', (['threads'], {}), '(threads)\n', (17221, 17230), False, 'import multiprocessing\n'), ((17375, 17413), 'astropy.table.vstack', 'vstack', (['[cat, cat2]'], {'join_type': '"""exact"""'}), "([cat, cat2], join_type='exact')\n", (17381, 17413), False, 'from astropy.table import Table, vstack, Column\n'), ((19345, 19392), 'numpy.atleast_1d', 'np.atleast_1d', (["cat.loc_indices['starid', ex[0]]"], {}), "(cat.loc_indices['starid', ex[0]])\n", (19358, 19392), True, 'import numpy as np\n'), ((20112, 20138), 'sqlite3.connect', 'sqlite3.connect', (['todo_file'], {}), '(todo_file)\n', (20127, 20138), False, 'import sqlite3\n'), ((12778, 12803), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (12793, 12803), False, 'import os\n'), ((16732, 16744), 'numpy.any', 'np.any', (['indx'], {}), '(indx)\n', (16738, 16744), True, 'import numpy as np\n'), ((18810, 18835), 'numpy.argmax', 'np.argmax', (["g['edge_dist']"], {}), "(g['edge_dist'])\n", (18819, 18835), True, 'import numpy as np\n'), ((7784, 7816), 'sqlite3.connect', 'sqlite3.connect', (['catalog_file[0]'], {}), '(catalog_file[0])\n', (7799, 7816), False, 'import sqlite3\n'), ((9078, 9103), 'astropy.wcs.WCS', 'WCS', ([], {'header': 'hdu[2].header'}), '(header=hdu[2].header)\n', (9081, 9103), 
False, 'from astropy.wcs import WCS\n'), ((4126, 4139), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (4137, 4139), False, 'from astropy.io import fits\n'), ((9417, 9456), 'numpy.atleast_2d', 'np.atleast_2d', (["[row['ra'], row['decl']]"], {}), "([row['ra'], row['decl']])\n", (9430, 9456), True, 'import numpy as np\n'), ((9915, 9926), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (9923, 9926), True, 'import numpy as np\n'), ((9933, 9944), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (9941, 9944), True, 'import numpy as np\n')] |
import inspect
import logging
import warnings
import numpy as np
import astropy.units as u
from spectral_cube import SpectralCube
from . import scDerivativeRoutines as scdr
# Silence all warnings module-wide (spectral_cube / astropy are chatty).
# NOTE(review): this also suppresses warnings for any code that imports
# this module — consider scoping with warnings.catch_warnings instead.
warnings.filterwarnings("ignore")
def _nicestr(quantity):
if quantity.value == int(quantity.value):
return(str(int(quantity.value))+' '+str(quantity.unit))
else:
return(str(quantity))
def _func_and_kwargs_for_moment(moment_tag=None):
    """
    Return the writer function and default kwargs for a moment tag.

    Parameters
    ----------
    moment_tag : str or None
        One of 'mom0', 'mom1', 'mom2', 'ew', 'vquad', 'vpeak', 'tpeak',
        'mom1wprior'.

    Returns
    -------
    tuple
        (writer function, default kwargs dict), or (None, None) when the
        tag is None or not recognized.
    """
    if moment_tag is None:
        return (None, None)
    # Map each tag to its writer and default keyword arguments. The table
    # is rebuilt on every call so callers may freely mutate the returned
    # kwargs dict without corrupting shared state.
    dispatch = {
        'mom0': (scdr.write_moment0, {'unit': u.K * u.km / u.s}),
        'mom1': (scdr.write_moment1, {'unit': u.km / u.s}),
        'mom2': (scdr.write_moment2, {'unit': u.km / u.s}),
        'ew': (scdr.write_ew, {'unit': u.km / u.s}),
        'vquad': (scdr.write_vquad, {'unit': u.km / u.s}),
        'vpeak': (scdr.write_vmax, {'unit': u.km / u.s}),
        'tpeak': (scdr.write_tmax, {'unit': u.K}),
        'mom1wprior': (scdr.write_moment1_hybrid, {'unit': u.km / u.s}),
    }
    return dispatch.get(moment_tag, (None, None))
def moment_tag_known(moment_tag=None):
    """
    Report whether this module knows how to compute the given moment tag.
    """
    func, _ = _func_and_kwargs_for_moment(moment_tag)
    return func is not None
def moment_generator(
        cubein, mask=None, noise=None,
        moment=None, momkwargs=None,
        outfile=None, errorfile=None,
        channel_correlation=None,
        context=None, assignkunits=False):
    """
    Generate one moment map from input cube, noise, and masks.

    Parameters
    ----------
    cubein : str or SpectralCube
        Input cube, or a path readable by ``SpectralCube.read``.
    mask : str or SpectralCube, optional
        Mask cube (or path). Assumed to share astrometry with the data
        cube; no reprojection is performed.
    noise : str or SpectralCube, optional
        Noise cube (or path) used for error estimation. Passed as ``rms``
        to the moment writer (``None`` if not supplied).
    moment : str
        Moment tag understood by ``_func_and_kwargs_for_moment``
        (e.g. 'mom0', 'mom1', 'tpeak').
    momkwargs : dict, optional
        Extra keyword arguments overriding the defaults for this moment.
    outfile, errorfile : str, optional
        Output file names for the moment map and its error map.
    channel_correlation : optional
        Channel-to-channel correlation, forwarded to the writer.
    context, assignkunits : optional
        Currently unused; kept for interface compatibility.

    Returns
    -------
    tuple
        (moment_map, error_map) as returned by the underlying writer.

    Raises
    ------
    NotImplementedError
        If the moment tag is unknown, ``momkwargs`` is not a dict, or an
        input has an unrecognized type.
    """
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Set up the call
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Get the relevant function and keyword arguments for this moment
    func, kwargs = _func_and_kwargs_for_moment(moment)
    if func is None:
        logging.error("Moment tag not recognized: " + str(moment))
        raise NotImplementedError
    # Merge any user-supplied kwargs over the defaults
    if momkwargs is not None:
        if not isinstance(momkwargs, dict):
            logging.error("Type of momkwargs should be dictionary.")
            raise NotImplementedError
        kwargs.update(momkwargs)
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Read in the data
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Read in the cube (if needed)
    if isinstance(cubein, str):
        cube = SpectralCube.read(cubein)
    elif isinstance(cubein, SpectralCube):
        cube = cubein
    else:
        logging.error('Unrecognized input type for cubein')
        raise NotImplementedError
    cube.allow_huge_operations = True
    # Force Kelvin. We will be unit agnostic later.
    cube = cube.to(u.K)
    # Attach a mask if needed
    if mask is not None:
        if isinstance(mask, str):
            mask = SpectralCube.read(mask)
        elif isinstance(mask, SpectralCube):
            pass
        else:
            logging.error('Unrecognized input type for mask')
            raise NotImplementedError
        # Ensure the mask is boolean and attach it to the cube. This just
        # assumes a match in astrometry. Could add reprojection here or
        # (better) build a masking routine to apply masks with arbitrary
        # astrometry.
        # BUG FIX: the np.bool alias was removed in numpy >= 1.24; use the
        # builtin bool dtype instead.
        mask = np.array(mask.filled_data[:].value, dtype=bool)
        cube = cube.with_mask(mask, inherit_mask=False)
    # Read in the noise (if present)
    # BUG FIX: noisecube must be defined even when no noise cube is
    # supplied; previously the rms=noisecube call below raised NameError.
    noisecube = None
    if noise is not None:
        if isinstance(noise, str):
            noisecube = SpectralCube.read(noise)
        elif isinstance(noise, SpectralCube):
            noisecube = noise
        else:
            logging.error('Unrecognized input type for noise.')
            raise NotImplementedError
        noisecube.allow_huge_operations = True
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # Call the moment generation
    # &%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
    # The previous inspect.getfullargspec branch executed the same call in
    # both arms (the context kwarg was commented out), so it is collapsed
    # into a single call here.
    moment_map, error_map = func(
        cube, rms=noisecube,
        outfile=outfile, errorfile=errorfile,
        channel_correlation=channel_correlation,
        **kwargs)
    return (moment_map, error_map)
| [
"spectral_cube.SpectralCube.read",
"inspect.getfullargspec",
"numpy.array",
"logging.error",
"warnings.filterwarnings"
] | [((176, 209), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (199, 209), False, 'import warnings\n'), ((2943, 2968), 'spectral_cube.SpectralCube.read', 'SpectralCube.read', (['cubein'], {}), '(cubein)\n', (2960, 2968), False, 'from spectral_cube import SpectralCube\n'), ((3829, 3879), 'numpy.array', 'np.array', (['mask.filled_data[:].value'], {'dtype': 'np.bool'}), '(mask.filled_data[:].value, dtype=np.bool)\n', (3837, 3879), True, 'import numpy as np\n'), ((4535, 4563), 'inspect.getfullargspec', 'inspect.getfullargspec', (['func'], {}), '(func)\n', (4557, 4563), False, 'import inspect\n'), ((2527, 2583), 'logging.error', 'logging.error', (['"""Type of momkwargs should be dictionary."""'], {}), "('Type of momkwargs should be dictionary.')\n", (2540, 2583), False, 'import logging\n'), ((3048, 3099), 'logging.error', 'logging.error', (['"""Unrecognized input type for cubein"""'], {}), "('Unrecognized input type for cubein')\n", (3061, 3099), False, 'import logging\n'), ((3367, 3390), 'spectral_cube.SpectralCube.read', 'SpectralCube.read', (['mask'], {}), '(mask)\n', (3384, 3390), False, 'from spectral_cube import SpectralCube\n'), ((4063, 4087), 'spectral_cube.SpectralCube.read', 'SpectralCube.read', (['noise'], {}), '(noise)\n', (4080, 4087), False, 'from spectral_cube import SpectralCube\n'), ((3482, 3531), 'logging.error', 'logging.error', (['"""Unrecognized input type for mask"""'], {}), "('Unrecognized input type for mask')\n", (3495, 3531), False, 'import logging\n'), ((4186, 4237), 'logging.error', 'logging.error', (['"""Unrecognized input type for noise."""'], {}), "('Unrecognized input type for noise.')\n", (4199, 4237), False, 'import logging\n')] |
import numpy as np
from sklearn.cluster import KMeans, AgglomerativeClustering
from netaddr import IPAddress, IPNetwork
def agglomerative(ipaddr: "list[IPAddress]", max_n_clusters):
    """
    Aggregate IP addresses into CIDR blocks via agglomerative clustering.

    Parameters
    ----------
    ipaddr : list[IPAddress]
        Addresses to aggregate.
    max_n_clusters : int
        Number of clusters to form (upper bound on the number of CIDRs).

    Returns
    -------
    list[IPNetwork]
        Sorted, de-duplicated CIDR blocks that together cover every input
        address.
    """
    hie = AgglomerativeClustering(n_clusters=max_n_clusters).fit(
        np.array([[int(_)] for _ in ipaddr]))
    addrs = np.array(ipaddr)
    cidrs = []
    for c in range(max_n_clusters):
        members = addrs[hie.labels_ == c]
        # XOR of the cluster's min/max address: the leading zero bits are
        # the prefix shared by every member.
        xor_bits = f"{int(min(members)) ^ int(max(members)):032b}"
        first_diff = xor_bits.find('1')
        plen = 32 if first_diff == -1 else first_diff
        cidrs.append(IPNetwork(f"{max(members)}/{plen}").cidr)
    cidrs = sorted(cidrs)
    aggr = []
    for ip in ipaddr:
        covering = [net for net in cidrs if ip in net]
        # By construction every address belongs to some cluster's CIDR;
        # raise (not assert, which -O strips) if that invariant breaks.
        if not covering:
            raise ValueError(f"no covering CIDR found for {ip}")
        aggr.append(covering[0])
    return sorted(set(aggr))
def kmeans(ipaddr: "list[IPAddress]", max_n_clusters):
    """
    Aggregate IP addresses into CIDR blocks via k-means clustering.

    Parameters
    ----------
    ipaddr : list[IPAddress]
        Addresses to aggregate.
    max_n_clusters : int
        Number of clusters to form (upper bound on the number of CIDRs).

    Returns
    -------
    list[IPNetwork]
        Sorted, de-duplicated CIDR blocks that together cover every input
        address.

    Notes
    -----
    NOTE(review): KMeans is not seeded here (no random_state), so results
    may vary between runs — confirm whether determinism is required.
    """
    hie = KMeans(n_clusters=max_n_clusters).fit(
        np.array([[int(_)] for _ in ipaddr]))
    addrs = np.array(ipaddr)
    cidrs = []
    for c in range(max_n_clusters):
        members = addrs[hie.labels_ == c]
        # XOR of the cluster's min/max address: the leading zero bits are
        # the prefix shared by every member.
        xor_bits = f"{int(min(members)) ^ int(max(members)):032b}"
        first_diff = xor_bits.find('1')
        plen = 32 if first_diff == -1 else first_diff
        cidrs.append(IPNetwork(f"{max(members)}/{plen}").cidr)
    cidrs = sorted(cidrs)
    aggr = []
    for ip in ipaddr:
        covering = [net for net in cidrs if ip in net]
        # By construction every address belongs to some cluster's CIDR;
        # raise (not assert, which -O strips) if that invariant breaks.
        if not covering:
            raise ValueError(f"no covering CIDR found for {ip}")
        aggr.append(covering[0])
    return sorted(set(aggr))
| [
"sklearn.cluster.KMeans",
"numpy.array",
"sklearn.cluster.AgglomerativeClustering"
] | [((188, 238), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'max_n_clusters'}), '(n_clusters=max_n_clusters)\n', (211, 238), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((350, 366), 'numpy.array', 'np.array', (['ipaddr'], {}), '(ipaddr)\n', (358, 366), True, 'import numpy as np\n'), ((859, 892), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'max_n_clusters'}), '(n_clusters=max_n_clusters)\n', (865, 892), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((1004, 1020), 'numpy.array', 'np.array', (['ipaddr'], {}), '(ipaddr)\n', (1012, 1020), True, 'import numpy as np\n')] |
"""
Tests for SimpleFFCEngine
"""
from __future__ import division
from unittest import TestCase
from itertools import product
from numpy import (
full,
isnan,
nan,
)
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
date_range,
Int64Index,
MultiIndex,
rolling_mean,
Series,
Timestamp,
)
from pandas.util.testing import assert_frame_equal
from testfixtures import TempDirectory
from zipline.data.equities import USEquityPricing
from zipline.data.ffc.synthetic import (
ConstantLoader,
MultiColumnLoader,
NullAdjustmentReader,
SyntheticDailyBarWriter,
)
from zipline.data.ffc.frame import (
DataFrameFFCLoader,
MULTIPLY,
)
from zipline.data.ffc.loaders.us_equity_pricing import (
BcolzDailyBarReader,
USEquityPricingLoader,
)
from zipline.finance.trading import TradingEnvironment
from zipline.modelling.engine import SimpleFFCEngine
from zipline.modelling.factor import TestingFactor
from zipline.modelling.factor.technical import (
MaxDrawdown,
SimpleMovingAverage,
)
from zipline.utils.lazyval import lazyval
from zipline.utils.test_utils import (
make_rotating_asset_info,
make_simple_asset_info,
product_upper_triangle,
check_arrays,
)
class RollingSumDifference(TestingFactor):
    """Testing factor: per-asset sum of (open - close) over the window."""
    window_length = 3
    inputs = [USEquityPricing.open, USEquityPricing.close]

    def from_windows(self, open, close):
        spread = open - close
        return spread.sum(axis=0)
def assert_product(case, index, *levels):
    """Assert that a MultiIndex contains the product of `*levels`."""
    case.assertIsInstance(index, MultiIndex, "%s is not a MultiIndex" % index)
    actual = set(index)
    expected = set(product(*levels))
    case.assertEqual(actual, expected)
class ConstantInputTestCase(TestCase):
    """
    Engine tests against a loader that serves the same OHLC constants for
    every asset on every date, so expected factor values can be computed
    by hand.
    """
    def setUp(self):
        # Build a constant-valued loader and a matching asset finder for
        # three assets over one month of daily dates.
        self.constants = {
            # Every day, assume every stock starts at 2, goes down to 1,
            # goes up to 4, and finishes at 3.
            USEquityPricing.low: 1,
            USEquityPricing.open: 2,
            USEquityPricing.close: 3,
            USEquityPricing.high: 4,
        }
        self.assets = [1, 2, 3]
        self.dates = date_range('2014-01-01', '2014-02-01', freq='D', tz='UTC')
        self.loader = ConstantLoader(
            constants=self.constants,
            dates=self.dates,
            assets=self.assets,
        )
        self.asset_info = make_simple_asset_info(
            self.assets,
            start_date=self.dates[0],
            end_date=self.dates[-1],
        )
        environment = TradingEnvironment()
        environment.write_data(equities_df=self.asset_info)
        self.asset_finder = environment.asset_finder
    def test_bad_dates(self):
        """factor_matrix must reject start_date >= end_date."""
        loader = self.loader
        engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
        msg = "start_date must be before end_date .*"
        # NOTE(review): assertRaisesRegexp is the deprecated alias of
        # assertRaisesRegex — consider updating when unittest allows.
        with self.assertRaisesRegexp(ValueError, msg):
            engine.factor_matrix({}, self.dates[2], self.dates[1])
        with self.assertRaisesRegexp(ValueError, msg):
            engine.factor_matrix({}, self.dates[2], self.dates[2])
    def test_single_factor(self):
        """One rolling factor: every output cell is -window_length."""
        loader = self.loader
        finder = self.asset_finder
        assets = self.assets
        engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
        result_shape = (num_dates, num_assets) = (5, len(assets))
        # Start 10 days in so the factor's lookback window is full.
        dates = self.dates[10:10 + num_dates]
        factor = RollingSumDifference()
        result = engine.factor_matrix({'f': factor}, dates[0], dates[-1])
        self.assertEqual(set(result.columns), {'f'})
        assert_product(self, result.index, dates, finder.retrieve_all(assets))
        # (open - close) == (2 - 3) == -1 each day, summed over the window.
        assert_array_equal(
            result['f'].unstack().values,
            full(result_shape, -factor.window_length),
        )
    def test_multiple_rolling_factors(self):
        """Several factors with different windows/inputs in one request."""
        loader = self.loader
        finder = self.asset_finder
        assets = self.assets
        engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
        shape = num_dates, num_assets = (5, len(assets))
        dates = self.dates[10:10 + num_dates]
        short_factor = RollingSumDifference(window_length=3)
        long_factor = RollingSumDifference(window_length=5)
        high_factor = RollingSumDifference(
            window_length=3,
            inputs=[USEquityPricing.open, USEquityPricing.high],
        )
        results = engine.factor_matrix(
            {'short': short_factor, 'long': long_factor, 'high': high_factor},
            dates[0],
            dates[-1],
        )
        self.assertEqual(set(results.columns), {'short', 'high', 'long'})
        assert_product(self, results.index, dates, finder.retrieve_all(assets))
        # row-wise sum over an array whose values are all (2 - 3) == -1
        assert_array_equal(
            results['short'].unstack().values,
            full(shape, -short_factor.window_length),
        )
        assert_array_equal(
            results['long'].unstack().values,
            full(shape, -long_factor.window_length),
        )
        # row-wise sum over an array whose values are all (2 - 4) == -2
        assert_array_equal(
            results['high'].unstack().values,
            full(shape, -2 * high_factor.window_length),
        )
    def test_numeric_factor(self):
        """Arithmetic on factor objects ((a + b) / 2) composes correctly."""
        constants = self.constants
        loader = self.loader
        engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
        num_dates = 5
        dates = self.dates[10:10 + num_dates]
        high, low = USEquityPricing.high, USEquityPricing.low
        open, close = USEquityPricing.open, USEquityPricing.close
        high_minus_low = RollingSumDifference(inputs=[high, low])
        open_minus_close = RollingSumDifference(inputs=[open, close])
        avg = (high_minus_low + open_minus_close) / 2
        results = engine.factor_matrix(
            {
                'high_low': high_minus_low,
                'open_close': open_minus_close,
                'avg': avg,
            },
            dates[0],
            dates[-1],
        )
        # 3 days of (high - low) == (4 - 1) per day.
        high_low_result = results['high_low'].unstack()
        expected_high_low = 3.0 * (constants[high] - constants[low])
        assert_frame_equal(
            high_low_result,
            DataFrame(
                expected_high_low,
                index=dates,
                columns=self.assets,
            )
        )
        # 3 days of (open - close) == (2 - 3) per day.
        open_close_result = results['open_close'].unstack()
        expected_open_close = 3.0 * (constants[open] - constants[close])
        assert_frame_equal(
            open_close_result,
            DataFrame(
                expected_open_close,
                index=dates,
                columns=self.assets,
            )
        )
        # The composed factor is the mean of the two expected frames.
        avg_result = results['avg'].unstack()
        expected_avg = (expected_high_low + expected_open_close) / 2.0
        assert_frame_equal(
            avg_result,
            DataFrame(
                expected_avg,
                index=dates,
                columns=self.assets,
            )
        )
class FrameInputTestCase(TestCase):
    """
    Engine tests with DataFrame-backed loaders, exercising adjustment
    handling (multiplicative splits applied at known dates).
    """
    @classmethod
    def setUpClass(cls):
        # Shared environment/calendar for all tests: three assets over
        # January 2015 at the environment's trading-day frequency.
        cls.env = TradingEnvironment()
        day = cls.env.trading_day
        cls.assets = Int64Index([1, 2, 3])
        cls.dates = date_range(
            '2015-01-01',
            '2015-01-31',
            freq=day,
            tz='UTC',
        )
        asset_info = make_simple_asset_info(
            cls.assets,
            start_date=cls.dates[0],
            end_date=cls.dates[-1],
        )
        cls.env.write_data(equities_df=asset_info)
        cls.asset_finder = cls.env.asset_finder
    @classmethod
    def tearDownClass(cls):
        del cls.env
        del cls.asset_finder
    def setUp(self):
        self.dates = FrameInputTestCase.dates
        self.assets = FrameInputTestCase.assets
    @lazyval
    def base_mask(self):
        # An all-True mask frame matching the test calendar/assets.
        return self.make_frame(True)
    def make_frame(self, data):
        """Build a (dates x assets) frame filled with `data`."""
        return DataFrame(data, columns=self.assets, index=self.dates)
    def test_compute_with_adjustments(self):
        """Adjusted and unadjusted loaders agree after pre-applied inverses."""
        dates, assets = self.dates, self.assets
        low, high = USEquityPricing.low, USEquityPricing.high
        apply_idxs = [3, 10, 16]
        def apply_date(idx, offset=0):
            # Calendar date on which the idx-th adjustment takes effect.
            return dates[apply_idxs[idx] + offset]
        # Three multiplicative adjustments for asset[1], each effective
        # from the beginning of the data up to its apply_date.
        adjustments = DataFrame.from_records(
            [
                dict(
                    kind=MULTIPLY,
                    sid=assets[1],
                    value=2.0,
                    start_date=None,
                    end_date=apply_date(0, offset=-1),
                    apply_date=apply_date(0),
                ),
                dict(
                    kind=MULTIPLY,
                    sid=assets[1],
                    value=3.0,
                    start_date=None,
                    end_date=apply_date(1, offset=-1),
                    apply_date=apply_date(1),
                ),
                dict(
                    kind=MULTIPLY,
                    sid=assets[1],
                    value=5.0,
                    start_date=None,
                    end_date=apply_date(2, offset=-1),
                    apply_date=apply_date(2),
                ),
            ]
        )
        low_base = DataFrame(self.make_frame(30.0))
        low_loader = DataFrameFFCLoader(low, low_base.copy(), adjustments=None)
        # Pre-apply inverse of adjustments to the baseline.
        high_base = DataFrame(self.make_frame(30.0))
        high_base.iloc[:apply_idxs[0], 1] /= 2.0
        high_base.iloc[:apply_idxs[1], 1] /= 3.0
        high_base.iloc[:apply_idxs[2], 1] /= 5.0
        high_loader = DataFrameFFCLoader(high, high_base, adjustments)
        loader = MultiColumnLoader({low: low_loader, high: high_loader})
        engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
        # Exercise every (start, stop) pair for several window lengths so
        # adjustment application is checked at all request boundaries.
        for window_length in range(1, 4):
            low_mavg = SimpleMovingAverage(
                inputs=[USEquityPricing.low],
                window_length=window_length,
            )
            high_mavg = SimpleMovingAverage(
                inputs=[USEquityPricing.high],
                window_length=window_length,
            )
            bounds = product_upper_triangle(range(window_length, len(dates)))
            for start, stop in bounds:
                results = engine.factor_matrix(
                    {'low': low_mavg, 'high': high_mavg},
                    dates[start],
                    dates[stop],
                )
                self.assertEqual(set(results.columns), {'low', 'high'})
                iloc_bounds = slice(start, stop + 1) # +1 to include end date
                low_results = results.unstack()['low']
                assert_frame_equal(low_results, low_base.iloc[iloc_bounds])
                high_results = results.unstack()['high']
                assert_frame_equal(high_results, high_base.iloc[iloc_bounds])
class SyntheticBcolzTestCase(TestCase):
    """
    Engine tests against a synthetic bcolz daily-bar store with rotating
    asset lifetimes, so results mix live and not-yet/no-longer-alive
    assets (NaNs).
    """
    @classmethod
    def setUpClass(cls):
        # Six assets whose lifetimes start 4 periods apart and last 8
        # periods each, written to a temporary bcolz store.
        cls.first_asset_start = Timestamp('2015-04-01', tz='UTC')
        cls.env = TradingEnvironment()
        cls.trading_day = cls.env.trading_day
        cls.asset_info = make_rotating_asset_info(
            num_assets=6,
            first_start=cls.first_asset_start,
            frequency=cls.trading_day,
            periods_between_starts=4,
            asset_lifetime=8,
        )
        cls.all_assets = cls.asset_info.index
        cls.all_dates = date_range(
            start=cls.first_asset_start,
            end=cls.asset_info['end_date'].max(),
            freq=cls.trading_day,
        )
        cls.env.write_data(equities_df=cls.asset_info)
        cls.finder = cls.env.asset_finder
        cls.temp_dir = TempDirectory()
        cls.temp_dir.create()
        cls.writer = SyntheticDailyBarWriter(
            asset_info=cls.asset_info[['start_date', 'end_date']],
            calendar=cls.all_dates,
        )
        table = cls.writer.write(
            cls.temp_dir.getpath('testdata.bcolz'),
            cls.all_dates,
            cls.all_assets,
        )
        # Loader reads prices from the bcolz table; no adjustments.
        cls.ffc_loader = USEquityPricingLoader(
            BcolzDailyBarReader(table),
            NullAdjustmentReader(),
        )
    @classmethod
    def tearDownClass(cls):
        del cls.env
        cls.temp_dir.cleanup()
    def test_SMA(self):
        """SimpleMovingAverage matches a hand-computed rolling mean."""
        engine = SimpleFFCEngine(
            self.ffc_loader,
            self.env.trading_days,
            self.finder,
        )
        dates, assets = self.all_dates, self.all_assets
        window_length = 5
        SMA = SimpleMovingAverage(
            inputs=(USEquityPricing.close,),
            window_length=window_length,
        )
        results = engine.factor_matrix(
            {'sma': SMA},
            dates[window_length],
            dates[-1],
        )
        raw_closes = self.writer.expected_values_2d(dates, assets, 'close')
        # NOTE(review): pandas.rolling_mean was removed in pandas 0.18+;
        # this test requires an old pandas (modern equivalent is
        # Series/DataFrame.rolling(...).mean()).
        expected_sma_result = rolling_mean(
            raw_closes,
            window_length,
            min_periods=1,
        )
        # Dates where the asset was not alive should stay NaN, not 0/mean.
        expected_sma_result[isnan(raw_closes)] = nan
        expected_sma_result = expected_sma_result[window_length:]
        sma_result = results['sma'].unstack()
        assert_frame_equal(
            sma_result,
            DataFrame(
                expected_sma_result,
                index=dates[window_length:],
                columns=assets,
            ),
        )
    def test_drawdown(self):
        # The monotonically-increasing data produced by SyntheticDailyBarWriter
        # exercises two pathological cases for MaxDrawdown.  The actual
        # computed results are pretty much useless (everything is either NaN
        # or zero), but verifying we correctly handle those corner cases is
        # valuable.
        engine = SimpleFFCEngine(
            self.ffc_loader,
            self.env.trading_days,
            self.finder,
        )
        dates, assets = self.all_dates, self.all_assets
        window_length = 5
        drawdown = MaxDrawdown(
            inputs=(USEquityPricing.close,),
            window_length=window_length,
        )
        results = engine.factor_matrix(
            {'drawdown': drawdown},
            dates[window_length],
            dates[-1],
        )
        dd_result = results['drawdown']
        # We expect NaNs when the asset was undefined, otherwise 0 everywhere,
        # since the input is always increasing.
        expected = self.writer.expected_values_2d(dates, assets, 'close')
        expected[~isnan(expected)] = 0
        expected = expected[window_length:]
        assert_frame_equal(
            dd_result.unstack(),
            DataFrame(
                expected,
                index=dates[window_length:],
                columns=assets,
            ),
        )
class MultiColumnLoaderTestCase(TestCase):
    """
    Tests that the engine works when each column is served by a
    different loader behind a MultiColumnLoader.
    """
    def setUp(self):
        # Three assets over one month of daily dates.
        self.assets = [1, 2, 3]
        self.dates = date_range('2014-01-01', '2014-02-01', freq='D', tz='UTC')
        asset_info = make_simple_asset_info(
            self.assets,
            start_date=self.dates[0],
            end_date=self.dates[-1],
        )
        env = TradingEnvironment()
        env.write_data(equities_df=asset_info)
        self.asset_finder = env.asset_finder
    def test_engine_with_multicolumn_loader(self):
        """Each input column routed to its own constant loader."""
        open_, close = USEquityPricing.open, USEquityPricing.close
        # open is always 1, close is always 2, each via a separate loader.
        loader = MultiColumnLoader({
            open_: ConstantLoader(dates=self.dates,
                                  assets=self.assets,
                                  constants={open_: 1}),
            close: ConstantLoader(dates=self.dates,
                                  assets=self.assets,
                                  constants={close: 2})
        })
        engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
        factor = RollingSumDifference()
        result = engine.factor_matrix({'f': factor},
                                      self.dates[2],
                                      self.dates[-1])
        self.assertIsNotNone(result)
        self.assertEqual({'f'}, set(result.columns))
        # (close - open) * window = (1 - 2) * 3 = -3
        # skipped 2 from the start, so that the window is full
        check_arrays(result['f'],
                     Series([-3] * len(self.assets) * (len(self.dates) - 2)))
| [
"testfixtures.TempDirectory",
"zipline.data.ffc.synthetic.NullAdjustmentReader",
"zipline.utils.test_utils.make_rotating_asset_info",
"zipline.data.ffc.frame.DataFrameFFCLoader",
"zipline.utils.test_utils.make_simple_asset_info",
"pandas.date_range",
"pandas.util.testing.assert_frame_equal",
"itertool... | [((2144, 2202), 'pandas.date_range', 'date_range', (['"""2014-01-01"""', '"""2014-02-01"""'], {'freq': '"""D"""', 'tz': '"""UTC"""'}), "('2014-01-01', '2014-02-01', freq='D', tz='UTC')\n", (2154, 2202), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((2225, 2303), 'zipline.data.ffc.synthetic.ConstantLoader', 'ConstantLoader', ([], {'constants': 'self.constants', 'dates': 'self.dates', 'assets': 'self.assets'}), '(constants=self.constants, dates=self.dates, assets=self.assets)\n', (2239, 2303), False, 'from zipline.data.ffc.synthetic import ConstantLoader, MultiColumnLoader, NullAdjustmentReader, SyntheticDailyBarWriter\n'), ((2378, 2469), 'zipline.utils.test_utils.make_simple_asset_info', 'make_simple_asset_info', (['self.assets'], {'start_date': 'self.dates[0]', 'end_date': 'self.dates[-1]'}), '(self.assets, start_date=self.dates[0], end_date=self\n .dates[-1])\n', (2400, 2469), False, 'from zipline.utils.test_utils import make_rotating_asset_info, make_simple_asset_info, product_upper_triangle, check_arrays\n'), ((2534, 2554), 'zipline.finance.trading.TradingEnvironment', 'TradingEnvironment', ([], {}), '()\n', (2552, 2554), False, 'from zipline.finance.trading import TradingEnvironment\n'), ((2745, 2799), 'zipline.modelling.engine.SimpleFFCEngine', 'SimpleFFCEngine', (['loader', 'self.dates', 'self.asset_finder'], {}), '(loader, self.dates, self.asset_finder)\n', (2760, 2799), False, 'from zipline.modelling.engine import SimpleFFCEngine\n'), ((3244, 3298), 'zipline.modelling.engine.SimpleFFCEngine', 'SimpleFFCEngine', (['loader', 'self.dates', 'self.asset_finder'], {}), '(loader, self.dates, self.asset_finder)\n', (3259, 3298), False, 'from zipline.modelling.engine import SimpleFFCEngine\n'), ((3952, 4006), 'zipline.modelling.engine.SimpleFFCEngine', 'SimpleFFCEngine', (['loader', 'self.dates', 'self.asset_finder'], {}), '(loader, self.dates, self.asset_finder)\n', (3967, 4006), 
False, 'from zipline.modelling.engine import SimpleFFCEngine\n'), ((5376, 5430), 'zipline.modelling.engine.SimpleFFCEngine', 'SimpleFFCEngine', (['loader', 'self.dates', 'self.asset_finder'], {}), '(loader, self.dates, self.asset_finder)\n', (5391, 5430), False, 'from zipline.modelling.engine import SimpleFFCEngine\n'), ((7149, 7169), 'zipline.finance.trading.TradingEnvironment', 'TradingEnvironment', ([], {}), '()\n', (7167, 7169), False, 'from zipline.finance.trading import TradingEnvironment\n'), ((7226, 7247), 'pandas.Int64Index', 'Int64Index', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (7236, 7247), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((7268, 7326), 'pandas.date_range', 'date_range', (['"""2015-01-01"""', '"""2015-01-31"""'], {'freq': 'day', 'tz': '"""UTC"""'}), "('2015-01-01', '2015-01-31', freq=day, tz='UTC')\n", (7278, 7326), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((7408, 7496), 'zipline.utils.test_utils.make_simple_asset_info', 'make_simple_asset_info', (['cls.assets'], {'start_date': 'cls.dates[0]', 'end_date': 'cls.dates[-1]'}), '(cls.assets, start_date=cls.dates[0], end_date=cls.\n dates[-1])\n', (7430, 7496), False, 'from zipline.utils.test_utils import make_rotating_asset_info, make_simple_asset_info, product_upper_triangle, check_arrays\n'), ((7973, 8027), 'pandas.DataFrame', 'DataFrame', (['data'], {'columns': 'self.assets', 'index': 'self.dates'}), '(data, columns=self.assets, index=self.dates)\n', (7982, 8027), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((9649, 9697), 'zipline.data.ffc.frame.DataFrameFFCLoader', 'DataFrameFFCLoader', (['high', 'high_base', 'adjustments'], {}), '(high, high_base, adjustments)\n', (9667, 9697), False, 'from zipline.data.ffc.frame import DataFrameFFCLoader, MULTIPLY\n'), ((9715, 9770), 
'zipline.data.ffc.synthetic.MultiColumnLoader', 'MultiColumnLoader', (['{low: low_loader, high: high_loader}'], {}), '({low: low_loader, high: high_loader})\n', (9732, 9770), False, 'from zipline.data.ffc.synthetic import ConstantLoader, MultiColumnLoader, NullAdjustmentReader, SyntheticDailyBarWriter\n'), ((9789, 9843), 'zipline.modelling.engine.SimpleFFCEngine', 'SimpleFFCEngine', (['loader', 'self.dates', 'self.asset_finder'], {}), '(loader, self.dates, self.asset_finder)\n', (9804, 9843), False, 'from zipline.modelling.engine import SimpleFFCEngine\n'), ((11031, 11064), 'pandas.Timestamp', 'Timestamp', (['"""2015-04-01"""'], {'tz': '"""UTC"""'}), "('2015-04-01', tz='UTC')\n", (11040, 11064), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((11083, 11103), 'zipline.finance.trading.TradingEnvironment', 'TradingEnvironment', ([], {}), '()\n', (11101, 11103), False, 'from zipline.finance.trading import TradingEnvironment\n'), ((11175, 11323), 'zipline.utils.test_utils.make_rotating_asset_info', 'make_rotating_asset_info', ([], {'num_assets': '(6)', 'first_start': 'cls.first_asset_start', 'frequency': 'cls.trading_day', 'periods_between_starts': '(4)', 'asset_lifetime': '(8)'}), '(num_assets=6, first_start=cls.first_asset_start,\n frequency=cls.trading_day, periods_between_starts=4, asset_lifetime=8)\n', (11199, 11323), False, 'from zipline.utils.test_utils import make_rotating_asset_info, make_simple_asset_info, product_upper_triangle, check_arrays\n'), ((11730, 11745), 'testfixtures.TempDirectory', 'TempDirectory', ([], {}), '()\n', (11743, 11745), False, 'from testfixtures import TempDirectory\n'), ((11798, 11905), 'zipline.data.ffc.synthetic.SyntheticDailyBarWriter', 'SyntheticDailyBarWriter', ([], {'asset_info': "cls.asset_info[['start_date', 'end_date']]", 'calendar': 'cls.all_dates'}), "(asset_info=cls.asset_info[['start_date', 'end_date'\n ]], calendar=cls.all_dates)\n", (11821, 11905), False, 
'from zipline.data.ffc.synthetic import ConstantLoader, MultiColumnLoader, NullAdjustmentReader, SyntheticDailyBarWriter\n'), ((12361, 12429), 'zipline.modelling.engine.SimpleFFCEngine', 'SimpleFFCEngine', (['self.ffc_loader', 'self.env.trading_days', 'self.finder'], {}), '(self.ffc_loader, self.env.trading_days, self.finder)\n', (12376, 12429), False, 'from zipline.modelling.engine import SimpleFFCEngine\n'), ((12573, 12659), 'zipline.modelling.factor.technical.SimpleMovingAverage', 'SimpleMovingAverage', ([], {'inputs': '(USEquityPricing.close,)', 'window_length': 'window_length'}), '(inputs=(USEquityPricing.close,), window_length=\n window_length)\n', (12592, 12659), False, 'from zipline.modelling.factor.technical import MaxDrawdown, SimpleMovingAverage\n'), ((12930, 12984), 'pandas.rolling_mean', 'rolling_mean', (['raw_closes', 'window_length'], {'min_periods': '(1)'}), '(raw_closes, window_length, min_periods=1)\n', (12942, 12984), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((13784, 13852), 'zipline.modelling.engine.SimpleFFCEngine', 'SimpleFFCEngine', (['self.ffc_loader', 'self.env.trading_days', 'self.finder'], {}), '(self.ffc_loader, self.env.trading_days, self.finder)\n', (13799, 13852), False, 'from zipline.modelling.engine import SimpleFFCEngine\n'), ((14001, 14074), 'zipline.modelling.factor.technical.MaxDrawdown', 'MaxDrawdown', ([], {'inputs': '(USEquityPricing.close,)', 'window_length': 'window_length'}), '(inputs=(USEquityPricing.close,), window_length=window_length)\n', (14012, 14074), False, 'from zipline.modelling.factor.technical import MaxDrawdown, SimpleMovingAverage\n'), ((14912, 14970), 'pandas.date_range', 'date_range', (['"""2014-01-01"""', '"""2014-02-01"""'], {'freq': '"""D"""', 'tz': '"""UTC"""'}), "('2014-01-01', '2014-02-01', freq='D', tz='UTC')\n", (14922, 14970), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, 
Timestamp\n'), ((14993, 15084), 'zipline.utils.test_utils.make_simple_asset_info', 'make_simple_asset_info', (['self.assets'], {'start_date': 'self.dates[0]', 'end_date': 'self.dates[-1]'}), '(self.assets, start_date=self.dates[0], end_date=self\n .dates[-1])\n', (15015, 15084), False, 'from zipline.utils.test_utils import make_rotating_asset_info, make_simple_asset_info, product_upper_triangle, check_arrays\n'), ((15141, 15161), 'zipline.finance.trading.TradingEnvironment', 'TradingEnvironment', ([], {}), '()\n', (15159, 15161), False, 'from zipline.finance.trading import TradingEnvironment\n'), ((15765, 15819), 'zipline.modelling.engine.SimpleFFCEngine', 'SimpleFFCEngine', (['loader', 'self.dates', 'self.asset_finder'], {}), '(loader, self.dates, self.asset_finder)\n', (15780, 15819), False, 'from zipline.modelling.engine import SimpleFFCEngine\n'), ((1704, 1720), 'itertools.product', 'product', (['*levels'], {}), '(*levels)\n', (1711, 1720), False, 'from itertools import product\n'), ((3742, 3783), 'numpy.full', 'full', (['result_shape', '(-factor.window_length)'], {}), '(result_shape, -factor.window_length)\n', (3746, 3783), False, 'from numpy import full, isnan, nan\n'), ((4863, 4903), 'numpy.full', 'full', (['shape', '(-short_factor.window_length)'], {}), '(shape, -short_factor.window_length)\n', (4867, 4903), False, 'from numpy import full, isnan, nan\n'), ((5001, 5040), 'numpy.full', 'full', (['shape', '(-long_factor.window_length)'], {}), '(shape, -long_factor.window_length)\n', (5005, 5040), False, 'from numpy import full, isnan, nan\n'), ((5204, 5247), 'numpy.full', 'full', (['shape', '(-2 * high_factor.window_length)'], {}), '(shape, -2 * high_factor.window_length)\n', (5208, 5247), False, 'from numpy import full, isnan, nan\n'), ((6258, 6320), 'pandas.DataFrame', 'DataFrame', (['expected_high_low'], {'index': 'dates', 'columns': 'self.assets'}), '(expected_high_low, index=dates, columns=self.assets)\n', (6267, 6320), False, 'from pandas import 
DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((6599, 6663), 'pandas.DataFrame', 'DataFrame', (['expected_open_close'], {'index': 'dates', 'columns': 'self.assets'}), '(expected_open_close, index=dates, columns=self.assets)\n', (6608, 6663), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((6919, 6976), 'pandas.DataFrame', 'DataFrame', (['expected_avg'], {'index': 'dates', 'columns': 'self.assets'}), '(expected_avg, index=dates, columns=self.assets)\n', (6928, 6976), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((9910, 9988), 'zipline.modelling.factor.technical.SimpleMovingAverage', 'SimpleMovingAverage', ([], {'inputs': '[USEquityPricing.low]', 'window_length': 'window_length'}), '(inputs=[USEquityPricing.low], window_length=window_length)\n', (9929, 9988), False, 'from zipline.modelling.factor.technical import MaxDrawdown, SimpleMovingAverage\n'), ((10060, 10139), 'zipline.modelling.factor.technical.SimpleMovingAverage', 'SimpleMovingAverage', ([], {'inputs': '[USEquityPricing.high]', 'window_length': 'window_length'}), '(inputs=[USEquityPricing.high], window_length=window_length)\n', (10079, 10139), False, 'from zipline.modelling.factor.technical import MaxDrawdown, SimpleMovingAverage\n'), ((12148, 12174), 'zipline.data.ffc.loaders.us_equity_pricing.BcolzDailyBarReader', 'BcolzDailyBarReader', (['table'], {}), '(table)\n', (12167, 12174), False, 'from zipline.data.ffc.loaders.us_equity_pricing import BcolzDailyBarReader, USEquityPricingLoader\n'), ((12188, 12210), 'zipline.data.ffc.synthetic.NullAdjustmentReader', 'NullAdjustmentReader', ([], {}), '()\n', (12208, 12210), False, 'from zipline.data.ffc.synthetic import ConstantLoader, MultiColumnLoader, NullAdjustmentReader, SyntheticDailyBarWriter\n'), ((13060, 13077), 'numpy.isnan', 'isnan', (['raw_closes'], {}), '(raw_closes)\n', (13065, 13077), 
False, 'from numpy import full, isnan, nan\n'), ((13262, 13337), 'pandas.DataFrame', 'DataFrame', (['expected_sma_result'], {'index': 'dates[window_length:]', 'columns': 'assets'}), '(expected_sma_result, index=dates[window_length:], columns=assets)\n', (13271, 13337), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((14654, 14718), 'pandas.DataFrame', 'DataFrame', (['expected'], {'index': 'dates[window_length:]', 'columns': 'assets'}), '(expected, index=dates[window_length:], columns=assets)\n', (14663, 14718), False, 'from pandas import DataFrame, date_range, Int64Index, MultiIndex, rolling_mean, Series, Timestamp\n'), ((10718, 10777), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['low_results', 'low_base.iloc[iloc_bounds]'], {}), '(low_results, low_base.iloc[iloc_bounds])\n', (10736, 10777), False, 'from pandas.util.testing import assert_frame_equal\n'), ((10852, 10913), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['high_results', 'high_base.iloc[iloc_bounds]'], {}), '(high_results, high_base.iloc[iloc_bounds])\n', (10870, 10913), False, 'from pandas.util.testing import assert_frame_equal\n'), ((14515, 14530), 'numpy.isnan', 'isnan', (['expected'], {}), '(expected)\n', (14520, 14530), False, 'from numpy import full, isnan, nan\n'), ((15430, 15504), 'zipline.data.ffc.synthetic.ConstantLoader', 'ConstantLoader', ([], {'dates': 'self.dates', 'assets': 'self.assets', 'constants': '{open_: 1}'}), '(dates=self.dates, assets=self.assets, constants={open_: 1})\n', (15444, 15504), False, 'from zipline.data.ffc.synthetic import ConstantLoader, MultiColumnLoader, NullAdjustmentReader, SyntheticDailyBarWriter\n'), ((15593, 15667), 'zipline.data.ffc.synthetic.ConstantLoader', 'ConstantLoader', ([], {'dates': 'self.dates', 'assets': 'self.assets', 'constants': '{close: 2}'}), '(dates=self.dates, assets=self.assets, constants={close: 2})\n', (15607, 15667), False, 'from 
zipline.data.ffc.synthetic import ConstantLoader, MultiColumnLoader, NullAdjustmentReader, SyntheticDailyBarWriter\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy
import stats
from common import *
class htkReader(BaseReader):
    """Reader for HTK-format feature files with optional label files.

    Parses the 12-byte HTK header (numSamples, sampPeriod, sampSize,
    sampKind) and then reads the sample matrix, honouring the byte order
    requested at construction time.
    """

    def __init__(self, featureFile, labelFile, byteOrder=None):
        BaseReader.__init__(self, featureFile, labelFile, byteOrder)

    def Read(self):
        """Read the feature file and return ``(samples, labels)``.

        ``samples`` is an (numSamples, sampSize/4) float32 array of
        feature vectors.  ``labels`` is ``None`` when no label file was
        supplied, otherwise whatever ``ReadLabel`` returns for it.
        """
        with open(self.featureFile, "rb") as f:
            # HTK header: two int32 fields followed by two int16 fields.
            dt = numpy.dtype([('numSamples', (numpy.int32, 1)),
                             ('sampPeriod', (numpy.int32, 1)),
                             ('sampSize', (numpy.int16, 1)),
                             ('sampKind', (numpy.int16, 1))])
            header = numpy.fromfile(
                f,
                dt.newbyteorder('>' if self.byteOrder == ByteOrder.BigEndian else '<'),
                count=1)
            numSamples = header[0]['numSamples']
            sampPeriod = header[0]['sampPeriod']
            sampSize = header[0]['sampSize']
            sampKind = header[0]['sampKind']

            # sampSize is in bytes and each value is a 4-byte float, so the
            # per-sample vector length is sampSize // 4.  Integer division is
            # required: under Python 3, `sampSize / 4` is a float, which
            # numpy.dtype rejects as a subarray shape.
            dt = numpy.dtype([('sample', (numpy.float32, sampSize // 4))])
            samples = numpy.fromfile(
                f,
                dt.newbyteorder('>' if self.byteOrder == ByteOrder.BigEndian else '<'),
                count=numSamples)
        self._markDone()
        if self.labelFile is None:
            labels = None
        else:
            labels = ReadLabel(self.labelFile)
        return samples[:]['sample'], labels
| [
"numpy.dtype"
] | [((1174, 1323), 'numpy.dtype', 'numpy.dtype', (["[('numSamples', (numpy.int32, 1)), ('sampPeriod', (numpy.int32, 1)), (\n 'sampSize', (numpy.int16, 1)), ('sampKind', (numpy.int16, 1))]"], {}), "([('numSamples', (numpy.int32, 1)), ('sampPeriod', (numpy.int32,\n 1)), ('sampSize', (numpy.int16, 1)), ('sampKind', (numpy.int16, 1))])\n", (1185, 1323), False, 'import numpy\n'), ((1867, 1923), 'numpy.dtype', 'numpy.dtype', (["[('sample', (numpy.float32, sampSize / 4))]"], {}), "([('sample', (numpy.float32, sampSize / 4))])\n", (1878, 1923), False, 'import numpy\n')] |
## @ingroup Input_Output-Results
#print_compress_drag.py
# Created: SUAVE team
# Modified: <NAME>, Feb 2016
# Apr 2020, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
import numpy as np
from SUAVE.Core import Units,Data
# Imports
import time # importing library
import datetime # importing library
# ----------------------------------------------------------------------
# Print output file with compressibility drag
# ----------------------------------------------------------------------
## @ingroup Input_Output-Results
def print_compress_drag(vehicle, analyses, filename='compress_drag.dat'):
    """This creates a file showing a breakdown of compressibility drag for the vehicle.

    Assumptions:
    None

    Source:
    N/A

    Inputs:
    vehicle.wings.main_wing.
      sweeps.quarter_chord                  [-]
    vehicle.wings.*.
      tag                                   <string>
      thickness_to_chord                    [-]
    vehicle.
      tag                                   <string>
      reference_area                        [m^2]
    analyses.configs.cruise.aerodynamics.settings   Used in called function:
    analyses.configs.cruise.aerodynamics.process.compute.drag.compressibility.wings.wing(state,settings,wing)
    filename                                Sets file name to save (optional)

    Outputs:
    filename                                Saved file with name as above

    Properties Used:
    N/A
    """
    # Unpack
    sweep = vehicle.wings['main_wing'].sweeps.quarter_chord / Units.deg
    t_c = vehicle.wings['main_wing'].thickness_to_chord
    sref = vehicle.reference_area
    settings = analyses.configs.cruise.aerodynamics.settings

    # Define mach and CL vectors
    mach_vec = np.linspace(0.45, 0.95, 11)
    cl_vec = np.linspace(0.30, 0.80, 11)

    # Allocate one (mach x CL) result array per wing plus the vehicle total.
    cd_compress = Data()
    for wing in vehicle.wings:
        cd_compress[wing.tag] = np.zeros((len(mach_vec), len(cl_vec)))
    cd_compress_tot = np.zeros_like(cd_compress.main_wing)

    # State container required by the drag estimation method.
    state = Data()
    state.conditions = SUAVE.Analyses.Mission.Segments.Conditions.Aerodynamics()
    state.conditions.freestream.mach_number = mach_vec

    # The 'with' block guarantees the file is closed even if the
    # aerodynamics call raises.  (Previously 'fid.close' was an attribute
    # access — not a call — so the file was never explicitly closed, and the
    # timestamp was written after the intended close.)
    with open(filename, 'w') as fid:
        # Write header of file.
        fid.write('Output file with compressibility drag breakdown\n\n')
        fid.write(' VEHICLE TAG : ' + vehicle.tag + '\n\n')
        fid.write(' REFERENCE AREA ................ ' + str('%5.1f' % sref) + ' m2 ' + '\n')
        fid.write(' MAIN WING SWEEP ............... ' + str('%5.1f' % sweep) + ' deg' + '\n')
        fid.write(' MAIN WING THICKNESS RATIO ..... ' + str('%5.2f' % t_c) + ' ' + '\n')
        fid.write(' \n')
        fid.write(' TOTAL COMPRESSIBILITY DRAG \n')
        fid.write(str(np.insert(np.transpose(list(map('M={:5.3f} | '.format, (mach_vec)))), 0, ' CL | ')))
        fid.write('\n')

        # Call the aerodynamic method for each CL.
        for idcl, cl in enumerate(cl_vec):
            state.conditions.aerodynamics.lift_breakdown.compressible_wings = Data()
            for wing in vehicle.wings:
                state.conditions.aerodynamics.lift_breakdown.compressible_wings[wing.tag] = np.atleast_1d(cl)
                analyses.configs.cruise.aerodynamics.process.compute.drag.compressibility.wings.wing(state, settings, wing)
            # Process output for print.
            drag_breakdown = state.conditions.aerodynamics.drag_breakdown.compressible
            for wing in vehicle.wings:
                cd_compress[wing.tag][:, idcl] = drag_breakdown[wing.tag].compressibility_drag
                cd_compress_tot[:, idcl] += drag_breakdown[wing.tag].compressibility_drag
            # Print first the TOTAL COMPRESSIBILITY DRAG.
            fid.write(str(np.insert((np.transpose(list(map('{:7.5f} | '.format, (cd_compress_tot[:, idcl]))))), 0, ' {:5.3f} | '.format(cl))))
            fid.write('\n')
        fid.write(119 * '-')

        # Print results of other components.
        for wing in vehicle.wings:
            fid.write('\n ' + wing.tag.upper() + ' ( t/c: {:4.3f} )'.format(wing.thickness_to_chord) + '\n')
            fid.write(str(np.insert(np.transpose(list(map('M={:5.3f} | '.format, (mach_vec)))), 0, ' CL | ')))
            fid.write('\n')
            for idcl, cl in enumerate(cl_vec):
                fid.write(str(np.insert((np.transpose(list(map('{:7.5f} | '.format, (cd_compress[wing.tag][:, idcl]))))), 0, ' {:5.3f} | '.format(cl))))
                fid.write('\n')
            fid.write(119 * '-')

        # Print timestamp before the file is closed by the 'with' block.
        fid.write('\n\n' + datetime.datetime.now().strftime(" %A, %d. %B %Y %I:%M:%S %p"))
    # Done!
    return
# ----------------------------------------------------------------------
# Module Test
# ----------------------------------------------------------------------
if __name__ == '__main__':
    # This module is a library: it only provides print_compress_drag() for
    # use by other scripts and has no stand-alone test when run directly.
    print(' Error: No test defined ! ')
"SUAVE.Analyses.Mission.Segments.Conditions.Aerodynamics",
"datetime.datetime.now",
"numpy.linspace",
"SUAVE.Core.Data",
"numpy.zeros_like",
"numpy.atleast_1d"
] | [((1896, 1923), 'numpy.linspace', 'np.linspace', (['(0.45)', '(0.95)', '(11)'], {}), '(0.45, 0.95, 11)\n', (1907, 1923), True, 'import numpy as np\n'), ((1952, 1977), 'numpy.linspace', 'np.linspace', (['(0.3)', '(0.8)', '(11)'], {}), '(0.3, 0.8, 11)\n', (1963, 1977), True, 'import numpy as np\n'), ((2037, 2043), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (2041, 2043), False, 'from SUAVE.Core import Units, Data\n'), ((2182, 2218), 'numpy.zeros_like', 'np.zeros_like', (['cd_compress.main_wing'], {}), '(cd_compress.main_wing)\n', (2195, 2218), True, 'import numpy as np\n'), ((2308, 2314), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (2312, 2314), False, 'from SUAVE.Core import Units, Data\n'), ((2338, 2395), 'SUAVE.Analyses.Mission.Segments.Conditions.Aerodynamics', 'SUAVE.Analyses.Mission.Segments.Conditions.Aerodynamics', ([], {}), '()\n', (2393, 2395), False, 'import SUAVE\n'), ((3339, 3345), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (3343, 3345), False, 'from SUAVE.Core import Units, Data\n'), ((3469, 3486), 'numpy.atleast_1d', 'np.atleast_1d', (['cl'], {}), '(cl)\n', (3482, 3486), True, 'import numpy as np\n'), ((4813, 4836), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4834, 4836), False, 'import datetime\n')] |
"""This module tests our model against the results from the oi-VAE paper on CMU subject 7."""
from src.models.dp_gp_lvm import dp_gp_lvm
from src.utils.constants import RESULTS_FILE_NAME, DATA_PATH
from src.utils.types import NP_DTYPE
import src.visualisation.plotters as vis
import matplotlib.cm as color_map
import matplotlib.pyplot as plot
import numpy as np
from os.path import isfile
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from time import time
if __name__ == '__main__':

    # Optimisation variables.
    learning_rate = 0.025
    num_iter_train = 2500
    num_iter_predict = 2000

    # Model hyperparameters.
    num_training_samples = 150
    num_inducing_points = 40
    num_latent_dimensions = 16  # oi-VAE uses 4, 8, and 16.
    truncation_level = 15

    # Read CMU subject 7 joint-angle data for sequences 1-10 and stack them.
    training_data = None
    for i in range(10):
        sequence = i + 1
        np_file = '07_0{}_joint_angles.npy'.format(sequence) if sequence < 10 \
            else '07_{}_joint_angles.npy'.format(sequence)
        cmu_data = np.load(DATA_PATH + 'cmu_mocap/' + np_file)
        if training_data is None:
            training_data = cmu_data
        else:
            training_data = np.vstack((training_data, cmu_data))
    total_num_frames = training_data.shape[0]
    num_output_dimensions = training_data.shape[1]

    # Randomly sample frames and normalise data to zero mean and unit variance.
    np.random.seed(seed=1)  # Set seed for reproducible subsampling.
    training_indices = np.random.choice(training_data.shape[0], size=num_training_samples, replace=False)
    scaler = StandardScaler()
    y_train = scaler.fit_transform(training_data[training_indices, 6:])  # Remove first 6 dimensions to ignore root.

    # Print info.
    print('\nCMU Subject 7 - Sequences 1-10:')
    print(' Total number of observations (N): {}'.format(num_training_samples))
    print(' Total number of output dimensions (D): {}'.format(num_output_dimensions))
    print(' Total number of inducing points (M): {}'.format(num_inducing_points))
    print(' Total number of latent dimensions (Q): {}'.format(num_latent_dimensions))

    # Define file path for results.
    dataset_str = 'cmu_subject7_joint_angles'
    dp_gp_lvm_results_file = RESULTS_FILE_NAME.format(model='dp_gp_lvm', dataset=dataset_str)

    if not isfile(dp_gp_lvm_results_file):
        # Train the model from scratch and save the converged values.
        # Reset default graph before building new model graph. This speeds up script.
        tf.reset_default_graph()
        np.random.seed(1)  # Random seed.

        # Define instance of DP-GP-LVM.
        model = dp_gp_lvm(y_train=y_train,
                          num_inducing_points=num_inducing_points,
                          num_latent_dims=num_latent_dimensions,
                          truncation_level=truncation_level,
                          mask_size=1)  # Treat each observed dimension as independent.

        model_training_objective = model.objective
        # Optimisation.
        model_opt_train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(
            loss=model_training_objective)

        with tf.Session() as s:
            # Initialise variables.
            s.run(tf.global_variables_initializer())

            # Training optimisation loop.
            start_time = time()
            print('\nTraining DP-GP-LVM:')
            for c in range(num_iter_train):
                s.run(model_opt_train)
                if (c % 100) == 0:
                    print(' DP-GP-LVM opt iter {:5}: {}'.format(c, s.run(model_training_objective)))
            end_time = time()
            train_opt_time = end_time - start_time
            final_cost = s.run(model_training_objective)
            print('Final iter {:5}:'.format(c))
            print(' DP-GP-LVM: {}'.format(s.run(model_training_objective)))
            print('Time to optimise: {} s'.format(train_opt_time))

            # Get converged values as numpy arrays.
            ard_weights, noise_precision, signal_variance, inducing_input, assignments = \
                s.run((model.ard_weights, model.noise_precision, model.signal_variance, model.inducing_input,
                       model.assignments))
            x_mean, x_covar = s.run(model.q_x)
            w_1, w_2 = s.run(model.dp.q_alpha)
            gamma_atoms, alpha_atoms, beta_atoms = s.run(model.dp_atoms)

            # Save results.
            print('\nSaving results to .npz file.')
            np.savez(dp_gp_lvm_results_file, original_data=training_data, y_train=y_train,
                     ard_weights=ard_weights, noise_precision=noise_precision, signal_variance=signal_variance,
                     x_u=inducing_input, assignments=assignments, x_mean=x_mean, x_covar=x_covar,
                     gamma_atoms=gamma_atoms, alpha_atoms=alpha_atoms, beta_atoms=beta_atoms,
                     q_alpha_w1=w_1, q_alpha_w2=w_2, train_opt_time=train_opt_time, final_cost=final_cost)

        # BUG FIX: the plotting code below reads from 'results', which was
        # previously assigned only in the else-branch, so the first (training)
        # run crashed with NameError. Load the freshly saved file here.
        results = np.load(dp_gp_lvm_results_file)
    else:
        # Load previously computed results.
        results = np.load(dp_gp_lvm_results_file)

    # Load number of dimensions per joint.
    joint_dim_dict = np.load(DATA_PATH + 'cmu_mocap/' + '07_joint_dims.npy').item()
    labels = list(joint_dim_dict.keys())
    ticks = np.array(list(joint_dim_dict.values()), dtype=int)

    # Plot latent spaces.
    dp_gp_lvm_ard = results['ard_weights']
    dp_gp_lvm_ard = np.sqrt(dp_gp_lvm_ard)

    # Sum sort: order latent dimensions by their total ARD weight.
    index = np.argsort(np.sum(dp_gp_lvm_ard, axis=0))
    plot.figure(figsize=(10, 5))
    plot.imshow(np.transpose(dp_gp_lvm_ard[:, index[::-1]]), interpolation='nearest', aspect='auto',
                extent=(0, num_output_dimensions, num_latent_dimensions, 0), origin='upper', cmap=color_map.Blues)
    plot.ylabel('X', rotation='horizontal')
    plot.xlabel('')
    ax = plot.gca()
    ax.set_yticks(np.arange(num_latent_dimensions))
    ax.set_yticklabels([])
    ax.set_xticks(np.cumsum(ticks), minor=False)
    ax.set_xticklabels([], minor=False)
    ax.set_xticks(np.cumsum(ticks) - 0.5 * ticks, minor=True)
    ax.set_xticklabels(labels, minor=True, rotation='vertical', fontweight='bold')
    plot.savefig('cmu_7_sum_sort.pdf', bbox_inches='tight')
    plot.show()
# # Largest sum.
# index = np.argsort(np.sum(dp_gp_lvm_ard, axis=1))[::-1]
# index = np.argsort(dp_gp_lvm_ard[index[0], :])[::-1]
# plot.figure(figsize=(7,10))
# plot.imshow(dp_gp_lvm_ard[:, index], interpolation='nearest', aspect='auto',
# extent=(0, num_latent_dimensions, num_output_dimensions, 0), origin='upper', cmap=color_map.Blues)
# plot.title('Latent factorization for each joint')
# plot.xlabel('X-Dimension')
# plot.ylabel('')
# ax = plot.gca()
# ax.set_xticks(np.arange(num_latent_dimensions))
# ax.set_xticklabels([])
# ax.set_yticks(np.cumsum(ticks), minor=False)
# ax.set_yticklabels([], minor=False)
# ax.set_yticks(np.cumsum(ticks) - 0.5 * ticks, minor=True)
# ax.set_yticklabels(labels, minor=True)
# plot.savefig('cmu_7_largest_sum.pdf', bbox_inches='tight')
# # plot.show()
#
# # Variance
# index = np.argsort(np.var(dp_gp_lvm_ard, axis=0))[::-1]
# plot.figure(figsize=(7,10))
# plot.imshow(dp_gp_lvm_ard[:, index], interpolation='nearest', aspect='auto',
# extent=(0, num_latent_dimensions, num_output_dimensions, 0), origin='upper', cmap=color_map.Blues)
# plot.title('Latent factorization for each joint')
# plot.xlabel('X-Dimension')
# plot.ylabel('')
# ax = plot.gca()
# ax.set_xticks(np.arange(num_latent_dimensions))
# ax.set_xticklabels([])
# ax.set_yticks(np.cumsum(ticks), minor=False)
# ax.set_yticklabels([], minor=False)
# ax.set_yticks(np.cumsum(ticks) - 0.5 * ticks, minor=True)
# ax.set_yticklabels(labels, minor=True)
# plot.savefig('cmu_7_variance_sort.pdf', bbox_inches='tight')
# plot.show()
# Using cartesian coordinates.
# # Read data.
# training_data = None
# for i in range(10):
# sequence = i + 1
# np_file = '07_0{}.npy'.format(sequence) if sequence < 10 else '07_{}.npy'.format(sequence)
# cmu_data = np.load(DATA_PATH + 'cmu_mocap/' + np_file)
# if training_data is None:
# training_data = cmu_data
# else:
# training_data = np.vstack((training_data, cmu_data))
# total_num_frames = training_data.shape[0]
# num_output_dimensions = training_data.shape[1]
#
# # Randomly sample 200 frames and normalise data to zero mean and unit variance.
# np.random.seed(seed=1) # Set seed.
# training_indices = np.random.choice(training_data.shape[0], size=num_training_samples, replace=False)
# scaler = StandardScaler()
# y_train = scaler.fit_transform(training_data[training_indices, :])
#
# # Print info.
# print('\nCMU Subject 7 - Sequences 1-10:')
# print(' Total number of observations (N): {}'.format(num_training_samples))
# print(' Total number of output dimensions (D): {}'.format(num_output_dimensions))
# print(' Total number of inducing points (M): {}'.format(num_inducing_points))
# print(' Total number of latent dimensions (Q): {}'.format(num_latent_dimensions))
#
# # Define file path for results.
# dataset_str = 'cmu_subject7'
# dp_gp_lvm_results_file = RESULTS_FILE_NAME.format(model='dp_gp_lvm', dataset=dataset_str) # Keep 3d points together
#
# # Define instance of necessary model.
# if not isfile(dp_gp_lvm_results_file):
# # Reset default graph before building new model graph. This speeds up script.
# tf.reset_default_graph()
# np.random.seed(1) # Random seed.
# # Define instance of DP-GP-LVM.
# model = dp_gp_lvm(y_train=y_train,
# num_inducing_points=num_inducing_points,
# num_latent_dims=num_latent_dimensions,
# truncation_level=truncation_level,
# mask_size=3)
#
# model_training_objective = model.objective
# # Optimisation.
# model_opt_train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(
# loss=model_training_objective)
#
# with tf.Session() as s:
# # Initialise variables.
# s.run(tf.global_variables_initializer())
#
# # Training optimisation loop.
# start_time = time()
# print('\nTraining DP-GP-LVM:')
# for c in range(num_iter_train):
# s.run(model_opt_train)
# if (c % 100) == 0:
# print(' DP-GP-LVM opt iter {:5}: {}'.format(c, s.run(model_training_objective)))
# end_time = time()
# train_opt_time = end_time - start_time
# final_cost = s.run(model_training_objective)
# print('Final iter {:5}:'.format(c))
# print(' DP-GP-LVM: {}'.format(s.run(model_training_objective)))
# print('Time to optimise: {} s'.format(train_opt_time))
#
# # Get converged values as numpy arrays.
# ard_weights, noise_precision, signal_variance, inducing_input, assignments = \
# s.run((model.ard_weights, model.noise_precision, model.signal_variance, model.inducing_input,
# model.assignments))
# x_mean, x_covar = s.run(model.q_x)
# w_1, w_2 = s.run(model.dp.q_alpha)
# gamma_atoms, alpha_atoms, beta_atoms = s.run(model.dp_atoms)
#
# # Save results.
# print('\nSaving results to .npz file.')
# np.savez(dp_gp_lvm_results_file, original_data=training_data, y_train=y_train,
# ard_weights=ard_weights, noise_precision=noise_precision, signal_variance=signal_variance,
# x_u=inducing_input, assignments=assignments, x_mean=x_mean, x_covar=x_covar,
# gamma_atoms=gamma_atoms, alpha_atoms=alpha_atoms, beta_atoms=beta_atoms,
# q_alpha_w1=w_1, q_alpha_w2=w_2, train_opt_time=train_opt_time, final_cost=final_cost)
#
# else:
# # Load results.
# results = np.load(dp_gp_lvm_results_file)
#
# # Plot latent spaces.
# dp_gp_lvm_ard = results['ard_weights']
#
# plot.figure()
# # plot.imshow(np.sqrt(dp_gp_lvm_ard).T, interpolation='nearest', aspect='auto',
# # extent=(0, num_output_dimensions, num_latent_dimensions, 0), origin='upper')
# plot.imshow(np.sqrt(dp_gp_lvm_ard), interpolation='nearest', aspect='auto',
# extent=(0, num_latent_dimensions, num_output_dimensions, 0), origin='upper')
# plot.colorbar()
# plot.title('ARD Weights')
# plot.show()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"src.utils.constants.RESULTS_FILE_NAME.format",
"src.models.dp_gp_lvm.dp_gp_lvm",
"numpy.arange",
"numpy.savez",
"matplotlib.pyplot.xlabel",
"tensorflow.Session",
"numpy.random.seed",
"numpy.vstack",
"tensorflow.train.AdamOptimizer",
"matplotlib.pyplot... | [((1433, 1455), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(1)'}), '(seed=1)\n', (1447, 1455), True, 'import numpy as np\n'), ((1492, 1579), 'numpy.random.choice', 'np.random.choice', (['training_data.shape[0]'], {'size': 'num_training_samples', 'replace': '(False)'}), '(training_data.shape[0], size=num_training_samples, replace\n =False)\n', (1508, 1579), True, 'import numpy as np\n'), ((1588, 1604), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1602, 1604), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2238, 2302), 'src.utils.constants.RESULTS_FILE_NAME.format', 'RESULTS_FILE_NAME.format', ([], {'model': '"""dp_gp_lvm"""', 'dataset': 'dataset_str'}), "(model='dp_gp_lvm', dataset=dataset_str)\n", (2262, 2302), False, 'from src.utils.constants import RESULTS_FILE_NAME, DATA_PATH\n'), ((1053, 1096), 'numpy.load', 'np.load', (["(DATA_PATH + 'cmu_mocap/' + np_file)"], {}), "(DATA_PATH + 'cmu_mocap/' + np_file)\n", (1060, 1096), True, 'import numpy as np\n'), ((2357, 2387), 'os.path.isfile', 'isfile', (['dp_gp_lvm_results_file'], {}), '(dp_gp_lvm_results_file)\n', (2363, 2387), False, 'from os.path import isfile\n'), ((2483, 2507), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2505, 2507), True, 'import tensorflow as tf\n'), ((2516, 2533), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2530, 2533), True, 'import numpy as np\n'), ((2606, 2769), 'src.models.dp_gp_lvm.dp_gp_lvm', 'dp_gp_lvm', ([], {'y_train': 'y_train', 'num_inducing_points': 'num_inducing_points', 'num_latent_dims': 'num_latent_dimensions', 'truncation_level': 'truncation_level', 'mask_size': '(1)'}), '(y_train=y_train, num_inducing_points=num_inducing_points,\n num_latent_dims=num_latent_dimensions, truncation_level=\n truncation_level, mask_size=1)\n', (2615, 2769), False, 'from src.models.dp_gp_lvm import dp_gp_lvm\n'), ((4456, 4883), 'numpy.savez', 'np.savez', 
(['dp_gp_lvm_results_file'], {'original_data': 'training_data', 'y_train': 'y_train', 'ard_weights': 'ard_weights', 'noise_precision': 'noise_precision', 'signal_variance': 'signal_variance', 'x_u': 'inducing_input', 'assignments': 'assignments', 'x_mean': 'x_mean', 'x_covar': 'x_covar', 'gamma_atoms': 'gamma_atoms', 'alpha_atoms': 'alpha_atoms', 'beta_atoms': 'beta_atoms', 'q_alpha_w1': 'w_1', 'q_alpha_w2': 'w_2', 'train_opt_time': 'train_opt_time', 'final_cost': 'final_cost'}), '(dp_gp_lvm_results_file, original_data=training_data, y_train=\n y_train, ard_weights=ard_weights, noise_precision=noise_precision,\n signal_variance=signal_variance, x_u=inducing_input, assignments=\n assignments, x_mean=x_mean, x_covar=x_covar, gamma_atoms=gamma_atoms,\n alpha_atoms=alpha_atoms, beta_atoms=beta_atoms, q_alpha_w1=w_1,\n q_alpha_w2=w_2, train_opt_time=train_opt_time, final_cost=final_cost)\n', (4464, 4883), True, 'import numpy as np\n'), ((4983, 5014), 'numpy.load', 'np.load', (['dp_gp_lvm_results_file'], {}), '(dp_gp_lvm_results_file)\n', (4990, 5014), True, 'import numpy as np\n'), ((5584, 5606), 'numpy.sqrt', 'np.sqrt', (['dp_gp_lvm_ard'], {}), '(dp_gp_lvm_ard)\n', (5591, 5606), True, 'import numpy as np\n'), ((6673, 6701), 'matplotlib.pyplot.figure', 'plot.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (6684, 6701), True, 'import matplotlib.pyplot as plot\n'), ((6933, 6972), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""X"""'], {'rotation': '"""horizontal"""'}), "('X', rotation='horizontal')\n", (6944, 6972), True, 'import matplotlib.pyplot as plot\n'), ((6981, 6996), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['""""""'], {}), "('')\n", (6992, 6996), True, 'import matplotlib.pyplot as plot\n'), ((7010, 7020), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (7018, 7020), True, 'import matplotlib.pyplot as plot\n'), ((7366, 7421), 'matplotlib.pyplot.savefig', 'plot.savefig', (['"""cmu_7_sum_sort.pdf"""'], {'bbox_inches': '"""tight"""'}), 
"('cmu_7_sum_sort.pdf', bbox_inches='tight')\n", (7378, 7421), True, 'import matplotlib.pyplot as plot\n'), ((7430, 7441), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (7439, 7441), True, 'import matplotlib.pyplot as plot\n'), ((1210, 1246), 'numpy.vstack', 'np.vstack', (['(training_data, cmu_data)'], {}), '((training_data, cmu_data))\n', (1219, 1246), True, 'import numpy as np\n'), ((3135, 3147), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3145, 3147), True, 'import tensorflow as tf\n'), ((3311, 3317), 'time.time', 'time', ([], {}), '()\n', (3315, 3317), False, 'from time import time\n'), ((3604, 3610), 'time.time', 'time', ([], {}), '()\n', (3608, 3610), False, 'from time import time\n'), ((6634, 6663), 'numpy.sum', 'np.sum', (['dp_gp_lvm_ard'], {'axis': '(0)'}), '(dp_gp_lvm_ard, axis=0)\n', (6640, 6663), True, 'import numpy as np\n'), ((6721, 6764), 'numpy.transpose', 'np.transpose', (['dp_gp_lvm_ard[:, index[::-1]]'], {}), '(dp_gp_lvm_ard[:, index[::-1]])\n', (6733, 6764), True, 'import numpy as np\n'), ((7043, 7075), 'numpy.arange', 'np.arange', (['num_latent_dimensions'], {}), '(num_latent_dimensions)\n', (7052, 7075), True, 'import numpy as np\n'), ((7130, 7146), 'numpy.cumsum', 'np.cumsum', (['ticks'], {}), '(ticks)\n', (7139, 7146), True, 'import numpy as np\n'), ((3016, 3067), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (3038, 3067), True, 'import tensorflow as tf\n'), ((3208, 3241), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3239, 3241), True, 'import tensorflow as tf\n'), ((5088, 5143), 'numpy.load', 'np.load', (["(DATA_PATH + 'cmu_mocap/' + '07_joint_dims.npy')"], {}), "(DATA_PATH + 'cmu_mocap/' + '07_joint_dims.npy')\n", (5095, 5143), True, 'import numpy as np\n'), ((7227, 7243), 'numpy.cumsum', 'np.cumsum', (['ticks'], {}), '(ticks)\n', (7236, 7243), True, 'import numpy as np\n')] |
import numpy as np
from numpy import pi
from matplotlib import pyplot as plt
def make_VandermondeT(omega, time):
"""
Compute the transpose of the Vandermonde
matrix from the times and frequencies
Parameters
----------
omega: 1D numpy array of complex values
(r = truncation rank of the DMD)
DMD frequencies
time: 1D numpy array of floats
(M = number of time samples)
Time range for the DMD
Returns
----------
VandermondeT: 2D numpy array of complex values
(r, M)
Transpose of the Vandermonde matrix for DMD reconstructions
"""
VandermondeT = np.exp(np.outer(time, omega))
return VandermondeT
def dmd(data, r, time, M_train):
"""
Compute the DMD of a set of time series
Parameters
----------
data: 2D numpy array of spacetime data
(N = number of spatial locations, M = number of time samples)
Data to use the DMD on
r: integer
Truncation rank for the DMD
time: 1D numpy array of floats
(M = number of time samples)
Time range for the DMD
M_train: integer
The length of the time range for building the DMD model,
the remainder is used for testing the model.
Returns
----------
Bfield: 2D numpy array of floats
(N = number of spatial locations, M = number of time samples)
DMD reconstruction of the data variable
"""
tsize = len(time)
time = time / 1e6
Qsize = int(np.shape(data)[0] / 6)
Bfield = np.zeros((np.shape(data)[0], tsize - M_train), dtype='complex')
dt = 1e-6
X = data[:, 0:M_train]
Xprime = data[:, 1:M_train + 1]
Udmd, Sdmd, Vdmd = np.linalg.svd(X, full_matrices=False)
Vdmd = np.transpose(Vdmd)
Udmd = Udmd[:, 0:r]
Sdmd = Sdmd[0:r]
Vdmd = Vdmd[:, 0:r]
S = np.diag(Sdmd)
A = np.dot(np.dot(np.transpose(Udmd), Xprime), Vdmd / Sdmd)
eigvals, Y = np.linalg.eig(A)
Bt = np.dot(np.dot(Xprime, Vdmd / Sdmd), Y)
omega = np.log(eigvals) / dt
VandermondeT = make_VandermondeT(omega, time - time[0])
Vandermonde = np.transpose(VandermondeT)
q = np.conj(np.diag(np.dot(np.dot(np.dot(
Vandermonde[:, :M_train], Vdmd), np.conj(S)), Y)))
P = np.dot(np.conj(np.transpose(Y)), Y) * np.conj(
np.dot(Vandermonde[:, :M_train], np.conj(
VandermondeT[:M_train, :])))
b = np.dot(np.linalg.inv(P), q)
c = 0.5 * (b + np.conj(b)).real
c = 500 * c / np.max(c)
energy_sort = np.flip(np.argsort(c))
plt.figure()
omega_sizes = c
for i in range(len(c)):
omega_sizes[i] = max(omega_sizes[i], 50)
# plot the frequencies
plt.scatter(omega.imag/(2 * pi * 1e3),
omega.real/(2 * pi * 1e3), s=omega_sizes, c='k')
plt.savefig('Pictures/dmd_freqs.pdf')
for mode in range(r):
Bfield += 0.5 * b[mode] * np.outer(Bt[:, mode],
Vandermonde[mode, M_train:])
Bfield += np.conj(Bfield)
return Bfield.real
| [
"matplotlib.pyplot.savefig",
"numpy.linalg.eig",
"numpy.conj",
"numpy.log",
"numpy.diag",
"numpy.max",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.outer",
"numpy.dot",
"numpy.linalg.inv",
"matplotlib.pyplot.scatter",
"numpy.linalg.svd",
"numpy.shape",
"numpy.transpose"
] | [((1689, 1726), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {'full_matrices': '(False)'}), '(X, full_matrices=False)\n', (1702, 1726), True, 'import numpy as np\n'), ((1738, 1756), 'numpy.transpose', 'np.transpose', (['Vdmd'], {}), '(Vdmd)\n', (1750, 1756), True, 'import numpy as np\n'), ((1834, 1847), 'numpy.diag', 'np.diag', (['Sdmd'], {}), '(Sdmd)\n', (1841, 1847), True, 'import numpy as np\n'), ((1929, 1945), 'numpy.linalg.eig', 'np.linalg.eig', (['A'], {}), '(A)\n', (1942, 1945), True, 'import numpy as np\n'), ((2105, 2131), 'numpy.transpose', 'np.transpose', (['VandermondeT'], {}), '(VandermondeT)\n', (2117, 2131), True, 'import numpy as np\n'), ((2550, 2562), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2560, 2562), True, 'from matplotlib import pyplot as plt\n'), ((2691, 2792), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(omega.imag / (2 * pi * 1000.0))', '(omega.real / (2 * pi * 1000.0))'], {'s': 'omega_sizes', 'c': '"""k"""'}), "(omega.imag / (2 * pi * 1000.0), omega.real / (2 * pi * 1000.0),\n s=omega_sizes, c='k')\n", (2702, 2792), True, 'from matplotlib import pyplot as plt\n'), ((2799, 2836), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Pictures/dmd_freqs.pdf"""'], {}), "('Pictures/dmd_freqs.pdf')\n", (2810, 2836), True, 'from matplotlib import pyplot as plt\n'), ((3005, 3020), 'numpy.conj', 'np.conj', (['Bfield'], {}), '(Bfield)\n', (3012, 3020), True, 'import numpy as np\n'), ((641, 662), 'numpy.outer', 'np.outer', (['time', 'omega'], {}), '(time, omega)\n', (649, 662), True, 'import numpy as np\n'), ((1962, 1989), 'numpy.dot', 'np.dot', (['Xprime', '(Vdmd / Sdmd)'], {}), '(Xprime, Vdmd / Sdmd)\n', (1968, 1989), True, 'import numpy as np\n'), ((2006, 2021), 'numpy.log', 'np.log', (['eigvals'], {}), '(eigvals)\n', (2012, 2021), True, 'import numpy as np\n'), ((2420, 2436), 'numpy.linalg.inv', 'np.linalg.inv', (['P'], {}), '(P)\n', (2433, 2436), True, 'import numpy as np\n'), ((2495, 2504), 'numpy.max', 'np.max', (['c'], 
{}), '(c)\n', (2501, 2504), True, 'import numpy as np\n'), ((2531, 2544), 'numpy.argsort', 'np.argsort', (['c'], {}), '(c)\n', (2541, 2544), True, 'import numpy as np\n'), ((1870, 1888), 'numpy.transpose', 'np.transpose', (['Udmd'], {}), '(Udmd)\n', (1882, 1888), True, 'import numpy as np\n'), ((2897, 2947), 'numpy.outer', 'np.outer', (['Bt[:, mode]', 'Vandermonde[mode, M_train:]'], {}), '(Bt[:, mode], Vandermonde[mode, M_train:])\n', (2905, 2947), True, 'import numpy as np\n'), ((1489, 1503), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (1497, 1503), True, 'import numpy as np\n'), ((1535, 1549), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (1543, 1549), True, 'import numpy as np\n'), ((2268, 2283), 'numpy.transpose', 'np.transpose', (['Y'], {}), '(Y)\n', (2280, 2283), True, 'import numpy as np\n'), ((2348, 2382), 'numpy.conj', 'np.conj', (['VandermondeT[:M_train, :]'], {}), '(VandermondeT[:M_train, :])\n', (2355, 2382), True, 'import numpy as np\n'), ((2460, 2470), 'numpy.conj', 'np.conj', (['b'], {}), '(b)\n', (2467, 2470), True, 'import numpy as np\n'), ((2170, 2208), 'numpy.dot', 'np.dot', (['Vandermonde[:, :M_train]', 'Vdmd'], {}), '(Vandermonde[:, :M_train], Vdmd)\n', (2176, 2208), True, 'import numpy as np\n'), ((2227, 2237), 'numpy.conj', 'np.conj', (['S'], {}), '(S)\n', (2234, 2237), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 16:05:40 2020
@author: Engel
"""
import numpy as np
import matplotlib.pyplot as plt
#Creamos una funcion para coseno
g = lambda x: np.cos(x) + 2
#Creamos una fucnion exponencial
h = lambda x: np.exp(x)
#Creamos una funcion seno
f = lambda x: np.sin(x)+1
#Para definir el rango y numero de puntos
x=np.arange(0,10,0.1)
#Vamos a hacer una matriz cuadrada de dos filas y dos columnas,
#Para mostrar las fucniones en cuadros distintos
plt.subplot(2,2,1)
plt.plot(x,g(x))
plt.subplot(2,2,2)
plt.plot(x,h(x))
plt.subplot(2,2,3)
plt.plot(x,f(x))
plt.savefig('Graficas.png')
| [
"matplotlib.pyplot.savefig",
"numpy.exp",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.subplot",
"numpy.arange"
] | [((361, 382), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.1)'], {}), '(0, 10, 0.1)\n', (370, 382), True, 'import numpy as np\n'), ((498, 518), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (509, 518), True, 'import matplotlib.pyplot as plt\n'), ((540, 560), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (551, 560), True, 'import matplotlib.pyplot as plt\n'), ((578, 598), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (589, 598), True, 'import matplotlib.pyplot as plt\n'), ((617, 644), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Graficas.png"""'], {}), "('Graficas.png')\n", (628, 644), True, 'import matplotlib.pyplot as plt\n'), ((249, 258), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (255, 258), True, 'import numpy as np\n'), ((187, 196), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (193, 196), True, 'import numpy as np\n'), ((301, 310), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (307, 310), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support, classification_report
from seqeval.metrics import (accuracy_score as seqeval_accuracy_score,
classification_report as seqeval_classification_report,
f1_score as seqeval_f1_score,
precision_score as seqeval_precision_score,
recall_score as seqeval_recall_score)
def sk_classification_metrics(pred, pred_labs=False):
result = classification_metrics(pred)
labels = pred.label_ids
preds = pred.predictions if pred_labs else pred.predictions.argmax(-1)
result['classification_report'] = classification_report(labels, preds, digits=4)
return result
def classification_metrics(pred, pred_labs=False):
labels = pred.label_ids
preds = pred.predictions if pred_labs else pred.predictions.argmax(-1)
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, average="macro")
precision_micro, recall_micro, f1_micro, _ = precision_recall_fscore_support(labels, preds, average="micro")
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1_micro': f1_micro,
'precision_micro': precision_micro,
'recall_micro': recall_micro,
'f1_macro': f1_macro,
'precision_macro': precision_macro,
'recall_macro': recall_macro,
'nb_samples': len(labels)
}
def seqeval_classification_metrics(pred):
from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report
labels = pred.label_ids
preds = pred.predictions
precision_macro = precision_score(labels, preds, average='macro')
recall_macro = recall_score(labels, preds, average='macro')
f1_macro = f1_score(labels, preds, average='macro')
precision_micro = precision_score(labels, preds, average='micro')
recall_micro = recall_score(labels, preds, average='micro')
f1_micro = f1_score(labels, preds, average='micro')
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1_micro': f1_micro,
'precision_micro': precision_micro,
'recall_micro': recall_micro,
'f1_macro': f1_macro,
'precision_macro': precision_macro,
'recall_macro': recall_macro,
'nb_samples': len(labels),
'classification_report': classification_report(labels, preds, digits=4)
}
def _compute_best_threshold(targets, probs):
f1s = []
for threshold in range(1,100):
preds = (probs > (threshold / 100)).astype(int)
f1s.append((
threshold/100,
f1_score(targets,
preds,
average='binary')
))
f1s_df = pd.DataFrame(f1s).sort_values(1,ascending=False).reset_index(drop=True)
f1s_df.columns = ['threshold_label','f1_label']
return f1s_df.threshold_label[0], f1s_df.f1_label[0]
def _select_best_thresholds(targets, probs, n_labels):
best_thresholds = dict()
for i in range(0, n_labels):
best_thresholds[f'label-{i}'] = _compute_best_threshold(targets[:,i], probs[:,i])
return best_thresholds
def sigmoid(x):
return 1/(1 + np.exp(-x))
def multilabel_classification_metrics(pred, n_labels):
labels = pred.label_ids
logits = pred.predictions
probs = sigmoid(logits)
best_threshold_mapping = _select_best_thresholds(labels, probs, n_labels)
best_thresholds = [ v[0] for k,v in best_threshold_mapping.items() ]
preds = np.array(probs > best_thresholds)
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, average='macro')
precision_micro, recall_micro, f1_micro, _ = precision_recall_fscore_support(labels, preds, average='micro')
acc = accuracy_score(labels, preds)
accuracy_micro = (labels == preds).mean()
return {
'accuracy': acc,
'accuracy_micro': accuracy_micro,
'f1_micro': f1_micro,
'precision_micro': precision_micro,
'recall_micro': recall_micro,
'f1_macro': f1_macro,
'precision_macro': precision_macro,
'recall_macro': recall_macro,
'nb_samples': len(labels)
}
| [
"seqeval.metrics.accuracy_score",
"seqeval.metrics.classification_report",
"seqeval.metrics.precision_score",
"seqeval.metrics.f1_score",
"sklearn.metrics.precision_recall_fscore_support",
"seqeval.metrics.recall_score",
"numpy.array",
"numpy.exp",
"pandas.DataFrame"
] | [((745, 791), 'seqeval.metrics.classification_report', 'classification_report', (['labels', 'preds'], {'digits': '(4)'}), '(labels, preds, digits=4)\n', (766, 791), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((1015, 1078), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['labels', 'preds'], {'average': '"""macro"""'}), "(labels, preds, average='macro')\n", (1046, 1078), False, 'from sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support, classification_report\n'), ((1128, 1191), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['labels', 'preds'], {'average': '"""micro"""'}), "(labels, preds, average='micro')\n", (1159, 1191), False, 'from sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support, classification_report\n'), ((1202, 1231), 'seqeval.metrics.accuracy_score', 'accuracy_score', (['labels', 'preds'], {}), '(labels, preds)\n', (1216, 1231), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((1768, 1815), 'seqeval.metrics.precision_score', 'precision_score', (['labels', 'preds'], {'average': '"""macro"""'}), "(labels, preds, average='macro')\n", (1783, 1815), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((1835, 1879), 'seqeval.metrics.recall_score', 'recall_score', (['labels', 'preds'], {'average': '"""macro"""'}), "(labels, preds, average='macro')\n", (1847, 1879), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((1895, 1935), 'seqeval.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'average': '"""macro"""'}), "(labels, preds, average='macro')\n", (1903, 1935), False, 'from seqeval.metrics import accuracy_score, precision_score, 
recall_score, f1_score, classification_report\n'), ((1958, 2005), 'seqeval.metrics.precision_score', 'precision_score', (['labels', 'preds'], {'average': '"""micro"""'}), "(labels, preds, average='micro')\n", (1973, 2005), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((2025, 2069), 'seqeval.metrics.recall_score', 'recall_score', (['labels', 'preds'], {'average': '"""micro"""'}), "(labels, preds, average='micro')\n", (2037, 2069), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((2085, 2125), 'seqeval.metrics.f1_score', 'f1_score', (['labels', 'preds'], {'average': '"""micro"""'}), "(labels, preds, average='micro')\n", (2093, 2125), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((2136, 2165), 'seqeval.metrics.accuracy_score', 'accuracy_score', (['labels', 'preds'], {}), '(labels, preds)\n', (2150, 2165), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((3660, 3693), 'numpy.array', 'np.array', (['(probs > best_thresholds)'], {}), '(probs > best_thresholds)\n', (3668, 3693), True, 'import numpy as np\n'), ((3743, 3806), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['labels', 'preds'], {'average': '"""macro"""'}), "(labels, preds, average='macro')\n", (3774, 3806), False, 'from sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support, classification_report\n'), ((3856, 3919), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['labels', 'preds'], {'average': '"""micro"""'}), "(labels, preds, average='micro')\n", (3887, 3919), False, 'from sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support, classification_report\n'), ((3930, 3959), 
'seqeval.metrics.accuracy_score', 'accuracy_score', (['labels', 'preds'], {}), '(labels, preds)\n', (3944, 3959), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((2496, 2542), 'seqeval.metrics.classification_report', 'classification_report', (['labels', 'preds'], {'digits': '(4)'}), '(labels, preds, digits=4)\n', (2517, 2542), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((3339, 3349), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3345, 3349), True, 'import numpy as np\n'), ((2759, 2801), 'seqeval.metrics.f1_score', 'f1_score', (['targets', 'preds'], {'average': '"""binary"""'}), "(targets, preds, average='binary')\n", (2767, 2801), False, 'from seqeval.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report\n'), ((2869, 2886), 'pandas.DataFrame', 'pd.DataFrame', (['f1s'], {}), '(f1s)\n', (2881, 2886), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 31 18:43:23 2018
@author: <NAME>
@email : <EMAIL>
"""
import pyorb_core.pde_problem.fom_problem as fp
import numpy as np
def ns_theta_f( _param, _q = 0 ):
return 1.0
def ns_full_theta_f( _param ):
return np.array([1.0])
class ns_theta_fs( ):
def __init__( self ):
return
def ns_theta_f( self, _param, _q ):
n_deim = self.M_deim.get_num_basis( )
if _q >= 0 and _q < n_deim:
return self.M_deim.compute_deim_theta_coefficients_q( _param, _q )
# diffusion affine component
if _q == n_deim:
return 0.01 * _param[0]
if _q == n_deim + 1:
return 1.0
def ns_full_theta_f( self, _param ):
n_deim = self.M_deim.get_num_basis( )
theta_f = np.zeros( (n_deim + 2, 1) )
theta_f[0:n_deim] = self.M_deim.compute_deim_theta_coefficients( _param )
theta_f[n_deim] = 0.01 * _param[0]
theta_f[n_deim+1] = 1.0
return theta_f
def set_deim( self, _deim ):
self.M_deim = _deim
M_deim = None
class ns_theta_As( ):
def __init__( self ):
return
def ns_theta_a( self, _param, _q ):
n_m_deim = self.M_mdeim.get_num_mdeim_basis( )
if _q >= 0 and _q < n_m_deim:
return self.M_mdeim.compute_theta_coefficients_q( _param, _q )
if _q == n_m_deim:
return 1.0
# diffusion affine component
if _q == n_m_deim + 1:
return 0.01 * _param[0]
def ns_full_theta_a( self, _param ):
n_m_deim = self.M_mdeim.get_num_mdeim_basis( )
theta_a = np.zeros( n_m_deim + 2 )
theta_a[0:n_m_deim] = self.M_mdeim.compute_theta_coefficients( _param )
# print(theta_a)
theta_a[n_m_deim] = 1.0
theta_a[n_m_deim+1] = 0.01 * _param[0]
return theta_a
def set_mdeim( self, _mdeim ):
self.M_mdeim = _mdeim
M_mdeim = None
class navier_stokes_problem( fp.fom_problem ):
def __init__( self, _parameter_handler, _external_engine = None, _fom_specifics = None ):
fp.fom_problem.__init__( self, _parameter_handler, _external_engine, _fom_specifics )
return
def set_deim( self, _deim ):
self.M_ns_theta_fs.set_deim( _deim )
def set_mdeim( self, _mdeim ):
self.M_ns_theta_As.set_mdeim( _mdeim )
def define_theta_functions( self ):
self.M_theta_a = self.M_ns_theta_As.ns_theta_a
self.M_full_theta_a = self.M_ns_theta_As.ns_full_theta_a
self.M_theta_f = ns_theta_f
self.M_full_theta_f = self.M_ns_theta_fs.ns_full_theta_f
return
def build_ns_rb_jacobian( self, _uN, _rb_affine_decomposition, _theta_a ):
ns_jac = np.zeros( _rb_affine_decomposition.get_rb_affine_matrix(0).shape )
for iQa in range( len(_theta_a) ):
ns_jac = ns_jac + _theta_a[iQa] * _rb_affine_decomposition.get_rb_affine_matrix( iQa )
for iQn in range( len(_uN) ):
ns_jac = ns_jac + _uN[iQn] * _rb_affine_decomposition.get_rb_affine_matrix( iQn + len(_theta_a) )
# the 2nd time is to include the flipped terms coming from the derivation
for iQn in range( len(_uN) ):
ns_jac = ns_jac + _uN[iQn] * _rb_affine_decomposition.get_rb_affine_matrix( iQn + len(_theta_a) + len(_uN) )
return ns_jac
def build_ns_rb_matrix( self, _uN, _rb_affine_decomposition, _theta_a ):
ns_rb_mat = np.zeros( _rb_affine_decomposition.get_rb_affine_matrix(0).shape )
for iQa in range( len(_theta_a) ):
ns_rb_mat = ns_rb_mat + _theta_a[iQa] * _rb_affine_decomposition.get_rb_affine_matrix( iQa )
for iQn in range( len(_uN) ):
ns_rb_mat = ns_rb_mat + _uN[iQn] * _rb_affine_decomposition.get_rb_affine_matrix( iQn + len(_theta_a) )
return ns_rb_mat
def build_ns_rb_vector( self, _theta_f, _rb_affine_decomposition ):
ns_rb_rhs = np.zeros( _rb_affine_decomposition.get_rb_affine_vector(0).shape )
for iQf in range( len(_theta_f) ):
ns_rb_rhs = ns_rb_rhs + _theta_f[iQf] * _rb_affine_decomposition.get_rb_affine_vector( iQf )
return ns_rb_rhs
def rb_residual( self, _uN, _rb_affine_decomposition, _theta_f, _theta_a ):
ns_rb_mat = self.build_ns_rb_matrix( _uN, _rb_affine_decomposition, _theta_a )
ns_rb_rhs = self.build_ns_rb_vector( _theta_f, _rb_affine_decomposition )
res = ns_rb_mat.dot( _uN ) - ns_rb_rhs
return res
def solve_rb_ns_problem( self, _param, _affine_decomposition ):
import time
start = time.time()
th_f = self.get_full_theta_f( _param )
end = time.time()
print( 'Time to compute theta F' )
print(end - start)
start = time.time()
th_a = self.get_full_theta_a( _param )
end = time.time()
print( 'Time to compute theta A' )
print(end - start)
def nsns_fixed_residual( _un ):
return self.rb_residual( _un, _affine_decomposition, th_f, th_a )
def nsns_fixed_jacobian( _un ):
return self.build_ns_rb_jacobian( _un, _affine_decomposition, th_a )
import pyorb_core.algorithms.newton_solver as new_sol
un = new_sol.newton_solver( nsns_fixed_jacobian, nsns_fixed_residual, np.zeros( _affine_decomposition.get_rb_affine_matrix(0).shape[0] ), \
_tol=1.e-14, _n_max=20 )
return un
M_ns_theta_As = ns_theta_As( )
M_ns_theta_fs = ns_theta_fs( )
| [
"numpy.array",
"numpy.zeros",
"time.time",
"pyorb_core.pde_problem.fom_problem.fom_problem.__init__"
] | [((286, 301), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (294, 301), True, 'import numpy as np\n'), ((859, 884), 'numpy.zeros', 'np.zeros', (['(n_deim + 2, 1)'], {}), '((n_deim + 2, 1))\n', (867, 884), True, 'import numpy as np\n'), ((1730, 1752), 'numpy.zeros', 'np.zeros', (['(n_m_deim + 2)'], {}), '(n_m_deim + 2)\n', (1738, 1752), True, 'import numpy as np\n'), ((2201, 2288), 'pyorb_core.pde_problem.fom_problem.fom_problem.__init__', 'fp.fom_problem.__init__', (['self', '_parameter_handler', '_external_engine', '_fom_specifics'], {}), '(self, _parameter_handler, _external_engine,\n _fom_specifics)\n', (2224, 2288), True, 'import pyorb_core.pde_problem.fom_problem as fp\n'), ((4818, 4829), 'time.time', 'time.time', ([], {}), '()\n', (4827, 4829), False, 'import time\n'), ((4891, 4902), 'time.time', 'time.time', ([], {}), '()\n', (4900, 4902), False, 'import time\n'), ((4990, 5001), 'time.time', 'time.time', ([], {}), '()\n', (4999, 5001), False, 'import time\n'), ((5063, 5074), 'time.time', 'time.time', ([], {}), '()\n', (5072, 5074), False, 'import time\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/utils.ipynb (unless otherwise specified).
__all__ = ['first_not_na']
# Internal Cell
from math import sqrt
from typing import Optional, Tuple
import numpy as np
from numba import njit # type: ignore
# Internal Cell
@njit
def _validate_rolling_sizes(window_size: int,
min_samples: Optional[int] = None) -> Tuple[int,int]:
# have to split the following if because of numba
if min_samples is None:
min_samples = window_size
if min_samples > window_size:
min_samples = window_size
return window_size, min_samples
@njit
def _gt(x: float, y: float) -> float:
return x - y
@njit
def _lt(x: float, y: float) -> float:
return -_gt(x, y)
# Cell
@njit
def first_not_na(input_array: np.ndarray) -> int:
"""Returns the index of the first non-na value in the array."""
for index, element in enumerate(input_array):
if not np.isnan(element):
return index
return input_array.size | [
"numpy.isnan"
] | [((948, 965), 'numpy.isnan', 'np.isnan', (['element'], {}), '(element)\n', (956, 965), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import gen_batches, resample
from sklearn.model_selection import train_test_split
from typing import Optional, Tuple
from sklearn.utils import shuffle
class Ensemble:
def __init__(self):
pass
def _fit(self, X: np.ndarray) -> BaseEstimator:
pass
def _fit_ensemble(self, X: np.ndarray, n_models: int = 10) -> float:
raise NotImplemented
class Batch:
"""Abstract class to be used to estimate scores in batches.
Parameters
----------
batch_size : int, default = 1_000
batch size
min_batch_size : int, default = 100
the minimum batch size to be used for the indices generator
shuffle : bool, default = True
option to shuffle the data before doing batches
random_state : int, default = None
the random seed when doing the shuffling if option chosen
summary : str, default = 'mean'
the way to summarize the scores {'mean', 'median'}
Attributes
----------
raw_scores : np.ndarray
the raw batchsize scores
batch_score : float
the final score after the summary stat (e.g. mean)
"""
def __init__(
self,
batch_size: int = 1_000,
min_batch_size: int = 100,
shuffle: bool = True,
random_state: int = 123,
summary: str = "mean",
):
self.batch_size = batch_size
self.min_batch_size = min_batch_size
self.random_state = random_state
self.shuffle = shuffle
self.summary = summary
def _fit(self, X: np.ndarray, y: Optional[np.ndarray] = None):
"""
IT method to fit to batches. Must be implemented by the user.
"""
pass
def _fit_batches(self, X: np.ndarray, Y: Optional[np.ndarray] = None) -> float:
"""
Fits models to inherited class
Parameters
----------
X : np.ndarray
The data to be fit.
y : np.ndarray
The second dataset to be fit
Returns
-------
batch_score : float
the score after the summary
"""
it_measure = list()
# Shuffle dataset
if self.shuffle:
if Y is not None:
X, Y = shuffle(X, Y, random_state=self.random_state)
else:
X = shuffle(X, random_state=self.random_state)
# batch scores
for idx in gen_batches(X.shape[0], self.batch_size, self.min_batch_size):
if Y is not None:
it_measure.append(self._fit(X[idx], Y[idx]))
else:
it_measure.append(self._fit(X[idx]))
# save raw scores
self.raw_scores = it_measure
# return summary score
if self.summary == "mean":
self.batch_score = np.mean(it_measure)
elif self.summary == "median":
self.batch_score = np.median(it_measure)
else:
raise ValueError("Unrecognized summarizer: {}".format(self.summary))
return self.batch_score
class BootStrap:
def __init__(self, n_iterations=100):
self.n_iterations = n_iterations
def _fit(self, X: np.ndarray) -> BaseEstimator:
pass
def run_bootstrap(
self,
X: np.ndarray,
y: Optional[np.ndarray] = None,
sample_size: Optional[int] = 1_000,
) -> None:
raw_scores = list()
if sample_size is not None:
n_samples = min(X.shape[0], sample_size)
else:
n_samples = X.shape[0]
for i in range(self.n_iterations):
if y is None:
X_sample = resample(X, n_samples=sample_size)
raw_scores.append(self._fit(X_sample))
else:
X_sample, Y_sample = resample(X, y, n_samples=sample_size)
raw_scores.append(self._fit(X_sample, Y_sample))
self.raw_scores = raw_scores
return np.mean(raw_scores)
def ci(self, p: float) -> Tuple[float, float]:
"""
Return 2-sided symmetric confidence interval specified
by p.
"""
u_pval = (1 + p) / 2.0
l_pval = 1 - u_pval
l_indx = int(np.floor(self.n_iterations * l_pval))
u_indx = int(np.floor(self.n_iterations * u_pval))
return self.raw_scores[l_indx], self.raw_scores[u_indx]
| [
"sklearn.utils.gen_batches",
"numpy.mean",
"numpy.median",
"sklearn.utils.shuffle",
"numpy.floor",
"sklearn.utils.resample"
] | [((2499, 2560), 'sklearn.utils.gen_batches', 'gen_batches', (['X.shape[0]', 'self.batch_size', 'self.min_batch_size'], {}), '(X.shape[0], self.batch_size, self.min_batch_size)\n', (2510, 2560), False, 'from sklearn.utils import gen_batches, resample\n'), ((4020, 4039), 'numpy.mean', 'np.mean', (['raw_scores'], {}), '(raw_scores)\n', (4027, 4039), True, 'import numpy as np\n'), ((2886, 2905), 'numpy.mean', 'np.mean', (['it_measure'], {}), '(it_measure)\n', (2893, 2905), True, 'import numpy as np\n'), ((4273, 4309), 'numpy.floor', 'np.floor', (['(self.n_iterations * l_pval)'], {}), '(self.n_iterations * l_pval)\n', (4281, 4309), True, 'import numpy as np\n'), ((4332, 4368), 'numpy.floor', 'np.floor', (['(self.n_iterations * u_pval)'], {}), '(self.n_iterations * u_pval)\n', (4340, 4368), True, 'import numpy as np\n'), ((2329, 2374), 'sklearn.utils.shuffle', 'shuffle', (['X', 'Y'], {'random_state': 'self.random_state'}), '(X, Y, random_state=self.random_state)\n', (2336, 2374), False, 'from sklearn.utils import shuffle\n'), ((2413, 2455), 'sklearn.utils.shuffle', 'shuffle', (['X'], {'random_state': 'self.random_state'}), '(X, random_state=self.random_state)\n', (2420, 2455), False, 'from sklearn.utils import shuffle\n'), ((2977, 2998), 'numpy.median', 'np.median', (['it_measure'], {}), '(it_measure)\n', (2986, 2998), True, 'import numpy as np\n'), ((3719, 3753), 'sklearn.utils.resample', 'resample', (['X'], {'n_samples': 'sample_size'}), '(X, n_samples=sample_size)\n', (3727, 3753), False, 'from sklearn.utils import gen_batches, resample\n'), ((3864, 3901), 'sklearn.utils.resample', 'resample', (['X', 'y'], {'n_samples': 'sample_size'}), '(X, y, n_samples=sample_size)\n', (3872, 3901), False, 'from sklearn.utils import gen_batches, resample\n')] |
"""
Unit testing for the measure module.
"""
import numpy as np
import pytest
import molecool
def test_calculate_distance():
r1 = np.array([0, 0, 0])
r2 = np.array([0.1, 0, 0])
expected_distance = 0.1
calculate_distance = molecool.calculate_distance(r1, r2)
assert expected_distance == calculate_distance
def test_calculate_angle():
r1 = np.array([0, 0, -1])
r2 = np.array([0, 0, 0])
r3 = np.array([1, 0, 0])
expected_angle = 90
calculate_angle = molecool.calculate_angle(r1, r2, r3, degrees=True)
assert expected_angle == calculate_angle
@pytest.mark.parametrize("p1, p2, p3, expected_angle",[
(np.array([np.sqrt(2)/2, np.sqrt(2)/2, 0]), np.array([0, 0, 0]), np.array([1, 0, 0]), 45),
(np.array([0, 0, -1]), np.array([0, 1, 0]), np.array([1, 0, 0]), 60),
(np.array([np.sqrt(3)/2, 1./2., 0]), np.array([0, 0, 0]), np.array([1, 0, 0]), 30)
])
def test_calculate_angle_many(p1, p2, p3, expected_angle):
calculated_angle = molecool.calculate_angle(p1, p2, p3, degrees=True)
assert calculated_angle == pytest.approx(expected_angle)
| [
"molecool.calculate_distance",
"pytest.approx",
"numpy.sqrt",
"molecool.calculate_angle",
"numpy.array"
] | [((137, 156), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (145, 156), True, 'import numpy as np\n'), ((166, 187), 'numpy.array', 'np.array', (['[0.1, 0, 0]'], {}), '([0.1, 0, 0])\n', (174, 187), True, 'import numpy as np\n'), ((243, 278), 'molecool.calculate_distance', 'molecool.calculate_distance', (['r1', 'r2'], {}), '(r1, r2)\n', (270, 278), False, 'import molecool\n'), ((370, 390), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (378, 390), True, 'import numpy as np\n'), ((400, 419), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (408, 419), True, 'import numpy as np\n'), ((429, 448), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (437, 448), True, 'import numpy as np\n'), ((497, 547), 'molecool.calculate_angle', 'molecool.calculate_angle', (['r1', 'r2', 'r3'], {'degrees': '(True)'}), '(r1, r2, r3, degrees=True)\n', (521, 547), False, 'import molecool\n'), ((1006, 1056), 'molecool.calculate_angle', 'molecool.calculate_angle', (['p1', 'p2', 'p3'], {'degrees': '(True)'}), '(p1, p2, p3, degrees=True)\n', (1030, 1056), False, 'import molecool\n'), ((1089, 1118), 'pytest.approx', 'pytest.approx', (['expected_angle'], {}), '(expected_angle)\n', (1102, 1118), False, 'import pytest\n'), ((703, 722), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (711, 722), True, 'import numpy as np\n'), ((724, 743), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (732, 743), True, 'import numpy as np\n'), ((759, 779), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (767, 779), True, 'import numpy as np\n'), ((781, 800), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (789, 800), True, 'import numpy as np\n'), ((802, 821), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (810, 821), True, 'import numpy as np\n'), ((873, 892), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (881, 892), True, 'import numpy 
as np\n'), ((894, 913), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (902, 913), True, 'import numpy as np\n'), ((670, 680), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (677, 680), True, 'import numpy as np\n'), ((684, 694), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (691, 694), True, 'import numpy as np\n'), ((847, 857), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (854, 857), True, 'import numpy as np\n')] |
import numpy as np
import cudamat as cm
def matrix_factorization_clustering(X_aux, k, l, norm=False, num_iters=100):
    """Co-cluster rows and columns of X_aux via non-negative matrix
    tri-factorization X ~ U S V^T, computed on the GPU with cudamat.

    Parameters
    ----------
    X_aux : np.ndarray, shape (m, n)
        Matrix to factorize.
    k : int
        Number of row clusters (columns of U).
    l : int
        Number of column clusters (columns of V).
    norm : bool
        Currently unused; input normalization is disabled (see commented code).
    num_iters : int
        Maximum number of multiplicative-update iterations.

    Returns
    -------
    tuple
        (U_norm, S_best, V_norm, rows_ind, cols_ind, error_best) where
        rows_ind / cols_ind are the argmax cluster assignments per row /
        column and error_best is the best reconstruction error seen.
    """
    cm.cublas_init()
    m, n = X_aux.shape
    U = cm.CUDAMatrix(np.random.rand(m, k))
    S = cm.CUDAMatrix(np.random.rand(k, l))
    V = cm.CUDAMatrix(np.random.rand(n, l))
    X = cm.CUDAMatrix(X_aux)
    # if norm:
    #     X = Normalizer().fit_transform(X)
    # Pre-allocate every GPU intermediate once so the update loop performs
    # no per-iteration allocation.
    XV = cm.CUDAMatrix(np.random.rand(m, l))
    XVSt = cm.CUDAMatrix(np.random.rand(m, k))
    US = cm.CUDAMatrix(np.random.rand(m, l))
    USVt = cm.CUDAMatrix(np.random.rand(m, n))
    USVtXt = cm.CUDAMatrix(np.random.rand(m, m))
    USVtXtU = cm.CUDAMatrix(np.random.rand(m, k))
    U_aux = cm.CUDAMatrix(np.random.rand(m, k))
    XtUS = cm.CUDAMatrix(np.random.rand(m, l))
    VSt = cm.CUDAMatrix(np.random.rand(n, k))
    VStUt = cm.CUDAMatrix(np.random.rand(n, m))
    UtX = cm.CUDAMatrix(np.random.rand(k, n))
    VStUtXV = cm.CUDAMatrix(np.random.rand(n, l))
    V_aux = cm.CUDAMatrix(np.random.rand(n, l))
    UtXV = cm.CUDAMatrix(np.random.rand(k, l))
    UtUS = cm.CUDAMatrix(np.random.rand(k, l))
    UtUSVt = cm.CUDAMatrix(np.random.rand(k, n))
    UtUSVtV = cm.CUDAMatrix(np.random.rand(k, l))
    S_aux = cm.CUDAMatrix(np.random.rand(k, l))
    error_best = np.inf
    error = np.inf
    for i in range(num_iters):
        # Multiplicative update for U: U <- U * (X V S^T) / (U S V^T X^T U).
        cm.dot(X, V, target=XV)
        cm.dot(XV, S.T, target=XVSt)
        if i == 0:  # BUGFIX: was `if i is 0` — identity test on an int literal.
            cm.dot(U, S, target=US)
            cm.dot(US, V.T, target=USVt)
        cm.dot(USVt, X.T, target=USVtXt)
        cm.dot(USVtXt, U, target=USVtXtU)
        cm.divide(XVSt, USVtXtU, U_aux)
        cm.mult(U, U_aux, U)
        # Multiplicative update for V.
        cm.dot(U, S, target=US)
        cm.dot(X.T, US, target=XtUS)
        cm.dot(V, S.T, target=VSt)
        cm.dot(VSt, U.T, target=VStUt)
        cm.dot(VStUt, XV, target=VStUtXV)
        cm.divide(XtUS, VStUtXV, target=V_aux)
        cm.mult(V, V_aux, V)
        # Multiplicative update for S.
        cm.dot(U.T, X, target=UtX)
        cm.dot(UtX, V, target=UtXV)
        cm.dot(U.T, US, target=UtUS)
        cm.dot(UtUS, V.T, UtUSVt)
        cm.dot(UtUSVt, V, target=UtUSVtV)
        cm.divide(UtXV, UtUSVtV, target=S_aux)
        cm.mult(S, S_aux, target=S)
        error_ant = error
        # Reconstruction error ||X - U S V^T||^2; USVt carries over into the
        # next iteration (hence the i == 0 special case above).
        cm.dot(U, S, target=US)
        cm.dot(US, V.T, target=USVt)
        error = cm.sum(cm.pow(cm.subtract(X, USVt), 2), axis=0)
        # NOTE(review): cm.sum(..., axis=0) returns a CUDAMatrix, so the two
        # comparisons below depend on cudamat's matrix/scalar comparison
        # semantics, and U_best_cm stays unbound if the first comparison is
        # never true — verify against cudamat's behavior.
        if error < error_best:
            U_best_cm = U
            S_best_cm = S
            V_best_cm = V
            error_best = error
        if np.abs(error - error_ant) <= 0.000001:
            break
    U_best = U_best_cm.asarray()
    S_best = S_best_cm.asarray()
    V_best = V_best_cm.asarray()
    # Normalize factors with the column sums of U and V.
    Du = np.diag(np.ones(m).dot(U_best))
    Dv = np.diag(np.ones(n).dot(V_best))
    U_norm = U_best.dot( np.diag(S_best.dot(Dv).dot(np.ones(l))) )
    V_norm = V_best.dot( np.diag(np.ones(k).dot(Du).dot(S_best)) )
    rows_ind = np.argmax(U_best, axis=1)
    cols_ind = np.argmax(V_best, axis=1)
    cm.shutdown()
    return U_norm, S_best, V_norm, rows_ind, cols_ind, error_best
| [
"cudamat.cublas_init",
"numpy.abs",
"numpy.random.rand",
"cudamat.divide",
"numpy.ones",
"numpy.argmax",
"cudamat.CUDAMatrix",
"cudamat.dot",
"cudamat.subtract",
"cudamat.mult",
"cudamat.shutdown"
] | [((124, 140), 'cudamat.cublas_init', 'cm.cublas_init', ([], {}), '()\n', (138, 140), True, 'import cudamat as cm\n'), ((306, 326), 'cudamat.CUDAMatrix', 'cm.CUDAMatrix', (['X_aux'], {}), '(X_aux)\n', (319, 326), True, 'import cudamat as cm\n'), ((2952, 2977), 'numpy.argmax', 'np.argmax', (['U_best'], {'axis': '(1)'}), '(U_best, axis=1)\n', (2961, 2977), True, 'import numpy as np\n'), ((2993, 3018), 'numpy.argmax', 'np.argmax', (['V_best'], {'axis': '(1)'}), '(V_best, axis=1)\n', (3002, 3018), True, 'import numpy as np\n'), ((3024, 3037), 'cudamat.shutdown', 'cm.shutdown', ([], {}), '()\n', (3035, 3037), True, 'import cudamat as cm\n'), ((187, 207), 'numpy.random.rand', 'np.random.rand', (['m', 'k'], {}), '(m, k)\n', (201, 207), True, 'import numpy as np\n'), ((231, 251), 'numpy.random.rand', 'np.random.rand', (['k', 'l'], {}), '(k, l)\n', (245, 251), True, 'import numpy as np\n'), ((275, 295), 'numpy.random.rand', 'np.random.rand', (['n', 'l'], {}), '(n, l)\n', (289, 295), True, 'import numpy as np\n'), ((411, 431), 'numpy.random.rand', 'np.random.rand', (['m', 'l'], {}), '(m, l)\n', (425, 431), True, 'import numpy as np\n'), ((458, 478), 'numpy.random.rand', 'np.random.rand', (['m', 'k'], {}), '(m, k)\n', (472, 478), True, 'import numpy as np\n'), ((503, 523), 'numpy.random.rand', 'np.random.rand', (['m', 'l'], {}), '(m, l)\n', (517, 523), True, 'import numpy as np\n'), ((550, 570), 'numpy.random.rand', 'np.random.rand', (['m', 'n'], {}), '(m, n)\n', (564, 570), True, 'import numpy as np\n'), ((599, 619), 'numpy.random.rand', 'np.random.rand', (['m', 'm'], {}), '(m, m)\n', (613, 619), True, 'import numpy as np\n'), ((649, 669), 'numpy.random.rand', 'np.random.rand', (['m', 'k'], {}), '(m, k)\n', (663, 669), True, 'import numpy as np\n'), ((697, 717), 'numpy.random.rand', 'np.random.rand', (['m', 'k'], {}), '(m, k)\n', (711, 717), True, 'import numpy as np\n'), ((745, 765), 'numpy.random.rand', 'np.random.rand', (['m', 'l'], {}), '(m, l)\n', (759, 765), True, 
'import numpy as np\n'), ((791, 811), 'numpy.random.rand', 'np.random.rand', (['n', 'k'], {}), '(n, k)\n', (805, 811), True, 'import numpy as np\n'), ((839, 859), 'numpy.random.rand', 'np.random.rand', (['n', 'm'], {}), '(n, m)\n', (853, 859), True, 'import numpy as np\n'), ((885, 905), 'numpy.random.rand', 'np.random.rand', (['k', 'n'], {}), '(k, n)\n', (899, 905), True, 'import numpy as np\n'), ((935, 955), 'numpy.random.rand', 'np.random.rand', (['n', 'l'], {}), '(n, l)\n', (949, 955), True, 'import numpy as np\n'), ((983, 1003), 'numpy.random.rand', 'np.random.rand', (['n', 'l'], {}), '(n, l)\n', (997, 1003), True, 'import numpy as np\n'), ((1031, 1051), 'numpy.random.rand', 'np.random.rand', (['k', 'l'], {}), '(k, l)\n', (1045, 1051), True, 'import numpy as np\n'), ((1078, 1098), 'numpy.random.rand', 'np.random.rand', (['k', 'l'], {}), '(k, l)\n', (1092, 1098), True, 'import numpy as np\n'), ((1127, 1147), 'numpy.random.rand', 'np.random.rand', (['k', 'n'], {}), '(k, n)\n', (1141, 1147), True, 'import numpy as np\n'), ((1177, 1197), 'numpy.random.rand', 'np.random.rand', (['k', 'l'], {}), '(k, l)\n', (1191, 1197), True, 'import numpy as np\n'), ((1225, 1245), 'numpy.random.rand', 'np.random.rand', (['k', 'l'], {}), '(k, l)\n', (1239, 1245), True, 'import numpy as np\n'), ((1351, 1374), 'cudamat.dot', 'cm.dot', (['X', 'V'], {'target': 'XV'}), '(X, V, target=XV)\n', (1357, 1374), True, 'import cudamat as cm\n'), ((1383, 1411), 'cudamat.dot', 'cm.dot', (['XV', 'S.T'], {'target': 'XVSt'}), '(XV, S.T, target=XVSt)\n', (1389, 1411), True, 'import cudamat as cm\n'), ((1517, 1549), 'cudamat.dot', 'cm.dot', (['USVt', 'X.T'], {'target': 'USVtXt'}), '(USVt, X.T, target=USVtXt)\n', (1523, 1549), True, 'import cudamat as cm\n'), ((1558, 1591), 'cudamat.dot', 'cm.dot', (['USVtXt', 'U'], {'target': 'USVtXtU'}), '(USVtXt, U, target=USVtXtU)\n', (1564, 1591), True, 'import cudamat as cm\n'), ((1601, 1632), 'cudamat.divide', 'cm.divide', (['XVSt', 'USVtXtU', 'U_aux'], {}), 
'(XVSt, USVtXtU, U_aux)\n', (1610, 1632), True, 'import cudamat as cm\n'), ((1641, 1661), 'cudamat.mult', 'cm.mult', (['U', 'U_aux', 'U'], {}), '(U, U_aux, U)\n', (1648, 1661), True, 'import cudamat as cm\n'), ((1691, 1714), 'cudamat.dot', 'cm.dot', (['U', 'S'], {'target': 'US'}), '(U, S, target=US)\n', (1697, 1714), True, 'import cudamat as cm\n'), ((1723, 1751), 'cudamat.dot', 'cm.dot', (['X.T', 'US'], {'target': 'XtUS'}), '(X.T, US, target=XtUS)\n', (1729, 1751), True, 'import cudamat as cm\n'), ((1760, 1786), 'cudamat.dot', 'cm.dot', (['V', 'S.T'], {'target': 'VSt'}), '(V, S.T, target=VSt)\n', (1766, 1786), True, 'import cudamat as cm\n'), ((1795, 1825), 'cudamat.dot', 'cm.dot', (['VSt', 'U.T'], {'target': 'VStUt'}), '(VSt, U.T, target=VStUt)\n', (1801, 1825), True, 'import cudamat as cm\n'), ((1834, 1867), 'cudamat.dot', 'cm.dot', (['VStUt', 'XV'], {'target': 'VStUtXV'}), '(VStUt, XV, target=VStUtXV)\n', (1840, 1867), True, 'import cudamat as cm\n'), ((1877, 1915), 'cudamat.divide', 'cm.divide', (['XtUS', 'VStUtXV'], {'target': 'V_aux'}), '(XtUS, VStUtXV, target=V_aux)\n', (1886, 1915), True, 'import cudamat as cm\n'), ((1924, 1944), 'cudamat.mult', 'cm.mult', (['V', 'V_aux', 'V'], {}), '(V, V_aux, V)\n', (1931, 1944), True, 'import cudamat as cm\n'), ((1974, 2000), 'cudamat.dot', 'cm.dot', (['U.T', 'X'], {'target': 'UtX'}), '(U.T, X, target=UtX)\n', (1980, 2000), True, 'import cudamat as cm\n'), ((2009, 2036), 'cudamat.dot', 'cm.dot', (['UtX', 'V'], {'target': 'UtXV'}), '(UtX, V, target=UtXV)\n', (2015, 2036), True, 'import cudamat as cm\n'), ((2046, 2074), 'cudamat.dot', 'cm.dot', (['U.T', 'US'], {'target': 'UtUS'}), '(U.T, US, target=UtUS)\n', (2052, 2074), True, 'import cudamat as cm\n'), ((2083, 2108), 'cudamat.dot', 'cm.dot', (['UtUS', 'V.T', 'UtUSVt'], {}), '(UtUS, V.T, UtUSVt)\n', (2089, 2108), True, 'import cudamat as cm\n'), ((2117, 2150), 'cudamat.dot', 'cm.dot', (['UtUSVt', 'V'], {'target': 'UtUSVtV'}), '(UtUSVt, V, target=UtUSVtV)\n', (2123, 
2150), True, 'import cudamat as cm\n'), ((2160, 2198), 'cudamat.divide', 'cm.divide', (['UtXV', 'UtUSVtV'], {'target': 'S_aux'}), '(UtXV, UtUSVtV, target=S_aux)\n', (2169, 2198), True, 'import cudamat as cm\n'), ((2207, 2234), 'cudamat.mult', 'cm.mult', (['S', 'S_aux'], {'target': 'S'}), '(S, S_aux, target=S)\n', (2214, 2234), True, 'import cudamat as cm\n'), ((2271, 2294), 'cudamat.dot', 'cm.dot', (['U', 'S'], {'target': 'US'}), '(U, S, target=US)\n', (2277, 2294), True, 'import cudamat as cm\n'), ((2303, 2331), 'cudamat.dot', 'cm.dot', (['US', 'V.T'], {'target': 'USVt'}), '(US, V.T, target=USVt)\n', (2309, 2331), True, 'import cudamat as cm\n'), ((1444, 1467), 'cudamat.dot', 'cm.dot', (['U', 'S'], {'target': 'US'}), '(U, S, target=US)\n', (1450, 1467), True, 'import cudamat as cm\n'), ((1480, 1508), 'cudamat.dot', 'cm.dot', (['US', 'V.T'], {'target': 'USVt'}), '(US, V.T, target=USVt)\n', (1486, 1508), True, 'import cudamat as cm\n'), ((2549, 2574), 'numpy.abs', 'np.abs', (['(error - error_ant)'], {}), '(error - error_ant)\n', (2555, 2574), True, 'import numpy as np\n'), ((2362, 2382), 'cudamat.subtract', 'cm.subtract', (['X', 'USVt'], {}), '(X, USVt)\n', (2373, 2382), True, 'import cudamat as cm\n'), ((2736, 2746), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (2743, 2746), True, 'import numpy as np\n'), ((2777, 2787), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2784, 2787), True, 'import numpy as np\n'), ((2854, 2864), 'numpy.ones', 'np.ones', (['l'], {}), '(l)\n', (2861, 2864), True, 'import numpy as np\n'), ((2902, 2912), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (2909, 2912), True, 'import numpy as np\n')] |
from trimesh import TriMesh, Vertex
import pytest
import numpy as np
def test_manifold_creation():
    """A freshly constructed TriMesh holds no faces and no vertices."""
    mesh = TriMesh()
    assert np.array_equal(mesh.get_faces_by_index(), np.array([]))
    assert np.array_equal(mesh.get_vertices(), np.array([]))
def test_trimesh_add_vertices():
    """Vertices added to the mesh come back unchanged from get_vertices."""
    mesh = TriMesh()
    points = [Vertex(1, 0, 0), Vertex(0, 1, 0), Vertex(0, 0, 1)]
    mesh.add_vertices(points)
    assert np.array_equal(mesh.get_vertices(), np.array(points))
def test_trimesh_add_faces():
    """Face index triples added to the mesh are stored verbatim."""
    mesh = TriMesh()
    mesh.add_vertices([Vertex(1, 0, 0), Vertex(0, 1, 0)])
    index_faces = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
    mesh.add_faces_by_index(index_faces)
    assert np.array_equal(mesh.get_faces_by_index(), np.array(index_faces))
def test_trimesh_remove_duplicate_faces():
    """remove_duplicate_faces collapses repeated index triples to one."""
    mesh = TriMesh()
    mesh.add_vertices([Vertex(1, 0, 0)])
    mesh.add_faces_by_index([(0, 0, 0)] * 3)
    mesh.remove_duplicate_faces()
    assert np.array_equal(mesh.get_faces_by_index(), np.array([(0, 0, 0)]))
def test_trimesh_remove_empty_faces():
    """remove_empty_faces drops all of these faces (each triple repeats a
    vertex index), leaving an empty face list."""
    mesh = TriMesh()
    mesh.add_vertices([Vertex(1, 0, 0), Vertex(0, 1, 0)])
    mesh.add_faces_by_index([(1, 0, 0), (0, 1, 0), (0, 0, 1)])
    mesh.remove_empty_faces()
    assert np.array_equal(mesh.get_faces_by_index(), np.array([]))
#Generated mesh saved to "stl/pyramidtest.stl"
import stl
def test_trimesh_pyramid_test():
    """Build a double square pyramid, save it to STL, and check that the
    faces round-trip and the Euler characteristic is 3."""
    mesh = TriMesh()
    corners = [Vertex(1, 0, 0), Vertex(-1, 0, 0), Vertex(0, 1, 0), Vertex(0, -1, 0), Vertex(0, 0, 1)]
    faces = [(0, 2, 3), (1, 2, 3), (0, 2, 4), (1, 2, 4), (0, 3, 4), (1, 3, 4)]
    mesh.add_vertices(corners)
    mesh.add_faces_by_index(faces)
    mesh.trimesh_to_npmesh().save('stl/pyramidtest.stl', mode=stl.Mode.BINARY)
    assert np.array_equal(mesh.get_faces_by_index(), np.array(faces))
    assert mesh.euler_characteristic() == 3
#Generated mesh saved to "stl/cubetest.stl"
def test_trimesh_cube_test():
    """Assemble a cube from six quads, save it to STL, and check that its
    Euler characteristic is 2."""
    mesh = TriMesh()
    v = [Vertex(-1, 0, 0), Vertex(1, 0, 0), Vertex(0, 1, 0), Vertex(0, -1, 0),
         Vertex(1, 0, 1), Vertex(-1, 0, 1), Vertex(0, 1, 1), Vertex(0, -1, 1)]
    # Bottom and top faces.
    mesh.add_quad(v[0:4])
    mesh.add_quad(v[4:])
    # Side faces, one quad per vertical wall.
    for a, b, c, d in ((0, 2, 5, 6), (0, 3, 5, 7), (1, 3, 4, 7), (1, 2, 4, 6)):
        mesh.add_quad((v[a], v[b], v[c], v[d]))
    mesh.trimesh_to_npmesh().save('stl/cubetest.stl', mode=stl.Mode.BINARY)
    assert mesh.euler_characteristic() == 2
def test_trimesh_merge():
    """Merging a pyramid mesh with an open-topped cube gives a 'house' mesh
    with Euler characteristic 5."""
    # Square pyramid sitting at z = 1..2.
    pyramid = TriMesh()
    roof = [Vertex(1, 0, 1), Vertex(-1, 0, 1), Vertex(0, 1, 1), Vertex(0, -1, 1), Vertex(0, 0, 2)]
    pyramid.add_vertices(roof)
    pyramid.add_faces_by_index([(0, 2, 4), (1, 2, 4), (0, 3, 4), (1, 3, 4)])
    # Cube at z = 0..1 with no top face (the pyramid covers it).
    cube = TriMesh()
    v = [Vertex(-1, 0, 0), Vertex(1, 0, 0), Vertex(0, 1, 0), Vertex(0, -1, 0),
         Vertex(1, 0, 1), Vertex(-1, 0, 1), Vertex(0, 1, 1), Vertex(0, -1, 1)]
    cube.add_quad(v[0:4])
    for a, b, c, d in ((0, 2, 5, 6), (0, 3, 5, 7), (1, 3, 4, 7), (1, 2, 4, 6)):
        cube.add_quad((v[a], v[b], v[c], v[d]))
    merged = pyramid.merge(cube)
    merged.trimesh_to_npmesh().save('stl/house_test.stl', mode=stl.Mode.BINARY)
    assert merged.euler_characteristic() == 5
| [
"trimesh.TriMesh",
"numpy.array",
"trimesh.Vertex"
] | [((114, 123), 'trimesh.TriMesh', 'TriMesh', ([], {}), '()\n', (121, 123), False, 'from trimesh import TriMesh, Vertex\n'), ((306, 315), 'trimesh.TriMesh', 'TriMesh', ([], {}), '()\n', (313, 315), False, 'from trimesh import TriMesh, Vertex\n'), ((533, 542), 'trimesh.TriMesh', 'TriMesh', ([], {}), '()\n', (540, 542), False, 'from trimesh import TriMesh, Vertex\n'), ((843, 852), 'trimesh.TriMesh', 'TriMesh', ([], {}), '()\n', (850, 852), False, 'from trimesh import TriMesh, Vertex\n'), ((1175, 1184), 'trimesh.TriMesh', 'TriMesh', ([], {}), '()\n', (1182, 1184), False, 'from trimesh import TriMesh, Vertex\n'), ((1564, 1573), 'trimesh.TriMesh', 'TriMesh', ([], {}), '()\n', (1571, 1573), False, 'from trimesh import TriMesh, Vertex\n'), ((2120, 2129), 'trimesh.TriMesh', 'TriMesh', ([], {}), '()\n', (2127, 2129), False, 'from trimesh import TriMesh, Vertex\n'), ((2898, 2907), 'trimesh.TriMesh', 'TriMesh', ([], {}), '()\n', (2905, 2907), False, 'from trimesh import TriMesh, Vertex\n'), ((3195, 3204), 'trimesh.TriMesh', 'TriMesh', ([], {}), '()\n', (3202, 3204), False, 'from trimesh import TriMesh, Vertex\n'), ((180, 192), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (188, 192), True, 'import numpy as np\n'), ((244, 256), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (252, 256), True, 'import numpy as np\n'), ((332, 347), 'trimesh.Vertex', 'Vertex', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (338, 347), False, 'from trimesh import TriMesh, Vertex\n'), ((349, 364), 'trimesh.Vertex', 'Vertex', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (355, 364), False, 'from trimesh import TriMesh, Vertex\n'), ((366, 381), 'trimesh.Vertex', 'Vertex', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (372, 381), False, 'from trimesh import TriMesh, Vertex\n'), ((468, 486), 'numpy.array', 'np.array', (['vertices'], {}), '(vertices)\n', (476, 486), True, 'import numpy as np\n'), ((559, 574), 'trimesh.Vertex', 'Vertex', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (565, 574), False, 
'from trimesh import TriMesh, Vertex\n'), ((576, 591), 'trimesh.Vertex', 'Vertex', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (582, 591), False, 'from trimesh import TriMesh, Vertex\n'), ((768, 783), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (776, 783), True, 'import numpy as np\n'), ((869, 884), 'trimesh.Vertex', 'Vertex', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (875, 884), False, 'from trimesh import TriMesh, Vertex\n'), ((1098, 1119), 'numpy.array', 'np.array', (['[(0, 0, 0)]'], {}), '([(0, 0, 0)])\n', (1106, 1119), True, 'import numpy as np\n'), ((1201, 1216), 'trimesh.Vertex', 'Vertex', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (1207, 1216), False, 'from trimesh import TriMesh, Vertex\n'), ((1218, 1233), 'trimesh.Vertex', 'Vertex', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (1224, 1233), False, 'from trimesh import TriMesh, Vertex\n'), ((1443, 1455), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1451, 1455), True, 'import numpy as np\n'), ((1590, 1605), 'trimesh.Vertex', 'Vertex', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (1596, 1605), False, 'from trimesh import TriMesh, Vertex\n'), ((1607, 1623), 'trimesh.Vertex', 'Vertex', (['(-1)', '(0)', '(0)'], {}), '(-1, 0, 0)\n', (1613, 1623), False, 'from trimesh import TriMesh, Vertex\n'), ((1625, 1640), 'trimesh.Vertex', 'Vertex', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (1631, 1640), False, 'from trimesh import TriMesh, Vertex\n'), ((1642, 1658), 'trimesh.Vertex', 'Vertex', (['(0)', '(-1)', '(0)'], {}), '(0, -1, 0)\n', (1648, 1658), False, 'from trimesh import TriMesh, Vertex\n'), ((1660, 1675), 'trimesh.Vertex', 'Vertex', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (1666, 1675), False, 'from trimesh import TriMesh, Vertex\n'), ((1967, 1982), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (1975, 1982), True, 'import numpy as np\n'), ((2146, 2162), 'trimesh.Vertex', 'Vertex', (['(-1)', '(0)', '(0)'], {}), '(-1, 0, 0)\n', (2152, 2162), False, 'from trimesh import TriMesh, 
Vertex\n'), ((2164, 2179), 'trimesh.Vertex', 'Vertex', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (2170, 2179), False, 'from trimesh import TriMesh, Vertex\n'), ((2181, 2196), 'trimesh.Vertex', 'Vertex', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (2187, 2196), False, 'from trimesh import TriMesh, Vertex\n'), ((2198, 2214), 'trimesh.Vertex', 'Vertex', (['(0)', '(-1)', '(0)'], {}), '(0, -1, 0)\n', (2204, 2214), False, 'from trimesh import TriMesh, Vertex\n'), ((2216, 2231), 'trimesh.Vertex', 'Vertex', (['(1)', '(0)', '(1)'], {}), '(1, 0, 1)\n', (2222, 2231), False, 'from trimesh import TriMesh, Vertex\n'), ((2233, 2249), 'trimesh.Vertex', 'Vertex', (['(-1)', '(0)', '(1)'], {}), '(-1, 0, 1)\n', (2239, 2249), False, 'from trimesh import TriMesh, Vertex\n'), ((2251, 2266), 'trimesh.Vertex', 'Vertex', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2257, 2266), False, 'from trimesh import TriMesh, Vertex\n'), ((2268, 2284), 'trimesh.Vertex', 'Vertex', (['(0)', '(-1)', '(1)'], {}), '(0, -1, 1)\n', (2274, 2284), False, 'from trimesh import TriMesh, Vertex\n'), ((2924, 2939), 'trimesh.Vertex', 'Vertex', (['(1)', '(0)', '(1)'], {}), '(1, 0, 1)\n', (2930, 2939), False, 'from trimesh import TriMesh, Vertex\n'), ((2941, 2957), 'trimesh.Vertex', 'Vertex', (['(-1)', '(0)', '(1)'], {}), '(-1, 0, 1)\n', (2947, 2957), False, 'from trimesh import TriMesh, Vertex\n'), ((2959, 2974), 'trimesh.Vertex', 'Vertex', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (2965, 2974), False, 'from trimesh import TriMesh, Vertex\n'), ((2976, 2992), 'trimesh.Vertex', 'Vertex', (['(0)', '(-1)', '(1)'], {}), '(0, -1, 1)\n', (2982, 2992), False, 'from trimesh import TriMesh, Vertex\n'), ((2994, 3009), 'trimesh.Vertex', 'Vertex', (['(0)', '(0)', '(2)'], {}), '(0, 0, 2)\n', (3000, 3009), False, 'from trimesh import TriMesh, Vertex\n'), ((3221, 3237), 'trimesh.Vertex', 'Vertex', (['(-1)', '(0)', '(0)'], {}), '(-1, 0, 0)\n', (3227, 3237), False, 'from trimesh import TriMesh, Vertex\n'), ((3239, 3254), 
'trimesh.Vertex', 'Vertex', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (3245, 3254), False, 'from trimesh import TriMesh, Vertex\n'), ((3256, 3271), 'trimesh.Vertex', 'Vertex', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (3262, 3271), False, 'from trimesh import TriMesh, Vertex\n'), ((3273, 3289), 'trimesh.Vertex', 'Vertex', (['(0)', '(-1)', '(0)'], {}), '(0, -1, 0)\n', (3279, 3289), False, 'from trimesh import TriMesh, Vertex\n'), ((3291, 3306), 'trimesh.Vertex', 'Vertex', (['(1)', '(0)', '(1)'], {}), '(1, 0, 1)\n', (3297, 3306), False, 'from trimesh import TriMesh, Vertex\n'), ((3308, 3324), 'trimesh.Vertex', 'Vertex', (['(-1)', '(0)', '(1)'], {}), '(-1, 0, 1)\n', (3314, 3324), False, 'from trimesh import TriMesh, Vertex\n'), ((3326, 3341), 'trimesh.Vertex', 'Vertex', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (3332, 3341), False, 'from trimesh import TriMesh, Vertex\n'), ((3343, 3359), 'trimesh.Vertex', 'Vertex', (['(0)', '(-1)', '(1)'], {}), '(0, -1, 1)\n', (3349, 3359), False, 'from trimesh import TriMesh, Vertex\n')] |
import os
import sys
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.callbacks import CSVLogger, History
from keras.layers import BatchNormalization, Dense, Dropout, Input
from keras.models import Model
# from .IntegratedGradient import integrated_gradients
"""
Created by <NAME> on 6/15/18.
Email : <EMAIL> or <EMAIL>
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
def loadGradients(path="../Results/IntegratedGradient/integrated_gradients.csv"):
    """Load a headerless CSV of integrated gradients as a DataFrame
    (columns are labeled 0..n-1)."""
    gradients = pd.read_csv(path, header=None)
    return gradients
def save_summaries_for_each_feature(feature_importance, path="./IntegratedGradient/Summaries/"):
    """Write the describe() summary of every feature column to its own
    text file under `path` (columns are assumed to be labeled 0..n-1)."""
    num_features = feature_importance.shape[1]
    for col in range(num_features):
        summary = feature_importance[col].describe()
        summary.to_csv(path + "feature_{0}.txt".format(col))
def analyze_top_100_features_for_each_sample(feature_importance, top_n=100,
                                             out_path="./top100_deeplift.csv"):
    """For each sample (row), save its `top_n` largest absolute feature
    importances, in descending order, as one CSV row.

    Parameters
    ----------
    feature_importance : pd.DataFrame
        One row per sample, one column per feature.
    top_n : int
        How many importances to keep per sample (default 100, matching the
        previously hard-coded value).
    out_path : str
        Destination CSV file (default matches the previous hard-coded path).
    """
    top_importances = []
    for sample_idx in range(feature_importance.shape[0]):
        row = feature_importance.iloc[sample_idx, :]
        # Rank by magnitude, largest first (was list(reversed(sorted(...)))).
        ranked = sorted(abs(row), reverse=True)
        top_importances.append(ranked[:top_n])
    np.savetxt(fname=out_path, X=np.array(top_importances), delimiter=',')
def plot_heatmaps(feature_importances, path="./IntegratedGradient/Heatmaps/"):
    """Render each sample's importances as a one-row heatmap image and save
    it as sample_<i>.png under `path`."""
    plt.rcParams["figure.figsize"] = 5, 2
    num_samples = feature_importances.shape[0]
    for sample_idx in range(num_samples):
        row = feature_importances.iloc[sample_idx, :]
        fig, ax = plt.subplots(nrows=1, sharex='all')
        heatmap = ax.imshow(row[np.newaxis, :], cmap="plasma", aspect="auto")
        ax.set_yticks([])
        plt.tight_layout()
        plt.colorbar(heatmap)
        plt.savefig(path + "sample_{0}.png".format(sample_idx))
        plt.close()
def plot_distributions(feature_importance, path="../Results/IntegratedGradient/DistPlots/"):
    """Save one density plot per feature column as feature_<i>.png under
    `path` (columns are assumed to be labeled 0..n-1)."""
    import seaborn as sns
    num_features = feature_importance.shape[1]
    for col in range(num_features):
        plt.figure()
        sns.distplot(feature_importance[col])
        plt.xlabel("Feature Importance")
        plt.ylabel("Density")
        plt.title("Feature_{0} Distribution of Importance".format(col))
        plt.savefig(path + "feature_{0}.png".format(col))
        plt.close()
def plot_distribution(feature_importance, path="../Results/IntegratedGradient/"):
    """Plot the density of ALL importance values pooled together and save
    it as distribution.png under `path`."""
    file_name = "distribution.png"
    # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy()
    # is the documented replacement. reshape(-1) flattens to 1-D exactly as
    # the previous explicit (rows * cols,) reshape did.
    values = feature_importance.to_numpy().reshape(-1)
    import seaborn as sns
    sns.distplot(values)
    plt.xlabel("Feature Importance")
    plt.ylabel("Density")
    plt.title("Distribution of all feature importances")
    plt.savefig(path + file_name)
    plt.close()
def box_plot(feature_importance, path="../Results/IntegratedGradient/"):
    """Placeholder for a box plot of feature importances — not implemented."""
    pass
def calculate_statistical_criteria(feature_importance=None, criteria="absolute_error",
                                   path="../Results/IntegratedGradient/"):
    """Compute a per-feature spread criterion and save it as a CSV column.

    For each feature (column) the criterion is:
      * "absolute_error": max - min of that feature's importances.
      * "relative_error": (max - min) / max.

    The result is written to ``path + "intgrad_<criteria>.csv"`` with one
    row per feature.

    Parameters
    ----------
    feature_importance : pd.DataFrame
        One row per sample, one column per feature.
    criteria : str
        "absolute_error" or "relative_error"; any other value writes zeros.
    path : str
        Output directory (must end with a separator, as in the default).
    """
    file_name = "intgrad_" + criteria + ".csv"
    # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy()
    # is the documented replacement.
    values = feature_importance.to_numpy()
    num_features = values.shape[1]
    statistical_criteria = np.zeros(shape=(num_features, 1))
    if criteria == "absolute_error":
        # Vectorized per-column max - min (was a per-column Python loop).
        statistical_criteria = (values.max(axis=0) - values.min(axis=0)).reshape(-1, 1)
    elif criteria == "relative_error":
        statistical_criteria = ((values.max(axis=0) - values.min(axis=0)) /
                                values.max(axis=0)).reshape(-1, 1)
    np.savetxt(fname=path + file_name,
               X=statistical_criteria, delimiter=",")
def plot_statistical_criteria(criteria="absolute_error", data_path="../Results/IntegratedGradient/",
                              save_path="../Results/IntegratedGradient/"):
    """Plot the distribution of a previously computed per-feature criterion.

    Reads ``data_path + "intgrad_<criteria>.csv"`` (as written by
    calculate_statistical_criteria) and saves a density plot to
    ``save_path + "intgrad_<criteria>.png"``.
    """
    data_path = data_path + "intgrad_" + criteria + ".csv"
    save_path = save_path + "intgrad_" + criteria + ".png"
    # BUGFIX: .as_matrix() was removed in pandas 1.0; to_numpy() is the
    # documented replacement.
    statistical_criteria = pd.read_csv(data_path, header=None).to_numpy()
    import seaborn as sns
    sns.distplot(statistical_criteria)
    if criteria == "absolute_error":
        plt.xlabel("Absolute Error")
        plt.title("Distribution of Absolute Error")
    elif criteria == "relative_error":
        plt.xlabel("Relative Error")
        plt.title("Distribution of Relative Error")
    plt.ylabel("Density")
    plt.savefig(save_path)
    plt.close()
def make_summary_data(feature_importance, path="../Results/IntegratedGradient/"):
    """Save mean, std, min and max of every feature column to
    ``path + "summaries.csv"`` (one row per feature).

    Parameters
    ----------
    feature_importance : pd.DataFrame
        One row per sample, one column per feature (labeled 0..n-1).
    path : str
        Output directory (must end with a separator, as in the default).
    """
    file_name = "summaries.csv"
    num_features = feature_importance.shape[1]
    all_descriptions = np.zeros(
        shape=(num_features, 4))  # mean - std - min - max
    for col in range(num_features):
        stats = feature_importance[col].describe()
        # describe() rows: count, mean, std, min, 25%, 50%, 75%, max —
        # indices 1, 2, 3, 7 pick mean/std/min/max.
        # BUGFIX: .as_matrix() was removed in pandas 1.0; to_numpy() is the
        # documented replacement.
        all_descriptions[col] = stats.iloc[[1, 2, 3, 7]].to_numpy().T
    print(all_descriptions.shape)
    np.savetxt(fname=path + file_name, X=all_descriptions, delimiter=',')
def compute_integrated_gradient(machine="damavand", save_path="../Results/IntegratedGradient/", verbose=1):
    """Compute integrated-gradient feature importances for every sample of
    the mRNA dataset against the saved Keras classifier, save them to CSV,
    and return them as a DataFrame.

    NOTE(review): `integrated_gradients` comes from the import that is
    commented out at the top of this file; as written, this function raises
    NameError until that import is restored.

    Parameters
    ----------
    machine : str
        "damavand" selects the cluster data path; anything else the local one.
    save_path : str
        Unused — the output path below is hard-coded.
    verbose : int
        1 prints a progress line to stdout.

    Returns
    -------
    pd.DataFrame
        One row of feature importances per input sample.
    """
    file_name = "integrated_gradient.csv"  # unused; see hard-coded path below
    if machine == "damavand":
        mrna_address = "~/f/Behrooz/dataset_local/fpkm_normalized.csv"
    else:
        mrna_address = "../Data/fpkm_normalized.csv"
    m_rna = pd.read_csv(mrna_address, header=None)
    model = keras.models.load_model("../Results/classifier.h5")
    ig = integrated_gradients(model, verbose=verbose)
    num_samples = m_rna.shape[0]
    num_features = m_rna.shape[1]
    feature_importances = np.zeros(shape=(num_samples, num_features))
    for i in range(num_samples):
        # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0;
        # .to_numpy() is the modern equivalent.
        feature_importances[i] = ig.explain(m_rna.as_matrix()[i, :])
        if verbose == 1:
            sys.stdout.flush()
            sys.stdout.write('\r')
            # Progress divides by 10787 — presumably the dataset size;
            # TODO confirm it matches num_samples.
            sys.stdout.write("Progress: " + str((i / 10787) * 100) + " %")
            sys.stdout.flush()
    if verbose == 1:
        sys.stdout.flush()
        sys.stdout.write('\r')
        sys.stdout.write("Progress: " + str((10787 / 10787) * 100) + " %")
        sys.stdout.flush()
    np.savetxt(fname="../Results/IntegratedGradient/integrated_gradients.csv",
               X=np.array(feature_importances), delimiter=',')
    return pd.DataFrame(feature_importances)
# Selects which machine's data paths compute_integrated_gradient() uses.
machine = "local"
if __name__ == '__main__':
    # All inputs and outputs live under this results directory.
    general_path = "../Results/IntegratedGradient/"
    data_path = general_path + "integrated_gradients.csv"
    summary_path = general_path + "summary.csv"
    distplot_path = general_path + "distribution.png"
    # Reuse previously computed gradients if present; otherwise recompute.
    if os.path.exists(data_path):
        feature_importance = loadGradients(path=data_path)
        print("Data has been loaded!")
    else:
        feature_importance = compute_integrated_gradient(
            machine=machine, save_path=data_path, verbose=1)
        print("Data has been computed and saved!")
    # plot_distribution(feature_importance, path=distplot_path)
    # print("General Distribution has been drawn!")
    # Compute the absolute-error criterion per feature, then plot it.
    calculate_statistical_criteria(
        feature_importance, criteria="absolute_error")
    print("Statistical Criteria AE Calculation has been finished!")
    plot_statistical_criteria(criteria="absolute_error")
    print("Statistical Criteria AE Distribution plot has been drawn!")
    # calculate_statistical_criteria(None, criteria="relative_error")
    # print("Statistical Criteria RE Calculation has been finished!")
    # plot_statistical_criteria(criteria="relative_error")
    # print("Statistical Criteria RE Distribution plot has been drawn!")
    # make_summary_data(feature_importance)
    # print("Summary of all features has been made!")
    print("Finished!")
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.array",
"os.path.exists",
"numpy.reshape",
"seaborn.distplot",
"matplotlib.pyplot.xlabel",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.min",
"pandas.DataFrame",
"sys.stdout.flush",
"matplotlib.pyplot.savefig",
"numpy.savetxt",
"mat... | [((612, 642), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None'}), '(path, header=None)\n', (623, 642), True, 'import pandas as pd\n'), ((2758, 2808), 'numpy.reshape', 'np.reshape', (['feature_importance'], {'newshape': 'new_shape'}), '(feature_importance, newshape=new_shape)\n', (2768, 2808), True, 'import numpy as np\n'), ((2840, 2872), 'seaborn.distplot', 'sns.distplot', (['feature_importance'], {}), '(feature_importance)\n', (2852, 2872), True, 'import seaborn as sns\n'), ((2877, 2909), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Feature Importance"""'], {}), "('Feature Importance')\n", (2887, 2909), True, 'import matplotlib.pyplot as plt\n'), ((2914, 2935), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (2924, 2935), True, 'import matplotlib.pyplot as plt\n'), ((2940, 2992), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of all feature importances"""'], {}), "('Distribution of all feature importances')\n", (2949, 2992), True, 'import matplotlib.pyplot as plt\n'), ((2997, 3026), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(path + file_name)'], {}), '(path + file_name)\n', (3008, 3026), True, 'import matplotlib.pyplot as plt\n'), ((3031, 3042), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3040, 3042), True, 'import matplotlib.pyplot as plt\n'), ((3446, 3494), 'numpy.zeros', 'np.zeros', ([], {'shape': '(feature_importance.shape[1], 1)'}), '(shape=(feature_importance.shape[1], 1))\n', (3454, 3494), True, 'import numpy as np\n'), ((4031, 4104), 'numpy.savetxt', 'np.savetxt', ([], {'fname': '(path + file_name)', 'X': 'statistical_criteria', 'delimiter': '""","""'}), "(fname=path + file_name, X=statistical_criteria, delimiter=',')\n", (4041, 4104), True, 'import numpy as np\n'), ((4522, 4556), 'seaborn.distplot', 'sns.distplot', (['statistical_criteria'], {}), '(statistical_criteria)\n', (4534, 4556), True, 'import seaborn as sns\n'), ((4815, 4836), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (4825, 4836), True, 'import matplotlib.pyplot as plt\n'), ((4841, 4863), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (4852, 4863), True, 'import matplotlib.pyplot as plt\n'), ((4868, 4879), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4877, 4879), True, 'import matplotlib.pyplot as plt\n'), ((5110, 5143), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_features, 4)'}), '(shape=(num_features, 4))\n', (5118, 5143), True, 'import numpy as np\n'), ((5415, 5484), 'numpy.savetxt', 'np.savetxt', ([], {'fname': '(path + file_name)', 'X': 'all_describtions', 'delimiter': '""","""'}), "(fname=path + file_name, X=all_describtions, delimiter=',')\n", (5425, 5484), True, 'import numpy as np\n'), ((5815, 5853), 'pandas.read_csv', 'pd.read_csv', (['mrna_address'], {'header': 'None'}), '(mrna_address, header=None)\n', (5826, 5853), True, 'import pandas as pd\n'), ((5866, 5917), 'keras.models.load_model', 'keras.models.load_model', (['"""../Results/classifier.h5"""'], {}), "('../Results/classifier.h5')\n", (5889, 5917), False, 'import keras\n'), ((6067, 6110), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_samples, num_features)'}), '(shape=(num_samples, num_features))\n', (6075, 6110), True, 'import numpy as np\n'), ((6746, 6779), 'pandas.DataFrame', 'pd.DataFrame', (['feature_importances'], {}), '(feature_importances)\n', (6758, 6779), True, 'import pandas as pd\n'), ((7049, 7074), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (7063, 7074), False, 'import os\n'), ((1606, 1641), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'sharex': '"""all"""'}), "(nrows=1, sharex='all')\n", (1618, 1641), True, 'import matplotlib.pyplot as plt\n'), ((1877, 1895), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1893, 1895), True, 'import matplotlib.pyplot as plt\n'), ((1904, 1925), 
'matplotlib.pyplot.colorbar', 'plt.colorbar', (['heatmap'], {}), '(heatmap)\n', (1916, 1925), True, 'import matplotlib.pyplot as plt\n'), ((1989, 2000), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1998, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2179, 2191), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2189, 2191), True, 'import matplotlib.pyplot as plt\n'), ((2200, 2235), 'seaborn.distplot', 'sns.distplot', (['feature_importance[i]'], {}), '(feature_importance[i])\n', (2212, 2235), True, 'import seaborn as sns\n'), ((2244, 2276), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Feature Importance"""'], {}), "('Feature Importance')\n", (2254, 2276), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2306), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (2295, 2306), True, 'import matplotlib.pyplot as plt\n'), ((2441, 2452), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2450, 2452), True, 'import matplotlib.pyplot as plt\n'), ((4602, 4630), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Absolute Error"""'], {}), "('Absolute Error')\n", (4612, 4630), True, 'import matplotlib.pyplot as plt\n'), ((4639, 4682), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Absolute Error"""'], {}), "('Distribution of Absolute Error')\n", (4648, 4682), True, 'import matplotlib.pyplot as plt\n'), ((6439, 6457), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6455, 6457), False, 'import sys\n'), ((6466, 6488), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (6482, 6488), False, 'import sys\n'), ((6572, 6590), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6588, 6590), False, 'import sys\n'), ((1329, 1354), 'numpy.array', 'np.array', (['top_importances'], {}), '(top_importances)\n', (1337, 1354), True, 'import numpy as np\n'), ((4443, 4478), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'header': 'None'}), '(data_path, header=None)\n', 
(4454, 4478), True, 'import pandas as pd\n'), ((4730, 4758), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Relative Error"""'], {}), "('Relative Error')\n", (4740, 4758), True, 'import matplotlib.pyplot as plt\n'), ((4767, 4810), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Relative Error"""'], {}), "('Distribution of Relative Error')\n", (4776, 4810), True, 'import matplotlib.pyplot as plt\n'), ((6250, 6268), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6266, 6268), False, 'import sys\n'), ((6281, 6303), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (6297, 6303), False, 'import sys\n'), ((6391, 6409), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6407, 6409), False, 'import sys\n'), ((6688, 6717), 'numpy.array', 'np.array', (['feature_importances'], {}), '(feature_importances)\n', (6696, 6717), True, 'import numpy as np\n'), ((3625, 3657), 'numpy.max', 'np.max', (['feature_importance[:, i]'], {}), '(feature_importance[:, i])\n', (3631, 3657), True, 'import numpy as np\n'), ((3660, 3692), 'numpy.min', 'np.min', (['feature_importance[:, i]'], {}), '(feature_importance[:, i])\n', (3666, 3692), True, 'import numpy as np\n'), ((3905, 3937), 'numpy.max', 'np.max', (['feature_importance[:, i]'], {}), '(feature_importance[:, i])\n', (3911, 3937), True, 'import numpy as np\n'), ((3820, 3852), 'numpy.max', 'np.max', (['feature_importance[:, i]'], {}), '(feature_importance[:, i])\n', (3826, 3852), True, 'import numpy as np\n'), ((3855, 3887), 'numpy.min', 'np.min', (['feature_importance[:, i]'], {}), '(feature_importance[:, i])\n', (3861, 3887), True, 'import numpy as np\n')] |
"""Make BIDS compatible directory structures and infer meta data from MNE."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import errno
import shutil as sh
import pandas as pd
from collections import defaultdict, OrderedDict
import numpy as np
from mne import Epochs
from mne.io.constants import FIFF
from mne.io.pick import channel_type
from mne.io import BaseRaw
from mne.channels.channels import _unit2human
from mne.externals.six import string_types
from mne.utils import check_version
from datetime import datetime
from warnings import warn
from .pick import coil_type
from .utils import (make_bids_filename, make_bids_folders,
make_dataset_description, _write_json, _write_tsv,
_read_events, _mkdir_p, age_on_date,
copyfile_brainvision, copyfile_eeglab,
_infer_eeg_placement_scheme)
from .io import (_parse_ext, _read_raw, ALLOWED_EXTENSIONS)
# BIDS data modalities this module knows how to write.
ALLOWED_KINDS = ['meg', 'eeg', 'ieeg']
# Orientation of the coordinate system dependent on manufacturer
ORIENTATION = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.pdf': 'ALS',
               '.ds': 'ALS'}
# Units of the sensor positions, keyed by raw-file extension.
UNITS = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.pdf': 'm', '.ds': 'cm'}
# Acquisition-system manufacturer keyed by MEG raw-file extension.
meg_manufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa',
                     '.fif': 'Elekta', '.pdf': '4D Magnes', '.ds': 'CTF',
                     '.meg4': 'CTF'}
# Acquisition-system manufacturer keyed by EEG raw-file extension.
# 'Mixed' marks extensions shared by several manufacturers.
eeg_manufacturers = {'.vhdr': 'BrainProducts', '.eeg': 'BrainProducts',
                     '.edf': 'Mixed', '.bdf': 'Biosemi', '.set': 'Mixed',
                     '.fdt': 'Mixed', '.cnt': 'Neuroscan'}
# Merge the manufacturer dictionaries in a python2 / python3 compatible way
MANUFACTURERS = dict()
MANUFACTURERS.update(meg_manufacturers)
MANUFACTURERS.update(eeg_manufacturers)
# List of synthetic channels by manufacturer that are to be excluded from the
# channel list. Currently this is only for stimulus channels.
IGNORED_CHANNELS = {'KIT/Yokogawa': ['STI 014'],
                    'BrainProducts': ['STI 014'],
                    'Mixed': ['STI 014'],
                    'Biosemi': ['STI 014'],
                    'Neuroscan': ['STI 014']}
def _channels_tsv(raw, fname, overwrite=False, verbose=True):
    """Create a channels.tsv file and save it.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    fname : str
        Filename to save the channels.tsv to.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false.
    """
    # BIDS channel-type labels keyed by MNE channel type; anything unknown
    # maps to 'OTHER'.
    type_labels = defaultdict(lambda: 'OTHER')
    type_labels.update(meggradaxial='MEGGRADAXIAL',
                       megrefgradaxial='MEGREFGRADAXIAL',
                       meggradplanar='MEGGRADPLANAR',
                       megmag='MEGMAG', megrefmag='MEGREFMAG',
                       eeg='EEG', misc='MISC', stim='TRIG', emg='EMG',
                       ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG')
    # Human-readable descriptions for the same keys.
    type_descriptions = defaultdict(lambda: 'Other type of channel')
    type_descriptions.update(meggradaxial='Axial Gradiometer',
                             megrefgradaxial='Axial Gradiometer Reference',
                             meggradplanar='Planar Gradiometer',
                             megmag='Magnetometer',
                             megrefmag='Magnetometer Reference',
                             stim='Trigger', eeg='ElectroEncephaloGram',
                             ecog='Electrocorticography',
                             seeg='StereoEEG',
                             ecg='ElectroCardioGram',
                             eog='ElectroOculoGram',
                             emg='ElectroMyoGram',
                             misc='Miscellaneous')
    # These MNE types need the more specific coil-based lookup.
    needs_coil_lookup = ('mag', 'ref_meg', 'grad')
    # Determine the manufacturer from the file backing the Raw object.
    manufacturer = None
    if hasattr(raw, 'filenames'):
        # XXX: Hack for EEGLAB bug in MNE-Python 0.16; fixed in MNE-Python
        # 0.17, ... remove the hack after upgrading dependencies in MNE-BIDS
        if raw.filenames[0] is None:  # hack
            ext = '.set'  # hack
        else:
            _, ext = _parse_ext(raw.filenames[0], verbose=verbose)
        manufacturer = MANUFACTURERS[ext]
    # Indices of manufacturer-specific synthetic channels to drop.
    ignored = IGNORED_CHANNELS.get(manufacturer, list())
    drop_indexes = [raw.ch_names.index(name) for name in raw.ch_names
                    if name in ignored]
    status, ch_type, description = list(), list(), list()
    for idx, name in enumerate(raw.info['ch_names']):
        status.append('bad' if name in raw.info['bads'] else 'good')
        mne_type = channel_type(raw.info, idx)
        if mne_type in needs_coil_lookup:
            mne_type = coil_type(raw.info, idx)
        ch_type.append(type_labels[mne_type])
        description.append(type_descriptions[mne_type])
    low_cutoff = raw.info['highpass']
    high_cutoff = raw.info['lowpass']
    units = [_unit2human.get(ch_i['unit'], 'n/a') for ch_i in raw.info['chs']]
    units = ['n/a' if unit in ['NA'] else unit for unit in units]
    n_channels = raw.info['nchan']
    sfreq = raw.info['sfreq']
    df = pd.DataFrame(OrderedDict([
        ('name', raw.info['ch_names']),
        ('type', ch_type),
        ('units', units),
        ('description', description),
        ('sampling_frequency', np.full(n_channels, sfreq)),
        ('low_cutoff', np.full(n_channels, low_cutoff)),
        ('high_cutoff', np.full(n_channels, high_cutoff)),
        ('status', status)]))
    df.drop(drop_indexes, inplace=True)
    _write_tsv(fname, df, overwrite, verbose)
    return fname
def _events_tsv(events, raw, fname, trial_type, overwrite=False,
                verbose=True):
    """Create an events.tsv file and save it.

    This function will write the mandatory 'onset', and 'duration' columns as
    well as the optional 'event_value' and 'event_sample'. The 'event_value'
    corresponds to the marker value as found in the TRIG channel of the
    recording. In addition, the 'trial_type' field can be written.

    Parameters
    ----------
    events : array, shape = (n_events, 3)
        The first column contains the event time in samples and the third
        column contains the event id. The second column is ignored for now but
        typically contains the value of the trigger channel either immediately
        before the event or immediately after.
    raw : instance of Raw
        The data as MNE-Python Raw object.
    fname : str
        Filename to save the events.tsv to.
    trial_type : dict | None
        Dictionary mapping a brief description key to an event id (value). For
        example {'Go': 1, 'No Go': 2}.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false.

    Notes
    -----
    The function writes durations of zero for each event. The input `events`
    array is left untouched.
    """
    # Start by filling all data that we know into a df
    first_samp = raw.first_samp
    sfreq = raw.info['sfreq']
    # Operate on a copy so the caller's events array is not mutated in place.
    events = events.copy()
    events[:, 0] -= first_samp
    data = OrderedDict([('onset', events[:, 0]),
                        ('duration', np.zeros(events.shape[0])),
                        ('trial_type', events[:, 2]),
                        ('event_value', events[:, 2]),
                        ('event_sample', events[:, 0])])
    df = pd.DataFrame.from_dict(data)
    # Now check if trial_type is specified or should be removed
    if trial_type:
        # Map event ids back to their human-readable labels.
        trial_type_map = {v: k for k, v in trial_type.items()}
        df.trial_type = df.trial_type.map(trial_type_map)
    else:
        df.drop(labels=['trial_type'], axis=1, inplace=True)
    # Onset column needs to be specified in seconds
    df.onset /= sfreq
    _write_tsv(fname, df, overwrite, verbose)
    return fname
def _participants_tsv(raw, subject_id, group, fname, overwrite=False,
                      verbose=True):
    """Create a participants.tsv file and save it.

    This will append any new participant data to the current list if it
    exists. Otherwise a new file will be created with the provided information.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    subject_id : str
        The subject name in BIDS compatible format ('01', '02', etc.)
    group : str
        Name of group participant belongs to.
    fname : str
        Filename to save the participants.tsv to.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
        If there is already data for the given `subject_id` and overwrite is
        False, an error will be raised.
    verbose : bool
        Set verbose output to true or false.
    """
    bids_id = 'sub-' + subject_id
    data = {'participant_id': [bids_id]}
    subject_info = raw.info['subject_info']
    if subject_info is not None:
        sex_codes = {0: 'U', 1: 'M', 2: 'F'}
        sex = sex_codes[subject_info.get('sex', 0)]
        # Age is only computable when both birthday and measurement date are
        # available in the info.
        birthday = subject_info.get('birthday', None)
        meas_date = raw.info.get('meas_date', None)
        if isinstance(meas_date, (tuple, list, np.ndarray)):
            meas_date = meas_date[0]
        if meas_date is None or birthday is None:
            subject_age = "n/a"
        else:
            bday = datetime(birthday[0], birthday[1], birthday[2])
            meas_datetime = datetime.fromtimestamp(meas_date)
            subject_age = age_on_date(bday, meas_datetime)
        data.update({'age': [subject_age], 'sex': [sex], 'group': [group]})
    df = pd.DataFrame(data=data,
                      columns=['participant_id', 'age', 'sex', 'group'])
    if os.path.exists(fname):
        orig_df = pd.read_csv(fname, sep='\t')
        # whether the data exists identically in the current DataFrame
        exact_included = df.values.tolist()[0] in orig_df.values.tolist()
        # whether the subject id is in the existing DataFrame
        sid_included = bids_id in orig_df['participant_id'].values
        # differing data for an existing subject is only allowed with
        # overwrite=True
        if sid_included and not exact_included and not overwrite:
            raise OSError(errno.EEXIST, '"%s" already exists in the '
                          'participant list. Please set overwrite to '
                          'True.' % bids_id)
        # otherwise add the new data
        df = orig_df.append(df)
        # and drop any duplicates as we want overwrite = True to force the old
        # data to be overwritten
        df.drop_duplicates(subset='participant_id', keep='last',
                           inplace=True)
        df = df.sort_values(by='participant_id')
    # overwrite is forced to True as all issues with overwrite == False have
    # been handled by this point
    _write_tsv(fname, df, True, verbose)
    return fname
def _scans_tsv(raw, raw_fname, fname, overwrite=False, verbose=True):
    """Create a scans.tsv file and save it.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    raw_fname : str
        Relative path to the raw data file.
    fname : str
        Filename to save the scans.tsv to.
    overwrite : bool
        Defaults to False.
        Whether to overwrite the existing data in the file.
        If there is already data for the given `fname` and overwrite is False,
        an error will be raised.
    verbose : bool
        Set verbose output to true or false.
    """
    # get measurement date from the data info
    meas_date = raw.info['meas_date']
    if isinstance(meas_date, (tuple, list, np.ndarray)):
        acq_time = datetime.fromtimestamp(
            meas_date[0]).strftime('%Y-%m-%dT%H:%M:%S')
    else:
        acq_time = 'n/a'
    df = pd.DataFrame(data={'filename': ['%s' % raw_fname],
                            'acq_time': [acq_time]},
                      columns=['filename', 'acq_time'])
    if os.path.exists(fname):
        existing_df = pd.read_csv(fname, sep='\t')
        # a file name already present may only be replaced with overwrite=True
        if raw_fname in existing_df['filename'].values and not overwrite:
            raise OSError(errno.EEXIST, '"%s" already exists in the '
                          'scans list. Please set overwrite to '
                          'True.' % raw_fname)
        # otherwise add the new data
        df = existing_df.append(df)
        # and drop any duplicates as we want overwrite = True to force the old
        # data to be overwritten
        df.drop_duplicates(subset='filename', keep='last', inplace=True)
        df = df.sort_values(by='acq_time')
    # overwrite is forced to True as all issues with overwrite == False have
    # been handled by this point
    _write_tsv(fname, df, True, verbose)
    return fname
def _coordsystem_json(raw, unit, orient, manufacturer, fname,
                      overwrite=False, verbose=True):
    """Create a coordsystem.json file and save it.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    unit : str
        Units to be used in the coordsystem specification.
    orient : str
        Used to define the coordinate system for the head coils.
    manufacturer : str
        Used to define the coordinate system for the MEG sensors.
    fname : str
        Filename to save the coordsystem.json to.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false.
    """
    dig = raw.info['dig']
    coords = dict()
    # Cardinal landmarks (nasion / left and right pre-auricular points).
    cardinals = {d['ident']: d for d in dig if d['kind'] ==
                 FIFF.FIFFV_POINT_CARDINAL}
    for label, ident in (('NAS', FIFF.FIFFV_POINT_NASION),
                         ('LPA', FIFF.FIFFV_POINT_LPA),
                         ('RPA', FIFF.FIFFV_POINT_RPA)):
        if ident in cardinals:
            coords[label] = cardinals[ident]['r'].tolist()
    # Head-position indicator coils.
    hpi_points = {d['ident']: d for d in dig
                  if d['kind'] == FIFF.FIFFV_POINT_HPI}
    for ident in hpi_points.keys():
        coords['coil%d' % ident] = hpi_points[ident]['r'].tolist()
    # All digitized points must share a single coordinate frame.
    frames = set(point['coord_frame'] for point in dig)
    if len(frames) > 1:
        raise ValueError('All HPI and Fiducials must be in the '
                         'same coordinate frame.')
    fid_json = {'MEGCoordinateSystem': manufacturer,
                'MEGCoordinateUnits': unit,  # XXX validate this
                'HeadCoilCoordinates': coords,
                'HeadCoilCoordinateSystem': orient,
                'HeadCoilCoordinateUnits': unit  # XXX validate this
                }
    _write_json(fid_json, fname, overwrite)
    return fname
def _sidecar_json(raw, task, manufacturer, fname, kind, eeg_ref=None,
                  eeg_gnd=None, overwrite=False, verbose=True):
    """Create a sidecar json file depending on the kind and save it.

    The sidecar json file provides meta data about the data of a certain kind.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    task : str
        Name of the task the data is based on.
    manufacturer : str
        Manufacturer of the acquisition system. For MEG also used to define the
        coordinate system for the MEG sensors.
    fname : str
        Filename to save the sidecar json to.
    kind : str
        Type of the data as in ALLOWED_KINDS.
    eeg_ref : str
        Description of the type of reference used and (when applicable) of
        location of the reference electrode. Defaults to None.
    eeg_gnd : str
        Description of the location of the ground electrode. Defaults to None.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to true or false. Defaults to true.
    """
    sfreq = raw.info['sfreq']
    powerlinefrequency = raw.info.get('line_freq', None)
    if powerlinefrequency is None:
        warn('No line frequency found, defaulting to 50 Hz')
        powerlinefrequency = 50
    if not eeg_ref:
        eeg_ref = 'n/a'
    if not eeg_gnd:
        eeg_gnd = 'n/a'
    if isinstance(raw, BaseRaw):
        rec_type = 'continuous'
    elif isinstance(raw, Epochs):
        rec_type = 'epoched'
    else:
        rec_type = 'n/a'
    # determine whether any channels have to be ignored:
    n_ignored = len([ch_name for ch_name in
                     IGNORED_CHANNELS.get(manufacturer, list()) if
                     ch_name in raw.ch_names])
    # Count channels of each MNE kind in a single pass over the channel list
    # (instead of one list comprehension per kind); absent kinds count as 0.
    kind_counts = defaultdict(int)
    for ch in raw.info['chs']:
        kind_counts[ch['kind']] += 1
    n_megchan = kind_counts[FIFF.FIFFV_MEG_CH]
    n_megrefchan = kind_counts[FIFF.FIFFV_REF_MEG_CH]
    n_eegchan = kind_counts[FIFF.FIFFV_EEG_CH]
    n_ecogchan = kind_counts[FIFF.FIFFV_ECOG_CH]
    n_seegchan = kind_counts[FIFF.FIFFV_SEEG_CH]
    n_eogchan = kind_counts[FIFF.FIFFV_EOG_CH]
    n_ecgchan = kind_counts[FIFF.FIFFV_ECG_CH]
    n_emgchan = kind_counts[FIFF.FIFFV_EMG_CH]
    n_miscchan = kind_counts[FIFF.FIFFV_MISC_CH]
    # all ignored channels are trigger channels at the moment...
    n_stimchan = kind_counts[FIFF.FIFFV_STIM_CH] - n_ignored
    # Define modality-specific JSON dictionaries
    ch_info_json_common = [
        ('TaskName', task),
        ('Manufacturer', manufacturer),
        ('PowerLineFrequency', powerlinefrequency),
        ('SamplingFrequency', sfreq),
        ('SoftwareFilters', 'n/a'),
        ('RecordingDuration', raw.times[-1]),
        ('RecordingType', rec_type)]
    ch_info_json_meg = [
        ('DewarPosition', 'n/a'),
        ('DigitizedLandmarks', False),
        ('DigitizedHeadPoints', False),
        ('MEGChannelCount', n_megchan),
        ('MEGREFChannelCount', n_megrefchan)]
    ch_info_json_eeg = [
        ('EEGReference', eeg_ref),
        ('EEGGround', eeg_gnd),
        ('EEGPlacementScheme', _infer_eeg_placement_scheme(raw)),
        ('Manufacturer', manufacturer)]
    ch_info_json_ieeg = [
        ('ECOGChannelCount', n_ecogchan),
        ('SEEGChannelCount', n_seegchan)]
    ch_info_ch_counts = [
        ('EEGChannelCount', n_eegchan),
        ('EOGChannelCount', n_eogchan),
        ('ECGChannelCount', n_ecgchan),
        ('EMGChannelCount', n_emgchan),
        ('MiscChannelCount', n_miscchan),
        ('TriggerChannelCount', n_stimchan)]
    # Stitch together the complete JSON dictionary
    ch_info_json = ch_info_json_common
    if kind == 'meg':
        append_kind_json = ch_info_json_meg
    elif kind == 'eeg':
        append_kind_json = ch_info_json_eeg
    elif kind == 'ieeg':
        append_kind_json = ch_info_json_ieeg
    else:
        raise ValueError('Unexpected "kind": {}'
                         ' Use one of: {}'.format(kind, ALLOWED_KINDS))
    ch_info_json += append_kind_json
    ch_info_json += ch_info_ch_counts
    ch_info_json = OrderedDict(ch_info_json)
    _write_json(ch_info_json, fname, overwrite, verbose)
    return fname
def raw_to_bids(subject_id, task, raw_file, output_path, session_id=None,
                acquisition=None, run=None, kind='meg', events_data=None,
                event_id=None, hpi=None, electrode=None, hsp=None,
                eeg_ref=None, eeg_gnd=None, config=None,
                overwrite=False, verbose=True):
    """Walk over a folder of files and create BIDS compatible folder.

    Parameters
    ----------
    subject_id : str
        The subject name in BIDS compatible format ('01', '02', etc.)
    task : str
        Name of the task the data is based on.
    raw_file : str | instance of mne.Raw
        The raw data. If a string, it is assumed to be the path to the raw data
        file. Otherwise it must be an instance of mne.Raw
    output_path : str
        The path of the BIDS compatible folder
    session_id : str | None
        The session name in BIDS compatible format.
    acquisition : str | None
        Acquisition parameter for the dataset.
    run : int | None
        The run number for this dataset.
    kind : str, one of ('meg', 'eeg', 'ieeg')
        The kind of data being converted. Defaults to "meg".
    events_data : str | array | None
        The events file. If a string, a path to the events file. If an array,
        the MNE events array (shape n_events, 3). If None, events will be
        inferred from the stim channel using `mne.find_events`.
    event_id : dict | None
        The event id dict used to create a 'trial_type' column in events.tsv
    hpi : None | str
        Marker points representing the location of the marker coils with
        respect to the MEG Sensors, or path to a marker file.
    electrode : None | str
        Digitizer points representing the location of the fiducials and the
        marker coils with respect to the digitized head shape, or path to a
        file containing these points.
    hsp : None | str | array, shape = (n_points, 3)
        Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
    eeg_ref : str
        Description of the type of reference used and (when applicable) of
        location of the reference electrode. Defaults to None.
    eeg_gnd : str
        Description of the location of the ground electrode. Defaults to None.
    config : str | None
        A path to the configuration file to use if the data is from a BTi
        system.
    overwrite : bool
        Whether to overwrite existing files or data in files.
        Defaults to False.
        If overwrite is True, any existing files with the same BIDS parameters
        will be overwritten with the exception of the `participants.tsv` and
        `scans.tsv` files. For these files, parts of pre-existing data that
        match the current data will be replaced.
        If overwrite is False, no existing data will be overwritten or
        replaced.
    verbose : bool
        If verbose is True, this will print a snippet of the sidecar files. If
        False, no content will be printed.

    Notes
    -----
    For the participants.tsv file, the raw.info['subjects_info'] should be
    updated and raw.info['meas_date'] should not be None to compute the age
    of the participant correctly.
    """
    # Step 1: obtain a Raw object and remember the on-disk filename/extension.
    if isinstance(raw_file, string_types):
        # We must read in the raw data
        raw = _read_raw(raw_file, electrode=electrode, hsp=hsp, hpi=hpi,
                        config=config, verbose=verbose)
        _, ext = _parse_ext(raw_file, verbose=verbose)
        raw_fname = raw_file
    elif isinstance(raw_file, BaseRaw):
        # We got a raw mne object, get back the filename if possible
        # Assume that if no filename attr exists, it's a fif file.
        raw = raw_file.copy()
        if hasattr(raw, 'filenames'):
            _, ext = _parse_ext(raw.filenames[0], verbose=verbose)
            raw_fname = raw.filenames[0]
        else:
            # FIXME: How to get the filename if no filenames attribute?
            raw_fname = 'unknown_file_name'
            ext = '.fif'
    else:
        raise ValueError('raw_file must be an instance of str or BaseRaw, '
                         'got %s' % type(raw_file))
    # Step 2: create the BIDS directory tree for this subject/session/kind.
    data_path = make_bids_folders(subject=subject_id, session=session_id,
                                  kind=kind, root=output_path,
                                  overwrite=False, verbose=verbose)
    if session_id is None:
        # No session level: the scans.tsv lives one level above the kind dir.
        ses_path = os.sep.join(data_path.split(os.sep)[:-1])
    else:
        ses_path = make_bids_folders(subject=subject_id, session=session_id,
                                     root=output_path, make_dir=False,
                                     overwrite=False, verbose=verbose)
    # create filenames
    scans_fname = make_bids_filename(
        subject=subject_id, session=session_id, suffix='scans.tsv',
        prefix=ses_path)
    participants_fname = make_bids_filename(prefix=output_path,
                                            suffix='participants.tsv')
    coordsystem_fname = make_bids_filename(
        subject=subject_id, session=session_id, acquisition=acquisition,
        suffix='coordsystem.json', prefix=data_path)
    data_meta_fname = make_bids_filename(
        subject=subject_id, session=session_id, task=task, run=run,
        acquisition=acquisition, suffix='%s.json' % kind, prefix=data_path)
    # Single-file formats get a flat BIDS name; multi-file formats (e.g. KIT)
    # get a folder with the data file inside it.
    if ext in ['.fif', '.ds', '.vhdr', '.edf', '.bdf', '.set', '.cnt']:
        raw_file_bids = make_bids_filename(
            subject=subject_id, session=session_id, task=task, run=run,
            acquisition=acquisition, suffix='%s%s' % (kind, ext))
    else:
        raw_folder = make_bids_filename(
            subject=subject_id, session=session_id, task=task, run=run,
            acquisition=acquisition, suffix='%s' % kind)
        raw_file_bids = make_bids_filename(
            subject=subject_id, session=session_id, task=task, run=run,
            acquisition=acquisition, suffix='%s%s' % (kind, ext),
            prefix=raw_folder)
    events_tsv_fname = make_bids_filename(
        subject=subject_id, session=session_id, task=task,
        acquisition=acquisition, run=run, suffix='events.tsv',
        prefix=data_path)
    channels_fname = make_bids_filename(
        subject=subject_id, session=session_id, task=task, run=run,
        acquisition=acquisition, suffix='channels.tsv', prefix=data_path)
    # Read in Raw object and extract metadata from Raw object if needed
    orient = ORIENTATION.get(ext, 'n/a')
    unit = UNITS.get(ext, 'n/a')
    manufacturer = MANUFACTURERS.get(ext, 'n/a')
    if manufacturer == 'Mixed':
        manufacturer = 'n/a'
    # save all meta data
    _participants_tsv(raw, subject_id, "n/a", participants_fname, overwrite,
                      verbose)
    _scans_tsv(raw, os.path.join(kind, raw_file_bids), scans_fname,
               overwrite, verbose)
    # TODO: Implement coordystem.json and electrodes.tsv for EEG and iEEG
    if kind == 'meg':
        _coordsystem_json(raw, unit, orient, manufacturer, coordsystem_fname,
                          overwrite, verbose)
    events = _read_events(events_data, raw)
    if len(events) > 0:
        _events_tsv(events, raw, events_tsv_fname, event_id, overwrite,
                    verbose)
    make_dataset_description(output_path, name=" ", verbose=verbose)
    _sidecar_json(raw, task, manufacturer, data_meta_fname, kind, eeg_ref,
                  eeg_gnd, overwrite, verbose)
    _channels_tsv(raw, channels_fname, overwrite, verbose)
    # set the raw file name to now be the absolute path to ensure the files
    # are placed in the right location
    raw_file_bids = os.path.join(data_path, raw_file_bids)
    if os.path.exists(raw_file_bids) and not overwrite:
        raise OSError(errno.EEXIST, '"%s" already exists. Please set '
                      'overwrite to True.' % raw_file_bids)
    _mkdir_p(os.path.dirname(raw_file_bids))
    if verbose:
        print('Writing data files to %s' % raw_file_bids)
    if ext not in ALLOWED_EXTENSIONS:
        raise ValueError('ext must be in %s, got %s'
                         % (''.join(ALLOWED_EXTENSIONS), ext))
    # Copy the imaging data files
    if ext in ['.fif']:
        n_rawfiles = len(raw.filenames)
        if n_rawfiles > 1:
            # TODO Update MNE requirement to version 0.17 when it's released
            if check_version('mne', '0.17.dev'):
                split_naming = 'bids'
                raw.save(raw_file_bids, split_naming=split_naming,
                         overwrite=True)
            else:
                raise NotImplementedError(
                    'Renaming split fif files is not supported on your '
                    'version of MNE. Please upgrade to at least "0.17.dev". '
                    'Please contact MNE developers if you have '
                    'any questions.')
        else:
            # TODO insert arg `split_naming=split_naming`
            # when MNE releases 0.17
            raw.save(raw_file_bids, overwrite=True)
    # CTF data is saved in a directory
    elif ext == '.ds':
        sh.copytree(raw_fname, raw_file_bids)
    # BrainVision is multifile, copy over all of them and fix pointers
    elif ext == '.vhdr':
        copyfile_brainvision(raw_fname, raw_file_bids)
    # EEGLAB .set might be accompanied by a .fdt - find out and copy it too
    elif ext == '.set':
        copyfile_eeglab(raw_fname, raw_file_bids)
    else:
        sh.copyfile(raw_fname, raw_file_bids)
    # KIT data requires the marker file to be copied over too
    # NOTE(review): `raw_folder` is only defined in the multi-file branch
    # above; this is safe in practice because KIT extensions ('.sqd'/'.con')
    # always take that branch, but confirm if new marker formats are added.
    if hpi is not None:
        if isinstance(hpi, list):
            # No currently accepted way to name multiple marker files. See:
            # https://github.com/bids-standard/bids-specification/issues/45
            raise ValueError('Only single marker coils supported currently')
        _, marker_ext = _parse_ext(hpi)
        marker_fname = make_bids_filename(
            subject=subject_id, session=session_id, task=task, run=run,
            acquisition=acquisition, suffix='markers%s' % marker_ext,
            prefix=os.path.join(data_path, raw_folder))
        sh.copyfile(hpi, marker_fname)
    return output_path
| [
"datetime.datetime",
"os.path.exists",
"collections.OrderedDict",
"datetime.datetime.fromtimestamp",
"pandas.read_csv",
"mne.channels.channels._unit2human.get",
"mne.io.pick.channel_type",
"os.path.join",
"mne.utils.check_version",
"pandas.DataFrame.from_dict",
"shutil.copytree",
"os.path.dirn... | [((2775, 2804), 'collections.defaultdict', 'defaultdict', (["(lambda : 'OTHER')"], {}), "(lambda : 'OTHER')\n", (2786, 2804), False, 'from collections import defaultdict, OrderedDict\n'), ((3164, 3209), 'collections.defaultdict', 'defaultdict', (["(lambda : 'Other type of channel')"], {}), "(lambda : 'Other type of channel')\n", (3175, 3209), False, 'from collections import defaultdict, OrderedDict\n'), ((7571, 7599), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {}), '(data)\n', (7593, 7599), True, 'import pandas as pd\n'), ((9794, 9868), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': "['participant_id', 'age', 'sex', 'group']"}), "(data=data, columns=['participant_id', 'age', 'sex', 'group'])\n", (9806, 9868), True, 'import pandas as pd\n'), ((9899, 9920), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (9913, 9920), False, 'import os\n'), ((12090, 12203), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'filename': ['%s' % raw_fname], 'acq_time': [acq_time]}", 'columns': "['filename', 'acq_time']"}), "(data={'filename': ['%s' % raw_fname], 'acq_time': [acq_time]},\n columns=['filename', 'acq_time'])\n", (12102, 12203), True, 'import pandas as pd\n'), ((12258, 12279), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (12272, 12279), False, 'import os\n'), ((19817, 19842), 'collections.OrderedDict', 'OrderedDict', (['ch_info_json'], {}), '(ch_info_json)\n', (19828, 19842), False, 'from collections import defaultdict, OrderedDict\n'), ((27614, 27652), 'os.path.join', 'os.path.join', (['data_path', 'raw_file_bids'], {}), '(data_path, raw_file_bids)\n', (27626, 27652), False, 'import os\n'), ((4716, 4743), 'mne.io.pick.channel_type', 'channel_type', (['raw.info', 'idx'], {}), '(raw.info, idx)\n', (4728, 4743), False, 'from mne.io.pick import channel_type\n'), ((5025, 5061), 'mne.channels.channels._unit2human.get', '_unit2human.get', (["ch_i['unit']", '"""n/a"""'], {}), 
"(ch_i['unit'], 'n/a')\n", (5040, 5061), False, 'from mne.channels.channels import _unit2human\n'), ((9940, 9968), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'sep': '"""\t"""'}), "(fname, sep='\\t')\n", (9951, 9968), True, 'import pandas as pd\n'), ((12299, 12327), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'sep': '"""\t"""'}), "(fname, sep='\\t')\n", (12310, 12327), True, 'import pandas as pd\n'), ((16406, 16458), 'warnings.warn', 'warn', (['"""No line frequency found, defaulting to 50 Hz"""'], {}), "('No line frequency found, defaulting to 50 Hz')\n", (16410, 16458), False, 'from warnings import warn\n'), ((26752, 26785), 'os.path.join', 'os.path.join', (['kind', 'raw_file_bids'], {}), '(kind, raw_file_bids)\n', (26764, 26785), False, 'import os\n'), ((27660, 27689), 'os.path.exists', 'os.path.exists', (['raw_file_bids'], {}), '(raw_file_bids)\n', (27674, 27689), False, 'import os\n'), ((27853, 27883), 'os.path.dirname', 'os.path.dirname', (['raw_file_bids'], {}), '(raw_file_bids)\n', (27868, 27883), False, 'import os\n'), ((30097, 30127), 'shutil.copyfile', 'sh.copyfile', (['hpi', 'marker_fname'], {}), '(hpi, marker_fname)\n', (30108, 30127), True, 'import shutil as sh\n'), ((9507, 9539), 'datetime.datetime', 'datetime', (['age[0]', 'age[1]', 'age[2]'], {}), '(age[0], age[1], age[2])\n', (9515, 9539), False, 'from datetime import datetime\n'), ((9568, 9601), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['meas_date'], {}), '(meas_date)\n', (9590, 9601), False, 'from datetime import datetime\n'), ((28333, 28365), 'mne.utils.check_version', 'check_version', (['"""mne"""', '"""0.17.dev"""'], {}), "('mne', '0.17.dev')\n", (28346, 28365), False, 'from mne.utils import check_version\n'), ((29064, 29101), 'shutil.copytree', 'sh.copytree', (['raw_fname', 'raw_file_bids'], {}), '(raw_fname, raw_file_bids)\n', (29075, 29101), True, 'import shutil as sh\n'), ((7367, 7392), 'numpy.zeros', 'np.zeros', (['events.shape[0]'], {}), '(events.shape[0])\n', 
(7375, 7392), True, 'import numpy as np\n'), ((11968, 12001), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['meas_date'], {}), '(meas_date)\n', (11990, 12001), False, 'from datetime import datetime\n'), ((30052, 30087), 'os.path.join', 'os.path.join', (['data_path', 'raw_folder'], {}), '(data_path, raw_folder)\n', (30064, 30087), False, 'import os\n'), ((5486, 5512), 'numpy.full', 'np.full', (['n_channels', 'sfreq'], {}), '(n_channels, sfreq)\n', (5493, 5512), True, 'import numpy as np\n'), ((5554, 5585), 'numpy.full', 'np.full', (['n_channels', 'low_cutoff'], {}), '(n_channels, low_cutoff)\n', (5561, 5585), True, 'import numpy as np\n'), ((5628, 5660), 'numpy.full', 'np.full', (['n_channels', 'high_cutoff'], {}), '(n_channels, high_cutoff)\n', (5635, 5660), True, 'import numpy as np\n'), ((29421, 29458), 'shutil.copyfile', 'sh.copyfile', (['raw_fname', 'raw_file_bids'], {}), '(raw_fname, raw_file_bids)\n', (29432, 29458), True, 'import shutil as sh\n')] |
import param
import numpy as np
from bokeh.models import Patches
from ...core.data import Dataset
from ...core.util import basestring, max_range, dimension_sanitizer
from .graphs import GraphPlot
class SankeyPlot(GraphPlot):
    """
    Bokeh plotting class for Sankey elements.

    Extends GraphPlot with node rectangles (quads) built from the
    precomputed ``element._sankey`` layout and optional node labels,
    drawn on top of the flow patches.
    """

    color_index = param.ClassSelector(default=2, class_=(basestring, int),
                                      allow_None=True, doc="""
        Index of the dimension from which the node colors will be drawn""")

    label_index = param.ClassSelector(default=2, class_=(basestring, int),
                                      allow_None=True, doc="""
        Index of the dimension from which the node labels will be drawn""")

    label_position = param.ObjectSelector(default='right', objects=['left', 'right'],
                                          doc="""
        Whether node labels should be placed to the left or right.""")

    show_values = param.Boolean(default=True, doc="""
        Whether to show the values.""")

    node_width = param.Number(default=15, doc="""
        Width of the nodes.""")

    node_padding = param.Integer(default=10, doc="""
        Number of pixels of padding relative to the bounds.""")

    iterations = param.Integer(default=32, doc="""
        Number of iterations to run the layout algorithm.""")

    # Extend the inherited glyph -> style-group mapping with the node quads
    # and the text labels added by this plot type.
    _style_groups = dict(GraphPlot._style_groups, quad='nodes', text='label')

    # Flow patches are drawn first, node quads on top, labels last.
    _draw_order = ['patches', 'quad', 'text']

    style_opts = GraphPlot.style_opts + ['edge_fill_alpha', 'nodes_line_color',
                                         'label_text_font_size']

    # Sankey edges are filled polygons rather than open paths.
    filled = True

    def _init_glyphs(self, plot, element, ranges, source):
        """Initialize glyphs, then move this plot's glyph renderer to the
        front of the renderer list so it draws below later renderers."""
        ret = super(SankeyPlot, self)._init_glyphs(plot, element, ranges, source)
        renderer = plot.renderers.pop(plot.renderers.index(self.handles['glyph_renderer']))
        plot.renderers = [renderer] + plot.renderers
        return ret

    def get_data(self, element, ranges, style):
        """Return the data, glyph mappings and style from GraphPlot,
        augmented with node quads, node labels and patched hover data."""
        data, mapping, style = super(SankeyPlot, self).get_data(element, ranges, style)
        self._compute_quads(element, data, mapping)
        style['nodes_line_color'] = 'black'
        lidx = element.nodes.get_dimension(self.label_index)
        if lidx is None:
            # No label dimension resolved; warn only if the user explicitly
            # asked for one that could not be found.
            if self.label_index is not None:
                dims = element.nodes.dimensions()[2:]
                self.warning("label_index supplied to Sankey not found, "
                             "expected one of %s, got %s." %
                             (dims, self.label_index))
            return data, mapping, style
        self._compute_labels(element, data, mapping)
        self._patch_hover(element, data)
        return data, mapping, style

    def _compute_quads(self, element, data, mapping):
        """
        Computes the node quad glyph data.
        """
        quad_mapping = {'left': 'x0', 'right': 'x1', 'bottom': 'y0', 'top': 'y1'}
        # Reuse the scatter (node) data so auxiliary columns stay aligned.
        quad_data = dict(data['scatter_1'])
        quad_data.update({'x0': [], 'x1': [], 'y0': [], 'y1': []})
        for node in element._sankey['nodes']:
            quad_data['x0'].append(node['x0'])
            quad_data['y0'].append(node['y0'])
            quad_data['x1'].append(node['x1'])
            quad_data['y1'].append(node['y1'])
        data['quad_1'] = quad_data
        if 'node_fill_color' in mapping['scatter_1']:
            # Inherit the node color mapping computed for the scatter glyph.
            quad_mapping['fill_color'] = mapping['scatter_1']['node_fill_color']
        mapping['quad_1'] = quad_mapping

    def _compute_labels(self, element, data, mapping):
        """
        Computes labels for the nodes and adds it to the data.
        """
        lidx = element.nodes.get_dimension(self.label_index)
        if element.vdims:
            # Restrict to nodes participating in at least one non-zero flow.
            edges = Dataset(element)[element[element.vdims[0].name]>0]
            nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
            nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
        else:
            nodes = element
        value_dim = element.vdims[0]
        labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]
        if self.show_values:
            # Append each node's total value (and unit, if defined) to its label.
            value_labels = []
            for i, node in enumerate(element._sankey['nodes']):
                value = value_dim.pprint_value(node['value'])
                label = '%s - %s' % (labels[i], value)
                if value_dim.unit:
                    label += ' %s' % value_dim.unit
                value_labels.append(label)
            labels = value_labels
        ys = nodes.dimension_values(1)
        nodes = element._sankey['nodes']
        # Offset labels horizontally by a quarter of the node width.
        offset = (nodes[0]['x1']-nodes[0]['x0'])/4.
        if self.label_position == 'right':
            xs = np.array([node['x1'] for node in nodes])+offset
        else:
            xs = np.array([node['x0'] for node in nodes])-offset
        data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels])
        align = 'left' if self.label_position == 'right' else 'right'
        mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)

    def _patch_hover(self, element, data):
        """
        Replace edge start and end hover data with label_index data.
        """
        if not (self.inspection_policy == 'edges' and 'hover' in self.handles):
            return
        lidx = element.nodes.get_dimension(self.label_index)
        src, tgt = [dimension_sanitizer(kd.name) for kd in element.kdims[:2]]
        # The data source stores 'start'/'end' under suffixed keys.
        if src == 'start': src += '_values'
        if tgt == 'end': tgt += '_values'
        # Build a node-index -> label mapping and rewrite the hover columns.
        lookup = dict(zip(*(element.nodes.dimension_values(d) for d in (2, lidx))))
        src_vals = data['patches_1'][src]
        tgt_vals = data['patches_1'][tgt]
        data['patches_1'][src] = [lookup.get(v, v) for v in src_vals]
        data['patches_1'][tgt] = [lookup.get(v, v) for v in tgt_vals]

    def get_extents(self, element, ranges):
        """Compute the (x0, y0, x1, y1) plot bounds, padding the x-range
        asymmetrically to leave room for node labels when enabled."""
        xdim, ydim = element.nodes.kdims[:2]
        # Reserve extra horizontal space on the label side.
        xpad = .05 if self.label_index is None else 0.25
        x0, x1 = ranges[xdim.name]
        y0, y1 = ranges[ydim.name]
        xdiff = (x1-x0)
        ydiff = (y1-y0)
        if self.label_position == 'right':
            x0, x1 = x0-(0.05*xdiff), x1+xpad*xdiff
        else:
            x0, x1 = x0-xpad*xdiff, x1+(0.05*xdiff)
        x0, x1 = max_range([xdim.range, (x0, x1)])
        y0, y1 = max_range([ydim.range, (y0-(0.05*ydiff), y1+(0.05*ydiff))])
        return (x0, y0, x1, y1)

    def _postprocess_hover(self, renderer, source):
        """Attach hover tools only to the renderer matching the inspection
        policy: Patches glyphs for 'edges', any other glyph otherwise."""
        if self.inspection_policy == 'edges':
            if not isinstance(renderer.glyph, Patches):
                return
        else:
            if isinstance(renderer.glyph, Patches):
                return
        super(SankeyPlot, self)._postprocess_hover(renderer, source)
| [
"param.ClassSelector",
"param.Number",
"param.ObjectSelector",
"param.Boolean",
"param.Integer",
"numpy.array"
] | [((249, 420), 'param.ClassSelector', 'param.ClassSelector', ([], {'default': '(2)', 'class_': '(basestring, int)', 'allow_None': '(True)', 'doc': '"""\n Index of the dimension from which the node colors will be drawn"""'}), '(default=2, class_=(basestring, int), allow_None=True,\n doc=\n """\n Index of the dimension from which the node colors will be drawn"""\n )\n', (268, 420), False, 'import param\n'), ((464, 635), 'param.ClassSelector', 'param.ClassSelector', ([], {'default': '(2)', 'class_': '(basestring, int)', 'allow_None': '(True)', 'doc': '"""\n Index of the dimension from which the node labels will be drawn"""'}), '(default=2, class_=(basestring, int), allow_None=True,\n doc=\n """\n Index of the dimension from which the node labels will be drawn"""\n )\n', (483, 635), False, 'import param\n'), ((682, 830), 'param.ObjectSelector', 'param.ObjectSelector', ([], {'default': '"""right"""', 'objects': "['left', 'right']", 'doc': '"""\n Whether node labels should be placed to the left or right."""'}), '(default=\'right\', objects=[\'left\', \'right\'], doc=\n """\n Whether node labels should be placed to the left or right.""")\n', (702, 830), False, 'import param\n'), ((887, 962), 'param.Boolean', 'param.Boolean', ([], {'default': '(True)', 'doc': '"""\n Whether to show the values."""'}), '(default=True, doc="""\n Whether to show the values.""")\n', (900, 962), False, 'import param\n'), ((981, 1045), 'param.Number', 'param.Number', ([], {'default': '(15)', 'doc': '"""\n Width of the nodes."""'}), '(default=15, doc="""\n Width of the nodes.""")\n', (993, 1045), False, 'import param\n'), ((1066, 1168), 'param.Integer', 'param.Integer', ([], {'default': '(10)', 'doc': '"""\n Number of pixels of padding relative to the bounds."""'}), '(default=10, doc=\n """\n Number of pixels of padding relative to the bounds.""")\n', (1079, 1168), False, 'import param\n'), ((1182, 1282), 'param.Integer', 'param.Integer', ([], {'default': '(32)', 'doc': '"""\n Number of 
iterations to run the layout algorithm."""'}), '(default=32, doc=\n """\n Number of iterations to run the layout algorithm.""")\n', (1195, 1282), False, 'import param\n'), ((4636, 4676), 'numpy.array', 'np.array', (["[node['x1'] for node in nodes]"], {}), "([node['x1'] for node in nodes])\n", (4644, 4676), True, 'import numpy as np\n'), ((4715, 4755), 'numpy.array', 'np.array', (["[node['x0'] for node in nodes]"], {}), "([node['x0'] for node in nodes])\n", (4723, 4755), True, 'import numpy as np\n')] |
from collections.abc import Sequence, Callable
from operator import itemgetter
from random import sample
import itertools
import abc
import numpy as np
from genetic.individuals import BaseIndividual
from genetic.util import Workers, filter_duplicates
__all__ = ["BasePopulation", "PanmicticPopulation"]
class BasePopulation:
    """
    Abstract interface for a population in a genetic algorithm.

    NOTE: this class deliberately does not inherit from ``abc.ABC``, so
    instantiation is not blocked; the abstract members simply raise
    ``NotImplementedError`` when invoked or accessed.
    """

    @abc.abstractmethod
    def evolve(self, *args, **kwargs):
        """Run the evolutionary loop; must be implemented by subclasses."""
        raise NotImplementedError

    # ``abc.abstractproperty`` has been deprecated since Python 3.3;
    # stacking ``property`` on top of ``abc.abstractmethod`` is the
    # documented modern equivalent (the property inherits
    # ``__isabstractmethod__`` from its getter).
    @property
    @abc.abstractmethod
    def individuals(self):
        """
        :rtype: Sequence[(Any, BaseIndividual)]
        """
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def legends(self):
        """
        :rtype: Sequence[(Any, BaseIndividual)]
        """
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def nlegends(self):
        """
        :rtype: int
        """
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def fitness_func(self):
        """
        :rtype: Callable
        """
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def selection(self):
        """
        :rtype: Callable
        """
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def size(self):
        """Target population size.

        :rtype: int
        """
        raise NotImplementedError
class PanmicticPopulation(BasePopulation):
    """
    A panmictic population: no mating structure, i.e. any individual may be
    paired with any other during repopulation.

    :type _individuals: list[(Any, BaseIndividual)]
    :type _legends: list[(Any, BaseIndividual)]
    :type _fitness_func: (BaseIndividual) -> Any
    """

    def __init__(self, ancestors, size, fitness, selection, nlegends=100):
        """
        :type ancestors: Sequence[BaseIndividual]
        :param ancestors: a bunch of individuals to begin with
        :type size: int
        :param size: population size
        :type fitness: (BaseIndividual) -> Any
        :param fitness: a callable that requires one argument - an
                        instance of Individual - and returns an
                        instance of a class that supports comparison
                        operators, i.e. can be used to evaluate and
                        compare fitness of different Individuals.
        :type selection: Callable
        :param selection: a selection engine
        :type nlegends: int
        :param nlegends: the number of legends to remember
        :raises ValueError: on any invalid argument combination
        """
        if not isinstance(nlegends, int) or nlegends < 0:
            raise ValueError("`n_legends` must be a non-negative integer")
        if not isinstance(size, int) or size <= 0:
            raise ValueError("`size` must be a positive integer")
        if not isinstance(ancestors, Sequence) or len(ancestors) < 2:
            raise ValueError("`ancestors` must be a nonempty sequence of "
                             "length >= 2")
        if not all(isinstance(indiv, BaseIndividual) for indiv in ancestors):
            raise ValueError("`ancestors` can only contain instances of "
                             "`Individual`")
        # Probe the fitness function on one ancestor to fail fast on
        # incompatible fitness/individual combinations; the ValueError
        # raised here is intentionally not caught by the except below.
        try:
            if fitness(ancestors[0]) is None:
                raise ValueError("`fitness_function` mustn't return `NoneType` "
                                 "values")
        except (TypeError, AttributeError):
            raise ValueError("Your `fitness` doesn't suit your Individuals")
        self._size = size
        self._fitness_func = fitness
        self._selection = selection
        self._nlegends = nlegends
        self._evolving = False
        # Pair every ancestor with its fitness score.
        self._individuals = list(zip(map(fitness, ancestors), ancestors))
        self._legends = []

    @property
    def size(self):
        """Target population size."""
        return self._size

    @property
    def legends(self):
        """Hall of fame: best unique individuals seen so far.

        :rtype: list[(Any, BaseIndividual)]
        """
        return self._legends

    @property
    def nlegends(self):
        """Maximum number of legends to remember."""
        return self._nlegends

    @property
    def individuals(self):
        """Current (score, individual) pairs."""
        return self._individuals

    @property
    def fitness_func(self):
        """The fitness evaluation callable."""
        return self._fitness_func

    @property
    def selection(self):
        """The selection engine callable."""
        return self._selection

    def evolve(self, n, jobs=1):
        """
        Run ``n`` generations, yielding the population's mean fitness after
        each generation.

        :type n: int
        :param n: number of generations to run
        :type jobs: int
        :param jobs: number of parallel workers for fitness evaluation
        :rtype: Generator[Any]
        """
        if not isinstance(jobs, int) or jobs < 1:
            raise ValueError("`jobs` must be a positive integer")

        def repopulate(evaluated_individuals):
            """
            Fill the population back up to `self.size` by mating random
            pairs of current individuals and scoring the offspring.

            :type evaluated_individuals: list[(Any, BaseIndividual)]
            :rtype: list[(Any, BaseIndividual)]
            """
            n_pairs = self.size - len(evaluated_individuals)
            pairs = [sample(self.individuals, 2) for _ in range(n_pairs)]
            new_individuals = [ind1[1].mate(ind2[1]) for ind1, ind2 in pairs]
            scores = workers.map(self.fitness_func, new_individuals)
            return evaluated_individuals + list(zip(scores, new_individuals))

        def new_legends(old_legends, contenders):
            """
            Merge contenders into the hall of fame, keeping the best
            `self.nlegends` unique entries.

            :type old_legends: list[(Any, BaseIndividual)]
            :type contenders: list[(Any, BaseIndividual)]
            """
            merged = sorted(filter_duplicates(old_legends + contenders),
                            key=itemgetter(0), reverse=True)
            return merged[:self.nlegends]

        workers = Workers(jobs)
        # BUGFIX: previously `workers.terminate()` was only reached after the
        # generator had been fully exhausted; abandoning the generator early
        # (or an exception during evaluation) leaked the worker pool.  The
        # try/finally guarantees cleanup in every case, including when the
        # generator is garbage-collected or explicitly closed.
        try:
            if len(self.individuals) < self.size:
                self._individuals = repopulate(self.individuals)
            for _ in itertools.repeat(None, n):
                survivors = self.selection(sorted(self.individuals,
                                                  key=itemgetter(0), reverse=True))
                self._individuals = repopulate(survivors)
                # Update legends
                self._legends = new_legends(self.legends, self.individuals)
                yield np.mean([indiv[0] for indiv in self.individuals])
        finally:
            workers.terminate()
if __name__ == "__main__":
    # This module is a library; running it directly is a usage error.
    raise RuntimeError
| [
"numpy.mean",
"random.sample",
"genetic.util.filter_duplicates",
"genetic.util.Workers",
"operator.itemgetter",
"itertools.repeat"
] | [((5129, 5142), 'genetic.util.Workers', 'Workers', (['jobs'], {}), '(jobs)\n', (5136, 5142), False, 'from genetic.util import Workers, filter_duplicates\n'), ((5269, 5294), 'itertools.repeat', 'itertools.repeat', (['None', 'n'], {}), '(None, n)\n', (5285, 5294), False, 'import itertools\n'), ((4456, 4483), 'random.sample', 'sample', (['self.individuals', '(2)'], {}), '(self.individuals, 2)\n', (4462, 4483), False, 'from random import sample\n'), ((4962, 5005), 'genetic.util.filter_duplicates', 'filter_duplicates', (['(old_legends + contenders)'], {}), '(old_legends + contenders)\n', (4979, 5005), False, 'from genetic.util import Workers, filter_duplicates\n'), ((5613, 5662), 'numpy.mean', 'np.mean', (['[indiv[0] for indiv in self.individuals]'], {}), '([indiv[0] for indiv in self.individuals])\n', (5620, 5662), True, 'import numpy as np\n'), ((5039, 5052), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (5049, 5052), False, 'from operator import itemgetter\n'), ((5410, 5423), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (5420, 5423), False, 'from operator import itemgetter\n')] |
import numpy as np
def sharpe_ratio(returns, periods_per_year):
    """Annualized Sharpe ratio of a return series.

    The ``- 1`` suggests `returns` holds gross (1 + r) per-period returns,
    converted here to a mean net return; no explicit risk-free rate is
    subtracted (NOTE(review): confirm the convention with callers).
    """
    mean_excess = np.mean(returns) - 1
    per_period_risk = np.std(returns)
    annualization = np.sqrt(periods_per_year)
    return annualization * mean_excess / per_period_risk
| [
"numpy.mean",
"numpy.sqrt",
"numpy.std"
] | [((129, 144), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (135, 144), True, 'import numpy as np\n'), ((76, 101), 'numpy.sqrt', 'np.sqrt', (['periods_per_year'], {}), '(periods_per_year)\n', (83, 101), True, 'import numpy as np\n'), ((105, 121), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (112, 121), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
def weighted_avg_and_std(values, weights=None):
    """Get the weighted average, the weighted standard deviation and an
    error estimate of the weighted mean.

    Input
    -----
    values : array_like
        The values from which to calculate the statistics.
    weights : array_like, optional
        Weight of each value. When omitted, weights are derived from each
        value's distance to the median (scaled by the standard deviation),
        so outliers are down-weighted.

    Output
    ------
    average : float
        The weighted average, rounded to 3 decimals.
    std : float
        The weighted standard deviation.
    std1 : float
        Error estimate of the mean based on the inverse weights.
    """
    vals = np.array(values)
    if weights is None:
        # Robust default weighting: distance from the median in units of
        # the standard deviation (tiny epsilon avoids division by zero).
        scatter = np.std(vals) + 1E-13
        distance = np.abs(vals - np.median(vals)) / scatter
        weights = (distance + 0.25) ** (-1)
    average = round(np.average(vals, weights=weights), 3)
    variance = np.average((vals - average) ** 2, weights=weights)
    std = np.sqrt(variance)
    std1 = np.sqrt(np.sum((1 / weights) ** 2)) / len(weights)
    return average, std, std1
if __name__ == '__main__':
    # One CSV of individual analyses per star.
    stars = ('10Leo.csv', 'arcturus.csv', 'HD20010.csv')
    columns = 'star,teff,tefferr,logg,loggerr,feh,feherr,vt,vterr,fixlogg'.split(',')
    # Two output rows per star: one with logg held fixed, one with logg free.
    df = pd.DataFrame(index=range(2*len(stars)), columns=columns)
    df.fillna(0, inplace=True)
    idx = 0
    for star in stars:
        df_star = pd.read_csv(star)
        # Split the analyses by whether logg was held fixed.
        df1 = df_star[df_star.fixlogg]
        df2 = df_star[~df_star.fixlogg]
        df.loc[idx, 'star'] = star.replace('.csv', '')
        df.loc[idx+1, 'star'] = star.replace('.csv', '')
        # 'teff':'vt':2 label-slices every other column, i.e. the parameter
        # columns (teff, logg, feh, vt), skipping the interleaved *err ones.
        for parameter in df_star.loc[:, 'teff':'vt':2].columns:
            # Weight each measurement by the inverse of its quoted error.
            v1, e1, x1 = weighted_avg_and_std(df1[parameter], 1/df1[parameter+'err'])
            v2, e2, x2 = weighted_avg_and_std(df2[parameter], 1/df2[parameter+'err'])
            # Adopt the larger of the weighted std and the error on the mean.
            e1 = max([e1, x1])
            e2 = max([e2, x2])
            df.loc[idx, parameter] = v1
            df.loc[idx, parameter+'err'] = e1
            df.loc[idx, 'fixlogg'] = True
            df.loc[idx+1, parameter] = v2
            df.loc[idx+1, parameter+'err'] = e2
            df.loc[idx+1, 'fixlogg'] = False
        idx += 2
    df.to_csv('stellar_parameters.csv', index=False)
| [
"numpy.median",
"pandas.read_csv",
"numpy.average",
"numpy.array",
"numpy.sum",
"numpy.std"
] | [((431, 447), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (439, 447), True, 'import numpy as np\n'), ((579, 614), 'numpy.average', 'np.average', (['values'], {'weights': 'weights'}), '(values, weights=weights)\n', (589, 614), True, 'import numpy as np\n'), ((637, 689), 'numpy.average', 'np.average', (['((values - average) ** 2)'], {'weights': 'weights'}), '((values - average) ** 2, weights=weights)\n', (647, 689), True, 'import numpy as np\n'), ((1097, 1114), 'pandas.read_csv', 'pd.read_csv', (['star'], {}), '(star)\n', (1108, 1114), True, 'import pandas as pd\n'), ((706, 732), 'numpy.sum', 'np.sum', (['((1 / weights) ** 2)'], {}), '((1 / weights) ** 2)\n', (712, 732), True, 'import numpy as np\n'), ((525, 539), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (531, 539), True, 'import numpy as np\n'), ((505, 522), 'numpy.median', 'np.median', (['values'], {}), '(values)\n', (514, 522), True, 'import numpy as np\n')] |
from exoplanet.gp import terms, GP
from exoplanet.distributions import estimate_inverse_gamma_parameters
import exoplanet as xo
import corner, os, pickle
import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm
import pickle, os, corner
from collections import OrderedDict
from pymc3.backends.tracetab import trace_to_dataframe
import exoplanet as xo
# Reproducible synthetic dataset: a fundamental sinusoid at the rotation
# period plus its first harmonic, with heteroscedastic Gaussian noise.
np.random.seed(42)
mean = 1
amp = 1.5e-2
P_rot = 2.4
w_rot = 2*np.pi/P_rot
amp_mix = 0.7
phase_off = 2.1

# Ground-truth parameter values, later used as `truths` in the corner plot.
true_d = {}
true_d['mean'] = mean
true_d['period'] = P_rot
true_d['amp'] = amp
true_d['mix'] = amp_mix
# The pure two-sinusoid generative model has no quality factors, so the
# corner plot gets NaN "truths" for these two nuisance parameters.
true_d['log_Q0'] = np.nan
true_d['log_deltaQ'] = np.nan

# Irregular time sampling with gaps between four observing windows.
t = np.sort(
    np.concatenate([np.random.uniform(0, 3.8, 57),
                    np.random.uniform(5.5, 10, 68),
                    np.random.uniform(11, 16.8, 57),
                    np.random.uniform(19, 25, 68)])
)  # The input coordinates must be sorted
yerr = amp*np.random.uniform(0.08, 0.22, len(t))
y = (
    mean +
    + amp*np.sin(w_rot * t )
    + amp*amp_mix*np.sin(2*w_rot * t + phase_off)
    + yerr * np.random.randn(len(t))
)

# Densely sampled noiseless curve used for plotting the truth.
true_t = np.linspace(0, 25, 5000)
true_y = (
    mean +
    + amp*np.sin(w_rot * true_t )
    + amp*amp_mix*np.sin(2*w_rot * true_t + phase_off)
)

# Quick-look plot of the simulated data against the truth.
plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0, label="data")
plt.plot(true_t, true_y, "k", lw=1.5, alpha=0.3, label="truth")
plt.legend(fontsize=12)
plt.xlabel("t")
plt.ylabel("y")
plt.savefig('../results/test_results/rot_model/data.png', dpi=200)
plt.close('all')
# Cache the fitted model to disk so reruns skip the expensive sampling.
pklpath = os.path.join(os.path.expanduser('~'), 'local', 'timmy',
                       'model_rot.pkl')
if os.path.exists(pklpath):
    # NOTE(review): the file handle returned by open() is never closed
    # explicitly here; consider a `with` block.
    d = pickle.load(open(pklpath, 'rb'))
    model, trace, map_estimate, gp = (
        d['model'], d['trace'], d['map_estimate'], d['gp']
    )
else:
    with pm.Model() as model:
        # NOTE: a more principled prior might be acquired using
        # "estimate_inverse_gamma_parameters"
        mean = pm.Normal("mean", mu=0.0, sigma=1.0)
        period = pm.Normal("period", mu=2.4, sigma=1.0)
        amp = pm.Uniform("amp", lower=5e-3, upper=2.5e-2)

        # This approach has (at least) three nuisance parameters. The mixing
        # amplitude between modes, "mix". Q0 or log_Q0 (tensor) - The quality
        # factor (or really the quality factor minus one half) for the
        # secondary oscillation. deltaQ or log_deltaQ (tensor) - The
        # difference between the quality factors of the first and the second
        # modes. This parameterization (if deltaQ > 0) ensures that the primary
        # mode alway has higher quality. Note for the simple case of two
        # sinusoids, for log_deltaQ to work requires going between nplog(1e-1)
        # to nplog(1e20). And it does not run fast!
        mix = pm.Uniform("mix", lower=0, upper=1)
        log_Q0 = pm.Uniform("log_Q0", lower=np.log(2), upper=np.log(1e10))
        log_deltaQ = pm.Uniform("log_deltaQ",
                                lower=np.log(1e-1),
                                upper=np.log(1e10))

        kernel = terms.RotationTerm(
            amp=amp,
            period=period,
            mix=mix,
            log_Q0=log_Q0,
            log_deltaQ=log_deltaQ,
        )
        gp = GP(kernel, t, yerr**2, mean=mean)

        # Condition the GP on the observations and add the marginal likelihood
        # to the model
        gp.marginal("gp", observed=y)

    with model:
        # Maximum a posteriori fit: used for a quick-look plot and as the
        # starting point of the MCMC below.
        map_estimate = xo.optimize(start=model.test_point)

    with model:
        # Evaluate the GP prediction (mean and variance) at the MAP point.
        mu, var = xo.eval_in_model(
            gp.predict(true_t, return_var=True, predict_mean=True), map_estimate
        )

    # Plot the prediction and the 1-sigma uncertainty
    plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0, label="data")
    plt.plot(true_t, true_y, "k", lw=1.5, alpha=0.3, label="truth")
    sd = np.sqrt(var)
    art = plt.fill_between(true_t, mu + sd, mu - sd, color="C1", alpha=0.3)
    art.set_edgecolor("none")
    plt.plot(true_t, mu, color="C1", label="prediction")
    plt.legend(fontsize=12)
    plt.xlabel("t")
    plt.ylabel("y")
    plt.savefig('../results/test_results/rot_model/prediction_uncert.png', dpi=200)
    plt.close('all')

    with model:
        # Full MCMC sampling, initialized at the MAP estimate.
        trace = pm.sample(
            tune=2000,
            draws=2000,
            start=map_estimate,
            cores=2,
            chains=2,
            step=xo.get_dense_nuts_step(target_accept=0.9),
        )

    # Persist everything needed to reproduce the plots without resampling.
    with open(pklpath, 'wb') as buff:
        pickle.dump({'model': model, 'trace': trace,
                     'map_estimate': map_estimate, 'gp':gp}, buff)

print(pm.summary(trace))

# Posterior corner plot with the known ground-truth values overplotted.
samples = pm.trace_to_dataframe(trace)
truths = [true_d[k] for k in list(samples.columns)]
fig = corner.corner(
    samples,
    labels=list(samples.columns),
    truths=truths
)
fig.savefig('../results/test_results/rot_model/test_rot_corner.png')
plt.close('all')
#
# Generate 50 realizations of the prediction sampling randomly from the chain
#
N_pred = 50
pred_mu = np.empty((N_pred, len(true_t)))
pred_var = np.empty((N_pred, len(true_t)))
with model:
    # Build the symbolic prediction once, then evaluate it for each
    # posterior sample drawn from the trace.
    pred = gp.predict(true_t, return_var=True, predict_mean=True)
    for i, sample in enumerate(xo.get_samples_from_trace(trace, size=N_pred)):
        pred_mu[i], pred_var[i] = xo.eval_in_model(pred, sample)

# Plot the predictions
for i in range(len(pred_mu)):
    mu = pred_mu[i]
    sd = np.sqrt(pred_var[i])
    # Only the first realization gets a legend entry.
    label = None if i else "prediction"
    art = plt.fill_between(true_t, mu + sd, mu - sd, color="C1", alpha=0.1)
    art.set_edgecolor("none")
    plt.plot(true_t, mu, color="C1", label=label, alpha=0.1)

plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0, label="data")
plt.plot(true_t, true_y, "k", lw=1.5, alpha=0.3, label="truth")
plt.legend(fontsize=12, loc=2)
plt.xlabel("t")
plt.ylabel("y")
plt.savefig('../results/test_results/rot_model/test_rot_sampling.png')
plt.close('all')
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"pymc3.summary",
"numpy.log",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.errorbar",
"numpy.sin",
"exoplanet.gp.GP",
"exoplanet.get_dense_nuts_step",
"os.path.exists",
"pymc3.Uniform",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
... | [((369, 387), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (383, 387), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1091, 1115), 'numpy.linspace', 'np.linspace', (['(0)', '(25)', '(5000)'], {}), '(0, 25, 5000)\n', (1102, 1115), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1230, 1294), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['t', 'y'], {'yerr': 'yerr', 'fmt': '""".k"""', 'capsize': '(0)', 'label': '"""data"""'}), "(t, y, yerr=yerr, fmt='.k', capsize=0, label='data')\n", (1242, 1294), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1295, 1358), 'matplotlib.pyplot.plot', 'plt.plot', (['true_t', 'true_y', '"""k"""'], {'lw': '(1.5)', 'alpha': '(0.3)', 'label': '"""truth"""'}), "(true_t, true_y, 'k', lw=1.5, alpha=0.3, label='truth')\n", (1303, 1358), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1359, 1382), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (1369, 1382), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1383, 1398), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (1393, 1398), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1399, 1414), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1409, 1414), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1415, 1481), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../results/test_results/rot_model/data.png"""'], {'dpi': '(200)'}), "('../results/test_results/rot_model/data.png', dpi=200)\n", (1426, 1481), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1482, 1498), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1491, 1498), True, 'import numpy as np, 
matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1610, 1633), 'os.path.exists', 'os.path.exists', (['pklpath'], {}), '(pklpath)\n', (1624, 1633), False, 'import pickle, os, corner\n'), ((4611, 4639), 'pymc3.trace_to_dataframe', 'pm.trace_to_dataframe', (['trace'], {}), '(trace)\n', (4632, 4639), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((4849, 4865), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4858, 4865), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((5581, 5645), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['t', 'y'], {'yerr': 'yerr', 'fmt': '""".k"""', 'capsize': '(0)', 'label': '"""data"""'}), "(t, y, yerr=yerr, fmt='.k', capsize=0, label='data')\n", (5593, 5645), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((5646, 5709), 'matplotlib.pyplot.plot', 'plt.plot', (['true_t', 'true_y', '"""k"""'], {'lw': '(1.5)', 'alpha': '(0.3)', 'label': '"""truth"""'}), "(true_t, true_y, 'k', lw=1.5, alpha=0.3, label='truth')\n", (5654, 5709), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((5710, 5740), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)', 'loc': '(2)'}), '(fontsize=12, loc=2)\n', (5720, 5740), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((5741, 5756), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (5751, 5756), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((5757, 5772), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (5767, 5772), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((5773, 5843), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../results/test_results/rot_model/test_rot_sampling.png"""'], {}), "('../results/test_results/rot_model/test_rot_sampling.png')\n", (5784, 5843), 
True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((5844, 5860), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5853, 5860), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1523, 1546), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1541, 1546), False, 'import pickle, os, corner\n'), ((3685, 3749), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['t', 'y'], {'yerr': 'yerr', 'fmt': '""".k"""', 'capsize': '(0)', 'label': '"""data"""'}), "(t, y, yerr=yerr, fmt='.k', capsize=0, label='data')\n", (3697, 3749), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((3754, 3817), 'matplotlib.pyplot.plot', 'plt.plot', (['true_t', 'true_y', '"""k"""'], {'lw': '(1.5)', 'alpha': '(0.3)', 'label': '"""truth"""'}), "(true_t, true_y, 'k', lw=1.5, alpha=0.3, label='truth')\n", (3762, 3817), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((3828, 3840), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (3835, 3840), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((3851, 3916), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['true_t', '(mu + sd)', '(mu - sd)'], {'color': '"""C1"""', 'alpha': '(0.3)'}), "(true_t, mu + sd, mu - sd, color='C1', alpha=0.3)\n", (3867, 3916), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((3951, 4003), 'matplotlib.pyplot.plot', 'plt.plot', (['true_t', 'mu'], {'color': '"""C1"""', 'label': '"""prediction"""'}), "(true_t, mu, color='C1', label='prediction')\n", (3959, 4003), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((4009, 4032), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (4019, 4032), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((4037, 4052), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (4047, 4052), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((4057, 4072), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (4067, 4072), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((4077, 4156), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../results/test_results/rot_model/prediction_uncert.png"""'], {'dpi': '(200)'}), "('../results/test_results/rot_model/prediction_uncert.png', dpi=200)\n", (4088, 4156), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((4161, 4177), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4170, 4177), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((4581, 4598), 'pymc3.summary', 'pm.summary', (['trace'], {}), '(trace)\n', (4591, 4598), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((5352, 5372), 'numpy.sqrt', 'np.sqrt', (['pred_var[i]'], {}), '(pred_var[i])\n', (5359, 5372), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((5423, 5488), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['true_t', '(mu + sd)', '(mu - sd)'], {'color': '"""C1"""', 'alpha': '(0.1)'}), "(true_t, mu + sd, mu - sd, color='C1', alpha=0.1)\n", (5439, 5488), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((5523, 5579), 'matplotlib.pyplot.plot', 'plt.plot', (['true_t', 'mu'], {'color': '"""C1"""', 'label': 'label', 'alpha': '(0.1)'}), "(true_t, mu, color='C1', label=label, alpha=0.1)\n", (5531, 5579), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1190, 1228), 'numpy.sin', 'np.sin', (['(2 * w_rot * true_t + phase_off)'], {}), '(2 * w_rot * true_t + phase_off)\n', (1196, 1228), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as 
pd, pymc3 as pm\n'), ((1797, 1807), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (1805, 1807), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1945, 1981), 'pymc3.Normal', 'pm.Normal', (['"""mean"""'], {'mu': '(0.0)', 'sigma': '(1.0)'}), "('mean', mu=0.0, sigma=1.0)\n", (1954, 1981), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((2000, 2038), 'pymc3.Normal', 'pm.Normal', (['"""period"""'], {'mu': '(2.4)', 'sigma': '(1.0)'}), "('period', mu=2.4, sigma=1.0)\n", (2009, 2038), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((2054, 2097), 'pymc3.Uniform', 'pm.Uniform', (['"""amp"""'], {'lower': '(0.005)', 'upper': '(0.025)'}), "('amp', lower=0.005, upper=0.025)\n", (2064, 2097), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((2772, 2807), 'pymc3.Uniform', 'pm.Uniform', (['"""mix"""'], {'lower': '(0)', 'upper': '(1)'}), "('mix', lower=0, upper=1)\n", (2782, 2807), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((3053, 3146), 'exoplanet.gp.terms.RotationTerm', 'terms.RotationTerm', ([], {'amp': 'amp', 'period': 'period', 'mix': 'mix', 'log_Q0': 'log_Q0', 'log_deltaQ': 'log_deltaQ'}), '(amp=amp, period=period, mix=mix, log_Q0=log_Q0,\n log_deltaQ=log_deltaQ)\n', (3071, 3146), False, 'from exoplanet.gp import terms, GP\n'), ((3228, 3263), 'exoplanet.gp.GP', 'GP', (['kernel', 't', '(yerr ** 2)'], {'mean': 'mean'}), '(kernel, t, yerr ** 2, mean=mean)\n', (3230, 3263), False, 'from exoplanet.gp import terms, GP\n'), ((3444, 3479), 'exoplanet.optimize', 'xo.optimize', ([], {'start': 'model.test_point'}), '(start=model.test_point)\n', (3455, 3479), True, 'import exoplanet as xo\n'), ((4462, 4557), 'pickle.dump', 'pickle.dump', (["{'model': model, 'trace': trace, 'map_estimate': map_estimate, 'gp': gp}", 'buff'], {}), "({'model': model, 'trace': trace, 'map_estimate': map_estimate,\n 'gp': gp}, 
buff)\n", (4473, 4557), False, 'import pickle, os, corner\n'), ((5156, 5201), 'exoplanet.get_samples_from_trace', 'xo.get_samples_from_trace', (['trace'], {'size': 'N_pred'}), '(trace, size=N_pred)\n', (5181, 5201), True, 'import exoplanet as xo\n'), ((5238, 5268), 'exoplanet.eval_in_model', 'xo.eval_in_model', (['pred', 'sample'], {}), '(pred, sample)\n', (5254, 5268), True, 'import exoplanet as xo\n'), ((669, 698), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(3.8)', '(57)'], {}), '(0, 3.8, 57)\n', (686, 698), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((719, 749), 'numpy.random.uniform', 'np.random.uniform', (['(5.5)', '(10)', '(68)'], {}), '(5.5, 10, 68)\n', (736, 749), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((770, 801), 'numpy.random.uniform', 'np.random.uniform', (['(11)', '(16.8)', '(57)'], {}), '(11, 16.8, 57)\n', (787, 801), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((822, 851), 'numpy.random.uniform', 'np.random.uniform', (['(19)', '(25)', '(68)'], {}), '(19, 25, 68)\n', (839, 851), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1010, 1043), 'numpy.sin', 'np.sin', (['(2 * w_rot * t + phase_off)'], {}), '(2 * w_rot * t + phase_off)\n', (1016, 1043), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((1148, 1170), 'numpy.sin', 'np.sin', (['(w_rot * true_t)'], {}), '(w_rot * true_t)\n', (1154, 1170), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((973, 990), 'numpy.sin', 'np.sin', (['(w_rot * t)'], {}), '(w_rot * t)\n', (979, 990), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((2853, 2862), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2859, 2862), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((2870, 2891), 'numpy.log', 'np.log', 
(['(10000000000.0)'], {}), '(10000000000.0)\n', (2876, 2891), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((2969, 2980), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (2975, 2980), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((3021, 3042), 'numpy.log', 'np.log', (['(10000000000.0)'], {}), '(10000000000.0)\n', (3027, 3042), True, 'import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm\n'), ((4362, 4403), 'exoplanet.get_dense_nuts_step', 'xo.get_dense_nuts_step', ([], {'target_accept': '(0.9)'}), '(target_accept=0.9)\n', (4384, 4403), True, 'import exoplanet as xo\n')] |
import gc
import psutil
import pathlib
import logging
import numpy as np
from helita.sim.bifrost import BifrostData
def inverted_zdn(bifrost_data):
    """Mirror ``zdn`` about ``z``, then reverse and negate the result.

    NOTE(review): presumably ``z`` are cell centres and ``zdn`` lower cell
    edges of the Bifrost grid — confirm against helita's BifrostData docs.
    """
    mirrored = 2 * bifrost_data.z - bifrost_data.zdn
    return -mirrored[::-1]
def exclusive_coord_slice(lower_edges, coord_range):
    """Return a slice of cells lying strictly inside ``coord_range``.

    ``lower_edges`` holds the ascending lower-edge coordinates of the cells.
    ``None`` selects everything; a bare number (a zero-extent range) selects
    nothing.
    """
    if coord_range is None:
        return slice(None)
    if not isinstance(coord_range, (tuple, list)):
        float(coord_range)  # validate it is numeric, like the tuple path does
        return slice(0, 0)
    lower, upper = coord_range[0], coord_range[1]
    start = None if lower is None else np.searchsorted(lower_edges, lower)
    # One cell width past the last lower edge marks the grid's upper end;
    # at or beyond it the range is effectively unbounded above.
    if upper is None or upper >= lower_edges[-1] + (
        lower_edges[-1] - lower_edges[-2]
    ):
        stop = None
    else:
        stop = np.searchsorted(lower_edges, upper, side="right") - 1
    return slice(start, stop)
def inclusive_coord_slice(lower_edges, coord_range):
    """Return a slice of every cell overlapping ``coord_range``.

    ``lower_edges`` holds the ascending lower-edge coordinates of the cells.
    ``None`` selects everything; a bare number is treated as a point range
    and always yields at least one cell.
    """
    if coord_range is None:
        return slice(None)
    if not isinstance(coord_range, (tuple, list)):
        point = float(coord_range)
        coord_range = (point, point)
    lower, upper = coord_range[0], coord_range[1]
    # Step one cell back so the cell partially covering the lower bound
    # is still included.
    if lower is None or lower <= lower_edges[0]:
        start = None
    else:
        start = np.searchsorted(lower_edges, lower, side="right") - 1
    if upper is None:
        stop = None
    else:
        stop = np.searchsorted(lower_edges, upper)
        if stop == start:
            # Degenerate (point) range: keep the single containing cell.
            stop += 1
    return slice(start, stop)
class BifrostDataCache:
    """In-memory cache of simulation fields, keyed by snapshot and variable.

    Oldest snapshots are evicted first whenever a new field would not fit in
    the available system memory.
    """

    def __init__(self, logger=logging):
        # Snap numbers in insertion order (oldest first, for eviction).
        self.snaps = []
        # Mapping: snap -> {variable name -> cached array}.
        self.fields = {}
        self._logger = logger

    def number_of_snaps(self):
        """Return how many snapshots currently hold cached fields."""
        return len(self.snaps)

    def has_snap(self, snap):
        """Return True when any field of ``snap`` is cached."""
        return snap in self.snaps

    def has_var(self, snap, var):
        """Return True when variable ``var`` of ``snap`` is cached."""
        return self.has_snap(snap) and var in self.fields[snap]

    def cache_field(self, snap, var, field):
        """Cache ``field`` for (``snap``, ``var``) and return the cached copy.

        Oldest snapshots are dropped until the field fits in memory; when
        nothing is left to drop, the original (e.g. memory-mapped) field is
        returned uncached.
        """
        while not self.field_fits_in_memory(field):
            if self.number_of_snaps() == 0:
                self._logger.debug(f"No more snaps to remove, using memmap")
                return field
            evicted = self.snaps.pop(0)
            self._logger.debug(f"Removed snap {evicted} from cache")
            self.fields.pop(evicted)
            gc.collect()
        if not self.has_snap(snap):
            self.snaps.append(snap)
            self.fields[snap] = {}
        self._logger.debug(f"Cached {var} for snap {snap}")
        self.fields[snap][var] = np.array(field)
        return self.get_cached_field(snap, var)

    def get_cached_field(self, snap, var):
        """Return the cached array for (``snap``, ``var``); assumes it exists."""
        self._logger.debug(f"Found {var} for snap {snap} in cache")
        return self.fields[snap][var]

    def field_fits_in_memory(self, field, buffer_factor=2):
        """Return True when ``buffer_factor`` copies of ``field`` fit in RAM."""
        memory_requirement = field.size * field.dtype.itemsize
        available_memory = psutil.virtual_memory().available
        self._logger.debug(
            f"Required memory: {memory_requirement*1e-9:.2f} GB ({available_memory*1e-9:.2f} GB available)"
        )
        return buffer_factor * memory_requirement < available_memory
class CachingBifrostData(BifrostData):
    """BifrostData subclass that transparently caches fields in memory."""

    def __init__(self, *args, logger=logging, **kwargs):
        super().__init__(*args, **kwargs)
        self._cache = BifrostDataCache(logger=logger)
        # Short name of the simulation, derived from the file root path.
        self.root_name = pathlib.Path(self.file_root).name

    def get_var(self, var, *args, snap=None, **kwargs):
        """Return ``var`` for the requested snap, consulting the cache first."""
        active_snap = snap if snap is not None else self.snap
        if self._cache.has_var(active_snap, var):
            return self._cache.get_cached_field(active_snap, var)
        field = super().get_var(var, *args, snap=snap, **kwargs)
        return self._cache.cache_field(active_snap, var, field)
| [
"pathlib.Path",
"numpy.searchsorted",
"psutil.virtual_memory",
"numpy.array",
"gc.collect"
] | [((525, 569), 'numpy.searchsorted', 'np.searchsorted', (['lower_edges', 'coord_range[0]'], {}), '(lower_edges, coord_range[0])\n', (540, 569), True, 'import numpy as np\n'), ((1364, 1408), 'numpy.searchsorted', 'np.searchsorted', (['lower_edges', 'coord_range[1]'], {}), '(lower_edges, coord_range[1])\n', (1379, 1408), True, 'import numpy as np\n'), ((2547, 2562), 'numpy.array', 'np.array', (['field'], {}), '(field)\n', (2555, 2562), True, 'import numpy as np\n'), ((742, 800), 'numpy.searchsorted', 'np.searchsorted', (['lower_edges', 'coord_range[1]'], {'side': '"""right"""'}), "(lower_edges, coord_range[1], side='right')\n", (757, 800), True, 'import numpy as np\n'), ((1218, 1276), 'numpy.searchsorted', 'np.searchsorted', (['lower_edges', 'coord_range[0]'], {'side': '"""right"""'}), "(lower_edges, coord_range[0], side='right')\n", (1233, 1276), True, 'import numpy as np\n'), ((2912, 2935), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (2933, 2935), False, 'import psutil\n'), ((3380, 3408), 'pathlib.Path', 'pathlib.Path', (['self.file_root'], {}), '(self.file_root)\n', (3392, 3408), False, 'import pathlib\n'), ((2332, 2344), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2342, 2344), False, 'import gc\n')] |
"""Testing module for the utility functions"""
# Authors: <NAME> <<EMAIL>>
# License: BSD (3-clause)
import pytest
import numpy as np
from hemolearn.deconvolution import _update_z
from hemolearn.checks import (check_random_state, check_len_hrf,
check_if_vanished, _get_lambda_max, check_obj,
EarlyStopping, CostFunctionIncreased, check_lbda)
from hemolearn.utils import _set_up_test
def test_check_lbda():
    """Ensure check_lbda rejects invalid strategy / lbda combinations."""
    invalid_cases = [
        dict(lbda=None, lbda_strategy='foo'),
        dict(lbda='foo', lbda_strategy='fixed'),
    ]
    for case in invalid_cases:
        with pytest.raises(ValueError):
            check_lbda(X=None, u=None, H=None, rois_idx=None, **case)
@pytest.mark.repeat(3)
def test_check_random_state():
    """Every accepted seed type must yield a RandomState; bad seeds raise."""
    valid_seeds = [None, np.random, 3, check_random_state(None)]
    for seed in valid_seeds:
        assert isinstance(check_random_state(seed), np.random.RandomState)
    with pytest.raises(ValueError):
        check_random_state('foo')
@pytest.mark.repeat(3)
def test_check_len_hrf():
    """check_len_hrf must pad or trim any HRF to the requested length."""
    length = 30
    for delta in (-1, 1, 0):
        assert len(check_len_hrf(np.empty(length + delta), length)) == length
@pytest.mark.repeat(3)
def test_check_if_vanished():
    """check_if_vanished accepts healthy matrices and flags vanished ones."""
    # A full matrix and one with a single zero entry are both acceptable.
    healthy = np.ones((10, 10))
    check_if_vanished(healthy)
    healthy[0, 0] = 0.0
    check_if_vanished(healthy)
    # A uniformly tiny matrix must be flagged.
    tiny = 1.0e-30 * np.ones((10, 10))
    pytest.raises(AssertionError, check_if_vanished, A=tiny)
    # So must a matrix whose first row has vanished entirely.
    dead_row = np.ones((10, 10))
    dead_row[0, :] = 0.0
    pytest.raises(AssertionError, check_if_vanished, A=dead_row)
@pytest.mark.repeat(3)
@pytest.mark.parametrize('seed', [None])
def test_get_lambda_max(seed):
    """At lambda-max the estimated temporal atoms should be constant in time."""
    setup = _set_up_test(seed)
    z, u, H, v = setup['z'], setup['u'], setup['H'], setup['v']
    rois_idx, X = setup['rois_idx'], setup['X']
    lbda_max = _get_lambda_max(X, u, H, rois_idx)
    constants = dict(H=H, v=v, u=u, rois_idx=rois_idx, X=X, lbda=lbda_max,
                     prox_z='tv', rho=2.0)
    z_hat = _update_z(z, constants)
    total_variation = np.sum(np.abs(np.diff(z_hat, axis=1)))
    assert total_variation < 1e-6
@pytest.mark.parametrize('level', [1, 2])
def test_check_obj(level):
    """Exercise the cost-function monitor on all three possible outcomes."""
    n_points = int(100.0 - 0.0 + 1)
    lobj = np.linspace(0.0, 100.0, n_points)[::-1]
    machine_eps = np.finfo(np.float64).eps
    common = dict(ii=2, max_iter=100, early_stopping=True,
                  raise_on_increase=True, level=level)
    # A strictly decreasing objective passes silently.
    check_obj(lobj=lobj, eps=machine_eps, **common)
    # A huge tolerance triggers the early-stopping signal.
    with pytest.raises(EarlyStopping):
        check_obj(lobj=lobj, eps=1.1, **common)
    # An increasing objective must raise.
    with pytest.raises(CostFunctionIncreased):
        check_obj(lobj=lobj[::-1], eps=machine_eps, **common)
| [
"hemolearn.checks.check_lbda",
"hemolearn.checks.check_random_state",
"numpy.ones",
"hemolearn.deconvolution._update_z",
"numpy.diff",
"pytest.mark.parametrize",
"pytest.raises",
"pytest.mark.repeat",
"hemolearn.utils._set_up_test",
"hemolearn.checks.check_if_vanished",
"numpy.empty",
"numpy.f... | [((801, 822), 'pytest.mark.repeat', 'pytest.mark.repeat', (['(3)'], {}), '(3)\n', (819, 822), False, 'import pytest\n'), ((1330, 1351), 'pytest.mark.repeat', 'pytest.mark.repeat', (['(3)'], {}), '(3)\n', (1348, 1351), False, 'import pytest\n'), ((1642, 1663), 'pytest.mark.repeat', 'pytest.mark.repeat', (['(3)'], {}), '(3)\n', (1660, 1663), False, 'import pytest\n'), ((2071, 2092), 'pytest.mark.repeat', 'pytest.mark.repeat', (['(3)'], {}), '(3)\n', (2089, 2092), False, 'import pytest\n'), ((2094, 2133), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed"""', '[None]'], {}), "('seed', [None])\n", (2117, 2133), False, 'import pytest\n'), ((2623, 2663), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""level"""', '[1, 2]'], {}), "('level', [1, 2])\n", (2646, 2663), False, 'import pytest\n'), ((905, 929), 'hemolearn.checks.check_random_state', 'check_random_state', (['None'], {}), '(None)\n', (923, 929), False, 'from hemolearn.checks import check_random_state, check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((990, 1019), 'hemolearn.checks.check_random_state', 'check_random_state', (['np.random'], {}), '(np.random)\n', (1008, 1019), False, 'from hemolearn.checks import check_random_state, check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((1080, 1101), 'hemolearn.checks.check_random_state', 'check_random_state', (['(3)'], {}), '(3)\n', (1098, 1101), False, 'from hemolearn.checks import check_random_state, check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((1760, 1777), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1767, 1777), True, 'import numpy as np\n'), ((1782, 1802), 'hemolearn.checks.check_if_vanished', 'check_if_vanished', (['A'], {}), '(A)\n', (1799, 1802), False, 'from hemolearn.checks import check_random_state, 
check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((1811, 1828), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1818, 1828), True, 'import numpy as np\n'), ((1851, 1871), 'hemolearn.checks.check_if_vanished', 'check_if_vanished', (['A'], {}), '(A)\n', (1868, 1871), False, 'from hemolearn.checks import check_random_state, check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((1912, 1965), 'pytest.raises', 'pytest.raises', (['AssertionError', 'check_if_vanished'], {'A': 'A'}), '(AssertionError, check_if_vanished, A=A)\n', (1925, 1965), False, 'import pytest\n'), ((1974, 1991), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1981, 1991), True, 'import numpy as np\n'), ((2014, 2067), 'pytest.raises', 'pytest.raises', (['AssertionError', 'check_if_vanished'], {'A': 'A'}), '(AssertionError, check_if_vanished, A=A)\n', (2027, 2067), False, 'import pytest\n'), ((2222, 2240), 'hemolearn.utils._set_up_test', '_set_up_test', (['seed'], {}), '(seed)\n', (2234, 2240), False, 'from hemolearn.utils import _set_up_test\n'), ((2374, 2408), 'hemolearn.checks._get_lambda_max', '_get_lambda_max', (['X', 'u', 'H', 'rois_idx'], {}), '(X, u, H, rois_idx)\n', (2389, 2408), False, 'from hemolearn.checks import check_random_state, check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((2539, 2562), 'hemolearn.deconvolution._update_z', '_update_z', (['z', 'constants'], {}), '(z, constants)\n', (2548, 2562), False, 'from hemolearn.deconvolution import _update_z\n'), ((514, 539), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (527, 539), False, 'import pytest\n'), ((549, 635), 'hemolearn.checks.check_lbda', 'check_lbda', ([], {'lbda': 'None', 'lbda_strategy': '"""foo"""', 'X': 'None', 'u': 'None', 'H': 'None', 'rois_idx': 'None'}), "(lbda=None, 
lbda_strategy='foo', X=None, u=None, H=None, rois_idx\n =None)\n", (559, 635), False, 'from hemolearn.checks import check_random_state, check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((659, 684), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (672, 684), False, 'import pytest\n'), ((694, 782), 'hemolearn.checks.check_lbda', 'check_lbda', ([], {'lbda': '"""foo"""', 'lbda_strategy': '"""fixed"""', 'X': 'None', 'u': 'None', 'H': 'None', 'rois_idx': 'None'}), "(lbda='foo', lbda_strategy='fixed', X=None, u=None, H=None,\n rois_idx=None)\n", (704, 782), False, 'from hemolearn.checks import check_random_state, check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((1181, 1205), 'hemolearn.checks.check_random_state', 'check_random_state', (['None'], {}), '(None)\n', (1199, 1205), False, 'from hemolearn.checks import check_random_state, check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((1266, 1291), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1279, 1291), False, 'import pytest\n'), ((1301, 1326), 'hemolearn.checks.check_random_state', 'check_random_state', (['"""foo"""'], {}), "('foo')\n", (1319, 1326), False, 'from hemolearn.checks import check_random_state, check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((1890, 1907), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1897, 1907), True, 'import numpy as np\n'), ((3104, 3132), 'pytest.raises', 'pytest.raises', (['EarlyStopping'], {}), '(EarlyStopping)\n', (3117, 3132), False, 'import pytest\n'), ((3142, 3253), 'hemolearn.checks.check_obj', 'check_obj', ([], {'lobj': 'lobj', 'ii': '(2)', 'max_iter': '(100)', 'early_stopping': '(True)', 'raise_on_increase': '(True)', 'eps': '(1.1)', 'level': 
'level'}), '(lobj=lobj, ii=2, max_iter=100, early_stopping=True,\n raise_on_increase=True, eps=1.1, level=level)\n', (3151, 3253), False, 'from hemolearn.checks import check_random_state, check_len_hrf, check_if_vanished, _get_lambda_max, check_obj, EarlyStopping, CostFunctionIncreased, check_lbda\n'), ((3323, 3359), 'pytest.raises', 'pytest.raises', (['CostFunctionIncreased'], {}), '(CostFunctionIncreased)\n', (3336, 3359), False, 'import pytest\n'), ((1462, 1482), 'numpy.empty', 'np.empty', (['(length - 1)'], {}), '(length - 1)\n', (1470, 1482), True, 'import numpy as np\n'), ((1532, 1552), 'numpy.empty', 'np.empty', (['(length + 1)'], {}), '(length + 1)\n', (1540, 1552), True, 'import numpy as np\n'), ((1602, 1618), 'numpy.empty', 'np.empty', (['length'], {}), '(length)\n', (1610, 1618), True, 'import numpy as np\n'), ((2588, 2610), 'numpy.diff', 'np.diff', (['z_hat'], {'axis': '(1)'}), '(z_hat, axis=1)\n', (2595, 2610), True, 'import numpy as np\n'), ((3001, 3021), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (3009, 3021), True, 'import numpy as np\n'), ((3483, 3503), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (3491, 3503), True, 'import numpy as np\n')] |
from functools import singledispatch
from dxl.data.tensor import Tensor
import numpy as np
__all__ = ['abs_', 'unit', 'as_scalar']
@singledispatch
def abs_(t):
    """Element-wise absolute value, dispatched on the argument's type."""
    raise TypeError()


@abs_.register(Tensor)
def _abs_tensor(t):
    # Map abs_ over the tensor's contents via its functor interface.
    return t.fmap(abs_)


@abs_.register(np.ndarray)
def _abs_ndarray(t):
    return np.abs(t)
@singledispatch
def unit(t):
    """Normalise ``t`` to unit Euclidean length, dispatched on type."""
    raise TypeError


@unit.register(Tensor)
def _unit_tensor(t):
    return t.fmap(unit)


@unit.register(np.ndarray)
def _unit_ndarray(t):
    norm = np.linalg.norm(t)
    return t / norm
@singledispatch
def as_scalar(t):
    """Collapse a single-element container to a plain Python scalar.

    Dispatched on the argument's type; unsupported types raise TypeError.
    """
    raise TypeError()


@as_scalar.register(Tensor)
def _(t):
    # Unwrap the tensor payload and dispatch again on the joined value.
    return as_scalar(t.join())


@as_scalar.register(np.ndarray)
def _(t):
    # BUG FIX: np.asscalar was deprecated in NumPy 1.16 and removed in 1.23.
    # ndarray.item() is the documented drop-in replacement (asscalar was
    # itself implemented as a.item()).
    return t.item()
@singledispatch
def square(t):
    """Element-wise square, dispatched on the argument's type.

    NOTE(review): ``square`` is absent from ``__all__`` — confirm whether
    that omission is intentional.
    """
    raise TypeError


@square.register(np.ndarray)
def _square_ndarray(t):
    return np.square(t)


@square.register(Tensor)
def _square_tensor(t):
    # Unlike abs_/unit, this squares the joined payload rather than fmapping.
    return square(t.join())
def _scalar_identity(t):
    # Already a scalar: return it unchanged.
    return t


# Native Python numbers and NumPy scalar types all pass through as-is.
for _scalar_type in (int, float, np.int32, np.int64, np.float32, np.float64):
    as_scalar.register(_scalar_type, _scalar_identity)
| [
"numpy.abs",
"numpy.linalg.norm",
"numpy.asscalar",
"numpy.square"
] | [((294, 303), 'numpy.abs', 'np.abs', (['t'], {}), '(t)\n', (300, 303), True, 'import numpy as np\n'), ((670, 684), 'numpy.asscalar', 'np.asscalar', (['t'], {}), '(t)\n', (681, 684), True, 'import numpy as np\n'), ((790, 802), 'numpy.square', 'np.square', (['t'], {}), '(t)\n', (799, 802), True, 'import numpy as np\n'), ((468, 485), 'numpy.linalg.norm', 'np.linalg.norm', (['t'], {}), '(t)\n', (482, 485), True, 'import numpy as np\n')] |
import os
import sys
import pickle
import numpy as np
from argparse import ArgumentParser
from models.text_model_ps import TextModel
from utils.prepro_text import load_datasets, build_vocabulary, dataset_to_indices, remap_labels
from utils.data_funcs import gen_nary_ecoc, compute_ensemble_accuracy, boolean_string, get_conf_matrix
# Command-line configuration for the N-ary ECOC ensemble text classifier.
# boolean_string (from utils.data_funcs) parses "True"/"False" strings into
# real booleans for argparse.
parser = ArgumentParser()
parser.add_argument("--gpu_idx", type=str, default="0", help="")
parser.add_argument("--training", type=boolean_string, default=True, help="if True, train the model")
parser.add_argument("--task", type=str, default="trec", help="[stsa or trec]")
# Embedding sizes for the word- and character-level inputs.
parser.add_argument("--word_dim", type=int, default=300, help="word embedding dimension")
parser.add_argument("--char_dim", type=int, default=50, help="char embedding dimension")
# Character-CNN configuration: one filter count per kernel size.
parser.add_argument("--kernel_sizes", type=int, nargs="+", default=[2, 3, 4], help="kernel sizes for char cnn")
parser.add_argument("--filters", type=int, nargs="+", default=[25, 25, 25], help="filters for char cnn")
# Dropout applied to the embeddings and to the encoder, respectively.
parser.add_argument("--emb_drop_rate", type=float, default=0.2, help="embedding drop rate")
parser.add_argument("--drop_rate", type=float, default=0.3, help="encoder drop rate")
parser.add_argument("--num_layers", type=int, default=3, help="number of layers for encoder")
parser.add_argument("--num_units", type=int, default=200, help="number of encoder units")
# Optimisation hyper-parameters.
parser.add_argument("--batch_size", type=int, default=100, help="batch size")
parser.add_argument("--epochs", type=int, default=30, help="training epochs")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate")
parser.add_argument("--lr_decay", type=float, default=0.01, help="learning rate decay")
parser.add_argument("--grad_clip", type=float, default=5.0, help="maximal gradient value")
# Ensemble shape: meta-classes per classifier and number of classifiers.
parser.add_argument("--num_meta_class", type=int, default=3, help="number of meta class")
parser.add_argument("--num_classifier", type=int, default=60, help="number of classifiers")
parser.add_argument("--ablation", type=boolean_string, default=False, help="")
config = parser.parse_args()
# Silence TensorFlow logging and pin the process to the requested GPU.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_idx
# Pre-trained fastText English vectors (cc.en.300.vec) under the home dir.
wordvec_path = os.path.join(os.path.expanduser("~"), "utilities", "embeddings", "monolingual", "cc.en.300.vec")
# Task-specific setup: number of target classes, a run name that encodes the
# coding scheme (2 meta-classes -> classic ECOC, num_class -> "ri",
# otherwise N-ary ECOC), checkpoint paths, dataset files and the
# pre-trained base model checkpoint.
if config.task == "stsa":
    num_class = 5
    num_meta_class = config.num_meta_class
    num_classifier = config.num_classifier
    if num_meta_class == 2:
        name = "stsa_part_model_ecoc"
    elif num_meta_class == num_class:
        name = "stsa_part_model_ri"
    elif 2 < num_meta_class < num_class:
        name = "stsa_part_model_nary_ecoc_{}".format(num_meta_class)
    else:
        raise ValueError("num_meta_class must in [2, num_class]!!!")
    save_path = "ckpt/{}/".format(name)
    # Per-classifier checkpoint directories: ckpt_path.format(i).
    ckpt_path = save_path + "{}/"
    data_path = os.path.join("datasets", "raw", "stsa")
    files = [data_path + "/train.txt", data_path + "/dev.txt", data_path + "/test.txt"]
    pretrained_model = "ckpt/stsa_model/text_model-16"
else:
    # Default task: TREC question classification (6 classes, no dev split).
    num_class = 6
    num_meta_class = config.num_meta_class
    num_classifier = config.num_classifier
    if num_meta_class == 2:
        name = "trec_part_model_ecoc"
    elif num_meta_class == num_class:
        name = "trec_part_model_ri"
    elif 2 < num_meta_class < num_class:
        name = "trec_part_model_nary_ecoc_{}".format(num_meta_class)
    else:
        raise ValueError("num_meta_class must in [2, num_class]!!!")
    save_path = "ckpt/{}/".format(name)
    ckpt_path = save_path + "{}/"
    data_path = os.path.join("datasets", "raw", "trec")
    files = [data_path + "/train.txt", data_path + "/test.txt"]
    pretrained_model = "ckpt/trec_model/text_model-23"
# Load the train/test splits (and a dev split for stsa) from the raw files.
train_data, test_data = load_datasets(files)
# Build the vocabulary and load pre-trained word embeddings; the result is
# cached as a pickle next to the raw data so later runs skip the slow build.
if not os.path.exists(os.path.join(data_path, "processed.pkl")):
    word_dict, char_dict, vectors = build_vocabulary([train_data, test_data], wordvec_path, dim=config.word_dim)
    dd = {"word_dict": word_dict, "char_dict": char_dict, "vectors": vectors}
    with open(os.path.join(data_path, "processed.pkl"), mode="wb") as handle:
        pickle.dump(dd, handle, protocol=pickle.HIGHEST_PROTOCOL)
else:
    with open(os.path.join(data_path, "processed.pkl"), mode="rb") as handle:
        dd = pickle.load(handle)
    word_dict = dd["word_dict"]
    char_dict = dd["char_dict"]
    vectors = dd["vectors"]
# Convert raw tokens into word/char index arrays plus integer labels.
train_words, train_chars, train_labels = dataset_to_indices(train_data, word_dict, char_dict)
test_words, test_chars, test_labels = dataset_to_indices(test_data, word_dict, char_dict)
if not os.path.exists(save_path):
    os.makedirs(save_path)
# Generate (or reload) the N-ary ECOC coding matrix: one column per
# classifier, mapping each original class to a meta-class.
if config.training:
    nary_ecoc = gen_nary_ecoc(num_class=num_class, num_meta_class=num_meta_class, num_classifier=num_classifier)
    np.savez_compressed(save_path + "nary_ecoc.npz", embeddings=nary_ecoc)
else:
    nary_ecoc = np.load(save_path + "nary_ecoc.npz")["embeddings"]
# Train/evaluate one base classifier per ECOC column; collect its test
# predictions for the final ensemble decoding.
nary_ecoc_test_result = []
for i in range(num_classifier):
    sys.stdout.write("\nThe {}/{} classifier:\n".format(i + 1, num_classifier))
    sys.stdout.flush()
    ecoc_array = nary_ecoc[:, i]
    # Work on copies so remapping labels does not corrupt the originals.
    train_words_ith, train_chars_ith, train_labels_ith = train_words.copy(), train_chars.copy(), train_labels.copy()
    test_words_ith, test_chars_ith, test_labels_ith = test_words.copy(), test_chars.copy(), test_labels.copy()
    train_labels_ith = remap_labels(train_labels_ith, ecoc_array)
    test_labels_ith = remap_labels(test_labels_ith, ecoc_array)
    model = TextModel(config, num_meta_class, word_dict, char_dict, vectors, ckpt_path=ckpt_path.format(i),
                      pretrained_model=pretrained_model)
    if config.training:
        model.train(train_words_ith, train_chars_ith, train_labels_ith, test_words_ith, test_chars_ith,
                    test_labels_ith)
    _, pred_labels = model.test(test_words_ith, test_chars_ith, test_labels_ith, batch_size=200)
    model.close_session()
    nary_ecoc_test_result.append(pred_labels)
# Stack the per-classifier predictions column-wise and decode the ensemble.
nary_ecoc_labels = np.concatenate(nary_ecoc_test_result, axis=1)
np.savez_compressed(save_path + "pred_labels.npz", embeddings=nary_ecoc_labels)
# Optional ablation: report accuracy for growing ensemble prefixes.
if config.ablation:
    nl = list(range(5, num_classifier + 1, 5))
    for n in nl:
        accuracy = compute_ensemble_accuracy(nary_ecoc_labels[:, 0:n], nary_ecoc[:, 0:n], test_labels)
        print("{}: {}\t{:4.2f}%".format(n, accuracy, accuracy * 100))
accuracy = compute_ensemble_accuracy(nary_ecoc_labels, nary_ecoc, test_labels)
print(accuracy)
# get_conf_matrix(nary_ecoc_labels, nary_ecoc, test_labels, filename="assets/{}_heatmap".format(name))
with open(save_path + "results.txt", mode="w", encoding="utf-8") as f:
    f.write(str(accuracy))
| [
"sys.stdout.flush",
"os.path.exists",
"pickle.dump",
"utils.data_funcs.compute_ensemble_accuracy",
"utils.prepro_text.load_datasets",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"pickle.load",
"utils.data_funcs.gen_nary_ecoc",
"utils.prepro_text.remap_labels",
"utils.prepro_text.... | [((342, 358), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (356, 358), False, 'from argparse import ArgumentParser\n'), ((3735, 3755), 'utils.prepro_text.load_datasets', 'load_datasets', (['files'], {}), '(files)\n', (3748, 3755), False, 'from utils.prepro_text import load_datasets, build_vocabulary, dataset_to_indices, remap_labels\n'), ((4492, 4544), 'utils.prepro_text.dataset_to_indices', 'dataset_to_indices', (['train_data', 'word_dict', 'char_dict'], {}), '(train_data, word_dict, char_dict)\n', (4510, 4544), False, 'from utils.prepro_text import load_datasets, build_vocabulary, dataset_to_indices, remap_labels\n'), ((4583, 4634), 'utils.prepro_text.dataset_to_indices', 'dataset_to_indices', (['test_data', 'word_dict', 'char_dict'], {}), '(test_data, word_dict, char_dict)\n', (4601, 4634), False, 'from utils.prepro_text import load_datasets, build_vocabulary, dataset_to_indices, remap_labels\n'), ((6072, 6117), 'numpy.concatenate', 'np.concatenate', (['nary_ecoc_test_result'], {'axis': '(1)'}), '(nary_ecoc_test_result, axis=1)\n', (6086, 6117), True, 'import numpy as np\n'), ((6118, 6197), 'numpy.savez_compressed', 'np.savez_compressed', (["(save_path + 'pred_labels.npz')"], {'embeddings': 'nary_ecoc_labels'}), "(save_path + 'pred_labels.npz', embeddings=nary_ecoc_labels)\n", (6137, 6197), True, 'import numpy as np\n'), ((6468, 6535), 'utils.data_funcs.compute_ensemble_accuracy', 'compute_ensemble_accuracy', (['nary_ecoc_labels', 'nary_ecoc', 'test_labels'], {}), '(nary_ecoc_labels, nary_ecoc, test_labels)\n', (6493, 6535), False, 'from utils.data_funcs import gen_nary_ecoc, compute_ensemble_accuracy, boolean_string, get_conf_matrix\n'), ((2188, 2211), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2206, 2211), False, 'import os\n'), ((2823, 2862), 'os.path.join', 'os.path.join', (['"""datasets"""', '"""raw"""', '"""stsa"""'], {}), "('datasets', 'raw', 'stsa')\n", (2835, 2862), False, 
'import os\n'), ((3535, 3574), 'os.path.join', 'os.path.join', (['"""datasets"""', '"""raw"""', '"""trec"""'], {}), "('datasets', 'raw', 'trec')\n", (3547, 3574), False, 'import os\n'), ((3914, 3990), 'utils.prepro_text.build_vocabulary', 'build_vocabulary', (['[train_data, test_data]', 'wordvec_path'], {'dim': 'config.word_dim'}), '([train_data, test_data], wordvec_path, dim=config.word_dim)\n', (3930, 3990), False, 'from utils.prepro_text import load_datasets, build_vocabulary, dataset_to_indices, remap_labels\n'), ((4643, 4668), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (4657, 4668), False, 'import os\n'), ((4674, 4696), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (4685, 4696), False, 'import os\n'), ((4734, 4834), 'utils.data_funcs.gen_nary_ecoc', 'gen_nary_ecoc', ([], {'num_class': 'num_class', 'num_meta_class': 'num_meta_class', 'num_classifier': 'num_classifier'}), '(num_class=num_class, num_meta_class=num_meta_class,\n num_classifier=num_classifier)\n', (4747, 4834), False, 'from utils.data_funcs import gen_nary_ecoc, compute_ensemble_accuracy, boolean_string, get_conf_matrix\n'), ((4835, 4905), 'numpy.savez_compressed', 'np.savez_compressed', (["(save_path + 'nary_ecoc.npz')"], {'embeddings': 'nary_ecoc'}), "(save_path + 'nary_ecoc.npz', embeddings=nary_ecoc)\n", (4854, 4905), True, 'import numpy as np\n'), ((5143, 5161), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5159, 5161), False, 'import sys\n'), ((5446, 5488), 'utils.prepro_text.remap_labels', 'remap_labels', (['train_labels_ith', 'ecoc_array'], {}), '(train_labels_ith, ecoc_array)\n', (5458, 5488), False, 'from utils.prepro_text import load_datasets, build_vocabulary, dataset_to_indices, remap_labels\n'), ((5511, 5552), 'utils.prepro_text.remap_labels', 'remap_labels', (['test_labels_ith', 'ecoc_array'], {}), '(test_labels_ith, ecoc_array)\n', (5523, 5552), False, 'from utils.prepro_text import load_datasets, build_vocabulary, 
dataset_to_indices, remap_labels\n'), ((3835, 3875), 'os.path.join', 'os.path.join', (['data_path', '"""processed.pkl"""'], {}), "(data_path, 'processed.pkl')\n", (3847, 3875), False, 'import os\n'), ((4155, 4212), 'pickle.dump', 'pickle.dump', (['dd', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(dd, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (4166, 4212), False, 'import pickle\n'), ((4310, 4329), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (4321, 4329), False, 'import pickle\n'), ((4928, 4964), 'numpy.load', 'np.load', (["(save_path + 'nary_ecoc.npz')"], {}), "(save_path + 'nary_ecoc.npz')\n", (4935, 4964), True, 'import numpy as np\n'), ((6302, 6389), 'utils.data_funcs.compute_ensemble_accuracy', 'compute_ensemble_accuracy', (['nary_ecoc_labels[:, 0:n]', 'nary_ecoc[:, 0:n]', 'test_labels'], {}), '(nary_ecoc_labels[:, 0:n], nary_ecoc[:, 0:n],\n test_labels)\n', (6327, 6389), False, 'from utils.data_funcs import gen_nary_ecoc, compute_ensemble_accuracy, boolean_string, get_conf_matrix\n'), ((4083, 4123), 'os.path.join', 'os.path.join', (['data_path', '"""processed.pkl"""'], {}), "(data_path, 'processed.pkl')\n", (4095, 4123), False, 'import os\n'), ((4233, 4273), 'os.path.join', 'os.path.join', (['data_path', '"""processed.pkl"""'], {}), "(data_path, 'processed.pkl')\n", (4245, 4273), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
def conjecture_sequence(n_value, y_list, x_list):
    """Append the Collatz (3n+1) trajectory of ``n_value`` to ``y_list``.

    Both lists are mutated in place: each step appends the next trajectory
    value to ``y_list`` and the next 1-based step index to ``x_list``
    (which must already hold its starting index, normally ``[1]``).
    """
    y_list.append(n_value)
    while n_value > 1:
        # Halve when even, otherwise apply 3n + 1.
        n_value = n_value // 2 if n_value % 2 == 0 else 3 * n_value + 1
        y_list.append(n_value)
        x_list.append(x_list[-1] + 1)
def main():
    """Prompt for a number, plot its Collatz trajectory and save the figure."""
    initial_value = int(input("Enter a natural number (1-1,000,000): "))
    # Only values in (1, 1,000,000] are accepted; 1 itself has no trajectory.
    if initial_value <= 1000000 and initial_value > 1:
        x = [1] # x-axis: step indices, seeded with the first step
        y = [] # y-axis: trajectory values, filled by conjecture_sequence
        plt.figure(figsize=(10, 10), dpi=250)
        conjecture_sequence(initial_value,y,x)
        # Set the x-axis tick interval: coarser spacing for longer runs,
        # always forcing a tick at step 1.
        if x[-1] >=50:
            if x[-1] >= 500:
                graph_x_interval = np.arange(10, max(x), 10)
            elif x[-1] >= 100:
                graph_x_interval = np.arange(5, max(x), 5)
            else:
                graph_x_interval = np.arange(2, max(x), 2)
            x_tick = np.insert(graph_x_interval, 0, 1)
            plt.xticks(x_tick)
        else:
            plt.xticks(np.arange(1, max(x)))
        # Set the y-axis tick interval, rounded to a "nice" magnitude;
        # below 1000 matplotlib's default ticks are kept.
        if max(y) >= 1000:
            if max(y) >= 100000:
                interval_y = round(max(y) // 20, -3)
            elif max(y) >= 5000:
                interval_y = round(max(y) // 20, -2)
            else:
                interval_y = round(max(y) // 10, -2)
            y_tick = np.arange(interval_y, max(y), interval_y)
            plt.yticks(y_tick)
        # Plot the trajectory and label the figure.
        plt.plot(x, y, marker='o')
        plt.title("3n+1 Conjecture", fontsize=16)
        plt.xlabel("Step / Iteration", fontsize=12)
        plt.ylabel("Number (n)", fontsize=12)
        # Print the sequence and its summary statistics to the console.
        print("Sequence:")
        print(*y, sep= ", ")
        print("\nIt takes", x[-2], "steps to get to 1.")
        print("Max Y value (n):", max(y))
        # Save the figure as "<n>_graph.png" before showing it.
        fig = plt.gcf()
        fig.savefig(str(initial_value) + '_graph.png')
        plt.show()
    else:
        print("You have entered an invalid value.")
# Run the interactive plotter only when executed as a script.
if __name__ == '__main__':
    main()
| [
"numpy.insert",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((765, 802), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)', 'dpi': '(250)'}), '(figsize=(10, 10), dpi=250)\n', (775, 802), True, 'import matplotlib.pyplot as plt\n'), ((1771, 1797), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'marker': '"""o"""'}), "(x, y, marker='o')\n", (1779, 1797), True, 'import matplotlib.pyplot as plt\n'), ((1806, 1847), 'matplotlib.pyplot.title', 'plt.title', (['"""3n+1 Conjecture"""'], {'fontsize': '(16)'}), "('3n+1 Conjecture', fontsize=16)\n", (1815, 1847), True, 'import matplotlib.pyplot as plt\n'), ((1856, 1899), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Step / Iteration"""'], {'fontsize': '(12)'}), "('Step / Iteration', fontsize=12)\n", (1866, 1899), True, 'import matplotlib.pyplot as plt\n'), ((1908, 1945), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number (n)"""'], {'fontsize': '(12)'}), "('Number (n)', fontsize=12)\n", (1918, 1945), True, 'import matplotlib.pyplot as plt\n'), ((2172, 2181), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2179, 2181), True, 'import matplotlib.pyplot as plt\n'), ((2245, 2255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2253, 2255), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1232), 'numpy.insert', 'np.insert', (['graph_x_interval', '(0)', '(1)'], {}), '(graph_x_interval, 0, 1)\n', (1208, 1232), True, 'import numpy as np\n'), ((1245, 1263), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_tick'], {}), '(x_tick)\n', (1255, 1263), True, 'import matplotlib.pyplot as plt\n'), ((1708, 1726), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y_tick'], {}), '(y_tick)\n', (1718, 1726), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# LICENSE
#
# Copyright (C) 2010-2018 GEM Foundation, <NAME>, <NAME>, <NAME>
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is free software: you can
# redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein is
# released as a prototype implementation on behalf of scientists and engineers
# working within the GEM Foundation (Global Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the hope that
# it will be useful to the scientific, engineering, disaster risk and software
# design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software developers,
# as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be directed to
# the hazard scientific staff of the GEM Model Facility
# (<EMAIL>).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# The GEM Foundation, and the authors of the software, assume no liability for
# use of the software.
'''
Module :mod:`openquake.hmtk.seismicity.max_magnitude.kijko_sellevol_bayes`
implements the Kijko & Sellevol (1989) method for estimating maximum magnitude
from observed seismicity with uncertain b-value
'''
import numpy as np
from math import fabs
from scipy.integrate import quadrature
from openquake.hmtk.seismicity.max_magnitude.base import (
BaseMaximumMagnitude, MAX_MAGNITUDE_METHODS,
_get_observed_mmax, _get_magnitude_vector_properties)
def check_config(config, data):
    '''
    Validate and normalise the configuration for KijkoSellevolBayes.

    :param dict config:
        Configuration settings for the function; must contain the keys
        'input_mmin', 'b-value' and 'sigma-b'.  'tolerance' and
        'maximum_iterations' are filled with defaults when absent or falsy.
    :param dict data:
        Catalogue data; must contain a 'magnitude' array.
    :returns:
        The validated configuration (mutated in place).
    :raises ValueError:
        If an essential key is missing, or sigma-b is not strictly positive.
    '''
    essential_keys = ['input_mmin', 'b-value', 'sigma-b']
    for key in essential_keys:
        if key not in config:
            raise ValueError('For KijkoSellevolBayes the key %s needs to '
                             'be set in the configuation' % key)
    if not config.get('tolerance'):
        config['tolerance'] = 1E-5
    if not config.get('maximum_iterations', False):
        config['maximum_iterations'] = 1000
    # Never allow the configured minimum below the smallest observed magnitude
    if config['input_mmin'] < np.min(data['magnitude']):
        config['input_mmin'] = np.min(data['magnitude'])
    # BUG FIX: the original read ``fabs(config['sigma-b'] < 1E-15)`` -- fabs of
    # a *boolean* -- which only worked through truthiness of fabs(True)==1.0.
    # The message says sigma-b must be strictly positive, so test the value
    # directly (raises for zero, near-zero and negative values, as before).
    if config['sigma-b'] < 1E-15:
        raise ValueError('Sigma-b must be greater than zero!')
    return config
@MAX_MAGNITUDE_METHODS.add(
    "get_mmax",
    **{"input_mmin": lambda cat: np.min(cat.data['magnitude']),
       "input_mmax": lambda cat: cat.data['magnitude'][
           np.argmax(cat.data['magnitude'])],
       "input_mmax_uncertainty": lambda cat: cat.get_observed_mmax_sigma(0.2),
       # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; it
       # was always an alias for the builtin ``float``.
       "b-value": float,
       "sigma-b": float,
       "maximum_iterations": 1000,
       "tolerance": 1E-5})
class KijkoSellevolBayes(BaseMaximumMagnitude):
    '''
    Kijko & Sellevol Bayesian estimator of the maximum magnitude (Mmax),
    allowing for an uncertain b-value (Kijko, 2004 - section 3.2).
    '''

    def get_mmax(self, catalogue, config):
        '''
        Calculate the maximum magnitude by fixed-point iteration on
        mmax = obsmax + delta(mmax).

        :param catalogue: earthquake catalogue (provides ``.data``)
        :param dict config: settings validated by :func:`check_config`
        :returns: **mmax** maximum magnitude and **mmax_sig** its uncertainty
        :rtype: Float
        '''
        config = check_config(config, catalogue.data)
        # Negative b-values would make the integral undefined - skip it
        if config['b-value'] <= 0.0:
            return np.nan, np.nan
        obsmax, obsmaxsig = _get_observed_mmax(catalogue.data, config)
        beta = config['b-value'] * np.log(10.)
        sigbeta = config['sigma-b'] * np.log(10.)
        neq, mmin = _get_magnitude_vector_properties(catalogue.data, config)
        # p, q parameterise the Bayesian prior on beta (Kijko, 2004)
        pval = beta / (sigbeta ** 2.)
        qval = (beta / sigbeta) ** 2.
        mmax = np.copy(obsmax)
        d_t = np.inf
        iterator = 0
        # Iterate until the mmax update falls below the configured tolerance
        while d_t > config['tolerance']:
            rval = pval / (pval + mmax - mmin)
            ldelt = (1. / (1. - (rval ** qval))) ** neq
            # NOTE: scipy.integrate.quadrature is deprecated (SciPy >= 1.12)
            # and removed in 1.15; migration to scipy.integrate.quad would be
            # needed for newer SciPy - kept as-is to preserve behaviour.
            delta = ldelt * quadrature(self._ksb_intfunc, mmin, mmax,
                                       args=(neq, mmin, pval, qval))[0]
            tmmax = obsmax + delta
            d_t = np.abs(tmmax - mmax)
            mmax = np.copy(tmmax)
            iterator += 1
            if iterator > config['maximum_iterations']:
                # BUG FIX: the two implicitly concatenated string literals
                # were missing a separating space ("reachedmaximum").
                print('Kijko-Sellevol-Bayes estimator reached '
                      'maximum # of iterations')
                d_t = -np.inf
        return mmax.item(), np.sqrt(obsmaxsig ** 2. + delta ** 2.)

    def _ksb_intfunc(self, mval, neq, mmin, pval, qval):
        '''
        Integral function inside Kijko-Sellevol-Bayes estimator
        (part of Eq. 10 in Kijko, 2004 - section 3.2)

        :param float mval:
            Magnitude
        :param float neq:
            Number of Earthquakes
        :param float mmin:
            Minimum Magnitude
        :param float pval:
            p-value (see Kijko, 2004 - section 3.2)
        :param float qval:
            q-value (see Kijko, 2004 - section 3.2)
        :returns:
            Output of function integrand
        '''
        func1 = (1. - ((pval / (pval + mval - mmin)) ** qval)) ** neq
        return func1
| [
"numpy.copy",
"numpy.abs",
"numpy.sqrt",
"scipy.integrate.quadrature",
"openquake.hmtk.seismicity.max_magnitude.base._get_observed_mmax",
"numpy.log",
"numpy.argmax",
"openquake.hmtk.seismicity.max_magnitude.base._get_magnitude_vector_properties",
"math.fabs",
"numpy.min"
] | [((3063, 3094), 'math.fabs', 'fabs', (["(config['sigma-b'] < 1e-15)"], {}), "(config['sigma-b'] < 1e-15)\n", (3067, 3094), False, 'from math import fabs\n'), ((2971, 2996), 'numpy.min', 'np.min', (["data['magnitude']"], {}), "(data['magnitude'])\n", (2977, 2996), True, 'import numpy as np\n'), ((3029, 3054), 'numpy.min', 'np.min', (["data['magnitude']"], {}), "(data['magnitude'])\n", (3035, 3054), True, 'import numpy as np\n'), ((4218, 4260), 'openquake.hmtk.seismicity.max_magnitude.base._get_observed_mmax', '_get_observed_mmax', (['catalogue.data', 'config'], {}), '(catalogue.data, config)\n', (4236, 4260), False, 'from openquake.hmtk.seismicity.max_magnitude.base import BaseMaximumMagnitude, MAX_MAGNITUDE_METHODS, _get_observed_mmax, _get_magnitude_vector_properties\n'), ((4380, 4436), 'openquake.hmtk.seismicity.max_magnitude.base._get_magnitude_vector_properties', '_get_magnitude_vector_properties', (['catalogue.data', 'config'], {}), '(catalogue.data, config)\n', (4412, 4436), False, 'from openquake.hmtk.seismicity.max_magnitude.base import BaseMaximumMagnitude, MAX_MAGNITUDE_METHODS, _get_observed_mmax, _get_magnitude_vector_properties\n'), ((4530, 4545), 'numpy.copy', 'np.copy', (['obsmax'], {}), '(obsmax)\n', (4537, 4545), True, 'import numpy as np\n'), ((4297, 4309), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (4303, 4309), True, 'import numpy as np\n'), ((4347, 4359), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (4353, 4359), True, 'import numpy as np\n'), ((4928, 4948), 'numpy.abs', 'np.abs', (['(tmmax - mmax)'], {}), '(tmmax - mmax)\n', (4934, 4948), True, 'import numpy as np\n'), ((4968, 4982), 'numpy.copy', 'np.copy', (['tmmax'], {}), '(tmmax)\n', (4975, 4982), True, 'import numpy as np\n'), ((5236, 5276), 'numpy.sqrt', 'np.sqrt', (['(obsmaxsig ** 2.0 + delta ** 2.0)'], {}), '(obsmaxsig ** 2.0 + delta ** 2.0)\n', (5243, 5276), True, 'import numpy as np\n'), ((3257, 3286), 'numpy.min', 'np.min', (["cat.data['magnitude']"], {}), 
"(cat.data['magnitude'])\n", (3263, 3286), True, 'import numpy as np\n'), ((4760, 4831), 'scipy.integrate.quadrature', 'quadrature', (['self._ksb_intfunc', 'mmin', 'mmax'], {'args': '(neq, mmin, pval, qval)'}), '(self._ksb_intfunc, mmin, mmax, args=(neq, mmin, pval, qval))\n', (4770, 4831), False, 'from scipy.integrate import quadrature\n'), ((3355, 3387), 'numpy.argmax', 'np.argmax', (["cat.data['magnitude']"], {}), "(cat.data['magnitude'])\n", (3364, 3387), True, 'import numpy as np\n')] |
import unittest
import numpy
from become_yukarin.dataset import dataset
class TestDataset(unittest.TestCase):
    """Round-trip tests for encoding/decoding acoustic feature streams."""

    def setUp(self):
        """Build a deterministic dummy AcousticFeature and its size table."""
        self.sample_rate = 24000
        self.len_time = 100
        self.fft_size = 1024
        self.order = 59
        t = self.len_time
        spec_width = self.fft_size // 2 + 1
        self.dummy_feature = dataset.AcousticFeature(
            f0=numpy.arange(t).reshape((t, -1)),
            spectrogram=numpy.arange(t * spec_width).reshape((t, -1)),
            aperiodicity=numpy.arange(t * spec_width).reshape((t, -1)),
            mfcc=numpy.arange(t * (self.order + 1)).reshape((t, -1)),
            voiced=(numpy.arange(t) % 2 == 1).reshape((t, -1)),
        )
        self.feature_sizes = dataset.AcousticFeature.get_sizes(
            sampling_rate=self.sample_rate,
            order=self.order,
        )

    def _round_trip(self, names):
        """Encode the dummy feature with the named streams; return the
        encoded array and a matching decoder."""
        encoder = dataset.EncodeFeatureProcess(names)
        decoder = dataset.DecodeFeatureProcess(names, self.feature_sizes)
        encoded = encoder(self.dummy_feature, test=True)
        return encoded, decoder

    def test_encode_decode_feature(self):
        encoded, decoder = self._round_trip(['mfcc'])
        decoded = decoder(encoded, test=True)
        self.assertTrue(numpy.all(self.dummy_feature.mfcc == decoded.mfcc))

    def test_encode_decode_feature2(self):
        encoded, decoder = self._round_trip(['mfcc', 'f0'])
        decoded = decoder(encoded, test=True)
        self.assertTrue(numpy.all(self.dummy_feature.mfcc == decoded.mfcc))
        self.assertTrue(numpy.all(self.dummy_feature.f0 == decoded.f0))

    def test_encode_decode_feature3(self):
        # Corrupting the first encoded element must break the mfcc round
        # trip but leave the f0 stream intact.
        encoded, decoder = self._round_trip(['mfcc', 'f0'])
        encoded[0] = numpy.nan
        decoded = decoder(encoded, test=True)
        self.assertFalse(numpy.all(self.dummy_feature.mfcc == decoded.mfcc))
        self.assertTrue(numpy.all(self.dummy_feature.f0 == decoded.f0))
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"become_yukarin.dataset.dataset.EncodeFeatureProcess",
"numpy.arange",
"unittest.main",
"numpy.all",
"become_yukarin.dataset.dataset.AcousticFeature.get_sizes",
"become_yukarin.dataset.dataset.DecodeFeatureProcess"
] | [((2196, 2211), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2209, 2211), False, 'import unittest\n'), ((780, 868), 'become_yukarin.dataset.dataset.AcousticFeature.get_sizes', 'dataset.AcousticFeature.get_sizes', ([], {'sampling_rate': 'self.sample_rate', 'order': 'self.order'}), '(sampling_rate=self.sample_rate, order=\n self.order)\n', (813, 868), False, 'from become_yukarin.dataset import dataset\n'), ((967, 1005), 'become_yukarin.dataset.dataset.EncodeFeatureProcess', 'dataset.EncodeFeatureProcess', (["['mfcc']"], {}), "(['mfcc'])\n", (995, 1005), False, 'from become_yukarin.dataset import dataset\n'), ((1031, 1089), 'become_yukarin.dataset.dataset.DecodeFeatureProcess', 'dataset.DecodeFeatureProcess', (["['mfcc']", 'self.feature_sizes'], {}), "(['mfcc'], self.feature_sizes)\n", (1059, 1089), False, 'from become_yukarin.dataset import dataset\n'), ((1328, 1372), 'become_yukarin.dataset.dataset.EncodeFeatureProcess', 'dataset.EncodeFeatureProcess', (["['mfcc', 'f0']"], {}), "(['mfcc', 'f0'])\n", (1356, 1372), False, 'from become_yukarin.dataset import dataset\n'), ((1398, 1462), 'become_yukarin.dataset.dataset.DecodeFeatureProcess', 'dataset.DecodeFeatureProcess', (["['mfcc', 'f0']", 'self.feature_sizes'], {}), "(['mfcc', 'f0'], self.feature_sizes)\n", (1426, 1462), False, 'from become_yukarin.dataset import dataset\n'), ((1767, 1811), 'become_yukarin.dataset.dataset.EncodeFeatureProcess', 'dataset.EncodeFeatureProcess', (["['mfcc', 'f0']"], {}), "(['mfcc', 'f0'])\n", (1795, 1811), False, 'from become_yukarin.dataset import dataset\n'), ((1837, 1901), 'become_yukarin.dataset.dataset.DecodeFeatureProcess', 'dataset.DecodeFeatureProcess', (["['mfcc', 'f0']", 'self.feature_sizes'], {}), "(['mfcc', 'f0'], self.feature_sizes)\n", (1865, 1901), False, 'from become_yukarin.dataset import dataset\n'), ((1213, 1257), 'numpy.all', 'numpy.all', (['(self.dummy_feature.mfcc == d.mfcc)'], {}), '(self.dummy_feature.mfcc == d.mfcc)\n', (1222, 1257), False, 'import 
numpy\n'), ((1586, 1630), 'numpy.all', 'numpy.all', (['(self.dummy_feature.mfcc == d.mfcc)'], {}), '(self.dummy_feature.mfcc == d.mfcc)\n', (1595, 1630), False, 'import numpy\n'), ((1656, 1696), 'numpy.all', 'numpy.all', (['(self.dummy_feature.f0 == d.f0)'], {}), '(self.dummy_feature.f0 == d.f0)\n', (1665, 1696), False, 'import numpy\n'), ((2051, 2095), 'numpy.all', 'numpy.all', (['(self.dummy_feature.mfcc == d.mfcc)'], {}), '(self.dummy_feature.mfcc == d.mfcc)\n', (2060, 2095), False, 'import numpy\n'), ((2121, 2161), 'numpy.all', 'numpy.all', (['(self.dummy_feature.f0 == d.f0)'], {}), '(self.dummy_feature.f0 == d.f0)\n', (2130, 2161), False, 'import numpy\n'), ((347, 369), 'numpy.arange', 'numpy.arange', (['len_time'], {}), '(len_time)\n', (359, 369), False, 'import numpy\n'), ((419, 463), 'numpy.arange', 'numpy.arange', (['(len_time * (fft_size // 2 + 1))'], {}), '(len_time * (fft_size // 2 + 1))\n', (431, 463), False, 'import numpy\n'), ((514, 558), 'numpy.arange', 'numpy.arange', (['(len_time * (fft_size // 2 + 1))'], {}), '(len_time * (fft_size // 2 + 1))\n', (526, 558), False, 'import numpy\n'), ((601, 637), 'numpy.arange', 'numpy.arange', (['(len_time * (order + 1))'], {}), '(len_time * (order + 1))\n', (613, 637), False, 'import numpy\n'), ((683, 705), 'numpy.arange', 'numpy.arange', (['len_time'], {}), '(len_time)\n', (695, 705), False, 'import numpy\n')] |
import matplotlib.pyplot as plt
import os
import numpy as np
from eratosthenes.generic.mapping_io import read_geo_image, make_geo_im
from eratosthenes.input.read_sentinel2 import \
list_central_wavelength_msi
from eratosthenes.preprocessing.image_transforms import mat_to_gray
# Interactive classification of a Sentinel-2 scene of the Red Glacier:
# load shadow/DEM/band rasters, build a 2-D log-ratio histogram, let the
# user draw polygon classes on it with the mouse, then map those polygons
# back to a per-pixel class image.
toi = 15  # acquisition of interest: 15 or 25 (October 2019)
boi = ['red', 'green', 'blue', 'nir']  # bands of interest
s2_df = list_central_wavelength_msi()
s2_df = s2_df[s2_df['common_name'].isin(boi)]  # keep only the bands of interest
if toi==15:
    s2path = '/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019/'
elif toi == 25:
    s2path = '/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-25-10-2019/'
fpath = os.path.join(s2path, 'shadow.tif')
M_dir, M_name = os.path.split(fpath)
Z_file = "COP_DEM_red.tif"  # Copernicus DEM subset
R_file = "5VMG_RGI_red.tif"  # glacier inventory subset
Z_dir = os.path.join('/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/',
                     'Cop-DEM-GLO-30')
dir_path = os.path.dirname(os.path.realpath(__file__))
if os.getcwd()!=dir_path:
    os.chdir(dir_path) # change to directory where script is situated
(M, spatialRefM, geoTransformM, targetprjM) = read_geo_image(fpath)
M *= -1  # flip the sign of the shadow raster
Z = read_geo_image(os.path.join(Z_dir, Z_file))[0]  # elevation
R = read_geo_image(os.path.join(Z_dir, R_file))[0]  # glacier mask
# create observation angles
if toi==15:
    fpath = os.path.join('/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full', \
                 'T05VMG_20191015T213531_B08.jp2')
elif toi==25:
    # NOTE(review): this branch points at the same 15-10-2019 product as the
    # branch above -- looks like a copy-paste leftover; confirm intended path.
    fpath = os.path.join('/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full', \
                 'T05VMG_20191015T213531_B08.jp2')
_,spatialRefI,geoTransformI,targetprjI = read_geo_image(fpath)
path_meta = '/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/'+\
    'S2A_MSIL1C_20191015T213531_N0208_R086_T05VMG_20191015T230223.SAFE/'+\
    'GRANULE/L1C_T05VMG_A022534_20191015T213843/QI_DATA'
# per-pixel acquisition geometry (only prepared for the 15-10 scene)
if toi==15:
    Det = read_geo_image(os.path.join(s2path, 'detIdBlue.tif'))[0]
    Zn = read_geo_image(os.path.join(s2path, 'viewZn.tif'))[0]
    Az = read_geo_image(os.path.join(s2path, 'viewAz.tif'))[0]
# spectral bands (B2=blue, B3=green, B4=red, B8=NIR)
Blue = read_geo_image(os.path.join(s2path, 'B2.tif'))[0]
Green = read_geo_image(os.path.join(s2path, 'B3.tif'))[0]
Red = read_geo_image(os.path.join(s2path, 'B4.tif'))[0]
Near = read_geo_image(os.path.join(s2path, 'B8.tif'))[0]
from eratosthenes.preprocessing.shadow_transforms import \
    entropy_shade_removal, shadow_free_rgb, shade_index, \
    normalized_range_shadow_index
from eratosthenes.preprocessing.color_transforms import rgb2lms
# log band ratios against sqrt(NIR) as the normalising denominator
Ia,Ib,Ic = mat_to_gray(Blue), mat_to_gray(Red), mat_to_gray(Near)
Ia[Ia == 0] = 1e-6  # avoid log(0) / division by zero below
Ib[Ib == 0] = 1e-6
sqIc = np.power(Ic, 1. / 2)
sqIc[sqIc == 0] = 1
chi1 = np.log(np.divide(Ia, sqIc))  # log(blue / sqrt(nir))
chi2 = np.log(np.divide(Ib, sqIc))  # log(red / sqrt(nir))
# 2-D histogram of the (chi1, chi2) coordinates: the canvas on which the
# user delineates the classes
fig = plt.figure()
plt.hist2d(chi1.flatten(),chi2.flatten(),
           bins=100, range=[[-3,0],[-3,0]],
           cmap=plt.cm.CMRmap)
plt.axis('equal'), plt.grid()
num_classes = 8
c_p = plt.cm.Paired
# interactive step: for each class, click four corners of a polygon on the
# histogram figure (plt.ginput blocks until the clicks are made)
for i in np.arange(num_classes):
    xy = plt.ginput(4)
    x,y = [p[0] for p in xy], [p[1] for p in xy]
    x.append(x[0]) # make polygon
    y.append(y[0])
    plt.plot(x, y, color=c_p.colors[i]) # do plotting on the figure
    plt.draw()
    if i == 0:
        X,Y = np.array(x)[:,np.newaxis], np.array(y)[:,np.newaxis]
    else:
        X = np.hstack((X, np.array(x)[:,np.newaxis]))
        Y = np.hstack((Y, np.array(y)[:,np.newaxis]))
from matplotlib.path import Path
# rasterise the polygons: pixels whose (chi1, chi2) point falls inside
# polygon i are labelled i+1 (0 stays "unclassified")
Class = np.zeros_like(Red, dtype=int)
for i in np.arange(num_classes):
    p = Path(np.stack((X[:,i], Y[:,i]), axis=1)) # make a polygon
    IN = p.contains_points(np.stack((chi1.flatten(), chi2.flatten()), axis=1))
    Class[IN.reshape(Red.shape)] = int(i + 1)
plt.figure(), plt.imshow(Class, cmap=c_p, vmin=1, vmax=12)
#plt.colorbar(boundaries=np.linspace(0, num_classes, num_classes))
#plt.figure()
#plt.hist2d(Ia.flatten(),Ib.flatten(), bins=100,
# range=[[0.,.1],[0.,.1]]) | [
"matplotlib.pyplot.grid",
"numpy.array",
"numpy.divide",
"numpy.arange",
"matplotlib.pyplot.imshow",
"eratosthenes.generic.mapping_io.read_geo_image",
"matplotlib.pyplot.plot",
"eratosthenes.input.read_sentinel2.list_central_wavelength_msi",
"os.path.split",
"numpy.stack",
"matplotlib.pyplot.axi... | [((341, 370), 'eratosthenes.input.read_sentinel2.list_central_wavelength_msi', 'list_central_wavelength_msi', ([], {}), '()\n', (368, 370), False, 'from eratosthenes.input.read_sentinel2 import list_central_wavelength_msi\n'), ((637, 671), 'os.path.join', 'os.path.join', (['s2path', '"""shadow.tif"""'], {}), "(s2path, 'shadow.tif')\n", (649, 671), False, 'import os\n'), ((689, 709), 'os.path.split', 'os.path.split', (['fpath'], {}), '(fpath)\n', (702, 709), False, 'import os\n'), ((773, 861), 'os.path.join', 'os.path.join', (['"""/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/"""', '"""Cop-DEM-GLO-30"""'], {}), "('/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/',\n 'Cop-DEM-GLO-30')\n", (785, 861), False, 'import os\n'), ((1079, 1100), 'eratosthenes.generic.mapping_io.read_geo_image', 'read_geo_image', (['fpath'], {}), '(fpath)\n', (1093, 1100), False, 'from eratosthenes.generic.mapping_io import read_geo_image, make_geo_im\n'), ((1648, 1669), 'eratosthenes.generic.mapping_io.read_geo_image', 'read_geo_image', (['fpath'], {}), '(fpath)\n', (1662, 1669), False, 'from eratosthenes.generic.mapping_io import read_geo_image, make_geo_im\n'), ((2646, 2667), 'numpy.power', 'np.power', (['Ic', '(1.0 / 2)'], {}), '(Ic, 1.0 / 2)\n', (2654, 2667), True, 'import numpy as np\n'), ((2765, 2777), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2775, 2777), True, 'import matplotlib.pyplot as plt\n'), ((2972, 2994), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (2981, 2994), True, 'import numpy as np\n'), ((3448, 3477), 'numpy.zeros_like', 'np.zeros_like', (['Red'], {'dtype': 'int'}), '(Red, dtype=int)\n', (3461, 3477), True, 'import numpy as np\n'), ((3487, 3509), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (3496, 3509), True, 'import numpy as np\n'), ((908, 934), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (924, 934), False, 'import os\n'), ((939, 
950), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (948, 950), False, 'import os\n'), ((966, 984), 'os.chdir', 'os.chdir', (['dir_path'], {}), '(dir_path)\n', (974, 984), False, 'import os\n'), ((1265, 1404), 'os.path.join', 'os.path.join', (['"""/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full"""', '"""T05VMG_20191015T213531_B08.jp2"""'], {}), "(\n '/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full'\n , 'T05VMG_20191015T213531_B08.jp2')\n", (1277, 1404), False, 'import os\n'), ((2545, 2562), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Blue'], {}), '(Blue)\n', (2556, 2562), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray\n'), ((2564, 2580), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Red'], {}), '(Red)\n', (2575, 2580), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray\n'), ((2582, 2599), 'eratosthenes.preprocessing.image_transforms.mat_to_gray', 'mat_to_gray', (['Near'], {}), '(Near)\n', (2593, 2599), False, 'from eratosthenes.preprocessing.image_transforms import mat_to_gray\n'), ((2702, 2721), 'numpy.divide', 'np.divide', (['Ia', 'sqIc'], {}), '(Ia, sqIc)\n', (2711, 2721), True, 'import numpy as np\n'), ((2737, 2756), 'numpy.divide', 'np.divide', (['Ib', 'sqIc'], {}), '(Ib, sqIc)\n', (2746, 2756), True, 'import numpy as np\n'), ((2895, 2912), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2903, 2912), True, 'import matplotlib.pyplot as plt\n'), ((2914, 2924), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2922, 2924), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3018), 'matplotlib.pyplot.ginput', 'plt.ginput', (['(4)'], {}), '(4)\n', (3015, 3018), True, 'import matplotlib.pyplot as plt\n'), ((3126, 3161), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': 'c_p.colors[i]'}), '(x, y, color=c_p.colors[i])\n', (3134, 3161), True, 
'import matplotlib.pyplot as plt\n'), ((3194, 3204), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3202, 3204), True, 'import matplotlib.pyplot as plt\n'), ((3705, 3717), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3715, 3717), True, 'import matplotlib.pyplot as plt\n'), ((3719, 3763), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Class'], {'cmap': 'c_p', 'vmin': '(1)', 'vmax': '(12)'}), '(Class, cmap=c_p, vmin=1, vmax=12)\n', (3729, 3763), True, 'import matplotlib.pyplot as plt\n'), ((1129, 1156), 'os.path.join', 'os.path.join', (['Z_dir', 'Z_file'], {}), '(Z_dir, Z_file)\n', (1141, 1156), False, 'import os\n'), ((1180, 1207), 'os.path.join', 'os.path.join', (['Z_dir', 'R_file'], {}), '(Z_dir, R_file)\n', (1192, 1207), False, 'import os\n'), ((1449, 1588), 'os.path.join', 'os.path.join', (['"""/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full"""', '"""T05VMG_20191015T213531_B08.jp2"""'], {}), "(\n '/Users/Alten005/surfdrive/Eratosthenes/RedGlacier/Sentinel-2/S2-15-10-2019-full'\n , 'T05VMG_20191015T213531_B08.jp2')\n", (1461, 1588), False, 'import os\n'), ((2110, 2140), 'os.path.join', 'os.path.join', (['s2path', '"""B2.tif"""'], {}), "(s2path, 'B2.tif')\n", (2122, 2140), False, 'import os\n'), ((2168, 2198), 'os.path.join', 'os.path.join', (['s2path', '"""B3.tif"""'], {}), "(s2path, 'B3.tif')\n", (2180, 2198), False, 'import os\n'), ((2224, 2254), 'os.path.join', 'os.path.join', (['s2path', '"""B4.tif"""'], {}), "(s2path, 'B4.tif')\n", (2236, 2254), False, 'import os\n'), ((2281, 2311), 'os.path.join', 'os.path.join', (['s2path', '"""B8.tif"""'], {}), "(s2path, 'B8.tif')\n", (2293, 2311), False, 'import os\n'), ((3524, 3560), 'numpy.stack', 'np.stack', (['(X[:, i], Y[:, i])'], {'axis': '(1)'}), '((X[:, i], Y[:, i]), axis=1)\n', (3532, 3560), True, 'import numpy as np\n'), ((1919, 1956), 'os.path.join', 'os.path.join', (['s2path', '"""detIdBlue.tif"""'], {}), "(s2path, 'detIdBlue.tif')\n", (1931, 1956), 
False, 'import os\n'), ((1985, 2019), 'os.path.join', 'os.path.join', (['s2path', '"""viewZn.tif"""'], {}), "(s2path, 'viewZn.tif')\n", (1997, 2019), False, 'import os\n'), ((2048, 2082), 'os.path.join', 'os.path.join', (['s2path', '"""viewAz.tif"""'], {}), "(s2path, 'viewAz.tif')\n", (2060, 2082), False, 'import os\n'), ((3234, 3245), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3242, 3245), True, 'import numpy as np\n'), ((3261, 3272), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3269, 3272), True, 'import numpy as np\n'), ((3323, 3334), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3331, 3334), True, 'import numpy as np\n'), ((3377, 3388), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3385, 3388), True, 'import numpy as np\n')] |
# Practicing Dynamic Programming (DP) with the log cutting problem given on the practice midterm.
import numpy as np
################################################################################
# Slow Algorithm
################################################################################
def cut_log(d, j, k):
    """Exponential-time recursion for the minimum log-cutting cost.

    ``d`` holds the cut marks (with ``d[j] == 0`` in the current frame);
    a cut of the segment (j, k) costs the segment length ``d[k]``, and the
    right sub-problem is re-based by shifting all marks by ``d[i]``.
    """
    if j + 1 == k:
        # Adjacent marks: nothing left to cut.
        return 0
    best = float('Inf')
    for split in range(j + 1, k):
        candidate = d[k] + cut_log(d, j, split) + cut_log(d - d[split], split, k)
        if candidate < best:
            best = candidate
    return best
################################################################################
# Top Down Algorithm
################################################################################
def memoized_cut_log(d):
    """Top-down DP entry point: memoize sub-problem costs in an n x n table.

    Because every sub-call (j, k) receives the mark array re-based so that
    ``d[j] == 0``, the cost of a sub-problem is fully determined by (j, k)
    and the memo table is sound.
    """
    n = len(d)
    memo = np.ones([n, n]) * np.inf
    return memoized_cut_log_aux(d, 0, n - 1, memo)


def memoized_cut_log_aux(d, j, k, r):
    """Recursive helper: return (and cache) the optimal cost for (j, k)."""
    if r[j, k] < np.inf:
        # Already solved this sub-problem.
        return r[j, k]
    if j + 1 == k:
        r[j, k] = 0
        return r[j, k]
    best = float('Inf')
    for i in range(j + 1, k):
        left = memoized_cut_log_aux(d, j, i, r)
        right = memoized_cut_log_aux(d - d[i], i, k, r)
        best = min(best, d[k] + left + right)
    r[j, k] = best
    return r[j, k]
################################################################################
# Bottom Up Algorithm
################################################################################
# def bottom_up_cut_log(d):
# k = len(d)
# r = np.zeros([k, k])
# for i in range(2, k):
# c = np.inf
# for j in range(1, i):
# c = min(c, d[i] + r[j, i-1] + r[j-1, i])
# r[j, i] = c
# print(r)
# return r[0, k-1]
################################################################################
# Main
################################################################################
if __name__ == '__main__':
    # Demo: cut marks at 3 and 8 on a log of length 10 (optimal cost 17).
    dist = np.array([0, 3, 8, 10])
    slow_cost = cut_log(dist, 0, 3)
    fast_cost = memoized_cut_log(dist)
    print("min cost (slow) = $" + str(slow_cost))
    print("min cost (top down) = $" + str(fast_cost))
    # print("min cost (slow) = $" + str(bottom_up_cut_log(dist)))
| [
"numpy.array",
"numpy.ones"
] | [((1879, 1902), 'numpy.array', 'np.array', (['[0, 3, 8, 10]'], {}), '([0, 3, 8, 10])\n', (1887, 1902), True, 'import numpy as np\n'), ((759, 774), 'numpy.ones', 'np.ones', (['[k, k]'], {}), '([k, k])\n', (766, 774), True, 'import numpy as np\n')] |
#!/usr/bin/python
''' Processes simulated files (e.g., draw samples from query points, create graphs, etc.) '''
import os
import yaml
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
class Sampler():
    '''
    Dissects a pre-computed simulation batch: interpolates point samples
    along an (x, y, t) path and produces uniform snapshots of the field at
    an arbitrary fidelity.

    Expects a YAML config describing the environment and an ``.npz``
    history file (key ``arr_0``) holding one flattened world state per
    time step.
    '''
    def __init__(self, config_file):
        '''
        Initialize the sampler from a YAML configuration.

        :param config_file: path to a YAML file with keys Lx, Ly, T, dx,
            dy, dt, file (path to the .npz history) and interpolator
            (a kind accepted by scipy.interpolate.interp2d).
        '''
        with open(config_file) as file:
            params = yaml.load(file) #creates a dictionary of parameters which describe the environment from a yaml
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted files and requires a Loader argument on PyYAML >= 6;
        # yaml.safe_load(file) was likely intended -- confirm.
        self.Lx = params['Lx'] #length of environment (m)
        self.Ly = params['Ly'] #width/height of environment (m)
        self.T = params['T'] #total simulated time (s)
        self.dx = params['dx'] #length discretization (m)
        self.dy = params['dy'] #width/height discretization (m)
        self.dt = params['dt'] #time discretization (s)
        self.NI = int(self.Lx/self.dx)+1 #number of discrete cells in length
        self.NJ = int(self.Ly/self.dy)+1 #number of discrete cells in width/height
        self.history = np.load(params['file'])['arr_0']
        self.x, self.y = np.linspace(0, self.Lx, self.NI), np.linspace(0, self.Ly, self.NJ)
        self.X, self.Y = np.meshgrid(np.linspace(0, self.Lx, self.NI), np.linspace(0, self.Ly, self.NJ), indexing='xy') #define X,Y coordinates for cells for location lookup
        # NOTE(review): this overwrites the total simulated time with an axis
        # of self.T samples spanning [0, self.dt]; a time axis covering the
        # whole simulation (e.g. np.arange(0, T, dt)) may have been intended
        # -- confirm against how point[2] is matched in query_path below.
        self.T = np.linspace(0, self.dt, self.T)
        self.interpolator = params['interpolator']
    def query_path(self, points):
        ''' returns the observation of the phenomenon for the x, y, t coordinate(s) passed in. '''
        obs = []
        for point in points:
            historyidx = np.abs(point[2] - self.T).argmin() #find the closest time snapshot
            world = self.history[:,historyidx].reshape((self.NI, self.NJ))
            # NOTE(review): interp2d is deprecated and removed in SciPy 1.14;
            # RegularGridInterpolator is the documented replacement.
            f = interpolate.interp2d(self.x, self.y, world, kind=self.interpolator)
            obs.append(f(point[0], point[1])[0])
        return obs
    def query_snapshot(self, dimensions=None, time=0):
        ''' returns a uniformly sampled snapshot of a phenomenon at a given time at the fidelity specified by dimensions '''
        if dimensions is None: #just default snapshot
            historyidx = np.abs(time - self.T).argmin()
            self.snapX, self.snapY = self.x, self.y
            return self.history[:,historyidx].reshape((self.NI, self.NJ))
        # Re-grid the closest-in-time world state onto the requested
        # (dimensions[0] x dimensions[1]) sampling lattice.
        historyidx = np.abs(time - self.T).argmin()
        world = self.history[:,historyidx].reshape((self.NI, self.NJ))
        f = interpolate.interp2d(self.x, self.y, world, kind=self.interpolator)
        self.snapX, self.snapY = np.linspace(0, self.Lx, dimensions[0]), np.linspace(0, self.Ly, dimensions[1])
        return f(self.snapX, self.snapY)
if __name__ == '__main__':
    # Demo: sample a short space-time path and show each observation on
    # top of a coarse (10x10) snapshot of the field at the same time.
    sampler = Sampler('../config/simple_sampler.yaml')
    path = [(0, 0, 0), (5, 1, 1), (10, 2, 2), (10, 10, 3)]
    observations = sampler.query_path(path)
    snapshots = []
    for point in path:
        snapshots.append(sampler.query_snapshot((10,10), point[2]))
    for point, obs, snap in zip(path, observations, snapshots):
        # Shared vmin/vmax so the snapshot and the sampled point are drawn
        # on the same color scale across all frames.
        plt.contourf(sampler.snapX, sampler.snapY, snap, cmap='viridis', vmin=np.nanmin(sampler.history), vmax=np.nanmax(sampler.history))
        plt.scatter(point[0], point[1], c=obs, cmap='viridis', vmin=np.nanmin(sampler.history), vmax=np.nanmax(sampler.history), edgecolor='red')
        plt.show()
| [
"numpy.abs",
"yaml.load",
"numpy.linspace",
"numpy.nanmax",
"numpy.nanmin",
"numpy.load",
"scipy.interpolate.interp2d",
"matplotlib.pyplot.show"
] | [((1441, 1472), 'numpy.linspace', 'np.linspace', (['(0)', 'self.dt', 'self.T'], {}), '(0, self.dt, self.T)\n', (1452, 1472), True, 'import numpy as np\n'), ((2576, 2643), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['self.x', 'self.y', 'world'], {'kind': 'self.interpolator'}), '(self.x, self.y, world, kind=self.interpolator)\n', (2596, 2643), False, 'from scipy import interpolate\n'), ((3455, 3465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3463, 3465), True, 'import matplotlib.pyplot as plt\n'), ((480, 495), 'yaml.load', 'yaml.load', (['file'], {}), '(file)\n', (489, 495), False, 'import yaml\n'), ((1124, 1147), 'numpy.load', 'np.load', (["params['file']"], {}), "(params['file'])\n", (1131, 1147), True, 'import numpy as np\n'), ((1183, 1215), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Lx', 'self.NI'], {}), '(0, self.Lx, self.NI)\n', (1194, 1215), True, 'import numpy as np\n'), ((1217, 1249), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Ly', 'self.NJ'], {}), '(0, self.Ly, self.NJ)\n', (1228, 1249), True, 'import numpy as np\n'), ((1287, 1319), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Lx', 'self.NI'], {}), '(0, self.Lx, self.NI)\n', (1298, 1319), True, 'import numpy as np\n'), ((1321, 1353), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Ly', 'self.NJ'], {}), '(0, self.Ly, self.NJ)\n', (1332, 1353), True, 'import numpy as np\n'), ((1888, 1955), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['self.x', 'self.y', 'world'], {'kind': 'self.interpolator'}), '(self.x, self.y, world, kind=self.interpolator)\n', (1908, 1955), False, 'from scipy import interpolate\n'), ((2677, 2715), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Lx', 'dimensions[0]'], {}), '(0, self.Lx, dimensions[0])\n', (2688, 2715), True, 'import numpy as np\n'), ((2717, 2755), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Ly', 'dimensions[1]'], {}), '(0, self.Ly, dimensions[1])\n', (2728, 2755), True, 'import numpy as np\n'), ((2462, 2483), 
'numpy.abs', 'np.abs', (['(time - self.T)'], {}), '(time - self.T)\n', (2468, 2483), True, 'import numpy as np\n'), ((3240, 3266), 'numpy.nanmin', 'np.nanmin', (['sampler.history'], {}), '(sampler.history)\n', (3249, 3266), True, 'import numpy as np\n'), ((3273, 3299), 'numpy.nanmax', 'np.nanmax', (['sampler.history'], {}), '(sampler.history)\n', (3282, 3299), True, 'import numpy as np\n'), ((3369, 3395), 'numpy.nanmin', 'np.nanmin', (['sampler.history'], {}), '(sampler.history)\n', (3378, 3395), True, 'import numpy as np\n'), ((3402, 3428), 'numpy.nanmax', 'np.nanmax', (['sampler.history'], {}), '(sampler.history)\n', (3411, 3428), True, 'import numpy as np\n'), ((1730, 1755), 'numpy.abs', 'np.abs', (['(point[2] - self.T)'], {}), '(point[2] - self.T)\n', (1736, 1755), True, 'import numpy as np\n'), ((2284, 2305), 'numpy.abs', 'np.abs', (['(time - self.T)'], {}), '(time - self.T)\n', (2290, 2305), True, 'import numpy as np\n')] |
import os
import os.path
from os import listdir
from os.path import join, isdir
import cv2
import numpy as np
from images_utils import get_images_recursively
def imread(path):
    """
    Read the image at *path* and return it as a float RGB array.

    cv2 loads images in BGR channel order; the ``[..., ::-1]`` slice
    reverses the last axis to obtain RGB.

    :raises FileNotFoundError: if the file is missing or unreadable
        (cv2.imread signals failure by returning None rather than raising).
    """
    img_bgr = cv2.imread(path)
    if img_bgr is None:
        # BUG FIX: cv2.imread silently returns None on failure, which would
        # previously surface as an obscure TypeError downstream.
        raise FileNotFoundError('cannot read image: {}'.format(path))
    img_rgb = img_bgr[..., ::-1]
    # np.float was removed in NumPy 1.24; the builtin float is the same dtype.
    return img_rgb.astype(float)
def normalize_rgb(image):
    """Map 8-bit channel values from [0, 255] onto [-1.0, 1.0]."""
    half_range = 127.5
    return np.array(image) / half_range - 1.0
def is_normalized(image):
    """
    Return True if every RGB channel of every pixel lies within [-1, 1].

    *image* is expected to be an H x W x (>=3) array; only the first three
    channels of each pixel are checked, matching the original intent.

    BUG FIX: the original test used the chained comparison
    ``-1 > p[0] > 1 and ...`` which can never be true (a value cannot be
    below -1 and above 1 at once), so the function unconditionally
    returned True.
    """
    for row in image:
        for pixel in row:
            if not (-1 <= pixel[0] <= 1 and
                    -1 <= pixel[1] <= 1 and
                    -1 <= pixel[2] <= 1):
                return False
    return True
def create_folder_is_not_exists(folder):
    """Create *folder* if it does not already exist (no-op otherwise)."""
    try:
        os.mkdir(folder)
    except FileExistsError:
        # The path is already there -- nothing to do (EAFP).
        pass
def convert_to_files(data_set):
    """
    Convert every image found under ``data/<data_set>`` to a normalized
    RGB array and save each as ``data/<data_set>-rgb/<n>.npy``.

    The output folder is created if missing; files are numbered in the
    order returned by get_images_recursively.
    """
    images_paths = get_images_recursively('data/' + data_set)
    data_set_rgb = data_set + '-rgb'
    # data_set_hsl = data_set + '-hsl'
    if not os.path.exists('data/' + data_set_rgb):
        os.mkdir('data/' + data_set_rgb)
    count = 0
    for image_path in images_paths:
        print('converting {} to RGB'.format(image_path))
        rgb_normalized = normalize_rgb(imread(image_path))
        np.save('data/' + data_set_rgb + '/' + str(count) + '.npy', rgb_normalized)
        count += 1
    # Disabled HSL conversion path (convert_to_hsl is not defined in this
    # file); kept for reference.
    # if not os.path.exists('data/' + data_set_hsl):
    #     os.mkdir('data/' + data_set_hsl)
    # count = 0
    # for image_path in images_paths:
    #     print('converting {} to HSL'.format(image_path))
    #     hsl = convert_to_hsl(imread(image_path))
    #     np.save('data/' + data_set_hsl + '/' + str(count) + '.npy', hsl)
    #     count += 1
# Convert every raw dataset folder under data/ that has not already been
# converted (output folders carry the -rgb / -hsl suffix).
folders = [f for f in listdir('data') if isdir(join('data', f))]
for data_set in folders:
    if not data_set.endswith('-rgb') and not data_set.endswith('-hsl'):
        convert_to_files(data_set)
| [
"images_utils.get_images_recursively",
"os.path.exists",
"os.listdir",
"os.path.join",
"numpy.array",
"os.mkdir",
"cv2.imread"
] | [((192, 208), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (202, 208), False, 'import cv2\n'), ((716, 758), 'images_utils.get_images_recursively', 'get_images_recursively', (["('data/' + data_set)"], {}), "('data/' + data_set)\n", (738, 758), False, 'from images_utils import get_images_recursively\n'), ((620, 642), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (634, 642), False, 'import os\n'), ((648, 664), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (656, 664), False, 'import os\n'), ((842, 880), 'os.path.exists', 'os.path.exists', (["('data/' + data_set_rgb)"], {}), "('data/' + data_set_rgb)\n", (856, 880), False, 'import os\n'), ((886, 918), 'os.mkdir', 'os.mkdir', (["('data/' + data_set_rgb)"], {}), "('data/' + data_set_rgb)\n", (894, 918), False, 'import os\n'), ((1547, 1562), 'os.listdir', 'listdir', (['"""data"""'], {}), "('data')\n", (1554, 1562), False, 'from os import listdir\n'), ((349, 364), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (357, 364), True, 'import numpy as np\n'), ((1572, 1587), 'os.path.join', 'join', (['"""data"""', 'f'], {}), "('data', f)\n", (1576, 1587), False, 'from os.path import join, isdir\n')] |
""" Testing for Problem.check_partials and check_totals."""
import unittest
from six import iteritems
import numpy as np
from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, \
ImplicitComponent, NonlinearBlockGS
from openmdao.devtools.testutil import assert_rel_error, TestLogger
from openmdao.test_suite.components.impl_comp_array import TestImplCompArrayMatVec
from openmdao.test_suite.components.paraboloid_mat_vec import ParaboloidMatVec
from openmdao.test_suite.components.sellar import SellarDerivatives
class ParaboloidTricky(ExplicitComponent):
    """
    Paraboloid f(x, y) = (x-3)^2 + xy + (y+4)^2 - 3 with badly scaled inputs.

    Both inputs are multiplied by a tiny factor (1e-7) before the function is
    evaluated, so finite-difference checks with a default absolute step size
    produce poor accuracy unless the check options are adjusted.
    """
    def setup(self):
        # Two scalar inputs, one scalar output, dense analytic partials.
        self.add_input('x', val=0.0)
        self.add_input('y', val=0.0)
        self.add_output('f_xy', val=0.0)
        self.scale = 1e-7
        self.declare_partials(of='*', wrt='*')
    def compute(self, inputs, outputs):
        """
        Evaluate f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 at the scaled inputs.
        Optimal solution (minimum): x = 6.6667; y = -7.3333
        """
        xs = inputs['x'] * self.scale
        ys = inputs['y'] * self.scale
        outputs['f_xy'] = (xs - 3.0)**2 + xs*ys + (ys + 4.0)**2 - 3.0
    def compute_partials(self, inputs, partials):
        """
        Analytic derivatives with respect to the *unscaled* inputs.
        """
        s = self.scale
        xv = inputs['x']
        yv = inputs['y']
        # Chain rule: d f/d x_in = s * (2*(x_in*s - 3) + y_in*s), expanded below.
        partials['f_xy', 'x'] = 2.0*xv*s*s - 6.0*s + yv*s*s
        partials['f_xy', 'y'] = 2.0*yv*s*s + 8.0*s + xv*s*s
class TestProblemCheckPartials(unittest.TestCase):
    """Tests for Problem.check_partials: error flagging, output control, and check options."""
    def test_incorrect_jacobian(self):
        """Deliberately wrong analytic partials must be flagged with a trailing '*'."""
        class MyComp(ExplicitComponent):
            def setup(self):
                self.add_input('x1', 3.0)
                self.add_input('x2', 5.0)
                self.add_output('y', 5.5)
                self.declare_partials(of='*', wrt='*')
            def compute(self, inputs, outputs):
                """ Doesn't do much. """
                outputs['y'] = 3.0*inputs['x1'] + 4.0*inputs['x2']
            def compute_partials(self, inputs, partials):
                """Intentionally incorrect derivative."""
                J = partials
                J['y', 'x1'] = np.array([4.0])
                J['y', 'x2'] = np.array([40])
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x1', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('x2', 5.0))
        prob.model.add_subsystem('comp', MyComp())
        prob.model.connect('p1.x1', 'comp.x1')
        prob.model.connect('p2.x2', 'comp.x2')
        prob.set_solver_print(level=0)
        prob.setup(check=False)
        prob.run_model()
        testlogger = TestLogger()
        prob.check_partials(logger=testlogger)
        lines = testlogger.get('info')
        # Locate the header line for the 'y' wrt 'x1' pairing; the lines at
        # fixed offsets below it carry the error-magnitude output.
        y_wrt_x1_line = lines.index("  comp: 'y' wrt 'x1'\n")
        self.assertTrue(lines[y_wrt_x1_line+4].endswith('*'),
                        msg='Error flag expected in output but not displayed')
        self.assertTrue(lines[y_wrt_x1_line+5].endswith('*'),
                        msg='Error flag expected in output but not displayed')
        self.assertFalse(lines[y_wrt_x1_line+6].endswith('*'),
                         msg='Error flag not expected in output but displayed')
    def test_component_only(self):
        """check_partials works when the model is a bare component (no Group)."""
        class MyComp(ExplicitComponent):
            def setup(self):
                self.add_input('x1', 3.0)
                self.add_input('x2', 5.0)
                self.add_output('y', 5.5)
                self.declare_partials(of='*', wrt='*')
            def compute(self, inputs, outputs):
                """ Doesn't do much. """
                outputs['y'] = 3.0*inputs['x1'] + 4.0*inputs['x2']
            def compute_partials(self, inputs, partials):
                """Intentionally incorrect derivative."""
                J = partials
                J['y', 'x1'] = np.array([4.0])
                J['y', 'x2'] = np.array([40])
        prob = Problem()
        prob.model = MyComp()
        prob.set_solver_print(level=0)
        prob.setup(check=False)
        prob.run_model()
        testlogger = TestLogger()
        prob.check_partials(logger=testlogger)
        lines = testlogger.get('info')
        # Component name is empty when the component IS the model.
        y_wrt_x1_line = lines.index("  : 'y' wrt 'x1'\n")
        self.assertTrue(lines[y_wrt_x1_line+4].endswith('*'),
                        msg='Error flag expected in output but not displayed')
        self.assertTrue(lines[y_wrt_x1_line+5].endswith('*'),
                        msg='Error flag expected in output but not displayed')
        self.assertFalse(lines[y_wrt_x1_line+6].endswith('*'),
                         msg='Error flag not expected in output but displayed')
    def test_component_only_suppress(self):
        """suppress_output=True returns the data dict but logs nothing."""
        class MyComp(ExplicitComponent):
            def setup(self):
                self.add_input('x1', 3.0)
                self.add_input('x2', 5.0)
                self.add_output('y', 5.5)
                self.declare_partials(of='*', wrt='*')
            def compute(self, inputs, outputs):
                """ Doesn't do much. """
                outputs['y'] = 3.0*inputs['x1'] + 4.0*inputs['x2']
            def compute_partials(self, inputs, partials):
                """Intentionally incorrect derivative."""
                J = partials
                J['y', 'x1'] = np.array([4.0])
                J['y', 'x2'] = np.array([40])
        prob = Problem()
        prob.model = MyComp()
        prob.set_solver_print(level=0)
        prob.setup(check=False)
        prob.run_model()
        testlogger = TestLogger()
        data = prob.check_partials(logger=testlogger, suppress_output=True)
        # The returned dict still contains the full set of sub-entries ...
        subheads = data[''][('y', 'x1')]
        self.assertTrue('J_fwd' in subheads)
        self.assertTrue('rel error' in subheads)
        self.assertTrue('abs error' in subheads)
        self.assertTrue('magnitude' in subheads)
        # ... but nothing was written to the logger.
        lines = testlogger.get('info')
        self.assertEqual(len(lines), 0)
    def test_missing_entry(self):
        """An undeclared-in-compute_partials entry shows up as zero vs the FD value."""
        class MyComp(ExplicitComponent):
            def setup(self):
                self.add_input('x1', 3.0)
                self.add_input('x2', 5.0)
                self.add_output('y', 5.5)
                self.declare_partials(of='*', wrt='*')
            def compute(self, inputs, outputs):
                """ Doesn't do much. """
                outputs['y'] = 3.0*inputs['x1'] + 4.0*inputs['x2']
            def compute_partials(self, inputs, partials):
                """Intentionally left out derivative."""
                J = partials
                J['y', 'x1'] = np.array([3.0])
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x1', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('x2', 5.0))
        prob.model.add_subsystem('comp', MyComp())
        prob.model.connect('p1.x1', 'comp.x1')
        prob.model.connect('p2.x2', 'comp.x2')
        prob.set_solver_print(level=0)
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True)
        # ('y','x1') was provided and is correct -> zero abs/rel error.
        abs_error = data['comp']['y', 'x1']['abs error']
        rel_error = data['comp']['y', 'x1']['rel error']
        self.assertAlmostEqual(abs_error.forward, 0.)
        self.assertAlmostEqual(abs_error.reverse, 0.)
        self.assertAlmostEqual(rel_error.forward, 0.)
        self.assertAlmostEqual(rel_error.reverse, 0.)
        self.assertAlmostEqual(np.linalg.norm(data['comp']['y', 'x1']['J_fd'] - 3.), 0.,
                               delta=1e-6)
        # ('y','x2') was omitted: analytic J is 0 while FD finds 4, so the
        # absolute error is 4 and the relative error is 1.
        abs_error = data['comp']['y', 'x2']['abs error']
        rel_error = data['comp']['y', 'x2']['rel error']
        self.assertAlmostEqual(abs_error.forward, 4.)
        self.assertAlmostEqual(abs_error.reverse, 4.)
        self.assertAlmostEqual(rel_error.forward, 1.)
        self.assertAlmostEqual(rel_error.reverse, 1.)
        self.assertAlmostEqual(np.linalg.norm(data['comp']['y', 'x2']['J_fd'] - 4.), 0.,
                               delta=1e-6)
    def test_nested_fd_units(self):
        """FD-approximated partials agree with the check even with unit conversions."""
        class UnitCompBase(ExplicitComponent):
            def setup(self):
                self.add_input('T', val=284., units="degR", desc="Temperature")
                self.add_input('P', val=1., units='lbf/inch**2', desc="Pressure")
                self.add_output('flow:T', val=284., units="degR", desc="Temperature")
                self.add_output('flow:P', val=1., units='lbf/inch**2', desc="Pressure")
                # Finite difference everything
                self.declare_partials(of='*', wrt='*', method='fd')
            def compute(self, inputs, outputs):
                outputs['flow:T'] = inputs['T']
                outputs['flow:P'] = inputs['P']
        p = Problem()
        model = p.model = Group()
        # Source units (degK, bar) differ from the component's (degR, lbf/inch**2).
        indep = model.add_subsystem('indep', IndepVarComp(), promotes=['*'])
        indep.add_output('T', val=100., units='degK')
        indep.add_output('P', val=1., units='bar')
        units = model.add_subsystem('units', UnitCompBase(), promotes=['*'])
        p.setup()
        data = p.check_partials(suppress_output=True)
        for comp_name, comp in iteritems(data):
            for partial_name, partial in iteritems(comp):
                forward = partial['J_fwd']
                reverse = partial['J_rev']
                fd = partial['J_fd']
                self.assertAlmostEqual(np.linalg.norm(forward - reverse), 0.)
                self.assertAlmostEqual(np.linalg.norm(forward - fd), 0., delta=1e-6)
    def test_units(self):
        """Analytic partials check out under unit conversion; FD count is bounded."""
        class UnitCompBase(ExplicitComponent):
            def setup(self):
                self.add_input('T', val=284., units="degR", desc="Temperature")
                self.add_input('P', val=1., units='lbf/inch**2', desc="Pressure")
                self.add_output('flow:T', val=284., units="degR", desc="Temperature")
                self.add_output('flow:P', val=1., units='lbf/inch**2', desc="Pressure")
                # Counts calls to compute() so the test can bound FD evaluations.
                self.run_count = 0
                self.declare_partials(of='*', wrt='*')
            def compute_partials(self, inputs, partials):
                partials['flow:T', 'T'] = 1.
                partials['flow:P', 'P'] = 1.
            def compute(self, inputs, outputs):
                outputs['flow:T'] = inputs['T']
                outputs['flow:P'] = inputs['P']
                self.run_count += 1
        p = Problem()
        model = p.model = Group()
        indep = model.add_subsystem('indep', IndepVarComp(), promotes=['*'])
        indep.add_output('T', val=100., units='degK')
        indep.add_output('P', val=1., units='bar')
        units = model.add_subsystem('units', UnitCompBase(), promotes=['*'])
        model.nonlinear_solver = NonLinearRunOnce()
        p.setup()
        data = p.check_partials(suppress_output=True)
        for comp_name, comp in iteritems(data):
            for partial_name, partial in iteritems(comp):
                abs_error = partial['abs error']
                self.assertAlmostEqual(abs_error.forward, 0.)
                self.assertAlmostEqual(abs_error.reverse, 0.)
                self.assertAlmostEqual(abs_error.forward_reverse, 0.)
        # Make sure we only FD this twice.
        # The count is 5 because in check_partials, there are two calls to apply_nonlinear
        # when compute the fwd and rev analytic derivatives, then one call to apply_nonlinear
        # to compute the reference point for FD, then two additional calls for the two inputs.
        comp = model.get_subsystem('units')
        self.assertEqual(comp.run_count, 5)
    def test_scalar_val(self):
        """Sparse (rows/cols) scalar-valued declared partials check as identity."""
        class PassThrough(ExplicitComponent):
            """
            Helper component that is needed when variables must be passed
            directly from input to output
            """
            def __init__(self, i_var, o_var, val, units=None):
                super(PassThrough, self).__init__()
                self.i_var = i_var
                self.o_var = o_var
                self.units = units
                self.val = val
                if isinstance(val, (float, int)) or np.isscalar(val):
                    size=1
                else:
                    size = np.prod(val.shape)
                self.size = size
            def setup(self):
                if self.units is None:
                    self.add_input(self.i_var, self.val)
                    self.add_output(self.o_var, self.val)
                else:
                    self.add_input(self.i_var, self.val, units=self.units)
                    self.add_output(self.o_var, self.val, units=self.units)
                # Diagonal sparsity pattern with a constant value of 1.
                row_col = np.arange(self.size)
                self.declare_partials(of=self.o_var, wrt=self.i_var,
                                      val=1, rows=row_col, cols=row_col)
            def compute(self, inputs, outputs):
                outputs[self.o_var] = inputs[self.i_var]
            def linearize(self, inputs, outputs, J):
                pass
        p = Problem()
        indeps = p.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
        indeps.add_output('foo', val=np.ones(4))
        indeps.add_output('foo2', val=np.ones(4))
        p.model.add_subsystem('pt', PassThrough("foo", "bar", val=np.ones(4)), promotes=['*'])
        p.model.add_subsystem('pt2', PassThrough("foo2", "bar2", val=np.ones(4)), promotes=['*'])
        p.set_solver_print(level=0)
        p.setup()
        p.run_model()
        data = p.check_partials(suppress_output=True)
        identity = np.eye(4)
        assert_rel_error(self, data['pt'][('bar', 'foo')]['J_fwd'], identity, 1e-15)
        assert_rel_error(self, data['pt'][('bar', 'foo')]['J_rev'], identity, 1e-15)
        assert_rel_error(self, data['pt'][('bar', 'foo')]['J_fd'], identity, 1e-9)
        assert_rel_error(self, data['pt2'][('bar2', 'foo2')]['J_fwd'], identity, 1e-15)
        assert_rel_error(self, data['pt2'][('bar2', 'foo2')]['J_rev'], identity, 1e-15)
        assert_rel_error(self, data['pt2'][('bar2', 'foo2')]['J_fd'], identity, 1e-9)
    def test_matrix_free_explicit(self):
        """Matrix-free (apply-linear) explicit components are checkable."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        prob.model.add_subsystem('comp', ParaboloidMatVec())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True)
        for comp_name, comp in iteritems(data):
            for partial_name, partial in iteritems(comp):
                abs_error = partial['abs error']
                rel_error = partial['rel error']
                assert_rel_error(self, abs_error.forward, 0., 1e-5)
                assert_rel_error(self, abs_error.reverse, 0., 1e-5)
                assert_rel_error(self, abs_error.forward_reverse, 0., 1e-5)
                assert_rel_error(self, rel_error.forward, 0., 1e-5)
                assert_rel_error(self, rel_error.reverse, 0., 1e-5)
                assert_rel_error(self, rel_error.forward_reverse, 0., 1e-5)
        # Spot-check the actual derivative values at (x, y) = (3, 5).
        assert_rel_error(self, data['comp'][('f_xy', 'x')]['J_fwd'][0][0], 5.0, 1e-6)
        assert_rel_error(self, data['comp'][('f_xy', 'x')]['J_rev'][0][0], 5.0, 1e-6)
        assert_rel_error(self, data['comp'][('f_xy', 'y')]['J_fwd'][0][0], 21.0, 1e-6)
        assert_rel_error(self, data['comp'][('f_xy', 'y')]['J_rev'][0][0], 21.0, 1e-6)
    def test_matrix_free_implicit(self):
        """Matrix-free implicit components are checkable."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('rhs', np.ones((2, ))))
        prob.model.add_subsystem('comp', TestImplCompArrayMatVec())
        prob.model.connect('p1.rhs', 'comp.rhs')
        prob.set_solver_print(level=0)
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True)
        for comp_name, comp in iteritems(data):
            for partial_name, partial in iteritems(comp):
                abs_error = partial['abs error']
                rel_error = partial['rel error']
                assert_rel_error(self, abs_error.forward, 0., 1e-5)
                assert_rel_error(self, abs_error.reverse, 0., 1e-5)
                assert_rel_error(self, abs_error.forward_reverse, 0., 1e-5)
                assert_rel_error(self, rel_error.forward, 0., 1e-5)
                assert_rel_error(self, rel_error.reverse, 0., 1e-5)
                assert_rel_error(self, rel_error.forward_reverse, 0., 1e-5)
    def test_implicit_undeclared(self):
        # Test to see that check_partials works when state_wrt_input and state_wrt_state
        # partials are missing.
        class ImplComp4Test(ImplicitComponent):
            def setup(self):
                self.add_input('x', np.ones(2))
                self.add_input('dummy', np.ones(2))
                self.add_output('y', np.ones(2))
                self.add_output('extra', np.ones(2))
                self.mtx = np.array([
                    [ 3., 4.],
                    [ 2., 3.],
                ])
                self.declare_partials(of='*', wrt='*')
            def apply_nonlinear(self, inputs, outputs, residuals):
                residuals['y'] = self.mtx.dot(outputs['y']) - inputs['x']
            def linearize(self, inputs, outputs, partials):
                partials['y', 'x'] = -np.eye(2)
                partials['y', 'y'] = self.mtx
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', np.ones((2, ))))
        prob.model.add_subsystem('p2', IndepVarComp('dummy', np.ones((2, ))))
        prob.model.add_subsystem('comp', ImplComp4Test())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.dummy', 'comp.dummy')
        prob.set_solver_print(level=0)
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True)
        # Unprovided entries default to zero Jacobians in both directions.
        assert_rel_error(self, data['comp']['y', 'extra']['J_fwd'], np.zeros((2, 2)))
        assert_rel_error(self, data['comp']['y', 'extra']['J_rev'], np.zeros((2, 2)))
        assert_rel_error(self, data['comp']['y', 'dummy']['J_fwd'], np.zeros((2, 2)))
        assert_rel_error(self, data['comp']['y', 'dummy']['J_rev'], np.zeros((2, 2)))
    def test_dependent_false_hide(self):
        # Test that we omit derivs declared with dependent=False
        class SimpleComp1(ExplicitComponent):
            def setup(self):
                self.add_input('z', shape=(2, 2))
                self.add_input('x', shape=(2, 2))
                self.add_output('g', shape=(2, 2))
                self.declare_partials(of='g', wrt='x')
                self.declare_partials(of='g', wrt='z', dependent=False)
            def compute(self, inputs, outputs):
                outputs['g'] = 3.0*inputs['x']
            def compute_partials(self, inputs, partials):
                partials['g', 'x'] = 3.
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('z', np.ones((2, 2))))
        prob.model.add_subsystem('p2', IndepVarComp('x', np.ones((2, 2))))
        prob.model.add_subsystem('comp', SimpleComp1())
        prob.model.connect('p1.z', 'comp.z')
        prob.model.connect('p2.x', 'comp.x')
        prob.setup(check=False)
        testlogger = TestLogger()
        data = prob.check_partials(logger=testlogger)
        lines = testlogger.get('info')
        # 'g' wrt 'z' is truly independent, so it is hidden from output and data.
        self.assertTrue("  comp: 'g' wrt 'z'\n" not in lines)
        self.assertTrue(('g', 'z') not in data['comp'])
        self.assertTrue("  comp: 'g' wrt 'x'\n" in lines)
        self.assertTrue(('g', 'x') in data['comp'])
    def test_dependent_false_show(self):
        # Test that we show derivs declared with dependent=False if the fd is not
        # ~zero.
        class SimpleComp2(ExplicitComponent):
            def setup(self):
                self.add_input('z', shape=(2, 2))
                self.add_input('x', shape=(2, 2))
                self.add_output('g', shape=(2, 2))
                self.declare_partials(of='g', wrt='x')
                self.declare_partials('g', 'z', dependent=False)
            def compute(self, inputs, outputs):
                outputs['g'] = 2.0*inputs['z'] + 3.0*inputs['x']
            def compute_partials(self, inputs, partials):
                partials['g', 'x'] = 3.
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('z', np.ones((2, 2))))
        prob.model.add_subsystem('p2', IndepVarComp('x', np.ones((2, 2))))
        prob.model.add_subsystem('comp', SimpleComp2())
        prob.model.connect('p1.z', 'comp.z')
        prob.model.connect('p2.x', 'comp.x')
        prob.setup(check=False)
        testlogger = TestLogger()
        data = prob.check_partials(logger=testlogger)
        lines = testlogger.get('info')
        # compute() really does depend on 'z', so the (falsely) declared
        # dependent=False pairing must still be reported.
        self.assertTrue("  comp: 'g' wrt 'z'\n" in lines)
        self.assertTrue(('g', 'z') in data['comp'])
        self.assertTrue("  comp: 'g' wrt 'x'\n" in lines)
        self.assertTrue(('g', 'x') in data['comp'])
    def test_set_step_on_comp(self):
        """Per-component FD step set via set_check_partial_options is honored."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        comp.set_check_partial_options(wrt='*', step=1e-2)
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True)
        # This will fail unless you set the check_step.
        x_error = data['comp']['f_xy', 'x']['rel error']
        self.assertLess(x_error.forward, 1e-5)
        self.assertLess(x_error.reverse, 1e-5)
    def test_set_step_global(self):
        """Global FD step passed through check_partials global_options is honored."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        opts = {'step' : 1e-2}
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True, global_options=opts)
        # This will fail unless you set the global step.
        x_error = data['comp']['f_xy', 'x']['rel error']
        self.assertLess(x_error.forward, 1e-5)
        self.assertLess(x_error.reverse, 1e-5)
    def test_complex_step_not_allocated(self):
        """Requesting a CS check without force_alloc_complex raises a clear error."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        comp.set_check_partial_options(wrt='*', method='cs')
        prob.setup(check=False)
        prob.run_model()
        with self.assertRaises(RuntimeError) as context:
            data = prob.check_partials(suppress_output=True)
        msg = 'In order to check partials with complex step, you need to set ' + \
              '"force_alloc_complex" to True during setup.'
        self.assertEqual(str(context.exception), msg)
    def test_set_method_on_comp(self):
        """Per-component complex-step check method is honored."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        comp.set_check_partial_options(wrt='*', method='cs')
        prob.setup(check=False, force_alloc_complex=True)
        prob.run_model()
        data = prob.check_partials(suppress_output=True)
        x_error = data['comp']['f_xy', 'x']['rel error']
        self.assertLess(x_error.forward, 1e-5)
        self.assertLess(x_error.reverse, 1e-5)
    def test_set_method_global(self):
        """Global complex-step check method via global_options is honored."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        opts = {'method' : 'cs'}
        prob.setup(check=False, force_alloc_complex=True)
        prob.run_model()
        data = prob.check_partials(suppress_output=True, global_options=opts)
        x_error = data['comp']['f_xy', 'x']['rel error']
        self.assertLess(x_error.forward, 1e-5)
        self.assertLess(x_error.reverse, 1e-5)
    def test_set_form_on_comp(self):
        """Per-component central-difference form is honored."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        comp.set_check_partial_options(wrt='*', form='central')
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True)
        # This will fail unless you set the check_step.
        x_error = data['comp']['f_xy', 'x']['rel error']
        self.assertLess(x_error.forward, 1e-3)
        self.assertLess(x_error.reverse, 1e-3)
    def test_set_form_global(self):
        """Global central-difference form via global_options is honored."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        opts = {'form' : 'central'}
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True, global_options=opts)
        # This will fail unless you set the check_step.
        x_error = data['comp']['f_xy', 'x']['rel error']
        self.assertLess(x_error.forward, 1e-3)
        self.assertLess(x_error.reverse, 1e-3)
    def test_set_step_calc_on_comp(self):
        """Per-component relative step sizing (step_calc='rel') is honored."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        comp.set_check_partial_options(wrt='*', step_calc='rel')
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True)
        # This will fail unless you set the check_step.
        x_error = data['comp']['f_xy', 'x']['rel error']
        self.assertLess(x_error.forward, 3e-3)
        self.assertLess(x_error.reverse, 3e-3)
    def test_set_step_calc_global(self):
        """Global relative step sizing via global_options is honored."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        opts = {'step_calc' : 'rel'}
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True, global_options=opts)
        # This will fail unless you set the global step.
        x_error = data['comp']['f_xy', 'x']['rel error']
        self.assertLess(x_error.forward, 3e-3)
        self.assertLess(x_error.reverse, 3e-3)
    def test_set_check_option_precedence(self):
        # Test that we omit derivs declared with dependent=False
        class SimpleComp1(ExplicitComponent):
            def setup(self):
                self.add_input('ab', 13.0)
                self.add_input('aba', 13.0)
                self.add_input('ba', 13.0)
                self.add_output('y', 13.0)
                self.declare_partials(of='*', wrt='*')
            def compute(self, inputs, outputs):
                ab = inputs['ab']
                aba = inputs['aba']
                ba = inputs['ba']
                outputs['y'] = ab**3 + aba**3 + ba**3
            def compute_partials(self, inputs, partials):
                ab = inputs['ab']
                aba = inputs['aba']
                ba = inputs['ba']
                partials['y', 'ab'] = 3.0*ab**2
                partials['y', 'aba'] = 3.0*aba**2
                partials['y', 'ba'] = 3.0*ba**2
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('ab', 13.0))
        prob.model.add_subsystem('p2', IndepVarComp('aba', 13.0))
        prob.model.add_subsystem('p3', IndepVarComp('ba', 13.0))
        comp = prob.model.add_subsystem('comp', SimpleComp1())
        prob.model.connect('p1.ab', 'comp.ab')
        prob.model.connect('p2.aba', 'comp.aba')
        prob.model.connect('p3.ba', 'comp.ba')
        prob.setup(check=False)
        # 'aba' matches both wildcards; the later call should win.
        comp.set_check_partial_options(wrt='a*', step=1e-2)
        comp.set_check_partial_options(wrt='*a', step=1e-4)
        prob.run_model()
        data = prob.check_partials(suppress_output=True)
        # Note 'aba' gets the better value from the second options call with the *a wildcard.
        assert_rel_error(self, data['comp']['y', 'ab']['J_fd'][0][0], 507.3901, 1e-4)
        assert_rel_error(self, data['comp']['y', 'aba']['J_fd'][0][0], 507.0039, 1e-4)
        assert_rel_error(self, data['comp']['y', 'ba']['J_fd'][0][0], 507.0039, 1e-4)
    def test_option_printing(self):
        # Make sure we print the approximation type for each variable.
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        comp.set_check_partial_options(wrt='x', method='cs')
        comp.set_check_partial_options(wrt='y', form='central')
        prob.setup(check=False, force_alloc_complex=True)
        prob.run_model()
        testlogger = TestLogger()
        prob.check_partials()
        totals = prob.check_partials(logger=testlogger)
        lines = testlogger.get('info')
        # NOTE(review): these line indices are tied to the exact print format.
        self.assertTrue('cs' in lines[6], msg='Did you change the format for printing check derivs?')
        self.assertTrue('fd' in lines[28], msg='Did you change the format for printing check derivs?')
class TestCheckPartialsFeature(unittest.TestCase):
    """Feature-documentation tests for Problem.check_partials and its options."""
    def test_feature_incorrect_jacobian(self):
        """Wrong analytic partials produce nonzero abs/rel errors in the data dict."""
        class MyComp(ExplicitComponent):
            def setup(self):
                self.add_input('x1', 3.0)
                self.add_input('x2', 5.0)
                self.add_output('y', 5.5)
                self.declare_partials(of='*', wrt='*')
            def compute(self, inputs, outputs):
                """ Doesn't do much. """
                outputs['y'] = 3.0*inputs['x1'] + 4.0*inputs['x2']
            def compute_partials(self, inputs, partials):
                """Intentionally incorrect derivative."""
                J = partials
                J['y', 'x1'] = np.array([4.0])
                J['y', 'x2'] = np.array([40])
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x1', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('x2', 5.0))
        prob.model.add_subsystem('comp', MyComp())
        prob.model.connect('p1.x1', 'comp.x1')
        prob.model.connect('p2.x2', 'comp.x2')
        prob.set_solver_print(level=0)
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials()
        # Analytic 4 vs true 3 -> abs error 1; analytic 40 vs true 4 -> rel error 9.
        x1_error = data['comp']['y', 'x1']['abs error']
        assert_rel_error(self, x1_error.forward, 1., 1e-8)
        assert_rel_error(self, x1_error.reverse, 1., 1e-8)
        x2_error = data['comp']['y', 'x2']['rel error']
        assert_rel_error(self, x2_error.forward, 9., 1e-8)
        assert_rel_error(self, x2_error.reverse, 9., 1e-8)
    def test_feature_check_partials_suppress(self):
        """suppress_output=True returns the raw data dict without printing."""
        class MyComp(ExplicitComponent):
            def setup(self):
                self.add_input('x1', 3.0)
                self.add_input('x2', 5.0)
                self.add_output('y', 5.5)
                self.declare_partials(of='*', wrt='*')
            def compute(self, inputs, outputs):
                """ Doesn't do much. """
                outputs['y'] = 3.0*inputs['x1'] + 4.0*inputs['x2']
            def compute_partials(self, inputs, partials):
                """Intentionally incorrect derivative."""
                J = partials
                J['y', 'x1'] = np.array([4.0])
                J['y', 'x2'] = np.array([40])
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x1', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('x2', 5.0))
        prob.model.add_subsystem('comp', MyComp())
        prob.model.connect('p1.x1', 'comp.x1')
        prob.model.connect('p2.x2', 'comp.x2')
        prob.set_solver_print(level=0)
        prob.setup(check=False)
        prob.run_model()
        data = prob.check_partials(suppress_output=True)
        print(data)
    def test_set_step_on_comp(self):
        """Feature demo: per-component FD step for a badly scaled component."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.add_subsystem('comp2', ParaboloidMatVec())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.model.connect('comp.f_xy', 'comp2.x')
        prob.set_solver_print(level=0)
        comp.set_check_partial_options(wrt='*', step=1e-2)
        prob.setup()
        prob.run_model()
        prob.check_partials()
    def test_set_step_global(self):
        """Feature demo: global FD step via global_options."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.add_subsystem('comp2', ParaboloidMatVec())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.model.connect('comp.f_xy', 'comp2.x')
        prob.set_solver_print(level=0)
        opts = {'step' : 1e-2}
        prob.setup()
        prob.run_model()
        prob.check_partials(global_options=opts)
    def test_set_method_on_comp(self):
        """Feature demo: per-component complex-step check."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.add_subsystem('comp2', ParaboloidMatVec())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.model.connect('comp.f_xy', 'comp2.x')
        prob.set_solver_print(level=0)
        comp.set_check_partial_options(wrt='*', method='cs')
        prob.setup(force_alloc_complex=True)
        prob.run_model()
        prob.check_partials()
    def test_set_method_global(self):
        """Feature demo: global complex-step check via global_options."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.add_subsystem('comp2', ParaboloidMatVec())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.model.connect('comp.f_xy', 'comp2.x')
        prob.set_solver_print(level=0)
        opts = {'method' : 'cs'}
        prob.setup(force_alloc_complex=True)
        prob.run_model()
        prob.check_partials(global_options=opts)
    def test_set_form_on_comp(self):
        """Feature demo: per-component central-difference form."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.add_subsystem('comp2', ParaboloidMatVec())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.model.connect('comp.f_xy', 'comp2.x')
        prob.set_solver_print(level=0)
        comp.set_check_partial_options(wrt='*', form='central')
        prob.setup()
        prob.run_model()
        prob.check_partials()
    def test_set_form_global(self):
        """Feature demo: global central-difference form via global_options."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.add_subsystem('comp2', ParaboloidMatVec())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.model.connect('comp.f_xy', 'comp2.x')
        prob.set_solver_print(level=0)
        opts = {'form' : 'central'}
        prob.setup()
        prob.run_model()
        prob.check_partials(global_options=opts)
    def test_set_step_calc_on_comp(self):
        """Feature demo: per-component relative step sizing."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.add_subsystem('comp2', ParaboloidMatVec())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.model.connect('comp.f_xy', 'comp2.x')
        prob.set_solver_print(level=0)
        comp.set_check_partial_options(wrt='*', step_calc='rel')
        prob.setup()
        prob.run_model()
        prob.check_partials()
    def test_set_step_calc_global(self):
        """Feature demo: global relative step sizing via global_options."""
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem('p1', IndepVarComp('x', 3.0))
        prob.model.add_subsystem('p2', IndepVarComp('y', 5.0))
        comp = prob.model.add_subsystem('comp', ParaboloidTricky())
        prob.model.connect('p1.x', 'comp.x')
        prob.model.connect('p2.y', 'comp.y')
        prob.set_solver_print(level=0)
        opts = {'step_calc' : 'rel'}
        prob.setup()
        prob.run_model()
        prob.check_partials(global_options=opts)
class TestProblemCheckTotals(unittest.TestCase):
    def test_cs(self):
        """check_totals with complex step and an enlarged step on the Sellar model."""
        prob = Problem()
        prob.model = SellarDerivatives()
        prob.model.nonlinear_solver = NonlinearBlockGS()
        prob.model.add_design_var('x', lower=-100, upper=100)
        prob.model.add_design_var('z', lower=-100, upper=100)
        prob.model.add_objective('obj')
        prob.model.add_constraint('con1', upper=0.0)
        prob.model.add_constraint('con2', upper=0.0)
        prob.set_solver_print(level=0)
        prob.setup(force_alloc_complex=True)
        # We don't call run_driver() here because we don't
        # actually want the optimizer to run
        prob.run_model()
        # check derivatives with complex step and a larger step size.
        testlogger = TestLogger()
        totals = prob.check_totals(method='cs', step=1.0e-1, logger=testlogger)
        lines = testlogger.get('info')
        # NOTE(review): indices 4/5 are tied to the exact check_totals print format.
        self.assertTrue('9.80614' in lines[4], "'9.80614' not found in '%s'" % lines[4])
        self.assertTrue('9.80614' in lines[5], "'9.80614' not found in '%s'" % lines[5])
        assert_rel_error(self, totals['con_cmp2.con2', 'px.x']['J_fwd'], [[0.09692762]], 1e-5)
        assert_rel_error(self, totals['con_cmp2.con2', 'px.x']['J_fd'], [[0.09692762]], 1e-5)
def test_desvar_as_obj(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_objective('x')
prob.set_solver_print(level=0)
prob.setup(force_alloc_complex=True)
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
# check derivatives with complex step and a larger step size.
testlogger = TestLogger()
totals = prob.check_totals(method='cs', step=1.0e-1, logger=testlogger)
lines = testlogger.get('info')
self.assertTrue('1.000' in lines[4])
self.assertTrue('1.000' in lines[5])
self.assertTrue('0.000' in lines[6])
self.assertTrue('0.000' in lines[8])
assert_rel_error(self, totals['px.x', 'px.x']['J_fwd'], [[1.0]], 1e-5)
assert_rel_error(self, totals['px.x', 'px.x']['J_fd'], [[1.0]], 1e-5)
def test_desvar_and_response_with_indices(self):
class ArrayComp2D(ExplicitComponent):
"""
A fairly simple array component.
"""
def setup(self):
self.JJ = np.array([[1.0, 3.0, -2.0, 7.0],
[6.0, 2.5, 2.0, 4.0],
[-1.0, 0.0, 8.0, 1.0],
[1.0, 4.0, -5.0, 6.0]])
# Params
self.add_input('x1', np.zeros([4]))
# Unknowns
self.add_output('y1', np.zeros([4]))
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
"""
Execution.
"""
outputs['y1'] = self.JJ.dot(inputs['x1'])
def compute_partials(self, inputs, partials):
"""
Analytical derivatives.
"""
partials[('y1', 'x1')] = self.JJ
prob = Problem()
prob.model = model = Group()
model.add_subsystem('x_param1', IndepVarComp('x1', np.ones((4))),
promotes=['x1'])
model.add_subsystem('mycomp', ArrayComp2D(), promotes=['x1', 'y1'])
model.add_design_var('x1', indices=[1, 3])
model.add_constraint('y1', indices=[0, 2])
prob.set_solver_print(level=0)
prob.setup(check=False, mode='fwd')
prob.run_model()
Jbase = model.get_subsystem('mycomp').JJ
of = ['y1']
wrt = ['x1']
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_rel_error(self, J['y1', 'x1'][0][0], Jbase[0, 1], 1e-8)
assert_rel_error(self, J['y1', 'x1'][0][1], Jbase[0, 3], 1e-8)
assert_rel_error(self, J['y1', 'x1'][1][0], Jbase[2, 1], 1e-8)
assert_rel_error(self, J['y1', 'x1'][1][1], Jbase[2, 3], 1e-8)
totals = prob.check_totals()
jac = totals[('mycomp.y1', 'x_param1.x1')]['J_fd']
assert_rel_error(self, jac[0][0], Jbase[0, 1], 1e-8)
assert_rel_error(self, jac[0][1], Jbase[0, 3], 1e-8)
assert_rel_error(self, jac[1][0], Jbase[2, 1], 1e-8)
assert_rel_error(self, jac[1][1], Jbase[2, 3], 1e-8)
# Objective instead
prob = Problem()
prob.model = model = Group()
model.add_subsystem('x_param1', IndepVarComp('x1', np.ones((4))),
promotes=['x1'])
model.add_subsystem('mycomp', ArrayComp2D(), promotes=['x1', 'y1'])
model.add_design_var('x1', indices=[1, 3])
model.add_objective('y1', index=1)
prob.set_solver_print(level=0)
prob.setup(check=False, mode='fwd')
prob.run_model()
Jbase = model.get_subsystem('mycomp').JJ
of = ['y1']
wrt = ['x1']
J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
assert_rel_error(self, J['y1', 'x1'][0][0], Jbase[1, 1], 1e-8)
assert_rel_error(self, J['y1', 'x1'][0][1], Jbase[1, 3], 1e-8)
totals = prob.check_totals()
jac = totals[('mycomp.y1', 'x_param1.x1')]['J_fd']
assert_rel_error(self, jac[0][0], Jbase[1, 1], 1e-8)
assert_rel_error(self, jac[0][1], Jbase[1, 3], 1e-8)
def test_cs_suppress(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('obj')
prob.model.add_constraint('con1', upper=0.0)
prob.model.add_constraint('con2', upper=0.0)
prob.set_solver_print(level=0)
prob.setup(force_alloc_complex=True)
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
# check derivatives with complex step and a larger step size.
testlogger = TestLogger()
totals = prob.check_totals(method='cs', step=1.0e-1, logger=testlogger,
suppress_output=True)
data = totals['con_cmp2.con2', 'px.x']
self.assertTrue('J_fwd' in data)
self.assertTrue('rel error' in data)
self.assertTrue('abs error' in data)
self.assertTrue('magnitude' in data)
lines = testlogger.get('info')
self.assertEqual(len(lines), 0)
def test_two_desvar_as_con(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_design_var('x', lower=-100, upper=100)
prob.model.add_constraint('x', upper=0.0)
prob.model.add_constraint('z', upper=0.0)
prob.set_solver_print(level=0)
prob.setup(check=False)
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
testlogger = TestLogger()
totals = prob.check_totals(method='fd', step=1.0e-1, logger=testlogger)
lines = testlogger.get('info')
assert_rel_error(self, totals['px.x', 'px.x']['J_fwd'], [[1.0]], 1e-5)
assert_rel_error(self, totals['px.x', 'px.x']['J_fd'], [[1.0]], 1e-5)
assert_rel_error(self, totals['pz.z', 'pz.z']['J_fwd'], np.eye(2), 1e-5)
assert_rel_error(self, totals['pz.z', 'pz.z']['J_fd'], np.eye(2), 1e-5)
assert_rel_error(self, totals['px.x', 'pz.z']['J_fwd'], [[0.0, 0.0]], 1e-5)
assert_rel_error(self, totals['px.x', 'pz.z']['J_fd'], [[0.0, 0.0]], 1e-5)
assert_rel_error(self, totals['pz.z', 'px.x']['J_fwd'], [[0.0], [0.0]], 1e-5)
assert_rel_error(self, totals['pz.z', 'px.x']['J_fd'], [[0.0], [0.0]], 1e-5)
def test_full_con_with_index_desvar(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('z', lower=-100, upper=100, indices=[1])
prob.model.add_constraint('z', upper=0.0)
prob.set_solver_print(level=0)
prob.setup(check=False)
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
testlogger = TestLogger()
totals = prob.check_totals(method='fd', step=1.0e-1, logger=testlogger)
lines = testlogger.get('info')
assert_rel_error(self, totals['pz.z', 'pz.z']['J_fwd'], [[0.0], [1.0]], 1e-5)
assert_rel_error(self, totals['pz.z', 'pz.z']['J_fd'], [[0.0], [1.0]], 1e-5)
def test_full_desvar_with_index_con(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_constraint('z', upper=0.0, indices=[1])
prob.set_solver_print(level=0)
prob.setup(check=False)
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
testlogger = TestLogger()
totals = prob.check_totals(method='fd', step=1.0e-1, logger=testlogger)
lines = testlogger.get('info')
assert_rel_error(self, totals['pz.z', 'pz.z']['J_fwd'], [[0.0, 1.0]], 1e-5)
assert_rel_error(self, totals['pz.z', 'pz.z']['J_fd'], [[0.0, 1.0]], 1e-5)
def test_full_desvar_with_index_obj(self):
prob = Problem()
prob.model = SellarDerivatives()
prob.model.nonlinear_solver = NonlinearBlockGS()
prob.model.add_design_var('z', lower=-100, upper=100)
prob.model.add_objective('z', index=1)
prob.set_solver_print(level=0)
prob.setup(check=False)
# We don't call run_driver() here because we don't
# actually want the optimizer to run
prob.run_model()
testlogger = TestLogger()
totals = prob.check_totals(method='fd', step=1.0e-1, logger=testlogger)
lines = testlogger.get('info')
assert_rel_error(self, totals['pz.z', 'pz.z']['J_fwd'], [[0.0, 1.0]], 1e-5)
assert_rel_error(self, totals['pz.z', 'pz.z']['J_fd'], [[0.0, 1.0]], 1e-5)
if __name__ == "__main__":
    # Run this module's test cases via the unittest CLI runner.
    unittest.main()
| [
"numpy.prod",
"openmdao.test_suite.components.paraboloid_mat_vec.ParaboloidMatVec",
"openmdao.api.IndepVarComp",
"openmdao.api.Group",
"numpy.array",
"numpy.linalg.norm",
"unittest.main",
"numpy.arange",
"numpy.isscalar",
"openmdao.api.NonlinearBlockGS",
"openmdao.api.Problem",
"openmdao.devto... | [((50379, 50394), 'unittest.main', 'unittest.main', ([], {}), '()\n', (50392, 50394), False, 'import unittest\n'), ((2295, 2304), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (2302, 2304), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((2326, 2333), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (2331, 2333), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((2729, 2741), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (2739, 2741), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((4018, 4027), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (4025, 4027), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((4178, 4190), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (4188, 4190), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((5472, 5481), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (5479, 5481), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((5632, 5644), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (5642, 5644), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((6688, 6697), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (6695, 6697), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((6719, 6726), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (6724, 6726), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, 
NonlinearBlockGS\n'), ((8807, 8816), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (8814, 8816), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((8843, 8850), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (8848, 8850), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((9217, 9232), 'six.iteritems', 'iteritems', (['data'], {}), '(data)\n', (9226, 9232), False, 'from six import iteritems\n'), ((10454, 10463), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (10461, 10463), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((10490, 10497), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (10495, 10497), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((10793, 10811), 'openmdao.api.NonLinearRunOnce', 'NonLinearRunOnce', ([], {}), '()\n', (10809, 10811), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((10917, 10932), 'six.iteritems', 'iteritems', (['data'], {}), '(data)\n', (10926, 10932), False, 'from six import iteritems\n'), ((13066, 13075), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (13073, 13075), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((13603, 13612), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (13609, 13612), True, 'import numpy as np\n'), ((13621, 13695), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['pt']['bar', 'foo']['J_fwd']", 'identity', '(1e-15)'], {}), "(self, data['pt']['bar', 'foo']['J_fwd'], identity, 1e-15)\n", (13637, 13695), False, 
'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((13706, 13780), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['pt']['bar', 'foo']['J_rev']", 'identity', '(1e-15)'], {}), "(self, data['pt']['bar', 'foo']['J_rev'], identity, 1e-15)\n", (13722, 13780), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((13791, 13864), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['pt']['bar', 'foo']['J_fd']", 'identity', '(1e-09)'], {}), "(self, data['pt']['bar', 'foo']['J_fd'], identity, 1e-09)\n", (13807, 13864), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((13875, 13952), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['pt2']['bar2', 'foo2']['J_fwd']", 'identity', '(1e-15)'], {}), "(self, data['pt2']['bar2', 'foo2']['J_fwd'], identity, 1e-15)\n", (13891, 13952), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((13963, 14040), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['pt2']['bar2', 'foo2']['J_rev']", 'identity', '(1e-15)'], {}), "(self, data['pt2']['bar2', 'foo2']['J_rev'], identity, 1e-15)\n", (13979, 14040), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((14051, 14127), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['pt2']['bar2', 'foo2']['J_fd']", 'identity', '(1e-09)'], {}), "(self, data['pt2']['bar2', 'foo2']['J_fd'], identity, 1e-09)\n", (14067, 14127), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((14186, 14195), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (14193, 14195), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((14217, 14224), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (14222, 
14224), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((14692, 14707), 'six.iteritems', 'iteritems', (['data'], {}), '(data)\n', (14701, 14707), False, 'from six import iteritems\n'), ((15298, 15374), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['comp']['f_xy', 'x']['J_fwd'][0][0]", '(5.0)', '(1e-06)'], {}), "(self, data['comp']['f_xy', 'x']['J_fwd'][0][0], 5.0, 1e-06)\n", (15314, 15374), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((15384, 15460), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['comp']['f_xy', 'x']['J_rev'][0][0]", '(5.0)', '(1e-06)'], {}), "(self, data['comp']['f_xy', 'x']['J_rev'][0][0], 5.0, 1e-06)\n", (15400, 15460), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((15470, 15547), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['comp']['f_xy', 'y']['J_fwd'][0][0]", '(21.0)', '(1e-06)'], {}), "(self, data['comp']['f_xy', 'y']['J_fwd'][0][0], 21.0, 1e-06)\n", (15486, 15547), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((15557, 15634), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['comp']['f_xy', 'y']['J_rev'][0][0]", '(21.0)', '(1e-06)'], {}), "(self, data['comp']['f_xy', 'y']['J_rev'][0][0], 21.0, 1e-06)\n", (15573, 15634), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((15693, 15702), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (15700, 15702), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((15724, 15731), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (15729, 15731), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, 
ImplicitComponent, NonlinearBlockGS\n'), ((16115, 16130), 'six.iteritems', 'iteritems', (['data'], {}), '(data)\n', (16124, 16130), False, 'from six import iteritems\n'), ((17643, 17652), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (17650, 17652), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((17674, 17681), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (17679, 17681), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((19167, 19176), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (19174, 19176), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((19198, 19205), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (19203, 19205), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((19558, 19570), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (19568, 19570), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((20613, 20622), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (20620, 20622), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((20644, 20651), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (20649, 20651), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((21004, 21016), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (21014, 21016), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((21385, 21394), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (21392, 21394), False, 'from 
openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((21416, 21423), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (21421, 21423), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((22186, 22195), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (22193, 22195), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((22217, 22224), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (22222, 22224), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((22992, 23001), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (22999, 23001), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((23023, 23030), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (23028, 23030), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((23847, 23856), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (23854, 23856), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((23878, 23885), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (23883, 23885), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((24622, 24631), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (24629, 24631), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((24653, 24660), 'openmdao.api.Group', 'Group', ([], {}), '()\n', 
(24658, 24660), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((25389, 25398), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (25396, 25398), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((25420, 25427), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (25425, 25427), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((26195, 26204), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (26202, 26204), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((26226, 26233), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (26231, 26233), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((27000, 27009), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (27007, 27009), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((27031, 27038), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (27036, 27038), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((27812, 27821), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (27819, 27821), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((27843, 27850), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (27848, 27850), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((29515, 29524), 'openmdao.api.Problem', 
'Problem', ([], {}), '()\n', (29522, 29524), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((29546, 29553), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (29551, 29553), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((30299, 30378), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['comp']['y', 'ab']['J_fd'][0][0]", '(507.3901)', '(0.0001)'], {}), "(self, data['comp']['y', 'ab']['J_fd'][0][0], 507.3901, 0.0001)\n", (30315, 30378), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((30385, 30470), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['comp']['y', 'aba']['J_fd'][0][0]", '(507.0039)', '(0.0001)'], {}), "(self, data['comp']['y', 'aba']['J_fd'][0][0], 507.0039, 0.0001\n )\n", (30401, 30470), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((30472, 30551), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "data['comp']['y', 'ba']['J_fd'][0][0]", '(507.0039)', '(0.0001)'], {}), "(self, data['comp']['y', 'ba']['J_fd'][0][0], 507.0039, 0.0001)\n", (30488, 30551), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((30673, 30682), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (30680, 30682), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((30704, 30711), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (30709, 30711), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((31270, 31282), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (31280, 31282), False, 'from 
openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((32380, 32389), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (32387, 32389), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((32411, 32418), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (32416, 32418), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((32895, 32947), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'x1_error.forward', '(1.0)', '(1e-08)'], {}), '(self, x1_error.forward, 1.0, 1e-08)\n', (32911, 32947), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((32954, 33006), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'x1_error.reverse', '(1.0)', '(1e-08)'], {}), '(self, x1_error.reverse, 1.0, 1e-08)\n', (32970, 33006), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((33070, 33122), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'x2_error.forward', '(9.0)', '(1e-08)'], {}), '(self, x2_error.forward, 9.0, 1e-08)\n', (33086, 33122), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((33129, 33181), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'x2_error.reverse', '(9.0)', '(1e-08)'], {}), '(self, x2_error.reverse, 9.0, 1e-08)\n', (33145, 33181), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((33898, 33907), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (33905, 33907), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((33929, 33936), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (33934, 33936), False, 'from openmdao.api import Group, 
ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((34441, 34450), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (34448, 34450), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((34472, 34479), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (34477, 34479), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((35109, 35118), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (35116, 35118), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((35140, 35147), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (35145, 35147), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((35771, 35780), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (35778, 35780), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((35802, 35809), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (35807, 35809), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((36467, 36476), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (36474, 36476), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((36498, 36505), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (36503, 36505), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((37153, 37162), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (37160, 37162), False, 'from 
openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((37184, 37191), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (37189, 37191), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((37826, 37835), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (37833, 37835), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((37857, 37864), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (37862, 37864), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((38496, 38505), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (38503, 38505), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((38527, 38534), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (38532, 38534), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((39175, 39184), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (39182, 39184), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((39206, 39213), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (39211, 39213), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((39766, 39775), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (39773, 39775), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((39797, 39816), 
'openmdao.test_suite.components.sellar.SellarDerivatives', 'SellarDerivatives', ([], {}), '()\n', (39814, 39816), False, 'from openmdao.test_suite.components.sellar import SellarDerivatives\n'), ((39855, 39873), 'openmdao.api.NonlinearBlockGS', 'NonlinearBlockGS', ([], {}), '()\n', (39871, 39873), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((40453, 40465), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (40463, 40465), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((40774, 40866), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['con_cmp2.con2', 'px.x']['J_fwd']", '[[0.09692762]]', '(1e-05)'], {}), "(self, totals['con_cmp2.con2', 'px.x']['J_fwd'], [[\n 0.09692762]], 1e-05)\n", (40790, 40866), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((40869, 40960), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['con_cmp2.con2', 'px.x']['J_fd']", '[[0.09692762]]', '(1e-05)'], {}), "(self, totals['con_cmp2.con2', 'px.x']['J_fd'], [[\n 0.09692762]], 1e-05)\n", (40885, 40960), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((41005, 41014), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (41012, 41014), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((41036, 41055), 'openmdao.test_suite.components.sellar.SellarDerivatives', 'SellarDerivatives', ([], {}), '()\n', (41053, 41055), False, 'from openmdao.test_suite.components.sellar import SellarDerivatives\n'), ((41094, 41112), 'openmdao.api.NonlinearBlockGS', 'NonlinearBlockGS', ([], {}), '()\n', (41110, 41112), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, 
NonlinearBlockGS\n'), ((41522, 41534), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (41532, 41534), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((41845, 41916), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['px.x', 'px.x']['J_fwd']", '[[1.0]]', '(1e-05)'], {}), "(self, totals['px.x', 'px.x']['J_fwd'], [[1.0]], 1e-05)\n", (41861, 41916), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((41924, 41994), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['px.x', 'px.x']['J_fd']", '[[1.0]]', '(1e-05)'], {}), "(self, totals['px.x', 'px.x']['J_fd'], [[1.0]], 1e-05)\n", (41940, 41994), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((43032, 43041), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (43039, 43041), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((43071, 43078), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (43076, 43078), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((43662, 43725), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "J['y1', 'x1'][0][0]", 'Jbase[0, 1]', '(1e-08)'], {}), "(self, J['y1', 'x1'][0][0], Jbase[0, 1], 1e-08)\n", (43678, 43725), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((43733, 43796), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "J['y1', 'x1'][0][1]", 'Jbase[0, 3]', '(1e-08)'], {}), "(self, J['y1', 'x1'][0][1], Jbase[0, 3], 1e-08)\n", (43749, 43796), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((43804, 43867), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "J['y1', 
'x1'][1][0]", 'Jbase[2, 1]', '(1e-08)'], {}), "(self, J['y1', 'x1'][1][0], Jbase[2, 1], 1e-08)\n", (43820, 43867), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((43875, 43938), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "J['y1', 'x1'][1][1]", 'Jbase[2, 3]', '(1e-08)'], {}), "(self, J['y1', 'x1'][1][1], Jbase[2, 3], 1e-08)\n", (43891, 43938), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((44043, 44096), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'jac[0][0]', 'Jbase[0, 1]', '(1e-08)'], {}), '(self, jac[0][0], Jbase[0, 1], 1e-08)\n', (44059, 44096), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((44104, 44157), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'jac[0][1]', 'Jbase[0, 3]', '(1e-08)'], {}), '(self, jac[0][1], Jbase[0, 3], 1e-08)\n', (44120, 44157), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((44165, 44218), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'jac[1][0]', 'Jbase[2, 1]', '(1e-08)'], {}), '(self, jac[1][0], Jbase[2, 1], 1e-08)\n', (44181, 44218), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((44226, 44279), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'jac[1][1]', 'Jbase[2, 3]', '(1e-08)'], {}), '(self, jac[1][1], Jbase[2, 3], 1e-08)\n', (44242, 44279), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((44324, 44333), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (44331, 44333), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((44363, 44370), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (44368, 44370), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, 
Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((44946, 45009), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "J['y1', 'x1'][0][0]", 'Jbase[1, 1]', '(1e-08)'], {}), "(self, J['y1', 'x1'][0][0], Jbase[1, 1], 1e-08)\n", (44962, 45009), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((45017, 45080), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "J['y1', 'x1'][0][1]", 'Jbase[1, 3]', '(1e-08)'], {}), "(self, J['y1', 'x1'][0][1], Jbase[1, 3], 1e-08)\n", (45033, 45080), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((45185, 45238), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'jac[0][0]', 'Jbase[1, 1]', '(1e-08)'], {}), '(self, jac[0][0], Jbase[1, 1], 1e-08)\n', (45201, 45238), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((45246, 45299), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'jac[0][1]', 'Jbase[1, 3]', '(1e-08)'], {}), '(self, jac[0][1], Jbase[1, 3], 1e-08)\n', (45262, 45299), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((45347, 45356), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (45354, 45356), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((45378, 45397), 'openmdao.test_suite.components.sellar.SellarDerivatives', 'SellarDerivatives', ([], {}), '()\n', (45395, 45397), False, 'from openmdao.test_suite.components.sellar import SellarDerivatives\n'), ((45436, 45454), 'openmdao.api.NonlinearBlockGS', 'NonlinearBlockGS', ([], {}), '()\n', (45452, 45454), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((46034, 46046), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', 
(46044, 46046), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((46542, 46551), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (46549, 46551), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((46573, 46592), 'openmdao.test_suite.components.sellar.SellarDerivatives', 'SellarDerivatives', ([], {}), '()\n', (46590, 46592), False, 'from openmdao.test_suite.components.sellar import SellarDerivatives\n'), ((46631, 46649), 'openmdao.api.NonlinearBlockGS', 'NonlinearBlockGS', ([], {}), '()\n', (46647, 46649), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((47100, 47112), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (47110, 47112), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((47242, 47313), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['px.x', 'px.x']['J_fwd']", '[[1.0]]', '(1e-05)'], {}), "(self, totals['px.x', 'px.x']['J_fwd'], [[1.0]], 1e-05)\n", (47258, 47313), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((47321, 47391), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['px.x', 'px.x']['J_fd']", '[[1.0]]', '(1e-05)'], {}), "(self, totals['px.x', 'px.x']['J_fd'], [[1.0]], 1e-05)\n", (47337, 47391), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((47560, 47636), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['px.x', 'pz.z']['J_fwd']", '[[0.0, 0.0]]', '(1e-05)'], {}), "(self, totals['px.x', 'pz.z']['J_fwd'], [[0.0, 0.0]], 1e-05)\n", (47576, 47636), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((47644, 47719), 'openmdao.devtools.testutil.assert_rel_error', 
'assert_rel_error', (['self', "totals['px.x', 'pz.z']['J_fd']", '[[0.0, 0.0]]', '(1e-05)'], {}), "(self, totals['px.x', 'pz.z']['J_fd'], [[0.0, 0.0]], 1e-05)\n", (47660, 47719), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((47727, 47805), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['pz.z', 'px.x']['J_fwd']", '[[0.0], [0.0]]', '(1e-05)'], {}), "(self, totals['pz.z', 'px.x']['J_fwd'], [[0.0], [0.0]], 1e-05)\n", (47743, 47805), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((47813, 47890), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['pz.z', 'px.x']['J_fd']", '[[0.0], [0.0]]', '(1e-05)'], {}), "(self, totals['pz.z', 'px.x']['J_fd'], [[0.0], [0.0]], 1e-05)\n", (47829, 47890), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((47953, 47962), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (47960, 47962), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((47984, 48003), 'openmdao.test_suite.components.sellar.SellarDerivatives', 'SellarDerivatives', ([], {}), '()\n', (48001, 48003), False, 'from openmdao.test_suite.components.sellar import SellarDerivatives\n'), ((48042, 48060), 'openmdao.api.NonlinearBlockGS', 'NonlinearBlockGS', ([], {}), '()\n', (48058, 48060), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((48412, 48424), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (48422, 48424), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((48554, 48632), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['pz.z', 'pz.z']['J_fwd']", '[[0.0], [1.0]]', '(1e-05)'], {}), "(self, totals['pz.z', 'pz.z']['J_fwd'], [[0.0], 
[1.0]], 1e-05)\n", (48570, 48632), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((48640, 48717), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['pz.z', 'pz.z']['J_fd']", '[[0.0], [1.0]]', '(1e-05)'], {}), "(self, totals['pz.z', 'pz.z']['J_fd'], [[0.0], [1.0]], 1e-05)\n", (48656, 48717), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((48780, 48789), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (48787, 48789), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((48811, 48830), 'openmdao.test_suite.components.sellar.SellarDerivatives', 'SellarDerivatives', ([], {}), '()\n', (48828, 48830), False, 'from openmdao.test_suite.components.sellar import SellarDerivatives\n'), ((48869, 48887), 'openmdao.api.NonlinearBlockGS', 'NonlinearBlockGS', ([], {}), '()\n', (48885, 48887), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((49239, 49251), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (49249, 49251), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((49381, 49457), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['pz.z', 'pz.z']['J_fwd']", '[[0.0, 1.0]]', '(1e-05)'], {}), "(self, totals['pz.z', 'pz.z']['J_fwd'], [[0.0, 1.0]], 1e-05)\n", (49397, 49457), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((49465, 49540), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['pz.z', 'pz.z']['J_fd']", '[[0.0, 1.0]]', '(1e-05)'], {}), "(self, totals['pz.z', 'pz.z']['J_fd'], [[0.0, 1.0]], 1e-05)\n", (49481, 49540), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((49603, 49612), 
'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (49610, 49612), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((49634, 49653), 'openmdao.test_suite.components.sellar.SellarDerivatives', 'SellarDerivatives', ([], {}), '()\n', (49651, 49653), False, 'from openmdao.test_suite.components.sellar import SellarDerivatives\n'), ((49692, 49710), 'openmdao.api.NonlinearBlockGS', 'NonlinearBlockGS', ([], {}), '()\n', (49708, 49710), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((50046, 50058), 'openmdao.devtools.testutil.TestLogger', 'TestLogger', ([], {}), '()\n', (50056, 50058), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((50188, 50264), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['pz.z', 'pz.z']['J_fwd']", '[[0.0, 1.0]]', '(1e-05)'], {}), "(self, totals['pz.z', 'pz.z']['J_fwd'], [[0.0, 1.0]], 1e-05)\n", (50204, 50264), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((50272, 50347), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', "totals['pz.z', 'pz.z']['J_fd']", '[[0.0, 1.0]]', '(1e-05)'], {}), "(self, totals['pz.z', 'pz.z']['J_fd'], [[0.0, 1.0]], 1e-05)\n", (50288, 50347), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((2374, 2397), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x1"""', '(3.0)'], {}), "('x1', 3.0)\n", (2386, 2397), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((2438, 2461), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x2"""', '(5.0)'], {}), "('x2', 5.0)\n", (2450, 2461), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, 
ImplicitComponent, NonlinearBlockGS\n'), ((6766, 6789), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x1"""', '(3.0)'], {}), "('x1', 3.0)\n", (6778, 6789), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((6830, 6853), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x2"""', '(5.0)'], {}), "('x2', 5.0)\n", (6842, 6853), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((7519, 7572), 'numpy.linalg.norm', 'np.linalg.norm', (["(data['comp']['y', 'x1']['J_fd'] - 3.0)"], {}), "(data['comp']['y', 'x1']['J_fd'] - 3.0)\n", (7533, 7572), True, 'import numpy as np\n'), ((7982, 8035), 'numpy.linalg.norm', 'np.linalg.norm', (["(data['comp']['y', 'x2']['J_fd'] - 4.0)"], {}), "(data['comp']['y', 'x2']['J_fd'] - 4.0)\n", (7996, 8035), True, 'import numpy as np\n'), ((8896, 8910), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (8908, 8910), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((9275, 9290), 'six.iteritems', 'iteritems', (['comp'], {}), '(comp)\n', (9284, 9290), False, 'from six import iteritems\n'), ((10543, 10557), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (10555, 10557), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((10975, 10990), 'six.iteritems', 'iteritems', (['comp'], {}), '(comp)\n', (10984, 10990), False, 'from six import iteritems\n'), ((13126, 13140), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (13138, 13140), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((14265, 14287), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', 
'(3.0)'], {}), "('x', 3.0)\n", (14277, 14287), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((14328, 14350), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (14340, 14350), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((14393, 14411), 'openmdao.test_suite.components.paraboloid_mat_vec.ParaboloidMatVec', 'ParaboloidMatVec', ([], {}), '()\n', (14409, 14411), False, 'from openmdao.test_suite.components.paraboloid_mat_vec import ParaboloidMatVec\n'), ((14750, 14765), 'six.iteritems', 'iteritems', (['comp'], {}), '(comp)\n', (14759, 14765), False, 'from six import iteritems\n'), ((15850, 15875), 'openmdao.test_suite.components.impl_comp_array.TestImplCompArrayMatVec', 'TestImplCompArrayMatVec', ([], {}), '()\n', (15873, 15875), False, 'from openmdao.test_suite.components.impl_comp_array import TestImplCompArrayMatVec\n'), ((16173, 16188), 'six.iteritems', 'iteritems', (['comp'], {}), '(comp)\n', (16182, 16188), False, 'from six import iteritems\n'), ((18217, 18233), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (18225, 18233), True, 'import numpy as np\n'), ((18303, 18319), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (18311, 18319), True, 'import numpy as np\n'), ((18389, 18405), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (18397, 18405), True, 'import numpy as np\n'), ((18475, 18491), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (18483, 18491), True, 'import numpy as np\n'), ((21464, 21486), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (21476, 21486), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((21527, 21549), 'openmdao.api.IndepVarComp', 
'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (21539, 21549), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((22265, 22287), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (22277, 22287), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((22328, 22350), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (22340, 22350), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((23071, 23093), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (23083, 23093), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((23134, 23156), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (23146, 23156), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((23926, 23948), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (23938, 23948), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((23989, 24011), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (24001, 24011), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((24701, 24723), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (24713, 24723), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, 
NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((24764, 24786), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (24776, 24786), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((25468, 25490), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (25480, 25490), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((25531, 25553), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (25543, 25553), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((26274, 26296), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (26286, 26296), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((26337, 26359), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (26349, 26359), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((27079, 27101), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (27091, 27101), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((27142, 27164), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (27154, 27164), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((27891, 27913), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 
3.0)\n", (27903, 27913), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((27954, 27976), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (27966, 27976), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((29594, 29618), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""ab"""', '(13.0)'], {}), "('ab', 13.0)\n", (29606, 29618), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((29659, 29684), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""aba"""', '(13.0)'], {}), "('aba', 13.0)\n", (29671, 29684), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((29725, 29749), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""ba"""', '(13.0)'], {}), "('ba', 13.0)\n", (29737, 29749), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((30752, 30774), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (30764, 30774), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((30815, 30837), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (30827, 30837), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((32459, 32482), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x1"""', '(3.0)'], {}), "('x1', 3.0)\n", (32471, 32482), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, 
ImplicitComponent, NonlinearBlockGS\n'), ((32523, 32546), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x2"""', '(5.0)'], {}), "('x2', 5.0)\n", (32535, 32546), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((33977, 34000), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x1"""', '(3.0)'], {}), "('x1', 3.0)\n", (33989, 34000), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((34041, 34064), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x2"""', '(5.0)'], {}), "('x2', 5.0)\n", (34053, 34064), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((34520, 34542), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (34532, 34542), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((34583, 34605), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (34595, 34605), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((34717, 34735), 'openmdao.test_suite.components.paraboloid_mat_vec.ParaboloidMatVec', 'ParaboloidMatVec', ([], {}), '()\n', (34733, 34735), False, 'from openmdao.test_suite.components.paraboloid_mat_vec import ParaboloidMatVec\n'), ((35188, 35210), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (35200, 35210), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((35251, 35273), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (35263, 35273), False, 'from 
openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((35385, 35403), 'openmdao.test_suite.components.paraboloid_mat_vec.ParaboloidMatVec', 'ParaboloidMatVec', ([], {}), '()\n', (35401, 35403), False, 'from openmdao.test_suite.components.paraboloid_mat_vec import ParaboloidMatVec\n'), ((35850, 35872), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (35862, 35872), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((35913, 35935), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (35925, 35935), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((36047, 36065), 'openmdao.test_suite.components.paraboloid_mat_vec.ParaboloidMatVec', 'ParaboloidMatVec', ([], {}), '()\n', (36063, 36065), False, 'from openmdao.test_suite.components.paraboloid_mat_vec import ParaboloidMatVec\n'), ((36546, 36568), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (36558, 36568), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((36609, 36631), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (36621, 36631), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((36743, 36761), 'openmdao.test_suite.components.paraboloid_mat_vec.ParaboloidMatVec', 'ParaboloidMatVec', ([], {}), '()\n', (36759, 36761), False, 'from openmdao.test_suite.components.paraboloid_mat_vec import ParaboloidMatVec\n'), ((37232, 37254), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (37244, 
37254), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((37295, 37317), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (37307, 37317), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((37429, 37447), 'openmdao.test_suite.components.paraboloid_mat_vec.ParaboloidMatVec', 'ParaboloidMatVec', ([], {}), '()\n', (37445, 37447), False, 'from openmdao.test_suite.components.paraboloid_mat_vec import ParaboloidMatVec\n'), ((37905, 37927), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (37917, 37927), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((37968, 37990), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (37980, 37990), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((38102, 38120), 'openmdao.test_suite.components.paraboloid_mat_vec.ParaboloidMatVec', 'ParaboloidMatVec', ([], {}), '()\n', (38118, 38120), False, 'from openmdao.test_suite.components.paraboloid_mat_vec import ParaboloidMatVec\n'), ((38575, 38597), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (38587, 38597), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((38638, 38660), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (38650, 38660), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((38772, 38790), 
'openmdao.test_suite.components.paraboloid_mat_vec.ParaboloidMatVec', 'ParaboloidMatVec', ([], {}), '()\n', (38788, 38790), False, 'from openmdao.test_suite.components.paraboloid_mat_vec import ParaboloidMatVec\n'), ((39254, 39276), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(3.0)'], {}), "('x', 3.0)\n", (39266, 39276), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((39317, 39339), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""y"""', '(5.0)'], {}), "('y', 5.0)\n", (39329, 39339), False, 'from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, NonLinearRunOnce, ImplicitComponent, NonlinearBlockGS\n'), ((47455, 47464), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (47461, 47464), True, 'import numpy as np\n'), ((47535, 47544), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (47541, 47544), True, 'import numpy as np\n'), ((2217, 2232), 'numpy.array', 'np.array', (['[4.0]'], {}), '([4.0])\n', (2225, 2232), True, 'import numpy as np\n'), ((2264, 2278), 'numpy.array', 'np.array', (['[40]'], {}), '([40])\n', (2272, 2278), True, 'import numpy as np\n'), ((3940, 3955), 'numpy.array', 'np.array', (['[4.0]'], {}), '([4.0])\n', (3948, 3955), True, 'import numpy as np\n'), ((3987, 4001), 'numpy.array', 'np.array', (['[40]'], {}), '([40])\n', (3995, 4001), True, 'import numpy as np\n'), ((5394, 5409), 'numpy.array', 'np.array', (['[4.0]'], {}), '([4.0])\n', (5402, 5409), True, 'import numpy as np\n'), ((5441, 5455), 'numpy.array', 'np.array', (['[40]'], {}), '([40])\n', (5449, 5455), True, 'import numpy as np\n'), ((6656, 6671), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (6664, 6671), True, 'import numpy as np\n'), ((12709, 12729), 'numpy.arange', 'np.arange', (['self.size'], {}), '(self.size)\n', (12718, 12729), True, 'import numpy as np\n'), ((13195, 13205), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (13202, 13205), True, 
'import numpy as np\n'), ((13245, 13255), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (13252, 13255), True, 'import numpy as np\n'), ((14881, 14934), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'abs_error.forward', '(0.0)', '(1e-05)'], {}), '(self, abs_error.forward, 0.0, 1e-05)\n', (14897, 14934), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((14949, 15002), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'abs_error.reverse', '(0.0)', '(1e-05)'], {}), '(self, abs_error.reverse, 0.0, 1e-05)\n', (14965, 15002), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((15017, 15078), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'abs_error.forward_reverse', '(0.0)', '(1e-05)'], {}), '(self, abs_error.forward_reverse, 0.0, 1e-05)\n', (15033, 15078), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((15093, 15146), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'rel_error.forward', '(0.0)', '(1e-05)'], {}), '(self, rel_error.forward, 0.0, 1e-05)\n', (15109, 15146), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((15161, 15214), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'rel_error.reverse', '(0.0)', '(1e-05)'], {}), '(self, rel_error.reverse, 0.0, 1e-05)\n', (15177, 15214), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((15229, 15290), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'rel_error.forward_reverse', '(0.0)', '(1e-05)'], {}), '(self, rel_error.forward_reverse, 0.0, 1e-05)\n', (15245, 15290), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((15792, 15805), 'numpy.ones', 'np.ones', (['(2,)'], {}), '((2,))\n', (15799, 15805), True, 'import numpy as np\n'), ((16304, 16357), 
'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'abs_error.forward', '(0.0)', '(1e-05)'], {}), '(self, abs_error.forward, 0.0, 1e-05)\n', (16320, 16357), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((16372, 16425), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'abs_error.reverse', '(0.0)', '(1e-05)'], {}), '(self, abs_error.reverse, 0.0, 1e-05)\n', (16388, 16425), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((16440, 16501), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'abs_error.forward_reverse', '(0.0)', '(1e-05)'], {}), '(self, abs_error.forward_reverse, 0.0, 1e-05)\n', (16456, 16501), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((16516, 16569), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'rel_error.forward', '(0.0)', '(1e-05)'], {}), '(self, rel_error.forward, 0.0, 1e-05)\n', (16532, 16569), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((16584, 16637), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'rel_error.reverse', '(0.0)', '(1e-05)'], {}), '(self, rel_error.reverse, 0.0, 1e-05)\n', (16600, 16637), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((16652, 16713), 'openmdao.devtools.testutil.assert_rel_error', 'assert_rel_error', (['self', 'rel_error.forward_reverse', '(0.0)', '(1e-05)'], {}), '(self, rel_error.forward_reverse, 0.0, 1e-05)\n', (16668, 16713), False, 'from openmdao.devtools.testutil import assert_rel_error, TestLogger\n'), ((17182, 17216), 'numpy.array', 'np.array', (['[[3.0, 4.0], [2.0, 3.0]]'], {}), '([[3.0, 4.0], [2.0, 3.0]])\n', (17190, 17216), True, 'import numpy as np\n'), ((17740, 17753), 'numpy.ones', 'np.ones', (['(2,)'], {}), '((2,))\n', (17747, 17753), True, 'import numpy as np\n'), ((17818, 17831), 
'numpy.ones', 'np.ones', (['(2,)'], {}), '((2,))\n', (17825, 17831), True, 'import numpy as np\n'), ((19264, 19279), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (19271, 19279), True, 'import numpy as np\n'), ((19339, 19354), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (19346, 19354), True, 'import numpy as np\n'), ((20710, 20725), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (20717, 20725), True, 'import numpy as np\n'), ((20785, 20800), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (20792, 20800), True, 'import numpy as np\n'), ((32302, 32317), 'numpy.array', 'np.array', (['[4.0]'], {}), '([4.0])\n', (32310, 32317), True, 'import numpy as np\n'), ((32349, 32363), 'numpy.array', 'np.array', (['[40]'], {}), '([40])\n', (32357, 32363), True, 'import numpy as np\n'), ((33820, 33835), 'numpy.array', 'np.array', (['[4.0]'], {}), '([4.0])\n', (33828, 33835), True, 'import numpy as np\n'), ((33867, 33881), 'numpy.array', 'np.array', (['[40]'], {}), '([40])\n', (33875, 33881), True, 'import numpy as np\n'), ((42229, 42335), 'numpy.array', 'np.array', (['[[1.0, 3.0, -2.0, 7.0], [6.0, 2.5, 2.0, 4.0], [-1.0, 0.0, 8.0, 1.0], [1.0, \n 4.0, -5.0, 6.0]]'], {}), '([[1.0, 3.0, -2.0, 7.0], [6.0, 2.5, 2.0, 4.0], [-1.0, 0.0, 8.0, 1.0\n ], [1.0, 4.0, -5.0, 6.0]])\n', (42237, 42335), True, 'import numpy as np\n'), ((43138, 43148), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (43145, 43148), True, 'import numpy as np\n'), ((44430, 44440), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (44437, 44440), True, 'import numpy as np\n'), ((9454, 9487), 'numpy.linalg.norm', 'np.linalg.norm', (['(forward - reverse)'], {}), '(forward - reverse)\n', (9468, 9487), True, 'import numpy as np\n'), ((9532, 9560), 'numpy.linalg.norm', 'np.linalg.norm', (['(forward - fd)'], {}), '(forward - fd)\n', (9546, 9560), True, 'import numpy as np\n'), ((12178, 12194), 'numpy.isscalar', 'np.isscalar', (['val'], {}), '(val)\n', (12189, 12194), True, 'import 
numpy as np\n'), ((12272, 12290), 'numpy.prod', 'np.prod', (['val.shape'], {}), '(val.shape)\n', (12279, 12290), True, 'import numpy as np\n'), ((13324, 13334), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (13331, 13334), True, 'import numpy as np\n'), ((13422, 13432), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (13429, 13432), True, 'import numpy as np\n'), ((16989, 16999), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (16996, 16999), True, 'import numpy as np\n'), ((17041, 17051), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (17048, 17051), True, 'import numpy as np\n'), ((17090, 17100), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (17097, 17100), True, 'import numpy as np\n'), ((17143, 17153), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (17150, 17153), True, 'import numpy as np\n'), ((17571, 17580), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (17577, 17580), True, 'import numpy as np\n'), ((42502, 42515), 'numpy.zeros', 'np.zeros', (['[4]'], {}), '([4])\n', (42510, 42515), True, 'import numpy as np\n'), ((42583, 42596), 'numpy.zeros', 'np.zeros', (['[4]'], {}), '([4])\n', (42591, 42596), True, 'import numpy as np\n')] |
# This program is the first step of project 2: Advanced Lane Finding
# STEP 1: Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
# The chessboard has 9x6 inner corners; z stays 0 because the board is planar.
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
#print(objp)
#quit()
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('camera_cal/calibration*.jpg')
# Step through the list and search for chessboard corners
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)
        # Draw and display the corners
        #img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
        #cv2.imshow('img',img)
        #cv2.waitKey(500)
# gray.shape[::-1] passes the image size as (width, height), as calibrateCamera expects.
# NOTE(review): `gray` is the last image processed; if the glob matched no files this
# line raises NameError — confirm camera_cal/ always contains images.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
testimg = cv2.imread('camera_cal/calibration1.jpg')
cv2.imshow('testimg distorted',testimg)
# Undistort a sample image with the computed camera matrix and distortion coefficients.
dst = cv2.undistort(testimg, mtx, dist, None, mtx)
# to save a frame, please uncomment
#cv2.imwrite('output_images/calibration1_undistorted.jpg', dst)
print("Distortion Coefficient: ")
print(dist)
print("Camera Matrix: ")
print(mtx)
# Note: later in the code, you can use the hardcoded distortion coefficient and camera matrix
cv2.imshow('testimg undistorted', dst)
cv2.waitKey(5000)
cv2.destroyAllWindows()
"cv2.undistort",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"cv2.imread",
"glob.glob"
] | [((331, 363), 'numpy.zeros', 'np.zeros', (['(6 * 9, 3)', 'np.float32'], {}), '((6 * 9, 3), np.float32)\n', (339, 363), True, 'import numpy as np\n'), ((637, 677), 'glob.glob', 'glob.glob', (['"""camera_cal/calibration*.jpg"""'], {}), "('camera_cal/calibration*.jpg')\n", (646, 677), False, 'import glob\n'), ((1263, 1334), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'gray.shape[::-1]', 'None', 'None'], {}), '(objpoints, imgpoints, gray.shape[::-1], None, None)\n', (1282, 1334), False, 'import cv2\n'), ((1346, 1387), 'cv2.imread', 'cv2.imread', (['"""camera_cal/calibration1.jpg"""'], {}), "('camera_cal/calibration1.jpg')\n", (1356, 1387), False, 'import cv2\n'), ((1388, 1428), 'cv2.imshow', 'cv2.imshow', (['"""testimg distorted"""', 'testimg'], {}), "('testimg distorted', testimg)\n", (1398, 1428), False, 'import cv2\n'), ((1434, 1478), 'cv2.undistort', 'cv2.undistort', (['testimg', 'mtx', 'dist', 'None', 'mtx'], {}), '(testimg, mtx, dist, None, mtx)\n', (1447, 1478), False, 'import cv2\n'), ((1759, 1797), 'cv2.imshow', 'cv2.imshow', (['"""testimg undistorted"""', 'dst'], {}), "('testimg undistorted', dst)\n", (1769, 1797), False, 'import cv2\n'), ((1798, 1815), 'cv2.waitKey', 'cv2.waitKey', (['(5000)'], {}), '(5000)\n', (1809, 1815), False, 'import cv2\n'), ((1816, 1839), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1837, 1839), False, 'import cv2\n'), ((768, 785), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (778, 785), False, 'import cv2\n'), ((797, 834), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (809, 834), False, 'import cv2\n'), ((888, 933), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(9, 6)', 'None'], {}), '(gray, (9, 6), None)\n', (913, 933), False, 'import cv2\n')] |
"""Write on Job processes POST jobs in Jobs/
Load data in Job, Convert to databunch, package, and sent POST.
"""
import requests
import numpy as np
import pandas as pd
import time
from multiprocessing import Pool
from functools import partial
import logging
import os
from jobs import get_jobs, pop_current_job, read_job,\
download_data, delete_local_file, delete_s3_file, load_data
###Logging###
# Module-level logger used by run_request; all records go to logs/debug.log
# relative to the current working directory at import time.
request_logger = logging.getLogger(__name__+" requests")
log_path = os.path.join(os.getcwd(), 'logs/debug.log')
logging.basicConfig(filename=log_path, level=logging.INFO)
def df_to_query(df, tablename):
"""Transform dataframe into dictionary object of correct form for database api request parsing.
param df: Tabular data to transform
type df: Pandas.DataFrame
"""
import json
def transform_df(df):
# Generate a list of stringified dictionaries from the dataframe
# Note: Will transform the entire supplied dataframe. Split datframe into chunks prior.
records_list = df.to_json(orient='records', lines=True).split('\n')
# Cast stringified row entris as python dict vis json loads (important for request)
cast_records = [json.loads(x) for x in records_list]
return cast_records
package = {
'table_name': tablename,
'data': transform_df(df)
}
return package
def build_databunch(query, num_splits=3, max_size=None):
    """Split a query package into a list of smaller packages of the same shape.

    param query: dict with keys 'table_name' and 'data' (list of records)
    param num_splits: number of packages to produce (ignored when max_size is set)
    param max_size: optional cap on records per package; when given, the
        number of splits is derived from it

    Returns a list of dicts shaped like `query`, whose 'data' slices
    together cover query['data']; the last bunch absorbs any remainder.
    """
    import math
    rows = query['data']
    if max_size:
        # Bug fix: an empty payload used to yield num_splits == 0 here,
        # causing a ZeroDivisionError below; clamp to at least one split.
        num_splits = max(1, math.ceil(len(rows) / max_size))
    bunch_size = len(rows) // num_splits
    databunch = []
    for i in range(num_splits):
        start = i * bunch_size
        # The final bunch takes the remainder of the rows.
        stop = (i + 1) * bunch_size if i < num_splits - 1 else len(rows)
        databunch.append({
            'table_name': query['table_name'],
            'data': rows[start:stop],
        })
    return databunch
def parallel_post_requests(databunch, url, max_requests=10):
    """Request handler that will parallelize databunch POST requests.

    param databunch: Packages to POST to database API
    type databunch: list of packages
    param max_requests: How many simultaneous requests sessions to attempt
    type max_requests: int
    param url: Endpoint url. Must be valid ipv4 or dns entry.
    type url: string
    """
    # Bind the target URL once so workers only receive the bunch payload.
    post_one = partial(run_request, url=url)
    pool = Pool(max_requests)
    pool.map(post_one, databunch)
    # close() + join() waits for all outstanding requests to finish
    # (unlike terminate(), which would kill in-flight work).
    pool.close()
    pool.join()
def run_request(bunch, url, retry_size=20):
    """Run and time a POST request; on failure, re-split and retry.

    param bunch: package dict with 'table_name' and 'data' keys
    param url: endpoint URL to POST to
    param retry_size: current bunch-size ceiling; on failure the bunch is
        re-split into pieces of retry_size - 1 records and each piece is
        retried recursively, down to single records

    Returns True on a successful POST, False when even a single-record
    POST failed (the request is dropped), and None when the bunch was
    re-split and retried.
    """
    import requests
    import time
    import numpy as np
    try:
        # Random jitter so parallel workers do not hit the API all at once.
        time.sleep(np.random.random_sample()*10)
        start = time.time()
        response = requests.post(url=url, json=bunch, timeout=None)
        assert response.status_code == 200
        request_logger.info("POST succeded. Status= {}".format(response.status_code))
        stop = time.time()
        request_logger.info('Batch of {} processed in {}'.format(len(bunch['data']), stop-start))
        return True
    except Exception:
        # Bug fix: the previous bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit, making pool workers impossible to interrupt cleanly.
        min_size = retry_size - 1
        request_logger.error("POST failed. Trying again with smaller bunch of {}.".format(min_size))
        if min_size < 1:
            request_logger.error("POST failed at single element. Dropping Request.")
            return False
        databunch = build_databunch(query=bunch, max_size=min_size)
        for mini_bunch in databunch:
            run_request(bunch=mini_bunch, url=url, retry_size=min_size)
# Deprecated. Table name now in job file under key tablename
# def get_source_from_name(filename):
# for table_name in tables.keys():
# if table_name in filename:
# return tables[table_name]
# raise NameError('Tablename not found. Aborting.')
# tables = {
# 'business': 'businesses',
# 'user': 'users',
# 'checkin': 'checkins',
# 'photo': 'photos',
# 'tip': 'tips',
# 'review': 'reviews',
# }
if __name__ == "__main__":
    # Drain all pending 'post' jobs: each job names an S3 asset and a target table.
    write_logger = logging.getLogger(__name__+' DB-writer')
    num_jobs = len(get_jobs('post'))
    for i in range(num_jobs):
        # Get a job and read out the datapath
        current_job = pop_current_job()
        asset = read_job(current_job)['file']
        tablename = read_job(current_job)['tablename']
        write_logger.info('Running job {}. Read file {}'.format(current_job, asset))
        # Load the data
        datapath = download_data(asset)
        data = load_data(datapath)
        # Build query package
        package = df_to_query(df=data, tablename=tablename)
        # Split package
        databunch = build_databunch(query=package, max_size=50)
        # Connect and write to database via api
        parallel_post_requests(
            databunch=databunch,
            url='https://db-api-yelp18-staging.herokuapp.com/api/data',
            max_requests=12
        )
        # Cleanup: remove the downloaded file and the processed job marker.
        delete_local_file(datapath)
        delete_s3_file(current_job)
        write_logger.info("Deleted Job: {}".format(current_job))
| [
"logging.getLogger",
"logging.basicConfig",
"json.loads",
"requests.post",
"jobs.read_job",
"numpy.random.random_sample",
"jobs.download_data",
"jobs.delete_s3_file",
"jobs.get_jobs",
"jobs.delete_local_file",
"os.getcwd",
"jobs.load_data",
"functools.partial",
"multiprocessing.Pool",
"j... | [((425, 466), 'logging.getLogger', 'logging.getLogger', (["(__name__ + ' requests')"], {}), "(__name__ + ' requests')\n", (442, 466), False, 'import logging\n'), ((520, 578), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_path', 'level': 'logging.INFO'}), '(filename=log_path, level=logging.INFO)\n', (539, 578), False, 'import logging\n'), ((489, 500), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (498, 500), False, 'import os\n'), ((2493, 2522), 'functools.partial', 'partial', (['run_request'], {'url': 'url'}), '(run_request, url=url)\n', (2500, 2522), False, 'from functools import partial\n'), ((2531, 2549), 'multiprocessing.Pool', 'Pool', (['max_requests'], {}), '(max_requests)\n', (2535, 2549), False, 'from multiprocessing import Pool\n'), ((4171, 4213), 'logging.getLogger', 'logging.getLogger', (["(__name__ + ' DB-writer')"], {}), "(__name__ + ' DB-writer')\n", (4188, 4213), False, 'import logging\n'), ((2856, 2867), 'time.time', 'time.time', ([], {}), '()\n', (2865, 2867), False, 'import time\n'), ((2887, 2935), 'requests.post', 'requests.post', ([], {'url': 'url', 'json': 'bunch', 'timeout': 'None'}), '(url=url, json=bunch, timeout=None)\n', (2900, 2935), False, 'import requests\n'), ((3081, 3092), 'time.time', 'time.time', ([], {}), '()\n', (3090, 3092), False, 'import time\n'), ((4232, 4248), 'jobs.get_jobs', 'get_jobs', (['"""post"""'], {}), "('post')\n", (4240, 4248), False, 'from jobs import get_jobs, pop_current_job, read_job, download_data, delete_local_file, delete_s3_file, load_data\n'), ((4348, 4365), 'jobs.pop_current_job', 'pop_current_job', ([], {}), '()\n', (4363, 4365), False, 'from jobs import get_jobs, pop_current_job, read_job, download_data, delete_local_file, delete_s3_file, load_data\n'), ((4597, 4617), 'jobs.download_data', 'download_data', (['asset'], {}), '(asset)\n', (4610, 4617), False, 'from jobs import get_jobs, pop_current_job, read_job, download_data, delete_local_file, delete_s3_file, load_data\n'), 
((4633, 4652), 'jobs.load_data', 'load_data', (['datapath'], {}), '(datapath)\n', (4642, 4652), False, 'from jobs import get_jobs, pop_current_job, read_job, download_data, delete_local_file, delete_s3_file, load_data\n'), ((5088, 5115), 'jobs.delete_local_file', 'delete_local_file', (['datapath'], {}), '(datapath)\n', (5105, 5115), False, 'from jobs import get_jobs, pop_current_job, read_job, download_data, delete_local_file, delete_s3_file, load_data\n'), ((5124, 5151), 'jobs.delete_s3_file', 'delete_s3_file', (['current_job'], {}), '(current_job)\n', (5138, 5151), False, 'from jobs import get_jobs, pop_current_job, read_job, download_data, delete_local_file, delete_s3_file, load_data\n'), ((1199, 1212), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (1209, 1212), False, 'import json\n'), ((4382, 4403), 'jobs.read_job', 'read_job', (['current_job'], {}), '(current_job)\n', (4390, 4403), False, 'from jobs import get_jobs, pop_current_job, read_job, download_data, delete_local_file, delete_s3_file, load_data\n'), ((4432, 4453), 'jobs.read_job', 'read_job', (['current_job'], {}), '(current_job)\n', (4440, 4453), False, 'from jobs import get_jobs, pop_current_job, read_job, download_data, delete_local_file, delete_s3_file, load_data\n'), ((2810, 2835), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (2833, 2835), True, 'import numpy as np\n')] |
from collections import defaultdict
from typing import Dict, Sequence, Tuple
import numpy as np
from ..dataset import MotionPredictionDataset
from ..proto import (ObjectPrediction, Submission, Trajectory, Vector3,
WeightedTrajectory)
from ..utils.map import repeated_points_to_array
from .metrics import (avg_ade, avg_fde, corrected_negative_log_likelihood,
log_likelihood, min_ade, min_fde, top1_ade, top1_fde,
weighted_ade, weighted_fde)
MAX_NUM_MODES = 25
def save_submission_proto(filepath: str, submission: Submission) -> None:
    """Serialize a submission protobuf and write it to a file.

    Args:
        filepath (str): Path to output file.
        submission (Submission): Submission proto to save.
    """
    serialized = submission.SerializeToString()
    with open(filepath, 'wb') as fout:
        fout.write(serialized)
def load_submission_proto(filepath: str) -> Submission:
    """Read a file and deserialize its content into a Submission proto.

    Args:
        filepath (str): File with serialized protobuf message.

    Returns:
        Submission: Deserialized message.
    """
    submission = Submission()
    with open(filepath, 'rb') as fin:
        submission.ParseFromString(fin.read())
    return submission
def evaluate_submission_with_proto(
        submission: Submission,
        ground_truth: Submission,
) -> Dict[str, Sequence[float]]:
    """Calculates various motion prediction metrics given
    the submission and ground truth protobuf messages.

    Args:
        submission (Submission): Proto message with predicted trajectories.
        ground_truth (Submission): Proto message with ground truth trajectories.

    Raises:
        ValueError:
            Submission and ground truth disagree on object count, contain
            duplicate keys, or cover different (scene_id, track_id) sets.

    Returns:
        Dict[str, Sequence[float]]: Mapping from metric name to the list of
        per-prediction metric values (one entry per submitted object).
    """
    _check_submission_and_ground_truth(submission, ground_truth)
    metrics = defaultdict(list)
    gt_map = {
        (prediction.scene_id, prediction.track_id): prediction
        for prediction in ground_truth.predictions
    }
    for pred in submission.predictions:
        # _check_submission_and_ground_truth guarantees the key sets match, so
        # this lookup always succeeds and gt has the same scene/track ids;
        # the former re-checks of scene_id/track_id were unreachable.
        gt = gt_map[(pred.scene_id, pred.track_id)]
        pred_trajectories, weights = get_trajectories_weights_arrays(pred.weighted_trajectories)
        # Keep at most MAX_NUM_MODES modes with the highest weights
        # (argsort is ascending, so the tail holds the largest weights).
        # Compute the sort order once instead of twice.
        order = np.argsort(weights)
        pred_trajectories = pred_trajectories[order][-MAX_NUM_MODES:]
        weights = weights[order][-MAX_NUM_MODES:]
        gt_trajectory, _ = get_trajectories_weights_arrays(gt.weighted_trajectories)
        gt_trajectory = gt_trajectory[0]  # Reduce modes dim
        metrics['avg_ade'].append(avg_ade(gt_trajectory, pred_trajectories))
        metrics['avg_fde'].append(avg_fde(gt_trajectory, pred_trajectories))
        metrics['min_ade'].append(min_ade(gt_trajectory, pred_trajectories))
        metrics['min_fde'].append(min_fde(gt_trajectory, pred_trajectories))
        metrics['top1_ade'].append(top1_ade(gt_trajectory, pred_trajectories, weights))
        metrics['top1_fde'].append(top1_fde(gt_trajectory, pred_trajectories, weights))
        metrics['weighted_ade'].append(weighted_ade(gt_trajectory, pred_trajectories, weights))
        metrics['weighted_fde'].append(weighted_fde(gt_trajectory, pred_trajectories, weights))
        metrics['log_likelihood'].append(log_likelihood(gt_trajectory, pred_trajectories, weights))
        metrics['corrected_nll'].append(
            corrected_negative_log_likelihood(gt_trajectory, pred_trajectories, weights))
        metrics['is_ood'].append(gt.is_ood)
    return metrics
def get_trajectories_weights_arrays(
        trajectories: Sequence[WeightedTrajectory],
) -> Tuple[np.ndarray, np.ndarray]:
    """Unpack WeightedTrajectory messages into numpy arrays.

    Args:
        trajectories (Sequence[WeightedTrajectory]): sequence of protobuf messages
            to extract arrays from

    Returns:
        Tuple[np.ndarray, np.ndarray]: trajectories of shape
        (n_modes, prediction_horizon, 2) and respective weights of shape (n_modes,)
    """
    n_modes = len(trajectories)
    horizon = get_prediction_horizon(trajectories)
    # Pre-allocate the outputs and fill them mode by mode.
    trajectories_array = np.empty((n_modes, horizon, 2))
    weights = np.empty(n_modes)
    for idx, weighted in enumerate(trajectories):
        trajectories_array[idx] = repeated_points_to_array(weighted.trajectory)
        weights[idx] = weighted.weight
    return trajectories_array, weights
def ground_truth_from_dataset(dataset: MotionPredictionDataset) -> Submission:
    """Build a Submission proto holding the ground truth trajectories.

    Args:
        dataset (MotionPredictionDataset): Dataset to get trajectories from.

    Returns:
        Submission: Resulting protobuf message.
    """
    ground_truth = Submission()
    for data_item in iter(dataset):
        prediction = ObjectPrediction()
        prediction.track_id = data_item['track_id']
        prediction.scene_id = data_item['scene_id']
        # Ground truth is a single trajectory carrying full weight.
        weighted = WeightedTrajectory(
            trajectory=trajectory_array_to_proto(data_item['ground_truth_trajectory']),
            weight=1.0,
        )
        prediction.weighted_trajectories.append(weighted)
        ground_truth.predictions.append(prediction)
    return ground_truth
def trajectory_array_to_proto(trajectory: np.ndarray) -> Trajectory:
    """Transforms a numpy array with 2D trajectory to Trajectory proto message.

    Args:
        trajectory (np.ndarray): Trajectory array, shape (N, 2)

    Returns:
        Trajectory: Resulting protobuf message.
    """
    assert len(trajectory.shape) == 2
    proto = Trajectory()
    for row in trajectory:
        proto.points.append(Vector3(x=row[0], y=row[1]))
    return proto
def get_prediction_horizon(trajectories: Sequence[WeightedTrajectory]) -> int:
    """Return the number of timestamps shared by all weighted trajectories.

    Args:
        trajectories (Sequence[WeightedTrajectory]): sequence of weighted trajectories.

    Raises:
        ValueError: If any trajectory has a deviating number of timestamps.

    Returns:
        int: The common number of timestamps.
    """
    horizon = len(trajectories[0].trajectory.points)
    if any(len(weighted.trajectory.points) != horizon for weighted in trajectories):
        raise ValueError('All modes must have the same prediction horizon')
    return horizon
def object_prediction_from_model_output(
        track_id: int,
        scene_id: str,
        model_output: Dict[str, np.ndarray],
        is_ood: bool,
) -> ObjectPrediction:
    """Generates an instance of ObjectPrediction proto from scene data and model predictions.

    Args:
        track_id (int): prediction request id
        scene_id (str): unique scene id
        model_output (Dict[str, np.ndarray]): model predictions stored in dict:
            trajectories with associated weights and scene-level prediction confidence.
        is_ood (bool): whether the sample is out of domain or not.

    Raises:
        ValueError: when the number of trajectories and weights disagree.

    Returns:
        ObjectPrediction: resulting message instance with fields set.
    """
    n_trajectories = len(model_output['predictions_list'])
    n_weights = len(model_output['plan_confidence_scores_list'])
    if n_trajectories != n_weights:
        raise ValueError(f'Number of predicted trajectories is not equal to number of weights:'
                         f'{n_trajectories} != {n_weights}')
    object_prediction = ObjectPrediction()
    object_prediction.track_id = track_id
    object_prediction.scene_id = scene_id
    object_prediction.is_ood = is_ood
    # Pair each trajectory with its confidence score (lengths verified above).
    for trajectory, weight in zip(model_output['predictions_list'],
                                  model_output['plan_confidence_scores_list']):
        object_prediction.weighted_trajectories.append(WeightedTrajectory(
            trajectory=trajectory_array_to_proto(trajectory),
            weight=weight,
        ))
    object_prediction.uncertainty_measure = model_output['pred_request_uncertainty_measure']
    return object_prediction
def _check_submission_and_ground_truth(
        submission: Submission,
        ground_truth: Submission,
) -> None:
    """Validate that submission keys exactly mirror ground truth keys.

    Raises:
        ValueError: on prediction-count mismatch, duplicate keys on either
            side, or differing (scene_id, track_id) key sets.
    """
    def _keys(message):
        # One (scene_id, track_id) tuple per prediction, order preserved.
        return [(p.scene_id, p.track_id) for p in message.predictions]

    submitted = _keys(submission)
    expected = _keys(ground_truth)
    if len(submitted) != len(expected):
        raise ValueError(f'Check number of submitted predictions:'
                         f'{len(submission.predictions)} != {len(ground_truth.predictions)}')
    if len(set(submitted)) != len(submitted):
        raise ValueError('Submission has duplicate keys.')
    if len(set(expected)) != len(expected):
        raise ValueError('Ground truth has duplicate keys.')
    if set(submitted) != set(expected):
        raise ValueError('Submission and ground truth keys are not identical sets.')
| [
"numpy.argsort",
"numpy.empty",
"collections.defaultdict"
] | [((2099, 2116), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2110, 2116), False, 'from collections import defaultdict\n'), ((4685, 4727), 'numpy.empty', 'np.empty', (['(n_modes, prediction_horizon, 2)'], {}), '((n_modes, prediction_horizon, 2))\n', (4693, 4727), True, 'import numpy as np\n'), ((4742, 4759), 'numpy.empty', 'np.empty', (['n_modes'], {}), '(n_modes)\n', (4750, 4759), True, 'import numpy as np\n'), ((2792, 2811), 'numpy.argsort', 'np.argsort', (['weights'], {}), '(weights)\n', (2802, 2811), True, 'import numpy as np\n'), ((2856, 2875), 'numpy.argsort', 'np.argsort', (['weights'], {}), '(weights)\n', (2866, 2875), True, 'import numpy as np\n')] |
# ~*~ coding:utf-8 ~*~
from PyQt5.Qt import QColor
import math
import numpy as np
from geometry import Geometry
class Sphere:
    """
    General description of a sphere approximated by stacked circles.

    The mesh is rebuilt by recalculate() whenever a parameter changes and the
    owning render_area widget is asked to repaint; all geometric transforms
    are delegated to the Geometry helper stored in self.geom.
    """
    def __init__(self, render_area):
        # Widget whose update() is called to trigger a repaint on any change.
        self.render_area = render_area
        self.approximation_step = 0
        self.radius = 0
        self.projection_name = "default"
        # Light source coordinates
        self.light_x = 0
        self.light_y = 0
        self.light_z = -1000
        self.geom = Geometry()
    def recalculate(self):
        """Rebuild the sphere mesh (points, edges, faces) from current parameters."""
        # Set up the approximation steps
        circle_count = self.approximation_step
        circle_points_count = self.approximation_step + 2
        # Build the circles
        self.geom.clear()
        angle_step = 2*math.pi/circle_points_count
        for circle_number in range(0, circle_count):
            # Two adjacent latitude rings: ring 1 (closer to equator) and ring 2 (closer to pole).
            radius_for_point_1 = self.radius * math.sqrt(1 - math.pow((circle_count - (circle_number+1))/circle_count, 2))
            z_axis_for_point_1 = self.radius * (circle_count-(circle_number+1))/circle_count
            radius_for_point_2 = self.radius * math.sqrt(1 - math.pow((circle_count - circle_number)/circle_count, 2))
            z_axis_for_point_2 = self.radius * (circle_count - circle_number) / circle_count
            angle = 0
            # Upper hemisphere: emit edge segments pairwise around both rings.
            while angle < 2*math.pi:
                self.geom.points.append(Geometry.from_polar(radius_for_point_1, angle, z_axis_for_point_1))
                self.geom.points.append(Geometry.from_polar(radius_for_point_1, angle+angle_step, z_axis_for_point_1))
                self.geom.edges.append((len(self.geom.points)-2, len(self.geom.points)-1))
                self.geom.points.append(Geometry.from_polar(radius_for_point_2, angle, z_axis_for_point_2))
                self.geom.points.append(Geometry.from_polar(radius_for_point_2, angle+angle_step, z_axis_for_point_2))
                self.geom.edges.append((len(self.geom.points)-2, len(self.geom.points)-1))
                angle += angle_step
            angle = 2*math.pi
            # Lower hemisphere: mirror of the loop above with negated z, walked backwards.
            while angle > 0:
                self.geom.points.append(Geometry.from_polar(radius_for_point_1, angle, -z_axis_for_point_1))
                self.geom.points.append(Geometry.from_polar(radius_for_point_1, angle-angle_step, -z_axis_for_point_1))
                self.geom.edges.append((len(self.geom.points)-2, len(self.geom.points)-1))
                self.geom.points.append(Geometry.from_polar(radius_for_point_2, angle, -z_axis_for_point_2))
                self.geom.points.append(Geometry.from_polar(radius_for_point_2, angle-angle_step, -z_axis_for_point_2))
                self.geom.edges.append((len(self.geom.points)-2, len(self.geom.points)-1))
                angle -= angle_step
        # Every 4 consecutive points form one quad face.
        for index in range(0, len(self.geom.points), 4):
            self.geom.faces.append((index, index+1, index+3, index+2))
        self.geom.apply_projection(self.projection_name)
    def is_face_visible(self, face):
        """
        Determine face visibility using the Roberts back-face algorithm.
        :param face: face (tuple of point indices)
        :return: True if visible, otherwise False
        """
        # Plane coefficients a, b, c, d from the first three face vertices.
        p1_index = face[0]
        x0 = self.geom.points[p1_index][0]
        y0 = self.geom.points[p1_index][1]
        z0 = self.geom.points[p1_index][2]
        p2_index = face[1]
        x1 = self.geom.points[p2_index][0]
        y1 = self.geom.points[p2_index][1]
        z1 = self.geom.points[p2_index][2]
        p3_index = face[2]
        x2 = self.geom.points[p3_index][0]
        y2 = self.geom.points[p3_index][1]
        z2 = self.geom.points[p3_index][2]
        a = y0*(z1 - z2) + y1*(z2 - z0) + y2*(z0 - z1)
        b = z0*(x1 - x2) + z1*(x2 - x0) + z2*(x0 - x1)
        c = x0*(y1 - y2) + x1*(y2 - y0) + x2*(y0 - y1)
        d = -(x0*(y1*z2 - y2*z1) + x1*(y2*z0 - y0*z2) + x2*(y0*z1 - y1*z0))
        """
        The sign of result = Ax + By + Cz + D tells which side of the plane the
        point s(x, y, z, w) lies on.
        If result > 0, the point is inside the body.
        If result < 0 it is on the opposite side, and if result = 0 the point
        belongs to the plane.
        """
        s = np.array([[1, 1, -1000, 1]])
        p = np.array([[a],
                      [b],
                      [c],
                      [d]])
        result = Geometry.multiplication_matrix(s, p)
        return True if result[0][0] < 0 else False
    def get_face_light(self, face, color):
        """
        Shade a face with lighting, based on the angle between the face normal
        and the light vector.
        :param face: face (tuple of point indices)
        :param color: base face color
        :return: shaded color
        """
        p1_index = face[0]
        x0 = self.geom.clear_points[p1_index][0]
        y0 = self.geom.clear_points[p1_index][1]
        z0 = self.geom.clear_points[p1_index][2]
        p2_index = face[1]
        x1 = self.geom.clear_points[p2_index][0]
        y1 = self.geom.clear_points[p2_index][1]
        z1 = self.geom.clear_points[p2_index][2]
        p3_index = face[2]
        x2 = self.geom.clear_points[p3_index][0]
        y2 = self.geom.clear_points[p3_index][1]
        z2 = self.geom.clear_points[p3_index][2]
        # Compute two edge vectors lying in the face plane
        a_x = x1 - x0
        a_y = y1 - y0
        a_z = z1 - z0
        b_x = x2 - x1
        b_y = y2 - y1
        b_z = z2 - z1
        # Face normal from the two edge vectors.
        # NOTE(review): the conventional cross-product y-component is
        # a_z*b_x - a_x*b_z; the sign here looks flipped — confirm intended.
        normal_x = a_y * b_z - a_z * b_y
        normal_y = a_x * b_z - a_z * b_x
        normal_z = a_x * b_y - a_y * b_x
        # Normal length
        normal_length = math.sqrt(math.pow(normal_x, 2) + math.pow(normal_y, 2) + math.pow(normal_z, 2))
        # Knowing the light source coordinates, compute the length of the
        # vector from the light source to the point under consideration:
        light_length = math.sqrt(math.pow(self.light_x, 2) + math.pow(self.light_y, 2) + math.pow(self.light_z, 2))
        # Guard against division by zero for degenerate faces / light at origin.
        normal_length = normal_length if normal_length != 0 else 0.0001
        light_length = light_length if light_length != 0 else 0.0001
        # Cosine of the angle between the two vectors:
        result = (normal_x * self.light_x + normal_y * self.light_y + normal_z * self.light_z)/(normal_length * light_length)
        # Derive the intensity (0.5 ambient + 0.5 diffuse term per channel)
        return QColor(int(color.red() * (0.5 + 0.5 * result)),
                      int(color.green() * (0.5 + 0.5 * result)),
                      int(color.blue() * (0.5 + 0.5 * result)))
    # --- Qt slot-style setters: each updates one parameter and repaints -----
    def set_approximation_step(self, step):
        self.approximation_step = step
        self.render_area.update()
    def set_radius(self, radius):
        # UI radius is scaled by 6 into scene units.
        self.radius = radius * 6
        self.render_area.update()
    def set_x_rotate_angle(self, angle):
        self.geom.x_rotate_angle = angle
        self.render_area.update()
    def set_y_rotate_angle(self, angle):
        self.geom.y_rotate_angle = angle
        self.render_area.update()
    def set_z_rotate_angle(self, angle):
        self.geom.z_rotate_angle = angle
        self.render_area.update()
    def set_x_move(self, value):
        self.geom.x_move = value
        self.render_area.update()
    def set_y_move(self, value):
        self.geom.y_move = value
        self.render_area.update()
    def set_z_move(self, value):
        self.geom.z_move = value
        self.render_area.update()
    def set_x_scale(self, value):
        self.geom.x_scale = value
        self.render_area.update()
    def set_y_scale(self, value):
        self.geom.y_scale = value
        self.render_area.update()
    def set_z_scale(self, value):
        self.geom.z_scale = value
        self.render_area.update()
    def set_axonometric_angle_fi(self, value):
        self.geom.axonometric_angle_fi = value
        self.render_area.update()
    def set_axonometric_angle_psi(self, value):
        self.geom.axonometric_angle_psi = value
        self.render_area.update()
    def set_oblique_angle_alpha(self, value):
        self.geom.oblique_angle_alpha = value
        self.render_area.update()
    def set_oblique_L(self, value):
        self.geom.oblique_L = value
        self.render_area.update()
    def set_perspective_angle_fi(self, value):
        self.geom.perspective_angle_fi = value
        self.render_area.update()
    def set_perspective_angle_teta(self, value):
        self.geom.perspective_angle_teta = value
        self.render_area.update()
    def set_perspective_ro(self, value):
        self.geom.perspective_ro = value
        self.render_area.update()
    def set_perspective_d(self, value):
        self.geom.perspective_d = value
        self.render_area.update()
    def set_light_x(self, x):
        # UI light sliders scaled by 10; y and z are negated to match screen axes.
        self.light_x = x*10
        self.render_area.update()
    def set_light_y(self, y):
        self.light_y = -y*10
        self.render_area.update()
    def set_light_z(self, z):
        self.light_z = -z*10
        self.render_area.update()
| [
"geometry.Geometry.from_polar",
"math.pow",
"geometry.Geometry.multiplication_matrix",
"numpy.array",
"geometry.Geometry"
] | [((498, 508), 'geometry.Geometry', 'Geometry', ([], {}), '()\n', (506, 508), False, 'from geometry import Geometry\n'), ((4153, 4181), 'numpy.array', 'np.array', (['[[1, 1, -1000, 1]]'], {}), '([[1, 1, -1000, 1]])\n', (4161, 4181), True, 'import numpy as np\n'), ((4194, 4224), 'numpy.array', 'np.array', (['[[a], [b], [c], [d]]'], {}), '([[a], [b], [c], [d]])\n', (4202, 4224), True, 'import numpy as np\n'), ((4308, 4344), 'geometry.Geometry.multiplication_matrix', 'Geometry.multiplication_matrix', (['s', 'p'], {}), '(s, p)\n', (4338, 4344), False, 'from geometry import Geometry\n'), ((5655, 5676), 'math.pow', 'math.pow', (['normal_z', '(2)'], {}), '(normal_z, 2)\n', (5663, 5676), False, 'import math\n'), ((5883, 5908), 'math.pow', 'math.pow', (['self.light_z', '(2)'], {}), '(self.light_z, 2)\n', (5891, 5908), False, 'import math\n'), ((1371, 1437), 'geometry.Geometry.from_polar', 'Geometry.from_polar', (['radius_for_point_1', 'angle', 'z_axis_for_point_1'], {}), '(radius_for_point_1, angle, z_axis_for_point_1)\n', (1390, 1437), False, 'from geometry import Geometry\n'), ((1479, 1558), 'geometry.Geometry.from_polar', 'Geometry.from_polar', (['radius_for_point_1', '(angle + angle_step)', 'z_axis_for_point_1'], {}), '(radius_for_point_1, angle + angle_step, z_axis_for_point_1)\n', (1498, 1558), False, 'from geometry import Geometry\n'), ((1690, 1756), 'geometry.Geometry.from_polar', 'Geometry.from_polar', (['radius_for_point_2', 'angle', 'z_axis_for_point_2'], {}), '(radius_for_point_2, angle, z_axis_for_point_2)\n', (1709, 1756), False, 'from geometry import Geometry\n'), ((1798, 1877), 'geometry.Geometry.from_polar', 'Geometry.from_polar', (['radius_for_point_2', '(angle + angle_step)', 'z_axis_for_point_2'], {}), '(radius_for_point_2, angle + angle_step, z_axis_for_point_2)\n', (1817, 1877), False, 'from geometry import Geometry\n'), ((2105, 2172), 'geometry.Geometry.from_polar', 'Geometry.from_polar', (['radius_for_point_1', 'angle', '(-z_axis_for_point_1)'], 
{}), '(radius_for_point_1, angle, -z_axis_for_point_1)\n', (2124, 2172), False, 'from geometry import Geometry\n'), ((2214, 2299), 'geometry.Geometry.from_polar', 'Geometry.from_polar', (['radius_for_point_1', '(angle - angle_step)', '(-z_axis_for_point_1)'], {}), '(radius_for_point_1, angle - angle_step, -z_axis_for_point_1\n )\n', (2233, 2299), False, 'from geometry import Geometry\n'), ((2426, 2493), 'geometry.Geometry.from_polar', 'Geometry.from_polar', (['radius_for_point_2', 'angle', '(-z_axis_for_point_2)'], {}), '(radius_for_point_2, angle, -z_axis_for_point_2)\n', (2445, 2493), False, 'from geometry import Geometry\n'), ((2535, 2620), 'geometry.Geometry.from_polar', 'Geometry.from_polar', (['radius_for_point_2', '(angle - angle_step)', '(-z_axis_for_point_2)'], {}), '(radius_for_point_2, angle - angle_step, -z_axis_for_point_2\n )\n', (2554, 2620), False, 'from geometry import Geometry\n'), ((5607, 5628), 'math.pow', 'math.pow', (['normal_x', '(2)'], {}), '(normal_x, 2)\n', (5615, 5628), False, 'import math\n'), ((5631, 5652), 'math.pow', 'math.pow', (['normal_y', '(2)'], {}), '(normal_y, 2)\n', (5639, 5652), False, 'import math\n'), ((5827, 5852), 'math.pow', 'math.pow', (['self.light_x', '(2)'], {}), '(self.light_x, 2)\n', (5835, 5852), False, 'import math\n'), ((5855, 5880), 'math.pow', 'math.pow', (['self.light_y', '(2)'], {}), '(self.light_y, 2)\n', (5863, 5880), False, 'import math\n'), ((903, 967), 'math.pow', 'math.pow', (['((circle_count - (circle_number + 1)) / circle_count)', '(2)'], {}), '((circle_count - (circle_number + 1)) / circle_count, 2)\n', (911, 967), False, 'import math\n'), ((1120, 1178), 'math.pow', 'math.pow', (['((circle_count - circle_number) / circle_count)', '(2)'], {}), '((circle_count - circle_number) / circle_count, 2)\n', (1128, 1178), False, 'import math\n')] |
# coding: utf-8
"""
StatisticsVolumeCorrection - Clinica Utilities.
"""
def peak_correction(t_map, t_threshold, output_name=None):
"""
Threshold the t_map with t_threshold. Pixel intensities that are less than t_threshold are set to 0, other values
are left unchanged.
Args:
t_map: (str) path to t-statistics nifti map
t_threshold: (float) threshold on t value
output_name: (str) optional output name
Returns:
path to the generated file.
"""
import nibabel as nib
from os.path import join, basename, abspath
original_nifti = nib.load(t_map)
data = original_nifti.get_data()
data[data < t_threshold] = 0
new_data = nib.Nifti1Image(data, affine=original_nifti.affine, header=original_nifti.header)
if output_name:
filename = output_name
else:
filename = join('./peak_corrected_' + str(t_threshold) + basename(t_map))
nib.save(new_data, filename)
return abspath(filename)
def cluster_correction(t_map, t_thresh, c_thresh, output_name=None):
"""
Performs cluster correction. First t_map is thresholded with t_thresh (like in peak_correction()). Then, clusters
that have a size less than c_thresh are removed
Args:
t_map: (str) path to t-statistics nifti map
t_thresh: (float) threshold on t value
c_thresh: (int) minimal size of clusters after thresholding
output_name: (str) optional output name
Returns:
path to the generated file.
"""
import nibabel as nib
from os.path import join, basename, abspath
import numpy as np
from scipy.ndimage.measurements import label
original_nifti = nib.load(t_map)
data = original_nifti.get_data()
data[data < t_thresh] = 0
labeled_mask, num_features = label(data)
for i in range(1, num_features + 1):
if np.sum(labeled_mask == i) < c_thresh:
print('Label number ' + str(i) + ' cluster size is: ' + str(np.sum(labeled_mask == i)) + ' so it is removed')
data[labeled_mask == i] = 0
new_data = nib.Nifti1Image(data, affine=original_nifti.affine, header=original_nifti.header)
if output_name:
filename = output_name
else:
filename = join('./cluster_corrected_t-' + str(t_thresh) + '_c-' + str(c_thresh) + basename(t_map))
nib.save(new_data, filename)
return abspath(filename)
def produce_figures(nii_file, template, type_of_correction, t_thresh, c_thresh, n_cuts):
"""
Produce the output figures
Args:
nii_file: (str) path to the nifti file (generated at previous steps)
template: (str) path to template used for the stat map plot
type_of_correction: (str) Can be either FWE or FDR (used only in potential figure titles)
t_thresh: (str) t value threshold used (used only in potential figure titles)
c_thresh: (int) cluster minimal size used (used only in potential figure titles)
n_cuts: (int) number of cuts in fig
Returns:
List of path to image files: glass brain, statmap along x, statmap along y, statmap along z
"""
from nilearn import plotting
import numpy as np
from os.path import abspath
assert type_of_correction in ['FWE', 'FDR'], 'Type of correction must be FWE or FDR'
if not np.isnan(c_thresh):
correction = 'Cluster'
else:
correction = 'Peak'
my_title = correction + ' correction ' + type_of_correction + ' Threshold = ' + str(t_thresh)
if not np.isnan(c_thresh):
my_title = my_title + ' - min cluster size = ' + str(c_thresh),
plotting.plot_glass_brain(nii_file,
output_file='./glass_brain.png')
plotting.plot_stat_map(nii_file,
display_mode='x',
cut_coords=np.linspace(-70, 67, n_cuts),
bg_img=template,
colorbar=False,
draw_cross=True,
output_file='./statmap_x.png')
plotting.plot_stat_map(nii_file,
display_mode='y',
cut_coords=np.linspace(-104, 69, n_cuts),
bg_img=template,
colorbar=False,
draw_cross=True,
output_file='./statmap_y.png')
plotting.plot_stat_map(nii_file,
display_mode='z',
cut_coords=np.linspace(-45, 78, n_cuts),
bg_img=template,
colorbar=False,
draw_cross=True,
output_file='./statmap_z.png')
return [abspath('./glass_brain.png'),
abspath('./statmap_x.png'),
abspath('./statmap_y.png'),
abspath('./statmap_z.png')]
def generate_output(t_map, figs, name):
"""
Produce output
Args:
t_map: (str) path to t-map on which whole pipeline was based
figs: (list of str) paths to figs to save
name: (str) name of the correction (ex: cluster_correction_FWE)
Returns:
Nothing
"""
from os import makedirs
from os.path import join, dirname, basename, splitext
from shutil import copyfile
# Will extract group-GroupTest_AD-lt-CN_measure-fdg_fwhm-8_TStatistics from TStatistics file
t_map_basename = splitext(basename(t_map))[0]
out_folder = join(dirname(t_map), t_map_basename.replace('TStatistics', name))
makedirs(out_folder)
copyfile(figs[0], join(out_folder, t_map_basename.replace('TStatistics', 'desc-' + name + '_GlassBrain.png')))
copyfile(figs[1], join(out_folder, t_map_basename.replace('TStatistics', 'desc-' + name + '_axis-x_TStatistics.png')))
copyfile(figs[2], join(out_folder, t_map_basename.replace('TStatistics', 'desc-' + name + '_axis-y_TStatistics.png')))
copyfile(figs[3], join(out_folder, t_map_basename.replace('TStatistics', 'desc-' + name + '_axis-z_TStatistics.png')))
| [
"nibabel.save",
"os.makedirs",
"nibabel.load",
"nilearn.plotting.plot_glass_brain",
"scipy.ndimage.measurements.label",
"os.path.dirname",
"numpy.sum",
"numpy.linspace",
"numpy.isnan",
"os.path.basename",
"nibabel.Nifti1Image",
"os.path.abspath"
] | [((599, 614), 'nibabel.load', 'nib.load', (['t_map'], {}), '(t_map)\n', (607, 614), True, 'import nibabel as nib\n'), ((700, 786), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['data'], {'affine': 'original_nifti.affine', 'header': 'original_nifti.header'}), '(data, affine=original_nifti.affine, header=original_nifti.\n header)\n', (715, 786), True, 'import nibabel as nib\n'), ((929, 957), 'nibabel.save', 'nib.save', (['new_data', 'filename'], {}), '(new_data, filename)\n', (937, 957), True, 'import nibabel as nib\n'), ((969, 986), 'os.path.abspath', 'abspath', (['filename'], {}), '(filename)\n', (976, 986), False, 'from os.path import abspath\n'), ((1687, 1702), 'nibabel.load', 'nib.load', (['t_map'], {}), '(t_map)\n', (1695, 1702), True, 'import nibabel as nib\n'), ((1803, 1814), 'scipy.ndimage.measurements.label', 'label', (['data'], {}), '(data)\n', (1808, 1814), False, 'from scipy.ndimage.measurements import label\n'), ((2082, 2168), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['data'], {'affine': 'original_nifti.affine', 'header': 'original_nifti.header'}), '(data, affine=original_nifti.affine, header=original_nifti.\n header)\n', (2097, 2168), True, 'import nibabel as nib\n'), ((2337, 2365), 'nibabel.save', 'nib.save', (['new_data', 'filename'], {}), '(new_data, filename)\n', (2345, 2365), True, 'import nibabel as nib\n'), ((2377, 2394), 'os.path.abspath', 'abspath', (['filename'], {}), '(filename)\n', (2384, 2394), False, 'from os.path import abspath\n'), ((3605, 3673), 'nilearn.plotting.plot_glass_brain', 'plotting.plot_glass_brain', (['nii_file'], {'output_file': '"""./glass_brain.png"""'}), "(nii_file, output_file='./glass_brain.png')\n", (3630, 3673), False, 'from nilearn import plotting\n'), ((5554, 5574), 'os.makedirs', 'makedirs', (['out_folder'], {}), '(out_folder)\n', (5562, 5574), False, 'from os import makedirs\n'), ((3309, 3327), 'numpy.isnan', 'np.isnan', (['c_thresh'], {}), '(c_thresh)\n', (3317, 3327), True, 'import numpy as np\n'), ((3508, 
3526), 'numpy.isnan', 'np.isnan', (['c_thresh'], {}), '(c_thresh)\n', (3516, 3526), True, 'import numpy as np\n'), ((4738, 4766), 'os.path.abspath', 'abspath', (['"""./glass_brain.png"""'], {}), "('./glass_brain.png')\n", (4745, 4766), False, 'from os.path import abspath\n'), ((4780, 4806), 'os.path.abspath', 'abspath', (['"""./statmap_x.png"""'], {}), "('./statmap_x.png')\n", (4787, 4806), False, 'from os.path import abspath\n'), ((4820, 4846), 'os.path.abspath', 'abspath', (['"""./statmap_y.png"""'], {}), "('./statmap_y.png')\n", (4827, 4846), False, 'from os.path import abspath\n'), ((4860, 4886), 'os.path.abspath', 'abspath', (['"""./statmap_z.png"""'], {}), "('./statmap_z.png')\n", (4867, 4886), False, 'from os.path import abspath\n'), ((5489, 5503), 'os.path.dirname', 'dirname', (['t_map'], {}), '(t_map)\n', (5496, 5503), False, 'from os.path import join, dirname, basename, splitext\n'), ((1867, 1892), 'numpy.sum', 'np.sum', (['(labeled_mask == i)'], {}), '(labeled_mask == i)\n', (1873, 1892), True, 'import numpy as np\n'), ((3825, 3853), 'numpy.linspace', 'np.linspace', (['(-70)', '(67)', 'n_cuts'], {}), '(-70, 67, n_cuts)\n', (3836, 3853), True, 'import numpy as np\n'), ((4165, 4194), 'numpy.linspace', 'np.linspace', (['(-104)', '(69)', 'n_cuts'], {}), '(-104, 69, n_cuts)\n', (4176, 4194), True, 'import numpy as np\n'), ((4506, 4534), 'numpy.linspace', 'np.linspace', (['(-45)', '(78)', 'n_cuts'], {}), '(-45, 78, n_cuts)\n', (4517, 4534), True, 'import numpy as np\n'), ((5446, 5461), 'os.path.basename', 'basename', (['t_map'], {}), '(t_map)\n', (5454, 5461), False, 'from os.path import join, dirname, basename, splitext\n'), ((908, 923), 'os.path.basename', 'basename', (['t_map'], {}), '(t_map)\n', (916, 923), False, 'from os.path import join, dirname, basename, splitext\n'), ((2316, 2331), 'os.path.basename', 'basename', (['t_map'], {}), '(t_map)\n', (2324, 2331), False, 'from os.path import join, dirname, basename, splitext\n'), ((1977, 2002), 'numpy.sum', 
'np.sum', (['(labeled_mask == i)'], {}), '(labeled_mask == i)\n', (1983, 2002), True, 'import numpy as np\n')] |
"""
conduct stereo matching based on trained model + a series of post-processing
"""
import os
import util
import time
import cv2
import numpy as np
import tensorflow as tf
import argparse
from datetime import datetime
from tqdm import tqdm
from process_functional import *
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="stereo matching based on trained model and post-processing")
parser.add_argument("-g", "--gpu", type=str, default="0", help="gpu id to use, \
multiple ids should be separated by commons(e.g. 0,1,2,3)")
parser.add_argument("-ps", "--patch_size", type=int, default=11, help="length for height/width of square patch")
parser.add_argument("--list_file", type=str, required=True, help="path to file containing left image list")
parser.add_argument("--resume", type=str, default=None, help="path to checkpoint to resume from. \
if None(default), model is initialized using default methods")
parser.add_argument("--data_dir", type=str, required=True, help="path to root dir to data.")
parser.add_argument("--save_dir", type=str, required=True, help="path to root dir to save results")
parser.add_argument("-t", "--tag", type=str, required=True, help="tag used to indicate one run")
parser.add_argument("-s", "--start", type=int, required=True, help="index of first image to do matching,\
this is used for parallel matching of different images")
parser.add_argument("-e", "--end", type=int, required=True, help="index of last image to do matching")
# hyperparemeters, use suggested value from origin paper as default
parser.add_argument("--cbca_intensity", type=float, default=0.02, help="intensity threshold for cross-based cost aggregation")
parser.add_argument("--cbca_distance", type=float, default=14, help="distance threshold for cross-based cost aggregation")
parser.add_argument("--cbca_num_iterations1", type=float, default=2, help="distance threshold for cross-based cost aggregation")
parser.add_argument("--cbca_num_iterations2", type=float, default=16, help="distance threshold for cross-based cost aggregation")
parser.add_argument("--sgm_P1", type=float, default=2.3, help="hyperparemeter used in semi-global matching")
parser.add_argument("--sgm_P2", type=float, default=55.9, help="hyperparemeter used in semi-global matching")
parser.add_argument("--sgm_Q1", type=float, default=4, help="hyperparemeter used in semi-global matching")
parser.add_argument("--sgm_Q2", type=float, default=8, help="hyperparemeter used in semi-global matching")
parser.add_argument("--sgm_D", type=float, default=0.08, help="hyperparemeter used in semi-global matching")
parser.add_argument("--sgm_V", type=float, default=1.5, help="hyperparemeter used in semi-global matching")
parser.add_argument("--blur_sigma", type=float, default=6, help="hyperparemeter used in bilateral filter")
parser.add_argument("--blur_threshold", type=float, default=2, help="hyperparemeter used in bilateral filter")
# different file names
left_image_suffix = "im0.png"
left_gt_suffix = "disp0GT.pfm"
right_image_suffix = "im1.png"
right_gt_suffix = "disp1GT.pfm"
calib_suffix = "calib.txt"
out_file = "disp0MCCNN.pfm"
out_img_file = "disp0MCCNN.pgm"
out_time_file = "timeMCCNN.txt"
def main():
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
patch_height = args.patch_size
patch_width = args.patch_size
######################
left_image_list = args.list_file
save_dir = args.save_dir
data_dir = args.data_dir
save_res_dir = os.path.join(save_dir, "submit_{}".format(args.tag))
save_img_dir = os.path.join(save_dir, "submit_{}_imgs".format(args.tag))
util.testMk(save_res_dir)
util.testMk(save_img_dir)
index = 0
start = args.start
end = args.end
with open(left_image_list, "r") as i:
img_paths = i.readlines()
####################
# do matching
for left_path in tqdm(img_paths):
print("index: ".format(index))
if index < start:
index += 1
print("passed")
continue
if index > end:
break
index += 1
# get data path
left_path = left_path.strip()
right_path = left_path.replace(left_image_suffix, right_image_suffix)
calib_path = left_path.replace(left_image_suffix, calib_suffix)
# generate output path
res_dir = left_path.replace(data_dir, save_res_dir)
img_dir = left_path.replace(data_dir, save_img_dir)
res_dir = res_dir[:res_dir.rfind(left_image_suffix)-1]
img_dir = img_dir[:img_dir.rfind(left_image_suffix)-1]
util.recurMk(res_dir)
util.recurMk(img_dir)
out_path = os.path.join(res_dir, out_file)
out_time_path = os.path.join(res_dir, out_time_file)
out_img_path = os.path.join(img_dir, out_img_file)
height, width, ndisp = util.parseCalib(calib_path)
print("left_image: {}\nright_image: {}".format(left_path, right_path))
print("height: {}, width: {}, ndisp: {}".format(height, width, ndisp))
print("out_path: {}\nout_time_path: {}\nout_img_path: {}".format(out_path, out_time_path, out_img_path))
# reading images
left_image = cv2.imread(left_path, cv2.IMREAD_GRAYSCALE).astype(np.float32)
right_image = cv2.imread(right_path, cv2.IMREAD_GRAYSCALE).astype(np.float32)
left_image = (left_image - np.mean(left_image, axis=(0, 1))) / np.std(left_image, axis=(0, 1))
right_image = (right_image - np.mean(right_image, axis=(0, 1))) / np.std(right_image, axis=(0, 1))
left_image = np.expand_dims(left_image, axis=2)
right_image = np.expand_dims(right_image, axis=2)
assert left_image.shape == (height, width, 1)
assert right_image.shape == (height, width, 1)
print("{}: images read".format(datetime.now()))
# start timer for time file
stTime = time.time()
# compute features
left_feature, right_feature = compute_features(left_image, right_image, patch_height, patch_width, args.resume)
print(left_feature.shape)
print("{}: features computed".format(datetime.now()))
# form cost-volume
left_cost_volume, right_cost_volume = compute_cost_volume(left_feature, right_feature, ndisp)
print("{}: cost-volume computed".format(datetime.now()))
# cost-volume aggregation
print("{}: begin cost-volume aggregation. This could take long".format(datetime.now()))
left_cost_volume, right_cost_volume = cost_volume_aggregation(left_image, right_image,left_cost_volume,right_cost_volume,\
args.cbca_intensity, args.cbca_distance, args.cbca_num_iterations1)
print("{}: cost-volume aggregated".format(datetime.now()))
# semi-global matching
print("{}: begin semi-global matching. This could take long".format(datetime.now()))
left_cost_volume, right_cost_volume = SGM_average(left_cost_volume, right_cost_volume, left_image, right_image, \
args.sgm_P1, args.sgm_P2, args.sgm_Q1, args.sgm_Q2, args.sgm_D, args.sgm_V)
print("{}: semi-global matched".format(datetime.now()))
# cost-volume aggregation afterhand
print("{}: begin cost-volume aggregation. This could take long".format(datetime.now()))
left_cost_volume, right_cost_volume = cost_volume_aggregation(left_image, right_image,left_cost_volume,right_cost_volume,\
args.cbca_intensity, args.cbca_distance, args.cbca_num_iterations2)
print("{}: cost-volume aggregated".format(datetime.now()))
# disparity map making
left_disparity_map, right_disparity_map = disparity_prediction(left_cost_volume, right_cost_volume)
print("{}: disparity predicted".format(datetime.now()))
# interpolation
left_disparity_map = interpolation(left_disparity_map, right_disparity_map, ndisp)
print("{}: disparity interpolated".format(datetime.now()))
# subpixel enhancement
left_disparity_map = subpixel_enhance(left_disparity_map, left_cost_volume)
print("{}: subpixel enhanced".format(datetime.now()))
# refinement
# 5*5 median filter
left_disparity_map = median_filter(left_disparity_map, 5, 5)
# bilateral filter
left_disparity_map = bilateral_filter(left_image, left_disparity_map, 5, 5, 0, args.blur_sigma, args.blur_threshold)
print("{}: refined".format(datetime.now()))
# end timer
endTime = time.time()
# save as pgm and pfm
util.saveDisparity(left_disparity_map, out_img_path)
util.writePfm(left_disparity_map, out_path)
util.saveTimeFile(endTime-stTime, out_time_path)
print("{}: saved".format(datetime.now()))
if __name__ == "__main__":
main()
| [
"numpy.mean",
"argparse.ArgumentParser",
"util.saveDisparity",
"tqdm.tqdm",
"os.path.join",
"util.writePfm",
"util.saveTimeFile",
"datetime.datetime.now",
"numpy.expand_dims",
"numpy.std",
"util.parseCalib",
"time.time",
"util.recurMk",
"util.testMk",
"cv2.imread"
] | [((288, 451), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""stereo matching based on trained model and post-processing"""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'stereo matching based on trained model and post-processing')\n", (311, 451), False, 'import argparse\n'), ((3820, 3845), 'util.testMk', 'util.testMk', (['save_res_dir'], {}), '(save_res_dir)\n', (3831, 3845), False, 'import util\n'), ((3850, 3875), 'util.testMk', 'util.testMk', (['save_img_dir'], {}), '(save_img_dir)\n', (3861, 3875), False, 'import util\n'), ((4075, 4090), 'tqdm.tqdm', 'tqdm', (['img_paths'], {}), '(img_paths)\n', (4079, 4090), False, 'from tqdm import tqdm\n'), ((4799, 4820), 'util.recurMk', 'util.recurMk', (['res_dir'], {}), '(res_dir)\n', (4811, 4820), False, 'import util\n'), ((4829, 4850), 'util.recurMk', 'util.recurMk', (['img_dir'], {}), '(img_dir)\n', (4841, 4850), False, 'import util\n'), ((4871, 4902), 'os.path.join', 'os.path.join', (['res_dir', 'out_file'], {}), '(res_dir, out_file)\n', (4883, 4902), False, 'import os\n'), ((4927, 4963), 'os.path.join', 'os.path.join', (['res_dir', 'out_time_file'], {}), '(res_dir, out_time_file)\n', (4939, 4963), False, 'import os\n'), ((4987, 5022), 'os.path.join', 'os.path.join', (['img_dir', 'out_img_file'], {}), '(img_dir, out_img_file)\n', (4999, 5022), False, 'import os\n'), ((5055, 5082), 'util.parseCalib', 'util.parseCalib', (['calib_path'], {}), '(calib_path)\n', (5070, 5082), False, 'import util\n'), ((5789, 5823), 'numpy.expand_dims', 'np.expand_dims', (['left_image'], {'axis': '(2)'}), '(left_image, axis=2)\n', (5803, 5823), True, 'import numpy as np\n'), ((5846, 5881), 'numpy.expand_dims', 'np.expand_dims', (['right_image'], {'axis': '(2)'}), '(right_image, axis=2)\n', (5860, 5881), True, 'import numpy as np\n'), ((6101, 6112), 'time.time', 'time.time', ([], {}), '()\n', (6110, 6112), False, 
'import time\n'), ((8854, 8865), 'time.time', 'time.time', ([], {}), '()\n', (8863, 8865), False, 'import time\n'), ((8905, 8957), 'util.saveDisparity', 'util.saveDisparity', (['left_disparity_map', 'out_img_path'], {}), '(left_disparity_map, out_img_path)\n', (8923, 8957), False, 'import util\n'), ((8966, 9009), 'util.writePfm', 'util.writePfm', (['left_disparity_map', 'out_path'], {}), '(left_disparity_map, out_path)\n', (8979, 9009), False, 'import util\n'), ((9018, 9068), 'util.saveTimeFile', 'util.saveTimeFile', (['(endTime - stTime)', 'out_time_path'], {}), '(endTime - stTime, out_time_path)\n', (9035, 9068), False, 'import util\n'), ((5629, 5660), 'numpy.std', 'np.std', (['left_image'], {'axis': '(0, 1)'}), '(left_image, axis=(0, 1))\n', (5635, 5660), True, 'import numpy as np\n'), ((5735, 5767), 'numpy.std', 'np.std', (['right_image'], {'axis': '(0, 1)'}), '(right_image, axis=(0, 1))\n', (5741, 5767), True, 'import numpy as np\n'), ((5409, 5452), 'cv2.imread', 'cv2.imread', (['left_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(left_path, cv2.IMREAD_GRAYSCALE)\n', (5419, 5452), False, 'import cv2\n'), ((5494, 5538), 'cv2.imread', 'cv2.imread', (['right_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(right_path, cv2.IMREAD_GRAYSCALE)\n', (5504, 5538), False, 'import cv2\n'), ((5593, 5625), 'numpy.mean', 'np.mean', (['left_image'], {'axis': '(0, 1)'}), '(left_image, axis=(0, 1))\n', (5600, 5625), True, 'import numpy as np\n'), ((5698, 5731), 'numpy.mean', 'np.mean', (['right_image'], {'axis': '(0, 1)'}), '(right_image, axis=(0, 1))\n', (5705, 5731), True, 'import numpy as np\n'), ((6030, 6044), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6042, 6044), False, 'from datetime import datetime\n'), ((6340, 6354), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6352, 6354), False, 'from datetime import datetime\n'), ((6535, 6549), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6547, 6549), False, 'from datetime import datetime\n'), ((6666, 
6680), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6678, 6680), False, 'from datetime import datetime\n'), ((6996, 7010), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7008, 7010), False, 'from datetime import datetime\n'), ((7121, 7135), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7133, 7135), False, 'from datetime import datetime\n'), ((7436, 7450), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7448, 7450), False, 'from datetime import datetime\n'), ((7577, 7591), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7589, 7591), False, 'from datetime import datetime\n'), ((7907, 7921), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7919, 7921), False, 'from datetime import datetime\n'), ((8112, 8126), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8124, 8126), False, 'from datetime import datetime\n'), ((8295, 8309), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8307, 8309), False, 'from datetime import datetime\n'), ((8473, 8487), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8485, 8487), False, 'from datetime import datetime\n'), ((8798, 8812), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8810, 8812), False, 'from datetime import datetime\n'), ((9100, 9114), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9112, 9114), False, 'from datetime import datetime\n')] |
"""
Merge predictions
"""
from __future__ import division
from shapely.geometry import MultiPolygon
from pylab import *
import pandas as pd
rcParams['figure.figsize'] = 20, 20
def stretch_8bit(bands, lower_percent=2, higher_percent=98):
out = np.zeros_like(bands).astype(np.float32)
for i in range(3):
a = 0
b = 1
c = np.percentile(bands[:, :, i], lower_percent)
d = np.percentile(bands[:, :, i], higher_percent)
t = a + (bands[:, :, i] - c) * (b - a) / (d - c)
t[t < a] = a
t[t > b] = b
out[:, :, i] = t
return out.astype(np.float32)
import pandas as pd
import numpy as np
from shapely.wkt import loads as wkt_loads
from matplotlib.patches import Polygon, Patch
# decartes package makes plotting with holes much easier
from descartes.patch import PolygonPatch
import matplotlib.pyplot as plt
import tifffile as tiff
import pylab
# turn interactive mode on so that plots immediately
# See: http://stackoverflow.com/questions/2130913/no-plot-window-in-matplotlib
# pylab.ion()
inDir = '../data'
# Give short names, sensible colors and zorders to object types
CLASSES = {
1: 'Bldg',
2: 'Struct',
3: 'Road',
4: 'Track',
5: 'Trees',
6: 'Crops',
7: 'Fast H20',
8: 'Slow H20',
9: 'Truck',
10: 'Car',
}
COLORS = {
1: '0.7',
2: '0.4',
3: '#b35806',
4: '#dfc27d',
5: '#1b7837',
6: '#a6dba0',
7: '#74add1',
8: '#4575b4',
9: '#f46d43',
10: '#d73027',
}
ZORDER = {
1: 5,
2: 5,
3: 4,
4: 1,
5: 3,
6: 2,
7: 7,
8: 8,
9: 9,
10: 10,
}
# read the training data from train_wkt_v4.csv
df = pd.read_csv('cleaned_joined.csv')
# df = pd.read_csv('joined.csv')
# df = df[df['ImageId'].isin(['6140_4_0', '6030_2_3', '6170_2_1', '6160_0_4', '6160_2_4', '6170_4_1'])]
# df = pd.read_csv('../submissions/temp_sub.csv')
print(df.head())
# grid size will also be needed later..
gs = pd.read_csv(inDir + '/grid_sizes.csv', names=['ImageId', 'Xmax', 'Ymin'], skiprows=1)
print(gs.head())
# imageIds in a DataFrame
allImageIds = gs.ImageId.unique()
trainImageIds = df.ImageId.unique()
def get_image_names(imageId):
'''
Get the names of the tiff files
'''
d = {'3': '{}/three_band/{}.tif'.format(inDir, imageId),
'A': '{}/sixteen_band/{}_A.tif'.format(inDir, imageId),
'M': '{}/sixteen_band/{}_M.tif'.format(inDir, imageId),
'P': '{}/sixteen_band/{}_P.tif'.format(inDir, imageId),
}
return d
def get_images(imageId, img_key=None):
'''
Load images correspoding to imageId
Parameters
----------
imageId : str
imageId as used in grid_size.csv
img_key : {None, '3', 'A', 'M', 'P'}, optional
Specify this to load single image
None loads all images and returns in a dict
'3' loads image from three_band/
'A' loads '_A' image from sixteen_band/
'M' loads '_M' image from sixteen_band/
'P' loads '_P' image from sixteen_band/
Returns
-------
images : dict
A dict of image data from TIFF files as numpy array
'''
img_names = get_image_names(imageId)
images = dict()
if img_key is None:
for k in img_names.keys():
images[k] = tiff.imread(img_names[k])
else:
images[img_key] = tiff.imread(img_names[img_key])
return images
def get_size(imageId):
"""
Get the grid size of the image
Parameters
----------
imageId : str
imageId as used in grid_size.csv
"""
xmax, ymin = gs[gs.ImageId == imageId].iloc[0, 1:].astype(float)
W, H = get_images(imageId, '3')['3'].shape[1:]
return (xmax, ymin, W, H)
def is_training_image(imageId):
'''
Returns
-------
is_training_image : bool
True if imageId belongs to training data
'''
return any(trainImageIds == imageId)
def plot_polygons(fig, ax, polygonsList):
'''
Plot descrates.PolygonPatch from list of polygons objs for each CLASS
'''
legend_patches = []
for cType in polygonsList:
print('{} : {} \tcount = {}'.format(cType, CLASSES[cType], len(polygonsList[cType])))
legend_patches.append(Patch(color=COLORS[cType],
label='{} ({})'.format(CLASSES[cType], len(polygonsList[cType]))))
for polygon in polygonsList[cType]:
mpl_poly = PolygonPatch(polygon,
color=COLORS[cType],
lw=0,
alpha=0.7,
zorder=ZORDER[cType])
ax.add_patch(mpl_poly)
# ax.relim()
ax.autoscale_view()
ax.set_title('Objects')
ax.set_xticks([])
ax.set_yticks([])
return legend_patches
def stretch_n(bands, lower_percent=2, higher_percent=98):
out = np.zeros_like(bands).astype(np.float32)
n = bands.shape[0]
for i in range(n):
a = 0 # np.min(band)
b = 1 # np.max(band)
c = np.percentile(bands[i, :, :], lower_percent)
d = np.percentile(bands[i, :, :], higher_percent)
t = a + (bands[i, :, :] - c) * (b - a) / (d - c)
t[t < a] = a
t[t > b] = b
out[i, :, :] = t
return out
real_test_ids = ['6080_4_4', '6080_4_1', '6010_0_1', '6150_3_4', '6020_0_4', '6020_4_3',
'6150_4_3', '6070_3_4', '6020_1_3', '6060_1_4', '6050_4_4', '6110_2_3',
'6060_4_1', '6100_2_4', '6050_3_3', '6100_0_2', '6060_0_0', '6060_0_1',
'6060_0_3', '6060_2_0', '6120_1_4', '6160_1_4', '6120_3_3', '6140_2_3',
'6090_3_2', '6090_3_4', '6170_4_4', '6120_4_4', '6030_1_4', '6120_0_2',
'6030_1_2', '6160_0_0']
# real_test_ids = [
# '6080_4_4',
# # '6080_4_1', '6010_0_1',
# '6150_3_4',
# # '6020_0_4', '6020_4_3',
# # '6150_4_3', '6070_3_4', '6020_1_3', '6060_1_4', '6050_4_4', '6110_2_3',
# # '6060_4_1', '6100_2_4', '6050_3_3',
# '6100_0_2',
# # '6060_0_0', '6060_0_1',
# # '6060_0_3', '6060_2_0', '6120_1_4', '6160_1_4', '6120_3_3', '6140_2_3',
# # '6090_3_2', '6090_3_4', '6170_4_4', '6120_4_4', '6030_1_4', '6120_0_2',
# # '6030_1_2', '6160_0_0'
# ]
def plot_image(fig, ax, imageId, img_key, selected_channels=None):
'''
Plot get_images(imageId)[image_key] on axis/fig
Optional: select which channels of the image are used (used for sixteen_band/ images)
Parameters
----------
img_key : str, {'3', 'P', 'N', 'A'}
See get_images for description.
'''
images = get_images(imageId, img_key)
img = images[img_key]
title_suffix = ''
if selected_channels is not None:
img = img[selected_channels]
title_suffix = ' (' + ','.join([repr(i) for i in selected_channels]) + ')'
if len(img.shape) == 2:
new_img = np.zeros((3, img.shape[0], img.shape[1]))
new_img[0] = img
new_img[1] = img
new_img[2] = img
img = new_img
tiff.imshow(stretch_n(img), figure=fig, subplot=ax)
ax.set_title(imageId + ' - ' + img_key + title_suffix)
ax.set_xlabel(img.shape[-2])
ax.set_ylabel(img.shape[-1])
ax.set_xticks([])
ax.set_yticks([])
def visualize_image(imageId, plot_all=True):
'''
Plot all images and object-polygons
Parameters
----------
imageId : str
imageId as used in grid_size.csv
plot_all : bool, True by default
If True, plots all images (from three_band/ and sixteen_band/) as subplots.
Otherwise, only plots Polygons.
'''
df_image = df[df.ImageId == imageId]
xmax, ymin, W, H = get_size(imageId)
if plot_all:
fig, axArr = plt.subplots(figsize=(60, 30), ncols=2)
ax = axArr[0]
else:
fig, axArr = plt.subplots(figsize=(20, 20))
ax = axArr
if is_training_image(imageId):
print('ImageId : {}'.format(imageId))
polygonsList = {}
for cType in CLASSES.keys():
all_polygons = wkt_loads(df_image[df_image.ClassType == cType].MultipolygonWKT.values[0])
if all_polygons.type == 'Polygon':
all_polygons = MultiPolygon([all_polygons])
polygonsList[cType] = all_polygons
legend_patches = plot_polygons(fig, ax, polygonsList)
ax.set_xlim(0, xmax)
ax.set_ylim(ymin, 0)
ax.set_xlabel(xmax)
ax.set_ylabel(ymin)
if plot_all:
plot_image(fig, axArr[1], imageId, '3')
# plot_image(fig, axArr[0][2], imageId, 'P')
# plot_image(fig, axArr[1][0], imageId, 'A', [0, 3, 6])
# plot_image(fig, axArr[1][1], imageId, 'A', [1, 4, 7])
# plot_image(fig, axArr[1][2], imageId, 'A', [2, 5, 0])
# plot_image(fig, axArr[2][0], imageId, 'M', [0, 3, 6])
# plot_image(fig, axArr[2][1], imageId, 'M', [1, 4, 7])
# plot_image(fig, axArr[2][2], imageId, 'M', [2, 5, 0])
if is_training_image(imageId):
ax.legend(handles=legend_patches,
# loc='upper center',
bbox_to_anchor=(0.9, 1),
bbox_transform=plt.gcf().transFigure,
ncol=5,
fontsize='xx-large',
title='Objects-' + imageId,
# mode="expand",
framealpha=0.3)
return (fig, axArr, ax)
# Loop over few training images and save to files
for imageId in real_test_ids:
fig, axArr, ax = visualize_image(imageId, plot_all=True)
plt.tight_layout()
plt.savefig('predictions/Objects--' + imageId + '.png')
plt.clf()
| [
"matplotlib.pyplot.savefig",
"tifffile.imread",
"pandas.read_csv",
"shapely.wkt.loads",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.clf",
"descartes.patch.PolygonPatch",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"numpy.percentile",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"sh... | [((1682, 1715), 'pandas.read_csv', 'pd.read_csv', (['"""cleaned_joined.csv"""'], {}), "('cleaned_joined.csv')\n", (1693, 1715), True, 'import pandas as pd\n'), ((1966, 2055), 'pandas.read_csv', 'pd.read_csv', (["(inDir + '/grid_sizes.csv')"], {'names': "['ImageId', 'Xmax', 'Ymin']", 'skiprows': '(1)'}), "(inDir + '/grid_sizes.csv', names=['ImageId', 'Xmax', 'Ymin'],\n skiprows=1)\n", (1977, 2055), True, 'import pandas as pd\n'), ((9623, 9641), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9639, 9641), True, 'import matplotlib.pyplot as plt\n'), ((9646, 9701), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('predictions/Objects--' + imageId + '.png')"], {}), "('predictions/Objects--' + imageId + '.png')\n", (9657, 9701), True, 'import matplotlib.pyplot as plt\n'), ((9706, 9715), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9713, 9715), True, 'import matplotlib.pyplot as plt\n'), ((356, 400), 'numpy.percentile', 'np.percentile', (['bands[:, :, i]', 'lower_percent'], {}), '(bands[:, :, i], lower_percent)\n', (369, 400), True, 'import numpy as np\n'), ((413, 458), 'numpy.percentile', 'np.percentile', (['bands[:, :, i]', 'higher_percent'], {}), '(bands[:, :, i], higher_percent)\n', (426, 458), True, 'import numpy as np\n'), ((3356, 3387), 'tifffile.imread', 'tiff.imread', (['img_names[img_key]'], {}), '(img_names[img_key])\n', (3367, 3387), True, 'import tifffile as tiff\n'), ((5053, 5097), 'numpy.percentile', 'np.percentile', (['bands[i, :, :]', 'lower_percent'], {}), '(bands[i, :, :], lower_percent)\n', (5066, 5097), True, 'import numpy as np\n'), ((5110, 5155), 'numpy.percentile', 'np.percentile', (['bands[i, :, :]', 'higher_percent'], {}), '(bands[i, :, :], higher_percent)\n', (5123, 5155), True, 'import numpy as np\n'), ((6970, 7011), 'numpy.zeros', 'np.zeros', (['(3, img.shape[0], img.shape[1])'], {}), '((3, img.shape[0], img.shape[1]))\n', (6978, 7011), True, 'import numpy as np\n'), ((7810, 7849), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(60, 30)', 'ncols': '(2)'}), '(figsize=(60, 30), ncols=2)\n', (7822, 7849), True, 'import matplotlib.pyplot as plt\n'), ((7903, 7933), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (7915, 7933), True, 'import matplotlib.pyplot as plt\n'), ((253, 273), 'numpy.zeros_like', 'np.zeros_like', (['bands'], {}), '(bands)\n', (266, 273), True, 'import numpy as np\n'), ((3294, 3319), 'tifffile.imread', 'tiff.imread', (['img_names[k]'], {}), '(img_names[k])\n', (3305, 3319), True, 'import tifffile as tiff\n'), ((4425, 4511), 'descartes.patch.PolygonPatch', 'PolygonPatch', (['polygon'], {'color': 'COLORS[cType]', 'lw': '(0)', 'alpha': '(0.7)', 'zorder': 'ZORDER[cType]'}), '(polygon, color=COLORS[cType], lw=0, alpha=0.7, zorder=ZORDER[\n cType])\n', (4437, 4511), False, 'from descartes.patch import PolygonPatch\n'), ((4895, 4915), 'numpy.zeros_like', 'np.zeros_like', (['bands'], {}), '(bands)\n', (4908, 4915), True, 'import numpy as np\n'), ((8124, 8198), 'shapely.wkt.loads', 'wkt_loads', (['df_image[df_image.ClassType == cType].MultipolygonWKT.values[0]'], {}), '(df_image[df_image.ClassType == cType].MultipolygonWKT.values[0])\n', (8133, 8198), True, 'from shapely.wkt import loads as wkt_loads\n'), ((8277, 8305), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (['[all_polygons]'], {}), '([all_polygons])\n', (8289, 8305), False, 'from shapely.geometry import MultiPolygon\n'), ((9245, 9254), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9252, 9254), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
#-*- encoding:utf-8 -*-
"""
Demonstrate how to create an interactive histogram, in which bars
are hidden or shown by cliking on legend markers.
The interactivity is encoded in ecmascript and inserted in the SVG code
in a post-processing step. To render the image, open it in a web
browser. SVG is supported in most web browsers used by Linux and OSX
users. Windows IE9 supports SVG, but earlier versions do not.
__author__="<EMAIL>"
"""
import numpy as np
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
from StringIO import StringIO
plt.rcParams['svg.embed_char_paths'] = 'none'
# Apparently, this `register_namespace` method works only with
# python 2.7 and up and is necessary to avoid garbling the XML name
# space with ns0.
ET.register_namespace("","http://www.w3.org/2000/svg")
def python2js(d):
"""Return a string representation of a python dictionary in
ecmascript object syntax."""
objs = []
for key, value in d.items():
objs.append( key + ':' + str(value) )
return '{' + ', '.join(objs) + '}'
# --- Create histogram, legend and title ---
plt.figure()
r = np.random.randn(100)
r1 = r + 1
labels = ['Rabbits', 'Frogs']
H = plt.hist([r,r1], label=labels)
containers = H[-1]
leg = plt.legend(frameon=False)
plt.title("""From a web browser, click on the legend
marker to toggle the corresponding histogram.""")
# --- Add ids to the svg objects we'll modify
hist_patches = {}
for ic, c in enumerate(containers):
hist_patches['hist_%d'%ic] = []
for il, element in enumerate(c):
element.set_gid('hist_%d_patch_%d'%(ic, il))
hist_patches['hist_%d'%ic].append('hist_%d_patch_%d'%(ic,il))
# Set ids for the legend patches
for i, t in enumerate(leg.get_patches()):
t.set_gid('leg_patch_%d'%i)
# Save SVG in a fake file object.
f = StringIO()
plt.savefig(f, format="svg")
# Create XML tree from the SVG file.
tree, xmlid = ET.XMLID(f.getvalue())
# --- Add interactivity ---
# Add attributes to the patch objects.
for i, t in enumerate(leg.get_patches()):
el = xmlid['leg_patch_%d'%i]
el.set('cursor', 'pointer')
el.set('opacity', '1.0')
el.set('onclick', "toggle_element(evt, 'hist_%d')"%i)
# Create script defining the function `toggle_element`.
# We create a global variable `container` that stores the patches id
# belonging to each histogram. Then a function "toggle_element" sets the
# visibility attribute of all patches of each histogram and the opacity
# of the marker itself.
script = """
<script type="text/ecmascript">
<![CDATA[
var container = %s
function toggle_element(evt, element) {
var names = container[element]
var el, state;
state = evt.target.getAttribute("opacity") == 1.0 ||
evt.target.getAttribute("opacity") == null;
if (state) {
evt.target.setAttribute("opacity", 0.5);
for (var i=0; i < names.length; i++) {
el = document.getElementById(names[i]);
el.setAttribute("visibility","hidden");
}
}
else {
evt.target.setAttribute("opacity", 1);
for (var i=0; i < names.length; i++) {
el = document.getElementById(names[i]);
el.setAttribute("visibility","visible");
}
};
};
]]>
</script>
"""%python2js(hist_patches)
# Insert the script and save to file.
tree.insert(0, ET.XML(script))
ET.ElementTree(tree).write("svg_histogram.svg")
| [
"StringIO.StringIO",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.savefig",
"xml.etree.ElementTree.XML",
"xml.etree.ElementTree.ElementTree",
"matplotlib.pyplot.figure",
"xml.etree.ElementTree.register_namespace",
"matplotlib.pyplot.title",
"numpy.random.randn",
"matplotlib.pyplot.legend"
] | [((781, 836), 'xml.etree.ElementTree.register_namespace', 'ET.register_namespace', (['""""""', '"""http://www.w3.org/2000/svg"""'], {}), "('', 'http://www.w3.org/2000/svg')\n", (802, 836), True, 'import xml.etree.ElementTree as ET\n'), ((1151, 1163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1161, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1188), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (1183, 1188), True, 'import numpy as np\n'), ((1234, 1265), 'matplotlib.pyplot.hist', 'plt.hist', (['[r, r1]'], {'label': 'labels'}), '([r, r1], label=labels)\n', (1242, 1265), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1315), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (1300, 1315), True, 'import matplotlib.pyplot as plt\n'), ((1316, 1429), 'matplotlib.pyplot.title', 'plt.title', (['"""From a web browser, click on the legend \nmarker to toggle the corresponding histogram."""'], {}), '(\n """From a web browser, click on the legend \nmarker to toggle the corresponding histogram."""\n )\n', (1325, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1889), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (1887, 1889), False, 'from StringIO import StringIO\n'), ((1890, 1918), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f'], {'format': '"""svg"""'}), "(f, format='svg')\n", (1901, 1918), True, 'import matplotlib.pyplot as plt\n'), ((3500, 3514), 'xml.etree.ElementTree.XML', 'ET.XML', (['script'], {}), '(script)\n', (3506, 3514), True, 'import xml.etree.ElementTree as ET\n'), ((3517, 3537), 'xml.etree.ElementTree.ElementTree', 'ET.ElementTree', (['tree'], {}), '(tree)\n', (3531, 3537), True, 'import xml.etree.ElementTree as ET\n')] |
# ==============================================================================
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import pytest
import numpy as np
import cntk
from cntk.contrib.crosstalkcaffe import utils
from cntk.contrib.crosstalkcaffe.unimodel import cntkmodel
from cntk.contrib.crosstalkcaffe.unimodel.cntkinstance import ApiSetup
def _layer_eq(layer, inputs, expected_out):
out = layer(*inputs)
assert (np.squeeze(np.array(out)) == np.squeeze(np.array(expected_out))).all()
def _layer_lt(layer, inputs, expected_out, eps=0.0001):
out = layer(*inputs)
assert (np.squeeze(np.array(out)) - np.squeeze(np.array(expected_out)) < eps).all()
def _install_test_layer(op_type, parameters, weights, input_data):
para_cls_id = 'Cntk' + op_type + 'Parameters'
para_instance = eval('.'.join(('cntkmodel', para_cls_id)))()
for key, value in parameters.items():
setattr(para_instance, key, value)
layer_def = cntkmodel.CntkLayersDefinition()
layer_def.parameters = para_instance
layer_def.op_type = getattr(cntkmodel.CntkLayerType, utils.format.camel_to_snake(op_type))
layer_def.op_name = '_'.join(('test', op_type))
layer_def.parameter_tensor = []
if weights is not None:
for weight in weights:
weight_tensor = cntkmodel.CntkTensorDefinition()
weight_tensor.tensor = np.array(weight).shape
weight_tensor.data = weight
layer_def.parameter_tensor.append(weight_tensor)
inputs_variable = []
for input_tensor in input_data:
inputs_variable.append(cntk.input(input_tensor.shape))
return layer_def, inputs_variable
API_SETUP_CONV_DATA = [
# The test case of conv ops
(
'Convolution',
{
'output': 2,
'stride': [2, 2],
'kernel': [3, 3],
'auto_pad': False,
'need_bias': True,
'group': 1
},
[
[[[[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]]],
[[[10., 11., 12.],
[13., 14., 15.],
[16., 17., 18.]]]],
[[1., 2.]]
],
[
[[[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]]]
],
[
[[[[286.]], [[692.]]]],
],
),
]
@pytest.mark.parametrize("op_type, parameters, weights, input_data, expected_out", API_SETUP_CONV_DATA)
def test_conv_setup(op_type, parameters, weights, input_data, expected_out):
"""
The function to test conv api setup
"""
inputs = [np.array(item, dtype=np.float32) for item in input_data]
outputs = [np.array(item, dtype=np.float32) for item in expected_out]
layer_def, input_variants = _install_test_layer(op_type, parameters, weights, inputs)
layer = getattr(ApiSetup, utils.format.camel_to_snake(op_type))(layer_def, input_variants)
_layer_eq(layer, inputs, outputs)
# API_SETUP_POOLING_DATA = [
# # The test cases of pool ops
# (
# 'Pooling',
# {
# 'stride': [2, 2],
# 'kernel': [2, 2],
# 'auto_pad': False,
# 'pooling_type': 0
# },
# [
# [[[1., 2., 3., 4.],
# [5., 6., 7., 8.],
# [9., 10., 11., 12.],
# [13., 14., 15., 16.]]]
# ],
# [
# [[[[6., 8.],
# [14., 16.]]]]
# ]
# ),
# (
# 'Pooling',
# {
# 'stride': [2, 2],
# 'kernel': [3, 3],
# 'auto_pad': True,
# 'pooling_type': 1
# },
# [
# [[[1., 2., 3., 4.],
# [5., 6., 7., 8.],
# [9., 10., 11., 12.],
# [13., 14., 15., 16.]]]
# ],
# [
# [[[[3.5, 5., 6.],
# [9.5, 11., 12.],
# [13.5, 15, 16.]]]]
# ]
# )
# ]
# @pytest.mark.parametrize("op_type, parameters, input_data, expected_out", API_SETUP_POOLING_DATA)
# def test_pooling_setup(op_type, parameters, input_data, expected_out):
# """
# The function to test pooling api setup
# """
# inputs = [np.array(item, dtype=np.float32) for item in input_data]
# outputs = [np.array(item, dtype=np.float32) for item in expected_out]
# layer_def, input_variants = _install_test_layer(op_type, parameters, None, inputs)
# layer = getattr(ApiSetup, utils.format.camel_to_snake(op_type))(layer_def, input_variants)
# _layer_eq(layer, inputs, outputs)
API_SETUP_BN_DATA = [
(
'BatchNorm',
{
'epsilon': 0
},
[
[[1., 1.]],
[[2., 2.]],
[1],
[[0.5, 0.5]],
[[1., 1.]]
],
[
[[[1., 2.],
[3., 4.]],
[[5., 6.],
[7., 8.]]]
],
[
[[[[1., 1.353553],
[1.707107, 2.06066]],
[[2.414213, 2.76768],
[3.12132, 3.474874]]]]
]
)
]
@pytest.mark.parametrize("op_type, parameters, weights, input_data, expected_out", API_SETUP_BN_DATA)
def test_batch_norm_setup(op_type, parameters, weights, input_data, expected_out):
"""
The function to test batch norm api setup
"""
inputs = [np.array(item, dtype=np.float32) for item in input_data]
outputs = [np.array(item, dtype=np.float32) for item in expected_out]
layer_def, input_variants = _install_test_layer(op_type, parameters, weights, inputs)
layer = getattr(ApiSetup, utils.format.camel_to_snake(op_type))(layer_def, input_variants)
_layer_lt(layer, inputs, outputs)
API_SETUP_DENSE_DATA = [
(
'Dense',
{
'num_output': 2,
},
[
[[1.], [2.]],
[[1.]]
],
[
[[[1, ]]]
],
[
[[[[2.], [3.]]]]
]
)
]
@pytest.mark.parametrize("op_type, parameters, weights, input_data, expected_out", API_SETUP_DENSE_DATA)
def test_dense_setup(op_type, parameters, weights, input_data, expected_out):
"""
The function to test dense api setup
"""
inputs = [np.array(item, dtype=np.float32) for item in input_data]
outputs = [np.array(item, dtype=np.float32) for item in expected_out]
layer_def, input_variants = _install_test_layer(op_type, parameters, weights, inputs)
layer = getattr(ApiSetup, utils.format.camel_to_snake(op_type))(layer_def, input_variants)
_layer_eq(layer, inputs, outputs)
# API_SETUP_LRN_DATA = [
# (
# 'LRN',
# {
# 'kernel_size': 2,
# },
# [
# [[[1., 2.]],
# [[2., 3.]],
# [[3., 4.]],
# [[4., 5.]]]
# ],
# [
# [[[[0.007416, 0.000463]],
# [[0.000342, 0.000022]],
# [[0.000022, 0.000002]],
# [[0.000056, 0.000007]]]]
# ]
# )
# ]
# @pytest.mark.parametrize("op_type, parameters, input_data, expected_out", API_SETUP_LRN_DATA)
# def test_lrn_setup(op_type, parameters, input_data, expected_out):
# """
# The function to test dense api setup
# """
# inputs = [np.array(item, dtype=np.float32) for item in input_data]
# outputs = [np.array(item, dtype=np.float32) for item in expected_out]
# layer_def, input_variants = _install_test_layer(op_type, parameters, None, inputs)
# layer = getattr(ApiSetup, utils.format.camel_to_snake(op_type))(layer_def, input_variants)
# _layer_lt(layer, inputs, outputs)
| [
"cntk.contrib.crosstalkcaffe.unimodel.cntkmodel.CntkLayersDefinition",
"cntk.contrib.crosstalkcaffe.unimodel.cntkmodel.CntkTensorDefinition",
"pytest.mark.parametrize",
"numpy.array",
"cntk.input",
"cntk.contrib.crosstalkcaffe.utils.format.camel_to_snake"
] | [((2530, 2641), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""op_type, parameters, weights, input_data, expected_out"""', 'API_SETUP_CONV_DATA'], {}), "(\n 'op_type, parameters, weights, input_data, expected_out',\n API_SETUP_CONV_DATA)\n", (2553, 2641), False, 'import pytest\n'), ((5269, 5379), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""op_type, parameters, weights, input_data, expected_out"""', 'API_SETUP_BN_DATA'], {}), "(\n 'op_type, parameters, weights, input_data, expected_out', API_SETUP_BN_DATA\n )\n", (5292, 5379), False, 'import pytest\n'), ((6152, 6264), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""op_type, parameters, weights, input_data, expected_out"""', 'API_SETUP_DENSE_DATA'], {}), "(\n 'op_type, parameters, weights, input_data, expected_out',\n API_SETUP_DENSE_DATA)\n", (6175, 6264), False, 'import pytest\n'), ((1148, 1180), 'cntk.contrib.crosstalkcaffe.unimodel.cntkmodel.CntkLayersDefinition', 'cntkmodel.CntkLayersDefinition', ([], {}), '()\n', (1178, 1180), False, 'from cntk.contrib.crosstalkcaffe.unimodel import cntkmodel\n'), ((1279, 1315), 'cntk.contrib.crosstalkcaffe.utils.format.camel_to_snake', 'utils.format.camel_to_snake', (['op_type'], {}), '(op_type)\n', (1306, 1315), False, 'from cntk.contrib.crosstalkcaffe import utils\n'), ((2780, 2812), 'numpy.array', 'np.array', (['item'], {'dtype': 'np.float32'}), '(item, dtype=np.float32)\n', (2788, 2812), True, 'import numpy as np\n'), ((2852, 2884), 'numpy.array', 'np.array', (['item'], {'dtype': 'np.float32'}), '(item, dtype=np.float32)\n', (2860, 2884), True, 'import numpy as np\n'), ((5530, 5562), 'numpy.array', 'np.array', (['item'], {'dtype': 'np.float32'}), '(item, dtype=np.float32)\n', (5538, 5562), True, 'import numpy as np\n'), ((5602, 5634), 'numpy.array', 'np.array', (['item'], {'dtype': 'np.float32'}), '(item, dtype=np.float32)\n', (5610, 5634), True, 'import numpy as np\n'), ((6406, 6438), 'numpy.array', 'np.array', (['item'], 
{'dtype': 'np.float32'}), '(item, dtype=np.float32)\n', (6414, 6438), True, 'import numpy as np\n'), ((6478, 6510), 'numpy.array', 'np.array', (['item'], {'dtype': 'np.float32'}), '(item, dtype=np.float32)\n', (6486, 6510), True, 'import numpy as np\n'), ((1492, 1524), 'cntk.contrib.crosstalkcaffe.unimodel.cntkmodel.CntkTensorDefinition', 'cntkmodel.CntkTensorDefinition', ([], {}), '()\n', (1522, 1524), False, 'from cntk.contrib.crosstalkcaffe.unimodel import cntkmodel\n'), ((1776, 1806), 'cntk.input', 'cntk.input', (['input_tensor.shape'], {}), '(input_tensor.shape)\n', (1786, 1806), False, 'import cntk\n'), ((3031, 3067), 'cntk.contrib.crosstalkcaffe.utils.format.camel_to_snake', 'utils.format.camel_to_snake', (['op_type'], {}), '(op_type)\n', (3058, 3067), False, 'from cntk.contrib.crosstalkcaffe import utils\n'), ((5781, 5817), 'cntk.contrib.crosstalkcaffe.utils.format.camel_to_snake', 'utils.format.camel_to_snake', (['op_type'], {}), '(op_type)\n', (5808, 5817), False, 'from cntk.contrib.crosstalkcaffe import utils\n'), ((6657, 6693), 'cntk.contrib.crosstalkcaffe.utils.format.camel_to_snake', 'utils.format.camel_to_snake', (['op_type'], {}), '(op_type)\n', (6684, 6693), False, 'from cntk.contrib.crosstalkcaffe import utils\n'), ((1560, 1576), 'numpy.array', 'np.array', (['weight'], {}), '(weight)\n', (1568, 1576), True, 'import numpy as np\n'), ((632, 645), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (640, 645), True, 'import numpy as np\n'), ((661, 683), 'numpy.array', 'np.array', (['expected_out'], {}), '(expected_out)\n', (669, 683), True, 'import numpy as np\n'), ((798, 811), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (806, 811), True, 'import numpy as np\n'), ((826, 848), 'numpy.array', 'np.array', (['expected_out'], {}), '(expected_out)\n', (834, 848), True, 'import numpy as np\n')] |
# Adapted from https://github.com/ArrowLuo/CLIP4Clip/blob/668334707c493a4eaee7b4a03b2dae04915ce170/main_task_retrieval.py#L457
import os
import sys
sys.path.append(os.path.dirname(__file__) + os.sep + '../')
import numpy as np
from evaluation.metrics import compute_metrics
from evaluation.metrics import tensor_text_to_video_metrics
from evaluation.metrics import tensor_video_to_text_sim
from utils.utils import parallel_apply
import torch
def _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list):
"""run similarity in one single gpu
Args:
model: CLIP2Video
batch_list_t: id of text embedding
batch_list_v: id of visual embedding
batch_sequence_output_list: batch text embedding
batch_visual_output_list: batch visual embedding
Returns:
sim_matrix: similarity
"""
sim_matrix = []
for idx1, b1 in enumerate(batch_list_t):
input_mask, segment_ids, *_tmp = b1
sequence_output = batch_sequence_output_list[idx1]
each_row = []
for idx2, b2 in enumerate(batch_list_v):
video_mask, *_tmp = b2
visual_output = batch_visual_output_list[idx2]
# calculate the similarity
b1b2_logits, *_tmp = model.get_inference_logits(sequence_output, visual_output, input_mask, video_mask)
b1b2_logits = b1b2_logits.cpu().detach().numpy()
each_row.append(b1b2_logits)
each_row = np.concatenate(tuple(each_row), axis=-1)
sim_matrix.append(each_row)
return sim_matrix
def eval_epoch(model, test_dataloader, device, n_gpu, logger):
"""run similarity in one single gpu
Args:
model: CLIP2Video
test_dataloader: data loader for test
device: device to run model
n_gpu: GPU number
batch_sequence_output_list: batch text embedding
batch_visual_output_list: batch visual embedding
Returns:
R1: rank 1 of text-to-video retrieval
"""
if hasattr(model, 'module'):
model = model.module.to(device)
else:
model = model.to(device)
# if multi_sentence_ == True: compute the similarity with multi-sentences retrieval
multi_sentence_ = False
cut_off_points_, sentence_num_, video_num_ = [], -1, -1
if hasattr(test_dataloader.dataset, 'multi_sentence_per_video') \
and test_dataloader.dataset.multi_sentence_per_video:
multi_sentence_ = True
cut_off_points_ = test_dataloader.dataset.cut_off_points # used to tag the label when calculate the metric
sentence_num_ = test_dataloader.dataset.sentence_num # used to cut the sentence representation
video_num_ = test_dataloader.dataset.video_num # used to cut the video representation
cut_off_points_ = [itm - 1 for itm in cut_off_points_]
if multi_sentence_:
logger.warning("Eval under the multi-sentence per video clip setting.")
logger.warning("sentence num: {}, video num: {}".format(sentence_num_, video_num_))
model.eval()
with torch.no_grad():
batch_list_t = []
batch_list_v = []
batch_sequence_output_list, batch_visual_output_list = [], []
total_video_num = 0
for bid, batch in enumerate(test_dataloader):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, video, video_mask = batch
if multi_sentence_:
# multi-sentences retrieval means: one frame clip has two or more descriptions.
b, *_t = video.shape
sequence_output = model.get_sequence_output(input_ids, segment_ids, input_mask)
batch_sequence_output_list.append(sequence_output)
batch_list_t.append((input_mask, segment_ids,))
s_, e_ = total_video_num, total_video_num + b
filter_inds = [itm - s_ for itm in cut_off_points_ if itm >= s_ and itm < e_]
if len(filter_inds) > 0:
video, video_mask = video[filter_inds, ...], video_mask[filter_inds, ...]
visual_output = model.get_visual_output(video, video_mask)
batch_visual_output_list.append(visual_output)
batch_list_v.append((video_mask,))
total_video_num += b
else:
sequence_output, visual_output = model.get_sequence_visual_output(input_ids, segment_ids, input_mask, video, video_mask)
batch_sequence_output_list.append(sequence_output)
batch_list_t.append((input_mask, segment_ids,))
batch_visual_output_list.append(visual_output)
batch_list_v.append((video_mask,))
print("{}/{}\r".format(bid, len(test_dataloader)), end="")
if n_gpu > 1:
device_ids = list(range(n_gpu))
batch_list_t_splits = []
batch_list_v_splits = []
batch_t_output_splits = []
batch_v_output_splits = []
bacth_len = len(batch_list_t)
split_len = (bacth_len + n_gpu - 1) // n_gpu
# split the pairs for multi-GPU
for dev_id in device_ids:
s_, e_ = dev_id * split_len, (dev_id + 1) * split_len
if dev_id == 0:
batch_list_t_splits.append(batch_list_t[s_:e_])
batch_list_v_splits.append(batch_list_v)
batch_t_output_splits.append(batch_sequence_output_list[s_:e_])
batch_v_output_splits.append(batch_visual_output_list)
else:
devc = torch.device('cuda:{}'.format(str(dev_id)))
devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list_t[s_:e_]]
batch_list_t_splits.append(devc_batch_list)
devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list_v]
batch_list_v_splits.append(devc_batch_list)
if isinstance(batch_sequence_output_list[s_], tuple):
# for multi_output
devc_batch_list = [(b[0].to(devc), b[1].to(devc)) for b in batch_sequence_output_list[s_:e_]]
else:
# for single_output
devc_batch_list = [b.to(devc) for b in batch_sequence_output_list[s_:e_]]
batch_t_output_splits.append(devc_batch_list)
devc_batch_list = [b.to(devc) for b in batch_visual_output_list]
batch_v_output_splits.append(devc_batch_list)
parameters_tuple_list = [(batch_list_t_splits[dev_id], batch_list_v_splits[dev_id],
batch_t_output_splits[dev_id], batch_v_output_splits[dev_id]) for dev_id in device_ids]
# calculate the similarity respectively and concatenate them
parallel_outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids)
sim_matrix = []
for idx in range(len(parallel_outputs)):
sim_matrix += parallel_outputs[idx]
sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
else:
# calculate the similarity in one GPU
sim_matrix = _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list)
sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
R1 = logging_rank(sim_matrix, multi_sentence_, cut_off_points_, logger)
return R1
def logging_rank(sim_matrix, multi_sentence_, cut_off_points_, logger):
"""run similarity in one single gpu
Args:
sim_matrix: similarity matrix
multi_sentence_: indicate whether the multi sentence retrieval
cut_off_points_: tag the label when calculate the metric
logger: logger for metric
Returns:
R1: rank 1 of text-to-video retrieval
"""
if multi_sentence_:
# if adopting multi-sequence retrieval, the similarity matrix should be reshaped
logger.info("before reshape, sim matrix size: {} x {}".format(sim_matrix.shape[0], sim_matrix.shape[1]))
cut_off_points2len_ = [itm + 1 for itm in cut_off_points_]
max_length = max([e_-s_ for s_, e_ in zip([0]+cut_off_points2len_[:-1], cut_off_points2len_)])
sim_matrix_new = []
for s_, e_ in zip([0] + cut_off_points2len_[:-1], cut_off_points2len_):
sim_matrix_new.append(np.concatenate((sim_matrix[s_:e_],
np.full((max_length-e_+s_, sim_matrix.shape[1]), -np.inf)), axis=0))
sim_matrix = np.stack(tuple(sim_matrix_new), axis=0)
logger.info("after reshape, sim matrix size: {} x {} x {}".
format(sim_matrix.shape[0], sim_matrix.shape[1], sim_matrix.shape[2]))
# compute text-to-video retrieval
tv_metrics = tensor_text_to_video_metrics(sim_matrix)
# compute video-to-text retrieval
vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix))
else:
logger.info("sim matrix size: {}, {}".format(sim_matrix.shape[0], sim_matrix.shape[1]))
# compute text-to-video retrieval
tv_metrics = compute_metrics(sim_matrix)
# compute video-to-text retrieval
vt_metrics = compute_metrics(sim_matrix.T)
logger.info('\t Length-T: {}, Length-V:{}'.format(len(sim_matrix), len(sim_matrix[0])))
# logging the result of text-to-video retrieval
logger.info("Text-to-Video:")
logger.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'.
format(tv_metrics['R1'], tv_metrics['R5'], tv_metrics['R10'], tv_metrics['MR'], tv_metrics['MeanR']))
# logging the result of video-to-text retrieval
logger.info("Video-to-Text:")
logger.info(
'\t>>> V2T$R@1: {:.1f} - V2T$R@5: {:.1f} - V2T$R@10: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'.format(
vt_metrics['R1'], vt_metrics['R5'], vt_metrics['R10'], vt_metrics['MR'], vt_metrics['MeanR']))
R1 = tv_metrics['R1']
return R1
| [
"evaluation.metrics.compute_metrics",
"evaluation.metrics.tensor_text_to_video_metrics",
"numpy.full",
"evaluation.metrics.tensor_video_to_text_sim",
"os.path.dirname",
"utils.utils.parallel_apply",
"torch.no_grad"
] | [((3090, 3105), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3103, 3105), False, 'import torch\n'), ((9009, 9049), 'evaluation.metrics.tensor_text_to_video_metrics', 'tensor_text_to_video_metrics', (['sim_matrix'], {}), '(sim_matrix)\n', (9037, 9049), False, 'from evaluation.metrics import tensor_text_to_video_metrics\n'), ((9338, 9365), 'evaluation.metrics.compute_metrics', 'compute_metrics', (['sim_matrix'], {}), '(sim_matrix)\n', (9353, 9365), False, 'from evaluation.metrics import compute_metrics\n'), ((9430, 9459), 'evaluation.metrics.compute_metrics', 'compute_metrics', (['sim_matrix.T'], {}), '(sim_matrix.T)\n', (9445, 9459), False, 'from evaluation.metrics import compute_metrics\n'), ((165, 190), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (180, 190), False, 'import os\n'), ((6991, 7067), 'utils.utils.parallel_apply', 'parallel_apply', (['_run_on_single_gpu', 'model', 'parameters_tuple_list', 'device_ids'], {}), '(_run_on_single_gpu, model, parameters_tuple_list, device_ids)\n', (7005, 7067), False, 'from utils.utils import parallel_apply\n'), ((9130, 9166), 'evaluation.metrics.tensor_video_to_text_sim', 'tensor_video_to_text_sim', (['sim_matrix'], {}), '(sim_matrix)\n', (9154, 9166), False, 'from evaluation.metrics import tensor_video_to_text_sim\n'), ((8656, 8717), 'numpy.full', 'np.full', (['(max_length - e_ + s_, sim_matrix.shape[1])', '(-np.inf)'], {}), '((max_length - e_ + s_, sim_matrix.shape[1]), -np.inf)\n', (8663, 8717), True, 'import numpy as np\n')] |
from pysc2.agents import base_agent
from pysc2.lib import actions, features, units
from DwseoRLAgent import QLearningTable
import pandas as pd
import numpy as np
import random
import time
import os
import math
DATA_FILE = 'rlagent_learning_data_terran'
ACTION_DO_NOTHING = 'donothing'
ACTION_SELECT_SCV = 'selectscv'
ACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'
ACTION_BUILD_BARRACKS = 'buildbarracks'
ACTION_SELECT_BARRACKS = 'selectbarracks'
ACTION_BUILD_MARINE = 'buildmarine'
ACTION_SELECT_ARMY = 'selectarmy'
ACTION_ATTACK = 'attack'
smart_actions = [
ACTION_DO_NOTHING,
ACTION_SELECT_SCV,
ACTION_BUILD_SUPPLY_DEPOT,
ACTION_BUILD_BARRACKS,
ACTION_SELECT_BARRACKS,
ACTION_BUILD_MARINE,
ACTION_SELECT_ARMY,
]
#for mm_x in range(0, 64):
# for mm_y in range(0, 64):
# smart_actions.append(ACTION_ATTACK + '_' + str(mm_x) + '_' + str(mm_y))
for mm_x in range(0, 64):
for mm_y in range(0, 64):
if (mm_x + 1) % 16 == 0 and (mm_y + 1) % 16 == 0:
smart_actions.append(ACTION_ATTACK + '_' + str(mm_x - 8) + '_' + str(mm_y - 8))
KILL_UNIT_REWARD = 0.2
KILL_BUILDING_REWARD = 0.5
class TerranRLAgent(base_agent.BaseAgent):
def __init__(self):
super(TerranRLAgent, self).__init__()
self.base_top_left = None
self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))
self.previous_killed_unit_score = 0
self.previous_killed_building_score = 0
self.previous_action = None
self.previous_state = None
if os.path.isfile(DATA_FILE + '.gz'):
self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')
def transformDistance(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def transformLocation(self, x, y):
if not self.base_top_left:
return [64 - x, 64 - y]
return [x, y]
def getMeanLocation(self, unitList):
sum_x = 0
sum_y = 0
for unit in unitList:
sum_x += unit.x
sum_y += unit.y
mean_x = sum_x / len(unitList)
mean_y = sum_y / len(unitList)
return [mean_x, mean_y]
def unit_type_is_selected(self, obs, unit_type):
if (len(obs.observation.single_select) > 0 and
obs.observation.single_select[0].unit_type == unit_type):
return True
if (len(obs.observation.multi_select) > 0 and
obs.observation.multi_select[0].unit_type == unit_type):
return True
return False
def get_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.feature_units
if unit.unit_type == unit_type]
def can_do(self, obs, action):
return action in obs.observation.available_actions
def step(self, obs):
super(TerranRLAgent, self).step(obs)
#time.sleep(0.5)
if obs.last():
self.qlearn.q_table.to_pickle(DATA_FILE + '.gz', 'gzip')
if obs.first():
player_y, player_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.SELF).nonzero()
self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0
supply_depot_count = len(self.get_units_by_type(obs, units.Terran.SupplyDepot))
barracks_count = len(self.get_units_by_type(obs, units.Terran.Barracks))
supply_limit = obs.observation.player.food_cap
army_supply = obs.observation.player.food_used
killed_unit_score = obs.observation.score_cumulative.killed_value_units
killed_building_score = obs.observation.score_cumulative.killed_value_structures
# current_state = np.zeros(5000)
# current_state[0] = supply_depot_count
# current_state[1] = barracks_count
# current_state[2] = supply_limit
# current_state[3] = army_supply
#
# hot_squares = np.zeros(4096)
# enemy_y, enemy_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.ENEMY).nonzero()
# for i in range(0, len(enemy_y)):
# y = int(enemy_y[i])
# x = int(enemy_x[i])
#
# hot_squares[((y - 1) * 64) + (x - 1)] = 1
#
# if not self.base_top_left:
# hot_squares = hot_squares[::-1]
#
# for i in range(0, 4096):
# current_state[i + 4] = hot_squares[i]
current_state = np.zeros(20)
current_state[0] = supply_depot_count
current_state[1] = barracks_count
current_state[2] = supply_limit
current_state[3] = army_supply
hot_squares = np.zeros(16)
enemy_y, enemy_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.ENEMY).nonzero()
for i in range(0, len(enemy_y)):
y = int(math.ceil((enemy_y[i] + 1) / 16))
x = int(math.ceil((enemy_x[i] + 1) / 16))
hot_squares[((y - 1) * 4) + (x - 1)] = 1
if not self.base_top_left:
hot_squares = hot_squares[::-1]
for i in range(0, 16):
current_state[i + 4] = hot_squares[i]
if self.previous_action is not None:
reward = 0
if killed_unit_score > self.previous_killed_unit_score:
reward += KILL_UNIT_REWARD
if killed_building_score > self.previous_killed_building_score:
reward += KILL_BUILDING_REWARD
self.qlearn.learn(str(self.previous_state), self.previous_action, reward, str(current_state))
rl_action = self.qlearn.choose_action(str(current_state))
smart_action = smart_actions[rl_action]
self.previous_killed_unit_score = killed_unit_score
self.previous_killed_building_score = killed_building_score
self.previous_state = current_state
self.previous_action = rl_action
x = 0
y = 0
if '_' in smart_action:
smart_action, x, y = smart_action.split('_')
if smart_action == ACTION_DO_NOTHING:
return actions.FUNCTIONS.no_op()
elif smart_action == ACTION_SELECT_SCV:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
scvs = self.get_units_by_type(obs, units.Terran.SCV)
if len(scvs) > 0:
scv = random.choice(scvs)
if scv.x >= 0 and scv.y >= 0:
return actions.FUNCTIONS.select_point("select", (scv.x,
scv.y))
elif smart_action == ACTION_BUILD_SUPPLY_DEPOT:
if self.can_do(obs, actions.FUNCTIONS.Build_SupplyDepot_screen.id):
ccs = self.get_units_by_type(obs, units.Terran.CommandCenter)
if len(ccs) > 0:
mean_x, mean_y = self.getMeanLocation(ccs)
target = self.transformDistance(int(mean_x), 0, int(mean_y), 20)
return actions.FUNCTIONS.Build_SupplyDepot_screen("now", target)
elif smart_action == ACTION_BUILD_BARRACKS:
if self.can_do(obs, actions.FUNCTIONS.Build_Barracks_screen.id):
ccs = self.get_units_by_type(obs, units.Terran.CommandCenter)
if len(ccs) > 0:
mean_x, mean_y = self.getMeanLocation(ccs)
target = self.transformDistance(int(mean_x), 20, int(mean_y), 0)
return actions.FUNCTIONS.Build_Barracks_screen("now", target)
elif smart_action == ACTION_SELECT_BARRACKS:
if self.can_do(obs, actions.FUNCTIONS.select_point.id):
barracks = self.get_units_by_type(obs, units.Terran.Barracks)
if len(barracks) > 0:
barrack = random.choice(barracks)
if barrack.x >= 0 and barrack.y >= 0:
return actions.FUNCTIONS.select_point("select", (barrack.x,
barrack.y))
elif smart_action == ACTION_BUILD_MARINE:
if self.can_do(obs, actions.FUNCTIONS.Train_Marine_quick.id):
return actions.FUNCTIONS.Train_Marine_quick("queued")
elif smart_action == ACTION_SELECT_ARMY:
if self.can_do(obs, actions.FUNCTIONS.select_army.id):
return actions.FUNCTIONS.select_army("select")
elif smart_action == ACTION_ATTACK:
#if self.can_do(obs, actions.FUNCTIONS.Attack_minimap.id):
if not self.unit_type_is_selected(obs, units.Terran.SCV) and self.can_do(obs, actions.FUNCTIONS.Attack_minimap.id):
return actions.FUNCTIONS.Attack_minimap("now", self.transformLocation(int(x), int(y)))
return actions.FUNCTIONS.no_op()
| [
"pandas.read_pickle",
"random.choice",
"math.ceil",
"pysc2.lib.actions.FUNCTIONS.Build_SupplyDepot_screen",
"pysc2.lib.actions.FUNCTIONS.select_army",
"os.path.isfile",
"numpy.zeros",
"pysc2.lib.actions.FUNCTIONS.Build_Barracks_screen",
"pysc2.lib.actions.FUNCTIONS.select_point",
"pysc2.lib.action... | [((1549, 1582), 'os.path.isfile', 'os.path.isfile', (["(DATA_FILE + '.gz')"], {}), "(DATA_FILE + '.gz')\n", (1563, 1582), False, 'import os\n'), ((4525, 4537), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (4533, 4537), True, 'import numpy as np\n'), ((4728, 4740), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (4736, 4740), True, 'import numpy as np\n'), ((8875, 8900), 'pysc2.lib.actions.FUNCTIONS.no_op', 'actions.FUNCTIONS.no_op', ([], {}), '()\n', (8898, 8900), False, 'from pysc2.lib import actions, features, units\n'), ((1618, 1671), 'pandas.read_pickle', 'pd.read_pickle', (["(DATA_FILE + '.gz')"], {'compression': '"""gzip"""'}), "(DATA_FILE + '.gz', compression='gzip')\n", (1632, 1671), True, 'import pandas as pd\n'), ((6151, 6176), 'pysc2.lib.actions.FUNCTIONS.no_op', 'actions.FUNCTIONS.no_op', ([], {}), '()\n', (6174, 6176), False, 'from pysc2.lib import actions, features, units\n'), ((4922, 4954), 'math.ceil', 'math.ceil', (['((enemy_y[i] + 1) / 16)'], {}), '((enemy_y[i] + 1) / 16)\n', (4931, 4954), False, 'import math\n'), ((4976, 5008), 'math.ceil', 'math.ceil', (['((enemy_x[i] + 1) / 16)'], {}), '((enemy_x[i] + 1) / 16)\n', (4985, 5008), False, 'import math\n'), ((6423, 6442), 'random.choice', 'random.choice', (['scvs'], {}), '(scvs)\n', (6436, 6442), False, 'import random\n'), ((6524, 6580), 'pysc2.lib.actions.FUNCTIONS.select_point', 'actions.FUNCTIONS.select_point', (['"""select"""', '(scv.x, scv.y)'], {}), "('select', (scv.x, scv.y))\n", (6554, 6580), False, 'from pysc2.lib import actions, features, units\n'), ((7083, 7140), 'pysc2.lib.actions.FUNCTIONS.Build_SupplyDepot_screen', 'actions.FUNCTIONS.Build_SupplyDepot_screen', (['"""now"""', 'target'], {}), "('now', target)\n", (7125, 7140), False, 'from pysc2.lib import actions, features, units\n'), ((7558, 7612), 'pysc2.lib.actions.FUNCTIONS.Build_Barracks_screen', 'actions.FUNCTIONS.Build_Barracks_screen', (['"""now"""', 'target'], {}), "('now', target)\n", (7597, 
7612), False, 'from pysc2.lib import actions, features, units\n'), ((7881, 7904), 'random.choice', 'random.choice', (['barracks'], {}), '(barracks)\n', (7894, 7904), False, 'import random\n'), ((8285, 8331), 'pysc2.lib.actions.FUNCTIONS.Train_Marine_quick', 'actions.FUNCTIONS.Train_Marine_quick', (['"""queued"""'], {}), "('queued')\n", (8321, 8331), False, 'from pysc2.lib import actions, features, units\n'), ((7994, 8058), 'pysc2.lib.actions.FUNCTIONS.select_point', 'actions.FUNCTIONS.select_point', (['"""select"""', '(barrack.x, barrack.y)'], {}), "('select', (barrack.x, barrack.y))\n", (8024, 8058), False, 'from pysc2.lib import actions, features, units\n'), ((8472, 8511), 'pysc2.lib.actions.FUNCTIONS.select_army', 'actions.FUNCTIONS.select_army', (['"""select"""'], {}), "('select')\n", (8501, 8511), False, 'from pysc2.lib import actions, features, units\n')] |
import os
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch.distributions import MultivariateNormal
def sample_digits_maf(model, epoch, random_order=False, seed=None, test=False):
    """Draw 80 samples from the flow model and save them as a 10x8 grid
    of 28x28 grayscale images.

    Args:
        model: flow model exposing ``eval()`` and ``backward(u)`` that maps
            latent noise back to data space (project type; contract assumed
            from usage here).
        epoch: epoch number embedded in the output file name.
        random_order: if True, draw a random pixel permutation (currently
            unused downstream; see NOTE below).
        seed: optional seed for both torch and numpy RNGs.
        test: if False, save under ``gif_results/``; otherwise under ``figs/``.
    """
    model.eval()
    n_samples = 80
    # Make both RNGs reproducible when a seed is supplied.
    if seed is not None:
        torch.manual_seed(seed)
        np.random.seed(seed)
    if random_order is True:
        np.random.seed(seed)
        order = np.random.permutation(784)
    else:
        order = np.arange(784)
    # Latent noise u ~ N(0, I); the model's backward pass maps it to data
    # space.  NOTE(review): ``order``, ``mvn`` and ``log_prob`` are computed
    # but never used below -- they belong to the commented-out likelihood
    # ranking code that follows.
    u = torch.zeros(n_samples, 784).normal_(0, 1)
    mvn = MultivariateNormal(torch.zeros(28 * 28), torch.eye(28 * 28))
    log_prob = mvn.log_prob(u)
    samples, log_det = model.backward(u)
    # log_det = log_prob - log_det
    # log_det = log_det[np.logical_not(np.isnan(log_det.detach().numpy()))]
    # idx = np.argsort(log_det.detach().numpy())
    # samples = samples[idx].flip(dims=(0,))
    # samples = samples[80 : 80 + n_samples]
    # Squash outputs into (0, 1) pixel range, undoing the 1e-6 jitter used
    # by the logit preprocessing.
    samples = (torch.sigmoid(samples) - 1e-6) / (1 - 2e-6)
    samples = samples.detach().cpu().view(n_samples, 28, 28)
    fig, axes = plt.subplots(ncols=10, nrows=8)
    ax = axes.ravel()
    for i in range(n_samples):
        ax[i].imshow(
            np.transpose(samples[i], (0, 1)), cmap="gray", interpolation="none"
        )
        ax[i].axis("off")
        ax[i].set_xticklabels([])
        ax[i].set_yticklabels([])
        ax[i].set_frame_on(False)
    # Per-epoch snapshots accumulate in gif_results/; test mode writes to figs/.
    if not os.path.exists("gif_results"):
        os.makedirs("gif_results")
    if test is False:
        save_path = "gif_results/samples_gaussian_" + str(epoch) + ".png"
    else:
        save_path = "figs/samples_gaussian_" + str(epoch) + ".png"
    fig.subplots_adjust(wspace=-0.35, hspace=0.065)
    plt.gca().set_axis_off()
    plt.savefig(
        save_path, dpi=300, bbox_inches="tight", pad_inches=0,
    )
    plt.close()
def plot_losses(epochs, train_losses, val_losses, title=None):
    """Plot training vs. validation negative log-likelihood curves.

    The figure is written to ``plots/train_plots<last-epoch>.pdf``; the
    directory is created on first use.
    """
    sns.set(style="white")
    fig, ax = plt.subplots(
        ncols=1, nrows=1, figsize=[10, 5], sharey=True, sharex=True, dpi=400
    )
    # Shift the index so epochs are 1-based on the x axis.
    train_curve = pd.Series(train_losses).astype(float)
    val_curve = pd.Series(val_losses).astype(float)
    train_curve.index += 1
    val_curve.index += 1
    ax = sns.lineplot(data=train_curve, color="gray", label="Training loss")
    ax = sns.lineplot(data=val_curve, color="orange", label="Validation loss")
    ax.set_ylabel("Negative log-likelihood")
    ax.legend(
        frameon=False,
        prop={"size": 14},
        fancybox=False,
        handletextpad=0.5,
        handlelength=1,
    )
    ax.set_ylim(1250, 1600)
    ax.set_xlim(0, 50)
    if title is not None:
        ax.set_title(title)
    else:
        ax.set_title(None)
    if not os.path.exists("plots"):
        os.makedirs("plots")
    save_path = "plots/train_plots" + str(epochs[-1]) + ".pdf"
    plt.savefig(
        save_path, dpi=300, bbox_inches="tight", pad_inches=0,
    )
    plt.close()
| [
"pandas.Series",
"torch.manual_seed",
"seaborn.set",
"os.path.exists",
"matplotlib.pyplot.savefig",
"os.makedirs",
"torch.eye",
"matplotlib.pyplot.gca",
"torch.sigmoid",
"matplotlib.pyplot.close",
"seaborn.lineplot",
"numpy.random.seed",
"torch.zeros",
"numpy.transpose",
"matplotlib.pypl... | [((1108, 1139), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(10)', 'nrows': '(8)'}), '(ncols=10, nrows=8)\n', (1120, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1770, 1836), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'dpi': '(300)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(save_path, dpi=300, bbox_inches='tight', pad_inches=0)\n", (1781, 1836), True, 'import matplotlib.pyplot as plt\n'), ((1856, 1867), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1865, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1937, 1959), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (1944, 1959), True, 'import seaborn as sns\n'), ((1976, 2062), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(1)', 'figsize': '[10, 5]', 'sharey': '(True)', 'sharex': '(True)', 'dpi': '(400)'}), '(ncols=1, nrows=1, figsize=[10, 5], sharey=True, sharex=True,\n dpi=400)\n', (1988, 2062), True, 'import matplotlib.pyplot as plt\n'), ((2222, 2283), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'train', 'color': '"""gray"""', 'label': '"""Training loss"""'}), "(data=train, color='gray', label='Training loss')\n", (2234, 2283), True, 'import seaborn as sns\n'), ((2295, 2358), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'val', 'color': '"""orange"""', 'label': '"""Validation loss"""'}), "(data=val, color='orange', label='Validation loss')\n", (2307, 2358), True, 'import seaborn as sns\n'), ((2815, 2881), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'dpi': '(300)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(save_path, dpi=300, bbox_inches='tight', pad_inches=0)\n", (2826, 2881), True, 'import matplotlib.pyplot as plt\n'), ((2901, 2912), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2910, 2912), True, 'import matplotlib.pyplot as plt\n'), ((330, 353), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), 
'(seed)\n', (347, 353), False, 'import torch\n'), ((362, 382), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (376, 382), True, 'import numpy as np\n'), ((420, 440), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (434, 440), True, 'import numpy as np\n'), ((457, 483), 'numpy.random.permutation', 'np.random.permutation', (['(784)'], {}), '(784)\n', (478, 483), True, 'import numpy as np\n'), ((510, 524), 'numpy.arange', 'np.arange', (['(784)'], {}), '(784)\n', (519, 524), True, 'import numpy as np\n'), ((605, 625), 'torch.zeros', 'torch.zeros', (['(28 * 28)'], {}), '(28 * 28)\n', (616, 625), False, 'import torch\n'), ((627, 645), 'torch.eye', 'torch.eye', (['(28 * 28)'], {}), '(28 * 28)\n', (636, 645), False, 'import torch\n'), ((1445, 1474), 'os.path.exists', 'os.path.exists', (['"""gif_results"""'], {}), "('gif_results')\n", (1459, 1474), False, 'import os\n'), ((1484, 1510), 'os.makedirs', 'os.makedirs', (['"""gif_results"""'], {}), "('gif_results')\n", (1495, 1510), False, 'import os\n'), ((2694, 2717), 'os.path.exists', 'os.path.exists', (['"""plots"""'], {}), "('plots')\n", (2708, 2717), False, 'import os\n'), ((2727, 2747), 'os.makedirs', 'os.makedirs', (['"""plots"""'], {}), "('plots')\n", (2738, 2747), False, 'import os\n'), ((534, 561), 'torch.zeros', 'torch.zeros', (['n_samples', '(784)'], {}), '(n_samples, 784)\n', (545, 561), False, 'import torch\n'), ((986, 1008), 'torch.sigmoid', 'torch.sigmoid', (['samples'], {}), '(samples)\n', (999, 1008), False, 'import torch\n'), ((1227, 1259), 'numpy.transpose', 'np.transpose', (['samples[i]', '(0, 1)'], {}), '(samples[i], (0, 1))\n', (1239, 1259), True, 'import numpy as np\n'), ((1741, 1750), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1748, 1750), True, 'import matplotlib.pyplot as plt\n'), ((2086, 2109), 'pandas.Series', 'pd.Series', (['train_losses'], {}), '(train_losses)\n', (2095, 2109), True, 'import pandas as pd\n'), ((2134, 2155), 'pandas.Series', 
'pd.Series', (['val_losses'], {}), '(val_losses)\n', (2143, 2155), True, 'import pandas as pd\n')] |
# encoding: utf-8
'''
@author: <NAME>
@contact: <EMAIL>
@software: nef
@file: scanner_to_crystal.py
@date: 6/14/2019
@desc:
'''
from srfnef import nef_class
from srfnef.geometry import PetEcatScanner
import numpy as np
@nef_class
class ScannerToCrystal:
    """Callable that computes the 3-D center coordinates of every crystal
    of a cylindrical ECAT-style PET scanner."""

    def __call__(self, scanner: PetEcatScanner) -> np.ndarray:
        """Return an (nb_crystals, 3) float32 array of (x, y, z) crystal
        centers for ``scanner``.

        Ordering follows the kron-expansion below: fastest over rings,
        then over crystals within a block, then over blocks in a ring
        (assumption inferred from the expansion order -- verify against
        callers).
        """
        # Crystals per ring = blocks per ring * block grid (transaxial x axial).
        nb_crystals_per_ring = scanner.nb_blocks_per_ring * scanner.blocks.shape[1] * \
                               scanner.blocks.shape[2]
        nb_crystals = nb_crystals_per_ring * scanner.nb_rings
        lors = np.zeros((nb_crystals, 3), dtype = np.float32)
        # Block-local coordinates: x is the radial offset (all crystals sit
        # at the average ring radius), y/z are the in-block crystal-center
        # offsets, centered on the block.
        x = np.ones(scanner.blocks.shape[1], ) * scanner.average_radius
        y = (np.arange(scanner.blocks.shape[1]) + 0.5) * scanner.blocks.unit_size[1] - \
            scanner.blocks.size[1] / 2
        z = (np.arange(scanner.blocks.shape[2]) + 0.5) * scanner.blocks.unit_size[2] - \
            scanner.blocks.size[2] / 2
        # Replicate per-crystal coordinates across the axial direction and
        # then across all blocks in a ring.
        x1 = np.kron(x, [[1]] * scanner.blocks.shape[2]).ravel()
        y1 = np.kron(y, [[1]] * scanner.blocks.shape[2]).ravel()
        xx = np.kron(x1, [[1]] * scanner.nb_blocks_per_ring).ravel()
        yy = np.kron(y1, [[1]] * scanner.nb_blocks_per_ring).ravel()
        zz = np.kron(z, [1] * scanner.blocks.shape[1])
        # One rotation angle per block, repeated for every crystal in it.
        theta = 2 * np.pi / scanner.nb_blocks_per_ring * np.arange(scanner.nb_blocks_per_ring)
        theta1 = np.kron(theta, [1] * scanner.blocks.shape[1] * scanner.blocks.shape[2])
        # Rotate each block's local (x, y) into its angular position.
        xx1 = xx * np.cos(theta1) - yy * np.sin(theta1)
        yy1 = xx * np.sin(theta1) + yy * np.cos(theta1)
        lors[:, 0] = np.kron(xx1, [[1]] * scanner.nb_rings).ravel()
        lors[:, 1] = np.kron(yy1, [[1]] * scanner.nb_rings).ravel()
        # Axial (z) position: shift each ring by the block length plus the
        # inter-ring gap, centered on the scanner's axial extent.
        for i in range(scanner.nb_rings):
            lors[nb_crystals_per_ring * i:nb_crystals_per_ring * (i + 1), 2] = \
                np.kron(zz, [[1]] * scanner.nb_blocks_per_ring).ravel() - scanner.axial_length / 2 \
                + i * (scanner.blocks.size[2] + scanner.gap) + 0.5 * scanner.blocks.size[2]
        return lors
| [
"numpy.ones",
"numpy.kron",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"numpy.arange"
] | [((539, 583), 'numpy.zeros', 'np.zeros', (['(nb_crystals, 3)'], {'dtype': 'np.float32'}), '((nb_crystals, 3), dtype=np.float32)\n', (547, 583), True, 'import numpy as np\n'), ((1196, 1237), 'numpy.kron', 'np.kron', (['z', '([1] * scanner.blocks.shape[1])'], {}), '(z, [1] * scanner.blocks.shape[1])\n', (1203, 1237), True, 'import numpy as np\n'), ((1351, 1422), 'numpy.kron', 'np.kron', (['theta', '([1] * scanner.blocks.shape[1] * scanner.blocks.shape[2])'], {}), '(theta, [1] * scanner.blocks.shape[1] * scanner.blocks.shape[2])\n', (1358, 1422), True, 'import numpy as np\n'), ((599, 631), 'numpy.ones', 'np.ones', (['scanner.blocks.shape[1]'], {}), '(scanner.blocks.shape[1])\n', (606, 631), True, 'import numpy as np\n'), ((1295, 1332), 'numpy.arange', 'np.arange', (['scanner.nb_blocks_per_ring'], {}), '(scanner.nb_blocks_per_ring)\n', (1304, 1332), True, 'import numpy as np\n'), ((928, 971), 'numpy.kron', 'np.kron', (['x', '([[1]] * scanner.blocks.shape[2])'], {}), '(x, [[1]] * scanner.blocks.shape[2])\n', (935, 971), True, 'import numpy as np\n'), ((993, 1036), 'numpy.kron', 'np.kron', (['y', '([[1]] * scanner.blocks.shape[2])'], {}), '(y, [[1]] * scanner.blocks.shape[2])\n', (1000, 1036), True, 'import numpy as np\n'), ((1058, 1105), 'numpy.kron', 'np.kron', (['x1', '([[1]] * scanner.nb_blocks_per_ring)'], {}), '(x1, [[1]] * scanner.nb_blocks_per_ring)\n', (1065, 1105), True, 'import numpy as np\n'), ((1127, 1174), 'numpy.kron', 'np.kron', (['y1', '([[1]] * scanner.nb_blocks_per_ring)'], {}), '(y1, [[1]] * scanner.nb_blocks_per_ring)\n', (1134, 1174), True, 'import numpy as np\n'), ((1442, 1456), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (1448, 1456), True, 'import numpy as np\n'), ((1464, 1478), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (1470, 1478), True, 'import numpy as np\n'), ((1498, 1512), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (1504, 1512), True, 'import numpy as np\n'), ((1520, 1534), 'numpy.cos', 'np.cos', 
(['theta1'], {}), '(theta1)\n', (1526, 1534), True, 'import numpy as np\n'), ((1557, 1595), 'numpy.kron', 'np.kron', (['xx1', '([[1]] * scanner.nb_rings)'], {}), '(xx1, [[1]] * scanner.nb_rings)\n', (1564, 1595), True, 'import numpy as np\n'), ((1625, 1663), 'numpy.kron', 'np.kron', (['yy1', '([[1]] * scanner.nb_rings)'], {}), '(yy1, [[1]] * scanner.nb_rings)\n', (1632, 1663), True, 'import numpy as np\n'), ((672, 706), 'numpy.arange', 'np.arange', (['scanner.blocks.shape[1]'], {}), '(scanner.blocks.shape[1])\n', (681, 706), True, 'import numpy as np\n'), ((800, 834), 'numpy.arange', 'np.arange', (['scanner.blocks.shape[2]'], {}), '(scanner.blocks.shape[2])\n', (809, 834), True, 'import numpy as np\n'), ((1811, 1858), 'numpy.kron', 'np.kron', (['zz', '([[1]] * scanner.nb_blocks_per_ring)'], {}), '(zz, [[1]] * scanner.nb_blocks_per_ring)\n', (1818, 1858), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import xgboost
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn import tree
from sklearn import metrics
from sklearn import datasets
from sklearn import ensemble
from sklearn import pipeline
from sklearn import linear_model
from sklearn import preprocessing
from sklearn import model_selection
from sklearn import feature_selection
#
# """#Creating Data
#
# ##Classification
# """
#
# x, y = datasets.make_classification(n_samples=100, n_features=10, random_state=42)
# xd = pd.DataFrame(x)
# yd = pd.DataFrame(y)
# x_train, x_test, y_train, y_test = model_selection.train_test_split(xd, y, random_state=42, test_size=0.2)
#
# #create a data of combination of x and the target(y)
# data = xd.copy()
# data[10] = yd
#
# """##Regression"""
#
# X, Y = datasets.make_regression(n_samples=1000, n_features=10, noise=0.2, n_informative=20, random_state=46)
# Xd = pd.DataFrame(X)
# Yd = pd.DataFrame(Y)
# X_train, X_test, Y_train, Y_test = model_selection.train_test_split(Xd, Y, random_state=46, test_size=0.2)
#
# #create a data of combination of x and the target(y)
# Data = Xd.copy()
# Data[10] = Yd
"""#Only Classification
##Confusion Matrix
"""
def clf_confusion_matrix(model, y_true, y_pred, names=None):
  """Plot a confusion matrix heatmap for classifier predictions.

  Parameters
  ----------
  model : unused; kept for backward compatibility with existing callers.
  y_true, y_pred : array-like of true and predicted labels.
  names : optional sequence of class names for the axis tick labels.
    The original referenced an undefined global ``names`` and raised
    NameError; when omitted, seaborn's default ("auto") labels are used.
  """
  cm_knn = metrics.confusion_matrix(y_true, y_pred)
  # "auto" is seaborn.heatmap's default tick-label behaviour.
  tick_labels = "auto" if names is None else names
  sns.heatmap(cm_knn, square=True, annot=True, cbar=False,
              xticklabels=tick_labels, yticklabels=tick_labels, cmap="RdYlGn")
  plt.xlabel("Actual")
  plt.ylabel("Predicted")
  plt.show()
# model = linear_model.SGDClassifier()
# model.fit(x_train, y_train)
# pred = model.predict(x_test)
# clf_confusion_matrix(model, y_test, pred)
"""##Bivariate Analysis
### Box Plot Chart
"""
def clf_boxplot(X, Y, sizes=4):
  """Draw one box plot per feature column of X, grouped by the target Y.

  Each feature gets its own square figure of side ``sizes`` inches.
  """
  for column in X.columns:
    plt.figure(figsize=(sizes, sizes))
    sns.boxplot(x=Y, y=X[column], palette="RdYlGn", saturation=1)
# clf_boxplot(xd, y)
"""###Grouped Bar Chart"""
def clf_countplot(X, Y, sizes=4):
  """Draw a grouped count plot (bar chart) per feature, split by target Y.

  Intended for categorical features; returns an apology string when the
  frame is empty, matching the original behaviour.
  """
  print("Remmember this plot is only for categorical data")
  # Nothing to plot on an empty frame.
  if not len(X):
    return "Sorry, Please Enter categorical data"
  for column in X.columns:
    plt.figure(figsize=(sizes, sizes))
    sns.countplot(x=X[column], hue=Y, palette="RdYlGn", saturation=1)
# clf_countplot(xd, y)
"""#All Types
##HeatMap
values near to 0, has less correlation
but, values near 1 or -1 has the most correlation
"""
def heatmap(data, with_mask=True):
  """Plot the correlation matrix of ``data`` as an annotated heatmap.

  Values near 0 indicate little correlation; values near +/-1 indicate
  strong (anti-)correlation.

  Parameters
  ----------
  data : pandas.DataFrame whose pairwise column correlations are shown.
  with_mask : if True, hide the redundant upper triangle of the
    (symmetric) correlation matrix.
  """
  corr = data.corr()
  f, ax = plt.subplots(figsize=(11, 9))
  # BUGFIX: the original tested ``if ~with_mask`` -- bitwise NOT on a bool
  # (~True == -2, ~False == -1) is always truthy, so the mask was never
  # applied.  Also ``np.bool`` was removed from NumPy; use builtin bool.
  if with_mask:
    mask = np.triu(np.ones_like(corr, dtype=bool))
  else:
    mask = None
  sns.heatmap(corr, mask=mask, cmap="RdYlGn", annot=True, square=True, linewidths=0.5, cbar_kws={"shrink": 0.5}, ax=ax)
# print(Data.corr(), end="\n")
# print(Data[[10, 2]].corr(), end="\n")
# heatmap(data, False)
"""##Importance Plot
Higher, better
"""
def importance_plot(X, Y, size=8):
  """Fit an ExtraTreesRegressor on (X, Y) and bar-plot its feature
  importances (higher is better), showing at most the 50 largest."""
  estimator = ensemble.ExtraTreesRegressor()
  estimator.fit(X, Y)
  plt.style.use('ggplot')
  plt.figure(figsize=(size, size))
  # Label importances by feature name, then keep the 50 biggest.
  importances = pd.Series(estimator.feature_importances_, index=X.columns)
  top = importances.nlargest(50)
  top.plot(kind='barh')
  plt.show()
# importance_plot(xd,y)
"""##Multicollinearity
detect by: variance inflation factor or the VIF for each predicting variable.
detect: remove features with scores of more than 10
1 <= VIF <= inf
"""
from statsmodels.stats.outliers_influence import variance_inflation_factor
def multicollinearity(Data):
  """Compute the variance inflation factor (VIF) for every column.

  Rows containing NaN or +/-inf are dropped first, since VIF is computed
  from auxiliary regressions that cannot handle them.  Features with a
  VIF above ~10 are conventionally treated as strongly collinear.

  Returns
  -------
  pandas.DataFrame with columns ``feature`` and ``VIF``.
  """
  # Keep only finite rows.  ``axis=`` is passed by keyword because the
  # positional axis argument of DataFrame.any was removed in pandas 2.0.
  Data = Data[~Data.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
  vif_data = pd.DataFrame()
  vif_data["feature"] = Data.columns
  vif_data["VIF"] = [
    variance_inflation_factor(Data.values, i) for i in range(len(Data.columns))
  ]
  return vif_data
# multicollinearity(Data)
# multicollinearity(data)
"""##Feature importances"""
import xgboost
def Features_Importance(X, Y, Xte, Yte, reg=True):
  """Fit an XGBoost model, plot its feature importances, then re-evaluate
  the model on every importance threshold via SelectFromModel.

  Parameters
  ----------
  X, Y : training features (DataFrame; ``X.columns`` is used for labels)
    and target.
  Xte, Yte : test features and target used for scoring.
  reg : if True use XGBRegressor, otherwise XGBClassifier with rounded
    predictions.

  NOTE(review): the score printed as "Accuracy" is ``metrics.r2_score``
  in both branches, which is an R^2, not classification accuracy --
  confirm this is intentional for the classifier path.
  """
  print("""WARNING;
If You Enter the wrong data type, it will train for so long. and won't work properly, reg == True""")
  if reg:
    model = xgboost.XGBRegressor()
    model.fit(X, Y)
    y_pred = model.predict(Xte)
    predictions = [value for value in y_pred]
  else:
    model = xgboost.XGBClassifier()
    model.fit(X, Y)
    y_pred = model.predict(Xte)
    # Round class probabilities/scores to hard labels.
    predictions = [round(value) for value in y_pred]
  accuracy = metrics.r2_score(Yte, predictions)
  print("Accuracy: %.2f%%" % (accuracy * 100.0))
  # Bar chart of per-feature importances, sorted ascending.
  importances = model.feature_importances_
  indices = np.argsort(importances)
  thresholds = np.sort(importances)
  plt.figure(figsize=(6, 6))
  plt.title('Feature Importances')
  plt.barh(range(len(indices)), importances[indices], color='b', align='center')
  plt.yticks(range(len(indices)), [X.columns[i] for i in indices])
  plt.xlabel('Relative Importance')
  plt.show()
  # Retrain on progressively smaller feature subsets: each threshold keeps
  # only the features whose importance is >= thresh.
  for thresh in thresholds:
    # select features using threshold
    selection = feature_selection.SelectFromModel(model, threshold=thresh, prefit=True)
    select_X_train = selection.transform(X)
    if reg:
      selection_model = xgboost.XGBRegressor()
      selection_model.fit(select_X_train, Y)
      select_x_test = selection.transform(Xte)
      y_pred = selection_model.predict(select_x_test)
      predictions = [value for value in y_pred]
    else:
      selection_model = xgboost.XGBClassifier()
      selection_model.fit(select_X_train, Y)
      select_x_test = selection.transform(Xte)
      y_pred = selection_model.predict(select_x_test)
      predictions = [round(value) for value in y_pred]
    accuracy = metrics.r2_score(Yte, predictions)
    print("Thresh=%.3f, n=%d, Accuracy: %.2f%%" % (thresh, select_X_train.shape[1], accuracy*100.0))
# Features_Importance(X_train, Y_train, X_test, Y_test)
# Features_Importance(x_train, y_train, x_test, y_test, False)
"""##Automated feature selection
###Variance threshold
In statistics, variance is the squared deviation of a variable from its mean, in other words, how far are the data points spread out for a given variable?
Suppose we were building a machine learning model to detect breast cancer and the data set had a boolean variable for gender.
This data set is likely to consist almost entirely of one gender and therefore nearly all data points would be 1. This variable would have extremely low variance and would be not at all useful for predicting the target variable.
This is one of the most simple approaches to feature selection. The scikit-learn library has a method called VarianceThreshold . This method takes a threshold value and when fitted to a feature set will remove any features below this threshold. The default value for the threshold is 0 and this will remove any features with zero variance, or in other words where all values are the same.
"""
def afs_variance_threshold(X, ret=False):
  """Drop zero-variance features with sklearn's VarianceThreshold.

  Prints the feature-matrix shape before and after the transform; when
  ``ret`` is True the reduced matrix is returned.
  """
  print("Original feature shape:", X.shape)
  vt = feature_selection.VarianceThreshold()
  reduced = vt.fit_transform(X)
  print("Transformed feature shape:", reduced.shape, end="\n")
  if ret:
    return reduced
# afs_variance_threshold(X)
"""###Recursive feature elimination"""
def afs_recursive(X, Y, ret=False):
  """Recursive feature elimination with cross-validation (RFECV).

  Fits a linear-kernel SVR inside RFECV (step=1, 2-fold CV), prints the
  selected-feature mask and ranking, and optionally returns the reduced
  feature matrix.

  Parameters
  ----------
  X, Y : feature matrix and target vector.
  ret : if True, return the transformed (selected) features.
  """
  # NOTE: the original computed ``preprocessing.normalize(X, norm='l2')``
  # and never used the result; that dead statement was removed.  The
  # selector is still fitted on the raw X, exactly as before.
  estimator = svm.SVR(kernel="linear")
  selector = feature_selection.RFECV(estimator, step=1, cv=2)
  selector = selector.fit(X, Y)
  print("Features selected", selector.support_)
  print("Feature ranking", selector.ranking_)
  if ret:
    return selector.fit_transform(X, Y)
# afs_recursive(X, Y)
"""##Filter Method
One of the best feature selection ways with filtering
####based on chi-square, ANVOA and mutual information
###Univariate feature selection
Univariate feature selection applies univariate statistical tests to features and selects those which perform the best in these tests. Univariate tests are tests which involve only one dependent variable.
This includes analysis of variance (ANOVA), linear regressions and t-tests of means.
###It chooses the most important features to the target
"""
def univariate_feature_selection(X, Y, Xte, hints=False, sf_choice=0):
  """Filter-method feature scoring with SelectKBest.

  Fits SelectKBest (k='all') using the score function picked by
  ``sf_choice``, prints and bar-plots every feature's score, and returns
  the transformed train/test matrices plus the fitted selector.  (The
  original computed the transforms and discarded them while its usage
  example unpacked three return values; the return was added -- callers
  ignoring it are unaffected.)

  Parameters
  ----------
  X, Y : training features (DataFrame with named columns) and target.
  Xte : test features, transformed with the same fitted selector.
  hints : if True, print the sf_choice -> score-function mapping.
  sf_choice : index into the score-function table below.

  Returns
  -------
  (X_train_selected, X_test_selected, selection_model)
  """
  # Score functions selectable via ``sf_choice``.  (The original first
  # built a same-named list of *strings* that was immediately shadowed by
  # this list of callables; the dead list was removed.)
  score_functions = [feature_selection.f_classif, feature_selection.mutual_info_classif, feature_selection.chi2, feature_selection.f_regression, feature_selection.mutual_info_regression, feature_selection.SelectPercentile, feature_selection.SelectPercentile, feature_selection.SelectFdr,feature_selection.SelectFwe, feature_selection.GenericUnivariateSelect]
  if hints:
    print(""" SelectKBest score function, choices
    f_classif ===> 0,
    mutual_info_classif ===> 1,
    chi2 ===> 2,
    f_regression ===> 3,
    mutual_info_regression ===> 4
    # There are some that I don't understand, so I don't put them in the list, but I will
    SelectPercentile ===> 5,
    SelectFdr ===> 6,
    SelectFwe ===> 7,
    GenericUnivariateSelect ===> 8
    """)
  selection_model = feature_selection.SelectKBest(score_func=score_functions[sf_choice], k='all')
  selection_model.fit(X, Y)
  X_train_selected = selection_model.transform(X)
  X_test_selected = selection_model.transform(Xte)
  socres = selection_model.scores_
  for i in range(len(socres)):
    print(f'Feature_{i} is: {X.columns[i]} :==> {round(socres[i], 4)}')
  plt.bar([i for i in range(len(socres))], socres)
  plt.show()
  return X_train_selected, X_test_selected, selection_model
# X_train_fs, X_test_fs, fs = univariate_feature_selection(X_train, Y_train, X_test, sf_choice=3)
# X_train_fs, X_test_fs, fs = univariate_feature_selection(x_train, y_train, x_test, hints=False, sf_choice=0)
"""### The best method for Tunning
It selects the best parameters for the model
"""
def model_parameters_chooser(X, Y, model, hints=True, reg=True, sf_choice=0, score_choice=0):
  """Grid-search the SelectKBest ``k`` of a (feature-selection -> model)
  pipeline and print the best configuration plus every tried setting.

  Parameters
  ----------
  X, Y : feature matrix and target.
  model : estimator placed as the final pipeline step.
  hints : if True, print the index -> name mappings for the choices below.
  reg : pick the scoring name from the regression list (True) or the
    classification list (False).
  sf_choice : index into the SelectKBest score-function table.
  score_choice : index into the chosen scoring-name list.
  """
  # Scoring-name tables; ``score_choice`` indexes into the regression or
  # classification list depending on ``reg``.  NOTE(review): the
  # clustering list is never used below.
  Clustering_scores=["adjusted_mutual_info_score","adjusted_rand_score","completeness_score","fowlkes_mallows_score","homogeneity_score","mutual_info_score","normalized_mutual_info_score","rand_score","v_measure_score"]
  Regression_scores = ["explained_variance","max_error","neg_mean_absolute_error","neg_mean_squared_error","neg_root_mean_squared_error","neg_mean_squared_log_error","neg_median_absolute_error","r2","neg_mean_poisson_deviance","neg_mean_gamma_devian"]
  Classification_socres = ["accuracy","balanced_accuracy","top_k_accuracy","average_precision","neg_brier_score","f1","f1_micro","f1_weighted","f1_samples","neg_log_loss","precision","recall","jaccard","roc_auc","roc_auc_ovr","roc_auc_ovr_weighted"]
  if reg:
    score = Regression_scores[score_choice]
  else:
    score = Classification_socres[score_choice]
  # NOTE(review): this string list is immediately shadowed by the list of
  # callables assigned on the next statement.
  score_functions = ["f_classif","mutual_info_classif", "chi2","f_regression","mutual_info_regression","SelectPercentile","SelectFdr","SelectFwe","GenericUnivariateSelect"]
  score_functions = [feature_selection.f_classif, feature_selection.mutual_info_classif, feature_selection.chi2, feature_selection.f_regression, feature_selection.mutual_info_regression, feature_selection.SelectPercentile, feature_selection.SelectPercentile, feature_selection.SelectFdr,feature_selection.SelectFwe, feature_selection.GenericUnivariateSelect]
  if hints:
    print(""" SelectKBest score function, choices
    f_classif ===> 0,
    mutual_info_classif ===> 1,
    chi2 ===> 2,
    f_regression ===> 3,
    mutual_info_regression ===> 4
    # There are some that I don't understand, so I don't put them in the list, but I will
    SelectPercentile ===> 5,
    SelectFdr ===> 6,
    SelectFwe ===> 7,
    GenericUnivariateSelect ===> 8
    """)
    print("""Regression Scores
    explained_variance ===> 0,
    max_error ===> 1,
    neg_mean_absolute_error ===> 2,
    neg_mean_squared_error ===> 3,
    neg_root_mean_squared_error ===> 4,
    neg_mean_squared_log_error ===> 5,
    neg_median_absolute_error ===> 6,
    r2 ===> 7
    neg_mean_poisson_deviance ===> 8,
    neg_mean_gamma_devian ===> 9
    """)
    print("""Classification Scores
    accuracy ===> 0,
    balanced_accuracy ===> 1,
    top_k_accuracy ===> 2,
    average_precision ===> 3,
    neg_brier_score ===> 4,
    f1 ===> 5,
    f1_micro ===> 6,
    f1_weighted ===> 7,
    f1_samples ===> 8,
    neg_log_loss ===> 9,
    precision ===> 10,
    recall ===> 11,
    jaccard ===> 12,
    roc_auc ===> 13,
    roc_auc_ovr ===> 14,
    roc_auc_ovr_weighted ===> 15
    """)
    print("""Clustering Scores
    adjusted_mutual_info_score ===> 0,
    adjusted_rand_score ===> 1,
    completeness_score ===> 2,
    fowlkes_mallows_score ===> 3,
    homogeneity_score ===> 4,
    mutual_info_score ===> 5,
    normalized_mutual_info_score ===> 6,
    rand_score ===> 7,
    v_measure_score ===> 8
    """)
  # Repeated 10-fold CV for a more stable estimate of each configuration.
  cv = model_selection.RepeatedKFold(n_splits=10, n_repeats=3, random_state=64)
  featureSelectioner = feature_selection.SelectKBest(score_func=score_functions[sf_choice])
  pipeLine =pipeline.Pipeline(steps=[('sel',featureSelectioner), ('lr', model)])
  # Try every k from ~10% of the feature count up to all features.
  grid = dict()
  grid['sel__k'] = [i for i in range(X.shape[1]//10, X.shape[1]+1)]
  search = model_selection.GridSearchCV(pipeLine, grid, scoring=score, n_jobs=-1, cv=cv)
  results = search.fit(X, Y)
  print('Best MAE: %.3f' % results.best_score_)
  print('Best Config: %s' % results.best_params_, end="\n")
  # summarize all
  means = results.cv_results_['mean_test_score']
  params = results.cv_results_['params']
  for mean, param in zip(means, params):
    print(">%.3f with: %r" % (mean, param))
# clf_model =linear_model.LogisticRegressionCV()
# reg_model = linear_model.LinearRegression()
# model_parameters_chooser(x, y, clf_model, hints=False,reg=False, sf_choice=0)
# model_parameters_chooser(X, Y, reg_model, hints=False,reg=True, sf_choice=0)
"""###Evaluate Model with K BestFeatures and Cross Validation"""
# automatically select the number of features for RFE
def clf_Evaluate_Model_with_K_BestFeatures(X, Y, reg=False, number_of_selected_features=1, score_choice=0):
  """Evaluate a decision tree on RFE-selected features with repeated CV.

  Builds a (recursive feature elimination -> decision tree) pipeline and
  scores it with repeated stratified 10-fold cross-validation, printing
  the per-fold accuracies and their mean/std.

  Parameters
  ----------
  X, Y : feature matrix and target.  NOTE: the stratified CV and the
    hard-coded "accuracy" scoring only suit classification targets.
  reg : if True use regression trees with fixed-size RFE; otherwise use
    classification trees with RFECV (which chooses the feature count).
  number_of_selected_features : features kept (RFE) or the minimum kept
    (RFECV).
  score_choice : unused; kept for backward compatibility.
  """
  if reg:
    rfe = feature_selection.RFE(estimator=tree.DecisionTreeRegressor(), n_features_to_select=number_of_selected_features)
    model = tree.DecisionTreeRegressor()
  else:
    # The original also built an RFE here and immediately overwrote it
    # with the RFECV below; that dead assignment was removed.
    rfe = feature_selection.RFECV(estimator=tree.DecisionTreeClassifier(), min_features_to_select=number_of_selected_features)
    model = tree.DecisionTreeClassifier()
  my_pipeline = pipeline.Pipeline(steps=[('s',rfe),('m',model)])
  # automatically choose the number of features
  cv = model_selection.RepeatedStratifiedKFold(n_splits=10, n_repeats=8, random_state=64)
  n_scores = model_selection.cross_val_score(my_pipeline, X, Y, scoring="accuracy", cv=cv, n_jobs=-1, error_score='raise')
  # report performance
  print(n_scores)
  print('Accuracy: %.3f (%.3f)' % (np.mean(n_scores), np.std(n_scores)))
## It doesn't work with Regression data
# Evaluate_Model_with_K_BestFeatures(X, Y, reg=True, number_of_selected_features=5)
# Evaluate_Model_with_K_BestFeatures(x, y, reg=False, number_of_selected_features=5)
"""##Wrapper Methods
####based on forward selection and backward elimination
###Forward Selection
Forward Selection starts with no features in the model and incrementally adds one feature to the feature subset at a time. During each iteration, the new feature is chosen based on the evaluation of the model trained by the feature subset.
"""
"""###1. Backward Elimination
####A good way to show the bad features (each attr near 1, is worse, so we will remove it)
Simply put, it is just the opposite of the forward selection, starting with including all features to train the model. Then, features are iteratively removed from the feature subset based on whether they contribute to the model performance.
"""
def backward_elimination(X, Y):
  """Backward feature elimination driven by OLS p-values.

  Repeatedly fits an OLS model on the remaining columns and drops the
  feature with the largest p-value while it exceeds 0.05; stops once all
  remaining features are significant (or none are left).

  Returns
  -------
  list of surviving column names.
  """
  remaining = list(X.columns)
  while remaining:
    # add_constant supplies the intercept column required by sm.OLS.
    design = statsmodels.api.add_constant(X[remaining])
    fitted = statsmodels.api.OLS(Y, design).fit()
    print(f"The Model's PValues are: \n{fitted.pvalues}")
    # Skip index 0 (the intercept) when ranking feature p-values.
    pvalues = pd.Series(fitted.pvalues.values[1:], index=remaining)
    worst = pvalues.idxmax()
    if pvalues[worst] > 0.05:
      remaining.remove(worst)
    else:
      break
  return remaining
# backward_elimination(Xd, Y)
"""###2. Embedded Method"""
def embeded_method(X, Y, reg=True):
  """Embedded feature selection via Lasso coefficients.

  Fits Lasso (reg=True) or LassoCV (reg=False), reports the alpha and
  R^2 score, counts kept vs. eliminated features (zero coefficients are
  eliminated), and bar-plots the sorted coefficients.
  """
  if reg:
    lasso = linear_model.Lasso()
    lasso.fit(X, Y)
    best_alpha = lasso.alpha
  else:
    lasso = linear_model.LassoCV()
    lasso.fit(X, Y)
    best_alpha = lasso.alpha_
  print("Best alpha using built-in Lasso(CV): %f" % best_alpha)
  print("Best score using built-in Lasso(CV): %f" % lasso.score(X, Y))
  coefficients = pd.Series(lasso.coef_, index=X.columns)
  kept = sum(coefficients != 0)
  dropped = sum(coefficients == 0)
  print("Lasso picked " + str(kept) + " variables and eliminated the other " + str(dropped) + " variables")
  ordered = coefficients.sort_values()
  matplotlib.rcParams['figure.figsize'] = (8.0, 10.0)
  ordered.plot(kind="barh")
  plt.title("Feature importance using Lasso Model")
# embeded_method(xd, y, False)
# embeded_method(Xd, Y)
| [
"sklearn.model_selection.GridSearchCV",
"sklearn.feature_selection.VarianceThreshold",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.ylabel",
"sklearn.linear_model.Lasso",
"sklearn.ensemble.ExtraTreesRegressor",
"numpy.argsort",
"sklearn.feature_selection.SelectKBest",
"statsmodels.api.OL... | [((1362, 1402), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1386, 1402), False, 'from sklearn import metrics\n'), ((1405, 1518), 'seaborn.heatmap', 'sns.heatmap', (['cm_knn'], {'square': '(True)', 'annot': '(True)', 'cbar': '(False)', 'xticklabels': 'names', 'yticklabels': 'names', 'cmap': '"""RdYlGn"""'}), "(cm_knn, square=True, annot=True, cbar=False, xticklabels=names,\n yticklabels=names, cmap='RdYlGn')\n", (1416, 1518), True, 'import seaborn as sns\n'), ((1517, 1537), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Actual"""'], {}), "('Actual')\n", (1527, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1540, 1563), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Predicted"""'], {}), "('Predicted')\n", (1550, 1563), True, 'import matplotlib.pyplot as plt\n'), ((1566, 1576), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1574, 1576), True, 'import matplotlib.pyplot as plt\n'), ((2509, 2538), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(11, 9)'}), '(figsize=(11, 9))\n', (2521, 2538), True, 'import matplotlib.pyplot as plt\n'), ((2638, 2759), 'seaborn.heatmap', 'sns.heatmap', (['corr'], {'mask': 'mask', 'cmap': '"""RdYlGn"""', 'annot': '(True)', 'square': '(True)', 'linewidths': '(0.5)', 'cbar_kws': "{'shrink': 0.5}", 'ax': 'ax'}), "(corr, mask=mask, cmap='RdYlGn', annot=True, square=True,\n linewidths=0.5, cbar_kws={'shrink': 0.5}, ax=ax)\n", (2649, 2759), True, 'import seaborn as sns\n'), ((2939, 2969), 'sklearn.ensemble.ExtraTreesRegressor', 'ensemble.ExtraTreesRegressor', ([], {}), '()\n', (2967, 2969), False, 'from sklearn import ensemble\n'), ((2990, 3013), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (3003, 3013), True, 'import matplotlib.pyplot as plt\n'), ((3016, 3048), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(size, size)'}), '(figsize=(size, size))\n', (3026, 
3048), True, 'import matplotlib.pyplot as plt\n'), ((3069, 3123), 'pandas.Series', 'pd.Series', (['model.feature_importances_'], {'index': 'X.columns'}), '(model.feature_importances_, index=X.columns)\n', (3078, 3123), True, 'import pandas as pd\n'), ((3176, 3186), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3184, 3186), True, 'import matplotlib.pyplot as plt\n'), ((3576, 3590), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3588, 3590), True, 'import pandas as pd\n'), ((4355, 4389), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['Yte', 'predictions'], {}), '(Yte, predictions)\n', (4371, 4389), False, 'from sklearn import metrics\n'), ((4500, 4523), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (4510, 4523), True, 'import numpy as np\n'), ((4541, 4561), 'numpy.sort', 'np.sort', (['importances'], {}), '(importances)\n', (4548, 4561), True, 'import numpy as np\n'), ((4566, 4592), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (4576, 4592), True, 'import matplotlib.pyplot as plt\n'), ((4597, 4629), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature Importances"""'], {}), "('Feature Importances')\n", (4606, 4629), True, 'import matplotlib.pyplot as plt\n'), ((4786, 4819), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Relative Importance"""'], {}), "('Relative Importance')\n", (4796, 4819), True, 'import matplotlib.pyplot as plt\n'), ((4824, 4834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4832, 4834), True, 'import matplotlib.pyplot as plt\n'), ((6876, 6913), 'sklearn.feature_selection.VarianceThreshold', 'feature_selection.VarianceThreshold', ([], {}), '()\n', (6911, 6913), False, 'from sklearn import feature_selection\n'), ((7205, 7242), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['X'], {'norm': '"""l2"""'}), "(X, norm='l2')\n", (7228, 7242), False, 'from sklearn import preprocessing\n'), ((7257, 7281), 'sklearn.svm.SVR', 'svm.SVR', 
([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (7264, 7281), False, 'from sklearn import svm\n'), ((7295, 7343), 'sklearn.feature_selection.RFECV', 'feature_selection.RFECV', (['estimator'], {'step': '(1)', 'cv': '(2)'}), '(estimator, step=1, cv=2)\n', (7318, 7343), False, 'from sklearn import feature_selection\n'), ((9055, 9132), 'sklearn.feature_selection.SelectKBest', 'feature_selection.SelectKBest', ([], {'score_func': 'score_functions[sf_choice]', 'k': '"""all"""'}), "(score_func=score_functions[sf_choice], k='all')\n", (9084, 9132), False, 'from sklearn import feature_selection\n'), ((9486, 9496), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9494, 9496), True, 'import matplotlib.pyplot as plt\n'), ((12790, 12862), 'sklearn.model_selection.RepeatedKFold', 'model_selection.RepeatedKFold', ([], {'n_splits': '(10)', 'n_repeats': '(3)', 'random_state': '(64)'}), '(n_splits=10, n_repeats=3, random_state=64)\n', (12819, 12862), False, 'from sklearn import model_selection\n'), ((12886, 12954), 'sklearn.feature_selection.SelectKBest', 'feature_selection.SelectKBest', ([], {'score_func': 'score_functions[sf_choice]'}), '(score_func=score_functions[sf_choice])\n', (12915, 12954), False, 'from sklearn import feature_selection\n'), ((12967, 13036), 'sklearn.pipeline.Pipeline', 'pipeline.Pipeline', ([], {'steps': "[('sel', featureSelectioner), ('lr', model)]"}), "(steps=[('sel', featureSelectioner), ('lr', model)])\n", (12984, 13036), False, 'from sklearn import pipeline\n'), ((13131, 13208), 'sklearn.model_selection.GridSearchCV', 'model_selection.GridSearchCV', (['pipeLine', 'grid'], {'scoring': 'score', 'n_jobs': '(-1)', 'cv': 'cv'}), '(pipeLine, grid, scoring=score, n_jobs=-1, cv=cv)\n', (13159, 13208), False, 'from sklearn import model_selection\n'), ((14518, 14569), 'sklearn.pipeline.Pipeline', 'pipeline.Pipeline', ([], {'steps': "[('s', rfe), ('m', model)]"}), "(steps=[('s', rfe), ('m', model)])\n", (14535, 14569), False, 'from sklearn import 
pipeline\n'), ((14623, 14709), 'sklearn.model_selection.RepeatedStratifiedKFold', 'model_selection.RepeatedStratifiedKFold', ([], {'n_splits': '(10)', 'n_repeats': '(8)', 'random_state': '(64)'}), '(n_splits=10, n_repeats=8,\n random_state=64)\n', (14662, 14709), False, 'from sklearn import model_selection\n'), ((14720, 14834), 'sklearn.model_selection.cross_val_score', 'model_selection.cross_val_score', (['my_pipeline', 'X', 'Y'], {'scoring': '"""accuracy"""', 'cv': 'cv', 'n_jobs': '(-1)', 'error_score': '"""raise"""'}), "(my_pipeline, X, Y, scoring='accuracy', cv=\n cv, n_jobs=-1, error_score='raise')\n", (14751, 14834), False, 'from sklearn import model_selection\n'), ((17078, 17117), 'pandas.Series', 'pd.Series', (['model.coef_'], {'index': 'X.columns'}), '(model.coef_, index=X.columns)\n', (17087, 17117), True, 'import pandas as pd\n'), ((17365, 17414), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature importance using Lasso Model"""'], {}), "('Feature importance using Lasso Model')\n", (17374, 17414), True, 'import matplotlib.pyplot as plt\n'), ((1838, 1872), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(sizes, sizes)'}), '(figsize=(sizes, sizes))\n', (1848, 1872), True, 'import matplotlib.pyplot as plt\n'), ((1877, 1944), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'Y', 'y': 'X[X.columns[i]]', 'palette': '"""RdYlGn"""', 'saturation': '(1)'}), "(x=Y, y=X[X.columns[i]], palette='RdYlGn', saturation=1)\n", (1888, 1944), True, 'import seaborn as sns\n'), ((2187, 2221), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(sizes, sizes)'}), '(figsize=(sizes, sizes))\n', (2197, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2297), 'seaborn.countplot', 'sns.countplot', ([], {'x': 'X[X.columns[i]]', 'hue': 'Y', 'palette': '"""RdYlGn"""', 'saturation': '(1)'}), "(x=X[X.columns[i]], hue=Y, palette='RdYlGn', saturation=1)\n", (2239, 2297), True, 'import seaborn as sns\n'), ((3653, 3694), 
'statsmodels.stats.outliers_influence.variance_inflation_factor', 'variance_inflation_factor', (['Data.values', 'i'], {}), '(Data.values, i)\n', (3678, 3694), False, 'from statsmodels.stats.outliers_influence import variance_inflation_factor\n'), ((4053, 4075), 'xgboost.XGBRegressor', 'xgboost.XGBRegressor', ([], {}), '()\n', (4073, 4075), False, 'import xgboost\n'), ((4204, 4227), 'xgboost.XGBClassifier', 'xgboost.XGBClassifier', ([], {}), '()\n', (4225, 4227), False, 'import xgboost\n'), ((4924, 4995), 'sklearn.feature_selection.SelectFromModel', 'feature_selection.SelectFromModel', (['model'], {'threshold': 'thresh', 'prefit': '(True)'}), '(model, threshold=thresh, prefit=True)\n', (4957, 4995), False, 'from sklearn import feature_selection\n'), ((5597, 5631), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['Yte', 'predictions'], {}), '(Yte, predictions)\n', (5613, 5631), False, 'from sklearn import metrics\n'), ((14173, 14201), 'sklearn.tree.DecisionTreeRegressor', 'tree.DecisionTreeRegressor', ([], {}), '()\n', (14199, 14201), False, 'from sklearn import tree\n'), ((14471, 14500), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (14498, 14500), False, 'from sklearn import tree\n'), ((16106, 16146), 'statsmodels.api.add_constant', 'statsmodels.api.add_constant', (['selected_X'], {}), '(selected_X)\n', (16134, 16146), False, 'import statsmodels\n'), ((16289, 16336), 'pandas.Series', 'pd.Series', (['model.pvalues.values[1:]'], {'index': 'cols'}), '(model.pvalues.values[1:], index=cols)\n', (16298, 16336), True, 'import pandas as pd\n'), ((16768, 16788), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', ([], {}), '()\n', (16786, 16788), False, 'from sklearn import linear_model\n'), ((16859, 16881), 'sklearn.linear_model.LassoCV', 'linear_model.LassoCV', ([], {}), '()\n', (16879, 16881), False, 'from sklearn import linear_model\n'), ((2600, 2633), 'numpy.ones_like', 'np.ones_like', (['corr'], {'dtype': 'np.bool'}), '(corr, 
dtype=np.bool)\n', (2612, 2633), True, 'import numpy as np\n'), ((5083, 5105), 'xgboost.XGBRegressor', 'xgboost.XGBRegressor', ([], {}), '()\n', (5103, 5105), False, 'import xgboost\n'), ((5346, 5369), 'xgboost.XGBClassifier', 'xgboost.XGBClassifier', ([], {}), '()\n', (5367, 5369), False, 'import xgboost\n'), ((14081, 14109), 'sklearn.tree.DecisionTreeRegressor', 'tree.DecisionTreeRegressor', ([], {}), '()\n', (14107, 14109), False, 'from sklearn import tree\n'), ((14252, 14280), 'sklearn.tree.DecisionTreeRegressor', 'tree.DecisionTreeRegressor', ([], {}), '()\n', (14278, 14280), False, 'from sklearn import tree\n'), ((14376, 14405), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (14403, 14405), False, 'from sklearn import tree\n'), ((14907, 14924), 'numpy.mean', 'np.mean', (['n_scores'], {}), '(n_scores)\n', (14914, 14924), True, 'import numpy as np\n'), ((14926, 14942), 'numpy.std', 'np.std', (['n_scores'], {}), '(n_scores)\n', (14932, 14942), True, 'import numpy as np\n'), ((16161, 16195), 'statsmodels.api.OLS', 'statsmodels.api.OLS', (['Y', 'selected_X'], {}), '(Y, selected_X)\n', (16180, 16195), False, 'import statsmodels\n')] |
from numpy import power, array, random


def trilateration(P, R):
    """Estimate a 2-D position from three anchors and range measurements.

    Parameters
    ----------
    P : numpy array of shape (>=3, 2)
        Anchor coordinates, one (x, y) pair per row.  NOTE: ``P`` (and
        ``R``) may be reordered in place when the first two anchors share
        almost the same x coordinate.
    R : sequence of float
        Measured distances from the unknown point to each anchor.

    Returns
    -------
    ``[x, y]`` estimated position, or ``None`` when fewer than three
    ranges are given or the anchor geometry is degenerate.
    """
    if len(R) < 3:
        return None
    # To avoid division by 0 rotate anchors and measurements so that the
    # first two anchors do not share (almost) the same x coordinate.
    if abs(P[1, 0] - P[0, 0]) < 1e-5:
        P[[2, 0, 1]] = P[[0, 1, 2]]
        R[2], R[0], R[1] = R[0], R[1], R[2]
    ps = power(P[2, 0], 2) - power(P[1, 0], 2) + \
         power(P[2, 1], 2) - power(P[1, 1], 2)
    pt = power(P[0, 0], 2) - power(P[1, 0], 2) + \
         power(P[0, 1], 2) - power(P[1, 1], 2)
    # bs is proportional to the signed area spanned by the three anchors;
    # zero means they are collinear and no unique solution exists.
    bs = (P[1, 0] - P[2, 0]) * (P[0, 1] - P[1, 1]) - \
         (P[2, 1] - P[1, 1]) * (P[1, 0] - P[0, 0])
    s = (ps + power(R[1], 2) - power(R[2], 2)) / 2.0
    t = (pt + power(R[1], 2) - power(R[0], 2)) / 2.0
    if bs == 0:
        return None
    try:
        y = (t * (P[1, 0] - P[2, 0]) - s * (P[1, 0] - P[0, 0])) / bs
    except ZeroDivisionError:
        # FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt and genuine programming errors.
        return None
    x = (y * (P[0, 1] - P[1, 1]) - t) / (P[1, 0] - P[0, 0])
    return [x, y]
| [
"numpy.power"
] | [((349, 366), 'numpy.power', 'power', (['P[1, 1]', '(2)'], {}), '(P[1, 1], 2)\n', (354, 366), False, 'from numpy import power, array, random\n'), ((447, 464), 'numpy.power', 'power', (['P[1, 1]', '(2)'], {}), '(P[1, 1], 2)\n', (452, 464), False, 'from numpy import power, array, random\n'), ((329, 346), 'numpy.power', 'power', (['P[2, 1]', '(2)'], {}), '(P[2, 1], 2)\n', (334, 346), False, 'from numpy import power, array, random\n'), ((427, 444), 'numpy.power', 'power', (['P[0, 1]', '(2)'], {}), '(P[0, 1], 2)\n', (432, 444), False, 'from numpy import power, array, random\n'), ((602, 616), 'numpy.power', 'power', (['R[2]', '(2)'], {}), '(R[2], 2)\n', (607, 616), False, 'from numpy import power, array, random\n'), ((655, 669), 'numpy.power', 'power', (['R[0]', '(2)'], {}), '(R[0], 2)\n', (660, 669), False, 'from numpy import power, array, random\n'), ((278, 295), 'numpy.power', 'power', (['P[2, 0]', '(2)'], {}), '(P[2, 0], 2)\n', (283, 295), False, 'from numpy import power, array, random\n'), ((298, 315), 'numpy.power', 'power', (['P[1, 0]', '(2)'], {}), '(P[1, 0], 2)\n', (303, 315), False, 'from numpy import power, array, random\n'), ((376, 393), 'numpy.power', 'power', (['P[0, 0]', '(2)'], {}), '(P[0, 0], 2)\n', (381, 393), False, 'from numpy import power, array, random\n'), ((396, 413), 'numpy.power', 'power', (['P[1, 0]', '(2)'], {}), '(P[1, 0], 2)\n', (401, 413), False, 'from numpy import power, array, random\n'), ((585, 599), 'numpy.power', 'power', (['R[1]', '(2)'], {}), '(R[1], 2)\n', (590, 599), False, 'from numpy import power, array, random\n'), ((638, 652), 'numpy.power', 'power', (['R[1]', '(2)'], {}), '(R[1], 2)\n', (643, 652), False, 'from numpy import power, array, random\n')] |
'''
Performance exercise for the DS_Method module developed for my
undergraduate final project (direct stiffness method).

A ten-bar truss is assembled and every public routine of the
module is invoked once.
'''
import numpy as np
import DS_Method as dsm


def test():
    """Build a 10-bar truss and run every DS_Method computation on it."""
    # Node coordinates in mm.
    node_coords = np.array([[0, 0], [0, 9.144], [9.144, 0],
                            [9.144, 9.144], [18.288, 0], [18.288, 9.144]])
    # Bar connectivity: (start node, end node) pairs.
    connectivity = [(0, 2), (0, 3), (1, 2), (1, 3), (3, 2),
                    (3, 4), (5, 2), (3, 5), (2, 4), (4, 5)]
    n_bars = len(connectivity)
    # Cross-section area of each bar.
    section_areas = [50] * n_bars
    # Moment of inertia of each bar.
    inertias = [6895e4] * n_bars
    # Applied loads: [degree of freedom, force] pairs.
    loads = [[5, -444.82], [9, -444.82]]
    # Restrained degrees of freedom (supports).
    restrained_dofs = [0, 1, 2, 3]
    # --- Module computations ---
    # Local and global stiffness matrices.
    k_local, k_global = dsm.stiff_matrices(connectivity, node_coords,
                                           section_areas, inertias)
    # Nodal displacements.
    displacements = dsm.desloc(restrained_dofs, connectivity, node_coords,
                               section_areas, inertias, loads)
    # Force per degree of freedom of each bar.
    dof_forces = dsm.bar_force(restrained_dofs, connectivity, node_coords,
                              section_areas, inertias, loads)
    # Support reactions.
    reactions = dsm.support_reac(restrained_dofs, connectivity, node_coords,
                                 section_areas, inertias, loads)
    # Scalar axial force per bar.
    bar_forces = dsm.scalar_bar_force(restrained_dofs, connectivity, node_coords,
                                     section_areas, inertias, loads)


if __name__ == '__main__':
    test()
| [
"DS_Method.scalar_bar_force",
"DS_Method.bar_force",
"numpy.array",
"DS_Method.desloc",
"DS_Method.stiff_matrices",
"DS_Method.support_reac"
] | [((275, 368), 'numpy.array', 'np.array', (['[[0, 0], [0, 9.144], [9.144, 0], [9.144, 9.144], [18.288, 0], [18.288, 9.144]]'], {}), '([[0, 0], [0, 9.144], [9.144, 0], [9.144, 9.144], [18.288, 0], [\n 18.288, 9.144]])\n', (283, 368), True, 'import numpy as np\n'), ((737, 778), 'DS_Method.stiff_matrices', 'dsm.stiff_matrices', (['barras', 'nos', 'areas', 'm'], {}), '(barras, nos, areas, m)\n', (755, 778), True, 'import DS_Method as dsm\n'), ((820, 866), 'DS_Method.desloc', 'dsm.desloc', (['drest', 'barras', 'nos', 'areas', 'm', 'load'], {}), '(drest, barras, nos, areas, m, load)\n', (830, 866), True, 'import DS_Method as dsm\n'), ((923, 972), 'DS_Method.bar_force', 'dsm.bar_force', (['drest', 'barras', 'nos', 'areas', 'm', 'load'], {}), '(drest, barras, nos, areas, m, load)\n', (936, 972), True, 'import DS_Method as dsm\n'), ((1014, 1066), 'DS_Method.support_reac', 'dsm.support_reac', (['drest', 'barras', 'nos', 'areas', 'm', 'load'], {}), '(drest, barras, nos, areas, m, load)\n', (1030, 1066), True, 'import DS_Method as dsm\n'), ((1108, 1164), 'DS_Method.scalar_bar_force', 'dsm.scalar_bar_force', (['drest', 'barras', 'nos', 'areas', 'm', 'load'], {}), '(drest, barras, nos, areas, m, load)\n', (1128, 1164), True, 'import DS_Method as dsm\n')] |
import numpy as np
import matplotlib.pyplot as plt
class BasicTG:
    """Trajectory generator tracing a figure-eight that slowly shrinks.

    Precomputes ``num_pts`` reference points of a Lissajous "infinity"
    curve, degrading the amplitudes a little after each point, and
    exposes a reset/step/reward interface around that reference path.
    """

    def __init__(self):
        self.num_pts = 200
        self.total_time = 1.0
        self.current_index = 0
        self.x = np.zeros(self.num_pts)
        self.y = np.zeros(self.num_pts)
        # Amplitudes parameterizing the eight-shaped reference path.
        self.a_x_start = 2.0
        self.a_y_start = 2.0
        self.a_x = self.a_x_start
        self.a_y = self.a_y_start
        self.a_x_history = np.zeros(self.num_pts)
        self.a_y_history = np.zeros(self.num_pts)
        # Precompute the reference points, degrading the amplitudes as
        # we go and recording their history.
        for k in range(self.num_pts):
            t = (float(k) / self.num_pts) * self.total_time
            phase = 2 * np.pi * t
            self.x[k] = self.a_x * np.sin(phase)
            self.y[k] = (self.a_y / 2.0) * (np.sin(phase) * np.cos(phase))
            self.degrade_path()
            self.a_x_history[k] = self.a_x
            self.a_y_history[k] = self.a_y

    def compute_tg_at_index(self, time, a_x, a_y):
        """Evaluate the figure-eight at integer index ``time`` for the
        given amplitudes; returns the (x, y) reference point."""
        t = (float(time) / self.num_pts) * self.total_time
        phase = 2 * np.pi * t
        return a_x * np.sin(phase), (a_y / 2.0) * (np.sin(phase) * np.cos(phase))

    def degrade_path(self):
        """Shrink both amplitudes slightly (per-point path deformation)."""
        self.a_x -= (2 * self.a_x / self.num_pts) / 10.0
        self.a_y -= (self.a_y / (2 * self.num_pts)) / 10.0

    def view_plot(self):
        """Scatter-plot the first 50 reference points."""
        plt.scatter(self.x[:50], self.y[:50])
        plt.show()

    def reset(self):
        """Rewind to the start of the path and restore the amplitudes.

        Returns (index, x, y, a_x, a_y) for the starting point.
        """
        self.current_index = 0
        self.a_x = self.a_x_start
        self.a_y = self.a_y_start
        return (self.current_index, self.x[self.current_index],
                self.y[self.current_index], self.a_x, self.a_y)

    def reward(self, x, y):
        """Negative Euclidean distance to the current reference point."""
        ref_x = self.x[self.current_index]
        ref_y = self.y[self.current_index]
        return -np.sqrt((x - ref_x) ** 2 + (y - ref_y) ** 2)

    def step(self, new_a_x, new_a_y, nn_x, nn_y):
        """Advance one index.

        Returns (time, x, y, a_x, a_y) where x/y are the TG point for the
        new amplitudes plus the network offsets nn_x/nn_y.
        """
        x_tg, y_tg = self.compute_tg_at_index(self.current_index,
                                              new_a_x, new_a_y)
        self.current_index += 1
        return (self.current_index, x_tg + nn_x, y_tg + nn_y,
                new_a_x, new_a_y)

    def is_done(self):
        """True once the final reference index has been reached."""
        return self.current_index >= self.num_pts - 1
| [
"numpy.sqrt",
"numpy.zeros",
"numpy.cos",
"matplotlib.pyplot.scatter",
"numpy.sin",
"matplotlib.pyplot.show"
] | [((199, 221), 'numpy.zeros', 'np.zeros', (['self.num_pts'], {}), '(self.num_pts)\n', (207, 221), True, 'import numpy as np\n'), ((239, 261), 'numpy.zeros', 'np.zeros', (['self.num_pts'], {}), '(self.num_pts)\n', (247, 261), True, 'import numpy as np\n'), ((462, 484), 'numpy.zeros', 'np.zeros', (['self.num_pts'], {}), '(self.num_pts)\n', (470, 484), True, 'import numpy as np\n'), ((512, 534), 'numpy.zeros', 'np.zeros', (['self.num_pts'], {}), '(self.num_pts)\n', (520, 534), True, 'import numpy as np\n'), ((1520, 1559), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.x[0:50]', 'self.y[0:50]'], {}), '(self.x[0:50], self.y[0:50])\n', (1531, 1559), True, 'import matplotlib.pyplot as plt\n'), ((1568, 1578), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1576, 1578), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1146), 'numpy.sin', 'np.sin', (['(2 * np.pi * time)'], {}), '(2 * np.pi * time)\n', (1128, 1146), True, 'import numpy as np\n'), ((1996, 2048), 'numpy.sqrt', 'np.sqrt', (['((x - optimal_x) ** 2 + (y - optimal_y) ** 2)'], {}), '((x - optimal_x) ** 2 + (y - optimal_y) ** 2)\n', (2003, 2048), True, 'import numpy as np\n'), ((672, 701), 'numpy.sin', 'np.sin', (['(2 * np.pi * next_time)'], {}), '(2 * np.pi * next_time)\n', (678, 701), True, 'import numpy as np\n'), ((1177, 1201), 'numpy.sin', 'np.sin', (['(2 * np.pi * time)'], {}), '(2 * np.pi * time)\n', (1183, 1201), True, 'import numpy as np\n'), ((1204, 1228), 'numpy.cos', 'np.cos', (['(2 * np.pi * time)'], {}), '(2 * np.pi * time)\n', (1210, 1228), True, 'import numpy as np\n'), ((741, 770), 'numpy.sin', 'np.sin', (['(2 * np.pi * next_time)'], {}), '(2 * np.pi * next_time)\n', (747, 770), True, 'import numpy as np\n'), ((773, 802), 'numpy.cos', 'np.cos', (['(2 * np.pi * next_time)'], {}), '(2 * np.pi * next_time)\n', (779, 802), True, 'import numpy as np\n')] |
'''
This is the main class which, given an input file (.nc, .tar.gz or .zip), it creates the xarray dataset.
PAY ATTENTION:
- The file MUST be netcdf file, it can be compressed into zip or tar.gz but it has to be netcdf inside!!
    - File cannot contain ".tar.gz" or ".zip" inside the name, just at the end
'''
# importing required modules
from zipfile import ZipFile
import tarfile
import xarray as xr
import numpy as np
class CustomDataset:
    """Load a netCDF file (optionally inside a .zip / .tar.gz archive) as an xarray dataset.

    The archive, when present, is extracted into the working directory and
    the contained netCDF file is opened.  Coordinates named ``lat``/``lon``
    are normalized to ``latitude``/``longitude``.
    """

    def __init__(self, filename):
        self.filename = filename
        self.decompress()

    def decompress(self):
        """Extract the archive (if any), then open the resulting netCDF file.

        Rewrites ``self.filename`` to point at the extracted ``.nc`` file.
        """
        if self.filename.endswith("tar.gz"):
            # BUG FIX: the original called ``tarfile.open(fname, ...)`` with
            # an undefined name ``fname`` -> NameError on any .tar.gz input.
            tar = tarfile.open(self.filename, "r:gz")
            print('Extracting all the files now...')
            tar.extractall()
            tar.close()
            print('Done!')
            self.filename = self.filename.replace("tar.gz", "nc")
        elif self.filename.endswith(".zip"):
            # BUG FIX: the original called ``ZipFile(file_name, ...)`` with
            # an undefined name ``file_name``; the context variable is also
            # renamed so it no longer shadows the ``zip`` builtin.
            with ZipFile(self.filename, 'r') as archive:
                print('Extracting all the files now...')
                archive.extractall()
                print('Done!')
            self.filename = self.filename.replace("zip", "nc")
        self.open_dataset()

    def set_filename(self, filename):
        """Point the wrapper at a different file (does not reopen it)."""
        self.filename = filename

    def open_dataset(self):
        """Open ``self.filename`` with xarray and normalize coordinate names."""
        print("Opening dataset at : ", self.filename)
        self.dataset = xr.open_dataset(self.filename)
        # Some products ship 'lat'/'lon'; normalize to 'latitude'/'longitude'.
        if ('lat' in self.dataset.variables) and ('lon' in self.dataset.variables):
            self.dataset = self.dataset.rename({'lat': 'latitude', 'lon': 'longitude'})
        print("Done!")

    def cut_region(self, lat_n, lat_s, lon_e, lon_w, res=0.25):
        """Linearly interpolate the dataset onto a regular grid covering
        the box [lat_s, lat_n] x [lon_e, lon_w] with ``res``-degree spacing."""
        new_lat_values = np.arange(lat_s, lat_n + res, res)
        new_long_values = np.arange(lon_e, lon_w + res, res)
        self.dataset = self.dataset.interp(latitude=new_lat_values,
                                           longitude=new_long_values,
                                           method='linear')

    def rescale(self, target_res=0.25, method='nearest'):
        """Interpolate the dataset onto a grid with ``target_res`` spacing.

        :param target_res: target grid spacing in degrees (same for lat/lon).
        :param method: interpolation method forwarded to ``Dataset.interp``.
        """
        assert ('latitude' in self.dataset.variables), "latitude column missing (name must be 'latitude')"
        assert ('longitude' in self.dataset.variables), "longitude column missing (name must be 'longitude')"
        lats = self.dataset.latitude.values
        longs = self.dataset.longitude.values
        lats.sort()
        longs.sort()
        lat_interval = np.float32(lats[-1] - lats[0])
        long_interval = np.float32(longs[-1] - longs[0])
        # Number of target-resolution cells that fit in each interval.
        lat_new_squares = lat_interval // target_res
        long_new_squares = long_interval // target_res
        new_lat_values = np.around(np.arange(0, lat_new_squares + 1, 1) * target_res + round(lats[0], 2), decimals=2)
        new_long_values = np.around(np.arange(0, long_new_squares + 1, 1) * target_res + round(longs[0], 2), decimals=2)
        self.dataset = self.dataset.interp(latitude=new_lat_values,
                                           longitude=new_long_values,
                                           method=method)

    def get_dataset(self):
        """Return the wrapped xarray dataset."""
        return self.dataset

    def rename_var(self, new_dict):
        """Rename variables/coordinates according to ``new_dict``."""
        self.dataset = self.dataset.rename(new_dict)

    def resample(self, t):
        """Sort the dataset along time/latitude/longitude, then resample
        the time axis with frequency ``t``, interpolating gaps linearly."""
        self.dataset = self.dataset.sortby('time', ascending=True)
        self.dataset = self.dataset.sortby('latitude', ascending=True)
        self.dataset = self.dataset.sortby('longitude', ascending=True)
        self.dataset = self.dataset.resample(time=t).interpolate("linear")
| [
"tarfile.open",
"zipfile.ZipFile",
"xarray.open_dataset",
"numpy.float32",
"numpy.arange"
] | [((1356, 1386), 'xarray.open_dataset', 'xr.open_dataset', (['self.filename'], {}), '(self.filename)\n', (1371, 1386), True, 'import xarray as xr\n'), ((1659, 1693), 'numpy.arange', 'np.arange', (['lat_s', '(lat_n + res)', 'res'], {}), '(lat_s, lat_n + res, res)\n', (1668, 1693), True, 'import numpy as np\n'), ((1717, 1751), 'numpy.arange', 'np.arange', (['lon_e', '(lon_w + res)', 'res'], {}), '(lon_e, lon_w + res, res)\n', (1726, 1751), True, 'import numpy as np\n'), ((2933, 2963), 'numpy.float32', 'np.float32', (['(lats[-1] - lats[0])'], {}), '(lats[-1] - lats[0])\n', (2943, 2963), True, 'import numpy as np\n'), ((2985, 3017), 'numpy.float32', 'np.float32', (['(longs[-1] - longs[0])'], {}), '(longs[-1] - longs[0])\n', (2995, 3017), True, 'import numpy as np\n'), ((633, 660), 'tarfile.open', 'tarfile.open', (['fname', '"""r:gz"""'], {}), "(fname, 'r:gz')\n", (645, 660), False, 'import tarfile\n'), ((932, 955), 'zipfile.ZipFile', 'ZipFile', (['file_name', '"""r"""'], {}), "(file_name, 'r')\n", (939, 955), False, 'from zipfile import ZipFile\n'), ((3308, 3344), 'numpy.arange', 'np.arange', (['(0)', '(lat_new_squares + 1)', '(1)'], {}), '(0, lat_new_squares + 1, 1)\n', (3317, 3344), True, 'import numpy as np\n'), ((3482, 3519), 'numpy.arange', 'np.arange', (['(0)', '(long_new_squares + 1)', '(1)'], {}), '(0, long_new_squares + 1, 1)\n', (3491, 3519), True, 'import numpy as np\n')] |
# Copyright (c) 2017-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from decimal import Decimal
import numpy as np
class ReaderMock(object):
    """Iterator that yields synthetic rows conforming to a unischema."""

    def __init__(self, schema, schema_data_generator, ngram=None):
        """Initializes a reader object.

        :param schema: unischema instance
        :param schema_data_generator: callable mapping a unischema to a
            dict of field-name -> value complying with that schema.
        """
        self.schema = schema
        self.schema_data_generator = schema_data_generator
        if ngram is not None:
            raise ValueError('Sequence argument not supported for ReaderMock')
        self.ngram = ngram

    def fetch(self):
        """Build one mock row and wrap it in the schema's namedtuple.

        :return: named tuple data according to the schema.
        """
        row = self.schema_data_generator(self.schema)
        return self.schema.make_namedtuple(**row)

    def __iter__(self):
        return self

    def __next__(self):
        return self.fetch()

    def next(self):
        # Python-2 style alias for __next__.
        return self.__next__()

    # Context-manager support so the mock can stand in for a real reader.
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def stop(self):
        """No-op; present for interface compatibility with real readers."""
        pass

    def join(self):
        """No-op; present for interface compatibility with real readers."""
        pass
def schema_data_generator_example(schema):
    """
    Generates dummy data for a given schema.

    :param schema: unischema instance (an object with a ``fields`` dict
        whose values expose ``name``, ``numpy_dtype`` and ``shape``)
    :return: A dictionary mapping field name to a dummy value matching
        the field's dtype and shape.
    """
    fields_as_dict = {}
    for field in schema.fields.values():
        if field.numpy_dtype is Decimal:
            fields_as_dict[field.name] = Decimal('0.0')
            continue
        # Unknown (None) dimensions are materialized with length 10.
        field_shape = tuple(10 if dim is None else dim for dim in field.shape)
        if field.numpy_dtype == np.bytes_:
            # FIX: ``np.string_`` was removed in NumPy 2.0; ``np.bytes_``
            # is the same type and exists in both 1.x and 2.x.
            if field_shape == ():
                default_val = 'default'
            else:
                default_val = ['default'] * field_shape[0]
            fields_as_dict[field.name] = np.array(default_val, dtype=field.numpy_dtype)
        else:
            fields_as_dict[field.name] = np.zeros(field_shape, dtype=field.numpy_dtype)
    return fields_as_dict
| [
"numpy.array",
"numpy.zeros",
"decimal.Decimal"
] | [((2289, 2303), 'decimal.Decimal', 'Decimal', (['"""0.0"""'], {}), "('0.0')\n", (2296, 2303), False, 'from decimal import Decimal\n'), ((2663, 2709), 'numpy.array', 'np.array', (['default_val'], {'dtype': 'field.numpy_dtype'}), '(default_val, dtype=field.numpy_dtype)\n', (2671, 2709), True, 'import numpy as np\n'), ((2773, 2819), 'numpy.zeros', 'np.zeros', (['field_shape'], {'dtype': 'field.numpy_dtype'}), '(field_shape, dtype=field.numpy_dtype)\n', (2781, 2819), True, 'import numpy as np\n')] |
import argparse
import pickle
import dnnlib
import torch
import json
from glob import glob
import os
import numpy as np
import PIL.Image
from tqdm import tqdm
from utils.visualization import get_palette
from utils.data_util import cgpart_car_simplify_v0 as trans_mask
#----------------------------------------------------------------------------
def wide_crop_tensor(x):
    """Center-crop a (B, C, H, W) tensor along H to 3/4 of its height."""
    _, _, height, _ = x.shape
    cropped = int(height * 3 // 4)
    margin = (height - cropped) // 2
    return x[:, :, margin:margin + cropped]
#----------------------------------------------------------------------------
def main(args):
    """Render side-by-side image/segmentation visualizations.

    Writes two sets of PNGs under ``args.outdir``:
    ``labeled_data/`` -- ground-truth images stacked above their part
    segmentations, and ``generated_data/`` -- generator samples stacked
    above the annotator's predicted segmentations.
    """
    # Setup -- requires a CUDA device.
    device = torch.device(f'cuda')
    network_path = os.path.dirname(args.network_pth)
    palette = get_palette(args.palette_name)
    # Load generator & annotator.
    # NOTE(review): pickle.load on a checkpoint executes arbitrary code --
    # only load trusted files.
    generator_file = os.path.join(network_path, "generator.pkl")
    with open(generator_file, 'rb') as f:
        G_kwargs = pickle.load(f).pop('G_kwargs')
        print(G_kwargs)
        G = dnnlib.util.construct_class_by_name(**G_kwargs).eval().requires_grad_(False).to(device)
    with open(args.network_pth, 'rb') as f:
        A = pickle.load(f)['A'].eval().requires_grad_(False).to(device)
    # Visualize labeled data: pick a random subset of the label set.
    outlabel_dir = os.path.join(args.outdir, 'labeled_data')
    os.makedirs(outlabel_dir, exist_ok=True)
    indices = [os.path.relpath(x, os.path.join(args.label_path, 'image'))
               for x in sorted(glob(os.path.join(args.label_path, 'image', '*/*.png')))]
    np.random.shuffle(indices)
    indices = indices[:args.num_vis]
    print('Saving labeled images')
    for idx, index in enumerate(tqdm(indices)):
        img = PIL.Image.open(os.path.join(args.label_path, 'image', index))
        model_id, base_name = index.split('/')
        # NOTE(review): drops the first 6 characters of the image file
        # name to build the segmentation file name -- assumes a fixed
        # 6-char prefix; confirm against the dataset layout.
        seg = PIL.Image.open(os.path.join(args.label_path, 'seg', model_id, model_id + base_name[6:]))
        if img.mode == 'RGBA':
            img = img.convert('RGB')
        assert seg.mode == 'P'
        # Simplify the raw part labels, then colorize via the palette.
        seg = trans_mask(np.asarray(seg).copy())
        seg = PIL.Image.fromarray(seg, mode='P')
        seg.putpalette(palette)
        seg = seg.convert('RGB')
        # Stack image on top of its segmentation.
        img = np.concatenate([np.asarray(img), np.asarray(seg)], axis=0)
        img = PIL.Image.fromarray(img, 'RGB')
        img_fname = os.path.join(outlabel_dir, f'{idx:04d}.png')
        img.save(img_fname, format='png', compress_level=0, optimize=False)
    # Generate and visualize samples from the generator + annotator.
    outgen_dir = os.path.join(args.outdir, 'generated_data')
    os.makedirs(outgen_dir, exist_ok=True)
    for idx in tqdm(range(args.num_vis)):
        # Fetch data
        with torch.no_grad():
            z = torch.randn(1, G.z_dim, device=device)
            c = torch.zeros(1, G.c_dim, device=device)
            img, features = G(z, c)
            if args.wide_crop:
                img = wide_crop_tensor(img)
            img = img[0].permute(1, 2, 0) # (H, W, C)
            seg = A(features) # (B, C, H, W)
            if args.wide_crop:
                seg = wide_crop_tensor(seg)
            # Per-pixel argmax over segmentation classes.
            _, seg = torch.max(seg, dim=1) # (nA, H, W)
        # Save the visualization: sample stacked above its segmentation.
        img_fname = os.path.join(outgen_dir, f'{idx:04d}.png')
        img = (img * 127.5 + 128).clamp(0, 255).to(dtype=torch.uint8, device='cpu').numpy()
        seg = PIL.Image.fromarray(seg[0].to(dtype=torch.uint8, device='cpu').numpy(), 'P')
        seg.putpalette(palette)
        seg = np.asarray(seg.convert('RGB'))
        img = np.concatenate([img, seg], axis=0)
        img = PIL.Image.fromarray(img, 'RGB')
        img.save(img_fname, format='png', compress_level=0, optimize=False)
#----------------------------------------------------------------------------
if __name__ == '__main__':
    # Command-line entry point: parse arguments and run the visualization.
    parser = argparse.ArgumentParser()
    parser.add_argument('--network_pth', help='The model file to be resumed', type=str)
    parser.add_argument('--label_path', help='The path to labeled data', type=str)
    parser.add_argument('--num_vis', help='The number of checkpoints to be visualized', type=int, default=200)
    parser.add_argument('--palette_name', help='The palette name for visualization', type=str)
    parser.add_argument('--outdir', help='The output directory to save the visualization results', type=str, default='./output/paper_plots/cross_domain_demo')
    parser.add_argument('--wide-crop', help='Whether to crop the generated images/segmentation into wide size', action='store_true')
    args = parser.parse_args()
    main(args)
| [
"numpy.random.shuffle",
"os.makedirs",
"utils.visualization.get_palette",
"argparse.ArgumentParser",
"tqdm.tqdm",
"os.path.join",
"torch.max",
"pickle.load",
"numpy.asarray",
"os.path.dirname",
"torch.zeros",
"numpy.concatenate",
"dnnlib.util.construct_class_by_name",
"torch.no_grad",
"t... | [((618, 639), 'torch.device', 'torch.device', (['f"""cuda"""'], {}), "(f'cuda')\n", (630, 639), False, 'import torch\n'), ((660, 693), 'os.path.dirname', 'os.path.dirname', (['args.network_pth'], {}), '(args.network_pth)\n', (675, 693), False, 'import os\n'), ((709, 739), 'utils.visualization.get_palette', 'get_palette', (['args.palette_name'], {}), '(args.palette_name)\n', (720, 739), False, 'from utils.visualization import get_palette\n'), ((798, 841), 'os.path.join', 'os.path.join', (['network_path', '"""generator.pkl"""'], {}), "(network_path, 'generator.pkl')\n", (810, 841), False, 'import os\n'), ((1232, 1273), 'os.path.join', 'os.path.join', (['args.outdir', '"""labeled_data"""'], {}), "(args.outdir, 'labeled_data')\n", (1244, 1273), False, 'import os\n'), ((1279, 1319), 'os.makedirs', 'os.makedirs', (['outlabel_dir'], {'exist_ok': '(True)'}), '(outlabel_dir, exist_ok=True)\n', (1290, 1319), False, 'import os\n'), ((1494, 1520), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1511, 1520), True, 'import numpy as np\n'), ((2456, 2499), 'os.path.join', 'os.path.join', (['args.outdir', '"""generated_data"""'], {}), "(args.outdir, 'generated_data')\n", (2468, 2499), False, 'import os\n'), ((2505, 2543), 'os.makedirs', 'os.makedirs', (['outgen_dir'], {'exist_ok': '(True)'}), '(outgen_dir, exist_ok=True)\n', (2516, 2543), False, 'import os\n'), ((3767, 3792), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3790, 3792), False, 'import argparse\n'), ((1628, 1641), 'tqdm.tqdm', 'tqdm', (['indices'], {}), '(indices)\n', (1632, 1641), False, 'from tqdm import tqdm\n'), ((2284, 2328), 'os.path.join', 'os.path.join', (['outlabel_dir', 'f"""{idx:04d}.png"""'], {}), "(outlabel_dir, f'{idx:04d}.png')\n", (2296, 2328), False, 'import os\n'), ((3161, 3203), 'os.path.join', 'os.path.join', (['outgen_dir', 'f"""{idx:04d}.png"""'], {}), "(outgen_dir, f'{idx:04d}.png')\n", (3173, 3203), False, 'import os\n'), ((3483, 
3517), 'numpy.concatenate', 'np.concatenate', (['[img, seg]'], {'axis': '(0)'}), '([img, seg], axis=0)\n', (3497, 3517), True, 'import numpy as np\n'), ((1355, 1393), 'os.path.join', 'os.path.join', (['args.label_path', '"""image"""'], {}), "(args.label_path, 'image')\n", (1367, 1393), False, 'import os\n'), ((1674, 1719), 'os.path.join', 'os.path.join', (['args.label_path', '"""image"""', 'index'], {}), "(args.label_path, 'image', index)\n", (1686, 1719), False, 'import os\n'), ((1799, 1871), 'os.path.join', 'os.path.join', (['args.label_path', '"""seg"""', 'model_id', '(model_id + base_name[6:])'], {}), "(args.label_path, 'seg', model_id, model_id + base_name[6:])\n", (1811, 1871), False, 'import os\n'), ((2623, 2638), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2636, 2638), False, 'import torch\n'), ((2657, 2695), 'torch.randn', 'torch.randn', (['(1)', 'G.z_dim'], {'device': 'device'}), '(1, G.z_dim, device=device)\n', (2668, 2695), False, 'import torch\n'), ((2713, 2751), 'torch.zeros', 'torch.zeros', (['(1)', 'G.c_dim'], {'device': 'device'}), '(1, G.c_dim, device=device)\n', (2724, 2751), False, 'import torch\n'), ((3068, 3089), 'torch.max', 'torch.max', (['seg'], {'dim': '(1)'}), '(seg, dim=1)\n', (3077, 3089), False, 'import torch\n'), ((905, 919), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (916, 919), False, 'import pickle\n'), ((2173, 2188), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (2183, 2188), True, 'import numpy as np\n'), ((2190, 2205), 'numpy.asarray', 'np.asarray', (['seg'], {}), '(seg)\n', (2200, 2205), True, 'import numpy as np\n'), ((1436, 1485), 'os.path.join', 'os.path.join', (['args.label_path', '"""image"""', '"""*/*.png"""'], {}), "(args.label_path, 'image', '*/*.png')\n", (1448, 1485), False, 'import os\n'), ((2001, 2016), 'numpy.asarray', 'np.asarray', (['seg'], {}), '(seg)\n', (2011, 2016), True, 'import numpy as np\n'), ((974, 1021), 'dnnlib.util.construct_class_by_name', 
'dnnlib.util.construct_class_by_name', ([], {}), '(**G_kwargs)\n', (1009, 1021), False, 'import dnnlib\n'), ((1120, 1134), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1131, 1134), False, 'import pickle\n')] |
import neurokit2 as nk
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from preprocess_ludb import read_ludb_files
import numpy as np
import pandas as pd
# NOTE(review): exploratory ECG-analysis script -- it runs top to bottom and
# opens many interactive matplotlib/NeuroKit2 figures along the way.
pd.set_option('display.width', 400)
pd.set_option('display.max_columns', 25)
np.set_printoptions(linewidth=400)
import seaborn as sns
plt.rcParams['figure.figsize'] = [30, 25] # Bigger images
plt.rcParams['font.size']= 14
# load ECG data from mit-bih database into pandas dataframes
#data, anno, metadata = read_mit_bit_files()
# load ECG data from ludb database into pandas dataframes
data, anno, metadata = read_ludb_files()
#print(data["ECG"][:30000])
# Preprocess the data (filter, find peaks, etc.)
# pantompkins1985', 'engzeemod2012', 'christov2004'
# NOTE(review): only the first 30000 samples of the recording are analysed
# throughout this script.
processed_data, info = nk.ecg_process(ecg_signal=data["ECG"][:30000], sampling_rate=int(metadata["Sampling Frequency"]), method="pantompkins1985")
# NOTE(review): this prints the bound method object, not the first rows --
# probably meant ``processed_data.head()``.
print(processed_data.head)
# Visualise the processing
plt.rcParams['figure.figsize'] = [15, 9] # Bigger images
nk.ecg_plot(processed_data, sampling_rate=int(metadata["Sampling Frequency"]), show_type="default")
plt.show()
plt.rcParams['figure.figsize'] = [40, 40] # Bigger images
nk.ecg_plot(processed_data, sampling_rate=int(metadata["Sampling Frequency"]), show_type="artifacts")
plt.show()
# Compute relevant features
results = nk.ecg_analyze(data=processed_data, sampling_rate=int(metadata["Sampling Frequency"]))
print(results)
# Compute HRV indices
plt.rcParams['figure.figsize'] = [25, 20] # Bigger images
hrv_indices = nk.hrv(peaks=processed_data["ECG_R_Peaks"], sampling_rate=int(metadata["Sampling Frequency"]), show=True)
print(hrv_indices)
plt.show()
# Delineate
plt.rcParams['figure.figsize'] = [25, 20] # Bigger images processed_data["ECG_Clean"]
signal, waves = nk.ecg_delineate(ecg_cleaned=processed_data["ECG_Clean"].to_numpy(), rpeaks=info["ECG_R_Peaks"], sampling_rate=int(metadata["Sampling Frequency"]), method="dwt", show=True, show_type='all')
plt.show()
# Distort the signal (add noise, linear trend, artifacts etc.)
distorted = nk.signal_distort(signal=data["ECG"][:30000].to_numpy(),
                  noise_amplitude=0.1,
                  noise_frequency=[5, 10, 20],
                  powerline_amplitude=0.05,
                  artifacts_amplitude=0.3,
                  artifacts_number=3,
                  linear_drift=0.5)
# Clean (filter and detrend)
cleaned = nk.signal_detrend(distorted)
cleaned = nk.signal_filter(cleaned, lowcut=0.5, highcut=1.5)
# Compare the 3 signals
plt.rcParams['figure.figsize'] = [15, 10] # Bigger images
plot = nk.signal_plot([data["ECG"][:30000].to_numpy(), distorted, cleaned], sampling_rate=int(metadata["Sampling Frequency"]), subplots=True, standardize=True, labels=["Raw Signal", "Distorted Signal", "Cleaned Signal"])
plt.show()
# Find optimal time delay, embedding dimension and r
#plt.rcParams['figure.figsize'] = [25, 20] # Bigger images
#parameters = nk.complexity_optimize(signal=data["ECG"].to_numpy(), show=True) #[:150000].to_numpy()
#plt.show()
# Detrended Fluctuation Analysis
# NOTE(review): the heading says DFA but the calls below compute sample and
# approximate entropy.
sample_entropy = nk.entropy_sample(signal=data["ECG"][:30000].to_numpy())
approximate_entropy = nk.entropy_approximate(signal=data["ECG"][:30000].to_numpy())
print(sample_entropy, approximate_entropy)
# Decompose signal using Empirical Mode Decomposition (EMD)
#plt.rcParams['figure.figsize'] = [25, 20] # Bigger images
#components = nk.signal_decompose(signal=data["ECG"][:30000].to_numpy(), method='emd')
# Visualize components
#nk.signal_plot(components=components)
#plt.show()
# Recompose merging correlated components
#plt.rcParams['figure.figsize'] = [25, 20] # Bigger images
#recomposed = nk.signal_recompose(components=components, threshold=0.99)
# Visualize components
#nk.signal_plot(recomposed=recomposed)
#plt.show()
# Get the Signal Power Spectrum Density (PSD) using different methods
plt.rcParams['figure.figsize'] = [25, 20] # Bigger images
welch = nk.signal_psd(signal=data["ECG"][:30000].to_numpy(), method="welch", min_frequency=1, max_frequency=20, show=True)
plt.show()
multitaper = nk.signal_psd(signal=data["ECG"][:30000].to_numpy(), method="multitapers", max_frequency=20, show=True)
lomb = nk.signal_psd(signal=data["ECG"][:30000].to_numpy(), method="lomb", min_frequency=1, max_frequency=20, show=True)
burg = nk.signal_psd(signal=data["ECG"][:30000].to_numpy(), method="burg", min_frequency=1, max_frequency=20, order=10, show=True)
plt.show()
# Highest Density Interval (HDI)
plt.rcParams['figure.figsize'] = [25, 20] # Bigger images
ci_min, ci_max = nk.hdi(x=processed_data["ECG_Clean"], ci=0.95, show=True)
plt.show()
# Find events
events = nk.events_find(event_channel=data["ECG"][:30000].to_numpy())
# Plot the location of event with the signals
plot = nk.events_plot(events, data["ECG"][:30000].to_numpy())
plt.show()
# Build and plot epochs
epochs = nk.epochs_create(processed_data, events, sampling_rate=int(metadata["Sampling Frequency"]), epochs_start=-1, epochs_end=6)
# NOTE(review): ``i`` is unused; ``epoch`` iterates over the epoch keys and is
# then rebound to the epoch DataFrame.
for i, epoch in enumerate (epochs):
    epoch = epochs[epoch] # iterate epochs",
    epoch = epoch[['ECG_Clean', 'ECG_Rate']] # Select relevant columns",
    nk.standardize(epoch).plot(legend=True) # Plot scaled signals"
plt.show()
# Extract Event Related Features
# With these segments, we are able to compare how the physiological signals vary across
# the different events. We do this by:
# 1. Iterating through our object epochs
# 2. Storing the mean value of :math:`X` feature of each condition in a new dictionary
# 3. Saving the results in a readable format
# We can call them epochs-dictionary, the mean-dictionary and our results-dataframe
df = {} # Initialize an empty dict,
for epoch_index in epochs:
    df[epoch_index] = {} # then Initialize an empty dict inside of it with the iterative
    # Save a temp var with dictionary called <epoch_index> in epochs-dictionary
    epoch = epochs[epoch_index]
    # We want its features:
    # Feature 1 ECG
    ecg_baseline = epoch["ECG_Rate"].loc[-100:0].mean() # Baseline
    ecg_mean = epoch["ECG_Rate"].loc[0:30000].mean() # Mean heart rate in the 0-4 seconds
    # Store ECG in df
    df[epoch_index]["ECG_Rate"] = ecg_mean - ecg_baseline # Correct for baseline
df = pd.DataFrame.from_dict(df, orient="index") # Convert to a dataframe
print(df) # Print DataFrame
# You can now plot and compare how these features differ according to the event of interest.
sns.boxplot(y="ECG_Rate", data=df)
plt.show()
# Half the data
#epochs = nk.epochs_create(data["ECG"][:30000], events=[0, 15000], sampling_rate=int(metadata["Sampling Frequency"]), epochs_start=0, epochs_end=150)
#print(epochs)
# Analyze
#ECG_features = nk.ecg_intervalrelated(epochs)
#print(ECG_features)
# Evaluate detected R-peaks against the database annotations.
from sklearn.metrics import confusion_matrix
# Bug fix: the original rebound the name ``confusion_matrix`` to the result
# array, shadowing the imported function; use a separate name for the matrix.
# NOTE(review): comparing raw sample positions via a confusion matrix is
# dubious -- detected peaks rarely coincide exactly with annotated samples;
# a tolerance-window match is the usual approach. TODO confirm intent.
cm = confusion_matrix(anno["Rpeaks"][:59], info["ECG_R_Peaks"])
TN = cm[0][0]  # true negatives
FN = cm[1][0]  # false negatives
TP = cm[1][1]  # true positives
FP = cm[0][1]  # false positives
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP/(TP+FN)
# Specificity or true negative rate
TNR = TN/(TN+FP)
# Precision or positive predictive value
PPV = TP/(TP+FP)
# Negative predictive value
NPV = TN/(TN+FN)
# Fall out or false positive rate
FPR = FP/(FP+TN)
# False negative rate
FNR = FN/(TP+FN)
# False discovery rate
FDR = FP/(TP+FP)
# Overall accuracy
ACC = (TP+TN)/(TP+FP+FN+TN)
| [
"preprocess_ludb.read_ludb_files",
"neurokit2.standardize",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.show",
"neurokit2.signal_detrend",
"pandas.DataFrame.from_dict",
"pandas.set_option",
"seaborn.boxplot",
"neurokit2.signal_filter",
"neurokit2.hdi",
"numpy.set_printoptions"
] | [((181, 216), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(400)'], {}), "('display.width', 400)\n", (194, 216), True, 'import pandas as pd\n'), ((218, 258), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(25)'], {}), "('display.max_columns', 25)\n", (231, 258), True, 'import pandas as pd\n'), ((260, 294), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(400)'}), '(linewidth=400)\n', (279, 294), True, 'import numpy as np\n'), ((606, 623), 'preprocess_ludb.read_ludb_files', 'read_ludb_files', ([], {}), '()\n', (621, 623), False, 'from preprocess_ludb import read_ludb_files\n'), ((1125, 1135), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1133, 1135), True, 'import matplotlib.pyplot as plt\n'), ((1300, 1310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1308, 1310), True, 'import matplotlib.pyplot as plt\n'), ((1683, 1693), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1691, 1693), True, 'import matplotlib.pyplot as plt\n'), ((2005, 2015), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2013, 2015), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2548), 'neurokit2.signal_detrend', 'nk.signal_detrend', (['distorted'], {}), '(distorted)\n', (2537, 2548), True, 'import neurokit2 as nk\n'), ((2560, 2610), 'neurokit2.signal_filter', 'nk.signal_filter', (['cleaned'], {'lowcut': '(0.5)', 'highcut': '(1.5)'}), '(cleaned, lowcut=0.5, highcut=1.5)\n', (2576, 2610), True, 'import neurokit2 as nk\n'), ((2921, 2931), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2929, 2931), True, 'import matplotlib.pyplot as plt\n'), ((4208, 4218), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4216, 4218), True, 'import matplotlib.pyplot as plt\n'), ((4592, 4602), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4600, 4602), True, 'import matplotlib.pyplot as plt\n'), ((4717, 4774), 'neurokit2.hdi', 'nk.hdi', ([], {'x': "processed_data['ECG_Clean']", 
'ci': '(0.95)', 'show': '(True)'}), "(x=processed_data['ECG_Clean'], ci=0.95, show=True)\n", (4723, 4774), True, 'import neurokit2 as nk\n'), ((4776, 4786), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4784, 4786), True, 'import matplotlib.pyplot as plt\n'), ((4988, 4998), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4996, 4998), True, 'import matplotlib.pyplot as plt\n'), ((6439, 6481), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['df'], {'orient': '"""index"""'}), "(df, orient='index')\n", (6461, 6481), True, 'import pandas as pd\n'), ((6635, 6669), 'seaborn.boxplot', 'sns.boxplot', ([], {'y': '"""ECG_Rate"""', 'data': 'df'}), "(y='ECG_Rate', data=df)\n", (6646, 6669), True, 'import seaborn as sns\n'), ((6671, 6681), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6679, 6681), True, 'import matplotlib.pyplot as plt\n'), ((7017, 7075), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (["anno['Rpeaks'][:59]", "info['ECG_R_Peaks']"], {}), "(anno['Rpeaks'][:59], info['ECG_R_Peaks'])\n", (7033, 7075), False, 'from sklearn.metrics import confusion_matrix\n'), ((5398, 5408), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5406, 5408), True, 'import matplotlib.pyplot as plt\n'), ((5329, 5350), 'neurokit2.standardize', 'nk.standardize', (['epoch'], {}), '(epoch)\n', (5343, 5350), True, 'import neurokit2 as nk\n')] |
import pickle
from tracker import Tracker
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
from image_gen import (
abs_sobel_thresh, color_thresh, window_mask
)
# Load the camera calibration (camera matrix ``mtx`` and distortion
# coefficients ``dist``) computed offline.
# Fix: the original called ``open(...)`` inline and never closed the file
# handle; a ``with`` block releases it deterministically.
with open('camera_cal/calibration_pickle.p', 'rb') as cal_file:
    dist_pickle = pickle.load(cal_file)
mtx = dist_pickle['mtx']
dist = dist_pickle['dist']
def process_img(img):
    """Detect lane lines in one video frame and draw them back onto it.

    Pipeline: undistort -> gradient/colour thresholding -> perspective
    warp to a bird's-eye view -> sliding-window centroid search ->
    quadratic polynomial fit per lane line -> unwarp and overlay,
    annotated with curvature radius and vehicle offset.

    Reads the module-level calibration ``mtx``/``dist``.
    Returns the annotated frame (same shape as ``img``).
    """
    # Remove lens distortion using the pre-computed calibration.
    img = cv2.undistort(img, mtx, dist, None, mtx)
    preprocess_image = np.zeros_like(img[:,:,0])
    # Binary masks from Sobel gradients (x and y) and colour thresholds.
    gradx = abs_sobel_thresh(img, orient='x', thresh=(12, 255))
    grady = abs_sobel_thresh(img, orient='y', thresh=(25, 255))
    c_binary = color_thresh(img, sthresh=(100, 255), vthresh=(50, 255))
    # Pixel passes if both gradient masks agree OR the colour mask fires
    # (``&`` binds tighter than ``|``).
    preprocess_image[((gradx == 1) & (grady == 1) | (c_binary == 1))] = 255
    img_size = (img.shape[1], img.shape[0])
    # Source trapezoid (fractions of the frame) mapped onto a rectangle.
    bot_width = .76
    mid_width = .08
    height_pct = .62
    bottom_trim = .935
    src = np.float32([
        [img.shape[1]*(.5-mid_width/2), img.shape[0]*height_pct],
        [img.shape[1]*(.5+mid_width/2), img.shape[0]*height_pct],
        [img.shape[1]*(.5+bot_width/2), img.shape[0]*bottom_trim],
        [img.shape[1]*(.5-bot_width/2), img.shape[0]*bottom_trim],
    ])
    offset = img.shape[1]*.25
    dst = np.float32([
        [offset, 0],
        [img.shape[1]-offset, 0],
        [img.shape[1]-offset, img.shape[0]],
        [offset, img.shape[0]]
    ])
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(preprocess_image, M, img_size, flags=cv2.INTER_LINEAR)
    # Sliding-window search for left/right lane centroids per vertical level.
    window_width = 25
    window_height = 80
    curve_centers = Tracker(Mywindow_width=window_width, Mywindow_height=window_height, Mymargin=25, My_ym=10/720, My_xm=4/384, Mysmooth_factor=15)
    window_centroids = curve_centers.find_window_centroids(warped)
    l_points = np.zeros_like(warped)
    r_points = np.zeros_like(warped)
    leftx = []
    rightx = []
    for level in range(len(window_centroids)):
        leftx.append(window_centroids[level][0])
        rightx.append(window_centroids[level][1])
        l_mask = window_mask(window_width, window_height, warped, window_centroids[level][0], level)
        r_mask = window_mask(window_width, window_height, warped, window_centroids[level][1], level)
        l_points[(l_points == 255) | (l_mask == 1)] = 255
        r_points[(r_points == 255) | (r_mask == 1)] = 255
    # Draw
    # template = np.array(r_points+l_points, np.uint8)
    # zero_channel=np.zeros_like(template)
    # template = np.array(cv2.merge((zero_channel, template, zero_channel)), np.uint8)
    # warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)
    # result = cv2.addWeighted(warpage, 1, template, 0.5, 0.0)
    # result = warped
    # Fit a second-order polynomial x = f(y) to each lane line.
    yvals = range(0, warped.shape[0])
    res_yvals = np.arange(warped.shape[0]-(window_height/2), 0, -window_height)
    left_fit = np.polyfit(res_yvals, leftx, 2)
    left_fitx = left_fit[0]*yvals*yvals + left_fit[1]*yvals + left_fit[2]
    left_fitx = np.array(left_fitx, np.int32)
    right_fit = np.polyfit(res_yvals, rightx, 2)
    right_fitx = right_fit[0]*yvals*yvals + right_fit[1]*yvals + right_fit[2]
    right_fitx = np.array(right_fitx, np.int32)
    # Closed polygons (vertex lists) for the left line, right line and the
    # lane interior, traced down one edge and back up the other.
    left_lane = np.array(list(zip(np.concatenate((left_fitx-window_width/2,left_fitx[::-1]+window_width/2), axis=0),
                      np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
    right_lane = np.array(list(zip(np.concatenate((right_fitx-window_width/2,right_fitx[::-1]+window_width/2), axis=0),
                      np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
    inner_lane = np.array(list(zip(np.concatenate((left_fitx+window_width/2,right_fitx[::-1]-window_width/2), axis=0),
                      np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
    road = np.zeros_like(img)
    road_bkg = np.zeros_like(img)
    cv2.fillPoly(road, [left_lane], color=[255, 0,0])
    cv2.fillPoly(road, [right_lane], color=[0, 0, 255])
    cv2.fillPoly(road, [inner_lane], color=[0, 255, 0])
    cv2.fillPoly(road_bkg, [left_lane], color=[255, 255, 255])
    cv2.fillPoly(road_bkg, [right_lane], color=[255, 255, 255])
    # Warp the overlays back to camera perspective and blend them in; the
    # white background is subtracted first to darken under the lines.
    road_warped = cv2.warpPerspective(road, Minv, img_size, flags=cv2.INTER_LINEAR)
    road_warped_bkg = cv2.warpPerspective(road_bkg, Minv, img_size, flags=cv2.INTER_LINEAR)
    base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
    result = cv2.addWeighted(base, 1.0, road_warped, .7, 0.0)
    # Curvature radius from a fit done in real-world units (metres).
    ym_per_pix = curve_centers.ym_per_pix
    xm_per_pix= curve_centers.xm_per_pix
    curve_fit_cr = np.polyfit(np.array(res_yvals,np.float32)*ym_per_pix, np.array(leftx, np.float32)*xm_per_pix, 2)
    curverad = ((1 + (2*curve_fit_cr[0]*yvals[-1]*ym_per_pix + curve_fit_cr[1])**2)**1.5) / np.absolute(2*curve_fit_cr[0])
    # Vehicle offset from lane centre (camera assumed at the frame centre).
    camera_center = (left_fitx[-1] + right_fitx[-1])/2
    center_diff = (camera_center-warped.shape[1]/2)*xm_per_pix
    side_pos = 'left'
    if center_diff <=0:
        side_pos = 'right'
    cv2.putText(result, 'Radius of Curvature = '+str(round(curverad, 3))+'(m)',(50,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
    cv2.putText(result, 'Vehicle is '+str(abs(round(center_diff, 3)))+'m '+side_pos+' of center',(50,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
    return result
if __name__ == '__main__':
    # Run the lane-detection pipeline over every frame of the input clip
    # and write the annotated result (re-encoded without audio).
    output_video = 'output_tracked1.mp4'
    input_video = 'project_video.mp4'
    clip1 = VideoFileClip(input_video)
    video_clip = clip1.fl_image(process_img)
    video_clip.write_videofile(output_video, audio=False)
| [
"cv2.fillPoly",
"image_gen.window_mask",
"cv2.getPerspectiveTransform",
"numpy.polyfit",
"numpy.absolute",
"cv2.undistort",
"numpy.array",
"cv2.warpPerspective",
"image_gen.color_thresh",
"cv2.addWeighted",
"tracker.Tracker",
"numpy.concatenate",
"image_gen.abs_sobel_thresh",
"numpy.zeros_... | [((350, 390), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (363, 390), False, 'import cv2\n'), ((414, 441), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (427, 441), True, 'import numpy as np\n'), ((452, 503), 'image_gen.abs_sobel_thresh', 'abs_sobel_thresh', (['img'], {'orient': '"""x"""', 'thresh': '(12, 255)'}), "(img, orient='x', thresh=(12, 255))\n", (468, 503), False, 'from image_gen import abs_sobel_thresh, color_thresh, window_mask\n'), ((516, 567), 'image_gen.abs_sobel_thresh', 'abs_sobel_thresh', (['img'], {'orient': '"""y"""', 'thresh': '(25, 255)'}), "(img, orient='y', thresh=(25, 255))\n", (532, 567), False, 'from image_gen import abs_sobel_thresh, color_thresh, window_mask\n'), ((584, 640), 'image_gen.color_thresh', 'color_thresh', (['img'], {'sthresh': '(100, 255)', 'vthresh': '(50, 255)'}), '(img, sthresh=(100, 255), vthresh=(50, 255))\n', (596, 640), False, 'from image_gen import abs_sobel_thresh, color_thresh, window_mask\n'), ((856, 1154), 'numpy.float32', 'np.float32', (['[[img.shape[1] * (0.5 - mid_width / 2), img.shape[0] * height_pct], [img.\n shape[1] * (0.5 + mid_width / 2), img.shape[0] * height_pct], [img.\n shape[1] * (0.5 + bot_width / 2), img.shape[0] * bottom_trim], [img.\n shape[1] * (0.5 - bot_width / 2), img.shape[0] * bottom_trim]]'], {}), '([[img.shape[1] * (0.5 - mid_width / 2), img.shape[0] *\n height_pct], [img.shape[1] * (0.5 + mid_width / 2), img.shape[0] *\n height_pct], [img.shape[1] * (0.5 + bot_width / 2), img.shape[0] *\n bottom_trim], [img.shape[1] * (0.5 - bot_width / 2), img.shape[0] *\n bottom_trim]])\n', (866, 1154), True, 'import numpy as np\n'), ((1182, 1302), 'numpy.float32', 'np.float32', (['[[offset, 0], [img.shape[1] - offset, 0], [img.shape[1] - offset, img.shape\n [0]], [offset, img.shape[0]]]'], {}), '([[offset, 0], [img.shape[1] - offset, 0], [img.shape[1] - offset,\n img.shape[0]], 
[offset, img.shape[0]]])\n', (1192, 1302), True, 'import numpy as np\n'), ((1341, 1378), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (1368, 1378), False, 'import cv2\n'), ((1390, 1427), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst', 'src'], {}), '(dst, src)\n', (1417, 1427), False, 'import cv2\n'), ((1441, 1515), 'cv2.warpPerspective', 'cv2.warpPerspective', (['preprocess_image', 'M', 'img_size'], {'flags': 'cv2.INTER_LINEAR'}), '(preprocess_image, M, img_size, flags=cv2.INTER_LINEAR)\n', (1460, 1515), False, 'import cv2\n'), ((1582, 1717), 'tracker.Tracker', 'Tracker', ([], {'Mywindow_width': 'window_width', 'Mywindow_height': 'window_height', 'Mymargin': '(25)', 'My_ym': '(10 / 720)', 'My_xm': '(4 / 384)', 'Mysmooth_factor': '(15)'}), '(Mywindow_width=window_width, Mywindow_height=window_height,\n Mymargin=25, My_ym=10 / 720, My_xm=4 / 384, Mysmooth_factor=15)\n', (1589, 1717), False, 'from tracker import Tracker\n'), ((1794, 1815), 'numpy.zeros_like', 'np.zeros_like', (['warped'], {}), '(warped)\n', (1807, 1815), True, 'import numpy as np\n'), ((1831, 1852), 'numpy.zeros_like', 'np.zeros_like', (['warped'], {}), '(warped)\n', (1844, 1852), True, 'import numpy as np\n'), ((2760, 2825), 'numpy.arange', 'np.arange', (['(warped.shape[0] - window_height / 2)', '(0)', '(-window_height)'], {}), '(warped.shape[0] - window_height / 2, 0, -window_height)\n', (2769, 2825), True, 'import numpy as np\n'), ((2840, 2871), 'numpy.polyfit', 'np.polyfit', (['res_yvals', 'leftx', '(2)'], {}), '(res_yvals, leftx, 2)\n', (2850, 2871), True, 'import numpy as np\n'), ((2962, 2991), 'numpy.array', 'np.array', (['left_fitx', 'np.int32'], {}), '(left_fitx, np.int32)\n', (2970, 2991), True, 'import numpy as np\n'), ((3009, 3041), 'numpy.polyfit', 'np.polyfit', (['res_yvals', 'rightx', '(2)'], {}), '(res_yvals, rightx, 2)\n', (3019, 3041), True, 'import numpy as np\n'), ((3137, 3167), 'numpy.array', 'np.array', 
(['right_fitx', 'np.int32'], {}), '(right_fitx, np.int32)\n', (3145, 3167), True, 'import numpy as np\n'), ((3813, 3831), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (3826, 3831), True, 'import numpy as np\n'), ((3847, 3865), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (3860, 3865), True, 'import numpy as np\n'), ((3870, 3920), 'cv2.fillPoly', 'cv2.fillPoly', (['road', '[left_lane]'], {'color': '[255, 0, 0]'}), '(road, [left_lane], color=[255, 0, 0])\n', (3882, 3920), False, 'import cv2\n'), ((3924, 3975), 'cv2.fillPoly', 'cv2.fillPoly', (['road', '[right_lane]'], {'color': '[0, 0, 255]'}), '(road, [right_lane], color=[0, 0, 255])\n', (3936, 3975), False, 'import cv2\n'), ((3980, 4031), 'cv2.fillPoly', 'cv2.fillPoly', (['road', '[inner_lane]'], {'color': '[0, 255, 0]'}), '(road, [inner_lane], color=[0, 255, 0])\n', (3992, 4031), False, 'import cv2\n'), ((4036, 4094), 'cv2.fillPoly', 'cv2.fillPoly', (['road_bkg', '[left_lane]'], {'color': '[255, 255, 255]'}), '(road_bkg, [left_lane], color=[255, 255, 255])\n', (4048, 4094), False, 'import cv2\n'), ((4099, 4158), 'cv2.fillPoly', 'cv2.fillPoly', (['road_bkg', '[right_lane]'], {'color': '[255, 255, 255]'}), '(road_bkg, [right_lane], color=[255, 255, 255])\n', (4111, 4158), False, 'import cv2\n'), ((4178, 4243), 'cv2.warpPerspective', 'cv2.warpPerspective', (['road', 'Minv', 'img_size'], {'flags': 'cv2.INTER_LINEAR'}), '(road, Minv, img_size, flags=cv2.INTER_LINEAR)\n', (4197, 4243), False, 'import cv2\n'), ((4266, 4335), 'cv2.warpPerspective', 'cv2.warpPerspective', (['road_bkg', 'Minv', 'img_size'], {'flags': 'cv2.INTER_LINEAR'}), '(road_bkg, Minv, img_size, flags=cv2.INTER_LINEAR)\n', (4285, 4335), False, 'import cv2\n'), ((4348, 4401), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(1.0)', 'road_warped_bkg', '(-1.0)', '(0.0)'], {}), '(img, 1.0, road_warped_bkg, -1.0, 0.0)\n', (4363, 4401), False, 'import cv2\n'), ((4415, 4464), 'cv2.addWeighted', 'cv2.addWeighted', (['base', 
'(1.0)', 'road_warped', '(0.7)', '(0.0)'], {}), '(base, 1.0, road_warped, 0.7, 0.0)\n', (4430, 4464), False, 'import cv2\n'), ((5401, 5427), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['input_video'], {}), '(input_video)\n', (5414, 5427), False, 'from moviepy.editor import VideoFileClip\n'), ((2049, 2136), 'image_gen.window_mask', 'window_mask', (['window_width', 'window_height', 'warped', 'window_centroids[level][0]', 'level'], {}), '(window_width, window_height, warped, window_centroids[level][0],\n level)\n', (2060, 2136), False, 'from image_gen import abs_sobel_thresh, color_thresh, window_mask\n'), ((2150, 2237), 'image_gen.window_mask', 'window_mask', (['window_width', 'window_height', 'warped', 'window_centroids[level][1]', 'level'], {}), '(window_width, window_height, warped, window_centroids[level][1],\n level)\n', (2161, 2237), False, 'from image_gen import abs_sobel_thresh, color_thresh, window_mask\n'), ((4757, 4789), 'numpy.absolute', 'np.absolute', (['(2 * curve_fit_cr[0])'], {}), '(2 * curve_fit_cr[0])\n', (4768, 4789), True, 'import numpy as np\n'), ((4579, 4610), 'numpy.array', 'np.array', (['res_yvals', 'np.float32'], {}), '(res_yvals, np.float32)\n', (4587, 4610), True, 'import numpy as np\n'), ((4622, 4649), 'numpy.array', 'np.array', (['leftx', 'np.float32'], {}), '(leftx, np.float32)\n', (4630, 4649), True, 'import numpy as np\n'), ((3203, 3298), 'numpy.concatenate', 'np.concatenate', (['(left_fitx - window_width / 2, left_fitx[::-1] + window_width / 2)'], {'axis': '(0)'}), '((left_fitx - window_width / 2, left_fitx[::-1] + \n window_width / 2), axis=0)\n', (3217, 3298), True, 'import numpy as np\n'), ((3320, 3364), 'numpy.concatenate', 'np.concatenate', (['(yvals, yvals[::-1])'], {'axis': '(0)'}), '((yvals, yvals[::-1]), axis=0)\n', (3334, 3364), True, 'import numpy as np\n'), ((3413, 3510), 'numpy.concatenate', 'np.concatenate', (['(right_fitx - window_width / 2, right_fitx[::-1] + window_width / 2)'], {'axis': '(0)'}), '((right_fitx 
- window_width / 2, right_fitx[::-1] + \n window_width / 2), axis=0)\n', (3427, 3510), True, 'import numpy as np\n'), ((3532, 3576), 'numpy.concatenate', 'np.concatenate', (['(yvals, yvals[::-1])'], {'axis': '(0)'}), '((yvals, yvals[::-1]), axis=0)\n', (3546, 3576), True, 'import numpy as np\n'), ((3625, 3721), 'numpy.concatenate', 'np.concatenate', (['(left_fitx + window_width / 2, right_fitx[::-1] - window_width / 2)'], {'axis': '(0)'}), '((left_fitx + window_width / 2, right_fitx[::-1] - \n window_width / 2), axis=0)\n', (3639, 3721), True, 'import numpy as np\n'), ((3743, 3787), 'numpy.concatenate', 'np.concatenate', (['(yvals, yvals[::-1])'], {'axis': '(0)'}), '((yvals, yvals[::-1]), axis=0)\n', (3757, 3787), True, 'import numpy as np\n')] |
"""
Agosto 08 de 2021
@author: <NAME>, <NAME> y <NAME>
Método de Aitken
Análisis Numérico
"""
import numpy as np
from bokeh.plotting import figure, output_file, show
from decimal import Decimal, getcontext
import math
limite = 5000
def funcionuno(x):
    """Target equation 1: f(x) = cos(x)^2 - x^2."""
    squared_cos = np.cos(x) ** 2
    return squared_cos - x ** 2
def funciondos(x):
    """Target equation 2: f(x) = x*sin(x) - 1."""
    return math.sin(x) * x - 1
def funciontres(x):
    """Target equation 3: f(x) = 3x^2 - 4x + 4/3."""
    return 3 * x ** 2 - 4 * x + 4 / 3
def funcioncuatro(x):
    """Target equation 4 (Chapra's falling-object drag problem).

    f(c) = (g*m/c) * (1 - e^(-(t/m)*c)) - v with g = 9.81, m = 68.1,
    t = 10 and v = 40; the root is the drag coefficient c ~ 14.80.

    Bug fix: the original computed ``e * (-(10/68.1)*x)`` -- multiplying
    Euler's number by the exponent instead of exponentiating -- so the
    drag term was linear instead of exponential.  Use ``math.exp``.
    """
    return ((68.1 * 9.81) / x) * (1 - math.exp(-(10 / 68.1) * x)) - 40
def funcioncinco(x):
    """Target equation 5: f(x) = x^3 - 2x - 5."""
    cubic_part = x ** 3 - 2 * x
    return cubic_part - 5
def aitken(tol,x0,f, grafica, num_eq):
    """Fixed-point iteration accelerated with Aitken's delta-squared step.

    Starting from ``x0``, two plain fixed-point evaluations of ``f`` are
    followed by one Aitken extrapolation, repeated until the relative
    change ``|(x3 - x0)/x3|`` drops below ``tol``.  The iterates and the
    per-step absolute differences are written to Bokeh HTML files named
    after ``grafica`` and ``num_eq``.

    Returns ``[iterador, x3]``: the evaluation count and the final
    approximation.

    NOTE(review): the loop has no iteration cap (the module-level
    ``limite`` is never used), so a non-convergent ``f`` hangs here.
    """
    errores = []
    y_vals = []
    y_vals.append(x0)
    x1 = f(x0)
    errores.append(abs(x1-x0))
    y_vals.append(x1)
    x2 = f(x1)
    errores.append(abs(x2-x1))
    y_vals.append(x2)
    # Aitken delta-squared extrapolation from the last three iterates.
    x3 = x0 - ((x1 - x0)**2)/(x2 - 2*x1 + x0)
    errores.append(abs(x3-x2))
    y_vals.append(x3)
    iterador = 3
    if(x3 != 0):
        while abs((x3-x0)/x3) > tol:
            x0 = x3
            x1 = f(x0)
            errores.append(abs(x1-x0))
            y_vals.append(x1)
            x2 = f(x1)
            errores.append(abs(x2-x1))
            y_vals.append(x2)
            # Guard against a zero denominator in the Aitken step.
            if ((x2 - 2*x1 + x0) != 0):
                x3 = x0 - ((x1 - x0)**2)/(x2 - 2*x1 + x0)
                errores.append(abs(x3-x2))
                y_vals.append(x3)
                iterador += 3
            if x3 == 0:
                break
    # Plot the sequence of iterates ...
    x_vals = list(range(len(y_vals)))
    output_file(grafica + f"-{tol}.html")
    fig = figure()
    fig.line(x_vals, y_vals, line_width=2)
    # NOTE(review): the ``x3 < 14`` guard on showing this figure looks ad
    # hoc -- confirm why large roots should suppress the plot.
    if x3 < 14:
        show(fig)
    # ... and the absolute difference per step.
    x_errores = list(range(len(errores)))
    output_file(f'erroresaitken{num_eq}-{tol}.html')
    fig2 = figure()
    fig2.line(x_errores, errores, line_width=2)
    show(fig2)
    resultado = []
    resultado.append(iterador)
    resultado.append(x3)
    return resultado
if __name__ == "__main__":
getcontext().prec = 56
tols = [10**-8, 10**-16, 10**-32, 10**-56]
x = 0.7
for tol in tols:
retorno = aitken(tol, x, funcionuno, "aitken",1)
iteraciones = retorno[0]
raiz = retorno[1]
print(f'En {iteraciones} iteraciones y tolerancia de {tol} se obtuvieron las raices {Decimal(raiz)} y {-1*Decimal(raiz)}')
print("---------------------------------------------------------------------------------------------------------------------------")
for tol in tols:
retorno = aitken(tol, x, funciondos, "aitken2",2)
iteraciones = retorno[0]
raiz = retorno[1]
print(f'En {iteraciones} iteraciones y tolerancia de {tol} se obtuvieron las raices {-1*Decimal(raiz)}')
print("---------------------------------------------------------------------------------------------------------------------------")
for tol in tols:
retorno = aitken(tol, x, funciontres, "aitken3",3)
iteraciones = retorno[0]
raiz = retorno[1]
print(f'En {iteraciones} iteraciones y tolerancia de {tol} se obtuvieron las raices {Decimal(raiz)}')
print("---------------------------------------------------------------------------------------------------------------------------")
for tol in tols:
retorno = aitken(tol, x, funcioncuatro, "aitken4",4)
iteraciones = retorno[0]
raiz = retorno[1]
print(f'En {iteraciones} iteraciones y tolerancia de {tol} se obtuvieron las raices {raiz}')
print("---------------------------------------------------------------------------------------------------------------------------")
for tol in tols:
x = 0.8
retorno = aitken(tol, x, funcioncinco, "aitken5",5)
iteraciones = retorno[0]
raiz = retorno[1]
print(f'En {iteraciones} iteraciones y tolerancia de {tol} se obtuvieron las raices {Decimal(raiz)}')
| [
"decimal.getcontext",
"bokeh.plotting.show",
"bokeh.plotting.figure",
"decimal.Decimal",
"numpy.cos",
"math.sin",
"bokeh.plotting.output_file"
] | [((1439, 1476), 'bokeh.plotting.output_file', 'output_file', (["(grafica + f'-{tol}.html')"], {}), "(grafica + f'-{tol}.html')\n", (1450, 1476), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1487, 1495), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (1493, 1495), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1564, 1573), 'bokeh.plotting.show', 'show', (['fig'], {}), '(fig)\n', (1568, 1573), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1628, 1676), 'bokeh.plotting.output_file', 'output_file', (['f"""erroresaitken{num_eq}-{tol}.html"""'], {}), "(f'erroresaitken{num_eq}-{tol}.html')\n", (1639, 1676), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1692, 1700), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (1698, 1700), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1761, 1771), 'bokeh.plotting.show', 'show', (['fig2'], {}), '(fig2)\n', (1765, 1771), False, 'from bokeh.plotting import figure, output_file, show\n'), ((1923, 1935), 'decimal.getcontext', 'getcontext', ([], {}), '()\n', (1933, 1935), False, 'from decimal import Decimal, getcontext\n'), ((266, 275), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (272, 275), True, 'import numpy as np\n'), ((320, 331), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (328, 331), False, 'import math\n'), ((2242, 2255), 'decimal.Decimal', 'Decimal', (['raiz'], {}), '(raiz)\n', (2249, 2255), False, 'from decimal import Decimal, getcontext\n'), ((3037, 3050), 'decimal.Decimal', 'Decimal', (['raiz'], {}), '(raiz)\n', (3044, 3050), False, 'from decimal import Decimal, getcontext\n'), ((3823, 3836), 'decimal.Decimal', 'Decimal', (['raiz'], {}), '(raiz)\n', (3830, 3836), False, 'from decimal import Decimal, getcontext\n'), ((2263, 2276), 'decimal.Decimal', 'Decimal', (['raiz'], {}), '(raiz)\n', (2270, 2276), False, 'from decimal import Decimal, getcontext\n'), ((2653, 2666), 'decimal.Decimal', 'Decimal', (['raiz'], 
{}), '(raiz)\n', (2660, 2666), False, 'from decimal import Decimal, getcontext\n')] |
# Despy: A discrete event simulation framework for Python
# Version 0.1
# Released under the MIT License (MIT)
# Copyright (c) 2015, <NAME>
"""
*****************
despy.model.queue
*****************
.. autosummary::
Queue
.. todo
Add property and output for total items entering the queue.
Add property and output for total items leaving the queue.
Add properties and output for max and min items in queue.
Replace length property with __len__
Remove reference to histogram folder from get_data method.
Refactor time in queue to use Statistic class.
Change getData so it doesn't need folder from session.
"""
from collections import deque, namedtuple, OrderedDict
import numpy as np
from despy.model.component import Component
from despy.output.report import Datatype
import despy.output.plot as plot
from despy.output.statistic import DiscreteStatistic
from despy.output.statistic import TimeWeightedStatistic
class Queue(Component):
    """A component that represents a real world queue.

    A Queue object represents a real-world queue, such as a line of
    customers waiting for a server, or a line of products waiting for a
    machine.

    **Inherited Classes**
      * :class:`despy.base.named_object2.NamedObject`
      * :class:`despy.model.component.Component`

    **Members**

    .. autosummary::

        Item
        length
        times_in_queue
        add
        remove
        get_data
    """
def __init__(self, name, max_length = None,
description = None):
"""Create a Queue object.
*Arguments*
``model`` (:class:`despy.model.model.Model`)
The Queue must be assigned to a Model object.
``name`` (String)
A short descriptive name for the Queue object.
``max_length`` (Integer)
If ``None`` (default value), then the Queue length can
grow indefinitely. If set to an integer, the Queue will
be limited to ``max_length``.
``description`` (String)
Optional. Default is None.
"""
super().__init__(name, description = description)
if isinstance(max_length, int):
self._queue = deque(max_length)
else:
self._queue = deque()
self._times_in_queue = []
self.results.stats['Queue_time'] = DiscreteStatistic('w_q', 'u4')
self.results.stats['Queue_length'] = TimeWeightedStatistic('L_q', 'u4')
Item = namedtuple('Item', ['item_fld', 'time_in_fld'])
"""(Class) A named tuple that contains an item added to the queue.
*Attributes*
``item_fld``
An object that is added to the queue.
``time_in_fld``
The time the object was added to the queue.
"""
@property
def length(self):
"""The number of entities in the queue at the current time.
*Type:* Integer, read-only.
"""
return len(self._queue)
@property
def times_in_queue(self):
"""List of times (integers) that entities spent in the queue.
*Type:* List of integers, read-only.
The first element of the list is the time that the first entity
to leave the queue spent in the queue, the second element is for
the second entity to leave the queue, etc.
"""
return self._times_in_queue
def setup(self):
self.results.stats['Queue_length'].append(self.sim.now,
self.length)
#
# def finalize(self):
# self.statistics['Queue_length'].append(self.sim.now,
# self.length)
def teardown(self):
self.clear()
def add(self, item):
"""Add an item to the end of the queue.
*Arguments*
``item``
The item that will be added to the queue.
"""
self._queue.append(Queue.Item(item_fld = item, \
time_in_fld = self.sim.now))
message = "Entering Queue"
fields = OrderedDict()
fields["Length"] = self.length
fields["Entity"] = str(item)
self.sim.results.trace.add_message(message, fields)
def remove(self):
"""Remove an item from the beginning of the queue.
*Arguments*
``item``
The item that will be removed from the queue.
*Returns:* The item that was removed from the queue.
"""
item = self._queue.popleft()
q_time = self.sim.now - item.time_in_fld
self.times_in_queue.append(q_time)
self.results.stats['Queue_time'].append(self.sim.now, q_time)
message = "Leaving Queue"
fields = OrderedDict()
fields["Length"] = self.length
fields["Entity"] = str(item.item_fld)
fields["Time_in_Q"] = q_time
self.sim.results.trace.add_message(message, fields)
return item.item_fld
def clear(self):
self._queue.clear()
def get_data(self, full_path):
"""Creates charts and adds data to final report.
*Arguments*
``folder`` (String)
All charts will be saved to the location denoted by
'folder'.
*Returns:* A despy.model.output.Datatype formatted list
containing data for the final report.
"""
# Create Time in Queue Histogram
qtimes = np.array(self.times_in_queue, np.int32)
qtime_filename = '{0}_time_in_q'.format(self.id)
full_fname = plot.Histogram(self.times_in_queue, full_path,
qtime_filename,
title = self.name,
x_label = "Time in Queue",
y_label = "Frequency")
# Create output
output = [(Datatype.title, "Queue Results: {0}".format(self.name)),
(Datatype.paragraph, self.description.__str__()),
(Datatype.param_list,
[('Maximum Time in Queue', np.amax(qtimes)),
('Minimum Time in Queue', np.amin(qtimes)),
('Mean Time in Queue', np.mean(qtimes))]),
(Datatype.image, full_fname)]
return output | [
"despy.output.statistic.TimeWeightedStatistic",
"numpy.mean",
"collections.OrderedDict",
"collections.namedtuple",
"collections.deque",
"numpy.amin",
"numpy.array",
"despy.output.statistic.DiscreteStatistic",
"despy.output.plot.Histogram",
"numpy.amax"
] | [((2622, 2669), 'collections.namedtuple', 'namedtuple', (['"""Item"""', "['item_fld', 'time_in_fld']"], {}), "('Item', ['item_fld', 'time_in_fld'])\n", (2632, 2669), False, 'from collections import deque, namedtuple, OrderedDict\n'), ((2491, 2521), 'despy.output.statistic.DiscreteStatistic', 'DiscreteStatistic', (['"""w_q"""', '"""u4"""'], {}), "('w_q', 'u4')\n", (2508, 2521), False, 'from despy.output.statistic import DiscreteStatistic\n'), ((2567, 2601), 'despy.output.statistic.TimeWeightedStatistic', 'TimeWeightedStatistic', (['"""L_q"""', '"""u4"""'], {}), "('L_q', 'u4')\n", (2588, 2601), False, 'from despy.output.statistic import TimeWeightedStatistic\n'), ((4266, 4279), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4277, 4279), False, 'from collections import deque, namedtuple, OrderedDict\n'), ((4963, 4976), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4974, 4976), False, 'from collections import deque, namedtuple, OrderedDict\n'), ((5708, 5747), 'numpy.array', 'np.array', (['self.times_in_queue', 'np.int32'], {}), '(self.times_in_queue, np.int32)\n', (5716, 5747), True, 'import numpy as np\n'), ((5826, 5956), 'despy.output.plot.Histogram', 'plot.Histogram', (['self.times_in_queue', 'full_path', 'qtime_filename'], {'title': 'self.name', 'x_label': '"""Time in Queue"""', 'y_label': '"""Frequency"""'}), "(self.times_in_queue, full_path, qtime_filename, title=self.\n name, x_label='Time in Queue', y_label='Frequency')\n", (5840, 5956), True, 'import despy.output.plot as plot\n'), ((2348, 2365), 'collections.deque', 'deque', (['max_length'], {}), '(max_length)\n', (2353, 2365), False, 'from collections import deque, namedtuple, OrderedDict\n'), ((2406, 2413), 'collections.deque', 'deque', ([], {}), '()\n', (2411, 2413), False, 'from collections import deque, namedtuple, OrderedDict\n'), ((6318, 6333), 'numpy.amax', 'np.amax', (['qtimes'], {}), '(qtimes)\n', (6325, 6333), True, 'import numpy as np\n'), ((6383, 6398), 
'numpy.amin', 'np.amin', (['qtimes'], {}), '(qtimes)\n', (6390, 6398), True, 'import numpy as np\n'), ((6445, 6460), 'numpy.mean', 'np.mean', (['qtimes'], {}), '(qtimes)\n', (6452, 6460), True, 'import numpy as np\n')] |
import os,pathlib
import numpy as np
from matplotlib import pyplot as plt
from . import dirDATA
class _Dataset(object):
    """Base class for bundled grouped one-dimensional datasets.

    Subclasses set ``fpath``; the constructor immediately loads the file
    into ``self.group`` (an integer group label per observation) and
    ``self.dv`` (a 2D array of dependent-variable values, one row per
    observation).
    """
    fpath = None # path to data file (set by subclasses)
    def __init__(self):
        self.dv = None # dependent variable (J x Q array, set by _load)
        self.group = None # group label per observation (set by _load)
        self._load()
    def __repr__(self):
        """Multi-line summary: name, file path, shape and group labels."""
        s = f'Dataset: {self.name}\n'
        s += f' fpath = {self.fpath}\n'
        s += f' shape = {self.shape}\n'
        s += f' groups = {self.ug.tolist()}\n'
        return s
    def _load(self):
        # Default loader: CSV where column 0 is the group label and the
        # remaining columns are the dependent-variable values.
        a = np.loadtxt( self.fpath, delimiter=',')
        self.group = np.asarray(a[:,0], dtype=int)
        self.dv = a[:,1:]
    @property
    def J(self): # number of observations (rows)
        return self.dv.shape[0]
    @property
    def Q(self): # number of grid points (columns)
        return self.dv.shape[1]
    @property
    def filename(self): # dataset file name (directory stripped)
        return os.path.split( self.fpath )[1]
    @property
    def name(self): # dataset name (the concrete subclass name)
        return self.__class__.__name__
    @property
    def q(self): # grid points (equally spaced over [0,1])
        return np.linspace(0, 1, self.Q)
    @property
    def shape(self): # dependent variable array shape (J, Q)
        return self.dv.shape
    @property
    def ug(self): # unique group labels, sorted
        return np.unique(self.group)
    def get_dv_by_group(self):
        """Return a list of dv sub-arrays, one per unique group label."""
        return [self.dv[self.group==u] for u in self.ug]
    def plot(self, ax=None, colors=('b', 'r')):
        """Plot every observation plus the per-group means.

        NOTE(review): assumes exactly two groups (y0, y1 unpacking) —
        confirm before using with datasets that have more groups.
        """
        ax = plt.gca() if (ax is None) else ax
        y0,y1 = self.get_dv_by_group()
        # Thin lines for individual observations, thick for group means.
        ax.plot(self.q, y0.T, color=colors[0], lw=0.3)
        ax.plot(self.q, y1.T, color=colors[1], lw=0.3)
        h0 = ax.plot(self.q, y0.mean(axis=0), color=colors[0], lw=5)[0]
        h1 = ax.plot(self.q, y1.mean(axis=0), color=colors[1], lw=5)[0]
        ax.legend([h0,h1], [f'Group {self.ug[0]} mean', f'Group {self.ug[1]} mean'])
        ax.set_title( self.name )
class Besier2009VastusForce(_Dataset):
    # Loaded from 'Besier2009-vastus.csv' using the default CSV loader.
    fpath = os.path.join( dirDATA, 'Besier2009-vastus.csv' )
class Dorn2012(_Dataset):
    # NOTE(review): class is named Dorn2012 but the file says Dorn2021 —
    # confirm which year is correct.
    fpath = os.path.join( dirDATA, 'Dorn2021-reduced.npz' )
    def _load(self):
        # Override: data ships as an npz archive with 'speed' as the
        # group label and 'y' as the dependent variable.
        with np.load( self.fpath, allow_pickle=True ) as z:
            self.group = z['speed']
            self.dv = z['y']
class Pataky2014MediolateralCOP(_Dataset):
    # Loaded from 'Pataky2014-mediolateral.csv' using the default CSV loader.
    fpath = os.path.join( dirDATA, 'Pataky2014-mediolateral.csv' )
class SimulatedA(_Dataset):
    # Simulated dataset A, loaded with the default CSV loader.
    fpath = os.path.join( dirDATA, 'SimulatedA.csv' )
class SimulatedB(_Dataset):
    # Simulated dataset B, loaded with the default CSV loader.
    fpath = os.path.join( dirDATA, 'SimulatedB.csv' )
| [
"numpy.unique",
"matplotlib.pyplot.gca",
"os.path.join",
"numpy.asarray",
"os.path.split",
"numpy.linspace",
"numpy.loadtxt",
"numpy.load"
] | [((1816, 1862), 'os.path.join', 'os.path.join', (['dirDATA', '"""Besier2009-vastus.csv"""'], {}), "(dirDATA, 'Besier2009-vastus.csv')\n", (1828, 1862), False, 'import os, pathlib\n'), ((1902, 1947), 'os.path.join', 'os.path.join', (['dirDATA', '"""Dorn2021-reduced.npz"""'], {}), "(dirDATA, 'Dorn2021-reduced.npz')\n", (1914, 1947), False, 'import os, pathlib\n'), ((2127, 2179), 'os.path.join', 'os.path.join', (['dirDATA', '"""Pataky2014-mediolateral.csv"""'], {}), "(dirDATA, 'Pataky2014-mediolateral.csv')\n", (2139, 2179), False, 'import os, pathlib\n'), ((2220, 2259), 'os.path.join', 'os.path.join', (['dirDATA', '"""SimulatedA.csv"""'], {}), "(dirDATA, 'SimulatedA.csv')\n", (2232, 2259), False, 'import os, pathlib\n'), ((2301, 2340), 'os.path.join', 'os.path.join', (['dirDATA', '"""SimulatedB.csv"""'], {}), "(dirDATA, 'SimulatedB.csv')\n", (2313, 2340), False, 'import os, pathlib\n'), ((515, 552), 'numpy.loadtxt', 'np.loadtxt', (['self.fpath'], {'delimiter': '""","""'}), "(self.fpath, delimiter=',')\n", (525, 552), True, 'import numpy as np\n'), ((569, 599), 'numpy.asarray', 'np.asarray', (['a[:, 0]'], {'dtype': 'int'}), '(a[:, 0], dtype=int)\n', (579, 599), True, 'import numpy as np\n'), ((1024, 1049), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.Q'], {}), '(0, 1, self.Q)\n', (1035, 1049), True, 'import numpy as np\n'), ((1196, 1217), 'numpy.unique', 'np.unique', (['self.group'], {}), '(self.group)\n', (1205, 1217), True, 'import numpy as np\n'), ((837, 862), 'os.path.split', 'os.path.split', (['self.fpath'], {}), '(self.fpath)\n', (850, 862), False, 'import os, pathlib\n'), ((1356, 1365), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1363, 1365), True, 'from matplotlib import pyplot as plt\n'), ((1977, 2015), 'numpy.load', 'np.load', (['self.fpath'], {'allow_pickle': '(True)'}), '(self.fpath, allow_pickle=True)\n', (1984, 2015), True, 'import numpy as np\n')] |
"""
This module implements the Phenotype Phase Plane Analysis.
(Edwards et al. 2001, Characterizing the metabolic phenotype: A phenotype phase plane analysis)
Author: <NAME>
"""
from __future__ import absolute_import
from builtins import object
from .simulation import FBA
from ..solvers import solver_instance
from framed.solvers.solution import Status
import numpy
import matplotlib.pyplot as plt
class PhenotypePhasePlane(object):
    """Container for the results of a phenotype phase plane analysis.

    Holds the objective values and the shadow prices of the metabolites
    associated with the two varied reactions, on the grid defined by
    ``rxn_x_range`` x ``rxn_y_range``, and offers plotting helpers.
    """

    def __init__(self, rxn_x, rxn_y, rxn_x_range, rxn_y_range):
        """
        Arguments:
            rxn_x (str): reaction varied along the x axis
            rxn_y (str): reaction varied along the y axis
            rxn_x_range (list or array): flux values for reaction x
            rxn_y_range (list or array): flux values for reaction y
        """
        self.rxn_x = rxn_x
        self.rxn_y = rxn_y
        # converting reaction ranges to numpy arrays and storing them
        self.x_range = numpy.array(rxn_x_range)
        self.y_range = numpy.array(rxn_y_range)
        len_x = len(self.x_range)
        len_y = len(self.y_range)
        # empty (zero) arrays to be filled in by the analysis
        self.f_objective = numpy.zeros((len_x, len_y))
        self.shadow_price_x = numpy.zeros((len_x, len_y))
        self.shadow_price_y = numpy.zeros((len_x, len_y))

    def _plot_grid(self, data, new_figure, show_plot):
        """Shared helper that renders one result grid as a pcolormesh.

        Flips the sign of exchange ('EX_') reaction ranges so uptake
        rates are plotted as positive values.  (Extracted to remove the
        triplicated plotting code in the three public plot methods.)
        """
        if new_figure:
            plt.figure()
        x = self.x_range
        y = self.y_range
        if 'EX_' in self.rxn_x:
            x = x * -1
        if 'EX_' in self.rxn_y:
            y = y * -1
        plt.pcolormesh(x, y, numpy.transpose(data))
        plt.colorbar()
        if show_plot:
            plt.show()

    def plot_objective_function(self, new_figure=True, show_plot=True):
        """
        Plot the objective values over the phase plane.
        new_figure: if set to True, a new matplotlib figure will be created.
        show_plot: if set to True, current figure will be shown
        """
        self._plot_grid(self.f_objective, new_figure, show_plot)

    def plot_shadow_price_x(self, new_figure=True, show_plot=True):
        """
        Plot the shadow price of the metabolite associated with reaction x.
        new_figure: if set to True, a new matplotlib figure will be created.
        show_plot: if set to True, current figure will be shown
        """
        self._plot_grid(self.shadow_price_x, new_figure, show_plot)

    def plot_shadow_price_y(self, new_figure=True, show_plot=True):
        """
        Plot the shadow price of the metabolite associated with reaction y.
        (Docstring fixed: it previously said "reaction x".)
        new_figure: if set to True, a new matplotlib figure will be created.
        show_plot: if set to True, current figure will be shown
        """
        self._plot_grid(self.shadow_price_y, new_figure, show_plot)
def PhPP(model, rxn_x, rxn_y, rxn_x_range, rxn_y_range, target=None, maximize=True):
    """
    Phenotype Phase Plane Analysis
    analyze the changes in the objective function and the shadow prices
    Arguments:
        model (CBModel): the metabolic model
        rxn_x (str): reaction to be plotted along x axis. must be of a type convertable to numpy.array
        rxn_y (str): reaction to be plotted along y axis. must be of a type convertable to numpy.array
        rxn_x_range (list or array): the range of the reaction x
        rxn_y_range (list or array): the range of the reaction y
        target (str): the reaction id of the optimization target.
            if None is included, it will attempt to detect the biomass function
        maximize: True or False. the sense of the optimization
    Returns:
        phaseplane
    """
    solver = solver_instance(model)
    # find metabolite ids corresponding to reactions x and y
    # (takes the first metabolite of each reaction's stoichiometry)
    met_x = list(model.reactions[rxn_x].stoichiometry.keys())[0]
    met_y = list(model.reactions[rxn_y].stoichiometry.keys())[0]
    # create a PhenotypePhasePlane instance for storing results
    phase_plane = PhenotypePhasePlane(rxn_x, rxn_y, rxn_x_range, rxn_y_range)
    # run one FBA per grid point, fixing both reaction fluxes
    for i, v_x in enumerate(rxn_x_range):
        for j, v_y in enumerate(rxn_y_range):
            constraints = {rxn_x: v_x, rxn_y: v_y}
            solution = FBA(model, constraints=constraints, target=target, maximize=maximize, solver=solver,
                           get_shadow_prices=True)
            # infeasible grid points keep their zero initial values
            if solution.status == Status.OPTIMAL:
                phase_plane.f_objective[i, j] = solution.fobj
                phase_plane.shadow_price_x[i, j] = solution.shadow_prices[met_x]
                phase_plane.shadow_price_y[i, j] = solution.shadow_prices[met_y]
    return phase_plane
| [
"matplotlib.pyplot.colorbar",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.transpose",
"matplotlib.pyplot.show"
] | [((661, 685), 'numpy.array', 'numpy.array', (['rxn_x_range'], {}), '(rxn_x_range)\n', (672, 685), False, 'import numpy\n'), ((709, 733), 'numpy.array', 'numpy.array', (['rxn_y_range'], {}), '(rxn_y_range)\n', (720, 733), False, 'import numpy\n'), ((933, 960), 'numpy.zeros', 'numpy.zeros', (['(len_x, len_y)'], {}), '((len_x, len_y))\n', (944, 960), False, 'import numpy\n'), ((991, 1018), 'numpy.zeros', 'numpy.zeros', (['(len_x, len_y)'], {}), '((len_x, len_y))\n', (1002, 1018), False, 'import numpy\n'), ((1049, 1076), 'numpy.zeros', 'numpy.zeros', (['(len_x, len_y)'], {}), '((len_x, len_y))\n', (1060, 1076), False, 'import numpy\n'), ((1613, 1627), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1625, 1627), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2322), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2320, 2322), True, 'import matplotlib.pyplot as plt\n'), ((3003, 3017), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3015, 3017), True, 'import matplotlib.pyplot as plt\n'), ((1351, 1363), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1361, 1363), True, 'import matplotlib.pyplot as plt\n'), ((1585, 1603), 'numpy.transpose', 'numpy.transpose', (['f'], {}), '(f)\n', (1600, 1603), False, 'import numpy\n'), ((1662, 1672), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1670, 1672), True, 'import matplotlib.pyplot as plt\n'), ((2037, 2049), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2047, 2049), True, 'import matplotlib.pyplot as plt\n'), ((2277, 2298), 'numpy.transpose', 'numpy.transpose', (['sp_x'], {}), '(sp_x)\n', (2292, 2298), False, 'import numpy\n'), ((2357, 2367), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2365, 2367), True, 'import matplotlib.pyplot as plt\n'), ((2732, 2744), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2742, 2744), True, 'import matplotlib.pyplot as plt\n'), ((2972, 2993), 'numpy.transpose', 
'numpy.transpose', (['sp_y'], {}), '(sp_y)\n', (2987, 2993), False, 'import numpy\n'), ((3052, 3062), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3060, 3062), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
def flatten(l):
    """Recursively flatten an arbitrarily nested iterable into a flat list.

    Strings and bytes are treated as atomic leaf values rather than
    iterables; the previous version recursed forever on string input
    (each character of a string is itself an iterable string).
    """
    return list(_flatten_(l))


def _flatten_(*args):
    """Generator yielding the leaf values of a nested iterable."""
    for x in args:
        # str/bytes are iterable but must be kept whole to avoid
        # infinite recursion on their single-character elements.
        if hasattr(x, '__iter__') and not isinstance(x, (str, bytes)):
            for y in _flatten_(*x):
                yield y
        else:
            yield x
def check_valid_data(data):
    """Return True when the flattened data contains no NaN values."""
    flat = flatten(data)
    return not np.isnan(flat).any()
def clamp(n, minn, maxn):
    """Clamp *n* into [minn, maxn]; the lower bound wins if the bounds cross."""
    return max(minn, min(n, maxn))
| [
"numpy.isnan"
] | [((306, 317), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (314, 317), True, 'import numpy as np\n')] |
# Copyright 2022 Cisco Systems, Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
"""utils for adult dataset."""
import numpy as np
import pandas as pd
import torch
from sklearn.preprocessing import StandardScaler
class MyAdultDataset(torch.utils.data.Dataset):
    """Torch dataset wrapping a numpy feature matrix with optional labels
    and optional per-column feature metadata used for one-hot encoding."""

    def __init__(self, X: np.array, y=None, features=None):
        """
        Initialize.

        Parameters:
            X: numpy array of samples (rows)
            y: optional target column
            features: optional feature list; entries expose ``categorical``
                and ``values`` attributes
        """
        self.X = X
        self.y = y
        self.features = features

    def __len__(self):
        """Return the number of samples."""
        return self.X.shape[0]

    def __getitem__(self, idx):
        """Return one sample (and its label, when targets are present).

        Without feature metadata the raw row is returned; with metadata,
        categorical columns are one-hot encoded from their integer codes.
        """
        if not self.features:
            row = self.X[idx, :]
            return (row, self.y[idx]) if self.y is not None else row

        encoded = []
        for col, feat in enumerate(self.features):
            value = self.X[idx, col]
            if feat.categorical:
                one_hot = [0] * len(feat.values)
                one_hot[int(value)] = 1
                encoded.extend(one_hot)
            else:
                encoded.append(value)

        sample = np.array(encoded, dtype="float32")
        if self.y is not None:
            return sample, self.y[idx].astype("float32")
        return sample
class Feature:
    """Metadata describing a single dataset column."""

    def __init__(self, name, dtype, description,
                 categorical=False, values=None) -> None:
        """Store the column metadata.

        Parameters:
            name: column name
            dtype: storage dtype string (e.g. "integer", "float32")
            description: human-readable description
            categorical: whether the column holds category codes
            values: mapping of category code -> original value
        """
        self.name = name
        self.dtype = dtype
        self.description = description
        self.categorical = categorical
        self.values = values

    def __repr__(self) -> str:
        """Compact 'name:dtype' tag for debugging."""
        return f"{self.name}:{self.dtype}"
def clean_dataframe(df: pd.DataFrame, clear_nans=True, extra_symbols="?"):
    """Clean *df* in place by dropping rows that contain missing values.

    The literal string 'nan' and every entry of ``extra_symbols`` are
    treated as missing-value markers, converted to real NaN, and the
    affected rows are dropped.  No-op when ``clear_nans`` is False.
    """
    if not clear_nans:
        return
    # Replace markers at the frame level in one pass.  The previous
    # per-column ``df[i].replace(..., inplace=True)`` pattern operates on
    # an intermediate Series and does not reliably write back under
    # pandas copy-on-write semantics.
    markers = ['nan', *extra_symbols]
    df.replace(markers, np.nan, inplace=True)
    df.dropna(inplace=True)
def process_dataframe(df: pd.DataFrame,
                      target_column=None,
                      normalize="Scalar"):
    """Encode a dataframe as a float matrix plus per-column Feature metadata.

    Numeric columns are (optionally) standard-scaled; object columns are
    factorized to integer codes.  When ``target_column`` is given it is
    popped from *df* and returned (factorized if it is object/category).

    Returns: (X, y, features) where X is a float32 numpy matrix.
    """
    if normalize and normalize == "Scalar":
        numeric = df.select_dtypes(exclude=['object', 'category'])
        df[numeric.columns] = StandardScaler().fit_transform(numeric)

    y = None
    if target_column:
        y = df.pop(target_column)
        if y.dtype in ("object", "category"):
            y = y.factorize(sort=True)[0]

    features = []
    for column in df:
        if df.dtypes[column] == "object":
            # Replace string values with sorted integer codes and keep the
            # code -> original-value mapping in the feature metadata.
            codes, uniques = df[column].factorize(sort=True)
            df[column] = codes
            value_map = dict(enumerate(uniques))
            features.append(
                Feature(column, "integer", column,
                        categorical=True, values=value_map))
        else:
            features.append(Feature(column, "float32", column))

    X = df.to_numpy().astype('float32')
    return X, y, features
| [
"sklearn.preprocessing.StandardScaler",
"numpy.array"
] | [((1953, 1981), 'numpy.array', 'np.array', (['X'], {'dtype': '"""float32"""'}), "(X, dtype='float32')\n", (1961, 1981), True, 'import numpy as np\n'), ((1860, 1888), 'numpy.array', 'np.array', (['X'], {'dtype': '"""float32"""'}), "(X, dtype='float32')\n", (1868, 1888), True, 'import numpy as np\n'), ((3162, 3178), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3176, 3178), False, 'from sklearn.preprocessing import StandardScaler\n')] |
from numba import njit
import numpy as np
def det1(A):
    """Compute the determinants of a series of 1x1 matrices.

    Parameters
    ----------
    A : nx1x1 array_like
        Arrays to compute determinants of

    Returns
    -------
    dets : array of length n
        Matrix determinants
    """
    # A 1x1 determinant is just the single entry, so the batch of
    # determinants is the flattened input (copied, like ndarray.flatten).
    return np.copy(A).reshape(-1)
@njit(cache=True)
def det2(A):
    """Compute the determinants of a series of 2x2 matrices.

    Parameters
    ----------
    A : nx2x2 array_like
        Arrays to compute determinants of

    Returns
    -------
    dets : array of length n
        Matrix determinants
    """
    n = np.shape(A)[0]  # number of matrices in the batch
    dets = np.zeros(n)
    for i in range(n):
        # 2x2 determinant: a*d - b*c
        dets[i] = A[i, 0, 0] * A[i, 1, 1] - A[i, 0, 1] * A[i, 1, 0]
    return dets
@njit(cache=True)
def det3(A):
    """Compute the determinants of a series of 3x3 matrices.

    Parameters
    ----------
    A : nx3x3 array_like
        Arrays to compute determinants of

    Returns
    -------
    dets : array of length n
        Matrix determinants
    """
    n = np.shape(A)[0]  # number of matrices in the batch
    dets = np.zeros(n)
    for i in range(n):
        # Cofactor expansion along the first row.
        dets[i] = (
            A[i, 0, 0] * (A[i, 1, 1] * A[i, 2, 2] - A[i, 1, 2] * A[i, 2, 1])
            - A[i, 0, 1] * (A[i, 1, 0] * A[i, 2, 2] - A[i, 1, 2] * A[i, 2, 0])
            + A[i, 0, 2] * (A[i, 1, 0] * A[i, 2, 1] - A[i, 1, 1] * A[i, 2, 0])
        )
    return dets
def inv1(A):
    """Compute the inverses of a series of 1x1 matrices.

    Parameters
    ----------
    A : nx1x1 array_like
        Arrays to compute inverses of

    Returns
    -------
    dets : nx1x1 array
        Matrix inverses
    """
    # Inverting a 1x1 matrix is an elementwise reciprocal; broadcasting
    # keeps the nx1x1 shape.
    return np.divide(1.0, A)
@njit(cache=True)
def inv2(A):
    """Compute the inverses of a series of 2x2 matrices.

    Parameters
    ----------
    A : nx2x2 array_like
        Arrays to compute inverses of

    Returns
    -------
    dets : nx2x2 array
        Matrix inverses
    """
    invdets = 1.0 / det2(A)
    n = len(invdets)
    invs = np.zeros((n, 2, 2))
    for i in range(n):
        # Closed form: inverse = adjugate / determinant
        # (swap the diagonal, negate the off-diagonal).
        invs[i, 0, 0] = invdets[i] * A[i, 1, 1]
        invs[i, 1, 1] = invdets[i] * A[i, 0, 0]
        invs[i, 0, 1] = -invdets[i] * A[i, 0, 1]
        invs[i, 1, 0] = -invdets[i] * A[i, 1, 0]
    return invs
@njit(cache=True)
def inv3(A):
    """Compute the inverses of a series of 3x3 matrices.

    Parameters
    ----------
    A : nx3x3 array_like
        Arrays to compute inverses of

    Returns
    -------
    dets : nx3x3 array
        Matrix inverses
    """
    invdets = 1.0 / det3(A)
    n = len(invdets)
    invs = np.zeros((n, 3, 3))
    for i in range(n):
        # Closed form: inverse = adjugate / determinant; each entry is a
        # signed 2x2 cofactor of the transposed position.
        invs[i, 0, 0] = invdets[i] * (A[i, 1, 1] * A[i, 2, 2] - A[i, 1, 2] * A[i, 2, 1])
        invs[i, 0, 1] = -invdets[i] * (A[i, 0, 1] * A[i, 2, 2] - A[i, 0, 2] * A[i, 2, 1])
        invs[i, 0, 2] = invdets[i] * (A[i, 0, 1] * A[i, 1, 2] - A[i, 0, 2] * A[i, 1, 1])
        invs[i, 1, 0] = -invdets[i] * (A[i, 1, 0] * A[i, 2, 2] - A[i, 1, 2] * A[i, 2, 0])
        invs[i, 1, 1] = invdets[i] * (A[i, 0, 0] * A[i, 2, 2] - A[i, 0, 2] * A[i, 2, 0])
        invs[i, 1, 2] = -invdets[i] * (A[i, 0, 0] * A[i, 1, 2] - A[i, 0, 2] * A[i, 1, 0])
        invs[i, 2, 0] = invdets[i] * (A[i, 1, 0] * A[i, 2, 1] - A[i, 1, 1] * A[i, 2, 0])
        invs[i, 2, 1] = -invdets[i] * (A[i, 0, 0] * A[i, 2, 1] - A[i, 0, 1] * A[i, 2, 0])
        invs[i, 2, 2] = invdets[i] * (A[i, 0, 0] * A[i, 1, 1] - A[i, 0, 1] * A[i, 1, 0])
    return invs
if __name__ == "__main__":
    # Smoke test: compare the batched routines against numpy's builtins
    # on reproducible random input.
    np.random.seed(0)
    A2 = np.random.rand(1000, 2, 2)
    A3 = np.random.rand(1000, 3, 3)
    dets2 = det2(A2)
    dets3 = det3(A3)
    invs2 = inv2(A2)
    invs3 = inv3(A3)
    # Check error between this implementation and numpy, use hybrid error measure (f_ref - f_test)/(f_ref + 1) which
    # measures the relative error for large numbers and absolute error for small numbers
    errors = {}
    errors["det2"] = np.linalg.norm((dets2 - np.linalg.det(A2)) / (dets2 + 1.0))
    errors["det3"] = np.linalg.norm((dets3 - np.linalg.det(A3)) / (dets3 + 1.0))
    errors["inv2"] = np.linalg.norm((invs2 - np.linalg.inv(A2)) / (invs2 + 1.0))
    errors["inv3"] = np.linalg.norm((invs3 - np.linalg.inv(A3)) / (invs3 + 1.0))
    for name, error in errors.items():
        print(f"Error norm in {name} = {error:.03e}")
| [
"numpy.random.rand",
"numba.njit",
"numpy.linalg.det",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.random.seed",
"numpy.shape"
] | [((332, 348), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (336, 348), False, 'from numba import njit\n'), ((767, 783), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (771, 783), False, 'from numba import njit\n'), ((1664, 1680), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (1668, 1680), False, 'from numba import njit\n'), ((2241, 2257), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (2245, 2257), False, 'from numba import njit\n'), ((645, 656), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (653, 656), True, 'import numpy as np\n'), ((1080, 1091), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1088, 1091), True, 'import numpy as np\n'), ((1985, 2004), 'numpy.zeros', 'np.zeros', (['(n, 2, 2)'], {}), '((n, 2, 2))\n', (1993, 2004), True, 'import numpy as np\n'), ((2562, 2581), 'numpy.zeros', 'np.zeros', (['(n, 3, 3)'], {}), '((n, 3, 3))\n', (2570, 2581), True, 'import numpy as np\n'), ((3459, 3476), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3473, 3476), True, 'import numpy as np\n'), ((3486, 3512), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(2)', '(2)'], {}), '(1000, 2, 2)\n', (3500, 3512), True, 'import numpy as np\n'), ((3522, 3548), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)', '(3)'], {}), '(1000, 3, 3)\n', (3536, 3548), True, 'import numpy as np\n'), ((619, 630), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (627, 630), True, 'import numpy as np\n'), ((1054, 1065), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (1062, 1065), True, 'import numpy as np\n'), ((3900, 3917), 'numpy.linalg.det', 'np.linalg.det', (['A2'], {}), '(A2)\n', (3913, 3917), True, 'import numpy as np\n'), ((3981, 3998), 'numpy.linalg.det', 'np.linalg.det', (['A3'], {}), '(A3)\n', (3994, 3998), True, 'import numpy as np\n'), ((4062, 4079), 'numpy.linalg.inv', 'np.linalg.inv', (['A2'], {}), '(A2)\n', (4075, 4079), True, 'import numpy as np\n'), ((4143, 
4160), 'numpy.linalg.inv', 'np.linalg.inv', (['A3'], {}), '(A3)\n', (4156, 4160), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as si
plt.style.use('seaborn-whitegrid')
def f(x) :
    """Runge's function 1/(1 + 25 x^2), the classic interpolation test case."""
    denominator = 1 + 25 * x ** 2
    return 1 / denominator
def spline(n) :
    """Sample f at n equispaced nodes on [-1, 1] and linearly interpolate.

    Returns (nodes, node values, dense 100-point grid, interpolated values).
    """
    nodes = np.linspace(-1, 1, n)
    node_vals = f(nodes)
    dense = np.linspace(-1, 1, 100)
    interpolated = np.interp(dense, nodes, node_vals)
    return nodes, node_vals, dense, interpolated
return x_nodes,y_val,x,y
#------------------ Main Programme ----------------------#
n=10  # number of interpolation nodes
x_nodes,y_val,x,y = spline(n)
plt.plot(x,f(x),'gold')  # exact Runge function on the dense grid
plt.plot(x_nodes,y_val,'*')  # interpolation nodes
plt.plot(x,y,'yellow')  # piecewise-linear interpolant
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(-1,1)
plt.legend(['f(x)','Points',' spline'])
plt.show()
| [
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"numpy.linspace",
"numpy.interp",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((85, 119), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (98, 119), True, 'import matplotlib.pyplot as plt\n'), ((521, 550), 'matplotlib.pyplot.plot', 'plt.plot', (['x_nodes', 'y_val', '"""*"""'], {}), "(x_nodes, y_val, '*')\n", (529, 550), True, 'import matplotlib.pyplot as plt\n'), ((550, 574), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""yellow"""'], {}), "(x, y, 'yellow')\n", (558, 574), True, 'import matplotlib.pyplot as plt\n'), ((574, 589), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (584, 589), True, 'import matplotlib.pyplot as plt\n'), ((591, 606), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (601, 606), True, 'import matplotlib.pyplot as plt\n'), ((608, 623), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (616, 623), True, 'import matplotlib.pyplot as plt\n'), ((624, 665), 'matplotlib.pyplot.legend', 'plt.legend', (["['f(x)', 'Points', ' spline']"], {}), "(['f(x)', 'Points', ' spline'])\n", (634, 665), True, 'import matplotlib.pyplot as plt\n'), ((665, 675), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (673, 675), True, 'import matplotlib.pyplot as plt\n'), ((234, 255), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'n'], {}), '(-1, 1, n)\n', (245, 255), True, 'import numpy as np\n'), ((283, 306), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (294, 306), True, 'import numpy as np\n'), ((314, 342), 'numpy.interp', 'np.interp', (['x', 'x_nodes', 'y_val'], {}), '(x, x_nodes, y_val)\n', (323, 342), True, 'import numpy as np\n'), ((159, 173), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (167, 173), True, 'import numpy as np\n')] |
import numpy as np
from typing import Callable, Iterable
def custom_scheduler(
        max_steps: int,
        update_fn: Callable[[int], float]) -> float:
    """
    Create a custom generator for an input param.

    Yields update_fn(step) for step = 0 .. max_steps - 1.
    """
    for step in range(max_steps):
        yield update_fn(step)


def get_custom_exp(
        max_steps: int,
        start_val: float,
        end_val: float) -> Iterable:
    """
    Create a custom exponential scheduler.

    Yields max_steps values moving geometrically from start_val (step 0)
    to end_val (final step).
    """
    assert isinstance(max_steps, int) and max_steps >= 1
    if max_steps == 1:
        # A single-step schedule just emits the starting value; the
        # general formula below would divide by zero.
        return custom_scheduler(max_steps, lambda step: float(start_val))
    # Solve start_val * exp(rate * (max_steps - 1)) == end_val for rate.
    # BUGFIX: the previous sign convention (log(start/end) with exp(+N1*x))
    # moved the schedule *away* from end_val instead of toward it.
    rate = np.log(end_val / start_val) / (max_steps - 1)
    update_fn = lambda step: start_val * np.exp(rate * step)
    return custom_scheduler(max_steps, update_fn)


def get_custom_linear(
        max_steps: int,
        start_val: float,
        end_val: float) -> Iterable:
    """
    Create a custom linear scheduler.

    Yields max_steps values moving linearly from start_val (step 0)
    to end_val (final step).
    """
    assert isinstance(max_steps, int) and max_steps >= 1
    if max_steps == 1:
        # Avoid the zero division permitted by the assert above.
        return custom_scheduler(max_steps, lambda step: float(start_val))
    slope = (end_val - start_val) / (max_steps - 1)
    return custom_scheduler(max_steps, lambda step: slope * step + start_val)
| [
"numpy.exp",
"numpy.log"
] | [((539, 566), 'numpy.log', 'np.log', (['(start_val / end_val)'], {}), '(start_val / end_val)\n', (545, 566), True, 'import numpy as np\n'), ((610, 624), 'numpy.exp', 'np.exp', (['(N1 * x)'], {}), '(N1 * x)\n', (616, 624), True, 'import numpy as np\n')] |
import bpy, bmesh
import socket
from struct import unpack
import numpy
from .common import PROTOCOL_VERSION, send_protobuf, receive_protobuf, receive_buffer, receive_into_numpy_array
from .connection import Connection
from .messages_pb2 import ClientMessage, HelloResult, QueryBoundResult, ServerStateResult
# XXX if this operator gets called during rendering, then what? :)
class OSPRayUpdateMeshBound(bpy.types.Operator):
    """Update bounding geometry with bound provided by plugin.

    Connects to the blospray render server, requests the bounding
    geometry for the active mesh object, and replaces that object's mesh
    data with the geometry the server returns.
    """
    bl_idname = "ospray.update_mesh_bound"
    bl_label = "Update bounding mesh from server"
    bl_options = {'REGISTER'}#, 'UNDO'} # Enable undo for the operator?
    def execute(self, context):
        # Requires the active object to be a mesh in object mode.
        obj = context.active_object
        assert obj.type == 'MESH'
        mesh = obj.data
        if obj.mode == 'EDIT':
            self.report({'ERROR'}, 'Mesh should be in object mode')
            return {'CANCELLED'}
        scene = context.scene
        ospray = scene.ospray
        # Connect to the render server configured in the scene settings.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        sock.connect((ospray.host, ospray.port))
        # Handshake: verify protocol compatibility before any requests.
        client_message = ClientMessage()
        client_message.type = ClientMessage.HELLO
        client_message.uint_value = PROTOCOL_VERSION
        send_protobuf(sock, client_message)
        result = HelloResult()
        receive_protobuf(sock, result)
        if not result.success:
            print('ERROR: Handshake with server:')
            print(result.message)
            self.report({'ERROR'}, 'Handshake with server failed: %s' % result.message)
            return {'CANCELLED'}
        # Volume data (i.e. mesh)
        print('Getting extent for mesh %s (ospray volume)' % mesh.name)
        # Send request
        client_message = ClientMessage()
        client_message.type = ClientMessage.QUERY_BOUND
        client_message.string_value = mesh.name
        send_protobuf(sock, client_message)
        # Get result
        result = QueryBoundResult()
        receive_protobuf(sock, result)
        if not result.success:
            print('ERROR: extent query failed:')
            print(result.message)
            self.report({'ERROR'}, 'Query failed: %s' % result.message)
            return {'CANCELLED'}
        # Receive actual geometry
        # Lengths are for the complete vector, not the number of higher
        # level elements
        vertices_len, edges_len, faces_len, loop_len = unpack('<IIII', receive_buffer(sock, 4*4))
        vertices = numpy.empty(vertices_len, dtype=numpy.float32)
        edges = numpy.empty(edges_len, dtype=numpy.uint32)
        faces = numpy.empty(faces_len, dtype=numpy.uint32)
        loop_start = numpy.empty(loop_len, dtype=numpy.uint32)
        loop_total = numpy.empty(loop_len, dtype=numpy.uint32)
        print('Mesh bound: %d v, %d e, %d f, %d l' % (vertices_len, edges_len, faces_len, loop_len))
        # Each array arrives as raw little-endian bytes (4 bytes/element).
        receive_into_numpy_array(sock, vertices, vertices_len*4)
        receive_into_numpy_array(sock, edges, edges_len*4)
        receive_into_numpy_array(sock, faces, faces_len*4)
        receive_into_numpy_array(sock, loop_start, loop_len*4)
        receive_into_numpy_array(sock, loop_total, loop_len*4)
        # Bye
        client_message.type = ClientMessage.BYE
        send_protobuf(sock, client_message)
        sock.close()
        # Rebuild the mesh from the received flat arrays via bmesh.
        # XXX use new mesh replace from 2.81 when it becomes available
        bm = bmesh.new()
        verts = []
        for x, y, z in vertices.reshape((-1,3)):
            verts.append(bm.verts.new((x, y, z)))
        for i, j in edges.reshape((-1,2)):
            bm.edges.new((verts[i], verts[j]))
        # Faces are encoded as (loop_start, loop_total) runs into 'faces'.
        for start, total in zip(loop_start, loop_total):
            vv = []
            for i in range(total):
                vi = faces[start+i]
                vv.append(verts[vi])
            bm.faces.new(vv)
        bm.to_mesh(mesh)
        mesh.update()
        return {'FINISHED'}
class OSPRayGetServerState(bpy.types.Operator):
"""Retrieve server state and store in text editor block"""
bl_idname = "ospray.get_server_state"
bl_label = "Get server state"
bl_options = {'REGISTER'}
def execute(self, context):
scene = context.scene
ospray = scene.ospray
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.connect((ospray.host, ospray.port))
# Handshake
client_message = ClientMessage()
client_message.type = ClientMessage.HELLO
client_message.uint_value = PROTOCOL_VERSION
send_protobuf(sock, client_message)
result = HelloResult()
receive_protobuf(sock, result)
if not result.success:
print('ERROR: Handshake with server:')
print(result.message)
self.report({'ERROR'}, 'Handshake with server failed: %s' % result.message)
return {'CANCELLED'}
# Send request
print('Getting server state')
client_message = ClientMessage()
client_message.type = ClientMessage.GET_SERVER_STATE
send_protobuf(sock, client_message)
# Get result
result = ServerStateResult()
receive_protobuf(sock, result)
# Bye
client_message.type = ClientMessage.BYE
send_protobuf(sock, client_message)
sock.close()
# Set in text
text = bpy.data.texts.new('BLOSPRAY server report')
text.write(result.state)
text.current_line_index = 0
return {'FINISHED'}
classes = (
OSPRayUpdateMeshBound,
OSPRayGetServerState
)
def register():
from bpy.utils import register_class
for cls in classes:
register_class(cls)
def unregister():
from bpy.utils import unregister_class
for cls in classes:
unregister_class(cls)
| [
"bpy.utils.unregister_class",
"socket.socket",
"bmesh.new",
"numpy.empty",
"bpy.data.texts.new",
"bpy.utils.register_class"
] | [((1033, 1085), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM', '(0)'], {}), '(socket.AF_INET, socket.SOCK_STREAM, 0)\n', (1046, 1085), False, 'import socket\n'), ((2583, 2629), 'numpy.empty', 'numpy.empty', (['vertices_len'], {'dtype': 'numpy.float32'}), '(vertices_len, dtype=numpy.float32)\n', (2594, 2629), False, 'import numpy\n'), ((2646, 2688), 'numpy.empty', 'numpy.empty', (['edges_len'], {'dtype': 'numpy.uint32'}), '(edges_len, dtype=numpy.uint32)\n', (2657, 2688), False, 'import numpy\n'), ((2705, 2747), 'numpy.empty', 'numpy.empty', (['faces_len'], {'dtype': 'numpy.uint32'}), '(faces_len, dtype=numpy.uint32)\n', (2716, 2747), False, 'import numpy\n'), ((2769, 2810), 'numpy.empty', 'numpy.empty', (['loop_len'], {'dtype': 'numpy.uint32'}), '(loop_len, dtype=numpy.uint32)\n', (2780, 2810), False, 'import numpy\n'), ((2832, 2873), 'numpy.empty', 'numpy.empty', (['loop_len'], {'dtype': 'numpy.uint32'}), '(loop_len, dtype=numpy.uint32)\n', (2843, 2873), False, 'import numpy\n'), ((3656, 3667), 'bmesh.new', 'bmesh.new', ([], {}), '()\n', (3665, 3667), False, 'import bpy, bmesh\n'), ((4589, 4641), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM', '(0)'], {}), '(socket.AF_INET, socket.SOCK_STREAM, 0)\n', (4602, 4641), False, 'import socket\n'), ((5713, 5757), 'bpy.data.texts.new', 'bpy.data.texts.new', (['"""BLOSPRAY server report"""'], {}), "('BLOSPRAY server report')\n", (5731, 5757), False, 'import bpy, bmesh\n'), ((6016, 6035), 'bpy.utils.register_class', 'register_class', (['cls'], {}), '(cls)\n', (6030, 6035), False, 'from bpy.utils import register_class\n'), ((6131, 6152), 'bpy.utils.unregister_class', 'unregister_class', (['cls'], {}), '(cls)\n', (6147, 6152), False, 'from bpy.utils import unregister_class\n')] |
from src.modules.Generator import Generator
from src.modules.Process import Process
from src.modules.Router import Router
from src.modules.Terminator import Terminator
import csv
import json
import numpy as np
class Model(object):
def __init__(self):
self.__components = {}
self.__currentTime = 0
self.__simulationFile = 'simulation.txt'
self.__reportsFile = 'reports.json'
# Columns for saving and loading
self.__columns = [
# Component attibutes
'type', # G=Generator, P=Process, R=Router, and T=Terminator
'name', 'target',
# Random attributes
'min_range', 'max_range', 'distribution',
# Generator attributes
'max_entities', 'entity_name',
# Process attributes
'num_resources', 'resource_name', 'discipline'
]
# Simulation log methods
def __createLog(self):
with open(self.__simulationFile, 'w'):
pass
def __printLog(self, message):
with open(self.__simulationFile, 'a') as f:
f.write(message+'\n')
# Component creation methods
def __nameInUse(self, name):
return name in self.__components.keys()
def createGenerator(self, name, *args, **kwargs):
if self.__nameInUse(name):
raise ValueError('Component name "' + name + '" is already in use!')
self.__components[name] = Generator(name, *args, **kwargs)
self.__components[name].printLog = self.__printLog
def createProcess(self, name, *args, **kwargs):
if self.__nameInUse(name):
raise ValueError('Component name "' + name + '" is already in use!')
self.__components[name] = Process(name, *args, **kwargs)
self.__components[name].printLog = self.__printLog
def createRouter(self, name, *args, **kwargs):
if self.__nameInUse(name):
raise ValueError('Component name "' + name + '" is already in use!')
self.__components[name] = Router(name, *args, **kwargs)
self.__components[name].printLog = self.__printLog
def createTerminator(self, name, *args, **kwargs):
if self.__nameInUse(name):
raise ValueError('Component name "' + name + '" is already in use!')
self.__components[name] = Terminator(name, *args, **kwargs)
self.__components[name].printLog = self.__printLog
# Component methods
def __getComponentsByType(self, component_type):
for name, component in self.__components.items():
if isinstance(component, component_type):
yield component
def __routeEntity(self, origin, entity):
target = self.__components[origin.target]
if isinstance(target, (Process, Terminator)):
target.receiveEntity(entity)
elif isinstance(target, Router):
self.__routeEntity(target, entity)
def __runGenerators(self):
for generator in self.__getComponentsByType(Generator):
entity = generator.generateEntity(self.__currentTime)
if entity:
self.__routeEntity(generator, entity)
def __runProcesses(self):
for process in self.__getComponentsByType(Process):
entities = process.outputEntities(self.__currentTime)
for entity in entities:
self.__routeEntity(process, entity)
process.process(self.__currentTime)
def __resetComponents(self):
for name, component in self.__components.items():
component.reset()
# Running
@property
def running(self):
for name, component in self.__components.items():
if (isinstance(component, Generator) and component.remainingEntities > 0) or (isinstance(component, Process) and component.processing > 0):
return True
return False
def run(self, random_state=None):
self.__currentTime = 0
self.__resetComponents()
np.random.seed(random_state)
self.__createLog()
self.__printLog('Model: simulation started')
while self.running:
self.__runGenerators()
self.__runProcesses()
self.__currentTime += 1
self.__printLog('Model: simulation ended')
self.__createReports()
# Model saving and loading
def save(self, csv_name):
with open(csv_name, mode='w') as opened_file:
writer = csv.DictWriter(opened_file, fieldnames=self.__columns)
writer.writeheader()
for name, component in self.__components.items():
component.saveMe(writer, self.__columns)
def load(csv_name):
model = Model()
with open(csv_name, mode='r') as opened_file:
reader = csv.DictReader(opened_file)
for row in reader:
if row['type'] == 'G' and isinstance(model, Model):
model.createGenerator(
name=row['name'],
target=row['target'],
min_range=int(row['min_range']),
max_range=int(row['max_range']),
distribution=row['distribution'],
max_entities=int(row['max_entities']),
entity_name=row['entity_name']
)
elif row['type'] == 'P' and isinstance(model, Model):
model.createProcess(
name=row['name'],
target=row['target'],
min_range=int(row['min_range']),
max_range=int(row['max_range']),
distribution=row['distribution'],
num_resources=int(row['num_resources']) if row['num_resources'] else None,
resource_name=row['resource_name'],
discipline=row['discipline']
)
elif row['type'] == 'R' and isinstance(model, Model):
model.createRouter(
name=row['name'],
targets=row['target'].split('$$'),
distribution=row['distribution']
)
elif row['type'] == 'T' and isinstance(model, Model):
model.createTerminator(
name=row['name']
)
return model
# Reports
def __createReports(self):
reports = []
for name, component in self.__components.items():
if isinstance(component, Process):
idle_time = component.reportIdleTime(self.__currentTime)
waiting_time = component.reportWaitingTime()
waiting_count = component.reportWaitingCount()
durations = component.reportDurationTime()
reports.append({
'name': component.name,
'resourceIdleTime': idle_time if len(idle_time) > 0 else None,
'minIdleTime': min(idle_time) if len(idle_time) > 0 else None,
'meanIdleTime': sum(idle_time)/len(idle_time) if len(idle_time) > 0 else None,
'maxIdleTime': max(idle_time) if len(idle_time) > 0 else None,
'minDurationTime': min(durations) if len(durations) > 0 else None,
'meanDurationTime': sum(durations)/len(durations) if len(durations) > 0 else None,
'maxDurationTime': max(durations) if len(durations) > 0 else None,
'immediateProcessing': component.reportImmediateProcessing(),
'minWaitingTime': min(waiting_time) if len(waiting_time) > 0 else None,
'meanWaitingTime': sum(waiting_time)/len(waiting_time) if len(waiting_time) > 0 else None,
'maxWaitingTime': max(waiting_time) if len(waiting_time) > 0 else None,
'minWaitingCount': min(waiting_count) if len(waiting_count) > 0 else None,
'meanWaitingCount': sum(waiting_count)/len(waiting_count) if len(waiting_count) > 0 else None,
'maxWaitingCount': max(waiting_count) if len(waiting_count) > 0 else None,
})
reports_json = json.dumps(reports, indent=2)
# print(reports_json)
with open(self.__reportsFile, 'w') as f:
f.write(reports_json) | [
"csv.DictWriter",
"csv.DictReader",
"src.modules.Generator.Generator",
"src.modules.Terminator.Terminator",
"json.dumps",
"src.modules.Router.Router",
"numpy.random.seed",
"src.modules.Process.Process"
] | [((1461, 1493), 'src.modules.Generator.Generator', 'Generator', (['name', '*args'], {}), '(name, *args, **kwargs)\n', (1470, 1493), False, 'from src.modules.Generator import Generator\n'), ((1760, 1790), 'src.modules.Process.Process', 'Process', (['name', '*args'], {}), '(name, *args, **kwargs)\n', (1767, 1790), False, 'from src.modules.Process import Process\n'), ((2056, 2085), 'src.modules.Router.Router', 'Router', (['name', '*args'], {}), '(name, *args, **kwargs)\n', (2062, 2085), False, 'from src.modules.Router import Router\n'), ((2355, 2388), 'src.modules.Terminator.Terminator', 'Terminator', (['name', '*args'], {}), '(name, *args, **kwargs)\n', (2365, 2388), False, 'from src.modules.Terminator import Terminator\n'), ((4065, 4093), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (4079, 4093), True, 'import numpy as np\n'), ((8412, 8441), 'json.dumps', 'json.dumps', (['reports'], {'indent': '(2)'}), '(reports, indent=2)\n', (8422, 8441), False, 'import json\n'), ((4533, 4587), 'csv.DictWriter', 'csv.DictWriter', (['opened_file'], {'fieldnames': 'self.__columns'}), '(opened_file, fieldnames=self.__columns)\n', (4547, 4587), False, 'import csv\n'), ((4877, 4904), 'csv.DictReader', 'csv.DictReader', (['opened_file'], {}), '(opened_file)\n', (4891, 4904), False, 'import csv\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: self_training_validation.py
# @Author: <NAME>
# @Time: 23/1/22 17:09
import csv
import logging
import os
import sys
import time
from math import floor
from os import walk
import numpy as np
import yagmail
from sklearn.metrics import f1_score, mean_squared_error, accuracy_score
from sklearn.model_selection import StratifiedKFold
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC
from sklearn.utils import Bunch
threshold = 0.75
k = 3
time_str = time.strftime("%Y%m%d-%H%M%S")
file_name = 'hyp_self_training'
log_file = os.path.join('..', 'logs', '_'.join([file_name, time_str]) + '.log')
csv_path = os.path.join('tests', file_name + '_' + time_str
+ '.csv')
logging.basicConfig(level=logging.DEBUG,
format=' %(asctime)s :: %(levelname)-8s :: %(message)s',
handlers=[logging.FileHandler(log_file),
logging.StreamHandler(sys.stdout)]
)
current = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(current)
sys.path.append(parent)
from utils.arff2dataset import arff_data
from instance_selection.ENN import ENN
from instance_selection.ENN_self_training \
import ENN_self_training
def working_datasets(folder):
if os.path.isdir(folder):
logging.info(f'Looking up for datasets in {folder}')
else:
logging.error(f'{folder} does not exist')
datasets_found = next(walk(folder), (None, None, []))[2]
datasets_found.sort()
logging.info(f'Founded {len(datasets_found)} - {datasets_found}')
header = [
'dataset',
'percent labeled',
'fold',
'f1-score SVC',
'mean squared error SVC',
'accuracy score SVC',
'f1-score before',
'mean squared error before',
'accuracy score before',
'f1-score after with deletion',
'mean squared error after with deletion',
'accuracy score after with deletion',
'f1-score after without deletion',
'mean squared error after without deletion',
'accuracy score after without deletion',
'initial samples',
'samples after self-training',
'samples after filtering with deletion',
'samples after filtering without deletion'
]
with open(csv_path, 'w') as save:
w = csv.writer(save)
w.writerow(header)
save.close()
datasets = dict.fromkeys(datasets_found)
for dataset in datasets_found:
bunch = arff_data(os.path.join(folder, dataset))
datasets[dataset] = tuple([bunch['data'], bunch['target']])
logging.debug('Datasets ready to be used')
return datasets
def training_model(x_train, y_train, x_test, y_test, csv_output, pre):
logging.debug('\t\tCreating model')
svc = SVC(probability=True, gamma="auto")
model = SelfTrainingClassifier(svc, threshold=threshold)
logging.debug('\t\tFitting model')
try:
model.fit(x_train, y_train)
fit_ok = True
except ValueError:
fit_ok = False
logging.exception('Error while fitting')
if fit_ok:
logging.debug('\t\tPredicting')
y_pred = model.predict(x_test)
y_proba = model.predict_proba(x_test)
f1 = f1_score(y_true=y_test, y_pred=y_pred, average="weighted")
mse = mean_squared_error(y_true=y_test, y_pred=y_pred)
acc = accuracy_score(y_true=y_test, y_pred=y_pred)
logging.info(
f'\t{"pre" if pre else "post"} f1 {f1:.2f} - mse {mse:.2f} - '
f'acc {acc:.2f}')
else:
f1 = mse = acc = ''
y_proba = None
csv_output += f1, mse, acc
return y_proba, fit_ok, csv_output
def self_training_hypothesis(datasets):
logging.info('Starting hypothesis testing')
skf = StratifiedKFold(n_splits=10, random_state=42, shuffle=True)
for dataset, (X, y) in datasets.items():
logging.info(f'Current dataset: {dataset} - Total samples: {len(X)}')
if len(X) != len(set([tuple(i) for i in X])):
logging.warning('\tThe dataset contains repeated samples')
for precision in precisions:
for fold, (train_index, test_index) in enumerate(skf.split(X, y)):
t_start = time.time()
csv_output = [dataset, precision, fold]
logging.info(f'\tprecision {precision} - iter {fold + 1}')
x_train, x_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
unlabeled_indexes = np.random.choice(len(x_train), floor(len(
x_train) * (1 - precision)), replace=False)
labeled_indexes = [i for i in [*range(len(x_train))] if i
not in unlabeled_indexes]
samples_before = len(labeled_indexes)
y_modified = np.copy(y_train)
x_labeled = x_train[labeled_indexes]
y_labeled = y_modified[labeled_indexes]
y_modified[unlabeled_indexes] = -1
# SVC
logging.debug('\t\tStarting SVC')
svc = SVC(probability=True, gamma="auto")
try:
svc.fit(x_labeled, y_labeled)
y_pred_svc = svc.predict(x_test)
svc_f1 = f1_score(y_true=y_test, y_pred=y_pred_svc,
average="weighted")
svc_mse = mean_squared_error(y_true=y_test,
y_pred=y_pred_svc)
svc_acc = accuracy_score(y_true=y_test, y_pred=y_pred_svc)
logging.info(
f'\t{"svc"} f1 {svc_f1:.2f} - mse {svc_mse:.2f} - '
f'acc {svc_acc:.2f}')
logging.debug('\t\tSVC - done')
except ValueError:
logging.exception('SVC failed.')
svc_f1 = svc_mse = svc_acc = ''
csv_output += svc_f1, svc_mse, svc_acc
# Semi Supervised
logging.debug(f'\t\tSamples before: {samples_before}')
y_proba, fit_ok, csv_output = training_model(
x_train, y_modified, x_test, y_test, csv_output, True)
if not fit_ok:
logging.warning(f'Fold {fold} failed with this precision.')
csv_output += ['', '', '', '', '', '',
samples_before, '', '', '']
with open(csv_path, 'a') as save:
w = csv.writer(save)
w.writerow(csv_output)
save.close()
continue
else:
logging.debug('\t\tBefore - done')
samples_after_sl = samples_before
x_labeled_before = np.copy(x_labeled)
y_labeled_before = np.copy(y_labeled)
for index0, y_p in enumerate(y_proba):
for index1, y_p1 in enumerate(y_p):
if y_p1 >= threshold:
samples_after_sl += 1
y_labeled = np.concatenate((y_labeled, [index1]))
x_labeled = np.concatenate((x_labeled, [x_train[
index0]]
))
break
logging.debug(f'\t\tSamples after SL: {samples_after_sl}')
try:
assert len(x_labeled) == len(y_labeled)
except AssertionError:
logging.exception(f'len(x_labeled) != len(y_labeled) -'
f' {len(x_labeled)} != {len(y_labeled)}')
exit(1)
logging.debug('\t\tFiltering with deletion')
try:
dataset_filtered_deleting = ENN(Bunch(data=x_labeled,
target=y_labeled), k)
logging.debug('\t\tFiltered')
except ValueError:
dataset_filtered_deleting = None
logging.exception(f'Expected n_neighbors <= n_samples, '
f'but n_samples = {len(x_labeled)}, '
f'n_neighbors = {k}')
logging.debug('\t\tFiltering without deletion')
try:
dataset_filtered_no_deleting = ENN_self_training(
Bunch(data=x_labeled_before, target=y_labeled_before),
Bunch(data=x_labeled, target=y_labeled), k
) if len(x_labeled) > len(x_labeled_before) else 0
except ValueError:
dataset_filtered_no_deleting = None
logging.exception('Failed filtering without deletion')
if dataset_filtered_no_deleting is not None:
logging.debug('\t\tFiltered')
if dataset_filtered_deleting is not None:
logging.debug('\t\tStarting with the deletion model')
y_after_filtering = np.copy(y_train)
samples_after_filtering_with_deletion = len(
dataset_filtered_deleting['data'])
logging.debug(f'\t\tSamples after filtering with deletion:'
f' {samples_after_filtering_with_deletion}')
x_samples_filtered = dataset_filtered_deleting['data']
indexes = []
for index0, x_sample in enumerate(x_train):
for index1, y_sample in enumerate(x_samples_filtered):
if np.array_equal(x_sample, y_sample):
indexes.append(index0)
break
indexes_to_remove = [x for x in [*range(len(x_train))]
if x not in indexes]
y_after_filtering[indexes_to_remove] = -1
logging.debug('\t\tDataset ready to train the new model')
_, _, csv_output = training_model(
x_train, y_after_filtering, x_test, y_test,
csv_output, False)
else:
csv_output += ['', '', '']
samples_after_filtering_with_deletion = ''
if dataset_filtered_no_deleting != 0 and \
dataset_filtered_no_deleting is not None:
logging.debug('\t\tStarting with the non deletion model')
y_after_filtering = np.copy(y_train)
samples_after_filtering_without_deletion = len(
dataset_filtered_no_deleting['data'])
logging.debug('\t\tSamples after filtering without '
'deletion '
f'{samples_after_filtering_without_deletion}')
x_samples_filtered = dataset_filtered_no_deleting['data']
indexes = []
for index0, x_sample in enumerate(x_train):
for index1, y_sample in enumerate(x_samples_filtered):
if np.array_equal(x_sample, y_sample):
indexes.append(index0)
break
indexes_to_remove = [x for x in [*range(len(x_train))]
if x not in indexes]
y_after_filtering[indexes_to_remove] = -1
logging.debug('\t\tDataset ready to train the new model')
_, _, csv_output = training_model(
x_train, y_after_filtering, x_test, y_test,
csv_output, False)
elif dataset_filtered_no_deleting == 0:
logging.debug('\t\tDataset ready to train the new model')
_, _, csv_output = training_model(
x_train, y_modified, x_test, y_test,
csv_output, False)
samples_after_filtering_without_deletion = samples_after_sl
else:
csv_output += ['', '', '']
samples_after_filtering_without_deletion = ''
csv_output += [samples_before, samples_after_sl,
samples_after_filtering_with_deletion,
samples_after_filtering_without_deletion]
with open(csv_path, 'a') as save:
w = csv.writer(save)
w.writerow(csv_output)
save.close()
logging.debug('\t\tWritten to file.')
t_end = time.time()
logging.info(
f'\t\tElapsed: {(t_end - t_start) / 60:.2f} minutes')
logging.info('\n\n')
if __name__ == "__main__":
yag = yagmail.SMTP(user='<email>', password='<<PASSWORD>>')
try:
logging.info('--- Starting ---')
precisions = [0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.5]
hyp_datasets = working_datasets(folder=os.path.join('..', 'datasets',
'hypothesis'))
self_training_hypothesis(hyp_datasets)
logging.info('--- Process completed ---')
attach = [csv_path]
yag.send(to='<email>', subject='self_training_validation '
'COMPLETED',
contents='self_training_validation has been completed.',
attachments=attach)
except Exception as e:
content = f'FATAL ERROR - Check the attached log'
yag.send(to='<email>', subject='self_training_validation '
'ERROR',
contents=content, attachments=[log_file])
logging.exception('--- Process has broken ---')
logging.info("Email sent successfully")
| [
"logging.StreamHandler",
"logging.debug",
"logging.exception",
"sklearn.model_selection.StratifiedKFold",
"logging.info",
"logging.error",
"sys.path.append",
"os.walk",
"os.path.isdir",
"logging.FileHandler",
"numpy.concatenate",
"sklearn.semi_supervised.SelfTrainingClassifier",
"csv.writer"... | [((559, 589), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (572, 589), False, 'import time\n'), ((713, 771), 'os.path.join', 'os.path.join', (['"""tests"""', "(file_name + '_' + time_str + '.csv')"], {}), "('tests', file_name + '_' + time_str + '.csv')\n", (725, 771), False, 'import os\n'), ((1127, 1151), 'os.path.dirname', 'os.path.dirname', (['current'], {}), '(current)\n', (1142, 1151), False, 'import os\n'), ((1152, 1175), 'sys.path.append', 'sys.path.append', (['parent'], {}), '(parent)\n', (1167, 1175), False, 'import sys\n'), ((1090, 1116), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1106, 1116), False, 'import os\n'), ((1369, 1390), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (1382, 1390), False, 'import os\n'), ((2713, 2755), 'logging.debug', 'logging.debug', (['"""Datasets ready to be used"""'], {}), "('Datasets ready to be used')\n", (2726, 2755), False, 'import logging\n'), ((2853, 2888), 'logging.debug', 'logging.debug', (['"""\t\tCreating model"""'], {}), "('\\t\\tCreating model')\n", (2866, 2888), False, 'import logging\n'), ((2899, 2934), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)', 'gamma': '"""auto"""'}), "(probability=True, gamma='auto')\n", (2902, 2934), False, 'from sklearn.svm import SVC\n'), ((2947, 2995), 'sklearn.semi_supervised.SelfTrainingClassifier', 'SelfTrainingClassifier', (['svc'], {'threshold': 'threshold'}), '(svc, threshold=threshold)\n', (2969, 2995), False, 'from sklearn.semi_supervised import SelfTrainingClassifier\n'), ((3000, 3034), 'logging.debug', 'logging.debug', (['"""\t\tFitting model"""'], {}), "('\\t\\tFitting model')\n", (3013, 3034), False, 'import logging\n'), ((3837, 3880), 'logging.info', 'logging.info', (['"""Starting hypothesis testing"""'], {}), "('Starting hypothesis testing')\n", (3849, 3880), False, 'import logging\n'), ((3891, 3950), 'sklearn.model_selection.StratifiedKFold', 
'StratifiedKFold', ([], {'n_splits': '(10)', 'random_state': '(42)', 'shuffle': '(True)'}), '(n_splits=10, random_state=42, shuffle=True)\n', (3906, 3950), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((13304, 13357), 'yagmail.SMTP', 'yagmail.SMTP', ([], {'user': '"""<email>"""', 'password': '"""<<PASSWORD>>"""'}), "(user='<email>', password='<<PASSWORD>>')\n", (13316, 13357), False, 'import yagmail\n'), ((14310, 14349), 'logging.info', 'logging.info', (['"""Email sent successfully"""'], {}), "('Email sent successfully')\n", (14322, 14349), False, 'import logging\n'), ((1400, 1452), 'logging.info', 'logging.info', (['f"""Looking up for datasets in {folder}"""'], {}), "(f'Looking up for datasets in {folder}')\n", (1412, 1452), False, 'import logging\n'), ((1471, 1512), 'logging.error', 'logging.error', (['f"""{folder} does not exist"""'], {}), "(f'{folder} does not exist')\n", (1484, 1512), False, 'import logging\n'), ((2438, 2454), 'csv.writer', 'csv.writer', (['save'], {}), '(save)\n', (2448, 2454), False, 'import csv\n'), ((3220, 3251), 'logging.debug', 'logging.debug', (['"""\t\tPredicting"""'], {}), "('\\t\\tPredicting')\n", (3233, 3251), False, 'import logging\n'), ((3350, 3408), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'y_test', 'y_pred': 'y_pred', 'average': '"""weighted"""'}), "(y_true=y_test, y_pred=y_pred, average='weighted')\n", (3358, 3408), False, 'from sklearn.metrics import f1_score, mean_squared_error, accuracy_score\n'), ((3423, 3471), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', ([], {'y_true': 'y_test', 'y_pred': 'y_pred'}), '(y_true=y_test, y_pred=y_pred)\n', (3441, 3471), False, 'from sklearn.metrics import f1_score, mean_squared_error, accuracy_score\n'), ((3486, 3530), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_test', 'y_pred': 'y_pred'}), '(y_true=y_test, y_pred=y_pred)\n', (3500, 3530), False, 'from sklearn.metrics import f1_score, mean_squared_error, 
accuracy_score\n'), ((3539, 3638), 'logging.info', 'logging.info', (['f"""\t{\'pre\' if pre else \'post\'} f1 {f1:.2f} - mse {mse:.2f} - acc {acc:.2f}"""'], {}), '(\n f"\\t{\'pre\' if pre else \'post\'} f1 {f1:.2f} - mse {mse:.2f} - acc {acc:.2f}"\n )\n', (3551, 3638), False, 'import logging\n'), ((13375, 13407), 'logging.info', 'logging.info', (['"""--- Starting ---"""'], {}), "('--- Starting ---')\n", (13387, 13407), False, 'import logging\n'), ((13679, 13720), 'logging.info', 'logging.info', (['"""--- Process completed ---"""'], {}), "('--- Process completed ---')\n", (13691, 13720), False, 'import logging\n'), ((945, 974), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (964, 974), False, 'import logging\n'), ((1006, 1039), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1027, 1039), False, 'import logging\n'), ((1540, 1552), 'os.walk', 'walk', (['folder'], {}), '(folder)\n', (1544, 1552), False, 'from os import walk\n'), ((2610, 2639), 'os.path.join', 'os.path.join', (['folder', 'dataset'], {}), '(folder, dataset)\n', (2622, 2639), False, 'import os\n'), ((3156, 3196), 'logging.exception', 'logging.exception', (['"""Error while fitting"""'], {}), "('Error while fitting')\n", (3173, 3196), False, 'import logging\n'), ((4140, 4198), 'logging.warning', 'logging.warning', (['"""\tThe dataset contains repeated samples"""'], {}), "('\\tThe dataset contains repeated samples')\n", (4155, 4198), False, 'import logging\n'), ((14258, 14305), 'logging.exception', 'logging.exception', (['"""--- Process has broken ---"""'], {}), "('--- Process has broken ---')\n", (14275, 14305), False, 'import logging\n'), ((4342, 4353), 'time.time', 'time.time', ([], {}), '()\n', (4351, 4353), False, 'import time\n'), ((4426, 4484), 'logging.info', 'logging.info', (['f"""\tprecision {precision} - iter {fold + 1}"""'], {}), "(f'\\tprecision {precision} - iter {fold + 1}')\n", (4438, 4484), False, 'import 
logging\n'), ((4975, 4991), 'numpy.copy', 'np.copy', (['y_train'], {}), '(y_train)\n', (4982, 4991), True, 'import numpy as np\n'), ((5191, 5224), 'logging.debug', 'logging.debug', (['"""\t\tStarting SVC"""'], {}), "('\\t\\tStarting SVC')\n", (5204, 5224), False, 'import logging\n'), ((5247, 5282), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)', 'gamma': '"""auto"""'}), "(probability=True, gamma='auto')\n", (5250, 5282), False, 'from sklearn.svm import SVC\n'), ((6202, 6256), 'logging.debug', 'logging.debug', (['f"""\t\tSamples before: {samples_before}"""'], {}), "(f'\\t\\tSamples before: {samples_before}')\n", (6215, 6256), False, 'import logging\n'), ((7004, 7022), 'numpy.copy', 'np.copy', (['x_labeled'], {}), '(x_labeled)\n', (7011, 7022), True, 'import numpy as np\n'), ((7058, 7076), 'numpy.copy', 'np.copy', (['y_labeled'], {}), '(y_labeled)\n', (7065, 7076), True, 'import numpy as np\n'), ((7630, 7688), 'logging.debug', 'logging.debug', (['f"""\t\tSamples after SL: {samples_after_sl}"""'], {}), "(f'\\t\\tSamples after SL: {samples_after_sl}')\n", (7643, 7688), False, 'import logging\n'), ((8011, 8055), 'logging.debug', 'logging.debug', (['"""\t\tFiltering with deletion"""'], {}), "('\\t\\tFiltering with deletion')\n", (8024, 8055), False, 'import logging\n'), ((8600, 8647), 'logging.debug', 'logging.debug', (['"""\t\tFiltering without deletion"""'], {}), "('\\t\\tFiltering without deletion')\n", (8613, 8647), False, 'import logging\n'), ((13050, 13087), 'logging.debug', 'logging.debug', (['"""\t\tWritten to file."""'], {}), "('\\t\\tWritten to file.')\n", (13063, 13087), False, 'import logging\n'), ((13112, 13123), 'time.time', 'time.time', ([], {}), '()\n', (13121, 13123), False, 'import time\n'), ((13140, 13206), 'logging.info', 'logging.info', (['f"""\t\tElapsed: {(t_end - t_start) / 60:.2f} minutes"""'], {}), "(f'\\t\\tElapsed: {(t_end - t_start) / 60:.2f} minutes')\n", (13152, 13206), False, 'import logging\n'), ((13244, 13264), 'logging.info', 
'logging.info', (['"""\n\n"""'], {}), "('\\n\\n')\n", (13256, 13264), False, 'import logging\n'), ((13516, 13560), 'os.path.join', 'os.path.join', (['""".."""', '"""datasets"""', '"""hypothesis"""'], {}), "('..', 'datasets', 'hypothesis')\n", (13528, 13560), False, 'import os\n'), ((5436, 5498), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'y_test', 'y_pred': 'y_pred_svc', 'average': '"""weighted"""'}), "(y_true=y_test, y_pred=y_pred_svc, average='weighted')\n", (5444, 5498), False, 'from sklearn.metrics import f1_score, mean_squared_error, accuracy_score\n'), ((5567, 5619), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', ([], {'y_true': 'y_test', 'y_pred': 'y_pred_svc'}), '(y_true=y_test, y_pred=y_pred_svc)\n', (5585, 5619), False, 'from sklearn.metrics import f1_score, mean_squared_error, accuracy_score\n'), ((5699, 5747), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_test', 'y_pred': 'y_pred_svc'}), '(y_true=y_test, y_pred=y_pred_svc)\n', (5713, 5747), False, 'from sklearn.metrics import f1_score, mean_squared_error, accuracy_score\n'), ((5768, 5855), 'logging.info', 'logging.info', (['f"""\t{\'svc\'} f1 {svc_f1:.2f} - mse {svc_mse:.2f} - acc {svc_acc:.2f}"""'], {}), '(\n f"\\t{\'svc\'} f1 {svc_f1:.2f} - mse {svc_mse:.2f} - acc {svc_acc:.2f}")\n', (5780, 5855), False, 'import logging\n'), ((5924, 5955), 'logging.debug', 'logging.debug', (['"""\t\tSVC - done"""'], {}), "('\\t\\tSVC - done')\n", (5937, 5955), False, 'import logging\n'), ((6446, 6505), 'logging.warning', 'logging.warning', (['f"""Fold {fold} failed with this precision."""'], {}), "(f'Fold {fold} failed with this precision.')\n", (6461, 6505), False, 'import logging\n'), ((6883, 6917), 'logging.debug', 'logging.debug', (['"""\t\tBefore - done"""'], {}), "('\\t\\tBefore - done')\n", (6896, 6917), False, 'import logging\n'), ((8251, 8280), 'logging.debug', 'logging.debug', (['"""\t\tFiltered"""'], {}), "('\\t\\tFiltered')\n", (8264, 8280), False, 'import 
logging\n'), ((9204, 9233), 'logging.debug', 'logging.debug', (['"""\t\tFiltered"""'], {}), "('\\t\\tFiltered')\n", (9217, 9233), False, 'import logging\n'), ((9313, 9366), 'logging.debug', 'logging.debug', (['"""\t\tStarting with the deletion model"""'], {}), "('\\t\\tStarting with the deletion model')\n", (9326, 9366), False, 'import logging\n'), ((9407, 9423), 'numpy.copy', 'np.copy', (['y_train'], {}), '(y_train)\n', (9414, 9423), True, 'import numpy as np\n'), ((9568, 9678), 'logging.debug', 'logging.debug', (['f"""\t\tSamples after filtering with deletion: {samples_after_filtering_with_deletion}"""'], {}), "(\n f'\\t\\tSamples after filtering with deletion: {samples_after_filtering_with_deletion}'\n )\n", (9581, 9678), False, 'import logging\n'), ((10339, 10396), 'logging.debug', 'logging.debug', (['"""\t\tDataset ready to train the new model"""'], {}), "('\\t\\tDataset ready to train the new model')\n", (10352, 10396), False, 'import logging\n'), ((10842, 10899), 'logging.debug', 'logging.debug', (['"""\t\tStarting with the non deletion model"""'], {}), "('\\t\\tStarting with the non deletion model')\n", (10855, 10899), False, 'import logging\n'), ((10940, 10956), 'numpy.copy', 'np.copy', (['y_train'], {}), '(y_train)\n', (10947, 10956), True, 'import numpy as np\n'), ((11107, 11222), 'logging.debug', 'logging.debug', (['f"""\t\tSamples after filtering without deletion {samples_after_filtering_without_deletion}"""'], {}), "(\n f'\\t\\tSamples after filtering without deletion {samples_after_filtering_without_deletion}'\n )\n", (11120, 11222), False, 'import logging\n'), ((11922, 11979), 'logging.debug', 'logging.debug', (['"""\t\tDataset ready to train the new model"""'], {}), "('\\t\\tDataset ready to train the new model')\n", (11935, 11979), False, 'import logging\n'), ((12940, 12956), 'csv.writer', 'csv.writer', (['save'], {}), '(save)\n', (12950, 12956), False, 'import csv\n'), ((6011, 6043), 'logging.exception', 'logging.exception', (['"""SVC 
failed."""'], {}), "('SVC failed.')\n", (6028, 6043), False, 'import logging\n'), ((6711, 6727), 'csv.writer', 'csv.writer', (['save'], {}), '(save)\n', (6721, 6727), False, 'import csv\n'), ((8129, 8168), 'sklearn.utils.Bunch', 'Bunch', ([], {'data': 'x_labeled', 'target': 'y_labeled'}), '(data=x_labeled, target=y_labeled)\n', (8134, 8168), False, 'from sklearn.utils import Bunch\n'), ((9067, 9121), 'logging.exception', 'logging.exception', (['"""Failed filtering without deletion"""'], {}), "('Failed filtering without deletion')\n", (9084, 9121), False, 'import logging\n'), ((12223, 12280), 'logging.debug', 'logging.debug', (['"""\t\tDataset ready to train the new model"""'], {}), "('\\t\\tDataset ready to train the new model')\n", (12236, 12280), False, 'import logging\n'), ((7324, 7361), 'numpy.concatenate', 'np.concatenate', (['(y_labeled, [index1])'], {}), '((y_labeled, [index1]))\n', (7338, 7361), True, 'import numpy as np\n'), ((7402, 7448), 'numpy.concatenate', 'np.concatenate', (['(x_labeled, [x_train[index0]])'], {}), '((x_labeled, [x_train[index0]]))\n', (7416, 7448), True, 'import numpy as np\n'), ((8763, 8816), 'sklearn.utils.Bunch', 'Bunch', ([], {'data': 'x_labeled_before', 'target': 'y_labeled_before'}), '(data=x_labeled_before, target=y_labeled_before)\n', (8768, 8816), False, 'from sklearn.utils import Bunch\n'), ((8842, 8881), 'sklearn.utils.Bunch', 'Bunch', ([], {'data': 'x_labeled', 'target': 'y_labeled'}), '(data=x_labeled, target=y_labeled)\n', (8847, 8881), False, 'from sklearn.utils import Bunch\n'), ((9990, 10024), 'numpy.array_equal', 'np.array_equal', (['x_sample', 'y_sample'], {}), '(x_sample, y_sample)\n', (10004, 10024), True, 'import numpy as np\n'), ((11573, 11607), 'numpy.array_equal', 'np.array_equal', (['x_sample', 'y_sample'], {}), '(x_sample, y_sample)\n', (11587, 11607), True, 'import numpy as np\n')] |
import numpy as np
from scipy.signal import fftconvolve
"""
This script contains dilation and erosion implementations described in the following link.
https://stackoverflow.com/questions/25034259/scipy-ndimage-morphology-operators-saturate-my-computer-memory-ram-8gb
This implementation deals with memory error faced in scipy ones.
"""
def binary_dilation(A, B):
    """Binary dilation of array ``A`` by structuring element ``B``.

    Implemented via an FFT-based convolution: any point whose
    B-neighborhood overlaps a nonzero cell of A gets a response >= 1,
    so thresholding at 0.5 recovers the dilated mask.
    """
    response = fftconvolve(A, B, 'same')
    return response > 0.5
def binary_erosion(A, B):
    """Binary erosion of array ``A`` by structuring element ``B``.

    A point survives only if every nonzero cell of B lies on a nonzero
    cell of A, i.e. the convolution response reaches the full weight of
    B (the 0.5 slack absorbs FFT round-off).  This is the v2 scheme
    from the module note, inlined.
    """
    thresh = np.count_nonzero(B) - 0.5
    return fftconvolve(A, B, 'same') > thresh
def _erode_v1(A,B,R):
#R should be the radius of the spherical kernel, i.e. half the width of B
A_inv = np.logical_not(A)
A_inv = np.pad(A_inv, R, 'constant', constant_values=1)
tmp = fftconvolve(A_inv, B, 'same') > 0.5
#now we must un-pad the result, and invert it again
return np.logical_not(tmp[R:-R, R:-R, R:-R])
def _erode_v2(A,B):
thresh = np.count_nonzero(B)-0.5
return fftconvolve(A,B,'same') > thresh
def binary_opening(image, structure=None):
    """Return fast binary morphological opening of an image.
    This function returns the same result as greyscale opening but performs
    faster for binary images.
    The morphological opening on an image is defined as an erosion followed by
    a dilation. Opening can remove small bright spots (i.e. "salt") and connect
    small dark cracks. This tends to "open" up (dark) gaps between (bright)
    features.
    Parameters
    ----------
    image : ndarray
        Binary input image.
    structure : ndarray, optional
        The neighborhood expressed as an array of 1's and 0's.
        NOTE(review): despite the ``None`` default, no cross-shaped fallback
        is implemented here -- the FFT-based helpers require an explicit
        array, so callers must always pass a structuring element.
    Returns
    -------
    opening : ndarray of bool
        The result of the morphological opening.
    """
    # Opening = erosion followed by dilation, both FFT-based (see helpers above).
    eroded = binary_erosion(image, structure)
    out = binary_dilation(eroded, structure)
    # eroded = erode_v2(image, structure)
    # out = dilate(eroded, structure)
    return out
| [
"numpy.count_nonzero",
"numpy.pad",
"numpy.logical_not",
"scipy.signal.fftconvolve"
] | [((570, 587), 'numpy.logical_not', 'np.logical_not', (['A'], {}), '(A)\n', (584, 587), True, 'import numpy as np\n'), ((600, 647), 'numpy.pad', 'np.pad', (['A_inv', 'R', '"""constant"""'], {'constant_values': '(1)'}), "(A_inv, R, 'constant', constant_values=1)\n", (606, 647), True, 'import numpy as np\n'), ((761, 798), 'numpy.logical_not', 'np.logical_not', (['tmp[R:-R, R:-R, R:-R]'], {}), '(tmp[R:-R, R:-R, R:-R])\n', (775, 798), True, 'import numpy as np\n'), ((377, 402), 'scipy.signal.fftconvolve', 'fftconvolve', (['A', 'B', '"""same"""'], {}), "(A, B, 'same')\n", (388, 402), False, 'from scipy.signal import fftconvolve\n'), ((658, 687), 'scipy.signal.fftconvolve', 'fftconvolve', (['A_inv', 'B', '"""same"""'], {}), "(A_inv, B, 'same')\n", (669, 687), False, 'from scipy.signal import fftconvolve\n'), ((833, 852), 'numpy.count_nonzero', 'np.count_nonzero', (['B'], {}), '(B)\n', (849, 852), True, 'import numpy as np\n'), ((868, 893), 'scipy.signal.fftconvolve', 'fftconvolve', (['A', 'B', '"""same"""'], {}), "(A, B, 'same')\n", (879, 893), False, 'from scipy.signal import fftconvolve\n')] |
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__affiliation__ = 'Living Analytics Research Centre, Singapore Management University'
__website__ = 'http://mysmu.edu/phdis2012/wei.xie.2012'
import scipy.sparse.linalg as la
import numpy as np
# solve m2 = sum a_k * v_k * v_k', m3 = smu a_k * (r' * v_k) * v_k * v_k'
# return result (a, (r'*v), v)
def solve(_m2, _m3, _n, _k):
    """Recover weights and components from moment matrices.

    Per the module comment, solves
        m2 = sum_k a_k * v_k * v_k',
        m3 = sum_k a_k * (r' * v_k) * v_k * v_k'
    returning ``(a, r, v)`` where ``a`` holds the weights a_k, ``r`` the
    values r'*v_k (eigenvalues of the whitened third moment) and ``v`` the
    components as columns, each normalized to sum to 1.

    Parameters: _m2/_m3 are the (n x n) moment matrices (a sparse/linear
    operator is fine for _m2), _n the dimension, _k the number of components.
    Results are complex arrays; presumably callers take real parts -- verify.
    """
    # Top-_k eigendecomposition of the symmetric second moment.
    vals, vecs = la.eigsh(_m2, _k)
    vals = vals + 0j  # promote to complex so sqrt of negative eigenvalues works
    W = np.array(vecs, dtype=np.complex128)
    for i in range(_k):
        for j in range(_n):
            W[j, i] /= np.sqrt(vals[i])  # whiten: columns scaled by 1/sqrt(eigenvalue)
    # Project the third moment into the whitened basis and diagonalize it.
    T = np.dot(W.T, _m3.dot(W))
    vals, vecs = np.linalg.eig(T)
    # Map eigenvectors back via the pseudo-inverse of W (solve of the Gram matrix).
    v = np.dot(W, np.linalg.solve(np.dot(W.T, W), vecs))
    s = []
    for i in range(_k):
        s.append(sum(v[:, i]))
    for i in range(_k):
        for j in range(_n):
            v[j, i] /= s[i]  # normalize each component to unit column sum
    # Weights follow from the whitening relation: a_k = 1 / ||W' v_k||^2.
    a = []
    for i in range(_k):
        v_ = np.dot(W.T, v[:, i])
        a.append(1. / np.dot(v_.T, v_))
    a = np.array(a)
    r = vals
    return a, r, v
| [
"numpy.sqrt",
"numpy.linalg.eig",
"scipy.sparse.linalg.eigsh",
"numpy.array",
"numpy.dot"
] | [((396, 413), 'scipy.sparse.linalg.eigsh', 'la.eigsh', (['_m2', '_k'], {}), '(_m2, _k)\n', (404, 413), True, 'import scipy.sparse.linalg as la\n'), ((444, 479), 'numpy.array', 'np.array', (['vecs'], {'dtype': 'np.complex128'}), '(vecs, dtype=np.complex128)\n', (452, 479), True, 'import numpy as np\n'), ((623, 639), 'numpy.linalg.eig', 'np.linalg.eig', (['T'], {}), '(T)\n', (636, 639), True, 'import numpy as np\n'), ((965, 976), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (973, 976), True, 'import numpy as np\n'), ((895, 915), 'numpy.dot', 'np.dot', (['W.T', 'v[:, i]'], {}), '(W.T, v[:, i])\n', (901, 915), True, 'import numpy as np\n'), ((555, 571), 'numpy.sqrt', 'np.sqrt', (['vals[i]'], {}), '(vals[i])\n', (562, 571), True, 'import numpy as np\n'), ((675, 689), 'numpy.dot', 'np.dot', (['W.T', 'W'], {}), '(W.T, W)\n', (681, 689), True, 'import numpy as np\n'), ((938, 954), 'numpy.dot', 'np.dot', (['v_.T', 'v_'], {}), '(v_.T, v_)\n', (944, 954), True, 'import numpy as np\n')] |
#!/usr/bin/python
import numpy as np
class BLC:
    """Black Level Compensation.

    Adds per-channel black-level offsets to a Bayer-mosaic raw image.  The
    two green channels additionally receive a correction proportional to
    the co-sited red/blue value (weights ``alpha``/``beta``, divided by 256).

    Parameters
    ----------
    img : 2-D ndarray
        Raw Bayer image.
    parameter : sequence
        ``[bl_r, bl_gr, bl_gb, bl_b, alpha, beta]``.
    bayer_pattern : str
        One of ``'rggb'``, ``'bggr'``, ``'gbrg'``, ``'grbg'``.
    clip : int
        Upper clip bound applied to the result (lower bound is 0).
    """

    def __init__(self, img, parameter, bayer_pattern, clip):
        self.img = img
        self.parameter = parameter
        self.bayer_pattern = bayer_pattern
        self.clip = clip

    def clipping(self):
        """Clip the image in place to [0, clip] and return it."""
        np.clip(self.img, 0, self.clip, out=self.img)
        return self.img

    def execute(self):
        """Apply black level compensation and return the clipped int16 result."""
        bl_r, bl_gr, bl_gb, bl_b, alpha, beta = self.parameter[:6]
        raw_h = self.img.shape[0]
        raw_w = self.img.shape[1]
        blc_img = np.empty((raw_h, raw_w), np.int16)
        for y in range(0, raw_h - 1, 2):
            for x in range(0, raw_w - 1, 2):
                # Each 2x2 tile holds one R, one B and two G samples; the
                # green corrections use the co-sited R/B value.  Values are
                # truncated to int16 on store.
                # BUG FIX: the non-'rggb' branches previously tested the
                # undefined bare name `bayer_pattern` instead of
                # `self.bayer_pattern`, raising NameError at runtime.
                if self.bayer_pattern == 'rggb':
                    r = self.img[y, x] + bl_r
                    b = self.img[y + 1, x + 1] + bl_b
                    gr = self.img[y, x + 1] + bl_gr + alpha * r / 256
                    gb = self.img[y + 1, x] + bl_gb + beta * b / 256
                    blc_img[y, x] = r
                    blc_img[y, x + 1] = gr
                    blc_img[y + 1, x] = gb
                    blc_img[y + 1, x + 1] = b
                elif self.bayer_pattern == 'bggr':
                    b = self.img[y, x] + bl_b
                    r = self.img[y + 1, x + 1] + bl_r
                    gb = self.img[y, x + 1] + bl_gb + beta * b / 256
                    gr = self.img[y + 1, x] + bl_gr + alpha * r / 256
                    blc_img[y, x] = b
                    blc_img[y, x + 1] = gb
                    blc_img[y + 1, x] = gr
                    blc_img[y + 1, x + 1] = r
                elif self.bayer_pattern == 'gbrg':
                    b = self.img[y, x + 1] + bl_b
                    r = self.img[y + 1, x] + bl_r
                    gb = self.img[y, x] + bl_gb + beta * b / 256
                    gr = self.img[y + 1, x + 1] + bl_gr + alpha * r / 256
                    blc_img[y, x] = gb
                    blc_img[y, x + 1] = b
                    blc_img[y + 1, x] = r
                    blc_img[y + 1, x + 1] = gr
                elif self.bayer_pattern == 'grbg':
                    r = self.img[y, x + 1] + bl_r
                    b = self.img[y + 1, x] + bl_b
                    gr = self.img[y, x] + bl_gr + alpha * r / 256
                    gb = self.img[y + 1, x + 1] + bl_gb + beta * b / 256
                    blc_img[y, x] = gr
                    blc_img[y, x + 1] = r
                    blc_img[y + 1, x] = b
                    blc_img[y + 1, x + 1] = gb
        self.img = blc_img
        return self.clipping()
| [
"numpy.clip",
"numpy.empty"
] | [((301, 346), 'numpy.clip', 'np.clip', (['self.img', '(0)', 'self.clip'], {'out': 'self.img'}), '(self.img, 0, self.clip, out=self.img)\n', (308, 346), True, 'import numpy as np\n'), ((682, 716), 'numpy.empty', 'np.empty', (['(raw_h, raw_w)', 'np.int16'], {}), '((raw_h, raw_w), np.int16)\n', (690, 716), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from keras.models import load_model
from keras import backend as K
import tensorflow as tf
# FER-style class index -> expression name mapping used by getExpression.
expressionDictionary = {
    0 : 'angry',
    1 : 'disgust',
    2 : 'fear',
    3 : 'happy',
    4 : 'sad',
    5 : 'surprise',
    6 : 'neutral'
}
# Load the trained Keras classifier once at import time; building the
# predict function eagerly avoids paying that cost on the first request.
expressionModel = load_model('./ML/Expression/model.h5')
expressionModel._make_predict_function()
# Haar cascade used to locate faces before classification.
face_cascade = cv2.CascadeClassifier("./ML/Expression/haarcascade_frontalface_alt.xml")
def getExpression(img):
    """Return an expression label for the first face detected in ``img``.

    Detects faces with the Haar cascade, crops the first detection, and
    classifies it with the CNN (input is converted BGR->gray, resized to
    48x48 and scaled to [0, 1]).  Predicted classes 1 ('disgust'),
    2 ('fear') and 5 ('surprise') are remapped to 'happy', 'relaxed' and
    'energetic'; other classes use expressionDictionary.  Falls back to
    'happy' when no face is found.
    """
    frame = img
    faces = face_cascade.detectMultiScale(img, 1.3, 5)
    # NOTE(review): the function returns inside the loop, so only the first
    # detected face is ever classified.
    for (x,y,w,h) in faces:
        # NOTE(review): rows are sliced with w and columns with h --
        # presumably relies on near-square detections; confirm intent.
        frame = frame[y:y+w,x:x+h]
        frame = cv2.resize(frame,(48,48))
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = np.array(frame, dtype='float32').reshape(-1,48,48,1)/255.0
        # `y` is reused from here on: it now holds the predicted class
        # index, not the face rectangle's top coordinate.
        y = expressionModel.predict(frame)[0]
        y = np.argmax(y)
        if(y==1):
            return 'happy'
        if(y==2):
            return 'relaxed'
        if(y==5):
            return 'energetic'
        return expressionDictionary[y]
    # No face detected: default label.
    return 'happy'
| [
"keras.models.load_model",
"numpy.argmax",
"numpy.array",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.resize"
] | [((289, 327), 'keras.models.load_model', 'load_model', (['"""./ML/Expression/model.h5"""'], {}), "('./ML/Expression/model.h5')\n", (299, 327), False, 'from keras.models import load_model\n'), ((384, 456), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""./ML/Expression/haarcascade_frontalface_alt.xml"""'], {}), "('./ML/Expression/haarcascade_frontalface_alt.xml')\n", (405, 456), False, 'import cv2\n'), ((633, 660), 'cv2.resize', 'cv2.resize', (['frame', '(48, 48)'], {}), '(frame, (48, 48))\n', (643, 660), False, 'import cv2\n'), ((675, 714), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (687, 714), False, 'import cv2\n'), ((857, 869), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (866, 869), True, 'import numpy as np\n'), ((731, 763), 'numpy.array', 'np.array', (['frame'], {'dtype': '"""float32"""'}), "(frame, dtype='float32')\n", (739, 763), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# number of output figures = 3
import helper.basis
from helper.figure import Figure
import helper.grid
import helper.topo_opt
import matplotlib as mpl
import numpy as np
# NOTE(review): normalTransformation is assigned but never referenced below.
normalTransformation = helper.topo_opt.Stats.Transformation.normal
# The two coordinate transformations to plot (one figure each).
transformations = [
  helper.topo_opt.Stats.Transformation.normal,
  helper.topo_opt.Stats.Transformation.cholesky,
]
# Dimension and B-spline degree of the hierarchical tensor-product basis.
d, p = 2, 3
basis1D = helper.basis.HierarchicalBSpline(p)
basis = helper.basis.TensorProduct(basis1D, d)
for q in range(2):
  fig = Figure.create(figsize=(3, 3), scale=0.93)
  ax = fig.gca()
  # Load the topology-optimization statistics, apply the current
  # transformation and hierarchize on the sparse-grid basis.
  stats = helper.topo_opt.Stats()
  stats.load("./data/topoOpt/stats/cross-reg6b4-lb0.01-ub0.99")
  stats.transform(transformations[q])
  stats.hierarchize(basis)
  # Evaluate on an nn x nn grid, map values back to the "normal"
  # transformation and reduce to the smallest eigenvalue per point.
  nn = 100
  _, _, XX = helper.grid.generateMeshGrid((nn, nn))
  XX = stats.convertGridToDomainCoords(XX)
  YY = stats.evaluate(XX)
  YY = helper.topo_opt.Stats.transformValues(
    YY, transformations[q], helper.topo_opt.Stats.Transformation.normal)
  YY = helper.topo_opt.Stats.getSmallestEigenvalues(YY)
  YY = np.reshape(YY, (nn, nn))
  XX = stats.convertDomainToGridCoords(XX)
  XX0, XX1 = np.reshape(XX[:,0], (nn, nn)), np.reshape(XX[:,1], (nn, nn))
  # Contour levels; the filled contour highlights negative smallest
  # eigenvalues in color C1.
  v = np.linspace(0, 0.4, 33)
  ax.contourf(XX0, XX1, -YY, [0, 100], colors="C1")
  ax.contour(XX0, XX1, YY, v)
  # remove weird hairline from (0.00015128, 0) to (0, 0.00015128)
  # (behind origin grid point, has line width of 0.0311
  # according to Preflight)
  for child in ax.get_children():
    if isinstance(child, mpl.collections.LineCollection):
      segments = [XX for XX in child.get_segments()
                  if not np.any(np.all(XX < 0.01, axis=1))]
      if len(segments) != len(child.get_segments()):
        child.set_segments(segments)
  # Overlay the sample points (converted to grid coordinates).
  X = stats.convertDomainToGridCoords(stats.X)
  ax.plot(X[:,0], X[:,1], "k.", clip_on=False)
  ax.set_aspect("equal")
  ax.set_xlim(0, 1)
  ax.set_ylim(0, 1)
  xt, yt = np.linspace(0, 1, 5), np.linspace(0, 1, 5)
  ax.set_xticks(xt)
  ax.set_yticks(yt)
  ax.set_xticklabels(["${:g}$".format(x) for x in xt])
  ax.set_yticklabels(["${:g}$".format(y) for y in yt])
  ax.set_xlabel(r"\hspace*{42mm}$x_1$", labelpad=-12)
  ax.set_ylabel(r"\hspace*{40mm}$x_2$", labelpad=-22)
  fig.save()
# Third figure: a standalone colorbar for the contour levels `v`
# (reuses the `v` left over from the loop above).
fig = Figure.create(figsize=(0.9, 2.47), scale=1.05)
ax = fig.gca()
colorMap = mpl.cm.viridis
colorMap.set_under("C1")
norm = mpl.colors.BoundaryNorm(v, colorMap.N)
colorBar = mpl.colorbar.ColorbarBase(
    ax, cmap=colorMap, norm=norm, extend="min", drawedges=True)
colorBar.dividers.set_color(
    [colorMap(x) for x in np.linspace(0, 1, len(v)-1)])
fig.save()
| [
"helper.figure.Figure.create",
"numpy.all",
"numpy.reshape",
"matplotlib.colorbar.ColorbarBase",
"numpy.linspace",
"matplotlib.colors.BoundaryNorm"
] | [((2272, 2318), 'helper.figure.Figure.create', 'Figure.create', ([], {'figsize': '(0.9, 2.47)', 'scale': '(1.05)'}), '(figsize=(0.9, 2.47), scale=1.05)\n', (2285, 2318), False, 'from helper.figure import Figure\n'), ((2393, 2431), 'matplotlib.colors.BoundaryNorm', 'mpl.colors.BoundaryNorm', (['v', 'colorMap.N'], {}), '(v, colorMap.N)\n', (2416, 2431), True, 'import matplotlib as mpl\n'), ((2443, 2532), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax'], {'cmap': 'colorMap', 'norm': 'norm', 'extend': '"""min"""', 'drawedges': '(True)'}), "(ax, cmap=colorMap, norm=norm, extend='min',\n drawedges=True)\n", (2468, 2532), True, 'import matplotlib as mpl\n'), ((513, 554), 'helper.figure.Figure.create', 'Figure.create', ([], {'figsize': '(3, 3)', 'scale': '(0.93)'}), '(figsize=(3, 3), scale=0.93)\n', (526, 554), False, 'from helper.figure import Figure\n'), ((1060, 1084), 'numpy.reshape', 'np.reshape', (['YY', '(nn, nn)'], {}), '(YY, (nn, nn))\n', (1070, 1084), True, 'import numpy as np\n'), ((1211, 1234), 'numpy.linspace', 'np.linspace', (['(0)', '(0.4)', '(33)'], {}), '(0, 0.4, 33)\n', (1222, 1234), True, 'import numpy as np\n'), ((1144, 1174), 'numpy.reshape', 'np.reshape', (['XX[:, 0]', '(nn, nn)'], {}), '(XX[:, 0], (nn, nn))\n', (1154, 1174), True, 'import numpy as np\n'), ((1175, 1205), 'numpy.reshape', 'np.reshape', (['XX[:, 1]', '(nn, nn)'], {}), '(XX[:, 1], (nn, nn))\n', (1185, 1205), True, 'import numpy as np\n'), ((1943, 1963), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (1954, 1963), True, 'import numpy as np\n'), ((1965, 1985), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (1976, 1985), True, 'import numpy as np\n'), ((1646, 1671), 'numpy.all', 'np.all', (['(XX < 0.01)'], {'axis': '(1)'}), '(XX < 0.01, axis=1)\n', (1652, 1671), True, 'import numpy as np\n')] |
import os
import atexit
import signal
import numpy as np
import tensorflow as tf
from subprocess import Popen, PIPE
from glearn.utils.log import log, log_warning
from glearn.utils.path import remove_empty_dirs
from glearn.utils import tf_utils
SUMMARY_KEY_PREFIX = "_summary_"
DEFAULT_EVALUATE_QUERY = "evaluate"
DEFAULT_EXPERIMENT_QUERY = "experiment"
class SummaryWriter(object):
    """Accumulates TF summaries and simple values per named query and flushes
    them to per-query tensorboard event files.  Can also host the tensorboard
    server process for the experiment.
    """

    class Results(object):
        """Summary results accumulated for a single query between flushes."""

        def __init__(self, query):
            self.query = query
            self.results = []  # merged-summary protos collected from session runs
            self.simple_summaries = {}  # tag -> tf.Summary.Value

    def __init__(self, config):
        self.config = config
        self.server = None

        # TODO - measure the performance impact of all these sanitizations
        self.debug_numerics = self.config.is_debugging("debug_numerics")

    @property
    def sess(self):
        return self.config.sess

    def start(self, **kwargs):
        """Reset per-run state, clean stale event files and, if configured,
        launch the tensorboard server."""
        self.summary_path = self.config.summary_path
        self.summaries = {}
        self.summary_fetches = {}
        self.summary_results = {}
        self.run_metadatas = {}
        self.writers = {}

        # get graph
        self.kwargs = kwargs
        if "graph" not in self.kwargs:
            self.kwargs["graph"] = self.sess.graph

        server = self.config.get("tensorboard", False)
        if server:
            # start tensorboard server
            if self.server is None:
                # if this is the first evaluation, clean all experiment summaries
                self.clean_summaries(self.config.tensorboard_path)

                self.start_server()
        else:
            # clean only this evaluation's summaries
            self.clean_summaries(self.summary_path)

        os.makedirs(self.summary_path, exist_ok=True)

    def stop(self):
        """Close all per-query event writers."""
        for _, writer in self.writers.items():
            writer.close()
        self.writers = {}

    def start_server(self):
        """Launch the tensorboard subprocess (at most once)."""
        if self.server is None:
            # tensorboard should ignore Ctrl-C interrupts, and only be terminated explicitly
            def ignore_interrupt():
                signal.signal(signal.SIGINT, signal.SIG_IGN)

            # start tensorboard server
            path = self.config.tensorboard_path
            port = 6006
            self.server = Popen(["tensorboard", "--logdir", path], preexec_fn=ignore_interrupt,
                                stdout=PIPE, stderr=PIPE)
            atexit.register(self.stop_server)

            url = f"http://{self.config.ip}:{port}"
            log(f"Started tensorboard server: {url} ({path})", color="white", bold=True)

    def stop_server(self):
        # stop tensorboard server
        if self.server is not None:
            log(f"Stopping tensorboard server")
            self.server.terminate()
            self.server = None

    def clean_summaries(self, path):
        # delete all events.out.tfevents files, and cleanup empty dirs
        for root, dirs, files in os.walk(path):
            for sub_path in files:
                if sub_path.startswith("events.out.tfevents"):
                    os.remove(os.path.join(root, sub_path))
        remove_empty_dirs(path)

    def get_summary_results(self, query):
        """Return (creating if necessary) the Results accumulator for query."""
        if query not in self.summary_results:
            self.summary_results[query] = self.Results(query)
        return self.summary_results[query]

    def add_simple_summary(self, name, query=None, allow_overwrite=False, **kwargs):
        """Record a tf.Summary.Value (built from kwargs) under the query's tag."""
        query = query or DEFAULT_EXPERIMENT_QUERY
        summary_results = self.get_summary_results(query)
        tag = self.summary_scope(name, query)
        if not allow_overwrite and tag in summary_results.simple_summaries:
            log_warning(f"Overwriting simple summary value: {tag} "
                        "(Use set_simple_value to avoid warning.)")
        summary_results.simple_summaries[tag] = tf.Summary.Value(tag=tag, **kwargs)

    def add_simple_value(self, name, value, query=None, allow_overwrite=False):
        """Record a scalar value for the given query."""
        if self.debug_numerics:
            value = np.nan_to_num(value)

        self.add_simple_summary(name, simple_value=value, query=query,
                                allow_overwrite=allow_overwrite)

    def set_simple_value(self, name, value, query=None):
        """Record a scalar value, silently overwriting any previous value."""
        self.add_simple_value(name, value, query=query, allow_overwrite=True)

    def add_summary_value(self, name, summary, query=None):
        """Register a summary op under the given query and return it."""
        query = query or DEFAULT_EVALUATE_QUERY
        if query in self.summaries:
            query_summaries = self.summaries[query]
        else:
            query_summaries = []
            self.summaries[query] = query_summaries
        query_summaries.append(summary)
        return summary

    def add_scalar(self, name, tensor, query=None):
        """Register a scalar summary op for tensor."""
        if self.debug_numerics:
            tensor = tf_utils.nan_to_num(tensor)

        summary = tf.summary.scalar(name, tensor)
        return self.add_summary_value(name, summary, query=query)

    def add_histogram(self, name, values, query=None):
        """Register a histogram summary op for values."""
        if self.debug_numerics:
            values = tf_utils.nan_to_num(values)

        summary = tf.summary.histogram(name, values)
        return self.add_summary_value(name, summary, query=query)

    def add_activation(self, tensor, query=None):
        """Register histogram and sparsity summaries for an activation tensor."""
        if tensor is None:
            return

        name = tensor.op.name
        self.add_histogram(f"{name}/activation", tensor, query=query)
        self.add_scalar(f"{name}/sparsity", tf.nn.zero_fraction(tensor), query=query)

    def add_variables(self, tvars, query=None):
        """Register value histograms for a list of trainable variables."""
        for tvar in tvars:
            name = tvar.op.name
            self.add_histogram(f"{name}/value", tvar, query=query)

    def add_gradients(self, grads_tvars, query=None):
        """Register gradient histograms for (gradient, variable) pairs."""
        for grad, tvar in grads_tvars:
            if grad is None:
                continue
            name = tvar.op.name
            self.add_histogram(f"{name}/gradient", grad, query=query)

    def add_images(self, name, images, max_outputs=3, query=None):
        """Register an image summary op (at most max_outputs images)."""
        if self.debug_numerics:
            images = tf_utils.nan_to_num(images)

        summary = tf.summary.image(name, images, max_outputs=max_outputs)
        return self.add_summary_value(name, summary, query=query)

    def add_simple_images(self, name, images, max_outputs=3, query=None, allow_overwrite=False):
        """Encode images with matplotlib and record them as simple summaries."""
        # matplotlib allows image encoding. the imports are here since they are slow.
        import io
        import matplotlib
        try:
            # first try this
            matplotlib.use('TkAgg')
        except Exception:
            # fallback backend
            matplotlib.use('Agg')
        import matplotlib.pyplot as plt

        # convert image to 3-channel
        if images.shape[-1] == 1:
            images = np.stack((np.squeeze(images, axis=-1),) * 3, axis=-1)

        if self.debug_numerics:
            images = np.nan_to_num(images)

        # BUG FIX: honor max_outputs (previously every image was written),
        # matching the behavior of add_images above.
        for i, image in enumerate(images[:max_outputs]):
            im_bytes = io.BytesIO()
            plt.imsave(im_bytes, image, format='png')
            summary_image = tf.Summary.Image(encoded_image_string=im_bytes.getvalue())
            # BUG FIX: forward allow_overwrite so set_simple_images does not
            # emit a spurious overwrite warning.
            self.add_simple_summary(f"{name}/{i}", image=summary_image, query=query,
                                    allow_overwrite=allow_overwrite)

    def set_simple_images(self, name, images, max_outputs=3, query=None):
        """Record images, silently overwriting any previous images."""
        self.add_simple_images(name, images, max_outputs=max_outputs, query=query,
                               allow_overwrite=True)

    def add_text(self, name, tensor, query=None):
        """Register a text summary op for tensor."""
        summary = tf.summary.text(name, tensor)
        return self.add_summary_value(name, summary, query=query)

    def write_text(self, name, tensor, query=None):
        # TODO - convert to: add_simple_text(...)
        query = query or DEFAULT_EXPERIMENT_QUERY
        tag = self.summary_scope(name, query)
        summary = tf.summary.text(tag, tensor)
        self.config.sess.run({self.get_query_key(query): summary})

    def add_run_metadata(self, run_metadata, query=None):
        """Record run metadata to be written at the next flush."""
        self.run_metadatas[query] = run_metadata

    def get_fetch(self, query=None):
        """Return (building and caching) the merged summary fetch for query."""
        if query in self.summary_fetches:
            return self.summary_fetches[query]
        if query in self.summaries:
            fetch = tf.summary.merge(self.summaries[query])
            self.summary_fetches[query] = fetch
            return fetch
        return None

    def get_query_key(self, query=None):
        """Return the session-fetch dict key used for a query's summaries."""
        if query is None:
            return SUMMARY_KEY_PREFIX
        return f"{SUMMARY_KEY_PREFIX}{query}"

    def prepare_fetches(self, fetches, query=None):
        """Insert summary fetches for the given query (or list of queries)."""
        if not isinstance(query, list):
            query = [query]
        for query_name in query:
            fetch = self.get_fetch(query_name)
            if fetch is not None:
                fetches[self.get_query_key(query_name)] = fetch

    def process_results(self, results):
        """Pop summary entries out of session results into the accumulators."""
        results_keys = list(results.keys())
        for key in results_keys:
            if key.startswith(SUMMARY_KEY_PREFIX):
                query = key[len(SUMMARY_KEY_PREFIX):]
                if len(query) == 0:
                    query = None
                query_results = results.pop(key, None)
                summary_results = self.get_summary_results(query)
                summary_results.results.append(query_results)

    def summary_scope(self, name, query=None):
        """Prefix name with the query scope, if any."""
        if query is None:
            return name
        return f"{query}/{name}"

    def flush(self, global_step=None):
        """Write all accumulated summaries and run metadata to event files."""
        # collect all relevant query
        query = set(list(self.summary_results.keys()) + list(self.run_metadatas.keys()))

        # flush summary data
        for query_name in query:
            # BUG FIX: remember the accumulator key before renaming --
            # results stored under the None (default) query were previously
            # popped with the renamed key and therefore never written.
            results_key = query_name

            # get writer
            path = os.path.abspath(self.summary_path)
            if query_name is None:
                query_name = DEFAULT_EVALUATE_QUERY
            path = os.path.join(path, query_name)
            if query_name in self.writers:
                writer = self.writers[query_name]
            else:
                writer = tf.summary.FileWriter(path, **self.kwargs)
                self.writers[query_name] = writer

            # write any summary results for query
            summary_results = self.summary_results.pop(results_key, None)
            if summary_results is not None:
                # write results
                if len(summary_results.results) > 0:
                    summary = summary_results.results[0]  # TODO - average
                    writer.add_summary(summary, global_step=global_step)

                # write simple values
                summary_values = list(summary_results.simple_summaries.values())
                simple_summary = tf.Summary(value=summary_values)
                writer.add_summary(simple_summary, global_step=global_step)

            # write any metadata results for query
            run_metadata = self.run_metadatas.pop(results_key, None)
            if run_metadata is not None:
                tag = f"{query_name}/step{global_step}"
                writer.add_run_metadata(run_metadata, tag, global_step)

            # flush writer
            writer.flush()
class NullSummaryWriter(object):
    """Inert drop-in replacement for SummaryWriter.

    Exposes the same entry points but records nothing; every method
    accepts arbitrary keyword arguments and returns None.
    """

    def __init__(self, **kwargs):
        """Accept and ignore any configuration."""

    def start(self, **kwargs):
        """No-op."""

    def stop(self, **kwargs):
        """No-op."""

    def add_simple_value(self, **kwargs):
        """No-op."""

    def add_scalar(self, **kwargs):
        """No-op; there is no summary op to return."""

    def add_histogram(self, **kwargs):
        """No-op; there is no summary op to return."""

    def add_activation(self, **kwargs):
        """No-op; there is no summary op to return."""

    def add_gradients(self, **kwargs):
        """No-op; there is no summary op to return."""

    def get_fetch(self, **kwargs):
        """No-op; there is never a fetch."""

    def prepare_fetches(self, **kwargs):
        """No-op."""

    def process_results(self, **kwargs):
        """No-op."""

    def flush(self, **kwargs):
        """No-op."""
| [
"io.BytesIO",
"tensorflow.Summary.Value",
"os.walk",
"tensorflow.summary.image",
"glearn.utils.log.log_warning",
"glearn.utils.log.log",
"glearn.utils.tf_utils.nan_to_num",
"subprocess.Popen",
"tensorflow.nn.zero_fraction",
"glearn.utils.path.remove_empty_dirs",
"tensorflow.summary.scalar",
"a... | [((1696, 1741), 'os.makedirs', 'os.makedirs', (['self.summary_path'], {'exist_ok': '(True)'}), '(self.summary_path, exist_ok=True)\n', (1707, 1741), False, 'import os\n'), ((2925, 2938), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2932, 2938), False, 'import os\n'), ((3106, 3129), 'glearn.utils.path.remove_empty_dirs', 'remove_empty_dirs', (['path'], {}), '(path)\n', (3123, 3129), False, 'from glearn.utils.path import remove_empty_dirs\n'), ((3858, 3893), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'tag'}), '(tag=tag, **kwargs)\n', (3874, 3893), True, 'import tensorflow as tf\n'), ((4833, 4864), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['name', 'tensor'], {}), '(name, tensor)\n', (4850, 4864), True, 'import tensorflow as tf\n'), ((5087, 5121), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['name', 'values'], {}), '(name, values)\n', (5107, 5121), True, 'import tensorflow as tf\n'), ((6064, 6119), 'tensorflow.summary.image', 'tf.summary.image', (['name', 'images'], {'max_outputs': 'max_outputs'}), '(name, images, max_outputs=max_outputs)\n', (6080, 6119), True, 'import tensorflow as tf\n'), ((7433, 7462), 'tensorflow.summary.text', 'tf.summary.text', (['name', 'tensor'], {}), '(name, tensor)\n', (7448, 7462), True, 'import tensorflow as tf\n'), ((7746, 7774), 'tensorflow.summary.text', 'tf.summary.text', (['tag', 'tensor'], {}), '(tag, tensor)\n', (7761, 7774), True, 'import tensorflow as tf\n'), ((2252, 2351), 'subprocess.Popen', 'Popen', (["['tensorboard', '--logdir', path]"], {'preexec_fn': 'ignore_interrupt', 'stdout': 'PIPE', 'stderr': 'PIPE'}), "(['tensorboard', '--logdir', path], preexec_fn=ignore_interrupt,\n stdout=PIPE, stderr=PIPE)\n", (2257, 2351), False, 'from subprocess import Popen, PIPE\n'), ((2392, 2425), 'atexit.register', 'atexit.register', (['self.stop_server'], {}), '(self.stop_server)\n', (2407, 2425), False, 'import atexit\n'), ((2491, 2568), 'glearn.utils.log.log', 'log', (['f"""Started 
tensorboard server: {url} ({path})"""'], {'color': '"""white"""', 'bold': '(True)'}), "(f'Started tensorboard server: {url} ({path})', color='white', bold=True)\n", (2494, 2568), False, 'from glearn.utils.log import log, log_warning\n'), ((2679, 2714), 'glearn.utils.log.log', 'log', (['f"""Stopping tensorboard server"""'], {}), "(f'Stopping tensorboard server')\n", (2682, 2714), False, 'from glearn.utils.log import log, log_warning\n'), ((3684, 3791), 'glearn.utils.log.log_warning', 'log_warning', (['f"""Overwriting simple summary value: {tag} (Use set_simple_value to avoid warning.)"""'], {}), "(\n f'Overwriting simple summary value: {tag} (Use set_simple_value to avoid warning.)'\n )\n", (3695, 3791), False, 'from glearn.utils.log import log, log_warning\n'), ((4027, 4047), 'numpy.nan_to_num', 'np.nan_to_num', (['value'], {}), '(value)\n', (4040, 4047), True, 'import numpy as np\n'), ((4786, 4813), 'glearn.utils.tf_utils.nan_to_num', 'tf_utils.nan_to_num', (['tensor'], {}), '(tensor)\n', (4805, 4813), False, 'from glearn.utils import tf_utils\n'), ((5040, 5067), 'glearn.utils.tf_utils.nan_to_num', 'tf_utils.nan_to_num', (['values'], {}), '(values)\n', (5059, 5067), False, 'from glearn.utils import tf_utils\n'), ((5429, 5456), 'tensorflow.nn.zero_fraction', 'tf.nn.zero_fraction', (['tensor'], {}), '(tensor)\n', (5448, 5456), True, 'import tensorflow as tf\n'), ((6017, 6044), 'glearn.utils.tf_utils.nan_to_num', 'tf_utils.nan_to_num', (['images'], {}), '(images)\n', (6036, 6044), False, 'from glearn.utils import tf_utils\n'), ((6469, 6492), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (6483, 6492), False, 'import matplotlib\n'), ((6825, 6846), 'numpy.nan_to_num', 'np.nan_to_num', (['images'], {}), '(images)\n', (6838, 6846), True, 'import numpy as np\n'), ((6914, 6926), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6924, 6926), False, 'import io\n'), ((6939, 6980), 'matplotlib.pyplot.imsave', 'plt.imsave', (['im_bytes', 'image'], 
{'format': '"""png"""'}), "(im_bytes, image, format='png')\n", (6949, 6980), True, 'import matplotlib.pyplot as plt\n'), ((8133, 8172), 'tensorflow.summary.merge', 'tf.summary.merge', (['self.summaries[query]'], {}), '(self.summaries[query])\n', (8149, 8172), True, 'import tensorflow as tf\n'), ((9596, 9630), 'os.path.abspath', 'os.path.abspath', (['self.summary_path'], {}), '(self.summary_path)\n', (9611, 9630), False, 'import os\n'), ((9737, 9767), 'os.path.join', 'os.path.join', (['path', 'query_name'], {}), '(path, query_name)\n', (9749, 9767), False, 'import os\n'), ((2069, 2113), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_IGN'], {}), '(signal.SIGINT, signal.SIG_IGN)\n', (2082, 2113), False, 'import signal\n'), ((6562, 6583), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (6576, 6583), False, 'import matplotlib\n'), ((9904, 9946), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['path'], {}), '(path, **self.kwargs)\n', (9925, 9946), True, 'import tensorflow as tf\n'), ((10551, 10583), 'tensorflow.Summary', 'tf.Summary', ([], {'value': 'summary_values'}), '(value=summary_values)\n', (10561, 10583), True, 'import tensorflow as tf\n'), ((3068, 3096), 'os.path.join', 'os.path.join', (['root', 'sub_path'], {}), '(root, sub_path)\n', (3080, 3096), False, 'import os\n'), ((6727, 6754), 'numpy.squeeze', 'np.squeeze', (['images'], {'axis': '(-1)'}), '(images, axis=-1)\n', (6737, 6754), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_DisplayNormWishMargEllipsBand [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_DisplayNormWishMargEllipsBand&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=EllipsBandNormWishMarg).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, \
diag, sqrt, r_
from numpy.linalg import det
from numpy.random import multivariate_normal as mvnrnd
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, legend, scatter, ylabel, \
xlabel, title
plt.style.use('seaborn')
from ARPM_utils import save_plot
from PlotTwoDimEllipsoid import PlotTwoDimEllipsoid
from PlotTwoDimBand import PlotTwoDimBand
# input parameters
sigvec = array([[1], [1]]) # dispersion parameters
rho = -0.9 # correlation parameter
nu = 5 # deegrees of freedom
j_ = 10000 # number of simulations
n_points = 1000 # points of the uncertainty band
r = 3 # radius of the ellipsoid
# -
# ## Generate simulations
# +
W_11 = zeros((1, j_))
W_22 = zeros((1, j_))
W_12 = zeros((1, j_))
vec_W = zeros((4, j_))
dets = zeros((1, j_))
traces = zeros((1, j_))
sig2 = np.diagflat(sigvec) @ array([[1, rho], [rho, 1]]) @ np.diagflat(sigvec)
for j in range(j_):
X = mvnrnd(zeros(2), sig2, nu).T
W = X @ X.T
dets[0, j] = det(W)
traces[0, j] = trace(W)
W_11[0, j] = W[0, 0]
W_22[0, j] = W[1, 1]
W_12[0, j] = W[0, 1]
vec_W[:, [j]] = reshape(W, (4, 1))
# expected values of W_11 and W_12
E_11 = nu * sig2[0, 0]
E_12 = nu * sig2[0, 1]
# covariance matrix of W_11 and W_12
V_11 = nu * (sig2[0, 0] * sig2[0, 0] + sig2[0, 0] * sig2[0, 0])
V_12 = nu * (sig2[0, 0] * sig2[1, 1] + sig2[0, 1] * sig2[1, 0])
Cv_11_12 = nu * (sig2[0, 0] * sig2[0, 1] + sig2[0, 1] * sig2[0, 0])
Cv_W11_W12 = array([[V_11, Cv_11_12], [Cv_11_12, V_12]])
# -
# ## Compute normalized variables X_1 and X_2
# +
X_1 = (W_11 - E_11) / sqrt(V_11)
X_2 = (W_12 - E_12) / sqrt(V_12)
X = r_[X_1, X_2]
# expected value and covariance of (X_1, X_2)
E_X = array([[0], [0]])
Sd_W11_W12 = array([[sqrt(V_11)], [sqrt(V_12)]])
Cv_X = np.diagflat(1 / Sd_W11_W12) @ Cv_W11_W12 @ np.diagflat(1 / Sd_W11_W12)
# -
# ## Compute the standard deviations along the directions
# +
theta = linspace(0, 2 * pi, n_points).reshape(1, -1)
u = r_[cos(theta), sin(theta)] # directions
s_u = sqrt(diag(u.T @ Cv_X @ u)) # projected standard deviations
# -
# ## Display the band, the ellipsoid and overlay the scatterplot
# +
figure(figsize=(10, 10))
p1 = PlotTwoDimBand(E_X, s_u, u, r, 'b')
p2 = PlotTwoDimEllipsoid(E_X, Cv_X, r, [], [], 'r')
scatter(X[0], X[1], s=5, c=[.3, .3, .3], marker='*')
legend(['Mean-Cov band', 'Mean-Cov ellipsoid'])
title('Normalized Wishart marginals')
xlabel('$X_1$')
ylabel('$X_2$')
plt.axis('equal');
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| [
"numpy.trace",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"PlotTwoDimBand.PlotTwoDimBand",
"numpy.sin",
"numpy.reshape",
"PlotTwoDimEllipsoid.PlotTwoDimEllipsoid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplot... | [((1094, 1118), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (1107, 1118), True, 'import matplotlib.pyplot as plt\n'), ((1276, 1293), 'numpy.array', 'array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (1281, 1293), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((1547, 1561), 'numpy.zeros', 'zeros', (['(1, j_)'], {}), '((1, j_))\n', (1552, 1561), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((1569, 1583), 'numpy.zeros', 'zeros', (['(1, j_)'], {}), '((1, j_))\n', (1574, 1583), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((1591, 1605), 'numpy.zeros', 'zeros', (['(1, j_)'], {}), '((1, j_))\n', (1596, 1605), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((1614, 1628), 'numpy.zeros', 'zeros', (['(4, j_)'], {}), '((4, j_))\n', (1619, 1628), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((1636, 1650), 'numpy.zeros', 'zeros', (['(1, j_)'], {}), '((1, j_))\n', (1641, 1650), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((1660, 1674), 'numpy.zeros', 'zeros', (['(1, j_)'], {}), '((1, j_))\n', (1665, 1674), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((2328, 2371), 'numpy.array', 'array', (['[[V_11, Cv_11_12], [Cv_11_12, V_12]]'], {}), '([[V_11, Cv_11_12], [Cv_11_12, V_12]])\n', (2333, 2371), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((2564, 2581), 'numpy.array', 'array', (['[[0], [0]]'], {}), '([[0], [0]])\n', (2569, 2581), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((3017, 3041), 'matplotlib.pyplot.figure', 'figure', ([], 
{'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3023, 3041), False, 'from matplotlib.pyplot import figure, legend, scatter, ylabel, xlabel, title\n'), ((3048, 3083), 'PlotTwoDimBand.PlotTwoDimBand', 'PlotTwoDimBand', (['E_X', 's_u', 'u', 'r', '"""b"""'], {}), "(E_X, s_u, u, r, 'b')\n", (3062, 3083), False, 'from PlotTwoDimBand import PlotTwoDimBand\n'), ((3089, 3135), 'PlotTwoDimEllipsoid.PlotTwoDimEllipsoid', 'PlotTwoDimEllipsoid', (['E_X', 'Cv_X', 'r', '[]', '[]', '"""r"""'], {}), "(E_X, Cv_X, r, [], [], 'r')\n", (3108, 3135), False, 'from PlotTwoDimEllipsoid import PlotTwoDimEllipsoid\n'), ((3136, 3191), 'matplotlib.pyplot.scatter', 'scatter', (['X[0]', 'X[1]'], {'s': '(5)', 'c': '[0.3, 0.3, 0.3]', 'marker': '"""*"""'}), "(X[0], X[1], s=5, c=[0.3, 0.3, 0.3], marker='*')\n", (3143, 3191), False, 'from matplotlib.pyplot import figure, legend, scatter, ylabel, xlabel, title\n'), ((3189, 3236), 'matplotlib.pyplot.legend', 'legend', (["['Mean-Cov band', 'Mean-Cov ellipsoid']"], {}), "(['Mean-Cov band', 'Mean-Cov ellipsoid'])\n", (3195, 3236), False, 'from matplotlib.pyplot import figure, legend, scatter, ylabel, xlabel, title\n'), ((3237, 3274), 'matplotlib.pyplot.title', 'title', (['"""Normalized Wishart marginals"""'], {}), "('Normalized Wishart marginals')\n", (3242, 3274), False, 'from matplotlib.pyplot import figure, legend, scatter, ylabel, xlabel, title\n'), ((3275, 3290), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""$X_1$"""'], {}), "('$X_1$')\n", (3281, 3290), False, 'from matplotlib.pyplot import figure, legend, scatter, ylabel, xlabel, title\n'), ((3291, 3306), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""$X_2$"""'], {}), "('$X_2$')\n", (3297, 3306), False, 'from matplotlib.pyplot import figure, legend, scatter, ylabel, xlabel, title\n'), ((3307, 3324), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (3315, 3324), True, 'import matplotlib.pyplot as plt\n'), ((740, 778), 'os.path.abspath', 'path.abspath', 
(['"""../../functions-legacy"""'], {}), "('../../functions-legacy')\n", (752, 778), True, 'import os.path as path\n'), ((1735, 1754), 'numpy.diagflat', 'np.diagflat', (['sigvec'], {}), '(sigvec)\n', (1746, 1754), True, 'import numpy as np\n'), ((1847, 1853), 'numpy.linalg.det', 'det', (['W'], {}), '(W)\n', (1850, 1853), False, 'from numpy.linalg import det\n'), ((1873, 1881), 'numpy.trace', 'trace', (['W'], {}), '(W)\n', (1878, 1881), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((1979, 1997), 'numpy.reshape', 'reshape', (['W', '(4, 1)'], {}), '(W, (4, 1))\n', (1986, 1997), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((2450, 2460), 'numpy.sqrt', 'sqrt', (['V_11'], {}), '(V_11)\n', (2454, 2460), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((2483, 2493), 'numpy.sqrt', 'sqrt', (['V_12'], {}), '(V_12)\n', (2487, 2493), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((2681, 2708), 'numpy.diagflat', 'np.diagflat', (['(1 / Sd_W11_W12)'], {}), '(1 / Sd_W11_W12)\n', (2692, 2708), True, 'import numpy as np\n'), ((2887, 2907), 'numpy.diag', 'diag', (['(u.T @ Cv_X @ u)'], {}), '(u.T @ Cv_X @ u)\n', (2891, 2907), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((1683, 1702), 'numpy.diagflat', 'np.diagflat', (['sigvec'], {}), '(sigvec)\n', (1694, 1702), True, 'import numpy as np\n'), ((1705, 1732), 'numpy.array', 'array', (['[[1, rho], [rho, 1]]'], {}), '([[1, rho], [rho, 1]])\n', (1710, 1732), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((2638, 2665), 'numpy.diagflat', 'np.diagflat', (['(1 / Sd_W11_W12)'], {}), '(1 / Sd_W11_W12)\n', (2649, 2665), True, 'import numpy as np\n'), ((2785, 2814), 'numpy.linspace', 'linspace', (['(0)', '(2 * pi)', 'n_points'], 
{}), '(0, 2 * pi, n_points)\n', (2793, 2814), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((2837, 2847), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (2840, 2847), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((2849, 2859), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (2852, 2859), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((1791, 1799), 'numpy.zeros', 'zeros', (['(2)'], {}), '(2)\n', (1796, 1799), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((2603, 2613), 'numpy.sqrt', 'sqrt', (['V_11'], {}), '(V_11)\n', (2607, 2613), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n'), ((2617, 2627), 'numpy.sqrt', 'sqrt', (['V_12'], {}), '(V_12)\n', (2621, 2627), False, 'from numpy import reshape, trace, array, zeros, cos, sin, pi, linspace, diag, sqrt, r_\n')] |
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cunumeric.array import (
broadcast_shapes,
convert_to_cunumeric_ndarray,
ndarray,
)
from numpy import can_cast as np_can_cast, dtype as np_dtype
_UNARY_DOCSTRING_TEMPLATE = """{}
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple[ndarray or None], optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
numpy.{}
Availability
--------
Multiple GPUs, Multiple CPUs
"""
_BINARY_DOCSTRING_TEMPLATE = """{}
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable
to a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple[ndarray or None], optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
Returns
-------
y : ndarray or scalar
Result.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
numpy.{}
Availability
--------
Multiple GPUs, Multiple CPUs
"""
float_dtypes = ["e", "f", "d"]
complex_dtypes = ["F", "D"]
float_and_complex = float_dtypes + complex_dtypes
integer_dtypes = [
"b",
"B",
"h",
"H",
"i",
"I",
"l",
"L",
"q",
"Q",
]
all_but_boolean = integer_dtypes + float_and_complex
all_dtypes = ["?"] + all_but_boolean
def predicate_types_of(dtypes):
return [ty + "?" for ty in dtypes]
def relation_types_of(dtypes):
return [ty * 2 + "?" for ty in dtypes]
class ufunc:
def _maybe_cast_input(self, arr, to_dtype, casting):
if arr.dtype == to_dtype:
return arr
if not np_can_cast(arr.dtype, to_dtype):
raise TypeError(
f"Cannot cast ufunc '{self._name}' input from "
f"{arr.dtype} to {to_dtype} with casting rule '{casting}'"
)
return arr.astype(to_dtype)
class unary_ufunc(ufunc):
def __init__(self, name, doc, op_code, types, overrides):
self._name = name
self._op_code = op_code
self._types = types
self._resolution_cache = {}
self.__doc__ = doc
self._overrides = overrides
@property
def nin(self):
return 1
@property
def nout(self):
return 1
@property
def types(self):
return [f"{in_ty}->{out_ty}" for in_ty, out_ty in self._types.items()]
@property
def ntypes(self):
return len(self._types)
def _resolve_dtype(self, arr, casting, precision_fixed):
if arr.dtype.char in self._types:
return arr, np_dtype(self._types[arr.dtype.char])
if arr.dtype in self._resolution_cache:
to_dtype = self._resolution_cache[arr.dtype]
arr = arr.astype(to_dtype)
return arr, np_dtype(self._types[to_dtype.char])
chosen = None
if not precision_fixed:
for in_ty in self._types.keys():
if np_can_cast(arr.dtype, in_ty):
chosen = in_ty
break
if chosen is None:
raise TypeError(
f"No matching signature of ufunc {self._name} is found "
"for the given casting"
)
to_dtype = np_dtype(chosen)
self._resolution_cache[arr.dtype] = to_dtype
return arr.astype(to_dtype), np_dtype(self._types[to_dtype.char])
def __call__(
self,
x,
out=None,
where=True,
casting="same_kind",
order="K",
dtype=None,
**kwargs,
):
x = convert_to_cunumeric_ndarray(x)
if out is not None:
if isinstance(out, tuple):
if len(out) != 1:
raise ValueError(
"The 'out' tuple must have exactly one entry "
"per ufunc output"
)
out = out[0]
if not isinstance(out, ndarray):
raise TypeError("return arrays must be of ArrayType")
# Check if the broadcasting is possible
broadcast_shapes(x.shape, out.shape)
if not isinstance(where, bool) or not where:
raise NotImplementedError(
"the 'where' keyword is not yet supported"
)
# If no dtype is given to prescribe the accuracy, we use the dtype
# of the input
precision_fixed = False
if dtype is not None:
# If a dtype is given, that determines the precision
# of the computation.
precision_fixed = True
x = self._maybe_cast_input(x, dtype, casting)
# Resolve the dtype to use for the computation and cast the input
# if necessary. If the dtype is already fixed by the caller,
# the dtype must be one of the dtypes supported by this operation.
x, res_dtype = self._resolve_dtype(x, casting, precision_fixed)
if out is None:
result = ndarray(shape=x.shape, dtype=res_dtype, inputs=(x, where))
out = result
else:
if out.dtype != res_dtype:
if not np_can_cast(res_dtype, out.dtype):
raise TypeError(
f"Cannot cast ufunc '{self._name}' output from "
f"{res_dtype} to {out.dtype} with casting rule "
f"'{casting}'"
)
result = ndarray(
shape=out.shape, dtype=res_dtype, inputs=(x, where)
)
else:
result = out
op_code = self._overrides.get(x.dtype.char, self._op_code)
result._thunk.unary_op(op_code, x._thunk, where, ())
if out is not result:
out._thunk.convert(result._thunk, warn=False)
return out
def __repr__(self):
return f"<ufunc {self._name}>"
class binary_ufunc(ufunc):
def __init__(self, name, doc, op_code, types, red_code=None):
self._name = name
self._op_code = op_code
self._types = types
self._resolution_cache = {}
self._red_code = red_code
self.__doc__ = doc
@property
def nin(self):
return 2
@property
def nout(self):
return 1
@property
def types(self):
return [
f"{''.join(in_tys)}->{out_ty}"
for in_tys, out_ty in self._types.items()
]
@property
def ntypes(self):
return len(self._types)
def _resolve_dtype(self, arrs, casting, precision_fixed):
common_dtype = ndarray.find_common_type(*arrs)
key = (common_dtype.char, common_dtype.char)
if key in self._types:
arrs = [arr.astype(common_dtype) for arr in arrs]
return arrs, np_dtype(self._types[key])
if key in self._resolution_cache:
to_dtypes = self._resolution_cache[key]
arrs = [
arr.astype(to_dtype) for arr, to_dtype in zip(arrs, to_dtypes)
]
return arrs, np_dtype(self._types[to_dtypes])
chosen = None
if not precision_fixed:
for in_dtypes in self._types.keys():
if all(
np_can_cast(arr.dtype, to_dtype)
for arr, to_dtype in zip(arrs, in_dtypes)
):
chosen = in_dtypes
break
if chosen is None:
raise TypeError(
f"No matching signature of ufunc {self._name} is found "
"for the given casting"
)
self._resolution_cache[key] = chosen
arrs = [arr.astype(to_dtype) for arr, to_dtype in zip(arrs, chosen)]
return arrs, np_dtype(self._types[chosen])
def __call__(
self,
x1,
x2,
out=None,
where=True,
casting="same_kind",
order="K",
dtype=None,
**kwargs,
):
arrs = [convert_to_cunumeric_ndarray(arr) for arr in (x1, x2)]
if out is not None:
if isinstance(out, tuple):
if len(out) != 1:
raise ValueError(
"The 'out' tuple must have exactly one entry "
"per ufunc output"
)
out = out[0]
if not isinstance(out, ndarray):
raise TypeError("return arrays must be of ArrayType")
# Check if the broadcasting is possible
out_shape = broadcast_shapes(
arrs[0].shape, arrs[1].shape, out.shape
)
else:
# Check if the broadcasting is possible
out_shape = broadcast_shapes(arrs[0].shape, arrs[1].shape)
if not isinstance(where, bool) or not where:
raise NotImplementedError(
"the 'where' keyword is not yet supported"
)
# If no dtype is given to prescribe the accuracy, we use the dtype
# of the input
precision_fixed = False
if dtype is not None:
# If a dtype is given, that determines the precision
# of the computation.
precision_fixed = True
arrs = [
self._maybe_cast_input(arr, dtype, casting) for arr in arrs
]
# Resolve the dtype to use for the computation and cast the input
# if necessary. If the dtype is already fixed by the caller,
# the dtype must be one of the dtypes supported by this operation.
arrs, res_dtype = self._resolve_dtype(arrs, casting, precision_fixed)
if out is None:
result = ndarray(
shape=out_shape, dtype=res_dtype, inputs=(*arrs, where)
)
out = result
else:
if out.dtype != res_dtype:
if not np_can_cast(res_dtype, out.dtype):
raise TypeError(
f"Cannot cast ufunc '{self._name}' output from "
f"{res_dtype} to {out.dtype} with casting rule "
f"'{casting}'"
)
result = ndarray(
shape=out.shape, dtype=res_dtype, inputs=(*arrs, where)
)
else:
result = out
x1, x2 = arrs
result._thunk.binary_op(self._op_code, x1._thunk, x2._thunk, where, ())
if out is not result:
out._thunk.convert(result._thunk, warn=False)
return out
def reduce(
self,
array,
axis=0,
dtype=None,
out=None,
keepdims=False,
initial=None,
where=True,
):
"""
reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no
value>, where=True)
Reduces `array`'s dimension by one, by applying ufunc along one axis.
For example, add.reduce() is equivalent to sum().
Parameters
----------
array : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed. The default
(`axis` = 0) is perform a reduction over the first dimension of the
input array. `axis` may be negative, in which case it counts from
the last to the first axis.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults to
the data-type of the output array if this is provided, or the
data-type
of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or
None, a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original `array`.
initial : scalar, optional
The value with which to start the reduction. If the ufunc has no
identity or the dtype is object, this defaults to None - otherwise
it defaults to ufunc.identity. If ``None`` is given, the first
element of the reduction is used, and an error is thrown if the
reduction is empty.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions of
`array`, and selects elements to include in the reduction. Note
that for ufuncs like ``minimum`` that do not have an identity
defined, one has to pass in also ``initial``.
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
See Also
--------
numpy.ufunc.reduce
"""
array = convert_to_cunumeric_ndarray(array)
if self._red_code is None:
raise NotImplementedError(
f"reduction for {self} is not yet implemented"
)
if out is not None:
raise NotImplementedError(
"reduction for {self} does not take an `out` argument"
)
if not isinstance(where, bool) or not where:
raise NotImplementedError(
"the 'where' keyword is not yet supported"
)
# NumPy seems to be using None as the default axis value for scalars
if array.ndim == 0 and axis == 0:
axis = None
# TODO: Unary reductions still need to be refactored
return array._perform_unary_reduction(
self._red_code,
array,
axis=axis,
dtype=dtype,
# out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
def __repr__(self):
return f"<ufunc {self._name}>"
def _parse_unary_ufunc_type(ty):
if len(ty) == 1:
return (ty, ty)
else:
if len(ty) > 2:
raise NotImplementedError("Unary ufunc must have only one output")
return (ty[0], ty[1])
def create_unary_ufunc(summary, name, op_code, types, overrides={}):
doc = _UNARY_DOCSTRING_TEMPLATE.format(summary, name)
types = dict(_parse_unary_ufunc_type(ty) for ty in types)
return unary_ufunc(name, doc, op_code, types, overrides)
def _parse_binary_ufunc_type(ty):
if len(ty) == 1:
return ((ty, ty), ty)
else:
if len(ty) != 3:
raise NotImplementedError(
"Binary ufunc must have two inputs and one output"
)
elif ty[0] != ty[1]:
raise NotImplementedError(
"Operands of binary ufunc must have the same dtype"
)
return ((ty[0], ty[1]), ty[2])
def create_binary_ufunc(summary, name, op_code, types, red_code=None):
    """Build a binary ufunc object from a summary line and type codes.

    ``types`` is an iterable of one- or three-character type codes (see
    ``_parse_binary_ufunc_type``); ``red_code`` optionally names the
    reduction operation backing ``ufunc.reduce``.
    """
    type_map = dict(_parse_binary_ufunc_type(code) for code in types)
    docstring = _BINARY_DOCSTRING_TEMPLATE.format(summary, name)
    return binary_ufunc(name, docstring, op_code, type_map, red_code)
| [
"cunumeric.array.ndarray",
"numpy.can_cast",
"cunumeric.array.broadcast_shapes",
"cunumeric.array.convert_to_cunumeric_ndarray",
"numpy.dtype",
"cunumeric.array.ndarray.find_common_type"
] | [((5350, 5366), 'numpy.dtype', 'np_dtype', (['chosen'], {}), '(chosen)\n', (5358, 5366), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((5682, 5713), 'cunumeric.array.convert_to_cunumeric_ndarray', 'convert_to_cunumeric_ndarray', (['x'], {}), '(x)\n', (5710, 5713), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((8701, 8732), 'cunumeric.array.ndarray.find_common_type', 'ndarray.find_common_type', (['*arrs'], {}), '(*arrs)\n', (8725, 8732), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((15294, 15329), 'cunumeric.array.convert_to_cunumeric_ndarray', 'convert_to_cunumeric_ndarray', (['array'], {}), '(array)\n', (15322, 15329), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((3748, 3780), 'numpy.can_cast', 'np_can_cast', (['arr.dtype', 'to_dtype'], {}), '(arr.dtype, to_dtype)\n', (3759, 3780), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((5458, 5494), 'numpy.dtype', 'np_dtype', (['self._types[to_dtype.char]'], {}), '(self._types[to_dtype.char])\n', (5466, 5494), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((6200, 6236), 'cunumeric.array.broadcast_shapes', 'broadcast_shapes', (['x.shape', 'out.shape'], {}), '(x.shape, out.shape)\n', (6216, 6236), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((7093, 7151), 'cunumeric.array.ndarray', 'ndarray', ([], {'shape': 'x.shape', 'dtype': 'res_dtype', 'inputs': '(x, where)'}), '(shape=x.shape, dtype=res_dtype, inputs=(x, where))\n', (7100, 7151), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((9855, 9884), 'numpy.dtype', 'np_dtype', (['self._types[chosen]'], {}), '(self._types[chosen])\n', (9863, 9884), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((10089, 10122), 
'cunumeric.array.convert_to_cunumeric_ndarray', 'convert_to_cunumeric_ndarray', (['arr'], {}), '(arr)\n', (10117, 10122), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((10642, 10699), 'cunumeric.array.broadcast_shapes', 'broadcast_shapes', (['arrs[0].shape', 'arrs[1].shape', 'out.shape'], {}), '(arrs[0].shape, arrs[1].shape, out.shape)\n', (10658, 10699), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((10820, 10866), 'cunumeric.array.broadcast_shapes', 'broadcast_shapes', (['arrs[0].shape', 'arrs[1].shape'], {}), '(arrs[0].shape, arrs[1].shape)\n', (10836, 10866), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((11782, 11846), 'cunumeric.array.ndarray', 'ndarray', ([], {'shape': 'out_shape', 'dtype': 'res_dtype', 'inputs': '(*arrs, where)'}), '(shape=out_shape, dtype=res_dtype, inputs=(*arrs, where))\n', (11789, 11846), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((4691, 4728), 'numpy.dtype', 'np_dtype', (['self._types[arr.dtype.char]'], {}), '(self._types[arr.dtype.char])\n', (4699, 4728), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((4898, 4934), 'numpy.dtype', 'np_dtype', (['self._types[to_dtype.char]'], {}), '(self._types[to_dtype.char])\n', (4906, 4934), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((5054, 5083), 'numpy.can_cast', 'np_can_cast', (['arr.dtype', 'in_ty'], {}), '(arr.dtype, in_ty)\n', (5065, 5083), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((7557, 7617), 'cunumeric.array.ndarray', 'ndarray', ([], {'shape': 'out.shape', 'dtype': 'res_dtype', 'inputs': '(x, where)'}), '(shape=out.shape, dtype=res_dtype, inputs=(x, where))\n', (7564, 7617), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((8905, 8931), 
'numpy.dtype', 'np_dtype', (['self._types[key]'], {}), '(self._types[key])\n', (8913, 8931), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((9166, 9198), 'numpy.dtype', 'np_dtype', (['self._types[to_dtypes]'], {}), '(self._types[to_dtypes])\n', (9174, 9198), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((12282, 12346), 'cunumeric.array.ndarray', 'ndarray', ([], {'shape': 'out.shape', 'dtype': 'res_dtype', 'inputs': '(*arrs, where)'}), '(shape=out.shape, dtype=res_dtype, inputs=(*arrs, where))\n', (12289, 12346), False, 'from cunumeric.array import broadcast_shapes, convert_to_cunumeric_ndarray, ndarray\n'), ((7253, 7286), 'numpy.can_cast', 'np_can_cast', (['res_dtype', 'out.dtype'], {}), '(res_dtype, out.dtype)\n', (7264, 7286), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((11978, 12011), 'numpy.can_cast', 'np_can_cast', (['res_dtype', 'out.dtype'], {}), '(res_dtype, out.dtype)\n', (11989, 12011), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n'), ((9347, 9379), 'numpy.can_cast', 'np_can_cast', (['arr.dtype', 'to_dtype'], {}), '(arr.dtype, to_dtype)\n', (9358, 9379), True, 'from numpy import can_cast as np_can_cast, dtype as np_dtype\n')] |
# Copyright (c) 2016 <NAME> <<EMAIL>>
# MIT license
"""
Design of the PeakDetector class
Parameters
----------
img_mat: np.ndarray
Two dimensional image matrix
egfilter:EGFilter object
The elliptical Gaussian filter
Methods
-------
smooth:
Convolve the img_mat with egfilter
locate_peaks:
Detect peaks and output peaklist
save_peaks:
Save peaks to csv files
References
----------
[1] Multidimensional convolution
http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.
convolve.html#scipy.ndimage.convolve
[2] scipy.ndimage.imread
http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.imread.
html#scipy.ndimage.imread
"""
from scipy.ndimage import convolve
import numpy as np
from .pointsource import PointSource
from ..utils import utils
# Definition of the PeakDetector class
class PeakDetector():
    """Detect candidate point sources in a 2D image.

    The image is first smoothed (matched-filtered) with the elliptical
    Gaussian PSF, then local maxima above a configured threshold are
    extracted iteratively, zeroing out the neighbourhood of each detected
    peak so it is reported only once.
    """
    def __init__(self,Configs,imgmat,egfilter):
        """Initialization of parameters.

        Parameters
        ----------
        Configs :
            Configuration object; must provide ``getn_value`` (see
            ``_get_configs``).
        imgmat : np.ndarray
            Two dimensional image matrix.
        egfilter :
            Elliptical Gaussian filter object; must provide ``get_filter``,
            ``scale_x``/``scale_y``, ``radius_x``/``radius_y`` and ``angle``.
        """
        self.Configs = Configs
        self.imgmat = imgmat
        self.egfilter = egfilter
        self.peaklist = []  # filled by locate_peaks(): [peaks, cord_x, cord_y]
        self._get_configs()
    def _get_configs(self):
        """Read the peak-detection threshold from the Configs object."""
        self.threshold = self.Configs.getn_value("peaks/threshold")
    def smooth(self):
        """Convolve the image with the elliptical-Gaussian PSF.

        Matched filtering enhances the significance of point sources before
        peak detection.  The result is stored in ``self.imgsmooth``.
        """
        psf = self.egfilter.get_filter()
        # Convolve; pixels outside the image border are treated as zero.
        imgsmooth = convolve(self.imgmat,psf,mode='constant',cval=0.0)
        self.imgsmooth = imgsmooth
    def locate_peaks(self):
        """Iteratively extract local maxima above ``self.threshold``.

        The smoothed image is min-max normalised to [0, 1]; the brightest
        remaining pixel is taken as a peak and its neighbourhood is zeroed
        out, until no pixel exceeds the threshold.  Results are stored in
        ``self.peaklist`` as ``[peak_values, x_coords, y_coords]``.
        """
        # Smooth
        self.smooth()
        # Init
        peaks = []
        cord_x = []
        cord_y = []
        rows,cols = self.imgsmooth.shape
        # Half-width of the exclusion window placed around each found peak.
        self.neighbors = max(self.egfilter.scale_x,self.egfilter.scale_y)
        # Normalize to [0, 1] so the threshold is independent of image scale.
        max_value = self.imgsmooth.max()
        min_value = self.imgsmooth.min()
        imgnorm = (self.imgsmooth - min_value)/(max_value-min_value)
        self.imgnorm = imgnorm.copy()
        # Find peaks
        flag = 1
        while flag == 1:
            peak_max = imgnorm.max()
            if peak_max >= self.threshold:
                # All pixels tied at the current maximum are accepted.
                peak_y,peak_x = np.where(imgnorm==peak_max)
                for i in range(len(peak_x)):
                    # Judge and fill: zero the window around the peak,
                    # clipped to the image bounds, so it is found only once.
                    mask_x = np.arange(peak_x[i]-self.neighbors,peak_x[i]+self.neighbors+1,1)
                    # NOTE(review): mask_y starts one pixel earlier than
                    # mask_x (extra -1) — looks like an off-by-one; confirm.
                    mask_y = np.arange(peak_y[i]-1-self.neighbors,peak_y[i]+self.neighbors+1,1)
                    x_b = np.where(mask_x>=0)[0][0]
                    # NOTE(review): <=cols / <=rows (rather than < cols /
                    # < rows) admit the out-of-range index; harmless only
                    # because the values are used as exclusive slice stops.
                    x_e = np.where(mask_x<=cols)[0][-1]
                    y_b = np.where(mask_y>=0)[0][0]
                    y_e = np.where(mask_y<=rows)[0][-1]
                    imgnorm[mask_y[y_b]:mask_y[y_e],mask_x[x_b]:mask_x[x_e]] *= 0.0
                    # append
                    peaks.append(peak_max)
                    cord_x.append(peak_x[i])
                    cord_y.append(peak_y[i])
            else:
                flag = 0
        self.peaklist = [peaks,cord_x,cord_y]
    def get_pslist(self):
        """Build a potential point-source list from the detected peaks.

        Returns
        -------
        np.ndarray
            One row per detected peak: the record returned by
            ``PointSource.get_ps()`` (exact layout defined in the
            pointsource module) with its signal-to-noise ratio appended.
        """
        # Get peaklist
        self.locate_peaks()
        # Init
        pslist = []
        numps = len(self.peaklist[0])
        # Gen pslist
        radius_x = self.egfilter.radius_x
        radius_y = self.egfilter.radius_y
        angle = self.egfilter.angle / np.pi * 180  # radians -> degrees
        for i in range(numps):
            ps = PointSource(core=(self.peaklist[1][i],self.peaklist[2][i]),
                        axis=(radius_x,radius_y),peak=self.peaklist[0][i],
                        ang = angle)
            # SNR is measured on the original (unsmoothed) image.
            snr = ps.get_snr(self.imgmat)
            ps_temp = ps.get_ps()
            ps_temp.append(snr)
            pslist.append(ps_temp)
        return np.array(pslist)
"numpy.where",
"numpy.array",
"scipy.ndimage.convolve",
"numpy.arange"
] | [((1492, 1545), 'scipy.ndimage.convolve', 'convolve', (['self.imgmat', 'psf'], {'mode': '"""constant"""', 'cval': '(0.0)'}), "(self.imgmat, psf, mode='constant', cval=0.0)\n", (1500, 1545), False, 'from scipy.ndimage import convolve\n'), ((3880, 3896), 'numpy.array', 'np.array', (['pslist'], {}), '(pslist)\n', (3888, 3896), True, 'import numpy as np\n'), ((2276, 2305), 'numpy.where', 'np.where', (['(imgnorm == peak_max)'], {}), '(imgnorm == peak_max)\n', (2284, 2305), True, 'import numpy as np\n'), ((2415, 2487), 'numpy.arange', 'np.arange', (['(peak_x[i] - self.neighbors)', '(peak_x[i] + self.neighbors + 1)', '(1)'], {}), '(peak_x[i] - self.neighbors, peak_x[i] + self.neighbors + 1, 1)\n', (2424, 2487), True, 'import numpy as np\n'), ((2509, 2585), 'numpy.arange', 'np.arange', (['(peak_y[i] - 1 - self.neighbors)', '(peak_y[i] + self.neighbors + 1)', '(1)'], {}), '(peak_y[i] - 1 - self.neighbors, peak_y[i] + self.neighbors + 1, 1)\n', (2518, 2585), True, 'import numpy as np\n'), ((2602, 2623), 'numpy.where', 'np.where', (['(mask_x >= 0)'], {}), '(mask_x >= 0)\n', (2610, 2623), True, 'import numpy as np\n'), ((2654, 2678), 'numpy.where', 'np.where', (['(mask_x <= cols)'], {}), '(mask_x <= cols)\n', (2662, 2678), True, 'import numpy as np\n'), ((2710, 2731), 'numpy.where', 'np.where', (['(mask_y >= 0)'], {}), '(mask_y >= 0)\n', (2718, 2731), True, 'import numpy as np\n'), ((2762, 2786), 'numpy.where', 'np.where', (['(mask_y <= rows)'], {}), '(mask_y <= rows)\n', (2770, 2786), True, 'import numpy as np\n')] |
import json
import copy
from .train import TrainingExperiment
from .. import strategies
from ..metrics import model_size, flops
from ..util import printc
import numpy as np
import torch
import time
from shrinkbench.plot import df_from_results, param_label
from PySSM import fix_seed_PySSM
import shutil
import os
import torch.nn.utils.prune as prune
from ..pruning.structured_utils import get_module
class StructuredPruningExperiment(TrainingExperiment):
    """Structured (channel) pruning experiment with optional fine-tuning.

    Extends ``TrainingExperiment``: loads a (possibly pretrained) model,
    prunes a fraction of its prunable channels with the requested strategy,
    records size / FLOPs / accuracy metrics before fine-tuning, and then
    optionally fine-tunes the pruned model for each requested fraction.
    """

    def __init__(self,
                 dataset,
                 model,
                 strategy,
                 fractions,  # fraction or list of fractions of prunable channels to keep
                 reweight=False,
                 bias=False,
                 structure=None,
                 prune_layers=None,
                 nbatches=1,
                 prune_kwargs=None,  # include 'onelayer_results_dir' to select perlayer fractions accordingly
                 seed=42,
                 path=None,
                 rootdir=None,
                 dl_kwargs=None,
                 train_kwargs=None,
                 verif=False,  # use verification set as validation set if True
                 debug=False,
                 pretrained=True,
                 pruned_path=None,  # if not None, load pruned model from pruned_path instead of pruning
                 finetune=False,  # allow fine tuning even if fraction = 1, and use all training set (empty verif set)
                 limited_data=False,  # only use nbatches of data for both pruning and finetuning if True
                 resume=None,
                 resume_optim=False,
                 save_freq=10):
        """Set up the experiment, compute pruning and log baseline sizes.

        Container parameters default to ``None`` and are copied locally:
        the original ``dict()`` / ``list()`` defaults were shared across
        calls, and ``dl_kwargs`` was mutated below
        (``dl_kwargs['nbatches'] = nbatches``), leaking state between
        experiment instances.  Callers passing explicit containers are
        unaffected.
        """
        prune_layers = list(prune_layers) if prune_layers is not None else []
        prune_kwargs = dict(prune_kwargs) if prune_kwargs is not None else {}
        dl_kwargs = dict(dl_kwargs) if dl_kwargs is not None else {}
        train_kwargs = dict(train_kwargs) if train_kwargs is not None else {}
        self.fix_seed(seed)
        fix_seed_PySSM(seed)
        if limited_data:
            # Restrict the dataloaders to nbatches; the copy above keeps the
            # caller's dict (and the default) untouched.
            dl_kwargs['nbatches'] = nbatches
        super().__init__(dataset, model, seed, path, dl_kwargs, train_kwargs, debug, pretrained, finetune, resume,
                         resume_optim, save_freq)
        # Record the dense model size and FLOPs as the pruning baseline.
        size, size_nz = model_size(self.model)
        self.size_nz_orig = size_nz
        self.to_device()
        x, y = next(iter(self.val_dl))
        x, y = x.to(self.device), y.to(self.device)
        ops, ops_nz = flops(self.model, x)
        self.ops_nz_orig = ops_nz
        print("compression ratio before pruning = ", size / size_nz)
        print("speedup before pruning = ", ops / ops_nz)
        if np.isscalar(fractions):
            fractions = [fractions]
        self.add_params(strategy=strategy, fractions=fractions, reweight=reweight, bias=bias, structure=structure,
                        prune_layers=prune_layers, nbatches=nbatches, prune_kwargs=prune_kwargs, verif=verif,
                        finetune=finetune, pruned_path=pruned_path)
        self.pruned_path = pruned_path
        self.build_pruning(strategy, fractions, reweight, bias, structure, prune_layers, nbatches, **prune_kwargs)
        self.verif = verif
        self.path = path
        if rootdir is not None:
            self.rootdir = rootdir
        self.save_freq = save_freq

    def build_pruning(self, strategy, fractions, reweight, bias, structure, prune_layers, nbatches, **prune_kwargs):
        """Instantiate the pruning strategy on nbatches of training data.

        Skipped entirely when ``self.pruned_path`` is set (a pre-pruned
        checkpoint will be loaded in ``run`` instead).
        """
        if self.pruned_path is None:
            constructor = getattr(strategies, strategy)
            # Collect nbatches of (x, y) pairs for the data-driven strategy.
            for i, (x, y) in zip(range(nbatches), self.train_dl):  # self.prune_dl
                if i == 0:
                    xs, ys = ([x], [y])
                else:
                    xs = xs + [x]
                    ys = ys + [y]
            if 'onelayer_results_dir' in prune_kwargs:
                # Select per-layer fractions from previously recorded
                # one-layer-at-a-time pruning results.
                if prune_kwargs['onelayer_results_dir'] is not None:
                    df = df_from_results(prune_kwargs['onelayer_results_dir'], structured=True)
                    strategy_name = strategy + ''.join(sorted([param_label(k, v) if k not in ['sequential', 'asymmetric', 'epsilon']
                                                               else '' for k, v in prune_kwargs.items()]))
                    onelayer_results_df = df[(df['strategy'] == strategy_name) & (df['reweight'] == reweight)]  # (df['structure'] == structure)
                else:
                    onelayer_results_df = None
                del prune_kwargs['onelayer_results_dir']
                prune_kwargs['onelayer_results_df'] = onelayer_results_df
            if 'full_data' in prune_kwargs:
                if prune_kwargs['full_data']:
                    prune_kwargs['train_dl'] = self.train_dl
            printc(f"Pruning model with {strategy}", color='GREEN')
            since = time.perf_counter()
            self.pruning = constructor(self.model, xs, ys, fractions=fractions, reweight=reweight, bias=bias,
                                       structure=structure, prune_layers=prune_layers, **prune_kwargs)
            self.pruning_time = (time.perf_counter() - since)/len(fractions)  # assuming all fractions required similar time..

    def run(self):
        """Apply (or load) the pruning mask and fine-tune, per fraction."""
        for fraction in self.params['fractions']:
            # Deep-copy only when multiple fractions share this experiment.
            child = copy.deepcopy(self) if len(self.params['fractions']) > 1 else self
            del child.params['fractions']
            child.add_params(fraction=fraction)
            if self.pruned_path is None:
                since = time.perf_counter()
                child.pruning.apply(fraction)
                child.pruning_time += time.perf_counter() - since
                printc(f"Masked model with fraction={fraction} of prunable channels kept", color='GREEN')
            else:
                model_state = torch.load(f'{self.pruned_path}/checkpoints/checkpoint--1.pt', map_location=self.device)['model_state_dict']
                # generate pruning parametrization
                for key in model_state.keys():
                    if key.endswith("_orig"):
                        mod, pname = key[:-5].rsplit('.', 1)
                        prune.identity(get_module(child.model, mod), pname)
                child.model.load_state_dict(model_state)
                printc(f"Loaded masked model with fraction={fraction} of prunable channels kept", color='GREEN')
            child.freeze()
            printc(f"Running {repr(child)}", color='YELLOW')
            child.to_device()
            child.build_logging(child.train_metrics, child.path)
            if self.pruned_path is None:
                child.save_metrics()
            else:
                assert os.path.isfile(f'{self.pruned_path}/metrics.json'), "missing metrics"
                shutil.copy(f'{self.pruned_path}/metrics.json', f'{self.path}/metrics.json')
                # log validation loss and accuracy before finetuning in logs file
                since = time.perf_counter()
                _ = self.run_epoch(False, opt=False, epoch=-1, verif=self.verif)
                self.log(timestamp=time.perf_counter() - since)
                self.log_epoch(-1)
            if fraction < 1 or self.params['finetune']:
                child.run_epochs()

    def save_metrics(self):
        """Write pruning metrics (json) and the per-layer mask summary (csv)."""
        self.metrics = self.pruning_metrics()
        with open(self.path / 'metrics.json', 'w') as f:
            json.dump(self.metrics, f, indent=4)
        printc(json.dumps(self.metrics, indent=4), color='GRASS')
        summary = self.pruning.summary(self.params['fraction'])
        summary_path = self.path / 'masks_summary.csv'
        summary.to_csv(summary_path)
        print(summary)

    def pruning_metrics(self):
        """Collect size, FLOPs and pre-finetuning accuracy metrics.

        Also checkpoints the pruned model (epoch -1) before fine-tuning.
        """
        metrics = {}
        # Time
        metrics['pruning_time'] = self.pruning_time
        # Model Size
        size, size_nz = model_size(self.model)
        metrics['size'] = size
        metrics['size_nz_orig'] = self.size_nz_orig
        metrics['size_nz'] = size_nz
        metrics['compression_ratio'] = self.size_nz_orig / size_nz
        x, y = next(iter(self.val_dl))
        x, y = x.to(self.device), y.to(self.device)
        # FLOPS
        ops, ops_nz = flops(self.model, x)
        metrics['flops'] = ops
        metrics['flops_nz_orig'] = self.ops_nz_orig
        metrics['flops_nz'] = ops_nz
        metrics['theoretical_speedup'] = self.ops_nz_orig / ops_nz
        # Training and validation loss and accuracy
        since = time.perf_counter()  # in sec
        for train in [False]:
            prefix = 'train' if train else 'val'
            loss, acc1, acc5 = self.run_epoch(train, opt=False, epoch=-1, verif=self.verif)
            metrics[f'{prefix}_loss'] = loss
            metrics[f'{prefix}_acc1'] = acc1
            metrics[f'{prefix}_acc5'] = acc5
        self.log(timestamp=time.perf_counter() - since)
        # checkpoint pruned model before fine-tuning
        self.checkpoint(-1)
        self.log_epoch(-1)
        return metrics
| [
"numpy.isscalar",
"torch.load",
"json.dumps",
"time.perf_counter",
"shrinkbench.plot.param_label",
"os.path.isfile",
"shutil.copy",
"copy.deepcopy",
"shrinkbench.plot.df_from_results",
"json.dump",
"PySSM.fix_seed_PySSM"
] | [((1750, 1770), 'PySSM.fix_seed_PySSM', 'fix_seed_PySSM', (['seed'], {}), '(seed)\n', (1764, 1770), False, 'from PySSM import fix_seed_PySSM\n'), ((2421, 2443), 'numpy.isscalar', 'np.isscalar', (['fractions'], {}), '(fractions)\n', (2432, 2443), True, 'import numpy as np\n'), ((8172, 8191), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8189, 8191), False, 'import time\n'), ((4580, 4599), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4597, 4599), False, 'import time\n'), ((7105, 7141), 'json.dump', 'json.dump', (['self.metrics', 'f'], {'indent': '(4)'}), '(self.metrics, f, indent=4)\n', (7114, 7141), False, 'import json\n'), ((7157, 7191), 'json.dumps', 'json.dumps', (['self.metrics'], {'indent': '(4)'}), '(self.metrics, indent=4)\n', (7167, 7191), False, 'import json\n'), ((5030, 5049), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (5043, 5049), False, 'import copy\n'), ((5252, 5271), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5269, 5271), False, 'import time\n'), ((6401, 6451), 'os.path.isfile', 'os.path.isfile', (['f"""{self.pruned_path}/metrics.json"""'], {}), "(f'{self.pruned_path}/metrics.json')\n", (6415, 6451), False, 'import os\n'), ((6487, 6563), 'shutil.copy', 'shutil.copy', (['f"""{self.pruned_path}/metrics.json"""', 'f"""{self.path}/metrics.json"""'], {}), "(f'{self.pruned_path}/metrics.json', f'{self.path}/metrics.json')\n", (6498, 6563), False, 'import shutil\n'), ((6670, 6689), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6687, 6689), False, 'import time\n'), ((3684, 3754), 'shrinkbench.plot.df_from_results', 'df_from_results', (["prune_kwargs['onelayer_results_dir']"], {'structured': '(True)'}), "(prune_kwargs['onelayer_results_dir'], structured=True)\n", (3699, 3754), False, 'from shrinkbench.plot import df_from_results, param_label\n'), ((4846, 4865), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4863, 4865), False, 'import time\n'), ((5356, 5375), 
'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5373, 5375), False, 'import time\n'), ((5538, 5631), 'torch.load', 'torch.load', (['f"""{self.pruned_path}/checkpoints/checkpoint--1.pt"""'], {'map_location': 'self.device'}), "(f'{self.pruned_path}/checkpoints/checkpoint--1.pt', map_location\n =self.device)\n", (5548, 5631), False, 'import torch\n'), ((8536, 8555), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8553, 8555), False, 'import time\n'), ((6806, 6825), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6823, 6825), False, 'import time\n'), ((3818, 3835), 'shrinkbench.plot.param_label', 'param_label', (['k', 'v'], {}), '(k, v)\n', (3829, 3835), False, 'from shrinkbench.plot import df_from_results, param_label\n')] |
from pprint import pprint
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import MinMaxScaler
class ExtraTreeT:
    """Extra-Trees classifier for Wikipedia article quality ratings.

    Loads train/test CSVs, converts the categorical ``rating`` column into
    an ordinal ``score`` (Stub=0 ... FA=5), and fits/evaluates an
    ``ExtraTreesClassifier`` on a fixed set of text-statistics features.
    """

    # Ordinal encoding of the article quality scale, worst to best.
    _SCORE_MAP = {'Stub': 0, 'Start': 1, 'C': 2, 'B': 3, 'GA': 4, 'FA': 5}

    def score_to_numeric(self, x):
        """Map a quality rating label to its ordinal score (None if unknown)."""
        return self._SCORE_MAP.get(x)

    def __init__(self, args):
        """Initialize from ``args = (test_data_path, train_data_path)``."""
        self.test_data_path = args[0]
        self.train_data_path = args[1]
        self.train = pd.read_csv(self.train_data_path, low_memory=False)
        self.test = pd.read_csv(self.test_data_path, low_memory=False)
        self.target_names = self.train['rating'].unique()
        self.features = ['infonoisescore', 'logcontentlength', 'logreferences', 'logpagelinks', 'numimageslength',
                         'num_citetemplates', 'lognoncitetemplates',
                         'num_categories', 'hasinfobox', 'lvl2headings', 'lvl3heading', 'number_chars', 'number_words',
                         'number_types', 'number_sentences', 'number_syllables',
                         'number_polysyllable_words', 'difficult_words', 'number_words_longer_4',
                         'number_words_longer_6', 'number_words_longer_10',
                         'number_words_longer_longer_13', 'flesch_reading_ease', 'flesch_kincaid_grade_level',
                         'coleman_liau_index',
                         'gunning_fog_index', 'smog_index', 'ari_index', 'lix_index',
                         'dale_chall_score', 'linsear_write_formula', 'grammar']
        self.train['score'] = self.train['rating'].apply(self.score_to_numeric)
        self.test['score'] = self.test['rating'].apply(self.score_to_numeric)
        self.classes = ['Stub', 'Start', 'C', 'B', 'GA', 'FA']
        # Min-max scaled copies of the feature matrices (fit on train only).
        scaler = MinMaxScaler(feature_range=(0, 1))
        scaler.fit(self.train[self.features])
        self.train_mm = scaler.transform(self.train[self.features])
        self.test_mm = scaler.transform(self.test[self.features])

    def _report_predictions(self, preds):
        """Print a confusion table and accuracy of label predictions vs truth."""
        print(pd.crosstab(self.test['rating'], preds, rownames=['Actual Species'], colnames=['predicted']))
        print('Classification accuracy without selecting features: {:.3f}'
              .format(accuracy_score(self.test['rating'], preds)))

    def hyperTune(self):
        """Randomized hyper-parameter search over an ExtraTreesClassifier.

        Returns the fitted ``RandomizedSearchCV`` object and prints the best
        parameters plus test-set performance.
        """
        # Number of trees in random forest
        n_estimators = [int(x) for x in np.linspace(start=10, stop=60, num=11)]
        # Maximum number of levels in tree
        max_depth = [int(x) for x in np.linspace(5, 30, num=11)]
        # Minimum number of samples required to split a node
        min_samples_split = [1.0, 2, 3]
        # Minimum number of samples required at each leaf node
        min_samples_leaf = [0.5, 1, 2]
        # Method of selecting samples for training each tree
        bootstrap = [True, False]
        # Create the random grid
        random_grid = {'n_estimators': n_estimators,
                       'max_depth': max_depth,
                       'min_samples_split': min_samples_split,
                       'min_samples_leaf': min_samples_leaf,
                       'bootstrap': bootstrap}
        pprint(random_grid)
        self.clf = ExtraTreesClassifier()
        rf_random = RandomizedSearchCV(estimator=self.clf, param_distributions=random_grid, n_iter=200, cv=5, verbose=2,
                                       random_state=42, n_jobs=-1)
        # Fit on the numeric score targets.  (The original referenced the
        # never-defined attribute self.train_y, raising AttributeError.)
        rf_random.fit(self.train[self.features], self.train['score'])
        pprint(rf_random.best_params_)
        # Map predicted ordinal scores back to labels via self.classes, the
        # same mapping used by fetchScore/evaluate.  (The original indexed
        # self.target_names, whose order of appearance is arbitrary.)
        preds = np.array([self.classes[x] for x in rf_random.predict(self.test[self.features])])
        self._report_predictions(preds)
        return rf_random

    def learn(self):
        """Fit a fixed-hyper-parameter ExtraTreesClassifier and return it."""
        self.clf = ExtraTreesClassifier(n_estimators=300, max_depth=30)
        self.clf.fit(self.train[self.features], self.train['score'])
        return self.clf

    def fetchScore(self):
        """Predict on the test set with ``self.clf`` and print the report."""
        preds = self.clf.predict(self.test[self.features])
        self._report_predictions(np.array([self.classes[x] for x in preds]))

    def evaluate(self, model):
        """Predict on the test set with an arbitrary fitted ``model``."""
        predictions = model.predict(self.test[self.features])
        self._report_predictions(np.array([self.classes[x] for x in predictions]))
| [
"sklearn.metrics.accuracy_score",
"sklearn.ensemble.ExtraTreesClassifier",
"pandas.read_csv",
"pandas.crosstab",
"numpy.array",
"numpy.linspace",
"sklearn.preprocessing.MinMaxScaler",
"pprint.pprint",
"sklearn.model_selection.RandomizedSearchCV"
] | [((708, 759), 'pandas.read_csv', 'pd.read_csv', (['self.train_data_path'], {'low_memory': '(False)'}), '(self.train_data_path, low_memory=False)\n', (719, 759), True, 'import pandas as pd\n'), ((779, 829), 'pandas.read_csv', 'pd.read_csv', (['self.test_data_path'], {'low_memory': '(False)'}), '(self.test_data_path, low_memory=False)\n', (790, 829), True, 'import pandas as pd\n'), ((2013, 2047), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (2025, 2047), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3095, 3114), 'pprint.pprint', 'pprint', (['random_grid'], {}), '(random_grid)\n', (3101, 3114), False, 'from pprint import pprint\n'), ((3135, 3157), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {}), '()\n', (3155, 3157), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((3178, 3310), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'self.clf', 'param_distributions': 'random_grid', 'n_iter': '(200)', 'cv': '(5)', 'verbose': '(2)', 'random_state': '(42)', 'n_jobs': '(-1)'}), '(estimator=self.clf, param_distributions=random_grid,\n n_iter=200, cv=5, verbose=2, random_state=42, n_jobs=-1)\n', (3196, 3310), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((3457, 3487), 'pprint.pprint', 'pprint', (['rf_random.best_params_'], {}), '(rf_random.best_params_)\n', (3463, 3487), False, 'from pprint import pprint\n'), ((3884, 3936), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'n_estimators': '(300)', 'max_depth': '(30)'}), '(n_estimators=300, max_depth=30)\n', (3904, 3936), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((4447, 4489), 'numpy.array', 'np.array', (['[self.classes[x] for x in preds]'], {}), '([self.classes[x] for x in preds])\n', (4455, 4489), True, 'import numpy as np\n'), ((4856, 4904), 'numpy.array', 'np.array', (['[self.classes[x] for x 
in predictions]'], {}), '([self.classes[x] for x in predictions])\n', (4864, 4904), True, 'import numpy as np\n'), ((3582, 3678), 'pandas.crosstab', 'pd.crosstab', (["self.test['rating']", 'preds'], {'rownames': "['Actual Species']", 'colnames': "['predicted']"}), "(self.test['rating'], preds, rownames=['Actual Species'],\n colnames=['predicted'])\n", (3593, 3678), True, 'import pandas as pd\n'), ((4504, 4600), 'pandas.crosstab', 'pd.crosstab', (["self.test['rating']", 'preds'], {'rownames': "['Actual Species']", 'colnames': "['predicted']"}), "(self.test['rating'], preds, rownames=['Actual Species'],\n colnames=['predicted'])\n", (4515, 4600), True, 'import pandas as pd\n'), ((4919, 5021), 'pandas.crosstab', 'pd.crosstab', (["self.test['rating']", 'predictions'], {'rownames': "['Actual Species']", 'colnames': "['predicted']"}), "(self.test['rating'], predictions, rownames=['Actual Species'],\n colnames=['predicted'])\n", (4930, 5021), True, 'import pandas as pd\n'), ((2337, 2375), 'numpy.linspace', 'np.linspace', ([], {'start': '(10)', 'stop': '(60)', 'num': '(11)'}), '(start=10, stop=60, num=11)\n', (2348, 2375), True, 'import numpy as np\n'), ((2457, 2483), 'numpy.linspace', 'np.linspace', (['(5)', '(30)'], {'num': '(11)'}), '(5, 30, num=11)\n', (2468, 2483), True, 'import numpy as np\n'), ((3773, 3815), 'sklearn.metrics.accuracy_score', 'accuracy_score', (["self.test['rating']", 'preds'], {}), "(self.test['rating'], preds)\n", (3787, 3815), False, 'from sklearn.metrics import accuracy_score\n'), ((4695, 4737), 'sklearn.metrics.accuracy_score', 'accuracy_score', (["self.test['rating']", 'preds'], {}), "(self.test['rating'], preds)\n", (4709, 4737), False, 'from sklearn.metrics import accuracy_score\n'), ((5116, 5164), 'sklearn.metrics.accuracy_score', 'accuracy_score', (["self.test['rating']", 'predictions'], {}), "(self.test['rating'], predictions)\n", (5130, 5164), False, 'from sklearn.metrics import accuracy_score\n')] |
import pandas as pd
import time
import seaborn
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
import kernelml
train=pd.read_csv("data/kc_house_train_data.csv",dtype = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':str, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int})
test=pd.read_csv("data/kc_house_test_data.csv",dtype = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':str, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int})
full = pd.concat([train[['price','date']],test[['price','date']]])
full.sort_values(by='date',inplace=True)
full=full.groupby('date').count()
plt.plot(full[['price']].values)
plt.title("average housing prices by date - full data")
plt.show()
ts_train = full[:int(len(full)*0.7)].copy()
ts_train['i'] = np.arange(0,len(ts_train))
plt.plot(ts_train[['price']].values)
plt.title("average housing prices by date - train data")
plt.show()
ts_test = full[int(len(full)*0.7):].copy()
ts_test['i'] = np.arange(len(ts_train),len(ts_train)+len(ts_test))
plt.plot(ts_test[['price']].values)
plt.title("average housing prices by date - valid data")
plt.show()
def sin_non_linear_model(x, w):
    """Linear trend plus one sinusoid, evaluated column-wise.

    Computes ``w[0]*x[:, 0] + w[2]*cos(w[1]*x[:, 1]) + w[3]*sin(w[1]*x[:, 1])``
    and returns it as an (n, 1) column.  ``x[:, 0]`` is presumably the
    intercept (ones) column and ``x[:, 1]`` the time index — built that way
    by the surrounding script.
    """
    base = x[:, 0:1]
    phase = x[:, 1:2] * w[1]
    return w[0] * base + w[2] * np.cos(phase) + w[3] * np.sin(phase)
def sin_mean_loss(x, y, w):
    """Mean absolute error of the sinusoidal model against targets ``y``."""
    residual = sin_non_linear_model(x, w) - y
    return np.mean(np.abs(residual))
# Sample a random contiguous window of rows from (X, y) for mini-batching.
def mini_batch_random_window(X, y, batch_size):
    """Return a random contiguous slice of X and y.

    The slice has length ``2 * (batch_size // 2)``; its centre is drawn
    uniformly so the window never runs off either end of the data.
    """
    half = batch_size // 2
    centre = np.random.randint(half, X.shape[0] - half)
    window = slice(centre - half, centre + half)
    return X[window], y[window]
# --- kernelml optimisation of the sinusoidal trend model --------------------
# Search hyper-parameters.
runs = 10                  # independent optimisation runs
zscore = 0.5               # convergence z-score threshold
tinterations = 10          # total iterations per run
nupdates = 5               # parameter updates per iteration
sequpdate = True
simulation_factor = 1000
mutation_factor = 1
breed_factor = 3

# Build the design matrix (intercept column + time index) BEFORE creating
# the optimiser: the original code referenced X_train in `batch_size=` at a
# point where X_train was not yet defined, which raised a NameError.
X_train = ts_train[['i']].values
y_train = ts_train[["price"]].values
X_train = np.column_stack((np.ones(X_train.shape[0]),X_train))

# Only `import kernelml` is in scope, so the class must be accessed as
# kernelml.KernelML (the bare name KernelML was undefined).
kml = kernelml.KernelML(
    prior_sampler_fcn=None,
    sampler_fcn=None,
    intermediate_sampler_fcn=None,
    mini_batch_sampler_fcn=mini_batch_random_window,
    parameter_transform_fcn=None,
    batch_size=int(X_train.shape[0]*0.8))

parameter_by_run,loss_by_run = kml.optimize(X_train,y_train,loss_function=sin_mean_loss,
                            num_param=4,
                            args=[],
                            runs=runs,
                            total_iterations=tinterations,
                            n_parameter_updates=nupdates,
                            simulation_factor=simulation_factor,
                            mutation_factor=mutation_factor,
                            breed_factor=breed_factor,
                            convergence_z_score=zscore,
                            prior_uniform_low=-0.001,
                            prior_uniform_high=0.001,
                            plot_feedback=False,
                            print_feedback=True)

# Trace of fitted parameters and loss across runs.
plt.plot(parameter_by_run)
plt.show()
plt.plot(loss_by_run)
plt.show()
### Ensemble Model
#Create train and test datasets: a column of ones (intercept) stacked with
#the running time index 'i'; targets are the per-date price series.
X_train = ts_train[['i']].values
y_train = ts_train[["price"]].values
X_train = np.column_stack((np.ones(X_train.shape[0]),X_train))
X_test = ts_test[['i']].values
y_test = ts_test[['price']].values
X_test = np.column_stack((np.ones(X_test.shape[0]),X_test))
#Get the model parameters by iteration
# params: one parameter vector per recorded update; errors: matching losses.
params = kml.model.get_param_by_iter()
errors = kml.model.get_loss_by_iter()
def get_rsq(y, yp):
    """Coefficient of determination R^2 of predictions ``yp`` against ``y``."""
    ss_res = np.sum((yp - y) ** 2)
    ss_tot = np.sum((np.mean(y) - y) ** 2)
    return 1 - ss_res / ss_tot
#Create ensemble of features
feature_num = 10
# Indices of the feature_num lowest-loss parameter vectors.
best_w_arr = errors.argsort()[:feature_num]
# Average the parameters of the last 10 runs for a single "consensus" model.
w = np.mean(parameter_by_run[-10:],axis=0)
plt.plot(sin_non_linear_model(X_test,w).flatten())
plt.plot(y_test)
plt.show()
print(get_rsq(y_test.flatten(),sin_non_linear_model(X_test,w).flatten()))
# One column per selected parameter vector: the model's prediction becomes a feature.
predicted_output_as_feature_train = np.zeros((X_train.shape[0],feature_num))
predicted_output_as_feature_test = np.zeros((X_test.shape[0],feature_num))
#Features from last three parameter updates
# NOTE(review): the comment above says "last three" but the loop actually
# uses the feature_num (10) best parameter vectors; it also rebinds the
# consensus `w` defined earlier.
i=0
for w in params[best_w_arr,:]:
    predicted_output_as_feature_train[:,i] = sin_non_linear_model(X_train,w).flatten()
    predicted_output_as_feature_test[:,i] = sin_non_linear_model(X_test,w).flatten()
    i+=1
# Ensemble prediction = mean across the feature columns.
plt.plot(np.mean(predicted_output_as_feature_test,axis=1))
plt.plot(y_test)
plt.show()
| [
"numpy.mean",
"numpy.abs",
"numpy.ones",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.title",
"pandas.concat",
"matplotlib.pyplot.show"
] | [((159, 596), 'pandas.read_csv', 'pd.read_csv', (['"""data/kc_house_train_data.csv"""'], {'dtype': "{'bathrooms': float, 'waterfront': int, 'sqft_above': int, 'sqft_living15':\n float, 'grade': int, 'yr_renovated': int, 'price': float, 'bedrooms':\n float, 'zipcode': str, 'long': float, 'sqft_lot15': float,\n 'sqft_living': float, 'floors': str, 'condition': int, 'lat': float,\n 'date': str, 'sqft_basement': int, 'yr_built': int, 'id': str,\n 'sqft_lot': int, 'view': int}"}), "('data/kc_house_train_data.csv', dtype={'bathrooms': float,\n 'waterfront': int, 'sqft_above': int, 'sqft_living15': float, 'grade':\n int, 'yr_renovated': int, 'price': float, 'bedrooms': float, 'zipcode':\n str, 'long': float, 'sqft_lot15': float, 'sqft_living': float, 'floors':\n str, 'condition': int, 'lat': float, 'date': str, 'sqft_basement': int,\n 'yr_built': int, 'id': str, 'sqft_lot': int, 'view': int})\n", (170, 596), True, 'import pandas as pd\n'), ((562, 998), 'pandas.read_csv', 'pd.read_csv', (['"""data/kc_house_test_data.csv"""'], {'dtype': "{'bathrooms': float, 'waterfront': int, 'sqft_above': int, 'sqft_living15':\n float, 'grade': int, 'yr_renovated': int, 'price': float, 'bedrooms':\n float, 'zipcode': str, 'long': float, 'sqft_lot15': float,\n 'sqft_living': float, 'floors': str, 'condition': int, 'lat': float,\n 'date': str, 'sqft_basement': int, 'yr_built': int, 'id': str,\n 'sqft_lot': int, 'view': int}"}), "('data/kc_house_test_data.csv', dtype={'bathrooms': float,\n 'waterfront': int, 'sqft_above': int, 'sqft_living15': float, 'grade':\n int, 'yr_renovated': int, 'price': float, 'bedrooms': float, 'zipcode':\n str, 'long': float, 'sqft_lot15': float, 'sqft_living': float, 'floors':\n str, 'condition': int, 'lat': float, 'date': str, 'sqft_basement': int,\n 'yr_built': int, 'id': str, 'sqft_lot': int, 'view': int})\n", (573, 998), True, 'import pandas as pd\n'), ((967, 1029), 'pandas.concat', 'pd.concat', (["[train[['price', 'date']], test[['price', 'date']]]"], 
{}), "([train[['price', 'date']], test[['price', 'date']]])\n", (976, 1029), True, 'import pandas as pd\n'), ((1103, 1135), 'matplotlib.pyplot.plot', 'plt.plot', (["full[['price']].values"], {}), "(full[['price']].values)\n", (1111, 1135), True, 'from matplotlib import pyplot as plt\n'), ((1136, 1191), 'matplotlib.pyplot.title', 'plt.title', (['"""average housing prices by date - full data"""'], {}), "('average housing prices by date - full data')\n", (1145, 1191), True, 'from matplotlib import pyplot as plt\n'), ((1192, 1202), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1200, 1202), True, 'from matplotlib import pyplot as plt\n'), ((1291, 1327), 'matplotlib.pyplot.plot', 'plt.plot', (["ts_train[['price']].values"], {}), "(ts_train[['price']].values)\n", (1299, 1327), True, 'from matplotlib import pyplot as plt\n'), ((1328, 1384), 'matplotlib.pyplot.title', 'plt.title', (['"""average housing prices by date - train data"""'], {}), "('average housing prices by date - train data')\n", (1337, 1384), True, 'from matplotlib import pyplot as plt\n'), ((1385, 1395), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1393, 1395), True, 'from matplotlib import pyplot as plt\n'), ((1507, 1542), 'matplotlib.pyplot.plot', 'plt.plot', (["ts_test[['price']].values"], {}), "(ts_test[['price']].values)\n", (1515, 1542), True, 'from matplotlib import pyplot as plt\n'), ((1543, 1599), 'matplotlib.pyplot.title', 'plt.title', (['"""average housing prices by date - valid data"""'], {}), "('average housing prices by date - valid data')\n", (1552, 1599), True, 'from matplotlib import pyplot as plt\n'), ((1600, 1610), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1608, 1610), True, 'from matplotlib import pyplot as plt\n'), ((3498, 3524), 'matplotlib.pyplot.plot', 'plt.plot', (['parameter_by_run'], {}), '(parameter_by_run)\n', (3506, 3524), True, 'from matplotlib import pyplot as plt\n'), ((3525, 3535), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(3533, 3535), True, 'from matplotlib import pyplot as plt\n'), ((3537, 3558), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_by_run'], {}), '(loss_by_run)\n', (3545, 3558), True, 'from matplotlib import pyplot as plt\n'), ((3559, 3569), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3567, 3569), True, 'from matplotlib import pyplot as plt\n'), ((4173, 4212), 'numpy.mean', 'np.mean', (['parameter_by_run[-10:]'], {'axis': '(0)'}), '(parameter_by_run[-10:], axis=0)\n', (4180, 4212), True, 'import numpy as np\n'), ((4264, 4280), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test'], {}), '(y_test)\n', (4272, 4280), True, 'from matplotlib import pyplot as plt\n'), ((4281, 4291), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4289, 4291), True, 'from matplotlib import pyplot as plt\n'), ((4404, 4445), 'numpy.zeros', 'np.zeros', (['(X_train.shape[0], feature_num)'], {}), '((X_train.shape[0], feature_num))\n', (4412, 4445), True, 'import numpy as np\n'), ((4480, 4520), 'numpy.zeros', 'np.zeros', (['(X_test.shape[0], feature_num)'], {}), '((X_test.shape[0], feature_num))\n', (4488, 4520), True, 'import numpy as np\n'), ((4841, 4857), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test'], {}), '(y_test)\n', (4849, 4857), True, 'from matplotlib import pyplot as plt\n'), ((4858, 4868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4866, 4868), True, 'from matplotlib import pyplot as plt\n'), ((2011, 2047), 'numpy.random.randint', 'np.random.randint', (['W', '(X.shape[0] - W)'], {}), '(W, X.shape[0] - W)\n', (2028, 2047), True, 'import numpy as np\n'), ((4791, 4840), 'numpy.mean', 'np.mean', (['predicted_output_as_feature_test'], {'axis': '(1)'}), '(predicted_output_as_feature_test, axis=1)\n', (4798, 4840), True, 'import numpy as np\n'), ((1839, 1851), 'numpy.abs', 'np.abs', (['loss'], {}), '(loss)\n', (1845, 1851), True, 'import numpy as np\n'), ((2640, 2665), 'numpy.ones', 'np.ones', (['X_train.shape[0]'], {}), '(X_train.shape[0])\n', (2647, 2665), 
True, 'import numpy as np\n'), ((3720, 3745), 'numpy.ones', 'np.ones', (['X_train.shape[0]'], {}), '(X_train.shape[0])\n', (3727, 3745), True, 'import numpy as np\n'), ((3849, 3873), 'numpy.ones', 'np.ones', (['X_test.shape[0]'], {}), '(X_test.shape[0])\n', (3856, 3873), True, 'import numpy as np\n'), ((1699, 1723), 'numpy.sin', 'np.sin', (['(x[:, 1:2] * w[1])'], {}), '(x[:, 1:2] * w[1])\n', (1705, 1723), True, 'import numpy as np\n'), ((4033, 4054), 'numpy.sum', 'np.sum', (['((yp - y) ** 2)'], {}), '((yp - y) ** 2)\n', (4039, 4054), True, 'import numpy as np\n'), ((1670, 1694), 'numpy.cos', 'np.cos', (['(x[:, 1:2] * w[1])'], {}), '(x[:, 1:2] * w[1])\n', (1676, 1694), True, 'import numpy as np\n'), ((4059, 4069), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (4066, 4069), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings; warnings.simplefilter('ignore')
from scipy import stats
from ast import literal_eval
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet
from surprise import Reader, Dataset, SVD, evaluate
from collections import defaultdict
class hybrid(object):
    """Hybrid book recommender: blends popularity, collaborative (SVD) and
    content-based scores for a single user and prints the ranked result.

    NOTE(review): written against old library versions --
    ``surprise.evaluate`` and ``Dataset.split`` were removed in
    scikit-surprise 1.1, and ``pd.concat(..., join_axes=...)`` was removed
    in pandas 1.0; confirm the pinned versions before running.
    """
    def __init__ (self,user_id,ratings):
        # user_id: the user to recommend for.
        # ratings: DataFrame with user_id / book_id / rating columns.
        self.user_id = user_id
        self.md = pd.read_csv('CustomData/FinalData.csv')
        self.ratings = ratings
        print(ratings[(ratings['user_id'] == user_id)][['user_id','book_id', 'rating']])
        # Compute the three component scores, then blend and print them.
        self.popularity_rating = self.popularity(self.md)
        self.collaborative_rating = self.collaborative(self.ratings, self.user_id)
        self.content_rating = self.content_based(self.md,self.ratings,self.user_id)
        self.final_hybrid(self.md, self.popularity_rating , self.collaborative_rating, self.content_rating, self.user_id)
    #Popularity#
    def popularity(self,md):
        """IMDB-style weighted popularity rating per book.

        Returns a DataFrame with columns book_id / popularity_rating.
        """
        fd = pd.read_csv('CustomData/AverageRatings.csv')
        fd1 = pd.read_csv('CustomData/RatingsCount.csv')
        # NOTE(review): the two assignments below write into a copy produced
        # by boolean indexing (pandas chained assignment), so the float cast
        # is silently discarded.
        fd[fd['rating'].notnull()]['rating'] = fd[fd['rating'].notnull()]['rating'].astype('float')
        vote_averages= fd[fd['rating'].notnull()]['rating']
        C = vote_averages.mean()  # global mean rating
        fd1[fd1['rating'].notnull()]['rating'] = fd1[fd1['rating'].notnull()]['rating'].astype('float')
        vote_counts = fd1[fd1['rating'].notnull()]['rating']
        m = len(vote_counts)  # number of books carrying a ratings count
        md['ratings_count'] = fd1['rating']
        md['average_rating'] = fd['rating']
        qualified = md[(md['ratings_count'].notnull())][['book_id','title', 'authors', 'ratings_count', 'average_rating']]
        qualified['ratings_count'] = qualified['ratings_count'].astype('float')
        qualified['average_rating'] = qualified['average_rating'].astype('float')
        qualified.shape  # NOTE(review): no-op expression, has no effect
        def weighted_rating(x):
            # Bayesian shrinkage: pull a book's average toward the global
            # mean C in proportion to how few votes it has.
            v = x['ratings_count']
            R = x['average_rating']
            return (v/(v+m) * R) + (m/(m+v) * C)
        qualified['popularity_rating'] = qualified.apply(weighted_rating, axis=1)
        pop = qualified[['book_id','popularity_rating']]
        print(qualified.shape)
        print(pop.shape)
        return pop
    ### Collaborative ##
    def collaborative(self,ratings,user_id):
        """SVD collaborative-filtering score per book for ``user_id``.

        Returns a DataFrame with columns book_id / rating.
        """
        reader = Reader()
        #ratings.head()
        temp_ratings = ratings
        data = Dataset.load_from_df(temp_ratings[['user_id', 'book_id', 'rating']], reader)
        data.split(n_folds=2)
        ## Training the data ##
        # NOTE(review): this first SVD is only used for the evaluate()
        # report; predictions come from the second, independent fit below.
        svd = SVD()
        evaluate(svd, data, measures=['RMSE', 'MAE'])
        trainset = data.build_full_trainset()
        algo = SVD()
        algo.fit(trainset)
        #svd.train(trainset)
        ## Testing the data ##
        # Predict every (user, book) pair that is absent from the train set.
        testset = trainset.build_anti_testset()
        predictions = algo.test(testset)
        count = 0
        for uid, iid, true_r, est, _ in predictions:
            if uid == user_id:
                count = count+1
                # Append the estimated rating as a new row for this user.
                temp_ratings.loc[len(temp_ratings)+1]= [uid,iid,est]
        cb = temp_ratings[(temp_ratings['user_id'] == user_id)][['book_id', 'rating']]
        return(cb)
    ##### CONTENT ######
    def content_based(self,md,ratings,user_id):
        """Content-based score: cosine similarity between a bag-of-words
        "soup" of authors+genres and the user's rating profile.

        Returns a DataFrame with columns book_id / content_rating.
        """
        md['book_id'] = md['book_id'].astype('int')
        ratings['book_id'] = ratings['book_id'].astype('int')
        ratings['user_id'] = ratings['user_id'].astype('int')
        ratings['rating'] = ratings['rating'].astype('int')
        # Normalize author names into single lower-case tokens.
        md['authors'] = md['authors'].str.replace(' ','')
        md['authors'] = md['authors'].str.lower()
        md['authors'] = md['authors'].str.replace(',',' ')
        #print(md.head())
        # Duplicate the author token to give authors double weight in the soup.
        md['authors'] = md['authors'].apply(lambda x: [x,x])
        #print(md['authors'])
        md['Genres']=md['Genres'].str.split(';')
        #print(md['Genres'])
        md['soup'] = md['authors'] + md['Genres']
        #print(md['soup'])
        md['soup'] = md['soup'].str.join(' ')
        count = CountVectorizer(analyzer='word',ngram_range=(1,1),min_df=0, stop_words='english')
        count_matrix = count.fit_transform(md['soup'])
        print (count_matrix.shape)
        cosine_sim = cosine_similarity(count_matrix, count_matrix)
        def build_user_profiles():
            # user_profiles[u][b-1] = rating user u gave book b (0 if none).
            # NOTE(review): sizes 60001 users x 999 books are hard-coded to
            # match the dataset; confirm if the data changes.
            user_profiles=np.zeros((60001,999))
            #taking only the first 100000 ratings to build user_profile
            for i in range(0,100000):
                u=ratings.iloc[i]['user_id']
                b=ratings.iloc[i]['book_id']
                user_profiles[u][b-1]=ratings.iloc[i]['rating']
            return user_profiles
        user_profiles=build_user_profiles()
        def _get_similar_items_to_user_profile(person_id):
            #Computes the cosine similarity between the user profile and all item profiles
            user_ratings = np.empty((999,1))
            cnt=0
            for i in range(0,998):
                book_sim=cosine_sim[i]
                user_sim=user_profiles[person_id]
                user_ratings[i]=(book_sim.dot(user_sim))/sum(cosine_sim[i])
            maxval = max(user_ratings)
            print(maxval)
            # Rescale scores to a 0-5 rating range.
            for i in range(0,998):
                user_ratings[i]=((user_ratings[i]*5.0)/(maxval))
                if(user_ratings[i]>3):
                    cnt+=1
            return user_ratings
        content_ratings = _get_similar_items_to_user_profile(user_id)
        num = md[['book_id']]
        num1 = pd.DataFrame(data=content_ratings[0:,0:])
        frames = [num, num1]
        # NOTE(review): join_axes was removed in pandas 1.0; on newer pandas
        # use pd.concat(frames, axis=1).reindex(num.index) instead.
        content_rating = pd.concat(frames, axis =1,join_axes=[num.index])
        content_rating.columns=['book_id', 'content_rating']
        return(content_rating)
    def final_hybrid(self,md, popularity_rating , collaborative_rating, content_rating, user_id):
        """Blend the three scores (0.4*CF + 0.2*popularity + 0.4*content)
        and print the top-ranked books for the user.
        """
        hyb = md[['book_id']]
        title = md[['book_id','title', 'Genres']]
        hyb = hyb.merge(title,on = 'book_id')
        hyb = hyb.merge(self.collaborative_rating,on = 'book_id')
        hyb = hyb.merge(self.popularity_rating, on='book_id')
        hyb = hyb.merge(self.content_rating, on='book_id')
        def weighted_rating(x):
            # Fixed blend weights: collaborative 0.4, popularity 0.2, content 0.4.
            v = x['rating']
            R = x['popularity_rating']
            c = x['content_rating']
            return 0.4*v + 0.2*R + 0.4 * c
        hyb['hyb_rating'] = hyb.apply(weighted_rating, axis=1)
        hyb = hyb.sort_values('hyb_rating', ascending=False).head(999)
        hyb.columns = ['Book ID' , 'Title', 'Genres', 'Collaborative Rating', 'Popularity Rating' , 'Content Rating', 'Hybrid Rating']
        print(len(hyb['Hybrid Rating']))
        print(hyb)
def newUser():
    """Cold-start flow: print a sample catalogue, collect five ratings from
    a brand-new user (fixed id 60000) and run the hybrid recommender.

    NOTE(review): this uses input() while the menu below uses raw_input(),
    which suggests Python 2 -- on Python 2 input() eval()s what is typed;
    confirm the target interpreter.
    """
    print('\n Rate from books\n')
    print('ID Author Title Genre\n')
    print('2. <NAME>, Mary <NAME> and the Sorcerer\'s Stone (Harry Potter, #1) Fantasy;Young-Age')
    print('127. <NAME> The Tipping Point: How Little Things Can Make a Big Difference Self-Help')
    print('239. <NAME> World War Z: An Oral History of the Zombie War Horror;Fiction')
    print('26 <NAME> The Da Vinci Code Thriller;Drama')
    print('84 <NAME> Jurassic Park (Jurassic Park, #1) SciFi;Thriller;Fantasy')
    print('86 <NAME> A Time to Kill Thriller')
    print('966 <NAME> Presumed Innocent Thriller;Crime')
    print('42 <NAME> Little Women (Little Women, #1) Young-Age;Romance;Drama')
    print('44 <NAME> The Notebook (The Notebook, #1) Romance;Drama')
    print('54 <NAME> The Hitchhiker\'s Guide to the Galaxy Fantasy;Fiction')
    print('134 <NAME> City of Glass (The Mortal Instruments, #3) Kids;Fantasy;Fiction')
    print('399 <NAME> The Tales of Beedle the Bard Kids;Fantasy;Fiction')
    print('38 <NAME> The Time Traveler\'s Wife Romance;SciFi;Fantasy;Domestic')
    print('729 <NAME> Hyperion (Hyperion Cantos, #1) SciFi')
    print('807 <NAME> The Circle SciFi')
    print('690 <NAME> The Audacity of Hope: Thoughts on Reclaiming the American Dream Biography')
    print('617 <NAME> Orange Is the New Black Biography')
    print('495 <NAME> A Heartbreaking Work of Staggering Genius Biography')
    print('770 <NAME>,<NAME> <NAME> History;Classic')
    print('773 <NAME> The Taming of the Shrew Comedy;Classic')
    print('829 <NAME> A Room with a View Classic')
    print('971 <NAME>, <NAME> The Rainbow Fish Kids')
    print('976 <NAME>, Dr. Seuss Dr. Seuss\'s Green Eggs and Ham: For Soprano, Boy Soprano, and Orchestra Kids')
    print('627 <NAME>, <NAME> The True Story of the 3 Little Pigs Kids;Fiction')
    print('121 <NAME>, <NAME> Lolita Biography;Romance;Comedy')
    print('196 <NAME> Fight Club Comedy;Drama')
    print('444 <NAME>, <NAME> Winnie-the-Pooh (Winnie-the-Pooh, #1) Kids;Comedy')
    print('745 Jenny Lawson Lets Pretend This Never Happened: A Mostly True Memoir Biography;Comedy')
    ratings = pd.read_csv('CustomData/FinalRatings.csv')
    #taking only the first 100000 ratings
    ratings=ratings[1:100000]
    user_id = 60000
    rating_count = len(ratings['user_id'])+1
    print(user_id)
    print('\n----------------Welcome User '+str(user_id)+'-------------------')
    print('\nPlease Rate 5 books from the above list.')
    for x in range(0,5):
        print("\n")
        bookId=input("BookId:")
        rating=input("Rating:")
        # Append the new rating row; hybrid() casts the columns to int later.
        ratings.loc[rating_count]= [user_id,bookId,rating]
        rating_count =rating_count+1
    h = hybrid(user_id,ratings)
# Entry-point menu: choose between the new-user (cold start) flow and an
# existing-user lookup.  NOTE(review): raw_input() exists only on Python 2.
print("------------------------------Welcome to the Book Recommendation Engine---------------------------\n")
user=raw_input("1. Book Recommendation for New User. \n2. Book Recommendation for Existing User.\n")
if user=='1':
    newUser()
elif user=='2':
    ratings = pd.read_csv('CustomData/FinalRatings.csv')
    ratings=ratings[1:100000]
    #taking only the first 100000 ratings
    userId=int(raw_input("\nPlease Enter User Id: "))
    print('\n----------------Welcome User'+str(userId)+'-------------------')
    h = hybrid(userId,ratings)
else:
    print("Invalid option\n ")
| [
"sklearn.metrics.pairwise.cosine_similarity",
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.feature_extraction.text.CountVectorizer",
"surprise.evaluate",
"surprise.Dataset.load_from_df",
"numpy.zeros",
"numpy.empty",
"surprise.SVD",
"warnings.simplefilter",
"surprise.Reader",
"pandas.concat... | [((110, 141), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (131, 141), False, 'import warnings\n'), ((10678, 10720), 'pandas.read_csv', 'pd.read_csv', (['"""CustomData/FinalRatings.csv"""'], {}), "('CustomData/FinalRatings.csv')\n", (10689, 10720), True, 'import pandas as pd\n'), ((685, 724), 'pandas.read_csv', 'pd.read_csv', (['"""CustomData/FinalData.csv"""'], {}), "('CustomData/FinalData.csv')\n", (696, 724), True, 'import pandas as pd\n'), ((1264, 1308), 'pandas.read_csv', 'pd.read_csv', (['"""CustomData/AverageRatings.csv"""'], {}), "('CustomData/AverageRatings.csv')\n", (1275, 1308), True, 'import pandas as pd\n'), ((1323, 1365), 'pandas.read_csv', 'pd.read_csv', (['"""CustomData/RatingsCount.csv"""'], {}), "('CustomData/RatingsCount.csv')\n", (1334, 1365), True, 'import pandas as pd\n'), ((2617, 2625), 'surprise.Reader', 'Reader', ([], {}), '()\n', (2623, 2625), False, 'from surprise import Reader, Dataset, SVD, evaluate\n'), ((2697, 2773), 'surprise.Dataset.load_from_df', 'Dataset.load_from_df', (["temp_ratings[['user_id', 'book_id', 'rating']]", 'reader'], {}), "(temp_ratings[['user_id', 'book_id', 'rating']], reader)\n", (2717, 2773), False, 'from surprise import Reader, Dataset, SVD, evaluate\n'), ((2851, 2856), 'surprise.SVD', 'SVD', ([], {}), '()\n', (2854, 2856), False, 'from surprise import Reader, Dataset, SVD, evaluate\n'), ((2865, 2910), 'surprise.evaluate', 'evaluate', (['svd', 'data'], {'measures': "['RMSE', 'MAE']"}), "(svd, data, measures=['RMSE', 'MAE'])\n", (2873, 2910), False, 'from surprise import Reader, Dataset, SVD, evaluate\n'), ((2974, 2979), 'surprise.SVD', 'SVD', ([], {}), '()\n', (2977, 2979), False, 'from surprise import Reader, Dataset, SVD, evaluate\n'), ((4302, 4391), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'analyzer': '"""word"""', 'ngram_range': '(1, 1)', 'min_df': '(0)', 'stop_words': '"""english"""'}), "(analyzer='word', 
ngram_range=(1, 1), min_df=0, stop_words=\n 'english')\n", (4317, 4391), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((4496, 4541), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['count_matrix', 'count_matrix'], {}), '(count_matrix, count_matrix)\n', (4513, 4541), False, 'from sklearn.metrics.pairwise import linear_kernel, cosine_similarity\n'), ((5759, 5801), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'content_ratings[0:, 0:]'}), '(data=content_ratings[0:, 0:])\n', (5771, 5801), True, 'import pandas as pd\n'), ((5857, 5905), 'pandas.concat', 'pd.concat', (['frames'], {'axis': '(1)', 'join_axes': '[num.index]'}), '(frames, axis=1, join_axes=[num.index])\n', (5866, 5905), True, 'import pandas as pd\n'), ((11581, 11623), 'pandas.read_csv', 'pd.read_csv', (['"""CustomData/FinalRatings.csv"""'], {}), "('CustomData/FinalRatings.csv')\n", (11592, 11623), True, 'import pandas as pd\n'), ((4604, 4626), 'numpy.zeros', 'np.zeros', (['(60001, 999)'], {}), '((60001, 999))\n', (4612, 4626), True, 'import numpy as np\n'), ((5140, 5158), 'numpy.empty', 'np.empty', (['(999, 1)'], {}), '((999, 1))\n', (5148, 5158), True, 'import numpy as np\n')] |
#!venv/bin/python3
'''
Author: <NAME>
Date: 2021-04-07
Predict next-day rain by training classification models on the target variable RainTomorrow.
This dataset contains about 10 years of daily weather observations from many locations across Australia.
RainTomorrow is the target variable to predict. It means -- did it rain the next day, Yes or No?
This column is Yes if the rain for that day was 1mm or more.
! THE CODE DOES NOT WORK AT THE MOMENT
'''
# %% Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import urllib.request
from datetime import datetime
import os
# %% Load and prepare dataset
# Download dataset here:
# https://www.kaggle.com/jsphyg/weather-dataset-rattle-package/download
data_path = '../datasets/weatherAUS.csv'
assert os.path.exists(data_path)
df = pd.read_csv(data_path)
# Keep only the columns used by this experiment.
df = df[[
    'Date', 'Location','MinTemp', 'MaxTemp',
    'Humidity9am', 'Humidity3pm',
    'Pressure9am', 'Pressure3pm',
    'Temp9am', 'Temp3pm',
    'RainToday', 'RainTomorrow',
    'Rainfall'
]]
# Rough normalization
# NOTE(review): centers on the column max (not the mean) before dividing by
# the std -- presumably intentional "rough" scaling, but confirm.
df.iloc[:,2:] = (df.iloc[:,2:] - df.iloc[:,2:].max()) / df.iloc[:,2:].std()
# Define dictionaries
cities = df.Location.unique()
cities_to_num = {v:k for k,v in enumerate(cities)}
YesNo_to_num = {
    'No': 0,
    'Yes': 1
}
# Encode categorical columns as integer codes.
df.Location = df.Location.map(cities_to_num)
df.RainToday = df.RainToday.map(YesNo_to_num)
df.RainTomorrow = df.RainTomorrow.map(YesNo_to_num)
data = []
for city in range(len(cities)):
    to_add = df[df.Location == city].to_numpy()[:,1:]
    # Do not add cities with low number of training examples
    if to_add.shape[0] >= 3000:
        data.append(to_add[:3000,:])  # truncate so every kept city has 3000 rows
# NOTE(review): np.float was removed in NumPy 1.24 -- use float/np.float64.
data = np.array(data).astype(np.float)
# Define train and test sets
# NOTE(review): rows 1998 and 2998 fall in neither slice nor target (x uses
# :1998 / 2000:2998 while y uses index 1999 / 2999) -- likely related to the
# header's "THE CODE DOES NOT WORK" warning; confirm the intended split.
x_train = data[:,:1998,:]
y_train = data[:,1999,:]
x_test = data[:,2000:2998,:]
y_test = data[:,2999,:]
# %% Define NN
# Single-layer LSTM emitting one 12-dim vector per sequence.
# NOTE(review): input_shape=(3000,12) does not match x_train's 1998
# timesteps, and CategoricalCrossentropy is applied to a linear (non-softmax)
# output -- both consistent with the "does not work" warning above.
model = tf.keras.models.Sequential([
    tf.keras.layers.LSTM(
        units=12, # output dim is full input
        activation='linear',
        recurrent_activation='sigmoid',
        input_shape=(3000,12)
    )
])
#print(model.output_shape)
model.summary()
# %% Compile, train and evaluate model
loss_fn = tf.keras.losses.CategoricalCrossentropy()
model.compile(
    optimizer='adam',
    loss=loss_fn,
    metrics=['accuracy']
)
''' Early stopping to prevent overfitting '''
def get_callbacks():
    """Callback list for model.fit: stop once training `loss` has not
    improved for 150 consecutive epochs."""
    early_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=150)
    return [early_stop]
''' Train the model '''
print("Training started at", datetime.now())
history = model.fit(x_train,
    y_train,
    epochs=10,
    callbacks=get_callbacks()
    )
print("Training ended at", datetime.now())
model.save_weights( #save trained weights
    filepath='../saved_models/3_LSTM/LSTM',
    overwrite=True,
)
''' Visualize performances over the epochs '''
# Accuracy and loss share the x-axis but use separate y-axes (twinx).
ax = sns.lineplot(
    x=history.epoch,
    y=history.history['accuracy'],
    color='green',
    label='accuracy'
)
ax2 = ax.twinx()
sns.lineplot(
    x=history.epoch,
    y=history.history['loss'],
    label='loss',
    color='red',
    ax=ax2
)
model.evaluate(x_test, y_test, verbose=2)
# %%
| [
"os.path.exists",
"pandas.read_csv",
"seaborn.lineplot",
"datetime.datetime.now",
"numpy.array",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.losses.CategoricalCrossentropy"
] | [((870, 895), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (884, 895), False, 'import os\n'), ((901, 923), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (912, 923), True, 'import pandas as pd\n'), ((2248, 2289), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (2287, 2289), True, 'import tensorflow as tf\n'), ((2910, 3007), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': 'history.epoch', 'y': "history.history['accuracy']", 'color': '"""green"""', 'label': '"""accuracy"""'}), "(x=history.epoch, y=history.history['accuracy'], color='green',\n label='accuracy')\n", (2922, 3007), True, 'import seaborn as sns\n'), ((3039, 3134), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': 'history.epoch', 'y': "history.history['loss']", 'label': '"""loss"""', 'color': '"""red"""', 'ax': 'ax2'}), "(x=history.epoch, y=history.history['loss'], label='loss',\n color='red', ax=ax2)\n", (3051, 3134), True, 'import seaborn as sns\n'), ((2580, 2594), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2592, 2594), False, 'from datetime import datetime\n'), ((2730, 2744), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2742, 2744), False, 'from datetime import datetime\n'), ((1751, 1765), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1759, 1765), True, 'import numpy as np\n'), ((1976, 2088), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', ([], {'units': '(12)', 'activation': '"""linear"""', 'recurrent_activation': '"""sigmoid"""', 'input_shape': '(3000, 12)'}), "(units=12, activation='linear', recurrent_activation=\n 'sigmoid', input_shape=(3000, 12))\n", (1996, 2088), True, 'import tensorflow as tf\n'), ((2459, 2521), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""loss"""', 'patience': '(150)'}), "(monitor='loss', patience=150)\n", (2491, 2521), True, 'import tensorflow as tf\n')] |
import numpy as np
from numpy.linalg import matrix_power
from numpy.linalg import multi_dot
def markovMP(A,B,C,D,t):
    """Assemble the lower-triangular Toeplitz matrix of Markov parameters.

    H[i][j] = C @ A**(i-j) @ B for i >= j, and 0 above the diagonal, with
    the matrix size set by len(t).

    Args:
        A, B, C: state-space matrices (2-D arrays; each C @ A**k @ B must
            reduce to a single scalar entry, i.e. a SISO system).
        D: unused -- kept for state-space signature compatibility.
        t: sequence whose length determines the matrix size.

    Returns:
        A (len(t), len(t)) ndarray of floats.
    """
    n = len(t)
    H = np.zeros(shape=(n, n))
    # Hoist the Markov parameters out of the double loop: the original code
    # recomputed matrix_power(A, k) for every (row, col) pair (O(n^2) matrix
    # powers); one pass of n is enough.  Note C @ A**0 @ B == C @ B, which
    # also covers the diagonal that was handled by a separate branch before.
    markov_params = [multi_dot([C, matrix_power(A, k), B]) for k in range(n)]
    for row in range(n):
        for col in range(row + 1):
            H[row][col] = markov_params[row - col]
    return H
"numpy.dot",
"numpy.linalg.matrix_power"
] | [((286, 298), 'numpy.dot', 'np.dot', (['C', 'B'], {}), '(C, B)\n', (292, 298), True, 'import numpy as np\n'), ((416, 434), 'numpy.linalg.matrix_power', 'matrix_power', (['A', 'k'], {}), '(A, k)\n', (428, 434), False, 'from numpy.linalg import matrix_power\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : recording.py
# Author : <NAME>, <NAME>
# Email : <EMAIL>, <EMAIL>
# Date : 03.08.2019
# Last Modified Date: 27.08.2019
# Last Modified By : Chi Han, Jiayuan Mao
#
# This file is part of the VCML codebase
# Distributed under MIT license
import os
import numpy as np
import torch
from abc import abstractmethod
from utility.common import make_parent_dir
class History:
    """Bounded scalar time series with adaptive thinning.

    A value is kept on every ``log_interval``-th call to :meth:`record`;
    whenever the buffer reaches ``capacity`` entries, every second entry is
    dropped and the interval doubles, so memory stays bounded while the
    whole run remains coarsely represented.
    """

    def __init__(self, name, capacity):
        self.name = name
        self.capacity = capacity
        self.n = 0              # total number of record() calls so far
        self.log = []           # retained (thinned) samples
        self.log_interval = 1   # keep one sample per this many calls

    def record(self, value):
        """Register one value; it is kept only on interval boundaries."""
        self.n += 1
        if self.n % self.log_interval != 0:
            return
        self.log.append(value)
        if len(self.log) == self.capacity:
            # Buffer full: halve the resolution instead of growing.
            self.log = self.log[::2]
            self.log_interval *= 2

    def plot(self, ax, label):
        """Draw the retained samples on *ax* against approximate call index."""
        xs = np.arange(len(self.log)) * self.log_interval
        ax.plot(xs, np.array(self.log), label=label)

    def state_dict(self):
        """Serializable snapshot of the full state."""
        return {
            'n': self.n,
            'log': self.log,
            'log_interval': self.log_interval,
            'name': self.name,
            'capacity': self.capacity,
        }

    def load_state_dict(self, ckpt):
        """Restore state previously produced by :meth:`state_dict`."""
        self.__dict__.update(ckpt)
class MatrixHistory:
    """Per-row ring buffer of vectors.

    For each of ``dims[0]`` rows it keeps the most recent ``capacity``
    recorded vectors of length ``dims[1]`` (``dims`` must be a tuple).
    """

    def __init__(self, name, dims, capacity):
        self.name = name
        self.capacity = capacity
        # log[i, :, t] holds the t-th retained sample vector for row i.
        self.log = np.zeros(dims + (capacity,))
        # Running count of samples recorded for each row.
        self.log_indexes = np.zeros(dims[0], dtype=int)

    def record(self, row_ind, value):
        """Store *value* for row *row_ind*, overwriting the oldest slot."""
        slot = self.log_indexes[row_ind] % self.capacity
        self.log[row_ind, :, slot] = value
        self.log_indexes[row_ind] += 1

    def reset(self):
        """Zero the buffer and the per-row counters in place."""
        self.log *= 0
        self.log_indexes *= 0

    def state_dict(self):
        """Serializable snapshot of the full state."""
        return {
            'name': self.name,
            'capacity': self.capacity,
            'log': self.log,
            'log_indexes': self.log_indexes,
        }

    def load_state_dict(self, ckpt):
        """Restore state previously produced by :meth:`state_dict`."""
        self.__dict__.update(ckpt)
class AverageRecording:
    """Base class for a running average of a scalar metric.

    Subclasses decide how the average is accumulated (``update_average``)
    and read out (``value``).  Both the raw inputs and the running value
    are mirrored into bounded :class:`History` buffers for plotting.
    """

    def __init__(self, name, history_capacity):
        self.name = name
        self.current = 0
        self.history = History(
            f'{name}_history', history_capacity)
        self.value_history = History(
            f'{name}_valueHistory', history_capacity)

    @abstractmethod
    def reset(self):
        pass

    def record(self, new_value):
        """Fold one raw value into the average and into both histories."""
        # Callers must hand over plain numbers, never tensors.
        assert not isinstance(new_value, torch.Tensor), 'not converted'
        self.history.record(new_value)
        self.update_average(new_value)
        self.value_history.record(self.value)

    def __str__(self):
        return '{0}: {1:.3f}'.format(self.name, self.value)

    def state_dict(self):
        """Serializable snapshot including both histories."""
        return {
            'name': self.name,
            'current': self.current,
            'history': self.history.state_dict(),
            'value_history': self.value_history.state_dict(),
        }

    def load_state_dict(self, ckpt):
        """Restore state previously produced by :meth:`state_dict`."""
        self.name = ckpt['name']
        self.current = ckpt['current']
        self.history.load_state_dict(ckpt['history'])
        self.value_history.load_state_dict(ckpt['value_history'])

    @property
    @abstractmethod
    def value(self):
        pass

    @abstractmethod
    def update_average(self):
        pass

    def plot(self, ax, label):
        """Plot the running-value history with labelled axes and title."""
        self.value_history.plot(ax, label)
        ax.set_xlabel('time')
        ax.set_ylabel('value')
        ax.set_title(self.name)
class ExponentialAverage(AverageRecording):
    """Exponentially-weighted moving average with bias correction.

    ``value`` divides the raw EMA by the accumulated weight, correcting the
    startup bias of an EMA initialised at zero (as in Adam).
    """

    def __init__(self, name, capacity, momentum=0.99):
        super().__init__(name, capacity)
        self.momentum = momentum
        self.weight = 0  # accumulated normalisation weight

    def update_average(self, new_value):
        decay = self.momentum
        self.current = decay * self.current + (1 - decay) * new_value
        self.weight = decay * self.weight + (1 - decay) * 1

    @property
    def value(self):
        # Bias-corrected average; 0 before anything was recorded.
        return self.current / self.weight if self.weight != 0 else 0

    def reset(self):
        self.current = 0
        self.weight = 0

    def state_dict(self):
        ckpt = super().state_dict()
        ckpt['weight'] = self.weight
        return ckpt

    def load_state_dict(self, ckpt):
        super().load_state_dict(ckpt)
        self.weight = ckpt['weight']
class SimpleAverage(AverageRecording):
    """Plain arithmetic mean of everything recorded since the last reset."""

    def __init__(self, name, capacity):
        super().__init__(name, capacity)
        self.count = 0  # number of samples folded in

    def update_average(self, new_value):
        self.current += new_value
        self.count += 1

    @property
    def value(self):
        # Mean so far; 0 before anything was recorded.
        return self.current / self.count if self.count != 0 else 0

    def reset(self):
        self.current = 0
        self.count = 0

    def state_dict(self):
        ckpt = super().state_dict()
        ckpt['count'] = self.count
        return ckpt

    def load_state_dict(self, ckpt):
        super().load_state_dict(ckpt)
        self.count = ckpt['count']
class CoeffMatrixRecording:
    """Pairwise linear-regression slopes between rows of a MatrixHistory.

    For each row pair (i, j), :meth:`calculate_coeff` fits a degree-1
    polynomial of history (i, j) against the "diagonal" history (i, i)
    and stores the slope.
    """

    def __init__(self, name, dim, history_capacity):
        self.name = name
        self.dim = dim
        self.history_capacity = history_capacity
        self.history = MatrixHistory(
            name,
            (dim, dim),
            history_capacity,
        )

    def record(self, row_ind, value):
        """Record one vector (or, if 2-D, several stacked vectors) for a row."""
        if value.ndim == 2:
            for row in value:
                self.history.record(row_ind, row)
        else:
            self.history.record(row_ind, value)

    def reset(self):
        """Clear the underlying history buffer."""
        self.history.reset()

    def calculate_coeff(self):
        """Return the dim x dim matrix of fitted slopes.

        Rows whose own (diagonal) history has zero variance are left at
        zero, since the fit would be degenerate.
        """
        coeff_matrix = np.zeros((self.dim, self.dim))
        log_mat = self.history.log
        for i in range(self.dim):
            x = log_mat[i, i]
            if x.var() <= 0:
                continue  # constant predictor: slope undefined, keep zeros
            for j in range(self.dim):
                slope, _ = np.polyfit(x, log_mat[i, j], 1)
                coeff_matrix[i, j] = slope
        return coeff_matrix

    def state_dict(self):
        """Serializable snapshot of configuration plus history."""
        return {
            'name': self.name,
            'dim': self.dim,
            'history_capacity': self.history_capacity,
            'history': self.history.state_dict(),
        }

    def load_state_dict(self, ckpt):
        """Restore state previously produced by :meth:`state_dict`."""
        self.name = ckpt['name']
        self.dim = ckpt['dim']
        self.history_capacity = ckpt['history_capacity']
        self.history.load_state_dict(ckpt['history'])
class AverageGroup:
    """A keyed collection of average recordings sharing one configuration.

    New keys are added lazily on first record; keys missing from a record
    call are padded with their own current value so that every recording's
    history stays aligned in time.
    """

    def __init__(self, groupname, mode, history, path):
        self.groupname = groupname
        self.mode = mode
        self.history = history
        self.path = path
        # 'exponential' -> EMA, 'simple' -> arithmetic mean.  An unknown
        # mode leaves recording_cls unset, exactly as before.
        dispatch = {
            'exponential': ExponentialAverage,
            'simple': SimpleAverage,
        }
        if mode in dispatch:
            self.recording_cls = dispatch[mode]
        self.group = {}

    def record(self, value_dict):
        """Fold one dict of values into the group, creating keys on demand."""
        for key in value_dict:
            if key not in self.group:
                self.setup_new(key)
        for key, recording in self.group.items():
            fresh = value_dict.get(key)
            if fresh is None:
                # Key absent (or explicitly None): repeat the current value
                # so this history stays in step with the others.
                recording.record(recording.value)
            else:
                recording.record(fresh)

    def setup_new(self, key):
        """Create the recording object for a previously unseen key."""
        new_name = f'{self.groupname}_{key}'
        self.group[key] = self.recording_cls(new_name, self.history)

    def reset(self):
        for recording in self.group.values():
            recording.reset()

    def state_dict(self):
        """Serializable snapshot of the group and all its recordings."""
        group_ckpt = {
            key: recording.state_dict()
            for key, recording in self.group.items()
        }
        return {
            'groupname': self.groupname,
            'mode': self.mode,
            'group': group_ckpt,
        }

    def load_state_dict(self, ckpt):
        """Restore state previously produced by :meth:`state_dict`."""
        self.groupname = ckpt['groupname']
        self.mode = ckpt['mode']
        for key, recording_ckpt in ckpt['group'].items():
            self.setup_new(key)
            self.group[key].load_state_dict(recording_ckpt)

    def __str__(self):
        parts = [
            '{0}: {1:.2f}'.format(key, recording.value)
            for key, recording in self.group.items()
        ]
        return ' '.join(parts)

    def __len__(self):
        return len(self.group)

    @property
    def values(self):
        """Current value of every recording, keyed like the group."""
        return {
            key: recording.value
            for key, recording in self.group.items()
        }

    @property
    def visualize_filename(self):
        """Path that savefig() writes the group's plot to."""
        return os.path.join(
            self.path,
            'plots',
            f'{self.groupname}.jpg'
        )

    def plot_on_axes(self, key_dict, ax_array, label):
        """Plot each key onto a stable axis chosen via the shared key_dict."""
        for key, recording in self.group.items():
            if key not in key_dict:
                key_dict[key] = len(key_dict)
            recording.plot(ax_array[key_dict[key]], label)

    def savefig(self, fig):
        """Title, lay out and write the figure to visualize_filename."""
        fig.suptitle(self.groupname)
        fig.tight_layout()
        make_parent_dir(self.visualize_filename)
        fig.savefig(self.visualize_filename)
| [
"numpy.polyfit",
"os.path.join",
"numpy.array",
"numpy.zeros",
"utility.common.make_parent_dir"
] | [((1040, 1058), 'numpy.array', 'np.array', (['self.log'], {}), '(self.log)\n', (1048, 1058), True, 'import numpy as np\n'), ((1575, 1603), 'numpy.zeros', 'np.zeros', (['(dims + (capacity,))'], {}), '(dims + (capacity,))\n', (1583, 1603), True, 'import numpy as np\n'), ((1631, 1659), 'numpy.zeros', 'np.zeros', (['dims[0]'], {'dtype': 'int'}), '(dims[0], dtype=int)\n', (1639, 1659), True, 'import numpy as np\n'), ((5859, 5889), 'numpy.zeros', 'np.zeros', (['(self.dim, self.dim)'], {}), '((self.dim, self.dim))\n', (5867, 5889), True, 'import numpy as np\n'), ((8628, 8685), 'os.path.join', 'os.path.join', (['self.path', '"""plots"""', 'f"""{self.groupname}.jpg"""'], {}), "(self.path, 'plots', f'{self.groupname}.jpg')\n", (8640, 8685), False, 'import os\n'), ((9102, 9142), 'utility.common.make_parent_dir', 'make_parent_dir', (['self.visualize_filename'], {}), '(self.visualize_filename)\n', (9117, 9142), False, 'from utility.common import make_parent_dir\n'), ((6126, 6145), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (6136, 6145), True, 'import numpy as np\n')] |
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class PusherEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
mujoco_env.MujocoEnv.__init__(self, '3link_gripper_push.xml', 2)
def _step(self, a):
vec_1 = self.get_body_com("object")-self.get_body_com("distal_4")
vec_2 = self.get_body_com("object")-self.get_body_com("goal")
reward_near = - np.linalg.norm(vec_1)
reward_dist = - np.linalg.norm(vec_2)
reward_ctrl = - np.square(a).sum()
#the coefficients in the following line are ad hoc
reward = reward_dist + 0.1*reward_ctrl + 0.5*reward_near
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
done = False
return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
def viewer_setup(self):
##New camera positions
self.viewer.cam.trackbodyid=-1
self.viewer.cam.distance=3.0
self.viewer.cam.elevation=-90
self.viewer.cam.azimuth=90
self.viewer.cam.lookat[0]=.17 #2
self.viewer.cam.lookat[1]-=0
##Old camera positions
#self.viewer.cam.trackbodyid=0
#self.viewer.cam.distance = 4.0
def reset_model(self):
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
while True:
self.object = np.concatenate([self.np_random.uniform(low=-1, high=1, size=1),
self.np_random.uniform(low=0.2, high=1, size=1)])
#self.goal = self.np_random.uniform(low=-1, high=1, size=2)
self.goal = np.asarray([0.0, 1.0])
if np.linalg.norm(self.object) > 0.7 and np.linalg.norm(self.goal) > 0.7:
if np.linalg.norm(self.object-self.goal) > 0.5: break
qpos[-4:-2] = self.object
qpos[-2:] = self.goal
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
qvel[-4:] = 0
self.set_state(qpos, qvel)
#import IPython; IPython.embed()
return self._get_obs()
def _get_obs(self):
return np.concatenate([
self.model.data.qpos.flat[-4:],
self.model.data.qvel.flat[:-4],
self.get_body_com("distal_4"),
self.get_body_com("object"),
self.get_body_com("goal"),
])
# theta = self.model.data.qpos.flat[:-4]
# return np.concatenate([
# np.sin(theta),
# np.cos(theta),
# self.model.data.qpos.flat[-4:],
# self.model.data.qvel.flat,
# self.get_body_com("object"),
# self.get_body_com("goal"),
# self.get_body_com("distal_4"),
# ])
def get_eval(self):
#print('arm position ',self.get_body_com("object"))
#print('goal position',self.get_body_com("goal"))
self.arm_pos=self.get_body_com("distal_4")
self.goal_pos=self.get_body_com("goal")
self.eval=np.linalg.norm(self.arm_pos-self.goal_pos)
return self.eval
| [
"numpy.asarray",
"gym.envs.mujoco.mujoco_env.MujocoEnv.__init__",
"numpy.square",
"gym.utils.EzPickle.__init__",
"numpy.linalg.norm"
] | [((168, 197), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (191, 197), False, 'from gym import utils\n'), ((206, 270), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', '"""3link_gripper_push.xml"""', '(2)'], {}), "(self, '3link_gripper_push.xml', 2)\n", (235, 270), False, 'from gym.envs.mujoco import mujoco_env\n'), ((3083, 3127), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.arm_pos - self.goal_pos)'], {}), '(self.arm_pos - self.goal_pos)\n', (3097, 3127), True, 'import numpy as np\n'), ((464, 485), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_1'], {}), '(vec_1)\n', (478, 485), True, 'import numpy as np\n'), ((510, 531), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_2'], {}), '(vec_2)\n', (524, 531), True, 'import numpy as np\n'), ((1705, 1727), 'numpy.asarray', 'np.asarray', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1715, 1727), True, 'import numpy as np\n'), ((556, 568), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (565, 568), True, 'import numpy as np\n'), ((1743, 1770), 'numpy.linalg.norm', 'np.linalg.norm', (['self.object'], {}), '(self.object)\n', (1757, 1770), True, 'import numpy as np\n'), ((1781, 1806), 'numpy.linalg.norm', 'np.linalg.norm', (['self.goal'], {}), '(self.goal)\n', (1795, 1806), True, 'import numpy as np\n'), ((1833, 1872), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.object - self.goal)'], {}), '(self.object - self.goal)\n', (1847, 1872), True, 'import numpy as np\n')] |
import numpy as np
from scipy import sparse
from scipy import linalg
'''
function to construct the matrix operators for the eigenproblem
'''
def PWEM2D_TE(Kx, Ky, E_r):
'''
currently, we assume that mu_r is always homogeneous
:param Kx:
:param Ky:
:param E_r:
:return:
'''
A = Kx.todense()**2 + Ky.todense()**2; #can do this because Kx and Ky are diagonal
#A = A.todense(); #this can be bad, but for now...
B = E_r;
eigenvalues, eigenvectors = linalg.eig(A,B);
#get eigenvalues of this
return eigenvalues, eigenvectors, A;
def PWEM2D_TM(Kx, Ky, E_r):
'''
currently, we assume that mu_r is always homogeneous
:param Kx:
:param Ky:
:param E_r: fourier decomp conv matrix of eps_r
:return:
'''
#A = Kx.todense() ** 2 + Ky.todense() ** 2
Er_inv = np.linalg.inv(E_r);
A = Kx@Er_inv@Kx +Ky@Er_inv@Ky;
eigenvalues, eigenvectors = np.linalg.eig(A);
#get eigenvalues of this
return eigenvalues, eigenvectors,A;
| [
"scipy.linalg.eig",
"numpy.linalg.inv",
"numpy.linalg.eig"
] | [((491, 507), 'scipy.linalg.eig', 'linalg.eig', (['A', 'B'], {}), '(A, B)\n', (501, 507), False, 'from scipy import linalg\n'), ((837, 855), 'numpy.linalg.inv', 'np.linalg.inv', (['E_r'], {}), '(E_r)\n', (850, 855), True, 'import numpy as np\n'), ((925, 941), 'numpy.linalg.eig', 'np.linalg.eig', (['A'], {}), '(A)\n', (938, 941), True, 'import numpy as np\n')] |
"""
Работа с моделью и данными по электроэнергетике ОЭС Средняя Волга
"""
import pickle
import pandas as pd
import numpy as np
class Energy:
def __init__(self, filename='model_data.pkl'):
"""Загружаем заранее предобработанные данные и предобученную модель"""
with open(filename, 'rb') as handle:
self.data = pickle.load(handle)
self.df = data['data']
self.model = data['model']
def get_data(self, date='1979-01-01'):
"""
Датафрейм, начиная с указанной даты
"""
return np.expm1(self.df[self.df.DATE >= date][["USE_FACT"]])
def get_predict(self, date):
"""
Датафрейм, содержащий прогнозы, начиная с указанной даты, на последующие 5 дней
"""
dataframe = data['data']
X_data = self.df[self.df.DATE >= date] \
.drop(columns=["DATE", "USE_PRED1", "USE_PRED2", "USE_PRED3", "USE_PRED4", "USE_PRED5"])
return np.expm1(data['model'].predict(X_data))
| [
"pickle.load",
"numpy.expm1"
] | [((581, 634), 'numpy.expm1', 'np.expm1', (["self.df[self.df.DATE >= date][['USE_FACT']]"], {}), "(self.df[self.df.DATE >= date][['USE_FACT']])\n", (589, 634), True, 'import numpy as np\n'), ((351, 370), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (362, 370), False, 'import pickle\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.