code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from typing import Tuple, Optional
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
def beta_a_b_from_mode_concentration(mode: float, concentration: float) -> Tuple[float, float]:
    """
    Derive the alpha and beta shape parameters of a beta distribution
    from its mode and concentration.

    Parameters
    ----------
    mode : float
        The mode of the distribution
    concentration : float
        The concentration of the distribution

    Returns
    -------
    out: tuple of floats
        alpha and beta parameters
    """
    # With kappa = concentration: alpha = mode*(kappa-2) + 1,
    # beta = (1-mode)*(kappa-2) + 1.
    spread = concentration - 2.
    alpha = mode * spread + 1.
    beta = (1 - mode) * spread + 1.
    return alpha, beta
def beta_mode_concentration_from_a_b(a: float, b: float) -> Tuple[float, float]:
    """
    Computes the mode and concentration of a beta distribution from its
    shape parameters, alpha and beta.

    Parameters
    ----------
    a : float
        alpha parameter; must be > 1 for an interior mode to exist
    b : float
        beta parameter; must be > 1 for an interior mode to exist

    Returns
    -------
    out: tuple of floats
        Mode and concentration

    Raises
    ------
    ValueError
        If ``a <= 1`` or ``b <= 1``.
    """
    # The interior mode (a-1)/(a+b-2) only exists when both shapes exceed 1.
    # Validate explicitly: `assert` is silently stripped under `python -O`.
    if a <= 1 or b <= 1:
        raise ValueError('both a and b must be > 1, got a=%r, b=%r' % (a, b))
    return (a - 1.) / (a + b - 2.), a + b
def plot_beta(
        a: Optional[float] = None, b: Optional[float] = None, mode: Optional[float] = None,
        concentration: Optional[float] = None) -> None:
    """
    Plots a beta distribution.

    Either (`a`, `b`) or (`mode`, `concentration`) must be provided.

    Parameters
    ----------
    a : float, optional
        alpha parameter
    b : float, optional
        beta parameter
    mode : float, optional
        Mode
    concentration : float, optional
        Concentration

    Raises
    ------
    ValueError
        If neither (`a`, `b`) nor (`mode`, `concentration`) is given.
    """
    # Compare against None explicitly: the previous truthiness test wrongly
    # rejected legitimate zero values such as mode=0.0.
    if mode is not None and concentration is not None:
        a, b = beta_a_b_from_mode_concentration(mode, concentration)
    elif a is not None and b is not None:
        pass  # shape parameters supplied directly
    else:
        # ValueError is a subclass of Exception, so existing broad handlers
        # still catch it.
        raise ValueError('either `a` and `b` or `mode` and `concentration` must be passed')
    # Plot the pdf over the central 98% of the probability mass.
    x = np.linspace(scipy.stats.beta.ppf(0.01, a, b), scipy.stats.beta.ppf(0.99, a, b), 100)
    plt.figure()
    plt.plot(x, scipy.stats.beta.pdf(x, a, b), 'r-', lw=5, alpha=0.6, label='beta pdf')
def gamma_shape_rate_from_mode_sd(mode: float, sd: float) -> Tuple[float, float]:
    """
    Derive the shape and rate parameters of a gamma distribution from its
    mode and standard deviation.

    Parameters
    ----------
    mode : float
        Mode
    sd : float
        Standard deviation

    Returns
    -------
    out: tuple of floats
        Shape and rate
    """
    # Solve mode = (shape - 1)/rate and var = shape/rate^2 for (shape, rate).
    variance = sd ** 2
    rate = (mode + np.sqrt(mode ** 2 + 4 * variance)) / (2 * variance)
    shape = 1 + mode * rate
    return shape, rate
def plot_gamma(mode: float, sd: float) -> None:
    """
    Plots a gamma distribution.

    Parameters
    ----------
    mode : float
        Mode
    sd : float
        Standard deviation
    """
    shape, rate = gamma_shape_rate_from_mode_sd(mode, sd)
    # scipy parameterizes the gamma distribution by scale = 1 / rate
    scale = 1. / rate
    # Cover the central 98% of the probability mass (default 50 points).
    lo = scipy.stats.gamma.ppf(0.01, a=shape, scale=scale)
    hi = scipy.stats.gamma.ppf(0.99, a=shape, scale=scale)
    x = np.linspace(lo, hi)
    plt.figure()
    plt.plot(x, scipy.stats.gamma.pdf(x, a=shape, scale=scale), 'r-', lw=5, alpha=0.6, label='gamma pdf')
def plot_half_cauchy(scale: float) -> None:
    """
    Plot a half-Cauchy distribution.

    Parameters
    ----------
    scale : float
        Scale
    """
    dist = scipy.stats.halfcauchy
    # Cover the central 98% of the probability mass with 100 points.
    lo = dist.ppf(0.01, scale=scale)
    hi = dist.ppf(0.99, scale=scale)
    x = np.linspace(lo, hi, 100)
    plt.figure()
    plt.plot(x, dist.pdf(x, scale=scale), 'r-', lw=5, alpha=0.6, label='Half-Cauchy pdf')
| [
"matplotlib.pyplot.figure",
"numpy.sqrt"
] | [((1699, 1711), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1709, 1711), True, 'import matplotlib.pyplot as plt\n'), ((2621, 2633), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2631, 2633), True, 'import matplotlib.pyplot as plt\n'), ((2995, 3007), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3005, 3007), True, 'import matplotlib.pyplot as plt\n'), ((2144, 2176), 'numpy.sqrt', 'np.sqrt', (['(mode ** 2 + 4 * sd ** 2)'], {}), '(mode ** 2 + 4 * sd ** 2)\n', (2151, 2176), True, 'import numpy as np\n')] |
import os
import os.path as op
import sys
import json
import string
import numpy as np
import PropertySI as ps
#import tk
# Directory containing this module; property data lives in its 'PropData' subfolder.
thispath = op.abspath(op.dirname(__file__))
datapath = op.join(thispath, 'PropData')
# File extension used for generated property files.
suffix = '.json'
# Punctuation characters stripped from header tokens in make_jobj().
pun = string.punctuation
# Properties already present in the database (from PropertySI);
# presumably lowercase names to compare against file names — TODO confirm.
current = ps.prop_names()
#Takes the dict and makes it key value pairs
def make_jobj(newData):
    """
    Parse a whitespace-delimited property data file into key/value pairs.

    The first line is a header; tokens containing '[' (unit annotations
    such as '[K]') are dropped and surrounding punctuation is stripped
    from the remaining names.  The first data column is the temperature
    axis shared by every property column.

    Parameters
    ----------
    newData : str
        Path of the text file to parse.

    Returns
    -------
    dict
        Maps each property name to a 2-row nested list:
        [[temperatures...], [values...]].
    """
    # Read only the header line and close the file promptly; the previous
    # `tuple(open(newData))` read the whole file and leaked the handle.
    with open(newData) as fh:
        header = fh.readline()
    names = [tok.strip(string.punctuation)
             for tok in header.split() if '[' not in tok]
    vals = np.genfromtxt(newData, skip_header=1)
    temps = vals[:, 0]
    dc = dict()
    for i, nm in enumerate(names[1:]):
        dc[nm] = np.vstack((temps, vals[:, i + 1])).tolist()
    return dc
class GUI:
    """Tk front-end for contributing new property data (work in progress)."""

    def __init__(self, app):
        """
        Bind this GUI to a window and set its title.

        Parameters
        ----------
        app : object
            Presumably a tkinter root window — only its `title` method is
            used here; verify once the GUI is completed.
        """
        self.app = app
        app.title("THANK YOU FOR CONTRIBUTING TO SOLIDPROP!")
        # WIP: planned widgets (button, entry boxes, radio button, file
        # chooser) and JSON database loading were sketched here:
        #   db = op.join(thispath, 'solidProps.json')
        #   jLoad = json.load(db)
        #   gwon = np.genfromtxt(chosenFile, delim_whitespace=True)
        #   db[mats] = gwon  # also need to split by key
        #   jLoad.dumps(db)
if __name__ == "__main__":
    # GUI entry point is still WIP; for now the script runs in batch mode.
    # if len(sys.argv) < 2:
    #
    #     master = tk.Tk()
    #     GUI(master)
    #     master.mainloop()
    #
    # else:
    # Convert every raw .txt file in PropData/temp that is not already in the
    # database into a <NAME>.json property file under PropData.
    tDatapath = op.join(datapath, 'temp')
    for file in os.listdir(tDatapath):
        if file.lower().endswith(".txt") and file.lower() not in current:
            # e.g. 'copper.txt' -> 'COPPER'; used as the output file stem
            f = file.split('.')[0].upper()
            fnow = op.join(tDatapath, file)
            propFile = op.join(datapath, f.upper() + suffix)
            boy = make_jobj(fnow)
            with open(propFile, 'w') as dr:
                json.dump(boy, dr)
| [
"os.listdir",
"os.path.join",
"os.path.dirname",
"PropertySI.prop_names",
"numpy.vstack",
"numpy.genfromtxt",
"json.dump"
] | [((178, 207), 'os.path.join', 'op.join', (['thispath', '"""PropData"""'], {}), "(thispath, 'PropData')\n", (185, 207), True, 'import os.path as op\n'), ((260, 275), 'PropertySI.prop_names', 'ps.prop_names', ([], {}), '()\n', (273, 275), True, 'import PropertySI as ps\n'), ((145, 165), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (155, 165), True, 'import os.path as op\n'), ((467, 504), 'numpy.genfromtxt', 'np.genfromtxt', (['newData'], {'skip_header': '(1)'}), '(newData, skip_header=1)\n', (480, 504), True, 'import numpy as np\n'), ((1387, 1412), 'os.path.join', 'op.join', (['datapath', '"""temp"""'], {}), "(datapath, 'temp')\n", (1394, 1412), True, 'import os.path as op\n'), ((1433, 1454), 'os.listdir', 'os.listdir', (['tDatapath'], {}), '(tDatapath)\n', (1443, 1454), False, 'import os\n'), ((1623, 1647), 'os.path.join', 'op.join', (['tDatapath', 'file'], {}), '(tDatapath, file)\n', (1630, 1647), True, 'import os.path as op\n'), ((595, 626), 'numpy.vstack', 'np.vstack', (['(Td, vals[:, i + 1])'], {}), '((Td, vals[:, i + 1]))\n', (604, 626), True, 'import numpy as np\n'), ((1819, 1837), 'json.dump', 'json.dump', (['boy', 'dr'], {}), '(boy, dr)\n', (1828, 1837), False, 'import json\n')] |
import os
import re
import glob
import numpy as np
import matplotlib.pylab as plt
import matplotlib
from scipy.spatial import ConvexHull
from scipy.interpolate import interp1d
from itertools import chain, count
from collections import defaultdict
from os import makedirs
from os.path import isdir, isfile, join
from plot_util import *
from plot_other import *
# ------------------------------------------------------------------------------
# Embed TrueType fonts (type 42) in PDF/PS output so figure text remains
# editable in publication tools.
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# Maps internal method identifiers to the display labels used in legends.
method_labels_map = {
    'FH': 'FH',
    'FH_Minus': 'FH$^-$',
    'NH': 'NH',
    'FH_wo_S': 'FH-wo-S',
    'FH_Minus_wo_S': 'FH$^{-}$-wo-S',
    'NH_wo_S': 'NH-wo-S',
    'EH': 'EH',
    'Orig_EH': 'EH',
    'BH': 'BH',
    'Orig_BH': 'BH',
    'MH': 'MH',
    'Orig_MH': 'MH',
    'Random_Scan': 'Random-Scan',
    'Sorted_Scan': 'Sorted-Scan',
    'Linear': 'Linear-Scan'
}
# Maps internal dataset identifiers to display names.
dataset_labels_map = {
    'Yelp': 'Yelp',
    'Music': 'Music-100',
    'GloVe100': 'GloVe',
    'Tiny1M': 'Tiny-1M',
    'Msong': 'Msong',
    'MovieLens150': 'MovieLens',
    'Netflix300': 'Netflix',
    'Yahoo300': 'Yahoo',
    'Mnist': 'Mnist',
    'Sift': 'Sift',
    'Gaussian': 'Gaussian',
    'Gist': 'Gist',
}
# Datasets to plot (subset of dataset_labels_map keys).
# datasets = ['Yelp', 'GloVe100']
datasets = ['Yelp', 'Music', 'GloVe100', 'Tiny1M', 'Msong']
dataset_labels = [dataset_labels_map[dataset] for dataset in datasets]
# Per-method plot styling; methods are assigned colors/markers by position.
method_colors = ['red', 'blue', 'green', 'purple', 'deepskyblue', 'darkorange',
    'olive', 'deeppink', 'dodgerblue', 'dimgray']
method_markers = ['o', '^', 's', 'd', '*', 'p', 'x', 'v', 'D', '>']
# ------------------------------------------------------------------------------
def calc_width_and_height(n_datasets, n_rows):
    '''
    calc the width and height of figure
    :params n_datasets: number of datasets, i.e. sub-plot columns (integer)
    :params n_rows: number of sub-plot rows (integer)
    :returns: width and height of figure in inches
    '''
    # each column adds 3.333" and each row 2.5", plus fixed margins
    width = 0.55 + 3.333 * n_datasets
    height = 0.80 + 2.5 * n_rows
    return width, height
# ------------------------------------------------------------------------------
def get_filename(input_folder, dataset_name, method_name):
    '''
    build the result-file path '<input_folder><dataset>_<method>.out'
    :params input_folder: input folder (string)
    :params dataset_name: name of dataset (string)
    :params method_name: name of method (string)
    :returns: path of the result file (string)
    '''
    # input_folder is concatenated as-is, so it must carry its own
    # trailing separator if one is needed
    return '%s%s_%s.out' % (input_folder, dataset_name, method_name)
# ------------------------------------------------------------------------------
def parse_res(filename, chosen_top_k):
    '''
    parse result and get info such as ratio, qtime, recall, index_size,
    chosen_k, and the setting of different methods

    Yields one ((cand, params), record) pair per data row whose top-k equals
    `chosen_top_k`, where record = (top_k, chosen_k, memory_usage, ratio,
    qtime, recall, precision, fraction).

    NOTE(review): `chosen_k` is parsed from the "Indexing Time:" line, so
    despite its name it holds the indexing time in seconds — confirm against
    the writer of these .out files.

    An example input file:
    BH: m=2, l=8, b=0.90
    Indexing Time: 2.708386 Seconds
    Estimated Memory: 347.581116 MB
    cand=10000
    1       5.948251        2.960960        0.000000        0.000000        0.844941
    5       4.475743        2.954690        0.400000        0.000200        0.845279
    10      3.891794        2.953910        0.900000        0.000899        0.845703
    20      3.289422        2.963460        0.950000        0.001896        0.846547
    50      2.642880        2.985980        0.900000        0.004478        0.849082
    100     2.244649        3.012860        0.800000        0.007922        0.853307
    cand=50000
    1       3.905541        14.901140       6.000000        0.000120        4.222926
    5       2.863510        14.905370       4.800000        0.000480        4.223249
    10      2.626913        14.910181       5.300000        0.001061        4.223649
    20      2.392440        14.913270       4.850000        0.001941        4.224458
    50      2.081206        14.931760       4.560000        0.004558        4.227065
    100     1.852284        14.964050       4.500000        0.008987        4.231267
    '''
    # One regex per parameter that may appear on a settings line such as
    # "BH: m=2, l=8, b=0.90".
    setting_pattern = re.compile(r'\S+\s+.*=.*')
    setting_m = re.compile(r'.*(m)=(\d+).*')
    setting_l = re.compile(r'.*(l)=(\d+).*')
    setting_M = re.compile(r'.*(M)=(\d+).*')
    setting_s = re.compile(r'.*(s)=(\d+).*')
    setting_b = re.compile(r'.*(b)=(\d+\.\d+).*')
    param_settings = [setting_m, setting_l, setting_M, setting_s, setting_b]
    index_time_pattern = re.compile(r'Indexing Time: (\d+\.\d+).*')
    memory_usage_pattern = re.compile(r'Estimated Memory: (\d+\.\d+).*')
    candidate_pattern = re.compile(r'.*cand=(\d+).*')
    # Data row: "<top_k> <ratio> <qtime> <recall> <precision> <fraction>".
    records_pattern = re.compile(r'(\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)\s*(\d+\.\d+)')
    params = {}
    with open(filename, 'r') as f:
        for line in f:
            # settings line: collect every parameter it mentions
            res = setting_pattern.match(line)
            if res:
                for param_setting in param_settings:
                    tmp_res = param_setting.match(line)
                    if tmp_res is not None:
                        # print(tmp_res.groups())
                        params[tmp_res.group(1)] = tmp_res.group(2)
                # print("setting=", line)
            res = index_time_pattern.match(line)
            if res:
                # NOTE(review): despite the name, this is the indexing time
                chosen_k = float(res.group(1))
                # print('chosen_k=', chosen_k)
            res = memory_usage_pattern.match(line)
            if res:
                memory_usage = float(res.group(1))
                # print('memory_usage=', memory_usage)
            res = candidate_pattern.match(line)
            if res:
                cand = int(res.group(1))
                # print('cand=', cand)
            res = records_pattern.match(line)
            if res:
                top_k = int(res.group(1))
                ratio = float(res.group(2))
                qtime = float(res.group(3))
                recall = float(res.group(4))
                precision = float(res.group(5))
                fraction = float(res.group(6))
                # print(top_k, ratio, qtime, recall, precision, fraction)
                if top_k == chosen_top_k:
                    # Relies on the header lines appearing before any data
                    # row; otherwise chosen_k/memory_usage/cand are unbound.
                    yield ((cand, params), (top_k, chosen_k, memory_usage,
                        ratio, qtime, recall, precision, fraction))
# ------------------------------------------------------------------------------
# --- accessors for a record tuple (top_k, chosen_k/index_time, memory_usage,
#     ratio, qtime, recall, precision, fraction) as yielded by parse_res -----
def getindexingtime(res):
    """Indexing time from a record tuple."""
    return res[1]
def getindexsize(res):
    """Estimated memory (index size) from a record tuple."""
    return res[2]
def getratio(res):
    """Overall ratio from a record tuple."""
    return res[3]
def gettime(res):
    """Query time from a record tuple."""
    return res[4]
def getrecall(res):
    """Recall from a record tuple."""
    return res[5]
def getprecision(res):
    """Precision from a record tuple."""
    return res[6]
def getfraction(res):
    """Fraction from a record tuple."""
    return res[7]
# --- accessors for a full ((cand, params), record) pair from parse_res ------
def get_cand(res):
    """Candidate count, as int."""
    return int(res[0][0])
def get_l(res):
    """Parameter l from the settings dict, as int."""
    return int(res[0][1]['l'])
def get_m(res):
    """Parameter m from the settings dict, as int."""
    return int(res[0][1]['m'])
def get_s(res):
    """Parameter s from the settings dict, as int."""
    return int(res[0][1]['s'])
def get_time(res):
    """Query time from the record part, as float."""
    return float(res[1][4])
def get_recall(res):
    """Recall from the record part, as float."""
    return float(res[1][5])
def get_precision(res):
    """Precision from the record part, as float."""
    return float(res[1][6])
def get_fraction(res):
    """Fraction from the record part, as float."""
    return float(res[1][7])
# ------------------------------------------------------------------------------
def lower_bound_curve(xys):
    '''
    get the time-recall curve by convex hull and interpolation
    :params xys: 2-dim array of [time, recall] rows (np.array)
    :returns: time-recall curve with interpolation
    '''
    # add noise and conduct convex hull to find the curve
    # NOTE(review): the noise is unseeded, so results vary by ~1e-6 between
    # runs, and xys is mutated in place — confirm callers don't reuse it.
    eps = np.random.normal(size=xys.shape) * 1e-6
    xys += eps
    # print(xys)
    hull = ConvexHull(xys)
    hull_vs = xys[hull.vertices]
    # hull_vs = np.array(sorted(hull_vs, key=lambda x:x[1]))
    # print("hull_vs: ", hull_vs)
    # find max pair (maxv0) and min pairs (v1s) from the convex hull
    v1s = []
    maxv0 = [-1, -1]
    # walk consecutive hull vertices (wrapping around) and keep edges that
    # decrease in both coordinates, i.e. the lower frontier of the hull
    for v0, v1 in zip(hull_vs, chain(hull_vs[1:], hull_vs[:1])):
        # print(v0, v1)
        if v0[1] > v1[1] and v0[0] > v1[0]:
            v1s = np.append(v1s, v1, axis=-1)
            if v0[1] > maxv0[1]:
                maxv0 = v0
    # print(v1s, maxv0)
    # interpolation: vs[:, 1] -> recall (x), vs[:, 0] -> time (y)
    vs = np.array(np.append(maxv0, v1s)).reshape(-1, 2) # 2-dim array
    f = interp1d(vs[:, 1], vs[:, 0])
    minx = np.min(vs[:, 1]) + 1e-6
    maxx = np.max(vs[:, 1]) - 1e-6
    x = np.arange(minx, maxx, 1.0) # the interval of interpolation: 1.0
    y = list(map(f, x)) # get time (y) by interpolation
    return x, y
# ------------------------------------------------------------------------------
def upper_bound_curve(xys, interval, is_sorted):
    '''
    get the time-ratio and precision-recall curves by convex hull and interpolation
    :params xys: 2-dim array (np.array)
    :params interval: the interval of interpolation (float)
    :params is_sorted: sort the convex hull or not (boolean)
    :returns: curve with interpolation
    '''
    # add noise and conduct convex hull to find the curve
    # NOTE(review): the noise is unseeded and xys is mutated in place.
    eps = np.random.normal(size=xys.shape) * 1e-6
    xys += eps
    xs = xys[:, 0]
    # a hull needs at least 3 points; also skip degenerate all-zero tails
    if len(xs) > 2 and xs[-1] > 0:
        hull = ConvexHull(xys)
        hull_vs = xys[hull.vertices]
        if is_sorted:
            hull_vs = np.array(sorted(hull_vs, key=lambda x:x[1]))
        # (debug prints of hull_vs/v1s removed: they spammed stdout on
        # every call)
        # find max pair (maxv0) and min pairs (v1s) from the convex hull
        v1s = []
        maxv0 = [-1, -1]
        # keep edges that decrease in y while increasing in x: the upper
        # frontier of the hull
        for v0, v1 in zip(hull_vs, chain(hull_vs[1:], hull_vs[:1])):
            if v0[1] > v1[1] and v0[0] < v1[0]:
                v1s = np.append(v1s, v1, axis=-1)
                if v0[1] > maxv0[1]:
                    maxv0 = v0
        # interpolation: vs[:, 1] -> recall (x), vs[:, 0] -> time (y)
        vs = np.array(np.append(maxv0, v1s)).reshape(-1, 2) # 2-dim array
        if len(vs) >= 2:
            f = interp1d(vs[:, 1], vs[:, 0])
            minx = np.min(vs[:, 1]) + 1e-6
            maxx = np.max(vs[:, 1]) - 1e-6
            x = np.arange(minx, maxx, interval)
            y = list(map(f, x)) # get time (y) by interpolation
            return x, y
        else:
            # not enough frontier points to interpolate: return raw columns
            return xys[:, 0], xys[:, 1]
    else:
        return xys[:, 0], xys[:, 1]
# ------------------------------------------------------------------------------
def lower_bound_curve2(xys):
    '''
    get the querytime-indexsize and querytime-indextime curve by convex hull
    :params xys: 2-dim array (np.array)
    :returns: querytime-indexsize and querytime-indextime curve
    '''
    # add noise and conduct convex hull to find the curve
    # NOTE(review): unseeded noise (~1e-6) and xys is mutated in place.
    eps = np.random.normal(size=xys.shape) * 1e-6
    xys += eps
    # print(xys)
    xs = xys[:, 0]
    # a hull needs at least 3 points; also skip degenerate all-zero tails
    if len(xs) > 2 and xs[-1] > 0:
        # conduct convex hull to find the curve
        hull = ConvexHull(xys)
        hull_vs = xys[hull.vertices]
        # print("hull_vs: ", hull_vs)
        # visit each hull vertex with its two neighbours (wrapping around)
        # and keep those on the lower-left frontier
        ret_vs = []
        for v0, v1, v2 in zip(chain(hull_vs[-1:], hull_vs[:-1]), hull_vs, \
            chain(hull_vs[1:], hull_vs[:1])):
            # print(v0, v1, v2)
            if v0[0] < v1[0] or v1[0] < v2[0]:
                ret_vs = np.append(ret_vs, v1, axis=-1)
        # sort the results in ascending order of x without interpolation
        ret_vs = ret_vs.reshape((-1, 2))
        ret_vs = np.array(sorted(ret_vs, key=lambda x:x[0]))
        return ret_vs[:, 0], ret_vs[:, 1]
    else:
        return xys[:, 0], xys[:, 1]
# ------------------------------------------------------------------------------
def plot_time_fraction_recall(chosen_top_k, methods, input_folder, output_folder):
    '''
    draw the querytime-recall curves and fraction-recall curves for all methods
    on all datasets
    :params chosen_top_k: top_k value for drawing figure (integer)
    :params methods: a list of method (list)
    :params input_folder: input folder (string)
    :params output_folder: output folder (string)
    :returns: None
    '''
    n_datasets = len(datasets)
    fig_width, fig_height = calc_width_and_height(n_datasets, 2)
    plt_helper = PlotHelper(plt, fig_width, fig_height)
    plt_helper.plot_subplots_adjust() # define a window for a figure
    method_labels = [method_labels_map[method] for method in methods]
    for di, (dataset, dataset_label) in enumerate(zip(datasets, dataset_labels)):
        # set up two sub-figures: one dataset per column, recall on top row,
        # fraction on bottom row
        ax_recall = plt.subplot(2, n_datasets, di+1)
        plt.title(dataset_label) # title
        plt.xlabel('Recall (%)') # label of x-axis
        plt.xlim(0, 100) # limit (or range) of x-axis
        ax_fraction = plt.subplot(2, n_datasets, n_datasets+di+1)
        plt.xlabel('Recall (%)') # label of x-axis
        plt.xlim(0, 100) # limit (or range) of x-axis
        if di == 0:
            ax_recall.set_ylabel('Query Time (ms)')
            ax_fraction.set_ylabel('Fraction (%)')
        min_t_y = 1e9; max_t_y = -1e9
        min_f_y = 1e9; max_f_y = -1e9
        for method_idx, method, method_label, method_color, method_marker in \
            zip(count(), methods, method_labels, method_colors, method_markers):
            # get file name for this method on this dataset
            filename = get_filename(input_folder, dataset, method)
            # NOTE(review): get_filename always returns a string, so this
            # guard is dead code.
            if filename is None: continue
            print(filename)
            # get time-recall and fraction-recall results from disk
            time_recalls = []
            fraction_recalls = []
            for _,res in parse_res(filename, chosen_top_k):
                time_recalls += [[gettime(res), getrecall(res)]]
                fraction_recalls += [[getfraction(res), getrecall(res)]]
            time_recalls = np.array(time_recalls)
            fraction_recalls = np.array(fraction_recalls)
            # print(time_recalls, fraction_recalls)
            # get the time-recall curve by convex hull and interpolation
            lower_recalls, lower_times = lower_bound_curve(time_recalls)
            min_t_y = min(min_t_y, np.min(lower_times))
            max_t_y = max(max_t_y, np.max(lower_times))
            # only the first dataset's curves carry legend labels
            ax_recall.semilogy(lower_recalls, lower_times, '-',
                color=method_color, marker=method_marker,
                label=method_label if di==0 else "", markevery=10,
                markerfacecolor='none', markersize=10)
            # get the fraction-recall curve by convex hull
            lower_recalls, lower_fractions = lower_bound_curve(fraction_recalls)
            min_f_y = min(min_f_y, np.min(lower_fractions))
            max_f_y = max(max_f_y, np.max(lower_fractions))
            ax_fraction.semilogy(lower_recalls, lower_fractions, '-',
                color=method_color, marker=method_marker, label="",
                markevery=10, markerfacecolor='none', markersize=10,
                zorder=len(methods)-method_idx)
        # set up the limit (or range) of y-axis
        plt_helper.set_y_axis_log10(ax_recall, min_t_y, max_t_y)
        plt_helper.set_y_axis_log10(ax_fraction, min_f_y, max_f_y)
    # plot legend and save figure
    plt_helper.plot_fig_legend(ncol=len(methods))
    plt_helper.plot_and_save(output_folder, 'time_fraction_recall')
# ------------------------------------------------------------------------------
def plot_time_index_k(chosen_top_k, chosen_top_ks, recall_level, size_x_scales,\
    time_x_scales, methods, input_folder, output_folder):
    '''
    draw the querytime-indexsize curves and querytime-indexingtime curves for
    all methods on all datasets
    :params chosen_top_k: top_k value for drawing figure (integer)
    :params chosen_top_ks: a list of op_k values for drawing figure (list)
    :params recall_level: recall value for drawing figure (integer)
    :params size_x_scales: a list of x scales for index size (list)
    :params time_x_scales: a list of x scales for indexing time (list)
    :params methods: a list of method (list)
    :params input_folder: input folder (string)
    :params output_folder: output folder (string)
    :returns: None
    '''
    n_datasets = len(datasets)
    fig_width, fig_height = calc_width_and_height(n_datasets, 3)
    plt_helper = PlotHelper(plt, fig_width, fig_height)
    plt_helper.plot_subplots_adjust() # define a window for a figure
    method_labels = [method_labels_map[method] for method in methods]
    for di, (dataset, dataset_label) in enumerate(zip(datasets, dataset_labels)):
        # set up three sub-figures (rows: index size, indexing time, k)
        ax_size = plt.subplot(3, n_datasets, di+1)
        plt.title(dataset_label) # title
        plt.xlabel('Index Size (MB)') # label of x-axis
        ax_time = plt.subplot(3, n_datasets, n_datasets+di+1)
        plt.xlabel('Indexing Time (Seconds)') # label of x-axis
        ax_k = plt.subplot(3, n_datasets, 2*n_datasets+di+1)
        plt.xlabel('$k$') # label of x-axis
        if di == 0:
            ax_size.set_ylabel('Query Time (ms)')
            ax_time.set_ylabel('Query Time (ms)')
            ax_k.set_ylabel('Query Time (ms)')
        min_size_x = 1e9; max_size_x = -1e9
        min_size_y = 1e9; max_size_y = -1e9
        min_time_x = 1e9; max_time_x = -1e9
        min_time_y = 1e9; max_time_y = -1e9
        min_k_y = 1e9; max_k_y = -1e9
        for method_idx, method, method_label, method_color, method_marker in \
            zip(count(), methods, method_labels, method_colors, method_markers):
            # get file name for this method on this dataset
            filename = get_filename(input_folder, dataset, method)
            # NOTE(review): get_filename always returns a string; dead guard.
            if filename is None: continue
            print(filename)
            # ------------------------------------------------------------------
            #  query time vs. index size and indexing time
            # ------------------------------------------------------------------
            # get all results from disk, grouped by (index_time, index_size)
            chosen_ks_dict = defaultdict(list)
            for _,res in parse_res(filename, chosen_top_k):
                query_time = gettime(res)
                recall = getrecall(res)
                index_time = getindexingtime(res)
                index_size = getindexsize(res)
                chosen_ks_dict[(index_time, index_size)] += [[recall, query_time]]
            # get querytime-indexsize and querytime-indexingtime results if its
            # recall is higher than recall_level
            index_times, index_sizes, querytimes_at_recall = [], [], []
            for (index_time, index_size), recall_querytimes_ in chosen_ks_dict.items():
                # add [[0, 0]] for interpolation
                recall_querytimes_ = np.array([[0, 0]] + recall_querytimes_)
                recalls, query_times = lower_bound_curve2(recall_querytimes_)
                if np.max(recalls) > recall_level:
                    # get the estimated time at recall level by interpolation
                    f = interp1d(recalls, query_times)
                    querytime_at_recall = f(recall_level)
                    # update results
                    index_times += [index_time]
                    index_sizes += [index_size]
                    querytimes_at_recall += [querytime_at_recall]
                    # print('interp, ', querytime_at_recall, index_size, index_time)
            index_times = np.array(index_times)
            index_sizes = np.array(index_sizes)
            querytimes_at_recall = np.array(querytimes_at_recall)
            # get the querytime-indexsize curve by convex hull
            isize_qtime = np.zeros(shape=(len(index_sizes), 2))
            isize_qtime[:, 0] = index_sizes
            isize_qtime[:, 1] = querytimes_at_recall
            lower_isizes, lower_qtimes = lower_bound_curve2(isize_qtime)
            if len(lower_isizes) > 0:
                # print(method, lower_isizes, lower_qtimes)
                min_size_x = min(min_size_x, np.min(lower_isizes))
                max_size_x = max(max_size_x, np.max(lower_isizes))
                min_size_y = min(min_size_y, np.min(lower_qtimes))
                max_size_y = max(max_size_y, np.max(lower_qtimes))
                ax_size.semilogy(lower_isizes, lower_qtimes, '-', color=method_color,
                    marker=method_marker, label=method_label if di==0 else "",
                    markerfacecolor='none', markersize=10)
                # get the querytime-indextime curve by convex hull
                itime_qtime = np.zeros(shape=(len(index_times), 2))
                itime_qtime[:, 0] = index_times
                itime_qtime[:, 1] = querytimes_at_recall
                lower_itimes, lower_qtimes = lower_bound_curve2(itime_qtime)
                # print(method, lower_itimes, lower_qtimes)
                min_time_x = min(min_time_x, np.min(lower_itimes))
                max_time_x = max(max_time_x, np.max(lower_itimes))
                min_time_y = min(min_time_y, np.min(lower_qtimes))
                max_time_y = max(max_time_y, np.max(lower_qtimes))
                ax_time.semilogy(lower_itimes, lower_qtimes, '-', color=method_color,
                    marker=method_marker, label="", markerfacecolor='none',
                    markersize=10, zorder=len(methods)-method_idx)
            # ------------------------------------------------------------------
            #  query time vs. k
            # ------------------------------------------------------------------
            # get all results from disk
            chosen_ks_dict = defaultdict(list)
            # NOTE(review): this loop reuses (shadows) the chosen_top_k
            # parameter as its loop variable.
            for chosen_top_k in chosen_top_ks:
                for _,res in parse_res(filename, chosen_top_k):
                    query_time = gettime(res)
                    recall = getrecall(res)
                    chosen_ks_dict[chosen_top_k] += [[recall, query_time]]
            # get querytime-indexsize and querytime-indexingtime results if its
            # recall is higher than recall_level
            chosen_ks, querytimes_at_recall = [], []
            for chosen_k, recall_querytimes_ in chosen_ks_dict.items():
                # add [[0, 0]] for interpolation
                recall_querytimes_ = np.array([[0, 0]] + recall_querytimes_)
                recalls, query_times = lower_bound_curve2(recall_querytimes_)
                if np.max(recalls) > recall_level:
                    # get the estimated time at recall level by interpolation
                    f = interp1d(recalls, query_times)
                    querytime_at_recall = f(recall_level)
                    # update results
                    chosen_ks += [chosen_k]
                    querytimes_at_recall += [querytime_at_recall]
            chosen_ks = np.array(chosen_ks)
            querytimes_at_recall = np.array(querytimes_at_recall)
            min_k_y = min(min_k_y, np.min(querytimes_at_recall))
            max_k_y = max(max_k_y, np.max(querytimes_at_recall))
            ax_k.semilogy(chosen_ks, querytimes_at_recall, '-', color=method_color,
                marker=method_marker, label="", markerfacecolor='none',
                markersize=10, zorder=len(methods)-method_idx)
        # set up the limit (or range) of y-axis
        plt_helper.set_x_axis(ax_size, min_size_x, size_x_scales[di]*max_size_x)
        plt_helper.set_y_axis_log10(ax_size, min_size_y, max_size_y)
        plt_helper.set_x_axis(ax_time, min_time_x, time_x_scales[di]*max_time_x)
        plt_helper.set_y_axis_log10(ax_time, min_time_y, max_time_y)
        plt_helper.set_y_axis_log10(ax_k, min_k_y, max_k_y)
    # plot legend and save figure
    plt_helper.plot_fig_legend(ncol=len(methods))
    plt_helper.plot_and_save(output_folder, 'time_index_k_%d' % recall_level)
# ------------------------------------------------------------------------------
def plot_time_recall_indextime(chosen_top_k, recall_level, time_x_scales, \
    methods, input_folder, output_folder):
    '''
    draw the querytime-recall curves and querytime-indexingtime curves for
    all methods on all datasets
    :params chosen_top_k: top_k value for drawing figure (integer)
    :params recall_level: recall value for drawing figure (integer)
    :params time_x_scales: a list of x scales for indexing time (list)
    :params methods: a list of method (list)
    :params input_folder: input folder (string)
    :params output_folder: output folder (string)
    :returns: None
    '''
    n_datasets = len(datasets)
    fig_width, fig_height = calc_width_and_height(n_datasets, 2)
    plt_helper = PlotHelper(plt, fig_width, fig_height)
    plt_helper.plot_subplots_adjust() # define a window for a figure
    method_labels = [method_labels_map[method] for method in methods]
    for di, (dataset, dataset_label) in enumerate(zip(datasets, dataset_labels)):
        # set up sub-figure (recall on top row, indexing time on bottom row)
        ax_recall = plt.subplot(2, n_datasets, di+1)
        plt.title(dataset_label) # title
        plt.xlim(0, 100) # limit (or range) of x-axis
        plt.xlabel('Recall (%)') # label of x-axis
        ax_time = plt.subplot(2, n_datasets, n_datasets+di+1)
        plt.xlabel('Indexing Time (Seconds)') # label of x-axis
        if di == 0:
            ax_recall.set_ylabel('Query Time (ms)')
            ax_time.set_ylabel('Query Time (ms)')
        min_r_y = 1e9; max_r_y = -1e9
        min_t_x = 1e9; max_t_x = -1e9
        min_t_y = 1e9; max_t_y = -1e9
        for method_idx, method, method_label, method_color, method_marker in \
            zip(count(), methods, method_labels, method_colors, method_markers):
            # get file name for this method on this dataset
            filename = get_filename(input_folder, dataset, method)
            # NOTE(review): get_filename always returns a string; dead guard.
            if filename is None: continue
            print(filename)
            # ------------------------------------------------------------------
            #  query time vs. recall
            # ------------------------------------------------------------------
            time_recalls = []
            for _,res in parse_res(filename, chosen_top_k):
                time_recalls += [[gettime(res), getrecall(res)]]
            time_recalls = np.array(time_recalls)
            # print(time_recalls)
            # get the time-recall curve by convex hull and interpolation, where
            # lower_recalls -> x, lower_times -> y
            lower_recalls, lower_times = lower_bound_curve(time_recalls)
            min_r_y = min(min_r_y, np.min(lower_times))
            max_r_y = max(max_r_y, np.max(lower_times))
            ax_recall.semilogy(lower_recalls, lower_times, '-', color=method_color,
                marker=method_marker, label=method_label if di==0 else "",
                markevery=10, markerfacecolor='none', markersize=7,
                zorder=len(methods)-method_idx)
            # ------------------------------------------------------------------
            #  query time vs. indexing time
            # ------------------------------------------------------------------
            # get all results from disk, grouped by indexing time
            chosen_ks_dict = defaultdict(list)
            for _,res in parse_res(filename, chosen_top_k):
                query_time = gettime(res)
                recall = getrecall(res)
                index_time = getindexingtime(res)
                chosen_ks_dict[index_time] += [[recall, query_time]]
            # get querytime-indexsize and querytime-indexingtime results if its
            # recall is higher than recall_level
            index_times, querytimes_at_recall = [], []
            for index_time, recall_querytimes_ in chosen_ks_dict.items():
                # add [[0, 0]] for interpolation
                recall_querytimes_ = np.array([[0, 0]] +recall_querytimes_)
                recalls, query_times = lower_bound_curve2(recall_querytimes_)
                if np.max(recalls) > recall_level:
                    # get the estimated time at recall level by interpolation
                    f = interp1d(recalls, query_times)
                    querytime_at_recall = f(recall_level)
                    # update results
                    index_times += [index_time]
                    querytimes_at_recall += [querytime_at_recall]
                    # print('interp, ', querytime_at_recall, index_time)
            index_times = np.array(index_times)
            querytimes_at_recall = np.array(querytimes_at_recall)
            # get the querytime-indextime curve by convex hull
            itime_qtimes = np.zeros(shape=(len(index_times), 2))
            itime_qtimes[:, 0] = index_times
            itime_qtimes[:, 1] = querytimes_at_recall
            lower_itimes, lower_qtimes = lower_bound_curve2(itime_qtimes)
            if len(lower_itimes) > 0:
                # print(method, lower_itimes, lower_qtimes)
                min_t_x = min(min_t_x, np.min(lower_itimes))
                max_t_x = max(max_t_x, np.max(lower_itimes))
                min_t_y = min(min_t_y, np.min(lower_qtimes))
                max_t_y = max(max_t_y, np.max(lower_qtimes))
                ax_time.semilogy(lower_itimes, lower_qtimes, '-', color=method_color,
                    marker=method_marker, label="", markerfacecolor='none',
                    markersize=10, zorder=len(methods)-method_idx)
        # set up the limit (or range) of y-axis
        plt_helper.set_y_axis_log10(ax_recall, min_r_y, max_r_y)
        plt_helper.set_x_axis(ax_time, min_t_x, time_x_scales[di]*max_t_x)
        # use a tighter axis when the y-range spans less than one decade
        if max_t_y / min_t_y < 10:
            plt_helper.set_y_axis_close(ax_time, min_t_y, max_t_y)
        else:
            plt_helper.set_y_axis_log10(ax_time, min_t_y, max_t_y)
    # plot legend and save figure
    plt_helper.plot_fig_legend(ncol=len(methods))
    plt_helper.plot_and_save(output_folder, 'time_recall_indextime_%d' %
        recall_level)
# ------------------------------------------------------------------------------
def plot_params(chosen_top_k, dataset, input_folder, output_folder,
        fig_width=19.2, fig_height=3.0):
    r'''
    Draw the querytime-recall curves for the parameters of NH and FH.

    The figure contains five sub-plots sharing one row:
      1. NH, impact of t       (\lambda = 2d fixed)
      2. NH, impact of \lambda (t = 256 fixed)
      3. FH, impact of m       (l = 4 and \lambda = 2d fixed)
      4. FH, impact of l       (m = 16 and \lambda = 2d fixed)
      5. FH, impact of \lambda (m = 16 and l = 4 fixed)

    :params chosen_top_k: top_k value for drawing figure (integer)
    :params dataset: name of dataset (string)
    :params input_folder: input folder (string)
    :params output_folder: output folder (string)
    :params fig_width: the width of a figure (float)
    :params fig_height: the height of a figure (float)
    :returns: None
    '''
    plt.figure(figsize=(fig_width, fig_height))
    plt.rcParams.update({'font.size': 13})

    # convert absolute margins (inches) into figure-relative fractions
    left_space   = 0.80
    bottom_space = 0.55
    top_space    = 0.30  # 1.2
    right_space  = 0.25
    width_space  = 0.24
    height_space = 0.37
    bottom = bottom_space / fig_height
    top    = (fig_height - top_space) / fig_height
    left   = left_space / fig_width
    right  = (fig_width - right_space) / fig_width
    plt.subplots_adjust(bottom=bottom, top=top, left=left, right=right,
                        wspace=width_space, hspace=height_space)

    # --------------------------------------------------------------------------
    #  NH on t (\lambda = 2d)
    # --------------------------------------------------------------------------
    method = 'NH'
    ax = plt.subplot(1, 5, 1)
    ax.set_xlabel(r'Recall (%)')
    ax.set_ylabel(r'Query Time (ms)')  # only the first sub-plot shows the y label
    ax.set_title(r'Impact of $t$ for %s' % method, fontsize=16)
    filename = get_filename(input_folder, dataset, method)
    print(filename, method, dataset)

    fix_s = 2
    data = []
    for record in parse_res(filename, chosen_top_k):
        m      = get_m(record)
        s      = get_s(record)
        cand   = get_cand(record)
        time   = get_time(record)
        recall = get_recall(record)
        if s == fix_s:  # keep records with \lambda = 2d only
            data += [[m, s, cand, time, recall]]
    data = np.array(data)

    legend_name = ['$t=8$', '$t=16$', '$t=32$', '$t=64$', '$t=128$', '$t=256$']
    ms = [8, 16, 32, 64, 128, 256]
    for color, marker, m in zip(method_colors, method_markers, ms):
        data_mp = data[data[:, 0] == m]
        # last two columns are [time, recall]: x = recall, y = query time
        ax.plot(data_mp[:, -1], data_mp[:, -2], marker=marker, c=color,
                markerfacecolor='none', markersize=7)
    plt.legend(legend_name, loc='upper right', ncol=2, fontsize=13)
    plt.xlim(0, 100)
    ax.set_yscale('log')
    # plt.ylim(1e-2, 1e3)  # Yelp
    plt.ylim(1e-1, 1e5)    # GloVe100

    # --------------------------------------------------------------------------
    #  NH on \lambda (t = 256)
    # --------------------------------------------------------------------------
    method = 'NH'
    ax = plt.subplot(1, 5, 2)
    ax.set_xlabel(r'Recall (%)')
    ax.set_title(r'Impact of $\lambda$ for %s' % method, fontsize=16)
    filename = get_filename(input_folder, dataset, method)
    print(filename, method, dataset)

    fix_m = 256
    data = []
    for record in parse_res(filename, chosen_top_k):
        m      = get_m(record)
        s      = get_s(record)
        cand   = get_cand(record)
        time   = get_time(record)
        recall = get_recall(record)
        if m == fix_m:  # keep records with t = 256 only
            data += [[m, s, cand, time, recall]]
    data = np.array(data)

    legend_name = [r'$\lambda=1d$', r'$\lambda=2d$', r'$\lambda=4d$', r'$\lambda=8d$']
    ss = [1, 2, 4, 8]
    for color, marker, s in zip(method_colors, method_markers, ss):
        data_mp = data[data[:, 1] == s]
        ax.plot(data_mp[:, -1], data_mp[:, -2], marker=marker, c=color,
                markerfacecolor='none', markersize=7)
    plt.legend(legend_name, loc='upper right', ncol=2, fontsize=13)
    plt.xlim(0, 100)
    ax.set_yscale('log')
    # plt.ylim(1e-1, 1e2)  # Yelp
    plt.ylim(1e-1, 1e5)    # GloVe100

    # --------------------------------------------------------------------------
    #  FH on m (l = 4 and \lambda = 2d)
    # --------------------------------------------------------------------------
    method = 'FH'
    ax = plt.subplot(1, 5, 3)
    ax.set_xlabel(r'Recall (%)')
    ax.set_title(r'Impact of $m$ for %s' % method, fontsize=16)
    filename = get_filename(input_folder, dataset, method)
    print(filename, method, dataset)

    fix_l = 4
    fix_s = 2
    data = []
    for record in parse_res(filename, chosen_top_k):
        m      = get_m(record)
        l      = get_l(record)
        s      = get_s(record)
        cand   = get_cand(record)
        time   = get_time(record)
        recall = get_recall(record)
        if l == fix_l and s == fix_s:  # fix l = 4 and \lambda = 2d
            data += [[m, l, s, cand, time, recall]]
    data = np.array(data)

    legend_name = ['$m=8$', '$m=16$', '$m=32$', '$m=64$', '$m=128$', '$m=256$']
    ms = [8, 16, 32, 64, 128, 256]
    for color, marker, m in zip(method_colors, method_markers, ms):
        data_mp = data[data[:, 0] == m]
        ax.plot(data_mp[:, -1], data_mp[:, -2], marker=marker, c=color,
                markerfacecolor='none', markersize=7)
    plt.legend(legend_name, loc='upper right', ncol=2, fontsize=13)
    plt.xlim(0, 100)
    ax.set_yscale('log')
    # plt.ylim(1e-2, 1e2)  # Yelp
    plt.ylim(1e-2, 1e4)    # GloVe100

    # --------------------------------------------------------------------------
    #  FH on l (m = 16 and \lambda = 2d)
    # --------------------------------------------------------------------------
    method = 'FH'
    ax = plt.subplot(1, 5, 4)
    ax.set_xlabel(r'Recall (%)')
    ax.set_title(r'Impact of $l$ for %s' % method, fontsize=16)
    filename = get_filename(input_folder, dataset, method)
    print(filename, method, dataset)

    fix_m = 16
    fix_s = 2
    data = []
    for record in parse_res(filename, chosen_top_k):
        m      = get_m(record)
        l      = get_l(record)
        s      = get_s(record)
        cand   = get_cand(record)
        time   = get_time(record)
        recall = get_recall(record)
        if m == fix_m and s == fix_s:  # fix m = 16 and \lambda = 2d
            data += [[m, l, s, cand, time, recall]]
    data = np.array(data)

    legend_name = ['$l=2$', '$l=4$', '$l=6$', '$l=8$', '$l=10$']
    ls = [2, 4, 6, 8, 10]
    for color, marker, l in zip(method_colors, method_markers, ls):
        data_mp = data[data[:, 1] == l]
        ax.plot(data_mp[:, -1], data_mp[:, -2], marker=marker, c=color,
                markerfacecolor='none', markersize=7)
    plt.legend(legend_name, loc='upper right', ncol=2, fontsize=13)
    plt.xlim(0, 100)
    ax.set_yscale('log')
    # plt.ylim(1e-2, 1e2)  # Yelp
    plt.ylim(1e-2, 1e4)    # GloVe100

    # --------------------------------------------------------------------------
    #  FH on \lambda (m = 16 and l = 4)
    # --------------------------------------------------------------------------
    method = 'FH'
    ax = plt.subplot(1, 5, 5)
    ax.set_xlabel(r'Recall (%)')
    ax.set_title(r'Impact of $\lambda$ for %s' % method, fontsize=16)
    filename = get_filename(input_folder, dataset, method)
    print(filename, method, dataset)

    fix_m = 16
    fix_l = 4
    data = []
    for record in parse_res(filename, chosen_top_k):
        m      = get_m(record)
        l      = get_l(record)
        s      = get_s(record)
        cand   = get_cand(record)
        time   = get_time(record)
        recall = get_recall(record)
        if m == fix_m and l == fix_l:  # fix m = 16 and l = 4
            data += [[m, l, s, cand, time, recall]]
    data = np.array(data)

    legend_name = [r'$\lambda=1d$', r'$\lambda=2d$', r'$\lambda=4d$', r'$\lambda=8d$']
    ss = [1, 2, 4, 8]
    for color, marker, s in zip(method_colors, method_markers, ss):
        data_mp = data[data[:, 2] == s]
        ax.plot(data_mp[:, -1], data_mp[:, -2], marker=marker, c=color,
                markerfacecolor='none', markersize=7)
    plt.legend(legend_name, loc='upper right', ncol=2, fontsize=13)
    plt.xlim(0, 100)
    ax.set_yscale('log')
    # plt.ylim(1e-2, 1e2)  # Yelp
    plt.ylim(1e-2, 1e4)    # GloVe100

    # --------------------------------------------------------------------------
    #  save and show figure
    # --------------------------------------------------------------------------
    # compute the common output basename once instead of three times
    fig_base = join(output_folder, 'params_%s' % dataset)
    plt.savefig('%s.png' % fig_base)
    plt.savefig('%s.eps' % fig_base)
    plt.savefig('%s.pdf' % fig_base)
    plt.show()
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    chosen_top_k  = 10
    chosen_top_ks = [1, 5, 10, 20, 50, 100]

    # 1. plot curves of time vs. recall & fraction vs. recall
    input_folder  = "../results/"
    output_folder = "../figures/competitors/"
    methods = ['FH', 'FH_Minus', 'NH', 'BH', 'MH', 'Random_Scan', 'Sorted_Scan']
    plot_time_fraction_recall(chosen_top_k, methods, input_folder, output_folder)

    # 2. plot curves of time vs. index (size and time) & time vs. k
    input_folder  = "../results/"
    output_folder = "../figures/competitors/"
    methods = ['FH', 'FH_Minus', 'NH', 'BH', 'MH', 'Random_Scan', 'Sorted_Scan']
    size_x_scales = [0.3, 0.3, 0.3, 0.3, 0.3]
    time_x_scales = [0.1, 0.1, 0.1, 0.3, 0.05]
    recall_levels = [80, 70, 60, 50]
    for recall_level in recall_levels:
        plot_time_index_k(chosen_top_k, chosen_top_ks, recall_level,
            size_x_scales, time_x_scales, methods, input_folder, output_folder)

    # 3. plot curves of time vs. recall & time vs. indexing time
    input_folder  = "../results/"
    output_folder = "../figures/sampling/"
    methods = ['FH', 'FH_Minus', 'NH', 'FH_wo_S', 'FH_Minus_wo_S', 'NH_wo_S']
    time_x_scales = [0.2, 0.1, 0.1, 0.2, 0.02]
    recall_levels = [80, 70, 60, 50]
    for recall_level in recall_levels:
        plot_time_recall_indextime(chosen_top_k, recall_level, time_x_scales,
            methods, input_folder, output_folder)

    # 4. plot parameters
    chosen_top_k = 10
    datasets = ['GloVe100', 'Music', 'Msong', 'Yelp', 'Tiny1M']
    input_folder  = "../results/"
    output_folder = "../figures/param/"
    for dataset in datasets:
        plot_params(chosen_top_k, dataset, input_folder, output_folder)

    # 5. plot curves of time vs. recall & time vs. indexing time for normalized data
    input_folder  = "../results_normalized/"
    output_folder = "../figures/normalized/"
    methods = ['FH', 'NH', 'Orig_BH', 'Orig_MH']
    # each run uses its own recall level and x-axis scaling per dataset
    normalized_runs = [
        (50, [0.1, 0.1, 0.1, 0.05, 0.02]),
        (60, [0.1, 0.1, 0.1, 0.05, 0.02]),
        (70, [0.1, 0.2, 0.1, 0.1,  0.04]),
        (80, [0.1, 0.2, 0.1, 0.1,  0.08]),
    ]
    for recall_level, time_x_scales in normalized_runs:
        plot_time_recall_indextime(chosen_top_k, recall_level, time_x_scales,
            methods, input_folder, output_folder)
| [
"itertools.chain",
"matplotlib.pylab.xlim",
"re.compile",
"scipy.interpolate.interp1d",
"numpy.array",
"matplotlib.pylab.show",
"numpy.arange",
"matplotlib.pylab.figure",
"matplotlib.pylab.legend",
"matplotlib.pylab.title",
"numpy.max",
"matplotlib.pylab.rcParams.update",
"numpy.min",
"num... | [((3885, 3912), 're.compile', 're.compile', (['"""\\\\S+\\\\s+.*=.*"""'], {}), "('\\\\S+\\\\s+.*=.*')\n", (3895, 3912), False, 'import re\n'), ((3931, 3959), 're.compile', 're.compile', (['""".*(m)=(\\\\d+).*"""'], {}), "('.*(m)=(\\\\d+).*')\n", (3941, 3959), False, 'import re\n'), ((3977, 4005), 're.compile', 're.compile', (['""".*(l)=(\\\\d+).*"""'], {}), "('.*(l)=(\\\\d+).*')\n", (3987, 4005), False, 'import re\n'), ((4023, 4051), 're.compile', 're.compile', (['""".*(M)=(\\\\d+).*"""'], {}), "('.*(M)=(\\\\d+).*')\n", (4033, 4051), False, 'import re\n'), ((4069, 4097), 're.compile', 're.compile', (['""".*(s)=(\\\\d+).*"""'], {}), "('.*(s)=(\\\\d+).*')\n", (4079, 4097), False, 'import re\n'), ((4115, 4150), 're.compile', 're.compile', (['""".*(b)=(\\\\d+\\\\.\\\\d+).*"""'], {}), "('.*(b)=(\\\\d+\\\\.\\\\d+).*')\n", (4125, 4150), False, 'import re\n'), ((4259, 4303), 're.compile', 're.compile', (['"""Indexing Time: (\\\\d+\\\\.\\\\d+).*"""'], {}), "('Indexing Time: (\\\\d+\\\\.\\\\d+).*')\n", (4269, 4303), False, 'import re\n'), ((4330, 4377), 're.compile', 're.compile', (['"""Estimated Memory: (\\\\d+\\\\.\\\\d+).*"""'], {}), "('Estimated Memory: (\\\\d+\\\\.\\\\d+).*')\n", (4340, 4377), False, 'import re\n'), ((4404, 4433), 're.compile', 're.compile', (['""".*cand=(\\\\d+).*"""'], {}), "('.*cand=(\\\\d+).*')\n", (4414, 4433), False, 'import re\n'), ((4462, 4577), 're.compile', 're.compile', (['"""(\\\\d+)\\\\s*(\\\\d+\\\\.\\\\d+)\\\\s*(\\\\d+\\\\.\\\\d+)\\\\s*(\\\\d+\\\\.\\\\d+)\\\\s*(\\\\d+\\\\.\\\\d+)\\\\s*(\\\\d+\\\\.\\\\d+)"""'], {}), "(\n '(\\\\d+)\\\\s*(\\\\d+\\\\.\\\\d+)\\\\s*(\\\\d+\\\\.\\\\d+)\\\\s*(\\\\d+\\\\.\\\\d+)\\\\s*(\\\\d+\\\\.\\\\d+)\\\\s*(\\\\d+\\\\.\\\\d+)'\n )\n", (4472, 4577), False, 'import re\n'), ((7406, 7421), 'scipy.spatial.ConvexHull', 'ConvexHull', (['xys'], {}), '(xys)\n', (7416, 7421), False, 'from scipy.spatial import ConvexHull\n'), ((8096, 8124), 'scipy.interpolate.interp1d', 'interp1d', (['vs[:, 1]', 'vs[:, 0]'], {}), 
'(vs[:, 1], vs[:, 0])\n', (8104, 8124), False, 'from scipy.interpolate import interp1d\n'), ((8208, 8234), 'numpy.arange', 'np.arange', (['minx', 'maxx', '(1.0)'], {}), '(minx, maxx, 1.0)\n', (8217, 8234), True, 'import numpy as np\n'), ((30996, 31039), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(fig_width, fig_height)'}), '(figsize=(fig_width, fig_height))\n', (31006, 31039), True, 'import matplotlib.pylab as plt\n'), ((31045, 31083), 'matplotlib.pylab.rcParams.update', 'plt.rcParams.update', (["{'font.size': 13}"], {}), "({'font.size': 13})\n", (31064, 31083), True, 'import matplotlib.pylab as plt\n'), ((31432, 31545), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': 'bottom', 'top': 'top', 'left': 'left', 'right': 'right', 'wspace': 'width_space', 'hspace': 'height_space'}), '(bottom=bottom, top=top, left=left, right=right, wspace=\n width_space, hspace=height_space)\n', (31451, 31545), True, 'import matplotlib.pylab as plt\n'), ((31776, 31796), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(5)', '(1)'], {}), '(1, 5, 1)\n', (31787, 31796), True, 'import matplotlib.pylab as plt\n'), ((32374, 32388), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (32382, 32388), True, 'import numpy as np\n'), ((32747, 32810), 'matplotlib.pylab.legend', 'plt.legend', (['legend_name'], {'loc': '"""upper right"""', 'ncol': '(2)', 'fontsize': '(13)'}), "(legend_name, loc='upper right', ncol=2, fontsize=13)\n", (32757, 32810), True, 'import matplotlib.pylab as plt\n'), ((32816, 32832), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (32824, 32832), True, 'import matplotlib.pylab as plt\n'), ((32898, 32921), 'matplotlib.pylab.ylim', 'plt.ylim', (['(0.1)', '(100000.0)'], {}), '(0.1, 100000.0)\n', (32906, 32921), True, 'import matplotlib.pylab as plt\n'), ((33155, 33175), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(5)', '(2)'], {}), '(1, 5, 2)\n', (33166, 33175), True, 'import matplotlib.pylab as 
plt\n'), ((33721, 33735), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (33729, 33735), True, 'import numpy as np\n'), ((34085, 34148), 'matplotlib.pylab.legend', 'plt.legend', (['legend_name'], {'loc': '"""upper right"""', 'ncol': '(2)', 'fontsize': '(13)'}), "(legend_name, loc='upper right', ncol=2, fontsize=13)\n", (34095, 34148), True, 'import matplotlib.pylab as plt\n'), ((34154, 34170), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (34162, 34170), True, 'import matplotlib.pylab as plt\n'), ((34236, 34259), 'matplotlib.pylab.ylim', 'plt.ylim', (['(0.1)', '(100000.0)'], {}), '(0.1, 100000.0)\n', (34244, 34259), True, 'import matplotlib.pylab as plt\n'), ((34503, 34523), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(5)', '(3)'], {}), '(1, 5, 3)\n', (34514, 34523), True, 'import matplotlib.pylab as plt\n'), ((35124, 35138), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (35132, 35138), True, 'import numpy as np\n'), ((35498, 35561), 'matplotlib.pylab.legend', 'plt.legend', (['legend_name'], {'loc': '"""upper right"""', 'ncol': '(2)', 'fontsize': '(13)'}), "(legend_name, loc='upper right', ncol=2, fontsize=13)\n", (35508, 35561), True, 'import matplotlib.pylab as plt\n'), ((35567, 35583), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (35575, 35583), True, 'import matplotlib.pylab as plt\n'), ((35649, 35672), 'matplotlib.pylab.ylim', 'plt.ylim', (['(0.01)', '(10000.0)'], {}), '(0.01, 10000.0)\n', (35657, 35672), True, 'import matplotlib.pylab as plt\n'), ((35917, 35937), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(5)', '(4)'], {}), '(1, 5, 4)\n', (35928, 35937), True, 'import matplotlib.pylab as plt\n'), ((36540, 36554), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (36548, 36554), True, 'import numpy as np\n'), ((36894, 36957), 'matplotlib.pylab.legend', 'plt.legend', (['legend_name'], {'loc': '"""upper right"""', 'ncol': '(2)', 'fontsize': '(13)'}), 
"(legend_name, loc='upper right', ncol=2, fontsize=13)\n", (36904, 36957), True, 'import matplotlib.pylab as plt\n'), ((36963, 36979), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (36971, 36979), True, 'import matplotlib.pylab as plt\n'), ((37045, 37068), 'matplotlib.pylab.ylim', 'plt.ylim', (['(0.01)', '(10000.0)'], {}), '(0.01, 10000.0)\n', (37053, 37068), True, 'import matplotlib.pylab as plt\n'), ((37312, 37332), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(5)', '(5)'], {}), '(1, 5, 5)\n', (37323, 37332), True, 'import matplotlib.pylab as plt\n'), ((37940, 37954), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (37948, 37954), True, 'import numpy as np\n'), ((38333, 38396), 'matplotlib.pylab.legend', 'plt.legend', (['legend_name'], {'loc': '"""upper right"""', 'ncol': '(2)', 'fontsize': '(13)'}), "(legend_name, loc='upper right', ncol=2, fontsize=13)\n", (38343, 38396), True, 'import matplotlib.pylab as plt\n'), ((38402, 38418), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (38410, 38418), True, 'import matplotlib.pylab as plt\n'), ((38484, 38507), 'matplotlib.pylab.ylim', 'plt.ylim', (['(0.01)', '(10000.0)'], {}), '(0.01, 10000.0)\n', (38492, 38507), True, 'import matplotlib.pylab as plt\n'), ((38931, 38941), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (38939, 38941), True, 'import matplotlib.pylab as plt\n'), ((7314, 7346), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'xys.shape'}), '(size=xys.shape)\n', (7330, 7346), True, 'import numpy as np\n'), ((7693, 7724), 'itertools.chain', 'chain', (['hull_vs[1:]', 'hull_vs[:1]'], {}), '(hull_vs[1:], hull_vs[:1])\n', (7698, 7724), False, 'from itertools import chain, count\n'), ((8139, 8155), 'numpy.min', 'np.min', (['vs[:, 1]'], {}), '(vs[:, 1])\n', (8145, 8155), True, 'import numpy as np\n'), ((8175, 8191), 'numpy.max', 'np.max', (['vs[:, 1]'], {}), '(vs[:, 1])\n', (8181, 8191), True, 'import numpy as np\n'), ((8872, 
8904), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'xys.shape'}), '(size=xys.shape)\n', (8888, 8904), True, 'import numpy as np\n'), ((9024, 9039), 'scipy.spatial.ConvexHull', 'ConvexHull', (['xys'], {}), '(xys)\n', (9034, 9039), False, 'from scipy.spatial import ConvexHull\n'), ((10576, 10608), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'xys.shape'}), '(size=xys.shape)\n', (10592, 10608), True, 'import numpy as np\n'), ((10773, 10788), 'scipy.spatial.ConvexHull', 'ConvexHull', (['xys'], {}), '(xys)\n', (10783, 10788), False, 'from scipy.spatial import ConvexHull\n'), ((12408, 12442), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', 'n_datasets', '(di + 1)'], {}), '(2, n_datasets, di + 1)\n', (12419, 12442), True, 'import matplotlib.pylab as plt\n'), ((12450, 12474), 'matplotlib.pylab.title', 'plt.title', (['dataset_label'], {}), '(dataset_label)\n', (12459, 12474), True, 'import matplotlib.pylab as plt\n'), ((12503, 12527), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Recall (%)"""'], {}), "('Recall (%)')\n", (12513, 12527), True, 'import matplotlib.pylab as plt\n'), ((12566, 12582), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (12574, 12582), True, 'import matplotlib.pylab as plt\n'), ((12664, 12711), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', 'n_datasets', '(n_datasets + di + 1)'], {}), '(2, n_datasets, n_datasets + di + 1)\n', (12675, 12711), True, 'import matplotlib.pylab as plt\n'), ((12717, 12741), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Recall (%)"""'], {}), "('Recall (%)')\n", (12727, 12741), True, 'import matplotlib.pylab as plt\n'), ((12780, 12796), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (12788, 12796), True, 'import matplotlib.pylab as plt\n'), ((16663, 16697), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', 'n_datasets', '(di + 1)'], {}), '(3, n_datasets, di + 1)\n', (16674, 16697), True, 'import matplotlib.pylab as plt\n'), ((16705, 
16729), 'matplotlib.pylab.title', 'plt.title', (['dataset_label'], {}), '(dataset_label)\n', (16714, 16729), True, 'import matplotlib.pylab as plt\n'), ((16758, 16787), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Index Size (MB)"""'], {}), "('Index Size (MB)')\n", (16768, 16787), True, 'import matplotlib.pylab as plt\n'), ((16833, 16880), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', 'n_datasets', '(n_datasets + di + 1)'], {}), '(3, n_datasets, n_datasets + di + 1)\n', (16844, 16880), True, 'import matplotlib.pylab as plt\n'), ((16886, 16923), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Indexing Time (Seconds)"""'], {}), "('Indexing Time (Seconds)')\n", (16896, 16923), True, 'import matplotlib.pylab as plt\n'), ((16960, 17011), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', 'n_datasets', '(2 * n_datasets + di + 1)'], {}), '(3, n_datasets, 2 * n_datasets + di + 1)\n', (16971, 17011), True, 'import matplotlib.pylab as plt\n'), ((17015, 17032), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""$k$"""'], {}), "('$k$')\n", (17025, 17032), True, 'import matplotlib.pylab as plt\n'), ((25178, 25212), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', 'n_datasets', '(di + 1)'], {}), '(2, n_datasets, di + 1)\n', (25189, 25212), True, 'import matplotlib.pylab as plt\n'), ((25220, 25244), 'matplotlib.pylab.title', 'plt.title', (['dataset_label'], {}), '(dataset_label)\n', (25229, 25244), True, 'import matplotlib.pylab as plt\n'), ((25273, 25289), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (25281, 25289), True, 'import matplotlib.pylab as plt\n'), ((25347, 25371), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Recall (%)"""'], {}), "('Recall (%)')\n", (25357, 25371), True, 'import matplotlib.pylab as plt\n'), ((25422, 25469), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', 'n_datasets', '(n_datasets + di + 1)'], {}), '(2, n_datasets, n_datasets + di + 1)\n', (25433, 25469), True, 'import matplotlib.pylab as plt\n'), ((25475, 
25512), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Indexing Time (Seconds)"""'], {}), "('Indexing Time (Seconds)')\n", (25485, 25512), True, 'import matplotlib.pylab as plt\n'), ((7816, 7843), 'numpy.append', 'np.append', (['v1s', 'v1'], {'axis': '(-1)'}), '(v1s, v1, axis=-1)\n', (7825, 7843), True, 'import numpy as np\n'), ((9362, 9393), 'itertools.chain', 'chain', (['hull_vs[1:]', 'hull_vs[:1]'], {}), '(hull_vs[1:], hull_vs[:1])\n', (9367, 9393), False, 'from itertools import chain, count\n'), ((9813, 9841), 'scipy.interpolate.interp1d', 'interp1d', (['vs[:, 1]', 'vs[:, 0]'], {}), '(vs[:, 1], vs[:, 0])\n', (9821, 9841), False, 'from scipy.interpolate import interp1d\n'), ((9949, 9980), 'numpy.arange', 'np.arange', (['minx', 'maxx', 'interval'], {}), '(minx, maxx, interval)\n', (9958, 9980), True, 'import numpy as np\n'), ((10928, 10961), 'itertools.chain', 'chain', (['hull_vs[-1:]', 'hull_vs[:-1]'], {}), '(hull_vs[-1:], hull_vs[:-1])\n', (10933, 10961), False, 'from itertools import chain, count\n'), ((10987, 11018), 'itertools.chain', 'chain', (['hull_vs[1:]', 'hull_vs[:1]'], {}), '(hull_vs[1:], hull_vs[:1])\n', (10992, 11018), False, 'from itertools import chain, count\n'), ((13150, 13157), 'itertools.count', 'count', ([], {}), '()\n', (13155, 13157), False, 'from itertools import chain, count\n'), ((13802, 13824), 'numpy.array', 'np.array', (['time_recalls'], {}), '(time_recalls)\n', (13810, 13824), True, 'import numpy as np\n'), ((13857, 13883), 'numpy.array', 'np.array', (['fraction_recalls'], {}), '(fraction_recalls)\n', (13865, 13883), True, 'import numpy as np\n'), ((17566, 17573), 'itertools.count', 'count', ([], {}), '()\n', (17571, 17573), False, 'from itertools import chain, count\n'), ((18131, 18148), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (18142, 18148), False, 'from collections import defaultdict\n'), ((19566, 19587), 'numpy.array', 'np.array', (['index_times'], {}), '(index_times)\n', (19574, 19587), True, 
'import numpy as np\n'), ((19615, 19636), 'numpy.array', 'np.array', (['index_sizes'], {}), '(index_sizes)\n', (19623, 19636), True, 'import numpy as np\n'), ((19673, 19703), 'numpy.array', 'np.array', (['querytimes_at_recall'], {}), '(querytimes_at_recall)\n', (19681, 19703), True, 'import numpy as np\n'), ((21784, 21801), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (21795, 21801), False, 'from collections import defaultdict\n'), ((22984, 23003), 'numpy.array', 'np.array', (['chosen_ks'], {}), '(chosen_ks)\n', (22992, 23003), True, 'import numpy as np\n'), ((23040, 23070), 'numpy.array', 'np.array', (['querytimes_at_recall'], {}), '(querytimes_at_recall)\n', (23048, 23070), True, 'import numpy as np\n'), ((25874, 25881), 'itertools.count', 'count', ([], {}), '()\n', (25879, 25881), False, 'from itertools import chain, count\n'), ((26533, 26555), 'numpy.array', 'np.array', (['time_recalls'], {}), '(time_recalls)\n', (26541, 26555), True, 'import numpy as np\n'), ((27481, 27498), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (27492, 27498), False, 'from collections import defaultdict\n'), ((28799, 28820), 'numpy.array', 'np.array', (['index_times'], {}), '(index_times)\n', (28807, 28820), True, 'import numpy as np\n'), ((28857, 28887), 'numpy.array', 'np.array', (['querytimes_at_recall'], {}), '(querytimes_at_recall)\n', (28865, 28887), True, 'import numpy as np\n'), ((38738, 38780), 'os.path.join', 'join', (['output_folder', "('params_%s' % dataset)"], {}), "(output_folder, 'params_%s' % dataset)\n", (38742, 38780), False, 'from os.path import isdir, isfile, join\n'), ((38810, 38852), 'os.path.join', 'join', (['output_folder', "('params_%s' % dataset)"], {}), "(output_folder, 'params_%s' % dataset)\n", (38814, 38852), False, 'from os.path import isdir, isfile, join\n'), ((38882, 38924), 'os.path.join', 'join', (['output_folder', "('params_%s' % dataset)"], {}), "(output_folder, 'params_%s' % dataset)\n", (38886, 
38924), False, 'from os.path import isdir, isfile, join\n'), ((8035, 8056), 'numpy.append', 'np.append', (['maxv0', 'v1s'], {}), '(maxv0, v1s)\n', (8044, 8056), True, 'import numpy as np\n'), ((9497, 9524), 'numpy.append', 'np.append', (['v1s', 'v1'], {'axis': '(-1)'}), '(v1s, v1, axis=-1)\n', (9506, 9524), True, 'import numpy as np\n'), ((9864, 9880), 'numpy.min', 'np.min', (['vs[:, 1]'], {}), '(vs[:, 1])\n', (9870, 9880), True, 'import numpy as np\n'), ((9908, 9924), 'numpy.max', 'np.max', (['vs[:, 1]'], {}), '(vs[:, 1])\n', (9914, 9924), True, 'import numpy as np\n'), ((11130, 11160), 'numpy.append', 'np.append', (['ret_vs', 'v1'], {'axis': '(-1)'}), '(ret_vs, v1, axis=-1)\n', (11139, 11160), True, 'import numpy as np\n'), ((14135, 14154), 'numpy.min', 'np.min', (['lower_times'], {}), '(lower_times)\n', (14141, 14154), True, 'import numpy as np\n'), ((14192, 14211), 'numpy.max', 'np.max', (['lower_times'], {}), '(lower_times)\n', (14198, 14211), True, 'import numpy as np\n'), ((14644, 14667), 'numpy.min', 'np.min', (['lower_fractions'], {}), '(lower_fractions)\n', (14650, 14667), True, 'import numpy as np\n'), ((14705, 14728), 'numpy.max', 'np.max', (['lower_fractions'], {}), '(lower_fractions)\n', (14711, 14728), True, 'import numpy as np\n'), ((18866, 18905), 'numpy.array', 'np.array', (['([[0, 0]] + recall_querytimes_)'], {}), '([[0, 0]] + recall_querytimes_)\n', (18874, 18905), True, 'import numpy as np\n'), ((22437, 22476), 'numpy.array', 'np.array', (['([[0, 0]] + recall_querytimes_)'], {}), '([[0, 0]] + recall_querytimes_)\n', (22445, 22476), True, 'import numpy as np\n'), ((23109, 23137), 'numpy.min', 'np.min', (['querytimes_at_recall'], {}), '(querytimes_at_recall)\n', (23115, 23137), True, 'import numpy as np\n'), ((23175, 23203), 'numpy.max', 'np.max', (['querytimes_at_recall'], {}), '(querytimes_at_recall)\n', (23181, 23203), True, 'import numpy as np\n'), ((26838, 26857), 'numpy.min', 'np.min', (['lower_times'], {}), '(lower_times)\n', (26844, 
26857), True, 'import numpy as np\n'), ((26895, 26914), 'numpy.max', 'np.max', (['lower_times'], {}), '(lower_times)\n', (26901, 26914), True, 'import numpy as np\n'), ((28125, 28164), 'numpy.array', 'np.array', (['([[0, 0]] + recall_querytimes_)'], {}), '([[0, 0]] + recall_querytimes_)\n', (28133, 28164), True, 'import numpy as np\n'), ((9718, 9739), 'numpy.append', 'np.append', (['maxv0', 'v1s'], {}), '(maxv0, v1s)\n', (9727, 9739), True, 'import numpy as np\n'), ((19007, 19022), 'numpy.max', 'np.max', (['recalls'], {}), '(recalls)\n', (19013, 19022), True, 'import numpy as np\n'), ((19144, 19174), 'scipy.interpolate.interp1d', 'interp1d', (['recalls', 'query_times'], {}), '(recalls, query_times)\n', (19152, 19174), False, 'from scipy.interpolate import interp1d\n'), ((20166, 20186), 'numpy.min', 'np.min', (['lower_isizes'], {}), '(lower_isizes)\n', (20172, 20186), True, 'import numpy as np\n'), ((20234, 20254), 'numpy.max', 'np.max', (['lower_isizes'], {}), '(lower_isizes)\n', (20240, 20254), True, 'import numpy as np\n'), ((20302, 20322), 'numpy.min', 'np.min', (['lower_qtimes'], {}), '(lower_qtimes)\n', (20308, 20322), True, 'import numpy as np\n'), ((20370, 20390), 'numpy.max', 'np.max', (['lower_qtimes'], {}), '(lower_qtimes)\n', (20376, 20390), True, 'import numpy as np\n'), ((21054, 21074), 'numpy.min', 'np.min', (['lower_itimes'], {}), '(lower_itimes)\n', (21060, 21074), True, 'import numpy as np\n'), ((21122, 21142), 'numpy.max', 'np.max', (['lower_itimes'], {}), '(lower_itimes)\n', (21128, 21142), True, 'import numpy as np\n'), ((21190, 21210), 'numpy.min', 'np.min', (['lower_qtimes'], {}), '(lower_qtimes)\n', (21196, 21210), True, 'import numpy as np\n'), ((21258, 21278), 'numpy.max', 'np.max', (['lower_qtimes'], {}), '(lower_qtimes)\n', (21264, 21278), True, 'import numpy as np\n'), ((22578, 22593), 'numpy.max', 'np.max', (['recalls'], {}), '(recalls)\n', (22584, 22593), True, 'import numpy as np\n'), ((22715, 22745), 'scipy.interpolate.interp1d', 
'interp1d', (['recalls', 'query_times'], {}), '(recalls, query_times)\n', (22723, 22745), False, 'from scipy.interpolate import interp1d\n'), ((28281, 28296), 'numpy.max', 'np.max', (['recalls'], {}), '(recalls)\n', (28287, 28296), True, 'import numpy as np\n'), ((28438, 28468), 'scipy.interpolate.interp1d', 'interp1d', (['recalls', 'query_times'], {}), '(recalls, query_times)\n', (28446, 28468), False, 'from scipy.interpolate import interp1d\n'), ((29338, 29358), 'numpy.min', 'np.min', (['lower_itimes'], {}), '(lower_itimes)\n', (29344, 29358), True, 'import numpy as np\n'), ((29400, 29420), 'numpy.max', 'np.max', (['lower_itimes'], {}), '(lower_itimes)\n', (29406, 29420), True, 'import numpy as np\n'), ((29462, 29482), 'numpy.min', 'np.min', (['lower_qtimes'], {}), '(lower_qtimes)\n', (29468, 29482), True, 'import numpy as np\n'), ((29524, 29544), 'numpy.max', 'np.max', (['lower_qtimes'], {}), '(lower_qtimes)\n', (29530, 29544), True, 'import numpy as np\n')] |
"""URBAN-SED Dataset Loader
.. admonition:: Dataset Info
:class: dropdown
URBAN-SED
=========
URBAN-SED (c) by <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
URBAN-SED is licensed under a Creative Commons Attribution 4.0 International License (CC BY 4.0).
You should have received a copy of the license along with this work. If not, see <http://creativecommons.org/licenses/by/4.0/>.
Created By
----------
<NAME>*^, <NAME>*, <NAME>*, <NAME>*, and <NAME>*.
* Music and Audio Research Lab (MARL), New York University, USA
^ Center for Urban Science and Progress (CUSP), New York University, USA
http://urbansed.weebly.com
http://steinhardt.nyu.edu/marl/
http://cusp.nyu.edu/
Version 2.0.0
-------------
- Audio files generated with scaper v0.1.0 (identical to audio in URBAN-SED 1.0)
- Jams annotation files generated with scaper v0.1.0 and updated to comply with scaper v1.0.0 (namespace changed from "sound_event" to "scaper")
- NOTE: due to updates to the scaper library, regenerating the audio from the jams annotations using scaper >=1.0.0 will result in audio files that are highly similar, but not identical, to the audio files provided. This is because the provided audio files were generated with scaper v0.1.0 and have been purposely kept the same as in URBAN-SED v1.0 to ensure comparability to previously published results.
Description
-----------
URBAN-SED is a dataset of 10,000 soundscapes with sound event annotations generated using scaper (github.com/justinsalamon/scaper).
A detailed description of the dataset is provided in the following article:
.. code-block:: latex
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. "Scaper: A Library for Soundscape Synthesis and Augmentation",
In IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA), New Paltz, NY, USA, Oct. 2017.
A summary is provided here:
    * The dataset includes 10,000 soundscapes, totaling almost 30 hours and including close to 50,000 annotated sound events
* Complete annotations are provided in JAMS format, and simplified annotations are provided as tab-separated text files
* Every soundscape is 10 seconds long and has a background of Brownian noise resembling the typical "hum" often heard in urban environments
* Every soundscape contains between 1-9 sound events from the following classes:
* air_conditioner, car_horn, children_playing, dog_bark, drilling, engine_idling, gun_shot, jackhammer, siren and street_music
* The source material for the sound events are the clips from the UrbanSound8K dataset
* URBAN-SED comes pre-sorted into three sets: train, validate and test:
* There are 6000 soundscapes in the training set, generated using clips from folds 1-6 in UrbanSound8K
* There are 2000 soundscapes in the validation set, generated using clips from folds 7-8 in UrbanSound8K
* There are 2000 soundscapes in the test set, generated using clips from folds 9-10 in UrbanSound8K
* Further details about how the soundscapes were generated including the distribution of sound event start times, durations, signal-to-noise ratios, pitch shifting, time stretching, and the range of sound event polyphony (overlap) can be found in Section 3 of the aforementioned scaper paper
* The scripts used to generated URBAN-SED using scaper can be found here: https://github.com/justinsalamon/scaper_waspaa2017/tree/master/notebooks
Audio Files Included
--------------------
* 10,000 synthesized soundscapes in single channel (mono), 44100Hz, 16-bit, WAV format.
* The files are split into a training set (6000), validation set (2000) and test set (2000).
Annotation Files Included
-------------------------
The annotations list the sound events that occur in every soundscape. The annotations are "strong", meaning for every
sound event the annotations include (at least) the start time, end time, and label of the sound event. Sound events
come from the following 10 labels (categories):
* air_conditioner, car_horn, children_playing, dog_bark, drilling, engine_idling, gun_shot, jackhammer,
siren, street_music
There are two types of annotations: full annotations in JAMS format, and simplified annotations in
tab-separated txt format.
# JAMS Annotations
~~~~~~~~~~~~~~~~~~
* The full annotations are distributed in JAMS format (https://github.com/marl/jams).
* There are 10,000 JAMS annotation files, each one corresponding to a single soundscape with the same filename (other than the extension)
* Each JAMS file contains a single annotation in the scaper namespace format - jams >=v0.3.2 is required in order to load the annotation into python with jams:
import jams
jam = jams.load('soundscape_train_bimodal0.jams').
* The value of each observation (sound event) is a dictionary storing all scaper-related sound event parameters:
* label, source_file, source_time, event_time, event_duration, snr, role, pitch_shift, time_stretch.
* Note: the event_duration stored in the value dictionary represents the specified duration prior to any time
stretching. The actual event duration in the soundscape is stored in the duration field of the JAMS observation.
* The observations (sound events) in the JAMS annotation include both foreground sound events and the background(s).
* The probabilistic scaper foreground and background event specifications are stored in the annotation's sandbox, allowing
a complete reconstruction of the soundscape audio from the JAMS annotation (assuming access to the original source material)
using scaper.generate_from_jams('soundscape_train_bimodal0.jams').
* The annotation sandbox also includes additional metadata such as the total number of foreground sound events, the
maximum polyphony (sound event overlap) of the soundscape and its gini coefficient (a measure of soundscape complexity).
# Simplified Annotations
~~~~~~~~~~~~~~~~~~~~~~~~
* The simplified annotations are distributed as tab-separated text files.
* There are 10,000 simplified annotation files, each one corresponding to a single soundscape with the same filename (other than the extension)
* Each simplified annotation has a 3-column format (no header): start_time, end_time, label.
* Background sounds are NOT included in the simplified annotations (only foreground sound events)
* No additional information is stored in the simplified events (see the JAMS annotations for more details).
Please Acknowledge URBAN-SED in Academic Research
-------------------------------------------------
When URBAN-SED is used for academic research, we would highly appreciate it if scientific publications of works
partly based on the URBAN-SED dataset cite the following publication:
.. code-block:: latex
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. "Scaper: A Library for Soundscape Synthesis and Augmentation",
In IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA), New Paltz, NY, USA, Oct. 2017.
The creation of this dataset was supported by NSF award 1544753.
Conditions of Use
-----------------
Dataset created by <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Audio files contain excerpts of
recordings uploaded to www.freesound.org. Please see FREESOUNDCREDITS.txt for an attribution list.
The URBAN-SED dataset is offered free of charge under the terms of the Creative Commons
Attribution 4.0 International License (CC BY 4.0): http://creativecommons.org/licenses/by/4.0/
The dataset and its contents are made available on an "as is" basis and without warranties of any kind, including
without limitation satisfactory quality and conformity, merchantability, fitness for a particular purpose, accuracy or
completeness, or absence of errors. Subject to any liability that may not be excluded or limited by law, NYU is not
liable for, and expressly excludes, all liability for loss or damage however and whenever caused to anyone by any use of
the URBAN-SED dataset or any part of it.
Feedback
--------
Please help us improve URBAN-SED by sending your feedback to: <EMAIL>
In case of a problem report please include as many details as possible.
"""
import os
from typing import BinaryIO, Optional, TextIO, Tuple
import librosa
import numpy as np
import csv
import jams
import glob
from soundata import download_utils
from soundata import jams_utils
from soundata import core
from soundata import annotations
from soundata import io
BIBTEX = """
@inproceedings{Salamon:Scaper:WASPAA:17,
Address = {New Paltz, NY, USA},
Author = {<NAME> <NAME> <NAME>.},
Booktitle = {IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)},
Month = {Oct.},
Pages = {344--348},
Title = {Scaper: A Library for Soundscape Synthesis and Augmentation},
Year = {2017}}
"""
REMOTES = {
"all": download_utils.RemoteFileMetadata(
filename="URBAN-SED_v2.0.0.tar.gz",
url="https://zenodo.org/record/1324404/files/URBAN-SED_v2.0.0.tar.gz?download=1",
checksum="a2d24a2148ece7c021fcc079ee87c2dc",
unpack_directories=["URBAN-SED_v2.0.0"],
)
}
LICENSE_INFO = "Creative Commons Attribution 4.0 International"
class Clip(core.Clip):
    """A single URBAN-SED soundscape.

    Args:
        clip_id (str): id of the clip

    Attributes:
        audio (np.ndarray, float): the clip's audio signal and sample rate
        audio_path (str): path to the audio file
        clip_id (str): clip id
        events (soundata.annotations.Events): sound events with start time,
            end time, label and confidence
        split (str): subset the clip belongs to (for experiments): train,
            validate, or test
    """

    def __init__(self, clip_id, data_home, dataset_name, index, metadata):
        super().__init__(clip_id, data_home, dataset_name, index, metadata)
        # Resolve every file backing this clip once, at construction time.
        self.audio_path = self.get_path("audio")
        self.jams_path = self.get_path("jams")
        self.txt_path = self.get_path("txt")

    @property
    def audio(self) -> Optional[Tuple[np.ndarray, float]]:
        """The clip's audio.

        Returns:
            * np.ndarray - audio signal
            * float - sample rate
        """
        return load_audio(self.audio_path)

    @property
    def split(self):
        """The data split this clip belongs to.

        Returns
            * str - one of "train", "validate" or "test"
        """
        return self._clip_metadata.get("split")

    @core.cached_property
    def events(self) -> Optional[annotations.Events]:
        """The clip's sound event annotations.

        Returns
            * annotations.Events - audio event object
        """
        return load_events(self.txt_path)

    def to_jams(self):
        """Get the clip's data in jams format.

        Returns:
            jams.JAMS: the clip's data in jams format
        """
        loaded = jams.load(self.jams_path)
        # Tag the annotation so downstream consumers know where it came from.
        loaded.annotations[0].annotation_metadata.data_source = "soundata"
        return loaded
@io.coerce_to_bytes_io
def load_audio(fhandle: BinaryIO, sr=None) -> Tuple[np.ndarray, float]:
    """Load an URBAN-SED audio file.

    Args:
        fhandle (str or file-like): file-like object or path to the audio file
        sr (int or None): sample rate for loaded audio; ``None`` (the
            default) keeps the file's original 44100 Hz rate without
            resampling.

    Returns:
        * np.ndarray - the mono audio signal
        * float - the sample rate of the audio file
    """
    # librosa already yields the (signal, sample_rate) pair we expose.
    return librosa.load(fhandle, sr=sr, mono=True)
@io.coerce_to_string_io
def load_events(fhandle: TextIO) -> annotations.Events:
    """Load an URBAN-SED sound events annotation file.

    Args:
        fhandle (str or file-like): File-like object or path to the sound
            events annotation file

    Raises:
        IOError: if txt_path doesn't exist

    Returns:
        Events: sound events annotation data
    """
    # Simplified annotations are 3-column TSV rows: start, end, label.
    rows = list(csv.reader(fhandle, delimiter="\t"))
    intervals = [[float(row[0]), float(row[1])] for row in rows]
    event_labels = [row[2] for row in rows]
    # No confidence is annotated, so every event gets confidence 1.0.
    return annotations.Events(
        np.array(intervals), "seconds", event_labels, "open",
        np.ones(len(rows))
    )
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
    """
    The URBAN-SED dataset
    """

    def __init__(self, data_home=None):
        super().__init__(
            data_home,
            name="urbansed",
            clip_class=Clip,
            bibtex=BIBTEX,
            remotes=REMOTES,
            license_info=LICENSE_INFO,
        )

    @core.copy_docs(load_audio)
    def load_audio(self, *args, **kwargs):
        return load_audio(*args, **kwargs)

    @core.cached_property
    def _metadata(self):
        """Map each clip id to its split, inferred from the folder layout.

        The split of a clip is encoded by the annotation sub-folder it lives
        in (train: 6000 clips, validate: 2000, test: 2000 — see module docs).

        Returns:
            dict: ``{clip_id: {"split": split}}`` for every txt annotation.
        """
        # Fix: the previous version zipped an `expected_sizes` list that was
        # never used; it has been removed (no validation was performed).
        metadata_index = {}
        for split in ("train", "validate", "test"):
            annotation_folder = os.path.join(
                self.data_home, "annotations", split)
            txtfiles = sorted(glob.glob(
                os.path.join(annotation_folder, "*.txt")))
            for tf in txtfiles:
                clip_id = os.path.basename(tf).replace(".txt", "")
                metadata_index[clip_id] = {"split": split}
        return metadata_index
| [
"os.path.join",
"jams.load",
"soundata.core.copy_docs",
"soundata.core.docstring_inherit",
"numpy.array",
"soundata.download_utils.RemoteFileMetadata",
"os.path.basename",
"csv.reader",
"librosa.load"
] | [((12525, 12561), 'soundata.core.docstring_inherit', 'core.docstring_inherit', (['core.Dataset'], {}), '(core.Dataset)\n', (12547, 12561), False, 'from soundata import core\n'), ((9146, 9398), 'soundata.download_utils.RemoteFileMetadata', 'download_utils.RemoteFileMetadata', ([], {'filename': '"""URBAN-SED_v2.0.0.tar.gz"""', 'url': '"""https://zenodo.org/record/1324404/files/URBAN-SED_v2.0.0.tar.gz?download=1"""', 'checksum': '"""a2d24a2148ece7c021fcc079ee87c2dc"""', 'unpack_directories': "['URBAN-SED_v2.0.0']"}), "(filename='URBAN-SED_v2.0.0.tar.gz', url=\n 'https://zenodo.org/record/1324404/files/URBAN-SED_v2.0.0.tar.gz?download=1'\n , checksum='a2d24a2148ece7c021fcc079ee87c2dc', unpack_directories=[\n 'URBAN-SED_v2.0.0'])\n", (9179, 9398), False, 'from soundata import download_utils\n'), ((11714, 11753), 'librosa.load', 'librosa.load', (['fhandle'], {'sr': 'sr', 'mono': '(True)'}), '(fhandle, sr=sr, mono=True)\n', (11726, 11753), False, 'import librosa\n'), ((12204, 12239), 'csv.reader', 'csv.reader', (['fhandle'], {'delimiter': '"""\t"""'}), "(fhandle, delimiter='\\t')\n", (12214, 12239), False, 'import csv\n'), ((12892, 12918), 'soundata.core.copy_docs', 'core.copy_docs', (['load_audio'], {}), '(load_audio)\n', (12906, 12918), False, 'from soundata import core\n'), ((11082, 11107), 'jams.load', 'jams.load', (['self.jams_path'], {}), '(self.jams_path)\n', (11091, 11107), False, 'import jams\n'), ((12428, 12443), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (12436, 12443), True, 'import numpy as np\n'), ((12472, 12492), 'numpy.array', 'np.array', (['confidence'], {}), '(confidence)\n', (12480, 12492), True, 'import numpy as np\n'), ((13265, 13315), 'os.path.join', 'os.path.join', (['self.data_home', '"""annotations"""', 'split'], {}), "(self.data_home, 'annotations', split)\n", (13277, 13315), False, 'import os\n'), ((13356, 13396), 'os.path.join', 'os.path.join', (['annotation_folder', '"""*.txt"""'], {}), "(annotation_folder, '*.txt')\n", 
(13368, 13396), False, 'import os\n'), ((13458, 13478), 'os.path.basename', 'os.path.basename', (['tf'], {}), '(tf)\n', (13474, 13478), False, 'import os\n')] |
# -*- coding: utf-8 -*-
########################################################################
# NSAp - Copyright (C) CEA, 2021
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
########################################################################
"""
Module provides functions to prepare different datasets from EUAIMS.
"""
# Imports
import os
import json
import time
import urllib
import shutil
import pickle
import requests
import logging
import numpy as np
from collections import namedtuple
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import RobustScaler, OneHotEncoder, StandardScaler
from sklearn.linear_model import LinearRegression
from pynet.datasets import Fetchers
from neurocombat_sklearn import CombatModel as fortin_combat
from nibabel.freesurfer.mghformat import load as surface_loader
# Global parameters
# Container returned by every fetcher: paths to the cached train/test numpy
# inputs plus the tsv metadata (subject ids + column names) that go with them.
Item = namedtuple("Item", ["train_input_path", "test_input_path",
                           "train_metadata_path", "test_metadata_path"])
COHORT_NAME = "EUAIMS"
# Read-only location of the raw EUAIMS tables (NeuroSpin network share).
FOLDER = "/neurospin/brainomics/2020_deepint/data"
# Destination of the preprocessed arrays and of the fitted preprocessing
# models (scalers, combat adjusters, residualizers) pickled for reuse.
SAVING_FOLDER = "/tmp/EUAIMS"
FILES = {
    "stratification": os.path.join(FOLDER, "EUAIMS_stratification.tsv"),
    "rois_mapper": os.path.join(FOLDER, "EUAIMS_rois.tsv"),
    "surf_stratification": os.path.join(
        FOLDER, "EUAIMS_surf_stratification.tsv")
}
# Per-modality default arguments for the wrapped fetchers. Each "qc" entry is
# interpreted by `apply_qc`: keys are clinical column names (without prefix),
# values map a relation ("gt"/"lt"/"gte"/"lte"/"eq") to a threshold.
DEFAULTS = {
    "clinical": {
        "test_size": 0.2, "seed": 42,
        "return_data": False, "z_score": True,
        "drop_cols": ["t1:site", "t1:ageyrs", "t1:sex", "t1:fsiq",
                      "t1:group", "t1:diagnosis", "mri", "t1:group:name",
                      "qc", "labels", "subgroups"],
        "qc": {"t1:fsiq": {"gte": 70},
               "mri": {"eq": 1},
               "qc": {"eq": "include"}}
    },
    "rois": {
        "test_size": 0.2, "seed": 42,
        "return_data": False, "z_score": True, "adjust_sites": True,
        "metrics": ["lgi:avg", "thick:avg", "surf:area"],
        "roi_types": ["cortical"],
        "residualize_by": {"continuous": ["t1:ageyrs", "t1:fsiq"],
                           "discrete": ["t1:sex"]},
        "qc": {"t1:fsiq": {"gte": 70},
               "mri": {"eq": 1},
               "qc": {"eq": "include"}}
    },
    "genetic": {
        "test_size": 0.2, "seed": 42,
        "return_data": False, "z_score": True, "scores": None,
        "qc": {"t1:fsiq": {"gte": 70},
               "mri": {"eq": 1},
               "qc": {"eq": "include"}}
    },
    "surface": {
        "test_size": 0.2, "seed": 42,
        "return_data": False, "z_score": True, "adjust_sites": True,
        "metrics": ["pial_lgi", "thickness"],
        "residualize_by": {"continuous": ["t1:ageyrs", "t1:fsiq"],
                           "discrete": ["t1:sex"]},
        "qc": {"t1:fsiq": {"gte": 70},
               "mri": {"eq": 1},
               "qc": {"eq": "include"}}
    },
    "multiblock": {
        "test_size": 0.2, "seed": 42,
        "blocks": ["clinical", "surface-lh", "surface-rh", "genetic"],
        "qc": {"t1:fsiq": {"gte": 70},
               "mri": {"eq": 1},
               "qc": {"eq": "include"}}
    }
}
logger = logging.getLogger("pynet")
def apply_qc(data, prefix, qc):
    """ Applies quality control filtering to the data.

    Parameters
    ----------
    data: pandas DataFrame
        data for which we control the quality
    prefix: string
        prefix of the column names
    qc: dict
        quality control dict. keys are the name of the columns
        to control on, and values dict containing an order relationship
        and a value as items

    Raises
    ------
    ValueError
        if a relation key is not one of gt/lt/gte/lte/eq

    Returns
    -------
    data: pandas DataFrame
        rows of `data` that pass every control
    """
    # Fix: build the keep-mask on data's own index. The previous
    # pd.Series([True] * len(data)) carried a default RangeIndex, so the
    # later `&` silently misaligned (and boolean indexing failed) whenever
    # the input frame did not use the default index.
    idx_to_keep = pd.Series(True, index=data.index)
    relation_mapper = {
        "gt": lambda x, y: x > y,
        "lt": lambda x, y: x < y,
        "gte": lambda x, y: x >= y,
        "lte": lambda x, y: x <= y,
        "eq": lambda x, y: x == y,
    }
    for name, controls in qc.items():
        for relation, value in controls.items():
            if relation not in relation_mapper:
                raise ValueError(
                    "The relationship {} provided is not a valid "
                    "one".format(relation))
            column = "{}{}".format(prefix, name)
            # Controls on columns absent from the data are silently skipped,
            # as in the original behavior.
            if column in data.columns:
                idx_to_keep = idx_to_keep & relation_mapper[relation](
                    data[column], value)
    return data[idx_to_keep]
def fetch_clinical_wrapper(datasetdir=SAVING_FOLDER, files=FILES,
                           cohort=COHORT_NAME, defaults=DEFAULTS['clinical']):
    """ Fetcher wrapper for clinical data.

    Parameters
    ----------
    datasetdir: string, default SAVING_FOLDER
        path to the folder in which to save the data
    files: dict, default FILES
        contains the paths to the different files
    cohort: string, default COHORT_NAME
        name of the cohort
    defaults: dict, default DEFAULTS['clinical']
        default values for the wrapped function

    Returns
    -------
    fetcher: function
        corresponding fetcher.
    """
    # NOTE(review): registration is currently disabled (decorator commented
    # out below); fetcher_name is unused until it is re-enabled.
    fetcher_name = "fetcher_clinical_{}".format(cohort)
    # @Fetchers.register
    def fetch_clinical(
            test_size=defaults["test_size"], seed=defaults["seed"],
            return_data=defaults["return_data"], z_score=defaults["z_score"],
            drop_cols=defaults["drop_cols"], qc=defaults["qc"]):
        """ Fetches and preprocesses clinical data.

        Parameters
        ----------
        test_size: float, default 0.2
            proportion of the dataset to keep for testing. Preprocessing models
            will only be fitted on the training part and applied to the test
            set. You can specify not to use a testing set by setting it to 0
        seed: int, default 42
            random seed to split the data into train / test
        return_data: bool, default False
            If false, saves the data in the specified folder, and return the
            path. Otherwise, returns the preprocessed data and the
            corresponding subjects
        z_score: bool, default True
            wether or not to transform the data into z_scores, meaning
            standardizing and scaling it
        drop_cols: list of string, see default
            names of the columns to drop before saving the data.
        qc: dict, see default
            keys are the name of the features the control on, values are the
            requirements on their values (see the function apply_qc)

        Returns
        -------
        item: namedtuple
            a named tuple containing 'train_input_path', 'train_metadata_path',
            and 'test_input_path', 'test_metadata_path' if test_size > 0
        X_train: numpy array,
            Training data, if return_data is True
        X_test: numpy array,
            Test data, if return_data is True and test_size > 0
        subj_train: numpy array,
            Training subjects, if return_data is True
        subj_test: numpy array,
            Test subjects, if return_data is True and test_size > 0
        """
        clinical_prefix = "bloc-clinical_score-"
        subject_column_name = "participant_id"
        path = os.path.join(datasetdir, "clinical_X_train.npy")
        meta_path = os.path.join(datasetdir, "clinical_X_train.tsv")
        path_test = None
        meta_path_test = None
        if test_size > 0:
            path_test = os.path.join(datasetdir, "clinical_X_test.npy")
            meta_path_test = os.path.join(datasetdir, "clinical_X_test.tsv")
        # Preprocessing runs once; results are cached on disk and reused on
        # subsequent calls (the fitted scaler is pickled alongside).
        if not os.path.isfile(path):
            data = pd.read_csv(files["stratification"], sep="\t")
            # Keep the subject id plus every clinical score column.
            clinical_cols = [subject_column_name]
            clinical_cols += [col for col in data.columns
                              if col.startswith(clinical_prefix)]
            data = data[clinical_cols]
            data_train = apply_qc(data, clinical_prefix, qc).sort_values(
                subject_column_name)
            data_train.columns = [elem.replace(clinical_prefix, "")
                                  for elem in data_train.columns]
            X_train = data_train.drop(columns=drop_cols)
            # Splits in train and test and removes nans
            X_test, subj_test = (None, None)
            if test_size > 0:
                X_train, X_test = train_test_split(
                    X_train, test_size=test_size, random_state=seed)
                na_idx_test = (X_test.isna().sum(1) == 0)
                X_test = X_test[na_idx_test]
                subj_test = X_test[subject_column_name].values
                X_test = X_test.drop(columns=[subject_column_name]).values
            na_idx_train = (X_train.isna().sum(1) == 0)
            X_train = X_train[na_idx_train]
            subj_train = X_train[subject_column_name].values
            X_train = X_train.drop(columns=[subject_column_name])
            cols = X_train.columns
            X_train = X_train.values
            # Standardizes and scales: fitted on train only, persisted, then
            # applied to the test set.
            if z_score:
                scaler = RobustScaler()
                X_train = scaler.fit_transform(X_train)
                _path = os.path.join(datasetdir, "clinical_scaler.pkl")
                with open(_path, "wb") as f:
                    pickle.dump(scaler, f)
                if test_size > 0:
                    X_test = scaler.transform(X_test)
            # Return data and subjects
            X_train_df = pd.DataFrame(data=X_train, columns=cols)
            X_train_df.insert(0, subject_column_name, subj_train)
            X_test_df = None
            if test_size > 0:
                X_test_df = pd.DataFrame(data=X_test, columns=cols)
                X_test_df.insert(0, subject_column_name, subj_test)
            # Saving
            np.save(path, X_train)
            X_train_df.to_csv(meta_path, index=False, sep="\t")
            if test_size > 0:
                np.save(path_test, X_test)
                X_test_df.to_csv(meta_path_test, index=False, sep="\t")
        if return_data:
            # Always reload from the cache so repeated calls are consistent.
            X_train = np.load(path)
            subj_train = pd.read_csv(meta_path, sep="\t")[
                subject_column_name].values
            X_test, subj_test = (None, None)
            if test_size > 0:
                X_test = np.load(path_test)
                subj_test = pd.read_csv(meta_path_test, sep="\t")[
                    subject_column_name].values
            return X_train, X_test, subj_train, subj_test
        else:
            return Item(train_input_path=path, test_input_path=path_test,
                        train_metadata_path=meta_path,
                        test_metadata_path=meta_path_test)
    return fetch_clinical
def fetch_rois_wrapper(datasetdir=SAVING_FOLDER, files=FILES,
                       cohort=COHORT_NAME, site_column_name="t1:site",
                       defaults=DEFAULTS['rois']):
    """ Fetcher wrapper for rois data.

    Parameters
    ----------
    datasetdir: string, default SAVING_FOLDER
        path to the folder in which to save the data
    files: dict, default FILES
        contains the paths to the different files
    cohort: string, default COHORT_NAME
        name of the cohort
    site_column_name: string, default "t1:site"
        name of the column containing the site of MRI acquisition
    defaults: dict, default DEFAULTS['rois']
        default values for the wrapped function

    Returns
    -------
    fetcher: function
        corresponding fetcher
    """
    # NOTE(review): registration is currently disabled (decorator commented
    # out below); fetcher_name is unused until it is re-enabled.
    fetcher_name = "fetcher_rois_{}".format(cohort)
    # @Fetchers.register
    def fetch_rois(
            metrics=defaults["metrics"], roi_types=defaults["roi_types"],
            test_size=defaults["test_size"], seed=defaults["seed"],
            return_data=defaults["return_data"], z_score=defaults["z_score"],
            adjust_sites=defaults["adjust_sites"],
            residualize_by=defaults["residualize_by"], qc=defaults["qc"]):
        """ Fetches and preprocesses roi data.

        Parameters
        ----------
        metrics: list of strings, see default
            metrics to fetch
        roi_types: list of strings, default ["cortical"]
            type of rois to fetch. Must be one of "cortical", "subcortical"
            and "other"
        test_size: float, default 0.2
            proportion of the dataset to keep for testing. Preprocessing models
            will only be fitted on the training part and applied to the test
            set. You can specify not to use a testing set by setting it to 0
        seed: int, default 42
            random seed to split the data into train / test
        return_data: bool, default False
            If false, saves the data in the specified folder, and return the
            path. Otherwise, returns the preprocessed data and the
            corresponding subjects
        z_score: bool, default True
            wether or not to transform the data into z_scores, meaning
            standardizing and scaling it
        adjust_sites: bool, default True
            wether or not the correct site effects via the Combat algorithm
        residualize_by: dict, see default
            variables to residualize the data. Two keys, "continuous" and
            "discrete", and the values are a list of the variable names
        qc: dict, see default
            keys are the name of the features the control on, values are the
            requirements on their values (see the function apply_qc)

        Returns
        -------
        item: namedtuple
            a named tuple containing 'train_input_path', 'train_metadata_path',
            and 'test_input_path', 'test_metadata_path' if test_size > 0
        X_train: numpy array,
            Training data, if return_data is True
        X_test: numpy array,
            Test data, if return_data is True and test_size > 0
        subj_train: numpy array,
            Training subjects, if return_data is True
        subj_test: numpy array,
            Test subjects, if return_data is True and test_size > 0
        """
        clinical_prefix = "bloc-clinical_score-"
        roi_prefix = "bloc-t1w_roi-"
        subject_column_name = "participant_id"
        path = os.path.join(datasetdir, "rois_X_train.npy")
        meta_path = os.path.join(datasetdir, "rois_X_train.tsv")
        path_test = None
        meta_path_test = None
        if test_size > 0:
            path_test = os.path.join(datasetdir, "rois_X_test.npy")
            meta_path_test = os.path.join(datasetdir, "rois_X_test.tsv")
        # Preprocessing runs once; results are cached on disk and reused on
        # subsequent calls (combat / scaler / residualizer are pickled).
        if not os.path.isfile(path):
            data = pd.read_csv(files["stratification"], sep="\t")
            roi_mapper = pd.read_csv(files["rois_mapper"], sep="\t")
            # ROI selection: label ranges encode the ROI family
            # (11000-13000: cortical, >13000: subcortical, <11000: other).
            roi_label_range = pd.Series([False] * len(roi_mapper))
            for roi_type in roi_types:
                if roi_type == "cortical":
                    roi_label_range = roi_label_range | (
                        (roi_mapper["labels"] > 11000) &
                        (roi_mapper["labels"] < 13000))
                elif roi_type == "subcortical":
                    roi_label_range = roi_label_range | (
                        roi_mapper["labels"] > 13000)
                elif roi_type == "other":
                    roi_label_range = roi_label_range | (
                        roi_mapper["labels"] < 11000)
                else:
                    raise ValueError("Roi types must be either 'cortical', \
'subcortical' or 'other'")
            roi_labels = roi_mapper.loc[roi_label_range, "labels"]
            # Feature selection: keep columns whose ROI label and metric both
            # match the request; column format is "<prefix><roi>:..._..-metric".
            features_list = []
            for column in data.columns:
                if column.startswith(roi_prefix):
                    roi = int(column.split(":")[1].split("_")[0])
                    metric = column.split("-")[-1]
                    if roi in roi_labels.values and metric in metrics:
                        features_list.append(column.replace(roi_prefix, ""))
            data_train = apply_qc(data, clinical_prefix, qc).sort_values(
                subject_column_name)
            data_train.columns = [elem.replace(roi_prefix, "")
                                  for elem in data_train.columns]
            X_train = data_train[features_list].copy()
            # Splits in train and test and removes nans
            if test_size > 0:
                X_train, X_test, data_train, data_test = train_test_split(
                    X_train, data_train, test_size=test_size,
                    random_state=seed)
                na_idx_test = (X_test.isna().sum(1) == 0)
                X_test = X_test[na_idx_test]
                data_test = data_test[na_idx_test]
                subj_test = data_test[subject_column_name].values
            na_idx_train = (X_train.isna().sum(1) == 0)
            X_train = X_train[na_idx_train]
            data_train = data_train[na_idx_train]
            subj_train = data_train[subject_column_name].values
            cols = X_train.columns
            # Correction for site effects: one combat model per metric,
            # fitted on train and applied to test.
            if adjust_sites:
                for metric in metrics:
                    adjuster = fortin_combat()
                    features = [feature for feature in features_list
                                if metric in feature]
                    X_train[features] = adjuster.fit_transform(
                        X_train[features],
                        data_train[["{}{}".format(
                            clinical_prefix, site_column_name)]],
                        data_train[["{}{}".format(clinical_prefix, f)
                                    for f in residualize_by["discrete"]]],
                        data_train[["{}{}".format(clinical_prefix, f)
                                    for f in residualize_by["continuous"]]])
                    _path = os.path.join(
                        datasetdir, "rois_combat_{0}.pkl".format(metric))
                    with open(_path, "wb") as of:
                        pickle.dump(adjuster, of)
                    if test_size > 0:
                        X_test[features] = adjuster.transform(
                            X_test[features],
                            data_test[["{}{}".format(
                                clinical_prefix, site_column_name)]],
                            data_test[["{}{}".format(clinical_prefix, f)
                                       for f in residualize_by["discrete"]]],
                            data_test[["{}{}".format(clinical_prefix, f)
                                       for f in residualize_by["continuous"]]])
            # Standardizes
            if z_score:
                scaler = RobustScaler()
                X_train = scaler.fit_transform(X_train)
                _path = os.path.join(datasetdir, "rois_scaler.pkl")
                with open(_path, "wb") as f:
                    pickle.dump(scaler, f)
                if test_size > 0:
                    X_test = scaler.transform(X_test)
            else:
                X_train = X_train.values
                if test_size > 0:
                    X_test = X_test.values
            # Residualizes and scales
            # NOTE(review): this condition looks inverted — with "or", passing
            # residualize_by=None falls through to len(None) and raises a
            # TypeError; "is not None and len(...) > 0" was probably intended.
            if residualize_by is not None or len(residualize_by) > 0:
                regressor = LinearRegression()
                # NOTE(review): OneHotEncoder's `sparse` kwarg is deprecated in
                # recent scikit-learn in favor of `sparse_output` — confirm the
                # pinned sklearn version before upgrading.
                y_train = np.concatenate([
                    data_train[["{}{}".format(clinical_prefix, f)
                                for f in residualize_by["continuous"]]].values,
                    OneHotEncoder(sparse=False).fit_transform(
                        data_train[["{}{}".format(clinical_prefix, f)
                                    for f in residualize_by["discrete"]]])
                ], axis=1)
                regressor.fit(y_train, X_train)
                X_train = X_train - regressor.predict(y_train)
                _path = os.path.join(datasetdir, "rois_residualizer.pkl")
                with open(_path, "wb") as f:
                    pickle.dump(regressor, f)
                if test_size > 0:
                    y_test = np.concatenate([
                        data_test[[
                            "{}{}".format(clinical_prefix, f)
                            for f in residualize_by["continuous"]]].values,
                        OneHotEncoder(sparse=False).fit_transform(
                            data_test[["{}{}".format(clinical_prefix, f)
                                       for f in residualize_by["discrete"]]])
                    ], axis=1)
                    X_test = X_test - regressor.predict(y_test)
            # Return data and subjects
            X_train_df = pd.DataFrame(data=X_train, columns=cols)
            X_train_df.insert(0, subject_column_name, subj_train)
            X_test_df = None
            if test_size > 0:
                X_test_df = pd.DataFrame(data=X_test, columns=cols)
                X_test_df.insert(0, subject_column_name, subj_test)
            # Saving
            np.save(path, X_train)
            X_train_df.to_csv(meta_path, index=False, sep="\t")
            if test_size > 0:
                np.save(path_test, X_test)
                X_test_df.to_csv(meta_path_test, index=False, sep="\t")
        if return_data:
            # Always reload from the cache so repeated calls are consistent.
            X_train = np.load(path)
            subj_train = pd.read_csv(meta_path, sep="\t")[
                subject_column_name].values
            X_test, subj_test = (None, None)
            if test_size > 0:
                X_test = np.load(path_test)
                subj_test = pd.read_csv(meta_path_test, sep="\t")[
                    subject_column_name].values
            return X_train, X_test, subj_train, subj_test
        else:
            return Item(train_input_path=path, test_input_path=path_test,
                        train_metadata_path=meta_path,
                        test_metadata_path=meta_path_test)
    return fetch_rois
def fetch_surface_wrapper(hemisphere, datasetdir=SAVING_FOLDER,
                          files=FILES, cohort=COHORT_NAME,
                          site_column_name="t1:site",
                          defaults=DEFAULTS["surface"]):
    """ Fetcher wrapper for surface data.

    Parameters
    ----------
    hemisphere: string
        name of the hemisphere data fetcher, one of "rh" or "lh"
    datasetdir: string, default SAVING_FOLDER
        path to the folder in which to save the data
    files: dict, default FILES
        contains the paths to the different files
    cohort: string, default COHORT_NAME
        name of the cohort
    site_column_name: string, default "t1:site"
        name of the column containing the site of MRI acquisition
    defaults: dict, default DEFAULTS["surface"]
        default values for the wrapped function

    Returns
    -------
    fetcher: function
        corresponding fetcher
    """
    assert hemisphere in ["rh", "lh"]
    # NOTE(review): registration is currently disabled (decorator commented
    # out below); fetcher_name is unused until it is re-enabled.
    fetcher_name = "fetcher_surface_{}_{}".format(hemisphere, cohort)
    # @Fetchers.register
    def fetch_surface(
            metrics=defaults["metrics"],
            test_size=defaults["test_size"], seed=defaults["seed"],
            return_data=defaults["return_data"],
            z_score=defaults["z_score"], adjust_sites=defaults["adjust_sites"],
            residualize_by=defaults["residualize_by"], qc=defaults["qc"]):
        """ Fetches and preprocesses surface data.

        Parameters
        ----------
        metrics: list of strings, see defaults
            metrics to fetch
        test_size: float, default 0.2
            proportion of the dataset to keep for testing. Preprocessing models
            will only be fitted on the training part and applied to the test
            set. You can specify not to use a testing set by setting it to 0
        seed: int, default 42
            random seed to split the data into train / test
        return_data: bool, default False
            If false, saves the data in the specified folder, and return the
            path. Otherwise, returns the preprocessed data and the
            corresponding subjects
        z_score: bool, default True
            wether or not to transform the data into z_scores, meaning
            standardizing and scaling it
        adjust_sites: bool, default True
            wether or not the correct site effects via the Combat algorithm
        residualize_by: dict, see default
            variables to residualize the data. Two keys, "continuous" and
            "discrete", and the values are a list of the variable names
        qc: dict, see default
            keys are the name of the features the control on, values are the
            requirements on their values (see the function apply_qc)

        Returns
        -------
        path, path_test: strings
            paths to the saved train (and test, if test_size > 0) arrays,
            if return_data is False
        X_train: numpy array,
            Training data, if return_data is True
        X_test: numpy array,
            Test data, if return_data is True and test_size > 0
        subj_train: numpy array,
            Training subjects, if return_data is True
        subj_test: numpy array,
            Test subjects, if return_data is True and test_size > 0
        """
        clinical_prefix = "bloc-clinical_score-"
        surf_prefix = "bloc-t1w_hemi-{}_metric".format(hemisphere)
        # BUGFIX: the surface stratification table is registered under the
        # "surf_stratification" key of FILES; the previous "clinical_surface"
        # key does not exist and raised a KeyError unconditionally.
        data = pd.read_csv(files["surf_stratification"], sep="\t").drop(
            columns=["bloc-t1w_hemi-lh_metric-area",
                     "bloc-t1w_hemi-rh_metric-area"])
        # Feature selection: one column per requested metric for this
        # hemisphere; each cell holds a path to a per-subject surface file.
        features_list = []
        for metric in metrics:
            for column in data.columns:
                if column.startswith(surf_prefix):
                    m = column.split('-')[-1]
                    if m == metric:
                        features_list.append(column)
        data_train = apply_qc(data, clinical_prefix, qc).sort_values(
            "participant_id")
        # Loads surface data into (subjects, vertices, metrics); the vertex
        # count is taken from the first available file.
        n_vertices = len(
            surface_loader(data_train[features_list[0]].iloc[0]).get_data())
        X_train = np.zeros((len(data_train), n_vertices, len(features_list)))
        for i in range(len(data_train)):
            for j, feature in enumerate(features_list):
                path = data_train[feature].iloc[i]
                # Missing files leave zeros in place (filtered below as nans
                # only if the load produced nans).
                if not pd.isnull([path]):
                    X_train[i, :, j] = surface_loader(
                        path).get_data().squeeze()
        # Splits in train and test and removes nans
        if test_size > 0:
            X_train, X_test, data_train, data_test = train_test_split(
                X_train, data_train, test_size=test_size, random_state=seed)
            na_idx_test = (np.isnan(X_test).sum((1, 2)) == 0)
            X_test = X_test[na_idx_test]
            data_test = data_test[na_idx_test]
            if return_data:
                subj_test = data_test["participant_id"].values
        na_idx_train = (np.isnan(X_train).sum((1, 2)) == 0)
        X_train = X_train[na_idx_train]
        data_train = data_train[na_idx_train]
        if return_data:
            subj_train = data_train["participant_id"].values
        # Applies feature-wise preprocessing
        for i, feature in enumerate(features_list):
            # Correction for site effects: combat is fitted per metric on the
            # vertices that carry signal, then applied to the test set.
            if adjust_sites:
                non_zeros_idx = (X_train[:, :, i] > 0).sum(0) >= 1
                adjuster = fortin_combat()
                X_train[:, non_zeros_idx, i] = adjuster.fit_transform(
                    X_train[:, non_zeros_idx, i],
                    data_train[["{}{}".format(
                        clinical_prefix, site_column_name)]],
                    data_train[["{}{}".format(clinical_prefix, f)
                                for f in residualize_by["discrete"]]],
                    data_train[["{}{}".format(clinical_prefix, f)
                                for f in residualize_by["continuous"]]])
                path = os.path.join(
                    datasetdir,
                    "surface_{}_combat_feature{}.pkl".format(hemisphere, i))
                with open(path, "wb") as f:
                    pickle.dump(adjuster, f)
                if test_size > 0:
                    X_test[:, non_zeros_idx, i] = adjuster.transform(
                        X_test[:, non_zeros_idx, i],
                        data_test[["{}{}".format(
                            clinical_prefix, site_column_name)]],
                        data_test[["{}{}".format(clinical_prefix, f)
                                   for f in residualize_by["discrete"]]],
                        data_test[["{}{}".format(clinical_prefix, f)
                                   for f in residualize_by["continuous"]]])
            # Standardizes and scales: fitted on train only, persisted, then
            # applied to the test set.
            if z_score:
                scaler = RobustScaler()
                X_train[:, :, i] = scaler.fit_transform(X_train[:, :, i])
                path = os.path.join(
                    datasetdir,
                    "surface_{}_scaler_feature{}.pkl".format(hemisphere, i))
                with open(path, "wb") as f:
                    pickle.dump(scaler, f)
                if test_size > 0:
                    X_test[:, :, i] = scaler.transform(X_test[:, :, i])
            # Residualizes
            # BUGFIX: this must be a conjunction — with "or", passing
            # residualize_by=None evaluated len(None) and raised a TypeError.
            if residualize_by is not None and len(residualize_by) > 0:
                regressor = LinearRegression()
                y_train = np.concatenate([
                    data_train[["{}{}".format(clinical_prefix, f)
                                for f in residualize_by["continuous"]]].values,
                    OneHotEncoder(sparse=False).fit_transform(
                        data_train[["{}{}".format(clinical_prefix, f)
                                    for f in residualize_by["discrete"]]])
                ], axis=1)
                regressor.fit(y_train, X_train[:, :, i])
                X_train[:, :, i] = X_train[:, :, i] - regressor.predict(
                    y_train)
                path = os.path.join(
                    datasetdir,
                    "surface_{}_residualizer_feature{}.pkl".format(
                        hemisphere, i))
                with open(path, "wb") as f:
                    pickle.dump(regressor, f)
                if test_size > 0:
                    y_test = np.concatenate([
                        data_test[["{}{}".format(clinical_prefix, f)
                                   for f in residualize_by["continuous"]]
                                  ].values,
                        OneHotEncoder(sparse=False).fit_transform(
                            data_test[["{}{}".format(clinical_prefix, f)
                                       for f in residualize_by["discrete"]]])
                    ], axis=1)
                    X_test[:, :, i] = X_test[:, :, i] - regressor.predict(
                        y_test)
        # Returns data and subjects
        if return_data:
            if test_size > 0:
                return X_train, X_test, subj_train, subj_test
            return X_train, subj_train
        # Saving
        path = os.path.join(
            datasetdir, "surface_{}_X_train.npy".format(hemisphere))
        np.save(path, X_train)
        if test_size > 0:
            path_test = os.path.join(
                datasetdir, "surface_{}_X_test.npy".format(hemisphere))
            np.save(path_test, X_test)
            return path, path_test
        return path
    return fetch_surface
def fetch_genetic_wrapper(datasetdir=SAVING_FOLDER, files=FILES,
                          cohort=COHORT_NAME, defaults=DEFAULTS['genetic']):
    """ Fetcher wrapper for genetic data
    Parameters
    ----------
    datasetdir: string, default SAVING_FOLDER
        path to the folder in which to save the data
    files: dict, default FILES
        contains the paths to the different files
    cohort: string, default COHORT_NAME,
        name of the cohort
    defaults: dict, default DEFAULTS
        default values for the wrapped function
    Returns
    -------
    fetcher: function
        corresponding fetcher
    """
    # Kept for the (currently disabled) fetcher registration below.
    fetcher_name = "fetcher_genetic_{}".format(cohort)
    # @Fetchers.register
    def fetch_genetic(
            scores=defaults["scores"], test_size=defaults["test_size"],
            seed=defaults["seed"], return_data=defaults["return_data"],
            z_score=defaults["z_score"], qc=defaults["qc"]):
        """ Fetches and preprocesses genetic data
        Parameters
        ----------
        scores: list of strings, see defaults
            scores to fetch, None mean it fetches all the available scores
        test_size: float, see defaults
            proportion of the dataset to keep for testing. Preprocessing models
            will only be fitted on the training part and applied to the test
            set. You can specify not to use a testing set by setting it to 0
        seed: int, see default
            random seed to split the data into train / test
        return_data: bool, default False
            If false, saves the data in the specified folder, and return the
            path. Otherwise, returns the preprocessed data and the
            corresponding subjects
        z_score: bool, see defaults
            wether or not to transform the data into z_scores, meaning
            standardizing and scaling it
        qc: dict, see defaults
            keys are the name of the features the control on, values are the
            requirements on their values (see the function apply_qc)
        Returns
        -------
        item: namedtuple
            a named tuple containing 'train_input_path', 'train_metadata_path',
            and 'test_input_path', 'test_metadata_path' if test_size > 0
        X_train: numpy array
            Training data, if return_data is True
        X_test: numpy array
            Test data, if return_data is True and test_size > 0
        subj_train: numpy array
            Training subjects, if return_data is True
        subj_test: numpy array
            Test subjects, if return_data is True and test_size > 0
        """
        # Column-name prefixes used in the stratification TSV to tag blocks.
        clinical_prefix = "bloc-clinical_score-"
        genetic_prefix = "bloc-genetic_score-"
        subject_column_name = "participant_id"
        # Cached output locations; preprocessing is skipped when they exist.
        path = os.path.join(datasetdir, "genetic_X_train.npy")
        meta_path = os.path.join(datasetdir, "genetic_X_train.tsv")
        path_test = None
        meta_path_test = None
        if test_size > 0:
            path_test = os.path.join(datasetdir, "genetic_X_test.npy")
            meta_path_test = os.path.join(datasetdir, "genetic_X_test.tsv")
        if not os.path.isfile(path):
            data = pd.read_csv(files["stratification"], sep="\t")
            # Feature selection: keep genetic columns, optionally restricted
            # to the requested scores (score name is the last "-" segment).
            features_list = []
            for column in data.columns:
                if column.startswith(genetic_prefix):
                    score = column.split("-")[-1]
                    if scores is not None and score in scores:
                        features_list.append(
                            column.replace(genetic_prefix, ""))
                    elif scores is None:
                        features_list.append(
                            column.replace(genetic_prefix, ""))
            # Quality control on clinical columns, then stable subject order.
            data_train = apply_qc(data, clinical_prefix, qc).sort_values(
                subject_column_name)
            data_train.columns = [elem.replace(genetic_prefix, "")
                                  for elem in data_train.columns]
            X_train = data_train[features_list].copy()
            # Splits in train and test and removes nans
            if test_size > 0:
                X_train, X_test, data_train, data_test = train_test_split(
                    X_train, data_train, test_size=test_size,
                    random_state=seed)
                # Drop rows with any NaN feature (row-wise NaN count == 0).
                na_idx_test = (X_test.isna().sum(1) == 0)
                X_test = X_test[na_idx_test]
                data_test = data_test[na_idx_test]
                subj_test = data_test[subject_column_name].values
            na_idx_train = (X_train.isna().sum(1) == 0)
            X_train = X_train[na_idx_train]
            data_train = data_train[na_idx_train]
            subj_train = data_train[subject_column_name].values
            cols = X_train.columns
            # Standardizes and scales
            if z_score:
                # Scaler is fitted on train only and persisted for reuse.
                scaler = RobustScaler()
                X_train = scaler.fit_transform(X_train)
                _path = os.path.join(datasetdir, "genetic_scaler.pkl")
                with open(_path, "wb") as f:
                    pickle.dump(scaler, f)
                if test_size > 0:
                    X_test = scaler.transform(X_test)
            else:
                X_train = X_train.values
                if test_size > 0:
                    X_test = X_test.values
            # Return data and subjects
            X_train_df = pd.DataFrame(data=X_train, columns=cols)
            X_train_df.insert(0, subject_column_name, subj_train)
            X_test_df = None
            if test_size > 0:
                X_test_df = pd.DataFrame(data=X_test, columns=cols)
                X_test_df.insert(0, subject_column_name, subj_test)
            # Saving
            np.save(path, X_train)
            X_train_df.to_csv(meta_path, index=False, sep="\t")
            if test_size > 0:
                np.save(path_test, X_test)
                X_test_df.to_csv(meta_path_test, index=False, sep="\t")
        if return_data:
            # Reload from disk so the returned arrays always match the cache.
            X_train = np.load(path)
            subj_train = pd.read_csv(meta_path, sep="\t")[
                subject_column_name].values
            X_test, subj_test = (None, None)
            if test_size > 0:
                X_test = np.load(path_test)
                subj_test = pd.read_csv(meta_path_test, sep="\t")[
                    subject_column_name].values
            return X_train, X_test, subj_train, subj_test
        else:
            return Item(train_input_path=path, test_input_path=path_test,
                        train_metadata_path=meta_path,
                        test_metadata_path=meta_path_test)
    return fetch_genetic
def make_fetchers(datasetdir=SAVING_FOLDER):
    """Build one fetcher per data channel, all saving under *datasetdir*.

    Returns a dict mapping channel names ("clinical", "rois",
    "surface-rh", "surface-lh", "genetic") to their fetcher functions.
    """
    fetchers = {
        "clinical": fetch_clinical_wrapper(datasetdir=datasetdir),
        "rois": fetch_rois_wrapper(datasetdir=datasetdir),
    }
    for hemi in ("rh", "lh"):
        fetchers["surface-{}".format(hemi)] = fetch_surface_wrapper(
            hemisphere=hemi, datasetdir=datasetdir)
    fetchers["genetic"] = fetch_genetic_wrapper(datasetdir=datasetdir)
    return fetchers
def fetch_multiblock_wrapper(datasetdir=SAVING_FOLDER, files=FILES,
                             cohort=COHORT_NAME,
                             subject_column_name="subjects",
                             defaults=DEFAULTS["multiblock"],
                             make_fetchers_func=make_fetchers):
    """ Fetcher wrapper for multiblock data
    Parameters
    ----------
    datasetdir: string, default SAVING_FOLDER
        path to the folder in which to save the data
    files: dict, default FILES
        contains the paths to the different files
    cohort: string, default COHORT_NAME,
        name of the cohort
    subject_columns_name: string, default "subjects"
        name of the column containing the subjects id
    defaults: dict, default DEFAULTS
        default values for the wrapped function
    make_fetchers_func: function, default make_fetchers
        function to build the fetchers from their wrappers.
        Must return a dict containing as keys the name of the
        channels, and values the corresponding fetcher
    Returns
    -------
    fetcher: function
        corresponding fetcher
    """
    # Kept for the (currently disabled) fetcher registration below.
    fetcher_name = "fetcher_multiblock_{}".format(cohort)
    FETCHERS = make_fetchers_func(datasetdir)
    # @Fetchers.register
    def fetch_multiblock(
            blocks=defaults["blocks"],
            test_size=defaults["test_size"], seed=defaults["seed"],
            qc=defaults["qc"],
            **kwargs):
        """ Fetches and preprocesses multi block data
        Parameters
        ----------
        blocks: list of strings, see default
            blocks of data to fetch, all must be in the key list of FETCHERS
        test_size: float, default 0.2
            proportion of the dataset to keep for testing. Preprocessing models
            will only be fitted on the training part and applied to the test
            set. You can specify not to use a testing set by setting it to 0
        seed: int, default 42
            random seed to split the data into train / test
        qc: dict, see default
            keys are the name of the features the control on, values are the
            requirements on their values (see the function apply_qc)
        kwargs: dict
            additional arguments to be passed to each fetcher indivudally.
            Keys are the name of the fetchers, and values are a dictionnary
            containing arguments and the values for this fetcher
        Returns
        -------
        item: namedtuple
            a named tuple containing 'train_input_path', 'train_metadata_path',
            and 'test_input_path', 'test_metadata_path' if test_size > 0
        """
        # Cached output locations; preprocessing is skipped when they exist.
        path = os.path.join(datasetdir, "multiblock_X_train.npz")
        metadata_path = os.path.join(datasetdir, "metadata_train.tsv")
        path_test = None
        metadata_path_test = None
        if test_size > 0:
            path_test = os.path.join(datasetdir, "multiblock_X_test.npz")
            metadata_path_test = os.path.join(
                datasetdir, "metadata_test.tsv")
        if not os.path.isfile(path):
            X_train = {}
            subj_train = {}
            if test_size > 0:
                X_test = {}
                subj_test = {}
            for block in blocks:
                assert block in FETCHERS
                if block in kwargs.keys():
                    local_kwargs = kwargs[block]
                    # Impose to have the same qc steps and splitting
                    # train/test over all the blocks to have the same
                    # subjects.
                    # BUGFIX: the original deleted keys while iterating
                    # local_kwargs.items(), which raises "RuntimeError:
                    # dictionary changed size during iteration" in
                    # Python 3 whenever one of the keys is present.
                    for key in ("qc", "test_size", "seed"):
                        local_kwargs.pop(key, None)
                else:
                    local_kwargs = {}
                # NOTE(review): assumes every fetcher returns a 4-tuple
                # when return_data=True; some fetchers appear to return
                # only (X_train, subj_train) when test_size == 0 --
                # verify before calling with test_size=0.
                new_X_train, new_X_test, new_subj_train, new_subj_test = \
                    FETCHERS[block](
                        qc=qc, test_size=test_size, seed=seed,
                        return_data=True, **local_kwargs)
                if test_size > 0:
                    X_test[block] = new_X_test
                    subj_test[block] = new_subj_test
                X_train[block] = new_X_train
                subj_train[block] = new_subj_train
            # Remove subjects that arent in all the channels
            common_subjects_train = list(
                set.intersection(*map(set, subj_train.values())))
            for block in blocks:
                subjects = subj_train[block]
                assert(len(subjects) == len(X_train[block]))
                idx_to_keep = [
                    _idx for _idx in range(len(subjects))
                    if subjects[_idx] in common_subjects_train]
                X_train[block] = X_train[block][idx_to_keep]
            if test_size > 0:
                common_subjects_test = list(
                    set.intersection(*map(set, subj_test.values())))
                for block in blocks:
                    subjects = subj_test[block]
                    assert(len(subjects) == len(X_test[block]))
                    idx_to_keep = [
                        _idx for _idx in range(len(subjects))
                        if subjects[_idx] in common_subjects_test]
                    X_test[block] = X_test[block][idx_to_keep]
            # Loads metadata
            clinical_prefix = "bloc-clinical_score-"
            metadata_cols = ["participant_id", "labels", "subgroups"]
            metadata = pd.read_csv(files["stratification"], sep="\t")
            clinical_cols = ["participant_id"]
            clinical_cols += [col for col in metadata.columns
                              if col.startswith(clinical_prefix)]
            metadata = metadata[clinical_cols]
            metadata.columns = [elem.replace(clinical_prefix, "")
                                for elem in metadata.columns]
            metadata = metadata[metadata_cols]
            # Keep metadata rows only for subjects present in every block.
            metadata_train = metadata[
                metadata[subject_column_name].isin(common_subjects_train)]
            if test_size > 0:
                metadata_test = metadata[
                    metadata[subject_column_name].isin(common_subjects_test)]
            # Saving
            np.savez(path, **X_train)
            metadata_train.to_csv(metadata_path, index=False, sep="\t")
            if test_size > 0:
                np.savez(path_test, **X_test)
                metadata_test.to_csv(metadata_path_test, index=False, sep="\t")
        return Item(train_input_path=path, test_input_path=path_test,
                    train_metadata_path=metadata_path,
                    test_metadata_path=metadata_path_test)
    return fetch_multiblock
# Registry mapping each data-modality name to the wrapper that builds its
# fetcher; "multiblock" composes the other modalities via make_fetchers.
WRAPPERS = {
    "clinical": fetch_clinical_wrapper,
    "rois": fetch_rois_wrapper,
    "genetic": fetch_genetic_wrapper,
    "surface": fetch_surface_wrapper,
    "multiblock": fetch_multiblock_wrapper,
}
def fetch_multiblock_euaims(datasetdir=SAVING_FOLDER, fetchers=make_fetchers,
                            surface=False):
    """ Build and run the multiblock fetcher for the EUAIMS cohort.

    Parameters
    ----------
    datasetdir: string, default SAVING_FOLDER
        path to the folder in which to save the data
    fetchers: function, default make_fetchers
        factory returning the per-channel fetchers (passed through to the
        multiblock wrapper as `make_fetchers_func`)
    surface: bool, default False
        if True use surface blocks instead of ROI blocks

    Returns
    -------
    item: namedtuple
        paths to the fetched train (and optionally test) data/metadata
    """
    # NOTE: mutates the module-level DEFAULTS dict (preserved behavior).
    if surface:
        DEFAULTS["multiblock"]["blocks"] = ["clinical", "surface-lh",
                                            "surface-rh", "genetic"]
    else:
        DEFAULTS["multiblock"]["blocks"] = ["clinical", "rois", "genetic"]
    # BUGFIX: the `fetchers` argument was previously ignored (the call
    # hard-coded `make_fetchers_func=make_fetchers`); now it is honored.
    # The default value keeps the old behavior for existing callers.
    return WRAPPERS["multiblock"](
        datasetdir=datasetdir, files=FILES, cohort=COHORT_NAME,
        subject_column_name="participant_id", defaults=DEFAULTS["multiblock"],
        make_fetchers_func=fetchers)()
def inverse_normalization(data, scalers):
    """Undo a chain of fitted scalers on *data*.

    Parameters
    ----------
    data: array-like
        the normalized dataset to restore
    scalers: iterable of str
        paths to pickled scaler objects exposing `inverse_transform`;
        they are applied in the order given

    Returns
    -------
    out: array-like
        the de-normalized dataset
    """
    result = data
    for scaler_path in scalers:
        with open(scaler_path, "rb") as handle:
            fitted = pickle.load(handle)
        result = fitted.inverse_transform(result)
    return result
| [
"logging.getLogger",
"pandas.read_csv",
"numpy.save",
"numpy.savez",
"pandas.DataFrame",
"collections.namedtuple",
"sklearn.model_selection.train_test_split",
"pickle.load",
"os.path.isfile",
"numpy.isnan",
"neurocombat_sklearn.CombatModel",
"sklearn.linear_model.LinearRegression",
"pandas.i... | [((1061, 1169), 'collections.namedtuple', 'namedtuple', (['"""Item"""', "['train_input_path', 'test_input_path', 'train_metadata_path',\n 'test_metadata_path']"], {}), "('Item', ['train_input_path', 'test_input_path',\n 'train_metadata_path', 'test_metadata_path'])\n", (1071, 1169), False, 'from collections import namedtuple\n'), ((3317, 3343), 'logging.getLogger', 'logging.getLogger', (['"""pynet"""'], {}), "('pynet')\n", (3334, 3343), False, 'import logging\n'), ((1329, 1378), 'os.path.join', 'os.path.join', (['FOLDER', '"""EUAIMS_stratification.tsv"""'], {}), "(FOLDER, 'EUAIMS_stratification.tsv')\n", (1341, 1378), False, 'import os\n'), ((1399, 1438), 'os.path.join', 'os.path.join', (['FOLDER', '"""EUAIMS_rois.tsv"""'], {}), "(FOLDER, 'EUAIMS_rois.tsv')\n", (1411, 1438), False, 'import os\n'), ((1467, 1521), 'os.path.join', 'os.path.join', (['FOLDER', '"""EUAIMS_surf_stratification.tsv"""'], {}), "(FOLDER, 'EUAIMS_surf_stratification.tsv')\n", (1479, 1521), False, 'import os\n'), ((7505, 7553), 'os.path.join', 'os.path.join', (['datasetdir', '"""clinical_X_train.npy"""'], {}), "(datasetdir, 'clinical_X_train.npy')\n", (7517, 7553), False, 'import os\n'), ((7574, 7622), 'os.path.join', 'os.path.join', (['datasetdir', '"""clinical_X_train.tsv"""'], {}), "(datasetdir, 'clinical_X_train.tsv')\n", (7586, 7622), False, 'import os\n'), ((14569, 14613), 'os.path.join', 'os.path.join', (['datasetdir', '"""rois_X_train.npy"""'], {}), "(datasetdir, 'rois_X_train.npy')\n", (14581, 14613), False, 'import os\n'), ((14634, 14678), 'os.path.join', 'os.path.join', (['datasetdir', '"""rois_X_train.tsv"""'], {}), "(datasetdir, 'rois_X_train.tsv')\n", (14646, 14678), False, 'import os\n'), ((31570, 31592), 'numpy.save', 'np.save', (['path', 'X_train'], {}), '(path, X_train)\n', (31577, 31592), True, 'import numpy as np\n'), ((34651, 34698), 'os.path.join', 'os.path.join', (['datasetdir', '"""genetic_X_train.npy"""'], {}), "(datasetdir, 'genetic_X_train.npy')\n", 
(34663, 34698), False, 'import os\n'), ((34719, 34766), 'os.path.join', 'os.path.join', (['datasetdir', '"""genetic_X_train.tsv"""'], {}), "(datasetdir, 'genetic_X_train.tsv')\n", (34731, 34766), False, 'import os\n'), ((41724, 41774), 'os.path.join', 'os.path.join', (['datasetdir', '"""multiblock_X_train.npz"""'], {}), "(datasetdir, 'multiblock_X_train.npz')\n", (41736, 41774), False, 'import os\n'), ((41799, 41845), 'os.path.join', 'os.path.join', (['datasetdir', '"""metadata_train.tsv"""'], {}), "(datasetdir, 'metadata_train.tsv')\n", (41811, 41845), False, 'import os\n'), ((7728, 7775), 'os.path.join', 'os.path.join', (['datasetdir', '"""clinical_X_test.npy"""'], {}), "(datasetdir, 'clinical_X_test.npy')\n", (7740, 7775), False, 'import os\n'), ((7805, 7852), 'os.path.join', 'os.path.join', (['datasetdir', '"""clinical_X_test.tsv"""'], {}), "(datasetdir, 'clinical_X_test.tsv')\n", (7817, 7852), False, 'import os\n'), ((7869, 7889), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (7883, 7889), False, 'import os\n'), ((7911, 7957), 'pandas.read_csv', 'pd.read_csv', (["files['stratification']"], {'sep': '"""\t"""'}), "(files['stratification'], sep='\\t')\n", (7922, 7957), True, 'import pandas as pd\n'), ((9739, 9779), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X_train', 'columns': 'cols'}), '(data=X_train, columns=cols)\n', (9751, 9779), True, 'import pandas as pd\n'), ((10075, 10097), 'numpy.save', 'np.save', (['path', 'X_train'], {}), '(path, X_train)\n', (10082, 10097), True, 'import numpy as np\n'), ((10354, 10367), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (10361, 10367), True, 'import numpy as np\n'), ((14784, 14827), 'os.path.join', 'os.path.join', (['datasetdir', '"""rois_X_test.npy"""'], {}), "(datasetdir, 'rois_X_test.npy')\n", (14796, 14827), False, 'import os\n'), ((14857, 14900), 'os.path.join', 'os.path.join', (['datasetdir', '"""rois_X_test.tsv"""'], {}), "(datasetdir, 'rois_X_test.tsv')\n", (14869, 14900), False, 
'import os\n'), ((14917, 14937), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (14931, 14937), False, 'import os\n'), ((14958, 15004), 'pandas.read_csv', 'pd.read_csv', (["files['stratification']"], {'sep': '"""\t"""'}), "(files['stratification'], sep='\\t')\n", (14969, 15004), True, 'import pandas as pd\n'), ((15030, 15073), 'pandas.read_csv', 'pd.read_csv', (["files['rois_mapper']"], {'sep': '"""\t"""'}), "(files['rois_mapper'], sep='\\t')\n", (15041, 15073), True, 'import pandas as pd\n'), ((21007, 21047), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X_train', 'columns': 'cols'}), '(data=X_train, columns=cols)\n', (21019, 21047), True, 'import pandas as pd\n'), ((21343, 21365), 'numpy.save', 'np.save', (['path', 'X_train'], {}), '(path, X_train)\n', (21350, 21365), True, 'import numpy as np\n'), ((21622, 21635), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (21629, 21635), True, 'import numpy as np\n'), ((26965, 27042), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'data_train'], {'test_size': 'test_size', 'random_state': 'seed'}), '(X_train, data_train, test_size=test_size, random_state=seed)\n', (26981, 27042), False, 'from sklearn.model_selection import train_test_split\n'), ((31741, 31767), 'numpy.save', 'np.save', (['path_test', 'X_test'], {}), '(path_test, X_test)\n', (31748, 31767), True, 'import numpy as np\n'), ((34872, 34918), 'os.path.join', 'os.path.join', (['datasetdir', '"""genetic_X_test.npy"""'], {}), "(datasetdir, 'genetic_X_test.npy')\n", (34884, 34918), False, 'import os\n'), ((34948, 34994), 'os.path.join', 'os.path.join', (['datasetdir', '"""genetic_X_test.tsv"""'], {}), "(datasetdir, 'genetic_X_test.tsv')\n", (34960, 34994), False, 'import os\n'), ((35011, 35031), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (35025, 35031), False, 'import os\n'), ((35053, 35099), 'pandas.read_csv', 'pd.read_csv', (["files['stratification']"], {'sep': '"""\t"""'}), 
"(files['stratification'], sep='\\t')\n", (35064, 35099), True, 'import pandas as pd\n'), ((37270, 37310), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X_train', 'columns': 'cols'}), '(data=X_train, columns=cols)\n', (37282, 37310), True, 'import pandas as pd\n'), ((37606, 37628), 'numpy.save', 'np.save', (['path', 'X_train'], {}), '(path, X_train)\n', (37613, 37628), True, 'import numpy as np\n'), ((37885, 37898), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (37892, 37898), True, 'import numpy as np\n'), ((41955, 42004), 'os.path.join', 'os.path.join', (['datasetdir', '"""multiblock_X_test.npz"""'], {}), "(datasetdir, 'multiblock_X_test.npz')\n", (41967, 42004), False, 'import os\n'), ((42038, 42083), 'os.path.join', 'os.path.join', (['datasetdir', '"""metadata_test.tsv"""'], {}), "(datasetdir, 'metadata_test.tsv')\n", (42050, 42083), False, 'import os\n'), ((42117, 42137), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (42131, 42137), False, 'import os\n'), ((44520, 44566), 'pandas.read_csv', 'pd.read_csv', (["files['stratification']"], {'sep': '"""\t"""'}), "(files['stratification'], sep='\\t')\n", (44531, 44566), True, 'import pandas as pd\n'), ((45262, 45287), 'numpy.savez', 'np.savez', (['path'], {}), '(path, **X_train)\n', (45270, 45287), True, 'import numpy as np\n'), ((46706, 46721), 'pickle.load', 'pickle.load', (['of'], {}), '(of)\n', (46717, 46721), False, 'import pickle\n'), ((8639, 8704), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train'], {'test_size': 'test_size', 'random_state': 'seed'}), '(X_train, test_size=test_size, random_state=seed)\n', (8655, 8704), False, 'from sklearn.model_selection import train_test_split\n'), ((9354, 9368), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (9366, 9368), False, 'from sklearn.preprocessing import RobustScaler, OneHotEncoder, StandardScaler\n'), ((9449, 9496), 'os.path.join', 'os.path.join', (['datasetdir', 
'"""clinical_scaler.pkl"""'], {}), "(datasetdir, 'clinical_scaler.pkl')\n", (9461, 9496), False, 'import os\n'), ((9933, 9972), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X_test', 'columns': 'cols'}), '(data=X_test, columns=cols)\n', (9945, 9972), True, 'import pandas as pd\n'), ((10208, 10234), 'numpy.save', 'np.save', (['path_test', 'X_test'], {}), '(path_test, X_test)\n', (10215, 10234), True, 'import numpy as np\n'), ((10571, 10589), 'numpy.load', 'np.load', (['path_test'], {}), '(path_test)\n', (10578, 10589), True, 'import numpy as np\n'), ((16812, 16889), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'data_train'], {'test_size': 'test_size', 'random_state': 'seed'}), '(X_train, data_train, test_size=test_size, random_state=seed)\n', (16828, 16889), False, 'from sklearn.model_selection import train_test_split\n'), ((19067, 19081), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (19079, 19081), False, 'from sklearn.preprocessing import RobustScaler, OneHotEncoder, StandardScaler\n'), ((19162, 19205), 'os.path.join', 'os.path.join', (['datasetdir', '"""rois_scaler.pkl"""'], {}), "(datasetdir, 'rois_scaler.pkl')\n", (19174, 19205), False, 'import os\n'), ((19655, 19673), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (19671, 19673), False, 'from sklearn.linear_model import LinearRegression\n'), ((20233, 20282), 'os.path.join', 'os.path.join', (['datasetdir', '"""rois_residualizer.pkl"""'], {}), "(datasetdir, 'rois_residualizer.pkl')\n", (20245, 20282), False, 'import os\n'), ((21201, 21240), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X_test', 'columns': 'cols'}), '(data=X_test, columns=cols)\n', (21213, 21240), True, 'import pandas as pd\n'), ((21476, 21502), 'numpy.save', 'np.save', (['path_test', 'X_test'], {}), '(path_test, X_test)\n', (21483, 21502), True, 'import numpy as np\n'), ((21839, 21857), 'numpy.load', 'np.load', (['path_test'], {}), 
'(path_test)\n', (21846, 21857), True, 'import numpy as np\n'), ((25750, 25798), 'pandas.read_csv', 'pd.read_csv', (["files['clinical_surface']"], {'sep': '"""\t"""'}), "(files['clinical_surface'], sep='\\t')\n", (25761, 25798), True, 'import pandas as pd\n'), ((27798, 27813), 'neurocombat_sklearn.CombatModel', 'fortin_combat', ([], {}), '()\n', (27811, 27813), True, 'from neurocombat_sklearn import CombatModel as fortin_combat\n'), ((29206, 29220), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (29218, 29220), False, 'from sklearn.preprocessing import RobustScaler, OneHotEncoder, StandardScaler\n'), ((29761, 29779), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (29777, 29779), False, 'from sklearn.linear_model import LinearRegression\n'), ((36075, 36152), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'data_train'], {'test_size': 'test_size', 'random_state': 'seed'}), '(X_train, data_train, test_size=test_size, random_state=seed)\n', (36091, 36152), False, 'from sklearn.model_selection import train_test_split\n'), ((36751, 36765), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (36763, 36765), False, 'from sklearn.preprocessing import RobustScaler, OneHotEncoder, StandardScaler\n'), ((36846, 36892), 'os.path.join', 'os.path.join', (['datasetdir', '"""genetic_scaler.pkl"""'], {}), "(datasetdir, 'genetic_scaler.pkl')\n", (36858, 36892), False, 'import os\n'), ((37464, 37503), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X_test', 'columns': 'cols'}), '(data=X_test, columns=cols)\n', (37476, 37503), True, 'import pandas as pd\n'), ((37739, 37765), 'numpy.save', 'np.save', (['path_test', 'X_test'], {}), '(path_test, X_test)\n', (37746, 37765), True, 'import numpy as np\n'), ((38102, 38120), 'numpy.load', 'np.load', (['path_test'], {}), '(path_test)\n', (38109, 38120), True, 'import numpy as np\n'), ((45406, 45435), 'numpy.savez', 'np.savez', 
(['path_test'], {}), '(path_test, **X_test)\n', (45414, 45435), True, 'import numpy as np\n'), ((9562, 9584), 'pickle.dump', 'pickle.dump', (['scaler', 'f'], {}), '(scaler, f)\n', (9573, 9584), False, 'import pickle\n'), ((10393, 10425), 'pandas.read_csv', 'pd.read_csv', (['meta_path'], {'sep': '"""\t"""'}), "(meta_path, sep='\\t')\n", (10404, 10425), True, 'import pandas as pd\n'), ((17542, 17557), 'neurocombat_sklearn.CombatModel', 'fortin_combat', ([], {}), '()\n', (17555, 17557), True, 'from neurocombat_sklearn import CombatModel as fortin_combat\n'), ((19271, 19293), 'pickle.dump', 'pickle.dump', (['scaler', 'f'], {}), '(scaler, f)\n', (19282, 19293), False, 'import pickle\n'), ((20348, 20373), 'pickle.dump', 'pickle.dump', (['regressor', 'f'], {}), '(regressor, f)\n', (20359, 20373), False, 'import pickle\n'), ((21661, 21693), 'pandas.read_csv', 'pd.read_csv', (['meta_path'], {'sep': '"""\t"""'}), "(meta_path, sep='\\t')\n", (21672, 21693), True, 'import pandas as pd\n'), ((26394, 26446), 'nibabel.freesurfer.mghformat.load', 'surface_loader', (['data_train[features_list[0]].iloc[0]'], {}), '(data_train[features_list[0]].iloc[0])\n', (26408, 26446), True, 'from nibabel.freesurfer.mghformat import load as surface_loader\n'), ((26708, 26725), 'pandas.isnull', 'pd.isnull', (['[path]'], {}), '([path])\n', (26717, 26725), True, 'import pandas as pd\n'), ((27327, 27344), 'numpy.isnan', 'np.isnan', (['X_train'], {}), '(X_train)\n', (27335, 27344), True, 'import numpy as np\n'), ((28531, 28555), 'pickle.dump', 'pickle.dump', (['adjuster', 'f'], {}), '(adjuster, f)\n', (28542, 28555), False, 'import pickle\n'), ((29506, 29528), 'pickle.dump', 'pickle.dump', (['scaler', 'f'], {}), '(scaler, f)\n', (29517, 29528), False, 'import pickle\n'), ((30604, 30629), 'pickle.dump', 'pickle.dump', (['regressor', 'f'], {}), '(regressor, f)\n', (30615, 30629), False, 'import pickle\n'), ((36958, 36980), 'pickle.dump', 'pickle.dump', (['scaler', 'f'], {}), '(scaler, f)\n', (36969, 
36980), False, 'import pickle\n'), ((37924, 37956), 'pandas.read_csv', 'pd.read_csv', (['meta_path'], {'sep': '"""\t"""'}), "(meta_path, sep='\\t')\n", (37935, 37956), True, 'import pandas as pd\n'), ((10618, 10655), 'pandas.read_csv', 'pd.read_csv', (['meta_path_test'], {'sep': '"""\t"""'}), "(meta_path_test, sep='\\t')\n", (10629, 10655), True, 'import pandas as pd\n'), ((18388, 18413), 'pickle.dump', 'pickle.dump', (['adjuster', 'of'], {}), '(adjuster, of)\n', (18399, 18413), False, 'import pickle\n'), ((21886, 21923), 'pandas.read_csv', 'pd.read_csv', (['meta_path_test'], {'sep': '"""\t"""'}), "(meta_path_test, sep='\\t')\n", (21897, 21923), True, 'import pandas as pd\n'), ((27088, 27104), 'numpy.isnan', 'np.isnan', (['X_test'], {}), '(X_test)\n', (27096, 27104), True, 'import numpy as np\n'), ((38149, 38186), 'pandas.read_csv', 'pd.read_csv', (['meta_path_test'], {'sep': '"""\t"""'}), "(meta_path_test, sep='\\t')\n", (38160, 38186), True, 'import pandas as pd\n'), ((19883, 19910), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (19896, 19910), False, 'from sklearn.preprocessing import RobustScaler, OneHotEncoder, StandardScaler\n'), ((29989, 30016), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (30002, 30016), False, 'from sklearn.preprocessing import RobustScaler, OneHotEncoder, StandardScaler\n'), ((20653, 20680), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (20666, 20680), False, 'from sklearn.preprocessing import RobustScaler, OneHotEncoder, StandardScaler\n'), ((26766, 26786), 'nibabel.freesurfer.mghformat.load', 'surface_loader', (['path'], {}), '(path)\n', (26780, 26786), True, 'from nibabel.freesurfer.mghformat import load as surface_loader\n'), ((30922, 30949), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (30935, 
30949), False, 'from sklearn.preprocessing import RobustScaler, OneHotEncoder, StandardScaler\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define the network structure of DeepBSDE"""
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import nn
from mindspore import ops as P
from mindspore import Tensor, Parameter
class DeepBSDE(nn.Cell):
    """
    The network structure of DeepBSDE.
    Args:
        cfg: configure settings.
        bsde(Cell): equation function
    """
    def __init__(self, cfg, bsde):
        super(DeepBSDE, self).__init__()
        self.bsde = bsde
        # Time discretization of the BSDE: step size and number of steps.
        self.delta_t = bsde.delta_t
        self.num_time_interval = bsde.num_time_interval
        self.dim = bsde.dim
        # Time grid t_k = k * delta_t for k = 0 .. num_time_interval - 1.
        self.time_stamp = Tensor(np.arange(0, cfg.num_time_interval) * bsde.delta_t)
        # Trainable initial value y_0, sampled uniformly in y_init_range.
        self.y_init = Parameter(np.random.uniform(low=cfg.y_init_range[0],
                                            high=cfg.y_init_range[1],
                                            size=[1]).astype(np.float32))
        # Trainable initial gradient z_0 of shape (1, dim).
        self.z_init = Parameter(np.random.uniform(low=-0.1, high=0.1, size=[1, cfg.dim]).astype(np.float32))
        # One subnet per intermediate time step approximates z at that step.
        self.subnet = nn.CellList([FeedForwardSubNet(cfg.dim, cfg.num_hiddens)
                                  for _ in range(bsde.num_time_interval-1)])
        self.generator = bsde.generator
        self.matmul = P.MatMul()
        self.sum = P.ReduceSum(keep_dims=True)

    def construct(self, dw, x):
        """repeat FeedForwardSubNet (num_time_interval - 1) times."""
        # dw and x are indexed as [batch, dim, time]; dw presumably has
        # num_time_interval slices and x one more -- TODO confirm.
        all_one_vec = P.Ones()((P.shape(dw)[0], 1), mstype.float32)
        # Broadcast the learned scalar y_0 and row-vector z_0 over the batch.
        y = all_one_vec * self.y_init
        z = self.matmul(all_one_vec, self.z_init)
        for t in range(0, self.num_time_interval - 1):
            # Euler step: y_{t+1} = y_t - dt * f(t, x_t, y_t, z_t) + z_t . dw_t
            y = y - self.delta_t * (self.generator(self.time_stamp[t], x[:, :, t], y, z)) + self.sum(z * dw[:, :, t], 1)
            # z at the next time point comes from the per-step subnet.
            z = self.subnet[t](x[:, :, t + 1]) / self.dim
        # terminal time
        y = y - self.delta_t * self.generator(self.time_stamp[-1], x[:, :, -2], y, z) + self.sum(z * dw[:, :, -1], 1)
        return y
class FeedForwardSubNet(nn.Cell):
    """
    Subnet to fit the spatial gradients at time t=tn
    Args:
        dim (int): dimension of the final output
        num_hiddens list(int): number of hidden layers
    """
    def __init__(self, dim, num_hiddens):
        super(FeedForwardSubNet, self).__init__()
        self.dim = dim
        self.num_hiddens = num_hiddens
        # One batch-norm per layer boundary: input, each hidden, output.
        widths = [dim] + num_hiddens + [dim]
        self.bns = nn.CellList([
            nn.BatchNorm1d(width, momentum=0.99, eps=1e-6,
                           beta_init='normal', gamma_init='uniform')
            for width in widths])
        # Bias-free hidden projections: dim -> h_0 -> h_1 -> ... -> h_last.
        hidden_denses = [
            nn.Dense(fan_in, fan_out, has_bias=False, activation=None)
            for fan_in, fan_out in zip([dim] + num_hiddens[:-1], num_hiddens)]
        # Final projection back to `dim`; keeps its bias (default has_bias).
        output_dense = nn.Dense(num_hiddens[-1], dim, activation=None)
        self.denses = nn.CellList(hidden_denses + [output_dense])
        self.relu = nn.ReLU()

    def construct(self, x):
        """structure: bn -> (dense -> bn -> relu) * len(num_hiddens) -> dense -> bn"""
        out = self.bns[0](x)
        depth = len(self.num_hiddens)
        for layer in range(depth):
            out = self.relu(self.bns[layer + 1](self.denses[layer](out)))
        out = self.denses[depth](out)
        return self.bns[depth + 1](out)
class WithLossCell(nn.Cell):
    """Loss cell for DeepBSDE: clipped quadratic loss on the terminal value.

    The loss is quadratic in the terminal residual when its magnitude is
    below `delta_clip`, and switches to a linear approximation outside the
    clipped range to limit the influence of outliers.
    """
    def __init__(self, net):
        super(WithLossCell, self).__init__()
        self.net = net
        self.terminal_condition = net.bsde.terminal_condition
        self.total_time = net.bsde.total_time
        self.sum = P.ReduceSum()
        self.delta_clip = 50.0
        self.selete = P.Select()

    def construct(self, dw, x):
        """Return the scalar training loss for one batch of paths."""
        prediction = self.net(dw, x)
        target = self.terminal_condition(self.total_time, x[:, :, -1])
        residual = prediction - target
        magnitude = P.Abs()(residual)
        clip = self.delta_clip
        # Quadratic inside the clip range, linear tail outside.
        quadratic = P.Square()(residual)
        linear = 2 * clip * magnitude - clip * clip
        return self.sum(self.selete(magnitude < clip, quadratic, linear))
| [
"mindspore.nn.CellList",
"mindspore.nn.Dense",
"mindspore.ops.Abs",
"numpy.arange",
"mindspore.ops.Select",
"mindspore.ops.ReduceSum",
"mindspore.ops.Square",
"numpy.random.uniform",
"mindspore.nn.ReLU",
"mindspore.ops.Ones",
"mindspore.ops.MatMul",
"mindspore.ops.shape",
"mindspore.nn.Batch... | [((1899, 1909), 'mindspore.ops.MatMul', 'P.MatMul', ([], {}), '()\n', (1907, 1909), True, 'from mindspore import ops as P\n'), ((1929, 1956), 'mindspore.ops.ReduceSum', 'P.ReduceSum', ([], {'keep_dims': '(True)'}), '(keep_dims=True)\n', (1940, 1956), True, 'from mindspore import ops as P\n'), ((3204, 3226), 'mindspore.nn.CellList', 'nn.CellList', (['bn_layers'], {}), '(bn_layers)\n', (3215, 3226), False, 'from mindspore import nn\n'), ((3662, 3687), 'mindspore.nn.CellList', 'nn.CellList', (['dense_layers'], {}), '(dense_layers)\n', (3673, 3687), False, 'from mindspore import nn\n'), ((3708, 3717), 'mindspore.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3715, 3717), False, 'from mindspore import nn\n'), ((4440, 4453), 'mindspore.ops.ReduceSum', 'P.ReduceSum', ([], {}), '()\n', (4451, 4453), True, 'from mindspore import ops as P\n'), ((4507, 4517), 'mindspore.ops.Select', 'P.Select', ([], {}), '()\n', (4515, 4517), True, 'from mindspore import ops as P\n'), ((2082, 2090), 'mindspore.ops.Ones', 'P.Ones', ([], {}), '()\n', (2088, 2090), True, 'from mindspore import ops as P\n'), ((3041, 3131), 'mindspore.nn.BatchNorm1d', 'nn.BatchNorm1d', (['c'], {'momentum': '(0.99)', 'eps': '(1e-06)', 'beta_init': '"""normal"""', 'gamma_init': '"""uniform"""'}), "(c, momentum=0.99, eps=1e-06, beta_init='normal', gamma_init=\n 'uniform')\n", (3055, 3131), False, 'from mindspore import nn\n'), ((3251, 3313), 'mindspore.nn.Dense', 'nn.Dense', (['dim', 'num_hiddens[0]'], {'has_bias': '(False)', 'activation': 'None'}), '(dim, num_hiddens[0], has_bias=False, activation=None)\n', (3259, 3313), False, 'from mindspore import nn\n'), ((3591, 3638), 'mindspore.nn.Dense', 'nn.Dense', (['num_hiddens[-1]', 'dim'], {'activation': 'None'}), '(num_hiddens[-1], dim, activation=None)\n', (3599, 3638), False, 'from mindspore import nn\n'), ((4752, 4759), 'mindspore.ops.Abs', 'P.Abs', ([], {}), '()\n', (4757, 4759), True, 'from mindspore import ops as P\n'), ((1287, 1322), 
'numpy.arange', 'np.arange', (['(0)', 'cfg.num_time_interval'], {}), '(0, cfg.num_time_interval)\n', (1296, 1322), True, 'import numpy as np\n'), ((3354, 3431), 'mindspore.nn.Dense', 'nn.Dense', (['num_hiddens[i]', 'num_hiddens[i + 1]'], {'has_bias': '(False)', 'activation': 'None'}), '(num_hiddens[i], num_hiddens[i + 1], has_bias=False, activation=None)\n', (3362, 3431), False, 'from mindspore import nn\n'), ((1371, 1449), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'cfg.y_init_range[0]', 'high': 'cfg.y_init_range[1]', 'size': '[1]'}), '(low=cfg.y_init_range[0], high=cfg.y_init_range[1], size=[1])\n', (1388, 1449), True, 'import numpy as np\n'), ((1602, 1658), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.1)', 'high': '(0.1)', 'size': '[1, cfg.dim]'}), '(low=-0.1, high=0.1, size=[1, cfg.dim])\n', (1619, 1658), True, 'import numpy as np\n'), ((2092, 2103), 'mindspore.ops.shape', 'P.shape', (['dw'], {}), '(dw)\n', (2099, 2103), True, 'from mindspore import ops as P\n'), ((4868, 4878), 'mindspore.ops.Square', 'P.Square', ([], {}), '()\n', (4876, 4878), True, 'from mindspore import ops as P\n')] |
import os
import sys
import numpy as np
def get_malware_dataset(valid=False):
    """Load the malware dataset as ``(x_train, y_train, x_test, y_test)``.

    With ``valid=True`` only the first month (2010-11) is loaded; otherwise
    all later months (2010-12 .. 2014-07) are concatenated.  The training
    targets are converted to log-odds, while the test targets stay as
    probabilities.

    :param bool valid: load the validation split (first month only)
    :returns: x_train_set, y_train_set, x_test_set, y_test_set
    """
    def get_monthly_data(file_path, num_feature=483):
        '''Each row of `x_mat` is a datapoint.
        It adds a constant one for another dimension at the end for the bias term.

        Returns:
            two numpy arrays, one for input of size (num_data, num_feature) and another for the output
            (num_data,).
        '''
        x_mat = []
        y_vec = []
        with open(file_path, 'r') as datafile:
            for line in datafile:
                feature = [0] * num_feature
                feature[-1] = 1  # bias term
                items = line.split()
                y = float(items[0])
                # remaining tokens are sparse "index:value" pairs
                for item in items[1:]:
                    k, v = item.split(':')
                    feature[int(k)] = int(v)
                y_vec.append(y)
                x_mat.append(feature)
        return np.array(x_mat), np.array(y_vec)

    def log_odds_vec(p):
        # convert a probability to log-odds. Feel free to ignore the "divide by
        # zero" warning since we deal with it manually. The extreme values are
        # determined by looking at the histogram of the first-month data such
        # that they do not deviate too far from the others
        ind_0 = np.where(p == 0)[0]
        ind_1 = np.where(p == 1)[0]
        logodds = np.log(p) - np.log(1.-p)
        logodds[ind_0] = -5
        logodds[ind_1] = 4
        return logodds

    def read_data(valid, folder_name='./malware-dataset/'):
        # NOTE: the original hard-coded './malware-dataset/' everywhere and
        # ignored `folder_name`; it is now honoured (default is unchanged).
        if valid:
            # first month
            xs, ys = get_monthly_data(folder_name + '2010-11.txt')
        else:
            # every month from 2010-12 through 2014-07 inclusive
            # ('2010-11.txt' is the validation month and is excluded)
            file_names = ['{:d}-{:02d}.txt'.format(yr, mo)
                          for yr in range(2010, 2015) for mo in range(1, 13)
                          if (2010, 11) < (yr, mo) <= (2014, 7)]
            xs, ys = [], []
            for f in file_names:
                x_mat, y_vec = get_monthly_data(folder_name + f)
                xs.append(x_mat)
                ys.append(y_vec)
            xs = np.concatenate(xs, axis=0)
            ys = np.concatenate(ys)
        # shift by one sample: each training row is paired with the next
        # sample's target for testing
        x_train_set, y_train_set = xs[:-1], ys[:-1]
        x_test_set, y_test_set = xs[1:], ys[1:]
        y_train_set = log_odds_vec(y_train_set)
        print('data size:', len(xs))
        return x_train_set, y_train_set, x_test_set, y_test_set

    return read_data(valid)
def get_elec2_dataset(valid=False):
    """Load the ELEC2 electricity-price dataset.

    The target is the fraction of the previous 48 NSW prices that lie below
    the current price; training targets are mapped to log-odds while test
    targets stay as probabilities.

    :param valid: if True return the first 4000 samples, otherwise the rest
    :returns: x_train_set, y_train_set, x_test_set, y_test_set
    """
    def get_all_data(file_path='./electricity-price-dataset/electricity-normalized.csv',
                     num_feature=15):
        ''' 15 features in total:
        - The first seven features are indicator of week days;
        - The eighth feature is time
        - The ninth feature is date
        - The remaining five features: NSWprice, NSWdemand, VICprice, VICdemand, transfer
        - The bias
        '''
        X, y, _y = [], [], []
        with open(file_path, 'r') as datafile:
            header = datafile.readline()  # skip the CSV header row
            for line in datafile.readlines():
                feature = [0] * num_feature
                feature[-1] = 1 # bias term
                items = line.split(',')
                feature[int(items[1])-1] = 1 # day
                feature[7] = float(items[2]) # time
                feature[8] = float(items[0]) # date
                fid = 9 # others
                for item in items[3:-1]:
                    feature[fid] = float(item)
                    fid += 1
                X.append(feature)
                # y.append(float(items[3])) # target
                # print(np.mean(y[-49:-1])<y[-1], items[-1])
                _y.append(float(items[3]))
                # fraction of the previous (up to 48) prices below the current
                # one.  For the very first row the window is empty and numpy
                # turns the 0/0 into nan (with a RuntimeWarning); early rows
                # use a shorter window.  All 49 warm-up rows are dropped below.
                y_prob = np.sum(np.array(_y[-49:-1]) < _y[-1]) / len(_y[-49:-1])
                y.append(y_prob)
                # print(y_prob, items[-1])
        # make it predict the future
        X = X[49:]
        y = y[49:]
        num_instance = len(X)
        print(f'Number of samples: {num_instance}')
        return np.array(X), np.array(y)
    def log_odds_vec(p):
        # convert a probability to log-odds. Feel free to ignore the "divide by
        # zero" warning since we deal with it manually. The extreme values are
        # determined by looking at the histogram of the first-month data such
        # that they do not deviate too far from the others
        ind_0 = np.where(p == 0)[0]
        ind_1 = np.where(p == 1)[0]
        logodds = np.log(p) - np.log(1.-p)
        logodds[ind_0] = -4
        logodds[ind_1] = 4
        return logodds
    X, y = get_all_data()
    log_odds = log_odds_vec(y)
    # First 4000 samples form the validation split, the remainder the
    # train/test split.
    val_size = 4000
    if valid:
        X = X[:val_size]
        y = y[:val_size]
        log_odds = log_odds[:val_size]
    else:
        X = X[val_size:]
        y = y[val_size:]
        log_odds = log_odds[val_size:]
    # shift by one sample: train on log-odds at t, test on probability at t+1
    x_train_set, y_train_set, x_test_set, y_test_set = X[:-1], log_odds[:-1], X[1:], y[1:]
    return x_train_set, y_train_set, x_test_set, y_test_set
def get_sensordrift_dataset(valid=False):
    """Load the gas-sensor-drift dataset for one gas class (class 2).

    Batch 1 is the validation split; batches 2-10 (concatenated) form the
    train/test split.  Features and targets are standardised using the mean
    and std computed from batch 1 in BOTH cases.

    :param valid: if True return batch 1, otherwise batches 2-10
    :returns: x_train_set, y_train_set, x_test_set, y_test_set
    """
    def get_batch_data(file_path, num_feature=129):
        '''`gas_class` - dict; args in {1,...,6}
        `gas_class[i]` - dict; args in {'X', 'y'}
        `gas_class[i][j]` - list
        e.g., gas_class[2]['X']
        '''
        gas_class = {}
        for i in range(1, 7):
            gas_class[i] = {}
            gas_class[i]['X'] = []
            gas_class[i]['y'] = []
        with open(file_path, 'r') as datafile:
            for line in datafile:
                feature = [0] * num_feature
                feature[-1] = 1 # bias term
                # each line is "<class>;<concentration> <idx>:<val> ..."
                class_items = line.split(';')
                X = gas_class[int(class_items[0])]['X']
                y = gas_class[int(class_items[0])]['y']
                items = class_items[1].strip().split()
                y.append(float(items[0])) # concentration
                # sparse "index:value" pairs; indices are 1-based in the file
                for item in items[1:]:
                    k, v = item.split(':')
                    feature[int(k)-1] = float(v)
                X.append(np.array(feature))
        # summary
        print(file_path)
        for i in range(1, 7):
            assert len(gas_class[i]['X']) == len(gas_class[i]['y'])
            num_instance = len(gas_class[i]['X'])
            print(f'class{i}: {num_instance} samples')
        return gas_class
    class_id = 2
    gas_class = get_batch_data('./sensor-drift-dataset/batch1.dat') # validation
    X, y = gas_class[class_id]['X'], gas_class[class_id]['y']
    # Standardisation statistics always come from batch 1 (the validation
    # batch), including when the training batches are loaded below.
    mu_x = np.mean(X, axis=0, keepdims=True)
    mu_x[0, -1] = 0  # keep the constant bias feature untouched by centering
    scale_x = np.std(X, axis=0, keepdims=True)
    scale_x[0, -1] = 1  # ... and untouched by scaling
    mu_y = np.mean(y)
    scale_y = np.std(y)
    def scaling_x(x):
        return (x-mu_x)/scale_x
    def scaling_y(y):
        return (y-mu_y)/scale_y
    if valid:
        X = scaling_x(X)
        y = scaling_y(y)
    else:
        file_names = ['batch2.dat',
                      'batch3.dat',
                      'batch4.dat',
                      'batch5.dat',
                      'batch6.dat',
                      'batch7.dat',
                      'batch8.dat',
                      'batch9.dat',
                      'batch10.dat']
        X, y = [], []
        for file_name in file_names:
            gas_class = get_batch_data('./sensor-drift-dataset/'+file_name)
            X.append(gas_class[class_id]['X'])
            y.append(gas_class[class_id]['y'])
        X = np.concatenate(X, axis=0)
        y = np.concatenate(y, axis=0)
        X = scaling_x(X)
        y = scaling_y(y)
    # shift by one sample: each training row is paired with the next
    # sample's target for testing
    x_train_set, y_train_set, x_test_set, y_test_set = X[:-1], y[:-1], X[1:], y[1:]
    return x_train_set, y_train_set, x_test_set, y_test_set
"numpy.mean",
"numpy.where",
"numpy.log",
"numpy.array",
"numpy.concatenate",
"numpy.std"
] | [((7147, 7180), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)', 'keepdims': '(True)'}), '(X, axis=0, keepdims=True)\n', (7154, 7180), True, 'import numpy as np\n'), ((7215, 7247), 'numpy.std', 'np.std', (['X'], {'axis': '(0)', 'keepdims': '(True)'}), '(X, axis=0, keepdims=True)\n', (7221, 7247), True, 'import numpy as np\n'), ((7282, 7292), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (7289, 7292), True, 'import numpy as np\n'), ((7307, 7316), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (7313, 7316), True, 'import numpy as np\n'), ((8090, 8115), 'numpy.concatenate', 'np.concatenate', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (8104, 8115), True, 'import numpy as np\n'), ((8128, 8153), 'numpy.concatenate', 'np.concatenate', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (8142, 8153), True, 'import numpy as np\n'), ((916, 931), 'numpy.array', 'np.array', (['x_mat'], {}), '(x_mat)\n', (924, 931), True, 'import numpy as np\n'), ((933, 948), 'numpy.array', 'np.array', (['y_vec'], {}), '(y_vec)\n', (941, 948), True, 'import numpy as np\n'), ((1287, 1303), 'numpy.where', 'np.where', (['(p == 0)'], {}), '(p == 0)\n', (1295, 1303), True, 'import numpy as np\n'), ((1323, 1339), 'numpy.where', 'np.where', (['(p == 1)'], {}), '(p == 1)\n', (1331, 1339), True, 'import numpy as np\n'), ((1361, 1370), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (1367, 1370), True, 'import numpy as np\n'), ((1373, 1388), 'numpy.log', 'np.log', (['(1.0 - p)'], {}), '(1.0 - p)\n', (1379, 1388), True, 'import numpy as np\n'), ((2583, 2609), 'numpy.concatenate', 'np.concatenate', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (2597, 2609), True, 'import numpy as np\n'), ((2627, 2645), 'numpy.concatenate', 'np.concatenate', (['ys'], {}), '(ys)\n', (2641, 2645), True, 'import numpy as np\n'), ((4603, 4614), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4611, 4614), True, 'import numpy as np\n'), ((4616, 4627), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4624, 4627), True, 'import numpy as 
np\n'), ((4966, 4982), 'numpy.where', 'np.where', (['(p == 0)'], {}), '(p == 0)\n', (4974, 4982), True, 'import numpy as np\n'), ((5002, 5018), 'numpy.where', 'np.where', (['(p == 1)'], {}), '(p == 1)\n', (5010, 5018), True, 'import numpy as np\n'), ((5040, 5049), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (5046, 5049), True, 'import numpy as np\n'), ((5052, 5067), 'numpy.log', 'np.log', (['(1.0 - p)'], {}), '(1.0 - p)\n', (5058, 5067), True, 'import numpy as np\n'), ((6654, 6671), 'numpy.array', 'np.array', (['feature'], {}), '(feature)\n', (6662, 6671), True, 'import numpy as np\n'), ((4276, 4296), 'numpy.array', 'np.array', (['_y[-49:-1]'], {}), '(_y[-49:-1])\n', (4284, 4296), True, 'import numpy as np\n')] |
import argparse
import logging
import os
import random
import math
import ransac.core as ransac
from ransac.models.conic_section import ConicSection
import cv2
import numpy as np
# Verbose logging so the fitted ellipse parameters (logged at DEBUG) appear.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s [%(levelname)s] %(message)s')
# Command-line interface: input image, RNG seed, output folder, and the
# edge-detection / RANSAC tuning knobs used in main().
parser = argparse.ArgumentParser()
parser.add_argument('--imageFilepath', help="The filepath to the image. Default: './images/bonsai.png'", default='./images/bonsai.png')
parser.add_argument('--randomSeed', help="The seed for the random module. Default: 0", type=int, default=0)
parser.add_argument('--outputDirectory', help="The output directory. Default: './outputs/'", default='./outputs/')
parser.add_argument('--blurringKernelSize', help="The size of the blurring kernel. Default: 9", type=int, default=9)
parser.add_argument('--cannyThreshold1', help="The 1st Canny threshold. Default: 30", type=int, default=30)
parser.add_argument('--cannyThreshold2', help="The 2nd Canny threshold. Default: 150", type=int, default=150)
parser.add_argument('--ransacMaximumNumberOfPoints', help="The maximum number of points used by the RANSAC algorithm. Default: 400", type=int, default=400)
parser.add_argument('--ransacNumberOfTrials', help="The number of RANSAC trials. Default: 1000", type=int, default=1000)
parser.add_argument('--ransacAcceptableError', help="The acceptable error for the RANSAC algorithm. Default: 10", type=float, default=10)
args = parser.parse_args()
# Seed the RNG so random.sample() in main() is reproducible.
random.seed(args.randomSeed)
def main():
    """Fit an ellipse to the bonsai-pot edge points with RANSAC.

    Reads the input image, extracts masked Canny edges, fits a conic section
    to a sample of edge points, and writes intermediate and annotated images
    to the output directory.
    """
    logging.info("fit_bonsai_pot_ellipse.py main()")
    if not os.path.exists(args.outputDirectory):
        os.makedirs(args.outputDirectory)
    original_img = cv2.imread(args.imageFilepath, cv2.IMREAD_COLOR)
    # Grayscale conversion
    gray_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(os.path.join(args.outputDirectory, "fitBonsaiPot_main_grayscale.png"), gray_img)
    # Blur to suppress noise before edge detection
    kernel_shape = (args.blurringKernelSize, args.blurringKernelSize)
    smoothed_img = cv2.blur(gray_img, kernel_shape)
    cv2.imwrite(os.path.join(args.outputDirectory, "fitBonsaiPot_main_blurred.png"), smoothed_img)
    # Canny edge detection
    edges_img = cv2.Canny(smoothed_img, args.cannyThreshold1, args.cannyThreshold2)
    cv2.imwrite(os.path.join(args.outputDirectory, "fitBonsaiPot_main_canny.png"), edges_img)
    # Suppress edges on green-dominated pixels (foliage)
    non_green_mask = GreenDominatedInverseMap(original_img)
    cv2.imwrite(os.path.join(args.outputDirectory, "fitBonsaiPot_main_greenDominatedInverseMap.png"), non_green_mask)
    masked_edges_img = cv2.min(edges_img, non_green_mask)
    cv2.imwrite(os.path.join(args.outputDirectory, "fitBonsaiPot_main_maskedCanny.png"), masked_edges_img)
    # Collect the edge pixels as ((x, y), 0) tuples, then cap their number
    xy_tuples = [((x, y), 0)
                 for y in range(masked_edges_img.shape[0])
                 for x in range(masked_edges_img.shape[1])
                 if masked_edges_img[y, x] > 0]
    xy_tuples = random.sample(xy_tuples, min(len(xy_tuples), args.ransacMaximumNumberOfPoints))
    modeller = ransac.Modeler(ConicSection, number_of_trials=args.ransacNumberOfTrials, acceptable_error=args.ransacAcceptableError)
    logging.info("Calling modeller.ConsensusModel(xy_tuples)...")
    consensus_conic_section, inliers, outliers = modeller.ConsensusModel(xy_tuples)
    logging.info("Done!")
    annotated_img = original_img.copy()
    ellipse_points = consensus_conic_section.EllipsePoints()
    # Draw the fitted ellipse as consecutive line segments, then close the loop
    for pt_a, pt_b in zip(ellipse_points[:-1], ellipse_points[1:]):
        cv2.line(annotated_img, pt_a, pt_b, (255, 0, 0), thickness=3)
    cv2.line(annotated_img, ellipse_points[0], ellipse_points[-1], (255, 0, 0), thickness=3)
    # Inliers in green, outliers in red
    for ((x, y), d) in inliers:
        cv2.circle(annotated_img, (x, y), 2, (0, 255, 0))
    for ((x, y), d) in outliers:
        cv2.circle(annotated_img, (x, y), 2, (0, 0, 255))
    cv2.imwrite(os.path.join(args.outputDirectory, "fitBonsaiPot_main_annotated.png"), annotated_img)
    # Ellipse parameters
    center, a, b, theta = consensus_conic_section.EllipseParameters()
    logging.debug("center = {}".format(center))
    logging.debug("a = {}".format(a))
    logging.debug("b = {}".format(b))
    logging.debug("theta = {}".format(theta))
def GreenDominatedInverseMap(image):
    """Return a uint8 mask that is 0 where green strictly dominates, else 255.

    A pixel is "green-dominated" when its green channel is strictly greater
    than both the blue and red channels (BGR image as loaded by cv2).  The
    intermediate green-dominated map is written to the output directory for
    debugging, exactly as before.

    :param image: BGR image array of shape (H, W, 3)
    :returns: uint8 array of shape (H, W), the inverse (255 - map) mask
    """
    # Vectorized channel comparison replaces the original per-pixel Python
    # double loop (same result, O(H*W) numpy work instead of interpreter work).
    blue = image[:, :, 0]
    green = image[:, :, 1]
    red = image[:, :, 2]
    dominated = np.logical_and(green > blue, green > red)
    green_dominated_map = np.where(dominated, 255, 0).astype(np.uint8)
    cv2.imwrite(os.path.join(args.outputDirectory, "fitBonsaiPot_GreenDominatedInverseMap_greenDominatedMap.png"), green_dominated_map)
    return 255 - green_dominated_map
if __name__ == '__main__':
    # Script entry point; all configuration comes from the argparse args above.
    main()
"logging.basicConfig",
"cv2.min",
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"cv2.line",
"os.path.join",
"logging.info",
"random.seed",
"ransac.core.Modeler",
"numpy.zeros",
"cv2.circle",
"cv2.cvtColor",
"cv2.Canny",
"cv2.imread",
"cv2.blur"
] | [((180, 278), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)-15s [%(levelname)s] %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)-15s [%(levelname)s] %(message)s')\n", (199, 278), False, 'import logging\n'), ((284, 309), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (307, 309), False, 'import argparse\n'), ((1447, 1475), 'random.seed', 'random.seed', (['args.randomSeed'], {}), '(args.randomSeed)\n', (1458, 1475), False, 'import random\n'), ((1493, 1541), 'logging.info', 'logging.info', (['"""fit_bonsai_pot_ellipse.py main()"""'], {}), "('fit_bonsai_pot_ellipse.py main()')\n", (1505, 1541), False, 'import logging\n'), ((1654, 1702), 'cv2.imread', 'cv2.imread', (['args.imageFilepath', 'cv2.IMREAD_COLOR'], {}), '(args.imageFilepath, cv2.IMREAD_COLOR)\n', (1664, 1702), False, 'import cv2\n'), ((1751, 1797), 'cv2.cvtColor', 'cv2.cvtColor', (['original_img', 'cv2.COLOR_BGR2GRAY'], {}), '(original_img, cv2.COLOR_BGR2GRAY)\n', (1763, 1797), False, 'import cv2\n'), ((1934, 2009), 'cv2.blur', 'cv2.blur', (['grayscale_img', '(args.blurringKernelSize, args.blurringKernelSize)'], {}), '(grayscale_img, (args.blurringKernelSize, args.blurringKernelSize))\n', (1942, 2009), False, 'import cv2\n'), ((2151, 2217), 'cv2.Canny', 'cv2.Canny', (['blurred_img', 'args.cannyThreshold1', 'args.cannyThreshold2'], {}), '(blurred_img, args.cannyThreshold1, args.cannyThreshold2)\n', (2160, 2217), False, 'import cv2\n'), ((2541, 2588), 'cv2.min', 'cv2.min', (['canny_img', 'green_dominated_inverse_map'], {}), '(canny_img, green_dominated_inverse_map)\n', (2548, 2588), False, 'import cv2\n'), ((3036, 3157), 'ransac.core.Modeler', 'ransac.Modeler', (['ConicSection'], {'number_of_trials': 'args.ransacNumberOfTrials', 'acceptable_error': 'args.ransacAcceptableError'}), '(ConicSection, number_of_trials=args.ransacNumberOfTrials,\n acceptable_error=args.ransacAcceptableError)\n', (3050, 3157), True, 
'import ransac.core as ransac\n'), ((3158, 3219), 'logging.info', 'logging.info', (['"""Calling modeller.ConsensusModel(xy_tuples)..."""'], {}), "('Calling modeller.ConsensusModel(xy_tuples)...')\n", (3170, 3219), False, 'import logging\n'), ((3308, 3329), 'logging.info', 'logging.info', (['"""Done!"""'], {}), "('Done!')\n", (3320, 3329), False, 'import logging\n'), ((3655, 3747), 'cv2.line', 'cv2.line', (['annotated_img', 'ellipse_points[0]', 'ellipse_points[-1]', '(255, 0, 0)'], {'thickness': '(3)'}), '(annotated_img, ellipse_points[0], ellipse_points[-1], (255, 0, 0),\n thickness=3)\n', (3663, 3747), False, 'import cv2\n'), ((4389, 4447), 'numpy.zeros', 'np.zeros', (['(img_sizeHWC[0], img_sizeHWC[1])'], {'dtype': 'np.uint8'}), '((img_sizeHWC[0], img_sizeHWC[1]), dtype=np.uint8)\n', (4397, 4447), True, 'import numpy as np\n'), ((1554, 1590), 'os.path.exists', 'os.path.exists', (['args.outputDirectory'], {}), '(args.outputDirectory)\n', (1568, 1590), False, 'import os\n'), ((1600, 1633), 'os.makedirs', 'os.makedirs', (['args.outputDirectory'], {}), '(args.outputDirectory)\n', (1611, 1633), False, 'import os\n'), ((1814, 1883), 'os.path.join', 'os.path.join', (['args.outputDirectory', '"""fitBonsaiPot_main_grayscale.png"""'], {}), "(args.outputDirectory, 'fitBonsaiPot_main_grayscale.png')\n", (1826, 1883), False, 'import os\n'), ((2026, 2093), 'os.path.join', 'os.path.join', (['args.outputDirectory', '"""fitBonsaiPot_main_blurred.png"""'], {}), "(args.outputDirectory, 'fitBonsaiPot_main_blurred.png')\n", (2038, 2093), False, 'import os\n'), ((2234, 2299), 'os.path.join', 'os.path.join', (['args.outputDirectory', '"""fitBonsaiPot_main_canny.png"""'], {}), "(args.outputDirectory, 'fitBonsaiPot_main_canny.png')\n", (2246, 2299), False, 'import os\n'), ((2402, 2490), 'os.path.join', 'os.path.join', (['args.outputDirectory', '"""fitBonsaiPot_main_greenDominatedInverseMap.png"""'], {}), "(args.outputDirectory,\n 'fitBonsaiPot_main_greenDominatedInverseMap.png')\n", 
(2414, 2490), False, 'import os\n'), ((2605, 2676), 'os.path.join', 'os.path.join', (['args.outputDirectory', '"""fitBonsaiPot_main_maskedCanny.png"""'], {}), "(args.outputDirectory, 'fitBonsaiPot_main_maskedCanny.png')\n", (2617, 2676), False, 'import os\n'), ((3593, 3650), 'cv2.line', 'cv2.line', (['annotated_img', 'p1', 'p2', '(255, 0, 0)'], {'thickness': '(3)'}), '(annotated_img, p1, p2, (255, 0, 0), thickness=3)\n', (3601, 3650), False, 'import cv2\n'), ((3785, 3834), 'cv2.circle', 'cv2.circle', (['annotated_img', '(x, y)', '(2)', '(0, 255, 0)'], {}), '(annotated_img, (x, y), 2, (0, 255, 0))\n', (3795, 3834), False, 'import cv2\n'), ((3876, 3925), 'cv2.circle', 'cv2.circle', (['annotated_img', '(x, y)', '(2)', '(0, 0, 255)'], {}), '(annotated_img, (x, y), 2, (0, 0, 255))\n', (3886, 3925), False, 'import cv2\n'), ((3943, 4012), 'os.path.join', 'os.path.join', (['args.outputDirectory', '"""fitBonsaiPot_main_annotated.png"""'], {}), "(args.outputDirectory, 'fitBonsaiPot_main_annotated.png')\n", (3955, 4012), False, 'import os\n'), ((4670, 4771), 'os.path.join', 'os.path.join', (['args.outputDirectory', '"""fitBonsaiPot_GreenDominatedInverseMap_greenDominatedMap.png"""'], {}), "(args.outputDirectory,\n 'fitBonsaiPot_GreenDominatedInverseMap_greenDominatedMap.png')\n", (4682, 4771), False, 'import os\n')] |
"""
process_abundances
Author: <NAME>
Notes: script and functions to process stellar abundane output from a
simulation where chemical tags of stars are written to stdout. This
loops through all files that could contain this info in the given
directory (or files that match chosen string) and collates the
abundance information into a single file. Stars are identified
their particle IDs, which can be readily tagged with data output
info from Enzo.
In order to work, this MUST have a data output associated with the
simulation, in order to read in species names for what is followed
in the run.
"""
import numpy as np
import glob
import yt
import subprocess
from galaxy_analysis.utilities.utilities import species_from_fields
# Fixed leading columns of each StellarAbundances line; species columns follow.
_base_col_names = ["grid_id","pid","ptype","x","y","z","particle_mass","birth_mass","metallicity"]
# One dtype per base column (9 entries).  The original list had only 6, so the
# zip() in read_all_data silently truncated/misaligned the column names.
# NOTE(review): ptype assumed integer (particle type id) -- confirm against the
# Enzo output format.
_base_dtypes = [int, int, int, float, float, float, float, float, float]
def filter_data(data):
    """
    Filter data, returning only the non-repeating values

    .. note:: Not implemented yet -- currently a stub that returns ``None``
       regardless of input.
    """
    return
def _read_sf_data(directory = '.'):
    """Collate star-formation lines from the ``enzo_job*.out`` files into
    ``sf.dat`` and print the sum of the parsed values.

    :param directory: simulation directory containing the enzo_job*.out files
        (the original ignored this parameter and always used the CWD)
    :returns: the parsed numpy array loaded from sf.dat
    """
    # Shell pipeline (grep/sed/awk) stripping the log decorations.  The
    # command strings are fixed (no user input), so shell=True is safe here.
    bash_commands = ["grep --no-filename -e '^P(' ./enzo_job*.out > SA_temp.dat",
                     'sed -e "s/P(//g" -i SA_temp.dat',
                     'sed -e "s/individual_star_maker//g" -i SA_temp.dat',
                     'sed -e "s/ \[add\]\://g" -i SA_temp.dat',
                     'sed -i "/CFRF/d" SA_temp.dat',
                     'sed -e "s/new star particles//g" -i SA_temp.dat',
                     "awk " + "'{$1=" + '""; print $0}' + "' SA_temp.dat > sf.dat",
                     "rm SA_temp.dat"]
    for bc in bash_commands:
        # Run in `directory` so the intermediate and output files are created
        # next to the data rather than in whatever the CWD happens to be.
        subprocess.call(bc, shell=True, cwd=directory)

    data = np.genfromtxt(directory + '/sf.dat')
    print(np.sum(data[:]))
    return data
def read_all_data(directory = '.'):
    """Collate StellarAbundances lines from the ``enzo_job*.out`` files into
    ``StellarAbundances.dat`` and load it as a structured array.

    Species column names are read from the latest data output (DD????) in
    ``directory``; if no output can be loaded, a default species list is used.

    :param directory: simulation directory containing the enzo_job*.out files
    :returns: structured numpy array, one row per tagged star
    """
    # First lets do some hacky BS: a fixed grep/sed/awk pipeline that strips
    # the log prefix from each abundance line (shell=True is safe: no user
    # input in the command strings).
    bash_commands = ["grep --no-filename -e '^StellarAbundances P' ./enzo_job*.out > SA_temp.dat",
                     'sed -e "s/StellarAbundances P(//g" -i SA_temp.dat',
                     "awk " + "'{$1=" + '""; print $0}' + "' SA_temp.dat > StellarAbundances.dat",
                     "rm SA_temp.dat"]
    for bc in bash_commands:
        # Run in `directory`: the original ran the pipeline in the CWD but
        # read StellarAbundances.dat from `directory`, which broke for any
        # directory other than '.'.
        subprocess.call(bc, shell=True, cwd=directory)

    try:
        # need simulation parameter file to get column names
        ds_names = glob.glob(directory + "/DD????/DD????")
        ds = yt.load(ds_names[-1])
        species = ['H','He'] + species_from_fields(ds.field_list)
    except Exception:
        # Fall back to the default followed-species list (e.g. when no data
        # output exists or yt cannot load it).
        species = ['H','He','C','N','O','K','Fe','Zn','Sr','Ba','AGB','PopIII','SNIa','SNII']

    _col_names = _base_col_names + species
    _dtypes = _base_dtypes + [float] * len(species)
    _ndtypes = [(x,y) for x,y in zip(_col_names,_dtypes)]

    data = np.genfromtxt(directory +'/StellarAbundances.dat', dtype = _ndtypes, invalid_raise=False)

    return data
if __name__ == "__main__":
    # Collate the star-formation log lines and the stellar abundances from
    # the current directory's enzo_job*.out files.
    _read_sf_data()
    read_all_data()
| [
"galaxy_analysis.utilities.utilities.species_from_fields",
"numpy.sum",
"yt.load",
"subprocess.call",
"numpy.genfromtxt",
"glob.glob"
] | [((1751, 1774), 'numpy.genfromtxt', 'np.genfromtxt', (['"""sf.dat"""'], {}), "('sf.dat')\n", (1764, 1774), True, 'import numpy as np\n'), ((2772, 2864), 'numpy.genfromtxt', 'np.genfromtxt', (["(directory + '/StellarAbundances.dat')"], {'dtype': '_ndtypes', 'invalid_raise': '(False)'}), "(directory + '/StellarAbundances.dat', dtype=_ndtypes,\n invalid_raise=False)\n", (2785, 2864), True, 'import numpy as np\n'), ((1707, 1738), 'subprocess.call', 'subprocess.call', (['bc'], {'shell': '(True)'}), '(bc, shell=True)\n', (1722, 1738), False, 'import subprocess\n'), ((1786, 1801), 'numpy.sum', 'np.sum', (['data[:]'], {}), '(data[:])\n', (1792, 1801), True, 'import numpy as np\n'), ((2237, 2268), 'subprocess.call', 'subprocess.call', (['bc'], {'shell': '(True)'}), '(bc, shell=True)\n', (2252, 2268), False, 'import subprocess\n'), ((2359, 2398), 'glob.glob', 'glob.glob', (["(directory + '/DD????/DD????')"], {}), "(directory + '/DD????/DD????')\n", (2368, 2398), False, 'import glob\n'), ((2412, 2433), 'yt.load', 'yt.load', (['ds_names[-1]'], {}), '(ds_names[-1])\n', (2419, 2433), False, 'import yt\n'), ((2465, 2499), 'galaxy_analysis.utilities.utilities.species_from_fields', 'species_from_fields', (['ds.field_list'], {}), '(ds.field_list)\n', (2484, 2499), False, 'from galaxy_analysis.utilities.utilities import species_from_fields\n')] |
# -*- coding: utf-8 -*-
"""
Coil module
Created on Tue Jan 26 08:31:05 2021
@author: <NAME>
"""
from __future__ import annotations
from typing import List
import numpy as np
import os
import matplotlib.pyplot as plt
from ..segment.segment import Segment, Arc, Circle, Line
from ..wire.wire import Wire, WireRect, WireCircular
from ..inductance.inductance import calc_mutual
from ..misc.set_axes_equal import set_axes_equal
from ..misc import geometry as geo
class Coil:
    """General Coil object.

    A coil is defined as the combination of a segment array and a wire type.

    :param List[Segment] segment_array: segments making up the coil path
    :param Wire wire: wire model used for the segments' self-inductance
    :param numpy.ndarray anchor: reference point used by :meth:`move_to` and
        :meth:`rotate`; defaults to the origin
    """

    # Handy constant vectors shared by all coils.
    VEC_0 = np.array([0., 0., 0.])
    VEC_X = np.array([1., 0., 0.])
    VEC_Y = np.array([0., 1., 0.])
    VEC_Z = np.array([0., 0., 1.])

    def __init__(self, segment_array: List[Segment], wire=Wire(), anchor: np.ndarray = None):
        """The constructor."""
        self.segment_array = segment_array
        self.wire = wire
        # Copy so later in-place moves do not mutate the caller's array.
        self.anchor = self.VEC_0.copy() if anchor is None else anchor.copy()

    @classmethod
    def from_magpylib(cls, magpy_object, wire=Wire(), anchor: np.ndarray = None):
        """Construct a coil from a collection of magpy sources and a Wire object.

        .. WARNING::
            Not implemented
        """
        # NOTE: @classmethod decorator was missing even though the first
        # parameter is `cls` and this is an alternate constructor.
        raise NotImplementedError

    def to_magpy(self):
        """Return a list of segments as collection of magpy sources.

        .. WARNING::
            Not implemented
        """
        raise NotImplementedError

    def _magpy2pycoil(self, magpy_object):
        raise NotImplementedError

    def _pycoil2magpy(self, coil_array):
        raise NotImplementedError

    def move_to(self, new_position: np.ndarray) -> Coil:
        """Move the coil so that its anchor lands on ``new_position``.

        :param numpy.ndarray new_position: target position of the anchor
        :returns: self, for chaining
        """
        translation = new_position - self.anchor
        for segment in self.segment_array:
            segment.translate(translation)
        self.anchor = new_position.copy()
        return self

    def translate(self, translation: np.ndarray):
        """Translate the coil by a specific translation vector.

        :param numpy.ndarray translation: displacement applied to every segment
        :returns: self, for chaining
        """
        for segment in self.segment_array:
            segment.translate(translation)
        self.anchor += translation
        return self

    def rotate(self, angle: float, axis: np.ndarray = None):
        """Rotate the coil around an axis through the anchor by ``angle``.

        :param float angle: rotation angle
        :param numpy.ndarray axis: rotation axis; defaults to the z axis
        :returns: self, for chaining
        """
        axis = self.VEC_Z if axis is None else axis
        for segment in self.segment_array:
            segment.rotate(angle, axis, self.anchor)
        return self

    def draw(self, draw_current=True, savefig=False):
        """Draw the coil in a 3D plot.

        :param bool draw_current: forwarded to each segment's draw method
        :param bool savefig: if True, save the figure to ``Fig_<i>.png`` using
            the first index ``i`` that does not already exist
        """
        fig = plt.figure(figsize=(7.5/2.4, 7.5/2.4), dpi=300,)
        ax = fig.add_subplot(111, projection='3d')
        for shape in self.segment_array:
            shape.draw(ax, draw_current)
        set_axes_equal(ax)
        ax.set_xlabel("x [mm]")
        ax.set_ylabel("y [mm]")
        ax.set_zlabel("z [mm]")
        if savefig:
            # Find the first unused Fig_<i>.png filename.
            i = 0
            while True:
                path = "Fig_"+str(i)+".png"
                if os.path.exists(path):
                    i += 1
                else:
                    break
            plt.savefig(path, dpi=300, transparent=True)
        plt.show()

    def get_inductance(self):
        """Compute the coil self-inductance.

        Sums twice the mutual inductance of every unordered segment pair plus
        each segment's own self-inductance (from the wire model).

        :returns: self inductance
        :rtype: float
        """
        inductance = 0
        # Mutual between segment pairs
        for i, segment_i in enumerate(self.segment_array[:-1]):
            for j, segment_j in enumerate(self.segment_array[i + 1:]):
                res = calc_mutual(segment_i, segment_j)
                inductance += 2*res[0]
        # Self of segments
        for i, segment_i in enumerate(self.segment_array):
            res = self.wire.self_inductance(segment_i)
            inductance += res
        return inductance
class Loop(Coil):
    """A single circular loop.

    :param float radius: loop radius
    :param numpy.ndarray position: loop center; defaults to the origin
    :param numpy.ndarray axis: rotation axis applied to the base loop;
        defaults to the z axis
    :param float angle: rotation angle around ``axis``
    :param Wire wire: wire model
    """

    def __init__(self, radius: float, position: np.ndarray = None, axis: np.ndarray = None, angle: float = 0.,
                 wire=Wire()):
        """The constructor"""
        position = self.VEC_0 if position is None else position
        axis = self.VEC_Z if axis is None else axis
        circle = Circle.from_rot(radius, position, axis, angle)
        super().__init__([circle], wire)

    @classmethod
    def from_normal(cls, radius: float, position: np.ndarray = None, normal: np.ndarray = None, wire=Wire()):
        """Alternate constructor from a position and a normal vector.

        (The @classmethod decorator was missing even though the method takes
        ``cls`` and returns ``cls(...)``.)

        :param float radius: loop radius
        :param numpy.ndarray position: loop center; defaults to the origin
        :param numpy.ndarray normal: loop normal; defaults to the y axis
        :param Wire wire: wire model
        :returns: the rotated/translated loop
        :rtype: Loop
        """
        position = cls.VEC_0 if position is None else position
        normal = cls.VEC_Y if normal is None else normal
        axis, angle = geo.get_rotation(cls.VEC_Z, normal)
        return cls(radius, position, axis, angle, wire)
class Solenoid(Coil):
    """Solenoid: ``n_turns`` circular loops spread evenly along the axis.

    :param float radius: loop radius
    :param float length: solenoid length
    :param int n_turns: number of turns (loops)
    :param numpy.ndarray position: position of the solenoid center
    :param numpy.ndarray axis: rotation axis; defaults to the z axis
    :param float angle: rotation angle around ``axis``
    :param Wire wire: wire model
    """

    def __init__(self, radius: float, length: float, n_turns: int,
                 position: np.ndarray = None, axis: np.ndarray = None, angle: float = 0.,
                 wire=Wire()):
        """The constructor"""
        segments = [Circle(radius, np.array([0., 0., z])) for z in np.linspace(-length/2, length/2, n_turns)]
        super().__init__(segments, wire)
        position = self.VEC_0 if position is None else position
        axis = self.VEC_Z if axis is None else axis
        self.move_to(position)
        # Coil.rotate expects (angle, axis); the original call had the
        # arguments swapped.
        self.rotate(angle, axis)

    @classmethod
    def from_normal(cls, radius, length, n_turns, position, normal, wire=Wire()):
        """Alternate constructor from a position and a normal vector.

        (The @classmethod decorator was missing even though the method takes
        ``cls`` and returns ``cls(...)``.)

        :param float radius: loop radius
        :param float length: solenoid length
        :param int n_turns: number of turns
        :param numpy.ndarray position: position of the solenoid center
        :param numpy.ndarray normal: direction of the solenoid axis
        :param Wire wire: wire model
        :returns: the rotated/translated solenoid
        :rtype: Solenoid
        """
        axis, angle = geo.get_rotation(cls.VEC_Z, normal)
        return cls(radius, length, n_turns, position, axis, angle, wire)
class Polygon(Coil):
    """Coil built from an open polyline of vertices.

    Consecutive points of ``polygon`` are joined by straight Line segments
    (no closing segment is added automatically).

    :param polygon: ordered sequence of points (array-like 3-vectors)
    :param Wire wire: wire model
    """

    def __init__(self, polygon, wire):
        """The constructor"""
        lines = [Line(start, end) for start, end in zip(polygon[:-1], polygon[1:])]
        super().__init__(lines, wire)
class Helmholtz(Coil):
    """Helmholtz pair: two coaxial loops separated by one radius.

    :param float radius: loop radius
    :param numpy.ndarray position: position of the pair's midpoint
    :param numpy.ndarray axis: rotation axis; defaults to the z axis
    :param float angle: rotation angle around ``axis``
    :param Wire wire: wire model
    """

    def __init__(self, radius: float, position: np.ndarray = None, axis: np.ndarray = None, angle: float = 0.,
                 wire=Wire()):
        """The constructor."""
        segments = [Circle(radius, np.array([0, 0, -radius/2])),
                    Circle(radius, np.array([0, 0, radius/2]))]
        super().__init__(segments, wire)
        position = self.VEC_0 if position is None else position
        axis = self.VEC_Z if axis is None else axis
        self.move_to(position)
        # Coil.rotate expects (angle, axis); the original call had the
        # arguments swapped.
        self.rotate(angle, axis)
# class Birdcage(Coil):
# def __init__(self,
# radius, length, nwires, position=_vec_0, axis=_vec_z, angle=0,
# wire=Wire() ):
# segments = []
# θ_0 = 2*π/(nwires-1)/2 # Angular position of the first wire
# Θ = np.linspace(θ_0, 2*π-θ_0, nwires) # Vector of angular positions
# # Linear segments
# p0, p1 = _vec_0, np.array([0,0,length] )
# positions = np.array( [radius*cos(Θ), radius*sin(Θ), -length/2 ] )
# currents = cos(Θ) # Current in each segment
# for curr, pos in zip(currents, positions):
# segments.append( segment.Line(p0+pos, p1+pos, curr))
# # Arc segments
# integral_matrix = np.zeros( (nwires, nwires) )
# for i, line in enumerate(integral_matrix.T):
# line[i:] = 1
# currents = integral_matrix @ segments_current
# currents -= np.sum(arcs_currents)
# #arcs_pos # to be implemeted
# #arcs_angle # to be implemented
# magpy_collection = magpy.collection(sources)
# angle, axis = geo.get_rotation(geo.z_vector, normal)
# magpy_collection.rotate(angle*180/π, axis)
# magpy_collection.move(position)
# vmax = norm(magpy_collection.getB(position))*1.2
# super().__init__(magpy_collection, position, vmax)
class MTLR(Coil):
    """Multi-turn transmission-line resonator: concentric circular traces on
    two copper layers separated by a dielectric.

    :param float inner_radius: radius of the innermost turn
    :param float delta_radius: radial spacing between consecutive turns
    :param float line_width: width of the conductor trace (rectangular wire)
    :param int n_turns: number of concentric turns
    :param float dielectric_thickness: vertical gap between the two layers
    :param numpy.ndarray anchor: optional translation applied after building
    :param numpy.ndarray axis: optional rotation axis
    :param float angle: rotation angle around ``axis``
    """

    def __init__(self, inner_radius: float, delta_radius: float, line_width: float, n_turns,
                 dielectric_thickness: float,
                 anchor: np.ndarray = None, axis: np.ndarray = None, angle: float = 0.):
        """The constructor."""
        radii = np.array([inner_radius + n * delta_radius for n in range(n_turns)])
        # One pair of circles per turn: one on the top layer and one shifted
        # down by the dielectric thickness.
        segments = []
        for radius in radii:
            segments.append(Circle.from_normal(radius))
            segments.append(Circle.from_normal(radius, position=np.array([0., 0., -dielectric_thickness])))
        wire = WireRect(line_width)
        super().__init__(segments, wire)
        # `if anchor:` / `if axis:` on a numpy array raises "truth value of an
        # array is ambiguous" for any multi-element array; compare to None.
        if anchor is not None:
            self.translate(anchor)
        if axis is not None:
            self.rotate(angle, axis)
| [
"os.path.exists",
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.show"
] | [((784, 809), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (792, 809), True, 'import numpy as np\n'), ((819, 844), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (827, 844), True, 'import numpy as np\n'), ((854, 879), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (862, 879), True, 'import numpy as np\n'), ((889, 914), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (897, 914), True, 'import numpy as np\n'), ((3161, 3212), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7.5 / 2.4, 7.5 / 2.4)', 'dpi': '(300)'}), '(figsize=(7.5 / 2.4, 7.5 / 2.4), dpi=300)\n', (3171, 3212), True, 'import matplotlib.pyplot as plt\n'), ((3772, 3782), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3780, 3782), True, 'import matplotlib.pyplot as plt\n'), ((3719, 3763), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'dpi': '(300)', 'transparent': '(True)'}), '(path, dpi=300, transparent=True)\n', (3730, 3763), True, 'import matplotlib.pyplot as plt\n'), ((3610, 3630), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3624, 3630), False, 'import os\n'), ((6570, 6593), 'numpy.array', 'np.array', (['[0.0, 0.0, z]'], {}), '([0.0, 0.0, z])\n', (6578, 6593), True, 'import numpy as np\n'), ((6602, 6647), 'numpy.linspace', 'np.linspace', (['(-length / 2)', '(length / 2)', 'n_turns'], {}), '(-length / 2, length / 2, n_turns)\n', (6613, 6647), True, 'import numpy as np\n'), ((8568, 8597), 'numpy.array', 'np.array', (['[0, 0, -radius / 2]'], {}), '([0, 0, -radius / 2])\n', (8576, 8597), True, 'import numpy as np\n'), ((8633, 8661), 'numpy.array', 'np.array', (['[0, 0, radius / 2]'], {}), '([0, 0, radius / 2])\n', (8641, 8661), True, 'import numpy as np\n'), ((11350, 11393), 'numpy.array', 'np.array', (['[0.0, 0.0, -dielectric_thickness]'], {}), '([0.0, 0.0, -dielectric_thickness])\n', (11358, 11393), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 21:20:48 2019
INSTITUTO FEDERAL DE EDUCAÇÃO, CIÊNCIA E TECNOLOGIA DO PÁRA - IFPA ANANINDEUA
@author:
Prof. Dr. <NAME>
Discentes:
<NAME>
<NAME>
Grupo de Pesquisa:
Gradiente de Modelagem Matemática e
Simulação Computacional - GM²SC
Assunto:
Função Trigonométrica em Python: Função Cosseno
Nome do sript: funcao_cosseno
Disponível em:
"""
# Importando Bibliotecas
# Biblioteca numpy: Operações matemáticas
import numpy as np
# Biblioteca matplotlib: Represntação Gráfica
import matplotlib.pyplot as plt
# Variável independente: x em radianos
# Independent variable: 100 samples over two full periods, in radians (np.pi).
angulos = np.linspace(-2 * np.pi, 2 * np.pi, 100)
# Cosine evaluated at every sample.
cossenos = np.cos(angulos)
print('')
print('Função Cosseno')
input("Pressione <enter> para representar graficamente")
print('')
# Plot as a black line ('k') with labelled axes, title and grid.
plt.plot(angulos, cossenos, 'k')
plt.xlabel('Valores de x')
plt.ylabel('Valores de y')
plt.title('Função Cosseno')
plt.grid(True)
plt.show()
print('=== Fim do Programa funcao_cosseno ===')
print('')
input("Acione <Ctrl + l> para limpar o console")
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((786, 825), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(2 * np.pi)', '(100)'], {}), '(-2 * np.pi, 2 * np.pi, 100)\n', (797, 825), True, 'import numpy as np\n'), ((853, 862), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (859, 862), True, 'import numpy as np\n'), ((1056, 1076), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'fc', '"""k"""'], {}), "(x, fc, 'k')\n", (1064, 1076), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1102), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Valores de x"""'], {}), "('Valores de x')\n", (1086, 1102), True, 'import matplotlib.pyplot as plt\n'), ((1104, 1130), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Valores de y"""'], {}), "('Valores de y')\n", (1114, 1130), True, 'import matplotlib.pyplot as plt\n'), ((1132, 1159), 'matplotlib.pyplot.title', 'plt.title', (['"""Função Cosseno"""'], {}), "('Função Cosseno')\n", (1141, 1159), True, 'import matplotlib.pyplot as plt\n'), ((1161, 1175), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1169, 1175), True, 'import matplotlib.pyplot as plt\n'), ((1177, 1187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1185, 1187), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import argparse
# from rllab.envs.normalized_env import normalize
from maci.learners import MADDPG, MAVBAC, MASQL
from maci.misc.kernel import adaptive_isotropic_gaussian_kernel
from maci.replay_buffers import SimpleReplayBuffer
from maci.value_functions.sq_value_function import NNQFunction, NNJointQFunction
from maci.misc.plotter import QFPolicyPlotter
from maci.policies import DeterministicNNPolicy, StochasticNNConditionalPolicy, StochasticNNPolicy
from maci.misc.sampler import MASampler
from maci.environments import make_particle_env
from rllab.misc import logger
import gtimer as gt
import datetime
from copy import deepcopy
import maci.misc.tf_utils as U
import os
def masql_agent(model_name, i, env, M, u_range, base_kwargs):
    """Build a MASQL (multi-agent soft Q-learning) learner for agent ``i``.

    Creates a joint replay buffer, a stochastic sampling policy and a
    Q-function/target-Q-function pair with two hidden layers of width ``M``,
    then wires them into a :class:`MASQL` instance.
    """
    replay_buffer = SimpleReplayBuffer(env.env_specs, max_replay_buffer_size=1e6,
                                      joint=True, agent_id=i)
    stochastic_policy = StochasticNNPolicy(env.env_specs,
                                           hidden_layer_sizes=(M, M),
                                           squash=True, u_range=1., joint=True,
                                           agent_id=i, sampling=True)
    q_function = NNQFunction(env_spec=env.env_specs, hidden_layer_sizes=[M, M],
                             joint=True, agent_id=i)
    target_q_function = NNQFunction(env_spec=env.env_specs, hidden_layer_sizes=[M, M],
                                    name='target_qf', joint=True, agent_id=i)
    return MASQL(
        base_kwargs=base_kwargs,
        agent_id=i,
        env=env,
        pool=replay_buffer,
        qf=q_function,
        target_qf=target_q_function,
        policy=stochastic_policy,
        plotter=None,
        policy_lr=3e-4,
        qf_lr=3e-4,
        tau=0.01,
        value_n_particles=16,
        td_target_update_interval=10,
        kernel_fn=adaptive_isotropic_gaussian_kernel,
        kernel_n_particles=32,
        kernel_update_ratio=0.5,
        discount=0.95,
        reward_scale=1,
        save_full_state=False,
        opponent_action_range=[0, 1],
        opponent_action_range_normalize=False
    )
def pr2ac_agent(model_name, i, env, M, u_range, base_kwargs):
    """Build a PR2-AC (MAVBAC) learner for agent ``i``.

    Assembles a deterministic behaviour/target policy pair, a stochastic
    conditional (opponent) policy, joint and individual Q-functions — all with
    two hidden layers of width ``M`` — and returns the :class:`MAVBAC` learner.
    """
    replay_buffer = SimpleReplayBuffer(env.env_specs, max_replay_buffer_size=1e6,
                                      joint=False, agent_id=i)
    behaviour_policy = DeterministicNNPolicy(env.env_specs,
                                          hidden_layer_sizes=(M, M),
                                          squash=True, u_range=u_range, joint=False,
                                          agent_id=i, sampling=True)
    target_policy = DeterministicNNPolicy(env.env_specs,
                                       hidden_layer_sizes=(M, M),
                                       name='target_policy',
                                       squash=True, u_range=u_range, joint=False,
                                       agent_id=i, sampling=True)
    conditional_policy = StochasticNNConditionalPolicy(env.env_specs,
                                                   hidden_layer_sizes=(M, M),
                                                   name='conditional_policy',
                                                   squash=True, u_range=u_range, joint=False,
                                                   agent_id=i, sampling=True)
    joint_q = NNJointQFunction(env_spec=env.env_specs, hidden_layer_sizes=[M, M],
                                joint=False, agent_id=i)
    target_joint_q = NNJointQFunction(env_spec=env.env_specs, hidden_layer_sizes=[M, M],
                                       name='target_joint_qf', joint=False, agent_id=i)
    q_function = NNQFunction(env_spec=env.env_specs, hidden_layer_sizes=[M, M],
                             joint=False, agent_id=i)
    return MAVBAC(
        base_kwargs=base_kwargs,
        agent_id=i,
        env=env,
        pool=replay_buffer,
        joint_qf=joint_q,
        target_joint_qf=target_joint_q,
        qf=q_function,
        policy=behaviour_policy,
        target_policy=target_policy,
        conditional_policy=conditional_policy,
        plotter=None,
        policy_lr=3e-4,
        qf_lr=3e-4,
        joint=False,
        value_n_particles=16,
        kernel_fn=adaptive_isotropic_gaussian_kernel,
        kernel_n_particles=32,
        kernel_update_ratio=0.5,
        td_target_update_interval=5,
        discount=0.95,
        reward_scale=1,
        tau=0.01,
        save_full_state=False,
        opponent_action_range=[0, 1],
        opponent_action_range_normalize=True
    )
def ddpg_agent(joint, opponent_modelling, model_name, i, env, M, u_range, base_kwargs):
    """Build a DDPG-family learner (DDPG / MADDPG / DDPG-OM) for agent ``i``.

    ``joint`` selects a centralised critic over all agents' actions;
    ``opponent_modelling`` additionally fits a deterministic model of the
    opponents' policies used to predict their next actions.
    """
    print(model_name)
    print(joint, opponent_modelling)
    replay_buffer = SimpleReplayBuffer(env.env_specs, max_replay_buffer_size=1e6,
                                      joint=joint, agent_id=i)
    behaviour_policy = DeterministicNNPolicy(env.env_specs,
                                          hidden_layer_sizes=(M, M),
                                          squash=True, u_range=u_range, joint=False,
                                          agent_id=i, sampling=True)
    target_policy = DeterministicNNPolicy(env.env_specs,
                                       hidden_layer_sizes=(M, M),
                                       name='target_policy',
                                       squash=True, u_range=u_range, joint=False,
                                       agent_id=i, sampling=True)
    opponent_policy = None
    if opponent_modelling:
        print('opponent_modelling start')
        opponent_policy = DeterministicNNPolicy(env.env_specs,
                                             hidden_layer_sizes=(M, M),
                                             name='opponent_policy',
                                             squash=True, u_range=u_range, joint=True,
                                             opponent_policy=True,
                                             agent_id=i, sampling=True)
    q_function = NNQFunction(env_spec=env.env_specs, hidden_layer_sizes=[M, M],
                             joint=joint, agent_id=i)
    target_q_function = NNQFunction(env_spec=env.env_specs, hidden_layer_sizes=[M, M],
                                    name='target_qf', joint=joint, agent_id=i)
    return MADDPG(
        base_kwargs=base_kwargs,
        agent_id=i,
        env=env,
        pool=replay_buffer,
        qf=q_function,
        target_qf=target_q_function,
        policy=behaviour_policy,
        target_policy=target_policy,
        opponent_policy=opponent_policy,
        plotter=None,
        policy_lr=3e-4,
        qf_lr=3e-4,
        joint=joint,
        opponent_modelling=opponent_modelling,
        td_target_update_interval=10,
        discount=0.95,
        reward_scale=0.1,
        save_full_state=False)
def parse_args(argv=None):
    """Parse command-line options for the multi-agent experiments.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse. Defaults to ``None``, in which case argparse
        reads ``sys.argv[1:]`` — so existing callers are unaffected; passing
        an explicit list makes the function testable and embeddable.

    Returns
    -------
    argparse.Namespace
        With ``game_name`` and ``model_names_setting`` attributes.
    """
    parser = argparse.ArgumentParser("Reinforcement Learning experiments for multiagent environments")
    # Environment
    # ['simple_spread', 'simple_adversary', 'simple_tag', 'simple_push']
    parser.add_argument('-g', "--game_name", type=str, default="simple_adversary", help="name of the game")
    parser.add_argument('-m', "--model_names_setting", type=str, default='PR2AC_PR2AC', help="models setting agent vs adv")
    return parser.parse_args(argv)
def main(arglist):
    """Run a multi-agent RL experiment on a particle environment.

    Builds one learner per agent according to ``arglist.model_names_setting``
    (adversaries get the second model name, cooperating agents the first),
    then alternates environment sampling and training for ``n_epochs`` epochs,
    logging under ./log, ./snapshot and ./policy.

    :param arglist: parsed options with ``game_name`` and
        ``model_names_setting`` attributes (see :func:`parse_args`).
    """
    game_name = arglist.game_name
    env = make_particle_env(game_name=game_name)
    print(env.action_space, env.observation_space)
    agent_num = env.n
    # Number of adversary agents is fixed per scenario.
    adv_agent_num = 0
    if game_name == 'simple_push' or game_name == 'simple_adversary':
        adv_agent_num = 1
    elif game_name == 'simple_tag':
        adv_agent_num = 3
    model_names_setting = arglist.model_names_setting.split('_')
    model_name = '_'.join(model_names_setting)
    # Adversaries use the second model name, the remaining agents the first.
    model_names = [model_names_setting[1]] * adv_agent_num + [model_names_setting[0]] * (agent_num - adv_agent_num)
    now = datetime.datetime.now()
    timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')
    suffix = '{}/{}/{}'.format(game_name, model_name, timestamp)
    u_range = 10.
    logger.add_tabular_output('./log/{}.csv'.format(suffix))
    snapshot_dir = './snapshot/{}'.format(suffix)
    policy_dir = './policy/{}'.format(suffix)
    os.makedirs(snapshot_dir, exist_ok=True)
    os.makedirs(policy_dir, exist_ok=True)
    logger.set_snapshot_dir(snapshot_dir)
    agents = []
    M = 100  # hidden-layer width for every network
    batch_size = 1024
    sampler = MASampler(agent_num=agent_num, joint=True, max_path_length=25, min_pool_size=100, batch_size=batch_size)
    base_kwargs = {
        'sampler': sampler,
        'epoch_length': 30,
        'n_epochs': 12000,
        'n_train_repeat': 1,
        'eval_render': True,
        'eval_n_episodes': 10
    }
    with U.single_threaded_session():
        for i, model_name in enumerate(model_names):
            if model_name == 'PR2AC':
                agent = pr2ac_agent(model_name, i, env, M, u_range, base_kwargs)
            elif model_name == 'MASQL':
                agent = masql_agent(model_name, i, env, M, u_range, base_kwargs)
            else:
                # DDPG variants differ only in centralised critic / opponent modelling.
                if model_name == 'DDPG':
                    joint = False
                    opponent_modelling = False
                elif model_name == 'MADDPG':
                    joint = True
                    opponent_modelling = False
                elif model_name == 'DDPG-OM':
                    joint = True
                    opponent_modelling = True
                agent = ddpg_agent(joint, opponent_modelling, model_name, i, env, M, u_range, base_kwargs)
            agents.append(agent)
        sampler.initialize(env, agents)
        for agent in agents:
            agent._init_training()
        gt.rename_root('MARLAlgorithm')
        gt.reset()
        gt.set_def_unique(False)
        initial_exploration_done = False
        alpha = .5  # annealing weight passed to MAVBAC/MASQL training
        global_step = 0
        for epoch in gt.timed_for(range(base_kwargs['n_epochs'] + 1)):
            logger.push_prefix('Epoch #%d | ' % epoch)
            for t in range(base_kwargs['epoch_length']):
                # Pure exploration (sampling only) until epoch 200.
                if not initial_exploration_done:
                    if epoch >= 200:
                        initial_exploration_done = True
                sampler.sample()
                if not initial_exploration_done:
                    continue
                global_step += 1
                # Only train every 100th environment step.
                if global_step % 100 != 0:
                    continue
                gt.stamp('sample')
                for j in range(base_kwargs['n_train_repeat']):
                    batch_n = []
                    recent_batch_n = []
                    indices = None
                    receent_indices = None
                    for i, agent in enumerate(agents):
                        if i == 0:
                            # Sample the indices once so every agent trains on
                            # the same timesteps.
                            batch = agent.pool.random_batch(batch_size)
                            indices = agent.pool.indices
                            receent_indices = list(range(agent.pool._top-batch_size, agent.pool._top))
                        batch_n.append(agent.pool.random_batch_by_indices(indices))
                        recent_batch_n.append(agent.pool.random_batch_by_indices(receent_indices))
                    target_next_actions_n = []
                    for agent, batch in zip(agents, batch_n):
                        try:
                            target_next_actions_n.append(agent._target_policy.get_actions(batch['next_observations']))
                        # was bare `except:` — that also swallows SystemExit /
                        # KeyboardInterrupt; only expected failure is a learner
                        # without a target policy.
                        except Exception:
                            target_next_actions_n.append([])
                    opponent_actions_n = [batch['actions'] for batch in batch_n]
                    recent_opponent_actions_n = [np.array(batch['actions']) for batch in recent_batch_n]
                    recent_opponent_observations_n = [np.array(batch['observations']) for batch in recent_batch_n]
                    print('training {} {}'.format(game_name, '_'.join(model_names_setting)))
                    for i, agent in enumerate(agents):
                        try:
                            batch_n[i]['next_actions'] = deepcopy(target_next_actions_n[i])
                        except Exception:  # was bare `except:`; keep best-effort behaviour
                            pass
                        # Opponents' actions: drop agent i's own row and flatten.
                        batch_n[i]['opponent_actions'] = np.reshape(np.delete(deepcopy(opponent_actions_n), i, 0), (-1, agent._opponent_action_dim))
                        if agent.joint:
                            if agent.opponent_modelling:
                                batch_n[i]['recent_opponent_observations'] = recent_opponent_observations_n[i]
                                batch_n[i]['recent_opponent_actions'] = np.reshape(np.delete(deepcopy(recent_opponent_actions_n), i, 0), (-1, agent._opponent_action_dim))
                                batch_n[i]['opponent_next_actions'] = agent.opponent_policy.get_actions(batch_n[i]['next_observations'])
                            else:
                                batch_n[i]['opponent_next_actions'] = np.reshape(np.delete(deepcopy(target_next_actions_n), i, 0), (-1, agent._opponent_action_dim))
                        if isinstance(agent, MAVBAC) or isinstance(agent, MASQL):
                            agent._do_training(iteration=t + epoch * agent._epoch_length, batch=batch_n[i], annealing=alpha)
                        else:
                            agent._do_training(iteration=t + epoch * agent._epoch_length, batch=batch_n[i])
                gt.stamp('train')
            logger.pop_prefix()
    sampler.terminate()
if __name__ == '__main__':
arglist = parse_args()
main(arglist) | [
"maci.value_functions.sq_value_function.NNQFunction",
"maci.replay_buffers.SimpleReplayBuffer",
"maci.policies.StochasticNNConditionalPolicy",
"gtimer.reset",
"gtimer.rename_root",
"numpy.array",
"maci.policies.StochasticNNPolicy",
"copy.deepcopy",
"rllab.misc.logger.set_snapshot_dir",
"gtimer.sta... | [((790, 887), 'maci.replay_buffers.SimpleReplayBuffer', 'SimpleReplayBuffer', (['env.env_specs'], {'max_replay_buffer_size': '(1000000.0)', 'joint': 'joint', 'agent_id': 'i'}), '(env.env_specs, max_replay_buffer_size=1000000.0, joint=\n joint, agent_id=i)\n', (808, 887), False, 'from maci.replay_buffers import SimpleReplayBuffer\n'), ((890, 1020), 'maci.policies.StochasticNNPolicy', 'StochasticNNPolicy', (['env.env_specs'], {'hidden_layer_sizes': '(M, M)', 'squash': '(True)', 'u_range': '(1.0)', 'joint': 'joint', 'agent_id': 'i', 'sampling': '(True)'}), '(env.env_specs, hidden_layer_sizes=(M, M), squash=True,\n u_range=1.0, joint=joint, agent_id=i, sampling=True)\n', (908, 1020), False, 'from maci.policies import DeterministicNNPolicy, StochasticNNConditionalPolicy, StochasticNNPolicy\n'), ((1122, 1213), 'maci.value_functions.sq_value_function.NNQFunction', 'NNQFunction', ([], {'env_spec': 'env.env_specs', 'hidden_layer_sizes': '[M, M]', 'joint': 'joint', 'agent_id': 'i'}), '(env_spec=env.env_specs, hidden_layer_sizes=[M, M], joint=joint,\n agent_id=i)\n', (1133, 1213), False, 'from maci.value_functions.sq_value_function import NNQFunction, NNJointQFunction\n'), ((1226, 1336), 'maci.value_functions.sq_value_function.NNQFunction', 'NNQFunction', ([], {'env_spec': 'env.env_specs', 'hidden_layer_sizes': '[M, M]', 'name': '"""target_qf"""', 'joint': 'joint', 'agent_id': 'i'}), "(env_spec=env.env_specs, hidden_layer_sizes=[M, M], name=\n 'target_qf', joint=joint, agent_id=i)\n", (1237, 1336), False, 'from maci.value_functions.sq_value_function import NNQFunction, NNJointQFunction\n'), ((1373, 1829), 'maci.learners.MASQL', 'MASQL', ([], {'base_kwargs': 'base_kwargs', 'agent_id': 'i', 'env': 'env', 'pool': 'pool', 'qf': 'qf', 'target_qf': 'target_qf', 'policy': 'policy', 'plotter': 'None', 'policy_lr': '(0.0003)', 'qf_lr': '(0.0003)', 'tau': '(0.01)', 'value_n_particles': '(16)', 'td_target_update_interval': '(10)', 'kernel_fn': 
'adaptive_isotropic_gaussian_kernel', 'kernel_n_particles': '(32)', 'kernel_update_ratio': '(0.5)', 'discount': '(0.95)', 'reward_scale': '(1)', 'save_full_state': '(False)', 'opponent_action_range': '[0, 1]', 'opponent_action_range_normalize': '(False)'}), '(base_kwargs=base_kwargs, agent_id=i, env=env, pool=pool, qf=qf,\n target_qf=target_qf, policy=policy, plotter=None, policy_lr=0.0003,\n qf_lr=0.0003, tau=0.01, value_n_particles=16, td_target_update_interval\n =10, kernel_fn=adaptive_isotropic_gaussian_kernel, kernel_n_particles=\n 32, kernel_update_ratio=0.5, discount=0.95, reward_scale=1,\n save_full_state=False, opponent_action_range=[0, 1],\n opponent_action_range_normalize=False)\n', (1378, 1829), False, 'from maci.learners import MADDPG, MAVBAC, MASQL\n'), ((2083, 2180), 'maci.replay_buffers.SimpleReplayBuffer', 'SimpleReplayBuffer', (['env.env_specs'], {'max_replay_buffer_size': '(1000000.0)', 'joint': 'joint', 'agent_id': 'i'}), '(env.env_specs, max_replay_buffer_size=1000000.0, joint=\n joint, agent_id=i)\n', (2101, 2180), False, 'from maci.replay_buffers import SimpleReplayBuffer\n'), ((2183, 2320), 'maci.policies.DeterministicNNPolicy', 'DeterministicNNPolicy', (['env.env_specs'], {'hidden_layer_sizes': '(M, M)', 'squash': '(True)', 'u_range': 'u_range', 'joint': '(False)', 'agent_id': 'i', 'sampling': '(True)'}), '(env.env_specs, hidden_layer_sizes=(M, M), squash=True,\n u_range=u_range, joint=False, agent_id=i, sampling=True)\n', (2204, 2320), False, 'from maci.policies import DeterministicNNPolicy, StochasticNNConditionalPolicy, StochasticNNPolicy\n'), ((2442, 2606), 'maci.policies.DeterministicNNPolicy', 'DeterministicNNPolicy', (['env.env_specs'], {'hidden_layer_sizes': '(M, M)', 'name': '"""target_policy"""', 'squash': '(True)', 'u_range': 'u_range', 'joint': '(False)', 'agent_id': 'i', 'sampling': '(True)'}), "(env.env_specs, hidden_layer_sizes=(M, M), name=\n 'target_policy', squash=True, u_range=u_range, joint=False, agent_id=i,\n 
sampling=True)\n", (2463, 2606), False, 'from maci.policies import DeterministicNNPolicy, StochasticNNConditionalPolicy, StochasticNNPolicy\n'), ((2791, 2967), 'maci.policies.StochasticNNConditionalPolicy', 'StochasticNNConditionalPolicy', (['env.env_specs'], {'hidden_layer_sizes': '(M, M)', 'name': '"""conditional_policy"""', 'squash': '(True)', 'u_range': 'u_range', 'joint': '(False)', 'agent_id': 'i', 'sampling': '(True)'}), "(env.env_specs, hidden_layer_sizes=(M, M),\n name='conditional_policy', squash=True, u_range=u_range, joint=False,\n agent_id=i, sampling=True)\n", (2820, 2967), False, 'from maci.policies import DeterministicNNPolicy, StochasticNNConditionalPolicy, StochasticNNPolicy\n'), ((3196, 3293), 'maci.value_functions.sq_value_function.NNJointQFunction', 'NNJointQFunction', ([], {'env_spec': 'env.env_specs', 'hidden_layer_sizes': '[M, M]', 'joint': 'joint', 'agent_id': 'i'}), '(env_spec=env.env_specs, hidden_layer_sizes=[M, M], joint=\n joint, agent_id=i)\n', (3212, 3293), False, 'from maci.value_functions.sq_value_function import NNQFunction, NNJointQFunction\n'), ((3311, 3432), 'maci.value_functions.sq_value_function.NNJointQFunction', 'NNJointQFunction', ([], {'env_spec': 'env.env_specs', 'hidden_layer_sizes': '[M, M]', 'name': '"""target_joint_qf"""', 'joint': 'joint', 'agent_id': 'i'}), "(env_spec=env.env_specs, hidden_layer_sizes=[M, M], name=\n 'target_joint_qf', joint=joint, agent_id=i)\n", (3327, 3432), False, 'from maci.value_functions.sq_value_function import NNQFunction, NNJointQFunction\n'), ((3477, 3568), 'maci.value_functions.sq_value_function.NNQFunction', 'NNQFunction', ([], {'env_spec': 'env.env_specs', 'hidden_layer_sizes': '[M, M]', 'joint': '(False)', 'agent_id': 'i'}), '(env_spec=env.env_specs, hidden_layer_sizes=[M, M], joint=False,\n agent_id=i)\n', (3488, 3568), False, 'from maci.value_functions.sq_value_function import NNQFunction, NNJointQFunction\n'), ((3597, 4174), 'maci.learners.MAVBAC', 'MAVBAC', ([], {'base_kwargs': 
'base_kwargs', 'agent_id': 'i', 'env': 'env', 'pool': 'pool', 'joint_qf': 'joint_qf', 'target_joint_qf': 'target_joint_qf', 'qf': 'qf', 'policy': 'policy', 'target_policy': 'target_policy', 'conditional_policy': 'conditional_policy', 'plotter': 'plotter', 'policy_lr': '(0.0003)', 'qf_lr': '(0.0003)', 'joint': '(False)', 'value_n_particles': '(16)', 'kernel_fn': 'adaptive_isotropic_gaussian_kernel', 'kernel_n_particles': '(32)', 'kernel_update_ratio': '(0.5)', 'td_target_update_interval': '(5)', 'discount': '(0.95)', 'reward_scale': '(1)', 'tau': '(0.01)', 'save_full_state': '(False)', 'opponent_action_range': '[0, 1]', 'opponent_action_range_normalize': '(True)'}), '(base_kwargs=base_kwargs, agent_id=i, env=env, pool=pool, joint_qf=\n joint_qf, target_joint_qf=target_joint_qf, qf=qf, policy=policy,\n target_policy=target_policy, conditional_policy=conditional_policy,\n plotter=plotter, policy_lr=0.0003, qf_lr=0.0003, joint=False,\n value_n_particles=16, kernel_fn=adaptive_isotropic_gaussian_kernel,\n kernel_n_particles=32, kernel_update_ratio=0.5,\n td_target_update_interval=5, discount=0.95, reward_scale=1, tau=0.01,\n save_full_state=False, opponent_action_range=[0, 1],\n opponent_action_range_normalize=True)\n', (3603, 4174), False, 'from maci.learners import MADDPG, MAVBAC, MASQL\n'), ((4574, 4671), 'maci.replay_buffers.SimpleReplayBuffer', 'SimpleReplayBuffer', (['env.env_specs'], {'max_replay_buffer_size': '(1000000.0)', 'joint': 'joint', 'agent_id': 'i'}), '(env.env_specs, max_replay_buffer_size=1000000.0, joint=\n joint, agent_id=i)\n', (4592, 4671), False, 'from maci.replay_buffers import SimpleReplayBuffer\n'), ((4674, 4811), 'maci.policies.DeterministicNNPolicy', 'DeterministicNNPolicy', (['env.env_specs'], {'hidden_layer_sizes': '(M, M)', 'squash': '(True)', 'u_range': 'u_range', 'joint': '(False)', 'agent_id': 'i', 'sampling': '(True)'}), '(env.env_specs, hidden_layer_sizes=(M, M), squash=True,\n u_range=u_range, joint=False, agent_id=i, 
sampling=True)\n', (4695, 4811), False, 'from maci.policies import DeterministicNNPolicy, StochasticNNConditionalPolicy, StochasticNNPolicy\n'), ((4933, 5097), 'maci.policies.DeterministicNNPolicy', 'DeterministicNNPolicy', (['env.env_specs'], {'hidden_layer_sizes': '(M, M)', 'name': '"""target_policy"""', 'squash': '(True)', 'u_range': 'u_range', 'joint': '(False)', 'agent_id': 'i', 'sampling': '(True)'}), "(env.env_specs, hidden_layer_sizes=(M, M), name=\n 'target_policy', squash=True, u_range=u_range, joint=False, agent_id=i,\n sampling=True)\n", (4954, 5097), False, 'from maci.policies import DeterministicNNPolicy, StochasticNNConditionalPolicy, StochasticNNPolicy\n'), ((5807, 5898), 'maci.value_functions.sq_value_function.NNQFunction', 'NNQFunction', ([], {'env_spec': 'env.env_specs', 'hidden_layer_sizes': '[M, M]', 'joint': 'joint', 'agent_id': 'i'}), '(env_spec=env.env_specs, hidden_layer_sizes=[M, M], joint=joint,\n agent_id=i)\n', (5818, 5898), False, 'from maci.value_functions.sq_value_function import NNQFunction, NNJointQFunction\n'), ((5911, 6021), 'maci.value_functions.sq_value_function.NNQFunction', 'NNQFunction', ([], {'env_spec': 'env.env_specs', 'hidden_layer_sizes': '[M, M]', 'name': '"""target_qf"""', 'joint': 'joint', 'agent_id': 'i'}), "(env_spec=env.env_specs, hidden_layer_sizes=[M, M], name=\n 'target_qf', joint=joint, agent_id=i)\n", (5922, 6021), False, 'from maci.value_functions.sq_value_function import NNQFunction, NNJointQFunction\n'), ((6077, 6452), 'maci.learners.MADDPG', 'MADDPG', ([], {'base_kwargs': 'base_kwargs', 'agent_id': 'i', 'env': 'env', 'pool': 'pool', 'qf': 'qf', 'target_qf': 'target_qf', 'policy': 'policy', 'target_policy': 'target_policy', 'opponent_policy': 'opponent_policy', 'plotter': 'plotter', 'policy_lr': '(0.0003)', 'qf_lr': '(0.0003)', 'joint': 'joint', 'opponent_modelling': 'opponent_modelling', 'td_target_update_interval': '(10)', 'discount': '(0.95)', 'reward_scale': '(0.1)', 'save_full_state': '(False)'}), 
'(base_kwargs=base_kwargs, agent_id=i, env=env, pool=pool, qf=qf,\n target_qf=target_qf, policy=policy, target_policy=target_policy,\n opponent_policy=opponent_policy, plotter=plotter, policy_lr=0.0003,\n qf_lr=0.0003, joint=joint, opponent_modelling=opponent_modelling,\n td_target_update_interval=10, discount=0.95, reward_scale=0.1,\n save_full_state=False)\n', (6083, 6452), False, 'from maci.learners import MADDPG, MAVBAC, MASQL\n'), ((6624, 6718), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Reinforcement Learning experiments for multiagent environments"""'], {}), "(\n 'Reinforcement Learning experiments for multiagent environments')\n", (6647, 6718), False, 'import argparse\n'), ((7133, 7171), 'maci.environments.make_particle_env', 'make_particle_env', ([], {'game_name': 'game_name'}), '(game_name=game_name)\n', (7150, 7171), False, 'from maci.environments import make_particle_env\n'), ((7665, 7688), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7686, 7688), False, 'import datetime\n'), ((8012, 8052), 'os.makedirs', 'os.makedirs', (['snapshot_dir'], {'exist_ok': '(True)'}), '(snapshot_dir, exist_ok=True)\n', (8023, 8052), False, 'import os\n'), ((8057, 8095), 'os.makedirs', 'os.makedirs', (['policy_dir'], {'exist_ok': '(True)'}), '(policy_dir, exist_ok=True)\n', (8068, 8095), False, 'import os\n'), ((8100, 8137), 'rllab.misc.logger.set_snapshot_dir', 'logger.set_snapshot_dir', (['snapshot_dir'], {}), '(snapshot_dir)\n', (8123, 8137), False, 'from rllab.misc import logger\n'), ((8271, 8379), 'maci.misc.sampler.MASampler', 'MASampler', ([], {'agent_num': 'agent_num', 'joint': '(True)', 'max_path_length': '(25)', 'min_pool_size': '(100)', 'batch_size': 'batch_size'}), '(agent_num=agent_num, joint=True, max_path_length=25,\n min_pool_size=100, batch_size=batch_size)\n', (8280, 8379), False, 'from maci.misc.sampler import MASampler\n'), ((5379, 5566), 'maci.policies.DeterministicNNPolicy', 'DeterministicNNPolicy', 
(['env.env_specs'], {'hidden_layer_sizes': '(M, M)', 'name': '"""opponent_policy"""', 'squash': '(True)', 'u_range': 'u_range', 'joint': '(True)', 'opponent_policy': '(True)', 'agent_id': 'i', 'sampling': '(True)'}), "(env.env_specs, hidden_layer_sizes=(M, M), name=\n 'opponent_policy', squash=True, u_range=u_range, joint=True,\n opponent_policy=True, agent_id=i, sampling=True)\n", (5400, 5566), False, 'from maci.policies import DeterministicNNPolicy, StochasticNNConditionalPolicy, StochasticNNPolicy\n'), ((8584, 8611), 'maci.misc.tf_utils.single_threaded_session', 'U.single_threaded_session', ([], {}), '()\n', (8609, 8611), True, 'import maci.misc.tf_utils as U\n'), ((9551, 9582), 'gtimer.rename_root', 'gt.rename_root', (['"""MARLAlgorithm"""'], {}), "('MARLAlgorithm')\n", (9565, 9582), True, 'import gtimer as gt\n'), ((9591, 9601), 'gtimer.reset', 'gt.reset', ([], {}), '()\n', (9599, 9601), True, 'import gtimer as gt\n'), ((9610, 9634), 'gtimer.set_def_unique', 'gt.set_def_unique', (['(False)'], {}), '(False)\n', (9627, 9634), True, 'import gtimer as gt\n'), ((9847, 9889), 'rllab.misc.logger.push_prefix', 'logger.push_prefix', (["('Epoch #%d | ' % epoch)"], {}), "('Epoch #%d | ' % epoch)\n", (9865, 9889), False, 'from rllab.misc import logger\n'), ((15220, 15239), 'rllab.misc.logger.pop_prefix', 'logger.pop_prefix', ([], {}), '()\n', (15237, 15239), False, 'from rllab.misc import logger\n'), ((10397, 10415), 'gtimer.stamp', 'gt.stamp', (['"""sample"""'], {}), "('sample')\n", (10405, 10415), True, 'import gtimer as gt\n'), ((14402, 14419), 'gtimer.stamp', 'gt.stamp', (['"""train"""'], {}), "('train')\n", (14410, 14419), True, 'import gtimer as gt\n'), ((12101, 12127), 'numpy.array', 'np.array', (["batch['actions']"], {}), "(batch['actions'])\n", (12109, 12127), True, 'import numpy as np\n'), ((12211, 12242), 'numpy.array', 'np.array', (["batch['observations']"], {}), "(batch['observations'])\n", (12219, 12242), True, 'import numpy as np\n'), ((13077, 13111), 
'copy.deepcopy', 'deepcopy', (['target_next_actions_n[i]'], {}), '(target_next_actions_n[i])\n', (13085, 13111), False, 'from copy import deepcopy\n'), ((13255, 13283), 'copy.deepcopy', 'deepcopy', (['opponent_actions_n'], {}), '(opponent_actions_n)\n', (13263, 13283), False, 'from copy import deepcopy\n'), ((13627, 13662), 'copy.deepcopy', 'deepcopy', (['recent_opponent_actions_n'], {}), '(recent_opponent_actions_n)\n', (13635, 13662), False, 'from copy import deepcopy\n'), ((13967, 13998), 'copy.deepcopy', 'deepcopy', (['target_next_actions_n'], {}), '(target_next_actions_n)\n', (13975, 13998), False, 'from copy import deepcopy\n')] |
import os
import torch
import torch.nn as nn
import struct
import numpy as np
import json
from time import perf_counter
from pprint import pprint
from lstm_rnnt_dec import PluginLstmRnntDec
start_setup_time = perf_counter()
# Setup: every knob is read from a CK_* environment variable, with defaults
# chosen so the script can also run standalone.
output_bin = os.environ.get('CK_OUT_RAW_DATA', 'tmp-ck-output.bin')
# NOTE(review): str.replace swaps *every* 'bin' substring in the path (e.g. a
# '/bin/' directory component), not just the extension -- confirm acceptable.
output_json = output_bin.replace('bin', 'json')
dataset_path = os.environ.get('CK_DATASET_PATH', '')
dataset_prefix = os.environ.get('CK_LSTM_DATASET_PREFIX', 'sample')
op_id = os.environ.get('CK_LSTM_OP_ID', '')
sample_id = os.environ.get('CK_LSTM_SAMPLE_ID', '0').zfill(6)
layers = int(os.environ.get('CK_LSTM_LAYERS', '2'))
hidden_width = int(os.environ.get('CK_LSTM_HIDDEN_WIDTH', '320'))
input_width = int(os.environ.get('CK_LSTM_INPUT_WIDTH', '320'))
# BUGFIX: CK_LSTM_LOGIT_COUNT was previously read twice -- once higher up with
# default '1' (kept as a string and immediately shadowed by this line) and once
# here; the dead first read has been removed.
logit_count = int(os.environ.get('CK_LSTM_LOGIT_COUNT', '128'))
batch_size = int(os.environ.get('CK_LSTM_BATCH_SIZE', '1'))
dropout = float(os.environ.get('CK_LSTM_DROPOUT', '0.0'))
rnd_seed = int(os.environ.get('CK_SEED', '42'))
# Seeded RandomState so randomly generated inputs are reproducible.
rng = np.random.RandomState(rnd_seed)
print_in_tensor = os.environ.get('CK_PRINT_IN_TENSOR', 'no') in [ 'yes', 'YES', 'ON', 'on', '1' ]
print_out_tensor = os.environ.get('CK_PRINT_OUT_TENSOR', 'no') in [ 'yes', 'YES', 'ON', 'on', '1' ]
# NOTE(review): dataset_path appears both inside the formatted file name and as
# the first argument of os.path.join; for relative dataset paths this
# duplicates the directory component -- TODO confirm and fix together with the
# data-loading section below.
sample_file = os.path.join(dataset_path, '{}{}-DEC0000.pt'.format(dataset_path, dataset_prefix))
sizeof_float32 = 4
# LOAD LSTM
lstm = PluginLstmRnntDec()
# LOAD DATA
# BUGFIX: the original file-name template repeated dataset_path inside the
# formatted name *and* passed it to os.path.join, which duplicated the
# directory for relative dataset paths (os.path.join only absorbs the
# duplication when dataset_path is absolute).  Recompute the probe path with
# the corrected template before checking for an on-disk dataset.
sample_file = os.path.join(dataset_path, '{}-DEC0000.pt'.format(dataset_prefix))
if os.path.exists(sample_file):
    # On-disk dataset: one serialized input per logit step, numbered -DEC0000..
    input_data = []
    for i in range(logit_count):
        sample_file = os.path.join(dataset_path, '{}-DEC{}.pt'.format(dataset_prefix, str(i).zfill(4)))
        input_data.append(torch.load(sample_file))
else:
    # No dataset found: generate reproducible random inputs from the seeded
    # RandomState -- an x tensor plus the (h, c) LSTM state pair per step.
    input_data = []
    for i in range(logit_count):
        input_x = torch.from_numpy(rng.randn(1, batch_size, input_width).astype(np.float32))
        input_h = torch.from_numpy(rng.randn(2, batch_size, hidden_width).astype(np.float32))
        input_c = torch.from_numpy(rng.randn(2, batch_size, hidden_width).astype(np.float32))
        input_data.append([input_x, (input_h, input_c)])
if print_in_tensor:
    print("Input:")
    pprint(input_data)
    print("")
finish_setup_time = perf_counter()
# RUN THE TEST
# Feed each logit's (x, (h, c)) input pair through the decoder LSTM and
# collect the first returned value (the output frame) into a
# (logit_count, 1, hidden_width) tensor; the second return value (the updated
# state) is discarded.
output = torch.zeros([logit_count,1,hidden_width])
for i in range(logit_count):
    outx, _ = lstm(input_data[i][0], input_data[i][1])
    output[i:i+1]=outx
finish_lstm_time = perf_counter()
# Print output as tensor.
if print_out_tensor:
    print("LSTM Output:")
    pprint(output)
# Convert output to flat list.
output_list = output.flatten().tolist()
# Dump output as binary.
# struct format 'f' packs each value as a float32 in native byte order.
with open(output_bin, 'wb') as output_file:
    output_file.write( struct.pack('f'*len(output_list), *output_list) )
# Dump output as JSON.
with open(output_json, 'w') as output_file:
    output_file.write( json.dumps(output_list, indent=2) )
# Dump timing and misc info.
# 'execution_time' spans setup + inference; 'time_setup'/'time_test' split the
# same interval into its two phases.
height, batch, width = output.size()
timer_json = 'tmp-ck-timer.json'
with open(timer_json, 'w') as output_file:
    timer = {
        "execution_time": (finish_lstm_time - start_setup_time),
        "run_time_state": {
            "input_width": input_width,
            "hidden_width": hidden_width,
            "num_layers": layers,
            "logit_count": logit_count,
            "out_shape_N": batch,
            "out_shape_C": 1,
            "out_shape_H": height,
            "out_shape_W": width,
            "rnd_seed": rnd_seed,
            "data_bits": sizeof_float32*8,
            "time_setup": (finish_setup_time - start_setup_time),
            "time_test": (finish_lstm_time - finish_setup_time)
        }
    }
    output_file.write( json.dumps(timer, indent=2) )
| [
"os.path.exists",
"pprint.pprint",
"torch.load",
"json.dumps",
"os.environ.get",
"time.perf_counter",
"torch.from_numpy",
"lstm_rnnt_dec.PluginLstmRnntDec",
"torch.zeros",
"numpy.random.RandomState"
] | [((211, 225), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (223, 225), False, 'from time import perf_counter\n'), ((249, 303), 'os.environ.get', 'os.environ.get', (['"""CK_OUT_RAW_DATA"""', '"""tmp-ck-output.bin"""'], {}), "('CK_OUT_RAW_DATA', 'tmp-ck-output.bin')\n", (263, 303), False, 'import os\n'), ((368, 405), 'os.environ.get', 'os.environ.get', (['"""CK_DATASET_PATH"""', '""""""'], {}), "('CK_DATASET_PATH', '')\n", (382, 405), False, 'import os\n'), ((424, 474), 'os.environ.get', 'os.environ.get', (['"""CK_LSTM_DATASET_PREFIX"""', '"""sample"""'], {}), "('CK_LSTM_DATASET_PREFIX', 'sample')\n", (438, 474), False, 'import os\n'), ((490, 532), 'os.environ.get', 'os.environ.get', (['"""CK_LSTM_LOGIT_COUNT"""', '"""1"""'], {}), "('CK_LSTM_LOGIT_COUNT', '1')\n", (504, 532), False, 'import os\n'), ((542, 577), 'os.environ.get', 'os.environ.get', (['"""CK_LSTM_OP_ID"""', '""""""'], {}), "('CK_LSTM_OP_ID', '')\n", (556, 577), False, 'import os\n'), ((1062, 1093), 'numpy.random.RandomState', 'np.random.RandomState', (['rnd_seed'], {}), '(rnd_seed)\n', (1083, 1093), True, 'import numpy as np\n'), ((1432, 1451), 'lstm_rnnt_dec.PluginLstmRnntDec', 'PluginLstmRnntDec', ([], {}), '()\n', (1449, 1451), False, 'from lstm_rnnt_dec import PluginLstmRnntDec\n'), ((1468, 1495), 'os.path.exists', 'os.path.exists', (['sample_file'], {}), '(sample_file)\n', (1482, 1495), False, 'import os\n'), ((2328, 2342), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (2340, 2342), False, 'from time import perf_counter\n'), ((2368, 2411), 'torch.zeros', 'torch.zeros', (['[logit_count, 1, hidden_width]'], {}), '([logit_count, 1, hidden_width])\n', (2379, 2411), False, 'import torch\n'), ((2537, 2551), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (2549, 2551), False, 'from time import perf_counter\n'), ((654, 691), 'os.environ.get', 'os.environ.get', (['"""CK_LSTM_LAYERS"""', '"""2"""'], {}), "('CK_LSTM_LAYERS', '2')\n", (668, 691), False, 'import os\n'), ((712, 
757), 'os.environ.get', 'os.environ.get', (['"""CK_LSTM_HIDDEN_WIDTH"""', '"""320"""'], {}), "('CK_LSTM_HIDDEN_WIDTH', '320')\n", (726, 757), False, 'import os\n'), ((777, 821), 'os.environ.get', 'os.environ.get', (['"""CK_LSTM_INPUT_WIDTH"""', '"""320"""'], {}), "('CK_LSTM_INPUT_WIDTH', '320')\n", (791, 821), False, 'import os\n'), ((842, 886), 'os.environ.get', 'os.environ.get', (['"""CK_LSTM_LOGIT_COUNT"""', '"""128"""'], {}), "('CK_LSTM_LOGIT_COUNT', '128')\n", (856, 886), False, 'import os\n'), ((905, 946), 'os.environ.get', 'os.environ.get', (['"""CK_LSTM_BATCH_SIZE"""', '"""1"""'], {}), "('CK_LSTM_BATCH_SIZE', '1')\n", (919, 946), False, 'import os\n'), ((965, 1005), 'os.environ.get', 'os.environ.get', (['"""CK_LSTM_DROPOUT"""', '"""0.0"""'], {}), "('CK_LSTM_DROPOUT', '0.0')\n", (979, 1005), False, 'import os\n'), ((1023, 1054), 'os.environ.get', 'os.environ.get', (['"""CK_SEED"""', '"""42"""'], {}), "('CK_SEED', '42')\n", (1037, 1054), False, 'import os\n'), ((1113, 1155), 'os.environ.get', 'os.environ.get', (['"""CK_PRINT_IN_TENSOR"""', '"""no"""'], {}), "('CK_PRINT_IN_TENSOR', 'no')\n", (1127, 1155), False, 'import os\n'), ((1212, 1255), 'os.environ.get', 'os.environ.get', (['"""CK_PRINT_OUT_TENSOR"""', '"""no"""'], {}), "('CK_PRINT_OUT_TENSOR', 'no')\n", (1226, 1255), False, 'import os\n'), ((2274, 2292), 'pprint.pprint', 'pprint', (['input_data'], {}), '(input_data)\n', (2280, 2292), False, 'from pprint import pprint\n'), ((2630, 2644), 'pprint.pprint', 'pprint', (['output'], {}), '(output)\n', (2636, 2644), False, 'from pprint import pprint\n'), ((590, 630), 'os.environ.get', 'os.environ.get', (['"""CK_LSTM_SAMPLE_ID"""', '"""0"""'], {}), "('CK_LSTM_SAMPLE_ID', '0')\n", (604, 630), False, 'import os\n'), ((1908, 1933), 'torch.from_numpy', 'torch.from_numpy', (['input_x'], {}), '(input_x)\n', (1924, 1933), False, 'import torch\n'), ((2028, 2053), 'torch.from_numpy', 'torch.from_numpy', (['input_h'], {}), '(input_h)\n', (2044, 2053), False, 'import 
torch\n'), ((2148, 2173), 'torch.from_numpy', 'torch.from_numpy', (['input_c'], {}), '(input_c)\n', (2164, 2173), False, 'import torch\n'), ((2951, 2984), 'json.dumps', 'json.dumps', (['output_list'], {'indent': '(2)'}), '(output_list, indent=2)\n', (2961, 2984), False, 'import json\n'), ((3773, 3800), 'json.dumps', 'json.dumps', (['timer'], {'indent': '(2)'}), '(timer, indent=2)\n', (3783, 3800), False, 'import json\n'), ((1697, 1720), 'torch.load', 'torch.load', (['sample_file'], {}), '(sample_file)\n', (1707, 1720), False, 'import torch\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
import copy
from SyntheticControlMethods.optimize import Optimize
from SyntheticControlMethods.plot import Plot
from SyntheticControlMethods.tables import Tables
from SyntheticControlMethods.validity_tests import ValidityTests
class SynthBase(object):
    '''Plain container for every input, matrix and result of a synthetic control run.'''

    def __init__(self, dataset, outcome_var, id_var, time_var, treatment_period, treated_unit, control_units,
                covariates, periods_all, periods_pre_treatment, n_controls, n_covariates,
                treated_outcome, control_outcome, treated_covariates, control_covariates,
                unscaled_treated_covariates, unscaled_control_covariates,
                treated_outcome_all, control_outcome_all, pairwise_difference, pen, random_seed=0,
                w=None, v=None, **kwargs):
        '''
        Store the processed inputs of a synthetic control study.

        Matrix naming follows Abadie, Diamond & Hainmueller:

        dataset:
            Long-format panel with columns [id_var, time_var, outcome_var,
            covariate...], one row per unit/period observation, sorted by
            unit then time.
        outcome_var, id_var, time_var:
            Column names of the outcome, the unit identifier and the time
            period respectively.
        treatment_period:
            First period affected by the treatment.
        treated_unit:
            Identifier of the treated unit; every other unit is a control.
        treated_outcome / control_outcome ("Z1" / "Z0"):
            Pre-treatment outcome matrices, shaped
            (periods_pre_treatment x 1) and (periods_pre_treatment x n_controls).
        treated_outcome_all / control_outcome_all:
            Same matrices over all periods, including post-treatment.
        treated_covariates / control_covariates ("X1" / "X0"):
            Pre-treatment covariate averages (variance-rescaled); the
            unscaled_* arguments keep the raw versions.
        pairwise_difference:
            treated_covariates minus each control unit's covariate column.
        pen:
            Penalization coefficient used by the optimizer.
        w ("W"), v ("V"):
            Control-unit weights and covariate-importance diagonal; may be
            supplied up front, otherwise filled in by inference.
        random_seed:
            Seed for the numpy random generator used downstream.
        '''
        # --- raw inputs and identifiers ---
        self.dataset = dataset
        self.outcome_var = outcome_var
        self.id = id_var
        self.time = time_var
        self.treatment_period = treatment_period
        self.treated_unit = treated_unit
        self.control_units = control_units
        self.covariates = covariates
        self.pen = pen

        # --- panel dimensions ---
        self.periods_all = periods_all
        self.periods_pre_treatment = periods_pre_treatment
        self.n_controls = n_controls
        self.n_covariates = n_covariates

        # Random generator shared by downstream routines.
        self.rng = np.random.default_rng(random_seed)

        # --- processed matrices ---
        self.treated_outcome = treated_outcome
        self.control_outcome = control_outcome
        self.treated_covariates = treated_covariates
        self.control_covariates = control_covariates
        self.unscaled_treated_covariates = unscaled_treated_covariates
        self.unscaled_control_covariates = unscaled_control_covariates
        self.treated_outcome_all = treated_outcome_all
        self.control_outcome_all = control_outcome_all
        self.pairwise_difference = pairwise_difference

        # --- inference results (filled in after optimization) ---
        self.w = w  # may be provided by the caller
        self.v = v  # may be provided by the caller
        self.weight_df = None
        self.comparison_df = None
        self.synth_outcome = None
        self.synth_constant = None
        self.synth_covariates = None
        self.rmspe_df = None

        # --- optimization bookkeeping ---
        self.min_loss = float("inf")
        self.fail_count = 0  # caps the number of optimization attempts

        # --- validity / placebo test results ---
        self.in_space_placebos = None
        self.in_space_placebo_w = None
        self.pre_post_rmspe_ratio = None
        self.in_time_placebo_outcome = None
        self.in_time_placebo_treated_outcome = None
        self.in_time_placebo_w = None
        self.placebo_treatment_period = None
        self.placebo_periods_pre_treatment = None
class DataProcessor(object):
'''Class that processes input data into variables and matrices needed for optimization'''
def _process_input_data(self, dataset,
outcome_var, id_var, time_var,
treatment_period, treated_unit,
pen, exclude_columns, random_seed,
**kwargs):
'''
Extracts processed variables, excluding v and w, from input variables.
These are all the data matrices.
'''
#All columns not y, id or time must be predictors
covariates = [col for col in dataset.columns if col not in [id_var, time_var] and col not in exclude_columns]
#Extract quantities needed for pre-processing matrices
#Get number of periods in pre-treatment and total
periods_all = dataset[time_var].nunique()
periods_pre_treatment = dataset.loc[dataset[time_var]<treatment_period][time_var].nunique()
#Number of control units, -1 to remove treated unit
n_controls = dataset[id_var].nunique() - 1
n_covariates = len(covariates)
#All units that are not the treated unit are controls
control_units = dataset.loc[dataset[id_var] != treated_unit][id_var].unique()
###Get treated unit matrices first###
treated_outcome_all, treated_outcome, unscaled_treated_covariates = self._process_treated_data(
dataset, outcome_var, id_var, time_var,
treatment_period, treated_unit, periods_all,
periods_pre_treatment, covariates, n_covariates
)
### Now for control unit matrices ###
control_outcome_all, control_outcome, unscaled_control_covariates = self._process_control_data(
dataset, outcome_var, id_var, time_var,
treatment_period, treated_unit, n_controls,
periods_all, periods_pre_treatment, covariates
)
#Rescale covariates to be unit variance (helps with optimization)
treated_covariates, control_covariates = self._rescale_covariate_variance(unscaled_treated_covariates,
unscaled_control_covariates,
n_covariates)
#Get matrix of unitwise differences between control units to treated unit
pairwise_difference = self._get_pairwise_difference_matrix(treated_covariates,
control_covariates)
return {
'dataset': dataset,
'outcome_var':outcome_var,
'id_var':id_var,
'time_var':time_var,
'treatment_period':treatment_period,
'treated_unit':treated_unit,
'control_units':control_units,
'covariates':covariates,
'periods_all':periods_all,
'periods_pre_treatment':periods_pre_treatment,
'n_controls': n_controls,
'n_covariates':n_covariates,
'treated_outcome_all': treated_outcome_all,
'treated_outcome': treated_outcome,
'treated_covariates': treated_covariates,
'unscaled_treated_covariates':unscaled_treated_covariates,
'control_outcome_all': control_outcome_all,
'control_outcome': control_outcome,
'control_covariates': control_covariates,
'unscaled_control_covariates': unscaled_control_covariates,
'pairwise_difference':pairwise_difference,
'pen':pen,
'random_seed':random_seed,
}
def _process_treated_data(self, dataset, outcome_var, id_var, time_var, treatment_period, treated_unit,
periods_all, periods_pre_treatment, covariates, n_covariates):
'''
Extracts and formats outcome and covariate matrices for the treated unit
'''
treated_data_all = dataset.loc[dataset[id_var] == treated_unit]
treated_outcome_all = np.array(treated_data_all[outcome_var]).reshape(periods_all,1) #All outcomes
#Only pre-treatment
treated_data = treated_data_all.loc[dataset[time_var] < treatment_period]
#Extract outcome and shape as matrix
treated_outcome = np.array(treated_data[outcome_var]).reshape(periods_pre_treatment, 1)
#Columnwise mean of each covariate in pre-treatment period for treated unit, shape as matrix
treated_covariates = np.array(treated_data[covariates].mean(axis=0)).reshape(n_covariates, 1)
return treated_outcome_all, treated_outcome, treated_covariates
def _process_control_data(self, dataset, outcome_var, id_var, time_var, treatment_period, treated_unit, n_controls,
periods_all, periods_pre_treatment, covariates):
'''
Extracts and formats outcome and covariate matrices for the control group
'''
#Every unit that is not the treated unit is control
control_data_all = dataset.loc[dataset[id_var] != treated_unit]
control_outcome_all = np.array(control_data_all[outcome_var]).reshape(n_controls, periods_all).T #All outcomes
#Only pre-treatment
control_data = control_data_all.loc[dataset[time_var] < treatment_period]
#Extract outcome, then shape as matrix
control_outcome = np.array(control_data[outcome_var]).reshape(n_controls, periods_pre_treatment).T
#Extract the covariates for all the control units
#Identify which rows correspond to which control unit by setting index,
#then take the unitwise mean of each covariate
#This results in the desired (n_control x n_covariates) matrix
control_covariates = np.array(control_data[covariates].\
set_index(np.arange(len(control_data[covariates])) // periods_pre_treatment).groupby(level=-1).mean()).T
return control_outcome_all, control_outcome, control_covariates
def _rescale_covariate_variance(self, treated_covariates, control_covariates, n_covariates):
'''Rescale covariates to be unit variance'''
#Combine control and treated into one big dataframe, over which we will compute variance for each covariate
big_dataframe = np.concatenate((treated_covariates, control_covariates), axis=1)
#Rescale each covariate to have unit variance
big_dataframe /= np.apply_along_axis(np.std, 0, big_dataframe)
#Re-seperate treated and control from big dataframe
treated_covariates = big_dataframe[:,0].reshape(n_covariates, 1) #First column is treated unit
control_covariates = big_dataframe[:,1:] #All other columns are control units
#Return covariate matices with unit variance
return treated_covariates, control_covariates
def _get_pairwise_difference_matrix(self, treated_covariates, control_covariates):
'''
Computes matrix of same shape as control_covariates, but with unit-wise difference from treated unit
Used in optimization objective for both SC and DSC
'''
return treated_covariates - control_covariates
class Synth(DataProcessor, Optimize, Plot, Tables, ValidityTests):
    '''Class implementing the Synthetic Control Method'''

    def __init__(self, dataset,
                outcome_var, id_var, time_var,
                treatment_period, treated_unit,
                n_optim=10, pen=0, exclude_columns=None, random_seed=0,
                **kwargs):
        '''
        data:
            Type: Pandas dataframe.
            Long-format panel: one row per unit/period observation including
            the outcome and covariates, ordered by unit then time.

        outcome_var:
            Type: str.
            Name of outcome column in data, e.g. "gdp"

        id_var:
            Type: str.
            Name of unit indicator column in data, e.g. "country"

        time_var:
            Type: str.
            Name of time column in data, e.g. "year"

        treatment_period:
            Type: int.
            Time of first observation after the treatment took place, i.e.
            first observation affected by the treatment effect.

        treated_unit:
            Type: str.
            Name of the unit that received treatment; data["id_var"] == treated_unit

        n_optim:
            Type: int. Default: 10.
            Number of optimization restarts from different initializations.
            Higher means longer runtime but a better chance of a global optimum.

        pen:
            Type: float. Default: 0.
            Penalization coefficient weighting the pairwise treated-control
            covariate differences against the overall synthetic-control fit.

        exclude_columns:
            Type: list, optional. Default: None (treated as []).
            Column names to drop from the predictor set.
        '''
        self.method = "SC"

        # BUGFIX: `exclude_columns` previously defaulted to a mutable list
        # (shared across all calls); default to None and normalize here.
        if exclude_columns is None:
            exclude_columns = []

        original_checked_input = self._process_input_data(
            dataset, outcome_var, id_var, time_var, treatment_period, treated_unit, pen,
            exclude_columns, random_seed, **kwargs
        )
        self.original_data = SynthBase(**original_checked_input)

        # Fit the synthetic control (fills in w, v and synth_* fields).
        self.optimize(self.original_data.treated_outcome, self.original_data.treated_covariates,
                    self.original_data.control_outcome, self.original_data.control_covariates,
                    self.original_data.pairwise_difference,
                    self.original_data, False, pen, n_optim)

        # Compute pre/post-treatment RMSPE ratios for the treated unit.
        self._pre_post_rmspe_ratios(None, False)

        # Summary tables: unit weights and covariate comparison.
        self.original_data.weight_df = self._get_weight_df(self.original_data)
        self.original_data.comparison_df = self._get_comparison_df(self.original_data)
class DiffSynth(DataProcessor, Optimize, Plot, Tables, ValidityTests):
    '''Class implementing the Differenced Synthetic Control Method'''

    def __init__(self, dataset,
                outcome_var, id_var, time_var,
                treatment_period, treated_unit,
                n_optim=10, pen=0,
                exclude_columns=None, random_seed=0,
                not_diff_cols=None,
                **kwargs):
        '''
        data:
            Type: Pandas dataframe.
            Long-format panel: one row per unit/period observation including
            the outcome and covariates, ordered by unit then time.

        outcome_var:
            Type: str.
            Name of outcome column in data, e.g. "gdp"

        id_var:
            Type: str.
            Name of unit indicator column in data, e.g. "country"

        time_var:
            Type: str.
            Name of time column in data, e.g. "year"

        treatment_period:
            Type: int.
            Time of first observation after the treatment took place, i.e.
            first observation affected by the treatment effect.

        treated_unit:
            Type: str.
            Name of the unit that received treatment; data["id_var"] == treated_unit

        n_optim:
            Type: int. Default: 10.
            Number of optimization restarts from different initializations.
            Higher means longer runtime but a better chance of a global optimum.

        not_diff_cols:
            Type: list, optional. Default: None.
            Column names to omit from first-differencing (useful when a
            column has many missing values). If None, no covariate is
            differenced at all (original behavior preserved).

        pen:
            Type: float. Default: 0.
            Penalization coefficient weighting the pairwise treated-control
            covariate differences against the overall synthetic-control fit.

        exclude_columns:
            Type: list, optional. Default: None (treated as []).
            Column names to drop from the predictor set.
        '''
        self.method = "DSC"

        # BUGFIX: `exclude_columns` previously defaulted to a mutable list
        # (shared across all calls); default to None and normalize here.
        if exclude_columns is None:
            exclude_columns = []

        # Process original data - used in plotting and summaries.
        original_checked_input = self._process_input_data(
            dataset, outcome_var, id_var, time_var, treatment_period, treated_unit, pen,
            exclude_columns, random_seed, **kwargs
        )
        self.original_data = SynthBase(**original_checked_input)

        # Process differenced data - used in inference and optimization.
        modified_dataset = self.difference_data(dataset, not_diff_cols)
        modified_checked_input = self._process_input_data(
            modified_dataset, outcome_var, id_var, time_var, treatment_period, treated_unit, pen,
            exclude_columns, random_seed, **kwargs
        )
        self.modified_data = SynthBase(**modified_checked_input)
        # The penalized objective always uses the *original* covariate gaps.
        self.modified_data.pairwise_difference = self.original_data.pairwise_difference

        # Fit the synthetic control on the differenced data.
        self.optimize(self.modified_data.treated_outcome, self.modified_data.treated_covariates,
                    self.modified_data.control_outcome, self.modified_data.control_covariates,
                    self.modified_data.pairwise_difference,
                    self.modified_data, False, pen, n_optim)

        # Compute rmspe_df for the treated unit's synthetic control.
        self._pre_post_rmspe_ratios(None, False)

        # Prepare summary tables.
        self.original_data.weight_df = self._get_weight_df(self.original_data)
        self.original_data.comparison_df = self._get_comparison_df(self.original_data)

    def difference_data(self, dataset, not_diff_cols):
        '''
        Take an appropriately formatted, unprocessed dataset and return a
        dataset of first-difference values (change from the previous period),
        computed for the outcome and - unless suppressed - the covariates.
        Ready to fit a Differenced Synthetic Control.

        Additional processing:
        1. Missing values are imputed with unit-wise linear interpolation,
           since the first difference is undefined across a gap.
        2. The first time period of every unit is dropped, as its difference
           from the (non-existent) previous period is undefined.
        '''
        # Deep copy so the caller's dataframe is never mutated.
        modified_dataset = copy.deepcopy(dataset)
        data = self.original_data

        # When not_diff_cols is None, covariates are only interpolated, never
        # differenced (original behavior preserved).
        skip_covariate_diff = not_diff_cols is None

        # Interpolate then first-difference the outcome variable.
        modified_dataset[data.outcome_var] = modified_dataset.groupby(data.id)[data.outcome_var].apply(
            lambda unit: unit.interpolate(method='linear', limit_direction="both")).diff()

        for col in data.covariates:
            # Fill in missing values using unit-wise linear interpolation.
            modified_dataset[col] = modified_dataset.groupby(data.id)[col].apply(
                lambda unit: unit.interpolate(method='linear', limit_direction="both"))
            # Compute change from the previous period.
            if not skip_covariate_diff and col not in not_diff_cols:
                # BUGFIX: the diff result was previously computed but
                # discarded (`modified_dataset[col].diff()` with no
                # assignment), so covariates were never differenced.
                modified_dataset[col] = modified_dataset[col].diff()

        # Drop the first time period of every unit - the change from the
        # previous period is undefined there.
        modified_dataset.drop(modified_dataset.loc[modified_dataset[data.time]==modified_dataset[data.time].min()].index, inplace=True)

        return modified_dataset

    def demean_data(self):
        '''
        Mean-subtraction transform: subtract each unit's own mean of every
        variable from each of its observations.

        Not implemented yet; always raises NotImplementedError.
        '''
        raise NotImplementedError
        # Sketch of the intended implementation (previously left here as
        # unreachable code; note it also referenced an undefined name `data`):
        # mean_subtract_cols = self.dataset.groupby(self.id).apply(
        #     lambda x: x - np.mean(x)).drop(columns=[self.time], axis=1)
        # return pd.concat([data[["ID", "Time"]], mean_subtract_cols], axis=1)
| [
"numpy.mean",
"numpy.random.default_rng",
"numpy.array",
"numpy.apply_along_axis",
"numpy.concatenate",
"copy.deepcopy",
"pandas.concat"
] | [((2802, 2836), 'numpy.random.default_rng', 'np.random.default_rng', (['random_seed'], {}), '(random_seed)\n', (2823, 2836), True, 'import numpy as np\n'), ((12769, 12833), 'numpy.concatenate', 'np.concatenate', (['(treated_covariates, control_covariates)'], {'axis': '(1)'}), '((treated_covariates, control_covariates), axis=1)\n', (12783, 12833), True, 'import numpy as np\n'), ((12914, 12959), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.std', '(0)', 'big_dataframe'], {}), '(np.std, 0, big_dataframe)\n', (12933, 12959), True, 'import numpy as np\n'), ((21210, 21232), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (21223, 21232), False, 'import copy\n'), ((23074, 23135), 'pandas.concat', 'pd.concat', (["[data[['ID', 'Time']], mean_subtract_cols]"], {'axis': '(1)'}), "([data[['ID', 'Time']], mean_subtract_cols], axis=1)\n", (23083, 23135), True, 'import pandas as pd\n'), ((10497, 10536), 'numpy.array', 'np.array', (['treated_data_all[outcome_var]'], {}), '(treated_data_all[outcome_var])\n', (10505, 10536), True, 'import numpy as np\n'), ((10764, 10799), 'numpy.array', 'np.array', (['treated_data[outcome_var]'], {}), '(treated_data[outcome_var])\n', (10772, 10799), True, 'import numpy as np\n'), ((11583, 11622), 'numpy.array', 'np.array', (['control_data_all[outcome_var]'], {}), '(control_data_all[outcome_var])\n', (11591, 11622), True, 'import numpy as np\n'), ((11864, 11899), 'numpy.array', 'np.array', (['control_data[outcome_var]'], {}), '(control_data[outcome_var])\n', (11872, 11899), True, 'import numpy as np\n'), ((23013, 23023), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (23020, 23023), True, 'import numpy as np\n')] |
"""
Example inferring multiple exponential decay models arranged into a
4D voxelwise image.
This example uses the main() interface as used by the command line
application to simplify running the inference and saving the output
"""
import sys
import numpy as np
import nibabel as nib
from vaby_avb import run
import vaby
# Uncomment line below to start the random number generator off with the same seed value
# each time, for repeatable results.
#np.random.seed(0)

# Ground truth parameters
# Two parameters for the single-exponential model (see PARAMS_TRUTH usage
# below: both values are broadcast to every voxel).
PARAMS_TRUTH = [42, 0.5]
NOISE_PREC_TRUTH = 0.1
# Noise variance is the reciprocal of the noise precision.
NOISE_VAR_TRUTH = 1/NOISE_PREC_TRUTH
NOISE_STD_TRUTH = np.sqrt(NOISE_VAR_TRUTH)

# Observed data samples are generated by Numpy from the ground truth
# Gaussian distribution. Reducing the number of samples should make
# the inference less 'confident' - i.e. the output variances for
# MU and BETA will increase
model = vaby.get_model_class("exp")(None)
N = 100
DT = 2.0 / N
NX, NY, NZ = 10, 10, 10
# Time points at which the model is evaluated.
t = np.array([float(t)*DT for t in range(N)])
# Broadcast the ground-truth parameter values to every voxel:
# (2,) -> (2, 1, 1) -> (2, NX*NY*NZ, 1).
params_voxelwise = np.tile(np.array(PARAMS_TRUTH)[..., np.newaxis, np.newaxis], (1, NX*NY*NZ, 1))
DATA_CLEAN = model.evaluate(params_voxelwise, t).numpy()
# Add i.i.d. Gaussian noise with the ground-truth standard deviation.
DATA_NOISY = DATA_CLEAN + np.random.normal(0, NOISE_STD_TRUTH, DATA_CLEAN.shape)
# Arrange the timeseries into a 4D (x, y, z, time) volume and save it as a
# NIfTI file (identity affine) for the inference run below.
niidata = DATA_NOISY.reshape((NX, NY, NZ, N))
nii = nib.Nifti1Image(niidata, np.identity(4))
nii.to_filename("data_exp_noisy.nii.gz")

# Run Fabber as a comparison if desired
#import os
#os.system("fabber_exp --data=data_exp_noisy --max-iterations=20 --output=exps_example_fabber_out --dt=%.3f --model=exp --num-exps=1 --method=vb --noise=white --overwrite" % DT)

# Options forwarded to vaby_avb.run(); the requested outputs (mean, free
# energy, model fit and the log) are written to 'exps_example_out'.
options = {
    "dt" : DT,
    "save_mean" : True,
    "save_free_energy" : True,
    "save_model_fit" : True,
    "save_log" : True,
    "log_stream" : sys.stdout,
    "max_iterations" : 20,
}
runtime, avb = run("data_exp_noisy.nii.gz", "exp", "exps_example_out", **options)
| [
"numpy.random.normal",
"numpy.identity",
"numpy.sqrt",
"numpy.array",
"vaby_avb.run",
"vaby.get_model_class"
] | [((599, 623), 'numpy.sqrt', 'np.sqrt', (['NOISE_VAR_TRUTH'], {}), '(NOISE_VAR_TRUTH)\n', (606, 623), True, 'import numpy as np\n'), ((1800, 1866), 'vaby_avb.run', 'run', (['"""data_exp_noisy.nii.gz"""', '"""exp"""', '"""exps_example_out"""'], {}), "('data_exp_noisy.nii.gz', 'exp', 'exps_example_out', **options)\n", (1803, 1866), False, 'from vaby_avb import run\n'), ((863, 890), 'vaby.get_model_class', 'vaby.get_model_class', (['"""exp"""'], {}), "('exp')\n", (883, 890), False, 'import vaby\n'), ((1169, 1223), 'numpy.random.normal', 'np.random.normal', (['(0)', 'NOISE_STD_TRUTH', 'DATA_CLEAN.shape'], {}), '(0, NOISE_STD_TRUTH, DATA_CLEAN.shape)\n', (1185, 1223), True, 'import numpy as np\n'), ((1301, 1315), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (1312, 1315), True, 'import numpy as np\n'), ((1015, 1037), 'numpy.array', 'np.array', (['PARAMS_TRUTH'], {}), '(PARAMS_TRUTH)\n', (1023, 1037), True, 'import numpy as np\n')] |
import math
import numpy as np
import scipy.optimize as opt
import matplotlib.pyplot as plt
import matplotlib.gridspec as gdsc
class concreteSection:
def __init__(self,sct,units='mm'):
'''
Imports section.
Parameters
----------
sct : Section Object
Object defining the section to be analysed.
units : string, optional
The units in which the dimensions are given. Options are 'mm','m'. The default is 'mm'.
Raises
------
Exception
If units given aren't mm or m raise an Exception.
Returns
-------
None.
'''
he,w = sct.getXHeights()
self.sct = sct
allowableUnits = ['mm','m']
if units not in allowableUnits:
raise Exception("Only mm or m currently allowed as section size units")
elif units == 'm':
self.concreteWidths = np.array(w*1000)
self.concreteHeights = np.array(he*1000)
self.steelHeights = np.array(sct.steel.steelCo[:,0]*1000)
self.barDiameters = np.array(sct.steel.steelDia*1000)
self.steelAreas = 0.25*(np.array(sct.steel.steelDia)*1000)**2*math.pi
self.step = (he[1]-he[0])*1000
self.h = (he[-1]-he[0])*1000
else:
self.concreteWidths = np.array(w)
self.concreteHeights = np.array(he)
self.steelHeights = np.array(sct.steel.steelCo[:,0])
self.barDiameters = np.array(sct.steel.steelDia)
self.steelAreas = 0.25*(np.array(sct.steel.steelDia))**2*math.pi
self.step = (he[1]-he[0])
self.h = (he[-1]-he[0])
def getConcreteForces(self,strains,b,concreteMaterial):
'''
Returns the stress and force in the concrete when given the strains, and the widths. If calling explicitly, make sure units are consistent. Recommended to use SI.
Parameters
----------
strains : array
Array of the strains down the section.
b : array
Array of the widths down the section.
concreteMaterial : material Object
Object holding the concrete material properties.
Returns
-------
stress : array
Returns the stress at each height.
force : array
Returns the forces at each height.
'''
stress = np.where(strains>0,0,np.where(strains<-concreteMaterial.ec3,-concreteMaterial.fcd,concreteMaterial.Ec*strains))
#print(stress)
force = b*stress*self.step
return (stress,force);
    def resultantForce(self,topStrain,bottomStrain,concreteMaterial,steelMaterial):
        '''
        Calculates the force and moment in the section given the strain at the top and bottom. If calling explicitly, make sure units are consistent. Recommended to use SI.
        If the axial force is assumed to act at the centroid, section can be relocated using the relevant method in the section object.

        Parameters
        ----------
        topStrain : number
            Strain at the top of the section
        bottomStrain : number
            Strain at the bottom of the section.
        concreteMaterial : material object
            Object holding the concrete material properties.
        steelMaterial : material object
            Object holding the steel material properties.

        Returns
        -------
        N : float
            Axial Force in the section. The axial force acts at height zero.
        M : float
            Moment in the section. The moment is calculated about height zero.
        '''
        # Plane sections remain plane: the strain varies linearly through the
        # depth, so interpolate it at the bar heights and at each concrete strip.
        steelStrains = np.interp(self.steelHeights,[self.concreteHeights[0],self.concreteHeights[-1]],[bottomStrain,topStrain])
        concStrains = np.interp(self.concreteHeights,[self.concreteHeights[0],self.concreteHeights[-1]],[bottomStrain,topStrain])
        # Index [1] of (stress, force): the force in each concrete strip.
        concForces = self.getConcreteForces(concStrains,self.concreteWidths,concreteMaterial)[1]
        # Steel: elastic (Es * strain) below the yield strain ey, then a linear
        # hardening branch from fyd up to k*fyd at the ultimate strain euk,
        # with the sign of the strain restored after taking absolute values.
        steelForces = np.where(np.absolute(steelStrains)<steelMaterial.ey,steelStrains*steelMaterial.Es*self.steelAreas,self.steelAreas*(steelMaterial.fyd + ((np.absolute(steelStrains)-steelMaterial.ey)/(steelMaterial.euk-steelMaterial.ey))*(steelMaterial.k-1)*steelMaterial.fyd)*np.sign(steelStrains))
        N = np.sum(steelForces,axis=0) + np.sum(concForces,axis=0) #N
        # Heights appear to be stored in mm; the 0.001 factor converts the
        # lever arms to metres so the moment comes out in Nm -- TODO confirm.
        M = -np.sum(steelForces*(self.steelHeights)*0.001) + -np.sum(concForces*(self.concreteHeights)*0.001) #Nm
        return (N,M);
def strainFinder(self,x,concreteMaterial,steelMaterial,N,M):
'''
Used by the strainSolver routine to solve the strains for a given input N and M.
Parameters
----------
x : list
List holding the initial guess for top and bottom strain. Varied by solver routine to find solution.
concreteMaterial : Material Object
An object holding the concrete material properties
steelMaterial : Material Object
An object holding the steel material properties
N : float
Axial force to solve for.
M : float
Moment to solve for.
Returns
-------
eqN : float
Difference between target axial force and calculated axial force. Aiming for zero with solver.
eqM : float
Difference between target moment and calculated moment. Aiming for zero with solver.
'''
topStrain,bottomStrain = x
if bottomStrain<-0.0035 or topStrain<-0.0035 or bottomStrain>1 or topStrain>1:
eqN=100000000
eqM=100000000
else:
eqN = N - self.resultantForce(topStrain,bottomStrain,concreteMaterial,steelMaterial)[0]
eqM = M - self.resultantForce(topStrain,bottomStrain,concreteMaterial,steelMaterial)[1]
return [eqN,eqM];
def strainSolver(self,N,M,concreteMaterial,steelMaterial,units='Nm'):
'''
Calculates the strain situation for a given axial force and bending moment.
Parameters
----------
N : Number
Input axial force.
M : Number
Input moment.
concreteMaterial : Material Object
An object holding the concrete material properties
steelMaterial : Material Object
An object holding the steel material properties
units : string, optional
The units in which the moment and axial force are given. Options are 'Nm','kNm','MNm'. The default is 'Nm'.
Raises
------
Exception
Exception is raised if a unit which isn't allowed is input.
Returns
-------
(topStrain,bottomStrain) : double
Returns both the strains at the extreme top and bottom of the section.
'''
allowableUnits = ['Nm','kNm','MNm']
if units not in allowableUnits:
raise Exception("Allowable units are 'Nm','kNm','MNm'")
elif (units == 'kNm'):
N = N*10**3
M = M*10**3
elif (units == 'MNm'):
N = N*10**6
M = M*10**6
topStrain,bottomStrain = opt.root(self.strainFinder,[0,0],args=(concreteMaterial,steelMaterial,N,M)).x
return (topStrain,bottomStrain);
def concreteLimitedMomentCapacity(self,M,N,concreteMaterial,steelMaterial):
topStrain,bottomStrain = self.strainSolver(N,M,concreteMaterial,steelMaterial)
if abs(topStrain-bottomStrain) < concreteMaterial.ecu3:
concLimit = -concreteMaterial.ecu2*(1+abs(topStrain-bottomStrain)/concreteMaterial.ecu3)
else:
concLimit = -concreteMaterial.ecu3
concEq = min(topStrain,bottomStrain) - concLimit
return concEq;
def steelLimitedMomentCapacity(self,M,N,concreteMaterial,steelMaterial):
topStrain,bottomStrain = self.strainSolver(N,M,concreteMaterial,steelMaterial)
steelStrains = np.interp(self.steelHeights,[self.concreteHeights[0],self.concreteHeights[-1]],[bottomStrain,topStrain])
steelEq = np.amax(steelStrains) - steelMaterial.eud
return steelEq;
def CapacitySolver(self,concreteMaterial,steelMaterial,Anum=50,Bnum=200,Cnum=50,returnStrains=False,units="Nm"):
'''
This function iterates through the limiting strain states, and caclulates the axial force and moment at each of these states.
Parameters
----------
concreteMaterial : Material Object
An object holding the concrete material properties
steelMaterial : Material Object
An object holding the steel material properties
Anum : integer, optional
The number of steps in the stage between uniform compression, and first extreme tension/limiting compression. The default is 50.
Bnum : integer, optional
The number of steps in the stage between first extreme tension and limiting tension (with limiting compression). Divided exponentially. The default is 200.
Cnum : integer, optional
The number of steps in the stage between extreme bending and uniform tension. The default is 50.
returnStrains : boolean, optional
Whether to return the strains or not. The default is False.
units : string, optional
The units which the axial force and moment should be returned in. Current options are 'Nm','kNm','MNm'. The default is "Nm".
Raises
------
Exception
If an invalid input for units is tried, an exception is raised.
Returns
-------
Lists
Default is to return a list of the axial force and corresponding moment capacity.
If returnStrains is true, the corresponding top and bottom strains are also returned.
'''
tenStrainLimit = (self.h/np.amax(self.steelHeights))*(concreteMaterial.ecu3+steelMaterial.eud) - concreteMaterial.ecu3
topStrainsA = np.linspace(-concreteMaterial.ecu2,-concreteMaterial.ecu3,num=Anum)
topStrainsB = np.tile([-concreteMaterial.ecu3],Bnum)
topStrainsC = np.linspace(-concreteMaterial.ecu3,steelMaterial.eud,Cnum)
topStrainsD = np.linspace(steelMaterial.eud,tenStrainLimit,Cnum)
topStrainsE = np.geomspace(tenStrainLimit,1e-10,Bnum)
topStrainsF = np.linspace(0,-concreteMaterial.ecu2,Anum)
botStrainsA = np.linspace(-concreteMaterial.ecu2,0,num=Anum)
botStrainsB = np.geomspace(1e-10,tenStrainLimit,num=Bnum)
botStrainsC = np.linspace(tenStrainLimit,steelMaterial.eud,Cnum)
botStrainsD = np.linspace(steelMaterial.eud,-concreteMaterial.ecu3,Cnum)
botStrainsE = np.tile([-concreteMaterial.ecu3],Bnum)
botStrainsF = np.linspace(-concreteMaterial.ecu3,-concreteMaterial.ecu2,Anum)
topStrains = np.concatenate([topStrainsA,topStrainsB,topStrainsC,topStrainsD,topStrainsE,topStrainsF])
botStrains = np.concatenate([botStrainsA,botStrainsB,botStrainsC,botStrainsD,botStrainsE,botStrainsF])
N = np.asarray([])
M = np.asarray([])
for a in range(0,2*(Anum+Bnum+Cnum)):
force,moment = self.resultantForce(topStrains[a],botStrains[a],concreteMaterial,steelMaterial)
N = np.append(N,force)
M = np.append(M,moment)
allowableUnits = ['Nm','kNm','MNm']
if units not in allowableUnits:
raise Exception("Allowable units are 'Nm','kNm','MNm'")
if (units == 'kNm'):
N = N/10**3
M = M/10**3
elif (units == 'MNm'):
N = N/10**6
M = M/10**6
if returnStrains:
return N,M,topStrains,botStrains;
else:
return N,M;
def formatSectionPlot(self,ax,xlabel,grid=True):
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.set_ylim(self.concreteHeights[0],self.concreteHeights[-1])
ax.set_yticks(np.arange(self.concreteHeights[0], self.concreteHeights[-1]+self.h/5, step=self.h/5))
if(ax.get_xlim()[1]<0):
ax.set_xlim(right=0)
ax.minorticks_on()
if grid:
ax.grid(grid,'major')
ax.grid(grid,'minor','both',linestyle=':')
ax.set_xlabel(xlabel,fontsize=8,fontweight='bold')
ax.xaxis.set_label_coords(0.5, -0.025)
ax.tick_params(labelsize=8)
return;
    def plotStrainStressState(self,N,M,concreteMaterial,steelMaterial,units='Nm'):
        '''
        Plot the section, its strain and stress distributions and the
        resultant forces for a given axial force and moment.

        Parameters
        ----------
        N : number
            Applied axial force.
        M : number
            Applied moment.
        concreteMaterial : Material Object
            An object holding the concrete material properties.
        steelMaterial : Material Object
            An object holding the steel material properties.
        units : string, optional
            Units of N and M. Options are 'Nm','kNm','MNm'. The default is 'Nm'.

        Raises
        ------
        Exception
            If an invalid unit string is given.

        Returns
        -------
        None.
        '''
        allowableUnits = ['Nm','kNm','MNm']
        if units not in allowableUnits:
            raise Exception("Allowable units are 'Nm','kNm','MNm'")
        elif (units == 'kNm'):
            N = N*10**3
            M = M*10**3
        elif (units == 'MNm'):
            N = N*10**6
            M = M*10**6
        #Produce a figure to hold the plots
        fig = plt.figure(figsize=(10,6))
        spec = fig.add_gridspec(ncols=4,nrows=1,height_ratios = [1])
        #Plot the section as a graph (rotated so height runs up the page)
        ax0 = fig.add_subplot(spec[0,0],adjustable='box',aspect='equal')
        self.sct.rotateSection(90).plotSection(ax=ax0)
        self.formatSectionPlot(ax0,"",grid=False)
        # Plot the strain (linear between the solved top and bottom strains)
        topStrain,bottomStrain = self.strainSolver(N,M,concreteMaterial,steelMaterial,units='Nm')
        concStrains = np.interp(self.concreteHeights,[self.concreteHeights[0],self.concreteHeights[-1]],[bottomStrain,topStrain])
        ax1 = fig.add_subplot(spec[0,1])
        plt.plot(concStrains,self.concreteHeights)
        self.formatSectionPlot(ax1,"Strain")
        # Plot the stresses in the concrete
        ax2 = fig.add_subplot(spec[0,2])
        concStresses = self.getConcreteForces(concStrains,self.concreteWidths,concreteMaterial)[0]
        plt.plot(concStresses,self.concreteHeights)
        self.formatSectionPlot(ax2,"Stress (MPa)")
        #Calculate the resultant forces from the stresses
        # (same bilinear steel law as resultantForce: elastic below ey,
        # hardening from fyd to k*fyd between ey and euk)
        steelStrains = np.interp(self.steelHeights,[self.concreteHeights[0],self.concreteHeights[-1]],[bottomStrain,topStrain])
        steelForces = np.where(np.absolute(steelStrains)<steelMaterial.ey,steelStrains*steelMaterial.Es*self.steelAreas,self.steelAreas*(steelMaterial.fyd + ((np.absolute(steelStrains)-steelMaterial.ey)/(steelMaterial.euk-steelMaterial.ey))*(steelMaterial.k-1)*steelMaterial.fyd)*np.sign(steelStrains))
        concForces = self.getConcreteForces(concStrains,self.concreteWidths,concreteMaterial)[1]
        singleConcForce = np.sum(concForces,axis=0)
        concCentroid = np.sum(concForces*self.concreteHeights,axis=0)/np.sum(concForces,axis=0)
        steelTensionSum = np.sum(np.where(steelForces>0,steelForces,0))
        steelCompressionSum = np.sum(np.where(steelForces<0,steelForces,0))
        steelCompressionCentroid = np.sum(np.where(steelForces<0,steelForces*self.steelHeights,0))/steelCompressionSum
        #Plot the resultant forces (scaled by 1e-6, i.e. in MN -- assumes N)
        ax3 = fig.add_subplot(spec[0,3])
        if(steelTensionSum>0):
            steelTensionCentroid = np.sum(np.where(steelForces>0,steelForces*self.steelHeights,0))/steelTensionSum
            plt.plot(steelTensionSum*10**-6,steelTensionCentroid,'r.',label = "Steel Tensile Forces",markersize=8)
        plt.plot(steelCompressionSum*10**-6,steelCompressionCentroid,'m.',label="Steel Compression Forces",markersize=8)
        plt.plot(singleConcForce*10**-6,concCentroid,'gx',label="Concrete Force",markersize=8)
        plt.legend(loc='upper left',fontsize=8)
        self.formatSectionPlot(ax3,"Forces (MN)")
        #Show the plots
        plt.show()
return; | [
"numpy.array",
"numpy.arange",
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.linspace",
"numpy.concatenate",
"numpy.tile",
"numpy.geomspace",
"numpy.sign",
"numpy.interp",
"scipy.optimize.root",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.absolute",
... | [((3792, 3905), 'numpy.interp', 'np.interp', (['self.steelHeights', '[self.concreteHeights[0], self.concreteHeights[-1]]', '[bottomStrain, topStrain]'], {}), '(self.steelHeights, [self.concreteHeights[0], self.concreteHeights\n [-1]], [bottomStrain, topStrain])\n', (3801, 3905), True, 'import numpy as np\n'), ((3950, 4066), 'numpy.interp', 'np.interp', (['self.concreteHeights', '[self.concreteHeights[0], self.concreteHeights[-1]]', '[bottomStrain, topStrain]'], {}), '(self.concreteHeights, [self.concreteHeights[0], self.\n concreteHeights[-1]], [bottomStrain, topStrain])\n', (3959, 4066), True, 'import numpy as np\n'), ((8282, 8395), 'numpy.interp', 'np.interp', (['self.steelHeights', '[self.concreteHeights[0], self.concreteHeights[-1]]', '[bottomStrain, topStrain]'], {}), '(self.steelHeights, [self.concreteHeights[0], self.concreteHeights\n [-1]], [bottomStrain, topStrain])\n', (8291, 8395), True, 'import numpy as np\n'), ((10352, 10421), 'numpy.linspace', 'np.linspace', (['(-concreteMaterial.ecu2)', '(-concreteMaterial.ecu3)'], {'num': 'Anum'}), '(-concreteMaterial.ecu2, -concreteMaterial.ecu3, num=Anum)\n', (10363, 10421), True, 'import numpy as np\n'), ((10443, 10482), 'numpy.tile', 'np.tile', (['[-concreteMaterial.ecu3]', 'Bnum'], {}), '([-concreteMaterial.ecu3], Bnum)\n', (10450, 10482), True, 'import numpy as np\n'), ((10505, 10565), 'numpy.linspace', 'np.linspace', (['(-concreteMaterial.ecu3)', 'steelMaterial.eud', 'Cnum'], {}), '(-concreteMaterial.ecu3, steelMaterial.eud, Cnum)\n', (10516, 10565), True, 'import numpy as np\n'), ((10587, 10639), 'numpy.linspace', 'np.linspace', (['steelMaterial.eud', 'tenStrainLimit', 'Cnum'], {}), '(steelMaterial.eud, tenStrainLimit, Cnum)\n', (10598, 10639), True, 'import numpy as np\n'), ((10661, 10702), 'numpy.geomspace', 'np.geomspace', (['tenStrainLimit', '(1e-10)', 'Bnum'], {}), '(tenStrainLimit, 1e-10, Bnum)\n', (10673, 10702), True, 'import numpy as np\n'), ((10724, 10768), 'numpy.linspace', 'np.linspace', 
(['(0)', '(-concreteMaterial.ecu2)', 'Anum'], {}), '(0, -concreteMaterial.ecu2, Anum)\n', (10735, 10768), True, 'import numpy as np\n'), ((10792, 10840), 'numpy.linspace', 'np.linspace', (['(-concreteMaterial.ecu2)', '(0)'], {'num': 'Anum'}), '(-concreteMaterial.ecu2, 0, num=Anum)\n', (10803, 10840), True, 'import numpy as np\n'), ((10862, 10907), 'numpy.geomspace', 'np.geomspace', (['(1e-10)', 'tenStrainLimit'], {'num': 'Bnum'}), '(1e-10, tenStrainLimit, num=Bnum)\n', (10874, 10907), True, 'import numpy as np\n'), ((10929, 10981), 'numpy.linspace', 'np.linspace', (['tenStrainLimit', 'steelMaterial.eud', 'Cnum'], {}), '(tenStrainLimit, steelMaterial.eud, Cnum)\n', (10940, 10981), True, 'import numpy as np\n'), ((11003, 11063), 'numpy.linspace', 'np.linspace', (['steelMaterial.eud', '(-concreteMaterial.ecu3)', 'Cnum'], {}), '(steelMaterial.eud, -concreteMaterial.ecu3, Cnum)\n', (11014, 11063), True, 'import numpy as np\n'), ((11085, 11124), 'numpy.tile', 'np.tile', (['[-concreteMaterial.ecu3]', 'Bnum'], {}), '([-concreteMaterial.ecu3], Bnum)\n', (11092, 11124), True, 'import numpy as np\n'), ((11147, 11212), 'numpy.linspace', 'np.linspace', (['(-concreteMaterial.ecu3)', '(-concreteMaterial.ecu2)', 'Anum'], {}), '(-concreteMaterial.ecu3, -concreteMaterial.ecu2, Anum)\n', (11158, 11212), True, 'import numpy as np\n'), ((11235, 11333), 'numpy.concatenate', 'np.concatenate', (['[topStrainsA, topStrainsB, topStrainsC, topStrainsD, topStrainsE, topStrainsF]'], {}), '([topStrainsA, topStrainsB, topStrainsC, topStrainsD,\n topStrainsE, topStrainsF])\n', (11249, 11333), True, 'import numpy as np\n'), ((11347, 11445), 'numpy.concatenate', 'np.concatenate', (['[botStrainsA, botStrainsB, botStrainsC, botStrainsD, botStrainsE, botStrainsF]'], {}), '([botStrainsA, botStrainsB, botStrainsC, botStrainsD,\n botStrainsE, botStrainsF])\n', (11361, 11445), True, 'import numpy as np\n'), ((11452, 11466), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (11462, 11466), True, 
'import numpy as np\n'), ((11480, 11494), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (11490, 11494), True, 'import numpy as np\n'), ((14004, 14031), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (14014, 14031), True, 'import matplotlib.pyplot as plt\n'), ((14490, 14606), 'numpy.interp', 'np.interp', (['self.concreteHeights', '[self.concreteHeights[0], self.concreteHeights[-1]]', '[bottomStrain, topStrain]'], {}), '(self.concreteHeights, [self.concreteHeights[0], self.\n concreteHeights[-1]], [bottomStrain, topStrain])\n', (14499, 14606), True, 'import numpy as np\n'), ((14650, 14693), 'matplotlib.pyplot.plot', 'plt.plot', (['concStrains', 'self.concreteHeights'], {}), '(concStrains, self.concreteHeights)\n', (14658, 14693), True, 'import matplotlib.pyplot as plt\n'), ((14945, 14989), 'matplotlib.pyplot.plot', 'plt.plot', (['concStresses', 'self.concreteHeights'], {}), '(concStresses, self.concreteHeights)\n', (14953, 14989), True, 'import matplotlib.pyplot as plt\n'), ((15135, 15248), 'numpy.interp', 'np.interp', (['self.steelHeights', '[self.concreteHeights[0], self.concreteHeights[-1]]', '[bottomStrain, topStrain]'], {}), '(self.steelHeights, [self.concreteHeights[0], self.concreteHeights\n [-1]], [bottomStrain, topStrain])\n', (15144, 15248), True, 'import numpy as np\n'), ((15669, 15695), 'numpy.sum', 'np.sum', (['concForces'], {'axis': '(0)'}), '(concForces, axis=0)\n', (15675, 15695), True, 'import numpy as np\n'), ((16413, 16537), 'matplotlib.pyplot.plot', 'plt.plot', (['(steelCompressionSum * 10 ** -6)', 'steelCompressionCentroid', '"""m."""'], {'label': '"""Steel Compression Forces"""', 'markersize': '(8)'}), "(steelCompressionSum * 10 ** -6, steelCompressionCentroid, 'm.',\n label='Steel Compression Forces', markersize=8)\n", (16421, 16537), True, 'import matplotlib.pyplot as plt\n'), ((16535, 16634), 'matplotlib.pyplot.plot', 'plt.plot', (['(singleConcForce * 10 ** -6)', 'concCentroid', 
'"""gx"""'], {'label': '"""Concrete Force"""', 'markersize': '(8)'}), "(singleConcForce * 10 ** -6, concCentroid, 'gx', label=\n 'Concrete Force', markersize=8)\n", (16543, 16634), True, 'import matplotlib.pyplot as plt\n'), ((16631, 16671), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '(8)'}), "(loc='upper left', fontsize=8)\n", (16641, 16671), True, 'import matplotlib.pyplot as plt\n'), ((16766, 16776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16774, 16776), True, 'import matplotlib.pyplot as plt\n'), ((2518, 2618), 'numpy.where', 'np.where', (['(strains < -concreteMaterial.ec3)', '(-concreteMaterial.fcd)', '(concreteMaterial.Ec * strains)'], {}), '(strains < -concreteMaterial.ec3, -concreteMaterial.fcd, \n concreteMaterial.Ec * strains)\n', (2526, 2618), True, 'import numpy as np\n'), ((4559, 4586), 'numpy.sum', 'np.sum', (['steelForces'], {'axis': '(0)'}), '(steelForces, axis=0)\n', (4565, 4586), True, 'import numpy as np\n'), ((4588, 4614), 'numpy.sum', 'np.sum', (['concForces'], {'axis': '(0)'}), '(concForces, axis=0)\n', (4594, 4614), True, 'import numpy as np\n'), ((7487, 7572), 'scipy.optimize.root', 'opt.root', (['self.strainFinder', '[0, 0]'], {'args': '(concreteMaterial, steelMaterial, N, M)'}), '(self.strainFinder, [0, 0], args=(concreteMaterial, steelMaterial,\n N, M))\n', (7495, 7572), True, 'import scipy.optimize as opt\n'), ((8406, 8427), 'numpy.amax', 'np.amax', (['steelStrains'], {}), '(steelStrains)\n', (8413, 8427), True, 'import numpy as np\n'), ((11669, 11688), 'numpy.append', 'np.append', (['N', 'force'], {}), '(N, force)\n', (11678, 11688), True, 'import numpy as np\n'), ((11705, 11725), 'numpy.append', 'np.append', (['M', 'moment'], {}), '(M, moment)\n', (11714, 11725), True, 'import numpy as np\n'), ((12498, 12592), 'numpy.arange', 'np.arange', (['self.concreteHeights[0]', '(self.concreteHeights[-1] + self.h / 5)'], {'step': '(self.h / 5)'}), '(self.concreteHeights[0], 
self.concreteHeights[-1] + self.h / 5,\n step=self.h / 5)\n', (12507, 12592), True, 'import numpy as np\n'), ((15719, 15768), 'numpy.sum', 'np.sum', (['(concForces * self.concreteHeights)'], {'axis': '(0)'}), '(concForces * self.concreteHeights, axis=0)\n', (15725, 15768), True, 'import numpy as np\n'), ((15766, 15792), 'numpy.sum', 'np.sum', (['concForces'], {'axis': '(0)'}), '(concForces, axis=0)\n', (15772, 15792), True, 'import numpy as np\n'), ((15826, 15867), 'numpy.where', 'np.where', (['(steelForces > 0)', 'steelForces', '(0)'], {}), '(steelForces > 0, steelForces, 0)\n', (15834, 15867), True, 'import numpy as np\n'), ((15903, 15944), 'numpy.where', 'np.where', (['(steelForces < 0)', 'steelForces', '(0)'], {}), '(steelForces < 0, steelForces, 0)\n', (15911, 15944), True, 'import numpy as np\n'), ((16301, 16414), 'matplotlib.pyplot.plot', 'plt.plot', (['(steelTensionSum * 10 ** -6)', 'steelTensionCentroid', '"""r."""'], {'label': '"""Steel Tensile Forces"""', 'markersize': '(8)'}), "(steelTensionSum * 10 ** -6, steelTensionCentroid, 'r.', label=\n 'Steel Tensile Forces', markersize=8)\n", (16309, 16414), True, 'import matplotlib.pyplot as plt\n'), ((967, 985), 'numpy.array', 'np.array', (['(w * 1000)'], {}), '(w * 1000)\n', (975, 985), True, 'import numpy as np\n'), ((1020, 1039), 'numpy.array', 'np.array', (['(he * 1000)'], {}), '(he * 1000)\n', (1028, 1039), True, 'import numpy as np\n'), ((1071, 1111), 'numpy.array', 'np.array', (['(sct.steel.steelCo[:, 0] * 1000)'], {}), '(sct.steel.steelCo[:, 0] * 1000)\n', (1079, 1111), True, 'import numpy as np\n'), ((1142, 1177), 'numpy.array', 'np.array', (['(sct.steel.steelDia * 1000)'], {}), '(sct.steel.steelDia * 1000)\n', (1150, 1177), True, 'import numpy as np\n'), ((1395, 1406), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (1403, 1406), True, 'import numpy as np\n'), ((1443, 1455), 'numpy.array', 'np.array', (['he'], {}), '(he)\n', (1451, 1455), True, 'import numpy as np\n'), ((1489, 1522), 'numpy.array', 
'np.array', (['sct.steel.steelCo[:, 0]'], {}), '(sct.steel.steelCo[:, 0])\n', (1497, 1522), True, 'import numpy as np\n'), ((1555, 1583), 'numpy.array', 'np.array', (['sct.steel.steelDia'], {}), '(sct.steel.steelDia)\n', (1563, 1583), True, 'import numpy as np\n'), ((4245, 4270), 'numpy.absolute', 'np.absolute', (['steelStrains'], {}), '(steelStrains)\n', (4256, 4270), True, 'import numpy as np\n'), ((4494, 4515), 'numpy.sign', 'np.sign', (['steelStrains'], {}), '(steelStrains)\n', (4501, 4515), True, 'import numpy as np\n'), ((4631, 4678), 'numpy.sum', 'np.sum', (['(steelForces * self.steelHeights * 0.001)'], {}), '(steelForces * self.steelHeights * 0.001)\n', (4637, 4678), True, 'import numpy as np\n'), ((4680, 4729), 'numpy.sum', 'np.sum', (['(concForces * self.concreteHeights * 0.001)'], {}), '(concForces * self.concreteHeights * 0.001)\n', (4686, 4729), True, 'import numpy as np\n'), ((15272, 15297), 'numpy.absolute', 'np.absolute', (['steelStrains'], {}), '(steelStrains)\n', (15283, 15297), True, 'import numpy as np\n'), ((15521, 15542), 'numpy.sign', 'np.sign', (['steelStrains'], {}), '(steelStrains)\n', (15528, 15542), True, 'import numpy as np\n'), ((15985, 16046), 'numpy.where', 'np.where', (['(steelForces < 0)', '(steelForces * self.steelHeights)', '(0)'], {}), '(steelForces < 0, steelForces * self.steelHeights, 0)\n', (15993, 16046), True, 'import numpy as np\n'), ((10233, 10259), 'numpy.amax', 'np.amax', (['self.steelHeights'], {}), '(self.steelHeights)\n', (10240, 10259), True, 'import numpy as np\n'), ((16215, 16276), 'numpy.where', 'np.where', (['(steelForces > 0)', '(steelForces * self.steelHeights)', '(0)'], {}), '(steelForces > 0, steelForces * self.steelHeights, 0)\n', (16223, 16276), True, 'import numpy as np\n'), ((1621, 1649), 'numpy.array', 'np.array', (['sct.steel.steelDia'], {}), '(sct.steel.steelDia)\n', (1629, 1649), True, 'import numpy as np\n'), ((1213, 1241), 'numpy.array', 'np.array', (['sct.steel.steelDia'], {}), 
'(sct.steel.steelDia)\n', (1221, 1241), True, 'import numpy as np\n'), ((4373, 4398), 'numpy.absolute', 'np.absolute', (['steelStrains'], {}), '(steelStrains)\n', (4384, 4398), True, 'import numpy as np\n'), ((15400, 15425), 'numpy.absolute', 'np.absolute', (['steelStrains'], {}), '(steelStrains)\n', (15411, 15425), True, 'import numpy as np\n')] |
"""
Detection of trends: Given prices of X previous hours, will price increase or decrease Y% amount within next Z hours?
(1) X = previous prices
(2) y = next prices
(3) X_norm = X / X[0] - 1 # Normalize X
(4) y_norm = y / X[0] - 1 # Normalize y
(5) Given X_norm, which decision should we take if we know y_norm ?
(5.1) Look at last few prices e.g. average prices of last hour X_norm[-12:].mean()
(5.2) x_decision_point = X_norm[-12:].mean()
(5.3) y_decision_point = y.mean()
(6) if y_decision_point > (x_decision_point * 1.02) and y_decision_point > 0 and x_decision_point > 0
We should buy now because prices will increase
(7) if y_decision_point =< (x_decision_point * .98) and y_decision_point > 0 and x_decision_point > 0
We should sell now because prices decreasing
(8) OTHERWISE HOLD
"""
import coinpy as cp
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib.animation import FuncAnimation
# Fixing the random seeds so runs are reproducible.
torch.backends.cudnn.deterministic = True
seed = 1
np.random.seed(seed)
torch.manual_seed(seed)
# Problem Definition
# INPUT: Previously seen prices
length_prev_seq = 12 * 24 * 7  # 7 DAYS: 12 x 5minutes => 1 hour
# OUTPUT: Next seen prices: Create label in the next seen data
batch_size = 2048  # mini-batch size for training
num_epoch = 1  # number of passes over the dataset
print(f'Input is {length_prev_seq / (12 * 24)} day price')
def decision_evaluator(title: str, seq_of_decisions: list):
    """Report the net budget from executing a BUY/SELL decision sequence.

    Each entry is a (decision, price) pair; BUY spends price[0] and SELL
    earns price[0]. Raises ValueError for any other decision string.
    NOTE: reads the module-level global ``coin_name`` for the summary line.
    """
    print(f'Number of decision {len(seq_of_decisions)}')
    budget = 0
    num_buy = 0
    num_sell = 0
    for decision, price in seq_of_decisions:
        if decision == 'SELL':
            num_sell += 1
            budget += price[0]
        elif decision == 'BUY':
            num_buy += 1
            budget -= price[0]
        else:
            raise ValueError()
    print(f'{title} on {coin_name} current budget:{budget} after Number of Buy:{num_buy}\t Number of Sell:{num_sell}')
def trade_simulator(model, coin_name, d, length_prev_seq):
    """Slide a window over the price series, query the model at every step
    and record/plot the resulting BUY/SELL decisions.

    Parameters: model - trained LinearTrader; coin_name - label used in the
    plot title; d - 2D price array (time x features); length_prev_seq -
    window length fed to the model.
    """
    decisions = []
    plt.plot(d, alpha=.5)
    plt.show()
    with torch.no_grad():
        for k, i in enumerate(range(len(d) - length_prev_seq)):
            prev_seq = d[i:(i + length_prev_seq)]
            # Normalize the window by its oldest price, as done in training.
            x_input = torch.from_numpy(prev_seq).float().flatten()
            y_hat = model(x_input / x_input[0] - 1)
            decision = y_hat.argmax()
            # BUY, SELL or HOLD
            if decision == 0:
                # if buy_flag is True:
                decisions.append(['BUY', prev_seq[-1]]) # ALLOWED TO BUY
                # buy_flag = False
                plt.scatter(i + length_prev_seq, prev_seq[-1], c='g', marker='^')
            elif decision == 1:
                # if buy_flag is False:
                decisions.append(['SELL', prev_seq[-1]]) # ALLOWED TO SELL
                # plt.scatter(i + length_prev_seq, prev_seq[-1], c='r', marker='v')
            else:
                pass
    plt.title(f'Buy:Green, Sell:Red on {coin_name}')
    plt.show()
    print('Testing completed')
    decision_evaluator('Linear Trader', decisions)
class LinearTrader(torch.nn.Module):
    """A single affine layer mapping a normalized price window to scores."""

    def __init__(self, input_dim, output_dim=3):
        super().__init__()
        self.af1 = nn.Linear(input_dim, output_dim)

    def forward(self, seq_input):
        # No activation: raw affine scores.
        return self.af1(seq_input)

    @staticmethod
    def event_definition(seen_price, next_price):
        """Label a (seen, next) price pair as BUY / SELL / WAIT.

        Returns the flattened seen prices and a one-hot label of length 3:
        index 0 = BUY, 1 = SELL, 2 = WAIT.
        """
        assert len(seen_price) > 0
        assert len(next_price) > 0
        seen = seen_price.flatten()
        upcoming = next_price.flatten()
        # (1) Normalize both windows by the oldest seen price.
        norm_seen = seen / seen[0] - 1
        norm_next = upcoming / seen[0] - 1
        # (2) Decision points: mean of the last hour seen vs mean of the
        # whole future window.
        x_decision_point = norm_seen[-12:].mean()
        y_decision_point = norm_next.mean()
        label = np.zeros(3)
        rising = y_decision_point >= x_decision_point * 1.02
        falling = y_decision_point <= x_decision_point * .98
        positive = (y_decision_point > 0) and (x_decision_point > 0)
        if rising and positive:
            # Prices are positive and expected to keep rising.
            label[0] = 1  # BUY
        elif falling and positive:
            # Prices are positive but expected to fall.
            label[1] = 1  # Sell
        else:
            label[2] = 1  # WAIT
        return seen, label
# Train and evaluate one model per coin. NOTE(review): only 'BTC' actually
# runs -- the loop ends with an unconditional `break`.
for coin_name in ['BTC', 'ADA', 'ETH', 'SOL']:
    # (1) Load dataframes holding low, high, open, close and volume of coins in 5 minutes interval
    dfs = cp.DataFramesHolder(path='../Data')
    # (2) Drop dataframes having length of less than 60 days
    dfs.drop_data_frames(key=lambda x: len(x) < 12 * 24 * 60)
    # (3) Create a feature based on the average value of open and close prices
    dfs.create_feature(name='price', key=lambda df: (df['open'] + df['close']) / 2)
    # (4) Select only price feature
    dfs.select_col(['price'])
    df = dfs[coin_name]
    n_time_stamp, n_coins = df.shape
    input_dim = int(n_coins * length_prev_seq)
    # Single-output regressor (despite the 3-class event_definition labels).
    model = LinearTrader(input_dim=input_dim, output_dim=1)
    dataset = cp.PredictPriceDataset(df, seq_length=length_prev_seq)
    train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,drop_last=True)
    optimizer = torch.optim.Adam(model.parameters())
    loss_fn = nn.L1Loss()
    print(f'\nTRAIN a classifier on {coin_name}')
    for i in range(num_epoch):
        losses = []
        for x, y in train_dataloader:
            optimizer.zero_grad()
            x = x.reshape(batch_size, input_dim)
            y = y.reshape(batch_size, 1)
            y_hat = model.forward(x)
            # Compute prediction error
            loss = loss_fn(y_hat, y)
            losses.append(loss.item())
            # Backpropagation
            loss.backward()
            optimizer.step()
        losses = np.array(losses)
        # if i % 25 == 0:
        print(f'{i}.th Epoch\t Avg. Loss:{losses.mean():.3f}\tStd. Loss:{losses.std():.3f}')
    # Important to remember index of coins.
    torch.save(model.state_dict(), f'{coin_name}_model_weights.pth')
    model.eval()
    print('Test Training on ***TRAINING DATA***')
    trade_simulator(model, coin_name, df.values[:10_000], length_prev_seq)
    break
| [
"torch.manual_seed",
"coinpy.PredictPriceDataset",
"torch.nn.L1Loss",
"matplotlib.pyplot.plot",
"torch.from_numpy",
"torch.no_grad",
"coinpy.DataFramesHolder",
"numpy.zeros",
"numpy.array",
"numpy.random.seed",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.scatter",
... | [((1123, 1143), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1137, 1143), True, 'import numpy as np\n'), ((1144, 1167), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1161, 1167), False, 'import torch\n'), ((2059, 2081), 'matplotlib.pyplot.plot', 'plt.plot', (['d'], {'alpha': '(0.5)'}), '(d, alpha=0.5)\n', (2067, 2081), True, 'import matplotlib.pyplot as plt\n'), ((2085, 2095), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2093, 2095), True, 'import matplotlib.pyplot as plt\n'), ((2962, 3010), 'matplotlib.pyplot.title', 'plt.title', (['f"""Buy:Green, Sell:Red on {coin_name}"""'], {}), "(f'Buy:Green, Sell:Red on {coin_name}')\n", (2971, 3010), True, 'import matplotlib.pyplot as plt\n'), ((3015, 3025), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3023, 3025), True, 'import matplotlib.pyplot as plt\n'), ((5148, 5183), 'coinpy.DataFramesHolder', 'cp.DataFramesHolder', ([], {'path': '"""../Data"""'}), "(path='../Data')\n", (5167, 5183), True, 'import coinpy as cp\n'), ((5720, 5774), 'coinpy.PredictPriceDataset', 'cp.PredictPriceDataset', (['df'], {'seq_length': 'length_prev_seq'}), '(df, seq_length=length_prev_seq)\n', (5742, 5774), True, 'import coinpy as cp\n'), ((5799, 5892), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(dataset, batch_size=batch_size, shuffle=True,\n drop_last=True)\n', (5826, 5892), False, 'import torch\n'), ((5956, 5967), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (5965, 5967), True, 'import torch.nn as nn\n'), ((2105, 2120), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2118, 2120), False, 'import torch\n'), ((3242, 3274), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (3251, 3274), True, 'import torch.nn as nn\n'), ((4002, 4013), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4010, 4013), True, 
'import numpy as np\n'), ((6490, 6506), 'numpy.array', 'np.array', (['losses'], {}), '(losses)\n', (6498, 6506), True, 'import numpy as np\n'), ((2621, 2686), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(i + length_prev_seq)', 'prev_seq[-1]'], {'c': '"""g"""', 'marker': '"""^"""'}), "(i + length_prev_seq, prev_seq[-1], c='g', marker='^')\n", (2632, 2686), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2285), 'torch.from_numpy', 'torch.from_numpy', (['prev_seq'], {}), '(prev_seq)\n', (2275, 2285), False, 'import torch\n')] |
import cvlog as log
import cv2
import numpy as np
from .utils import read_file, remove_dirs, get_html
def test_log_image():
    """Logging a raw image creates an 'image' log entry with the expected payload."""
    remove_dirs('log/')
    log.set_mode(log.Mode.LOG)
    source = cv2.imread("tests/data/orange.png")
    log.image(log.Level.ERROR, source)
    entries = get_html('log/cvlog.html').select('.log-list .log-item')
    first = entries[0]
    assert first.select('.log-type')[0].text == 'image'
    assert first['logdata'] == read_file('tests/data/expected/image.txt')
def test_log_edges():
    """Logging a Canny edge map creates an 'edges' log entry with the expected payload."""
    remove_dirs('log/')
    log.set_mode(log.Mode.LOG)
    source = cv2.imread('tests/data/sudoku.png')
    grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    edge_map = cv2.Canny(grayscale, 50, 150, apertureSize=3)
    log.edges(log.Level.ERROR, edge_map)
    entries = get_html('log/cvlog.html').select('.log-list .log-item')
    first = entries[0]
    assert first.select('.log-type')[0].text == 'edges'
    assert first['logdata'] == read_file('tests/data/expected/edges.txt')
def test_log_threshold():
    """Logging a binary threshold creates a 'threshold' log entry with the expected payload."""
    remove_dirs('log/')
    log.set_mode(log.Mode.LOG)
    source = cv2.imread('tests/data/board.jpg')
    grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(grayscale, 127, 255, 0)
    log.threshold(log.Level.ERROR, binary)
    entries = get_html('log/cvlog.html').select('.log-list .log-item')
    first = entries[0]
    assert first.select('.log-type')[0].text == 'threshold'
    assert first['logdata'] == read_file('tests/data/expected/thershold.txt')
def test_log_hough_lines():
    """Logging Hough line detections creates a 'hough lines' log entry with the expected payload."""
    remove_dirs('log/')
    log.set_mode(log.Mode.LOG)
    source = cv2.imread('tests/data/sudoku.png')
    grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    edge_map = cv2.Canny(grayscale, 50, 150, apertureSize=3)
    detected = cv2.HoughLines(edge_map, 1, np.pi / 180, 200)
    log.hough_lines(log.Level.ERROR, detected, source)
    entries = get_html('log/cvlog.html').select('.log-list .log-item')
    first = entries[0]
    assert first.select('.log-type')[0].text == 'hough lines'
    assert first['logdata'] == read_file('tests/data/expected/houghline_img.txt')
def test_log_hough_circles():
    """Logging Hough circle detections creates a 'hough circles' log entry with the expected payload."""
    remove_dirs('log/')
    log.set_mode(log.Mode.LOG)
    source = cv2.imread('tests/data/board.jpg')
    grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    blurred = cv2.medianBlur(grayscale, 5)
    detected = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)
    log.hough_circles(log.Level.ERROR, detected, source)
    entries = get_html('log/cvlog.html').select('.log-list .log-item')
    first = entries[0]
    assert first.select('.log-type')[0].text == 'hough circles'
    assert first['logdata'] == read_file('tests/data/expected/houghcircle_img.txt')
def test_contours():
    """Logging detected contours creates a 'contours' log entry with the expected payload."""
    remove_dirs('log/')
    log.set_mode(log.Mode.LOG)
    source = cv2.imread('tests/data/contour.jpg')
    grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(grayscale, 127, 255, 0)
    found_contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    log.contours(log.Level.ERROR, found_contours, source)
    entries = get_html('log/cvlog.html').select('.log-list .log-item')
    first = entries[0]
    assert first.select('.log-type')[0].text == 'contours'
    assert first['logdata'] == read_file('tests/data/expected/contour.txt')
def test_keypoints():
    """Logging ORB keypoints creates a 'key points' log entry."""
    remove_dirs('log/')
    log.set_mode(log.Mode.LOG)
    source = cv2.imread('tests/data/orange.png')
    grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    detector = cv2.ORB_create()
    keypoints, _ = detector.detectAndCompute(grayscale, None)
    log.keypoints(log.Level.ERROR, keypoints, source, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    entries = get_html('log/cvlog.html').select('.log-list .log-item')
    first = entries[0]
    assert first.select('.log-type')[0].text == 'key points'
    print(first['logdata'])
    # assert log_item[0]['logdata'] == read_file('tests/data/expected/keypoints.txt') #TODO Fix circle ci issue
def test_message():
    """A log call without `msg` has no description; one with `msg` shows that text."""
    remove_dirs('log/')
    log.set_mode(log.Mode.LOG)
    source = cv2.imread('tests/data/contour.jpg')
    grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(grayscale, 127, 255, 0)
    found_contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    message = 'Lorem ipsum dolor sit amet, ne persius reprehendunt mei. Ea summo elitr munere his, et consul offendit recteque sea, quis elit nam ut.'
    log.image(log.Level.ERROR, source)
    log.contours(log.Level.ERROR, found_contours, source, msg=message)
    entries = get_html('log/cvlog.html').select('.log-list .log-item')
    assert entries[0].select('.description') == []
    assert entries[1].select('.description')[0].text == message
| [
"cv2.threshold",
"cv2.medianBlur",
"cvlog.hough_circles",
"cvlog.threshold",
"cvlog.hough_lines",
"numpy.array",
"cvlog.image",
"cv2.HoughLines",
"cv2.ORB_create",
"cv2.cvtColor",
"cvlog.keypoints",
"cv2.findContours",
"cvlog.set_mode",
"cv2.Canny",
"cv2.imread",
"cvlog.contours",
"c... | [((159, 194), 'cv2.imread', 'cv2.imread', (['"""tests/data/orange.png"""'], {}), "('tests/data/orange.png')\n", (169, 194), False, 'import cv2\n'), ((199, 225), 'cvlog.set_mode', 'log.set_mode', (['log.Mode.LOG'], {}), '(log.Mode.LOG)\n', (211, 225), True, 'import cvlog as log\n'), ((230, 261), 'cvlog.image', 'log.image', (['log.Level.ERROR', 'img'], {}), '(log.Level.ERROR, img)\n', (239, 261), True, 'import cvlog as log\n'), ((530, 565), 'cv2.imread', 'cv2.imread', (['"""tests/data/sudoku.png"""'], {}), "('tests/data/sudoku.png')\n", (540, 565), False, 'import cv2\n'), ((570, 596), 'cvlog.set_mode', 'log.set_mode', (['log.Mode.LOG'], {}), '(log.Mode.LOG)\n', (582, 596), True, 'import cvlog as log\n'), ((608, 645), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (620, 645), False, 'import cv2\n'), ((658, 698), 'cv2.Canny', 'cv2.Canny', (['gray', '(50)', '(150)'], {'apertureSize': '(3)'}), '(gray, 50, 150, apertureSize=3)\n', (667, 698), False, 'import cv2\n'), ((703, 736), 'cvlog.edges', 'log.edges', (['log.Level.ERROR', 'edges'], {}), '(log.Level.ERROR, edges)\n', (712, 736), True, 'import cvlog as log\n'), ((1009, 1043), 'cv2.imread', 'cv2.imread', (['"""tests/data/board.jpg"""'], {}), "('tests/data/board.jpg')\n", (1019, 1043), False, 'import cv2\n'), ((1048, 1074), 'cvlog.set_mode', 'log.set_mode', (['log.Mode.LOG'], {}), '(log.Mode.LOG)\n', (1060, 1074), True, 'import cvlog as log\n'), ((1088, 1125), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1100, 1125), False, 'import cv2\n'), ((1144, 1178), 'cv2.threshold', 'cv2.threshold', (['imgray', '(127)', '(255)', '(0)'], {}), '(imgray, 127, 255, 0)\n', (1157, 1178), False, 'import cv2\n'), ((1183, 1221), 'cvlog.threshold', 'log.threshold', (['log.Level.ERROR', 'thresh'], {}), '(log.Level.ERROR, thresh)\n', (1196, 1221), True, 'import cvlog as log\n'), ((1504, 1539), 'cv2.imread', 'cv2.imread', 
(['"""tests/data/sudoku.png"""'], {}), "('tests/data/sudoku.png')\n", (1514, 1539), False, 'import cv2\n'), ((1544, 1570), 'cvlog.set_mode', 'log.set_mode', (['log.Mode.LOG'], {}), '(log.Mode.LOG)\n', (1556, 1570), True, 'import cvlog as log\n'), ((1582, 1619), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1594, 1619), False, 'import cv2\n'), ((1632, 1672), 'cv2.Canny', 'cv2.Canny', (['gray', '(50)', '(150)'], {'apertureSize': '(3)'}), '(gray, 50, 150, apertureSize=3)\n', (1641, 1672), False, 'import cv2\n'), ((1685, 1727), 'cv2.HoughLines', 'cv2.HoughLines', (['edges', '(1)', '(np.pi / 180)', '(200)'], {}), '(edges, 1, np.pi / 180, 200)\n', (1699, 1727), False, 'import cv2\n'), ((1733, 1777), 'cvlog.hough_lines', 'log.hough_lines', (['log.Level.ERROR', 'lines', 'img'], {}), '(log.Level.ERROR, lines, img)\n', (1748, 1777), True, 'import cvlog as log\n'), ((2068, 2102), 'cv2.imread', 'cv2.imread', (['"""tests/data/board.jpg"""'], {}), "('tests/data/board.jpg')\n", (2078, 2102), False, 'import cv2\n'), ((2107, 2133), 'cvlog.set_mode', 'log.set_mode', (['log.Mode.LOG'], {}), '(log.Mode.LOG)\n', (2119, 2133), True, 'import cvlog as log\n'), ((2145, 2182), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2157, 2182), False, 'import cv2\n'), ((2194, 2217), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(5)'], {}), '(gray, 5)\n', (2208, 2217), False, 'import cv2\n'), ((2317, 2365), 'cvlog.hough_circles', 'log.hough_circles', (['log.Level.ERROR', 'circles', 'img'], {}), '(log.Level.ERROR, circles, img)\n', (2334, 2365), True, 'import cvlog as log\n'), ((2651, 2687), 'cv2.imread', 'cv2.imread', (['"""tests/data/contour.jpg"""'], {}), "('tests/data/contour.jpg')\n", (2661, 2687), False, 'import cv2\n'), ((2692, 2718), 'cvlog.set_mode', 'log.set_mode', (['log.Mode.LOG'], {}), '(log.Mode.LOG)\n', (2704, 2718), True, 'import cvlog as log\n'), ((2732, 2769), 'cv2.cvtColor', 
'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2744, 2769), False, 'import cv2\n'), ((2788, 2822), 'cv2.threshold', 'cv2.threshold', (['imgray', '(127)', '(255)', '(0)'], {}), '(imgray, 127, 255, 0)\n', (2801, 2822), False, 'import cv2\n'), ((2849, 2913), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (2865, 2913), False, 'import cv2\n'), ((2918, 2962), 'cvlog.contours', 'log.contours', (['log.Level.ERROR', 'contours', 'img'], {}), '(log.Level.ERROR, contours, img)\n', (2930, 2962), True, 'import cvlog as log\n'), ((3236, 3271), 'cv2.imread', 'cv2.imread', (['"""tests/data/orange.png"""'], {}), "('tests/data/orange.png')\n", (3246, 3271), False, 'import cv2\n'), ((3276, 3302), 'cvlog.set_mode', 'log.set_mode', (['log.Mode.LOG'], {}), '(log.Mode.LOG)\n', (3288, 3302), True, 'import cvlog as log\n'), ((3318, 3355), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3330, 3355), False, 'import cv2\n'), ((3366, 3382), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (3380, 3382), False, 'import cv2\n'), ((3436, 3530), 'cvlog.keypoints', 'log.keypoints', (['log.Level.ERROR', 'kp', 'img'], {'flags': 'cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS'}), '(log.Level.ERROR, kp, img, flags=cv2.\n DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n', (3449, 3530), True, 'import cvlog as log\n'), ((3863, 3899), 'cv2.imread', 'cv2.imread', (['"""tests/data/contour.jpg"""'], {}), "('tests/data/contour.jpg')\n", (3873, 3899), False, 'import cv2\n'), ((3904, 3930), 'cvlog.set_mode', 'log.set_mode', (['log.Mode.LOG'], {}), '(log.Mode.LOG)\n', (3916, 3930), True, 'import cvlog as log\n'), ((3944, 3981), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3956, 3981), False, 'import cv2\n'), ((4000, 4034), 'cv2.threshold', 'cv2.threshold', (['imgray', 
'(127)', '(255)', '(0)'], {}), '(imgray, 127, 255, 0)\n', (4013, 4034), False, 'import cv2\n'), ((4061, 4125), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (4077, 4125), False, 'import cv2\n'), ((4281, 4312), 'cvlog.image', 'log.image', (['log.Level.ERROR', 'img'], {}), '(log.Level.ERROR, img)\n', (4290, 4312), True, 'import cvlog as log\n'), ((4317, 4374), 'cvlog.contours', 'log.contours', (['log.Level.ERROR', 'contours', 'img'], {'msg': 'message'}), '(log.Level.ERROR, contours, img, msg=message)\n', (4329, 4374), True, 'import cvlog as log\n'), ((2282, 2294), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2290, 2294), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
##############export checkpoint file into air, mindir models#################
python export.py
"""
import os
import numpy as np
import mindspore as ms
from mindspore import Tensor, export, context
from src.utils import init_net
from model_utils.config import config
from model_utils.device_adapter import get_device_id
from model_utils.moxing_adapter import moxing_wrapper
# Configure MindSpore to build the network in graph mode on the device
# requested by the configuration (CPU / GPU / Ascend).
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if config.device_target == "Ascend":
    # Ascend targets additionally need an explicit device id.
    context.set_context(device_id=get_device_id())
# Largest high-resolution (output) image size the exported model must support;
# the low-resolution input size is derived from this and the scale factor.
MAX_HR_SIZE = 2040
@moxing_wrapper()
def run_export():
    """
    Export the pre-trained checkpoint named in ``config.pre_trained`` to a
    MINDIR model file under ``config.output_path``.

    The exported model accepts a fixed input of shape
    ``[1, n_colors, max_lr_size, max_lr_size]`` where ``max_lr_size`` is the
    largest low-resolution size whose upscaled output fits in ``MAX_HR_SIZE``.

    Raises
    ------
    RuntimeError
        If ``config.pre_trained`` is not set.
    """
    print(config)
    cfg = config
    if cfg.pre_trained is None:
        raise RuntimeError('config.pre_trained is None.')
    net = init_net(cfg)
    # The network upscales by cfg.scale, so the largest LR input that still
    # yields an HR output within MAX_HR_SIZE is MAX_HR_SIZE // cfg.scale.
    max_lr_size = MAX_HR_SIZE // cfg.scale
    input_arr = Tensor(np.ones([1, cfg.n_colors, max_lr_size, max_lr_size]), ms.float32)
    # Derive the output file name from the checkpoint name plus the input size.
    file_name = os.path.splitext(os.path.basename(cfg.pre_trained))[0]
    file_name = file_name + f"_InputSize{max_lr_size}"
    file_path = os.path.join(cfg.output_path, file_name)
    file_format = 'MINDIR'
    # Generator expression avoids materializing an intermediate list.
    num_params = sum(param.size for param in net.parameters_dict().values())
    export(net, input_arr, file_name=file_path, file_format=file_format)
    # Plain string: the previous f-string had no placeholders (ruff F541).
    print("export success", flush=True)
    print(f"{cfg.pre_trained} -> {file_path}.{file_format.lower()}, net parameters = {num_params/1000000:>0.4}M",
          flush=True)
# Script entry point: run the export when invoked directly.
if __name__ == '__main__':
    run_export()
| [
"model_utils.moxing_adapter.moxing_wrapper",
"model_utils.device_adapter.get_device_id",
"numpy.ones",
"mindspore.export",
"mindspore.context.set_context",
"os.path.join",
"src.utils.init_net",
"os.path.basename"
] | [((1048, 1133), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': 'config.device_target'}), '(mode=context.GRAPH_MODE, device_target=config.device_target\n )\n', (1067, 1133), False, 'from mindspore import Tensor, export, context\n'), ((1240, 1256), 'model_utils.moxing_adapter.moxing_wrapper', 'moxing_wrapper', ([], {}), '()\n', (1254, 1256), False, 'from model_utils.moxing_adapter import moxing_wrapper\n'), ((1443, 1456), 'src.utils.init_net', 'init_net', (['cfg'], {}), '(cfg)\n', (1451, 1456), False, 'from src.utils import init_net\n'), ((1731, 1771), 'os.path.join', 'os.path.join', (['cfg.output_path', 'file_name'], {}), '(cfg.output_path, file_name)\n', (1743, 1771), False, 'import os\n'), ((1883, 1951), 'mindspore.export', 'export', (['net', 'input_arr'], {'file_name': 'file_path', 'file_format': 'file_format'}), '(net, input_arr, file_name=file_path, file_format=file_format)\n', (1889, 1951), False, 'from mindspore import Tensor, export, context\n'), ((1523, 1575), 'numpy.ones', 'np.ones', (['[1, cfg.n_colors, max_lr_size, max_lr_size]'], {}), '([1, cfg.n_colors, max_lr_size, max_lr_size])\n', (1530, 1575), True, 'import numpy as np\n'), ((1200, 1215), 'model_utils.device_adapter.get_device_id', 'get_device_id', ([], {}), '()\n', (1213, 1215), False, 'from model_utils.device_adapter import get_device_id\n'), ((1622, 1655), 'os.path.basename', 'os.path.basename', (['cfg.pre_trained'], {}), '(cfg.pre_trained)\n', (1638, 1655), False, 'import os\n')] |
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, <NAME>
# All rights reserved.
# Complete license can be found in the LICENSE file.
from math import pi, log
import numpy as np
from mvc.observers import ListObserver
from mvc.models.properties import (
StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty,
ObserveMixin, ListProperty, BoolProperty
)
from pyxrd.data import settings
from pyxrd.generic.io import storables, Storable
from pyxrd.generic.models import ExperimentalLine, CalculatedLine, DataModel
from pyxrd.generic.utils import not_none
from pyxrd.generic.models.lines import PyXRDLine
from pyxrd.calculations.peak_detection import peakdetect
from pyxrd.calculations.data_objects import SpecimenData
from pyxrd.goniometer.models import Goniometer
from pyxrd.file_parsers.xrd_parsers import xrd_parsers
from pyxrd.file_parsers.exc_parsers import exc_parsers
from .markers import Marker
from .statistics import Statistics
@storables.register()
class Specimen(DataModel, Storable):
# MODEL INTEL:
class Meta(DataModel.Meta):
store_id = "Specimen"
export_filters = xrd_parsers.get_export_file_filters()
excl_filters = exc_parsers.get_import_file_filters()
_data_object = None
@property
def data_object(self):
self._data_object.goniometer = self.goniometer.data_object
self._data_object.range_theta = self.__get_range_theta()
self._data_object.selected_range = self.get_exclusion_selector()
self._data_object.z_list = self.get_z_list()
try:
self._data_object.observed_intensity = np.copy(self.experimental_pattern.data_y)
except IndexError:
self._data_object.observed_intensity = np.array([], dtype=float)
return self._data_object
def get_z_list(self):
return list(self.experimental_pattern.z_data)
project = property(DataModel.parent.fget, DataModel.parent.fset)
# PROPERTIES:
#: The sample name
sample_name = StringProperty(
default="", text="Sample",
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed",
mix_with=(SignalMixin,)
)
#: The sample name
name = StringProperty(
default="", text="Name",
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed",
mix_with=(SignalMixin,)
)
@StringProperty(
default="", text="Label",
visible=False, persistent=False, tabular=True,
mix_with=(ReadOnlyMixin,)
)
def label(self):
if self.display_stats_in_lbl and (self.project is not None and self.project.layout_mode == "FULL"):
label = self.sample_name
label += "\nRp = %.1f%%" % not_none(self.statistics.Rp, 0.0)
label += "\nRwp = %.1f%%" % not_none(self.statistics.Rwp, 0.0)
return label
else:
return self.sample_name
display_calculated = BoolProperty(
default=True, text="Display calculated diffractogram",
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed",
mix_with=(SignalMixin,)
)
display_experimental = BoolProperty(
default=True, text="Display experimental diffractogram",
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed",
mix_with=(SignalMixin,)
)
display_vshift = FloatProperty(
default=0.0, text="Vertical shift of the plot",
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed", widget_type="spin",
mix_with=(SignalMixin,)
)
display_vscale = FloatProperty(
default=0.0, text="Vertical scale of the plot",
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed", widget_type="spin",
mix_with=(SignalMixin,)
)
display_phases = BoolProperty(
default=True, text="Display phases seperately",
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed",
mix_with=(SignalMixin,)
)
display_stats_in_lbl = BoolProperty(
default=True, text="Display Rp in label",
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed",
mix_with=(SignalMixin,)
)
display_residuals = BoolProperty(
default=True, text="Display residual patterns",
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed",
mix_with=(SignalMixin,)
)
display_residual_scale = FloatProperty(
default=1.0, text="Residual pattern scale", minimum=0.0,
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed", widget_type="spin",
mix_with=(SignalMixin,)
)
display_derivatives = BoolProperty(
default=False, text="Display derivative patterns",
visible=True, persistent=True, tabular=True,
signal_name="visuals_changed",
mix_with=(SignalMixin,)
)
#: A :class:`~pyxrd.generic.models.lines.CalculatedLine` instance
calculated_pattern = LabeledProperty(
default=None, text="Calculated diffractogram",
visible=True, persistent=True, tabular=True,
signal_name="data_changed", widget_type="xy_list_view",
mix_with=(SignalMixin, ObserveMixin,)
)
#: A :class:`~pyxrd.generic.models.lines.ExperimentalLine` instance
experimental_pattern = LabeledProperty(
default=None, text="Experimental diffractogram",
visible=True, persistent=True, tabular=True,
signal_name="data_changed", widget_type="xy_list_view",
mix_with=(SignalMixin, ObserveMixin,)
)
#: A list of 2-theta ranges to exclude for the calculation of the Rp factor
exclusion_ranges = LabeledProperty(
default=None, text="Excluded ranges",
visible=True, persistent=True, tabular=True,
signal_name="data_changed", widget_type="xy_list_view",
mix_with=(SignalMixin, ObserveMixin)
)
#: A :class:`~pyxrd.goniometer.models.Goniometer` instance
goniometer = LabeledProperty(
default=None, text="Goniometer",
visible=True, persistent=True, tabular=True,
signal_name="data_changed",
mix_with=(SignalMixin, ObserveMixin,)
)
#: A :class:`~pyxrd.specimen.models.Statistics` instance
statistics = LabeledProperty(
default=None, text="Markers",
visible=False, persistent=False, tabular=True,
)
#: A list :class:`~pyxrd.specimen.models.Marker` instances
markers = ListProperty(
default=None, text="Markers", data_type=Marker,
visible=False, persistent=True, tabular=True,
signal_name="visuals_changed", widget_type="object_list_view",
mix_with=(SignalMixin,)
)
@property
def max_display_y(self):
"""
The maximum intensity or z-value (display y axis)
of the current profile (both calculated and observed)
"""
_max = 0.0
if self.experimental_pattern is not None:
_max = max(_max, np.max(self.experimental_pattern.max_display_y))
if self.calculated_pattern is not None:
_max = max(_max, np.max(self.calculated_pattern.max_display_y))
return _max
# ------------------------------------------------------------
# Initialisation and other internals
# ------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
Valid keyword arguments for a Specimen are:
name: the name of the specimen
sample_name: the sample name of the specimen
calculated_pattern: the calculated pattern
experimental_pattern: the experimental pattern
exclusion_ranges: the exclusion ranges XYListStore
goniometer: the goniometer used for recording data
markers: the specimen's markers
display_vshift: the patterns vertical shift from its default position
display_vscale: the patterns vertical scale (default is 1.0)
display_calculated: whether or not to show the calculated pattern
display_experimental: whether or not to show the experimental pattern
display_residuals: whether or not to show the residuals
display_derivatives: whether or not to show the 1st derivative patterns
display_phases: whether or not to show the separate phase patterns
display_stats_in_lbl: whether or not to display the Rp values
in the pattern label
"""
my_kwargs = self.pop_kwargs(kwargs,
"data_name", "data_sample", "data_sample_length",
"data_calculated_pattern", "data_experimental_pattern",
"calc_color", "calc_lw", "inherit_calc_color", "inherit_calc_lw",
"exp_color", "exp_lw", "inherit_exp_color", "inherit_exp_lw",
"project_goniometer", "data_markers", "bg_shift", "abs_scale",
"exp_cap_value", "sample_length", "absorption", "sample_z_dev",
*[prop.label for prop in Specimen.Meta.get_local_persistent_properties()]
)
super(Specimen, self).__init__(*args, **kwargs)
kwargs = my_kwargs
self._data_object = SpecimenData()
with self.visuals_changed.hold_and_emit():
with self.data_changed.hold_and_emit():
self.name = self.get_kwarg(kwargs, "", "name", "data_name")
sample_name = self.get_kwarg(kwargs, "", "sample_name", "data_sample")
if isinstance(sample_name, bytes):
sample_name = sample_name.decode("utf-8", "ignore")
self.sample_name = sample_name
calc_pattern_old_kwargs = {}
for kw in ("calc_color", "calc_lw", "inherit_calc_color", "inherit_calc_lw"):
if kw in kwargs:
calc_pattern_old_kwargs[kw.replace("calc_", "")] = kwargs.pop(kw)
self.calculated_pattern = self.parse_init_arg(
self.get_kwarg(kwargs, None, "calculated_pattern", "data_calculated_pattern"),
CalculatedLine,
child=True, default_is_class=True,
label="Calculated Profile",
parent=self,
**calc_pattern_old_kwargs
)
exp_pattern_old_kwargs = {}
for kw in ("exp_color", "exp_lw", "inherit_exp_color", "inherit_exp_lw"):
if kw in kwargs:
exp_pattern_old_kwargs[kw.replace("exp_", "")] = kwargs.pop(kw)
self.experimental_pattern = self.parse_init_arg(
self.get_kwarg(kwargs, None, "experimental_pattern", "data_experimental_pattern"),
ExperimentalLine,
child=True, default_is_class=True,
label="Experimental Profile",
parent=self,
**exp_pattern_old_kwargs
)
self.exclusion_ranges = PyXRDLine(data=self.get_kwarg(kwargs, None, "exclusion_ranges"), parent=self)
# Extract old kwargs if they are there:
gonio_kwargs = {}
sample_length = self.get_kwarg(kwargs, None, "sample_length", "data_sample_length")
if sample_length is not None:
gonio_kwargs["sample_length"] = float(sample_length)
absorption = self.get_kwarg(kwargs, None, "absorption")
if absorption is not None: # assuming a surface density of at least 20 mg/cm²:
gonio_kwargs["absorption"] = float(absorption) / 0.02
# Initialize goniometer (with optional old kwargs):
self.goniometer = self.parse_init_arg(
self.get_kwarg(kwargs, None, "goniometer", "project_goniometer"),
Goniometer, child=True, default_is_class=True,
parent=self, **gonio_kwargs
)
self.markers = self.get_list(kwargs, None, "markers", "data_markers", parent=self)
for marker in self.markers:
self.observe_model(marker)
self._specimens_observer = ListObserver(
self.on_marker_inserted,
self.on_marker_removed,
prop_name="markers",
model=self
)
self.display_vshift = float(self.get_kwarg(kwargs, 0.0, "display_vshift"))
self.display_vscale = float(self.get_kwarg(kwargs, 1.0, "display_vscale"))
self.display_calculated = bool(self.get_kwarg(kwargs, True, "display_calculated"))
self.display_experimental = bool(self.get_kwarg(kwargs, True, "display_experimental"))
self.display_residuals = bool(self.get_kwarg(kwargs, True, "display_residuals"))
self.display_residual_scale = float(self.get_kwarg(kwargs, 1.0, "display_residual_scale"))
self.display_derivatives = bool(self.get_kwarg(kwargs, False, "display_derivatives"))
self.display_phases = bool(self.get_kwarg(kwargs, False, "display_phases"))
self.display_stats_in_lbl = bool(self.get_kwarg(kwargs, True, "display_stats_in_lbl"))
self.statistics = Statistics(parent=self)
pass # end of with
pass # end of with
pass # end of __init__
def __str__(self):
return "<Specimen %s(%s)>" % (self.name, repr(self))
# ------------------------------------------------------------
# Notifications of observable properties
# ------------------------------------------------------------
@DataModel.observe("data_changed", signal=True)
def notify_data_changed(self, model, prop_name, info):
if model == self.calculated_pattern:
self.visuals_changed.emit() # don't propagate this as data_changed
else:
self.data_changed.emit() # propagate signal
@DataModel.observe("visuals_changed", signal=True)
def notify_visuals_changed(self, model, prop_name, info):
self.visuals_changed.emit() # propagate signal
def on_marker_removed(self, item):
with self.visuals_changed.hold_and_emit():
self.relieve_model(item)
item.parent = None
def on_marker_inserted(self, item):
with self.visuals_changed.hold_and_emit():
self.observe_model(item)
item.parent = self
# ------------------------------------------------------------
# Input/Output stuff
# ------------------------------------------------------------
@staticmethod
def from_experimental_data(filename, parent, parser=xrd_parsers._group_parser, load_as_insitu=False):
"""
Returns a list of new :class:`~.specimen.models.Specimen`'s loaded
from `filename`, setting their parent to `parent` using the given
parser. If the load_as_insitu flag is set to true,
"""
specimens = list()
xrdfiles = parser.parse(filename)
if len(xrdfiles):
if getattr(xrdfiles[0], "relative_humidity_data", None) is not None: # we have relative humidity data
specimen = None
# Setup list variables:
x_data = None
y_datas = []
rh_datas = []
for xrdfile in xrdfiles:
# Get data we need:
name, sample, xy_data, rh_data = (
xrdfile.filename, xrdfile.name,
xrdfile.data, xrdfile.relative_humidity_data
)
# Transform into numpy array for column selection
xy_data = np.array(xy_data)
rh_data = np.array(rh_data)
if specimen is None:
specimen = Specimen(parent=parent, name=name, sample_name=sample)
specimen.goniometer.reset_from_file(xrdfile.create_gon_file())
# Extract the 2-theta positions once:
x_data = np.copy(xy_data[:,0])
# Add a new sub-pattern:
y_datas.append(np.copy(xy_data[:,1]))
# Store the average RH for this pattern:
rh_datas.append(np.average(rh_data))
specimen.experimental_pattern.load_data_from_generator(zip(x_data, np.asanyarray(y_datas).transpose()), clear=True)
specimen.experimental_pattern.y_names = ["%.1f" % f for f in rh_datas]
specimen.experimental_pattern.z_data = rh_datas
specimens.append(specimen)
else: # regular (might be multi-pattern) file
for xrdfile in xrdfiles:
name, sample, generator = xrdfile.filename, xrdfile.name, xrdfile.data
specimen = Specimen(parent=parent, name=name, sample_name=sample)
# TODO FIXME:
specimen.experimental_pattern.load_data_from_generator(generator, clear=True)
specimen.goniometer.reset_from_file(xrdfile.create_gon_file())
specimens.append(specimen)
return specimens
def json_properties(self):
props = Storable.json_properties(self)
props["exclusion_ranges"] = self.exclusion_ranges._serialize_data()
return props
def get_export_meta_data(self):
""" Returns a dictionary with common meta-data used in export functions
for experimental or calculated data """
return dict(
sample=self.label + " " + self.sample_name,
wavelength=self.goniometer.wavelength,
radius=self.goniometer.radius,
divergence=self.goniometer.divergence,
soller1=self.goniometer.soller1,
soller2=self.goniometer.soller2,
)
# ------------------------------------------------------------
# Methods & Functions
# ------------------------------------------------------------
def clear_markers(self):
with self.visuals_changed.hold():
for marker in list(self.markers)[::-1]:
self.markers.remove(marker)
def auto_add_peaks(self, tmodel):
"""
Automagically add peak markers
*tmodel* a :class:`~specimen.models.ThresholdSelector` model
"""
threshold = tmodel.sel_threshold
base = 1 if (tmodel.pattern == "exp") else 2
data_x, data_y = tmodel.get_xy()
maxtab, mintab = peakdetect(data_y, data_x, 5, threshold) # @UnusedVariable
mpositions = [marker.position for marker in self.markers]
with self.visuals_changed.hold():
i = 1
for x, y in maxtab: # @UnusedVariable
if not x in mpositions:
nm = self.goniometer.get_nm_from_2t(x) if x != 0 else 0
new_marker = Marker(label="%%.%df" % (3 + min(int(log(nm, 10)), 0)) % nm, parent=self, position=x, base=base)
self.markers.append(new_marker)
i += 1
def get_exclusion_selector(self):
"""
Get the numpy selector array for non-excluded data
:rtype: a numpy ndarray
"""
x = self.__get_range_theta() * 360.0 / pi # convert to degrees
selector = np.ones(x.shape, dtype=bool)
data = np.sort(np.asarray(self.exclusion_ranges.get_xy_data()), axis=0)
for x0, x1 in zip(*data):
new_selector = ((x < x0) | (x > x1))
selector = selector & new_selector
return selector
def get_exclusion_xy(self):
"""
Get an numpy array containing only non-excluded data X and Y data
:rtype: a tuple containing 4 numpy ndarray's: the experimental X and Y
data and the calculated X and Y data
"""
ex, ey = self.experimental_pattern.get_xy_data()
cx, cy = self.calculated_pattern.get_xy_data()
selector = self.get_exclusion_selector(ex)
return ex[selector], ey[selector], cx[selector], cy[selector]
# ------------------------------------------------------------
# Draggable mix-in hook:
# ------------------------------------------------------------
def on_pattern_dragged(self, delta_y, button=1):
if button == 1:
self.display_vshift += delta_y
elif button == 3:
self.display_vscale += delta_y
elif button == 2:
self.project.display_plot_offset += delta_y
pass
def update_visuals(self, phases):
"""
Update visual representation of phase patterns (if any)
"""
if phases is not None:
self.calculated_pattern.y_names = [
phase.name if phase is not None else "" for phase in phases
]
self.calculated_pattern.phase_colors = [
phase.display_color if phase is not None else "#FF00FF" for phase in phases
]
# ------------------------------------------------------------
# Intensity calculations:
# ------------------------------------------------------------
def update_pattern(self, total_intensity, phase_intensities, phases):
    """
    Update calculated patterns using the provided total and phase intensities

    :param total_intensity: per-z-index total intensity curves, indexable
        by z_index; each entry spans the pattern's x range
    :param phase_intensities: per-phase intensity array; judging from the
        transpose below it is indexed as [phase, z_index, point] --
        TODO confirm against the caller
    :param phases: list of phase objects (may contain None entries);
        an empty list clears the calculated pattern
    """
    if len(phases) == 0:
        # Nothing to display: wipe any previously calculated pattern
        self.calculated_pattern.clear()
    else:
        maxZ = len(self.get_z_list())
        # Column layout: one total-intensity column per z-index first,
        # then len(phases) separate phase columns per z-index
        new_data = np.zeros((phase_intensities.shape[-1], maxZ + maxZ*len(phases)))
        for z_index in range(maxZ):
            # Set the total intensity for this z_index:
            new_data[:, z_index] = total_intensity[z_index]
            # Calculate phase intensity offsets:
            phase_start_index = maxZ + z_index * len(phases)
            phase_end_index = phase_start_index + len(phases)
            # Set phase intensities for this z_index:
            new_data[:,phase_start_index:phase_end_index] = phase_intensities[:,z_index,:].transpose()
        # Store in pattern (x axis converted from radians to degrees 2-theta):
        self.calculated_pattern.set_data(
            self.__get_range_theta() * 360. / pi,
            new_data
        )
        self.update_visuals(phases)
    if settings.GUI_MODE:
        # Statistics are only refreshed when a GUI is attached
        self.statistics.update_statistics(derived=self.display_derivatives)
def convert_to_fixed(self):
    """
    Convert the experimental data from automatic divergence slits (ADS)
    to fixed slits, in place. The goniometer's `has_ads` flag is
    disregarded, but its other settings are used.
    """
    theta_range = self.__get_range_theta()
    factor = self.goniometer.get_ADS_to_fixed_correction(theta_range)
    self.experimental_pattern.apply_correction(factor)
def convert_to_ads(self):
    """
    Convert the experimental data from fixed slits to automatic divergence
    slits (ADS), in place -- the inverse of `convert_to_fixed`. The
    goniometer's `has_ads` flag is disregarded, but its other settings
    are used.
    """
    theta_range = self.__get_range_theta()
    factor = self.goniometer.get_ADS_to_fixed_correction(theta_range)
    self.experimental_pattern.apply_correction(1.0 / factor)
def __get_range_theta(self):
    """
    Return the theta range (in radians) over which patterns are computed.

    Uses half the recorded 2-theta values (data_x) converted to radians
    when experimental data is present; otherwise falls back to the
    goniometer's default theta range.
    """
    if len(self.experimental_pattern) > 1:
        return np.radians(self.experimental_pattern.data_x * 0.5)
    return self.goniometer.get_default_theta_range()
def __repr__(self):
    """Return a debug-friendly representation, e.g. ``Specimen(name='...')``."""
    template = "Specimen(name='%s')"
    return template % self.name
pass # end of class
| [
"numpy.radians",
"mvc.models.properties.BoolProperty",
"pyxrd.generic.utils.not_none",
"numpy.asanyarray",
"math.log",
"numpy.array",
"mvc.models.properties.StringProperty",
"pyxrd.calculations.peak_detection.peakdetect",
"mvc.observers.ListObserver",
"numpy.max",
"mvc.models.properties.LabeledP... | [((982, 1002), 'pyxrd.generic.io.storables.register', 'storables.register', ([], {}), '()\n', (1000, 1002), False, 'from pyxrd.generic.io import storables, Storable\n'), ((2025, 2171), 'mvc.models.properties.StringProperty', 'StringProperty', ([], {'default': '""""""', 'text': '"""Sample"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'mix_with': '(SignalMixin,)'}), "(default='', text='Sample', visible=True, persistent=True,\n tabular=True, signal_name='visuals_changed', mix_with=(SignalMixin,))\n", (2039, 2171), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((2241, 2385), 'mvc.models.properties.StringProperty', 'StringProperty', ([], {'default': '""""""', 'text': '"""Name"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'mix_with': '(SignalMixin,)'}), "(default='', text='Name', visible=True, persistent=True,\n tabular=True, signal_name='visuals_changed', mix_with=(SignalMixin,))\n", (2255, 2385), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((2427, 2545), 'mvc.models.properties.StringProperty', 'StringProperty', ([], {'default': '""""""', 'text': '"""Label"""', 'visible': '(False)', 'persistent': '(False)', 'tabular': '(True)', 'mix_with': '(ReadOnlyMixin,)'}), "(default='', text='Label', visible=False, persistent=False,\n tabular=True, mix_with=(ReadOnlyMixin,))\n", (2441, 2545), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((2987, 3164), 'mvc.models.properties.BoolProperty', 'BoolProperty', ([], {'default': '(True)', 'text': '"""Display calculated 
diffractogram"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'mix_with': '(SignalMixin,)'}), "(default=True, text='Display calculated diffractogram', visible\n =True, persistent=True, tabular=True, signal_name='visuals_changed',\n mix_with=(SignalMixin,))\n", (2999, 3164), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((3222, 3401), 'mvc.models.properties.BoolProperty', 'BoolProperty', ([], {'default': '(True)', 'text': '"""Display experimental diffractogram"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'mix_with': '(SignalMixin,)'}), "(default=True, text='Display experimental diffractogram',\n visible=True, persistent=True, tabular=True, signal_name=\n 'visuals_changed', mix_with=(SignalMixin,))\n", (3234, 3401), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((3453, 3643), 'mvc.models.properties.FloatProperty', 'FloatProperty', ([], {'default': '(0.0)', 'text': '"""Vertical shift of the plot"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'widget_type': '"""spin"""', 'mix_with': '(SignalMixin,)'}), "(default=0.0, text='Vertical shift of the plot', visible=True,\n persistent=True, tabular=True, signal_name='visuals_changed',\n widget_type='spin', mix_with=(SignalMixin,))\n", (3466, 3643), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((3696, 3886), 'mvc.models.properties.FloatProperty', 'FloatProperty', ([], {'default': '(0.0)', 'text': '"""Vertical scale of the plot"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': 
'(True)', 'signal_name': '"""visuals_changed"""', 'widget_type': '"""spin"""', 'mix_with': '(SignalMixin,)'}), "(default=0.0, text='Vertical scale of the plot', visible=True,\n persistent=True, tabular=True, signal_name='visuals_changed',\n widget_type='spin', mix_with=(SignalMixin,))\n", (3709, 3886), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((3939, 4109), 'mvc.models.properties.BoolProperty', 'BoolProperty', ([], {'default': '(True)', 'text': '"""Display phases seperately"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'mix_with': '(SignalMixin,)'}), "(default=True, text='Display phases seperately', visible=True,\n persistent=True, tabular=True, signal_name='visuals_changed', mix_with=\n (SignalMixin,))\n", (3951, 4109), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((4167, 4331), 'mvc.models.properties.BoolProperty', 'BoolProperty', ([], {'default': '(True)', 'text': '"""Display Rp in label"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'mix_with': '(SignalMixin,)'}), "(default=True, text='Display Rp in label', visible=True,\n persistent=True, tabular=True, signal_name='visuals_changed', mix_with=\n (SignalMixin,))\n", (4179, 4331), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((4386, 4556), 'mvc.models.properties.BoolProperty', 'BoolProperty', ([], {'default': '(True)', 'text': '"""Display residual patterns"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'mix_with': '(SignalMixin,)'}), "(default=True, text='Display 
residual patterns', visible=True,\n persistent=True, tabular=True, signal_name='visuals_changed', mix_with=\n (SignalMixin,))\n", (4398, 4556), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((4616, 4816), 'mvc.models.properties.FloatProperty', 'FloatProperty', ([], {'default': '(1.0)', 'text': '"""Residual pattern scale"""', 'minimum': '(0.0)', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'widget_type': '"""spin"""', 'mix_with': '(SignalMixin,)'}), "(default=1.0, text='Residual pattern scale', minimum=0.0,\n visible=True, persistent=True, tabular=True, signal_name=\n 'visuals_changed', widget_type='spin', mix_with=(SignalMixin,))\n", (4629, 4816), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((4873, 5046), 'mvc.models.properties.BoolProperty', 'BoolProperty', ([], {'default': '(False)', 'text': '"""Display derivative patterns"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'mix_with': '(SignalMixin,)'}), "(default=False, text='Display derivative patterns', visible=\n True, persistent=True, tabular=True, signal_name='visuals_changed',\n mix_with=(SignalMixin,))\n", (4885, 5046), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((5172, 5382), 'mvc.models.properties.LabeledProperty', 'LabeledProperty', ([], {'default': 'None', 'text': '"""Calculated diffractogram"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""data_changed"""', 'widget_type': '"""xy_list_view"""', 'mix_with': '(SignalMixin, ObserveMixin)'}), "(default=None, text='Calculated diffractogram', 
visible=True,\n persistent=True, tabular=True, signal_name='data_changed', widget_type=\n 'xy_list_view', mix_with=(SignalMixin, ObserveMixin))\n", (5187, 5382), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((5513, 5725), 'mvc.models.properties.LabeledProperty', 'LabeledProperty', ([], {'default': 'None', 'text': '"""Experimental diffractogram"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""data_changed"""', 'widget_type': '"""xy_list_view"""', 'mix_with': '(SignalMixin, ObserveMixin)'}), "(default=None, text='Experimental diffractogram', visible=\n True, persistent=True, tabular=True, signal_name='data_changed',\n widget_type='xy_list_view', mix_with=(SignalMixin, ObserveMixin))\n", (5528, 5725), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((5860, 6061), 'mvc.models.properties.LabeledProperty', 'LabeledProperty', ([], {'default': 'None', 'text': '"""Excluded ranges"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""data_changed"""', 'widget_type': '"""xy_list_view"""', 'mix_with': '(SignalMixin, ObserveMixin)'}), "(default=None, text='Excluded ranges', visible=True,\n persistent=True, tabular=True, signal_name='data_changed', widget_type=\n 'xy_list_view', mix_with=(SignalMixin, ObserveMixin))\n", (5875, 6061), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((6172, 6340), 'mvc.models.properties.LabeledProperty', 'LabeledProperty', ([], {'default': 'None', 'text': '"""Goniometer"""', 'visible': '(True)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""data_changed"""', 'mix_with': '(SignalMixin, ObserveMixin)'}), 
"(default=None, text='Goniometer', visible=True, persistent=\n True, tabular=True, signal_name='data_changed', mix_with=(SignalMixin,\n ObserveMixin))\n", (6187, 6340), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((6450, 6547), 'mvc.models.properties.LabeledProperty', 'LabeledProperty', ([], {'default': 'None', 'text': '"""Markers"""', 'visible': '(False)', 'persistent': '(False)', 'tabular': '(True)'}), "(default=None, text='Markers', visible=False, persistent=\n False, tabular=True)\n", (6465, 6547), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((6644, 6846), 'mvc.models.properties.ListProperty', 'ListProperty', ([], {'default': 'None', 'text': '"""Markers"""', 'data_type': 'Marker', 'visible': '(False)', 'persistent': '(True)', 'tabular': '(True)', 'signal_name': '"""visuals_changed"""', 'widget_type': '"""object_list_view"""', 'mix_with': '(SignalMixin,)'}), "(default=None, text='Markers', data_type=Marker, visible=False,\n persistent=True, tabular=True, signal_name='visuals_changed',\n widget_type='object_list_view', mix_with=(SignalMixin,))\n", (6656, 6846), False, 'from mvc.models.properties import StringProperty, SignalMixin, ReadOnlyMixin, FloatProperty, LabeledProperty, ObserveMixin, ListProperty, BoolProperty\n'), ((14044, 14090), 'pyxrd.generic.models.DataModel.observe', 'DataModel.observe', (['"""data_changed"""'], {'signal': '(True)'}), "('data_changed', signal=True)\n", (14061, 14090), False, 'from pyxrd.generic.models import ExperimentalLine, CalculatedLine, DataModel\n'), ((14350, 14399), 'pyxrd.generic.models.DataModel.observe', 'DataModel.observe', (['"""visuals_changed"""'], {'signal': '(True)'}), "('visuals_changed', signal=True)\n", (14367, 14399), False, 'from pyxrd.generic.models import ExperimentalLine, 
CalculatedLine, DataModel\n'), ((1147, 1184), 'pyxrd.file_parsers.xrd_parsers.xrd_parsers.get_export_file_filters', 'xrd_parsers.get_export_file_filters', ([], {}), '()\n', (1182, 1184), False, 'from pyxrd.file_parsers.xrd_parsers import xrd_parsers\n'), ((1208, 1245), 'pyxrd.file_parsers.exc_parsers.exc_parsers.get_import_file_filters', 'exc_parsers.get_import_file_filters', ([], {}), '()\n', (1243, 1245), False, 'from pyxrd.file_parsers.exc_parsers import exc_parsers\n'), ((9450, 9464), 'pyxrd.calculations.data_objects.SpecimenData', 'SpecimenData', ([], {}), '()\n', (9462, 9464), False, 'from pyxrd.calculations.data_objects import SpecimenData\n'), ((17784, 17814), 'pyxrd.generic.io.Storable.json_properties', 'Storable.json_properties', (['self'], {}), '(self)\n', (17808, 17814), False, 'from pyxrd.generic.io import storables, Storable\n'), ((19077, 19117), 'pyxrd.calculations.peak_detection.peakdetect', 'peakdetect', (['data_y', 'data_x', '(5)', 'threshold'], {}), '(data_y, data_x, 5, threshold)\n', (19087, 19117), False, 'from pyxrd.calculations.peak_detection import peakdetect\n'), ((19888, 19916), 'numpy.ones', 'np.ones', (['x.shape'], {'dtype': 'bool'}), '(x.shape, dtype=bool)\n', (19895, 19916), True, 'import numpy as np\n'), ((1634, 1675), 'numpy.copy', 'np.copy', (['self.experimental_pattern.data_y'], {}), '(self.experimental_pattern.data_y)\n', (1641, 1675), True, 'import numpy as np\n'), ((23956, 24006), 'numpy.radians', 'np.radians', (['(self.experimental_pattern.data_x * 0.5)'], {}), '(self.experimental_pattern.data_x * 0.5)\n', (23966, 24006), True, 'import numpy as np\n'), ((1754, 1779), 'numpy.array', 'np.array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (1762, 1779), True, 'import numpy as np\n'), ((2777, 2810), 'pyxrd.generic.utils.not_none', 'not_none', (['self.statistics.Rp', '(0.0)'], {}), '(self.statistics.Rp, 0.0)\n', (2785, 2810), False, 'from pyxrd.generic.utils import not_none\n'), ((2851, 2885), 
'pyxrd.generic.utils.not_none', 'not_none', (['self.statistics.Rwp', '(0.0)'], {}), '(self.statistics.Rwp, 0.0)\n', (2859, 2885), False, 'from pyxrd.generic.utils import not_none\n'), ((7166, 7213), 'numpy.max', 'np.max', (['self.experimental_pattern.max_display_y'], {}), '(self.experimental_pattern.max_display_y)\n', (7172, 7213), True, 'import numpy as np\n'), ((7292, 7337), 'numpy.max', 'np.max', (['self.calculated_pattern.max_display_y'], {}), '(self.calculated_pattern.max_display_y)\n', (7298, 7337), True, 'import numpy as np\n'), ((12533, 12632), 'mvc.observers.ListObserver', 'ListObserver', (['self.on_marker_inserted', 'self.on_marker_removed'], {'prop_name': '"""markers"""', 'model': 'self'}), "(self.on_marker_inserted, self.on_marker_removed, prop_name=\n 'markers', model=self)\n", (12545, 12632), False, 'from mvc.observers import ListObserver\n'), ((16158, 16175), 'numpy.array', 'np.array', (['xy_data'], {}), '(xy_data)\n', (16166, 16175), True, 'import numpy as np\n'), ((16206, 16223), 'numpy.array', 'np.array', (['rh_data'], {}), '(rh_data)\n', (16214, 16223), True, 'import numpy as np\n'), ((16537, 16559), 'numpy.copy', 'np.copy', (['xy_data[:, 0]'], {}), '(xy_data[:, 0])\n', (16544, 16559), True, 'import numpy as np\n'), ((16660, 16682), 'numpy.copy', 'np.copy', (['xy_data[:, 1]'], {}), '(xy_data[:, 1])\n', (16667, 16682), True, 'import numpy as np\n'), ((16801, 16820), 'numpy.average', 'np.average', (['rh_data'], {}), '(rh_data)\n', (16811, 16820), True, 'import numpy as np\n'), ((16926, 16948), 'numpy.asanyarray', 'np.asanyarray', (['y_datas'], {}), '(y_datas)\n', (16939, 16948), True, 'import numpy as np\n'), ((19500, 19511), 'math.log', 'log', (['nm', '(10)'], {}), '(nm, 10)\n', (19503, 19511), False, 'from math import pi, log\n')] |
import pyaudio
import math, random
import numpy as np
import find_peaks as fp
from search_tree import SearchTree
from pydub import AudioSegment
import time
# Length of the fingerprinted window in milliseconds (pydub slices in ms).
END = 10000 #Sample for 10 seconds
# Minimum improvement over the current best score required before a new
# start time is accepted in match() -- see the `mn - curr >= THRESH` check.
THRESH = 0.6
def match(audio_name, mv_name):
    """
    Locate the most likely position of a short audio clip inside a longer
    movie/audio file by comparing spectrogram peak maps.

    :param audio_name: path of the query clip; the `END` ms starting 5 s
        in are fingerprinted.
    :param mv_name: path of the file to search through.
    :return: tuple (best_offset_ms, score). Returns (0, 0) when comparing
        the peak maps fails, and (None, -1) when the searched file is
        shorter than the sample window (no position was ever scored).
    """
    audio_file = AudioSegment.from_file(audio_name)[5000:(END + 5000)]
    mv_file = AudioSegment.from_file(mv_name)
    data = np.frombuffer(audio_file._data, np.int16)
    audio_channels = []
    audio_sparse_maps = []  # freq/time mappings of amplitude peaks in spectrogram
    # De-interleave the raw sample buffer into one array per channel
    for i in range(audio_file.channels):
        audio_channels.append(data[i::audio_file.channels])
    for channel_data in audio_channels:
        smap = fp.get_sparse_map(channel_data, audio_file.frame_rate)
        audio_sparse_maps.append(list(smap))
    l = 0
    length = 120000  # NOTE: hardcoded to 2 minutes; was mv_file.duration_seconds * 1000
    best = None
    mn = -1
    # Slide a window of END ms over the file in 500 ms steps
    while (l + END) <= length:
        if l % 5000 == 0:
            print('At time', l / 1000, '(s)')
        sample = mv_file[l:(l + END)]
        sample_channels = []
        sample_sparse_maps = []
        data = np.frombuffer(sample._data, np.int16)
        for i in range(sample.channels):
            sample_channels.append(data[i::sample.channels])
        for channel_data in sample_channels:
            smap = fp.get_sparse_map(channel_data, mv_file.frame_rate)
            sample_sparse_maps.append(list(smap))
        try:
            curr = difference(audio_sparse_maps, sample_sparse_maps)
        except Exception:
            # BUG FIX: this was `except():` -- an empty tuple, which catches
            # nothing, so errors in difference() propagated instead of
            # triggering this fallback return.
            return (0, 0)
        # Accept the first score unconditionally, afterwards only scores
        # that improve on the best by at least THRESH
        if (mn == -1) or ((curr > 0) and (mn - curr >= THRESH)):
            mn = curr
            best = l
        l += 500
    if best is None:
        # The searched file was shorter than the sample window; previously
        # `best - 5000` raised a TypeError on None here.
        return (None, mn)
    return (best - 5000, mn)
def difference(audio_maps, sample_maps):
    """
    Mean offset score between two lists of per-channel sparse peak maps.

    Only as many channels as both recordings have in common are compared;
    the per-channel offsets are averaged into a single score.
    """
    n_channels = min(len(audio_maps), len(sample_maps))
    total = sum(offset(audio_maps[k], sample_maps[k]) for k in range(n_channels))
    return total / n_channels
def offset(mapA, mapB):
    """
    Mean nearest-neighbour distance from the points of mapB to those of mapA.

    mapA's points are loaded into a SearchTree; for every point of mapB the
    tree is queried for the closest stored point and those distances are
    averaged.

    :param mapA: list of (x, y) peak coordinates (reference map)
    :param mapB: list of (x, y) peak coordinates (query map)
    :return: average nearest-neighbour distance (float)
    """
    st = SearchTree()
    # BUG FIX: shuffle a *copy* instead of the caller's list --
    # random.shuffle(mapA) reordered the argument in place as a side
    # effect. Shuffling reduces the expected tree height for
    # near-sorted input.
    for point in random.sample(mapA, len(mapA)):
        st.push(point[0], point[1])
    mean_dist = 0
    for point in mapB:
        # query returns (distance, visited_count); only the distance matters
        res = st.query(point[0], point[1], False)
        mean_dist += res[0]
    return mean_dist / len(mapB)
'''
def offset(mapA, mapB):
mean_dist = 0
for i in mapA:
min_dist = -1
res = None
for j in mapB:
if(res == None):
res = j
min_dist = dist(i[0], i[1], j[0], j[1])
else:
if(dist(i[0], i[1], j[0], j[1]) < min_dist):
min_dist = dist(i[0], i[1], j[0], j[1])
res = j
mean_dist += min_dist
return (mean_dist / len(mapA))
'''
def dist(x1, y1, x2, y2):
    """Euclidean distance between the points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return math.sqrt(dx * dx + dy * dy)
| [
"random.shuffle",
"math.sqrt",
"search_tree.SearchTree",
"pydub.AudioSegment.from_file",
"numpy.frombuffer",
"time.time",
"find_peaks.get_sparse_map"
] | [((326, 357), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['mv_name'], {}), '(mv_name)\n', (348, 357), False, 'from pydub import AudioSegment\n'), ((374, 415), 'numpy.frombuffer', 'np.frombuffer', (['audio_file._data', 'np.int16'], {}), '(audio_file._data, np.int16)\n', (387, 415), True, 'import numpy as np\n'), ((2096, 2108), 'search_tree.SearchTree', 'SearchTree', ([], {}), '()\n', (2106, 2108), False, 'from search_tree import SearchTree\n'), ((2155, 2175), 'random.shuffle', 'random.shuffle', (['mapA'], {}), '(mapA)\n', (2169, 2175), False, 'import math, random\n'), ((3152, 3194), 'math.sqrt', 'math.sqrt', (['((x1 - x2) ** 2 + (y1 - y2) ** 2)'], {}), '((x1 - x2) ** 2 + (y1 - y2) ** 2)\n', (3161, 3194), False, 'import math, random\n'), ((258, 292), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['audio_name'], {}), '(audio_name)\n', (280, 292), False, 'from pydub import AudioSegment\n'), ((677, 731), 'find_peaks.get_sparse_map', 'fp.get_sparse_map', (['channel_data', 'audio_file.frame_rate'], {}), '(channel_data, audio_file.frame_rate)\n', (694, 731), True, 'import find_peaks as fp\n'), ((1087, 1124), 'numpy.frombuffer', 'np.frombuffer', (['sample._data', 'np.int16'], {}), '(sample._data, np.int16)\n', (1100, 1124), True, 'import numpy as np\n'), ((2050, 2061), 'time.time', 'time.time', ([], {}), '()\n', (2059, 2061), False, 'import time\n'), ((2348, 2359), 'time.time', 'time.time', ([], {}), '()\n', (2357, 2359), False, 'import time\n'), ((1330, 1381), 'find_peaks.get_sparse_map', 'fp.get_sparse_map', (['channel_data', 'mv_file.frame_rate'], {}), '(channel_data, mv_file.frame_rate)\n', (1347, 1381), True, 'import find_peaks as fp\n')] |
import torch
from finetuning import TweetBatch, weights
from tqdm import tqdm, trange
import os
import numpy as np
from sklearn.metrics import mean_squared_error
import argparse
import pdb
from transformers import BertForSequenceClassification
def evaluate(args, model, eval_dataloader, wi, device, prefix=""):
    """
    Run the fine-tuned regression model over the evaluation set and report
    the mean squared error.

    :param args: namespace providing output_dir, discretization_unit and
        window_size
    :param model: BertForSequenceClassification with a regression head
    :param eval_dataloader: DataLoader yielding evaluation batches
    :param wi: decay weights applied to the sliding-window features
    :param device: torch device the labels are moved to
    :param prefix: subdirectory name for the results file
    :return: dict mapping metric name to value (currently only "mse")
    """
    # Validation
    eval_output_dir = args.output_dir
    results = {}
    if not os.path.exists(eval_output_dir):
        os.makedirs(eval_output_dir)
    # Eval!
    print("***** Running evaluation {} *****".format(prefix))
    num_eval_examples = int(1653*0.2)  # NOTE: hardcoded 20% split of 1653 samples
    print(" Num examples = %d", num_eval_examples)
    print(" Batch size = %d", 8)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    tweet_batch = TweetBatch(args.discretization_unit, args.window_size)
    n_batch = 1
    eval_iterator = tqdm(eval_dataloader, desc="Evaluating")
    for step, batch in enumerate(eval_iterator):
        # Evaluation mode (as opposed to training mode): disables dropout etc.
        model = model.eval()
        tweet_batch.discretize_batch(batch, step+1, n_batch)
        n_batch += 1
        X, y = tweet_batch.sliding_window(wi, device, step+1)
        # Forward pass; the batch must contain at least one example
        if len(X) >= 1:
            # No gradients needed in evaluation: saves memory and speeds things up
            with torch.no_grad():
                # BUG FIX: removed a stray pdb.set_trace() that froze
                # evaluation at this point.
                outputs, _, _ = model(input_ids=X, labels=torch.tensor(y).to(device), weights=wi, window_size=args.window_size)
                tmp_eval_loss, logits = outputs
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = y
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, y, axis=0)
    eval_loss = eval_loss / nb_eval_steps
    # Regression: squeeze the single-output dimension (classification would
    # use np.argmax(preds, axis=1) instead)
    preds = np.squeeze(preds)
    # BUG FIX: mean_squared_error returns a plain float; it must be wrapped
    # in a dict for results.update() and result.keys() below to work.
    result = {"mse": mean_squared_error(preds, out_label_ids)}
    results.update(result)
    output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        print("***** Eval results {} *****".format(prefix))
        for key in sorted(result.keys()):
            print(" %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))
    return results
# ---------------------------------------------------------------------------
# Script entry: evaluate a fine-tuned BERT regression model on a previously
# saved test dataloader and print/write the MSE results.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Test Bert finetuned for a regression task, to predict the tweet counts from the embbedings.')
parser.add_argument('--discretization_unit', default=1, help="The discretization unit is the number of hours to discretize the time series data. E.g.: If the user choses 3, then one sample point will cointain 3 hours of data.")
parser.add_argument('--window_size', default=3, help="Number of time windows to look behind. E.g.: If the user choses 3, when to provide the features for the current window, we average the embbedings of the tweets of the 3 previous windows.")
parser.add_argument("--output_dir", default='/bitcoin_data/test_results1', type=str, help="The output directory where the model predictions and checkpoints will be written.")
# NOTE(review): the default paths below are machine-specific Windows paths;
# override them on other machines.
parser.add_argument("--model_path", default=r"C:\Users\Filipa\Desktop\Predtweet\bitcoin_data\pytorch_model.bin", type=str)
parser.add_argument("--dataset_path", default=r"C:\Users\Filipa\Desktop\Predtweet\bitcoin_data\finetuning_outputs\test_dataloader.pth", type=str )
args = parser.parse_args()

# Load BertForSequenceClassification, the pretrained BERT model with a single
# linear classification layer on top (num_labels=1 -> regression head).
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=1)
model.load_state_dict(torch.load(args.model_path))
print("Done!")

# If there's a GPU available...
if torch.cuda.is_available():
    # Tell PyTorch to use the GPU.
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
    n_gpu = 1
    model.cuda()
# If not...
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")

test_dataloader = torch.load(args.dataset_path)

# Calculates the time-difference indices for the sliding window
timedif = [i for i in range(args.window_size)]
# Calculate the weights using K = 0.5 (giving 50% of importance to the most
# recent timestamp) and tau = 6.25s so that when the temporal difference is
# 10s, the importance is +- 10.1%
# NOTE(review): the comment above mentions tau = 6.25 but `2` is passed as
# the second argument of weights() -- confirm which value is intended.
wi = weights(0.5, 2, timedif)

results = evaluate(args, model, test_dataloader, wi, device)
| [
"torch.cuda.device_count",
"torch.cuda.is_available",
"finetuning.TweetBatch",
"os.path.exists",
"argparse.ArgumentParser",
"finetuning.weights",
"numpy.squeeze",
"sklearn.metrics.mean_squared_error",
"transformers.BertForSequenceClassification.from_pretrained",
"torch.device",
"torch.cuda.get_d... | [((2919, 3059), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test Bert finetuned for a regression task, to predict the tweet counts from the embbedings."""'}), "(description=\n 'Test Bert finetuned for a regression task, to predict the tweet counts from the embbedings.'\n )\n", (2942, 3059), False, 'import argparse\n'), ((4127, 4212), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['"""bert-base-uncased"""'], {'num_labels': '(1)'}), "('bert-base-uncased', num_labels=1\n )\n", (4172, 4212), False, 'from transformers import BertForSequenceClassification\n'), ((4319, 4344), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4342, 4344), False, 'import torch\n'), ((4749, 4778), 'torch.load', 'torch.load', (['args.dataset_path'], {}), '(args.dataset_path)\n', (4759, 4778), False, 'import torch\n'), ((5054, 5078), 'finetuning.weights', 'weights', (['(0.5)', '(2)', 'timedif'], {}), '(0.5, 2, timedif)\n', (5061, 5078), False, 'from finetuning import TweetBatch, weights\n'), ((805, 859), 'finetuning.TweetBatch', 'TweetBatch', (['args.discretization_unit', 'args.window_size'], {}), '(args.discretization_unit, args.window_size)\n', (815, 859), False, 'from finetuning import TweetBatch, weights\n'), ((900, 940), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (904, 940), False, 'from tqdm import tqdm, trange\n'), ((2226, 2243), 'numpy.squeeze', 'np.squeeze', (['preds'], {}), '(preds)\n', (2236, 2243), True, 'import numpy as np\n'), ((2409, 2449), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['preds', 'out_label_ids'], {}), '(preds, out_label_ids)\n', (2427, 2449), False, 'from sklearn.metrics import mean_squared_error\n'), ((2554, 2611), 'os.path.join', 'os.path.join', (['eval_output_dir', 'prefix', '"""eval_results.txt"""'], {}), "(eval_output_dir, 
prefix, 'eval_results.txt')\n", (2566, 2611), False, 'import os\n'), ((4233, 4260), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (4243, 4260), False, 'import torch\n'), ((4406, 4426), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4418, 4426), False, 'import torch\n'), ((4702, 4721), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4714, 4721), False, 'import torch\n'), ((414, 445), 'os.path.exists', 'os.path.exists', (['eval_output_dir'], {}), '(eval_output_dir)\n', (428, 445), False, 'import os\n'), ((456, 484), 'os.makedirs', 'os.makedirs', (['eval_output_dir'], {}), '(eval_output_dir)\n', (467, 484), False, 'import os\n'), ((4539, 4568), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (4565, 4568), False, 'import torch\n'), ((4475, 4500), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4498, 4500), False, 'import torch\n'), ((1436, 1451), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1449, 1451), False, 'import torch\n'), ((1579, 1594), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1592, 1594), False, 'import pdb\n'), ((2126, 2161), 'numpy.append', 'np.append', (['out_label_ids', 'y'], {'axis': '(0)'}), '(out_label_ids, y, axis=0)\n', (2135, 2161), True, 'import numpy as np\n'), ((1654, 1669), 'torch.tensor', 'torch.tensor', (['y'], {}), '(y)\n', (1666, 1669), False, 'import torch\n')] |
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import pandas as pd
import numpy as np
from app import octopusData, client, linePlot
# Single FastAPI application instance serving the energy-usage endpoints.
app = FastAPI()

# Front-end dev servers allowed to call this API cross-origin.
origins = ["http://localhost:3000", "http://127.0.0.1:3000"]
# assumes a suitable web server, e.g. "python -m http.server 9000"

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],  # allow all HTTP methods
    allow_headers=["*"],  # allow all request headers
)
@app.get("/")
async def root():
    """
    Summary endpoint: refresh the cached Octopus consumption data, then
    report how many electricity/gas readings are missing and the timestamp
    of the most recent reading of each kind (ISO 8601 strings).
    """
    octopusData.update(client)
    return {
        "missing_electric": len(octopusData.missing_electric),
        "missing_gas": len(octopusData.missing_gas),
        "recent_gas": octopusData.g_consumption.index.max().isoformat(),
        "recent_electric": octopusData.e_consumption.index.max().isoformat(),
    }
@app.get("/starttimes")
def starttimes():
octopusData.update(client)
now = pd.Timestamp.now(tz="UTC")
today = octopusData.agile_tariff[octopusData.agile_tariff.index >= now]
def applicanceData(usagePattern, title):
startTime = (
today["value_inc_vat"]
.rolling(len(usagePattern))
.apply(lambda x: np.multiply(x, usagePattern).sum())
)
start = startTime.idxmin()
end = start + pd.Timedelta("30 m") * len(usagePattern)
cost = startTime.min()
plot = linePlot(startTime, title)
appData = {
"start": start,
"end": end,
"cost": cost,
"plot": plot,
}
return appData
# order must match time order of series. latest to earliest, for instance
washing_machine = [0.2, 0.2, 0.2, 0.2, 0.2, 1, 1]
washing_machine_data = applicanceData(
washing_machine, "Start Times Washing Machine"
)
washing_up = (
(4.18 * 8 * 2 * 30) # 8 litres per bowl, 2 bowls
/ (60 * 60) # temperature difference. J/g/°C
/ (0.8) # seconds. This gives kWh
) # efficiency
unitCost = 2.74 / 100
# 0.9 kwH over 2:44. But actually 1/2 in the first 1/2 hour, delayed by 20min, 1/2 1 hour later.
# order must match time order of series. latest to earliest, for instance
gentle_dishwasher = [0.4, 0, 0.5]
gentle_dishwasher_data = applicanceData(
gentle_dishwasher, "Start Times Gentle Dishwasher"
)
# 0.75 kWh over 3:58
eco_dishwasher = [0.05, 0.05, 0.05, 0.05, 0.15, 0.15, 0.15, 0.1]
eco_dishwasher_data = applicanceData(eco_dishwasher, "Start Times Eco Dishwasher")
# 1.35 kWh over 3.11 hours
intense_dishwasher = [0.05, 0.1, 0.1, 0.1, 0.1, 0.5, 0.4]
intense_dishwasher_data = applicanceData(
intense_dishwasher, "Start Times Intense Dishwasher"
)
data = {
"WashingMachineEnd": washing_machine_data["end"],
"WashingMachineCost": washing_machine_data["cost"],
"WashingMachinePlot": washing_machine_data["plot"],
"GentleDishwasherStart": gentle_dishwasher_data["start"],
"GentleDishwasherCost": gentle_dishwasher_data["cost"],
"GentleDishwasherPlot": gentle_dishwasher_data["plot"],
"EcoDishwasherStart": eco_dishwasher_data["start"],
"EcoDishwasherCost": eco_dishwasher_data["cost"],
"EcoDishwasherPlot": eco_dishwasher_data["plot"],
"IntenseDishwasherStart": intense_dishwasher_data["start"],
"IntenseDishwasherCost": intense_dishwasher_data["cost"],
"IntenseDishwasherPlot": intense_dishwasher_data["plot"],
}
return data
@app.get("/consumption")
def consumption():
octopusData.update(client)
data = {
"gasConsumptionBinnedChart": octopusData.gasConsumptionBinnedChart,
"gasConsumption2022BinnedChart": octopusData.gasConsumption2022BinnedChart,
"electricityDailyChart": octopusData.electricityDailyChart,
"electricityRollingChart": octopusData.electricityRollingChart,
"gasDailyChart": octopusData.gasDailyChart,
"gasRollingChart": octopusData.gasRollingChart,
}
return data
| [
"numpy.multiply",
"fastapi.FastAPI",
"pandas.Timestamp.now",
"pandas.Timedelta",
"app.octopusData.e_consumption.index.max",
"app.octopusData.update",
"app.octopusData.g_consumption.index.max",
"app.linePlot"
] | [((171, 180), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (178, 180), False, 'from fastapi import FastAPI\n'), ((497, 523), 'app.octopusData.update', 'octopusData.update', (['client'], {}), '(client)\n', (515, 523), False, 'from app import octopusData, client, linePlot\n'), ((874, 900), 'app.octopusData.update', 'octopusData.update', (['client'], {}), '(client)\n', (892, 900), False, 'from app import octopusData, client, linePlot\n'), ((912, 938), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {'tz': '"""UTC"""'}), "(tz='UTC')\n", (928, 938), True, 'import pandas as pd\n'), ((3573, 3599), 'app.octopusData.update', 'octopusData.update', (['client'], {}), '(client)\n', (3591, 3599), False, 'from app import octopusData, client, linePlot\n'), ((1378, 1404), 'app.linePlot', 'linePlot', (['startTime', 'title'], {}), '(startTime, title)\n', (1386, 1404), False, 'from app import octopusData, client, linePlot\n'), ((675, 712), 'app.octopusData.g_consumption.index.max', 'octopusData.g_consumption.index.max', ([], {}), '()\n', (710, 712), False, 'from app import octopusData, client, linePlot\n'), ((753, 790), 'app.octopusData.e_consumption.index.max', 'octopusData.e_consumption.index.max', ([], {}), '()\n', (788, 790), False, 'from app import octopusData, client, linePlot\n'), ((1291, 1311), 'pandas.Timedelta', 'pd.Timedelta', (['"""30 m"""'], {}), "('30 m')\n", (1303, 1311), True, 'import pandas as pd\n'), ((1187, 1215), 'numpy.multiply', 'np.multiply', (['x', 'usagePattern'], {}), '(x, usagePattern)\n', (1198, 1215), True, 'import numpy as np\n')] |
from os import path
import numpy as np
import pandas as pd
import impyute as impy
from matplotlib import pyplot as plt
plt.close("all")
data_path = 'data'
# read geographic information for capitals
municipios = pd.read_csv(path.join(data_path, 'population_capitals.csv'))
# population density
municipios['density'] = municipios['population'] / municipios['area']
# List of months for preparing the output
start_date = '2017-01-01'
end_date = '2022-03-01'
frequency = '1M'
dates = pd.date_range(start_date, end_date, freq=frequency) - pd.offsets.MonthBegin(1)
dates = dates.strftime("%Y-%m").values.tolist()[:-2]
data_output = []
headers = []
# Weight and prepare changes
max_value = 0
for i in range(len(municipios)):
municipio = municipios.iloc[i]
asc = pd.read_csv(path.join(data_path + '/gee_results', municipio.iloc[0] + '_ASCENDING_.csv')).iloc[:, 1].to_numpy()
dsc = pd.read_csv(path.join(data_path + '/gee_results', municipio.iloc[0] + '_DESCENDING_.csv')).iloc[:, 1].to_numpy()
changes = (asc + dsc) / 2
data_output.append(changes)
headers.append(municipio.iloc[0])
# Convert to numpy and flip over the diagonal
data_output = np.rot90(np.fliplr(np.array(data_output)))
# Impute zeros
data_output[data_output == 0] = 'Nan'
data_output = impy.em(data_output)
data_output = pd.DataFrame(data=data_output, index=dates, columns=headers)
# Multiply monthly values by density
change_index = []
for index, row in data_output.iterrows():
monthly_total = 0
for i, v in row.iteritems():
monthly_total = monthly_total + (v * float(municipios[municipios['capital'] == i]['density']))
monthly_total = monthly_total/len(row)
change_index.append(monthly_total)
# Save as csv
data_output['change_index'] = change_index
data_output['change_index'].to_csv(path.join(data_path, 'change_index.csv'))
capitals = data_output.drop('change_index', 1)
# plot
plt.plot(capitals)
plt.xticks(rotation=80, ha='right')
plt.show()
print('eof')
| [
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.close",
"numpy.array",
"impyute.em",
"pandas.offsets.MonthBegin",
"pandas.DataFrame",
"pandas.date_range",
"matplotlib.pyplot.show"
] | [((119, 135), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (128, 135), True, 'from matplotlib import pyplot as plt\n'), ((1276, 1296), 'impyute.em', 'impy.em', (['data_output'], {}), '(data_output)\n', (1283, 1296), True, 'import impyute as impy\n'), ((1312, 1372), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data_output', 'index': 'dates', 'columns': 'headers'}), '(data=data_output, index=dates, columns=headers)\n', (1324, 1372), True, 'import pandas as pd\n'), ((1901, 1919), 'matplotlib.pyplot.plot', 'plt.plot', (['capitals'], {}), '(capitals)\n', (1909, 1919), True, 'from matplotlib import pyplot as plt\n'), ((1920, 1955), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(80)', 'ha': '"""right"""'}), "(rotation=80, ha='right')\n", (1930, 1955), True, 'from matplotlib import pyplot as plt\n'), ((1956, 1966), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1964, 1966), True, 'from matplotlib import pyplot as plt\n'), ((224, 271), 'os.path.join', 'path.join', (['data_path', '"""population_capitals.csv"""'], {}), "(data_path, 'population_capitals.csv')\n", (233, 271), False, 'from os import path\n'), ((483, 534), 'pandas.date_range', 'pd.date_range', (['start_date', 'end_date'], {'freq': 'frequency'}), '(start_date, end_date, freq=frequency)\n', (496, 534), True, 'import pandas as pd\n'), ((537, 561), 'pandas.offsets.MonthBegin', 'pd.offsets.MonthBegin', (['(1)'], {}), '(1)\n', (558, 561), True, 'import pandas as pd\n'), ((1804, 1844), 'os.path.join', 'path.join', (['data_path', '"""change_index.csv"""'], {}), "(data_path, 'change_index.csv')\n", (1813, 1844), False, 'from os import path\n'), ((1184, 1205), 'numpy.array', 'np.array', (['data_output'], {}), '(data_output)\n', (1192, 1205), True, 'import numpy as np\n'), ((781, 857), 'os.path.join', 'path.join', (["(data_path + '/gee_results')", "(municipio.iloc[0] + '_ASCENDING_.csv')"], {}), "(data_path + '/gee_results', municipio.iloc[0] + 
'_ASCENDING_.csv')\n", (790, 857), False, 'from os import path\n'), ((903, 980), 'os.path.join', 'path.join', (["(data_path + '/gee_results')", "(municipio.iloc[0] + '_DESCENDING_.csv')"], {}), "(data_path + '/gee_results', municipio.iloc[0] + '_DESCENDING_.csv')\n", (912, 980), False, 'from os import path\n')] |
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import mean_squared_error
from agents.stew.utils import create_diff_matrix
import numpy as np
import gym
from agents.EwRegularizer import KerasEWRegularizer
class DQNAgent:
def __init__(self, state_size, action_space, reg_strength):
self.n_actions = action_space
# we define some parameters and hyperparameters:
# "lr" : learning rate
# "gamma": discounted factor
# "exploration_proba_decay": decay of the exploration probability
# "batch_size": size of experiences we sample to train the DNN
self.lr = 0.001
self.gamma = 0.99
self.exploration_proba = 1.0
self.exploration_proba_decay = 0.005
self.batch_size = 32
self.reg_strength = reg_strength
# We define our memory buffer where we will store our experiences
# We stores only the 2000 last time steps
self.memory_buffer = list()
self.max_memory_buffer = 2000
# We creaate our model having to hidden layers of 24 units (neurones)
# The first layer has the same size as a state size
# The last layer has the size of actions space
self.model = Sequential([
Dense(units=10, input_dim=state_size, activation='relu',
kernel_regularizer=KerasEWRegularizer(self.reg_strength)),
Dense(units=6, activation='relu', kernel_regularizer=KerasEWRegularizer(self.reg_strength)),
Dense(units=action_size, activation='linear', kernel_regularizer=KerasEWRegularizer(self.reg_strength))
])
self.model.compile(loss="mse",
optimizer=Adam(learning_rate=self.lr))
# The agent computes the action to perform given a state
def compute_action(self, current_state):
# We sample a variable uniformly over [0,1]
# if the variable is less than the exploration probability
# we choose an action randomly
# else
# we forward the state through the DNN and choose the action
# with the highest Q-value.
if np.random.uniform(0, 1) < self.exploration_proba:
return np.random.choice(range(self.n_actions))
q_values = self.model.predict(current_state)[0]
return np.argmax(q_values)
# when an episode is finished, we update the exploration probability using
# espilon greedy algorithm
def update_exploration_probability(self):
self.exploration_proba = self.exploration_proba * np.exp(-self.exploration_proba_decay)
# At each time step, we store the corresponding experience
def store_episode(self, current_state, action, reward, next_state, done):
# We use a dictionnary to store them
self.memory_buffer.append({
"current_state": current_state,
"action": action,
"reward": reward,
"next_state": next_state,
"done": done
})
# If the size of memory buffer exceeds its maximum, we remove the oldest experience
if len(self.memory_buffer) > self.max_memory_buffer:
self.memory_buffer.pop(0)
# At the end of each episode, we train our model
def train(self):
# We shuffle the memory buffer and select a batch size of experiences
np.random.shuffle(self.memory_buffer)
batch_sample = self.memory_buffer[0:self.batch_size]
# We iterate over the selected experiences
for experience in batch_sample:
# We compute the Q-values of S_t
q_current_state = self.model.predict(experience["current_state"])
# We compute the Q-target using Bellman optimality equation
q_target = experience["reward"]
if not experience["done"]:
q_target = q_target + self.gamma * np.max(self.model.predict(experience["next_state"])[0])
q_current_state[0][experience["action"]] = q_target
# train the model
self.model.fit(experience["current_state"], q_current_state, verbose=0)
# We create our gym environment
env = gym.make("CartPole-v1")
# We get the shape of a state and the actions space size
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
# Number of episodes to run
n_episodes = 150
# Max iterations per epiode
max_iteration_ep = 500
# We define our agent
agent = DQNAgent(state_size, action_size)
total_steps = 0
all_returns = []
# We iterate over episodes
for e in range(n_episodes):
# We initialize the first state and reshape it to fit
# with the input layer of the DNN
current_state = env.reset()
current_state = np.array([current_state])
cumulative_reward = 0
for step in range(max_iteration_ep):
total_steps = total_steps + 1
# the agent computes the action to perform
action = agent.compute_action(current_state)
# the envrionment runs the action and returns
# the next state, a reward and whether the agent is done
next_state, reward, done, _ = env.step(action)
next_state = np.array([next_state])
cumulative_reward += reward
# We store each experience in the memory buffer
agent.store_episode(current_state, action, reward, next_state, done)
# if the episode is ended, we leave the loop after
# updating the exploration probability
if done:
agent.update_exploration_probability()
all_returns.append(cumulative_reward)
print(cumulative_reward)
break
current_state = next_state
# if the have at least batch_size experiences in the memory buffer
# than we tain our model
if total_steps >= agent.batch_size:
agent.train()
np.save('hello', all_returns)
def make_video():
env_to_wrap = gym.make('CartPole-v1')
env = wrappers.Monitor(env_to_wrap, 'videos', force = True)
rewards = 0
steps = 0
done = False
state = env.reset()
state = np.array([state])
while not done:
action = agent.compute_action(state)
state, reward, done, _ = env.step(action)
state = np.array([state])
steps += 1
rewards += reward
print(rewards)
env.close()
env_to_wrap.close()
make_video()
| [
"numpy.argmax",
"agents.EwRegularizer.KerasEWRegularizer",
"numpy.exp",
"numpy.array",
"tensorflow.keras.optimizers.Adam",
"numpy.random.uniform",
"gym.make",
"numpy.save",
"numpy.random.shuffle"
] | [((4269, 4292), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (4277, 4292), False, 'import gym\n'), ((5926, 5955), 'numpy.save', 'np.save', (['"""hello"""', 'all_returns'], {}), "('hello', all_returns)\n", (5933, 5955), True, 'import numpy as np\n'), ((4825, 4850), 'numpy.array', 'np.array', (['[current_state]'], {}), '([current_state])\n', (4833, 4850), True, 'import numpy as np\n'), ((5993, 6016), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (6001, 6016), False, 'import gym\n'), ((6164, 6181), 'numpy.array', 'np.array', (['[state]'], {}), '([state])\n', (6172, 6181), True, 'import numpy as np\n'), ((2449, 2468), 'numpy.argmax', 'np.argmax', (['q_values'], {}), '(q_values)\n', (2458, 2468), True, 'import numpy as np\n'), ((3475, 3512), 'numpy.random.shuffle', 'np.random.shuffle', (['self.memory_buffer'], {}), '(self.memory_buffer)\n', (3492, 3512), True, 'import numpy as np\n'), ((5256, 5278), 'numpy.array', 'np.array', (['[next_state]'], {}), '([next_state])\n', (5264, 5278), True, 'import numpy as np\n'), ((6313, 6330), 'numpy.array', 'np.array', (['[state]'], {}), '([state])\n', (6321, 6330), True, 'import numpy as np\n'), ((2269, 2292), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2286, 2292), True, 'import numpy as np\n'), ((2684, 2721), 'numpy.exp', 'np.exp', (['(-self.exploration_proba_decay)'], {}), '(-self.exploration_proba_decay)\n', (2690, 2721), True, 'import numpy as np\n'), ((1832, 1859), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'self.lr'}), '(learning_rate=self.lr)\n', (1836, 1859), False, 'from tensorflow.keras.optimizers import Adam\n'), ((1482, 1519), 'agents.EwRegularizer.KerasEWRegularizer', 'KerasEWRegularizer', (['self.reg_strength'], {}), '(self.reg_strength)\n', (1500, 1519), False, 'from agents.EwRegularizer import KerasEWRegularizer\n'), ((1588, 1625), 'agents.EwRegularizer.KerasEWRegularizer', 'KerasEWRegularizer', 
(['self.reg_strength'], {}), '(self.reg_strength)\n', (1606, 1625), False, 'from agents.EwRegularizer import KerasEWRegularizer\n'), ((1706, 1743), 'agents.EwRegularizer.KerasEWRegularizer', 'KerasEWRegularizer', (['self.reg_strength'], {}), '(self.reg_strength)\n', (1724, 1743), False, 'from agents.EwRegularizer import KerasEWRegularizer\n')] |
"""
Functions associated with a molecule.
"""
from .measure import calculate_distance
from .atom_data import atomic_weights
import numpy as np
def build_bond_list(coordinates, max_bond=1.5, min_bond=0):
"""Return the bonds in a system based on bond distance criteria.
The pairwise distance between atoms is computed. If the distance
is within the range 'min_bond' and 'max_bond", the atoms are counted as bonded.
Parameters
----------
coordinates : np.ndarray
The coordinates of the atoms.
max_bond : float (optional)
The maximum distance to be considered bonded. Default = 1.5
min_bond : float (optional)
The minimum distance to be considered bonded. Default = 0
Returns
-------
bonds : dict
A dictionary where the keys are tuples of the bonded atom indices,
and the associated values are the bond lengths.
"""
# Throwing exceptions
if min_bond < 0:
raise ValueError(
"Invalid minimum bond distance entered! Minimum bond distance must be greater than zero!")
# Find the bonds in a molecule (set of coordinates) based on distance criteria.
bonds = {}
num_atoms = len(coordinates)
for atom1 in range(num_atoms):
for atom2 in range(atom1, num_atoms):
distance = calculate_distance(coordinates[atom1], coordinates[atom2])
if distance > min_bond and distance < max_bond:
bonds[(atom1, atom2)] = distance
return bonds
def calculate_molecular_mass(symbols):
"""Calculate the mass of a molecule.
Parameters
----------
symbols : list
A list of elements.
Returns
-------
mass : float
The mass of the molecule
"""
mass = 0.0
for atom in symbols:
mass += atomic_weights[atom]
return mass
def calculate_center_of_mass(symbols, coordinates):
"""Calculate the center of mass of a molecule.
The center of mass is weighted by each atom's weight.
Parameters
----------
symbols : list
A list of elements for the molecule
coordinates : np.ndarray
The coordinates of the molecule.
Returns
-------
center_of_mass: np.ndarray
The center of mass of the molecule.
Notes
-----
The center of mass is calculated with the formula
.. math:: \\vec{R}=\\frac{1}{M} \\sum_{i=1}^{n} m_{i}\\vec{r_{}i}
"""
total_mass = calculate_molecular_mass(symbols)
center_of_mass = np.array([0.0, 0.0, 0.0])
for atom_number in range(len(symbols)):
atom_type = symbols[atom_number]
mass_of_atom = atomic_weights[atom_type]
atom_position = coordinates[atom_number]
center_of_mass += mass_of_atom * atom_position
center_of_mass = center_of_mass / total_mass
return center_of_mass | [
"numpy.array"
] | [((2536, 2561), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2544, 2561), True, 'import numpy as np\n')] |
from typing import Union
import numpy as np
def bspline_basis_manual(
knot_vector_t: Union[list, tuple],
knot_i: int = 0,
p: int = 0,
nti: int = 1,
verbose: bool = False,
):
"""Computes the B-spline polynomial basis,
currently limited to degree constant, linear, or quadratic.
Args:
knot_vector_t (float array): [t0, t1, t2, ... tI]
len(knot_vector_t) = (I + 1)
(I + 1) knots with (I) knot spans
must have length of two or more
must be a non-decreasing sequence
knot_i (int): index in the list of
possible knot_index values = [0, 1, 2, ... I]
p (int): polynomial degree
(p=0: constant, p=1: linear, p=2: quadratic, p=3: cubic, etc.),
currently limited to p = [0, 1, 2].
nti (int): number of time intervals for t in per-knot-span
interval [t_i, t_{i+1}],
nti = 1 is default
verbose (bool): prints polynomial or error checking
Returns:
tuple: arrays of (t, f(t)) as time t and polynomial evaluated at t; or,
AssertionError: if input is out of range
"""
num_knots = len(knot_vector_t)
MAX_DEGREE = 2
try:
assert (
len(knot_vector_t) >= 2
), "Error: knot vector length must be two or larger."
assert knot_i >= 0, "Error: knot index knot_i must be non-negative."
assert p >= 0, "Error: polynomial degree p must be non-negative."
assert (
p <= MAX_DEGREE
), f"Error: polynomial degree p exceeds maximum of {MAX_DEGREE}"
assert nti >= 1, "Error: number of time intervals nti must be 1 or greater."
assert knot_i <= (
num_knots - 1
), "Error: knot index knot_i exceeds knot vector length minus 1."
num_knots_i_to_end = len(knot_vector_t[knot_i:])
assert (
num_knots_i_to_end >= p + 1
), "Error: insufficient remaining knots for local support."
except AssertionError as error:
if verbose:
print(error)
return error
knots_lhs = knot_vector_t[0:-1] # left-hand-side knot values
knots_rhs = knot_vector_t[1:] # right-hand-side knot values
knot_spans = np.array(knots_rhs) - np.array(knots_lhs)
dt = knot_spans / nti
# assert all([dti >= 0 for dti in dt]), "Error: knot vector is decreasing."
if not all([dti >= 0 for dti in dt]):
raise ValueError("Error: knot vector is decreasing.")
# improve index notation
# t = [knots_lhs[i] + k * dt[i] for i in np.arange(num_knots-1) for k in np.arange(nti)]
t = [
knots_lhs[k] + j * dt[k]
for k in np.arange(num_knots - 1)
for j in np.arange(nti)
]
t.append(knot_vector_t[-1])
t = np.array(t)
# y = np.zeros((num_knots - 1) * nti + 1)
# y = np.zeros(len(t))
f_of_t = np.zeros(len(t))
if verbose:
print(f"Knot vector: {knot_vector_t}")
print(f"Number of knots = {num_knots}")
print(f"Knot index: {knot_i}")
print(f"Left-hand-side knot vector values: {knots_lhs}")
print(f"Right-hand-side knot vector values: {knots_rhs}")
print(f"Knot spans: {knot_spans}")
print(f"Number of time intervals per knot span: {nti}")
print(f"Knot span deltas: {dt}")
if p == 0:
f_of_t[knot_i * nti : knot_i * nti + nti] = 1.0
if verbose:
print(f"t = {t}")
print(f"f(t) = {f_of_t}")
if p == 1:
for (eix, te) in enumerate(t): # e for evaluations, ix for index
if te >= knot_vector_t[knot_i] and te < knot_vector_t[knot_i + 1]:
f_of_t[eix] = (te - knot_vector_t[knot_i]) / (
knot_vector_t[knot_i + 1] - knot_vector_t[knot_i]
)
elif te >= knot_vector_t[knot_i + 1] and te < knot_vector_t[knot_i + 2]:
f_of_t[eix] = (knot_vector_t[knot_i + 2] - te) / (
knot_vector_t[knot_i + 2] - knot_vector_t[knot_i + 1]
)
if p == 2:
for (eix, te) in enumerate(t): # e for evaluations, ix for index
if te >= knot_vector_t[knot_i] and te < knot_vector_t[knot_i + 1]:
a_1 = (te - knot_vector_t[knot_i]) / (
knot_vector_t[knot_i + 2] - knot_vector_t[knot_i]
)
a_2 = (te - knot_vector_t[knot_i]) / (
knot_vector_t[knot_i + 1] - knot_vector_t[knot_i]
)
f_of_t[eix] = a_1 * a_2
elif te >= knot_vector_t[knot_i + 1] and te < knot_vector_t[knot_i + 2]:
b_1 = (te - knot_vector_t[knot_i]) / (
knot_vector_t[knot_i + 2] - knot_vector_t[knot_i]
)
b_2 = (knot_vector_t[knot_i + 2] - te) / (
knot_vector_t[knot_i + 2] - knot_vector_t[knot_i + 1]
)
b_3 = (knot_vector_t[knot_i + 3] - te) / (
knot_vector_t[knot_i + 3] - knot_vector_t[knot_i + 1]
)
b_4 = (te - knot_vector_t[knot_i + 1]) / (
knot_vector_t[knot_i + 2] - knot_vector_t[knot_i + 1]
)
f_of_t[eix] = (b_1 * b_2) + (b_3 * b_4)
elif te >= knot_vector_t[knot_i + 2] and te < knot_vector_t[knot_i + 3]:
c_1 = (knot_vector_t[knot_i + 3] - te) / (
knot_vector_t[knot_i + 3] - knot_vector_t[knot_i + 1]
)
c_2 = (knot_vector_t[knot_i + 3] - te) / (
knot_vector_t[knot_i + 3] - knot_vector_t[knot_i + 2]
)
f_of_t[eix] = c_1 * c_2
return t, f_of_t
| [
"numpy.array",
"numpy.arange"
] | [((2803, 2814), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (2811, 2814), True, 'import numpy as np\n'), ((2265, 2284), 'numpy.array', 'np.array', (['knots_rhs'], {}), '(knots_rhs)\n', (2273, 2284), True, 'import numpy as np\n'), ((2287, 2306), 'numpy.array', 'np.array', (['knots_lhs'], {}), '(knots_lhs)\n', (2295, 2306), True, 'import numpy as np\n'), ((2700, 2724), 'numpy.arange', 'np.arange', (['(num_knots - 1)'], {}), '(num_knots - 1)\n', (2709, 2724), True, 'import numpy as np\n'), ((2742, 2756), 'numpy.arange', 'np.arange', (['nti'], {}), '(nti)\n', (2751, 2756), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Compute a density raster for input geometries or sum a property.
"""
from __future__ import division
import affine
import click
import fiona as fio
import numpy as np
import rasterio as rio
import rasterio.dtypes
from rasterio.features import rasterize
import str2type.ext
def cb_res(ctx, param, value):
"""
Click callback to handle ``--resolution`` syntax and validation.
Parameter
---------
ctx : click.Context
Ignored
param : click.Parameter
Ignored
value : tuple
Tuple of values from each instance of `--resolution`.
Returns
-------
tuple
First element is pixel x size and second is pixel y size.
"""
if len(value) > 2:
raise click.BadParameter('target can only be specified once or twice.')
elif len(value) is 2:
return tuple(abs(v) for v in value)
elif len(value) is 1:
return value[0], value[0]
else:
raise click.BadParameter('bad syntax: {0}'.format(value))
def cb_bbox(ctx, param, value):
"""
Click callback to handle ``--bbox`` syntax and validation.
Parameters
----------
ctx : click.Context
Ignored.
param : click.Parameter
Ignored.
value : tuple
x_min, y_min, x_max, y_max
Raises
------
click.BadParameter
Returns
-------
tuple
(x_min, y_min, x_max, y_max)
"""
if not value:
return None
bbox = value
x_min, y_min, x_max, y_max = bbox
if (x_max < x_min) or (y_max < y_min):
raise click.BadParameter('min exceeds max for one or more dimensions: {0}'.format(' '.join(bbox)))
return bbox
@click.command()
@click.argument('infile')
@click.argument('outfile')
@click.option(
'-f', '--format', 'driver_name', metavar='NAME', default='GTiff',
help="Output raster driver."
)
@click.option(
'-c', '--creation-option', metavar='NAME=VAL', multiple=True,
callback=str2type.ext.click_cb_key_val, help="Output raster creation options."
)
@click.option(
'-t', '--output-type', type=click.Choice(rio_dtypes.typename_fwd.values()),
metavar='NAME', default='Float32',
help="Output raster type. Defaults to `Float32' but must support the value "
"accessed by --property if it is supplied."
)
@click.option(
'-r', '--resolution', type=click.FLOAT, multiple=True, required=True,
callback=cb_res, help="Target resolution in georeferenced units. Assumes square "
"pixels unless specified twice. -tr 1 -tr 2 yields pixels "
"that are 1 unit wide and 2 units tall."
)
@click.option(
'-n', '--nodata', type=click.FLOAT, default=0.0,
help="Nodata value for output raster."
)
@click.option(
'-l', '--layer', 'layer_name', metavar='NAME',
help="Name of input layer to process."
)
@click.option(
'-p', '--property', 'property_name', metavar='NAME',
help="Property to sum. Defaults to density."
)
@click.option(
'-a', '--all-touched', is_flag=True,
help="Enable all touched rasterization."
)
@click.option(
'--bbox', metavar='X_MIN Y_MIN X_MAX Y_MAX', nargs=4, callback=cb_bbox,
help='Only process data within the specified bounding box.'
)
def main(infile, outfile, creation_option, driver_name, output_type, resolution, nodata, layer_name,
property_name, all_touched, bbox):
"""
Creation a geometry density map or sum a property.
When summing a property every pixel that intersects a geometry has the value
of the specified property added to the pixel, which also means that negative
values will be subtracted - the overall net value is written to the output
raster.
Given two partially overlapping polygons A and B where A has a value of 1
and B has a value of 2, pixels in the overlapping area will have a value of
3, pixels in polygon A will have a value of 1, and pixels in polygon B will
have a value of 2. See below:
\b
Sum a property:
\b
A = 1
B = 2
\b
B
+------------+
A |222222222222|
+-------+---+22222222|
|1111111|333|22222222|
|1111111|333|22222222|
|1111111+---+--------+
+-----------+
\b
Compute density:
\b
B
+------------+
A |111111111111|
+-------+---+11111111|
|1111111|222|11111111|
|1111111|222|11111111|
|1111111+---+--------+
+-----------+
\b
Create a point density raster at a 10 meter resolution:
\b
$ summation-raster.py sample-data/point-sample.geojson OUT.tif \\
--creation-option TILED=YES \\
--resolution 10
\b
Sum a property at a 100 meter resolution
\b
$ summation-raster.py sample-data/point-sample.geojson OUT.tif \\
--creation-option TILED=YES \\
--resolution 100 \\
--property ID
\b
NOTE: Point layers work well but other types are raising the error below. All
geometry types will work once this is fixed.
Assertion failed: (0), function query, file AbstractSTRtree.cpp, line 285.
Abort trap: 6
"""
x_res, y_res = resolution
with fio.open(infile, layer=layer_name) as src:
if property_name is not None and src.schema['properties'][property_name].split(':')[0] == 'str':
raise click.BadParameter("Property `%s' is an invalid type for summation: `%s'"
% (property_name, src.schema['properties'][property_name]))
v_x_min, v_y_min, v_x_max, v_y_max = src.bounds if not bbox else bbox
raster_meta = {
'count': 1,
'crs': src.crs,
'driver': driver_name,
'dtype': output_type,
'affine': affine.Affine.from_gdal(*(v_x_min, x_res, 0.0, v_y_max, 0.0, -y_res)),
'width': int((v_x_max - v_x_min) / x_res),
'height': int((v_y_max - v_y_min) / y_res),
'nodata': nodata
}
raster_meta['transform'] = raster_meta['affine']
raster_meta.update(**creation_option)
with rio.open(outfile, 'w', **raster_meta) as dst:
num_blocks = len([bw for bw in dst.block_windows()])
with click.progressbar(dst.block_windows(), length=num_blocks) as block_windows:
for _, window in block_windows:
((row_min, row_max), (col_min, col_max)) = window
x_min, y_min = dst.affine * (col_min, row_max)
x_max, y_max = dst.affine * (col_max, row_min)
block_affine = dst.window_transform(window)
data = np.zeros((row_max - row_min, col_max - col_min), dtype=dst.meta['dtype'])
for feat in src.filter(bbox=(x_min, y_min, x_max, y_max)):
if property_name is None:
add_val = 1
else:
add_val = feat['properties'][property_name]
if add_val is None:
add_val = 0
data += rasterize(
shapes=[feat['geometry']],
out_shape=data.shape,
fill=dst.nodata,
transform=block_affine,
all_touched=all_touched,
default_value=add_val,
dtype=rio.float64
).astype(dst.meta['dtype'])
dst.write(data.astype(dst.meta['dtype']), indexes=1, window=window)
if __name__ == '__main__':
main()
| [
"click.argument",
"click.option",
"rasterio.open",
"affine.Affine.from_gdal",
"rasterio.features.rasterize",
"numpy.zeros",
"fiona.open",
"click.BadParameter",
"click.command"
] | [((1695, 1710), 'click.command', 'click.command', ([], {}), '()\n', (1708, 1710), False, 'import click\n'), ((1712, 1736), 'click.argument', 'click.argument', (['"""infile"""'], {}), "('infile')\n", (1726, 1736), False, 'import click\n'), ((1738, 1763), 'click.argument', 'click.argument', (['"""outfile"""'], {}), "('outfile')\n", (1752, 1763), False, 'import click\n'), ((1765, 1878), 'click.option', 'click.option', (['"""-f"""', '"""--format"""', '"""driver_name"""'], {'metavar': '"""NAME"""', 'default': '"""GTiff"""', 'help': '"""Output raster driver."""'}), "('-f', '--format', 'driver_name', metavar='NAME', default=\n 'GTiff', help='Output raster driver.')\n", (1777, 1878), False, 'import click\n'), ((1885, 2048), 'click.option', 'click.option', (['"""-c"""', '"""--creation-option"""'], {'metavar': '"""NAME=VAL"""', 'multiple': '(True)', 'callback': 'str2type.ext.click_cb_key_val', 'help': '"""Output raster creation options."""'}), "('-c', '--creation-option', metavar='NAME=VAL', multiple=True,\n callback=str2type.ext.click_cb_key_val, help=\n 'Output raster creation options.')\n", (1897, 2048), False, 'import click\n'), ((2322, 2598), 'click.option', 'click.option', (['"""-r"""', '"""--resolution"""'], {'type': 'click.FLOAT', 'multiple': '(True)', 'required': '(True)', 'callback': 'cb_res', 'help': '"""Target resolution in georeferenced units. Assumes square pixels unless specified twice. -tr 1 -tr 2 yields pixels that are 1 unit wide and 2 units tall."""'}), "('-r', '--resolution', type=click.FLOAT, multiple=True,\n required=True, callback=cb_res, help=\n 'Target resolution in georeferenced units. Assumes square pixels unless specified twice. 
-tr 1 -tr 2 yields pixels that are 1 unit wide and 2 units tall.'\n )\n", (2334, 2598), False, 'import click\n'), ((2654, 2760), 'click.option', 'click.option', (['"""-n"""', '"""--nodata"""'], {'type': 'click.FLOAT', 'default': '(0.0)', 'help': '"""Nodata value for output raster."""'}), "('-n', '--nodata', type=click.FLOAT, default=0.0, help=\n 'Nodata value for output raster.')\n", (2666, 2760), False, 'import click\n'), ((2767, 2871), 'click.option', 'click.option', (['"""-l"""', '"""--layer"""', '"""layer_name"""'], {'metavar': '"""NAME"""', 'help': '"""Name of input layer to process."""'}), "('-l', '--layer', 'layer_name', metavar='NAME', help=\n 'Name of input layer to process.')\n", (2779, 2871), False, 'import click\n'), ((2878, 2995), 'click.option', 'click.option', (['"""-p"""', '"""--property"""', '"""property_name"""'], {'metavar': '"""NAME"""', 'help': '"""Property to sum. Defaults to density."""'}), "('-p', '--property', 'property_name', metavar='NAME', help=\n 'Property to sum. 
Defaults to density.')\n", (2890, 2995), False, 'import click\n'), ((3002, 3098), 'click.option', 'click.option', (['"""-a"""', '"""--all-touched"""'], {'is_flag': '(True)', 'help': '"""Enable all touched rasterization."""'}), "('-a', '--all-touched', is_flag=True, help=\n 'Enable all touched rasterization.')\n", (3014, 3098), False, 'import click\n'), ((3105, 3255), 'click.option', 'click.option', (['"""--bbox"""'], {'metavar': '"""X_MIN Y_MIN X_MAX Y_MAX"""', 'nargs': '(4)', 'callback': 'cb_bbox', 'help': '"""Only process data within the specified bounding box."""'}), "('--bbox', metavar='X_MIN Y_MIN X_MAX Y_MAX', nargs=4, callback\n =cb_bbox, help='Only process data within the specified bounding box.')\n", (3117, 3255), False, 'import click\n'), ((755, 820), 'click.BadParameter', 'click.BadParameter', (['"""target can only be specified once or twice."""'], {}), "('target can only be specified once or twice.')\n", (773, 820), False, 'import click\n'), ((5381, 5415), 'fiona.open', 'fio.open', (['infile'], {'layer': 'layer_name'}), '(infile, layer=layer_name)\n', (5389, 5415), True, 'import fiona as fio\n'), ((5548, 5685), 'click.BadParameter', 'click.BadParameter', (['("Property `%s\' is an invalid type for summation: `%s\'" % (property_name,\n src.schema[\'properties\'][property_name]))'], {}), '("Property `%s\' is an invalid type for summation: `%s\'" %\n (property_name, src.schema[\'properties\'][property_name]))\n', (5566, 5685), False, 'import click\n'), ((5965, 6034), 'affine.Affine.from_gdal', 'affine.Affine.from_gdal', (['*(v_x_min, x_res, 0.0, v_y_max, 0.0, -y_res)'], {}), '(*(v_x_min, x_res, 0.0, v_y_max, 0.0, -y_res))\n', (5988, 6034), False, 'import affine\n'), ((6303, 6340), 'rasterio.open', 'rio.open', (['outfile', '"""w"""'], {}), "(outfile, 'w', **raster_meta)\n", (6311, 6340), True, 'import rasterio as rio\n'), ((6854, 6927), 'numpy.zeros', 'np.zeros', (['(row_max - row_min, col_max - col_min)'], {'dtype': "dst.meta['dtype']"}), "((row_max - 
row_min, col_max - col_min), dtype=dst.meta['dtype'])\n", (6862, 6927), True, 'import numpy as np\n'), ((7325, 7499), 'rasterio.features.rasterize', 'rasterize', ([], {'shapes': "[feat['geometry']]", 'out_shape': 'data.shape', 'fill': 'dst.nodata', 'transform': 'block_affine', 'all_touched': 'all_touched', 'default_value': 'add_val', 'dtype': 'rio.float64'}), "(shapes=[feat['geometry']], out_shape=data.shape, fill=dst.nodata,\n transform=block_affine, all_touched=all_touched, default_value=add_val,\n dtype=rio.float64)\n", (7334, 7499), False, 'from rasterio.features import rasterize\n')] |
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
from torch.nn import Softmax2d
import torch
from os.path import basename
class plt_loss(object):
    """Callable that draws training and validation loss curves on one figure."""

    def __call__(self, train_loss, valid_loss, figsize=(10, 10), savefig=None, display=False):
        """Plot both curves; optionally save to *savefig* and/or show.

        ``train_loss`` / ``valid_loss`` are sequences of (epoch, loss) pairs.
        """
        curves = (
            (np.array(train_loss), 'blue', "training loss"),
            (np.array(valid_loss), 'green', "validation loss"),
        )
        fig = plt.figure(figsize=figsize)
        for series, colour, series_label in curves:
            plt.plot(series[:, 0], series[:, 1], c=colour, label=series_label)
        plt.legend(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=0.5)
        plt.tight_layout(rect=[0, 0, 1, 1])
        plt.xlabel("epoch")
        plt.ylabel("loss")
        if savefig:
            fig.savefig(savefig, bbox_inches="tight")
        if display:
            plt.show()
        plt.close(fig)
class plt_loss2(object):
    """Plot validation loss curves from a dict mapping metric name to a
    list of (epoch, value) pairs.

    Note: ``train_loss`` is accepted for interface compatibility but is not
    plotted.  The original implementation looped over it, converted each
    series with ``np.array`` and then discarded the result inside a bare
    ``except: pass`` (all plotting of it was commented out) -- that dead
    code has been removed.
    """

    def __call__(self, train_loss, valid_loss, figsize=(10, 10), savefig=None, display=False):
        """Render one line per validation metric.

        Parameters
        ----------
        train_loss : dict
            Unused; kept so existing callers keep working.
        valid_loss : dict
            Maps a metric name to a sequence of (epoch, value) pairs.
        figsize : tuple, optional
            Figure size in inches.
        savefig : str, optional
            If given, path the figure is written to.
        display : bool, optional
            If True, show the figure interactively.
        """
        f = plt.figure(figsize=figsize)
        for name, series in valid_loss.items():
            series = np.array(series)
            plt.plot(series[:, 0], series[:, 1], label="validation " + name)
        plt.legend(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=0.5)
        plt.tight_layout(rect=[0, 0, 1, 1])
        plt.xlabel("epoch")
        plt.ylabel("loss")
        if savefig:
            f.savefig(savefig, bbox_inches="tight")
        if display:
            plt.show()
        plt.close(f)
class plt_loss3(object):
    """Accumulate per-metric loss histories across calls and plot them.

    Each call appends the new (epoch, value) points via :meth:`_setloss`
    and redraws every metric recorded so far.
    """

    def __init__(self, loss=None):
        """Create the accumulator.

        Bug fix: the original signature was ``__init__(self, loss={})``.
        A mutable default argument is evaluated once, so every instance
        created without an argument shared ONE dict and their histories
        bled into each other.  ``None`` is now the sentinel and each
        instance gets its own fresh dict.
        """
        self.loss = {} if loss is None else loss

    def _setloss(self, l):
        """Append each (epoch, value) entry of *l* to the stored history."""
        for k, v in l.items():
            if self.loss.get(k) is not None:
                self.loss[k].append(v)
            else:
                self.loss[k] = [v]

    def __call__(self, loss, figsize=(10, 10), savefig=None, display=False):
        """Record *loss* and plot one line per accumulated metric.

        Parameters
        ----------
        loss : dict
            Maps a metric name to one new (epoch, value) entry.
        figsize : tuple, optional
        savefig : str, optional
            If given, path the figure is written to.
        display : bool, optional
            If True, show the figure interactively.
        """
        f = plt.figure(figsize=figsize)
        self._setloss(loss)
        for k, v in self.loss.items():
            v = np.array(v)
            plt.plot(v[:, 0], v[:, 1], label=k)
        plt.legend(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=0.5)
        plt.tight_layout(rect=[0, 0, 1, 1])
        plt.xlabel("epoch")
        plt.ylabel("loss")
        if savefig:
            f.savefig(savefig, bbox_inches="tight")
        if display:
            plt.show()
        plt.close(f)
class PltPerClassMetrics(object):
    # Plots per-class precision/recall/f-score bars plus an f-score-vs-support
    # scatter, one figure per (source, target) pair in a nested dict of
    # confusion matrices: conf_matrix[source][target] -> 2D array `cm`.
    def __call__(self, conf_matrix,labels=None,figsize=(20, 20), savefig=None, display=False):
        """Derive per-class precision, recall and f-score from each confusion
        matrix and plot them.

        Parameters
        ----------
        conf_matrix : dict of dict
            conf_matrix[source_path][target_path] is a square confusion
            matrix (rows = true class, cols = predicted -- assumed from the
            TP/FP/FN axis usage below; TODO confirm).
        labels : sequence of str, optional
            Class names; defaults to stringified class indices.
        savefig : str, optional
            Path prefix; "_source_{}_target_{}.png" is appended per figure.
        display : bool, optional
            If True, show each figure interactively.
        """
        for source, target_data in conf_matrix.items():
            for target,cm in target_data.items():
                # Keys look like file paths; keep only the basename for titles.
                source = basename(source)
                target = basename(target)
                # Per-class counts: diagonal = true positives, column sums
                # minus TP = false positives, row sums minus TP = false negatives.
                TP = np.diag(cm)
                FP = np.sum(cm, axis=0) - TP
                FN = np.sum(cm, axis=1) - TP
                num_classes = cm.shape[0]
                # TN = []
                # for i in range(num_classes):
                #     temp = np.delete(cm, i, 0)   # delete ith row
                #     temp = np.delete(temp, i, 1)  # delete ith column
                #     TN.append(sum(sum(temp)))
                tp_fp=TP + FP
                tp_fn=TP + FN
                # `where=` guards against division by zero (empty classes).
                # NOTE(review): `out=np.zeros_like(TP)` inherits cm's dtype;
                # if cm is an integer array np.divide cannot cast the float
                # result into it -- presumably cm arrives as float; confirm.
                precision = np.divide(TP,tp_fp , out=np.zeros_like(TP), where=tp_fp!=0)
                recall = np.divide(TP,tp_fn , out=np.zeros_like(TP), where=tp_fn!=0)
                prec_rec=precision+recall
                fscore=np.divide(2*precision*recall,prec_rec , out=np.zeros_like(TP), where=prec_rec!=0)
                indices=np.arange(num_classes)
                if labels is None:
                    labels=np.array([str(i) for i in indices])
                # Keep only classes with at least one ground-truth sample.
                suf=tp_fn>0
                indices=indices[suf]#[1:]
                labels=labels[suf]#[1:]
                precision=precision[suf]#[1:]
                recall=recall[suf]#[1:]
                fscoret=fscore[suf]#[1:]
                f,ax = plt.subplots(1,2,figsize=figsize)
                plt.title("Source : {} Target: {}".format(source,target))
                # Left panel: grouped horizontal bars, one row per kept class.
                ax[0].barh(indices, precision, .2, label="precision", color='navy')
                ax[0].barh(indices + .3, recall, .2, label="recall",color='c')
                ax[0].barh(indices + .6, fscoret, .2, label="f-score", color='darkorange')
                ax[0].set_yticks(())
                ax[0].legend(loc='best')
                # ax[0].subplots_adjust(left=.25)
                # ax[0].subplots_adjust(top=.95)
                # ax[0].subplots_adjust(bottom=.05)
                for i, c in zip(indices, labels):
                    ax[0].text(-.3, i, c)
                # Right panel: f-score vs log10(support).  `x` and `fscore`
                # are full-length arrays, indexed by the ORIGINAL class ids
                # kept in `indices`.
                # NOTE(review): same integer-`out` casting caveat as above
                # applies to np.log10 with out=np.zeros_like(tp_fn).
                x=np.log10( tp_fn,out=np.zeros_like(tp_fn), where=tp_fn!=0)
                ax[1].scatter(x,fscore,)
                for i, c in zip(indices, labels):
                    ax[1].annotate(c, (x[i], fscore[i]))
                ax[1].set_xlabel("log support")
                ax[1].set_ylabel("fscore")
                # ax[1].set_xscale('log')
                if savefig:
                    f.savefig(savefig+"_source_{}_target_{}.png".format(source,target), bbox_inches="tight")
                if display:
                    plt.show()
                plt.close(f)
                f = None
                # Reset so the NEXT (source, target) pair regenerates default
                # labels -- `labels` was overwritten by the filtered view above.
                labels=None
class plt_kappa(object):
    """Plot training and validation kappa curves over epochs."""

    def __call__(self, train_kappa, valid_kappa, figsize=(10, 10), savefig=None, display=False):
        """Plot both kappa curves; optionally save and/or show the figure.

        Parameters
        ----------
        train_kappa, valid_kappa : sequence
            Sequences of (epoch, kappa) pairs.
        figsize : tuple, optional
        savefig : str, optional
            If given, path the figure is written to.
        display : bool, optional
            If True, show the figure interactively.
        """
        train_kappa = np.array(train_kappa)
        valid_kappa = np.array(valid_kappa)
        f = plt.figure(figsize=figsize)
        plt.plot(train_kappa[:, 0], train_kappa[:, 1], c='blue', label="training kappa")
        plt.plot(valid_kappa[:, 0], valid_kappa[:, 1], c='green', label="validation kappa")
        plt.legend(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=0.5)
        plt.tight_layout(rect=[0, 0, 1, 1])
        plt.xlabel("epoch")
        # Bug fix: the y axis was labelled "loss" (copy-paste from plt_loss)
        # even though the plotted quantity is kappa.
        plt.ylabel("kappa")
        if savefig:
            f.savefig(savefig, bbox_inches="tight")
        if display:
            plt.show()
        plt.close(f)
class plt_scatter(object):
    """Draw one scatter series per (x, y, colour, label) group on a single
    figure, with axis titles supplied by the caller."""

    def __call__(self, X, Y, C, label, xlabel, ylabel, figsize=(10, 10), savefig=None, display=False):
        """Scatter every series, then optionally save and/or display.

        ``X``, ``Y``, ``C`` and ``label`` are parallel sequences with one
        entry per series.
        """
        fig = plt.figure(figsize=figsize)
        for xs, ys, colour, series_name in zip(X, Y, C, label):
            plt.scatter(xs, ys, c=np.array([colour]), label=series_name,
                        edgecolors='black', s=100)
        plt.legend(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=0.5)
        plt.tight_layout(rect=[0, 0, 1, 1])
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        if savefig:
            fig.savefig(savefig, bbox_inches="tight")
        if display:
            plt.show()
        plt.close(fig)
class plt_image(object):
    """Visualisation helpers for segmentation results: side-by-side plots of
    inputs, ground-truth labels and network outputs.

    ``src_type`` / ``tgt_type`` arguments are label-set descriptors expected
    to expose ``id_labels``, ``labels_name`` and ``matplotlib_cmap``
    attributes (project-specific objects -- TODO confirm exact contract).
    """

    def __init__(self):
        pass

    def masks_to_img(self, masks):
        """Collapse a (C, H, W) stack of per-class masks into an (H, W)
        label image with 1-based class indices."""
        return np.argmax(masks, axis=0) + 1

    def probability_map(self, output):
        """Return the per-pixel maximum softmax probability of *output*,
        an (N, C, H, W) tensor of raw class scores."""
        m = Softmax2d()
        prob, _ = torch.max(m(output), dim=1)
        return prob

    def colorbar(self, fig, ax, cmap, labels_name):
        """Attach a categorical colorbar labelled with *labels_name* to *ax*."""
        n_labels = len(labels_name)
        mappable = cm.ScalarMappable(cmap=cmap)
        mappable.set_array([])
        mappable.set_clim(0.5, n_labels + 0.5)
        colorbar = fig.colorbar(mappable, ax=ax)
        colorbar.set_ticks(np.linspace(1, n_labels + 1, n_labels + 1))
        colorbar.set_ticklabels(labels_name)
        # Shrink the tick text for long label lists so they stay readable.
        if len(labels_name) > 30:
            colorbar.ax.tick_params(labelsize=5)
        else:
            colorbar.ax.tick_params(labelsize=15)

    def normalize_value_for_diplay(self, inputs, src_type):
        """Remap the raw label ids in *inputs* onto the contiguous 1..N range
        used by the colormap.  (Method-name typo kept for compatibility.)"""
        for i in range(len(src_type.id_labels)):
            inputs = np.where(inputs == src_type.id_labels[i], i + 1, inputs)
        return inputs

    def show_res(self, inputs, src_type, labels, tgt_type, outputs, save_path, display=False):
        """Plot a 3x3 grid -- three samples, each shown as input / label /
        prediction -- and save the figure to *save_path*."""
        fig, axs = plt.subplots(3, 3, figsize=(30, 20))
        for i in range(3):
            show_labels = self.masks_to_img(labels.cpu().numpy()[i]).astype("uint8")
            if outputs.shape[1] > 1:
                show_outputs = self.masks_to_img(outputs.cpu().detach().numpy()[i]).astype("uint8")
            else:
                # Bug fix: the original read ``show_outputs`` before it was
                # ever assigned (``show_outputs=show_outputs.cpu()...``),
                # raising UnboundLocalError.  Read from ``outputs`` instead,
                # matching show_one_res below.
                show_outputs = outputs.cpu().detach().numpy()[i][0]
            input = inputs.cpu().detach().numpy()[i][0]
            input = self.normalize_value_for_diplay(input, src_type)
            axs[i][0].imshow(input, cmap=src_type.matplotlib_cmap, vmin=1, vmax=len(src_type.labels_name), interpolation='nearest')
            axs[i][0].axis('off')
            self.colorbar(fig, axs[i][0], src_type.matplotlib_cmap, src_type.labels_name)
            axs[i][1].imshow(show_labels, cmap=tgt_type.matplotlib_cmap, vmin=1, vmax=len(tgt_type.labels_name), interpolation='nearest')
            axs[i][1].axis('off')
            self.colorbar(fig, axs[i][1], tgt_type.matplotlib_cmap, tgt_type.labels_name)
            axs[i][2].imshow(show_outputs, cmap=tgt_type.matplotlib_cmap, vmin=1, vmax=len(tgt_type.labels_name), interpolation='nearest')
            axs[i][2].axis('off')
            self.colorbar(fig, axs[i][2], tgt_type.matplotlib_cmap, tgt_type.labels_name)
        fig.savefig(save_path, bbox_inches="tight")
        if display:
            plt.show()
        plt.close(fig)

    def show_one_res(self, inputs, src_type, labels, tgt_type, outputs, save_path, display=False):
        """Same as :meth:`show_res` but for a single sample (1x3 grid)."""
        fig, axs = plt.subplots(1, 3, figsize=(30, 20))
        show_labels = self.masks_to_img(labels.cpu().numpy()[0]).astype("uint8")
        if outputs.shape[1] > 1:
            show_outputs = self.masks_to_img(outputs.cpu().detach().numpy()[0]).astype("uint8")
        else:
            show_outputs = outputs.cpu().detach().numpy()[0][0]
        input = inputs.cpu().detach().numpy()[0][0]
        input = self.normalize_value_for_diplay(input, src_type)
        axs[0].imshow(input, cmap=src_type.matplotlib_cmap, vmin=1, vmax=len(src_type.labels_name), interpolation='nearest')
        axs[0].axis('off')
        self.colorbar(fig, axs[0], src_type.matplotlib_cmap, src_type.labels_name)
        axs[1].imshow(show_labels, cmap=tgt_type.matplotlib_cmap, vmin=1, vmax=len(tgt_type.labels_name), interpolation='nearest')
        axs[1].axis('off')
        self.colorbar(fig, axs[1], tgt_type.matplotlib_cmap, tgt_type.labels_name)
        axs[2].imshow(show_outputs, cmap=tgt_type.matplotlib_cmap, vmin=1, vmax=len(tgt_type.labels_name), interpolation='nearest')
        axs[2].axis('off')
        self.colorbar(fig, axs[2], tgt_type.matplotlib_cmap, tgt_type.labels_name)
        fig.savefig(save_path, bbox_inches="tight")
        if display:
            plt.show()
        plt.close(fig)

    def show_probability_map(self, inputs, src_type, labels, tgt_type, outputs, save_path, display=False):
        """Plot, for three samples, the label image, the predicted label image
        and the per-pixel maximum softmax probability map."""
        fig, axs = plt.subplots(3, 3, figsize=(30, 20))
        for i in range(3):
            show_labels = self.masks_to_img(labels.cpu().numpy()[i]).astype("uint8")
            show_outputs = self.masks_to_img(outputs.cpu().detach().numpy()[i]).astype("uint8")
            prob = self.probability_map(outputs).cpu().detach().numpy()[i]
            p = axs[i][2].imshow(prob, cmap='magma', vmin=0, vmax=1)
            axs[i][2].axis('off')
            fig.colorbar(p, ax=axs[i][2])
            axs[i][0].imshow(show_labels, cmap=tgt_type.matplotlib_cmap, vmin=1, vmax=len(tgt_type.labels_name), interpolation='nearest')
            axs[i][0].axis('off')
            self.colorbar(fig, axs[i][0], tgt_type.matplotlib_cmap, tgt_type.labels_name)
            axs[i][1].imshow(show_outputs, cmap=tgt_type.matplotlib_cmap, vmin=1, vmax=len(tgt_type.labels_name), interpolation='nearest')
            axs[i][1].axis('off')
            self.colorbar(fig, axs[i][1], tgt_type.matplotlib_cmap, tgt_type.labels_name)
        fig.savefig(save_path, bbox_inches="tight")
        if display:
            plt.show()
        plt.close(fig)
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.arange",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"numpy.argmax",
"torch.nn.Softmax2d",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.sh... | [((290, 310), 'numpy.array', 'np.array', (['train_loss'], {}), '(train_loss)\n', (298, 310), True, 'import numpy as np\n'), ((330, 350), 'numpy.array', 'np.array', (['valid_loss'], {}), '(valid_loss)\n', (338, 350), True, 'import numpy as np\n'), ((363, 390), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (373, 390), True, 'import matplotlib.pyplot as plt\n'), ((399, 476), 'matplotlib.pyplot.plot', 'plt.plot', (['train_loss[:, 0]', 'train_loss[:, 1]'], {'c': '"""blue"""', 'label': '"""training loss"""'}), "(train_loss[:, 0], train_loss[:, 1], c='blue', label='training loss')\n", (407, 476), True, 'import matplotlib.pyplot as plt\n'), ((481, 566), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_loss[:, 0]', 'valid_loss[:, 1]'], {'c': '"""green"""', 'label': '"""validation loss"""'}), "(valid_loss[:, 0], valid_loss[:, 1], c='green', label='validation loss'\n )\n", (489, 566), True, 'import matplotlib.pyplot as plt\n'), ((568, 643), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.0, 0.5)', 'loc': '"""center left"""', 'borderaxespad': '(0.5)'}), "(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=0.5)\n", (578, 643), True, 'import matplotlib.pyplot as plt\n'), ((652, 687), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0, 1, 1]'}), '(rect=[0, 0, 1, 1])\n', (668, 687), True, 'import matplotlib.pyplot as plt\n'), ((696, 715), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (706, 715), True, 'import matplotlib.pyplot as plt\n'), ((724, 742), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (734, 742), True, 'import matplotlib.pyplot as plt\n'), ((866, 878), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (875, 878), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1140), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', 
(1123, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1904, 1979), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.0, 0.5)', 'loc': '"""center left"""', 'borderaxespad': '(0.5)'}), "(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=0.5)\n", (1914, 1979), True, 'import matplotlib.pyplot as plt\n'), ((1988, 2023), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0, 1, 1]'}), '(rect=[0, 0, 1, 1])\n', (2004, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2032, 2051), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2042, 2051), True, 'import matplotlib.pyplot as plt\n'), ((2060, 2078), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2070, 2078), True, 'import matplotlib.pyplot as plt\n'), ((2202, 2214), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (2211, 2214), True, 'import matplotlib.pyplot as plt\n'), ((2592, 2619), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2602, 2619), True, 'import matplotlib.pyplot as plt\n'), ((2766, 2841), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.0, 0.5)', 'loc': '"""center left"""', 'borderaxespad': '(0.5)'}), "(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=0.5)\n", (2776, 2841), True, 'import matplotlib.pyplot as plt\n'), ((2850, 2885), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0, 1, 1]'}), '(rect=[0, 0, 1, 1])\n', (2866, 2885), True, 'import matplotlib.pyplot as plt\n'), ((2894, 2913), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2904, 2913), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2940), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2932, 2940), True, 'import matplotlib.pyplot as plt\n'), ((3064, 3076), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (3073, 3076), True, 
'import matplotlib.pyplot as plt\n'), ((6081, 6102), 'numpy.array', 'np.array', (['train_kappa'], {}), '(train_kappa)\n', (6089, 6102), True, 'import numpy as np\n'), ((6123, 6144), 'numpy.array', 'np.array', (['valid_kappa'], {}), '(valid_kappa)\n', (6131, 6144), True, 'import numpy as np\n'), ((6157, 6184), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6167, 6184), True, 'import matplotlib.pyplot as plt\n'), ((6193, 6278), 'matplotlib.pyplot.plot', 'plt.plot', (['train_kappa[:, 0]', 'train_kappa[:, 1]'], {'c': '"""blue"""', 'label': '"""training kappa"""'}), "(train_kappa[:, 0], train_kappa[:, 1], c='blue', label='training kappa'\n )\n", (6201, 6278), True, 'import matplotlib.pyplot as plt\n'), ((6278, 6366), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_kappa[:, 0]', 'valid_kappa[:, 1]'], {'c': '"""green"""', 'label': '"""validation kappa"""'}), "(valid_kappa[:, 0], valid_kappa[:, 1], c='green', label=\n 'validation kappa')\n", (6286, 6366), True, 'import matplotlib.pyplot as plt\n'), ((6368, 6443), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.0, 0.5)', 'loc': '"""center left"""', 'borderaxespad': '(0.5)'}), "(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=0.5)\n", (6378, 6443), True, 'import matplotlib.pyplot as plt\n'), ((6452, 6487), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0, 1, 1]'}), '(rect=[0, 0, 1, 1])\n', (6468, 6487), True, 'import matplotlib.pyplot as plt\n'), ((6496, 6515), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6506, 6515), True, 'import matplotlib.pyplot as plt\n'), ((6524, 6542), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (6534, 6542), True, 'import matplotlib.pyplot as plt\n'), ((6666, 6678), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (6675, 6678), True, 'import matplotlib.pyplot as plt\n'), ((6836, 6863), 'matplotlib.pyplot.figure', 
'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6846, 6863), True, 'import matplotlib.pyplot as plt\n'), ((6996, 7071), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.0, 0.5)', 'loc': '"""center left"""', 'borderaxespad': '(0.5)'}), "(bbox_to_anchor=(1.0, 0.5), loc='center left', borderaxespad=0.5)\n", (7006, 7071), True, 'import matplotlib.pyplot as plt\n'), ((7080, 7115), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0, 1, 1]'}), '(rect=[0, 0, 1, 1])\n', (7096, 7115), True, 'import matplotlib.pyplot as plt\n'), ((7124, 7142), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (7134, 7142), True, 'import matplotlib.pyplot as plt\n'), ((7151, 7169), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (7161, 7169), True, 'import matplotlib.pyplot as plt\n'), ((7293, 7305), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (7302, 7305), True, 'import matplotlib.pyplot as plt\n'), ((7513, 7524), 'torch.nn.Softmax2d', 'Softmax2d', ([], {}), '()\n', (7522, 7524), False, 'from torch.nn import Softmax2d\n'), ((7698, 7726), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap'}), '(cmap=cmap)\n', (7715, 7726), True, 'import matplotlib.cm as cm\n'), ((8441, 8477), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(30, 20)'}), '(3, 3, figsize=(30, 20))\n', (8453, 8477), True, 'import matplotlib.pyplot as plt\n'), ((9854, 9868), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9863, 9868), True, 'import matplotlib.pyplot as plt\n'), ((10003, 10039), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(30, 20)'}), '(1, 3, figsize=(30, 20))\n', (10015, 10039), True, 'import matplotlib.pyplot as plt\n'), ((11293, 11307), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (11302, 11307), True, 'import matplotlib.pyplot as plt\n'), ((11449, 11485), 
'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(30, 20)'}), '(3, 3, figsize=(30, 20))\n', (11461, 11485), True, 'import matplotlib.pyplot as plt\n'), ((12574, 12588), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (12583, 12588), True, 'import matplotlib.pyplot as plt\n'), ((847, 857), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (855, 857), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1835), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1832, 1835), True, 'import numpy as np\n'), ((1848, 1899), 'matplotlib.pyplot.plot', 'plt.plot', (['v[:, 0]', 'v[:, 1]'], {'label': "('validation ' + k)"}), "(v[:, 0], v[:, 1], label='validation ' + k)\n", (1856, 1899), True, 'import matplotlib.pyplot as plt\n'), ((2183, 2193), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2191, 2193), True, 'import matplotlib.pyplot as plt\n'), ((2700, 2711), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2708, 2711), True, 'import numpy as np\n'), ((2724, 2759), 'matplotlib.pyplot.plot', 'plt.plot', (['v[:, 0]', 'v[:, 1]'], {'label': 'k'}), '(v[:, 0], v[:, 1], label=k)\n', (2732, 2759), True, 'import matplotlib.pyplot as plt\n'), ((3045, 3055), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3053, 3055), True, 'import matplotlib.pyplot as plt\n'), ((6647, 6657), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6655, 6657), True, 'import matplotlib.pyplot as plt\n'), ((7274, 7284), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7282, 7284), True, 'import matplotlib.pyplot as plt\n'), ((7435, 7459), 'numpy.argmax', 'np.argmax', (['masks'], {'axis': '(0)'}), '(masks, axis=0)\n', (7444, 7459), True, 'import numpy as np\n'), ((7881, 7923), 'numpy.linspace', 'np.linspace', (['(1)', '(n_labels + 1)', '(n_labels + 1)'], {}), '(1, n_labels + 1, n_labels + 1)\n', (7892, 7923), True, 'import numpy as np\n'), ((8255, 8311), 'numpy.where', 'np.where', (['(inputs == src_type.id_labels[i])', '(i + 1)', 
'inputs'], {}), '(inputs == src_type.id_labels[i], i + 1, inputs)\n', (8263, 8311), True, 'import numpy as np\n'), ((9835, 9845), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9843, 9845), True, 'import matplotlib.pyplot as plt\n'), ((11274, 11284), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11282, 11284), True, 'import matplotlib.pyplot as plt\n'), ((12555, 12565), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12563, 12565), True, 'import matplotlib.pyplot as plt\n'), ((1215, 1226), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1223, 1226), True, 'import numpy as np\n'), ((3340, 3356), 'os.path.basename', 'basename', (['source'], {}), '(source)\n', (3348, 3356), False, 'from os.path import basename\n'), ((3382, 3398), 'os.path.basename', 'basename', (['target'], {}), '(target)\n', (3390, 3398), False, 'from os.path import basename\n'), ((3420, 3431), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (3427, 3431), True, 'import numpy as np\n'), ((4231, 4253), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (4240, 4253), True, 'import numpy as np\n'), ((4616, 4651), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': 'figsize'}), '(1, 2, figsize=figsize)\n', (4628, 4651), True, 'import matplotlib.pyplot as plt\n'), ((5871, 5883), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (5880, 5883), True, 'import matplotlib.pyplot as plt\n'), ((3453, 3471), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(0)'}), '(cm, axis=0)\n', (3459, 3471), True, 'import numpy as np\n'), ((3498, 3516), 'numpy.sum', 'np.sum', (['cm'], {'axis': '(1)'}), '(cm, axis=1)\n', (3504, 3516), True, 'import numpy as np\n'), ((5844, 5854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5852, 5854), True, 'import matplotlib.pyplot as plt\n'), ((6937, 6950), 'numpy.array', 'np.array', (['[c]'], {}), '([c])\n', (6945, 6950), True, 'import numpy as np\n'), ((3939, 3956), 'numpy.zeros_like', 
'np.zeros_like', (['TP'], {}), '(TP)\n', (3952, 3956), True, 'import numpy as np\n'), ((4024, 4041), 'numpy.zeros_like', 'np.zeros_like', (['TP'], {}), '(TP)\n', (4037, 4041), True, 'import numpy as np\n'), ((4168, 4185), 'numpy.zeros_like', 'np.zeros_like', (['TP'], {}), '(TP)\n', (4181, 4185), True, 'import numpy as np\n'), ((5339, 5359), 'numpy.zeros_like', 'np.zeros_like', (['tp_fn'], {}), '(tp_fn)\n', (5352, 5359), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
This module contains dyPolyChord's high-level functionality for
performing dynamic nested sampling calculations. This is done using the
algorithm described in Appendix F of "Dynamic nested sampling: an
improved algorithm for parameter estimation and evidence calculation"
(Higson et al., 2019). For more details see the dyPolyChord
doumentation at https://dypolychord.readthedocs.io/en/latest/.
"""
from __future__ import division # Enforce float division for Python2
import copy
import os
import traceback
import sys
import shutil
import warnings
import numpy as np
import scipy.signal
import nestcheck.data_processing
import nestcheck.io_utils
import nestcheck.write_polychord_output
import dyPolyChord.nlive_allocation
import dyPolyChord.output_processing
# The only main public-facing API for this module is the run_dypolychord
# function.
__all__ = ['run_dypolychord']
@nestcheck.io_utils.timing_decorator
def run_dypolychord(run_polychord, dynamic_goal, settings_dict_in, **kwargs):
    r"""Performs dynamic nested sampling using the algorithm described in
    Appendix F of "Dynamic nested sampling: an improved algorithm for
    parameter estimation and evidence calculation" (Higson et al., 2019).
    The likelihood, prior and PolyChord sampler are contained in the
    run_polychord callable (first argument).

    Dynamic nested sampling is performed in 4 steps:

    1) Generate an initial nested sampling run with a constant number of live
    points n_init. This process is run in chunks using PolyChord's max_ndead
    setting to allow periodic saving of .resume files so the initial run can
    be resumed at different points.

    2) Calculate an allocation of the number of live points at each likelihood
    for use in step 3. Also clean up resume files and save relevant
    information.

    3) Generate a dynamic nested sampling run using the calculated live point
    allocation from step 2.

    4) Combine the initial and dynamic runs and write combined output files
    in the PolyChord format. Remove the intermediate output files produced
    which are no longer needed.

    The output files are of the same format produced by PolyChord, and
    contain posterior samples and an estimate of the Bayesian evidence.
    Further analysis, including estimating uncertainties, can be performed
    with the nestcheck package.

    Like for PolyChord, the output files are saved in base_dir (specified
    in settings_dict_in, default value is 'chains'). Their names are
    determined by file_root (also specified in settings_dict_in).
    dyPolyChord ensures the following files are always produced:

    * [base_dir]/[file_root].stats: run statistics including an estimate of
      the Bayesian evidence;
    * [base_dir]/[file_root]_dead.txt: posterior samples;
    * [base_dir]/[file_root]_dead-birth.txt: as above but with an extra
      column containing information about when points were sampled.

    For more information about the output format, see PolyChord's
    documentation. Note that dyPolyChord is not able to produce all of the
    types of output files made by PolyChord - see check_settings'
    documentation for more information.

    In addition, a number of intermediate files are produced during the
    dynamic nested sampling process which are removed by default when the
    process finishes. See clean_extra_output's documentation for more details.

    Parameters
    ----------
    run_polychord: callable
        A callable which performs nested sampling using PolyChord
        for a given likelihood and prior, and takes a settings dictionary as
        its argument. Note that the likelihood and prior must be specified
        within the run_polychord callable. For helper functions for creating
        such callables, see the documentation for dyPolyChord.pypolychord
        (Python likelihoods) and dyPolyChord.polychord (C++ and Fortran
        likelihoods). Examples can be found at:
        https://dypolychord.readthedocs.io/en/latest/demo.html
    dynamic_goal: float or int
        Number in [0, 1] which determines how to allocate computational effort
        between parameter estimation and evidence calculation. See the dynamic
        nested sampling paper for more details.
    settings_dict: dict
        PolyChord settings to use (see check_settings for information on
        allowed and default settings).
    nlive_const: int, optional
        Used to calculate total number of samples if max_ndead not specified
        in settings. The total number of samples used in this case is the
        estimated number that would be taken by a nested sampling run with a
        constant number of live points nlive_const.
    ninit: int, optional
        Number of live points to use for the initial exploratory run (Step 1).
    ninit_step: int, optional
        Number of samples taken between saving .resume files in Step 1.
    seed_increment: int, optional
        If random seeding is used (PolyChord seed setting >= 0), this
        increment is added to PolyChord's random seed each time it is run to
        avoid repeated points.
        When running in parallel using MPI, PolyChord hashes the seed with the
        MPI rank using IEOR. Hence you need seed_increment to be > number of
        processors to ensure no two processes use the same seed.
        When running repeated results you need to increment the seed used for
        each run by some number > seed_increment.
    smoothing_filter: func or None, optional
        Smoothing function to apply to the nlive allocation array of target
        live points. Use smoothing_filter=None for no smoothing.
    comm: None or mpi4py MPI.COMM object, optional
        For MPI parallelisation.
    stats_means_errs: bool, optional
        Whether to include estimates of the log evidence logZ and the
        parameter mean values and their uncertainties in the .stats file.
        This is passed to nestcheck's write_run_output; see its documentation
        for more details.
    clean: bool, optional
        Clean the additional output files made by dyPolyChord, leaving only
        output files for the combined run in PolyChord format.
        When debugging this can be set to False to allow inspection of
        intermediate output.
    resume_dyn_run: bool, optional
        Resume a partially completed dyPolyChord run using its cached output
        files. Resuming is only possible if the initial exploratory run
        finished and the process reached the dynamic run stage. If the run
        is resumed with different settings to what were used the first time
        then this may give unexpected results.
    """
    # Pop keyword arguments one by one so leftovers can be reported below.
    try:
        nlive_const = kwargs.pop('nlive_const', settings_dict_in['nlive'])
    except KeyError:
        # If nlive_const is not specified in the arguments or the settings
        # dictionary, default to nlive_const = 100
        nlive_const = kwargs.pop('nlive_const', 100)
    ninit = kwargs.pop('ninit', 10)
    init_step = kwargs.pop('init_step', ninit)
    seed_increment = kwargs.pop('seed_increment', 100)
    # Default smoothing: Savitzky-Golay filter with window width 2 * ninit + 1
    # and a cubic polynomial.
    default_smoothing = (lambda x: scipy.signal.savgol_filter(
        x, 1 + (2 * ninit), 3, mode='nearest'))
    smoothing_filter = kwargs.pop('smoothing_filter', default_smoothing)
    comm = kwargs.pop('comm', None)
    stats_means_errs = kwargs.pop('stats_means_errs', True)
    clean = kwargs.pop('clean', True)
    resume_dyn_run = kwargs.pop('resume_dyn_run', False)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # Set Up
    # ------
    # Set up rank if running with MPI
    if comm is not None:
        rank = comm.Get_rank()
    else:
        rank = 0
    settings_dict = None  # Define for rank != 0
    if rank == 0:
        # Settings are checked (and the posterior-file settings extracted)
        # on the rank 0 process only.
        settings_dict_in, output_settings = check_settings(
            settings_dict_in)
        if (settings_dict_in['seed'] >= 0 and comm is not None and
                comm.Get_size() > 1):
            warnings.warn((
                'N.B. random seeded results will not be reproducible when '
                'running dyPolyChord with multiple MPI processes. You have '
                'seed={} and {} MPI processes.').format(
                    settings_dict_in['seed'], comm.Get_size()), UserWarning)
        root_name = os.path.join(settings_dict_in['base_dir'],
                                 settings_dict_in['file_root'])
    if resume_dyn_run:
        # Check if the files we need to resume the dynamic run all exist.
        files_needed = ['_dyn_info.pkl', '_init_dead.txt', '_dyn_dead.txt',
                        '_dyn.resume']
        files_needed = [root_name + ending for ending in files_needed]
        files_exist = [os.path.isfile(name) for name in files_needed]
        if all(files_exist):
            skip_initial_run = True
            print('resume_dyn_run=True so I am skipping the initial '
                  'exploratory run and resuming the dynamic run')
        else:
            skip_initial_run = False
            # Only print a message if some of the files are present
            if any(files_exist):
                msg = (
                    'resume_dyn_run=True but I could not resume as not '
                    'all of the files I need are present. Perhaps the initial '
                    'exploratory run did not finish? The dyPolyChord process '
                    'can only be resumed from after the dynamic run '
                    'starts.\nFiles I am missing are:')
                for i, name in enumerate(files_needed):
                    if not files_exist[i]:
                        msg += '\n' + name
                print(msg)
    else:
        skip_initial_run = False
    if skip_initial_run:
        # Reload the live point allocation cached at the end of Step 2
        # instead of repeating Steps 1 and 2.
        dyn_info = nestcheck.io_utils.pickle_load(root_name + '_dyn_info')
    else:
        # Step 1: do initial run
        # ----------------------
        if rank == 0:
            # Make a copy of settings_dict so we don't edit settings
            settings_dict = copy.deepcopy(settings_dict_in)
            settings_dict['file_root'] = settings_dict['file_root'] + '_init'
            settings_dict['nlive'] = ninit
        if dynamic_goal == 0:
            # We definitely won't need to resume midway through in this case,
            # so just run PolyChord normally
            run_polychord(settings_dict, comm=comm)
            if rank == 0:
                final_seed = settings_dict['seed']
                if settings_dict['seed'] >= 0:
                    final_seed += seed_increment
                step_ndead = None
                resume_outputs = None
        else:
            # Run in chunks of init_step dead points, saving a .resume file
            # after each chunk so Step 3 can restart from mid-run.
            step_ndead, resume_outputs, final_seed = run_and_save_resumes(
                run_polychord, settings_dict, init_step, seed_increment,
                comm=comm)
        # Step 2: calculate an allocation of live points
        # ----------------------------------------------
        if rank == 0:
            try:
                # Get settings for dynamic run based on initial run
                dyn_info = process_initial_run(
                    settings_dict_in, nlive_const=nlive_const,
                    smoothing_filter=smoothing_filter,
                    step_ndead=step_ndead, resume_outputs=resume_outputs,
                    ninit=ninit, dynamic_goal=dynamic_goal,
                    final_seed=final_seed)
            except:  # pragma: no cover
                # We need a bare except statement here to ensure that if
                # any type of error occurs in the rank == 0 process when
                # running in parallel with MPI then we also abort all the
                # other processes.
                if comm is None or comm.Get_size() == 1:
                    raise  # Just one process so raise error normally.
                else:
                    # Print error info.
                    traceback.print_exc(file=sys.stdout)
                    print('Error in process with rank == 0: forcing MPI abort')
                    sys.stdout.flush()  # Make sure message prints before abort
                    comm.Abort(1)
    # Step 3: do dynamic run
    # ----------------------
    # Get settings for dynamic run
    if rank == 0:
        settings_dict = get_dynamic_settings(settings_dict_in, dyn_info)
        if resume_dyn_run:
            # Pick up the cached _dyn.resume file rather than starting the
            # dynamic run from scratch.
            settings_dict['write_resume'] = True
            settings_dict['read_resume'] = True
    # Do the run
    run_polychord(settings_dict, comm=comm)
    # Step 4: process output and tidy
    # -------------------------------
    if rank == 0:
        try:
            # Combine initial and dynamic runs
            run = dyPolyChord.output_processing.process_dypolychord_run(
                settings_dict_in['file_root'], settings_dict_in['base_dir'],
                dynamic_goal=dynamic_goal)
            # Save combined output in PolyChord format
            nestcheck.write_polychord_output.write_run_output(
                run, stats_means_errs=stats_means_errs, **output_settings)
            if clean:
                # Remove temporary files
                root_name = os.path.join(settings_dict_in['base_dir'],
                                         settings_dict_in['file_root'])
                dyPolyChord.output_processing.clean_extra_output(root_name)
        except:  # pragma: no cover
            # We need a bare except statement here to ensure that if
            # any type of error occurs in the rank == 0 process when
            # running in parallel with MPI then we also abort all the
            # other processes.
            if comm is None or comm.Get_size() == 1:
                raise  # Just one process so raise error normally.
            else:
                # Print error info.
                traceback.print_exc(file=sys.stdout)
                print('Error in process with rank == 0: forcing MPI abort')
                sys.stdout.flush()  # Make sure message prints before abort
                comm.Abort(1)
# Helper functions
# ----------------
def check_settings(settings_dict_in):
    """Validate input PolyChord settings and fill in default values.

    Settings absent from the input dictionary are given default values.
    A handful of settings are mandatory for dyPolyChord to work; if the
    input requests a different value for one of these, a UserWarning is
    issued and the mandatory value is used instead.

    Parameters
    ----------
    settings_dict_in: dict
        PolyChord settings to use.

    Returns
    -------
    settings_dict: dict
        Updated settings dictionary including default and mandatory values.
    output_settings: dict
        Settings for writing output files which are saved until the final
        output files are calculated at the end.
    """
    defaults = {'nlive': 100,
                'num_repeats': 20,
                'file_root': 'temp',
                'base_dir': 'chains',
                'seed': -1,
                'do_clustering': True,
                'max_ndead': -1,
                'equals': True,
                'posteriors': True}
    forced = {'nlives': {},
              'write_dead': True,
              'write_stats': True,
              'write_paramnames': False,
              'write_prior': False,
              'write_live': False,
              'write_resume': False,
              'read_resume': False,
              'cluster_posteriors': False,
              'boost_posterior': 0.0}
    settings_dict = copy.deepcopy(settings_dict_in)
    # Fill in any missing settings with their default values.
    for name, default in defaults.items():
        settings_dict.setdefault(name, default)
    # Warn about (and override) disallowed values of mandatory settings.
    for name, required in forced.items():
        if name in settings_dict_in and settings_dict_in[name] != required:
            warnings.warn((
                'dyPolyChord currently only allows the setting {0}={1}, '
                'so I am proceeding with this. You tried to specify {0}={2}.'
                .format(name, required, settings_dict_in[name])), UserWarning)
            settings_dict[name] = required
    # Defer the posterior-file settings until the combined run is written.
    output_settings = {key: settings_dict[key]
                       for key in ('posteriors', 'equals')}
    for key in output_settings:
        settings_dict[key] = False
    return settings_dict, output_settings
def run_and_save_resumes(run_polychord, settings_dict_in, init_step,
                         seed_increment, comm=None):
    """
    Run PolyChord while pausing after every init_step samples (dead points)
    generated to save a resume file before continuing.

    Parameters
    ----------
    run_polychord: callable
        Callable which runs PolyChord with the desired likelihood and prior,
        and takes a settings dictionary as its argument.
    settings_dict: dict
        PolyChord settings to use (see check_settings for information on
        allowed and default settings).
    ninit_step: int, optional
        Number of samples taken between saving .resume files in Step 1.
    seed_increment: int, optional
        If seeding is used (PolyChord seed setting >= 0), this increment is
        added to PolyChord's random seed each time it is run to avoid
        repeated points.
        When running in parallel using MPI, PolyChord hashes the seed with the
        MPI rank using IEOR. Hence you need seed_increment to be > number of
        processors to ensure no two processes use the same seed.
        When running repeated results you need to increment the seed used for
        each run by some number > seed_increment.
    comm: None or mpi4py MPI.COMM object, optional
        For MPI parallelisation.

    Returns
    -------
    step_ndead: list of ints
        Numbers of dead points at which resume files are saved.
    resume_outputs: dict
        Dictionary containing run output (contents of .stats file) at each
        resume. Keys are elements of step_ndead.
    final_seed: int
        Random seed. This is incremented after each run so it can be used
        when resuming without generating correlated points.
    """
    settings_dict = copy.deepcopy(settings_dict_in)
    # set up rank if running with MPI
    if comm is not None:
        # Define variables for rank != 0
        step_ndead = None
        resume_outputs = None
        final_seed = None
        # Get rank
        rank = comm.Get_rank()
    else:
        rank = 0
    if rank == 0:
        root_name = os.path.join(settings_dict['base_dir'],
                                 settings_dict['file_root'])
        # Remove any stale resume file so PolyChord starts afresh.
        try:
            os.remove(root_name + '.resume')
        except OSError:
            pass
        settings_dict['write_resume'] = True
        settings_dict['read_resume'] = True
        step_ndead = []
        resume_outputs = {}
    add_points = True
    # Repeatedly resume PolyChord, extending max_ndead by init_step each
    # iteration, until a run adds no further dead points.
    while add_points:
        if rank == 0:
            settings_dict['max_ndead'] = (len(step_ndead) + 1) * init_step
        run_polychord(settings_dict, comm=comm)
        if rank == 0:
            try:
                if settings_dict['seed'] >= 0:
                    settings_dict['seed'] += seed_increment
                run_output = nestcheck.data_processing.process_polychord_stats(
                    settings_dict['file_root'], settings_dict['base_dir'])
                # Store run outputs for getting number of likelihood calls
                # while accounting for resuming a run.
                resume_outputs[run_output['ndead']] = run_output
                step_ndead.append(run_output['ndead'] - settings_dict['nlive'])
                # If the dead point count stopped growing, the run has
                # finished and we should stop adding points.
                if len(step_ndead) >= 2 and step_ndead[-1] == step_ndead[-2]:
                    add_points = False
                # store resume file in new file path
                shutil.copyfile(
                    root_name + '.resume',
                    root_name + '_' + str(step_ndead[-1]) + '.resume')
            except:  # pragma: no cover
                # We need a bare except statement here to ensure that if
                # any type of error occurs in the rank == 0 process when
                # running in parallel with MPI then we also abort all the
                # other processes.
                if comm is None or comm.Get_size() == 1:
                    raise  # Just one process so raise error normally.
                else:
                    # Print error info.
                    traceback.print_exc(file=sys.stdout)
                    print('Error in process with rank == 0: forcing MPI abort')
                    sys.stdout.flush()  # Make sure message prints before abort
                    comm.Abort(1)
        if comm is not None:
            # Rank 0 decides whether to continue; share with all processes.
            add_points = comm.bcast(add_points, root=0)
    if rank == 0:
        final_seed = settings_dict['seed']
    return step_ndead, resume_outputs, final_seed
def process_initial_run(settings_dict_in, **kwargs):
    """Loads the initial exploratory run and analyses it to create information
    about the second, dynamic run. This information is returned as a
    dictionary and also cached.

    Parameters
    ----------
    settings_dict_in: dict
        Initial PolyChord settings (see check_settings for information on
        allowed and default settings).
    dynamic_goal: float or int
        Number in [0, 1] which determines how to allocate computational effort
        between parameter estimation and evidence calculation. See the dynamic
        nested sampling paper for more details.
    nlive_const: int
        Used to calculate total number of samples if max_ndead not specified
        in settings. The total number of samples used is the estimated number
        that would be taken by a nested sampling run with a constant number of
        live points nlive_const.
    ninit: int
        Number of live points to use for the initial exploratory run (Step 1).
    smoothing_filter: func
        Smoothing to apply to the nlive allocation (if any).
    step_ndead: list of ints
        Numbers of dead points at which resume files are saved.
    resume_outputs: dict
        Dictionary containing run output (contents of .stats file) at each
        resume. Keys are elements of step_ndead.
    final_seed: int
        Random seed at the end of the initial run.

    Returns
    -------
    dyn_info: dict
        Information about the second dynamic run which is calculated from
        analysing the initial exploratory run.
    """
    dynamic_goal = kwargs.pop('dynamic_goal')
    nlive_const = kwargs.pop('nlive_const')
    ninit = kwargs.pop('ninit')
    smoothing_filter = kwargs.pop('smoothing_filter')
    step_ndead = kwargs.pop('step_ndead')
    resume_outputs = kwargs.pop('resume_outputs')
    final_seed = kwargs.pop('final_seed')
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # Load the initial exploratory run's output from disk.
    init_run = nestcheck.data_processing.process_polychord_run(
        settings_dict_in['file_root'] + '_init',
        settings_dict_in['base_dir'])
    # Calculate max number of samples
    if settings_dict_in['max_ndead'] > 0:
        samp_tot = settings_dict_in['max_ndead']
        assert (settings_dict_in['max_ndead']
                > init_run['logl'].shape[0]), (
            'all points used in inital run - '
            'none left for dynamic run!')
    else:
        # Scale the sample budget from ninit up to nlive_const live points.
        samp_tot = init_run['logl'].shape[0] * (nlive_const / ninit)
        assert nlive_const > ninit
    # Compute the target live point allocation for the dynamic run.
    dyn_info = dyPolyChord.nlive_allocation.allocate(
        init_run, samp_tot, dynamic_goal,
        smoothing_filter=smoothing_filter)
    dyn_info['final_seed'] = final_seed
    dyn_info['ninit'] = ninit
    root_name = os.path.join(settings_dict_in['base_dir'],
                             settings_dict_in['file_root'])
    if dyn_info['peak_start_ind'] != 0:
        # subtract 1 as ndead=1 corresponds to point 0
        resume_steps = np.asarray(step_ndead) - 1
        # Work out which resume file to load. This is the first resume file
        # before dyn_info['peak_start_ind']. If there are no such files then
        # we do not reload and instead start the second dynamic run by
        # sampling from the entire prior.
        indexes_before_peak = np.where(
            resume_steps < dyn_info['peak_start_ind'])[0]
        if indexes_before_peak.shape[0] > 0:
            resume_ndead = step_ndead[indexes_before_peak[-1]]
            # copy resume step to dynamic file root
            shutil.copyfile(
                root_name + '_init_' + str(resume_ndead) + '.resume',
                root_name + '_dyn.resume')
            # Save resume info
            dyn_info['resume_ndead'] = resume_ndead
            try:
                dyn_info['resume_nlike'] = (
                    resume_outputs[resume_ndead]['nlike'])
            except KeyError:
                pass  # protect from error reading nlike from .stats file
    if dynamic_goal != 0:
        # Remove all the temporary resume files. Use set to avoid
        # duplicates as these cause OSErrors.
        for snd in set(step_ndead):
            os.remove(root_name + '_init_' + str(snd) + '.resume')
    # Cache dyn_info so the dynamic run can be resumed later if needed.
    nestcheck.io_utils.pickle_save(
        dyn_info, root_name + '_dyn_info', overwrite_existing=True)
    return dyn_info
def get_dynamic_settings(settings_dict_in, dyn_info):
    """Build the PolyChord settings dictionary for the second (dynamic) run.

    Parameters
    ----------
    settings_dict_in: dict
        Initial PolyChord settings (see check_settings for information on
        allowed and default settings).
    dyn_info: dict
        Information derived from the initial exploratory run, including the
        live point allocation ('nlives_dict'), random seed and resume point.

    Returns
    -------
    settings_dict: dict
        PolyChord settings for dynamic run.
    """
    dyn_settings = copy.deepcopy(settings_dict_in)
    dyn_settings['seed'] = dyn_info['final_seed']
    if dyn_settings['seed'] >= 0:
        assert settings_dict_in['seed'] >= 0, (
            'if input seed was <0 it should not have been edited')
    resuming = dyn_info['peak_start_ind'] != 0
    if resuming:
        # Resuming the initial run part-way through: keep its nlive.
        dyn_settings['nlive'] = dyn_info['ninit']
    else:
        # Starting from scratch: begin with the allocation's first value.
        nlives_dict = dyn_info['nlives_dict']
        dyn_settings['nlive'] = nlives_dict[min(nlives_dict)]
    dyn_settings['nlives'] = dyn_info['nlives_dict']
    # To write .ini files correctly, read_resume must be type bool not
    # np.bool
    dyn_settings['read_resume'] = bool(resuming)
    dyn_settings['file_root'] = settings_dict_in['file_root'] + '_dyn'
    return dyn_settings
| [
"numpy.where",
"os.path.join",
"numpy.asarray",
"os.path.isfile",
"copy.deepcopy",
"sys.stdout.flush",
"traceback.print_exc",
"os.remove"
] | [((8355, 8428), 'os.path.join', 'os.path.join', (["settings_dict_in['base_dir']", "settings_dict_in['file_root']"], {}), "(settings_dict_in['base_dir'], settings_dict_in['file_root'])\n", (8367, 8428), False, 'import os\n'), ((15643, 15674), 'copy.deepcopy', 'copy.deepcopy', (['settings_dict_in'], {}), '(settings_dict_in)\n', (15656, 15674), False, 'import copy\n'), ((18366, 18397), 'copy.deepcopy', 'copy.deepcopy', (['settings_dict_in'], {}), '(settings_dict_in)\n', (18379, 18397), False, 'import copy\n'), ((23863, 23936), 'os.path.join', 'os.path.join', (["settings_dict_in['base_dir']", "settings_dict_in['file_root']"], {}), "(settings_dict_in['base_dir'], settings_dict_in['file_root'])\n", (23875, 23936), False, 'import os\n'), ((26198, 26229), 'copy.deepcopy', 'copy.deepcopy', (['settings_dict_in'], {}), '(settings_dict_in)\n', (26211, 26229), False, 'import copy\n'), ((18699, 18766), 'os.path.join', 'os.path.join', (["settings_dict['base_dir']", "settings_dict['file_root']"], {}), "(settings_dict['base_dir'], settings_dict['file_root'])\n", (18711, 18766), False, 'import os\n'), ((8764, 8784), 'os.path.isfile', 'os.path.isfile', (['name'], {}), '(name)\n', (8778, 8784), False, 'import os\n'), ((10053, 10084), 'copy.deepcopy', 'copy.deepcopy', (['settings_dict_in'], {}), '(settings_dict_in)\n', (10066, 10084), False, 'import copy\n'), ((18825, 18857), 'os.remove', 'os.remove', (["(root_name + '.resume')"], {}), "(root_name + '.resume')\n", (18834, 18857), False, 'import os\n'), ((24084, 24106), 'numpy.asarray', 'np.asarray', (['step_ndead'], {}), '(step_ndead)\n', (24094, 24106), True, 'import numpy as np\n'), ((24407, 24458), 'numpy.where', 'np.where', (["(resume_steps < dyn_info['peak_start_ind'])"], {}), "(resume_steps < dyn_info['peak_start_ind'])\n", (24415, 24458), True, 'import numpy as np\n'), ((13145, 13218), 'os.path.join', 'os.path.join', (["settings_dict_in['base_dir']", "settings_dict_in['file_root']"], {}), "(settings_dict_in['base_dir'], 
settings_dict_in['file_root'])\n", (13157, 13218), False, 'import os\n'), ((13801, 13837), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (13820, 13837), False, 'import traceback\n'), ((13930, 13948), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13946, 13948), False, 'import sys\n'), ((11914, 11950), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (11933, 11950), False, 'import traceback\n'), ((12051, 12069), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (12067, 12069), False, 'import sys\n'), ((20627, 20663), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (20646, 20663), False, 'import traceback\n'), ((20764, 20782), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (20780, 20782), False, 'import sys\n')] |
#!/usr/bin/env python
# coding=utf-8
import numpy as np
import sys
import lmoments as lmom
# ---- command-line arguments ------------------------------------------------
years = sys.argv[1]        # first year of the analysis period
yeare = sys.argv[2]        # last year of the analysis period
ysize = int(sys.argv[3])   # grid height (rows)
xsize = int(sys.argv[4])   # grid width (columns)
outdir = sys.argv[5]       # base directory holding map/, para/, Nyear_flddph/
var = sys.argv[6]          # variable name used in parameter file names
rp = sys.argv[7]           # return period in years, or exceedance probability
FUNC = sys.argv[8]         # distribution: GEV, GAM, PE3, GUM, WEI or WAK

# River channel depth map (missing value: -9999).
rivhgt = np.fromfile(outdir+'/map/rivhgt.bin', np.float32).reshape(ysize, xsize)
Nflddph = np.zeros((ysize, xsize), dtype=np.float64)

# Non-exceedance probability corresponding to the requested return period
# (values <= 1 are interpreted directly as exceedance probabilities).
if float(rp) > 1:
    RPP = 1.0 - (1.0/float(rp))
else:
    RPP = 1.0 - float(rp)

# Per-distribution configuration:
#   (parameter-file prefixes in order, L-moments quantile function,
#    whether para2 == 0.0 also marks an empty/invalid cell).
_DISTRIBUTIONS = {
    'GEV': (['GEV_mu_', 'GEV_sigma_', 'GEV_theta_'], lmom.quagev, True),
    'GAM': (['GAM_alpha_', 'GAM_beta_'], lmom.quagam, True),
    'PE3': (['PE3_para1_', 'PE3_para2_', 'PE3_gamma_'], lmom.quape3, False),
    'GUM': (['GUM_U_', 'GUM_A_'], lmom.quagum, False),
    'WEI': (['WEI_para1_', 'WEI_beta_', 'WEI_delta_'], lmom.quawei, False),
    'WAK': (['WAK_XI_', 'WAK_A_', 'WAK_B_', 'WAK_C_', 'WAK_D_'],
            lmom.quawak, False),
}


def _load_para(prefix):
    """Read one 64-bit parameter grid for the current period and variable."""
    fname = outdir+'/para/'+prefix+var+'_'+years+'-'+yeare+'.bin'
    return np.fromfile(fname, np.float64).reshape(ysize, xsize)


if FUNC in _DISTRIBUTIONS:
    prefixes, quantile, zero_is_missing = _DISTRIBUTIONS[FUNC]
    paras = [_load_para(p) for p in prefixes]
    para2 = paras[1]  # the second parameter doubles as the validity flag
    for i in range(ysize):
        for j in range(xsize):
            if para2[i, j] == -9999.:
                Nflddph[i, j] = -9999.  # no data
            elif para2[i, j] == -999.0 or (zero_is_missing and
                                           para2[i, j] == 0.0):
                Nflddph[i, j] = 0.0     # fit failed / empty cell
            elif rivhgt[i, j] != -9999:
                # Flood depth = N-year river depth minus channel depth,
                # floored at zero.
                Nrivdph = quantile(RPP, [p[i, j] for p in paras])
                Nflddph[i, j] = max(Nrivdph - rivhgt[i, j], 0.0)
    fflddph = 'flddph_RP'+rp+'_'+FUNC+'.bin'
    Nflddph.astype(np.float32).tofile(outdir+'/Nyear_flddph/'+fflddph)
| [
"lmoments.quagum",
"numpy.fromfile",
"lmoments.quape3",
"lmoments.quawak",
"lmoments.quagev",
"lmoments.quagam",
"numpy.zeros",
"lmoments.quawei"
] | [((349, 391), 'numpy.zeros', 'np.zeros', (['(ysize, xsize)'], {'dtype': 'np.float64'}), '((ysize, xsize), dtype=np.float64)\n', (357, 391), True, 'import numpy as np\n'), ((267, 318), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/map/rivhgt.bin')", 'np.float32'], {}), "(outdir + '/map/rivhgt.bin', np.float32)\n", (278, 318), True, 'import numpy as np\n'), ((715, 767), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para1)", 'np.float64'], {}), "(outdir + '/para/' + f_para1, np.float64)\n", (726, 767), True, 'import numpy as np\n'), ((797, 849), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para2)", 'np.float64'], {}), "(outdir + '/para/' + f_para2, np.float64)\n", (808, 849), True, 'import numpy as np\n'), ((879, 931), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para3)", 'np.float64'], {}), "(outdir + '/para/' + f_para3, np.float64)\n", (890, 931), True, 'import numpy as np\n'), ((1755, 1807), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para1)", 'np.float64'], {}), "(outdir + '/para/' + f_para1, np.float64)\n", (1766, 1807), True, 'import numpy as np\n'), ((1837, 1889), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para2)", 'np.float64'], {}), "(outdir + '/para/' + f_para2, np.float64)\n", (1848, 1889), True, 'import numpy as np\n'), ((2759, 2811), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para1)", 'np.float64'], {}), "(outdir + '/para/' + f_para1, np.float64)\n", (2770, 2811), True, 'import numpy as np\n'), ((2841, 2893), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para2)", 'np.float64'], {}), "(outdir + '/para/' + f_para2, np.float64)\n", (2852, 2893), True, 'import numpy as np\n'), ((2923, 2975), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para3)", 'np.float64'], {}), "(outdir + '/para/' + f_para3, np.float64)\n", (2934, 2975), True, 'import numpy as np\n'), ((3775, 3827), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para1)", 
'np.float64'], {}), "(outdir + '/para/' + f_para1, np.float64)\n", (3786, 3827), True, 'import numpy as np\n'), ((3857, 3909), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para2)", 'np.float64'], {}), "(outdir + '/para/' + f_para2, np.float64)\n", (3868, 3909), True, 'import numpy as np\n'), ((4757, 4809), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para1)", 'np.float64'], {}), "(outdir + '/para/' + f_para1, np.float64)\n", (4768, 4809), True, 'import numpy as np\n'), ((4839, 4891), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para2)", 'np.float64'], {}), "(outdir + '/para/' + f_para2, np.float64)\n", (4850, 4891), True, 'import numpy as np\n'), ((4921, 4973), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para3)", 'np.float64'], {}), "(outdir + '/para/' + f_para3, np.float64)\n", (4932, 4973), True, 'import numpy as np\n'), ((5937, 5989), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para1)", 'np.float64'], {}), "(outdir + '/para/' + f_para1, np.float64)\n", (5948, 5989), True, 'import numpy as np\n'), ((6019, 6071), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para2)", 'np.float64'], {}), "(outdir + '/para/' + f_para2, np.float64)\n", (6030, 6071), True, 'import numpy as np\n'), ((6101, 6153), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para3)", 'np.float64'], {}), "(outdir + '/para/' + f_para3, np.float64)\n", (6112, 6153), True, 'import numpy as np\n'), ((6183, 6235), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para4)", 'np.float64'], {}), "(outdir + '/para/' + f_para4, np.float64)\n", (6194, 6235), True, 'import numpy as np\n'), ((6265, 6317), 'numpy.fromfile', 'np.fromfile', (["(outdir + '/para/' + f_para5)", 'np.float64'], {}), "(outdir + '/para/' + f_para5, np.float64)\n", (6276, 6317), True, 'import numpy as np\n'), ((1266, 1323), 'lmoments.quagev', 'lmom.quagev', (['RPP', '[para1[i, j], para2[i, j], para3[i, j]]'], {}), '(RPP, [para1[i, j], 
para2[i, j], para3[i, j]])\n', (1277, 1323), True, 'import lmoments as lmom\n'), ((2223, 2267), 'lmoments.quagam', 'lmom.quagam', (['RPP', '[para1[i, j], para2[i, j]]'], {}), '(RPP, [para1[i, j], para2[i, j]])\n', (2234, 2267), True, 'import lmoments as lmom\n'), ((3293, 3350), 'lmoments.quape3', 'lmom.quape3', (['RPP', '[para1[i, j], para2[i, j], para3[i, j]]'], {}), '(RPP, [para1[i, j], para2[i, j], para3[i, j]])\n', (3304, 3350), True, 'import lmoments as lmom\n'), ((4222, 4266), 'lmoments.quagum', 'lmom.quagum', (['RPP', '[para1[i, j], para2[i, j]]'], {}), '(RPP, [para1[i, j], para2[i, j]])\n', (4233, 4266), True, 'import lmoments as lmom\n'), ((5291, 5348), 'lmoments.quawei', 'lmom.quawei', (['RPP', '[para1[i, j], para2[i, j], para3[i, j]]'], {}), '(RPP, [para1[i, j], para2[i, j], para3[i, j]])\n', (5302, 5348), True, 'import lmoments as lmom\n'), ((6635, 6723), 'lmoments.quawak', 'lmom.quawak', (['RPP', '[para1[i, j], para2[i, j], para3[i, j], para4[i, j], para5[i, j]]'], {}), '(RPP, [para1[i, j], para2[i, j], para3[i, j], para4[i, j], para5\n [i, j]])\n', (6646, 6723), True, 'import lmoments as lmom\n')] |
import numpy as np
from scipy.optimize import brentq
from yt.fields.field_detector import \
FieldDetector
from pygrackle import \
add_grackle_fields, \
FluidContainer, \
chemistry_data
from pygrackle.yt_fields import \
_data_to_fc, \
_get_needed_fields
from yt.config import ytcfg
from yt.funcs import \
get_pbar, \
DummyProgressBar
def _calculate_cooling_metallicity_fast(field, data, fc):
    """Estimate, per cell, the metallicity at which cooling balances collapse.

    Measures the metal-only cooling time at two reference metallicities
    (z1 and 2*z1) and extrapolates log(cooling time) to find the metallicity
    whose cooling time cancels the dynamical time.

    :param field: yt field being evaluated; a name ending in 'tdt' selects
        the 'total_dynamical_time' field instead of 'dynamical_time'
    :param data: yt data object supplying the dynamical-time field
    :param fc: pygrackle FluidContainer filled from ``data``
    :return: dimensionless yt array of metallicities (zero where
        cooling_time + dynamical_time <= 0, i.e. no estimate is needed)
    """
    if field.name[1].endswith('tdt'):
        tdfield = 'total_dynamical_time'
    else:
        tdfield = 'dynamical_time'
    td = data['gas', tdfield].to('code_time').d
    if len(td.shape) > 1:
        td = td.flatten()
    # Baseline cooling time with metal cooling switched off.
    fc.chemistry_data.metal_cooling_only = 0
    fc.chemistry_data.metal_cooling = 0
    fc.calculate_cooling_time()
    ct_0 = fc['cooling_time'] + td
    # Only cells that are not already cooling faster than they collapse
    # need a metallicity estimate.
    calc = ct_0 > 0
    lct_0 = np.log(ct_0[calc])
    z = data.ds.arr(np.zeros(ct_0.size), '')
    # Metal-only cooling times at the two reference metallicities.
    fc.chemistry_data.metal_cooling = 1
    fc.chemistry_data.metal_cooling_only = 1
    z1 = 1e-4
    lz1 = np.log(z1)
    fc['metal'][:] = z1 * fc['density']
    fc.calculate_cooling_time()
    lct1 = np.log(-fc['cooling_time'])
    z2 = 2 * z1
    fc['metal'][:] = z2 * fc['density']
    fc.calculate_cooling_time()
    lct2 = np.log(-fc['cooling_time'])
    # NOTE(review): the slope is taken over the linear interval (z2 - z1)
    # but the result is offset by lz1 in log-metallicity space — confirm
    # this mixed linear/log extrapolation is intentional.
    slope = ((lct2 - lct1) / (z2 - z1))[calc]
    z[calc] = np.exp((lct_0 - lct1[calc]) / slope + lz1)
    return z
def _cooling_metallicity_fast(field, data):
    """yt field function: cooling metallicity via the fast log-extrapolation."""
    return _calculate_cooling_metallicity_fast(field, data, _data_to_fc(data))
def _cooling_metallicity_diss_fast(field, data):
    """yt field function: fast cooling metallicity for fully dissociated gas.

    Molecular species are folded back into their atomic constituents before
    the fast cooling-metallicity estimate is made.
    """
    fc = _data_to_fc(data)
    if fc.chemistry_data.primordial_chemistry > 1:
        # Fold molecular hydrogen (neutral and ionized) back into atomic HI.
        fc['HI'] += fc['H2I'] + fc['H2II']
        fc['H2I'][:] = 0
        fc['H2II'][:] = 0
    if fc.chemistry_data.primordial_chemistry > 2:
        # Split HD into H and D; presumably by mass fraction (1/3 H, 2/3 D)
        # since m_D ~ 2 m_H — TODO confirm.
        fc['HI'] += fc['HDI'] / 3
        fc['DI'] += 2 * fc['HDI'] / 3
        fc['HDI'][:] = 0
    return _calculate_cooling_metallicity_fast(field, data, fc)
def _calculate_cooling_metallicity(field, data, fc):
    """Solve, per cell, for the metallicity where cooling time = dynamical time.

    For every cell whose cooling time does not already exceed the (negative)
    dynamical time, a scalar root find (brentq) over metallicity is performed
    using a single-cell FluidContainer. Cells with no root in any bracket are
    set to NaN.

    :param field: yt field being evaluated; a name ending in 'tdt' selects
        the 'total_dynamical_time' field instead of 'dynamical_time'
    :param data: yt data object supplying the dynamical-time field
    :param fc: pygrackle FluidContainer filled from ``data``
    :return: dimensionless yt array of metallicities, reshaped back to the
        grid dimensions when the input field was multidimensional
    """
    gfields = _get_needed_fields(fc.chemistry_data)
    if field.name[1].endswith('tdt'):
        tdfield = 'total_dynamical_time'
    else:
        tdfield = 'dynamical_time'
    td = data['gas', tdfield].to('code_time').d
    flatten = len(td.shape) > 1
    if flatten:
        td = td.flatten()
    # Single-cell container used for the per-cell root find.
    fc_mini = FluidContainer(data.ds.grackle_data, 1)
    fc.calculate_cooling_time()

    def cdrat(Z, my_td):
        # Root function: dynamical time + cooling time at metallicity Z.
        fc_mini['metal'][:] = Z * fc_mini['density']
        fc_mini.calculate_cooling_time()
        return my_td + fc_mini['cooling_time'][0]

    field_data = data.ds.arr(np.zeros(td.size), '')
    if isinstance(data, FieldDetector):
        return field_data
    if field_data.size > 200000:
        my_str = "Reticulating splines"
        if ytcfg.getboolean("yt", "__parallel"):
            my_str = "P%03d %s" % \
                (ytcfg.getint("yt", "__global_parallel_rank"),
                 my_str)
        pbar = get_pbar(my_str, field_data.size, parallel=True)
    else:
        pbar = DummyProgressBar()
    for i in range(field_data.size):
        pbar.update(i)
        if td[i] + fc['cooling_time'][i] > 0:
            # Already stable without extra metals; leave the value at 0.
            continue
        for mfield in gfields:
            fc_mini[mfield][:] = fc[mfield][i]
        success = False
        if i > 0 and field_data[i-1] > 0:
            # Warm start: bracket around the previous cell's solution.
            try:
                field_data[i] = brentq(
                    cdrat, 0.1*field_data[i-1], 10*field_data[i-1],
                    args=(td[i],), xtol=1e-6)
                success = True
            except (ValueError, RuntimeError):
                # No sign change in the bracket or no convergence;
                # fall through to the coarse bracket scan below.
                pass
        if not success:
            bds = np.logspace(-2, 2, 5)
            for bd in bds:
                try:
                    field_data[i] = brentq(cdrat, 1e-6, bd, args=(td[i],),
                                           xtol=1e-6)
                    success = True
                    break
                except (ValueError, RuntimeError):
                    continue
            if not success:
                field_data[i] = np.nan
    pbar.finish()
    if flatten:
        field_data = field_data.reshape(data.ActiveDimensions)
    return field_data
def _cooling_metallicity(field, data):
    """yt field function: cooling metallicity via per-cell root finding."""
    return _calculate_cooling_metallicity(field, data, _data_to_fc(data))
def _cooling_metallicity_diss(field, data):
    """yt field function: cooling metallicity for fully dissociated gas.

    Molecular species are folded back into their atomic constituents before
    the per-cell root-finding estimate is made.
    """
    fc = _data_to_fc(data)
    if fc.chemistry_data.primordial_chemistry > 1:
        # Fold molecular hydrogen (neutral and ionized) back into atomic HI.
        fc['HI'] += fc['H2I'] + fc['H2II']
        fc['H2I'][:] = 0
        fc['H2II'][:] = 0
    if fc.chemistry_data.primordial_chemistry > 2:
        # Split HD into H and D; presumably by mass fraction (1/3 H, 2/3 D)
        # since m_D ~ 2 m_H — TODO confirm.
        fc['HI'] += fc['HDI'] / 3
        fc['DI'] += 2 * fc['HDI'] / 3
        fc['HDI'][:] = 0
    return _calculate_cooling_metallicity(field, data, fc)
def add_p2p_grackle_fields(ds, parameters=None):
    """Attach grackle fields plus the cooling-metallicity derived fields to *ds*.

    Each cooling-metallicity variant is registered twice: once with the plain
    name and once with a '_tdt' suffix (the field functions select the total
    dynamical time when the name ends in 'tdt').
    """
    add_grackle_fields(ds, parameters=parameters)
    variants = {
        "cooling_metallicity": _cooling_metallicity,
        "cooling_metallicity_diss": _cooling_metallicity_diss,
        "cooling_metallicity_fast": _cooling_metallicity_fast,
        "cooling_metallicity_diss_fast": _cooling_metallicity_diss_fast,
    }
    for suffix in ("", "_tdt"):
        for base_name, field_function in variants.items():
            ds.add_field(base_name + suffix,
                         function=field_function,
                         units="Zsun", sampling_type="cell")
| [
"yt.funcs.get_pbar",
"scipy.optimize.brentq",
"pygrackle.add_grackle_fields",
"numpy.log",
"yt.funcs.DummyProgressBar",
"pygrackle.yt_fields._get_needed_fields",
"pygrackle.yt_fields._data_to_fc",
"numpy.exp",
"numpy.zeros",
"yt.config.ytcfg.getboolean",
"pygrackle.FluidContainer",
"yt.config.... | [((439, 476), 'pygrackle.yt_fields._get_needed_fields', '_get_needed_fields', (['fc.chemistry_data'], {}), '(fc.chemistry_data)\n', (457, 476), False, 'from pygrackle.yt_fields import _data_to_fc, _get_needed_fields\n'), ((908, 926), 'numpy.log', 'np.log', (['ct_0[calc]'], {}), '(ct_0[calc])\n', (914, 926), True, 'import numpy as np\n'), ((1153, 1163), 'numpy.log', 'np.log', (['z1'], {}), '(z1)\n', (1159, 1163), True, 'import numpy as np\n'), ((1247, 1274), 'numpy.log', 'np.log', (["(-fc['cooling_time'])"], {}), "(-fc['cooling_time'])\n", (1253, 1274), True, 'import numpy as np\n'), ((1302, 1312), 'numpy.log', 'np.log', (['z2'], {}), '(z2)\n', (1308, 1312), True, 'import numpy as np\n'), ((1396, 1423), 'numpy.log', 'np.log', (["(-fc['cooling_time'])"], {}), "(-fc['cooling_time'])\n", (1402, 1423), True, 'import numpy as np\n'), ((1485, 1527), 'numpy.exp', 'np.exp', (['((lct_0 - lct1[calc]) / slope + lz1)'], {}), '((lct_0 - lct1[calc]) / slope + lz1)\n', (1491, 1527), True, 'import numpy as np\n'), ((1595, 1612), 'pygrackle.yt_fields._data_to_fc', '_data_to_fc', (['data'], {}), '(data)\n', (1606, 1612), False, 'from pygrackle.yt_fields import _data_to_fc, _get_needed_fields\n'), ((1736, 1753), 'pygrackle.yt_fields._data_to_fc', '_data_to_fc', (['data'], {}), '(data)\n', (1747, 1753), False, 'from pygrackle.yt_fields import _data_to_fc, _get_needed_fields\n'), ((2179, 2216), 'pygrackle.yt_fields._get_needed_fields', '_get_needed_fields', (['fc.chemistry_data'], {}), '(fc.chemistry_data)\n', (2197, 2216), False, 'from pygrackle.yt_fields import _data_to_fc, _get_needed_fields\n'), ((2477, 2516), 'pygrackle.FluidContainer', 'FluidContainer', (['data.ds.grackle_data', '(1)'], {}), '(data.ds.grackle_data, 1)\n', (2491, 2516), False, 'from pygrackle import add_grackle_fields, FluidContainer, chemistry_data\n'), ((4309, 4326), 'pygrackle.yt_fields._data_to_fc', '_data_to_fc', (['data'], {}), '(data)\n', (4320, 4326), False, 'from pygrackle.yt_fields import 
_data_to_fc, _get_needed_fields\n'), ((4440, 4457), 'pygrackle.yt_fields._data_to_fc', '_data_to_fc', (['data'], {}), '(data)\n', (4451, 4457), False, 'from pygrackle.yt_fields import _data_to_fc, _get_needed_fields\n'), ((4864, 4909), 'pygrackle.add_grackle_fields', 'add_grackle_fields', (['ds'], {'parameters': 'parameters'}), '(ds, parameters=parameters)\n', (4882, 4909), False, 'from pygrackle import add_grackle_fields, FluidContainer, chemistry_data\n'), ((947, 966), 'numpy.zeros', 'np.zeros', (['ct_0.size'], {}), '(ct_0.size)\n', (955, 966), True, 'import numpy as np\n'), ((2750, 2767), 'numpy.zeros', 'np.zeros', (['td.size'], {}), '(td.size)\n', (2758, 2767), True, 'import numpy as np\n'), ((2924, 2960), 'yt.config.ytcfg.getboolean', 'ytcfg.getboolean', (['"""yt"""', '"""__parallel"""'], {}), "('yt', '__parallel')\n", (2940, 2960), False, 'from yt.config import ytcfg\n'), ((3100, 3148), 'yt.funcs.get_pbar', 'get_pbar', (['my_str', 'field_data.size'], {'parallel': '(True)'}), '(my_str, field_data.size, parallel=True)\n', (3108, 3148), False, 'from yt.funcs import get_pbar, DummyProgressBar\n'), ((3174, 3192), 'yt.funcs.DummyProgressBar', 'DummyProgressBar', ([], {}), '()\n', (3190, 3192), False, 'from yt.funcs import get_pbar, DummyProgressBar\n'), ((3748, 3769), 'numpy.logspace', 'np.logspace', (['(-2)', '(2)', '(5)'], {}), '(-2, 2, 5)\n', (3759, 3769), True, 'import numpy as np\n'), ((3513, 3603), 'scipy.optimize.brentq', 'brentq', (['cdrat', '(0.1 * field_data[i - 1])', '(10 * field_data[i - 1])'], {'args': 'td[i]', 'xtol': '(1e-06)'}), '(cdrat, 0.1 * field_data[i - 1], 10 * field_data[i - 1], args=td[i],\n xtol=1e-06)\n', (3519, 3603), False, 'from scipy.optimize import brentq\n'), ((3014, 3058), 'yt.config.ytcfg.getint', 'ytcfg.getint', (['"""yt"""', '"""__global_parallel_rank"""'], {}), "('yt', '__global_parallel_rank')\n", (3026, 3058), False, 'from yt.config import ytcfg\n'), ((3854, 3902), 'scipy.optimize.brentq', 'brentq', (['cdrat', '(1e-06)', 
'bd'], {'args': 'td[i]', 'xtol': '(1e-06)'}), '(cdrat, 1e-06, bd, args=td[i], xtol=1e-06)\n', (3860, 3902), False, 'from scipy.optimize import brentq\n')] |
#!/usr/bin/env python3
import numpy as np
#import scipy.linalg
def vector_lengths(a):
    """Return the Euclidean (L2) norm of each row of the 2-D array *a*.

    :param a: array of shape (m, n); each row is treated as a vector
    :return: 1-D float array of length m with the row norms
    """
    # np.linalg.norm computes sqrt(sum(x**2)) along the requested axis,
    # replacing the manual square/sum/sqrt pipeline.
    return np.linalg.norm(a, axis=1)
def main():
    """Demo entry point: print a random integer matrix and its row lengths."""
    matrix = np.random.randint(0, 10, (3, 4))
    print(matrix)
    print(vector_lengths(matrix))


if __name__ == "__main__":
    main()
| [
"numpy.random.randint",
"numpy.sqrt"
] | [((159, 177), 'numpy.sqrt', 'np.sqrt', (['euclidean'], {}), '(euclidean)\n', (166, 177), True, 'import numpy as np\n'), ((199, 231), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(3, 4)'], {}), '(0, 10, (3, 4))\n', (216, 231), True, 'import numpy as np\n')] |
from copy import copy
from typing import Union
import numpy as np
from fedot.core.data.data import InputData, OutputData
from fedot.core.data.multi_modal import MultiModalData
from fedot.core.operations.evaluation.operation_implementations.data_operations.ts_transformations import ts_to_table
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import TaskTypesEnum
def out_of_sample_ts_forecast(pipeline, input_data: InputData,
                              horizon: int = None) -> np.array:
    """
    Method allow make forecast with appropriate forecast length. The previously
    predicted parts of the time series are used for forecasting next parts. Available
    only for time series forecasting task. Steps ahead provided iteratively,
    ``forecast_length`` elements at a time.
    time series ----------------|
    forecast                    |---|---|---|
    :param pipeline: Pipeline for making time series forecasting
    :param input_data: data for prediction (InputData or MultiModalData)
    :param horizon: forecasting horizon
    :return final_forecast: array with forecast, clipped to ``horizon``
    """
    # Prepare data for time series forecasting
    task = input_data.task
    exception_if_not_ts_task(task)
    if isinstance(input_data, InputData):
        pre_history_ts = np.array(input_data.features)
        # How many elements to the future pipeline can produce per call
        scope_len = task.task_params.forecast_length
        number_of_iterations = _calculate_number_of_steps(scope_len, horizon)
        # Make forecast iteratively moving through the horizon
        final_forecast = []
        for _ in range(0, number_of_iterations):
            iter_predict = pipeline.root_node.predict(input_data=input_data)
            iter_predict = np.ravel(np.array(iter_predict.predict))
            final_forecast.append(iter_predict)
            # Add prediction to the historical data - update it
            pre_history_ts = np.hstack((pre_history_ts, iter_predict))
            # Prepare InputData for next iteration
            input_data = _update_input(pre_history_ts, scope_len, task)
    elif isinstance(input_data, MultiModalData):
        # NOTE(review): `data` is built here but never read afterwards, and
        # the loop below reruns the whole forecast for every data source while
        # rebinding `input_data` to a plain InputData — verify the intended
        # multi-source behaviour.
        data = MultiModalData()
        for data_id in input_data.keys():
            features = input_data[data_id].features
            pre_history_ts = np.array(features)
            source_len = len(pre_history_ts)
            # How many elements to the future pipeline can produce per call
            scope_len = task.task_params.forecast_length
            number_of_iterations = _calculate_number_of_steps(scope_len, horizon)
            # Make forecast iteratively moving through the horizon
            final_forecast = []
            for _ in range(0, number_of_iterations):
                iter_predict = pipeline.predict(input_data=input_data)
                iter_predict = np.ravel(np.array(iter_predict.predict))
                final_forecast.append(iter_predict)
                # Add prediction to the historical data - update it
                pre_history_ts = np.hstack((pre_history_ts, iter_predict))
                # Prepare InputData for next iteration
                input_data = _update_input(pre_history_ts, scope_len, task)
    # Create output data
    final_forecast = np.ravel(np.array(final_forecast))
    # Clip the forecast if it is necessary
    final_forecast = final_forecast[:horizon]
    return final_forecast
def in_sample_ts_forecast(pipeline, input_data: Union[InputData, MultiModalData],
                          horizon: int = None) -> np.array:
    """
    Method allows to make in-sample forecasting. The actual values of the time
    series, rather than the previously predicted parts of the time series,
    are used for forecasting next parts.
    time series ----------------|---|---|---|
    forecast                    |---|---|---|
    :param pipeline: Pipeline for making time series forecasting
    :param input_data: data for prediction; the last ``horizon`` elements are
        treated as the validation part
    :param horizon: forecasting horizon
    :return final_forecast: array with forecast, clipped to ``horizon``
    """
    # Divide data on samples into pre-history and validation part
    task = input_data.task
    exception_if_not_ts_task(task)
    if isinstance(input_data, InputData):
        time_series = np.array(input_data.features)
        # Everything except the last `horizon` elements is known history
        pre_history_ts = time_series[:-horizon]
        source_len = len(pre_history_ts)
        last_index_pre_history = source_len - 1
        # How many elements to the future pipeline can produce per call
        scope_len = task.task_params.forecast_length
        number_of_iterations = _calculate_number_of_steps(scope_len, horizon)
        # Calculate intervals (index of the last forecast element per step)
        intervals = _calculate_intervals(last_index_pre_history,
                                         number_of_iterations,
                                         scope_len)
        data = _update_input(pre_history_ts, scope_len, task)
    else:
        # TODO simplify
        data = MultiModalData()
        for data_id in input_data.keys():
            features = input_data[data_id].features
            time_series = np.array(features)
            pre_history_ts = time_series[:-horizon]
            source_len = len(pre_history_ts)
            last_index_pre_history = source_len - 1
            # How many elements to the future pipeline can produce per call
            scope_len = task.task_params.forecast_length
            number_of_iterations = _calculate_number_of_steps(scope_len, horizon)
            # Calculate intervals
            intervals = _calculate_intervals(last_index_pre_history,
                                             number_of_iterations,
                                             scope_len)
            local_data = _update_input(pre_history_ts, scope_len, task)
            data[data_id] = local_data
    # Make forecast iteratively moving through the horizon
    final_forecast = []
    for _, border in zip(range(0, number_of_iterations), intervals):
        iter_predict = pipeline.predict(input_data=data)
        iter_predict = np.ravel(np.array(iter_predict.predict))
        final_forecast.append(iter_predict)
        if isinstance(input_data, InputData):
            # Add actual values to the historical data - update it
            pre_history_ts = time_series[:border + 1]
            # Prepare InputData for next iteration
            data = _update_input(pre_history_ts, scope_len, task)
        else:
            # TODO simplify
            data = MultiModalData()
            for data_id in input_data.keys():
                features = input_data[data_id].features
                time_series = np.array(features)
                pre_history_ts = time_series[:border + 1]
                local_data = _update_input(pre_history_ts, scope_len, task)
                data[data_id] = local_data
    # Create output data
    final_forecast = np.ravel(np.array(final_forecast))
    # Clip the forecast if it is necessary
    final_forecast = final_forecast[:horizon]
    return final_forecast
def fitted_values(train_predicted: OutputData, horizon_step: int = None) -> OutputData:
    """ The method converts a multidimensional lagged array into an
    one-dimensional array - time series based on predicted values for training sample

    :param train_predicted: OutputData whose ``predict`` is a lagged matrix
        (one row per starting index, one column per forecast step)
    :param horizon_step: index of elements for forecast. If None - perform
    averaging for all forecasting steps
    """
    copied_data = copy(train_predicted)
    if horizon_step is not None:
        # Take particular forecast step (a single column of the matrix)
        copied_data.predict = copied_data.predict[:, horizon_step]
        copied_data.idx = copied_data.idx + horizon_step
        return copied_data
    else:
        # Perform collapse with averaging over all forecast steps
        forecast_length = copied_data.task.task_params.forecast_length
        # Extend source index range to cover the last forecast window
        indices_range = np.arange(copied_data.idx[0], copied_data.idx[-1] + forecast_length + 1)
        # Lagged matrix with indices in cells, aligned with the predictions
        _, idx_matrix = ts_to_table(idx=indices_range,
                                    time_series=indices_range,
                                    window_size=forecast_length)
        predicted_matrix = copied_data.predict
        # For every index calculate mean predictions (by all forecast steps)
        final_predictions = []
        indices_range = indices_range[:-1]
        for index in indices_range:
            # All predictions that were made for this time index
            vals = predicted_matrix[idx_matrix == index]
            mean_value = np.mean(vals)
            final_predictions.append(mean_value)
        copied_data.predict = np.array(final_predictions)
        copied_data.idx = indices_range
        return copied_data
def in_sample_fitted_values(train_predicted: OutputData) -> OutputData:
    """ Perform in sample validation based on training sample.

    Reassembles a one-dimensional series from the lagged prediction matrix by
    concatenating every ``forecast_length``-th row, then patches in the tail
    of the last row if the stride overshot the end of the matrix.

    :param train_predicted: OutputData whose ``predict`` is a lagged matrix
    :return: OutputData with a flat ``predict`` array and updated indices
    """
    forecast_length = train_predicted.task.task_params.forecast_length
    all_values = []
    step = 0
    # Glues together parts of predictions using "in-sample" way
    while step < len(train_predicted.predict):
        all_values.extend(train_predicted.predict[step, :])
        step += forecast_length
    # In some cases it doesn't reach the end
    if not np.isclose(all_values[-1], train_predicted.predict[-1, -1]):
        missing_part_index = step - len(train_predicted.predict) + 1
        # Store missing predicted values from the tail of the last row
        all_values.extend(train_predicted.predict[-1, missing_part_index:])
    copied_data = copy(train_predicted)
    copied_data.predict = np.array(all_values)
    # Update indices to a contiguous range starting at the original first id
    first_id = copied_data.idx[0]
    copied_data.idx = np.arange(first_id, first_id + len(all_values))
    return copied_data
def _calculate_number_of_steps(scope_len, horizon):
""" Method return amount of iterations which must be done for multistep
time series forecasting
:param scope_len: time series forecasting length
:param horizon: forecast horizon
:return amount_of_steps: amount of steps to produce
"""
amount_of_iterations = int(horizon // scope_len)
# Remainder of the division
resid = int(horizon % scope_len)
if resid == 0:
amount_of_steps = amount_of_iterations
else:
amount_of_steps = amount_of_iterations + 1
return amount_of_steps
def _update_input(pre_history_ts, scope_len, task):
    """Wrap the known part of a series into InputData for the next forecast.

    The forecast indices start right after the last known element and span
    ``scope_len`` positions.

    :param pre_history_ts: known part of the time series
    :param scope_len: how many elements ahead the pipeline forecasts
    :param task: time series forecasting task
    :return: InputData ready for prediction
    """
    forecast_start = len(pre_history_ts)
    forecast_idx = np.arange(forecast_start, forecast_start + scope_len)
    return InputData(idx=forecast_idx,
                     features=pre_history_ts, target=None,
                     task=task, data_type=DataTypesEnum.ts)
def _calculate_intervals(last_index_pre_history, amount_of_iterations, scope_len):
""" Function calculate
:param last_index_pre_history: last id of the known part of time series
:param amount_of_iterations: amount of steps for time series forecasting
:param scope_len: amount of elements in every time series forecasting step
:return intervals: ids of finish of every step in time series
"""
intervals = []
current_border = last_index_pre_history
for i in range(0, amount_of_iterations):
current_border = current_border + scope_len
intervals.append(current_border)
return intervals
def exception_if_not_ts_task(task):
    """Raise ValueError if *task* is not a time series forecasting task."""
    if task.task_type != TaskTypesEnum.ts_forecasting:
        # The original used an f-string with no placeholders; plain literal.
        raise ValueError('Method forecast is available only for time series forecasting task')
| [
"numpy.mean",
"numpy.isclose",
"numpy.hstack",
"fedot.core.data.multi_modal.MultiModalData",
"fedot.core.operations.evaluation.operation_implementations.data_operations.ts_transformations.ts_to_table",
"numpy.array",
"copy.copy",
"numpy.arange"
] | [((7328, 7349), 'copy.copy', 'copy', (['train_predicted'], {}), '(train_predicted)\n', (7332, 7349), False, 'from copy import copy\n'), ((9336, 9357), 'copy.copy', 'copy', (['train_predicted'], {}), '(train_predicted)\n', (9340, 9357), False, 'from copy import copy\n'), ((9384, 9404), 'numpy.array', 'np.array', (['all_values'], {}), '(all_values)\n', (9392, 9404), True, 'import numpy as np\n'), ((1254, 1283), 'numpy.array', 'np.array', (['input_data.features'], {}), '(input_data.features)\n', (1262, 1283), True, 'import numpy as np\n'), ((3184, 3208), 'numpy.array', 'np.array', (['final_forecast'], {}), '(final_forecast)\n', (3192, 3208), True, 'import numpy as np\n'), ((4162, 4191), 'numpy.array', 'np.array', (['input_data.features'], {}), '(input_data.features)\n', (4170, 4191), True, 'import numpy as np\n'), ((4848, 4864), 'fedot.core.data.multi_modal.MultiModalData', 'MultiModalData', ([], {}), '()\n', (4862, 4864), False, 'from fedot.core.data.multi_modal import MultiModalData\n'), ((6762, 6786), 'numpy.array', 'np.array', (['final_forecast'], {}), '(final_forecast)\n', (6770, 6786), True, 'import numpy as np\n'), ((7758, 7830), 'numpy.arange', 'np.arange', (['copied_data.idx[0]', '(copied_data.idx[-1] + forecast_length + 1)'], {}), '(copied_data.idx[0], copied_data.idx[-1] + forecast_length + 1)\n', (7767, 7830), True, 'import numpy as np\n'), ((7902, 7993), 'fedot.core.operations.evaluation.operation_implementations.data_operations.ts_transformations.ts_to_table', 'ts_to_table', ([], {'idx': 'indices_range', 'time_series': 'indices_range', 'window_size': 'forecast_length'}), '(idx=indices_range, time_series=indices_range, window_size=\n forecast_length)\n', (7913, 7993), False, 'from fedot.core.operations.evaluation.operation_implementations.data_operations.ts_transformations import ts_to_table\n'), ((8471, 8498), 'numpy.array', 'np.array', (['final_predictions'], {}), '(final_predictions)\n', (8479, 8498), True, 'import numpy as np\n'), ((9070, 9129), 
'numpy.isclose', 'np.isclose', (['all_values[-1]', 'train_predicted.predict[-1, -1]'], {}), '(all_values[-1], train_predicted.predict[-1, -1])\n', (9080, 9129), True, 'import numpy as np\n'), ((1905, 1946), 'numpy.hstack', 'np.hstack', (['(pre_history_ts, iter_predict)'], {}), '((pre_history_ts, iter_predict))\n', (1914, 1946), True, 'import numpy as np\n'), ((2135, 2151), 'fedot.core.data.multi_modal.MultiModalData', 'MultiModalData', ([], {}), '()\n', (2149, 2151), False, 'from fedot.core.data.multi_modal import MultiModalData\n'), ((4985, 5003), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (4993, 5003), True, 'import numpy as np\n'), ((5939, 5969), 'numpy.array', 'np.array', (['iter_predict.predict'], {}), '(iter_predict.predict)\n', (5947, 5969), True, 'import numpy as np\n'), ((6361, 6377), 'fedot.core.data.multi_modal.MultiModalData', 'MultiModalData', ([], {}), '()\n', (6375, 6377), False, 'from fedot.core.data.multi_modal import MultiModalData\n'), ((8378, 8391), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (8385, 8391), True, 'import numpy as np\n'), ((10618, 10657), 'numpy.arange', 'np.arange', (['start_forecast', 'end_forecast'], {}), '(start_forecast, end_forecast)\n', (10627, 10657), True, 'import numpy as np\n'), ((1731, 1761), 'numpy.array', 'np.array', (['iter_predict.predict'], {}), '(iter_predict.predict)\n', (1739, 1761), True, 'import numpy as np\n'), ((2275, 2293), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2283, 2293), True, 'import numpy as np\n'), ((2962, 3003), 'numpy.hstack', 'np.hstack', (['(pre_history_ts, iter_predict)'], {}), '((pre_history_ts, iter_predict))\n', (2971, 3003), True, 'import numpy as np\n'), ((6510, 6528), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (6518, 6528), True, 'import numpy as np\n'), ((2788, 2818), 'numpy.array', 'np.array', (['iter_predict.predict'], {}), '(iter_predict.predict)\n', (2796, 2818), True, 'import numpy as np\n')] |
import os
from itertools import chain
import time
import numpy as np
from cffi import FFI
# The datatype that we use for computation. We always convert the given data
# to a double array to make sure we have enough bits for precise computation.
_double = np.dtype('d')
_ffi = FFI()
_ffi.cdef(r"""
void train(const long *data, size_t data_size, long n_steps,
double eta_0, double power, int start_step,
const double *features,
double *b_weights, double *a_weights, double *n_logz,
size_t n_items, size_t l_dim, size_t m_feat);
""")
_lib = _ffi.verify('#include "train_features.h"',
sources=[os.path.join(
os.path.dirname(__file__), 'train_features.cpp')],
include_dirs=[os.path.dirname(__file__)],
extra_compile_args=['-O3', '-DNDEBUG', '-std=c++11'])
class TrainerFeatures:
    """Driver for the native SGD trainer declared in ``train_features.h``.

    Packs the Python-side training subsets and the feature matrix into flat
    cffi buffers; ``train`` hands the weight arrays to the C++ ``train``
    routine, which updates them in place via raw pointers.
    """

    def __init__(self, model_data, noise_data, a_noise,
                 features, n_items, l_dims, m_features):
        # ``data`` layout: records separated by -1; each record is a label
        # (1 for model subsets, 0 for noise subsets) followed by the item
        # ids of the subset.
        data = []
        rolled_features = []
        for i, subset in enumerate(chain(model_data, noise_data)):
            subset = list(subset)
            assert min(subset) >= 0
            assert len(subset) > 0
            label = 1 if i < len(model_data) else 0
            if data:
                data.append(-1)  # record separator
            data.append(label)
            data.extend(subset)
        # Flatten the feature matrix row by row for the C side.
        for row in features:
            for value in row:
                rolled_features.append(value)
        self.data = _ffi.new('long []', data)
        self.features = _ffi.new('double []', rolled_features)
        self.data_size = len(data)
        self.orig_data = model_data
        self.orig_nois = noise_data
        self.orig_features = features
        self.n_items = n_items
        self.l_dims = l_dims
        self.m_features = m_features
        # Small random initialization of the (m_features x l_dims) weights.
        self.b_weights = 1e-3 * np.asarray(
            np.random.rand(*(self.m_features, self.l_dims)),
            dtype=np.float64)
        self.a_weights = np.array(a_noise, dtype=np.float64)
        self.unaries = np.dot(np.array(self.orig_features), self.a_weights)
        self.iteration = 0
        # Normalizer: -sum(log(1 + e^{unary})); presumably the log partition
        # term of the model — TODO confirm against the C++ side.
        self.n_logz = np.array([-np.sum(np.log(1 + np.exp(self.unaries)))],
                               dtype=np.float64)

    def train(self, n_steps, eta_0, power):
        """Run ``n_steps`` passes of the native trainer over the packed data.

        :param n_steps: number of passes over the data buffer
        :param eta_0: initial learning rate passed to the C++ trainer
        :param power: learning-rate decay exponent passed to the C++ trainer
        """
        step = self.data_size
        n_steps *= 1  # NOTE(review): no-op; looks like a leftover scale factor
        for i in range(0, step * n_steps, step):
            print(100. * i / (step * n_steps), '%')
            time_s = time.time()
            print('iter start')
            # Weight arrays are passed as raw pointers and mutated in place.
            _lib.train(
                self.data, self.data_size, step,
                eta_0, power, i,
                self.features,
                _ffi.cast("double *", self.b_weights.ctypes.data),
                _ffi.cast("double *", self.a_weights.ctypes.data),
                _ffi.cast("double *", self.n_logz.ctypes.data),
                self.n_items, self.l_dims, self.m_features)
            print('iter done in ', time.time() - time_s, 'seconds')
| [
"itertools.chain",
"numpy.random.rand",
"cffi.FFI",
"numpy.exp",
"os.path.dirname",
"numpy.array",
"numpy.dtype",
"time.time"
] | [((257, 270), 'numpy.dtype', 'np.dtype', (['"""d"""'], {}), "('d')\n", (265, 270), True, 'import numpy as np\n'), ((280, 285), 'cffi.FFI', 'FFI', ([], {}), '()\n', (283, 285), False, 'from cffi import FFI\n'), ((2042, 2077), 'numpy.array', 'np.array', (['a_noise'], {'dtype': 'np.float64'}), '(a_noise, dtype=np.float64)\n', (2050, 2077), True, 'import numpy as np\n'), ((798, 823), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (813, 823), False, 'import os\n'), ((1119, 1148), 'itertools.chain', 'chain', (['model_data', 'noise_data'], {}), '(model_data, noise_data)\n', (1124, 1148), False, 'from itertools import chain\n'), ((2108, 2136), 'numpy.array', 'np.array', (['self.orig_features'], {}), '(self.orig_features)\n', (2116, 2136), True, 'import numpy as np\n'), ((2525, 2536), 'time.time', 'time.time', ([], {}), '()\n', (2534, 2536), False, 'import time\n'), ((714, 739), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (729, 739), False, 'import os\n'), ((1938, 1985), 'numpy.random.rand', 'np.random.rand', (['*(self.m_features, self.l_dims)'], {}), '(*(self.m_features, self.l_dims))\n', (1952, 1985), True, 'import numpy as np\n'), ((2999, 3010), 'time.time', 'time.time', ([], {}), '()\n', (3008, 3010), False, 'import time\n'), ((2232, 2252), 'numpy.exp', 'np.exp', (['self.unaries'], {}), '(self.unaries)\n', (2238, 2252), True, 'import numpy as np\n')] |
import re
import string
from collections import defaultdict
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
from scipy import linalg
def process_tweet(tweet):
    """Process tweet function.
    Input:
        tweet: a string containing a tweet
    Output:
        tweets_clean: a list of words containing the processed tweet
    """
    stemmer = PorterStemmer()
    english_stopwords = stopwords.words('english')
    # Strip, in order: stock tickers, the old-style "RT" marker, hyperlinks,
    # and the '#' character (the hashtag word itself is kept).
    for pattern in (r'\$\w*', r'^RT[\s]+', r'https?:\/\/.*[\r\n]*', r'#'):
        tweet = re.sub(pattern, '', tweet)
    tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,
                               reduce_len=True)
    # Keep stemmed tokens that are neither stopwords nor punctuation.
    return [stemmer.stem(token)
            for token in tokenizer.tokenize(tweet)
            if token not in english_stopwords
            and token not in string.punctuation]
return tweets_clean
def build_freqs(tweets, ys):
    """Build frequencies.
    Input:
        tweets: a list of tweets
        ys: an m x 1 array with the sentiment label of each tweet
            (either 0 or 1)
    Output:
        freqs: a dictionary mapping each (word, sentiment) pair to its
        frequency
    """
    # Squeeze handles both (m, 1) arrays and plain lists; zip pairs each
    # label with its tweet.
    labels = np.squeeze(ys).tolist()
    freqs = {}
    for label, tweet in zip(labels, tweets):
        for word in process_tweet(tweet):
            key = (word, label)
            freqs[key] = freqs.get(key, 0) + 1
    return freqs
def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^{-z}), elementwise for array inputs."""
    exp_term = np.exp(-z)
    return 1.0 / (1.0 + exp_term)
def get_idx(words, word2Ind):
    """Map each word to its vocabulary index.

    Input:
        words: iterable of tokens
        word2Ind: dictionary mapping a word to its index
    Output:
        list of indices, one per word, in the original order
    """
    # A comprehension replaces the original `idx = idx + [...]` loop,
    # which rebuilt the list on every iteration (quadratic time).
    return [word2Ind[word] for word in words]
def pack_idx_with_frequency(context_words, word2Ind):
    """Pair each context word's vocabulary index with its count in the window.

    Input:
        context_words: list of tokens in the context window
        word2Ind: dictionary mapping a word to its index
    Output:
        list of (index, frequency) tuples, one per context word, in order;
        repeated words appear once per occurrence, each with the full count
    """
    freq_dict = defaultdict(int)
    for word in context_words:
        freq_dict[word] += 1
    # Pair each word's index directly with its count; this replaces the
    # `for i in range(len(idxs))` index loop over a parallel list.
    return [(word2Ind[word], freq_dict[word]) for word in context_words]
def get_vectors(data, word2Ind, V, C):
    """Infinite generator of CBOW training pairs.

    Yields (x, y) where y is a one-hot vector (length V) for the center word
    at position i, and x is the normalized bag-of-words vector of the C words
    on each side. Wraps around to the start of ``data`` forever.
    """
    i = C
    while True:
        y = np.zeros(V)
        x = np.zeros(V)
        center_word = data[i]
        y[word2Ind[center_word]] = 1
        # C words before and C words after the center position.
        context_words = data[(i - C):i] + data[(i + 1):(i + C + 1)]
        num_ctx_words = len(context_words)
        for idx, freq in pack_idx_with_frequency(context_words, word2Ind):
            # Each word's weight is its count divided by the window size.
            x[idx] = freq / num_ctx_words
        yield x, y
        i += 1
        if i >= len(data):
            # NOTE(review): after the reset i == 0, so data[(i - C):i] is the
            # empty slice data[-C:0] and only the right context is used for
            # that example — confirm this wrap-around behaviour is intended.
            print('i is being set to 0')
            i = 0
def get_batches(data, word2Ind, V, C, batch_size):
    """Yield mini-batches of CBOW training pairs.

    Draws (x, y) examples from ``get_vectors`` and groups them into batches
    of ``batch_size`` distinct examples; any trailing partial batch is
    dropped (``get_vectors`` is infinite, so in practice batches keep coming).

    Input:
        data: list of tokens
        word2Ind: word -> index mapping
        V: vocabulary size
        C: context half-window
        batch_size: number of examples per batch
    Output:
        generator of (X, Y) arrays, each of shape (V, batch_size)
    """
    batch_x = []
    batch_y = []
    for x, y in get_vectors(data, word2Ind, V, C):
        batch_x.append(x)
        batch_y.append(y)
        # Emit a full batch, then start collecting the next one.
        # (The original while/else filled the whole batch with copies of a
        # single example, never cleared batch_x/batch_y, and re-yielded the
        # same stale batch on every subsequent example.)
        if len(batch_x) == batch_size:
            yield np.array(batch_x).T, np.array(batch_y).T
            batch_x = []
            batch_y = []
def compute_pca(data, n_components=2):
    """
    Input:
        data: of dimension (m,n) where each row corresponds to a word vector
        n_components: Number of components you want to keep.
    Output:
        X_reduced: data projected onto the top n_components principal axes,
        shape (m, n_components). The input array is NOT modified.
    """
    # Center on a copy — the original subtracted the mean in place, silently
    # mutating the caller's array.
    centered = data - data.mean(axis=0)
    # Covariance matrix of the columns (features).
    R = np.cov(centered, rowvar=False)
    # 'eigh' rather than 'eig' since R is symmetric; the performance gain
    # is substantial and eigenvalues come out real.
    evals, evecs = linalg.eigh(R)
    # Sort eigenpairs by decreasing eigenvalue and keep the top components.
    order = np.argsort(evals)[::-1]
    evecs = evecs[:, order][:, :n_components]
    # Project the centered data onto the selected principal axes.
    return np.dot(evecs.T, centered.T).T
def get_dict(data):
    """
    Input:
        data: the corpus (iterable of tokens) to build the vocabulary from
    Output:
        word2Ind: dictionary mapping each word to its index
        Ind2word: dictionary mapping each index back to its word
    Indices follow the sorted order of the unique words.
    """
    vocabulary = sorted(set(data))
    word2Ind = {word: position for position, word in enumerate(vocabulary)}
    Ind2word = {position: word for position, word in enumerate(vocabulary)}
    return word2Ind, Ind2word
| [
"scipy.linalg.eigh",
"nltk.tokenize.TweetTokenizer",
"nltk.corpus.stopwords.words",
"nltk.stem.PorterStemmer",
"numpy.squeeze",
"numpy.argsort",
"numpy.exp",
"numpy.zeros",
"numpy.dot",
"collections.defaultdict",
"numpy.array",
"re.sub",
"numpy.cov"
] | [((433, 448), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (446, 448), False, 'from nltk.stem import PorterStemmer\n'), ((473, 499), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (488, 499), False, 'from nltk.corpus import stopwords\n'), ((555, 583), 're.sub', 're.sub', (['"""\\\\$\\\\w*"""', '""""""', 'tweet'], {}), "('\\\\$\\\\w*', '', tweet)\n", (561, 583), False, 'import re\n'), ((636, 666), 're.sub', 're.sub', (['"""^RT[\\\\s]+"""', '""""""', 'tweet'], {}), "('^RT[\\\\s]+', '', tweet)\n", (642, 666), False, 'import re\n'), ((703, 748), 're.sub', 're.sub', (['"""https?:\\\\/\\\\/.*[\\\\r\\\\n]*"""', '""""""', 'tweet'], {}), "('https?:\\\\/\\\\/.*[\\\\r\\\\n]*', '', tweet)\n", (709, 748), False, 'import re\n'), ((830, 852), 're.sub', 're.sub', (['"""#"""', '""""""', 'tweet'], {}), "('#', '', tweet)\n", (836, 852), False, 'import re\n'), ((892, 964), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {'preserve_case': '(False)', 'strip_handles': '(True)', 'reduce_len': '(True)'}), '(preserve_case=False, strip_handles=True, reduce_len=True)\n', (906, 964), False, 'from nltk.tokenize import TweetTokenizer\n'), ((2592, 2608), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2603, 2608), False, 'from collections import defaultdict\n'), ((4234, 4260), 'numpy.cov', 'np.cov', (['data'], {'rowvar': '(False)'}), '(data, rowvar=False)\n', (4240, 4260), True, 'import numpy as np\n'), ((4447, 4461), 'scipy.linalg.eigh', 'linalg.eigh', (['R'], {}), '(R)\n', (4458, 4461), False, 'from scipy import linalg\n'), ((2957, 2968), 'numpy.zeros', 'np.zeros', (['V'], {}), '(V)\n', (2965, 2968), True, 'import numpy as np\n'), ((2981, 2992), 'numpy.zeros', 'np.zeros', (['V'], {}), '(V)\n', (2989, 2992), True, 'import numpy as np\n'), ((4578, 4595), 'numpy.argsort', 'np.argsort', (['evals'], {}), '(evals)\n', (4588, 4595), True, 'import numpy as np\n'), ((4888, 4911), 'numpy.dot', 'np.dot', 
(['evecs.T', 'data.T'], {}), '(evecs.T, data.T)\n', (4894, 4911), True, 'import numpy as np\n'), ((1910, 1924), 'numpy.squeeze', 'np.squeeze', (['ys'], {}), '(ys)\n', (1920, 1924), True, 'import numpy as np\n'), ((2388, 2398), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (2394, 2398), True, 'import numpy as np\n'), ((3679, 3696), 'numpy.array', 'np.array', (['batch_x'], {}), '(batch_x)\n', (3687, 3696), True, 'import numpy as np\n'), ((3700, 3717), 'numpy.array', 'np.array', (['batch_y'], {}), '(batch_y)\n', (3708, 3717), True, 'import numpy as np\n')] |
# This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import unittest
import numpy as np
import matplotlib.pyplot as plt
import logging
from p3iv_utils_probability.distributions import *
# Configure the root logger so test output is emitted at DEBUG level.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def plot_gaussians(gs, sigma=2, title=""):
    """Render every distribution in ``gs`` onto one shared, titled figure."""
    figure, axis = plt.subplots()
    figure.suptitle(title)
    for distribution in gs:
        plot_gaussian_new(distribution, sigma=sigma, ax=axis)
    plt.show()
def plot_gaussian_new(g, sigma=2, ax=None):
    """Plot the mean (dashed) and 1..sigma confidence bands of ``g`` on ``ax``.

    ``g`` must expose ``mean`` (a sequence) and ``upper_bound(s)`` /
    ``lower_bound(s)`` returning arrays of the same length.

    Bug fix: the original drew the color as ``np.random.rand(sigma + 1)``,
    which is only a valid matplotlib color (RGB/RGBA) when sigma is 2 or 3.
    A color is always 3 (RGB) components here, independent of sigma.
    """
    color = np.random.rand(3)
    x = np.arange(len(g.mean))
    ax.plot(x, g.mean, linestyle="--", color=color)
    for s in range(1, sigma + 1):
        upper_bound = g.upper_bound(s)
        lower_bound = g.lower_bound(s)
        # Only shade where the band is non-degenerate.
        valid_ = upper_bound > lower_bound
        ax.fill_between(x[valid_], lower_bound[valid_], upper_bound[valid_], facecolors=color, alpha=0.3)
class TestBasics(unittest.TestCase):
    """Regression tests for the p3iv probability distribution classes.

    Expected values are fixed numeric constants; comparisons assert that the
    sum of residuals against those constants is (almost) zero.
    """
    def test_univariate_float(self):
        m = 1.0
        v = 5.0
        test = UnivariateNormalDistribution(mean=m, covariance=v)
        r = test.range(2)
        self.assertEqual(r.shape, (2,))
        self.assertAlmostEqual(np.sum(r - np.array([-9.0, 11.0])), 0.0)
        self.assertAlmostEqual(test.pdf(2)[0], 0.161434225872)
        self.assertAlmostEqual(test.cdf(1)[0], 0.5)
    def test_truncated_univariate_float(self):
        m = 1.0
        v = 5.0
        test = TruncatedUnivariateNormalDistribution(mean=m, covariance=v, lower_truncation=0, upper_truncation=4)
        r = test.range(2)
        # The range must be clipped to the truncation bounds [0, 4].
        self.assertAlmostEqual(np.sum(r - np.array([0.0, 4.0])), 0.0)
    def test_univariate_array(self):
        m = np.array([1])
        v = np.array([5])
        test = UnivariateNormalDistribution(mean=m, covariance=v)
        r = test.range(2)
        self.assertEqual(r[1], np.array([11.0]))
        self.assertEqual(r[0], np.array([-9.0]))
    def test_univariate_seq(self):
        test = UnivariateNormalDistributionSequence()
        test.resize(100)
        test.mean = np.arange(100)
        test.covariance = np.linspace(0.1, 10, 100)
        # Slicing the sequence must preserve per-element ranges.
        t = test[:5]
        r = t.range(2)
        upper = r[:, 1]
        lower = r[:, 0]
        self.assertAlmostEqual(np.sum(upper - np.asarray([0.2, 1.4, 2.6, 3.8, 5.0])), 0.0)
        self.assertAlmostEqual(np.sum(lower - np.asarray([-0.2, 0.6, 1.4, 2.2, 3.0])), 0.0)
        title = "UnivariateNormalDistributionSequence"
        # plot_gaussians([test], sigma=3, title=title)
    def test_univariate_seq_append(self):
        title = "UnivariateNormalDistributionSequence"
        test = UnivariateNormalDistributionSequence()
        test.resize(50)
        test.mean = np.arange(50)
        test.covariance = np.linspace(0.1, 10, 50)
        test2 = UnivariateNormalDistributionSequence()
        test2.resize(50)
        test2.mean = np.arange(50, 100)
        test2.covariance = np.linspace(0.1, 10, 50)
        # After append, the first elements must be unchanged.
        test.append(test2)
        t = test[:5]
        r = t.range(2)
        upper = r[:, 1]
        lower = r[:, 0]
        self.assertAlmostEqual(np.sum(t.mean - np.asarray([0, 1, 2, 3, 4])), 0.0)
        self.assertAlmostEqual(
            np.sum(t.covariance - np.asarray([0.1, 0.30204082, 0.50408163, 0.70612245, 0.90816327])), 0.0
        )
        self.assertAlmostEqual(np.sum(upper - np.asarray([0.2, 1.60408163, 3.00816327, 4.4122449, 5.81632653])), 0.0)
        self.assertAlmostEqual(np.sum(lower - np.asarray([-0.2, 0.39591837, 0.99183673, 1.5877551, 2.18367347])), 0.0)
        # plot_gaussians([test], sigma=3, title=title)
    def test_bivariate(self):
        m = np.array([[1, 2]])
        v = np.array([[5, 0], [0, 1]])
        test = BivariateNormalDistribution(mean=m, covariance=v)
        r = test.range(2)
        self.assertAlmostEqual(np.sum(r - np.asarray([1.0, 2.0, 0.0, 4.47213595, 2.0])), 0.0)
        x = (1.5, 3, 4)
        y = (3, 1, 5)
        self.assertAlmostEqual(np.sum(test.pdf(x, y) - np.asarray([0.0421047, 0.02893811, 0.00032147])), 0.0)
        self.assertAlmostEqual(test.cdf(1, 2), 0.25)
    def test_truncated_bivariate(self):
        # No assertions here: this test only verifies that construction of a
        # truncated bivariate distribution does not raise.
        m = np.array([[1, 2]])
        v = np.array([[5, 0], [0, 1]])
        tr_up = np.array([[7, 1], [4, 4]])
        tr_lw = np.array([[0, 0], [0, 0]])
        test = TruncatedBivariateNormalDistribution(
            mean=m, covariance=v, upper_truncation=tr_up, lower_truncation=tr_lw
        )
    def test_bivariate_seq(self):
        title = "BivariateNormalDistributionSequence"
        m = np.array([[1, 0], [2, 2], [3, 3]])
        v = np.array([[[5, 0], [0, 1]], [[3, 0], [0, 3]], [[1, 0], [0, 1]]])
        test = BivariateNormalDistributionSequence()
        test.resize(3)
        test.mean = m
        test.covariance = v
        r = test.range(2)
        self.assertAlmostEqual(np.sum(r[0] - np.asarray([1.0, 0.0, 0.0, 4.47213595, 2.0])), 0.0)
        self.assertAlmostEqual(np.sum(r[1] - np.asarray([2.0, 2.0, 0.0, 3.46410162, 3.46410162])), 0.0)
        self.assertAlmostEqual(np.sum(r[2] - np.asarray([3.0, 3.0, 0.0, 2.0, 2.0])), 0.0)
    def test_bivariate_seq_mean(self):
        title = "BivariateNormalDistributionSequence"
        m = np.array([[1, 0], [2, 2], [3, 3]])
        test = BivariateNormalDistributionSequence()
        test.resize(3)
        test.mean = m
        t = test[1:]
        r = t.range(2)
        truth = np.asarray([[2.0, 2.0, 0.0, 0.0, 0.0], [3.0, 3.0, 0.0, 0.0, 0.0]])
        # NOTE: the loop variable `t` shadows the sequence slice above; the
        # slice is no longer needed at this point.
        for i, t in enumerate(truth):
            self.assertAlmostEqual(np.sum(r[i] - t), 0.0)
    def test_bivariate_seq_append(self):
        title = "BivariateNormalDistributionSequence"
        m = np.array([[1, 0], [2, 2], [3, 3]])
        v = np.array([[[5, 0], [0, 1]], [[3, 0], [0, 3]], [[1, 0], [0, 1]]])
        test1 = BivariateNormalDistributionSequence()
        test1.resize(3)
        test1.mean = m
        test1.covariance = v
        test2 = BivariateNormalDistributionSequence()
        test2.resize(3)
        test2.mean = m
        test2.covariance = v
        test1.append(test2)
        upper = test1.range(2)
        # Appended sequence repeats the same three distributions twice.
        truth = np.asarray(
            [
                [1.0, 0.0, 0.0, 4.47213595, 2.0],
                [2.0, 2.0, 0.0, 3.46410162, 3.46410162],
                [3.0, 3.0, 0.0, 2.0, 2.0],
                [1.0, 0.0, 0.0, 4.47213595, 2.0],
                [2.0, 2.0, 0.0, 3.46410162, 3.46410162],
                [3.0, 3.0, 0.0, 2.0, 2.0],
            ]
        )
        for i, t in enumerate(truth):
            self.assertAlmostEqual(np.sum(upper[i] - t), 0.0)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| [
"logging.getLogger",
"logging.basicConfig",
"numpy.random.rand",
"numpy.asarray",
"numpy.array",
"numpy.linspace",
"numpy.sum",
"unittest.main",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((366, 385), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (383, 385), False, 'import logging\n'), ((476, 490), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (488, 490), True, 'import matplotlib.pyplot as plt\n'), ((586, 596), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (594, 596), True, 'import matplotlib.pyplot as plt\n'), ((655, 680), 'numpy.random.rand', 'np.random.rand', (['(sigma + 1)'], {}), '(sigma + 1)\n', (669, 680), True, 'import numpy as np\n'), ((6706, 6746), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (6725, 6746), False, 'import logging\n'), ((6751, 6766), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6764, 6766), False, 'import unittest\n'), ((1811, 1824), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1819, 1824), True, 'import numpy as np\n'), ((1837, 1850), 'numpy.array', 'np.array', (['[5]'], {}), '([5])\n', (1845, 1850), True, 'import numpy as np\n'), ((2177, 2191), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (2186, 2191), True, 'import numpy as np\n'), ((2218, 2243), 'numpy.linspace', 'np.linspace', (['(0.1)', '(10)', '(100)'], {}), '(0.1, 10, 100)\n', (2229, 2243), True, 'import numpy as np\n'), ((2825, 2838), 'numpy.arange', 'np.arange', (['(50)'], {}), '(50)\n', (2834, 2838), True, 'import numpy as np\n'), ((2865, 2889), 'numpy.linspace', 'np.linspace', (['(0.1)', '(10)', '(50)'], {}), '(0.1, 10, 50)\n', (2876, 2889), True, 'import numpy as np\n'), ((2992, 3010), 'numpy.arange', 'np.arange', (['(50)', '(100)'], {}), '(50, 100)\n', (3001, 3010), True, 'import numpy as np\n'), ((3038, 3062), 'numpy.linspace', 'np.linspace', (['(0.1)', '(10)', '(50)'], {}), '(0.1, 10, 50)\n', (3049, 3062), True, 'import numpy as np\n'), ((3749, 3767), 'numpy.array', 'np.array', (['[[1, 2]]'], {}), '([[1, 2]])\n', (3757, 3767), True, 'import numpy as np\n'), ((3780, 3806), 'numpy.array', 'np.array', (['[[5, 0], [0, 1]]'], 
{}), '([[5, 0], [0, 1]])\n', (3788, 3806), True, 'import numpy as np\n'), ((4255, 4273), 'numpy.array', 'np.array', (['[[1, 2]]'], {}), '([[1, 2]])\n', (4263, 4273), True, 'import numpy as np\n'), ((4286, 4312), 'numpy.array', 'np.array', (['[[5, 0], [0, 1]]'], {}), '([[5, 0], [0, 1]])\n', (4294, 4312), True, 'import numpy as np\n'), ((4329, 4355), 'numpy.array', 'np.array', (['[[7, 1], [4, 4]]'], {}), '([[7, 1], [4, 4]])\n', (4337, 4355), True, 'import numpy as np\n'), ((4372, 4398), 'numpy.array', 'np.array', (['[[0, 0], [0, 0]]'], {}), '([[0, 0], [0, 0]])\n', (4380, 4398), True, 'import numpy as np\n'), ((4644, 4678), 'numpy.array', 'np.array', (['[[1, 0], [2, 2], [3, 3]]'], {}), '([[1, 0], [2, 2], [3, 3]])\n', (4652, 4678), True, 'import numpy as np\n'), ((4691, 4755), 'numpy.array', 'np.array', (['[[[5, 0], [0, 1]], [[3, 0], [0, 3]], [[1, 0], [0, 1]]]'], {}), '([[[5, 0], [0, 1]], [[3, 0], [0, 3]], [[1, 0], [0, 1]]])\n', (4699, 4755), True, 'import numpy as np\n'), ((5306, 5340), 'numpy.array', 'np.array', (['[[1, 0], [2, 2], [3, 3]]'], {}), '([[1, 0], [2, 2], [3, 3]])\n', (5314, 5340), True, 'import numpy as np\n'), ((5500, 5566), 'numpy.asarray', 'np.asarray', (['[[2.0, 2.0, 0.0, 0.0, 0.0], [3.0, 3.0, 0.0, 0.0, 0.0]]'], {}), '([[2.0, 2.0, 0.0, 0.0, 0.0], [3.0, 3.0, 0.0, 0.0, 0.0]])\n', (5510, 5566), True, 'import numpy as np\n'), ((5772, 5806), 'numpy.array', 'np.array', (['[[1, 0], [2, 2], [3, 3]]'], {}), '([[1, 0], [2, 2], [3, 3]])\n', (5780, 5806), True, 'import numpy as np\n'), ((5819, 5883), 'numpy.array', 'np.array', (['[[[5, 0], [0, 1]], [[3, 0], [0, 3]], [[1, 0], [0, 1]]]'], {}), '([[[5, 0], [0, 1]], [[3, 0], [0, 3]], [[1, 0], [0, 1]]])\n', (5827, 5883), True, 'import numpy as np\n'), ((6223, 6449), 'numpy.asarray', 'np.asarray', (['[[1.0, 0.0, 0.0, 4.47213595, 2.0], [2.0, 2.0, 0.0, 3.46410162, 3.46410162],\n [3.0, 3.0, 0.0, 2.0, 2.0], [1.0, 0.0, 0.0, 4.47213595, 2.0], [2.0, 2.0,\n 0.0, 3.46410162, 3.46410162], [3.0, 3.0, 0.0, 2.0, 2.0]]'], {}), 
'([[1.0, 0.0, 0.0, 4.47213595, 2.0], [2.0, 2.0, 0.0, 3.46410162, \n 3.46410162], [3.0, 3.0, 0.0, 2.0, 2.0], [1.0, 0.0, 0.0, 4.47213595, 2.0\n ], [2.0, 2.0, 0.0, 3.46410162, 3.46410162], [3.0, 3.0, 0.0, 2.0, 2.0]])\n', (6233, 6449), True, 'import numpy as np\n'), ((1974, 1990), 'numpy.array', 'np.array', (['[11.0]'], {}), '([11.0])\n', (1982, 1990), True, 'import numpy as np\n'), ((2023, 2039), 'numpy.array', 'np.array', (['[-9.0]'], {}), '([-9.0])\n', (2031, 2039), True, 'import numpy as np\n'), ((5640, 5656), 'numpy.sum', 'np.sum', (['(r[i] - t)'], {}), '(r[i] - t)\n', (5646, 5656), True, 'import numpy as np\n'), ((6646, 6666), 'numpy.sum', 'np.sum', (['(upper[i] - t)'], {}), '(upper[i] - t)\n', (6652, 6666), True, 'import numpy as np\n'), ((1325, 1347), 'numpy.array', 'np.array', (['[-9.0, 11.0]'], {}), '([-9.0, 11.0])\n', (1333, 1347), True, 'import numpy as np\n'), ((1733, 1753), 'numpy.array', 'np.array', (['[0.0, 4.0]'], {}), '([0.0, 4.0])\n', (1741, 1753), True, 'import numpy as np\n'), ((2382, 2419), 'numpy.asarray', 'np.asarray', (['[0.2, 1.4, 2.6, 3.8, 5.0]'], {}), '([0.2, 1.4, 2.6, 3.8, 5.0])\n', (2392, 2419), True, 'import numpy as np\n'), ((2473, 2511), 'numpy.asarray', 'np.asarray', (['[-0.2, 0.6, 1.4, 2.2, 3.0]'], {}), '([-0.2, 0.6, 1.4, 2.2, 3.0])\n', (2483, 2511), True, 'import numpy as np\n'), ((3230, 3257), 'numpy.asarray', 'np.asarray', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (3240, 3257), True, 'import numpy as np\n'), ((3331, 3396), 'numpy.asarray', 'np.asarray', (['[0.1, 0.30204082, 0.50408163, 0.70612245, 0.90816327]'], {}), '([0.1, 0.30204082, 0.50408163, 0.70612245, 0.90816327])\n', (3341, 3396), True, 'import numpy as np\n'), ((3460, 3524), 'numpy.asarray', 'np.asarray', (['[0.2, 1.60408163, 3.00816327, 4.4122449, 5.81632653]'], {}), '([0.2, 1.60408163, 3.00816327, 4.4122449, 5.81632653])\n', (3470, 3524), True, 'import numpy as np\n'), ((3578, 3643), 'numpy.asarray', 'np.asarray', (['[-0.2, 0.39591837, 0.99183673, 1.5877551, 
2.18367347]'], {}), '([-0.2, 0.39591837, 0.99183673, 1.5877551, 2.18367347])\n', (3588, 3643), True, 'import numpy as np\n'), ((3940, 3984), 'numpy.asarray', 'np.asarray', (['[1.0, 2.0, 0.0, 4.47213595, 2.0]'], {}), '([1.0, 2.0, 0.0, 4.47213595, 2.0])\n', (3950, 3984), True, 'import numpy as np\n'), ((4094, 4141), 'numpy.asarray', 'np.asarray', (['[0.0421047, 0.02893811, 0.00032147]'], {}), '([0.0421047, 0.02893811, 0.00032147])\n', (4104, 4141), True, 'import numpy as np\n'), ((4954, 4998), 'numpy.asarray', 'np.asarray', (['[1.0, 0.0, 0.0, 4.47213595, 2.0]'], {}), '([1.0, 0.0, 0.0, 4.47213595, 2.0])\n', (4964, 4998), True, 'import numpy as np\n'), ((5051, 5102), 'numpy.asarray', 'np.asarray', (['[2.0, 2.0, 0.0, 3.46410162, 3.46410162]'], {}), '([2.0, 2.0, 0.0, 3.46410162, 3.46410162])\n', (5061, 5102), True, 'import numpy as np\n'), ((5155, 5192), 'numpy.asarray', 'np.asarray', (['[3.0, 3.0, 0.0, 2.0, 2.0]'], {}), '([3.0, 3.0, 0.0, 2.0, 2.0])\n', (5165, 5192), True, 'import numpy as np\n')] |
#-----------------------------------------------------------------------------
# Copyright (c) 2014, <NAME>
# All rights reserved.
#
# Distributed under the terms of the BSD 3-Clause ("BSD New") license.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
import timeit
__all__ = ['apply_to_2d', 'apply_filter_mode', 'convslice', 'downsample',
'pow2', 'time_filters', 'upsample', 'zero_pad']
def pow2(n):
    """Return the smallest power of two greater than or equal to ``n``."""
    exponent = int(np.ceil(np.log2(n)))
    return 2 ** exponent
def zero_pad(x, n):
    """Zero-pad (or truncate) the last axis of ``x`` to length ``n``.

    Returns ``x`` itself when the length already matches; a truncating
    view when it is longer; otherwise a new zero-padded array.
    """
    m = x.shape[-1]
    if m == n:
        return x
    if m > n:
        return x[..., :n]
    padded = np.zeros(x.shape[:-1] + (n,), dtype=x.dtype)
    padded[..., :m] = x
    return padded
def upsample(x, n, axis=0, phase=0):
    """Upsample x by inserting n-1 zeros between samples along the specified axis.

    Parameters
    ----------
    x : array_like
        Input array.
    n : int
        Upsampling factor.
    axis : int
        Axis along which zeros are inserted.
    phase : int
        Offset (0 <= phase < n) of the first retained sample.

    Raises
    ------
    ValueError
        If phase is outside [0, n-1].
    """
    x = np.asarray(x)
    n = int(n)
    if phase < 0 or phase >= n:
        raise ValueError('phase must be between 0 and n-1')
    upshape = list(x.shape)
    upshape[axis] = n*x.shape[axis]
    y = np.zeros(upshape, x.dtype)
    idx = [slice(None)]*y.ndim
    idx[axis] = slice(phase, None, n)
    # Bug fix: NumPy requires a *tuple* for multidimensional indexing;
    # indexing with a list of slices was deprecated and then removed.
    y[tuple(idx)] = x
    return y
def downsample(x, n, axis=0, phase=0):
    """Downsample x by keeping every nth sample along the specified axis, starting with phase.

    Parameters
    ----------
    x : array_like
        Input array.
    n : int
        Downsampling factor.
    axis : int
        Axis along which samples are kept.
    phase : int
        Index (0 <= phase < n) of the first kept sample.

    Raises
    ------
    ValueError
        If phase is outside [0, n-1].
    """
    x = np.asarray(x)
    n = int(n)
    if phase < 0 or phase >= n:
        raise ValueError('phase must be between 0 and n-1')
    idx = [slice(None)]*x.ndim
    idx[axis] = slice(phase, None, n)
    # Bug fix: NumPy requires a *tuple* for multidimensional indexing;
    # indexing with a list of slices was deprecated and then removed.
    return x[tuple(idx)]
def time_filters(flist, x, number=100):
    """Time each callable in ``flist`` applied to ``x``.

    Returns the best-of-3 timeit result (seconds for ``number`` calls)
    per filter; taking the minimum over repeats reduces scheduling noise.
    """
    def best_time(filt):
        timer = timeit.Timer(lambda: filt(x))
        return min(timer.repeat(repeat=3, number=number))
    return [best_time(filt) for filt in flist]
def convslice(L, M, mode='validsame'):
    """Return the slice selecting the named region of a full convolution
    of two signals with lengths ``L`` and ``M``.

    Modes: 'valid', 'same', 'validsame' (default); anything else selects
    the full result.
    """
    lo = min(L, M)
    hi = max(L, M)
    if mode == 'valid':
        return slice(lo - 1, hi)
    if mode == 'same':
        start = (lo - 1) // 2
        return slice(start, start + hi)
    if mode == 'validsame':
        return slice(lo - 1, None)
    # Any unrecognized mode keeps the full convolution.
    return slice(None)
def apply_filter_mode(filt, res, mode=None):
    """Trim a convolution result ``res`` using a slice stored on ``filt``.

    ``mode`` names an attribute of ``filt`` holding the slice; ``None``
    and ``'full'`` leave the result untouched.

    Raises
    ------
    ValueError
        If ``filt`` has no attribute named ``mode``.
    """
    if mode in (None, 'full'):
        return res
    if not hasattr(filt, mode):
        raise ValueError('Unknown mode')
    return res[..., getattr(filt, mode)]
def apply_to_2d(func1d, arr):
    """Apply ``func1d`` to every row of a 2-D array.

    ``func1d`` may return an array-like (results are stacked into one
    array) or a tuple of array-likes (results are returned as a tuple of
    stacked arrays, preserving namedtuple classes when possible).

    Bug fixes: the original used Python 2's ``xrange`` (NameError on
    Python 3) and a bare ``except:``; the only expected failure there is
    ``TypeError`` from calling ``tuple(a, b, ...)`` on a plain tuple class.

    Raises
    ------
    ValueError
        If ``arr`` is not 2-D.
    """
    if len(arr.shape) != 2:
        raise ValueError('arr must be 2-D')
    res = func1d(arr[0])
    if not isinstance(res, tuple):
        res = np.asarray(res)
        out = np.empty(arr.shape[:1] + res.shape, res.dtype)
        out[0] = res
        for k in range(1, arr.shape[0]):
            out[k] = func1d(arr[k])
    else:
        resarr = tuple(np.asarray(r) for r in res)
        try:
            # res may be a namedtuple: keep the same class for the output.
            out = res.__class__(*(np.empty(arr.shape[:1] + r.shape, r.dtype) for r in resarr))
        except TypeError:
            # Plain tuples cannot be constructed from multiple arguments.
            out = tuple(np.empty(arr.shape[:1] + r.shape, r.dtype) for r in resarr)
        for l, r in enumerate(res):
            out[l][0] = r
        for k in range(1, arr.shape[0]):
            row_res = func1d(arr[k])
            for l, r in enumerate(row_res):
                out[l][k] = r
    return out
"numpy.log2",
"numpy.zeros",
"numpy.asarray",
"numpy.empty"
] | [((943, 956), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (953, 956), True, 'import numpy as np\n'), ((1137, 1163), 'numpy.zeros', 'np.zeros', (['upshape', 'x.dtype'], {}), '(upshape, x.dtype)\n', (1145, 1163), True, 'import numpy as np\n'), ((1413, 1426), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1423, 1426), True, 'import numpy as np\n'), ((2613, 2628), 'numpy.asarray', 'np.asarray', (['res'], {}), '(res)\n', (2623, 2628), True, 'import numpy as np\n'), ((2688, 2717), 'numpy.empty', 'np.empty', (['outshape', 'res.dtype'], {}), '(outshape, res.dtype)\n', (2696, 2717), True, 'import numpy as np\n'), ((731, 769), 'numpy.zeros', 'np.zeros', (['(s[:-1] + (n,))'], {'dtype': 'x.dtype'}), '(s[:-1] + (n,), dtype=x.dtype)\n', (739, 769), True, 'import numpy as np\n'), ((569, 579), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (576, 579), True, 'import numpy as np\n'), ((2893, 2906), 'numpy.asarray', 'np.asarray', (['r'], {}), '(r)\n', (2903, 2906), True, 'import numpy as np\n'), ((3019, 3061), 'numpy.empty', 'np.empty', (['(arr.shape[:1] + r.shape)', 'r.dtype'], {}), '(arr.shape[:1] + r.shape, r.dtype)\n', (3027, 3061), True, 'import numpy as np\n'), ((3120, 3162), 'numpy.empty', 'np.empty', (['(arr.shape[:1] + r.shape)', 'r.dtype'], {}), '(arr.shape[:1] + r.shape, r.dtype)\n', (3128, 3162), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
""" Simple 1 input mixer node. This takes the input of M channels, and produces
an output of N channels, using simple matrix mulitplication.
"""
import labgraph as lg
import numpy as np
from ...messages.generic_signal_sample import SignalSampleMessage
class MixerOneInputConfig(lg.Config):
    """Configuration for :class:`MixerOneInputNode`."""
    # This is an NxM matrix (for M inputs, N outputs); each output sample
    # is computed as weights @ input_sample.
    weights: np.ndarray
class MixerOneInputNode(lg.Node):
    """Single-input mixer node.

    Subscribes to one stream of M-channel samples and publishes N-channel
    samples produced by multiplying each sample with the configured
    (N x M) weight matrix.
    """
    IN_SAMPLE_TOPIC = lg.Topic(SignalSampleMessage)
    OUT_SAMPLE_TOPIC = lg.Topic(SignalSampleMessage)
    @lg.subscriber(IN_SAMPLE_TOPIC)
    @lg.publisher(OUT_SAMPLE_TOPIC)
    async def mix_samples(self, in_sample: SignalSampleMessage) -> lg.AsyncPublisher:
        # The sample length must equal the weight matrix's input dimension
        # (number of columns), otherwise the product is undefined.
        if in_sample.sample.shape[0] != self.config.weights.shape[1]:
            raise lg.util.LabgraphError("Mismatching input dimensions")
        # The output keeps the input timestamp; only the channel data changes.
        out_sample = SignalSampleMessage(
            timestamp=in_sample.timestamp,
            sample=np.dot(self.config.weights, in_sample.sample),
        )
        yield self.OUT_SAMPLE_TOPIC, out_sample
| [
"numpy.dot",
"labgraph.publisher",
"labgraph.util.LabgraphError",
"labgraph.Topic",
"labgraph.subscriber"
] | [((514, 543), 'labgraph.Topic', 'lg.Topic', (['SignalSampleMessage'], {}), '(SignalSampleMessage)\n', (522, 543), True, 'import labgraph as lg\n'), ((567, 596), 'labgraph.Topic', 'lg.Topic', (['SignalSampleMessage'], {}), '(SignalSampleMessage)\n', (575, 596), True, 'import labgraph as lg\n'), ((603, 633), 'labgraph.subscriber', 'lg.subscriber', (['IN_SAMPLE_TOPIC'], {}), '(IN_SAMPLE_TOPIC)\n', (616, 633), True, 'import labgraph as lg\n'), ((639, 669), 'labgraph.publisher', 'lg.publisher', (['OUT_SAMPLE_TOPIC'], {}), '(OUT_SAMPLE_TOPIC)\n', (651, 669), True, 'import labgraph as lg\n'), ((844, 897), 'labgraph.util.LabgraphError', 'lg.util.LabgraphError', (['"""Mismatching input dimensions"""'], {}), "('Mismatching input dimensions')\n", (865, 897), True, 'import labgraph as lg\n'), ((1002, 1047), 'numpy.dot', 'np.dot', (['self.config.weights', 'in_sample.sample'], {}), '(self.config.weights, in_sample.sample)\n', (1008, 1047), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
###############################################################################
###############################################################################
## ##
## _ ___ ___ ___ ___ ___ ##
## | | | __ / \ / __| _ | __| ##
## | |__| __| ( ) | (_ | _|__ \ ##
## |____|___ \___/ \___|_| \___/ ##
## v 1.3 (Stable) ##
## ##
## Module for conversion of coordinate frames (ICRF, ITRF, and LVLH) ##
## - ICRF - International Celestial Reference Frame (ECI) ##
## - ITRF - International Terrestrial Reference Frame (ECEF) ##
## - LVLH - Local Vertical Local Horizontal Frame (Hill Frame, VCI) ##
## ##
## Uses the IAU1976 Theory of Precession and IAU1980 Theory of Nutation. ##
## ##
## References: ##
## https://gssc.esa.int/navipedia/index.php/ICRF_to_CEP ##
## https://gssc.esa.int/navipedia/index.php/CEP_to_ITRF ##
## https://gssc.esa.int/navipedia/index.php/Julian_Date ##
## ##
## Written by <NAME>. ##
## Last modified 09-Aug-2021 ##
## Website: https://github.com/sammmlow/LEOGPS ##
## Documentation: https://leogps.readthedocs.io/en/latest/ ##
## ##
###############################################################################
###############################################################################
# Import global libraries
import datetime
import numpy as np
# Import local libraries
#from source import rotate
from source import rotate
##############################################################################
##############################################################################
def icrf2cep(t, r, v=None):
    '''Transformation of the international celestial reference frame (ICRF)
    to the conventional ephemeris pole frame (the True-Of-Epoch frame), by
    correcting precession and nutation. This transformation is performed
    using a composite of two orthogonal rotation matrices P and N.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in ICRF frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in ICRF frame. When omitted (or zero), a
        zero velocity vector is returned.

    Returns
    -------
    r_cep : numpy.ndarray
        Position vector in CEP frame.
    v_cep : numpy.ndarray
        Velocity vector in CEP frame.
    '''
    # Bug fix: the default argument was the module-level array np.zeros(3),
    # a mutable object shared across all calls; use a None sentinel instead.
    if v is None:
        v = np.zeros(3)
    P = rotate.precession(t) # Precession rotation DCM
    N = rotate.nutation(t)   # Nutation rotation DCM
    r_cep = N @ P @ r
    if sum(abs(v)) == 0.0:
        # Zero velocity is the "no velocity supplied" sentinel.
        return r_cep, np.zeros(3)
    return r_cep, N @ P @ v
##############################################################################
##############################################################################
def cep2itrf(t, r, v=None):
    '''Transformation of the conventional ephemeris pole frame (CEP) to the
    international terrestrial reference frame (ITRF), accounting for the
    diurnal rotation of the Earth and for the motion of the poles.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in CEP frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in CEP frame. When omitted (or zero), a
        zero velocity vector is returned.

    Returns
    -------
    r_itrf : numpy.ndarray
        Position vector in ITRF frame.
    v_itrf : numpy.ndarray
        Velocity vector in ITRF frame.
    '''
    # Bug fix: replace the shared mutable default np.zeros(3) with None.
    if v is None:
        v = np.zeros(3)
    N = rotate.nutation(t)
    S = rotate.diurnal(t, N)    # Diurnal rotation DCM
    M = rotate.polewander(t)    # Pole wander rotation DCM
    r_itrf = M @ S @ r
    if sum(abs(v)) == 0.0:
        return r_itrf, np.zeros(3)
    Sd = rotate.diurnal_dot(t, S)  # Time derivative of the diurnal DCM
    v_itrf = M @ ((Sd @ r) + (S @ v))
    return r_itrf, v_itrf
##############################################################################
##############################################################################
def itrf2cep(t, r, v=None):
    '''Transformation of the international terrestrial reference frame (ITRF)
    to the conventional ephemeris pole frame (CEP), discounting the diurnal
    rotation of the Earth and the motion of the poles.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in ITRF frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in ITRF frame. When omitted (or zero), a
        zero velocity vector is returned.

    Returns
    -------
    r_cep : numpy.ndarray
        Position vector in CEP frame.
    v_cep : numpy.ndarray
        Velocity vector in CEP frame.
    '''
    # Bug fix: replace the shared mutable default np.zeros(3) with None.
    if v is None:
        v = np.zeros(3)
    N = rotate.nutation(t)
    S = rotate.diurnal(t, N)
    M = rotate.polewander(t)
    # The rotations are orthogonal, so the inverse is the transpose.
    Si = S.transpose()
    Mi = M.transpose()
    r_cep = Si @ Mi @ r
    if sum(abs(v)) == 0.0:
        return r_cep, np.zeros(3)
    Sd = rotate.diurnal_dot(t, S)
    v_cep = Si @ ((Mi @ v) - (Sd @ r_cep))
    return r_cep, v_cep
##############################################################################
##############################################################################
def cep2icrf(t, r, v=None):
    '''Transformation of the conventional ephemeris pole frame (the True-Of-
    Epoch frame) to the international celestial reference frame (ICRF), by
    discounting precession and nutation via the inverses (transposes) of
    the precession and nutation matrices P and N.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in CEP frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in CEP frame. When omitted (or zero), a
        zero velocity vector is returned.

    Returns
    -------
    r_icrf : numpy.ndarray
        Position vector in ICRF frame.
    v_icrf : numpy.ndarray
        Velocity vector in ICRF frame.
    '''
    # Bug fix: replace the shared mutable default np.zeros(3) with None.
    if v is None:
        v = np.zeros(3)
    Pi = rotate.precession(t).transpose()
    Ni = rotate.nutation(t).transpose()
    r_icrf = Pi @ Ni @ r
    if sum(abs(v)) == 0.0:
        return r_icrf, np.zeros(3)
    return r_icrf, Pi @ Ni @ v
##############################################################################
##############################################################################
def itrf2icrf(t, r, v=None):
    '''Transformation of the international terrestrial reference frame (ITRF)
    to the international celestial reference frame (ICRF), by composing
    `itrf2cep()` and `cep2icrf()`.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in ITRF frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in ITRF frame. When omitted (or zero), a
        zero velocity vector is returned.

    Returns
    -------
    r_icrf : numpy.ndarray
        Position vector in ICRF frame.
    v_icrf : numpy.ndarray
        Velocity vector in ICRF frame.
    '''
    # Bug fix: replace the shared mutable default np.zeros(3) with None.
    if v is None:
        v = np.zeros(3)
    r_cep, v_cep = itrf2cep(t, r, v)
    r_icrf, v_icrf = cep2icrf(t, r_cep, v_cep)
    return r_icrf, v_icrf
##############################################################################
##############################################################################
def icrf2itrf(t, r, v=None):
    '''Transformation of the international celestial reference frame (ICRF)
    to the international terrestrial reference frame (ITRF), by composing
    `icrf2cep()` and `cep2itrf()`.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in ICRF frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in ICRF frame. When omitted (or zero), a
        zero velocity vector is returned.

    Returns
    -------
    r_itrf : numpy.ndarray
        Position vector in ITRF frame.
    v_itrf : numpy.ndarray
        Velocity vector in ITRF frame.
    '''
    # Bug fix: replace the shared mutable default np.zeros(3) with None.
    if v is None:
        v = np.zeros(3)
    r_cep, v_cep = icrf2cep(t, r, v)
    r_itrf, v_itrf = cep2itrf(t, r_cep, v_cep)
    return r_itrf, v_itrf
##############################################################################
##############################################################################
def icrf2hill(baseline, rc, vc):
    '''Transform a relative position (baseline) vector from ICRF into the
    chief spacecraft's local-vertical local-horizontal Euler-Hill frame.

    Parameters
    ----------
    baseline : numpy.ndarray
        Relative position vector (1x3) in ICRF frame.
    rc : numpy.ndarray
        Position vector (1x3) of Chief in ICRF frame.
    vc : numpy.ndarray
        Velocity vector (1x3) of Chief in ICRF frame.

    Returns
    -------
    numpy.ndarray
        Relative position vector (1x3) of Deputy in Euler-Hill frame, or a
        zero vector when the chief velocity is zero (frame undefined).
    '''
    # A valid Hill frame cannot be constructed from a zero velocity vector.
    if sum(abs(vc)) == 0.0:
        return np.zeros(3)
    angular_momentum = np.cross(rc, vc)
    # Basis vectors: radial, angular-momentum direction, and their cross
    # product (same row order as the original implementation).
    radial = rc / np.linalg.norm(rc)
    momentum_dir = angular_momentum / np.linalg.norm(angular_momentum)
    third_axis = np.cross(momentum_dir, radial)
    hill_dcm = np.array([radial, momentum_dir, third_axis])
    return hill_dcm @ baseline
##############################################################################
##############################################################################
# if __name__ == '__main__' :
# import csv # Import CSV library
# input_file = 'OUTPUT.csv' # File name for input
# output_file = 'OUT2.csv' # File name for output
# ti = datetime.datetime(2020,1,15,4,0,0) # Set an initial epoch
# ts = datetime.timedelta(seconds=60) # Set a time step value (s)
# output = open(output_file, 'w') # Open up output file
# with open(input_file) as csvf: # Begin looping through CSV
# csvr = csv.reader(csvf, delimiter=',')
# for row in csvr:
# if len(row) > 0:
# px = float(row[0]) # X-Axis Position in J2000 frame
# py = float(row[1]) # Y-Axis Position in J2000 frame
# pz = float(row[2]) # Z-Axis Position in J2000 frame
# vx = float(row[3]) # X-Axis Velocity in J2000 frame
# vy = float(row[4]) # Y-Axis Velocity in J2000 frame
# vz = float(row[5]) # Z-Axis Velocity in J2000 frame
# pos = np.array([px,py,pz]) # Position Vector J2000
# vel = np.array([vx,vy,vz]) # Velocity Vector J2000
# pos_CEP, vel_CEP = itrf2cep(ti, pos, vel )
# pos_ITRF, vel_ITRF = cep2icrf(ti, pos_CEP, vel_CEP)
# line = str(pos_ITRF[0]) + ', ' # Write X-Axis Position ITRF
# line += str(pos_ITRF[1]) + ', ' # Write Y-Axis Position ITRF
# line += str(pos_ITRF[2]) + ', ' # Write Z-Axis Position ITRF
# line += str(vel_ITRF[0]) + ', ' # Write X-Axis Velocity ITRF
# line += str(vel_ITRF[1]) + ', ' # Write Y-Axis Velocity ITRF
# line += str(vel_ITRF[2]) + '\n' # Write Z-Axis Velocity ITRF
# output.write(line)
# ti = ti + ts # Update the time step
# output.close() # Close the output file
| [
"source.rotate.diurnal_dot",
"numpy.cross",
"source.rotate.nutation",
"numpy.array",
"numpy.zeros",
"source.rotate.precession",
"source.rotate.diurnal",
"numpy.linalg.norm",
"source.rotate.polewander"
] | [((2580, 2591), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2588, 2591), True, 'import numpy as np\n'), ((3388, 3408), 'source.rotate.precession', 'rotate.precession', (['t'], {}), '(t)\n', (3405, 3408), False, 'from source import rotate\n'), ((3443, 3461), 'source.rotate.nutation', 'rotate.nutation', (['t'], {}), '(t)\n', (3458, 3461), False, 'from source import rotate\n'), ((3855, 3866), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3863, 3866), True, 'import numpy as np\n'), ((4631, 4649), 'source.rotate.nutation', 'rotate.nutation', (['t'], {}), '(t)\n', (4646, 4649), False, 'from source import rotate\n'), ((4658, 4678), 'source.rotate.diurnal', 'rotate.diurnal', (['t', 'N'], {}), '(t, N)\n', (4672, 4678), False, 'from source import rotate\n'), ((4713, 4733), 'source.rotate.polewander', 'rotate.polewander', (['t'], {}), '(t)\n', (4730, 4733), False, 'from source import rotate\n'), ((5197, 5208), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5205, 5208), True, 'import numpy as np\n'), ((5963, 5981), 'source.rotate.nutation', 'rotate.nutation', (['t'], {}), '(t)\n', (5978, 5981), False, 'from source import rotate\n'), ((5991, 6011), 'source.rotate.diurnal', 'rotate.diurnal', (['t', 'N'], {}), '(t, N)\n', (6005, 6011), False, 'from source import rotate\n'), ((6023, 6043), 'source.rotate.polewander', 'rotate.polewander', (['t'], {}), '(t)\n', (6040, 6043), False, 'from source import rotate\n'), ((6530, 6541), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6538, 6541), True, 'import numpy as np\n'), ((7805, 7816), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7813, 7816), True, 'import numpy as np\n'), ((8762, 8773), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (8770, 8773), True, 'import numpy as np\n'), ((4884, 4908), 'source.rotate.diurnal_dot', 'rotate.diurnal_dot', (['t', 'S'], {}), '(t, S)\n', (4902, 4908), False, 'from source import rotate\n'), ((6213, 6237), 'source.rotate.diurnal_dot', 'rotate.diurnal_dot', (['t', 
'S'], {}), '(t, S)\n', (6231, 6237), False, 'from source import rotate\n'), ((10553, 10569), 'numpy.cross', 'np.cross', (['rc', 'vc'], {}), '(rc, vc)\n', (10561, 10569), True, 'import numpy as np\n'), ((10726, 10748), 'numpy.cross', 'np.cross', (['h_hat', 'r_hat'], {}), '(h_hat, r_hat)\n', (10734, 10748), True, 'import numpy as np\n'), ((10867, 10898), 'numpy.array', 'np.array', (['[r_hat, h_hat, y_hat]'], {}), '([r_hat, h_hat, y_hat])\n', (10875, 10898), True, 'import numpy as np\n'), ((11042, 11053), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (11050, 11053), True, 'import numpy as np\n'), ((3570, 3581), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3578, 3581), True, 'import numpy as np\n'), ((4849, 4860), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4857, 4860), True, 'import numpy as np\n'), ((6178, 6189), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6186, 6189), True, 'import numpy as np\n'), ((7346, 7366), 'source.rotate.precession', 'rotate.precession', (['t'], {}), '(t)\n', (7363, 7366), False, 'from source import rotate\n'), ((7388, 7406), 'source.rotate.nutation', 'rotate.nutation', (['t'], {}), '(t)\n', (7403, 7406), False, 'from source import rotate\n'), ((7507, 7518), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7515, 7518), True, 'import numpy as np\n'), ((10621, 10639), 'numpy.linalg.norm', 'np.linalg.norm', (['rc'], {}), '(rc)\n', (10635, 10639), True, 'import numpy as np\n'), ((10675, 10692), 'numpy.linalg.norm', 'np.linalg.norm', (['h'], {}), '(h)\n', (10689, 10692), True, 'import numpy as np\n')] |
# Original code from: https://github.com/isayev/ReLeaSE
import numpy as np
import torch
from irelease.utils import read_smi_file, tokenize, read_object_property_file, seq2tensor, pad_sequences
class GeneratorData(object):
    """Character-level training-data container for the sequence generator.

    Reads a training file, frames every sequence with start/end tokens,
    builds the character vocabulary and serves random (input, target)
    tensor pairs for next-character prediction.
    """

    def __init__(self, training_data_path, tokens=None, start_token='<',
                 end_token='>', pad_symbol=' ', max_len=120, use_cuda=None, seed=None,
                 **kwargs):
        """
        Constructor for the GeneratorData object.

        Parameters
        ----------
        training_data_path: str
            path to file with training dataset. Training dataset must contain
            a column with training strings. The file also may contain other
            columns.

        tokens: list (default None)
            list of characters specifying the language alphabet. If left
            unspecified, tokens will be extracted from data automatically.

        start_token: str (default '<')
            special character that will be added to the beginning of every
            sequence and encode the sequence start.

        end_token: str (default '>')
            special character that will be added to the end of every
            sequence and encode the sequence end.

        pad_symbol: str (default ' ')
            special character used to pad sequences to a common length.

        max_len: int (default 120)
            maximum allowed length of the sequences. All sequences longer than
            max_len will be excluded from the training data.

        use_cuda: bool (default None)
            parameter specifying if GPU is used for computations. If left
            unspecified, GPU will be used if available

        seed: int (default None)
            seed for numpy's random number generator.

        kwargs: additional positional arguments
            These include cols_to_read (list, default [0]) specifying which
            column in the file with training data contains training sequences
            and delimiter (str, default ',') that will be used to separate
            columns if there are multiple of them in the file.
        """
        super(GeneratorData, self).__init__()
        if seed:
            # NOTE(review): seed == 0 is falsy and would be silently skipped.
            np.random.seed(seed)
        if 'cols_to_read' not in kwargs:
            kwargs['cols_to_read'] = []
        if 'batch_size' in kwargs:
            self.batch_size = kwargs['batch_size']
        if 'tokens_reload' in kwargs:
            self.tokens_reload = kwargs['tokens_reload']
        data = read_object_property_file(training_data_path, **kwargs)
        self.start_token = start_token
        self.end_token = end_token
        self.pad_symbol = pad_symbol
        # Keep only sequences short enough, framed with start/end tokens.
        self.file = []
        for i in range(len(data)):
            if len(data[i].strip()) <= max_len:
                self.file.append(self.start_token + data[i].strip() + self.end_token)
        self.file_len = len(self.file)
        # Build (or reuse) the character vocabulary from the framed data.
        self.all_characters, self.char2idx, \
            self.n_characters = tokenize(self.file, tokens)
        self.pad_symbol_idx = self.all_characters.index(self.pad_symbol)
        self.use_cuda = use_cuda
        if self.use_cuda is None:
            self.use_cuda = torch.cuda.is_available()

    def load_dictionary(self, tokens, char2idx):
        """Replace the vocabulary with an externally supplied one."""
        self.all_characters = tokens
        self.char2idx = char2idx
        self.n_characters = len(tokens)

    def set_batch_size(self, batch_size):
        """Set the default batch size used when none is passed explicitly."""
        self.batch_size = batch_size

    def random_chunk(self, batch_size):
        """
        Sample random training sequences and return shifted copies.

        Returns
        -------
        tuple of (list of str, list of str)
            Inputs (each sequence without its last character) and targets
            (each without its first), for next-character prediction.
        """
        # NOTE(review): np.random.randint excludes the high bound, so the
        # last entry of self.file is never sampled here.
        index = np.random.randint(0, self.file_len - 1, batch_size)
        return [self.file[i][:-1] for i in index], [self.file[i][1:] for i in index]

    def random_training_set_smiles(self, batch_size=None):
        """Return a random batch of raw sequences, start/end tokens stripped."""
        if batch_size is None:
            batch_size = self.batch_size
        assert (batch_size > 0)
        sels = np.random.randint(0, self.file_len - 1, batch_size)
        return [self.file[i][1:-1] for i in sels]

    def random_training_set(self, batch_size=None, return_seq_len=False):
        """Return a random (input, target) tensor pair, padded to equal length.

        When ``return_seq_len`` is True, the per-sequence lengths of inputs
        and targets are appended as a third element.
        """
        if batch_size is None:
            batch_size = self.batch_size
        assert (batch_size > 0)
        inp, target = self.random_chunk(batch_size)
        inp_padded, inp_seq_len = pad_sequences(inp)
        inp_tensor, self.all_characters = seq2tensor(inp_padded,
                                                   tokens=self.all_characters,
                                                   flip=False)
        target_padded, target_seq_len = pad_sequences(target)
        target_tensor, self.all_characters = seq2tensor(target_padded,
                                                      tokens=self.all_characters,
                                                      flip=False)
        # seq2tensor may have extended the vocabulary, so refresh its size.
        self.n_characters = len(self.all_characters)
        inp_tensor = torch.tensor(inp_tensor).long()
        target_tensor = torch.tensor(target_tensor).long()
        if self.use_cuda:
            inp_tensor = inp_tensor.cuda()
            target_tensor = target_tensor.cuda()
        if return_seq_len:
            return inp_tensor, target_tensor, (inp_seq_len, target_seq_len)
        return inp_tensor, target_tensor

    def read_sdf_file(self, path, fields_to_read):
        """Reading SDF files is not supported by this class."""
        raise NotImplementedError

    def update_data(self, path):
        """Reload the training sequences from the SMILES file at *path*."""
        self.file, success = read_smi_file(path, unique=True)
        self.file_len = len(self.file)
        assert success
| [
"irelease.utils.pad_sequences",
"irelease.utils.seq2tensor",
"irelease.utils.read_smi_file",
"irelease.utils.tokenize",
"numpy.random.randint",
"torch.cuda.is_available",
"torch.tensor",
"numpy.random.seed",
"irelease.utils.read_object_property_file"
] | [((2305, 2360), 'irelease.utils.read_object_property_file', 'read_object_property_file', (['training_data_path'], {}), '(training_data_path, **kwargs)\n', (2330, 2360), False, 'from irelease.utils import read_smi_file, tokenize, read_object_property_file, seq2tensor, pad_sequences\n'), ((2777, 2804), 'irelease.utils.tokenize', 'tokenize', (['self.file', 'tokens'], {}), '(self.file, tokens)\n', (2785, 2804), False, 'from irelease.utils import read_smi_file, tokenize, read_object_property_file, seq2tensor, pad_sequences\n'), ((3441, 3492), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.file_len - 1)', 'batch_size'], {}), '(0, self.file_len - 1, batch_size)\n', (3458, 3492), True, 'import numpy as np\n'), ((3757, 3808), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.file_len - 1)', 'batch_size'], {}), '(0, self.file_len - 1, batch_size)\n', (3774, 3808), True, 'import numpy as np\n'), ((4124, 4142), 'irelease.utils.pad_sequences', 'pad_sequences', (['inp'], {}), '(inp)\n', (4137, 4142), False, 'from irelease.utils import read_smi_file, tokenize, read_object_property_file, seq2tensor, pad_sequences\n'), ((4185, 4247), 'irelease.utils.seq2tensor', 'seq2tensor', (['inp_padded'], {'tokens': 'self.all_characters', 'flip': '(False)'}), '(inp_padded, tokens=self.all_characters, flip=False)\n', (4195, 4247), False, 'from irelease.utils import read_smi_file, tokenize, read_object_property_file, seq2tensor, pad_sequences\n'), ((4394, 4415), 'irelease.utils.pad_sequences', 'pad_sequences', (['target'], {}), '(target)\n', (4407, 4415), False, 'from irelease.utils import read_smi_file, tokenize, read_object_property_file, seq2tensor, pad_sequences\n'), ((4461, 4526), 'irelease.utils.seq2tensor', 'seq2tensor', (['target_padded'], {'tokens': 'self.all_characters', 'flip': '(False)'}), '(target_padded, tokens=self.all_characters, flip=False)\n', (4471, 4526), False, 'from irelease.utils import read_smi_file, tokenize, read_object_property_file, 
seq2tensor, pad_sequences\n'), ((5215, 5247), 'irelease.utils.read_smi_file', 'read_smi_file', (['path'], {'unique': '(True)'}), '(path, unique=True)\n', (5228, 5247), False, 'from irelease.utils import read_smi_file, tokenize, read_object_property_file, seq2tensor, pad_sequences\n'), ((2005, 2025), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2019, 2025), True, 'import numpy as np\n'), ((2973, 2998), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2996, 2998), False, 'import torch\n'), ((4713, 4737), 'torch.tensor', 'torch.tensor', (['inp_tensor'], {}), '(inp_tensor)\n', (4725, 4737), False, 'import torch\n'), ((4769, 4796), 'torch.tensor', 'torch.tensor', (['target_tensor'], {}), '(target_tensor)\n', (4781, 4796), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Plot one quarter-period of a cosine over a symmetric x axis.
X_RANGE = (-1, 1)
Y_RANGE = (-1.05, 1.05)
xs = np.linspace(*X_RANGE, 50)
ys = np.cos(np.linspace(0.0, np.pi / 2))  # 50 samples (linspace default)
plt.title("curve")
plt.grid()
plt.xlim(*X_RANGE)
plt.ylim(*Y_RANGE)
plt.plot(xs, ys)
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.show"
] | [((56, 78), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(50)'], {}), '(-1, 1, 50)\n', (67, 78), True, 'import numpy as np\n'), ((120, 138), 'matplotlib.pyplot.title', 'plt.title', (['"""curve"""'], {}), "('curve')\n", (129, 138), True, 'import matplotlib.pyplot as plt\n'), ((139, 149), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (147, 149), True, 'import matplotlib.pyplot as plt\n'), ((150, 165), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (158, 165), True, 'import matplotlib.pyplot as plt\n'), ((166, 188), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.05)', '(+1.05)'], {}), '(-1.05, +1.05)\n', (174, 188), True, 'import matplotlib.pyplot as plt\n'), ((189, 203), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (197, 203), True, 'import matplotlib.pyplot as plt\n'), ((204, 214), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (212, 214), True, 'import matplotlib.pyplot as plt\n'), ((90, 117), 'numpy.linspace', 'np.linspace', (['(0)', '(0.5 * np.pi)'], {}), '(0, 0.5 * np.pi)\n', (101, 117), True, 'import numpy as np\n')] |
"""Convenience functions for writing netcdf files."""
import fnmatch
import logging
import os
import iris
import numpy as np
from .iris_helpers import unify_1d_cubes
logger = logging.getLogger(__name__)
VAR_KEYS = [
'long_name',
'units',
]
NECESSARY_KEYS = VAR_KEYS + [
'dataset',
'filename',
'project',
'short_name',
]
def _has_necessary_attributes(metadata,
                              only_var_attrs=False,
                              log_level='debug'):
    """Return ``True`` if every dataset dict carries the required keys.

    Parameters
    ----------
    metadata : iterable of dict
        Dataset metadata dictionaries to check.
    only_var_attrs : bool, optional
        If ``True``, require only the variable attributes plus
        ``short_name``; otherwise require all necessary keys.
    log_level : str, optional
        Name of the :mod:`logging` method used to report a missing key.
    """
    if only_var_attrs:
        required = VAR_KEYS + ['short_name']
    else:
        required = NECESSARY_KEYS
    log = getattr(logger, log_level)
    for dataset in metadata:
        missing = [key for key in required if key not in dataset]
        if missing:
            log("Dataset '%s' does not have "
                "necessary attribute '%s'", dataset, missing[0])
            return False
    return True
def get_all_ancestor_files(cfg, pattern=None):
    """Collect the full paths of all files below the ancestor directories.

    Parameters
    ----------
    cfg : dict
        Diagnostic script configuration; ``cfg['input_files']`` lists the
        ancestor directories (entries ending in ``metadata.yml`` are
        skipped).
    pattern : str, optional
        Only return files whose names match this shell-style pattern.

    Returns
    -------
    list of str
        Full paths to the ancestor files.
    """
    found = []
    for ancestor_dir in cfg['input_files']:
        if ancestor_dir.endswith('metadata.yml'):
            continue
        for (dirpath, _, filenames) in os.walk(ancestor_dir):
            if pattern is not None:
                filenames = fnmatch.filter(filenames, pattern)
            found.extend(os.path.join(dirpath, name) for name in filenames)
    return found
def get_ancestor_file(cfg, pattern):
    """Return the single ancestor file whose name matches *pattern*.

    Parameters
    ----------
    cfg : dict
        Diagnostic script configuration.
    pattern : str
        Pattern which specifies the name of the file.

    Returns
    -------
    str or None
        Full path to the file, or ``None`` if no file matched. When
        several files match, a warning is logged and the first one is
        returned.
    """
    matches = get_all_ancestor_files(cfg, pattern=pattern)
    if not matches:
        logger.warning(
            "No file with requested name %s found in ancestor "
            "directories", pattern)
        return None
    if len(matches) > 1:
        logger.warning(
            "Multiple files with requested pattern %s found (%s), returning "
            "first appearance", pattern, matches)
    return matches[0]
def netcdf_to_metadata(cfg, pattern=None, root=None):
    """Convert attributes of netcdf files to list of metadata.

    Parameters
    ----------
    cfg : dict
        Diagnostic script configuration.
    pattern : str, optional
        Only consider files which match a certain pattern.
    root : str, optional (default: ancestor directories)
        Root directory for the search.

    Returns
    -------
    list of dict
        List of dataset metadata.
    """
    # Gather candidate files either from the ancestor dirs or from `root`.
    if root is None:
        all_files = get_all_ancestor_files(cfg, pattern)
    else:
        all_files = []
        for (base, _, files) in os.walk(root):
            if pattern is not None:
                files = fnmatch.filter(files, pattern)
            files = [os.path.join(base, f) for f in files]
            all_files.extend(files)
    # Keep netcdf files only.
    all_files = fnmatch.filter(all_files, '*.nc')

    # Iterate over netcdf files
    metadata = []
    for path in all_files:
        cube = iris.load_cube(path)
        # Global attributes plus selected cube metadata form one entry.
        dataset_info = dict(cube.attributes)
        for var_key in VAR_KEYS:
            dataset_info[var_key] = getattr(cube, var_key)
        dataset_info['short_name'] = cube.var_name
        dataset_info['standard_name'] = cube.standard_name
        dataset_info['filename'] = path

        # Check if necessary keys are available
        if _has_necessary_attributes([dataset_info], log_level='warning'):
            metadata.append(dataset_info)
        else:
            logger.warning("Skipping '%s'", path)
    return metadata
def metadata_to_netcdf(cube, metadata):
    """Convert single metadata dictionary to netcdf file.

    The cube is written to the path given by ``metadata['filename']``
    after the metadata entries have been copied onto it.

    Parameters
    ----------
    cube : iris.cube.Cube
        Cube to be written.
    metadata : dict
        Metadata for the cube.
    """
    metadata = dict(metadata)  # work on a copy; entries are popped below
    if not _has_necessary_attributes([metadata], log_level='warning'):
        logger.warning("Cannot save cube\n%s", cube)
        return
    for var_key in VAR_KEYS:
        setattr(cube, var_key, metadata.pop(var_key))
    cube.var_name = metadata.pop('short_name')
    cube.standard_name = None
    if 'standard_name' in metadata:
        standard_name = metadata.pop('standard_name')
        try:
            cube.standard_name = standard_name
        except ValueError:
            # iris raises ValueError for names it considers invalid
            logger.debug("Got invalid standard_name '%s'", standard_name)
    for (attr, val) in metadata.items():
        if isinstance(val, bool):
            # Stringify booleans — presumably netcdf attributes cannot
            # hold bool values (TODO confirm).
            metadata[attr] = str(val)
    cube.attributes.update(metadata)
    iris_save(cube, metadata['filename'])
def save_1d_data(cubes, path, coord_name, var_attrs, attributes=None):
    """Save 1D data for multiple datasets.

    Create 2D cube with the dimensional coordinate `coord_name` and the
    auxiliary coordinate `dataset` and save 1D data for every dataset given.
    The cube is filled with missing values where no data exists for a dataset
    at a certain point.

    Note
    ----
    Does not check metadata of the `cubes`, i.e. different names or units
    will be ignored.

    Parameters
    ----------
    cubes : dict of iris.cube.Cube
        1D `iris.cube.Cube`s (values) and corresponding datasets (keys).
    path : str
        Path to the new file.
    coord_name : str
        Name of the coordinate.
    var_attrs : dict
        Attributes for the variable (`short_name`, `long_name`, or `units`).
    attributes : dict, optional
        Additional attributes for the cube.
    """
    var_attrs = dict(var_attrs)  # work on a copy; 'short_name' is popped below
    if not cubes:
        logger.warning("Cannot save 1D data, no cubes given")
        return
    if not _has_necessary_attributes(
            [var_attrs], only_var_attrs=True, log_level='warning'):
        logger.warning("Cannot write file '%s'", path)
        return
    datasets = list(cubes.keys())
    cube_list = iris.cube.CubeList(list(cubes.values()))
    # Align all cubes on a common 1D coordinate before stacking them.
    cube_list = unify_1d_cubes(cube_list, coord_name)
    data = [c.data for c in cube_list]
    dataset_coord = iris.coords.AuxCoord(datasets, long_name='dataset')
    coord = cube_list[0].coord(coord_name)
    if attributes is None:
        attributes = {}
    var_attrs['var_name'] = var_attrs.pop('short_name')

    # Create new cube: axis 0 enumerates datasets, axis 1 is `coord_name`.
    cube = iris.cube.Cube(np.ma.array(data),
                          aux_coords_and_dims=[(dataset_coord, 0), (coord, 1)],
                          attributes=attributes,
                          **var_attrs)
    iris_save(cube, path)
def iris_save(source, path):
    """Save :mod:`iris` cube(s), recording the target path in each cube.

    Parameters
    ----------
    source : iris.cube.Cube or iterable of iris.cube.Cube
        Cube(s) to be saved.
    path : str
        Path to the new file.
    """
    cubes = [source] if isinstance(source, iris.cube.Cube) else source
    for cube in cubes:
        cube.attributes['filename'] = path
    iris.save(source, path)
    logger.info("Wrote %s", path)
def save_scalar_data(data, path, var_attrs, aux_coord=None, attributes=None):
    """Save one scalar value per dataset to a netcdf file.

    A 1D cube is created with the auxiliary coordinate `dataset` holding
    the dataset names.

    Note
    ----
    Missing values can be added by `np.nan`.

    Parameters
    ----------
    data : dict
        Scalar data (values) and corresponding datasets (keys).
    path : str
        Path to the new file.
    var_attrs : dict
        Attributes for the variable (`short_name`, `long_name` and `units`).
    aux_coord : iris.coords.AuxCoord, optional
        Optional auxiliary coordinate.
    attributes : dict, optional
        Additional attributes for the cube.
    """
    var_attrs = dict(var_attrs)  # copy; 'short_name' is popped below
    if not data:
        logger.warning("Cannot save scalar data, no data given")
        return
    if not _has_necessary_attributes(
            [var_attrs], only_var_attrs=True, log_level='warning'):
        logger.warning("Cannot write file '%s'", path)
        return
    name_coord = iris.coords.AuxCoord(list(data), long_name='dataset')
    cube_attributes = {} if attributes is None else attributes
    var_attrs['var_name'] = var_attrs.pop('short_name')
    aux_coords_and_dims = [(name_coord, 0)]
    if aux_coord is not None:
        aux_coords_and_dims.append((aux_coord, 0))
    # NaNs in the values become masked (missing) points.
    values = np.ma.masked_invalid(list(data.values()))
    cube = iris.cube.Cube(values,
                          aux_coords_and_dims=aux_coords_and_dims,
                          attributes=cube_attributes,
                          **var_attrs)
    iris_save(cube, path)
| [
"logging.getLogger",
"numpy.ma.array",
"iris.save",
"iris.coords.AuxCoord",
"os.path.join",
"iris.load_cube",
"fnmatch.filter",
"os.walk"
] | [((178, 205), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (195, 205), False, 'import logging\n'), ((3416, 3449), 'fnmatch.filter', 'fnmatch.filter', (['all_files', '"""*.nc"""'], {}), "(all_files, '*.nc')\n", (3430, 3449), False, 'import fnmatch\n'), ((6529, 6580), 'iris.coords.AuxCoord', 'iris.coords.AuxCoord', (['datasets'], {'long_name': '"""dataset"""'}), "(datasets, long_name='dataset')\n", (6549, 6580), False, 'import iris\n'), ((7430, 7453), 'iris.save', 'iris.save', (['source', 'path'], {}), '(source, path)\n', (7439, 7453), False, 'import iris\n'), ((1567, 1585), 'os.walk', 'os.walk', (['input_dir'], {}), '(input_dir)\n', (1574, 1585), False, 'import os\n'), ((3199, 3212), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (3206, 3212), False, 'import os\n'), ((3543, 3563), 'iris.load_cube', 'iris.load_cube', (['path'], {}), '(path)\n', (3557, 3563), False, 'import iris\n'), ((6780, 6797), 'numpy.ma.array', 'np.ma.array', (['data'], {}), '(data)\n', (6791, 6797), True, 'import numpy as np\n'), ((1647, 1677), 'fnmatch.filter', 'fnmatch.filter', (['files', 'pattern'], {}), '(files, pattern)\n', (1661, 1677), False, 'import fnmatch\n'), ((1699, 1720), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (1711, 1720), False, 'import os\n'), ((3274, 3304), 'fnmatch.filter', 'fnmatch.filter', (['files', 'pattern'], {}), '(files, pattern)\n', (3288, 3304), False, 'import fnmatch\n'), ((3326, 3347), 'os.path.join', 'os.path.join', (['base', 'f'], {}), '(base, f)\n', (3338, 3347), False, 'import os\n')] |
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
from keras.models import load_model
model = load_model('chatbot_model\chatbot_model.h5')
import json
import random
import cgi
import cgitb
from datetime import datetime
intents = json.loads(open('chatbot_model\intents.json').read())
words = pickle.load(open('chatbot_model\words.pkl','rb'))
classes = pickle.load(open('chatbot_model\classes.pkl','rb'))
cgitb.enable()
def clean_up_sentence(sentence):
    """Tokenize *sentence* and lemmatize every token in lower case."""
    tokens = nltk.word_tokenize(sentence)
    return [lemmatizer.lemmatize(token.lower()) for token in tokens]
# return bag of words array: 0 or 1 for words that exist in sentence
def bag_of_words(sentence, words, show_details=True):
    """Encode *sentence* as a binary bag-of-words vector over *words*.

    Returns a numpy array of 0/1 flags, one per vocabulary word; when
    ``show_details`` is set, every match is echoed to stdout.
    """
    tokens = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for token in tokens:
        for idx, vocab_word in enumerate(words):
            if vocab_word == token:
                bag[idx] = 1
                if show_details:
                    print("found in bag: %s" % vocab_word)
    return np.array(bag)
def predict_class(sentence):
    """Run the intent model on *sentence* and rank the candidate intents.

    Returns a list of ``{"intent": ..., "probability": ...}`` dicts sorted
    by descending probability; predictions at or below the threshold are
    dropped.
    """
    threshold = 0.25  # filter out low-confidence predictions
    features = bag_of_words(sentence, words, show_details=False)
    probabilities = model.predict(np.array([features]))[0]
    candidates = [(idx, prob) for idx, prob in enumerate(probabilities)
                  if prob > threshold]
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return [{"intent": classes[idx], "probability": str(prob)}
            for idx, prob in candidates]
def getResponse(ints, intents_json):
    """Pick a response for the top-ranked intent.

    Parameters
    ----------
    ints : list of dict
        Ranked intent predictions; only ``ints[0]['intent']`` is used.
    intents_json : dict
        Parsed intents file: an ``'intents'`` list of
        ``{'tag': ..., 'responses': [...]}`` entries.

    Returns
    -------
    str
        A randomly chosen response for the matching tag, or a fixed
        fallback message when the tag is unknown.
    """
    tag = ints[0]['intent']
    for intent in intents_json['intents']:
        if intent['tag'] == tag:
            return random.choice(intent['responses'])
    # Previously an unmatched tag crashed with UnboundLocalError
    # ('result' referenced before assignment); fail soft instead.
    return "Sorry, I don't understand."
def print_header():
    """Write the CGI Content-type header followed by the opening HTML tags."""
    print("Content-type: text/html\n\n<!DOCTYPE html>\n<html>\n<body>")
def print_close():
    """Write the closing HTML tags."""
    print("</body>\n</html>")
def display_data(param1):
    """Render the chatbot's reply to *param1* as a CGI HTML response.

    Parameters
    ----------
    param1 : str
        The user's message, taken from the CGI form field.
    """
    ints = predict_class(param1)
    res = getResponse(ints, intents)
    current_time = datetime.now().strftime("%H:%M")
    # The Content-type header must be the very first output of a CGI
    # script (RFC 3875); the debug print used to precede print_header(),
    # producing a malformed response, so it now follows the header.
    print_header()
    print("Current Time =", current_time)
    print('<p>' + res + '</p>')
    print('<span>' + current_time + '</span>')
    print_close()
def display_error():
    """Emit a minimal CGI page reporting a generic error."""
    error_markup = "<p>Ooops! Sorry, error here!</p>"
    print_header()
    print(error_markup)
    print_close()
def main():
    """CGI entry point: answer the 'human_message' field or show an error."""
    form = cgi.FieldStorage()
    if "human_message" not in form:
        display_error()
        return
    display_data(form["human_message"].value)
main() | [
"cgi.FieldStorage",
"keras.models.load_model",
"random.choice",
"nltk.word_tokenize",
"nltk.stem.WordNetLemmatizer",
"numpy.array",
"datetime.datetime.now",
"cgitb.enable"
] | [((65, 84), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (82, 84), False, 'from nltk.stem import WordNetLemmatizer\n'), ((163, 208), 'keras.models.load_model', 'load_model', (['"""chatbot_model\\\\chatbot_model.h5"""'], {}), "('chatbot_model\\\\chatbot_model.h5')\n", (173, 208), False, 'from keras.models import load_model\n'), ((473, 487), 'cgitb.enable', 'cgitb.enable', ([], {}), '()\n', (485, 487), False, 'import cgitb\n'), ((599, 627), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sentence'], {}), '(sentence)\n', (617, 627), False, 'import nltk\n'), ((1349, 1362), 'numpy.array', 'np.array', (['bag'], {}), '(bag)\n', (1357, 1362), True, 'import numpy as np\n'), ((2392, 2406), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2404, 2406), False, 'from datetime import datetime\n'), ((2736, 2754), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {}), '()\n', (2752, 2754), False, 'import cgi\n'), ((1517, 1530), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (1525, 1530), True, 'import numpy as np\n'), ((2054, 2083), 'random.choice', 'random.choice', (["i['responses']"], {}), "(i['responses'])\n", (2067, 2083), False, 'import random\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""verify by svm"""
import time
import argparse
import numpy as np
from mindspore import context
from sklearn import svm
from sklearn.preprocessing import StandardScaler
_FEATURE_DIM = 14336  # flattened feature length per sample in the .bin dumps


def _load_bin_features(result_dir, num_files, split):
    """Load and stack per-batch float32 feature dumps from *result_dir*.

    Files are named ``dcgan_data_bs100_<i>_<split>_0.bin``; the stacked
    result has shape (num_samples, _FEATURE_DIM).
    """
    batches = []
    for i in range(num_files):
        path = '{}/dcgan_data_bs100_{}_{}_0.bin'.format(result_dir, i, split)
        batches.append(np.fromfile(path, dtype=np.float32).reshape(-1, _FEATURE_DIM))
    return np.array(batches).reshape(-1, _FEATURE_DIM)


def _load_labels(label_path):
    """Load a label dump and flatten it to a 1-D array."""
    return np.load(label_path, allow_pickle=True).reshape(-1, 1).flatten()


def verify_cifar10():
    """
    verify on cifar10 dataset

    Trains an SVM on DCGAN-extracted cifar10 features read from
    pre-processed .bin/.npy dumps and prints the test-set accuracy.
    The duplicated train/test loading loops of the original are factored
    into _load_bin_features/_load_labels.
    """
    label_set_train = _load_labels("./preprocess_Result/cifar10_label_ids_train.npy")
    label_set_test = _load_labels("./preprocess_Result/cifar10_label_ids_test.npy")
    result_set_train = _load_bin_features('./result_Files_train', 500, 'train')
    result_set_test = _load_bin_features('./result_Files_test', 100, 'test')
    print("result_set_train.shape: ", result_set_train.shape)
    print("label_set_train.shape: ", label_set_train.shape)
    print("result_set_test.shape: ", result_set_test.shape)
    print("label_set_test.shape: ", label_set_test.shape)
    print("============================standradScaler")
    standardScaler = StandardScaler()
    standardScaler.fit(result_set_train)
    result_set_train_standard = standardScaler.transform(result_set_train)
    # NOTE(review): the scaler is re-fitted on the test set, matching the
    # original behaviour; fitting only on training data would be the more
    # conventional choice.
    standardScaler.fit(result_set_test)
    result_set_test_standard = standardScaler.transform(result_set_test)
    print("============================training")
    clf = svm.SVC(max_iter=-1)
    start = time.time()
    print("result_set_train.shape: ", result_set_train_standard.shape)
    print("label_set_train.shape: ", label_set_train.shape)
    clf.fit(result_set_train_standard, label_set_train)
    print("train time:", time.time() - start)
    print("============================testing")
    # Test on Training data
    print("result_set_test.shape: ", result_set_test_standard.shape)
    print("label_set_test.shape: ", label_set_test.shape)
    test_result = clf.predict(result_set_test_standard)
    accuracy = sum(test_result == label_set_test) / label_set_test.shape[0]
    print('Test accuracy: ', accuracy)
if __name__ == '__main__':
    # Parse the target device, configure MindSpore, then run verification.
    arg_parser = argparse.ArgumentParser(description='image production training')
    arg_parser.add_argument('--device_target', type=str, default='Ascend',
                            choices=('Ascend', 'GPU'),
                            help='device where the code will be implemented (default: Ascend)')
    arg_parser.add_argument('--device_id', type=int, default=0,
                            help='device id of GPU or Ascend. (Default: 0)')
    cli_args = arg_parser.parse_args()
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=cli_args.device_target,
                        save_graphs=False,
                        device_id=cli_args.device_id)
    print("============================verify_cifar10")
    verify_cifar10()
| [
"numpy.fromfile",
"argparse.ArgumentParser",
"mindspore.context.set_context",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.load",
"time.time",
"sklearn.svm.SVC"
] | [((1079, 1123), 'numpy.load', 'np.load', (['label_path_train'], {'allow_pickle': '(True)'}), '(label_path_train, allow_pickle=True)\n', (1086, 1123), True, 'import numpy as np\n'), ((1145, 1188), 'numpy.load', 'np.load', (['label_path_test'], {'allow_pickle': '(True)'}), '(label_path_test, allow_pickle=True)\n', (1152, 1188), True, 'import numpy as np\n'), ((1488, 1514), 'numpy.array', 'np.array', (['result_set_train'], {}), '(result_set_train)\n', (1496, 1514), True, 'import numpy as np\n'), ((1966, 1991), 'numpy.array', 'np.array', (['result_set_test'], {}), '(result_set_test)\n', (1974, 1991), True, 'import numpy as np\n'), ((2466, 2482), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2480, 2482), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2773, 2793), 'sklearn.svm.SVC', 'svm.SVC', ([], {'max_iter': '(-1)'}), '(max_iter=-1)\n', (2780, 2793), False, 'from sklearn import svm\n'), ((2806, 2817), 'time.time', 'time.time', ([], {}), '()\n', (2815, 2817), False, 'import time\n'), ((3479, 3543), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""image production training"""'}), "(description='image production training')\n", (3502, 3543), False, 'import argparse\n'), ((3948, 4065), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': 'device_target', 'save_graphs': '(False)', 'device_id': 'device_id'}), '(mode=context.GRAPH_MODE, device_target=device_target,\n save_graphs=False, device_id=device_id)\n', (3967, 4065), False, 'from mindspore import context\n'), ((3013, 3024), 'time.time', 'time.time', ([], {}), '()\n', (3022, 3024), False, 'import time\n'), ((1356, 1404), 'numpy.fromfile', 'np.fromfile', (['result_file_train'], {'dtype': 'np.float32'}), '(result_file_train, dtype=np.float32)\n', (1367, 1404), True, 'import numpy as np\n'), ((1838, 1885), 'numpy.fromfile', 'np.fromfile', (['result_file_test'], {'dtype': 'np.float32'}), 
'(result_file_test, dtype=np.float32)\n', (1849, 1885), True, 'import numpy as np\n')] |
import socket
import numpy as np
from select import select
import threading
import time
def clamp(x, min, max):
    """Return `x` limited to the closed interval [`min`, `max`]."""
    if x < min:
        return min
    if x > max:
        return max
    return x
def check_server(ip='192.168.127.12', port=23, timeout=3):
    """
    # `check_server(ip, port, timeout)`
    Check if the server at `ip` on port `port` is responding.
    It waits for `timeout` seconds before returning `False`
    ## Arguments
    * `ip`: IP address of the server
    * `port`: Port number the connection should be attempted
    * `timeout`: Time in seconds that the function should wait before giving up
    ## Return
    * `True` if the server responded
    * `False`otherwise
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # BUGFIX: honor the caller-supplied timeout (was hardcoded to 5 seconds).
    s.settimeout(timeout)
    try:
        s.connect((ip, port))
        return True
    except OSError:
        # OSError covers all socket failures; a bare except would also
        # swallow KeyboardInterrupt.
        return False
    finally:
        # BUGFIX: the socket was previously leaked on every call.
        s.close()
class Packet(object):
    """
    Decodes engineering-unit (EU) data packets, optionally carrying time
    information, received from a Scanivalve DSA-3217 pressure scanner.

    The layout dictionary (see `Scanivalve.packet_info`) supplies the frame
    length and the byte slices of the pressure/temperature/time fields.
    """
    def __init__(self, packet_info):
        # Byte layout of a single frame, as produced by Scanivalve.packet_info.
        self.model = packet_info['model']
        self.packlen = packet_info['packlen']    # frame length in bytes
        self.press = packet_info['press']        # byte slice of the 16 float32 pressures
        self.temp = packet_info['temp']          # byte slice of the temperature words
        self.t = packet_info['t']                # True if the frame carries time info
        self.time = packet_info['time']          # byte slice of the time field (or None)
        self.tunit = packet_info['tunit']        # byte slice of the time-unit field (or None)
        self.acquiring = False
        self.samplesread = 0
        self.fps = 1                             # frames per scan == rows in self.buf
        self.buf = None
        self.allocbuffer(1)
        self.dataread = False
        # Wall-clock timestamps of the first, second and last frame received.
        self.time1 = None
        self.time2 = None
        self.timeN = None
        self.stop_reading = False
    def allocbuffer(self, fps):
        """
        Allocates a raw receive buffer with `fps` frame rows.
        """
        self.buf = np.zeros((fps, self.packlen), np.uint8)
        self.fps = fps
    def scan(self, s, dt):
        """
        Execute the SCAN command on socket `s` and read `self.fps` frames
        into the buffer.  `dt` is the expected frame period in seconds and
        is used only to size the socket timeout and for get_time's fallback.
        """
        fps = self.fps
        self.dt = dt
        # Allow at least 0.5 s (or three frame periods) before timing out.
        s.settimeout(max(0.5, 3 * dt))
        s.send(b"SCAN\n")
        self.acquiring = True
        self.time1 = time.monotonic()
        s.recv_into(self.buf[0], self.packlen)
        self.time2 = time.monotonic()
        self.timeN = self.time2
        self.dataread = True # There is data read
        self.samplesread = 1
        for i in range(1,fps):
            # `stop_reading` is flipped from another thread (Packet.stop /
            # Scanivalve.stop) to abort a running acquisition.
            if self.stop_reading:
                print("STOP_READING")
                break
            s.recv_into(self.buf[i], self.packlen)
            self.timeN = time.monotonic()
            self.samplesread = i+1
        self.acquiring = False
    def get_pressure(self):
        """
        Given a buffer filled with frames, return the pressures as an
        (nsamples, 16) float64 array.
        """
        if not self.dataread:
            raise RuntimeError("No pressure to read from scanivalve!")
        nsamp = self.samplesread
        P = np.zeros((nsamp, 16), np.float64)
        for i in range(nsamp):
            # Reinterpret the raw pressure bytes of frame i as float32.
            np.copyto(P[i], self.buf[i,self.press].view(np.float32))
        return P
    def get_time(self, meas=True):
        """
        Return the sampling period in seconds.  With `meas=True` it is
        measured from the wall-clock frame timestamps; otherwise it is
        derived from the time stamps embedded in the packets (if present).
        Returns -1000.0 when no embedded time info is available.
        """
        nsamp = self.samplesread
        if meas:
            if nsamp > 4:
                return (self.timeN - self.time2) / (nsamp-1)
            elif nsamp > 0:
                return (self.timeN - self.time1) / nsamp
        if not self.t:
            return -1000.0
        # Packet time unit flag: 1 -> microseconds, otherwise milliseconds.
        ttype = self.buf[0,self.tunit].view(np.int32)[0]
        tmult = 1e6 if ttype==1 else 1e3
        t1 = self.buf[0,self.time].view(np.int32)[0]
        # NOTE(review): hardcoded bytes 104:108 here instead of self.time --
        # presumably the time field of the last frame; confirm packet layout.
        t2 = self.buf[self.samplesread-1,104:108].view(np.int32)[0]
        ns = max(1, self.samplesread-1)
        dt = (t2 - t1) / (tmult * ns)
        # NOTE(review): the computed `dt` above is unused and the configured
        # self.dt is returned instead -- looks like this should `return dt`.
        return self.dt
    def clear(self):
        """Reset acquisition state; refuses to clear while a scan is running."""
        if self.acquiring is not False:
            raise RuntimeError("Still acquiring data from scanivalve!")
        self.acquiring = False
        self.samplesread = 0
        self.dataread = False
        self.time1 = None
        self.time2 = None
        self.timeN = None
        self.stop_reading = False
    def isacquiring(self):
        "Is the scanivalve acquiring data?"
        return self.acquiring
    def read(self, meas=True):
        "Read the data from the buffers and return a pair with pressure and sampling rate"
        if self.samplesread > 0:
            p = self.get_pressure()
            dt = self.get_time(meas)
            return p, 1.0/dt
        else:
            raise RuntimeError("Nothing to read from scanivalve!")
    def stop(self):
        """Request that a running `scan` loop stop after the current frame."""
        self.stop_reading = True
        return None
class ScanivalveThread(threading.Thread):
    """
    Worker thread that runs a single scanivalve acquisition
    (`Packet.clear` followed by `Packet.scan`) in the background.
    """
    def __init__(self, s, dt, pack):
        super().__init__()
        self.pack = pack   # Packet instance that owns the receive buffer
        self.s = s         # connected socket to the scanivalve
        self.dt = dt       # expected frame period in seconds
    def run(self):
        packet = self.pack
        packet.clear()
        packet.scan(self.s, self.dt)
    def isacquiring(self):
        """Forward the acquisition flag of the underlying Packet."""
        return self.pack.isacquiring()
# Configuration parameter names the driver knows how to set/query.
valid_lists = ['FPS', 'AVG', 'PERIOD', 'XSCANTRIG']
class Scanivalve(object):
    """
    # Data Acquisition from DSA3217
    Handles data acquisition from Scanivalve DSA-3217
    To initialize, the IP address of the scanivalve device should be used.
    ```python
    import scanivalve
    s = scanivalve.Scanivalve(ip)
    ```
    """
    def __init__(self, ip='191.30.80.131', tinfo=False):
        """
        Connect to the scanivalve at `ip` (port 23) and apply a default
        configuration.  `tinfo` selects whether frames carry time stamps.
        Raises RuntimeError when the connection fails.
        """
        # Create the socket
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.ip = ip
        self.port = 23
        self.acquiring = False
        self.s.settimeout(5)
        # Connect to socket
        try:
            self.s.connect((self.ip,self.port))
        except OSError:
            # OSError covers all socket failures; a bare except would also
            # swallow KeyboardInterrupt.
            self.s = None
            raise RuntimeError("Unable to connect to scanivalve on IP:{}!".format(ip))
        # Clear errors and configure the scanivalve
        self.clear()
        self.numchans = 16
        self.FPS = 1
        self.PERIOD = 500
        self.AVG = 16
        self.XSCANTRIG = 0
        self.time = 2 if tinfo else 0
        self.set_var("BIN", 1)
        self.set_var("EU", 1)
        self.set_var("UNITSCAN", "PA")
        self.set_var("XSCANTRIG", 0)
        self.set_var("QPKTS", 0)
        self.set_var("TIME", self.time)
        self.set_var("SIM", 0)
        self.set_var("AVG", self.AVG)
        self.set_var("PERIOD", self.PERIOD)
        self.set_var("FPS", self.FPS)
        # Frame period: PERIOD is microseconds per channel, 16 channels, AVG averages.
        self.dt = self.PERIOD*1e-6*16 * self.AVG
        # BUGFIX: forward `tinfo` so the packet layout agrees with the TIME
        # setting sent above (previously the default tinfo=True was always used).
        # Note: this assignment deliberately shadows the bound method with the
        # resulting dict, preserving the original attribute name.
        self.packet_info = self.packet_info(tinfo)
        self.model = self.packet_info['model']
        self.pack = Packet(self.packet_info)
        self.pack.allocbuffer(self.FPS)
        self.thread = None
    def packet_info(self, tinfo=True):
        """
        Build a dict describing the binary frame layout for the detected
        model ('3017' or '3217'): frame length and the byte slices of the
        pressure, temperature and (optional) time fields.
        """
        model = self.get_model().strip()
        if model=='3017':
            tinfo = False
            packlen = 104
            tt = None
            tunit = None
            # BUGFIX: `press`/`temp` were left undefined on this branch,
            # which raised NameError when building the dict below.
            # NOTE(review): slices assumed identical to the 3217 layout
            # without time info -- confirm against the DSA-3017 manual.
            press = slice(8, 72)
            temp = slice(72, 104)
        elif model=='3217':
            press = slice(8, 72)
            temp = slice(72,104)
            if tinfo:
                packlen = 112
                tt = slice(104, 108)
                tunit = slice(108, 112)
            else:
                packlen = 104
                tt = None
                tunit = None
        else:
            raise RuntimeError("Model {} not recognized!".format(model))
        return dict(model=model, packlen=packlen, press=press, temp=temp, t=tinfo, time=tt, tunit=tunit)
    def is_pending(self, timeout=0.5):
        "Check whether the scanivalve sent some information"
        r, w, x = select([self.s], [], [], timeout)
        if r == []:
            return None
        else:
            return True
    def list_any(self, command, timeout=0.2):
        """
        Most query commands of the DSA-3X17 consists of
        something like LIST S\n
        This method simply sends the LIST command to the scanivalve and returns
        the data.
        """
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        cmd = ("LIST %s\n" % (command)).encode()
        self.s.send(cmd)
        buffer = b''
        while self.is_pending(timeout):
            buffer = buffer + self.s.recv(1492)
        return [b.split(' ') for b in buffer.decode().strip().split('\r\n')]
    def list_any_map(self, command, timeout=0.5):
        """
        Takes data obtained from `list_any` method and builds a dictionary with the
        different parameters
        """
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        buffer = self.list_any(command, timeout)
        list = {}
        for i in range(len(buffer)):
            list[buffer[i][1]] = buffer[i][2]
        return list
    def hard_zero(self):
        "Command to zero the DSA-3X17"
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        self.s.send(b"CALZ\n")
    def set_var(self, var, val):
        """
        Set the value of a parameter in the scanivalve by using the command
        SET var val
        """
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        cmd = ( "SET %s %s\n" % (var, val) ).encode()
        self.s.send(cmd)
    def get_model(self):
        """
        Returns the model of the scanivalve
        """
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        return self.list_any_map("I")["MODEL"]
    def stop(self):
        """
        Stop the scanivalve and drain any pending data from the socket.
        """
        self.pack.stop_reading = True
        self.pack.acquiring = False
        self.s.send(b"STOP\n")
        self.acquiring = False
        self.thread = None
        time.sleep(0.2)
        buffer = b''
        while self.is_pending(0.5):
            buffer = buffer + self.s.recv(1492)
        return None
    def clear(self):
        """
        Clear the error buffer in the scanivalve
        """
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        self.s.send(b"CLEAR\n")
    def error(self):
        """
        Returns the raw error report (bytes) sent by the scanivalve.
        """
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        self.s.send(b"ERROR\n")
        buffer = b''
        while self.is_pending(1):
            buffer = buffer + self.s.recv(1492)
        # BUGFIX: removed the unreachable second return that followed
        # (dead code, and str methods on a bytes object would have crashed).
        return buffer
    def config1(self, FPS=1, PERIOD=500, AVG=16, xtrig=False):
        """
        Configures data acquisition (clamped to model-specific limits).
        """
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        XSCANTRIG = int(xtrig)
        if self.model=='3017':
            self.PERIOD = clamp(PERIOD, 500, 62500)  # 325 if raw packets: not implemented!
            self.FPS = clamp(FPS, 1, 2**31)  # Could be 0. Not implemented for now!
            self.AVG = clamp(AVG, 1, 32767)
        else:
            self.PERIOD = clamp(PERIOD, 125, 65000)
            self.FPS = clamp(FPS, 1, 2**30)  # Could be 0. Not implemented for now!
            self.AVG = clamp(AVG, 1, 240)
        self.dt = self.PERIOD*1e-6*16 * self.AVG
        self.set_var("FPS", self.FPS)
        self.pack.allocbuffer(self.FPS)
        self.set_var("AVG", self.AVG)
        self.set_var("PERIOD", self.PERIOD)
        self.set_var("XSCANTRIG", XSCANTRIG)
    def config(self, **kw):
        """
        Configures data acquisition from keyword arguments
        (FPS, PERIOD, AVG, XSCANTRIG; case-insensitive).
        """
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        isold = self.model=='3017'
        for k in kw.keys():
            K = k.upper()
            if K == 'XSCANTRIG':
                val = int(kw[k])
                self.XSCANTRIG = val
            elif K=='PERIOD':
                x = int(kw[k])
                val = clamp(x, 500, 62500) if isold else clamp(x, 160, 650000)
                self.PERIOD = val
            elif K=='AVG':
                x = int(kw[k])
                val = clamp(x, 1, 32767) if isold else clamp(x, 1, 240)
                self.AVG = val
            elif K=='FPS':
                x = int(kw[k])
                val = clamp(x, 1, 2**31) if isold else clamp(x, 1, 2**30)
                self.FPS = val
                self.pack.allocbuffer(self.FPS)
            else:
                RuntimeError("Illegal configuration. SET {} {} not implemented!".format(K, kw[k]))
            self.set_var(K, val)
        self.dt = self.PERIOD*1e-6*16 * self.AVG
    def acquire(self):
        """Synchronous acquisition: scan, read and return (pressure, rate)."""
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        self.pack.scan(self.s, self.dt)
        p,freq = self.pack.read()
        self.pack.clear()
        return p, freq
    def start(self):
        """Start an asynchronous acquisition on a background thread."""
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        self.thread = ScanivalveThread(self.s, self.dt, self.pack)
        self.thread.start()
        self.acquiring = True
    def read(self):
        """Join the acquisition thread and return (pressure, rate)."""
        if self.thread is not None:
            self.thread.join()
        if self.pack.samplesread > 0:
            p, freq = self.pack.read()
            self.pack.clear()
            self.thread = None
            self.acquiring = False
            return p, freq
        else:
            #raise RuntimeError("Nothing to read")
            print("ERRO EM READ")
    def samplesread(self):
        """Number of frames read so far by the running acquisition."""
        if self.thread is not None:
            return self.pack.samplesread
        else:
            raise RuntimeError("Scanivalve not reading")
    def samplerate(self, meas=True):
        """Current sample rate in Hz; `meas` selects measured vs packet timing."""
        if self.thread is not None:
            # BUGFIX: honor the `meas` argument (it was hardcoded to True).
            dt = self.pack.get_time(meas)
            if dt < -1.0:
                # get_time returns -1000.0 when no packet time info exists;
                # fall back to the configured frame period.
                dt = self.dt
            return 1.0/dt
        else:
            raise RuntimeError("Scanivalve not reading")
    def isacquiring(self):
        """True while the background acquisition is still reading frames."""
        if self.thread is not None:
            # BUGFIX: was `self.pack.isacquring()` (typo) -> AttributeError.
            return self.pack.isacquiring()
        else:
            raise RuntimeError("Scanivalve not reading")
    def close(self):
        """Stop any acquisition and close the socket."""
        if self.acquiring:
            self.stop()
        self.thread = None
        self.s.close()
        self.s = None
    def nchans(self):
        """Number of pressure channels (fixed at 16 for this device)."""
        return 16
    def channames(self):
        """Channel names as zero-padded strings '01'..'16'."""
        return ["{:02d}".format(i+1) for i in range(self.nchans())]
    def list_config(self):
        """Return a dict describing the device and its current parameters."""
        if self.acquiring:
            raise RuntimeError("Illegal operation. Scanivalve is currently acquiring data!")
        conf = dict(devtype='pressure', manufacturer='scanivalve', model=self.model,
                    parameters=self.list_any_map('S'))
        return conf
| [
"threading.Thread.__init__",
"select.select",
"socket.socket",
"time.monotonic",
"time.sleep",
"numpy.zeros"
] | [((785, 834), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (798, 834), False, 'import socket\n'), ((1773, 1812), 'numpy.zeros', 'np.zeros', (['(fps, self.packlen)', 'np.uint8'], {}), '((fps, self.packlen), np.uint8)\n', (1781, 1812), True, 'import numpy as np\n'), ((2116, 2132), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2130, 2132), False, 'import time\n'), ((2203, 2219), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2217, 2219), False, 'import time\n'), ((2904, 2937), 'numpy.zeros', 'np.zeros', (['(nsamp, 16)', 'np.float64'], {}), '((nsamp, 16), np.float64)\n', (2912, 2937), True, 'import numpy as np\n'), ((5008, 5039), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (5033, 5039), False, 'import threading\n'), ((5769, 5818), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (5782, 5818), False, 'import socket\n'), ((7928, 7961), 'select.select', 'select', (['[self.s]', '[]', '[]', 'timeout'], {}), '([self.s], [], [], timeout)\n', (7934, 7961), False, 'from select import select\n'), ((10301, 10316), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (10311, 10316), False, 'import time\n'), ((2546, 2562), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2560, 2562), False, 'import time\n')] |
import numpy as np
import matplotlib.pyplot as plt
import uncertainties
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
import scipy.constants as sc
import scipy.integrate as integrate
from uncertainties import ufloat
from uncertainties import unumpy as unp
from uncertainties.unumpy import nominal_values as nomval
from uncertainties.unumpy import std_devs as std
# Loading experimental data and results of further calculations
# Detector geometry: aperture radius and source-detector distance (metres).
r = 0.5*45*10**(-3)
L = (73.5+15)*10**(-3)
# Fractional solid angle subtended by the detector.
Omega = 0.5 * ( 1- L/np.sqrt(L**2+r**2))
# Raw spectrum of the unknown sample and the Europium calibration results.
C_u2 = np.genfromtxt('2018-12-10_Nitschke_Pape/Probe_21.Spe', unpack = True)
Peaks_Eu, Q_Eu = np.genfromtxt('EuropiumQ.txt', unpack = True)
Channels = np.linspace(0,len(C_u2[:4000])-1, len(C_u2[:4000]))
# Energy-calibration and efficiency fit parameters with their covariance diagonals.
params_energy, covariance_energy_0, covariance_energy_1, params_Q, covariance_Q_0, covariance_Q_1= np.genfromtxt('Europium.txt', unpack = True)
covariance_energy = np.array([covariance_energy_0, covariance_energy_1])
errors_energy = np.sqrt(np.diag(covariance_energy))
covariance_Q = np.array([covariance_Q_0,covariance_Q_1])
errors_Q = np.sqrt(np.diag(covariance_Q))
def Energy(C):
    """Linear channel-to-energy calibration, propagating the fit uncertainties."""
    slope = ufloat(params_energy[0], errors_energy[0])
    offset = ufloat(params_energy[1], errors_energy[1])
    return slope * C + offset
def Gauss(x, A, xmu, sigma, B):
    """Gaussian with amplitude A, mean xmu, width sigma, plus constant offset B."""
    exponent = -0.5 * (x - xmu) ** 2 / sigma ** 2
    return A * np.exp(exponent) + B
def Gauss_Ufloat(x, A, xmu, sigma):
    """Gaussian without offset, using the uncertainty-aware exponential."""
    exponent = -0.5 * (x - xmu) ** 2 / sigma ** 2
    return A * unp.exp(exponent)
def AreaGaus(A, sigma):
    """Analytic area under a Gaussian: sqrt(2*pi) * sigma * A."""
    norm = np.sqrt(2 * np.pi)
    return norm * sigma * A
def Efficiency(E):
    """Power-law detector efficiency from the fitted Q parameters."""
    factor = ufloat(params_Q[0], errors_Q[0])
    exponent = ufloat(params_Q[1], errors_Q[1])
    return factor * E ** exponent
# Restrict the analysis to the first 4000 channels; tges is the live time (s).
Spektrum = C_u2[:4000]
tges = 4046
Peaks = find_peaks(Spektrum, height = 120)
# Plot the full spectrum (log counts vs calibrated energy) with peak markers.
plt.clf()
plt.hist(unp.nominal_values(Energy(np.arange(0, len(Spektrum[0:4000]), 1))),
bins=unp.nominal_values(Energy(np.linspace(0, len(Spektrum[0:4000]), len(Spektrum[0:4000])))),
weights=Spektrum[0:4000], label='Spektrum')
plt.yscale('log')
plt.plot(nomval(Energy(Peaks[0][:])), Spektrum[Peaks[0][:]], '.',
markersize=4, label='Gauß-Peaks', color='C1', alpha=0.8)
plt.xlim(0,1500)
plt.ylabel('Zählungen pro Energie')
plt.xlabel('E / keV')
plt.legend()
#plt.show()
plt.savefig('Plots/unbekannt2.pdf')
Peaks_Energy = Energy(Peaks[0][:])
# Literature energies of the Co-60 lines for comparison (keV).
Energy_co = np.array([1173.237, 1332.501])
# Fit a Gaussian (+ constant) to a +/-30 channel window around each peak.
Params_u2 = []
errors_u2 = []
for n in Peaks[0]:
    Params, covariance = curve_fit(Gauss, Channels[n-30:n+30], Spektrum[n-30:n+30], p0 = [C_u2[n], n, 1, 0])
    Params_u2.append(Params.tolist())
    errors = np.sqrt(np.diag(covariance))
    errors_u2.append(errors.tolist())
# Diagnostic plot of each fitted window (not saved).
for i,n in enumerate(Peaks[0]):
    # NOTE(review): np.int was removed in NumPy 1.24 -- use int() on newer NumPy.
    l_u = np.int(Channels[n-30])
    l_o = np.int(Channels[n+30])
    plt.clf()
    plt.hist(unp.nominal_values(Energy(np.arange(l_u, l_o, 1))),
    bins=unp.nominal_values(Energy(np.linspace(l_u, l_o, len(Spektrum[n-30:n+30])))),
    weights=Spektrum[n-30:n+30], label='Spektrum')
    Channel_Gauss = np.linspace(n-30,n+30,1000)
    plt.plot(unp.nominal_values(Energy(Channel_Gauss)), Gauss(Channel_Gauss,*Params_u2[i]))
    #plt.show()
# Collect the fit parameters (amplitude, mean, sigma, offset) with uncertainties.
Peaks_mittel = np.round(np.asarray(Params_u2)[:,1],0)
Amplitudes = np.asarray(Params_u2)[:,0]
Amplitudes_ufloat = np.asarray([ufloat(n, np.asarray(errors_u2)[i,0]) for i,n in enumerate(np.asarray(Params_u2)[:,0])])
Means_ufloat = np.asarray([ufloat(n, np.asarray(errors_u2)[i,1]) for i,n in enumerate(np.asarray(Params_u2)[:,1])])
sigmas = np.asarray(Params_u2)[:,2]
sigmas_ufloat = np.asarray([ufloat(n, np.asarray(errors_u2)[i,2]) for i,n in enumerate(np.asarray(Params_u2)[:,2])])
Area_Params = np.array([[n,sigmas[i]] for i,n in enumerate(Amplitudes)])
Area_params_ufloat = np.array([[n,sigmas_ufloat[i]] for i,n in enumerate(Amplitudes_ufloat)])
Constants_ufloat = np.asarray([ufloat(n, np.asarray(errors_u2)[i,3]) for i,n in enumerate(np.asarray(Params_u2)[:,3])])
print("--- Find Peaks and gaussian fit---")
print(f"Channel Peaks: {np.round(Peaks_mittel,0)}")
#print(f"Energy Peaks: {Energy(np.round(Peaks_mittel,0))}")
print(f"Energy Literature: {Energy_co}", '\n')
# Peak areas give the count rate; dividing by live time normalizes to 1/s.
Area = AreaGaus(Area_Params[:,0], Area_Params[:,1])
Area_ufloat = AreaGaus(Area_params_ufloat[:,0], Area_params_ufloat[:,1])
Area_norm = Area/tges
Area_norm_ufloat = Area_ufloat/tges
print("-- Fit Parameter --")
print(f"Amplituden: {Amplitudes_ufloat}")
print(f"Means: {Energy(Means_ufloat)}")
print(f"Sigmas: {sigmas_ufloat}")
print(f"Constants: {Constants_ufloat}", '\n')
print("--- Calculating the activity ---")
# Same geometry as at the top of the script: solid-angle correction factor.
r = 0.5*45*10**(-3)
L = (73.5+15)*10**(-3)
Omega = 0.5 * ( 1- L/np.sqrt(L**2+r**2))
# Emission probabilities of the two gamma lines.
W = np.asarray([0.999736, 0.999856])
Q = Efficiency(Peaks_Energy)
# Activity A = rate / (emission probability * efficiency * solid angle).
Aktivität = np.array([Area_norm[i]/(W[i]*n*Omega) for i,n in enumerate(Q)])
print(f"emission probability: {W}")
print(f"Area under Gaussian Fit: {Area_ufloat}")
print(f"Efficiency: {Q}", '\n')
print(f"resulting acitivity: {Aktivität}")
A_all = sum(Aktivität)/len(Aktivität)#ufloat(np.mean(nomval(Aktivität)),np.std(std(Aktivität)))
print(f"Mean with all values: {nomval(A_all)}, {std(A_all)}")
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"uncertainties.ufloat",
"numpy.genfromtxt",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"numpy.asarray",
"numpy.exp",
"numpy.linspace",
"uncertainties.unumpy.nominal_values",
"scipy.signal.find_peaks",
"uncertainties.unumpy.std_devs",
... | [((547, 614), 'numpy.genfromtxt', 'np.genfromtxt', (['"""2018-12-10_Nitschke_Pape/Probe_21.Spe"""'], {'unpack': '(True)'}), "('2018-12-10_Nitschke_Pape/Probe_21.Spe', unpack=True)\n", (560, 614), True, 'import numpy as np\n'), ((634, 677), 'numpy.genfromtxt', 'np.genfromtxt', (['"""EuropiumQ.txt"""'], {'unpack': '(True)'}), "('EuropiumQ.txt', unpack=True)\n", (647, 677), True, 'import numpy as np\n'), ((842, 884), 'numpy.genfromtxt', 'np.genfromtxt', (['"""Europium.txt"""'], {'unpack': '(True)'}), "('Europium.txt', unpack=True)\n", (855, 884), True, 'import numpy as np\n'), ((908, 960), 'numpy.array', 'np.array', (['[covariance_energy_0, covariance_energy_1]'], {}), '([covariance_energy_0, covariance_energy_1])\n', (916, 960), True, 'import numpy as np\n'), ((1028, 1070), 'numpy.array', 'np.array', (['[covariance_Q_0, covariance_Q_1]'], {}), '([covariance_Q_0, covariance_Q_1])\n', (1036, 1070), True, 'import numpy as np\n'), ((1605, 1637), 'scipy.signal.find_peaks', 'find_peaks', (['Spektrum'], {'height': '(120)'}), '(Spektrum, height=120)\n', (1615, 1637), False, 'from scipy.signal import find_peaks\n'), ((1641, 1650), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1648, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1885, 1902), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1895, 1902), True, 'import matplotlib.pyplot as plt\n'), ((2035, 2052), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1500)'], {}), '(0, 1500)\n', (2043, 2052), True, 'import matplotlib.pyplot as plt\n'), ((2052, 2087), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Zählungen pro Energie"""'], {}), "('Zählungen pro Energie')\n", (2062, 2087), True, 'import matplotlib.pyplot as plt\n'), ((2088, 2109), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""E / keV"""'], {}), "('E / keV')\n", (2098, 2109), True, 'import matplotlib.pyplot as plt\n'), ((2110, 2122), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2120, 2122), True, 
'import matplotlib.pyplot as plt\n'), ((2135, 2170), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Plots/unbekannt2.pdf"""'], {}), "('Plots/unbekannt2.pdf')\n", (2146, 2170), True, 'import matplotlib.pyplot as plt\n'), ((2219, 2249), 'numpy.array', 'np.array', (['[1173.237, 1332.501]'], {}), '([1173.237, 1332.501])\n', (2227, 2249), True, 'import numpy as np\n'), ((4501, 4533), 'numpy.asarray', 'np.asarray', (['[0.999736, 0.999856]'], {}), '([0.999736, 0.999856])\n', (4511, 4533), True, 'import numpy as np\n'), ((985, 1011), 'numpy.diag', 'np.diag', (['covariance_energy'], {}), '(covariance_energy)\n', (992, 1011), True, 'import numpy as np\n'), ((1089, 1110), 'numpy.diag', 'np.diag', (['covariance_Q'], {}), '(covariance_Q)\n', (1096, 1110), True, 'import numpy as np\n'), ((2326, 2420), 'scipy.optimize.curve_fit', 'curve_fit', (['Gauss', 'Channels[n - 30:n + 30]', 'Spektrum[n - 30:n + 30]'], {'p0': '[C_u2[n], n, 1, 0]'}), '(Gauss, Channels[n - 30:n + 30], Spektrum[n - 30:n + 30], p0=[C_u2\n [n], n, 1, 0])\n', (2335, 2420), False, 'from scipy.optimize import curve_fit\n'), ((2571, 2595), 'numpy.int', 'np.int', (['Channels[n - 30]'], {}), '(Channels[n - 30])\n', (2577, 2595), True, 'import numpy as np\n'), ((2604, 2628), 'numpy.int', 'np.int', (['Channels[n + 30]'], {}), '(Channels[n + 30])\n', (2610, 2628), True, 'import numpy as np\n'), ((2631, 2640), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2638, 2640), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2912), 'numpy.linspace', 'np.linspace', (['(n - 30)', '(n + 30)', '(1000)'], {}), '(n - 30, n + 30, 1000)\n', (2890, 2912), True, 'import numpy as np\n'), ((3083, 3104), 'numpy.asarray', 'np.asarray', (['Params_u2'], {}), '(Params_u2)\n', (3093, 3104), True, 'import numpy as np\n'), ((3356, 3377), 'numpy.asarray', 'np.asarray', (['Params_u2'], {}), '(Params_u2)\n', (3366, 3377), True, 'import numpy as np\n'), ((1186, 1228), 'uncertainties.ufloat', 'ufloat', (['params_energy[1]', 
'errors_energy[1]'], {}), '(params_energy[1], errors_energy[1])\n', (1192, 1228), False, 'from uncertainties import ufloat\n'), ((1366, 1409), 'uncertainties.unumpy.exp', 'unp.exp', (['(-0.5 * (x - xmu) ** 2 / sigma ** 2)'], {}), '(-0.5 * (x - xmu) ** 2 / sigma ** 2)\n', (1373, 1409), True, 'from uncertainties import unumpy as unp\n'), ((1492, 1524), 'uncertainties.ufloat', 'ufloat', (['params_Q[0]', 'errors_Q[0]'], {}), '(params_Q[0], errors_Q[0])\n', (1498, 1524), False, 'from uncertainties import ufloat\n'), ((2469, 2488), 'numpy.diag', 'np.diag', (['covariance'], {}), '(covariance)\n', (2476, 2488), True, 'import numpy as np\n'), ((3040, 3061), 'numpy.asarray', 'np.asarray', (['Params_u2'], {}), '(Params_u2)\n', (3050, 3061), True, 'import numpy as np\n'), ((519, 543), 'numpy.sqrt', 'np.sqrt', (['(L ** 2 + r ** 2)'], {}), '(L ** 2 + r ** 2)\n', (526, 543), True, 'import numpy as np\n'), ((1139, 1181), 'uncertainties.ufloat', 'ufloat', (['params_energy[0]', 'errors_energy[0]'], {}), '(params_energy[0], errors_energy[0])\n', (1145, 1181), False, 'from uncertainties import ufloat\n'), ((1277, 1319), 'numpy.exp', 'np.exp', (['(-0.5 * (x - xmu) ** 2 / sigma ** 2)'], {}), '(-0.5 * (x - xmu) ** 2 / sigma ** 2)\n', (1283, 1319), True, 'import numpy as np\n'), ((1436, 1454), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1443, 1454), True, 'import numpy as np\n'), ((1528, 1560), 'uncertainties.ufloat', 'ufloat', (['params_Q[1]', 'errors_Q[1]'], {}), '(params_Q[1], errors_Q[1])\n', (1534, 1560), False, 'from uncertainties import ufloat\n'), ((3857, 3882), 'numpy.round', 'np.round', (['Peaks_mittel', '(0)'], {}), '(Peaks_mittel, 0)\n', (3865, 3882), True, 'import numpy as np\n'), ((4476, 4500), 'numpy.sqrt', 'np.sqrt', (['(L ** 2 + r ** 2)'], {}), '(L ** 2 + r ** 2)\n', (4483, 4500), True, 'import numpy as np\n'), ((4929, 4942), 'uncertainties.unumpy.nominal_values', 'nomval', (['A_all'], {}), '(A_all)\n', (4935, 4942), True, 'from uncertainties.unumpy 
import nominal_values as nomval\n'), ((4946, 4956), 'uncertainties.unumpy.std_devs', 'std', (['A_all'], {}), '(A_all)\n', (4949, 4956), True, 'from uncertainties.unumpy import std_devs as std\n'), ((2680, 2702), 'numpy.arange', 'np.arange', (['l_u', 'l_o', '(1)'], {}), '(l_u, l_o, 1)\n', (2689, 2702), True, 'import numpy as np\n'), ((3152, 3173), 'numpy.asarray', 'np.asarray', (['errors_u2'], {}), '(errors_u2)\n', (3162, 3173), True, 'import numpy as np\n'), ((3268, 3289), 'numpy.asarray', 'np.asarray', (['errors_u2'], {}), '(errors_u2)\n', (3278, 3289), True, 'import numpy as np\n'), ((3422, 3443), 'numpy.asarray', 'np.asarray', (['errors_u2'], {}), '(errors_u2)\n', (3432, 3443), True, 'import numpy as np\n'), ((3709, 3730), 'numpy.asarray', 'np.asarray', (['errors_u2'], {}), '(errors_u2)\n', (3719, 3730), True, 'import numpy as np\n'), ((3201, 3222), 'numpy.asarray', 'np.asarray', (['Params_u2'], {}), '(Params_u2)\n', (3211, 3222), True, 'import numpy as np\n'), ((3317, 3338), 'numpy.asarray', 'np.asarray', (['Params_u2'], {}), '(Params_u2)\n', (3327, 3338), True, 'import numpy as np\n'), ((3471, 3492), 'numpy.asarray', 'np.asarray', (['Params_u2'], {}), '(Params_u2)\n', (3481, 3492), True, 'import numpy as np\n'), ((3758, 3779), 'numpy.asarray', 'np.asarray', (['Params_u2'], {}), '(Params_u2)\n', (3768, 3779), True, 'import numpy as np\n')] |
import scipy.stats as ss
import numpy as np
def read_data(path):
    """
    Parse a CSV whose data rows look like "<pid>_<suffix>,<value>,..."
    (the first line is a header and is skipped).

    Returns a dict mapping the integer patient id to the float value in
    the second column.
    """
    dic = {}
    # BUGFIX: use a context manager so the handle is closed even if a
    # malformed row raises during parsing (the old open/close leaked then).
    with open(path) as file:
        for line in file.readlines()[1:]:
            sl = line.split(',')
            pid = int(sl[0].split('_')[0])
            dic[pid] = float(sl[1])
    return dic
def main():
    """
    Compute, per patient, how much data each artifact-removal variant
    discarded relative to the unfiltered recording, and write the result
    to tmp/data_length.csv.  Patients missing from any variant are skipped.
    """
    no_data = read_data('tmp/No.csv')
    abp_data = read_data('tmp/oABP.csv')
    icp_data = read_data('tmp/oICP.csv')
    all_data = read_data('tmp/ALL.csv')
    pen = open('tmp/data_length.csv', 'w')
    pen.write('FileName,ABP artifact,ICP artifact,AorI\n')
    for pid, baseline in no_data.items():
        # Only patients present in all four datasets are reported.
        if pid not in abp_data or pid not in icp_data or pid not in all_data:
            continue
        row = [pid,
               baseline - abp_data[pid],
               baseline - icp_data[pid],
               baseline - all_data[pid]]
        pen.write(','.join(str(v) for v in row) + '\n')
    pen.close()
def MUT(x, y):
    """p-value of the Mann-Whitney U test between samples x and y."""
    result = ss.mannwhitneyu(x, y)
    return result[1]
def PTT(x, y):
    """p-value of the paired (related-samples) t-test between x and y."""
    return ss.ttest_rel(x, y).pvalue
def spss():
    """
    Read the per-method accuracy columns from tmp/SPSS.csv and write the
    Mann-Whitney U and paired t-test p-values of RCNN against each other
    method to SPSS_result.csv.
    """
    # BUGFIX: the input file handle was never closed; use a context manager.
    with open('tmp/SPSS.csv') as f:
        lines = f.readlines()
    RCNN, FD, LDA, LP, RLDAS = [], [], [], [], []
    for line in lines[1:]:
        sl = line.split(',')
        RCNN.append(float(sl[1]))
        FD.append(float(sl[2]))
        LDA.append(float(sl[3]))
        LP.append(float(sl[4]))
        RLDAS.append(float(sl[5]))
    with open('SPSS_result.csv', 'w') as pen:
        pen.write('Test,FD,LDA,LP,RLDAS\n')
        # Row labels kept verbatim from the original output ('PPT' included).
        for label, test in (('MUT', MUT), ('PPT', PTT)):
            vals = [test(RCNN, other) for other in (FD, LDA, LP, RLDAS)]
            pen.write(label + ',' + ','.join(str(v) for v in vals) + '\n')
def mutual_information(f1, f2):
    """
    Estimate the mutual information between a feature and the (binary)
    class label via Parzen-window entropy estimates:
    I = H(pooled) - (H(f1) + H(f2)) / 2.
    """
    pooled = np.concatenate((f1, f2), axis=0)
    joint_density = parzen_kde(pooled, pooled, 1)
    entropy = -np.sum(np.log(joint_density)) / len(joint_density)
    density_one = parzen_kde(f1, f1, 1)
    density_two = parzen_kde(f2, f2, 1)
    h_one = -np.sum(np.log(density_one)) / len(density_one)
    h_two = -np.sum(np.log(density_two)) / len(density_two)
    # Conditional entropy: average of the per-class entropies.
    return entropy - (h_one + h_two) / 2
def parzen_kde(train, test, window):
    """
    Naive single-feature Parzen-window density estimate.

    For each point in `test`, differences to all `train` points larger than
    `window` are pushed to a huge sentinel (so their kernel weight is ~0),
    then a Gaussian kernel of the training variance is averaged.
    Returns an (len(test), 1) array of density values.
    """
    from scipy.stats import multivariate_normal
    n_train = np.shape(train)[0]
    n_test = np.shape(test)[0]
    # Single-feature estimate (the original multi-feature code is disabled).
    num_feature = 1
    covariance = np.zeros((num_feature, num_feature))
    for d in range(num_feature):
        covariance[d][d] = np.var(train)
    density = np.zeros((n_test, 1))
    # The kernel is identical for every test point, so build it once.
    kernel = multivariate_normal(np.zeros((1, num_feature)), covariance)
    for idx in range(len(test)):
        sample = test[idx]
        diff = np.ones((n_train, 1)) * sample - np.reshape(train, (len(train), 1))
        for d in range(num_feature):
            # Outside the window: sentinel value, kernel weight effectively 0.
            diff[abs(diff[:, d]) > window, d] = 10000000
        density[idx] = np.mean((1/(window**num_feature)) * kernel.pdf((diff/window)))
    return density
def mibif(x, y):
    """
    Mutual-Information-based Best Individual Feature (MIBIF) scoring:
    for every CSP filter, sum the mutual information between each class's
    feature and the pooled rest-classes feature across epochs.
    `y` is expected one-hot encoded (argmax gives the class index).
    """
    # NOTE(review): `CSP` is not imported anywhere in this file -- calling
    # this function raises NameError unless a CSP module is in scope.
    fb_csp = CSP.filterbank_CSP(x)
    # Group the filter-bank CSP features by class label (4 classes assumed).
    xs = [[], [], [], []]
    for i in range(len(fb_csp)):
        xs[y.argmax(axis=1)[i]].append(fb_csp[i])
    # NOTE(review): the "+ 10" slack on the epoch axis presumably absorbs
    # unequal class sizes; entries beyond samplesread stay zero.
    mis = np.zeros((len(xs), len(xs[0][0]), len(xs[0]) + 10))
    for i in range(len(xs)): # class number
        for j in range(len(xs[0][0])): # filter number
            for k in range(len(xs[i])): # epoch count
                one = xs[i][k][j]
                rest = []
                for l in range(len(xs)):
                    if i == l: continue
                    try:
                        rest.extend(xs[l][k][j])
                    except Exception as e:
                        # Classes with fewer epochs than k simply contribute nothing.
                        print(e)
                mis[i][j][k] = mutual_information(one, rest)
    return np.sum(np.sum(mis, axis=2), axis=0)
def make_plot():
    """Grouped bar chart (4 groups x 4 methods) with error bars, saved as EPS."""
    import numpy as np
    import matplotlib.pyplot as plt
    heights = [
        [0.706298669, 0.512972582, 0.810844987, 0.810844987],
        [0.735260041, 0.522692844, 0.834244166, 0.834244166],
        [0.70510582, 0.510224868, 0.810469577, 0.810469577],
        [0.698510998, 0.481737271, 0.805419026, 0.805419026],
    ]
    errors = [
        [0.046212297, 0.072766472, 0.023064386, 0.023064386],
        [0.042870652, 0.078064932, 0.022132357, 0.022132357],
        [0.046591546, 0.073620136, 0.02443552, 0.02443552],
        [0.0472298, 0.076059316, 0.023763944, 0.023763944],
    ]
    # Cap sizes kept exactly as in the original figure (last series uses 2).
    capsizes = [3, 3, 3, 2]
    width = 0.3
    positions = np.arange(len(heights[0])) * 2
    for bar, err, cap in zip(heights, errors, capsizes):
        plt.bar(positions, bar, width=width, yerr=err, capsize=cap)
        positions = [p + width for p in positions]
    plt.savefig('fig/res.eps', format='eps', dpi=1000)
from scipy.signal import butter, lfilter
def butter_bandpass(lowcut, highcut, fs, order=5):
    """
    Design a Butterworth band-pass filter.

    lowcut/highcut are the band edges in Hz, fs the sampling rate in Hz.
    Returns the (b, a) transfer-function coefficients.
    """
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, band, btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Band-pass filter `data` with a Butterworth design (see butter_bandpass)."""
    coeffs = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(coeffs[0], coeffs[1], data)
def bandpass_filter(data, lowcut, highcut, fs, order=5):
    """
    Self-contained Butterworth band-pass filter (design + apply in one call).
    Duplicates butter_bandpass_filter but imports its own dependencies.
    """
    from scipy.signal import butter, lfilter
    nyquist = 0.5 * fs
    normalized_band = [lowcut / nyquist, highcut / nyquist]
    b, a = butter(order, normalized_band, btype='band')
    return lfilter(b, a, data)
def arr_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """
    Apply bandpass_filter to every channel of every epoch in `data`
    (shape: epochs x channels x samples) and return the filtered array.
    """
    # BUGFIX: force a float dtype.  `np.array(data)` inherits the input's
    # dtype, so integer input (e.g. raw ADC counts) silently truncated every
    # filtered sample to int on assignment.
    y = np.array(data, dtype=float)
    for i in range(len(data)):
        for j in range(len(data[i])):
            y[i][j] = bandpass_filter(data[i][j], lowcut, highcut, fs, order)
    return y
def real_test():
    """
    Sanity check on dlfkjalk.csv: each row is feature columns followed by a
    3-way one-hot label.  Trains shrinkage-LDA on rows 20+ and prints the
    accuracy on the first 20 rows.
    """
    # BUGFIX: the file handle was never closed; use a context manager.
    with open('dlfkjalk.csv', 'r') as file:
        lines = file.readlines()
    x = []
    y = []
    for line in lines:
        sl = line.split(",")
        # All but the last three columns are features; the tail is the label.
        x.append(np.array([float(v) for v in sl[:-3]]))
        y.append(np.array([float(sl[-3]), float(sl[-2]), float(sl[-1])]))
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto')
    x = np.array(x)
    y = np.array(y).argmax(axis=1)
    clf2.fit(x[20:], y[20:])
    sc = clf2.score(x[:20], y[:20])
    print(sc)
def test_ddd():
    """
    5-fold cross-validation of shrinkage-LDA on precomputed filter-bank CSP
    features loaded from a local MATLAB file (developer smoke test).
    """
    import scipy.io, BCI
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    # NOTE(review): hardcoded Windows path -- only runs on the author's machine.
    data = scipy.io.loadmat('C:/Users/CNM/Downloads/fbcsp_example/ConvData/csp/1.mat.mat')['csp_fv'][0][0]
    x = np.transpose(data[4])
    y = np.transpose(data[5])
    # BCI.gen_kv_idx yields (train, test) index splits; y is one-hot encoded.
    kv = BCI.gen_kv_idx(y, 5)
    for train_idx, test_idx in kv:
        x_train, y_train = x[train_idx], y[train_idx]
        x_test, y_test = x[test_idx], y[test_idx]
        model = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto')
        model.fit(x_train, y_train.argmax(axis=1))
        # NOTE(review): score/predict are computed but never used or printed.
        score = model.score(x_test, y_test.argmax(axis=1))
        predict = model.predict(x_test)
if __name__ == "__main__":
test_ddd() | [
"BCI.gen_kv_idx",
"numpy.transpose",
"matplotlib.pyplot.savefig",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"numpy.ones",
"numpy.log",
"scipy.signal.butter",
"numpy.array",
"numpy.zeros",
"scipy.stats.ttest_rel",
"matplotlib.pyplot.bar",
"scipy.signal.lfilter",
"numpy.conca... | [((1094, 1115), 'scipy.stats.mannwhitneyu', 'ss.mannwhitneyu', (['x', 'y'], {}), '(x, y)\n', (1109, 1115), True, 'import scipy.stats as ss\n'), ((1149, 1167), 'scipy.stats.ttest_rel', 'ss.ttest_rel', (['x', 'y'], {}), '(x, y)\n', (1161, 1167), True, 'import scipy.stats as ss\n'), ((1873, 1905), 'numpy.concatenate', 'np.concatenate', (['(f1, f2)'], {'axis': '(0)'}), '((f1, f2), axis=0)\n', (1887, 1905), True, 'import numpy as np\n'), ((2550, 2586), 'numpy.zeros', 'np.zeros', (['(num_feature, num_feature)'], {}), '((num_feature, num_feature))\n', (2558, 2586), True, 'import numpy as np\n'), ((2677, 2701), 'numpy.zeros', 'np.zeros', (['(test_size, 1)'], {}), '((test_size, 1))\n', (2685, 2701), True, 'import numpy as np\n'), ((4520, 4576), 'matplotlib.pyplot.bar', 'plt.bar', (['r1', 'bar1'], {'width': 'bandwirth', 'yerr': 'yer1', 'capsize': '(3)'}), '(r1, bar1, width=bandwirth, yerr=yer1, capsize=3)\n', (4527, 4576), True, 'import matplotlib.pyplot as plt\n'), ((4579, 4635), 'matplotlib.pyplot.bar', 'plt.bar', (['r2', 'bar2'], {'width': 'bandwirth', 'yerr': 'yer2', 'capsize': '(3)'}), '(r2, bar2, width=bandwirth, yerr=yer2, capsize=3)\n', (4586, 4635), True, 'import matplotlib.pyplot as plt\n'), ((4638, 4694), 'matplotlib.pyplot.bar', 'plt.bar', (['r3', 'bar3'], {'width': 'bandwirth', 'yerr': 'yer3', 'capsize': '(3)'}), '(r3, bar3, width=bandwirth, yerr=yer3, capsize=3)\n', (4645, 4694), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4753), 'matplotlib.pyplot.bar', 'plt.bar', (['r4', 'bar4'], {'width': 'bandwirth', 'yerr': 'yer4', 'capsize': '(2)'}), '(r4, bar4, width=bandwirth, yerr=yer4, capsize=2)\n', (4704, 4753), True, 'import matplotlib.pyplot as plt\n'), ((4757, 4807), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig/res.eps"""'], {'format': '"""eps"""', 'dpi': '(1000)'}), "('fig/res.eps', format='eps', dpi=1000)\n", (4768, 4807), True, 'import matplotlib.pyplot as plt\n'), ((4980, 5020), 'scipy.signal.butter', 'butter', (['order', 
'[low, high]'], {'btype': '"""band"""'}), "(order, [low, high], btype='band')\n", (4986, 5020), False, 'from scipy.signal import butter, lfilter\n'), ((5171, 5190), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'data'], {}), '(b, a, data)\n', (5178, 5190), False, 'from scipy.signal import butter, lfilter\n'), ((5375, 5415), 'scipy.signal.butter', 'butter', (['order', '[low, high]'], {'btype': '"""band"""'}), "(order, [low, high], btype='band')\n", (5381, 5415), False, 'from scipy.signal import butter, lfilter\n'), ((5422, 5441), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'data'], {}), '(b, a, data)\n', (5429, 5441), False, 'from scipy.signal import butter, lfilter\n'), ((5521, 5535), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5529, 5535), True, 'import numpy as np\n'), ((6137, 6196), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {'solver': '"""lsqr"""', 'shrinkage': '"""auto"""'}), "(solver='lsqr', shrinkage='auto')\n", (6163, 6196), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((6203, 6214), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (6211, 6214), True, 'import numpy as np\n'), ((6546, 6567), 'numpy.transpose', 'np.transpose', (['data[4]'], {}), '(data[4])\n', (6558, 6567), True, 'import numpy as np\n'), ((6574, 6595), 'numpy.transpose', 'np.transpose', (['data[5]'], {}), '(data[5])\n', (6586, 6595), True, 'import numpy as np\n'), ((6603, 6623), 'BCI.gen_kv_idx', 'BCI.gen_kv_idx', (['y', '(5)'], {}), '(y, 5)\n', (6617, 6623), False, 'import scipy.io, BCI\n'), ((2435, 2450), 'numpy.shape', 'np.shape', (['train'], {}), '(train)\n', (2443, 2450), True, 'import numpy as np\n'), ((2468, 2482), 'numpy.shape', 'np.shape', (['test'], {}), '(test)\n', (2476, 2482), True, 'import numpy as np\n'), ((2641, 2654), 'numpy.var', 'np.var', (['train'], {}), '(train)\n', (2647, 2654), True, 'import numpy as np\n'), ((3788, 3807), 'numpy.sum', 'np.sum', (['mis'], {'axis': 
'(2)'}), '(mis, axis=2)\n', (3794, 3807), True, 'import numpy as np\n'), ((6765, 6824), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {'solver': '"""lsqr"""', 'shrinkage': '"""auto"""'}), "(solver='lsqr', shrinkage='auto')\n", (6791, 6824), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((2772, 2796), 'numpy.ones', 'np.ones', (['(train_size, 1)'], {}), '((train_size, 1))\n', (2779, 2796), True, 'import numpy as np\n'), ((2994, 3020), 'numpy.zeros', 'np.zeros', (['(1, num_feature)'], {}), '((1, num_feature))\n', (3002, 3020), True, 'import numpy as np\n'), ((6009, 6024), 'numpy.array', 'np.array', (['cur_x'], {}), '(cur_x)\n', (6017, 6024), True, 'import numpy as np\n'), ((6039, 6054), 'numpy.array', 'np.array', (['cur_y'], {}), '(cur_y)\n', (6047, 6054), True, 'import numpy as np\n'), ((6221, 6232), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (6229, 6232), True, 'import numpy as np\n'), ((1976, 2001), 'numpy.log', 'np.log', (['estimated_density'], {}), '(estimated_density)\n', (1982, 2001), True, 'import numpy as np\n'), ((2138, 2163), 'numpy.log', 'np.log', (['class_one_density'], {}), '(class_one_density)\n', (2144, 2163), True, 'import numpy as np\n'), ((2210, 2235), 'numpy.log', 'np.log', (['class_two_density'], {}), '(class_two_density)\n', (2216, 2235), True, 'import numpy as np\n')] |
import numpy as np
from bp.Model import Model
# Classic ANN topology: 2 -> 3 -> 6 -> 1
# x = np.linspace(-1,1,20)
# Eight 2-D training points (columns after transpose): four in the positive
# quadrant labelled 0, four in the negative quadrant labelled 1.
x = np.array([[1, 2], [2, 3], [3, 3], [1, 4], [-1, -2], [-1, -1], [-2, -3], [-3, -2]]).T
y = np.array([[0, 0, 0, 0, 1, 1, 1, 1]])
model = Model(x, y,inputType='normal')
# Hidden layers of 3 and 6 sigmoid units; the final sigmoid layer is the output
# (the third argument presumably marks the output layer — TODO confirm in bp.Model).
model.addLayer(3, 'sigmoid', False)
model.addLayer(6, 'sigmoid', False)
# model.addReshape([6,x.shape[1]])
model.addLayer(1, 'sigmoid', True)
# 1000 outer iterations of 10 training steps each.
for i in range(1000):
    model.train(10)
"numpy.array",
"bp.Model.Model"
] | [((193, 229), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 1, 1, 1, 1]]'], {}), '([[0, 0, 0, 0, 1, 1, 1, 1]])\n', (201, 229), True, 'import numpy as np\n'), ((238, 269), 'bp.Model.Model', 'Model', (['x', 'y'], {'inputType': '"""normal"""'}), "(x, y, inputType='normal')\n", (243, 269), False, 'from bp.Model import Model\n'), ((104, 190), 'numpy.array', 'np.array', (['[[1, 2], [2, 3], [3, 3], [1, 4], [-1, -2], [-1, -1], [-2, -3], [-3, -2]]'], {}), '([[1, 2], [2, 3], [3, 3], [1, 4], [-1, -2], [-1, -1], [-2, -3], [-3,\n -2]])\n', (112, 190), True, 'import numpy as np\n')] |
import math
import numpy as np
import matplotlib.pyplot as plt
from control.timeresp import step_info
from control.matlab import tf, series, feedback, step
class RF:
    # NOTE(review): `calculo` is declared without `self` or @staticmethod, so it
    # only works when invoked as RF.calculo(...) — confirm against call sites.
    def calculo(mp, ta, u, tempo, numerador, denominador, flag):
        """Tune a PI controller by the frequency-response method and simulate the loop.

        Parameters
        ----------
        mp : float
            Desired fractional overshoot (e.g. 0.1 for 10%).
        ta : float
            Desired settling time in seconds.
        u : float
            Step amplitude applied to the closed loop.
        tempo : array-like
            Time vector for the step simulation.
        numerador, denominador
            Numerator and first-order denominator coefficients of the plant
            G(s); the `kp[-1]` indexing suggests `numerador` is array-like —
            TODO confirm.
        flag : bool
            When True, plot the closed-loop step response.

        Returns
        -------
        tuple
            (kp, ki, h_s): PI gains and the closed-loop transfer function.
        """
        # Damping ratio from the overshoot spec: qsi = -ln(mp)/sqrt(pi^2 + ln^2(mp)).
        var = (math.log(mp)/-math.pi) ** 2
        qsi = math.sqrt(var/(1+var))
        # Target phase margin (degrees) derived from the damping ratio.
        mf = np.degrees(np.arcsin(qsi)) * 2
        # Natural frequency from the settling-time criterion ta ≈ 4/(qsi*wn).
        wn = 4 / (qsi * ta)
        wcg = complex(0,wn)
        # Evaluate the first-order plant G(s) at s = j*wn.
        g_jwcg = numerador/ ((denominador[0] * wcg) + denominador[1])
        mod_g_jwcg = np.absolute(g_jwcg)
        # Phase of G(j*wn) converted from radians to degrees.
        angulo_g_jwcg = (np.angle(g_jwcg)*180)/math.pi
        # Phase the PI controller must contribute to reach the phase margin.
        theta = -180 + mf - angulo_g_jwcg
        kp = np.cos(np.radians(theta)) / mod_g_jwcg
        ki = - (np.sin(np.radians(theta)) * wn**2) / (mod_g_jwcg * wn)
        # C(s) = (kp*s + ki)/s in series with G(s), closed with unit feedback.
        c_s = tf([kp[-1], ki[-1]], [1, 0])
        g_s = tf(numerador, denominador)
        c_g_s = series(c_s, g_s)
        h_s = feedback(c_g_s, 1)
        resposta, _ = step(h_s*u, tempo)
        info = step_info(h_s*u)
        #print(f"mod_g_jwcg: {mod_g_jwcg}\nangulo_g_jwcg: {angulo_g_jwcg}\ntheta: {theta}\nkp: {kp}\nki: {ki}\nc_s: {c_s}\ng_s: {g_s}\nc_g_s: {c_g_s}\nh_s: {h_s}")
        # Optionally plot the step response annotated with the step metrics.
        if flag:
            plt.plot(tempo, resposta)
            plt.xlabel('Tempo(s)')
            plt.ylabel('Distância(cm)')
            plt.title('Resposta Malha Fechada - Sintonia Resposta em Frequência')
            plt.legend([f"Rise Time:{info['RiseTime']:.2f}s\nOvershoot:{info['Overshoot']:.2f}%\nSettling Time:{info['SettlingTime']:.2f}s\nPeak:{info['Peak']:.2f}cm\nPeak Time:{info['PeakTime']:.2f}s"])
            plt.grid()
            plt.show()
        return kp[-1], ki[-1], h_s
"numpy.radians",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"control.timeresp.step_info",
"control.matlab.series",
"matplotlib.pyplot.ylabel",
"numpy.absolute",
"matplotlib.pyplot.plot",
"control.matlab.tf",
"math.sqrt",
"control.matlab.step",
"matplotlib.pyplot.xlabel",
"math.log"... | [((298, 324), 'math.sqrt', 'math.sqrt', (['(var / (1 + var))'], {}), '(var / (1 + var))\n', (307, 324), False, 'import math\n'), ((549, 568), 'numpy.absolute', 'np.absolute', (['g_jwcg'], {}), '(g_jwcg)\n', (560, 568), True, 'import numpy as np\n'), ((823, 851), 'control.matlab.tf', 'tf', (['[kp[-1], ki[-1]]', '[1, 0]'], {}), '([kp[-1], ki[-1]], [1, 0])\n', (825, 851), False, 'from control.matlab import tf, series, feedback, step\n'), ((875, 901), 'control.matlab.tf', 'tf', (['numerador', 'denominador'], {}), '(numerador, denominador)\n', (877, 901), False, 'from control.matlab import tf, series, feedback, step\n'), ((927, 943), 'control.matlab.series', 'series', (['c_s', 'g_s'], {}), '(c_s, g_s)\n', (933, 943), False, 'from control.matlab import tf, series, feedback, step\n'), ((967, 985), 'control.matlab.feedback', 'feedback', (['c_g_s', '(1)'], {}), '(c_g_s, 1)\n', (975, 985), False, 'from control.matlab import tf, series, feedback, step\n'), ((1017, 1037), 'control.matlab.step', 'step', (['(h_s * u)', 'tempo'], {}), '(h_s * u, tempo)\n', (1021, 1037), False, 'from control.matlab import tf, series, feedback, step\n'), ((1052, 1070), 'control.timeresp.step_info', 'step_info', (['(h_s * u)'], {}), '(h_s * u)\n', (1061, 1070), False, 'from control.timeresp import step_info\n'), ((1262, 1287), 'matplotlib.pyplot.plot', 'plt.plot', (['tempo', 'resposta'], {}), '(tempo, resposta)\n', (1270, 1287), True, 'import matplotlib.pyplot as plt\n'), ((1300, 1322), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tempo(s)"""'], {}), "('Tempo(s)')\n", (1310, 1322), True, 'import matplotlib.pyplot as plt\n'), ((1335, 1362), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distância(cm)"""'], {}), "('Distância(cm)')\n", (1345, 1362), True, 'import matplotlib.pyplot as plt\n'), ((1375, 1444), 'matplotlib.pyplot.title', 'plt.title', (['"""Resposta Malha Fechada - Sintonia Resposta em Frequência"""'], {}), "('Resposta Malha Fechada - Sintonia Resposta em 
Frequência')\n", (1384, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1457, 1658), 'matplotlib.pyplot.legend', 'plt.legend', (['[f"""Rise Time:{info[\'RiseTime\']:.2f}s\nOvershoot:{info[\'Overshoot\']:.2f}%\nSettling Time:{info[\'SettlingTime\']:.2f}s\nPeak:{info[\'Peak\']:.2f}cm\nPeak Time:{info[\'PeakTime\']:.2f}s"""\n ]'], {}), '([\n f"""Rise Time:{info[\'RiseTime\']:.2f}s\nOvershoot:{info[\'Overshoot\']:.2f}%\nSettling Time:{info[\'SettlingTime\']:.2f}s\nPeak:{info[\'Peak\']:.2f}cm\nPeak Time:{info[\'PeakTime\']:.2f}s"""\n ])\n', (1467, 1658), True, 'import matplotlib.pyplot as plt\n'), ((1661, 1671), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1669, 1671), True, 'import matplotlib.pyplot as plt\n'), ((1684, 1694), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1692, 1694), True, 'import matplotlib.pyplot as plt\n'), ((247, 259), 'math.log', 'math.log', (['mp'], {}), '(mp)\n', (255, 259), False, 'import math\n'), ((355, 369), 'numpy.arcsin', 'np.arcsin', (['qsi'], {}), '(qsi)\n', (364, 369), True, 'import numpy as np\n'), ((594, 610), 'numpy.angle', 'np.angle', (['g_jwcg'], {}), '(g_jwcg)\n', (602, 610), True, 'import numpy as np\n'), ((704, 721), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (714, 721), True, 'import numpy as np\n'), ((760, 777), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (770, 777), True, 'import numpy as np\n')] |
import os
import uuid
import time
import numpy as np
import tensorflow as tf
import external.luodong.Model2bin as mem_to_bin
# Tag this conversion run with a fresh UUID and today's date stamp.
mod_uuid = uuid.uuid4()
TODAY = time.strftime("%Y%m%d")
# Convert model ids 0..21 from the DTLN keras export directory.
model_ids = list(np.arange(22))
mem_path = r'/home/hangz/TF_graphs/unit_test/DTLN_keras/converted_model/'
# Ensure the upload target directory exists before writing the .bin files.
os.makedirs(os.path.join(mem_path, 'upload'), exist_ok=True)
mem_to_bin.auto_create_bin(mem_path=mem_path, model_ids=model_ids, file_prefix='dtln', mod_uuid=mod_uuid, today=TODAY)
| [
"external.luodong.Model2bin.auto_create_bin",
"time.strftime",
"os.path.join",
"uuid.uuid4",
"numpy.arange"
] | [((138, 150), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (148, 150), False, 'import uuid\n'), ((159, 182), 'time.strftime', 'time.strftime', (['"""%Y%m%d"""'], {}), "('%Y%m%d')\n", (172, 182), False, 'import time\n'), ((350, 472), 'external.luodong.Model2bin.auto_create_bin', 'mem_to_bin.auto_create_bin', ([], {'mem_path': 'mem_path', 'model_ids': 'model_ids', 'file_prefix': '"""dtln"""', 'mod_uuid': 'mod_uuid', 'today': 'TODAY'}), "(mem_path=mem_path, model_ids=model_ids,\n file_prefix='dtln', mod_uuid=mod_uuid, today=TODAY)\n", (376, 472), True, 'import external.luodong.Model2bin as mem_to_bin\n'), ((200, 213), 'numpy.arange', 'np.arange', (['(22)'], {}), '(22)\n', (209, 213), True, 'import numpy as np\n'), ((301, 333), 'os.path.join', 'os.path.join', (['mem_path', '"""upload"""'], {}), "(mem_path, 'upload')\n", (313, 333), False, 'import os\n')] |
# <NAME> <<<EMAIL>>>.
# A DataProjector is an object that projects data object to a latent space
# At the moment we will use LSI types of methdos to do this
import numpy
from gensim import corpora, models, similarities,matutils
import numpy as np
import os.path
import time
import scipy.sparse
from pathlib import Path
class DataProjector:
    def __init__(self, data_orig, params, model_path):
        """Store the corpus, configuration, and output path; latent-space
        artifacts (tfidf model, LSI model, V matrix, feature matrices) start
        as None and are filled in by generate_latent_space() /
        create_feature_matrices()."""
        self.params = params
        self.model_path = model_path
        self.num_terms = data_orig.corpus.num_terms #total number of features (items of views, aka terms)
        self.num_docs = data_orig.corpus.num_docs #total number of data (snapshots)
        self.num_features = params["num_latent_dims"] #number of latent dimensions
        self.data_orig = data_orig # keep the original data
        self.corpus_normalized = None # the corpus in tf-idf (normalized) form
        self.tfidf = None # the tf-idf model of the input corpus
        self.corpus_lsi = None # the corpus projected into the LSI space
        self.lsi = None # the LSI transformation fitted on corpus_normalized
        self.svd_v = None # the V matrix in lsi[X] = U^-1*X = V*S
    def generate_latent_space(self):
        """Build (or load from the ./temp cache) the tf-idf model, the LSI
        model, and the document matrix V of the truncated SVD."""
        #creating temp folder if not exist
        Path(os.path.join(self.model_path,'temp')).mkdir(parents=True, exist_ok=True)
        #for now just use Gensim's LSA for latent space
        # If all four cached artifacts exist, load them instead of refitting.
        if os.path.isfile(os.path.join(self.model_path,'./temp/corp1.lsi')) and os.path.isfile(os.path.join(self.model_path,'./temp/corp1.tfidf')) \
                and os.path.isfile(os.path.join(self.model_path,'./temp/corpus_normalized.mm')) and os.path.isfile(os.path.join(self.model_path,'./temp/corp1.svd_v.npy')) :
            print('Loading LSI model from folder /temp...')
            #The mapping between the questions (how many times does a word appear..) and ids is called a dictionary
            #self.dictionary = corpora.Dictionary.load('./temp/corp1.dict')
            self.lsi = models.LsiModel.load(os.path.join(self.model_path,'./temp/corp1.lsi'))
            self.tfidf = models.TfidfModel.load(os.path.join(self.model_path,'./temp/corp1.tfidf'))
            self.svd_v = np.load(os.path.join(self.model_path,'./temp/corp1.svd_v.npy'))
            self.corpus_normalized = corpora.MmCorpus(os.path.join(self.model_path,'./temp/corpus_normalized.mm'))
        else:
            #use libraries from gensim to build LSI model
            print('Create latent space and save it in /temp...')
            t1 = time.time()
            #todo: maybe I don't need to do tfidf, but if I do I should also do it for the query
            self.tfidf = models.TfidfModel(self.data_orig.corpus)
            self.tfidf.save(os.path.join(self.model_path,'./temp/corp1.tfidf'))
            corpus_tfidf = self.tfidf[self.data_orig.corpus]
            self.corpus_normalized = corpus_tfidf # tfidf is a basic normalization
            corpora.MmCorpus.serialize(os.path.join(self.model_path,'./temp/corpus_normalized.mm'), self.corpus_normalized) #save the normalized corpus
            # initialize an LSI transformation
            self.lsi = models.LsiModel(self.corpus_normalized, id2word=self.data_orig.dictionary, num_topics=self.num_features)
            self.lsi.save(os.path.join(self.model_path,'./temp/corp1.lsi'))
            # Given a model lsi = LsiModel(X, ...), with the truncated singular value decomposition of your corpus X being X=U*S*V^T,
            # doing lsi[X] computes U^-1*X, which equals V*S (basic linear algebra). So if you want V, divide lsi[X] by S:
            self.svd_v = matutils.corpus2dense(self.lsi[self.corpus_normalized], num_terms=len(self.lsi.projection.s)).T / self.lsi.projection.s # NumPy `/` is element-wise, broadcasting s over rows
            np.save(os.path.join(self.model_path,'./temp/corp1.svd_v.npy'), self.svd_v)
            #print(lsi.print_topics(self.num_latent_dims))
            t2 = time.time()
            t_latent = t2-t1
            print('Latent space creation took %f second' %t_latent)
        # create a double wrapper over the original corpus: bow->tfidf->fold-in-lsi
        self.corpus_lsi = self.lsi[self.corpus_normalized]
    def create_feature_matrices(self):
        """Create (or load from the ./temp cache) the term and document
        feature matrices introduced in [1]: terms are projected into the
        latent space first, and documents are then projected via the term
        matrix (document-transformation idea)."""
        if os.path.isfile(os.path.join(self.model_path,'./temp/term_f_mat.npy')) and os.path.isfile(os.path.join(self.model_path,'./temp/doc_f_mat.npy')):
            self.term_f_mat = np.load(os.path.join(self.model_path,'./temp/term_f_mat.npy'))
            self.doc_f_mat = np.load(os.path.join(self.model_path,'./temp/doc_f_mat.npy'))
        else:
            t1 = time.time()
            w = self.svd_v
            w = w/self.lsi.projection.s # this is necessary based on the LSI in wiki
            # Use sparse matrix rather than dense matrices to do the calculations (save memory)
            M_T_sparse = matutils.corpus2csc(self.corpus_normalized, num_terms=self.data_orig.num_features, num_docs=self.data_orig.num_data, num_nnz=self.data_orig.corpus.num_nnz)
            self.term_f_mat = M_T_sparse.dot(w)
            np.save(os.path.join(self.model_path,'./temp/term_f_mat.npy'), self.term_f_mat)
            t2 = time.time()
            # Based on the assumptions in [1], I need to normalize to have P(t_i|d_j) in the original space
            # Normalize the document vectors to sum up to one
            if self.params["normalize_terms"]:
                sum_over_terms = M_T_sparse.sum(axis=0).A.ravel() # take the sum over terms for each doc
                sum_over_terms_diag = scipy.sparse.diags(1/sum_over_terms, 0) # create an inverted diag matrix of sums
                M_T_sparse_normalized = M_T_sparse.dot(sum_over_terms_diag) # divide by sums by using dot product
                M_T_sparse_normalized_T = M_T_sparse_normalized.transpose()
            else:
                M_T_sparse_normalized_T = M_T_sparse.transpose()
            # Use sparse matrix rather than dense matrices to do the calculations (save memory)
            self.doc_f_mat = M_T_sparse_normalized_T.dot(self.term_f_mat)
            np.save(os.path.join(self.model_path,'./temp/doc_f_mat.npy'), self.doc_f_mat)
            t3 = time.time()
            t_term_mat = t2-t1
            t_doc_mat = t3-t2
            t_total = t3-t1
            print('Creating term matrix %f second' %t_term_mat)
            print('Creating document matrix %f second' %t_doc_mat)
            print('Total %f second' %t_total)
    def item_fv(self,index_item):
        """Return the latent-space feature vector of a term (row of term_f_mat)."""
        return self.term_f_mat[index_item][:]
    def doc_fv(self,index_doc):
        """Return the latent-space feature vector of a corpus document (row of doc_f_mat)."""
        #there would be new docs generated in every iteration. Should I update the latent space? "no" at the moment
        return self.doc_f_mat[index_doc][:]
    def doc_fv_new(self, new_doc_fv):
        """Project a new (out-of-corpus) document into the latent space.

        The new document's bag-of-words vector is tf-idf-weighted,
        optionally normalized to sum to one, then folded in via
        fv * term_f_mat -- no refit of the latent space is needed.
        """
        #feedbacks are on new docs (not in corpus)
        #It is only enough to transform the new doc fv to the latent space which can be done as: fv * self.term_f_mat
        #input: new_doc_fv should be a bag-of-word representation of a document (sparse matrix)
        # the logger needs to check if the term names are the same to the current dictionary
        # use tfidf
        new_doc_fv_tfidf = self.tfidf[new_doc_fv]
        # Expand the sparse (term_id, weight) pairs into a dense array.
        new_doc_fv_normalized = np.zeros(self.num_terms)
        sum_over_terms = 0
        for i in range(len(new_doc_fv_tfidf)):
            new_doc_fv_normalized[int(new_doc_fv_tfidf[i][0])] = new_doc_fv_tfidf[i][1]
            sum_over_terms = sum_over_terms + new_doc_fv_tfidf[i][1]
        if self.params["normalize_terms"]:
            new_doc_fv_normalized = new_doc_fv_normalized / sum_over_terms
        new_fv = np.dot(new_doc_fv_normalized, self.term_f_mat)
        return new_fv
"gensim.models.LsiModel",
"numpy.dot",
"numpy.zeros",
"gensim.matutils.corpus2csc",
"time.time",
"gensim.models.TfidfModel"
] | [((7676, 7700), 'numpy.zeros', 'np.zeros', (['self.num_terms'], {}), '(self.num_terms)\n', (7684, 7700), True, 'import numpy as np\n'), ((8069, 8115), 'numpy.dot', 'np.dot', (['new_doc_fv_normalized', 'self.term_f_mat'], {}), '(new_doc_fv_normalized, self.term_f_mat)\n', (8075, 8115), True, 'import numpy as np\n'), ((2711, 2722), 'time.time', 'time.time', ([], {}), '()\n', (2720, 2722), False, 'import time\n'), ((2845, 2885), 'gensim.models.TfidfModel', 'models.TfidfModel', (['self.data_orig.corpus'], {}), '(self.data_orig.corpus)\n', (2862, 2885), False, 'from gensim import corpora, models, similarities, matutils\n'), ((3334, 3442), 'gensim.models.LsiModel', 'models.LsiModel', (['self.corpus_normalized'], {'id2word': 'self.data_orig.dictionary', 'num_topics': 'self.num_features'}), '(self.corpus_normalized, id2word=self.data_orig.dictionary,\n num_topics=self.num_features)\n', (3349, 3442), False, 'from gensim import corpora, models, similarities, matutils\n'), ((4108, 4119), 'time.time', 'time.time', ([], {}), '()\n', (4117, 4119), False, 'import time\n'), ((5027, 5038), 'time.time', 'time.time', ([], {}), '()\n', (5036, 5038), False, 'import time\n'), ((5274, 5439), 'gensim.matutils.corpus2csc', 'matutils.corpus2csc', (['self.corpus_normalized'], {'num_terms': 'self.data_orig.num_features', 'num_docs': 'self.data_orig.num_data', 'num_nnz': 'self.data_orig.corpus.num_nnz'}), '(self.corpus_normalized, num_terms=self.data_orig.\n num_features, num_docs=self.data_orig.num_data, num_nnz=self.data_orig.\n corpus.num_nnz)\n', (5293, 5439), False, 'from gensim import corpora, models, similarities, matutils\n'), ((5587, 5598), 'time.time', 'time.time', ([], {}), '()\n', (5596, 5598), False, 'import time\n'), ((6595, 6606), 'time.time', 'time.time', ([], {}), '()\n', (6604, 6606), False, 'import time\n')] |
import os
import nltk
import numpy as np
from nltk.sentiment.util import mark_negation
def lol2str(doc):
    """Flatten a document in list-of-lists (sentences of words) format into
    a single space-separated block of text (str)."""
    words = (word for sent in doc for word in sent)
    return " ".join(words)
def mr2str(dataset):
    """Convert the Movie Reviews Dataset (or a slice) into a list of text blocks."""
    return list(map(lol2str, dataset))
def get_movie_reviews_dataset(mark_negs=True):
    """Download NLTK's "Movie Reviews" dataset and return it split into
    (negative_reviews, positive_reviews).

    When ``mark_negs`` is True, each sentence is passed through NLTK's
    ``mark_negation`` before being returned.
    """
    nltk.download("movie_reviews")
    from nltk.corpus import movie_reviews
    negative = movie_reviews.paras(categories="neg")
    positive = movie_reviews.paras(categories="pos")
    if mark_negs:
        negative = [[mark_negation(s) for s in doc] for doc in negative]
        positive = [[mark_negation(s) for s in doc] for doc in positive]
    return negative, positive
def load_corpus_rotten_imdb(path):
    """Load the Rotten/IMDB subjectivity corpus from *path*.

    Reads the standard file names ``quote.tok.gt9.5000`` (subjective
    sentences) and ``plot.tok.gt9.5000`` (objective sentences).

    Args:
        path: directory containing the two corpus files.

    Returns:
        Tuple ``(subj, obj)`` of lists of stripped sentence strings.
    """
    subjective_sentences = "quote.tok.gt9.5000"
    objective_sentences = "plot.tok.gt9.5000"
    # Build the lists with comprehensions instead of the previous
    # side-effecting pattern `[lst.append(...) for ...]`, which built and
    # discarded a throwaway list of Nones.
    with open(os.path.join(path, subjective_sentences), 'r') as f:
        subj = [sent.strip() for sent in f]
    with open(os.path.join(path, objective_sentences), 'r') as f:
        obj = [sent.strip() for sent in f]
    return subj, obj
def hconcat(X1: np.ndarray, X2: np.ndarray) -> np.ndarray:
    """Concatenate two matrices side by side and return the combined matrix.

    Note: the result buffer is float64 regardless of the input dtypes.
    """
    assert len(X1.shape) == len(X2.shape) == 2, "function 'hconcat' only works with matrices (np.array with 2 dimensions)."
    assert X1.shape[0] == X2.shape[0], "In order to hconcat matrices, they must have the same number of rows."
    split = X1.shape[1]
    combined = np.empty((X1.shape[0], split + X2.shape[1]))
    combined[:, :split] = X1
    combined[:, split:] = X2
    return combined
def vconcat(X1: np.ndarray, X2: np.ndarray) -> np.ndarray:
    """Stack two matrices vertically and return the combined matrix.

    Note: the result buffer is float64 regardless of the input dtypes.
    """
    assert len(X1.shape) == len(X2.shape) == 2, "function 'vconcat' only works with matrices (np.array with 2 dimensions)."
    assert X1.shape[1] == X2.shape[1], "In order to vconcat matrices, they must have the same number of columns."
    split = X1.shape[0]
    stacked = np.empty((split + X2.shape[0], X1.shape[1]))
    stacked[:split, :] = X1
    stacked[split:, :] = X2
    return stacked
| [
"nltk.download",
"os.path.join",
"nltk.corpus.movie_reviews.paras",
"numpy.ndarray",
"nltk.sentiment.util.mark_negation"
] | [((602, 632), 'nltk.download', 'nltk.download', (['"""movie_reviews"""'], {}), "('movie_reviews')\n", (615, 632), False, 'import nltk\n'), ((685, 722), 'nltk.corpus.movie_reviews.paras', 'movie_reviews.paras', ([], {'categories': '"""neg"""'}), "(categories='neg')\n", (704, 722), False, 'from nltk.corpus import movie_reviews\n'), ((733, 770), 'nltk.corpus.movie_reviews.paras', 'movie_reviews.paras', ([], {'categories': '"""pos"""'}), "(categories='pos')\n", (752, 770), False, 'from nltk.corpus import movie_reviews\n'), ((1857, 1881), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(N, M)'}), '(shape=(N, M))\n', (1867, 1881), True, 'import numpy as np\n'), ((2434, 2458), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(N, M)'}), '(shape=(N, M))\n', (2444, 2458), True, 'import numpy as np\n'), ((1107, 1147), 'os.path.join', 'os.path.join', (['path', 'subjective_sentences'], {}), '(path, subjective_sentences)\n', (1119, 1147), False, 'import os\n'), ((1250, 1289), 'os.path.join', 'os.path.join', (['path', 'objective_sentences'], {}), '(path, objective_sentences)\n', (1262, 1289), False, 'import os\n'), ((805, 824), 'nltk.sentiment.util.mark_negation', 'mark_negation', (['sent'], {}), '(sent)\n', (818, 824), False, 'from nltk.sentiment.util import mark_negation\n'), ((874, 893), 'nltk.sentiment.util.mark_negation', 'mark_negation', (['sent'], {}), '(sent)\n', (887, 893), False, 'from nltk.sentiment.util import mark_negation\n')] |
import sys, os, itertools
import numpy as np
import matplotlib.tri as tri
from glbase3 import *
import shared_conservation
# collect three things:
# 1. The total PhyloP score of the transcript
# 2. score for TE-containing bits
# 3. score for non-TE containing bits;
# Load the precomputed per-transcript PhyloP conservation table.
gl = glload('phyloP_conservation_table.glb')
print(gl)
# Compare conservation of TE-derived vs non-TE-derived transcript regions
# as a scatter plot, a 2-D histogram, and a contour plot.
shared_conservation.scat('scat_te_vs_not_tes.pdf',
    gl['phyloP_tes'], gl['phyloP_nottes'],
    'TE', 'not-TE',
    xlims=[-0.6, 0.7],
    ylims=[-0.6, 0.7],
    )
shared_conservation.hist('hist_te_vs_not_tes.pdf',
    gl['phyloP_tes'], gl['phyloP_nottes'],
    'TE', 'not-TE',
    ranges=[[-0.2, 0.7], [-0.2, 0.7]],
    hlines = [0, 0.25],
    vlines = [0, 0.25],
    )
shared_conservation.contour('cont_te_vs_not_tes.pdf',
    gl['phyloP_tes'], gl['phyloP_nottes'],
    'TE', 'not-TE',
    ranges=[[-0.6, 0.7], [-0.6, 0.7]],
    )
# For each region class, plot conservation against expression
# (log2(TPM + 0.1), the 0.1 pseudocount avoids log2(0)).
for t in ('phyloP_tes', 'phyloP_nottes'):
    shared_conservation.scat(filename='scat_expn_cons_vs_{0}.pdf'.format(t),
        x=gl[t],
        y=np.log2(np.array(gl['TPM'])+0.1),
        xlabel='cons',
        ylabel='expn',
        xlims=[-0.6, 0.7],
        ylims=[-3, 9],
        )
    shared_conservation.hist(filename='hist_expn_cons_vs_{0}.pdf'.format(t),
        x=gl[t],
        y=np.log2(np.array(gl['TPM'])+0.1),
        xlabel='cons',
        ylabel='expn',
        ranges=[[-0.2, 0.7], [-3, 9]],
        )
    shared_conservation.contour(filename='cont_expn_cons_vs_{0}.pdf'.format(t),
        x=gl[t],
        y=np.log2(np.array(gl['TPM'])+0.1),
        xlabel='cons',
        ylabel='expn',
        ranges=[[-0.6, 0.7], [-3, 9]],
        vmax=100,
        )
| [
"shared_conservation.scat",
"numpy.array",
"shared_conservation.contour",
"shared_conservation.hist"
] | [((325, 473), 'shared_conservation.scat', 'shared_conservation.scat', (['"""scat_te_vs_not_tes.pdf"""', "gl['phyloP_tes']", "gl['phyloP_nottes']", '"""TE"""', '"""not-TE"""'], {'xlims': '[-0.6, 0.7]', 'ylims': '[-0.6, 0.7]'}), "('scat_te_vs_not_tes.pdf', gl['phyloP_tes'], gl[\n 'phyloP_nottes'], 'TE', 'not-TE', xlims=[-0.6, 0.7], ylims=[-0.6, 0.7])\n", (349, 473), False, 'import shared_conservation\n'), ((492, 677), 'shared_conservation.hist', 'shared_conservation.hist', (['"""hist_te_vs_not_tes.pdf"""', "gl['phyloP_tes']", "gl['phyloP_nottes']", '"""TE"""', '"""not-TE"""'], {'ranges': '[[-0.2, 0.7], [-0.2, 0.7]]', 'hlines': '[0, 0.25]', 'vlines': '[0, 0.25]'}), "('hist_te_vs_not_tes.pdf', gl['phyloP_tes'], gl[\n 'phyloP_nottes'], 'TE', 'not-TE', ranges=[[-0.2, 0.7], [-0.2, 0.7]],\n hlines=[0, 0.25], vlines=[0, 0.25])\n", (516, 677), False, 'import shared_conservation\n'), ((700, 848), 'shared_conservation.contour', 'shared_conservation.contour', (['"""cont_te_vs_not_tes.pdf"""', "gl['phyloP_tes']", "gl['phyloP_nottes']", '"""TE"""', '"""not-TE"""'], {'ranges': '[[-0.6, 0.7], [-0.6, 0.7]]'}), "('cont_te_vs_not_tes.pdf', gl['phyloP_tes'], gl[\n 'phyloP_nottes'], 'TE', 'not-TE', ranges=[[-0.6, 0.7], [-0.6, 0.7]])\n", (727, 848), False, 'import shared_conservation\n'), ((1017, 1036), 'numpy.array', 'np.array', (["gl['TPM']"], {}), "(gl['TPM'])\n", (1025, 1036), True, 'import numpy as np\n'), ((1262, 1281), 'numpy.array', 'np.array', (["gl['TPM']"], {}), "(gl['TPM'])\n", (1270, 1281), True, 'import numpy as np\n'), ((1499, 1518), 'numpy.array', 'np.array', (["gl['TPM']"], {}), "(gl['TPM'])\n", (1507, 1518), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#####################################################
# This file is a component of ClusterGB #
# Copyright (c) 2018 <NAME> #
# Released under the MIT License (see distribution) #
#####################################################
"""
For each grain boundary, calculates the bond-order parameters of each GB site.
Assumes a cutoff between the second and third nearest neighbours.
If used, please cite Steinhardt, Nelson, and Ronchetti, PRB 28 (1983).
.. WARNING: If the number of allowed crystal structure in the main code are extended, you'll need to extend the
calculation of the cutoff radius here too.
"""
from __future__ import absolute_import
import argparse
#from . import clustergb as cgb
import clustergb as cgb
import os
import time
import numpy as np
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def run_bond_orientational_order(job, force_recalculate=False, max_order=8):
    """
    Calculate the bond orientational order parameters for each GB site at a particular boundary.

    Results are stored as ``q1`` .. ``q{max_order}`` attributes of the
    ``bond_order`` namespace inside ``job.results``.

    Args:
        job (clustergb.job.Job): Job on which to run.
        force_recalculate (bool): Whether to overwrite existing data (default=False.)
        max_order (int): Maximum order of spherical harmonics to use.
    """
    t_start = time.time()
    cgb.osio.tee(job.log_loc, "Starting bond-order calculation for " + job.name)
    # Namespace that will hold the per-site q_l values.
    bondo = job.ensure_namespace("bond_order", scope=job.results)
    # Skip the work when the highest-order result is already present.
    if hasattr(bondo, "q" + str(max_order)) and not force_recalculate:
        return
    pos, _, lx, ly, lz = cgb.structure.read_xyzin(os.path.join(job.location, "gb.xyzin"))
    gb_ids = np.arange(job.gb.natoms_nonbulklike)
    qs = cgb.bond_orientational_order.bond_orientational_order_parameter(
        pos, gb_ids, job.par.xl.type, job.par.xl.length, lmax=max_order)
    # Store each order's column as its own attribute (q1, q2, ...).
    for order in np.arange(max_order):
        setattr(bondo, "q" + str(order + 1), qs[:, order])
    job.save()
    t_end = time.time()
    cgb.osio.tee(job.log_loc, job.name + " bond order runtime = " + str(t_end - t_start) + "s.")
def main(args):
    """Run the bond-order calculation over the whole ClusterGB hierarchy and report timing."""
    t0 = time.time()
    cgb.osio.run_in_hierarchy(run_bond_orientational_order, vars(args))
    print("Total bond-order runtime = " + str(time.time() - t0) + "s.")
def _ret_parser():
    """Build the command-line parser for the bond-orientational order script.

    ``type=int``/``type=float`` converters are set explicitly: without them a
    user-supplied ``--max-order 10`` arrives as the *string* ``"10"``, which
    breaks ``np.arange(max_order)`` and the ``q<l>`` attribute names downstream;
    likewise ``--cutoff`` values would stay strings instead of distances.

    Returns:
        argparse.ArgumentParser: The configured parser.
    """
    parser = argparse.ArgumentParser(description="Calculate the bond-orientational order parameters of each GB site at "
                                                 "each GB recursively through the ClusterGB hierarchy.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--force-recalculate", "-recalc", action="store_true",
                        help="Overwrite any existing bond-orientational order parameter data with new calculations.")
    parser.add_argument("--max-order", "-maxo", type=int, default=8,
                        help="Maximum order (`l` in `Y_l^m` spherical harmonics) to calculate.")
    parser.add_argument("--cutoff", "-cut", type=float, default=None,
                        help="Cutoff distance to include neighbours in bond counting. Default is 3rd nearest "
                             "neighbour distance.")
    return parser
# Script entry point: build the parser, read the command-line arguments, and
# run the bond-order calculation over the whole hierarchy.
if __name__ == "__main__":
    returned_parser = _ret_parser()
    arguments = returned_parser.parse_args()
    main(arguments)
| [
"clustergb.bond_orientational_order.bond_orientational_order_parameter",
"argparse.ArgumentParser",
"os.path.join",
"clustergb.osio.tee",
"time.time",
"numpy.arange"
] | [((1418, 1429), 'time.time', 'time.time', ([], {}), '()\n', (1427, 1429), False, 'import time\n'), ((1435, 1511), 'clustergb.osio.tee', 'cgb.osio.tee', (['job.log_loc', "('Starting bond-order calculation for ' + job.name)"], {}), "(job.log_loc, 'Starting bond-order calculation for ' + job.name)\n", (1447, 1511), True, 'import clustergb as cgb\n'), ((1989, 2025), 'numpy.arange', 'np.arange', (['job.gb.natoms_nonbulklike'], {}), '(job.gb.natoms_nonbulklike)\n', (1998, 2025), True, 'import numpy as np\n'), ((2064, 2170), 'clustergb.bond_orientational_order.bond_orientational_order_parameter', 'cgb.bond_orientational_order.bond_orientational_order_parameter', (['pos', 'gb_ids', 'xl', 'latt'], {'lmax': 'max_order'}), '(pos, gb_ids,\n xl, latt, lmax=max_order)\n', (2127, 2170), True, 'import clustergb as cgb\n'), ((2211, 2231), 'numpy.arange', 'np.arange', (['max_order'], {}), '(max_order)\n', (2220, 2231), True, 'import numpy as np\n'), ((2310, 2321), 'time.time', 'time.time', ([], {}), '()\n', (2319, 2321), False, 'import time\n'), ((2445, 2456), 'time.time', 'time.time', ([], {}), '()\n', (2454, 2456), False, 'import time\n'), ((2541, 2552), 'time.time', 'time.time', ([], {}), '()\n', (2550, 2552), False, 'import time\n'), ((2654, 2880), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Calculate the bond-orientational order parameters of each GB site at each GB recursively through the ClusterGB hierarchy."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Calculate the bond-orientational order parameters of each GB site at each GB recursively through the ClusterGB hierarchy.'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (2677, 2880), False, 'import argparse\n'), ((1911, 1949), 'os.path.join', 'os.path.join', (['job.location', '"""gb.xyzin"""'], {}), "(job.location, 'gb.xyzin')\n", (1923, 1949), False, 'import os\n')] |
"""
demosaic bayer filter of type 'grbg' using nearest neighbor interpolation
input:
assumes uint8 or uint16 raw bayer filtered grbg input
assumes 2-D I x J pixels, where a single image is I x J pixels,
where I and J are both even numbers
output:
    same uint8 or uint16 shape as input
Note: If you're on Windows, be sure your PATH environment variable includes your Python DLLs directory.
E.g. Python installed to C:/Miniconda3, you should have C:/Miniconda3/DLLs on your Windows PATH.
"""
import logging
import numpy as np
from scipy.ndimage.interpolation import zoom
#
try:
from .api import Convert
except Exception:
Convert = None # type: ignore
#
from .rgb2gray import rgb2gray
def demosaic(img: np.ndarray, method: str = "", alg: int = 1, color: bool = True):
    """Demosaic a GRBG Bayer-filtered image, or a stack of such images.

    A 2-D array is demosaiced directly.  A 3-D array whose last axis is not
    already 3 colour channels is treated as a stack of frames and processed
    frame by frame.  When ``method`` is "sumix" and the optional ``Convert``
    backend (imported at module top) is available, it is used instead of the
    pure-Python path.
    """
    if img.ndim == 3 and img.shape[-1] != 3:
        # Stack of raw frames: allocate the output once, then recurse per frame.
        logging.info(f"iterate over {img.shape[0]} frames")
        out_shape = img.shape + (3,) if color else img.shape
        stack = np.empty(out_shape, dtype=img.dtype)
        for frame_index, frame in enumerate(img):
            stack[frame_index, ...] = demosaic(frame, method, alg, color)
        return stack
    if img.ndim != 2:
        raise ValueError(f"unsure what you want with shape {img.shape}")
    if str(method).lower() == "sumix" and Convert is not None:
        return Convert().BayerToRgb(img, alg)
    return grbg2rgb(img, alg, color)
def grbg2rgb(img: np.ndarray, alg: int = 1, color: bool = True) -> np.ndarray:
    """Demosaic one 2-D GRBG Bayer frame.

    GRBG means the upper left corner of the image has four pixels arranged like

        green red
        blue  green

    The four colour planes are extracted by strided slicing, the two green
    planes are averaged, and the half-resolution RGB result is interpolated
    back to full resolution with ``scipy.ndimage`` ``zoom``.
    """
    if img.ndim != 2:
        raise NotImplementedError(f"for now, only 2-D Numpy ndarray is accepted {img.shape}")
    rows, cols = img.shape
    if rows % 2 or cols % 2:
        raise TypeError(f"requires even-numbered number of pixels on both axes {img.shape}")
    if img.dtype not in (np.uint8, np.uint16):
        raise TypeError(f"demosaic is currently for uint8 and uint16 input ONLY {img.shape}")
    # The two green sites are upcast before summing so that uint8/uint16 input
    # cannot overflow during the average.
    green_tl = img[0::2, 0::2].astype(np.uint32)
    green_br = img[1::2, 1::2].astype(np.uint32)
    red_plane = img[0::2, 1::2]
    blue_plane = img[1::2, 0::2]
    green_plane = np.round((green_tl + green_br) / 2).astype(img.dtype)
    # RGB stacked along axis=2 -- the layout matplotlib's imshow expects.
    rgb = np.dstack((red_plane, green_plane, blue_plane))
    if 1 <= alg <= 4:
        spline_order = alg - 1
    else:
        logging.warning(f"unknown method {alg} falling back to nearest neighbor alg=1")
        spline_order = 0
    demos = zoom(rgb, (2, 2, 1), order=spline_order)  # order 0: nearest neighbor
    if not color:
        demos = rgb2gray(demos)
    return demos
| [
"numpy.dstack",
"logging.warning",
"numpy.empty",
"scipy.ndimage.interpolation.zoom",
"logging.info",
"numpy.round"
] | [((2359, 2379), 'numpy.dstack', 'np.dstack', (['(r, g, b)'], {}), '((r, g, b))\n', (2368, 2379), True, 'import numpy as np\n'), ((2623, 2656), 'scipy.ndimage.interpolation.zoom', 'zoom', (['rgb', '(2, 2, 1)'], {'order': 'order'}), '(rgb, (2, 2, 1), order=order)\n', (2627, 2656), False, 'from scipy.ndimage.interpolation import zoom\n'), ((2511, 2596), 'logging.warning', 'logging.warning', (['f"""unknown method {alg} falling back to nearest neighbor alg=1"""'], {}), "(f'unknown method {alg} falling back to nearest neighbor alg=1'\n )\n", (2526, 2596), False, 'import logging\n'), ((935, 986), 'logging.info', 'logging.info', (['f"""iterate over {img.shape[0]} frames"""'], {}), "(f'iterate over {img.shape[0]} frames')\n", (947, 986), False, 'import logging\n'), ((2304, 2327), 'numpy.round', 'np.round', (['((g1 + g2) / 2)'], {}), '((g1 + g2) / 2)\n', (2312, 2327), True, 'import numpy as np\n'), ((1023, 1066), 'numpy.empty', 'np.empty', (['(img.shape + (3,))'], {'dtype': 'img.dtype'}), '(img.shape + (3,), dtype=img.dtype)\n', (1031, 1066), True, 'import numpy as np\n'), ((1099, 1135), 'numpy.empty', 'np.empty', (['img.shape'], {'dtype': 'img.dtype'}), '(img.shape, dtype=img.dtype)\n', (1107, 1135), True, 'import numpy as np\n')] |
# FIXME[hack]: this is just using a specific keras network as proof of
# concept. It has to be modularized and integrated into the framework
import os
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
import tensorflow as tf
from tensorflow import keras
import numpy as np
print(f"numpy: {np.__version__}")
print(f"tensorflow: {tf.__version__}")
from dltb.base.observer import Observable
import cleverhans
from cleverhans.attacks import FastGradientMethod
from cleverhans.dataset import MNIST
from cleverhans.loss import CrossEntropy
from cleverhans.train import train
from cleverhans.utils import AccuracyReport
from cleverhans.utils_keras import cnn_model
from cleverhans.utils_keras import KerasModelWrapper
from cleverhans.utils_tf import model_eval
print(f"cleverhans: {cleverhans.__version__}")
print(f"keras: {keras.__version__} (backend: {keras.backend.backend()}, dim_ordering: {keras.backend.image_data_format()})")
assert keras.backend.image_data_format() == 'channels_last', "this tutorial requires keras to be configured to channels_last format"
from network.keras import Network as KerasNetwork
from network import Classifier
# FIXME[hack]: is the following really needed?
# Combines the Keras network wrapper with the generic Classifier interface so
# that code below can treat a wrapped Keras model as a classifier.
class KerasClassifier(KerasNetwork, Classifier): pass
# FIXME[hack]
from models.example_keras_advex_mnist import KerasMnistClassifier
# FIXME[design]: QtGUI objects should not be Observable! move this to the dltb.
class AdversarialExampleController(Observable,
                                method='adversarialControllerChanged',
                                changes={'busy_changed', 'data_changed',
                                         'parameter_changed'}):
    """Controller that trains an MNIST classifier and crafts FGSM
    adversarial examples for it using cleverhans.

    Attributes
    ----------
    _model: cleverhans.model.Model
        The cleverhans wrapper around the underlying Keras model.
    _loss: cleverhans.loss.CrossEntropy
        Training loss (with label smoothing) used by cleverhans' train().
    _adv_x: tf.Tensor
        Symbolic FGSM-perturbed version of the input placeholder; only set
        after init_from_keras_classifier() has been called.
    """
    # Training hyperparameters and checkpoint configuration.
    _nb_epochs = 6
    _batch_size = 128
    _learning_rate = 0.001
    _train_dir = 'train_dir'
    _filename = 'mnist.ckpt'
    _testing = False
    _label_smoothing = 0.1
    # FIXME[todo]: this needs to be initialized ...
    # NOTE(review): `Runner` is not imported in this module; the annotation is
    # quoted so evaluating the class body does not raise a NameError.
    _runner: "Runner" = None
    def __init__(self):
        """Set up TensorFlow/cleverhans bookkeeping and load the MNIST data."""
        super().__init__()
        # Model-related handles; these are filled in later by
        # init_from_keras_classifier().
        self._model = None
        self._loss = None
        self._input_placeholder = None
        self._label_placeholder = None
        self._preds = None
        self._graph = None
        self._sess = None
        self._busy = False
        self.load_mnist() # FIXME[hack]
        # FIXME[old]: check what is still needed from the following code
        # Object used to keep track of (and return) key accuracies
        self._report = AccuracyReport()
        # Set numpy random seed to improve reproducibility
        self._rng = np.random.RandomState([2017, 8, 30])
        # Set TF random seed to improve reproducibility
        tf.set_random_seed(1234)
        # Argument dicts handed to cleverhans' train()/model_eval().
        self._train_params = {
            'nb_epochs': self._nb_epochs,
            'batch_size': self._batch_size,
            'learning_rate': self._learning_rate,
            'train_dir': self._train_dir,
            'filename': self._filename
        }
        self._eval_params = {
            'batch_size': self._batch_size
        }
        if not os.path.exists(self._train_dir):
            os.mkdir(self._train_dir)
        # Look for an existing checkpoint that load_model() may restore.
        # NOTE(review): "chheckpoint" typo below is in a runtime string and is
        # deliberately left untouched here.
        self._ckpt = tf.train.get_checkpoint_state(self._train_dir)
        print(f"train_dir={self._train_dir}, chheckpoint={self._ckpt}")
        self._ckpt_path = False if self._ckpt is None else self._ckpt.model_checkpoint_path
    def init_from_keras_classifier(self, keras_classifier: KerasClassifier):
        """Adopt graph/session/tensors from `keras_classifier` and build the
        FGSM attack graph on top of its model."""
        self._graph = keras_classifier.graph
        self._sess = keras_classifier.session
        self._input_placeholder = keras_classifier.input
        self._label_placeholder = keras_classifier.label
        self._preds = keras_classifier.predictions
        self._model = KerasModelWrapper(keras_classifier.model)
        self._loss = CrossEntropy(self._model, smoothing=self._label_smoothing)
        with self._graph.as_default():
            # Fast Gradient Sign Method attack with inputs clipped to [0, 1].
            fgsm = FastGradientMethod(self._model, sess=self._sess)
            fgsm_params = {
                'eps': 0.3,
                'clip_min': 0.,
                'clip_max': 1.
            }
            adv_x = fgsm.generate(self._input_placeholder, **fgsm_params)
            # Consider the attack to be constant
            self._adv_x = tf.stop_gradient(adv_x)
            # model predictions for adversarial examples
            self._preds_adv = keras_classifier.model(adv_x)
        self._keras_classifier = keras_classifier # FIXME[hack]: we need to keep a reference to the KerasClassifier to prevent the session from being closed
    def create_model(self):
        """Build a fresh Keras MNIST classifier and wire it into this controller."""
        keras_classifier = KerasMnistClassifier() # FIXME[hack]
        self.init_from_keras_classifier(keras_classifier)
    def dump_model(self):
        """Print the layer names and tensors of the wrapped cleverhans model."""
        with self._graph.as_default():
            layer_names = self._model.get_layer_names()
            print(f"Model has {len(layer_names)} layers: {layer_names}")
            for n in layer_names:
                print(f"  {n}: {self._model.get_layer(self._input_placeholder, n)}")
            model_layers = self._model.fprop(self._input_placeholder)
            print(f"Model has {len(model_layers)} layers:")
            for n, l in model_layers.items():
                print(f"  {n}: {l}")
    def train_model(self):
        """Train the model using the current training data.
        """
        logging.info("Training Cleverhans model from scratch.")
        # FIXME[todo]: self._runner is not initialized yet!
        #self._runner.runTask(self._train_model)
        self._train_model() # FIXME[hack]
    def _train_model(self):
        """Worker that runs cleverhans' train(); toggles the busy flag and
        notifies observers before and after."""
        self._busy = True
        self.change('busy_changed')
        def evaluate():
            # Called by cleverhans after every epoch; reports test accuracy.
            self.evaluate_model(self._x_test, self._y_test)
        # now use the cleverhans train method (this will optimize the
        # loss function, and hence the model):
        # FIXME[problem]: there seems to be no way to get some progress
        # report from this train method. The only callback we can
        # register is 'evaluate', which can be used for arbitrary
        # operations, but which is only called after every epoch
        with self._graph.as_default():
            train(self._sess, self._loss, self._x_train, self._y_train,
                  evaluate=evaluate, args=self._train_params,
                  rng=self._rng)
        self._busy = False
        self.change('busy_changed')
    def evaluate_model(self, data, label):
        """Evaluate the accuracy of the MNIST model.
        """
        # use cleverhans' model_eval function:
        with self._graph.as_default():
            accuracy = model_eval(self._sess, self._input_placeholder,
                                  self._label_placeholder, self._preds,
                                  data, label, args=self._eval_params)
        print(f"MNIST model accurace: {accuracy:0.4f}")
    def load_model(self):
        """Restore model weights from the checkpoint found in _train_dir,
        if one exists, and report test accuracy."""
        if self._ckpt_path:
            with self._graph.as_default():
                saver = tf.train.Saver()
                print(self._ckpt_path)
                saver.restore(self._sess, self._ckpt_path)
            print(f"Model loaded from: {format(self._ckpt_path)}")
            self.evaluate_model(self._x_test, self._y_test)
        else:
            print("Model was not loaded.")
    def save_model(self):
        """Placeholder: saving the model is not implemented yet."""
        print("Model was not saved.")
    def reset_model(self):
        """Placeholder: resetting the model is not implemented yet."""
        print("Model was not reset.")
    def load_mnist(self):
        """Load the training data (MNIST).
        """
        # Get MNIST data
        train_start, train_end = 0, 60000
        test_start, test_end = 0, 10000
        mnist = MNIST(train_start=train_start,
                      train_end=train_end,
                      test_start=test_start,
                      test_end=test_end)
        self._x_train, self._y_train = mnist.get_set('train')
        self._x_test, self._y_test = mnist.get_set('test')
        # Use Image Parameters
        self._img_rows, self._img_cols, self._nchannels = \
            self._x_train.shape[1:4]
        self._nb_classes = self._y_train.shape[1]
        print(f"len(train): {len(self._x_train)} / {len(self._y_train)}")
        print(f"len(test): {len(self._x_test)} / {len(self._y_test)}")
        print(f"img_rows x img_cols x nchannels: {self._img_rows} x {self._img_cols} x {self._nchannels}")
        print(f"nb_classes: {self._nb_classes}")
    def get_example(self, index: int=None):
        """Return one training sample, its one-hot label, and the model's
        prediction for it.  Also caches the sample for a subsequent
        get_adversarial_example() call.

        NOTE(review): when `index` is None it is drawn from the *test* set's
        length but then used to index the *training* set -- this looks like a
        bug if the two sets differ in size; confirm intent.
        """
        if index is None:
            index = np.random.randint(len(self._x_test))
        #batch = np.arange(self._batch_size)
        batch = np.asarray([index])
        self._x_sample = self._x_train[batch]
        self._y_sample = self._y_train[batch]
        with self._graph.as_default():
            feed_dict = {self._input_placeholder: self._x_sample}
            preds_sample = \
                self._preds.eval(feed_dict=feed_dict, session=self._sess)
        return self._x_sample[0], self._y_sample[0], preds_sample[0]
    def get_adversarial_example(self, index: int=None):
        """Craft an FGSM adversarial example from the sample cached by the
        last get_example() call and return it with its prediction.

        NOTE(review): `index` is accepted but unused; the method always
        operates on self._x_sample.
        """
        with self._graph.as_default():
            feed_dict = {self._input_placeholder: self._x_sample}
            x_adversarial = \
                self._adv_x.eval(feed_dict=feed_dict, session=self._sess)
            feed_dict = {self._input_placeholder: x_adversarial}
            preds_adversarial = \
                self._preds_adv.eval(feed_dict=feed_dict, session=self._sess)
        return x_adversarial[0], preds_adversarial[0]
    @property
    def busy(self):
        # True while a training task is running.
        return self._busy
| [
"logging.getLogger",
"cleverhans.attacks.FastGradientMethod",
"cleverhans.utils.AccuracyReport",
"tensorflow.set_random_seed",
"logging.info",
"numpy.random.RandomState",
"os.path.exists",
"cleverhans.utils_tf.model_eval",
"numpy.asarray",
"os.mkdir",
"tensorflow.keras.backend.backend",
"cleve... | [((177, 204), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (194, 204), False, 'import logging\n'), ((974, 1007), 'tensorflow.keras.backend.image_data_format', 'keras.backend.image_data_format', ([], {}), '()\n', (1005, 1007), False, 'from tensorflow import keras\n'), ((2699, 2715), 'cleverhans.utils.AccuracyReport', 'AccuracyReport', ([], {}), '()\n', (2713, 2715), False, 'from cleverhans.utils import AccuracyReport\n'), ((2796, 2832), 'numpy.random.RandomState', 'np.random.RandomState', (['[2017, 8, 30]'], {}), '([2017, 8, 30])\n', (2817, 2832), True, 'import numpy as np\n'), ((2898, 2922), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), '(1234)\n', (2916, 2922), True, 'import tensorflow as tf\n'), ((3374, 3420), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['self._train_dir'], {}), '(self._train_dir)\n', (3403, 3420), True, 'import tensorflow as tf\n'), ((3950, 3991), 'cleverhans.utils_keras.KerasModelWrapper', 'KerasModelWrapper', (['keras_classifier.model'], {}), '(keras_classifier.model)\n', (3967, 3991), False, 'from cleverhans.utils_keras import KerasModelWrapper\n'), ((4013, 4071), 'cleverhans.loss.CrossEntropy', 'CrossEntropy', (['self._model'], {'smoothing': 'self._label_smoothing'}), '(self._model, smoothing=self._label_smoothing)\n', (4025, 4071), False, 'from cleverhans.loss import CrossEntropy\n'), ((4824, 4846), 'models.example_keras_advex_mnist.KerasMnistClassifier', 'KerasMnistClassifier', ([], {}), '()\n', (4844, 4846), False, 'from models.example_keras_advex_mnist import KerasMnistClassifier\n'), ((5557, 5612), 'logging.info', 'logging.info', (['"""Training Cleverhans model from scratch."""'], {}), "('Training Cleverhans model from scratch.')\n", (5569, 5612), False, 'import logging\n'), ((7831, 7928), 'cleverhans.dataset.MNIST', 'MNIST', ([], {'train_start': 'train_start', 'train_end': 'train_end', 'test_start': 'test_start', 'test_end': 
'test_end'}), '(train_start=train_start, train_end=train_end, test_start=test_start,\n test_end=test_end)\n', (7836, 7928), False, 'from cleverhans.dataset import MNIST\n'), ((8793, 8812), 'numpy.asarray', 'np.asarray', (['[index]'], {}), '([index])\n', (8803, 8812), True, 'import numpy as np\n'), ((887, 910), 'tensorflow.keras.backend.backend', 'keras.backend.backend', ([], {}), '()\n', (908, 910), False, 'from tensorflow import keras\n'), ((928, 961), 'tensorflow.keras.backend.image_data_format', 'keras.backend.image_data_format', ([], {}), '()\n', (959, 961), False, 'from tensorflow import keras\n'), ((3281, 3312), 'os.path.exists', 'os.path.exists', (['self._train_dir'], {}), '(self._train_dir)\n', (3295, 3312), False, 'import os\n'), ((3326, 3351), 'os.mkdir', 'os.mkdir', (['self._train_dir'], {}), '(self._train_dir)\n', (3334, 3351), False, 'import os\n'), ((4131, 4179), 'cleverhans.attacks.FastGradientMethod', 'FastGradientMethod', (['self._model'], {'sess': 'self._sess'}), '(self._model, sess=self._sess)\n', (4149, 4179), False, 'from cleverhans.attacks import FastGradientMethod\n'), ((4467, 4490), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['adv_x'], {}), '(adv_x)\n', (4483, 4490), True, 'import tensorflow as tf\n'), ((6385, 6508), 'cleverhans.train.train', 'train', (['self._sess', 'self._loss', 'self._x_train', 'self._y_train'], {'evaluate': 'evaluate', 'args': 'self._train_params', 'rng': 'self._rng'}), '(self._sess, self._loss, self._x_train, self._y_train, evaluate=\n evaluate, args=self._train_params, rng=self._rng)\n', (6390, 6508), False, 'from cleverhans.train import train\n'), ((6822, 6948), 'cleverhans.utils_tf.model_eval', 'model_eval', (['self._sess', 'self._input_placeholder', 'self._label_placeholder', 'self._preds', 'data', 'label'], {'args': 'self._eval_params'}), '(self._sess, self._input_placeholder, self._label_placeholder,\n self._preds, data, label, args=self._eval_params)\n', (6832, 6948), False, 'from cleverhans.utils_tf import 
model_eval\n'), ((7192, 7208), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7206, 7208), True, 'import tensorflow as tf\n')] |
# Results plotter.
#
# https://github.com/stefanvanberkum/CD-ABSC
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
from plotnine import aes, element_text, geom_point, ggplot, stat_smooth, theme
global embedding_dim, rest_path, target_path, ft_path, save_path
def _fit_trend(result, task):
    """Fit an OLS trendline Accuracy ~ Aspects for one task.

    :param result: dataframe with 'Aspects', 'Accuracy' and 'Task' columns
    :param task: task name to select ('target-target' or 'fine-tuning')
    :return: (intercept, coefficient) of the fitted line
    """
    data = result.loc[result['Task'] == task]
    params = sm.ols('Accuracy ~ Aspects', data).fit().params
    return params['Intercept'], params['Aspects']


def _trend_endpoints(intercept, coef, splits, split_size):
    """Return the fitted accuracy at the first and last cumulative split.

    :param intercept: fitted intercept
    :param coef: fitted slope (accuracy per aspect)
    :param splits: number of cumulative training data splits
    :param split_size: incremental size of each split
    :return: (start, end) fitted values
    """
    return intercept + coef * split_size, intercept + coef * splits * split_size


def main():
    """
    Plots the obtained results for all domains specified in domains. Plots are saved to the specified save path.

    Also writes one trendline summary per domain (an OLS fit per task plus the
    projected crossing point of the two lines) next to the graphs.
    :return:
    """
    global embedding_dim
    embedding_dim = 768
    results_path = "C:/Users/<NAME>/Google Drive/Studie/Seminar BA & QM/Results/"  # Path for results.
    file_path = results_path + "Result_Files/"  # Path to result files.
    global rest_path
    global target_path
    global ft_path
    global save_path
    rest_path = file_path + "Restaurant/"  # Path to restaurant results.
    target_path = file_path + "Target/"  # Path to target results.
    ft_path = file_path + "Fine_Tuning/"  # Path to fine-tuning results.
    save_path = results_path + "Graphs/"  # Path for saving the graphs.
    # Name, year, splits, split size.
    laptop_domain = ["laptop", 2014, 9, 250]
    book_domain = ["book", 2019, 9, 300]
    hotel_domain = ["hotel", 2015, 10, 20]
    apex_domain = ["Apex", 2004, 10, 25]
    camera_domain = ["Camera", 2004, 10, 31]
    creative_domain = ["Creative", 2004, 10, 54]
    nokia_domain = ["Nokia", 2004, 10, 22]
    domains = [laptop_domain, book_domain, hotel_domain, apex_domain, camera_domain, creative_domain, nokia_domain]
    for domain in domains:
        name, year, splits, split_size = domain
        result = get_results(domain=name, year=year, splits=splits, split_size=split_size)
        plot = ggplot(result) + aes(x='Aspects', y='Accuracy', color='Task', shape='Task') + geom_point() + stat_smooth(
            method='lm') + theme(legend_text=element_text(size=10))
        plot.save(save_path + name + "_results", dpi=600)
        # Calculate and save trendline summary.  The previously duplicated
        # per-task fitting code is factored out into _fit_trend/_trend_endpoints.
        with open(save_path + name + "_trend.txt", 'w') as trend:
            trend.write("Target: \n")
            target_intercept, target_coef = _fit_trend(result, 'target-target')
            trend.write("Intercept: " + str(target_intercept) + ", Coefficient: " + str(target_coef) + "\n")
            target_start, target_end = _trend_endpoints(target_intercept, target_coef, splits, split_size)
            trend.write("Start: " + str(target_start) + ", End: " + str(target_end) + "\n\nFine-tuning: \n")
            ft_intercept, ft_coef = _fit_trend(result, 'fine-tuning')
            trend.write("Intercept: " + str(ft_intercept) + ", Coefficient: " + str(ft_coef) + "\n")
            ft_start, ft_end = _trend_endpoints(ft_intercept, ft_coef, splits, split_size)
            trend.write("Start: " + str(ft_start) + ", End: " + str(ft_end) + "\n\n")
            # Projected number of aspects at which the two fitted lines cross.
            cross = np.round(np.divide(ft_intercept - target_intercept, target_coef - ft_coef))
            trend.write("Cross: " + str(cross))
def get_results(domain, year, splits, split_size):
    """
    Get the results from the program generated text files.

    Builds one row per (cumulative split, task) with the number of training
    aspects, the measured test accuracy, and the task name.

    :param domain: the domain
    :param year: the year of the domain dataset
    :param splits: the number of cumulative training data splits
    :param split_size: the incremental size of each training data split
    :return: pandas DataFrame with columns 'Aspects', 'Accuracy' and 'Task'
    """
    # Extract restaurant results.
    # The restaurant baseline is a single fixed model, so its accuracy is
    # repeated once for every split size below.
    with open(rest_path + str(embedding_dim) + "results_restaurant_" + domain + "_test_" + str(year) + ".txt",
              'r') as results:
        lines = results.readlines()
        acc_line = lines[4].split(" ")
        acc = acc_line[3][:len(acc_line[3]) - 1]  # Remove trailing comma too.
    aspects = []
    accuracy = []
    task = []
    for i in range(1, splits + 1):
        aspects.append(i * split_size)
        accuracy.append(float(acc))
        task.append('restaurant-target')
    # Extract target results.
    with open(target_path + str(embedding_dim) + "results_" + domain + "_" + domain + "_" + str(year) + ".txt",
              'r') as results:
        lines = results.readlines()
        for i in range(5, len(lines), 15):
            acc_line = lines[i].split(" ")
            # NOTE(review): the slice length is taken from acc_line[3] while
            # the value comes from acc_line[6]; this looks like a copy-paste
            # slip (len(acc_line[6]) - 1 was probably intended).  Confirm
            # against the result-file format before changing.
            acc = acc_line[6][:len(acc_line[3]) - 1]  # Remove trailing comma too.
            accuracy.append(float(acc))
            task.append('target-target')
    for i in range(1, splits + 1):
        aspects.append(i * split_size)
    # Extract fine-tuning results.
    with open(ft_path + str(embedding_dim) + "results_restaurant_" + domain + "_" + domain + "_" + str(year) + ".txt",
              'r') as results:
        lines = results.readlines()
        for i in range(5, len(lines), 15):
            acc_line = lines[i].split(" ")
            # NOTE(review): same len(acc_line[3]) quirk as above.
            acc = acc_line[6][:len(acc_line[3]) - 1]  # Remove trailing comma too.
            accuracy.append(float(acc))
            task.append('fine-tuning')
        for i in range(1, splits + 1):
            aspects.append(i * split_size)
    # Create and return dataframe.
    result = {'Aspects': aspects, 'Accuracy': accuracy, 'Task': task}
    df_result = pd.DataFrame(result)
    return df_result
# Script entry point.
if __name__ == '__main__':
    main()
| [
"plotnine.stat_smooth",
"plotnine.ggplot",
"plotnine.aes",
"plotnine.element_text",
"statsmodels.formula.api.ols",
"plotnine.geom_point",
"pandas.DataFrame",
"numpy.divide"
] | [((5549, 5569), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (5561, 5569), True, 'import pandas as pd\n'), ((1741, 1765), 'plotnine.stat_smooth', 'stat_smooth', ([], {'method': '"""lm"""'}), "(method='lm')\n", (1752, 1765), False, 'from plotnine import aes, element_text, geom_point, ggplot, stat_smooth, theme\n'), ((3303, 3368), 'numpy.divide', 'np.divide', (['(ft_intercept - target_intercept)', '(target_coef - ft_coef)'], {}), '(ft_intercept - target_intercept, target_coef - ft_coef)\n', (3312, 3368), True, 'import numpy as np\n'), ((1726, 1738), 'plotnine.geom_point', 'geom_point', ([], {}), '()\n', (1736, 1738), False, 'from plotnine import aes, element_text, geom_point, ggplot, stat_smooth, theme\n'), ((1799, 1820), 'plotnine.element_text', 'element_text', ([], {'size': '(10)'}), '(size=10)\n', (1811, 1820), False, 'from plotnine import aes, element_text, geom_point, ggplot, stat_smooth, theme\n'), ((2140, 2181), 'statsmodels.formula.api.ols', 'sm.ols', (['"""Accuracy ~ Aspects"""', 'target_data'], {}), "('Accuracy ~ Aspects', target_data)\n", (2146, 2181), True, 'import statsmodels.formula.api as sm\n'), ((2792, 2829), 'statsmodels.formula.api.ols', 'sm.ols', (['"""Accuracy ~ Aspects"""', 'ft_data'], {}), "('Accuracy ~ Aspects', ft_data)\n", (2798, 2829), True, 'import statsmodels.formula.api as sm\n'), ((1648, 1662), 'plotnine.ggplot', 'ggplot', (['result'], {}), '(result)\n', (1654, 1662), False, 'from plotnine import aes, element_text, geom_point, ggplot, stat_smooth, theme\n'), ((1665, 1723), 'plotnine.aes', 'aes', ([], {'x': '"""Aspects"""', 'y': '"""Accuracy"""', 'color': '"""Task"""', 'shape': '"""Task"""'}), "(x='Aspects', y='Accuracy', color='Task', shape='Task')\n", (1668, 1723), False, 'from plotnine import aes, element_text, geom_point, ggplot, stat_smooth, theme\n')] |
# To run this script:
# > python getwaveform_silwal2016.py
import obspy
import copy
import getwaveform_iris
from obspy.clients.fdsn import Client
from obspy.core.event import Event, Origin, Magnitude
import numpy as np
import util_helpers as uh
# Catalog file
filename = '/home/carltape/REPOSITORIES/manuscripts/vipul/papers/2014mt/data/SCAK_mech.txt'
# Select events (catalog row numbers, before the header-line offset is added)
iex = [80] # default = 8 (for example event = 2009-04-07T20:12:55.351000Z)
# Line index for 21 events
#iex = [1, 3, 6, 7, 12, 29, 30, 48, 51, 66, 69, 71, 73, 77, 80, 81, 83, 88, 93, 96, 101];
header_lines = 21 # number of header lines to skip
# Absolute line numbers of the selected events inside the catalog file
line_indx = np.array(iex) + header_lines
#--------------------------------------------------------------------------------
# DEFAULT SETTINGS (see getwaveform_iris.py)
# Pre-processing (mainly for CAP)
rotate = True
output_cap_weight_file = True
remove_response = True
detrend = True
demean = True
output_event_info = True
# pre-filter for deconvolution
# https://ds.iris.edu/files/sac-manual/commands/transfer.html
# fmaxc should be based on sampling rate (desired channels)
# fminc should be based on the request length of the time series
fminc = 1/200
fmaxc = 10
pre_filt=(0.5*fminc, fminc, fmaxc, 2.0*fmaxc)
#pre_filt=(0.005, 0.006, 10.0, 15.0) # BH default
# for CAP all waveforms need to have the same sample rate
resample_freq = 50.0 # =0 for no resampling
scale_factor = 10**2 # 10**2 to convert m/s to cm/s
# event parameters
sec_before_after_event = 10 # time window to search for a target event in a catalog
# Input parameter for extracting waveforms (common for all 21 events)
min_dist = 0
max_dist = 500
tbefore_sec = 100
tafter_sec = 300
network = 'AK,AT,AV,CN,II,IU,XM,XV,XZ,YV' # note: cannot use '*' because of IM
station = '*'
channel = 'BH*'
use_catalog = 0 # To get (lat,lon, etime, dep, mag) from some catalog = 1 OR use defined = 0 (see iex=9)
#--------------------------------------------------------------------------------
# Read the catalog once up front.  The previous version re-opened and re-read
# the whole file on every loop iteration and never closed the handle; the
# `with` block guarantees the file descriptor is released.
with open(filename, 'r') as f:
    catalog_lines = f.readlines()
# The FDSN client is loop-invariant, so construct it a single time.
client = Client("IRIS")
for indx in line_indx:
    line_elements = catalog_lines[indx].split()
    # Get event info (otime, lat, lon, dep, mag) from the catalog
    eid = line_elements[-1] # Fix to take microseconds
    otime = uh.eid2otime(eid)
    elat = line_elements[7]
    elon = line_elements[6]
    edep = float(line_elements[8]) * 1000.0 # in meters
    emag = line_elements[16]
    print(otime, elat, elon, edep, emag)
    # Build a minimal obspy Event carrying one origin and one magnitude
    ev = Event()
    org = Origin()
    org.latitude = elat
    org.longitude = elon
    org.depth = edep
    org.time = otime
    mag = Magnitude()
    mag.mag = emag
    mag.magnitude_type = "Mw"
    ev.origins.append(org)
    ev.magnitudes.append(mag)
    # Extract and pre-process the waveforms for this event
    getwaveform_iris.run_get_waveform(c = client, event = ev,
            min_dist = min_dist, max_dist = max_dist,
            before = tbefore_sec, after = tafter_sec,
            network = network, station = station, channel = channel,
            resample_freq = resample_freq, ifrotate = rotate,
            ifCapInp = output_cap_weight_file,
            ifRemoveResponse = remove_response,
            ifDetrend = detrend, ifDemean = demean,
            ifEvInfo = output_event_info,
            scale_factor = scale_factor, pre_filt = pre_filt)
| [
"obspy.core.event.Origin",
"numpy.array",
"util_helpers.eid2otime",
"obspy.core.event.Event",
"getwaveform_iris.run_get_waveform",
"obspy.clients.fdsn.Client",
"obspy.core.event.Magnitude"
] | [((646, 659), 'numpy.array', 'np.array', (['iex'], {}), '(iex)\n', (654, 659), True, 'import numpy as np\n'), ((2313, 2330), 'util_helpers.eid2otime', 'uh.eid2otime', (['eid'], {}), '(eid)\n', (2325, 2330), True, 'import util_helpers as uh\n'), ((2582, 2596), 'obspy.clients.fdsn.Client', 'Client', (['"""IRIS"""'], {}), "('IRIS')\n", (2588, 2596), False, 'from obspy.clients.fdsn import Client\n'), ((2606, 2613), 'obspy.core.event.Event', 'Event', ([], {}), '()\n', (2611, 2613), False, 'from obspy.core.event import Event, Origin, Magnitude\n'), ((2624, 2632), 'obspy.core.event.Origin', 'Origin', ([], {}), '()\n', (2630, 2632), False, 'from obspy.core.event import Event, Origin, Magnitude\n'), ((2734, 2745), 'obspy.core.event.Magnitude', 'Magnitude', ([], {}), '()\n', (2743, 2745), False, 'from obspy.core.event import Event, Origin, Magnitude\n'), ((2882, 3308), 'getwaveform_iris.run_get_waveform', 'getwaveform_iris.run_get_waveform', ([], {'c': 'client', 'event': 'ev', 'min_dist': 'min_dist', 'max_dist': 'max_dist', 'before': 'tbefore_sec', 'after': 'tafter_sec', 'network': 'network', 'station': 'station', 'channel': 'channel', 'resample_freq': 'resample_freq', 'ifrotate': 'rotate', 'ifCapInp': 'output_cap_weight_file', 'ifRemoveResponse': 'remove_response', 'ifDetrend': 'detrend', 'ifDemean': 'demean', 'ifEvInfo': 'output_event_info', 'scale_factor': 'scale_factor', 'pre_filt': 'pre_filt'}), '(c=client, event=ev, min_dist=min_dist,\n max_dist=max_dist, before=tbefore_sec, after=tafter_sec, network=\n network, station=station, channel=channel, resample_freq=resample_freq,\n ifrotate=rotate, ifCapInp=output_cap_weight_file, ifRemoveResponse=\n remove_response, ifDetrend=detrend, ifDemean=demean, ifEvInfo=\n output_event_info, scale_factor=scale_factor, pre_filt=pre_filt)\n', (2915, 3308), False, 'import getwaveform_iris\n')] |
from functools import partial
import numpy as np
from tqdm import tqdm
from .experiment_linear_balance_classes import ExperimentLinearBalanced
from .experiment_linear_exploit_partial import ExperimentLinearExploitPartial
from ..models.linear import Linear_S, Linear_M, Linear_L
from .training_args import LMMixupOutsideDataArgs
from ..data_loaders.cvs_loader import CVSLoader
from ..data_loaders.hdf5_loader import HDF5Loader
from ..utils.label_convertors import convert2vec
from ..utils.label_convertors import partial2onehot, fill_unlabeled
from ..utils.training_utils import init_model, callback_list, training_log
class ExpLinBalLargeOutsideExploitPartial(
    ExperimentLinearBalanced, ExperimentLinearExploitPartial
):
    """Noisy-Student experiment on linear models that balances class
    distribution with a large outside (ChEMBL) dataset and also exploits
    partially labeled samples.

    Combines the class-balancing behavior of ``ExperimentLinearBalanced``
    with the partial-label handling of ``ExperimentLinearExploitPartial``.
    """

    def load_data(self):
        """Load labeled, partially labeled, and outside training data.

        Returns a 7-tuple:
        (x_train, y_train, x_test, y_test, x_partial, y_partial,
        outside_data_loader), where the first six are numpy arrays and the
        last is an HDF5Loader over the outside dataset.
        """
        data_loader = CVSLoader(self.data_path, rand_seed=self.rand_seed)
        outside_data_loader = HDF5Loader(self.outside_data_path, "r")
        # 70/30 train/test split of the fully labeled data
        x_train, y_train, x_test, y_test = data_loader.load_data(
            ["ECFP", "onehot_label"], ratio=0.7, shuffle=True
        )
        convert2vec_float = partial(convert2vec, dtype=float)
        x_train, y_train, x_test, y_test = list(
            map(convert2vec_float, [x_train, y_train, x_test, y_test])
        )
        # if self.mixup is not None:
        #     x_train, y_train = self._mixup(x_train, y_train)
        # Partially labeled samples: features plus partial label strings,
        # which are converted to one-hot-with-unknowns in place.
        data_partial = data_loader.load_unlabeled(["ECFP", "Label"])
        x_partial = convert2vec(data_partial[:, 0], dtype=float)
        y_partial = data_partial[:, 1]
        for i, label in enumerate(y_partial):
            y_partial[i] = partial2onehot(label)
        return (
            x_train,
            y_train,
            x_test,
            y_test,
            x_partial,
            y_partial,
            outside_data_loader,
        )

    def _predict_and_balance(self, model, outside, x, y, shuffle=True):
        r""" Make predictions with the given model and mix it with the existing
        training set.
        model (tf.keras.Model): the pretrained model to make predictions
        x_unlabeled (np.array): inputs for the model
        x (np.array): training set data
        y (np.array): training set label
        shuffle (bool): if shuffle after mixing the training and predicted data
        =======================================================================
        return:
        x_mix: mixed training data
        y_mix: mixed labels (soft)
        """
        # Current class distribution of the labeled set; orig_dis/new_dis are
        # returned so callers can plot how balancing changed it.
        distribution = self.find_distribution(y)
        orig_dis = distribution.copy()
        new_dis = [0] * len(distribution)
        x_mix = x
        y_mix = y
        n_batches = 0
        pb = tqdm(total=outside.steps)
        print("total steps: {}".format(outside.steps))
        for batch in outside.batch_loader():
            y_pred = model.predict(batch)
            # _find_good selects pseudo-labeled samples that improve balance
            indices, _, increase = self._find_good(y_pred, distribution)
            x_mix = np.concatenate([x_mix, batch[0][indices]], axis=0)
            y_mix = np.concatenate([y_mix, y_pred[indices]], axis=0)
            new_dis = [n + i for n, i in zip(new_dis, increase)]
            n_batches += 1
            pb.update(1)
            # outside loader is infinite; stop after one pass
            if n_batches == outside.steps:
                break
        if shuffle:
            # Shuffle features and labels with the same permutation, in place
            randomed_idx = np.random.permutation(x_mix.shape[0])
            np.take(x_mix, randomed_idx, axis=0, out=x_mix)
            np.take(y_mix, randomed_idx, axis=0, out=y_mix)
        return x_mix, y_mix, orig_dis, new_dis

    def train_teacher(
        self,
        model,
        x_train,
        y_train,
        x_test,
        y_test,
        x_unlabeled,
        x_partial,
        y_partial,
        log_f,
        log_path,
        n_repeat,
        activation="sigmoid",
        loss="binary_crossentropy",
        out_len=12,
    ):
        r""" Train linear model with Noisy Student and the inputs are balanced
        by the first teacher model
        model: the model to be trained
        x_train: training data
        y_train: labels of the training data
        x_test: testing data
        y_test: testing data labels
        x_unlabeled: unlabeled training data
        x_partial: partially labeled data
        y_partial: the partial labels
        log_f: logging file handler
        log_path: path to the logging directory
        n_repeat: times to re-train the model with balanced data
        =======================================================================
        return: the trained model, training histories
        """
        model = init_model(
            model,
            drop_rate=self.drop_rate,
            loss=loss,
            out_len=out_len,
            activation=activation,
        )
        cb_list = callback_list(log_path, self.es_patience, model)
        histories = list()
        log_f.write("training {}:\n".format(str(model)))
        # Initial fit on the labeled training data only
        train_his = model.fit(
            x=x_train,
            y=y_train,
            batch_size=self.batch_size,
            epochs=self.epochs,
            callbacks=cb_list,
            validation_data=[x_test, y_test],
        )
        histories.append(train_his)
        y_pred = model.predict(x_test)
        training_log(train_his, y_pred, y_test, log_f)
        # repeat training the model
        for i in range(n_repeat):
            log_f.write(
                "repeat training {}, {}/{}:\n".format(str(model), i + 1, n_repeat)
            )
            # label partially labled data
            y_pred_partial = model.predict(x_partial)
            y_pred_partial = fill_unlabeled(y_pred_partial, y_partial, normalize=True)
            # label outside data
            x_mix, y_mix, orig_dis, new_dis = self._predict_and_balance(
                model=model, outside=x_unlabeled, x=x_train, y=y_train, shuffle=True
            )
            # combine partially labeled and unlabeled
            x_mix = np.concatenate([x_partial, x_mix], axis=0)
            y_mix = np.concatenate([y_pred_partial, y_mix], axis=0)
            # shuffle
            randomed_idx = np.random.permutation(x_mix.shape[0])
            np.take(x_mix, randomed_idx, axis=0, out=x_mix)
            np.take(y_mix, randomed_idx, axis=0, out=y_mix)
            # mixup
            if self.mixup is not None:
                x_mix, y_mix = self._mixup(x_mix, y_mix)
            # train model with the mixed data
            train_his = model.fit(
                x=x_mix,
                y=y_mix,
                batch_size=self.batch_size,
                epochs=self.epochs,
                callbacks=cb_list,
                validation_data=[x_test, y_test],
            )
            histories.append(train_his)
            # log training history
            y_pred = model.predict(x_test)
            training_log(train_his, y_pred, y_test, log_f)
            self._plot_distribution_change(
                orig_dis,
                new_dis,
                name=str(model) + "_repeat" + str(i) + "_distribution.png",
            )
        return model, histories

    def train_student(
        self,
        student_model,
        teacher_model,
        x_train,
        y_train,
        x_test,
        y_test,
        x_unlabeled,
        x_partial,
        y_partial,
        log_f,
        log_path,
        n_repeat,
        activation="softmax",
        loss="categorical_crossentropy",
        out_len=32,
    ):
        r""" Train student linear model with Noisy Student
        student_model: the model to be trained
        teacher_model: trained model used to generate labels
        x_train: training data
        y_train: labels of the training data
        x_test: testing data
        y_test: testing data labels
        x_unlabeled: unlabeled training data
        x_partial: partially labeled data
        y_partial: the partial labels
        cb_list: callback list
        log_f: logging file handler
        log_path: path to the logging directory
        n_repeat: times to train the model
        =======================================================================
        return: the trained model, training histories
        """
        # label partially labeld data with the teacher model
        y_pred_partial = teacher_model.predict(x_partial)
        y_pred_partial = fill_unlabeled(y_pred_partial, y_partial, normalize=True)
        # balance training data with outside dataset
        x_mix, y_mix, _, _ = self._predict_and_balance(
            model=teacher_model, outside=x_unlabeled, x=x_train, y=y_train, shuffle=True
        )
        # combine partially labeled and unlabeled
        x_mix = np.concatenate([x_partial, x_mix], axis=0)
        y_mix = np.concatenate([y_pred_partial, y_mix], axis=0)
        # shuffle
        randomed_idx = np.random.permutation(x_mix.shape[0])
        np.take(x_mix, randomed_idx, axis=0, out=x_mix)
        np.take(y_mix, randomed_idx, axis=0, out=y_mix)
        # mixup
        if self.mixup is not None:
            x_mix, y_mix = self._mixup(x_mix, y_mix)
        # init model
        model = init_model(
            student_model,
            drop_rate=self.drop_rate,
            loss=loss,
            out_len=out_len,
            activation=activation,
        )
        # callbacks
        cb_list = callback_list(log_path, self.es_patience, model)
        # fit Linear_M model to mixed dataset
        histories = list()
        log_f.write("training {}:\n".format(str(model)))
        train_his = model.fit(
            x=x_mix,
            y=y_mix,
            batch_size=self.batch_size,
            epochs=self.epochs,
            callbacks=cb_list,
            validation_data=[x_test, y_test],
        )
        histories.append(train_his)
        y_pred = model.predict(x_test)
        training_log(train_his, y_pred, y_test, log_f)
        # repeat training the model
        for i in range(n_repeat):
            log_f.write(
                "repeat training {}, {}/{}:\n".format(str(model), i + 1, n_repeat)
            )
            # label partially labled data
            y_pred_partial = model.predict(x_partial)
            y_pred_partial = fill_unlabeled(y_pred_partial, y_partial, normalize=True)
            # label unlabled
            x_mix, y_mix, orig_dis, new_dis = self._predict_and_balance(
                model=model, outside=x_unlabeled, x=x_train, y=y_train, shuffle=True
            )
            # combine partially labeled and unlabeled
            x_mix = np.concatenate([x_partial, x_mix], axis=0)
            y_mix = np.concatenate([y_pred_partial, y_mix], axis=0)
            # shuffle
            randomed_idx = np.random.permutation(x_mix.shape[0])
            np.take(x_mix, randomed_idx, axis=0, out=x_mix)
            np.take(y_mix, randomed_idx, axis=0, out=y_mix)
            # mixup
            if self.mixup is not None:
                x_mix, y_mix = self._mixup(x_mix, y_mix)
            # train model with the mixed data
            train_his = model.fit(
                x=x_mix,
                y=y_mix,
                batch_size=self.batch_size,
                epochs=self.epochs,
                callbacks=cb_list,
                validation_data=[x_test, y_test],
            )
            histories.append(train_his)
            # log training history
            y_pred = model.predict(x_test)
            training_log(train_his, y_pred, y_test, log_f)
            self._plot_distribution_change(
                orig_dis,
                new_dis,
                name=str(model) + "_repeat" + str(i) + "_distribution.png",
            )
        return model, histories

    def run_experiment(self):
        """Run the full pipeline: teacher (Linear_S), then students
        (Linear_M, Linear_L), logging metrics after each stage."""
        # load training and testing data
        (
            x_train,
            y_train,
            x_test,
            y_test,
            x_partial,
            y_partial,
            outside_data_loader,
        ) = self.load_data()
        # specific for ChEMBL24 dataset
        outside_data_loader.set_dataset("/ChEMBL/ECFP", shuffle=True, infinite=True)
        outside_data_loader.set_batch_size(self.batch_size)
        # open log
        log_f, log_path = self.open_log_(self.log_path)
        # train the teacher model
        trained_model, histories = self.train_teacher(
            model=Linear_S,
            x_train=x_train,
            y_train=y_train,
            x_test=x_test,
            y_test=y_test,
            x_unlabeled=outside_data_loader,
            x_partial=x_partial,
            y_partial=y_partial,
            log_f=log_f,
            log_path=log_path,
            n_repeat=self.n_repeat,
        )
        # log results
        self.log_training(trained_model, histories, log_path)
        # train student models; each student is taught by the previous model
        for student in [Linear_M, Linear_L]:
            trained_model, histories = self.train_student(
                student_model=student,
                teacher_model=trained_model,
                x_train=x_train,
                y_train=y_train,
                x_test=x_test,
                y_test=y_test,
                x_unlabeled=outside_data_loader,
                x_partial=x_partial,
                y_partial=y_partial,
                log_f=log_f,
                log_path=log_path,
                n_repeat=self.n_repeat,
            )
            # log results
            self.log_training(trained_model, histories, log_path)
        log_f.write("best losses:\n {}\n".format(str(self.best_loss)))
        log_f.write("best accuracies:\n {}\n".format(str(self.best_acc)))
        log_f.close()
if __name__ == "__main__":
    # Parse CLI arguments and launch the balanced/partial-label experiment.
    parser = LMMixupOutsideDataArgs()
    args = parser.parse_args()
    experiment = ExpLinBalLargeOutsideExploitPartial(
        data_path=args.data_path,
        outside_data_path=args.outside_path,
        log_path=args.log_path,
        es_patience=args.es_patience,
        batch_size=args.batch_size,
        epochs=args.epochs,
        n_repeat=args.repeat,
        mixup=args.mixup,
        mixup_repeat=args.mixup_repeat,
        rand_seed=args.rand_seed,
    )
    experiment.run_experiment()
| [
"tqdm.tqdm",
"numpy.take",
"functools.partial",
"numpy.concatenate",
"numpy.random.permutation"
] | [((1064, 1097), 'functools.partial', 'partial', (['convert2vec'], {'dtype': 'float'}), '(convert2vec, dtype=float)\n', (1071, 1097), False, 'from functools import partial\n'), ((2626, 2651), 'tqdm.tqdm', 'tqdm', ([], {'total': 'outside.steps'}), '(total=outside.steps)\n', (2630, 2651), False, 'from tqdm import tqdm\n'), ((8542, 8584), 'numpy.concatenate', 'np.concatenate', (['[x_partial, x_mix]'], {'axis': '(0)'}), '([x_partial, x_mix], axis=0)\n', (8556, 8584), True, 'import numpy as np\n'), ((8601, 8648), 'numpy.concatenate', 'np.concatenate', (['[y_pred_partial, y_mix]'], {'axis': '(0)'}), '([y_pred_partial, y_mix], axis=0)\n', (8615, 8648), True, 'import numpy as np\n'), ((8690, 8727), 'numpy.random.permutation', 'np.random.permutation', (['x_mix.shape[0]'], {}), '(x_mix.shape[0])\n', (8711, 8727), True, 'import numpy as np\n'), ((8736, 8783), 'numpy.take', 'np.take', (['x_mix', 'randomed_idx'], {'axis': '(0)', 'out': 'x_mix'}), '(x_mix, randomed_idx, axis=0, out=x_mix)\n', (8743, 8783), True, 'import numpy as np\n'), ((8792, 8839), 'numpy.take', 'np.take', (['y_mix', 'randomed_idx'], {'axis': '(0)', 'out': 'y_mix'}), '(y_mix, randomed_idx, axis=0, out=y_mix)\n', (8799, 8839), True, 'import numpy as np\n'), ((2887, 2937), 'numpy.concatenate', 'np.concatenate', (['[x_mix, batch[0][indices]]'], {'axis': '(0)'}), '([x_mix, batch[0][indices]], axis=0)\n', (2901, 2937), True, 'import numpy as np\n'), ((2958, 3006), 'numpy.concatenate', 'np.concatenate', (['[y_mix, y_pred[indices]]'], {'axis': '(0)'}), '([y_mix, y_pred[indices]], axis=0)\n', (2972, 3006), True, 'import numpy as np\n'), ((3236, 3273), 'numpy.random.permutation', 'np.random.permutation', (['x_mix.shape[0]'], {}), '(x_mix.shape[0])\n', (3257, 3273), True, 'import numpy as np\n'), ((3286, 3333), 'numpy.take', 'np.take', (['x_mix', 'randomed_idx'], {'axis': '(0)', 'out': 'x_mix'}), '(x_mix, randomed_idx, axis=0, out=x_mix)\n', (3293, 3333), True, 'import numpy as np\n'), ((3346, 3393), 'numpy.take', 
'np.take', (['y_mix', 'randomed_idx'], {'axis': '(0)', 'out': 'y_mix'}), '(y_mix, randomed_idx, axis=0, out=y_mix)\n', (3353, 3393), True, 'import numpy as np\n'), ((5832, 5874), 'numpy.concatenate', 'np.concatenate', (['[x_partial, x_mix]'], {'axis': '(0)'}), '([x_partial, x_mix], axis=0)\n', (5846, 5874), True, 'import numpy as np\n'), ((5895, 5942), 'numpy.concatenate', 'np.concatenate', (['[y_pred_partial, y_mix]'], {'axis': '(0)'}), '([y_pred_partial, y_mix], axis=0)\n', (5909, 5942), True, 'import numpy as np\n'), ((5992, 6029), 'numpy.random.permutation', 'np.random.permutation', (['x_mix.shape[0]'], {}), '(x_mix.shape[0])\n', (6013, 6029), True, 'import numpy as np\n'), ((6042, 6089), 'numpy.take', 'np.take', (['x_mix', 'randomed_idx'], {'axis': '(0)', 'out': 'x_mix'}), '(x_mix, randomed_idx, axis=0, out=x_mix)\n', (6049, 6089), True, 'import numpy as np\n'), ((6102, 6149), 'numpy.take', 'np.take', (['y_mix', 'randomed_idx'], {'axis': '(0)', 'out': 'y_mix'}), '(y_mix, randomed_idx, axis=0, out=y_mix)\n', (6109, 6149), True, 'import numpy as np\n'), ((10386, 10428), 'numpy.concatenate', 'np.concatenate', (['[x_partial, x_mix]'], {'axis': '(0)'}), '([x_partial, x_mix], axis=0)\n', (10400, 10428), True, 'import numpy as np\n'), ((10449, 10496), 'numpy.concatenate', 'np.concatenate', (['[y_pred_partial, y_mix]'], {'axis': '(0)'}), '([y_pred_partial, y_mix], axis=0)\n', (10463, 10496), True, 'import numpy as np\n'), ((10546, 10583), 'numpy.random.permutation', 'np.random.permutation', (['x_mix.shape[0]'], {}), '(x_mix.shape[0])\n', (10567, 10583), True, 'import numpy as np\n'), ((10596, 10643), 'numpy.take', 'np.take', (['x_mix', 'randomed_idx'], {'axis': '(0)', 'out': 'x_mix'}), '(x_mix, randomed_idx, axis=0, out=x_mix)\n', (10603, 10643), True, 'import numpy as np\n'), ((10656, 10703), 'numpy.take', 'np.take', (['y_mix', 'randomed_idx'], {'axis': '(0)', 'out': 'y_mix'}), '(y_mix, randomed_idx, axis=0, out=y_mix)\n', (10663, 10703), True, 'import numpy as 
np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from math import *
def Draw(u, f, n):
    """Draw a cobweb plot of the iteration x_{k+1} = f(x_k).

    Starting from u, performs n iterations of f and plots the staircase
    path (red), the diagonal y = x (blue), and the graph of f (green),
    then shows the figure.
    """
    xs = [u]
    ys = [0]
    x = u
    for _ in range(n):
        y = f(x)
        # vertical segment up to the curve, then horizontal to the diagonal
        xs.extend([x, y])
        ys.extend([y, y])
        x = y
    plt.plot(xs, ys, "r")
    x_min, x_max = min(xs), max(xs)
    y_min, y_max = min(ys), max(ys)
    plt.axis([0, x_max + 0.5, 0, y_max + 0.5])
    plt.plot([x_min - 1, x_max + 0.5], [x_min - 1, x_max + 0.5], "b")
    grid = np.linspace(0, x_max + 0.5, 200)
    values = [f(t) for t in grid]
    plt.plot(grid, values, "g")
    plt.show()
def f(x):
    """Iteration map f(x) = 1 / (2 - sqrt(x)); undefined at x = 4."""
    denominator = 2 - sqrt(x)
    return 1 / denominator
# Cobweb diagrams for three different starting points, clearing the
# figure between runs.
Draw(2.8,f, 50)
plt.clf()
Draw(1.1,f, 50)
plt.clf()
Draw(2.5,f, 50) | [
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((676, 685), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (683, 685), True, 'import matplotlib.pyplot as plt\n'), ((704, 713), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (711, 713), True, 'import matplotlib.pyplot as plt\n'), ((296, 325), 'matplotlib.pyplot.plot', 'plt.plot', (['List_X', 'List_Y', '"""r"""'], {}), "(List_X, List_Y, 'r')\n", (304, 325), True, 'import matplotlib.pyplot as plt\n'), ((406, 448), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, X_MAX + 0.5, 0, Y_MAX + 0.5]'], {}), '([0, X_MAX + 0.5, 0, Y_MAX + 0.5])\n', (414, 448), True, 'import matplotlib.pyplot as plt\n'), ((447, 512), 'matplotlib.pyplot.plot', 'plt.plot', (['[X_MIN - 1, X_MAX + 0.5]', '[X_MIN - 1, X_MAX + 0.5]', '"""b"""'], {}), "([X_MIN - 1, X_MAX + 0.5], [X_MIN - 1, X_MAX + 0.5], 'b')\n", (455, 512), True, 'import matplotlib.pyplot as plt\n'), ((508, 540), 'numpy.linspace', 'np.linspace', (['(0)', '(X_MAX + 0.5)', '(200)'], {}), '(0, X_MAX + 0.5, 200)\n', (519, 540), True, 'import numpy as np\n'), ((567, 586), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""g"""'], {}), "(x, y, 'g')\n", (575, 586), True, 'import matplotlib.pyplot as plt\n'), ((597, 607), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (605, 607), True, 'import matplotlib.pyplot as plt\n')] |
import typing
import numpy as np
from rrtstar.geometry import Point2d, Trajectory
from rrtstar.rrt_star import Vertex
def constant_speed_line_steering_policy(
    start: Vertex, dest: Point2d, time: float, velocity: float
) -> typing.Tuple[float, Trajectory]:
    """Steer along a straight line from *start* toward *dest* at constant speed.

    Samples the line every 0.05 s for up to *time* seconds; if *dest* is
    reached earlier, the path ends exactly at *dest*. Returns the traveled
    Euclidean distance (cost) and the resulting Trajectory, whose steering
    input is the list of sample times.
    """
    origin = start.position.to_array()
    direction = dest.to_array() - origin
    distance = np.linalg.norm(direction)
    direction = direction / distance

    dt = 0.05  # [s]
    waypoints = []
    times = []
    for t in np.arange(start=0.0, stop=time + dt, step=dt):
        if velocity * t < distance:
            waypoints.append(
                Point2d(
                    origin[0] + direction[0] * velocity * t,
                    origin[1] + direction[1] * velocity * t,
                )
            )
            times.append(t)
        else:
            # Destination reached before the time budget ran out.
            waypoints.append(dest)
            times.append(t)
            break

    # Generate trajectory. Time is equivalent to the steering input.
    trajectory = Trajectory(path=waypoints, steering_input=times, time=times[-1])
    cost = np.linalg.norm(waypoints[-1].to_array() - waypoints[0].to_array())
    return cost, trajectory
| [
"rrtstar.geometry.Trajectory",
"numpy.arange",
"numpy.linalg.norm"
] | [((345, 372), 'numpy.linalg.norm', 'np.linalg.norm', (['unit_vector'], {}), '(unit_vector)\n', (359, 372), True, 'import numpy as np\n'), ((522, 571), 'numpy.arange', 'np.arange', ([], {'start': '(0.0)', 'stop': '(time + step)', 'step': 'step'}), '(start=0.0, stop=time + step, step=step)\n', (531, 571), True, 'import numpy as np\n'), ((1103, 1170), 'rrtstar.geometry.Trajectory', 'Trajectory', ([], {'path': 'path', 'steering_input': 'timesteps', 'time': 'timesteps[-1]'}), '(path=path, steering_input=timesteps, time=timesteps[-1])\n', (1113, 1170), False, 'from rrtstar.geometry import Point2d, Trajectory\n')] |
import argparse
import logging
import numpy as np
from PCA.PCAModel import PCAModel
# Configure the root logger: DEBUG level, with timestamp, file and line
# number in every record.
logging.basicConfig(
    level=logging.DEBUG,
    format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'
)
def main():
    """Fit a PCA model on toy 4-D vectors and log the projection round-trip."""
    logging.info("pca_toy_vector.py main()")
    samples = np.array([[0.14, -2.3, 1.58, 1], [-1.2, 1.62, 0.76, -1], [0.1, -0.2, 0.3, -0.4]])
    model = PCAModel(samples)
    model.TruncateModel(3)
    mean_vector = model.Average()
    pairs = model.Eigenpairs()
    variance_proportions = model.VarianceProportion()
    logging.info("average = {}".format(mean_vector))
    logging.info("eigenpairs = {}".format(pairs))
    logging.info("varianceProportionList = {}".format(variance_proportions))

    projected = model.Project(samples)
    logging.info("projection = {}".format(projected))
    restored = model.Reconstruct(projected)
    logging.info("reconstruction = {}".format(restored))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"logging.basicConfig",
"numpy.array",
"PCA.PCAModel.PCAModel",
"logging.info"
] | [((85, 208), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s"""'}), "(level=logging.DEBUG, format=\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')\n", (104, 208), False, 'import logging\n'), ((232, 272), 'logging.info', 'logging.info', (['"""pca_toy_vector.py main()"""'], {}), "('pca_toy_vector.py main()')\n", (244, 272), False, 'import logging\n'), ((281, 367), 'numpy.array', 'np.array', (['[[0.14, -2.3, 1.58, 1], [-1.2, 1.62, 0.76, -1], [0.1, -0.2, 0.3, -0.4]]'], {}), '([[0.14, -2.3, 1.58, 1], [-1.2, 1.62, 0.76, -1], [0.1, -0.2, 0.3, -\n 0.4]])\n', (289, 367), True, 'import numpy as np\n'), ((378, 389), 'PCA.PCAModel.PCAModel', 'PCAModel', (['X'], {}), '(X)\n', (386, 389), False, 'from PCA.PCAModel import PCAModel\n')] |
import numpy as np
import os
import pickle
from utils import load_config_file
import sys
if __name__ == '__main__':
    # Predict ages on a held-out test set using models pretrained on the
    # training set (PCA for features, GPR and optionally LR for ages).
    # Usage: python <script> <config_file>
    config_file_name = sys.argv[1]
    config = load_config_file(config_file_name)

    # create output folder if it doesn't exist
    output_folder = config["Output Folder"]
    if not os.path.isdir(output_folder):
        os.mkdir(output_folder)

    # load test set (features, ages, subject IDs)
    input_feature_path_testing = config["Input Feature Path Testing"]
    X_testing = np.load(input_feature_path_testing)
    input_age_path_testing = config["Input Age Path Testing"]
    Y_testing = np.load(input_age_path_testing)
    input_subjects_path_testing = config["Input Subject IDs Path Testing"]
    IDs_testing = np.load(input_subjects_path_testing)

    # extract PCs of the test set using the PCA fitted on the training set
    pretrained_folder = config["Pretrained Folder"]
    with open(os.path.join(pretrained_folder, 'pca_training_set.pickle'), 'rb') as input_file:
        pca = pickle.load(input_file)
    X_testing = pca.transform(X_testing)

    age_mean = np.load(os.path.join(pretrained_folder, 'mean_age_training_set.npy'))
    Y_training_orig = np.load(os.path.join(pretrained_folder, 'y_training_set.npy'))
    Y_training = Y_training_orig - age_mean

    # age estimation with GPR (predictions are mean-centered; add mean back)
    with open(os.path.join(pretrained_folder, 'gpr_model.pickle'), 'rb') as input_file:
        gpr = pickle.load(input_file)
    Y_pred_testing_gpr = gpr.predict(X_testing) + age_mean

    # age estimation with LR if specified
    perform_lr = config["Add LR predictions"]
    if perform_lr:
        with open(os.path.join(pretrained_folder, 'lr_model.pickle'), 'rb') as input_file:
            lr = pickle.load(input_file)
        # Re-bin the training ages the same way the LR classifier was trained.
        # NOTE: np.histogram returns (counts, edges); only the edges are used.
        bin_counts, bin_edges = np.histogram(Y_training_orig, 40)
        ix_bins = np.digitize(Y_training_orig, bin_edges)
        Y_training_orig_binned = bin_edges[ix_bins - 1]
        age_mean = np.mean(Y_training_orig_binned)
        Y_training = Y_training_orig_binned - age_mean
        for i in range(np.shape(Y_training)[0]):
            Y_training[i] = int(Y_training[i])
        Y_pred_testing_lr = lr.predict(X_testing) + int(age_mean)

        # LR weights: the classifier's probability for its own predicted age
        lr_probabilities = lr.predict_proba(X_testing)
        all_ages = np.unique(Y_training) + int(age_mean)
        lr_weights = []
        for i in range(np.shape(lr_probabilities)[0]):
            pred_age_i = Y_pred_testing_lr[i]
            ix = np.where(all_ages == pred_age_i)
            weight_i = lr_probabilities[i][ix][0]
            lr_weights.append(weight_i)

        # weighted average of the GPR and LR predictions
        Y_pred_testing = []
        for i in range(len(lr_weights)):
            w_i = lr_weights[i]
            pred_i = (Y_pred_testing_gpr[i] + w_i * Y_pred_testing_lr[i]) / (1 + w_i)
            Y_pred_testing.append(pred_i)
    else:
        Y_pred_testing = Y_pred_testing_gpr

    # write results in output text file, grouped by subject ID
    text = ''
    curr_ID = ''
    for i in range(len(IDs_testing)):
        test_ID = IDs_testing[i]
        if not curr_ID == test_ID:
            text += test_ID + '\n'
            curr_ID = test_ID
        text += str(Y_pred_testing[i]) + \
                ' - AE = ' + str(np.abs(Y_pred_testing[i] - Y_testing[i])) + \
                ' - Real age = ' + str(Y_testing[i]) + '\n'
    output_file_name = os.path.join(output_folder, 'predictions_test_dataset.txt')
    with open(output_file_name, 'w') as f:
        # the `with` statement closes the file; no explicit close needed
        f.write(text)
| [
"numpy.mean",
"utils.load_config_file",
"numpy.histogram",
"numpy.abs",
"numpy.unique",
"numpy.digitize",
"numpy.where",
"os.path.join",
"pickle.load",
"os.path.isdir",
"os.mkdir",
"numpy.shape",
"numpy.load"
] | [((175, 209), 'utils.load_config_file', 'load_config_file', (['config_file_name'], {}), '(config_file_name)\n', (191, 209), False, 'from utils import load_config_file\n'), ((491, 526), 'numpy.load', 'np.load', (['input_feature_path_testing'], {}), '(input_feature_path_testing)\n', (498, 526), True, 'import numpy as np\n'), ((607, 638), 'numpy.load', 'np.load', (['input_age_path_testing'], {}), '(input_age_path_testing)\n', (614, 638), True, 'import numpy as np\n'), ((734, 770), 'numpy.load', 'np.load', (['input_subjects_path_testing'], {}), '(input_subjects_path_testing)\n', (741, 770), True, 'import numpy as np\n'), ((3388, 3447), 'os.path.join', 'os.path.join', (['output_folder', '"""predictions_test_dataset.txt"""'], {}), "(output_folder, 'predictions_test_dataset.txt')\n", (3400, 3447), False, 'import os\n'), ((317, 345), 'os.path.isdir', 'os.path.isdir', (['output_folder'], {}), '(output_folder)\n', (330, 345), False, 'import os\n'), ((356, 379), 'os.mkdir', 'os.mkdir', (['output_folder'], {}), '(output_folder)\n', (364, 379), False, 'import os\n'), ((968, 991), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (979, 991), False, 'import pickle\n'), ((1058, 1118), 'os.path.join', 'os.path.join', (['pretrained_folder', '"""mean_age_training_set.npy"""'], {}), "(pretrained_folder, 'mean_age_training_set.npy')\n", (1070, 1118), False, 'import os\n'), ((1151, 1204), 'os.path.join', 'os.path.join', (['pretrained_folder', '"""y_training_set.npy"""'], {}), "(pretrained_folder, 'y_training_set.npy')\n", (1163, 1204), False, 'import os\n'), ((1388, 1411), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (1399, 1411), False, 'import pickle\n'), ((1750, 1783), 'numpy.histogram', 'np.histogram', (['Y_training_orig', '(40)'], {}), '(Y_training_orig, 40)\n', (1762, 1783), True, 'import numpy as np\n'), ((1803, 1842), 'numpy.digitize', 'np.digitize', (['Y_training_orig', 'bin_edges'], {}), '(Y_training_orig, bin_edges)\n', (1814, 
1842), True, 'import numpy as np\n'), ((1920, 1951), 'numpy.mean', 'np.mean', (['Y_training_orig_binned'], {}), '(Y_training_orig_binned)\n', (1927, 1951), True, 'import numpy as np\n'), ((872, 930), 'os.path.join', 'os.path.join', (['pretrained_folder', '"""pca_training_set.pickle"""'], {}), "(pretrained_folder, 'pca_training_set.pickle')\n", (884, 930), False, 'import os\n'), ((1299, 1350), 'os.path.join', 'os.path.join', (['pretrained_folder', '"""gpr_model.pickle"""'], {}), "(pretrained_folder, 'gpr_model.pickle')\n", (1311, 1350), False, 'import os\n'), ((1694, 1717), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (1705, 1717), False, 'import pickle\n'), ((2277, 2298), 'numpy.unique', 'np.unique', (['Y_training'], {}), '(Y_training)\n', (2286, 2298), True, 'import numpy as np\n'), ((2461, 2493), 'numpy.where', 'np.where', (['(all_ages == pred_age_i)'], {}), '(all_ages == pred_age_i)\n', (2469, 2493), True, 'import numpy as np\n'), ((1603, 1653), 'os.path.join', 'os.path.join', (['pretrained_folder', '"""lr_model.pickle"""'], {}), "(pretrained_folder, 'lr_model.pickle')\n", (1615, 1653), False, 'import os\n'), ((2032, 2052), 'numpy.shape', 'np.shape', (['Y_training'], {}), '(Y_training)\n', (2040, 2052), True, 'import numpy as np\n'), ((2364, 2390), 'numpy.shape', 'np.shape', (['lr_probabilities'], {}), '(lr_probabilities)\n', (2372, 2390), True, 'import numpy as np\n'), ((3257, 3297), 'numpy.abs', 'np.abs', (['(Y_pred_testing[i] - Y_testing[i])'], {}), '(Y_pred_testing[i] - Y_testing[i])\n', (3263, 3297), True, 'import numpy as np\n')] |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import re
import numpy as np
import tensorflow as tf
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
from sklearn.model_selection import train_test_split
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import hparam
from google.cloud.storage import blob, bucket, client
import trainer.dataset
import trainer.model
import trainer.ml_helpers
import trainer.top_words
def generate_experiment_fn(**experiment_args):
  """Create an experiment function.

  Args:
    experiment_args: keyword arguments to be passed through to experiment
      See `tf.contrib.learn.Experiment` for full args.
  Returns:
    A function:
      (tf.contrib.learn.RunConfig, tf.contrib.training.HParams) -> Experiment

    This function is used by learn_runner to create an Experiment which
    executes model code provided in the form of an Estimator and
    input functions.
  """
  def _experiment_fn(config, hparams):
    index_to_component = {}

    # Training data comes either from a local/GCS file given explicitly,
    # or is fetched from the configured GCS bucket/prefix.
    if hparams.train_file:
      with open(hparams.train_file) as f:
        if hparams.trainer_type == 'spam':
          training_data = trainer.ml_helpers.spam_from_file(f)
        else:
          training_data = trainer.ml_helpers.component_from_file(f)
    else:
      training_data = trainer.dataset.fetch_training_data(hparams.gcs_bucket,
        hparams.gcs_prefix, hparams.trainer_type)

    tf.logging.info('Training data received. Len: %d' % len(training_data))

    # Feature extraction differs between the spam and component trainers;
    # the component path also builds an index -> component-name mapping.
    if hparams.trainer_type == 'spam':
      X, y = trainer.ml_helpers.transform_spam_csv_to_features(
          training_data)
    else:
      top_list = trainer.top_words.make_top_words_list(hparams.job_dir)
      X, y, index_to_component = trainer.ml_helpers \
          .transform_component_csv_to_features(training_data, top_list)

    tf.logging.info('Features generated')
    # Hold out 20% for evaluation; fixed seed keeps splits reproducible.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
      random_state=42)

    train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x=trainer.model.feature_list_to_dict(X_train, hparams.trainer_type),
      y=np.array(y_train),
      num_epochs=hparams.num_epochs,
      batch_size=hparams.train_batch_size,
      shuffle=True
    )
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x=trainer.model.feature_list_to_dict(X_test, hparams.trainer_type),
      y=np.array(y_test),
      num_epochs=None,
      batch_size=hparams.eval_batch_size,
      shuffle=False  # Don't shuffle evaluation data
    )

    tf.logging.info('Numpy fns created')

    if hparams.trainer_type == 'component':
      # Persist the index -> component mapping so predictions can be decoded.
      store_component_conversion(hparams.job_dir, index_to_component)

    return tf.contrib.learn.Experiment(
      trainer.model.build_estimator(config=config,
                                    trainer_type=hparams.trainer_type,
                                    class_count=len(set(y))),
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      **experiment_args
    )
  return _experiment_fn
def store_component_conversion(job_dir, data):
  """Store the class-index -> component mapping alongside the trained model.

  Args:
    job_dir: GCS path (gs://<project>-mlengine/component_trainer_<id>) when
      running on ML Engine, or a local directory when training locally.
    data: JSON-serializable mapping of class indices to component names.
  """
  tf.logging.info('job_dir: %s' % job_dir)
  # Raw string so \d is a regex digit class, not an (invalid) string escape.
  job_info = re.search(r'gs://(monorail-.+)-mlengine/(component_trainer_\d+)',
                       job_dir)

  # Check if training is being done on GAE or locally.
  if job_info:
    project = job_info.group(1)
    job_name = job_info.group(2)

    client_obj = client.Client(project=project)
    bucket_name = '%s-mlengine' % project
    bucket_obj = bucket.Bucket(client_obj, bucket_name)

    bucket_obj.blob = blob.Blob(job_name + '/component_index.json', bucket_obj)

    bucket_obj.blob.upload_from_string(json.dumps(data),
                                       content_type='application/json')
  else:
    # Create the job directory (and any missing parents) locally.
    if not os.path.exists(job_dir):
      os.makedirs(job_dir)
    with open(job_dir + '/component_index.json', 'w') as f:
      f.write(json.dumps(data))
def store_eval(job_dir, results):
tf.logging.info('job_dir: %s' % job_dir)
job_info = re.search('gs://(monorail-.+)-mlengine/(spam_trainer_\d+)',
job_dir)
# Only upload eval data if this is not being run locally.
if job_info:
project = job_info.group(1)
job_name = job_info.group(2)
tf.logging.info('project: %s' % project)
tf.logging.info('job_name: %s' % job_name)
client_obj = client.Client(project=project)
bucket_name = '%s-mlengine' % project
bucket_obj = bucket.Bucket(client_obj, bucket_name)
bucket_obj.blob = blob.Blob(job_name + '/eval_data.json', bucket_obj)
for key, value in results[0].items():
if isinstance(value, np.float32):
results[0][key] = value.item()
bucket_obj.blob.upload_from_string(json.dumps(results[0]),
content_type='application/json')
else:
tf.logging.error('Could not find bucket "%s" to output evalution to.'
% job_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Input Arguments
parser.add_argument(
'--train-file',
help='GCS or local path to training data',
)
parser.add_argument(
'--gcs-bucket',
help='GCS bucket for training data.',
)
parser.add_argument(
'--gcs-prefix',
help='Training data path prefix inside GCS bucket.',
)
parser.add_argument(
'--num-epochs',
help="""\
Maximum number of training data epochs on which to train.
If both --max-steps and --num-epochs are specified,
the training job will run for --max-steps or --num-epochs,
whichever occurs first. If unspecified will run for --max-steps.\
""",
type=int,
)
parser.add_argument(
'--train-batch-size',
help='Batch size for training steps',
type=int,
default=128
)
parser.add_argument(
'--eval-batch-size',
help='Batch size for evaluation steps',
type=int,
default=128
)
# Training arguments
parser.add_argument(
'--job-dir',
help='GCS location to write checkpoints and export models',
required=True
)
# Logging arguments
parser.add_argument(
'--verbosity',
choices=[
'DEBUG',
'ERROR',
'FATAL',
'INFO',
'WARN'
],
default='INFO',
)
# Experiment arguments
parser.add_argument(
'--eval-delay-secs',
help='How long to wait before running first evaluation',
default=10,
type=int
)
parser.add_argument(
'--min-eval-frequency',
help='Minimum number of training steps between evaluations',
default=None, # Use TensorFlow's default (currently, 1000)
type=int
)
parser.add_argument(
'--train-steps',
help="""\
Steps to run the training job for. If --num-epochs is not specified,
this must be. Otherwise the training job will run indefinitely.\
""",
type=int
)
parser.add_argument(
'--eval-steps',
help='Number of steps to run evalution for at each checkpoint',
default=100,
type=int
)
parser.add_argument(
'--trainer-type',
help='Which trainer to use (spam or component)',
choices=['spam', 'component'],
required=True
)
args = parser.parse_args()
tf.logging.set_verbosity(args.verbosity)
# Set C++ Graph Execution level verbosity.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
tf.logging.__dict__[args.verbosity] / 10)
# Run the training job
# learn_runner pulls configuration information from environment
# variables using tf.learn.RunConfig and uses this configuration
# to conditionally execute Experiment, or param server code.
eval_results = learn_runner.run(
generate_experiment_fn(
min_eval_frequency=args.min_eval_frequency,
eval_delay_secs=args.eval_delay_secs,
train_steps=args.train_steps,
eval_steps=args.eval_steps,
export_strategies=[saved_model_export_utils.make_export_strategy(
trainer.model.SERVING_FUNCTIONS['JSON-' + args.trainer_type],
exports_to_keep=1,
default_output_alternative_key=None,
)],
),
run_config=run_config.RunConfig(model_dir=args.job_dir),
hparams=hparam.HParams(**args.__dict__)
)
# Store a json blob in GCS with the results of training job (AUC of
# precision/recall, etc).
if args.trainer_type == 'spam':
store_eval(args.job_dir, eval_results)
| [
"tensorflow.contrib.training.python.training.hparam.HParams",
"tensorflow.logging.error",
"google.cloud.storage.blob.Blob",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"tensorflow.logging.info",
"json.dumps",
"tensorflow.logging.set_verbosity",
"tensorflow.contrib.learn.py... | [((3648, 3688), 'tensorflow.logging.info', 'tf.logging.info', (["('job_dir: %s' % job_dir)"], {}), "('job_dir: %s' % job_dir)\n", (3663, 3688), True, 'import tensorflow as tf\n'), ((3702, 3776), 're.search', 're.search', (['"""gs://(monorail-.+)-mlengine/(component_trainer_\\\\d+)"""', 'job_dir'], {}), "('gs://(monorail-.+)-mlengine/(component_trainer_\\\\d+)', job_dir)\n", (3711, 3776), False, 'import re\n'), ((4617, 4657), 'tensorflow.logging.info', 'tf.logging.info', (["('job_dir: %s' % job_dir)"], {}), "('job_dir: %s' % job_dir)\n", (4632, 4657), True, 'import tensorflow as tf\n'), ((4671, 4740), 're.search', 're.search', (['"""gs://(monorail-.+)-mlengine/(spam_trainer_\\\\d+)"""', 'job_dir'], {}), "('gs://(monorail-.+)-mlengine/(spam_trainer_\\\\d+)', job_dir)\n", (4680, 4740), False, 'import re\n'), ((5631, 5656), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5654, 5656), False, 'import argparse\n'), ((7817, 7857), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['args.verbosity'], {}), '(args.verbosity)\n', (7841, 7857), True, 'import tensorflow as tf\n'), ((2407, 2444), 'tensorflow.logging.info', 'tf.logging.info', (['"""Features generated"""'], {}), "('Features generated')\n", (2422, 2444), True, 'import tensorflow as tf\n'), ((2484, 2538), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (2500, 2538), False, 'from sklearn.model_selection import train_test_split\n'), ((3094, 3130), 'tensorflow.logging.info', 'tf.logging.info', (['"""Numpy fns created"""'], {}), "('Numpy fns created')\n", (3109, 3130), True, 'import tensorflow as tf\n'), ((3953, 3983), 'google.cloud.storage.client.Client', 'client.Client', ([], {'project': 'project'}), '(project=project)\n', (3966, 3983), False, 'from google.cloud.storage import blob, bucket, client\n'), ((4043, 4081), 
'google.cloud.storage.bucket.Bucket', 'bucket.Bucket', (['client_obj', 'bucket_name'], {}), '(client_obj, bucket_name)\n', (4056, 4081), False, 'from google.cloud.storage import blob, bucket, client\n'), ((4105, 4162), 'google.cloud.storage.blob.Blob', 'blob.Blob', (["(job_name + '/component_index.json')", 'bucket_obj'], {}), "(job_name + '/component_index.json', bucket_obj)\n", (4114, 4162), False, 'from google.cloud.storage import blob, bucket, client\n'), ((4909, 4949), 'tensorflow.logging.info', 'tf.logging.info', (["('project: %s' % project)"], {}), "('project: %s' % project)\n", (4924, 4949), True, 'import tensorflow as tf\n'), ((4954, 4996), 'tensorflow.logging.info', 'tf.logging.info', (["('job_name: %s' % job_name)"], {}), "('job_name: %s' % job_name)\n", (4969, 4996), True, 'import tensorflow as tf\n'), ((5015, 5045), 'google.cloud.storage.client.Client', 'client.Client', ([], {'project': 'project'}), '(project=project)\n', (5028, 5045), False, 'from google.cloud.storage import blob, bucket, client\n'), ((5105, 5143), 'google.cloud.storage.bucket.Bucket', 'bucket.Bucket', (['client_obj', 'bucket_name'], {}), '(client_obj, bucket_name)\n', (5118, 5143), False, 'from google.cloud.storage import blob, bucket, client\n'), ((5167, 5218), 'google.cloud.storage.blob.Blob', 'blob.Blob', (["(job_name + '/eval_data.json')", 'bucket_obj'], {}), "(job_name + '/eval_data.json', bucket_obj)\n", (5176, 5218), False, 'from google.cloud.storage import blob, bucket, client\n'), ((5489, 5574), 'tensorflow.logging.error', 'tf.logging.error', (['(\'Could not find bucket "%s" to output evalution to.\' % job_dir)'], {}), '(\'Could not find bucket "%s" to output evalution to.\' % job_dir\n )\n', (5505, 5574), True, 'import tensorflow as tf\n'), ((4203, 4219), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4213, 4219), False, 'import json\n'), ((5380, 5402), 'json.dumps', 'json.dumps', (['results[0]'], {}), '(results[0])\n', (5390, 5402), False, 'import json\n'), 
((8689, 8733), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {'model_dir': 'args.job_dir'}), '(model_dir=args.job_dir)\n', (8709, 8733), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((8747, 8778), 'tensorflow.contrib.training.python.training.hparam.HParams', 'hparam.HParams', ([], {}), '(**args.__dict__)\n', (8761, 8778), False, 'from tensorflow.contrib.training.python.training import hparam\n'), ((2686, 2703), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2694, 2703), True, 'import numpy as np\n'), ((2948, 2964), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (2956, 2964), True, 'import numpy as np\n'), ((4560, 4576), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (4570, 4576), False, 'import json\n'), ((8468, 8640), 'tensorflow.contrib.learn.python.learn.utils.saved_model_export_utils.make_export_strategy', 'saved_model_export_utils.make_export_strategy', (["trainer.model.SERVING_FUNCTIONS['JSON-' + args.trainer_type]"], {'exports_to_keep': '(1)', 'default_output_alternative_key': 'None'}), "(trainer.model.\n SERVING_FUNCTIONS['JSON-' + args.trainer_type], exports_to_keep=1,\n default_output_alternative_key=None)\n", (8513, 8640), False, 'from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils\n')] |
import cv2
import numpy as np
import glob
def comparator(result, groundTruth):
"""
Compares the background/foreground of 2 grayscale images as states in
http://jacarini.dinf.usherbrooke.ca/datasetOverview/
but with some simplification: the shadows and unknown are considered as good classification in either case.
:param result: model background subtraction output
:param groundTruth: expected result
:return: tp, fp, fn, tn
"""
result = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
groundTruth = cv2.cvtColor(groundTruth, cv2.COLOR_BGR2GRAY)
FOREGROUND = 255
BACKGROUND = 0
UNKNOWN = 170
SHADOW = 50
bg_result = result == BACKGROUND
fg_result = result == FOREGROUND
# We will consider that UNKNOWN and SHADOW can be both considered as background or foreground
bg_groundTruth = (groundTruth == BACKGROUND) | (groundTruth == UNKNOWN) | (groundTruth == SHADOW)
fg_groundTruth = (groundTruth == FOREGROUND) | (groundTruth == UNKNOWN) | (groundTruth == SHADOW)
tp = sum(sum(np.bitwise_and(fg_result, fg_groundTruth)))
fp = sum(sum(np.bitwise_and(fg_result, np.bitwise_not(fg_groundTruth))))
fn = sum(sum(np.bitwise_and(bg_result, np.bitwise_not(bg_groundTruth))))
tn = sum(sum(np.bitwise_and(bg_result, bg_groundTruth)))
return tp, fp, fn, tn
# def im2im(loadPath, savePath):
# """
# :param loadPath: 'DATA/baseline/baseline/office/input/*.jpg'
# :param savePath: 'DATA/baseline/results/office/'
# :return:
# """
# img_array = []
# for filename in glob.glob(loadPath):
# img = cv2.imread(filename)
# height, width, layers = img.shape
# size = (width, height)
# img_array.append(img)
#
# for i in range(len(img_array)):
# filepath = 'DATA/baseline/results/office/'
# filename = 'out' + str(i).zfill(6)+'.jpg'
# cv2.imwrite(filepath + filename, img_array[i])
def loadImages(loadPath):
"""
Load all images in the specified file and returns an array with all of them.
:param loadPath:
:return: array with all images in the specified path
"""
img_array = []
for filename in glob.glob(loadPath):
img = cv2.imread(filename)
height, width, layers = img.shape
size = (width, height)
img_array.append(img)
return img_array
# def saveImages (savePath):
#
# """
# :param savePath:
# :return:
# """
# filepath = 'DATA/baseline/results/office/'
# filename = 'out' + str(i).zfill(6) + '.jpg'
# cv2.imwrite(filepath + filename, img_array[i])
def exponentialFilter(img_array, alpha, savePath):
# We get the shape of the images
im_shape = img_array[0].shape
# Initialize kernel for morphology transformation
kernel = np.ones((2, 2), np.uint8)
# Number of initial frames to obtain the starting background
init_frames = 20
# learning rate [0,1]
alpha = 0.05
# Initial background calculation
background = np.zeros(shape=im_shape[0:2])
for i in range(init_frames):
frame = img_array[i]
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
background = background + frame
background = background / init_frames
background = background.astype(np.uint8)
# Algortihm aplication
for i in range(init_frames + 1, len(img_array)):
# Take the next frame/image
frame = img_array[i]
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Substract the background from the frame to get the foreground (out)
out = np.abs(frame - background)
ret, out = cv2.threshold(out, 100, 255, cv2.THRESH_BINARY)
out = cv2.morphologyEx(out, cv2.MORPH_OPEN, kernel)
# Calculate the new background
background = ((1 - alpha) * background + alpha * frame).astype(np.uint8)
# Save the result to the specified path
filepath = savePath
filename = 'out' + str(i).zfill(6) + '.jpg'
cv2.imwrite(filepath + filename, out)
def MOG(img_array, savePath):
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
for i in range(len(img_array)):
frame = img_array[i]
fgmask = fgbg.apply(frame)
# Save the result to the specified path
filepath = savePath
filename = 'out' + str(i).zfill(6) + '.jpg'
cv2.imwrite(filepath + filename, fgmask)
def MOG2(img_array, savePath):
fgbg = cv2.createBackgroundSubtractorMOG2()
for i in range(len(img_array)):
frame = img_array[i]
fgmask = fgbg.apply(frame)
# Save the result to the specified path
filepath = savePath
filename = 'out' + str(i).zfill(6) + '.jpg'
cv2.imwrite(filepath + filename, fgmask)
def im2vid_2(img_array, filename):
"""
:param img_array:
:param savePath:
:param filename:
:return:
"""
size = img_array[0].shape[0:2]
# out = cv2.VideoWriter('result_' + filename, cv2.VideoWriter_fourcc(*'mp4v'), 24, size) # Save video as mp4 format
out = cv2.VideoWriter('result_' + filename, cv2.VideoWriter_fourcc(*'DIVX'), 24, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
def im2vid(img_path, name):
img_array = []
for filename in glob.glob(img_path):
img = cv2.imread(filename)
height, width, layers = img.shape
size = (width, height)
img_array.append(img)
out = cv2.VideoWriter(name, cv2.VideoWriter_fourcc(*'mp4v'), 24, size) # Save video as mp4 format
#out = cv2.VideoWriter(name, cv2.VideoWriter_fourcc(*'DIVX'), 24, size)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
def showVideo (path):
cap = cv2.VideoCapture(path)
while (1):
ret, frame = cap.read()
cv2.imshow('frame', frame)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
# loadPath = 'DATA/baseline/baseline/highway/input/*.jpg'
# gtHighwayPath = 'DATA/baseline/baseline/highway/groundtruth/*.png'
# savePath = 'DATA/baseline/results/highway_MOG/'
# images = loadImages(loadPath)
# gt_hihgway_frames = loadImages(gtHighwayPath)
# # exponentialFilter(images,0.05,savePath)
# MOG(images, savePath)
# result_MOG = loadImages('DATA/baseline/results/highway_MOG/*jpg')
# tp = np.zeros((len(result_MOG)))
# fn = np.zeros((len(result_MOG)))
# fp = np.zeros((len(result_MOG)))
# tn = np.zeros((len(result_MOG)))
#
# for i in range(len(result_MOG)):
# tp[i],fn[i],fp[i],tn[i] = comparator(result_MOG[i],gt_hihgway_frames[i])
# print(tp[i],fn[i],fp[i],tn[i])
im2vid('DATA/baseline/results/highway_MOG/*.jpg', 'resultTest.mp4')
# im_gt = cv2.imread('DATA/baseline/baseline/highway/groundtruth/gt000684.png')
# im_gt_gray = cv2.cvtColor(im_gt, cv2.COLOR_BGR2GRAY)
# im_in = cv2.imread('DATA/baseline/baseline/office/input/in000001')
#
# # test_result = np.zeros(shape = (20,20))
# # plt.imshow(test_result, cmap="gray")
# # plt.show()
# #
# # test_gt = np.zeros(shape = (20,20))
# # test_gt[0:10,5:10] = 255
# # plt.imshow(test_gt, cmap="gray")
# # plt.show()
# #
# # print(comparator(test_result,test_gt))
# # test_gt[15:17,12:17] = 50
# # plt.imshow(test_gt, cmap="gray")
# # plt.show()
#
# test_result = np.zeros(shape = (3,3))
# test_result[0,1:3] = 255
# test_result[1,2] = 255
# plt.imshow(test_result, cmap="gray")
# plt.show()
#
# test_gt = np.zeros(shape = (3,3))
# test_gt[0:2,1:3] = 255
# test_gt[1,1] = 170
# plt.imshow(test_gt, cmap="gray")
# plt.show()
#
# print(comparator(test_result,test_gt))
| [
"cv2.createBackgroundSubtractorMOG2",
"numpy.abs",
"cv2.imwrite",
"numpy.bitwise_not",
"numpy.ones",
"cv2.threshold",
"cv2.bgsegm.createBackgroundSubtractorMOG",
"cv2.imshow",
"cv2.morphologyEx",
"numpy.zeros",
"numpy.bitwise_and",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor... | [((477, 517), 'cv2.cvtColor', 'cv2.cvtColor', (['result', 'cv2.COLOR_BGR2GRAY'], {}), '(result, cv2.COLOR_BGR2GRAY)\n', (489, 517), False, 'import cv2\n'), ((536, 581), 'cv2.cvtColor', 'cv2.cvtColor', (['groundTruth', 'cv2.COLOR_BGR2GRAY'], {}), '(groundTruth, cv2.COLOR_BGR2GRAY)\n', (548, 581), False, 'import cv2\n'), ((2184, 2203), 'glob.glob', 'glob.glob', (['loadPath'], {}), '(loadPath)\n', (2193, 2203), False, 'import glob\n'), ((2800, 2825), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.uint8'], {}), '((2, 2), np.uint8)\n', (2807, 2825), True, 'import numpy as np\n'), ((3011, 3040), 'numpy.zeros', 'np.zeros', ([], {'shape': 'im_shape[0:2]'}), '(shape=im_shape[0:2])\n', (3019, 3040), True, 'import numpy as np\n'), ((4074, 4116), 'cv2.bgsegm.createBackgroundSubtractorMOG', 'cv2.bgsegm.createBackgroundSubtractorMOG', ([], {}), '()\n', (4114, 4116), False, 'import cv2\n'), ((4439, 4475), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (4473, 4475), False, 'import cv2\n'), ((5287, 5306), 'glob.glob', 'glob.glob', (['img_path'], {}), '(img_path)\n', (5296, 5306), False, 'import glob\n'), ((5745, 5767), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path'], {}), '(path)\n', (5761, 5767), False, 'import cv2\n'), ((5946, 5969), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5967, 5969), False, 'import cv2\n'), ((2219, 2239), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (2229, 2239), False, 'import cv2\n'), ((3119, 3158), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (3131, 3158), False, 'import cv2\n'), ((3449, 3488), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (3461, 3488), False, 'import cv2\n'), ((3582, 3608), 'numpy.abs', 'np.abs', (['(frame - background)'], {}), '(frame - background)\n', (3588, 3608), True, 'import numpy as np\n'), ((3628, 3675), 
'cv2.threshold', 'cv2.threshold', (['out', '(100)', '(255)', 'cv2.THRESH_BINARY'], {}), '(out, 100, 255, cv2.THRESH_BINARY)\n', (3641, 3675), False, 'import cv2\n'), ((3690, 3735), 'cv2.morphologyEx', 'cv2.morphologyEx', (['out', 'cv2.MORPH_OPEN', 'kernel'], {}), '(out, cv2.MORPH_OPEN, kernel)\n', (3706, 3735), False, 'import cv2\n'), ((3993, 4030), 'cv2.imwrite', 'cv2.imwrite', (['(filepath + filename)', 'out'], {}), '(filepath + filename, out)\n', (4004, 4030), False, 'import cv2\n'), ((4354, 4394), 'cv2.imwrite', 'cv2.imwrite', (['(filepath + filename)', 'fgmask'], {}), '(filepath + filename, fgmask)\n', (4365, 4394), False, 'import cv2\n'), ((4713, 4753), 'cv2.imwrite', 'cv2.imwrite', (['(filepath + filename)', 'fgmask'], {}), '(filepath + filename, fgmask)\n', (4724, 4753), False, 'import cv2\n'), ((5088, 5119), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (5110, 5119), False, 'import cv2\n'), ((5322, 5342), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (5332, 5342), False, 'import cv2\n'), ((5479, 5510), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (5501, 5510), False, 'import cv2\n'), ((5824, 5850), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (5834, 5850), False, 'import cv2\n'), ((1053, 1094), 'numpy.bitwise_and', 'np.bitwise_and', (['fg_result', 'fg_groundTruth'], {}), '(fg_result, fg_groundTruth)\n', (1067, 1094), True, 'import numpy as np\n'), ((1268, 1309), 'numpy.bitwise_and', 'np.bitwise_and', (['bg_result', 'bg_groundTruth'], {}), '(bg_result, bg_groundTruth)\n', (1282, 1309), True, 'import numpy as np\n'), ((5863, 5878), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (5874, 5878), False, 'import cv2\n'), ((1140, 1170), 'numpy.bitwise_not', 'np.bitwise_not', (['fg_groundTruth'], {}), '(fg_groundTruth)\n', (1154, 1170), True, 'import numpy as np\n'), ((1217, 1247), 'numpy.bitwise_not', 'np.bitwise_not', 
(['bg_groundTruth'], {}), '(bg_groundTruth)\n', (1231, 1247), True, 'import numpy as np\n')] |
import torch
import math
import numpy as np
from matplotlib import path
import pdb
class BoxSampler(object):
def __init__(self,
RoI_number=1,
IoU_bin_bases=torch.tensor([0.73,0.12,0.15,0.05,0], dtype=torch.float),
IoU_weights=torch.tensor([0.5,0.6,0.7,0.8,0.9], dtype=torch.float),
IoU_limit_precision=1e-5):
super(BoxSampler,self).__init__()
'''
INPUTS:
RoI_number : Number of RoIs/boxes to generate
IoU_bin_bases : N dimensional tensor storing the lower bounds for the bins.
Ex.[0.5, 0.6, 0.7, 0.8, 0.9] then there are 5 bins from [0.5,0.6] to [0.9, 1.0]
IoU_weights: N dimensional tensor storing the weights of the bins.
IoU_limit_precision: While drawing the limits for an IoU (e.g. see Fig.2 red curves),
it show the precision of the points. This is the part that makes the
algorithm a bit slower and needs an improvement.
'''
self.RoI_number=RoI_number
self.IoU_bin_bases=IoU_bin_bases
self.IoU_weights=IoU_weights
self.IoU_limit_precision=IoU_limit_precision
self.IoU_bin_tops=torch.cat([IoU_bin_bases[1:], torch.tensor([1.])])
self.bin_width=self.IoU_bin_tops-self.IoU_bin_bases
# We assume that self.reference_box is a square. Following coordinates are preferred
# since even the IoU=0.5, the limits will be always positive (see Fig.2 or Fig.6 in the paper).
self.reference_box=[0.3, 0.3, 0.6, 0.6]
def isnan(self,x):
return x != x
def sample_single(self, B, IoUs, imgSize):
'''
Samples a set of bounding boxes for a given input BB.
INPUTS:
B : Input BB (i.e. B in Alg.1 in the paper) Mx4 dimensional tensor.
A BB is represented by [TL_x, TL_y, BR_x, BR_y]
IoUs : Set of IoU thresholds. T in Alg.1. A box is generated for each IoU.
imgSize : [width, height] of the image. Ensures that the generated box is in the image.
'''
#Normalize the input box such that it is shifted/scaled on the reference box
#that resides at [0.3, 0.3, 0.6, 0.6]. Save scale and shift, for renormalization
#before returning. All operations are conducted within [0, 1] range. Hence we do not,
#normalize image, we normalize the boxes owing to Theorems 1 and 2 in the paper.
inputBox, scale, shift=self.normalize(B.clone().detach().unsqueeze(0))
#BoundingBoxGenerator is doing exactly what Alg.1 in the paper achieves.
#Given a GT/input BB and and IoU, it generates the boxes with the desired IoU.
#To make it more efficient, it generates sample_count boxes for
#a GT at once.
sample_count =IoUs.shape[0]
sampledBoxSet=self.BoundingBoxGenerator(inputBox.squeeze(), IoUs, sample_count)
#Given the generated boxes from a BB, now we map the generated boxes to the image by reshifting and rescaling.
sampledBoxSet=self.unnormalize(sampledBoxSet, scale[0], shift[0])
#Clamp the boxes from 0 and imgSize to ensure that they are in the image.
sampledBoxSet[:,[0,2]]=torch.clamp(sampledBoxSet[:,[0,2]], 0, imgSize[0])
sampledBoxSet[:,[1,3]]=torch.clamp(sampledBoxSet[:,[1,3]], 0, imgSize[1])
#Compute the bbox overlaps of the generated boxes.
generated_box_overlaps=self.computeBoxToBoxIoU(B.expand(sample_count,5)[:,:4], sampledBoxSet).squeeze()
return sampledBoxSet, generated_box_overlaps
def sample(self, inputBoxSet, imgSize):
'''
INPUTS:
inputBoxSet : Input BBs (i.e. ground truths-GTs in Alg.2 in the paper)
Mx5 dimensional tensor.
Each box is represented by [TL_x, TL_y, BR_x, BR_y, gt_label]
imgSize : [width, height] of an image
'''
#Normalize the input boxes such that all are shifted/scaled on the reference box
#that resides at [0.3, 0.3, 0.6, 0.6]. Save scales and shifts, for renormalization
#before returning. All operations are conducted within [0, 1] range. Hence we do not,
#normalize image, we normalize the boxes owing to Theorems 1 and 2.
inputBoxSet, scales, shifts=self.normalize(inputBoxSet)
boxNumber=inputBoxSet.size()[0]
#Annotations of the datasets may be incorrect especially for small objects.
#In some cases TL_x=BR_x (same for y). If there is such kind of very rare examples,
#then we catch the error here, and discard the corrupted annotation.
validIndices=torch.cuda.ByteTensor(boxNumber).fill_(1)
flag=0
for i in range(boxNumber):
if self.isnan(inputBoxSet[i,0]) or self.isnan(inputBoxSet[i,1]):
validIndices[i]=0
flag=1
if flag==1:
inputBoxSet = inputBoxSet[validIndices,:]
scales = scales[validIndices,:]
shifts = shifts[validIndices,:]
boxNumber=inputBoxSet.size()[0]
# InstanceAllocation determines:
# 1-perInputAllocation: Number of boxes to be generated for each gt. So, it is a boxNumber sized tensor.
# 2-positiveRoI_number: In some cases, number of boxes can be 1 or 2 more. So we keep the number of returned boxes.
# The sum of perInputAllocation should also provide this number.
# 3-inputBoxSetExtended: positiveRoI_numberx5 dimensional array for gts. Basically, each BB in inputBoxSet is
# duplicated for perInputAllocation[i] times. We use this info to validate/return the IoUs of
# generated boxes on computeBoxToBoxIoU function.
perInputAllocation, positiveRoI_number, inputBoxSetExtended =self.InstanceAllocation(inputBoxSet)
# Another question is the IoU distribution over the boxes. Having estimated the number of generated boxes
# for each GT, IoUAllocation assigns an IoU using the desired distribution (i.e. self.IoU_weights) for each box.
IoUSet=self.IoUAllocation(inputBoxSetExtended,positiveRoI_number)
#Initialize the necessary data structures to be returned
sampledBoxSet=torch.cuda.FloatTensor(positiveRoI_number,4).fill_(-1)
gt_inds=torch.cuda.LongTensor(positiveRoI_number).fill_(0)
indexPointer=0
for i in range(boxNumber):
#BoundingBoxGenerator is doing exactly what Alg.1 in the paper achieves.
#Given a GT and and IoU, it generates the boxes with the desired IoU.
#To make it more efficient, it generates perInputAllocation[i] boxes for
#a GT at once.
sampledBoxSet[indexPointer:indexPointer+perInputAllocation[i],:]=self.BoundingBoxGenerator(inputBoxSet[i,:],\
IoUSet[indexPointer:indexPointer+perInputAllocation[i]],\
perInputAllocation[i])
#Given the generated boxes from a GT (also GT), now we map the generated boxes to the image by reshifting and rescaling.
sampledBoxSet[indexPointer:indexPointer+perInputAllocation[i],:]=self.unnormalize(sampledBoxSet[indexPointer:indexPointer+perInputAllocation[i],:], scales[i], shifts[i])
inputBoxSetExtended[indexPointer:indexPointer+perInputAllocation[i],:4] = self.unnormalize(inputBoxSetExtended[indexPointer:indexPointer+perInputAllocation[i],:4], scales[i], shifts[i])
#In mmdetection, the association between the boxes are tracked, hence we store the mapping.
gt_inds[indexPointer:indexPointer+perInputAllocation[i]]=i+1
#Update indexpointer to show next empty cell.
indexPointer+=perInputAllocation[i]
#Clamp the boxes from 0 and imgSize to ensure that they are in the image.
sampledBoxSet[:,[0,2]]=torch.clamp(sampledBoxSet[:,[0,2]], 0, imgSize[0])
sampledBoxSet[:,[1,3]]=torch.clamp(sampledBoxSet[:,[1,3]], 0, imgSize[1])
#Compute the bbox overlaps of the generated boxes.
generated_box_overlaps=self.computeBoxToBoxIoU(inputBoxSetExtended[:,:4],sampledBoxSet).squeeze()
return sampledBoxSet, inputBoxSetExtended[:,-1].type(torch.cuda.LongTensor),generated_box_overlaps,gt_inds
def normalize(self, boxes):
#Compute shifts
shifts = boxes[:,[0,1]]
#Compute scales
scales = (torch.cat(((boxes[:,2]-boxes[:,0]).unsqueeze(1), (boxes[:,3]-boxes[:,1]).unsqueeze(1)),1))/(self.reference_box[2]-self.reference_box[0])
#All the boxes are normalized to reference box.
#One can safely following two lines by assigning the boxes[:,:4] to reference box.
boxes[:,[0,2]]=(boxes[:,[0,2]]-shifts[:,0].unsqueeze(1))/scales[:,0].unsqueeze(1)+self.reference_box[0]
boxes[:,[1,3]]=(boxes[:,[1,3]]-shifts[:,1].unsqueeze(1))/scales[:,1].unsqueeze(1)+self.reference_box[1]
return boxes, scales, shifts
def unnormalize(self, boxes,scales,shifts):
#self.reference_box[1] will work also, for different reference boxes please correct here.
boxes[:,:4]-=self.reference_box[0]
#Map the normalized boxes to the image coordinates
boxes[:,[0,2]]=boxes[:,[0,2]]*scales[0]+shifts[0]
boxes[:,[1,3]]=boxes[:,[1,3]]*scales[1]+shifts[1]
return boxes
def InstanceAllocation(self,inputBoxSet):
#Determine the number of classes and ensure the sampling to be balanced over classes
#instead of the instances. Note that this idea originates from OFB sampling in the paper.
#Here BB generator generates class-balanced examples. Hence determine perClassAllocation
# in this manner.
classes=torch.unique(inputBoxSet[:,-1])
classNumber=classes.size()[0]
perClassAllocation=math.ceil(self.RoI_number/classNumber)
#Count the number of instances from each class
classIndices=torch.cuda.FloatTensor(classNumber,inputBoxSet.size()[0]).fill_(0)
for i in range(classNumber):
classIndices[i,:]=inputBoxSet[:,-1]==classes[i]
classCounts=torch.sum(classIndices,1)
#Distribute the perClassAllocation over instances of each class equally
perInstanceAllocation=torch.ceil(perClassAllocation/classCounts)
#count the total number of positive examples determined in this fashion
positiveRoI_number=torch.sum(classCounts*perInstanceAllocation).int()
extendedInputBoxSet=torch.cuda.FloatTensor(positiveRoI_number,5).fill_(0)
instanceNumber=inputBoxSet.size()[0]
indexTracker=0
perInputAllocation=torch.cuda.FloatTensor(inputBoxSet.size()[0]).fill_(0)
for i in range(instanceNumber):
index=classes==inputBoxSet[i,-1]
extendedInputBoxSet[indexTracker:indexTracker+perInstanceAllocation[index].int()]=inputBoxSet[i,:].expand(perInstanceAllocation[index].int(),5)
indexTracker+=perInstanceAllocation[index].int()
perInputAllocation[i]=perInstanceAllocation[index].int()
# if positiveRoI_number>self.RoI_number:
# delete_idx=torch.multinomial(perInstanceAllocation,positiveRoI_number-self.RoI_number,replacement=False)
# pdb.set_trace()
# delete_idx=torch.randint(positiveRoI_number, [positiveRoI_number-self.RoI_number])
return perInputAllocation.int(), positiveRoI_number.item(), extendedInputBoxSet
def IoUAllocation(self,inputBoxSet, positiveRoI_number):
#Determine the number of examples to be sampled from each bin
IoUIndices=torch.multinomial(self.IoU_weights,positiveRoI_number,replacement=True)
#Sample the exact IoUs consdiering the bin length and base of each bin
IoUSet=(self.IoU_bin_bases[IoUIndices]+torch.rand(positiveRoI_number)*self.bin_width[IoUIndices]).cuda()
#If IoU is larger than 0.95, then it can be problematic during sampling, so set it to 0.95 for stability.
IoUSet[IoUSet>0.95]=0.95
return IoUSet
def findBottomRightMaxBorders(self,inputBox, IoU, boxArea,proposedx1,proposedy1):
xA = torch.max(proposedx1, inputBox[0])#alpha
yA = torch.max(proposedy1, inputBox[1])
xB = inputBox[2]
yB = inputBox[3]
I=torch.clamp(xB - xA,min=0) * torch.clamp(yB - yA,min=0)
limitLeftX=IoU*boxArea+xA*IoU*(inputBox[3]-yA)+xA*(inputBox[3]-yA)-IoU*proposedx1*(inputBox[3]-proposedy1)
limitLeftX/=((IoU+1)*(inputBox[3]-yA)-IoU*(inputBox[3]-proposedy1))
limitRightX=(I/IoU-boxArea+I)/(inputBox[3]-proposedy1)
limitRightX+=proposedx1
limitTopY=IoU*boxArea+IoU*(inputBox[2]-xA)*yA+yA*(inputBox[2]-xA)-IoU*proposedy1*(inputBox[2]-proposedx1)
limitTopY/=((IoU+1)*(inputBox[2]-xA)-IoU*(inputBox[2]-proposedx1))
limitBottomY=(I/IoU-boxArea+I)/(inputBox[2]-proposedx1)
limitBottomY+=proposedy1
return limitLeftX,limitRightX,limitTopY,limitBottomY
def findBottomRightBorders(self,inputBox, IoU, boxArea,proposedx1,proposedy1,limitLeftX,limitRightX,limitTopY,limitBottomY):
        """Trace the boundary polygon of all valid bottom-right corners.

        With the top-left corner fixed at (proposedx1, proposedy1) and a
        target ``IoU`` against ``inputBox`` ([x1, y1, x2, y2, ...], area
        ``boxArea``), walk four arcs between the extreme limits computed by
        ``findBottomRightMaxBorders``, stepping by
        ``self.IoU_limit_precision``.  Returns an (N, 2) CUDA tensor of
        vertices stored as (x2, 1 - y2) pairs, as expected by
        ``samplePolygon``.
        """
        # Intersection of the proposal's top-left with the input box.
        xA = torch.max(proposedx1, inputBox[0])#alpha
        yA = torch.max(proposedy1, inputBox[1])
        xB = inputBox[2]
        yB = inputBox[3]
        I=torch.clamp(xB - xA,min=0) * torch.clamp(yB - yA,min=0)
        # Arc 1: sweep y2 upward from limitTopY past the box bottom, solving
        # the IoU equation for x2 at each step (intersection recomputed since
        # y2 may lie inside the box here).
        y2TR=torch.arange(limitTopY, inputBox[3]+self.IoU_limit_precision, step=self.IoU_limit_precision).cuda()
        yBnew = torch.min(y2TR, inputBox[3])
        Inew=torch.clamp(xB - xA,min=0) * torch.clamp(yBnew - yA,min=0)
        x2TR=(Inew/IoU-boxArea+Inew)/(y2TR-proposedy1)
        x2TR+=proposedx1
        # Arc 2: sweep x2 downward from limitRightX toward the box right edge;
        # the intersection is the full I computed above (y2 beyond the box).
        x2BR=torch.arange(limitRightX, inputBox[2]-self.IoU_limit_precision, step=-self.IoU_limit_precision).cuda()
        y2BR=(I/IoU-boxArea+I)/(x2BR-proposedx1)
        y2BR+=proposedy1
        # Arc 3: sweep y2 downward from limitBottomY, solving for x2 with the
        # intersection height capped at the box bottom.
        y2BL=torch.arange(limitBottomY, inputBox[3]-self.IoU_limit_precision, step=-self.IoU_limit_precision).cuda()
        yBnew = torch.min(y2BL, inputBox[3])
        x2BL=IoU*boxArea+xA*IoU*(yBnew-yA)+xA*(yBnew-yA)-IoU*proposedx1*(y2BL-proposedy1)
        x2BL/=((IoU+1)*(yBnew-yA)-IoU*(y2BL-proposedy1))
        # Arc 4: sweep x2 upward from limitLeftX, solving for y2 with the
        # intersection width capped at the box right edge (x/y-symmetric to arc 3).
        x2TL=torch.arange(limitLeftX, inputBox[2]+self.IoU_limit_precision, step=self.IoU_limit_precision).cuda()
        xBnew = torch.min(x2TL, inputBox[2])
        y2TL=IoU*boxArea+IoU*(xBnew-xA)*yA+yA*(xBnew-xA)-IoU*proposedy1*(x2TL-proposedx1)
        y2TL/=((IoU+1)*(xBnew-xA)-IoU*(x2TL-proposedx1))
        # Concatenate the four arcs into one closed polygon; y is stored
        # flipped (1 - y2) so all border polygons share one orientation.
        x2=torch.cat((x2TR,x2BR,x2BL,x2TL))
        y2=torch.cat((y2TR,y2BR,y2BL,y2TL))
        bottomRightBorders=torch.cat((x2.unsqueeze(1),1-y2.unsqueeze(1)),1)
        return bottomRightBorders
def findTopLeftPointBorders(self,inputBox, IoU,boxArea):
        """Trace the boundary polygon of all valid top-left corners.

        For a target ``IoU`` against ``inputBox`` ([x1, y1, x2, y2, ...],
        area ``boxArea``), build four boundary arcs stepped by
        ``self.IoU_limit_precision`` and return an (N, 2) CUDA tensor of
        vertices stored as (x1, 1 - y1) pairs, as expected by
        ``samplePolygon``.
        """
        # Arc 1: y1 from the lowest feasible value up to the box top, solving
        # for x1; then reversed so the arcs join into a consistently ordered polygon.
        y1TR=torch.arange((((inputBox[3]*(IoU-1))+ inputBox[1])/IoU), inputBox[1], step=self.IoU_limit_precision).cuda()
        x1TR=inputBox[2]-(boxArea/(IoU*(inputBox[3]-y1TR)))
        inv_idx = torch.arange(y1TR.size(0)-1, -1, -1).long()
        y1TR = y1TR[inv_idx]
        x1TR = x1TR[inv_idx]
        # Arc 2: x1 from the box left edge to the largest x1 still allowing
        # the target IoU, solving the IoU equation for y1.
        x1BR=torch.arange(inputBox[0], inputBox[2]-IoU*(inputBox[2]-inputBox[0]), step=self.IoU_limit_precision).cuda()
        I=(inputBox[2]-x1BR)*(inputBox[3]-inputBox[1])
        y1BR=inputBox[3]-(I/IoU-boxArea+I)/(inputBox[2]-x1BR)
        # Arc 3: y1 from the box top downward, solving for x1 on the
        # opposite branch of the IoU constraint.
        y1BL=torch.arange(inputBox[1], inputBox[3]-(boxArea*IoU)/(inputBox[2]-inputBox[0]), step=self.IoU_limit_precision).cuda()
        x1BL=inputBox[2]-((boxArea*IoU)/((inputBox[3]-y1BL)))
        # Arc 4: same y-range as arc 3 but solving for x1 via the
        # intersection area; reversed to close the polygon.
        y1TL=torch.arange(inputBox[1], inputBox[3]-(boxArea*IoU)/(inputBox[2]-inputBox[0]), step=self.IoU_limit_precision).cuda()
        I=(inputBox[2]-inputBox[0])*(inputBox[3]-y1TL)
        x1TL=inputBox[2]-(I/IoU-boxArea+I)/(inputBox[3]-y1TL)
        inv_idx = torch.arange(y1TL.size(0)-1, -1, -1).long()
        y1TL = y1TL[inv_idx]
        x1TL = x1TL[inv_idx]
        # Concatenate arcs; y is stored flipped (1 - y1) for a consistent
        # orientation across the border polygons.
        x1=torch.cat((x1TR, x1BR,x1BL,x1TL))
        y1=torch.cat((y1TR, y1BR,y1BL,y1TL))
        P=torch.cat((x1.unsqueeze(1),1-y1.unsqueeze(1)),1)
        return P
def BoundingBoxGenerator(self, inputBox, IoUSet, numBoxes):
    """Generate ``numBoxes`` boxes around ``inputBox`` via corner sampling.

    For each box i the target IoU is ``IoUSet[i]``: a top-left corner is
    drawn from its feasible polygon, then a bottom-right corner from the
    polygon induced by that choice.  Half of the time the input box is
    mirrored (coordinates flipped as 1 - c) before sampling, to avoid a
    systematic bias toward one corner, and the result is mirrored back.
    Returns a (numBoxes, 4) CUDA tensor.
    """
    generated = torch.cuda.FloatTensor(numBoxes, 4).fill_(-1)
    area = (inputBox[3] - inputBox[1]) * (inputBox[2] - inputBox[0])
    original = inputBox
    for idx in range(numBoxes):
        # Corner-bias prevention: mirror the box with probability 0.5.
        mirrored = np.random.uniform() < 0.5
        if mirrored:
            workBox = torch.tensor([1 - original[2], 1 - original[3],
                                    1 - original[0], 1 - original[1],
                                    original[4]]).cuda()
        else:
            workBox = original
        # Step 1 in Algorithm 1: sample the top-left corner.
        tlBorders = self.findTopLeftPointBorders(workBox, IoUSet[idx], area)
        generated[idx, 0], generated[idx, 1] = self.samplePolygon(tlBorders, workBox)
        # Step 2 in Algorithm 1: sample the bottom-right corner conditioned
        # on the top-left choice.
        lims = self.findBottomRightMaxBorders(workBox, IoUSet[idx], area,
                                              generated[idx, 0], generated[idx, 1])
        brBorders = self.findBottomRightBorders(workBox, IoUSet[idx], area,
                                                generated[idx, 0], generated[idx, 1], *lims)
        generated[idx, 2], generated[idx, 3] = self.samplePolygon(brBorders, workBox)
        # Undo the mirroring so coordinates refer to the original frame.
        if mirrored:
            generated[idx, :] = torch.tensor([1 - generated[idx, 2], 1 - generated[idx, 3],
                                              1 - generated[idx, 0], 1 - generated[idx, 1]]).cuda()
    return generated
def samplePolygon(self,P, box):
    """Rejection-sample a point from the interior of polygon ``P``.

    ``P`` is an (N, 2) tensor of (x, 1 - y) vertices produced by the border
    routines; ``box`` is unused but kept for interface compatibility.
    Proposals are drawn uniformly from the polygon's bounding rectangle and
    the first one falling inside the polygon is returned as an (x, y) pair.
    """
    maxX = torch.max(P[:, 0])
    maxY = torch.max(1 - P[:, 1])
    minX = torch.min(P[:, 0])
    minY = torch.min(1 - P[:, 1])
    # Build the matplotlib Path ONCE: P does not change between rejection
    # iterations, and re-constructing it per iteration was the bottleneck
    # flagged in the original implementation.
    poly = path.Path(P.cpu().numpy())
    while True:
        proposedx1, proposedy1 = self.sampleRectangle([minX, minY, maxX, maxY])
        if poly.contains_point([proposedx1, 1 - proposedy1]):
            return (proposedx1, proposedy1)
def sampleRectangle(self,B,numSamples=1):
    """Draw ``numSamples`` uniform points from rectangle ``B``.

    ``B`` is [xmin, ymin, xmax, ymax]; returns a tuple (x, y) of tensors of
    shape [numSamples].
    """
    width = B[2] - B[0]
    height = B[3] - B[1]
    xs = torch.rand([numSamples]) * width + B[0]
    ys = torch.rand([numSamples]) * height + B[1]
    return (xs, ys)
def computeBoxToBoxIoU(self,box_a,box_b):
    """IoU between equally indexed boxes of two (N, 4) sets.

    Boxes are [x1, y1, x2, y2]; both sets are unsqueezed along the same
    leading axis, so box i of ``box_a`` is paired with box i of ``box_b``
    (this is element-wise, not a full pairwise matrix).  Returns a tensor
    of IoU values in [0, 1].
    """
    lower = torch.max(box_a[:, :2].unsqueeze(0), box_b[:, :2].unsqueeze(0))
    upper = torch.min(box_a[:, 2:].unsqueeze(0), box_b[:, 2:].unsqueeze(0))
    # Clamp to zero so disjoint boxes yield zero intersection.
    wh = torch.clamp((upper - lower), min=0)
    overlap = wh[:, :, 0] * wh[:, :, 1]
    area_of_a = ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])).unsqueeze(0)
    area_of_b = ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])).unsqueeze(0)
    combined = area_of_a + area_of_b - overlap
    return overlap / combined
| [
"torch.cuda.LongTensor",
"torch.unique",
"math.ceil",
"torch.multinomial",
"torch.cuda.FloatTensor",
"torch.rand",
"torch.max",
"torch.min",
"torch.ceil",
"torch.tensor",
"torch.cat",
"torch.arange",
"torch.sum",
"torch.cuda.ByteTensor",
"numpy.random.uniform",
"torch.clamp"
] | [((197, 257), 'torch.tensor', 'torch.tensor', (['[0.73, 0.12, 0.15, 0.05, 0]'], {'dtype': 'torch.float'}), '([0.73, 0.12, 0.15, 0.05, 0], dtype=torch.float)\n', (209, 257), False, 'import torch\n'), ((284, 342), 'torch.tensor', 'torch.tensor', (['[0.5, 0.6, 0.7, 0.8, 0.9]'], {'dtype': 'torch.float'}), '([0.5, 0.6, 0.7, 0.8, 0.9], dtype=torch.float)\n', (296, 342), False, 'import torch\n'), ((3317, 3369), 'torch.clamp', 'torch.clamp', (['sampledBoxSet[:, [0, 2]]', '(0)', 'imgSize[0]'], {}), '(sampledBoxSet[:, [0, 2]], 0, imgSize[0])\n', (3328, 3369), False, 'import torch\n'), ((3399, 3451), 'torch.clamp', 'torch.clamp', (['sampledBoxSet[:, [1, 3]]', '(0)', 'imgSize[1]'], {}), '(sampledBoxSet[:, [1, 3]], 0, imgSize[1])\n', (3410, 3451), False, 'import torch\n'), ((8219, 8271), 'torch.clamp', 'torch.clamp', (['sampledBoxSet[:, [0, 2]]', '(0)', 'imgSize[0]'], {}), '(sampledBoxSet[:, [0, 2]], 0, imgSize[0])\n', (8230, 8271), False, 'import torch\n'), ((8301, 8353), 'torch.clamp', 'torch.clamp', (['sampledBoxSet[:, [1, 3]]', '(0)', 'imgSize[1]'], {}), '(sampledBoxSet[:, [1, 3]], 0, imgSize[1])\n', (8312, 8353), False, 'import torch\n'), ((10092, 10124), 'torch.unique', 'torch.unique', (['inputBoxSet[:, -1]'], {}), '(inputBoxSet[:, -1])\n', (10104, 10124), False, 'import torch\n'), ((10189, 10229), 'math.ceil', 'math.ceil', (['(self.RoI_number / classNumber)'], {}), '(self.RoI_number / classNumber)\n', (10198, 10229), False, 'import math\n'), ((10487, 10513), 'torch.sum', 'torch.sum', (['classIndices', '(1)'], {}), '(classIndices, 1)\n', (10496, 10513), False, 'import torch\n'), ((10626, 10670), 'torch.ceil', 'torch.ceil', (['(perClassAllocation / classCounts)'], {}), '(perClassAllocation / classCounts)\n', (10636, 10670), False, 'import torch\n'), ((11970, 12043), 'torch.multinomial', 'torch.multinomial', (['self.IoU_weights', 'positiveRoI_number'], {'replacement': '(True)'}), '(self.IoU_weights, positiveRoI_number, replacement=True)\n', (11987, 12043), False, 
'import torch\n'), ((12506, 12540), 'torch.max', 'torch.max', (['proposedx1', 'inputBox[0]'], {}), '(proposedx1, inputBox[0])\n', (12515, 12540), False, 'import torch\n'), ((12560, 12594), 'torch.max', 'torch.max', (['proposedy1', 'inputBox[1]'], {}), '(proposedy1, inputBox[1])\n', (12569, 12594), False, 'import torch\n'), ((13500, 13534), 'torch.max', 'torch.max', (['proposedx1', 'inputBox[0]'], {}), '(proposedx1, inputBox[0])\n', (13509, 13534), False, 'import torch\n'), ((13554, 13588), 'torch.max', 'torch.max', (['proposedy1', 'inputBox[1]'], {}), '(proposedy1, inputBox[1])\n', (13563, 13588), False, 'import torch\n'), ((13843, 13871), 'torch.min', 'torch.min', (['y2TR', 'inputBox[3]'], {}), '(y2TR, inputBox[3])\n', (13852, 13871), False, 'import torch\n'), ((14366, 14394), 'torch.min', 'torch.min', (['y2BL', 'inputBox[3]'], {}), '(y2BL, inputBox[3])\n', (14375, 14394), False, 'import torch\n'), ((14686, 14714), 'torch.min', 'torch.min', (['x2TL', 'inputBox[2]'], {}), '(x2TL, inputBox[2])\n', (14695, 14714), False, 'import torch\n'), ((14883, 14918), 'torch.cat', 'torch.cat', (['(x2TR, x2BR, x2BL, x2TL)'], {}), '((x2TR, x2BR, x2BL, x2TL))\n', (14892, 14918), False, 'import torch\n'), ((14927, 14962), 'torch.cat', 'torch.cat', (['(y2TR, y2BR, y2BL, y2TL)'], {}), '((y2TR, y2BR, y2BL, y2TL))\n', (14936, 14962), False, 'import torch\n'), ((16360, 16395), 'torch.cat', 'torch.cat', (['(x1TR, x1BR, x1BL, x1TL)'], {}), '((x1TR, x1BR, x1BL, x1TL))\n', (16369, 16395), False, 'import torch\n'), ((16405, 16440), 'torch.cat', 'torch.cat', (['(y1TR, y1BR, y1BL, y1TL)'], {}), '((y1TR, y1BR, y1BL, y1TL))\n', (16414, 16440), False, 'import torch\n'), ((18112, 18130), 'torch.max', 'torch.max', (['P[:, 0]'], {}), '(P[:, 0])\n', (18121, 18130), False, 'import torch\n'), ((18143, 18165), 'torch.max', 'torch.max', (['(1 - P[:, 1])'], {}), '(1 - P[:, 1])\n', (18152, 18165), False, 'import torch\n'), ((18176, 18194), 'torch.min', 'torch.min', (['P[:, 0]'], {}), '(P[:, 0])\n', (18185, 
18194), False, 'import torch\n'), ((18207, 18229), 'torch.min', 'torch.min', (['(1 - P[:, 1])'], {}), '(1 - P[:, 1])\n', (18216, 18229), False, 'import torch\n'), ((18965, 19000), 'torch.clamp', 'torch.clamp', (['(max_xy - min_xy)'], {'min': '(0)'}), '(max_xy - min_xy, min=0)\n', (18976, 19000), False, 'import torch\n'), ((12655, 12682), 'torch.clamp', 'torch.clamp', (['(xB - xA)'], {'min': '(0)'}), '(xB - xA, min=0)\n', (12666, 12682), False, 'import torch\n'), ((12684, 12711), 'torch.clamp', 'torch.clamp', (['(yB - yA)'], {'min': '(0)'}), '(yB - yA, min=0)\n', (12695, 12711), False, 'import torch\n'), ((13649, 13676), 'torch.clamp', 'torch.clamp', (['(xB - xA)'], {'min': '(0)'}), '(xB - xA, min=0)\n', (13660, 13676), False, 'import torch\n'), ((13678, 13705), 'torch.clamp', 'torch.clamp', (['(yB - yA)'], {'min': '(0)'}), '(yB - yA, min=0)\n', (13689, 13705), False, 'import torch\n'), ((13886, 13913), 'torch.clamp', 'torch.clamp', (['(xB - xA)'], {'min': '(0)'}), '(xB - xA, min=0)\n', (13897, 13913), False, 'import torch\n'), ((13915, 13945), 'torch.clamp', 'torch.clamp', (['(yBnew - yA)'], {'min': '(0)'}), '(yBnew - yA, min=0)\n', (13926, 13945), False, 'import torch\n'), ((1290, 1309), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (1302, 1309), False, 'import torch\n'), ((4774, 4806), 'torch.cuda.ByteTensor', 'torch.cuda.ByteTensor', (['boxNumber'], {}), '(boxNumber)\n', (4795, 4806), False, 'import torch\n'), ((6450, 6495), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['positiveRoI_number', '(4)'], {}), '(positiveRoI_number, 4)\n', (6472, 6495), False, 'import torch\n'), ((6521, 6562), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['positiveRoI_number'], {}), '(positiveRoI_number)\n', (6542, 6562), False, 'import torch\n'), ((10777, 10823), 'torch.sum', 'torch.sum', (['(classCounts * perInstanceAllocation)'], {}), '(classCounts * perInstanceAllocation)\n', (10786, 10823), False, 'import torch\n'), ((10856, 10901), 
'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['positiveRoI_number', '(5)'], {}), '(positiveRoI_number, 5)\n', (10878, 10901), False, 'import torch\n'), ((13727, 13826), 'torch.arange', 'torch.arange', (['limitTopY', '(inputBox[3] + self.IoU_limit_precision)'], {'step': 'self.IoU_limit_precision'}), '(limitTopY, inputBox[3] + self.IoU_limit_precision, step=self.\n IoU_limit_precision)\n', (13739, 13826), False, 'import torch\n'), ((14047, 14149), 'torch.arange', 'torch.arange', (['limitRightX', '(inputBox[2] - self.IoU_limit_precision)'], {'step': '(-self.IoU_limit_precision)'}), '(limitRightX, inputBox[2] - self.IoU_limit_precision, step=-\n self.IoU_limit_precision)\n', (14059, 14149), False, 'import torch\n'), ((14246, 14349), 'torch.arange', 'torch.arange', (['limitBottomY', '(inputBox[3] - self.IoU_limit_precision)'], {'step': '(-self.IoU_limit_precision)'}), '(limitBottomY, inputBox[3] - self.IoU_limit_precision, step=-\n self.IoU_limit_precision)\n', (14258, 14349), False, 'import torch\n'), ((14569, 14669), 'torch.arange', 'torch.arange', (['limitLeftX', '(inputBox[2] + self.IoU_limit_precision)'], {'step': 'self.IoU_limit_precision'}), '(limitLeftX, inputBox[2] + self.IoU_limit_precision, step=self.\n IoU_limit_precision)\n', (14581, 14669), False, 'import torch\n'), ((15181, 15288), 'torch.arange', 'torch.arange', (['((inputBox[3] * (IoU - 1) + inputBox[1]) / IoU)', 'inputBox[1]'], {'step': 'self.IoU_limit_precision'}), '((inputBox[3] * (IoU - 1) + inputBox[1]) / IoU, inputBox[1],\n step=self.IoU_limit_precision)\n', (15193, 15288), False, 'import torch\n'), ((15511, 15620), 'torch.arange', 'torch.arange', (['inputBox[0]', '(inputBox[2] - IoU * (inputBox[2] - inputBox[0]))'], {'step': 'self.IoU_limit_precision'}), '(inputBox[0], inputBox[2] - IoU * (inputBox[2] - inputBox[0]),\n step=self.IoU_limit_precision)\n', (15523, 15620), False, 'import torch\n'), ((15768, 15887), 'torch.arange', 'torch.arange', (['inputBox[1]', '(inputBox[3] - boxArea * IoU 
/ (inputBox[2] - inputBox[0]))'], {'step': 'self.IoU_limit_precision'}), '(inputBox[1], inputBox[3] - boxArea * IoU / (inputBox[2] -\n inputBox[0]), step=self.IoU_limit_precision)\n', (15780, 15887), False, 'import torch\n'), ((15981, 16100), 'torch.arange', 'torch.arange', (['inputBox[1]', '(inputBox[3] - boxArea * IoU / (inputBox[2] - inputBox[0]))'], {'step': 'self.IoU_limit_precision'}), '(inputBox[1], inputBox[3] - boxArea * IoU / (inputBox[2] -\n inputBox[0]), step=self.IoU_limit_precision)\n', (15993, 16100), False, 'import torch\n'), ((16611, 16646), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['numBoxes', '(4)'], {}), '(numBoxes, 4)\n', (16633, 16646), False, 'import torch\n'), ((16896, 16915), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (16913, 16915), True, 'import numpy as np\n'), ((18617, 18641), 'torch.rand', 'torch.rand', (['[numSamples]'], {}), '([numSamples])\n', (18627, 18641), False, 'import torch\n'), ((18669, 18693), 'torch.rand', 'torch.rand', (['[numSamples]'], {}), '([numSamples])\n', (18679, 18693), False, 'import torch\n'), ((12169, 12199), 'torch.rand', 'torch.rand', (['positiveRoI_number'], {}), '(positiveRoI_number)\n', (12179, 12199), False, 'import torch\n'), ((16969, 17039), 'torch.tensor', 'torch.tensor', (['[1 - box[2], 1 - box[3], 1 - box[0], 1 - box[1], box[4]]'], {}), '([1 - box[2], 1 - box[3], 1 - box[0], 1 - box[1], box[4]])\n', (16981, 17039), False, 'import torch\n'), ((17932, 18038), 'torch.tensor', 'torch.tensor', (['[1 - sampledBox[i, 2], 1 - sampledBox[i, 3], 1 - sampledBox[i, 0], 1 -\n sampledBox[i, 1]]'], {}), '([1 - sampledBox[i, 2], 1 - sampledBox[i, 3], 1 - sampledBox[i,\n 0], 1 - sampledBox[i, 1]])\n', (17944, 18038), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 17:02:59 2018
@author: bruce
compared with version 1.6.4
the update is from correlation coefficient
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
def correlation_matrix(corr_mx, cm_title):
    """Plot a test/retest correlation matrix in grayscale.

    Parameters
    ----------
    corr_mx : DataFrame or array
        Correlation values, rows = test sessions, columns = retest sessions
        (22x22 given the tick labels).
    cm_title : str
        Figure title.

    Each cell is annotated with its value rounded to two decimals.
    """
    from matplotlib import pyplot as plt

    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    cax = ax1.matshow(corr_mx, cmap='gray')
    fig.colorbar(cax)
    ax1.grid(False)
    plt.title(cm_title)
    ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
    xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
    ax1.set_xticks(np.arange(len(xlabels)))
    ax1.set_yticks(np.arange(len(ylabels)))
    ax1.set_xticklabels(xlabels,fontsize=6)
    ax1.set_yticklabels(ylabels,fontsize=6)
    # Annotate every cell; bounds derived from the data instead of a
    # hard-coded 22 so non-22x22 matrices also render correctly.
    corr_mx_array = np.asarray(corr_mx)
    n_rows, n_cols = corr_mx_array.shape
    for i in range(n_cols):
        for j in range(n_rows):
            c = corr_mx_array[j, i]
            ax1.text(i, j, round(c, 2), va='center', ha='center')
    plt.show()
def correlation_matrix_01(corr_mx, cm_title):
    """Plot a binary mask marking each row's maximum correlation.

    ``corr_mx`` (DataFrame) is converted to an ndarray first — the
    broadcasting comparison does not work on the DataFrame directly — and
    reduced to a boolean matrix that is True where a cell equals its row
    maximum; the mask is rendered in grayscale with test labels on the
    y-axis and retest labels on the x-axis.
    """
    values = np.asarray(corr_mx)
    row_max_mask = values == values.max(axis=1)[:, None]
    fig = plt.figure()
    axis = fig.add_subplot(111)
    image = axis.matshow(row_max_mask, cmap='gray')
    fig.colorbar(image)
    axis.grid(False)
    plt.title(cm_title)
    ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
    xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
    axis.set_xticks(np.arange(len(xlabels)))
    axis.set_yticks(np.arange(len(ylabels)))
    axis.set_xticklabels(xlabels, fontsize=6)
    axis.set_yticklabels(ylabels, fontsize=6)
    plt.show()
def correlation_matrix_rank(corr_mx, cm_title):
    """Plot the within-row rank of every correlation value.

    Parameters
    ----------
    corr_mx : DataFrame
        Correlation matrix; ranks are computed per row with rank 1 for the
        largest value.
    cm_title : str
        Figure title.

    Each cell is annotated with its integer rank.
    """
    output = corr_mx.rank(axis=1, ascending=False)
    fig, ax1 = plt.subplots()
    im1 = ax1.matshow(output, cmap=plt.cm.Wistia)
    fig.colorbar(im1)
    ax1.grid(False)
    ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
    xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
    ax1.set_xticks(np.arange(len(xlabels)))
    ax1.set_yticks(np.arange(len(ylabels)))
    ax1.set_xticklabels(xlabels,fontsize=6)
    ax1.set_yticklabels(ylabels,fontsize=6)
    plt.title(cm_title)
    # Annotate ranks; bounds derived from the data instead of a hard-coded
    # 22 so non-22x22 matrices also render correctly.
    output = np.asarray(output)
    n_rows, n_cols = output.shape
    for i in range(n_cols):
        for j in range(n_rows):
            c = output[j, i]
            ax1.text(i, j, int(c), va='center', ha='center')
    plt.show()
def correlation_matrix_comb(corr_mx, cm_title):
    """Plot two complementary views of a test/retest correlation matrix.

    Left panel: the raw correlations (viridis colormap) annotated with each
    cell's within-row rank.  Right panel: a grayscale mask marking each
    row's maximum.  ``corr_mx`` is a DataFrame, 22x22 given the tick labels.
    """
    fig, (ax2, ax3) = plt.subplots(1, 2)
    ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
    xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
    # Left panel: correlation heat map annotated with per-row ranks.
    # (An unused LinearSegmentedColormap was removed; the plot uses viridis.)
    corr_mx_rank = corr_mx.rank(axis=1, ascending=False)
    im2 = ax2.matshow(corr_mx, cmap='viridis')
    # colorbar needs numpy version 1.13.1
    fig.colorbar(im2, ax=ax2)
    ax2.grid(False)
    ax2.set_title(cm_title)
    ax2.set_xticks(np.arange(len(xlabels)))
    ax2.set_yticks(np.arange(len(ylabels)))
    ax2.set_xticklabels(xlabels,fontsize=6)
    ax2.set_yticklabels(ylabels,fontsize=6)
    # Annotate ranks; bounds derived from the data instead of hard-coded 22.
    corr_mx_rank = np.asarray(corr_mx_rank)
    n_rows, n_cols = corr_mx_rank.shape
    for i in range(n_cols):
        for j in range(n_rows):
            c = corr_mx_rank[j, i]
            ax2.text(i, j, int(c), va='center', ha='center')
    # Right panel: boolean mask of the row-wise maxima.  The DataFrame is
    # converted to an ndarray so the broadcast comparison works.
    temp = np.asarray(corr_mx)
    output = (temp == temp.max(axis=1)[:, None])
    im3 = ax3.matshow(output, cmap='gray')
    ax3.grid(False)
    ax3.set_title(cm_title)
    ax3.set_xticks(np.arange(len(xlabels)))
    ax3.set_yticks(np.arange(len(ylabels)))
    ax3.set_xticklabels(xlabels,fontsize=6)
    ax3.set_yticklabels(ylabels,fontsize=6)
    plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
    """Grayscale mask of each row's maximum for a test-vs-test matrix.

    ``corr_mx`` (DataFrame) is converted to an ndarray so the broadcast
    comparison works; the boolean row-maximum mask is plotted with test
    labels on both axes.
    """
    values = np.asarray(corr_mx)
    row_max_mask = values == values.max(axis=1)[:, None]
    fig = plt.figure()
    axis = fig.add_subplot(111)
    image = axis.matshow(row_max_mask, cmap='gray')
    fig.colorbar(image)
    axis.grid(False)
    plt.title(cm_title)
    # Test sessions label both axes for a test-vs-test comparison.
    labels = ['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
    axis.set_xticks(np.arange(len(labels)))
    axis.set_yticks(np.arange(len(labels)))
    axis.set_xticklabels(labels, fontsize=6)
    axis.set_yticklabels(labels, fontsize=6)
    plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
    """Grayscale mask of each row's maximum for a retest-vs-retest matrix.

    ``corr_mx`` (DataFrame) is converted to an ndarray so the broadcast
    comparison works; the boolean row-maximum mask is plotted with retest
    labels on both axes.
    """
    values = np.asarray(corr_mx)
    row_max_mask = values == values.max(axis=1)[:, None]
    fig = plt.figure()
    axis = fig.add_subplot(111)
    image = axis.matshow(row_max_mask, cmap='gray')
    fig.colorbar(image)
    axis.grid(False)
    plt.title(cm_title)
    # Retest sessions label both axes for a retest-vs-retest comparison.
    labels = ['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
    axis.set_xticks(np.arange(len(labels)))
    axis.set_yticks(np.arange(len(labels)))
    axis.set_xticklabels(labels, fontsize=6)
    axis.set_yticklabels(labels, fontsize=6)
    plt.show()
# shrink value for correlation matrix
# in order to use colormap -> 10 scale
def shrink_value_03_1(corr_in1):
    """Return a copy of ``corr_in1`` with every value floored at 0.3.

    Compresses the lower tail of a correlation matrix so a coarse colormap
    spends its range on the informative [0.3, 1.0] interval.
    ``DataFrame.clip`` returns a new object, so the input is not mutated
    (preserving the original's explicit ``.copy()`` semantics), and the
    clamp now covers the whole frame instead of a hard-coded 22x22 window.
    """
    return corr_in1.clip(lower=0.3)
def shrink_value_05_1(corr_in2):
    """Return a copy of ``corr_in2`` with every value floored at 0.5.

    Same idea as ``shrink_value_03_1`` but with a 0.5 floor.
    ``DataFrame.clip`` returns a new object, so the input is not mutated
    (preserving the original's explicit ``.copy()`` semantics), and the
    clamp now covers the whole frame instead of a hard-coded 22x22 window.
    """
    return corr_in2.clip(lower=0.5)
# not used!!!!!!!!!!!!
# normalize the complex signal series
def normalize_complex_arr(a):
    """Offset a complex array to the origin, then scale to unit peak magnitude.

    The real and imaginary minima are subtracted (origin offset), then the
    result is divided by its largest absolute value.  Currently unused.
    """
    origin_offset = a.real.min() + 1j * a.imag.min()
    shifted = a - origin_offset
    return shifted / np.abs(shifted).max()
def improved_PCC(signal_in):
    """Pearson-like correlation (without mean removal) between signal rows.

    ``signal_in`` holds 44 signals as rows (rows 22-43 appear to be the
    retest half — confirm against the caller).  For every pair (i, j) the
    statistic |sum(s_i * s_j)| / sqrt(sum(s_i^2) * sum(s_j^2)) is computed;
    the returned DataFrame is the rows-22:44 by columns-0:22 quadrant,
    matching the original implementation's ``iloc`` slice.

    Vectorized with a Gram matrix instead of the original O(44^2) Python
    loop; this also drops ``DataFrame.append``, which was removed in
    pandas 2.0.
    """
    X = signal_in.iloc[0:44, :].to_numpy(dtype=float)
    gram = X @ X.T                      # gram[i, j] = sum(s_i * s_j)
    norms = np.sqrt(np.diag(gram))      # per-row sqrt(sum(s_i^2))
    pcc = np.abs(gram / np.outer(norms, norms))
    output_corr = pd.DataFrame(pcc)
    # Keep only the retest-by-test quadrant, exactly as before.
    return output_corr.iloc[22:44, 0:22]
###############################################################################
# import the pkl file
#pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl')
# Load the EFR recordings: 1408 rows, each 1024 signal samples followed by
# 7 label columns (Subject, Sex, Condition, Vowel, Sound Level, Num, EFR/FFR).
df_EFR=pd.read_pickle('/home/bruce/Dropbox/4.Project/4.Code for Linux/df_EFR.pkl')
# Mac
# df_EFR=pd.read_pickle('/Users/bruce/Documents/uOttawa/Master‘s Thesis/4.Project/4.Code for Linux/df_EFR.pkl')
# remove DC offset
# Detrend each epoch's 1024 samples (type='constant' subtracts the mean),
# then re-attach its 7 label columns.
# NOTE(review): DataFrame.append was removed in pandas 2.0 — this script
# presumably targets pandas < 2; confirm the pinned version.
df_EFR_detrend = pd.DataFrame()
for i in range(1408):
    # combine next two rows later
    df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
    df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
    df_EFR_detrend = df_EFR_detrend.append(pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_detrend = df_EFR_detrend.reset_index(drop=True)
# From here on df_EFR refers to the detrended data.
df_EFR = df_EFR_detrend
# Define window function
# 1024-point analysis windows; only the Hamming window is applied below.
# NOTE(review): these moved to scipy.signal.windows in newer SciPy releases.
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_EFR
# Average consecutive row pairs (test1/test2 of the same condition) into 704
# epochs; kept twice: raw, and with the Hamming window applied.
df_EFR_avg = pd.DataFrame()
df_EFR_avg_win = pd.DataFrame()
# average test1 and test2
for i in range(704):
    # combine next two rows later
    df_EFR_avg_t = pd.DataFrame(df_EFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
    # without window function
    df_EFR_avg_t = pd.DataFrame(df_EFR_avg_t.iloc[0,:].values.reshape(1,1024)) # without window function
    # implement the window function
    df_EFR_avg_t_window = pd.DataFrame((df_EFR_avg_t.iloc[0,:] * win_hamming).values.reshape(1,1024))
    # Labels are taken from the first row of each averaged pair.
    df_EFR_label = pd.DataFrame(df_EFR.iloc[2*i, 1024:1031].values.reshape(1,7))
    df_EFR_avg = df_EFR_avg.append(pd.concat([df_EFR_avg_t, df_EFR_label], axis=1, ignore_index=True))
    df_EFR_avg_win = df_EFR_avg_win.append(pd.concat([df_EFR_avg_t_window, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg = df_EFR_avg.sort_values(by=["Condition", "Subject"])
df_EFR_avg = df_EFR_avg.reset_index(drop=True)
df_EFR_avg_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg_win = df_EFR_avg_win.sort_values(by=["Condition", "Subject"])
df_EFR_avg_win = df_EFR_avg_win.reset_index(drop=True)
# average all the subjects , test and retest and keep one sound levels
# filter by 'a vowel and 85Db'
# Re-sort so rows group by sound level first; the last block then holds the
# highest sound level.
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
df_EFR_avg_win_sorted = df_EFR_avg_win.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_win_sorted = df_EFR_avg_win_sorted.reset_index(drop=True)
# filter55 65 75 sound levels and keep 85dB
# keep vowel condition and subject
# Rows 528+ (the last 176 of 704) are the 85 dB block after the sort above.
df_EFR_avg_85 = pd.DataFrame(df_EFR_avg_sorted.iloc[528:, :])
df_EFR_avg_85 = df_EFR_avg_85.reset_index(drop=True)
df_EFR_avg_win_85 = pd.DataFrame(df_EFR_avg_win_sorted.iloc[528:, :])
df_EFR_avg_win_85 = df_EFR_avg_win_85.reset_index(drop=True)
# this part was replaced by upper part based on what I need to do
'''
# average all the subjects , test and retest, different sound levels
# filter by 'a vowel and 85Db'
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Vowel","Condition", "Subject", "Sound Level"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
# average sound levels and
# keep vowel condition and subject
df_EFR_avg_vcs = pd.DataFrame()
for i in range(176):
# combine next two rows later
df_EFR_avg_vcs_t = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i: 4*i+4, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
df_EFR_avg_vcs_label = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i, 1024:1031].values.reshape(1,7))
df_EFR_avg_vcs = df_EFR_avg_vcs.append(pd.concat([df_EFR_avg_vcs_t, df_EFR_avg_vcs_label], axis=1, ignore_index=True), ignore_index=True)
# set the title of columns
df_EFR_avg_vcs.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
#df_EFR_avg_vcs = df_EFR_avg_vcs.sort_values(by=["Condition", "Subject"])
'''
'''
# filter by 'a vowel and 85Db'
df_EFR_a_85_test1 = df_EFR[(df_EFR['Vowel'] == 'a vowel') & (df_EFR['Sound Level'] == '85')]
df_EFR_a_85_test1 = df_EFR_a_85_test1.reset_index(drop=True)
df_EFR_a_85_avg = pd.DataFrame()
# average test1 and test2
for i in range(44):
df_EFR_a_85_avg_t = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024))
df_EFR_a_85_label = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_a_85_avg = df_EFR_a_85_avg.append(pd.concat([df_EFR_a_85_avg_t, df_EFR_a_85_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_a_85_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_a_85_avg = df_EFR_a_85_avg.sort_values(by=["Condition", "Subject"])
df_EFR_a_85_avg = df_EFR_a_85_avg.reset_index(drop=True)
'''
##################################################
# Frequency Domain
# parameters
sampling_rate = 9606 # fs
# sampling_rate = 9596.623
# Frequency axes: n = raw epoch length, n2 = zero-padded length.  Padding to
# 9606 samples (= fs) gives 1 Hz spacing between FFT bins.
n = 1024
k = np.arange(n)
T = n/sampling_rate # time of signal
frq = k/T
freq = frq[range(int(n/2))]
n2 = 9606
k2 = np.arange(n2)
T2 = n2/sampling_rate
frq2 = k2/T2
freq2 = frq2[range(int(n2/2))]
# zero padding
# for df_EFR
# Insert a zero block between the 1024 signal columns and the 7 label
# columns: 1024 + 95036 = 96060 = 10 * fs.
df_EFR_data = df_EFR.iloc[:, :1024]
df_EFR_label = df_EFR.iloc[:, 1024:]
df_EFR_mid = pd.DataFrame(np.zeros((1408, 95036)))
df_EFR_withzero = pd.concat([df_EFR_data, df_EFR_mid, df_EFR_label], axis=1)
# rename columns
df_EFR_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# for df_EFR_avg_85
# Same padding for the 85 dB averages: 1024 + 8582 = 9606 = fs.
df_EFR_avg_85_data = df_EFR_avg_85.iloc[:, :1024]
df_EFR_avg_85_label = df_EFR_avg_85.iloc[:, 1024:]
df_EFR_avg_85_mid = pd.DataFrame(np.zeros((176, 8582)))
df_EFR_avg_85_withzero = pd.concat([df_EFR_avg_85_data, df_EFR_avg_85_mid, df_EFR_avg_85_label], axis=1)
# rename columns
df_EFR_avg_85_withzero.columns = np.append(np.arange(9606), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# df_EFR_avg_win_85
# Windowed variant, padded identically.
df_EFR_avg_win_85_data = df_EFR_avg_win_85.iloc[:, :1024]
df_EFR_avg_win_85_label = df_EFR_avg_win_85.iloc[:, 1024:]
df_EFR_avg_win_85_mid = pd.DataFrame(np.zeros((176, 8582)))
df_EFR_avg_win_85_withzero = pd.concat([df_EFR_avg_win_85_data, df_EFR_avg_win_85_mid, df_EFR_avg_win_85_label], axis=1)
df_EFR_avg_win_85_withzero.columns = np.append(np.arange(9606), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# concatenate AENU
# Concatenate the four 44-row vowel blocks (rows 0:44, 44:88, 88:132,
# 132:176) side by side into 44 rows of 4096 samples, then pad with zeros to
# 40960 columns.
temp1 = pd.concat([df_EFR_avg_85.iloc[0:44, 0:1024].reset_index(drop=True),df_EFR_avg_85.iloc[44:88, 0:1024].reset_index(drop=True)], axis=1)
temp2 = pd.concat([df_EFR_avg_85.iloc[88:132, 0:1024].reset_index(drop=True), df_EFR_avg_85.iloc[132:176, 0:1024].reset_index(drop=True)], axis=1)
df_EFR_avg_85_aenu = pd.concat([temp1, temp2], axis=1, ignore_index=True)
df_EFR_avg_85_aenu_withzero = pd.concat([df_EFR_avg_85_aenu, pd.DataFrame(np.zeros((44, 36864)))] , axis=1)
'''
# test##############
# test(detrend)
temp_test = np.asarray(df_EFR_avg_85_data.iloc[0, 0:1024])
temp_test_detrend = signal.detrend(temp_test)
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(temp_test)
plt.subplot(2, 1, 2)
plt.plot(temp_test_detrend)
plt.show()
# the raw data is already DC removed
# test(zero padding)
temp_EFR_1 = df_EFR_withzero.iloc[0, 0:1024]
temp_EFR_2= df_EFR_withzero.iloc[0, 0:9606]
temp_amplitude_spectrum_1 = np.abs((fftpack.fft(temp_EFR_1)/n)[range(int(n/2))])
temp_amplitude_spectrum_2 = np.abs((fftpack.fft(temp_EFR_2)/n2)[range(int(n2/2))])
plt.figure()
plt.subplot(2, 1, 1)
markers1 = [11, 21, 32, 43, 53, 64, 75]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_1, '-D', markevery=markers1)
plt.xlim(0, 100)
plt.title('without zero padding')
plt.subplot(2, 1, 2)
#markers2 = [100, 200, 300, 400, 500, 600, 700]
markers2 = [99, 199, 299, 399, 499, 599, 599]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_2, '-D', markevery=markers2)
plt.xlim(0, 1000)
# plt.xscale('linear')
plt.title('with zero padding')
plt.show()
# #################
'''
# Calculate the Amplitude Spectrum
# create a new dataframe with zero-padding amplitude spectrum
'''
# for df_EFR
df_as_7= pd.DataFrame()
for i in range(1408):
temp_EFR = df_EFR_avg_85_withzero.iloc[i, 0:96060]
temp_as = np.abs((fftpack.fft(temp_EFR)/n2)[range(int(n2/2))])
#df_as_7 = pd.concat([df_as_7, temp_as_7_t], axis=0)
df_as_7 = df_as_7.append(pd.DataFrame(np.array([temp_as[1000], temp_as[2000], temp_as[3000], temp_as[4000], \
temp_as[5000], temp_as[6000], temp_as[7000]]).reshape(1,7)), ignore_index = True)
df_as_7 = pd.concat([df_as_7, df_EFR_label], axis=1) # add labels on it
# filter by 'a vowel and 85Db'
df_as_7_test1 = df_as_7[(df_as_7['Vowel'] == 'a vowel') & (df_as_7['Sound Level'] == '85')]
df_as_7_test1 = df_as_7_test1.reset_index(drop=True)
'''
# for df_EFR_avg_vcs_withzero
df_as_85_no0= pd.DataFrame()
df_as_85= pd.DataFrame()
df_as7_85= pd.DataFrame()
df_as_win_85= pd.DataFrame()
df_as7_win_85= pd.DataFrame()
# Magnitude spectra for each of the 176 rows. Only the positive-frequency half
# is kept: 512 bins without padding (n = 1024) and 4803 bins with zero padding
# (n2 = 9606, inferred from the reshape sizes below).
for i in range(176):
    #temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606]
    # spectrum of the raw (non-padded) frame
    temp_as_no0 = np.abs((np.fft.fft(df_EFR_avg_85_data.iloc[i, :]))[range(int(n/2))])
    df_as_85_no0 = df_as_85_no0.append(pd.DataFrame(temp_as_no0.reshape(1,512)), ignore_index = True)
    # spectrum of the zero-padded frame
    temp_as = np.abs((np.fft.fft(df_EFR_avg_85_withzero.iloc[i, 0:9606]))[range(int(n2/2))])
    df_as_85 = df_as_85.append(pd.DataFrame(temp_as.reshape(1,4803)), ignore_index = True)
    # 7 selected bins (indices 100..700 of the padded spectrum)
    df_as7_85 = df_as7_85.append(pd.DataFrame(np.array([temp_as[100], temp_as[200], temp_as[300], temp_as[400], \
        temp_as[500], temp_as[600], temp_as[700]]).reshape(1,7)), ignore_index = True)
    # same two spectra for the windowed frames
    temp_as_win = np.abs((np.fft.fft(df_EFR_avg_win_85_withzero.iloc[i, 0:9606]))[range(int(n2/2))])
    df_as_win_85 = df_as_win_85.append(pd.DataFrame(temp_as_win.reshape(1,4803)), ignore_index = True)
    df_as7_win_85 = df_as7_win_85.append(pd.DataFrame(np.array([temp_as_win[100], temp_as_win[200], temp_as_win[300], temp_as_win[400], \
        temp_as_win[500], temp_as_win[600], temp_as_win[700]]).reshape(1,7)), ignore_index = True)
df_as_85_no0 = pd.concat([df_as_85_no0, df_EFR_avg_85_label], axis=1) # add labels on it
df_as_85 = pd.concat([df_as_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_as7_85 = pd.concat([df_as7_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_as_win_85 = pd.concat([df_as_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
df_as7_win_85 = pd.concat([df_as7_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
# without zero padding
# Re-arrange the four vowel blocks (rows 0:44 = a, 44:88 = e, 88:132 = n,
# 132:176 = u) side by side, so each of the 44 rows carries all four spectra
# for one recording.
df_as_85_aenu = pd.concat([df_as_85.iloc[0:44, :4803],
                           df_as_85.iloc[44:88, :4803].reset_index(drop=True),
                           df_as_85.iloc[88:132, :4803].reset_index(drop=True),
                           df_as_85.iloc[132:176, :4803].reset_index(drop=True)], axis=1)
# same stacking, truncated to the first 1300 padded bins
df_as_85_1300_aenu = pd.concat([df_as_85.iloc[0:44, :1300],
                           df_as_85.iloc[44:88, :1300].reset_index(drop=True),
                           df_as_85.iloc[88:132, :1300].reset_index(drop=True),
                           df_as_85.iloc[132:176, :1300].reset_index(drop=True)], axis=1)
# non-padded spectra truncated to 139 bins, then stacked the same way
df_as_85_no0_1300 = df_as_85_no0.iloc[:, :139]
df_as_85_no0_aenu = pd.concat([df_as_85_no0_1300.iloc[0:44, :],
                           df_as_85_no0_1300.iloc[44:88, :].reset_index(drop=True),
                           df_as_85_no0_1300.iloc[88:132, :].reset_index(drop=True),
                           df_as_85_no0_1300.iloc[132:176, :].reset_index(drop=True)], axis=1)
# 7-bin summaries, likewise stacked per vowel
df_as7_85_aenu = pd.concat([df_as7_85.iloc[0:44, :7],
                           df_as7_85.iloc[44:88, :7].reset_index(drop=True),
                           df_as7_85.iloc[88:132, :7].reset_index(drop=True),
                           df_as7_85.iloc[132:176, :7].reset_index(drop=True)], axis=1)
# for efr_aenu
# Spectrum of each concatenated a+e+n+u waveform: 4096 samples -> 2048
# positive-frequency bins, normalised by the FFT length.
df_aenu_as_85 = pd.DataFrame()
df_aenu_as7_85 = pd.DataFrame()
for i in range(44):
    #temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606]
    temp_as2 = np.abs((fftpack.fft(df_EFR_avg_85_aenu.iloc[i, 0:4096])/4096)[range(int(4096/2))])
    df_aenu_as_85 = df_aenu_as_85.append(pd.DataFrame(temp_as2.reshape(1,2048)), ignore_index = True)
    # 7 selected bins (indices 43..298 of the 2048-bin spectrum)
    df_aenu_as7_85 = df_aenu_as7_85.append(pd.DataFrame(np.array([temp_as2[43], temp_as2[85], temp_as2[128], temp_as2[170], \
        temp_as2[213], temp_as2[256], temp_as2[298]]).reshape(1,7)), ignore_index = True)
#df_aenu_as_85 = pd.concat([df_aenu_as_85, df_EFR_avg_85_label], axis=1) # add labels on it
# Commented-out averaging/normalisation of the 7-bin spectra, kept for reference.
'''
# average test1 and test2
df_as_7_avg = pd.DataFrame()
for i in range(44):
    df_as_7_avg1 = pd.DataFrame(df_as_7_test1.iloc[2*i: 2*i+1, 0:7].mean(axis=0).values.reshape(1,7))
    df_as_7_label = pd.DataFrame(df_as_7_test1.iloc[2*i, 7:14].values.reshape(1,7))
    df_as_7_avg_t = pd.concat([df_as_7_avg1, df_as_7_label], axis=1, ignore_index=True)
    df_as_7_avg = df_as_7_avg.append(df_as_7_avg_t)
# set the title of columns
df_as_7_avg.columns = np.append(np.arange(7), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_as_7_avg = df_as_7_avg.sort_values(by=["Condition", "Subject"])
df_as_7_avg = df_as_7_avg.reset_index(drop=True)
'''
'''
# set a normalized AS
df_as_7_avg_data= pd.DataFrame(df_as_7_avg.iloc[:, 0:7].astype(float))
df_as_7_avg_sum= pd.DataFrame(df_as_7_avg.iloc[:, 0:7]).sum(axis=1)
df_as_7_avg_label= pd.DataFrame(df_as_7_avg.iloc[:, 7:14])
# normalize
df_as_7_avg_norm = df_as_7_avg_data.div(df_as_7_avg_sum, axis=0)
# add label
df_as_7_avg_norm = pd.concat([df_as_7_avg_norm, df_as_7_avg_label], axis=1, ignore_index=True)
'''
# normalization
# Energy-normalise the concatenated waveform and the 535-bin / 1300-bin spectra.
# NOTE(review): `iloc[0:4096]` below slices ROWS, but this frame has 44 rows;
# the intent was probably `iloc[:, 0:4096]` (columns) — confirm before reuse.
df_EFR_avg_85_aenu_norm = df_EFR_avg_85_aenu.div((df_EFR_avg_85_aenu.iloc[0:4096].abs()**2).sum())
df_aenu_as_85_1300_norm = df_aenu_as_85.iloc[:, :535].div((df_aenu_as_85.iloc[:, :535].abs()**2).sum()/1300)
df_as_85_1300_aenu_norm = df_as_85_1300_aenu.div((df_as_85_1300_aenu.abs()**2).sum()/1300)
# Calculate correlation
# EFR
# Pearson correlation between recordings in the time domain. The
# iloc[22:44, 0:22] slice keeps the second half of the 44 recordings against
# the first half (presumably retest vs test — see the commented *_t / *_re
# variants below for the within-half quadrants).
corr_EFR_avg_85_a = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_e = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_n = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_u = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_aenu = df_EFR_avg_85_aenu.iloc[:, 0:4096].T.corr(method='pearson').iloc[22:44, 0:22]
'''
corr_EFR_avg_85_a_t = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_e_t = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_n_t = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_u_t = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_a_re = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_e_re = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_n_re = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_u_re = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
'''
# AS
# Pearson correlation between recordings in the frequency domain, using the
# first 1300 padded bins; iloc[22:44, 0:22] keeps the cross-half quadrant
# (second half of the 44 recordings against the first half).
corr_as_85_a = df_as_85.iloc[0:44, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_e = df_as_85.iloc[44:88, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_n = df_as_85.iloc[88:132, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_u = df_as_85.iloc[132:176, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
# same, for the Hamming-windowed spectra
corr_as_win_85_a = df_as_win_85.iloc[0:44, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_e = df_as_win_85.iloc[44:88, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_n = df_as_win_85.iloc[88:132, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_u = df_as_win_85.iloc[132:176, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_aenu = df_aenu_as_85.iloc[0:44, 0:2048].T.corr(method='pearson').iloc[22:44, 0:22]
# here we use df_aenu_as_85.iloc[:, 0:535] to limit freq into 0 to 1300Hz
corr_as_85_aenu_1300 = df_aenu_as_85.iloc[0:44, 0:535].T.corr(method='pearson').iloc[22:44, 0:22]
# FIX: this statement appeared twice verbatim in the original; the redundant
# second computation has been removed.
corr_as_85_no0_aenu = df_as_85_no0_aenu.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_aenu = df_as7_85_aenu.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22]
corr_aenu_as7_85 = df_aenu_as7_85.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22]
# calculate the improved PCC matrix
# improved_PCC is a helper defined elsewhere in this file; applied per vowel
# block and to the stacked 1300-bin AENU spectra.
corr_as_85_a_v2 = improved_PCC(df_as_85.iloc[0:44, 0:1300])
corr_as_85_e_v2 = improved_PCC(df_as_85.iloc[44:88, 0:1300])
corr_as_85_n_v2 = improved_PCC(df_as_85.iloc[88:132, 0:1300])
corr_as_85_u_v2 = improved_PCC(df_as_85.iloc[132:176, 0:1300])
corr_as_85_1300_aenu = improved_PCC(df_as_85_1300_aenu)
# df_EFR + df_aenu_AS_1300
# Combined feature vectors: time-domain waveform (4096 cols) concatenated with
# a spectrum variant, then correlated the same way as above.
df_aenu_sum_85 = pd.concat([df_EFR_avg_85_aenu, df_aenu_as_85.iloc[:, :535]], axis=1)
# df_aenu_sum_85 = pd.concat([df_EFR_avg_85_aenu_norm, df_aenu_as_85_1300_norm], axis=1)
corr_sum_85_aenu = df_aenu_sum_85.iloc[0:44, 0:].T.corr(method='pearson').iloc[22:44, 0:22]
# df_EFR + df_aenu_no0_as
df_aenu_sum_85_v2 = pd.concat([df_EFR_avg_85_aenu, df_as_85_no0_aenu], axis=1)
corr_sum_85_aenu_v2 = df_aenu_sum_85_v2.iloc[0:44, 0:].T.corr(method='pearson').iloc[22:44, 0:22]
# concatenate df_EFR and df_as_85_1300_aenu
df_aenu_sum_85_v3 = pd.concat([df_EFR_avg_85_aenu, df_as_85_1300_aenu], axis=1)
# df_aenu_sum_85_v3 = pd.concat([df_EFR_avg_85_aenu_norm, df_as_85_1300_aenu_norm], axis=1)
corr_sum_85_aenu_v3 = df_aenu_sum_85_v3.iloc[0:44, 0:].T.corr(method='pearson').iloc[22:44, 0:22]
# improved PCC (not remove mean for as)
# test for do not removing the mean of PCC
# Accumulator and input frame for the hand-rolled PCC loop that follows.
corr_sum_85_aenu_v4 = pd.DataFrame()
signal_in = df_aenu_sum_85_v3
# Hand-rolled "improved PCC" over the combined time+frequency features: the
# EFR part (cols 0:4096) is mean-removed and energy-normalised, the AS part
# (cols 4096:) keeps its mean but is energy-normalised to the 1300-bin band,
# then a normalised cross-correlation is computed for every recording pair.
for i in range(44):
    row_pcc_notremovemean = []
    for j in range(44):
        sig_1 = signal_in.iloc[i, :].reset_index(drop=True)
        sig_2 = signal_in.iloc[j, :].reset_index(drop=True)
        sig_1_remove_mean = (sig_1 - sig_1.mean()).reset_index(drop=True)
        sig_2_remove_mean = (sig_2 - sig_2.mean()).reset_index(drop=True)
        # here EFR removes the mean but AS does not;
        # then normalize the energy of EFR and AS
        sig_1_p1 = sig_1_remove_mean.iloc[0:4096].div((sig_1_remove_mean.iloc[0:4096].abs()**2).sum())
        sig_1_p2 = sig_1.iloc[4096:].div((sig_1.iloc[4096:].abs()**2).sum()/1300)
        sig_1_new = pd.concat([sig_1_p1, sig_1_p2])
        sig_2_p1 = sig_2_remove_mean.iloc[0:4096].div((sig_2_remove_mean.iloc[0:4096].abs()**2).sum())
        sig_2_p2 = sig_2.iloc[4096:].div((sig_2.iloc[4096:].abs()**2).sum()/1300)
        sig_2_new = pd.concat([sig_2_p1, sig_2_p2])
        # normalised cross-correlation without removing the AS mean
        pcc_notremovemean = np.abs(np.sum(sig_1_new * sig_2_new) / np.sqrt(np.sum(sig_1_new*sig_1_new) * np.sum(sig_2_new * sig_2_new)))
        row_pcc_notremovemean = np.append(row_pcc_notremovemean, pcc_notremovemean)
        # example: plot one pair for visual inspection.
        # BUGFIX: the original condition `i==4 & j==5` parses as the chained
        # comparison `i == (4 & j) == 5` (bitwise & binds tighter than ==),
        # which is never True, so this debug plot was dead code; it also used
        # an undefined name `pcc`, which is now computed inside the branch.
        if i == 4 and j == 5:
            # classic PCC (both signals mean-removed) for the second title
            pcc = np.abs(np.sum(sig_1_remove_mean * sig_2_remove_mean) /
                         np.sqrt(np.sum(sig_1_remove_mean*sig_1_remove_mean) * np.sum(sig_2_remove_mean * sig_2_remove_mean)))
            plt.figure(1)
            ax1 = plt.subplot(211)
            ax1.plot(sig_1)
            ax1.plot(sig_2)
            ax2 = plt.subplot(212)
            ax2.plot(sig_1_remove_mean)
            ax2.plot(sig_2_remove_mean)
            ax1.set_title("original signal, norm corr = %.3f" % pcc_notremovemean)
            ax2.set_title("signal with mean removed(PCC), norm corr = %.3f" % pcc)
            plt.tight_layout()
            ax1.grid(True)
            ax2.grid(True)
            plt.show()
    corr_sum_85_aenu_v4 = corr_sum_85_aenu_v4.append(pd.DataFrame(row_pcc_notremovemean.reshape(1,44)), ignore_index=True)
# keep only the cross-half quadrant, as for the other correlation matrices
corr_sum_85_aenu_v4 = corr_sum_85_aenu_v4.iloc[22:44, 0:22]
# Commented-out full-bandwidth / within-half variants, kept for reference.
'''
corr_as_85_a_t = df_as_85.iloc[0:44, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_e_t = df_as_85.iloc[44:88, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_n_t = df_as_85.iloc[88:132, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_u_t = df_as_85.iloc[132:176, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_a_re = df_as_85.iloc[0:44, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_e_re = df_as_85.iloc[44:88, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_n_re = df_as_85.iloc[88:132, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_u_re = df_as_85.iloc[132:176, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
'''
#AS7
# Cross-half correlation of the 7-bin spectral summaries, per vowel.
corr_as7_85_a = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_e = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_n = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_u = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
'''
corr_as7_85_a_t = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_e_t = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_n_t = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_u_t = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_a_re = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_e_re = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_n_re = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_u_re = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
'''
# shrink
# shrink the correlation range from 0.3 to 1
# (shrink_value_03_1 / shrink_value_05_1 are helpers defined elsewhere in this file)
# EFR
'''
corr_EFR_avg_85_a_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_a)
corr_EFR_avg_85_e_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_e)
corr_EFR_avg_85_n_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_n)
corr_EFR_avg_85_u_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_u)
'''
corr_EFR_avg_85_aenu_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_aenu)
# AS
'''
corr_as_win_85_a_shrink_03_1 = shrink_value_03_1(corr_as_win_85_a)
corr_as_win_85_e_shrink_03_1 = shrink_value_03_1(corr_as_win_85_e)
corr_as_win_85_n_shrink_03_1 = shrink_value_03_1(corr_as_win_85_n)
corr_as_win_85_u_shrink_03_1 = shrink_value_03_1(corr_as_win_85_u)
'''
corr_as_85_aenu_shrink_03_1 = shrink_value_03_1(corr_as_85_aenu)
# shrink the correlation range from 0.5 to 1
# EFR
corr_EFR_avg_85_aenu_shrink_05_1 = shrink_value_05_1(corr_EFR_avg_85_aenu)
# AS
corr_as_85_aenu_shrink_05_1 = shrink_value_05_1(corr_as_85_aenu)
# test
# sum of time and frequency correlation matrix
corr_sum_avg_85_aenu = (corr_EFR_avg_85_aenu + corr_as_85_aenu_1300).copy()
corr_sum_avg_85_aenu_v2 = (corr_EFR_avg_85_aenu + corr_as_85_no0_aenu).copy()
#corr_sum_avg_85_aenu = (corr_EFR_avg_85_aenu + corr_as_85_aenu).copy()
# max of time and frequency correlation matrix
# corr_max_avg_85_aenu = (corr_EFR_avg_85_aenu ? corr_as_85_aenu).copy()
# plot the figure
# Visualisation: correlation_matrix* helpers are defined elsewhere in this
# file; most single-vowel plots are commented out and only the combined
# (AENU) matrices are rendered.
'''
# Correlation Matrix
# EFR
correlation_matrix(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain')
correlation_matrix(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain')
correlation_matrix(corr_EFR_avg_85_n, 'cross correlation of 85dB n_vowel in time domain')
correlation_matrix(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain')
# AS
correlation_matrix(corr_as_85_a, 'cross correlation of 85dB a_vowel in frequency domain')
correlation_matrix(corr_as_85_e, 'cross correlation of 85dB e_vowel in frequency domain')
correlation_matrix(corr_as_85_n, 'cross correlation of 85dB n_vowel in frequency domain')
correlation_matrix(corr_as_85_u, 'cross correlation of 85dB u_vowel in frequency domain')
# AS7
correlation_matrix(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7')
correlation_matrix(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7')
correlation_matrix(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency domain 7')
correlation_matrix(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7')
# Correlation Matrix witn 0 and 1
# EFR
correlation_matrix_01(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain')
#correlation_matrix_tt_01(corr_EFR_avg_85_a_t, 'cross correlation of 85dB a_vowel in time domain')
#correlation_matrix_rr_01(corr_EFR_avg_85_a_re, 'cross correlation of 85dB a_vowel in time domain')
correlation_matrix_01(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain')
#correlation_matrix_tt_01(corr_EFR_avg_85_e_t, 'cross correlation of 85dB e_vowel in time domain')
#correlation_matrix_rr_01(corr_EFR_avg_85_e_re, 'cross correlation of 85dB e_vowel in time domain')
correlation_matrix_01(corr_EFR_avg_85_n, 'cross correlation of 85dB n_vowel in time domain')
#correlation_matrix_tt_01(corr_EFR_avg_85_n_t, 'cross correlation of 85dB n_vowel in time domain')
#correlation_matrix_rr_01(corr_EFR_avg_85_n_re, 'cross correlation of 85dB n_vowel in time domain')
correlation_matrix_01(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain')
#correlation_matrix_tt_01(corr_EFR_avg_85_u_t, 'cross correlation of 85dB u_vowel in time domain')
#correlation_matrix_rr_01(corr_EFR_avg_85_u_re, 'cross correlation of 85dB u_vowel in time domain')
# Amplitude Spectrum
correlation_matrix_01(corr_as_85_a, 'cross correlation of 85dB a_vowel in frequency domain')
#correlation_matrix_tt_01(corr_as_85_a_t, 'cross correlation of 85dB a_vowel in frequency domain')
#correlation_matrix_rr_01(corr_as_85_a_re, 'cross correlation of 85dB a_vowel in frequency domain')
correlation_matrix_01(corr_as_85_e, 'cross correlation of 85dB e_vowel in frequency domain')
#correlation_matrix_tt_01(corr_as_85_e_t, 'cross correlation of 85dB e_vowel in frequency domain')
#correlation_matrix_rr_01(corr_as_85_e_re, 'cross correlation of 85dB e_vowel in frequency domain')
correlation_matrix_01(corr_as_85_n, 'cross correlation of 85dB n_vowel in frequency domain')
#correlation_matrix_tt_01(corr_as_85_n_t, 'cross correlation of 85dB n_vowel in frequency domain')
#correlation_matrix_rr_01(corr_as_85_n_re, 'cross correlation of 85dB n_vowel in frequency domain')
correlation_matrix_01(corr_as_85_u, 'cross correlation of 85dB u_vowel in frequency domain')
#correlation_matrix_tt_01(corr_as_85_u_t, 'cross correlation of 85dB u_vowel in frequency domain')
#correlation_matrix_rr_01(corr_as_85_u_re, 'cross correlation of 85dB u_vowel in frequency domain')
# Amplitude Spectrum 7 points
correlation_matrix_01(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7')
#correlation_matrix_tt_01(corr_as7_85_a_t, 'cross correlation of 85dB a_vowel in frequency domain 7')
#correlation_matrix_rr_01(corr_as7_85_a_re, 'cross correlation of 85dB a_vowel in frequency domain 7')
correlation_matrix_01(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7')
#correlation_matrix_tt_01(corr_as7_85_e_t, 'cross correlation of 85dB e_vowel in frequency domain 7')
#correlation_matrix_rr_01(corr_as7_85_e_re, 'cross correlation of 85dB e_vowel in frequency domain 7')
correlation_matrix_01(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency domain 7')
#correlation_matrix_tt_01(corr_as7_85_n_t, 'cross correlation of 85dB n_vowel in frequency domain 7')
#correlation_matrix_rr_01(corr_as7_85_n_re, 'cross correlation of 85dB n_vowel in frequency domain 7')
correlation_matrix_01(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7')
#correlation_matrix_tt_01(corr_as7_85_u_t, 'cross correlation of 85dB u_vowel in frequency domain 7')
#correlation_matrix_rr_01(corr_as7_85_u_re, 'cross correlation of 85dB u_vowel in frequency domain 7')
'''
# Correlation Matrix_both
# EFR
'''
correlation_matrix_comb(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain')
correlation_matrix_comb(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain')
correlation_matrix_comb(corr_EFR_avg_85_n, 'cross correlation of 85dB n_vowel in time domain')
correlation_matrix_comb(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain')
'''
correlation_matrix_comb(corr_EFR_avg_85_aenu, 'cross correlation of 85dB aenu in time domain')
correlation_matrix_comb(corr_EFR_avg_85_aenu_shrink_03_1, 'cross correlation of shrinked(0.3, 1) 85dB aenu in time domain')
correlation_matrix_comb(corr_EFR_avg_85_aenu_shrink_05_1, 'cross correlation of shrinked(0.5, 1) 85dB aenu in time domain')
# AS
'''
correlation_matrix_comb(corr_as_85_a, 'cross correlation of 85dB a_vowel in frequency domain')
correlation_matrix_comb(corr_as_85_e, 'cross correlation of 85dB e_vowel in frequency domain')
correlation_matrix_comb(corr_as_85_n, 'cross correlation of 85dB n_vowel in frequency domain')
correlation_matrix_comb(corr_as_85_u, 'cross correlation of 85dB u_vowel in frequency domain')
'''
correlation_matrix_comb(corr_as_85_a_v2, 'cross correlation of 85dB a_vowel in frequency domain (improved PCC)')
correlation_matrix_comb(corr_as_85_e_v2, 'cross correlation of 85dB e_vowel in frequency domain (improved PCC)')
correlation_matrix_comb(corr_as_85_n_v2, 'cross correlation of 85dB n_vowel in frequency domain (improved PCC)')
correlation_matrix_comb(corr_as_85_u_v2, 'cross correlation of 85dB u_vowel in frequency domain (improved PCC)')
'''
correlation_matrix_comb(corr_as_win_85_a, 'cross correlation of 85dB a_vowel in frequency domain(hamming)')
correlation_matrix_comb(corr_as_win_85_e, 'cross correlation of 85dB e_vowel in frequency domain(hamming)')
correlation_matrix_comb(corr_as_win_85_n, 'cross correlation of 85dB n_vowel in frequency domain(hamming)')
correlation_matrix_comb(corr_as_win_85_u, 'cross correlation of 85dB u_vowel in frequency domain(hamming)')
'''
# no zero-padding
correlation_matrix_comb(corr_as_85_no0_aenu, 'cross correlation of 85dB aenu in frequency domain(no zero padding)')
# aenu -> as
correlation_matrix_comb(corr_as_85_aenu, 'cross correlation of 85dB aenu in frequency domain')
correlation_matrix_comb(corr_as_85_aenu_shrink_03_1, 'cross correlation of shrinked(0.3, 1) 85dB aenu in frequency domain')
correlation_matrix_comb(corr_as_85_aenu_shrink_05_1, 'cross correlation of shrinked(0.5, 1) 85dB aenu in frequency domain')
# zero padding -> as -> 0-1300Hz -> aenu
# pcc do not remove mean
correlation_matrix_comb(corr_as_85_1300_aenu, 'cross correlation of 85dB aenu in frequency domain(version2, improved PCC)')
# AS7
'''
correlation_matrix_comb(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7')
correlation_matrix_comb(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7')
correlation_matrix_comb(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency domain 7')
correlation_matrix_comb(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7')
'''
correlation_matrix_comb(corr_as7_85_aenu, 'cross correlation of 85dB aenu in frequency domain 7(as7_aenu)')
correlation_matrix_comb(corr_aenu_as7_85, 'cross correlation of 85dB aenu in frequency domain 7(aenu_as7)')
# sum of EFR and AS
# corr_EFR + corr_AS
correlation_matrix_comb(corr_sum_avg_85_aenu, 'cross correlation of sum 85dB aenu in time and freq domain')
correlation_matrix_comb(corr_sum_avg_85_aenu_v2, 'cross correlation of sum 85dB aenu in time and freq domain(version2)')
# concat df_EFR + df_aenu_as 4096+535
correlation_matrix_comb(corr_sum_85_aenu, 'cross correlation of sum 85dB aenu in time and freq domain')
# concat df_EFR + df_as_aenu 4096+5200
correlation_matrix_comb(corr_sum_85_aenu_v3, 'cross correlation of sum 85dB aenu in time and freq domain(version3)')
# improved PCC
correlation_matrix_comb(corr_sum_85_aenu_v4, 'cross correlation of sum 85dB aenu in time and freq domain (improved PCC)')
# test
corr_sum_85_aenu_v4.style.background_gradient(cmap='coolwarm') | [
"scipy.signal.detrend",
"numpy.array",
"scipy.fftpack.fft",
"numpy.arange",
"pandas.read_pickle",
"numpy.fft.fft",
"numpy.asarray",
"pandas.concat",
"pandas.DataFrame",
"scipy.signal.kaiser",
"numpy.abs",
"scipy.signal.hamming",
"matplotlib.pyplot.title",
"matplotlib.colors.LinearSegmented... | [((10135, 10210), 'pandas.read_pickle', 'pd.read_pickle', (['"""/home/bruce/Dropbox/4.Project/4.Code for Linux/df_EFR.pkl"""'], {}), "('/home/bruce/Dropbox/4.Project/4.Code for Linux/df_EFR.pkl')\n", (10149, 10210), True, 'import pandas as pd\n'), ((10366, 10380), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10378, 10380), True, 'import pandas as pd\n'), ((11024, 11052), 'scipy.signal.kaiser', 'signal.kaiser', (['(1024)'], {'beta': '(14)'}), '(1024, beta=14)\n', (11037, 11052), False, 'from scipy import signal\n'), ((11067, 11087), 'scipy.signal.hamming', 'signal.hamming', (['(1024)'], {}), '(1024)\n', (11081, 11087), False, 'from scipy import signal\n'), ((11123, 11137), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11135, 11137), True, 'import pandas as pd\n'), ((11155, 11169), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11167, 11169), True, 'import pandas as pd\n'), ((13003, 13048), 'pandas.DataFrame', 'pd.DataFrame', (['df_EFR_avg_sorted.iloc[528:, :]'], {}), '(df_EFR_avg_sorted.iloc[528:, :])\n', (13015, 13048), True, 'import pandas as pd\n'), ((13122, 13171), 'pandas.DataFrame', 'pd.DataFrame', (['df_EFR_avg_win_sorted.iloc[528:, :]'], {}), '(df_EFR_avg_win_sorted.iloc[528:, :])\n', (13134, 13171), True, 'import pandas as pd\n'), ((15393, 15405), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (15402, 15405), True, 'import numpy as np\n'), ((15497, 15510), 'numpy.arange', 'np.arange', (['n2'], {}), '(n2)\n', (15506, 15510), True, 'import numpy as np\n'), ((15752, 15810), 'pandas.concat', 'pd.concat', (['[df_EFR_data, df_EFR_mid, df_EFR_label]'], {'axis': '(1)'}), '([df_EFR_data, df_EFR_mid, df_EFR_label], axis=1)\n', (15761, 15810), True, 'import pandas as pd\n'), ((16160, 16239), 'pandas.concat', 'pd.concat', (['[df_EFR_avg_85_data, df_EFR_avg_85_mid, df_EFR_avg_85_label]'], {'axis': '(1)'}), '([df_EFR_avg_85_data, df_EFR_avg_85_mid, df_EFR_avg_85_label], axis=1)\n', (16169, 16239), 
True, 'import pandas as pd\n'), ((16619, 16714), 'pandas.concat', 'pd.concat', (['[df_EFR_avg_win_85_data, df_EFR_avg_win_85_mid, df_EFR_avg_win_85_label]'], {'axis': '(1)'}), '([df_EFR_avg_win_85_data, df_EFR_avg_win_85_mid,\n df_EFR_avg_win_85_label], axis=1)\n', (16628, 16714), True, 'import pandas as pd\n'), ((17182, 17234), 'pandas.concat', 'pd.concat', (['[temp1, temp2]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([temp1, temp2], axis=1, ignore_index=True)\n', (17191, 17234), True, 'import pandas as pd\n'), ((19405, 19419), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (19417, 19419), True, 'import pandas as pd\n'), ((19430, 19444), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (19442, 19444), True, 'import pandas as pd\n'), ((19456, 19470), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (19468, 19470), True, 'import pandas as pd\n'), ((19485, 19499), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (19497, 19499), True, 'import pandas as pd\n'), ((19515, 19529), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (19527, 19529), True, 'import pandas as pd\n'), ((20749, 20803), 'pandas.concat', 'pd.concat', (['[df_as_85_no0, df_EFR_avg_85_label]'], {'axis': '(1)'}), '([df_as_85_no0, df_EFR_avg_85_label], axis=1)\n', (20758, 20803), True, 'import pandas as pd\n'), ((20834, 20884), 'pandas.concat', 'pd.concat', (['[df_as_85, df_EFR_avg_85_label]'], {'axis': '(1)'}), '([df_as_85, df_EFR_avg_85_label], axis=1)\n', (20843, 20884), True, 'import pandas as pd\n'), ((20916, 20967), 'pandas.concat', 'pd.concat', (['[df_as7_85, df_EFR_avg_85_label]'], {'axis': '(1)'}), '([df_as7_85, df_EFR_avg_85_label], axis=1)\n', (20925, 20967), True, 'import pandas as pd\n'), ((21002, 21060), 'pandas.concat', 'pd.concat', (['[df_as_win_85, df_EFR_avg_win_85_label]'], {'axis': '(1)'}), '([df_as_win_85, df_EFR_avg_win_85_label], axis=1)\n', (21011, 21060), True, 'import pandas as pd\n'), ((21096, 21155), 'pandas.concat', 'pd.concat', 
(['[df_as7_win_85, df_EFR_avg_win_85_label]'], {'axis': '(1)'}), '([df_as7_win_85, df_EFR_avg_win_85_label], axis=1)\n', (21105, 21155), True, 'import pandas as pd\n'), ((22576, 22590), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (22588, 22590), True, 'import pandas as pd\n'), ((22608, 22622), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (22620, 22622), True, 'import pandas as pd\n'), ((27783, 27851), 'pandas.concat', 'pd.concat', (['[df_EFR_avg_85_aenu, df_aenu_as_85.iloc[:, :535]]'], {'axis': '(1)'}), '([df_EFR_avg_85_aenu, df_aenu_as_85.iloc[:, :535]], axis=1)\n', (27792, 27851), True, 'import pandas as pd\n'), ((28080, 28138), 'pandas.concat', 'pd.concat', (['[df_EFR_avg_85_aenu, df_as_85_no0_aenu]'], {'axis': '(1)'}), '([df_EFR_avg_85_aenu, df_as_85_no0_aenu], axis=1)\n', (28089, 28138), True, 'import pandas as pd\n'), ((28303, 28362), 'pandas.concat', 'pd.concat', (['[df_EFR_avg_85_aenu, df_as_85_1300_aenu]'], {'axis': '(1)'}), '([df_EFR_avg_85_aenu, df_as_85_1300_aenu], axis=1)\n', (28312, 28362), True, 'import pandas as pd\n'), ((28662, 28676), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28674, 28676), True, 'import pandas as pd\n'), ((490, 502), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (500, 502), True, 'from matplotlib import pyplot as plt\n'), ((728, 747), 'matplotlib.pyplot.title', 'plt.title', (['cm_title'], {}), '(cm_title)\n', (737, 747), True, 'from matplotlib import pyplot as plt\n'), ((1472, 1491), 'numpy.asarray', 'np.asarray', (['corr_mx'], {}), '(corr_mx)\n', (1482, 1491), True, 'import numpy as np\n'), ((1661, 1671), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1669, 1671), True, 'from matplotlib import pyplot as plt\n'), ((1884, 1903), 'numpy.asarray', 'np.asarray', (['corr_mx'], {}), '(corr_mx)\n', (1894, 1903), True, 'import numpy as np\n'), ((1976, 1988), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1986, 1988), True, 'from matplotlib import pyplot as plt\n'), 
((2211, 2230), 'matplotlib.pyplot.title', 'plt.title', (['cm_title'], {}), '(cm_title)\n', (2220, 2230), True, 'from matplotlib import pyplot as plt\n'), ((2715, 2725), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2723, 2725), True, 'from matplotlib import pyplot as plt\n'), ((2923, 2937), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2935, 2937), True, 'from matplotlib import pyplot as plt\n'), ((3544, 3563), 'matplotlib.pyplot.title', 'plt.title', (['cm_title'], {}), '(cm_title)\n', (3553, 3563), True, 'from matplotlib import pyplot as plt\n'), ((3604, 3622), 'numpy.asarray', 'np.asarray', (['output'], {}), '(output)\n', (3614, 3622), True, 'import numpy as np\n'), ((3768, 3778), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3776, 3778), True, 'from matplotlib import pyplot as plt\n'), ((3853, 3871), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (3865, 3871), True, 'from matplotlib import pyplot as plt\n'), ((4889, 4952), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""mycmap"""', "['white', 'black']"], {}), "('mycmap', ['white', 'black'])\n", (4922, 4952), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((5425, 5449), 'numpy.asarray', 'np.asarray', (['corr_mx_rank'], {}), '(corr_mx_rank)\n', (5435, 5449), True, 'import numpy as np\n'), ((5775, 5794), 'numpy.asarray', 'np.asarray', (['corr_mx'], {}), '(corr_mx)\n', (5785, 5794), True, 'import numpy as np\n'), ((6199, 6209), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6207, 6209), True, 'from matplotlib import pyplot as plt\n'), ((6421, 6440), 'numpy.asarray', 'np.asarray', (['corr_mx'], {}), '(corr_mx)\n', (6431, 6440), True, 'import numpy as np\n'), ((6513, 6525), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6523, 6525), True, 'from matplotlib import pyplot as plt\n'), ((6750, 6769), 'matplotlib.pyplot.title', 'plt.title', (['cm_title'], 
{}), '(cm_title)\n', (6759, 6769), True, 'from matplotlib import pyplot as plt\n'), ((7254, 7264), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7262, 7264), True, 'from matplotlib import pyplot as plt\n'), ((7475, 7494), 'numpy.asarray', 'np.asarray', (['corr_mx'], {}), '(corr_mx)\n', (7485, 7494), True, 'import numpy as np\n'), ((7567, 7579), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7577, 7579), True, 'from matplotlib import pyplot as plt\n'), ((7804, 7823), 'matplotlib.pyplot.title', 'plt.title', (['cm_title'], {}), '(cm_title)\n', (7813, 7823), True, 'from matplotlib import pyplot as plt\n'), ((8308, 8318), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8316, 8318), True, 'from matplotlib import pyplot as plt\n'), ((9297, 9311), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9309, 9311), True, 'import pandas as pd\n'), ((10813, 10828), 'numpy.arange', 'np.arange', (['(1024)'], {}), '(1024)\n', (10822, 10828), True, 'import numpy as np\n'), ((12015, 12030), 'numpy.arange', 'np.arange', (['(1024)'], {}), '(1024)\n', (12024, 12030), True, 'import numpy as np\n'), ((12254, 12269), 'numpy.arange', 'np.arange', (['(1024)'], {}), '(1024)\n', (12263, 12269), True, 'import numpy as np\n'), ((15709, 15732), 'numpy.zeros', 'np.zeros', (['(1408, 95036)'], {}), '((1408, 95036))\n', (15717, 15732), True, 'import numpy as np\n'), ((15864, 15880), 'numpy.arange', 'np.arange', (['(96060)'], {}), '(96060)\n', (15873, 15880), True, 'import numpy as np\n'), ((16112, 16133), 'numpy.zeros', 'np.zeros', (['(176, 8582)'], {}), '((176, 8582))\n', (16120, 16133), True, 'import numpy as np\n'), ((16300, 16315), 'numpy.arange', 'np.arange', (['(9606)'], {}), '(9606)\n', (16309, 16315), True, 'import numpy as np\n'), ((16567, 16588), 'numpy.zeros', 'np.zeros', (['(176, 8582)'], {}), '((176, 8582))\n', (16575, 16588), True, 'import numpy as np\n'), ((16758, 16773), 'numpy.arange', 'np.arange', (['(9606)'], {}), '(9606)\n', (16767, 16773), 
True, 'import numpy as np\n'), ((10676, 10749), 'pandas.concat', 'pd.concat', (['[df_EFR_detrend_data, df_EFR_label]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True)\n', (10685, 10749), True, 'import pandas as pd\n'), ((11766, 11832), 'pandas.concat', 'pd.concat', (['[df_EFR_avg_t, df_EFR_label]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([df_EFR_avg_t, df_EFR_label], axis=1, ignore_index=True)\n', (11775, 11832), True, 'import pandas as pd\n'), ((11877, 11950), 'pandas.concat', 'pd.concat', (['[df_EFR_avg_t_window, df_EFR_label]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([df_EFR_avg_t_window, df_EFR_label], axis=1, ignore_index=True)\n', (11886, 11950), True, 'import pandas as pd\n'), ((29389, 29420), 'pandas.concat', 'pd.concat', (['[sig_1_p1, sig_1_p2]'], {}), '([sig_1_p1, sig_1_p2])\n', (29398, 29420), True, 'import pandas as pd\n'), ((29635, 29666), 'pandas.concat', 'pd.concat', (['[sig_2_p1, sig_2_p2]'], {}), '([sig_2_p1, sig_2_p2])\n', (29644, 29666), True, 'import pandas as pd\n'), ((30366, 30417), 'numpy.append', 'np.append', (['row_pcc_notremovemean', 'pcc_notremovemean'], {}), '(row_pcc_notremovemean, pcc_notremovemean)\n', (30375, 30417), True, 'import numpy as np\n'), ((9637, 9688), 'numpy.append', 'np.append', (['row_pcc_notremovemean', 'pcc_notremovemean'], {}), '(row_pcc_notremovemean, pcc_notremovemean)\n', (9646, 9688), True, 'import numpy as np\n'), ((17311, 17332), 'numpy.zeros', 'np.zeros', (['(44, 36864)'], {}), '((44, 36864))\n', (17319, 17332), True, 'import numpy as np\n'), ((19639, 19680), 'numpy.fft.fft', 'np.fft.fft', (['df_EFR_avg_85_data.iloc[i, :]'], {}), '(df_EFR_avg_85_data.iloc[i, :])\n', (19649, 19680), True, 'import numpy as np\n'), ((19825, 19875), 'numpy.fft.fft', 'np.fft.fft', (['df_EFR_avg_85_withzero.iloc[i, 0:9606]'], {}), '(df_EFR_avg_85_withzero.iloc[i, 0:9606])\n', (19835, 19875), True, 'import numpy as np\n'), ((20266, 20320), 'numpy.fft.fft', 
'np.fft.fft', (['df_EFR_avg_win_85_withzero.iloc[i, 0:9606]'], {}), '(df_EFR_avg_win_85_withzero.iloc[i, 0:9606])\n', (20276, 20320), True, 'import numpy as np\n'), ((30525, 30538), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (30535, 30538), True, 'from matplotlib import pyplot as plt\n'), ((30557, 30573), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (30568, 30573), True, 'from matplotlib import pyplot as plt\n'), ((30661, 30677), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (30672, 30677), True, 'from matplotlib import pyplot as plt\n'), ((30950, 30968), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30966, 30968), True, 'from matplotlib import pyplot as plt\n'), ((31035, 31045), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31043, 31045), True, 'from matplotlib import pyplot as plt\n'), ((9229, 9241), 'numpy.abs', 'np.abs', (['a_oo'], {}), '(a_oo)\n', (9235, 9241), True, 'import numpy as np\n'), ((10476, 10537), 'scipy.signal.detrend', 'signal.detrend', (['df_EFR.iloc[i:i + 1, 0:1024]'], {'type': '"""constant"""'}), "(df_EFR.iloc[i:i + 1, 0:1024], type='constant')\n", (10490, 10537), False, 'from scipy import signal\n'), ((22728, 22775), 'scipy.fftpack.fft', 'fftpack.fft', (['df_EFR_avg_85_aenu.iloc[i, 0:4096]'], {}), '(df_EFR_avg_85_aenu.iloc[i, 0:4096])\n', (22739, 22775), False, 'from scipy import fftpack\n'), ((30223, 30252), 'numpy.sum', 'np.sum', (['(sig_1_new * sig_2_new)'], {}), '(sig_1_new * sig_2_new)\n', (30229, 30252), True, 'import numpy as np\n'), ((9523, 9544), 'numpy.sum', 'np.sum', (['(sig_1 * sig_2)'], {}), '(sig_1 * sig_2)\n', (9529, 9544), True, 'import numpy as np\n'), ((20033, 20146), 'numpy.array', 'np.array', (['[temp_as[100], temp_as[200], temp_as[300], temp_as[400], temp_as[500],\n temp_as[600], temp_as[700]]'], {}), '([temp_as[100], temp_as[200], temp_as[300], temp_as[400], temp_as[\n 500], temp_as[600], temp_as[700]])\n', 
(20041, 20146), True, 'import numpy as np\n'), ((20498, 20639), 'numpy.array', 'np.array', (['[temp_as_win[100], temp_as_win[200], temp_as_win[300], temp_as_win[400],\n temp_as_win[500], temp_as_win[600], temp_as_win[700]]'], {}), '([temp_as_win[100], temp_as_win[200], temp_as_win[300], temp_as_win\n [400], temp_as_win[500], temp_as_win[600], temp_as_win[700]])\n', (20506, 20639), True, 'import numpy as np\n'), ((22961, 23078), 'numpy.array', 'np.array', (['[temp_as2[43], temp_as2[85], temp_as2[128], temp_as2[170], temp_as2[213],\n temp_as2[256], temp_as2[298]]'], {}), '([temp_as2[43], temp_as2[85], temp_as2[128], temp_as2[170],\n temp_as2[213], temp_as2[256], temp_as2[298]])\n', (22969, 23078), True, 'import numpy as np\n'), ((30263, 30292), 'numpy.sum', 'np.sum', (['(sig_1_new * sig_1_new)'], {}), '(sig_1_new * sig_1_new)\n', (30269, 30292), True, 'import numpy as np\n'), ((30293, 30322), 'numpy.sum', 'np.sum', (['(sig_2_new * sig_2_new)'], {}), '(sig_2_new * sig_2_new)\n', (30299, 30322), True, 'import numpy as np\n'), ((9555, 9576), 'numpy.sum', 'np.sum', (['(sig_1 * sig_1)'], {}), '(sig_1 * sig_1)\n', (9561, 9576), True, 'import numpy as np\n'), ((9577, 9598), 'numpy.sum', 'np.sum', (['(sig_2 * sig_2)'], {}), '(sig_2 * sig_2)\n', (9583, 9598), True, 'import numpy as np\n')] |
"""
MouseRobotEKF 自己实现的一个EKF算法,
此算法中把鼠标作为机器人控制信号,为机器人提供运动需要的加速度和方向向量: M_vector,(加速度就用鼠标的加速度a,方向也是鼠标运动的方向);
同时,鼠标的当前位置和当前速度,也作为机器人的测量传感器使用Z_k(P,V)
Note: 以下: 当前时刻: cur or k-1 下一时刻: next or k
1.状态向量定义:
机器人根据鼠标的运动产生自己的位置和速度两个状态向量state_x(Position(x,y),Velocity(x,y)),即: X(P,V);
估计值向量: X= [ Px
Vx
Py
Vy]
估计值协方差矩阵: P =[ Cov(Px,Px) Cov(Px,Vx) Cov(Px,Py) Cov(Px,Vy)
....
....
.... ] # Note: Cov(x,y)参见协方差公式
2.根据运动学分析出预测方程:
p_next_x = p_cur_x + delta_t * v_cur_x #---> [1, delta_t ]
v_next_x = 0 + v_cur_x #---> [0, 1 ] 先假设匀速运动
p_next_y = p_cur_y + delta_t * v_cur_y #---> [1, delta_t ]
v_next_y = 0 + v_cur_y #---> [0, 1 ] 先假设匀速运动
用矩阵表示估计向量
(1) X_next = [ 1 delta_t 0 0
0 1 0 0
0 0 1 delta_t
0 0 0 1] * X_cur = F_k * X_cur
上面,预测矩阵 或 估计矩阵 或 状态转移矩阵:
F_k = [ 1 delta_t 0 0
0 1 0 0
0 0 1 delta_t
0 0 0 1 ]
根据协方差推到公式: Cov(Ax) = A * Cov(x) * A_T 依据此,就可以更新估计值协方差矩阵P
(1) X_next = F_k * X_cur # 状态向量 方程
(2) P_next = F_k * P_cur * F_k_T # 估计值协方差矩阵 方程
*************暂不添加外部控制向量,比如这里的加速度a(a_x,a_y), 通过EKF的变量ENABLE_ACCELERATION设置***************
进一步,设鼠标的加速度为a,分为x方向和y方向两个加速度分量, 根据前面控制要求: 实际控制加速度u是鼠标加速度a(a_x,a_y), 有:
p_next_x = p_cur_x + delta_t * v_cur_x + 1/2 * a_x * delta_t^2 #---> [ 1/2 * delta_t^2 ] 运动学位移公式: S1 = s0 +V0 + 1/2at^2
v_next_x = 0 + v_cur_x + a_x * delta_t #---> [ delta_t ] 运动学速度公式: V1 = V0 + at
p_next_y = p_cur_y + delta_t * v_cur_y + 1/2 * a_y * delta_t^2 #---> [ 1/2 * delta_t^2 ] 运动学位移公式: S1 = s0 +V0 + 1/2at^2
v_next_y = 0 + v_cur_y + a_y * delta_t #---> [ delta_t ] 运动学速度公式: V1 = V0 + at
得新的矩阵表示估计向量形式:
(1) X_next = F_k * X_cur + [delta_t^2/2
delta_t
delta_t^2/2
delta_t ] * a
= F_k * X_cur + B_k * U_k
这里控制矩阵B_k: [ delta_t^2/2
delta_t
delta_t^2/2
delta_t ]
控制变量矩阵(n*1)U_k: a
*******************************************************************************************
因为机器人行进过程中,会受到外部噪声影响,添加噪声协方差矩阵Q_k后:
#根据上一时刻系统状态和当前时刻系统控制量所得到的系统估计值,该估计值又叫做先验估计值,为各变量高斯分布的均值.
(1) X_next = F_k * X_cur + B_k * U_k # 状态向量预测方程,先验估计值,为各变量高斯分布的均值
#协方差矩阵,代表了不确定性,它是由上一时刻的协方差矩阵和外部噪声的协方差一起计算得到的.
(2) P_next = F_k * P_cur * F_k_T + Q_k # 估计值协方差矩阵 方程
这两个公式代表了卡尔曼滤波器中的预测部分,是卡尔曼滤波器五个基础公式中的前两个.
(3),(4),(5)详见tinyekf包
"""
import math
import numpy as np
from tinyekf import EKF
# NOTE: enable the acceleration option with care: the acceleration is not
# coupled to the velocity direction, so it can drive rapid drift in one direction.
ENABLE_ACCELERATION = False
class RobotEKF(EKF):
    """
    An EKF for mouse tracking.

    The mouse supplies both the control input (acceleration and direction)
    and the measurement (position and velocity).  The state vector is
    X = (Px, Vx, Py, Vy); see the module docstring for the derivation of
    the prediction equations.
    """
    def __init__(self, n, m, pval=0.1, qval=1e-4, rval=0.1, interval=10):
        """
        :param n: number of state variables
        :param m: number of measurement variables
        :param pval: initial covariance value
        :param qval: process-noise covariance value
        :param rval: measurement-noise covariance value
        :param interval: prediction update interval in ms
        """
        self.stateCount = n
        self.measurementCount = m
        self.interval = interval    # prediction update interval, ms
        self.acceleration_x = 0     # pixels/ms^2
        self.acceleration_y = 0     # pixels/ms^2
        super().__init__(n, m, pval, qval, rval)

    def update_acceleration(self, a_x, a_y):
        """Store the latest control acceleration (pixels/ms^2)."""
        self.acceleration_x = a_x
        self.acceleration_y = a_y

    def stateTransitionFunction(self, x):
        """
        Prediction step:  X_k = F_k * X_{k-1}  (+ B_k * u_k when
        ENABLE_ACCELERATION is set).

        :param x: current state vector (n x 1)
        :return: (predicted state vector, state transition matrix F_k (n x n))
        """
        # Constant-velocity kinematics: p' = p + v*dt on both axes.
        F_k = np.array([(1, self.interval, 0, 0),
                        (0, 1, 0, 0),
                        (0, 0, 1, self.interval),
                        (0, 0, 0, 1)
                        ])
        if ENABLE_ACCELERATION:
            # B_k * u_k must have the same dimension as X_k so the matrix
            # addition below is valid.
            tt = math.pow(self.interval, 2)  # no ms -> s conversion; units stay pixels/ms
            B_k_dot_u_k = np.array((int(tt/2 * self.acceleration_x),
                                      int(self.interval * self.acceleration_x),
                                      int(tt/2 * self.acceleration_y),
                                      int(self.interval * self.acceleration_y)
                                      ))
            print(f"stateTransitionFunction: B_k_dot_u_k={B_k_dot_u_k}")
            # X_k = F_k * X_k-1 + B_k * u_k
            new_x = F_k.dot(x) + B_k_dot_u_k
        else:
            new_x = F_k.dot(x)
        return new_x, F_k

    def stateToMeasurementTransitionFunction(self, x):
        """
        Measurement model: the observation equals the state (identity map),
        so H_k is the (m x m) identity matrix.

        :param x: state vector
        :return: (predicted measurement H_k * x, observation matrix H_k)
        """
        H_k = np.eye(self.measurementCount)
        return H_k.dot(x), H_k
| [
"math.pow",
"tinyekf.EKF.__init__",
"numpy.array",
"numpy.eye"
] | [((3162, 3204), 'tinyekf.EKF.__init__', 'EKF.__init__', (['self', 'n', 'm', 'pval', 'qval', 'rval'], {}), '(self, n, m, pval, qval, rval)\n', (3174, 3204), False, 'from tinyekf import EKF\n'), ((3510, 3604), 'numpy.array', 'np.array', (['[(1, self.interval, 0, 0), (0, 1, 0, 0), (0, 0, 1, self.interval), (0, 0, 0, 1)\n ]'], {}), '([(1, self.interval, 0, 0), (0, 1, 0, 0), (0, 0, 1, self.interval),\n (0, 0, 0, 1)])\n', (3518, 3604), True, 'import numpy as np\n'), ((4687, 4716), 'numpy.eye', 'np.eye', (['self.measurementCount'], {}), '(self.measurementCount)\n', (4693, 4716), True, 'import numpy as np\n'), ((3890, 3916), 'math.pow', 'math.pow', (['self.interval', '(2)'], {}), '(self.interval, 2)\n', (3898, 3916), False, 'import math\n')] |
from math import gcd, ceil, sqrt
from numpy import around, int32, zeros
from numpy.random import RandomState
from polymod import Poly
def isprime(n):
    """
    Return True if ``n`` is a prime number, False otherwise.

    >>> isprime(1109)
    True
    >>> isprime(10**6)
    False
    >>> isprime(2)
    True
    >>> isprime(1)
    False
    """
    # this is slow and basic, but saves the sympy dependency
    # alternatively, from sympy import isprime
    if n < 2:
        # 0, 1 and negatives are not prime; also avoids sqrt() of a negative.
        return False
    # min(n, ...) keeps n itself out of the trial divisors (matters for n == 2,
    # where ceil(sqrt(2)) + 1 == 3 would otherwise include the divisor 2).
    for i in range(2, min(n, ceil(sqrt(n)) + 1)):
        if n % i == 0:
            return False
    return True
class StreamlinedNTRUPrime:
"""
Training implementation of Streamlined NTRU Prime. Do not use in production.
Following https://ntruprime.cr.yp.to/nist/ntruprime-20190330.pdf
# Used Notation (in Comments and Variable Names)
R = Z[x]/(x^p - x - 1)
R/3 = (Z/3)[x]/(x^p - x - 1)
R/q = (Z/q)[x]/(x^p - x - 1)
Short = set of small weight-w elements of R
>>> p, q, w = 23, 113, 5
>>> cipher = StreamlinedNTRUPrime(p, q, w, seed=1337)
>>> pk, sk = cipher.generate_keys()
>>> message = cipher.random_small_poly(w, None, cipher.modulus_r, RandomState(42))
>>> message == cipher.decrypt(cipher.encrypt(message, pk), sk)
True
"""
@staticmethod
def co_prime(a, b):
return gcd(a, b) == 1
@staticmethod
def random_small_poly(weight, N, modulus, random_state=RandomState()):
"""
Returns a uniformly randomly chosen small polynomial from ℤ_N[x]/modulus (if N not None) or
ℤ[x]/modulus (if N is None) of weight w that is invertible in ℤ_inv_N[x]/modulus.
Small polynomials only have coefficients in {-1,0,1}.
Weight-w polynomials have exactly w non-zero coefficients.
>>> StreamlinedNTRUPrime.random_small_poly(2, None, Poly([0, 0, 0, 1]), RandomState(42)).deg < 4
True
>>> StreamlinedNTRUPrime.random_small_poly(13, None, Poly.xn1(n=44), RandomState(42)).weight
13
"""
c_values = 2 * random_state.randint(2, size=weight) - 1 # "small" non-zero coefficients, i.e. -1, 1
c_pos = random_state.choice(modulus.deg, weight, replace=False)
cs = zeros((modulus.deg + 1), dtype=int32)
for i in range(weight):
cs[c_pos[i]] = c_values[i]
return Poly(cs, N=N, modulus=modulus) # TODO confirm uniformity on the ring
def _random_small_poly_invertible(self, weight, N, modulus, inv_N):
"""
Returns a uniformly randomly chosen small polynomial from ℤ_N[x]/modulus (if N not None) or
ℤ[x]/modulus (if N is None) of weight w that is invertible in ℤ_inv_N[x]/modulus.
Small polynomials only have coefficients in {-1,0,1}.
Weight-w polynomials have exactly w non-zero coefficients.
>>> modulus = Poly.xn1(n=34, N=113)
>>> p, inv = StreamlinedNTRUPrime(23, 113, 5, seed=27182)._random_small_poly_invertible(12, None, modulus, inv_N=113)
>>> p.in_ring(N=113, modulus=modulus) * inv
1
>>> modulus = Poly.xn1(n=34, N=113)
>>> p, inv = StreamlinedNTRUPrime(23, 113, 5, seed=31415)._random_small_poly_invertible(12, 113, modulus, inv_N=113)
>>> p.in_ring(N=113, modulus=modulus) * inv
1
"""
while True:
poly = self.random_small_poly(weight, N, modulus, self.random)
try:
inverse = poly.in_ring(N=inv_N, modulus=poly.modulus.in_ring(N=inv_N)).inv()
return poly, inverse
except ValueError:
pass
def __init__(self, p, q, w, seed=None):
# check parameters (non-exhaustive)
assert q >= 17
assert w <= p
assert isprime(p)
assert isprime(q)
assert q > p
assert self.co_prime(p, q)
assert 2 * p >= 3 * w
assert q >= 16 * w + 1
# TODO assert that x^p - x - 1 is irreducible in (Z/q)[x]
# initialize object state
self.p, self.q, self.w = p, q, w
self.random = RandomState() if seed is None else RandomState(seed)
self.modulus_r = Poly.monomial(n=p) - Poly.monomial(n=1) - Poly.monomial(n=0)
self.modulus_r3 = self.modulus_r.in_ring(N=3)
self.modulus_rq = self.modulus_r.in_ring(N=q)
def generate_keys(self):
"""
>>> h, (f, g_r3_inv) = StreamlinedNTRUPrime(23, 113, 5, seed=3141).generate_keys()
>>> f.coeffs.min(), f.coeffs.max()
(-1, 1)
>>> f.weight
5
>>> h.N, f.N, g_r3_inv.N
(113, None, 3)
"""
# Generate a uniform random small element g in R, repeat until invertible in R/3
g, g_r3_inv = self._random_small_poly_invertible(self.w, None, self.modulus_r, 3)
# Generate a uniform random f in Short
f = self.random_small_poly(self.w, None, self.modulus_r, self.random)
# Compute h = g/(3f) in R/q
g_rq = g.in_ring(N=self.q, modulus=self.modulus_rq)
three_f = (3*f).in_ring(N=self.q, modulus=self.modulus_rq)
h = g_rq * three_f.inv()
return h, (f, g_r3_inv)
def encrypt(self, plain_text: Poly, public_key: Poly):
"""
>>> cipher = StreamlinedNTRUPrime(23, 113, 5, seed=3141)
>>> pk, _ = cipher.generate_keys()
>>> m = Poly([0, 1] + 6*[0] + [-1] + 6*[0] + [-1, 1, 0, 0, 0, 0, -1], N=None, modulus=cipher.modulus_r)
>>> c = cipher.encrypt(m, pk).coeffs
>>> (c / 3 == c // 3).all()
True
"""
# notation from NIST submission
r = plain_text
h = public_key
# compute h*r in R/q
hr = h.in_ring(N=self.q, modulus=self.modulus_rq) * r.in_ring(N=self.q, modulus=self.modulus_rq)
# Round(h*r)
return Poly(around(hr.coeffs / 3) * 3, N=None, modulus=self.modulus_rq)
def decrypt(self, cipher_text: Poly, secret_key):
# notation from NIST submission
f, v = secret_key
c = cipher_text
# compute 3fc in R/q
three_f_c = 3 * f.in_ring(N=self.q, modulus=self.modulus_rq) * c.in_ring(N=self.q, modulus=self.modulus_rq)
# reduce modulo three to obtain e in R/3
e = Poly(three_f_c.coeffs % 3, N=3, modulus=self.modulus_r3)
# lift e*v in R/3 to a small polynomial in R
return (e*v).in_ring(N=None, modulus=self.modulus_r)
| [
"math.gcd",
"polymod.Poly.monomial",
"polymod.Poly",
"math.sqrt",
"numpy.zeros",
"numpy.around",
"numpy.random.RandomState"
] | [((1295, 1308), 'numpy.random.RandomState', 'RandomState', ([], {}), '()\n', (1306, 1308), False, 'from numpy.random import RandomState\n'), ((2080, 2115), 'numpy.zeros', 'zeros', (['(modulus.deg + 1)'], {'dtype': 'int32'}), '(modulus.deg + 1, dtype=int32)\n', (2085, 2115), False, 'from numpy import around, int32, zeros\n'), ((2209, 2239), 'polymod.Poly', 'Poly', (['cs'], {'N': 'N', 'modulus': 'modulus'}), '(cs, N=N, modulus=modulus)\n', (2213, 2239), False, 'from polymod import Poly\n'), ((6092, 6148), 'polymod.Poly', 'Poly', (['(three_f_c.coeffs % 3)'], {'N': '(3)', 'modulus': 'self.modulus_r3'}), '(three_f_c.coeffs % 3, N=3, modulus=self.modulus_r3)\n', (6096, 6148), False, 'from polymod import Poly\n'), ((1202, 1211), 'math.gcd', 'gcd', (['a', 'b'], {}), '(a, b)\n', (1205, 1211), False, 'from math import gcd, ceil, sqrt\n'), ((3929, 3942), 'numpy.random.RandomState', 'RandomState', ([], {}), '()\n', (3940, 3942), False, 'from numpy.random import RandomState\n'), ((3964, 3981), 'numpy.random.RandomState', 'RandomState', (['seed'], {}), '(seed)\n', (3975, 3981), False, 'from numpy.random import RandomState\n'), ((4049, 4067), 'polymod.Poly.monomial', 'Poly.monomial', ([], {'n': '(0)'}), '(n=0)\n', (4062, 4067), False, 'from polymod import Poly\n'), ((370, 377), 'math.sqrt', 'sqrt', (['n'], {}), '(n)\n', (374, 377), False, 'from math import gcd, ceil, sqrt\n'), ((4007, 4025), 'polymod.Poly.monomial', 'Poly.monomial', ([], {'n': 'p'}), '(n=p)\n', (4020, 4025), False, 'from polymod import Poly\n'), ((4028, 4046), 'polymod.Poly.monomial', 'Poly.monomial', ([], {'n': '(1)'}), '(n=1)\n', (4041, 4046), False, 'from polymod import Poly\n'), ((5679, 5700), 'numpy.around', 'around', (['(hr.coeffs / 3)'], {}), '(hr.coeffs / 3)\n', (5685, 5700), False, 'from numpy import around, int32, zeros\n')] |
from calendar import month_name, monthrange
from pathlib import Path, PureWindowsPath
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import math as mt
from dataclima import helioclim3
class energycalc:
    """Photovoltaic plant energy calculation.

    All loss parameters are given in percent (e.g. ``soiling=2`` means a
    2 % loss).  The input data frame is expected in HelioClim-3 layout; the
    columns not used by the calculation are dropped on construction.
    """

    def __init__(self, df,
                 horizon,
                 shadings,
                 iam,
                 soiling,
                 lowirradeff,
                 temperatureloss,
                 modulequality,
                 lid,
                 mismatch,
                 ohmicdcloss,
                 inverterloss,
                 plantcontroller,
                 transf_lv_mv,
                 transf_mv_hv,
                 auxloadsloss,
                 ohmicac_poi,
                 systemunavailability,
                 gridunavailability):
        self.horizon = horizon
        self.shadings = shadings
        self.iam = iam
        self.soiling = soiling
        self.lowirradeff = lowirradeff
        self.temperatureloss = temperatureloss
        self.modulequality = modulequality
        self.lid = lid
        self.mismatch = mismatch
        self.ohmicdcloss = ohmicdcloss
        self.inverterloss = inverterloss
        self.plantcontroller = plantcontroller
        self.transf_lv_mv = transf_lv_mv
        self.transf_mv_hv = transf_mv_hv
        self.auxloadsloss = auxloadsloss
        self.ohmicac_poi = ohmicac_poi
        self.systemunavailability = systemunavailability
        self.gridunavailability = gridunavailability
        self.df = self._datatreat(df)

    def _datatreat(self, df):
        """Return a copy of ``df`` without the columns that the energy
        calculation does not use."""
        return df.drop(columns=['Top of Atmosphere',
                                'Code', 'Relative Humidity',
                                'Wind direction', 'Rainfall', 'Snowfall',
                                'Snow depth'])

    def perfratio(self):
        """Compute the performance-ratio factors.

        Returns
        -------
        tuple of floats
            ``(module_losses, local_losses)`` — multiplicative efficiency
            factors (1.0 means no loss).  Their product is the plant
            performance ratio.
        """
        lossesModuleFactors = {'Horizon': self.horizon,
                               'Shadings': self.shadings,
                               'IAM': self.iam,
                               'Soiling': self.soiling}
        lossesLocalFactors = {'Low Irradiance efficiency fall-off': self.lowirradeff,
                              'Temperature': self.temperatureloss,
                              'Module Quality': self.modulequality,
                              'LID': self.lid,
                              'Mismatch': self.mismatch,
                              'Ohmic (DC)': self.ohmicdcloss,
                              'Inverter': self.inverterloss,
                              'Plant Controller': self.plantcontroller,
                              'Transformers LV-MV': self.transf_lv_mv,
                              'Transformers MV-HV': self.transf_mv_hv,
                              'Auxiliary Loads': self.auxloadsloss,
                              'Ohmic AC until POI': self.ohmicac_poi,
                              'System Unavailability': self.systemunavailability,
                              'Grid Unavailability': self.gridunavailability}
        # Convert each percent loss to an efficiency fraction and multiply
        # them all together.
        local_losses = np.prod(1. - np.array(list(lossesLocalFactors.values())) / 100.)
        module_losses = np.prod(1. - np.array(list(lossesModuleFactors.values())) / 100.)
        return module_losses, local_losses

    def production(self, modulearea, totalpower, modulepower, trackeradd):
        """Compute gross and net energy production.

        Parameters
        ----------
        modulearea : float
            Area of a single module (m²).
        totalpower, modulepower : float
            Total plant power and single-module power in the same unit
            (their ratio gives the module count).
        trackeradd : float
            Tracker irradiance gain factor applied to 'Global Horiz'.

        Returns
        -------
        pandas.DataFrame
            The treated input data with 'Gross Prod' and 'Net Prod'
            columns (MWh) appended.
        """
        module_losses, local_losses = self.perfratio()
        # Module efficiency: module power relative to 1 kW/m² irradiance.
        eff = modulepower / (modulearea * 1000)
        # Total module area of the plant (m²).
        number_modul = totalpower / modulepower  # MWp total / MWp per module
        totalmodulearea = number_modul * modulearea  # m²
        # Effective irradiance on the module plane (kWh/m²).
        irrad_eff = self.df['Global Horiz'] * \
            trackeradd * module_losses
        # Gross and net production; /1e6 converts Wh to MWh.
        gross_production = irrad_eff * totalmodulearea * eff / 1e6
        net_prodution = gross_production * local_losses
        dfprod = pd.DataFrame({'Gross Prod': gross_production,
                               'Net Prod': net_prodution})
        # https://pandas-docs.github.io/pandas-docs-travis/user_guide/merging.html
        return pd.concat([self.df, dfprod], axis=1, sort=False)
| [
"pandas.DataFrame",
"numpy.array",
"pandas.concat",
"numpy.cumprod"
] | [((3777, 3807), 'numpy.array', 'np.array', (["dflocalloss['value']"], {}), "(dflocalloss['value'])\n", (3785, 3807), True, 'import numpy as np\n'), ((3921, 3949), 'numpy.array', 'np.array', (["dfmodloss['value']"], {}), "(dfmodloss['value'])\n", (3929, 3949), True, 'import numpy as np\n'), ((4992, 5065), 'pandas.DataFrame', 'pd.DataFrame', (["{'Gross Prod': gross_production, 'Net Prod': net_prodution}"], {}), "({'Gross Prod': gross_production, 'Net Prod': net_prodution})\n", (5004, 5065), True, 'import pandas as pd\n'), ((5117, 5165), 'pandas.concat', 'pd.concat', (['[self.df, dfprod]'], {'axis': '(1)', 'sort': '(False)'}), '([self.df, dfprod], axis=1, sort=False)\n', (5126, 5165), True, 'import pandas as pd\n'), ((3867, 3897), 'numpy.cumprod', 'np.cumprod', (['frac1'], {'dtype': 'float'}), '(frac1, dtype=float)\n', (3877, 3897), True, 'import numpy as np\n'), ((4010, 4040), 'numpy.cumprod', 'np.cumprod', (['frac2'], {'dtype': 'float'}), '(frac2, dtype=float)\n', (4020, 4040), True, 'import numpy as np\n')] |
"""Bezier, a module for creating Bezier curves.
Version 1.1, from < BezierCurveFunction-v1.ipynb > on 2019-05-02
"""
import numpy as np
__all__ = ["Bezier"]
class Bezier():
    """Static helpers implementing De Casteljau's algorithm for evaluating
    Bezier curves from a list of control points."""

    @staticmethod
    def TwoPoints(t, P1, P2):
        """
        Returns a point between P1 and P2, parametised by t.
        INPUTS:
            t     float/int; a parameterisation.
            P1    numpy array; a point.
            P2    numpy array; a point.
        OUTPUTS:
            Q1    numpy array; a point.
        """
        if not isinstance(P1, np.ndarray) or not isinstance(P2, np.ndarray):
            raise TypeError('Points must be an instance of the numpy.ndarray!')
        if not isinstance(t, (int, float)):
            raise TypeError('Parameter t must be an int or float!')
        return (1 - t) * P1 + t * P2

    @staticmethod
    def Points(t, points):
        """
        Performs one De Casteljau reduction step: interpolates each pair of
        consecutive points at parameter t, producing len(points) - 1 points.
        INPUTS:
            t            float/int; a parameterisation.
            points       list of numpy arrays; points.
        OUTPUTS:
            newpoints    list of numpy arrays; points.
        """
        return [Bezier.TwoPoints(t, points[i], points[i + 1])
                for i in range(len(points) - 1)]

    @staticmethod
    def Point(t, points):
        """
        Returns a point interpolated by the Bezier process.
        INPUTS:
            t            float/int; a parameterisation.
            points       list of numpy arrays; points.
        OUTPUTS:
            newpoint     numpy array; a point.
        """
        # Repeatedly reduce the control polygon until one point remains.
        newpoints = points
        while len(newpoints) > 1:
            newpoints = Bezier.Points(t, newpoints)
        return newpoints[0]

    @staticmethod
    def Curve(t_values, points):
        """
        Returns a sequence of points interpolated by the Bezier process.
        INPUTS:
            t_values     list of floats/ints; a parameterisation.
            points       list of numpy arrays; points.
        OUTPUTS:
            curve        numpy array of shape (len(t_values), dim); points.
        """
        if not hasattr(t_values, '__iter__'):
            raise TypeError("`t_values` Must be an iterable of integers or floats, of length greater than 0 .")
        if len(t_values) < 1:
            raise TypeError("`t_values` Must be an iterable of integers or floats, of length greater than 0 .")
        if not isinstance(t_values[0], (int, float)):
            raise TypeError("`t_values` Must be an iterable of integers or floats, of length greater than 0 .")
        # Evaluate once per parameter value and stack at the end; this avoids
        # the quadratic cost of growing the array with np.append in a loop.
        # dtype=float matches the original behaviour of always returning floats.
        return np.array([Bezier.Point(t, points) for t in t_values], dtype=float)
| [
"numpy.delete"
] | [((3282, 3304), 'numpy.delete', 'np.delete', (['curve', '(0)', '(0)'], {}), '(curve, 0, 0)\n', (3291, 3304), True, 'import numpy as np\n')] |
import numpy as np
class sensor:
def __init__(self, inverse_x=False, inverse_z=False):
self.inverse_x = inverse_x
self.inverse_z = inverse_z
self.flip_x_z = False
self.rotation = np.array([0, 0])
def get_angle(self):
rot = np.copy(self.rotation)
if self.flip_x_z:
rot[0], rot[1] = rot[1], rot[0]
if self.inverse_x:
rot[0] = -rot[0]
if self.inverse_z:
rot[1] = -rot[1]
return rot
# Override update to add functionality
def update(self):
pass
# Override update to add the ability to calibrate
def calibrate(self):
pass
def get_json_params(self):
return {"inverse_x": self.inverse_x,
"inverse_z": self.inverse_z,
"flip_x_z": self.flip_x_z
}
def set_json_params(self, d):
self.inverse_x = d['inverse_x']
self.inverse_z = d['inverse_z']
self.flip_x_z = d['flip_x_z']
| [
"numpy.array",
"numpy.copy"
] | [((217, 233), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (225, 233), True, 'import numpy as np\n'), ((274, 296), 'numpy.copy', 'np.copy', (['self.rotation'], {}), '(self.rotation)\n', (281, 296), True, 'import numpy as np\n')] |
import os.path
import sys
import numpy as np
import copy
import shutil
import astropy.units as u
import pandas as pd
import pickle
from sklearn.neighbors import BallTree
import time
from nexoclom import math as mathMB
from nexoclom.atomicdata import gValue
from nexoclom.modelcode.input_classes import InputError
from nexoclom.modelcode.Output import Output
from nexoclom.modelcode.SourceMap import SourceMap
from nexoclom import __file__ as basefile
try:
import condorMB
except:
pass
class ModelResult:
"""Base class for nexoclom model comparisons with data.
The ModelResult object is the base class for ModelImage (radiance and column
density images), LOSResult (radiance or column along lines of sight), and
ModelDensity (density along a trajectory or plane - not written yet).
**Parameters**
inputs
An Input object
params
A dictionary containing parameters needed to create the model result.
See LOSResult.py or ModelImage.py for details.
**Methods**
packet_weighting
Determine the weighting factor each packet. When determining density
or column, no weighting is needed. For radiance, takes into account
determines g-value and takes into account shadowing.
transform_reference_frame
Will be used to transform packets from the central object reference frame
to another location
**Attributes**
inputs
Input object with the parameters associated with the model result
params
A dictionary containing parameters of the result. See LOSResult.py
or ModelImage.py for details.
outputfiles
locations of saved Outputs associated with the inputs
npackets
total number of packets simulated
totalsource
total source in packets (if the initial fraction are all 1, then
totalsource = npackets * nsteps
quantity
column, density, or radiance determined from params
mechanism
Emission mechanism if quantity = radiance else None
wavelength
Emssion wavelengths if quantity = radiance else None
"""
    def __init__(self, inputs, params):
        """
        :param inputs: Input object
        :param params: Dictionary with ModelResult parameters, or the path to
            a text file of ``key = value`` lines (``;`` or ``#`` start a
            comment).  Must contain a 'quantity' entry with value 'column',
            'radiance', or 'density'.
        """
        self.inputs = copy.deepcopy(inputs)
        # Locate previously computed Outputs that match these inputs.
        self.outid, self.outputfiles, _, _ = self.inputs.search()
        self.npackets = 0
        self.totalsource = 0.
        self.atoms_per_packet = 0.
        self.sourcerate = 0. * u.def_unit('10**23 atoms/s', 1e23 / u.s)
        if isinstance(params, str):
            if os.path.exists(params):
                self.params = {}
                with open(params, 'r') as f:
                    for line in f:
                        # Strip trailing comments; ';' takes precedence over '#'.
                        if ';' in line:
                            line = line[:line.find(';')]
                        elif '#' in line:
                            line = line[:line.find('#')]
                        else:
                            pass
                        if '=' in line:
                            p, v = line.split('=')
                            # Keys are stored lower-cased; values keep their case.
                            self.params[p.strip().lower()] = v.strip()
                        else:
                            pass
            else:
                raise FileNotFoundError('ModelResult.__init__',
                                        'params file not found.')
        elif isinstance(params, dict):
            self.params = params
        else:
            raise TypeError('ModelResult.__init__',
                            'params must be a dict or filename.')
            
        # Do some validation
        quantities = ['column', 'radiance', 'density']
        self.quantity = self.params.get('quantity', None)
        if (self.quantity is None) or (self.quantity not in quantities):
            raise InputError('ModelImage.__init__',
                             "quantity must be 'column' or 'radiance'")
        else:
            pass
        
        if self.quantity == 'radiance':
            # Note - only resonant scattering currently possible
            self.mechanism = ['resonant scattering']
        
            if 'wavelength' in self.params:
                # Explicit wavelengths given as a comma-separated string of
                # integer values; presumably Angstroms — TODO confirm.
                self.wavelength = tuple(sorted(int(m.strip())*u.AA for m
                                    in self.params['wavelength'].split(',')))
            elif self.inputs.options.species is None:
                raise InputError('ModelImage.__init__',
                                 'Must provide either species or params.wavelength')
            elif self.inputs.options.species == 'Na':
                self.wavelength = (5891*u.AA, 5897*u.AA)
            elif self.inputs.options.species == 'Ca':
                self.wavelength = (4227*u.AA,)
            elif self.inputs.options.species == 'Mg':
                self.wavelength = (2852*u.AA,)
            else:
                raise InputError('ModelResult.__init__', ('Default wavelengths '
                             f'not available for {self.inputs.options.species}'))
        else:
            # Column/density results need no emission mechanism or wavelength.
            self.mechanism = None
            self.wavelength = None

        # Unit named after the planet, scaled by the planet radius.
        self.unit = u.def_unit('R_' + self.inputs.geometry.planet.object,
                              self.inputs.geometry.planet.radius)
def packet_weighting(self, packets, out_of_shadow, aplanet):
"""
Determine weighting factor for each packet
:param packets: DataFrame with packet parameters
:param out_of_shadow: Boolean array, True if in sunlight; False if in shadow
:param aplanet: Distance of planet from Sun (used for g-value calculation)
:return: Adds a 'weight' column to the packets DataFrame
"""
if self.quantity == 'column':
packets['weight'] = packets['frac']
elif self.quantity == 'density':
packets['weight'] = packets['frac']
elif self.quantity == 'radiance':
if 'resonant scattering' in self.mechanism:
gg = np.zeros(len(packets))/u.s
for w in self.wavelength:
gval = gValue(self.inputs.options.species, w, aplanet)
gg += mathMB.interpu(packets['radvel_sun'].values *
self.unit/u.s, gval.velocity, gval.g)
weight_resscat = packets['frac']*out_of_shadow*gg.value/1e6
else:
weight_resscat = np.zeros(len(packets))
packets['weight'] = weight_resscat # + other stuff
else:
raise InputError('ModelResults.packet_weighting',
f'{self.quantity} is invalid.')
assert np.all(np.isfinite(packets['weight'])), 'Non-finite weights'
def make_source_map(self, smear_radius=10*np.pi/180, nlonbins=180,
nlatbins=90, nvelbins=100, nazbins=90, naltbins=23,
use_condor=False, normalize=True):
"""
At each point in lon/lat grid want:
* Source flux (atoms/cm2/s
* Speed distribution (f_v vs v)
* Azimuthal distribution (f_az vs az) -> measured CCW from north
* Altitude distribution (f_alt vs alt) -> tangent = 0, normal = 90
"""
params = {'smear_radius': smear_radius,
'nlonbins': nlonbins,
'nlatbins': nlatbins,
'nvelbins': nvelbins,
'nazbins': nazbins,
'naltbins': naltbins,
'use_condor': use_condor,
'normalize': normalize}
X0 = None
for outputfile in self.outputfiles:
output = Output.restore(outputfile)
if X0 is None:
X0 = output.X0[['x', 'y', 'z', 'vx', 'vy', 'vz', 'frac']]
else:
X0 = pd.concat([X0, output.X0[['x', 'y', 'z', 'vx', 'vy', 'vz',
'frac']]], ignore_index=True)
del output
velocity = (X0[['vx', 'vy', 'vz']].values *
self.inputs.geometry.planet.radius.value)
speed = np.linalg.norm(velocity, axis=1)
# Radial, east, north unit vectors
rad = X0[['x', 'y', 'z']].values
rad_ = np.linalg.norm(rad, axis=1)
rad = rad/rad_[:, np.newaxis]
east = X0[['y', 'x', 'z']].values
east[:,1] = -1*east[:,1]
east[:,2] = 0
east_ = np.linalg.norm(east, axis=1)
east = east/east_[:, np.newaxis]
# north = np.array([-X0.z.values * X0.x.values,
# -X0.z.values * X0.y.values,
# X0.x.values**2 + X0.y.values**2])
north = np.cross(rad, east, axis=1)
north_ = np.linalg.norm(north, axis=1)
north = north/north_[:, np.newaxis]
v_rad = np.sum(velocity * rad, axis=1)
v_east = np.sum(velocity * east, axis=1)
v_north = np.sum(velocity * north, axis=1)
v_rad_over_speed = v_rad/speed
v_rad_over_speed[v_rad_over_speed > 1] = 1
v_rad_over_speed[v_rad_over_speed < -1] = -1
assert np.all(np.isclose(v_rad**2 + v_east**2 + v_north**2, speed**2))
X0['altitude'] = np.arcsin(v_rad_over_speed)
X0['azimuth'] = (np.arctan2(v_north, v_east) + 2*np.pi) % (2*np.pi)
X0['v_rad'] = v_rad
X0['v_east'] = v_east
X0['v_north'] = v_north
X0['speed'] = speed
X0['longitude'] = (np.arctan2(X0.x.values, -X0.y.values) + 2*np.pi) % (2*np.pi)
X0['latitude'] = np.arcsin(X0.z.values)
source = self._calculate_histograms(X0, params, weight=True)
if self.__dict__.get('fitted', False):
available = self._calculate_histograms(X0, params, weight=False)
# Compute the abundance scaling factors based on phase space filling factor
source.fraction_observed = self._phase_space_filling_factor(
X0, source, params, weight=True)
available.fraction_observed = self._phase_space_filling_factor(
X0, available, params, weight=False)
else:
available, scalefactor_fit, scalefactor_avail = None, None, None
return source, available, X0
def _phase_space_filling_factor(self, X0, source, params, weight):
if weight:
X0['weight'] = X0.frac
else:
X0['weight'] = np.ones_like(X0.frac.values)
# Figure out the v, alt, and az axis
# Speed isn't used right now, but probably want to account for filling factor
speed, azimuth, altitude = source.speed, source.azimuth, source.altitude
gridazimuth, gridaltitude = np.meshgrid(azimuth, altitude)
gridazimuth, gridaltitude = gridazimuth.T, gridaltitude.T
lowalt = altitude < 85*u.deg
# Area of each bin
dalt, daz = altitude[1]-altitude[0], azimuth[1]-azimuth[0]
dOmega = dalt * daz * np.cos(gridaltitude)
tree = BallTree(X0[['latitude', 'longitude']], metric='haversine')
gridlatitude, gridlongitude = np.meshgrid(source.latitude,
source.longitude)
points = np.array([gridlatitude.flatten(), gridlongitude.flatten()]).T
ind = tree.query_radius(points, params['smear_radius'])
fraction_observed = np.ndarray((points.shape[0], ))
if params['use_condor']:
python = sys.executable
pyfile = os.path.join(os.path.dirname(basefile), 'modelcode',
'calculation_step.py')
tempdir = os.path.join(self.inputs.config.savepath, 'temp',
str(np.random.randint(1000000)))
if not os.path.exists(tempdir):
os.makedirs(tempdir)
# Save the data
# Break it down into pieces
ct, nper = 0, points.shape[0]//(condorMB.nCPUs()-1) + 1
datafiles = []
jobs = []
while ct < points.shape[0]:
inds = ind[ct:min(ct+nper, points.shape[0])]
inds_ = []
for idx in inds:
inds_.extend(idx)
inds_ = list(set(inds_))
sub = X0.loc[inds_]
datafile = os.path.join(tempdir, f'data_{ct}.pkl')
with open(datafile, 'wb') as file:
pickle.dump((sub, inds, params, lowalt, dOmega), file)
datafiles.append(datafile)
print(datafile)
# submit to condor
logfile = os.path.join(tempdir, f'{ct}.log')
outfile = os.path.join(tempdir, f'{ct}.out')
errfile = os.path.join(tempdir, f'{ct}.err')
job = condorMB.submit_to_condor(python,
delay=1,
arguments=f'{pyfile} {datafile}',
logfile=logfile,
outlogfile=outfile,
errlogfile=errfile)
jobs.append(job)
ct += nper
else:
for index in range(points.shape[0]):
sub = X0.iloc[ind[index]]
# Speed, az/alt phase space distribution in cell
speed = mathMB.Histogram(sub.speed, bins=params['nvelbins'],
weights=sub.weight,
range=[0, sub.speed.max()])
angdist = mathMB.Histogram2d(sub.azimuth, sub.altitude,
weights=sub.weight,
bins=(params['nazbins'],
params['naltbins']),
range=[[0, 2*np.pi], [0, np.pi/2]])
# Convolve with gaussian to smooth things out
ang = mathMB.smooth2d(angdist.histogram, 3, wrap=True)
az_max = np.zeros((2, params['nazbins']))
alt_max = np.zeros((2, params['naltbins']))
for i in range(params['nazbins']):
row = mathMB.smooth(ang[i,:], 7, wrap=True)
az_max[:,i] = [np.where(row == row.max())[0].mean(), row.max()]
for i in range(params['naltbins']):
row = mathMB.smooth(ang[:,i], 7, wrap=True)
alt_max[:,i] = [np.where(row == row.max())[0][0], row.max()]
# Ignore high altitudes (bad statistics)
top = np.mean([az_max[1,:].max(), alt_max[1, lowalt].max()])
ang /= top
integral = np.sum(ang * dOmega)
fraction_observed[index] = integral.value/(2*np.pi)
from IPython import embed; embed()
sys.exit()
if index % 500 == 0:
print(f'weight = {weight}: {index+1}/{points.shape[0]} completed')
if params['use_condor']:
jobs = set(jobs)
while condorMB.n_to_go(jobs):
print(f'{condorMB.n_to_go(jobs)} to go.')
time.sleep(10)
for datafile in datafiles:
fracfile = datafile.replace('data', 'fraction')
with open(fracfile, 'rb') as file:
fraction = pickle.load(file)
dfile = os.path.basename(datafile)
ct = int(''.join([x for x in dfile if x.isdigit()]))
fraction_observed[ct:ct+len(fraction)] = fraction
shutil.rmtree(tempdir)
fraction_observed = fraction_observed.reshape(gridlongitude.shape)
return fraction_observed
def _calculate_histograms(self, X0, params, weight=True):
if weight:
w = X0.frac.values
else:
w = np.ones_like(X0.frac.values)
# Determine source distribution
if (params['nlonbins'] > 0) and (params['nlatbins'] > 0):
source = mathMB.Histogram2d(X0.longitude, X0.latitude, weights=w,
range=[[0, 2*np.pi], [-np.pi/2, np.pi/2]],
bins=(params['nlonbins'], params['nlatbins']))
source.x, source.dx = source.x * u.rad, source.dx * u.rad
source.y, source.dy = source.y * u.rad, source.dy * u.rad
if params['normalize']:
# Convert histogram to flux
# (a) divide by area of a grid cell
# Surface area of a grid cell =
# R**2 (lambda_2 - lambda_1) (sin(phi2)-sin(phi1))
# https://onlinelibrary.wiley.com/doi/epdf/10.1111/tgis.12636, eqn 1
# (b) Multiply by source rate
_, gridlatitude = np.meshgrid(source.x, source.y)
area = (self.inputs.geometry.planet.radius**2 * source.dx.value *
(np.sin(gridlatitude + source.dy / 2) -
np.sin(gridlatitude - source.dy / 2)))
source.histogram = (source.histogram / X0.frac.sum() /
area.T.to(u.cm**2) * self.sourcerate.to(1 / u.s))
else:
pass
else:
source = None
# Velocity flux atoms/s/(km/s)
if params['nvelbins'] > 0:
velocity = mathMB.Histogram(X0.speed, bins=params['nvelbins'],
range=[0, X0.speed.max()], weights=w)
velocity.x = velocity.x * u.km / u.s
velocity.dx = velocity.dx * u.km / u.s
if params['normalize']:
velocity.histogram = (self.sourcerate * velocity.histogram /
velocity.histogram.sum() / velocity.dx)
velocity.histogram = velocity.histogram.to(self.sourcerate.unit *
u.def_unit('(km/s)^-1', u.s/u.km))
else:
pass
else:
velocity = None
# Altitude distribution
if params['naltbins'] > 0:
altitude = mathMB.Histogram(X0.altitude, bins=params['naltbins'],
range=[0, np.pi / 2], weights=w)
altitude.x = altitude.x * u.rad
altitude.dx = altitude.dx * u.rad
if params['normalize']:
altitude.histogram = (self.sourcerate * altitude.histogram /
altitude.histogram.sum() / altitude.dx)
else:
pass
else:
altitude = None
# Azimuth distribution
if params['nazbins'] > 0:
azimuth = mathMB.Histogram(X0.azimuth, bins=params['nazbins'],
range=[0, 2 * np.pi], weights=w)
azimuth.x = azimuth.x * u.rad
azimuth.dx = azimuth.dx * u.rad
if params['normalize']:
azimuth.histogram = (self.sourcerate * azimuth.histogram /
azimuth.histogram.sum() / azimuth.dx)
else:
pass
else:
azimuth = None
source = SourceMap({'abundance': source.histogram,
'longitude': source.x,
'latitude': source.y,
'speed': velocity.x,
'speed_dist': velocity.histogram,
'altitude': altitude.x,
'altitude_dist': altitude.histogram,
'azimuth': azimuth.x,
'azimuth_dist': azimuth.histogram,
'coordinate_system': 'solar-fixed'})
return source
def show_source_map(self, filename, which='source', smooth=False, show=True,
source=None, available=None, X0=None):
if X0 is None:
source, available, X0 = self.make_source_map()
elif (which == 'source') and (source is None):
touse, _, X0 = self.make_source_map()
elif which == 'source':
touse = source
elif (which == 'available') and (available is None):
_, touse, X0 = self.make_source_map()
elif which == 'available':
touse = available
else:
raise InputError
# def transform_reference_frame(self, output):
# """If the image center is not the planet, transform to a
# moon-centric reference frame."""
# assert 0, 'Not ready yet.'
#
# # Load output
#
# # # Transform to moon-centric frame if necessary
# # if result.origin != result.inputs.geometry.planet:
# # assert 0, 'Need to do transparamsion for a moon.'
# # else:
# # origin = np.array([0., 0., 0.])*output.x.unit
# # sc = 1.
#
# # Choose which packets to use
# # touse = output.frac >= 0 if keepall else output.frac > 0
#
# # packet positions relative to origin -- not rotated
# # pts_sun = np.array((output.x[touse]-origin[0],
# # output.y[touse]-origin[1],
# # output.z[touse]-origin[2]))*output.x.unit
# #
# # # Velocities relative to sun
# # vels_sun = np.array((output.vx[touse],
# # output.vy[touse],
# # output.vz[touse]))*output.vx.unit
#
# # Fractional content
# # frac = output.frac[touse]
#
# return output #, pts_sun, vels_sun, frac
| [
"nexoclom.math.Histogram2d",
"nexoclom.math.Histogram",
"nexoclom.modelcode.SourceMap.SourceMap",
"time.sleep",
"numpy.isfinite",
"numpy.arctan2",
"copy.deepcopy",
"numpy.linalg.norm",
"sys.exit",
"numpy.sin",
"nexoclom.math.smooth",
"numpy.cross",
"IPython.embed",
"nexoclom.modelcode.Outp... | [((2416, 2437), 'copy.deepcopy', 'copy.deepcopy', (['inputs'], {}), '(inputs)\n', (2429, 2437), False, 'import copy\n'), ((5251, 5345), 'astropy.units.def_unit', 'u.def_unit', (["('R_' + self.inputs.geometry.planet.object)", 'self.inputs.geometry.planet.radius'], {}), "('R_' + self.inputs.geometry.planet.object, self.inputs.geometry.\n planet.radius)\n", (5261, 5345), True, 'import astropy.units as u\n'), ((8245, 8277), 'numpy.linalg.norm', 'np.linalg.norm', (['velocity'], {'axis': '(1)'}), '(velocity, axis=1)\n', (8259, 8277), True, 'import numpy as np\n'), ((8378, 8405), 'numpy.linalg.norm', 'np.linalg.norm', (['rad'], {'axis': '(1)'}), '(rad, axis=1)\n', (8392, 8405), True, 'import numpy as np\n'), ((8558, 8586), 'numpy.linalg.norm', 'np.linalg.norm', (['east'], {'axis': '(1)'}), '(east, axis=1)\n', (8572, 8586), True, 'import numpy as np\n'), ((8819, 8846), 'numpy.cross', 'np.cross', (['rad', 'east'], {'axis': '(1)'}), '(rad, east, axis=1)\n', (8827, 8846), True, 'import numpy as np\n'), ((8864, 8893), 'numpy.linalg.norm', 'np.linalg.norm', (['north'], {'axis': '(1)'}), '(north, axis=1)\n', (8878, 8893), True, 'import numpy as np\n'), ((8955, 8985), 'numpy.sum', 'np.sum', (['(velocity * rad)'], {'axis': '(1)'}), '(velocity * rad, axis=1)\n', (8961, 8985), True, 'import numpy as np\n'), ((9003, 9034), 'numpy.sum', 'np.sum', (['(velocity * east)'], {'axis': '(1)'}), '(velocity * east, axis=1)\n', (9009, 9034), True, 'import numpy as np\n'), ((9053, 9085), 'numpy.sum', 'np.sum', (['(velocity * north)'], {'axis': '(1)'}), '(velocity * north, axis=1)\n', (9059, 9085), True, 'import numpy as np\n'), ((9335, 9362), 'numpy.arcsin', 'np.arcsin', (['v_rad_over_speed'], {}), '(v_rad_over_speed)\n', (9344, 9362), True, 'import numpy as np\n'), ((9670, 9692), 'numpy.arcsin', 'np.arcsin', (['X0.z.values'], {}), '(X0.z.values)\n', (9679, 9692), True, 'import numpy as np\n'), ((10820, 10850), 'numpy.meshgrid', 'np.meshgrid', (['azimuth', 
'altitude'], {}), '(azimuth, altitude)\n', (10831, 10850), True, 'import numpy as np\n'), ((11124, 11183), 'sklearn.neighbors.BallTree', 'BallTree', (["X0[['latitude', 'longitude']]"], {'metric': '"""haversine"""'}), "(X0[['latitude', 'longitude']], metric='haversine')\n", (11132, 11183), False, 'from sklearn.neighbors import BallTree\n'), ((11222, 11268), 'numpy.meshgrid', 'np.meshgrid', (['source.latitude', 'source.longitude'], {}), '(source.latitude, source.longitude)\n', (11233, 11268), True, 'import numpy as np\n'), ((11495, 11525), 'numpy.ndarray', 'np.ndarray', (['(points.shape[0],)'], {}), '((points.shape[0],))\n', (11505, 11525), True, 'import numpy as np\n'), ((19646, 19958), 'nexoclom.modelcode.SourceMap.SourceMap', 'SourceMap', (["{'abundance': source.histogram, 'longitude': source.x, 'latitude': source.y,\n 'speed': velocity.x, 'speed_dist': velocity.histogram, 'altitude':\n altitude.x, 'altitude_dist': altitude.histogram, 'azimuth': azimuth.x,\n 'azimuth_dist': azimuth.histogram, 'coordinate_system': 'solar-fixed'}"], {}), "({'abundance': source.histogram, 'longitude': source.x, 'latitude':\n source.y, 'speed': velocity.x, 'speed_dist': velocity.histogram,\n 'altitude': altitude.x, 'altitude_dist': altitude.histogram, 'azimuth':\n azimuth.x, 'azimuth_dist': azimuth.histogram, 'coordinate_system':\n 'solar-fixed'})\n", (19655, 19958), False, 'from nexoclom.modelcode.SourceMap import SourceMap\n'), ((2626, 2667), 'astropy.units.def_unit', 'u.def_unit', (['"""10**23 atoms/s"""', '(1e+23 / u.s)'], {}), "('10**23 atoms/s', 1e+23 / u.s)\n", (2636, 2667), True, 'import astropy.units as u\n'), ((3962, 4038), 'nexoclom.modelcode.input_classes.InputError', 'InputError', (['"""ModelImage.__init__"""', '"""quantity must be \'column\' or \'radiance\'"""'], {}), '(\'ModelImage.__init__\', "quantity must be \'column\' or \'radiance\'")\n', (3972, 4038), False, 'from nexoclom.modelcode.input_classes import InputError\n'), ((6792, 6822), 'numpy.isfinite', 
'np.isfinite', (["packets['weight']"], {}), "(packets['weight'])\n", (6803, 6822), True, 'import numpy as np\n'), ((7776, 7802), 'nexoclom.modelcode.Output.Output.restore', 'Output.restore', (['outputfile'], {}), '(outputfile)\n', (7790, 7802), False, 'from nexoclom.modelcode.Output import Output\n'), ((9253, 9316), 'numpy.isclose', 'np.isclose', (['(v_rad ** 2 + v_east ** 2 + v_north ** 2)', '(speed ** 2)'], {}), '(v_rad ** 2 + v_east ** 2 + v_north ** 2, speed ** 2)\n', (9263, 9316), True, 'import numpy as np\n'), ((10533, 10561), 'numpy.ones_like', 'np.ones_like', (['X0.frac.values'], {}), '(X0.frac.values)\n', (10545, 10561), True, 'import numpy as np\n'), ((11083, 11103), 'numpy.cos', 'np.cos', (['gridaltitude'], {}), '(gridaltitude)\n', (11089, 11103), True, 'import numpy as np\n'), ((15447, 15469), 'condorMB.n_to_go', 'condorMB.n_to_go', (['jobs'], {}), '(jobs)\n', (15463, 15469), False, 'import condorMB\n'), ((15987, 16009), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (16000, 16009), False, 'import shutil\n'), ((16275, 16303), 'numpy.ones_like', 'np.ones_like', (['X0.frac.values'], {}), '(X0.frac.values)\n', (16287, 16303), True, 'import numpy as np\n'), ((16436, 16598), 'nexoclom.math.Histogram2d', 'mathMB.Histogram2d', (['X0.longitude', 'X0.latitude'], {'weights': 'w', 'range': '[[0, 2 * np.pi], [-np.pi / 2, np.pi / 2]]', 'bins': "(params['nlonbins'], params['nlatbins'])"}), "(X0.longitude, X0.latitude, weights=w, range=[[0, 2 * np.\n pi], [-np.pi / 2, np.pi / 2]], bins=(params['nlonbins'], params[\n 'nlatbins']))\n", (16454, 16598), True, 'from nexoclom import math as mathMB\n'), ((18565, 18656), 'nexoclom.math.Histogram', 'mathMB.Histogram', (['X0.altitude'], {'bins': "params['naltbins']", 'range': '[0, np.pi / 2]', 'weights': 'w'}), "(X0.altitude, bins=params['naltbins'], range=[0, np.pi / 2],\n weights=w)\n", (18581, 18656), True, 'from nexoclom import math as mathMB\n'), ((19147, 19236), 'nexoclom.math.Histogram', 
'mathMB.Histogram', (['X0.azimuth'], {'bins': "params['nazbins']", 'range': '[0, 2 * np.pi]', 'weights': 'w'}), "(X0.azimuth, bins=params['nazbins'], range=[0, 2 * np.pi],\n weights=w)\n", (19163, 19236), True, 'from nexoclom import math as mathMB\n'), ((7943, 8035), 'pandas.concat', 'pd.concat', (["[X0, output.X0[['x', 'y', 'z', 'vx', 'vy', 'vz', 'frac']]]"], {'ignore_index': '(True)'}), "([X0, output.X0[['x', 'y', 'z', 'vx', 'vy', 'vz', 'frac']]],\n ignore_index=True)\n", (7952, 8035), True, 'import pandas as pd\n'), ((9388, 9415), 'numpy.arctan2', 'np.arctan2', (['v_north', 'v_east'], {}), '(v_north, v_east)\n', (9398, 9415), True, 'import numpy as np\n'), ((9584, 9621), 'numpy.arctan2', 'np.arctan2', (['X0.x.values', '(-X0.y.values)'], {}), '(X0.x.values, -X0.y.values)\n', (9594, 9621), True, 'import numpy as np\n'), ((12970, 13107), 'condorMB.submit_to_condor', 'condorMB.submit_to_condor', (['python'], {'delay': '(1)', 'arguments': 'f"""{pyfile} {datafile}"""', 'logfile': 'logfile', 'outlogfile': 'outfile', 'errlogfile': 'errfile'}), "(python, delay=1, arguments=f'{pyfile} {datafile}',\n logfile=logfile, outlogfile=outfile, errlogfile=errfile)\n", (12995, 13107), False, 'import condorMB\n'), ((13833, 13994), 'nexoclom.math.Histogram2d', 'mathMB.Histogram2d', (['sub.azimuth', 'sub.altitude'], {'weights': 'sub.weight', 'bins': "(params['nazbins'], params['naltbins'])", 'range': '[[0, 2 * np.pi], [0, np.pi / 2]]'}), "(sub.azimuth, sub.altitude, weights=sub.weight, bins=(\n params['nazbins'], params['naltbins']), range=[[0, 2 * np.pi], [0, np.\n pi / 2]])\n", (13851, 13994), True, 'from nexoclom import math as mathMB\n'), ((14264, 14312), 'nexoclom.math.smooth2d', 'mathMB.smooth2d', (['angdist.histogram', '(3)'], {'wrap': '(True)'}), '(angdist.histogram, 3, wrap=True)\n', (14279, 14312), True, 'from nexoclom import math as mathMB\n'), ((14351, 14383), 'numpy.zeros', 'np.zeros', (["(2, params['nazbins'])"], {}), "((2, params['nazbins']))\n", (14359, 14383), True, 
'import numpy as np\n'), ((14410, 14443), 'numpy.zeros', 'np.zeros', (["(2, params['naltbins'])"], {}), "((2, params['naltbins']))\n", (14418, 14443), True, 'import numpy as np\n'), ((15041, 15061), 'numpy.sum', 'np.sum', (['(ang * dOmega)'], {}), '(ang * dOmega)\n', (15047, 15061), True, 'import numpy as np\n'), ((15173, 15180), 'IPython.embed', 'embed', ([], {}), '()\n', (15178, 15180), False, 'from IPython import embed\n'), ((15197, 15207), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15205, 15207), False, 'import sys\n'), ((15545, 15559), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (15555, 15559), False, 'import time\n'), ((17234, 17265), 'numpy.meshgrid', 'np.meshgrid', (['source.x', 'source.y'], {}), '(source.x, source.y)\n', (17245, 17265), True, 'import numpy as np\n'), ((4518, 4607), 'nexoclom.modelcode.input_classes.InputError', 'InputError', (['"""ModelImage.__init__"""', '"""Must provide either species or params.wavelength"""'], {}), "('ModelImage.__init__',\n 'Must provide either species or params.wavelength')\n", (4528, 4607), False, 'from nexoclom.modelcode.input_classes import InputError\n'), ((6664, 6739), 'nexoclom.modelcode.input_classes.InputError', 'InputError', (['"""ModelResults.packet_weighting"""', 'f"""{self.quantity} is invalid."""'], {}), "('ModelResults.packet_weighting', f'{self.quantity} is invalid.')\n", (6674, 6739), False, 'from nexoclom.modelcode.input_classes import InputError\n'), ((11843, 11869), 'numpy.random.randint', 'np.random.randint', (['(1000000)'], {}), '(1000000)\n', (11860, 11869), True, 'import numpy as np\n'), ((12574, 12628), 'pickle.dump', 'pickle.dump', (['(sub, inds, params, lowalt, dOmega)', 'file'], {}), '((sub, inds, params, lowalt, dOmega), file)\n', (12585, 12628), False, 'import pickle\n'), ((14521, 14559), 'nexoclom.math.smooth', 'mathMB.smooth', (['ang[i, :]', '(7)'], {'wrap': '(True)'}), '(ang[i, :], 7, wrap=True)\n', (14534, 14559), True, 'from nexoclom import math as mathMB\n'), ((14721, 
14759), 'nexoclom.math.smooth', 'mathMB.smooth', (['ang[:, i]', '(7)'], {'wrap': '(True)'}), '(ang[:, i], 7, wrap=True)\n', (14734, 14759), True, 'from nexoclom import math as mathMB\n'), ((15754, 15771), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (15765, 15771), False, 'import pickle\n'), ((17373, 17409), 'numpy.sin', 'np.sin', (['(gridlatitude + source.dy / 2)'], {}), '(gridlatitude + source.dy / 2)\n', (17379, 17409), True, 'import numpy as np\n'), ((17437, 17473), 'numpy.sin', 'np.sin', (['(gridlatitude - source.dy / 2)'], {}), '(gridlatitude - source.dy / 2)\n', (17443, 17473), True, 'import numpy as np\n'), ((18354, 18389), 'astropy.units.def_unit', 'u.def_unit', (['"""(km/s)^-1"""', '(u.s / u.km)'], {}), "('(km/s)^-1', u.s / u.km)\n", (18364, 18389), True, 'import astropy.units as u\n'), ((6201, 6248), 'nexoclom.atomicdata.gValue', 'gValue', (['self.inputs.options.species', 'w', 'aplanet'], {}), '(self.inputs.options.species, w, aplanet)\n', (6207, 6248), False, 'from nexoclom.atomicdata import gValue\n'), ((6275, 6365), 'nexoclom.math.interpu', 'mathMB.interpu', (["(packets['radvel_sun'].values * self.unit / u.s)", 'gval.velocity', 'gval.g'], {}), "(packets['radvel_sun'].values * self.unit / u.s, gval.\n velocity, gval.g)\n", (6289, 6365), True, 'from nexoclom import math as mathMB\n'), ((12074, 12090), 'condorMB.nCPUs', 'condorMB.nCPUs', ([], {}), '()\n', (12088, 12090), False, 'import condorMB\n'), ((15496, 15518), 'condorMB.n_to_go', 'condorMB.n_to_go', (['jobs'], {}), '(jobs)\n', (15512, 15518), False, 'import condorMB\n'), ((4990, 5100), 'nexoclom.modelcode.input_classes.InputError', 'InputError', (['"""ModelResult.__init__"""', 'f"""Default wavelengths not available for {self.inputs.options.species}"""'], {}), "('ModelResult.__init__',\n f'Default wavelengths not available for {self.inputs.options.species}')\n", (5000, 5100), False, 'from nexoclom.modelcode.input_classes import InputError\n')] |
import argparse
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader
from src.dataload.tabularDataset import (split_tabular_normal_only_train,
tabularDataset)
from src.lit_models.LitBaseAutoEncoder import LitBaseAutoEncoder
from src.lit_models.LitBaseVAE import LitBaseVAE
def define_argparser():
    """
    Parse command-line arguments for the tabular anomaly-detection trainer.
    Returns
    -------
    argparse.Namespace
        Parsed configuration. ``cuda`` is forced to 0 (CPU) when the user
        requested CPU or no CUDA device is available.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--project", default="Tabular Anomaly Detection")
    parser.add_argument("--model", default="LitBaseAutoEncoder")
    parser.add_argument(
        "--data_path",
        default="/Users/nhn/Workspace/catchMinor/data/tabular_data/abalone9-18.csv",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="input batch size for training (default: 512)",
    )
    parser.add_argument(
        "--epochs", type=int, default=10, help="number of epochs to train (default: 10)"
    )
    parser.add_argument("--cuda", type=int, default=0, help="0 for cpu -1 for all gpu")
    config = parser.parse_args()
    # Fall back to CPU when CUDA is unavailable.
    # (idiomatic truthiness test instead of `== False`)
    if config.cuda == 0 or not torch.cuda.is_available():
        config.cuda = 0
    return config
def main(config):
    """
    Train an anomaly-detection model on a tabular dataset.
    Loads the CSV named by ``config.data_path``, splits it so only normal
    samples are used for training/validation, builds the selected model,
    and fits it with PyTorch Lightning.
    Fix: model selection now uses ``elif``/``else`` so an unknown
    ``config.model`` raises immediately instead of a NameError at fit time.
    :param config: argparse.Namespace produced by define_argparser()
    :raises ValueError: if config.model names an unknown model
    """
    # data
    df = pd.read_csv(config.data_path)
    normal_train, normal_val, normal_abnormal_test = split_tabular_normal_only_train(df)
    train_dataset = tabularDataset(
        np.array(normal_train.iloc[:, :-1]), np.array(normal_train.iloc[:, -1])
    )
    valid_dataset = tabularDataset(
        np.array(normal_val.iloc[:, :-1]), np.array(normal_val.iloc[:, -1])
    )
    test_dataset = tabularDataset(
        np.array(normal_abnormal_test.iloc[:, :-1]),
        np.array(normal_abnormal_test.iloc[:, -1]),
    )
    train_loader = DataLoader(train_dataset, batch_size=config.batch_size)
    valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size)
    # currently unused; retained for future validate/test/predict steps
    test_loader = DataLoader(test_dataset, batch_size=config.batch_size)
    # model
    if config.model == "LitBaseAutoEncoder":
        model = LitBaseAutoEncoder(n_layers=2, features_list=[8, 4, 2])
    elif config.model == "LitBaseVAE":
        model = LitBaseVAE()
    else:
        raise ValueError(f"unknown model: {config.model}")
    # trainer
    logger = pl.loggers.WandbLogger()
    early_stopping_callback = pl.callbacks.EarlyStopping(
        monitor="val_loss", mode="min", patience=20
    )
    trainer = pl.Trainer(
        logger=logger,
        log_every_n_steps=10,  # set the logging frequency
        gpus=config.cuda,  # use all GPUs
        max_epochs=config.epochs,  # number of epochs
        deterministic=True,  # keep it deterministic
        callbacks=[early_stopping_callback],
    )
    # fit the model
    trainer.fit(model, train_loader, valid_loader)
# Script entry point: parse CLI arguments and run training.
if __name__ == "__main__":
    main(define_argparser())
| [
"pytorch_lightning.callbacks.EarlyStopping",
"src.lit_models.LitBaseVAE.LitBaseVAE",
"argparse.ArgumentParser",
"pandas.read_csv",
"src.dataload.tabularDataset.split_tabular_normal_only_train",
"src.lit_models.LitBaseAutoEncoder.LitBaseAutoEncoder",
"pytorch_lightning.loggers.WandbLogger",
"numpy.arra... | [((425, 450), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (448, 450), False, 'import argparse\n'), ((1272, 1301), 'pandas.read_csv', 'pd.read_csv', (['config.data_path'], {}), '(config.data_path)\n', (1283, 1301), True, 'import pandas as pd\n'), ((1355, 1390), 'src.dataload.tabularDataset.split_tabular_normal_only_train', 'split_tabular_normal_only_train', (['df'], {}), '(df)\n', (1386, 1390), False, 'from src.dataload.tabularDataset import split_tabular_normal_only_train, tabularDataset\n'), ((1796, 1851), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'config.batch_size'}), '(train_dataset, batch_size=config.batch_size)\n', (1806, 1851), False, 'from torch.utils.data import DataLoader\n'), ((1871, 1926), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dataset'], {'batch_size': 'config.batch_size'}), '(valid_dataset, batch_size=config.batch_size)\n', (1881, 1926), False, 'from torch.utils.data import DataLoader\n'), ((1945, 1999), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'config.batch_size'}), '(test_dataset, batch_size=config.batch_size)\n', (1955, 1999), False, 'from torch.utils.data import DataLoader\n'), ((2224, 2248), 'pytorch_lightning.loggers.WandbLogger', 'pl.loggers.WandbLogger', ([], {}), '()\n', (2246, 2248), True, 'import pytorch_lightning as pl\n'), ((2279, 2350), 'pytorch_lightning.callbacks.EarlyStopping', 'pl.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'patience': '(20)'}), "(monitor='val_loss', mode='min', patience=20)\n", (2305, 2350), True, 'import pytorch_lightning as pl\n'), ((2379, 2536), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'logger': 'logger', 'log_every_n_steps': '(10)', 'gpus': 'config.cuda', 'max_epochs': 'config.epochs', 'deterministic': '(True)', 'callbacks': '[early_stopping_callback]'}), '(logger=logger, log_every_n_steps=10, gpus=config.cuda,\n 
max_epochs=config.epochs, deterministic=True, callbacks=[\n early_stopping_callback])\n', (2389, 2536), True, 'import pytorch_lightning as pl\n'), ((1435, 1470), 'numpy.array', 'np.array', (['normal_train.iloc[:, :-1]'], {}), '(normal_train.iloc[:, :-1])\n', (1443, 1470), True, 'import numpy as np\n'), ((1472, 1506), 'numpy.array', 'np.array', (['normal_train.iloc[:, -1]'], {}), '(normal_train.iloc[:, -1])\n', (1480, 1506), True, 'import numpy as np\n'), ((1557, 1590), 'numpy.array', 'np.array', (['normal_val.iloc[:, :-1]'], {}), '(normal_val.iloc[:, :-1])\n', (1565, 1590), True, 'import numpy as np\n'), ((1592, 1624), 'numpy.array', 'np.array', (['normal_val.iloc[:, -1]'], {}), '(normal_val.iloc[:, -1])\n', (1600, 1624), True, 'import numpy as np\n'), ((1674, 1717), 'numpy.array', 'np.array', (['normal_abnormal_test.iloc[:, :-1]'], {}), '(normal_abnormal_test.iloc[:, :-1])\n', (1682, 1717), True, 'import numpy as np\n'), ((1727, 1769), 'numpy.array', 'np.array', (['normal_abnormal_test.iloc[:, -1]'], {}), '(normal_abnormal_test.iloc[:, -1])\n', (1735, 1769), True, 'import numpy as np\n'), ((2074, 2129), 'src.lit_models.LitBaseAutoEncoder.LitBaseAutoEncoder', 'LitBaseAutoEncoder', ([], {'n_layers': '(2)', 'features_list': '[8, 4, 2]'}), '(n_layers=2, features_list=[8, 4, 2])\n', (2092, 2129), False, 'from src.lit_models.LitBaseAutoEncoder import LitBaseAutoEncoder\n'), ((2183, 2195), 'src.lit_models.LitBaseVAE.LitBaseVAE', 'LitBaseVAE', ([], {}), '()\n', (2193, 2195), False, 'from src.lit_models.LitBaseVAE import LitBaseVAE\n'), ((1153, 1178), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1176, 1178), False, 'import torch\n')] |
import numpy as np
from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10
from .orbital_elements import elements, jupiter_oe, saturn_oe, uranus_oe
from .utils import *
from .transform import cartesian_to_spherical, spherical_to_cartesian, ecliptic_to_equatorial, elements_to_ecliptic, radec_to_altaz
from .correction import moon_perts, topocentric, jupiter_lon_perts, saturn_lon_perts, saturn_lat_perts, uranus_lon_perts
class sun:
"""
Sun positional parameters
Parameters
----------
d (datetime): time of observation
epoch (int): year of epoch
obs_loc : tuple of observer location (longtitude, latitude)
Attributes
----------
ecl_sph : ecliptic spherical coordinates (lon, lat, r)
ecl_car : ecliptic cartesian coordinates (x, y, z)
equ_car : equatorial cartesian coordinates (x, y, z)
equ_sph : equatorial spherical coordinates (ra, dec, r)
alt : azimuth
az : altitude
"""
def __init__(self, t, obs_loc=None, epoch=None):
self.name = 'sun'
d = datetime_to_day(t)
ecl = obl_ecl(d)
self.d = d
N,i,w,a,e,M = elements(self.name, d)
self.elements = {'N':N, 'i':i, 'w':w, 'a':a, 'e':e, 'M':M}
self.L = rev(w+M)
self.ecl_car = elements_to_ecliptic('sun', N,i,w,a,e,M)
self.ecl_sph = cartesian_to_spherical(self.ecl_car)
if epoch is not None:
self.ecl_sph[0] = self.ecl_sph[0] + 3.82394E-5 * (365.2422 * (epoch-2000) - d)
self.ecl_car = spherical_to_cartesian(self.ecl_sph)
self.equ_car = ecliptic_to_equatorial(self.ecl_car, d)
self.equ_sph = cartesian_to_spherical(self.equ_car)
self.ra, self.dec, self.r = self.equ_sph # just for ease of use
if obs_loc is None:
self.az, self.alt = None, None
else:
self.az, self.alt = radec_to_altaz(self.ra, self.dec, obs_loc, t)
class moon:
    """
    Moon positional parameters.

    Parameters
    ----------
    t (datetime): time of observation
    obs_loc : tuple of observer location (longitude, latitude); when omitted,
        ``az`` and ``alt`` are left as None and no topocentric correction is applied
    epoch (int): year of epoch (forwarded to the internal sun instance)

    Attributes
    ----------
    elements : dictionary of orbital elements
    geo_ecl_car : geocentric ecliptic cartesian coordinates (x, y, z)
    geo_ecl_sph : geocentric ecliptic spherical coordinates (lon, lat, r)
    geo_equ_car : geocentric equatorial cartesian coordinates (x, y, z)
    geo_equ_sph : geocentric equatorial spherical coordinates (ra, dec, r)
    ra : Right Ascension (GCRS)
    dec : Declination (GCRS)
    r : distance to earth (in Earth Radii)
    az : azimuth (None without obs_loc)
    alt : altitude (None without obs_loc)
    elongation : angular distance from the sun (degrees)
    FV : phase angle (0: full, 90: half, 180: new)
    """
    def __init__(self, t, obs_loc=None, epoch=None):
        self.name = 'moon'
        day = datetime_to_day(t)
        obl_ecl(day)  # called for parity with the original; result unused here
        self._sun = sun(t=t, obs_loc=obs_loc, epoch=epoch)

        N, i, w, a, e, M = elements(self.name, day)
        self.elements = dict(N=N, i=i, w=w, a=a, e=e, M=M)

        raw_geo_car = elements_to_ecliptic(self.name, N, i, w, a, e, M)
        self.geo_ecl_sph = cartesian_to_spherical(raw_geo_car)
        self.L = rev(N + w + M)  # Moon's mean longitude

        # Apply lunar perturbation corrections in the spherical ecliptic frame
        self.geo_ecl_sph = moon_perts(self.geo_ecl_sph, self._sun.elements, self.elements)
        self.geo_ecl_car = spherical_to_cartesian(self.geo_ecl_sph)
        self.geo_equ_car = ecliptic_to_equatorial(self.geo_ecl_car, day)
        self.geo_equ_sph = cartesian_to_spherical(self.geo_equ_car)

        if obs_loc is not None:
            # Topocentric correction before computing azimuth/altitude
            self.geo_equ_sph = topocentric(obs_loc, self._sun.L, self.geo_equ_sph, day)
            self.az, self.alt = radec_to_altaz(
                self.geo_equ_sph[0], self.geo_equ_sph[1], obs_loc, t)
        else:
            self.az = self.alt = None

        self.ra, self.dec, self.r = self.geo_equ_sph  # convenience aliases

        # Elongation from the spherical ecliptic longitudes/latitude
        lon_diff = (self._sun.ecl_sph[0] - self.geo_ecl_sph[0]) * rd
        cos_elong = cos(lon_diff) * cos(self.geo_ecl_sph[1] * rd)
        self.elongation = arccos(cos_elong) * (180 / pi)
        self.FV = 180 - self.elongation
class planet:
    """
    Planets positional parameters.

    Parameters
    ----------
    name (str) : name of the planet ('mercury', 'venus', 'mars', 'jupiter',
        'saturn', 'uranus' or 'neptune')
    t (datetime) : time of observation
    obs_loc (tuple) : observer location (longitude, latitude)
    epoch (int) : year of epoch
    Attributes
    ----------
    hel_ecl_sph : Heliocentric ecliptic spherical coordinates (lon, lat, r)
    hel_ecl_car : Heliocentric ecliptic cartesian coordinates (x, y, z)
    geo_ecl_car : Geocentric ecliptic cartesian coordinates (x, y, z)
    geo_ecl_sph : Geocentric ecliptic spherical coordinates (lon, lat, r)
    geo_equ_car : Geocentric equatorial cartesian coordinates (x, y, z)
    geo_equ_sph : Geocentric equatorial spherical coordinates (ra, dec, r)
    ra : Right Ascension (GCRS)
    dec : Declination (GCRS)
    r : distance to earth (in AU)
    alt : azimuth
    az : altitude
    elongation : elongation
    FV : phase angle
    mag : Apparent magnitude
    diameter : Apparent diameter
    """
    def __init__(self, name, t, obs_loc=None, epoch=None):
        self.name = name.lower()
        #self.obs_loc = obs_loc
        d = datetime_to_day(t)
        # NOTE(review): result of obl_ecl is unused here — presumably kept for
        # its computation side; confirm before removing.
        ecl = obl_ecl(d)
        # The sun position is needed for the heliocentric -> geocentric shift
        self._sun = sun(t=t, obs_loc=obs_loc, epoch=epoch)
        N,i,w,a,e,M = elements(self.name, d)
        self.elements = {'N':N, 'i':i, 'w':w, 'a':a, 'e':e, 'M':M}
        hel_ecl_car = elements_to_ecliptic(self.name, N,i,w,a,e,M)
        lon, lat, r = cartesian_to_spherical(hel_ecl_car)
        # Correcting perturbations of Jupiter, Saturn and Uranus, which
        # depend on the mean anomalies of all three giant planets
        if self.name in ['jupiter', 'saturn', 'uranus']:
            Mj = jupiter_oe(d)[-1]
            Ms = saturn_oe(d)[-1]
            Mu = uranus_oe(d)[-1]
            if self.name=='jupiter':
                lon = lon + jupiter_lon_perts(Mj, Ms, Mu)
            elif self.name=='saturn':
                lon = lon + saturn_lon_perts(Mj, Ms, Mu)
                lat = lat + saturn_lat_perts(Mj, Ms, Mu)
            elif self.name=='uranus':
                lon = lon + uranus_lon_perts(Mj, Ms, Mu)
        # Precession correction towards the requested epoch
        if epoch is not None:
            lon = lon + 3.82394E-5 * (365.2422 * (epoch-2000) - d)
        # heliocentric coordinates
        self.hel_ecl_sph = np.array([lon, lat, r])
        self.hel_ecl_car = spherical_to_cartesian(self.hel_ecl_sph)
        # To geocentric: shift by the sun's geocentric position vector
        self.geo_ecl_car = self._sun.ecl_car + self.hel_ecl_car # TODO(review): verify this sun-vector addition
        self.geo_ecl_sph = cartesian_to_spherical(self.geo_ecl_car)
        self.geo_equ_car = ecliptic_to_equatorial(self.geo_ecl_car, d)
        self.geo_equ_sph = cartesian_to_spherical(self.geo_equ_car)
        self.ra, self.dec, self.r = self.geo_equ_sph # just for ease of use
        if obs_loc is None:
            self.az, self.alt = None, None
        else:
            self.az, self.alt = radec_to_altaz(self.ra, self.dec, obs_loc, t)
        #=====================================================================
        # Phase angle and the elongation via the law of cosines on the
        # sun-earth-planet triangle (s: earth-sun, R: earth-planet, r: sun-planet)
        R = self.geo_ecl_sph[-1] # presumably the geocentric distance — verify
        r = self.hel_ecl_sph[-1]
        s = self._sun.r
        self.elongation = arccos((s**2 + R**2 - r**2)/(2*s*R))*(180/pi)
        FV = arccos((r**2 + R**2 - s**2)/(2*r*R))*(180/pi)
        self.FV = FV
        #self.phase = (1 + cos(self.FV*rd))/2
        # Apparent magnitude: per-planet empirical formula in log10(r*R) and
        # the phase angle FV; d0 is the apparent diameter at 1 AU
        if self.name=='mercury':
            d0 = 6.74
            mag = -0.36 + 5*log10(r*R) + 0.027 * FV + 2.2E-13 * FV**6
        elif self.name=='venus':
            d0 = 16.92
            mag = -4.34 + 5*log10(r*R) + 0.013 * FV + 4.2E-7 * FV**3
        elif self.name=='mars':
            d0 = 9.32
            mag = -1.51 + 5*log10(r*R) + 0.016 * FV
        elif self.name=='jupiter':
            d0 = 191.01
            mag = -9.25 + 5*log10(r*R) + 0.014 * FV
        elif self.name=='saturn':
            d0 = 158.2
            ir = 28.06 # tilt rings to ecliptic
            Nr = 169.51 + 3.82E-5 * d # ascending node of plane of rings
            los = self.geo_ecl_sph[0] # Saturn's geocentric ecliptic longitude
            las = self.geo_ecl_sph[1] # Saturn's geocentric ecliptic latitude
            # B : tilt of Saturn's rings as seen from earth
            B = arcsin(sin(las*rd) * cos(ir*rd) - cos(las*rd) * sin(ir*rd) * sin((los-Nr)*rd))*(180/pi)
            # rings add/remove brightness depending on their apparent tilt
            ring_magn = -2.6 * sin(abs(B)*rd) + 1.2 * (sin(B*rd))**2
            mag = -9.0 + 5*log10(r*R) + 0.044 * FV + ring_magn
        elif self.name=='uranus':
            d0 = 63.95
            mag = -7.15 + 5*log10(r*R) + 0.001 * FV
        elif self.name=='neptune':
            d0 = 61.55
            mag = -6.90 + 5*log10(r*R) + 0.001 * FV
        else:
            # NOTE(review): for an unrecognised name, mag stays None and the
            # round() below raises TypeError (and d0 is undefined) — consider
            # raising ValueError with a clear message instead.
            mag = None
        self.mag = round(mag,2)
        self.diameter = d0 / self.r
| [
"numpy.log10",
"numpy.arccos",
"numpy.array",
"numpy.cos",
"numpy.sin"
] | [((6730, 6753), 'numpy.array', 'np.array', (['[lon, lat, r]'], {}), '([lon, lat, r])\n', (6738, 6753), True, 'import numpy as np\n'), ((7636, 7684), 'numpy.arccos', 'arccos', (['((s ** 2 + R ** 2 - r ** 2) / (2 * s * R))'], {}), '((s ** 2 + R ** 2 - r ** 2) / (2 * s * R))\n', (7642, 7684), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((7698, 7746), 'numpy.arccos', 'arccos', (['((r ** 2 + R ** 2 - s ** 2) / (2 * r * R))'], {}), '((r ** 2 + R ** 2 - s ** 2) / (2 * r * R))\n', (7704, 7746), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((4225, 4279), 'numpy.cos', 'cos', (['((self._sun.ecl_sph[0] - self.geo_ecl_sph[0]) * rd)'], {}), '((self._sun.ecl_sph[0] - self.geo_ecl_sph[0]) * rd)\n', (4228, 4279), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((4278, 4307), 'numpy.cos', 'cos', (['(self.geo_ecl_sph[1] * rd)'], {}), '(self.geo_ecl_sph[1] * rd)\n', (4281, 4307), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((7916, 7928), 'numpy.log10', 'log10', (['(r * R)'], {}), '(r * R)\n', (7921, 7928), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((8042, 8054), 'numpy.log10', 'log10', (['(r * R)'], {}), '(r * R)\n', (8047, 8054), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((8166, 8178), 'numpy.log10', 'log10', (['(r * R)'], {}), '(r * R)\n', (8171, 8178), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((8277, 8289), 'numpy.log10', 'log10', (['(r * R)'], {}), '(r * R)\n', (8282, 8289), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((8836, 8847), 'numpy.sin', 'sin', (['(B * rd)'], {}), '(B * rd)\n', (8839, 8847), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, 
arccos, log10\n'), ((8700, 8713), 'numpy.sin', 'sin', (['(las * rd)'], {}), '(las * rd)\n', (8703, 8713), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((8714, 8726), 'numpy.cos', 'cos', (['(ir * rd)'], {}), '(ir * rd)\n', (8717, 8726), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((8754, 8774), 'numpy.sin', 'sin', (['((los - Nr) * rd)'], {}), '((los - Nr) * rd)\n', (8757, 8774), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((8878, 8890), 'numpy.log10', 'log10', (['(r * R)'], {}), '(r * R)\n', (8883, 8890), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((8999, 9011), 'numpy.log10', 'log10', (['(r * R)'], {}), '(r * R)\n', (9004, 9011), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((8727, 8740), 'numpy.cos', 'cos', (['(las * rd)'], {}), '(las * rd)\n', (8730, 8740), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((8741, 8753), 'numpy.sin', 'sin', (['(ir * rd)'], {}), '(ir * rd)\n', (8744, 8753), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n'), ((9109, 9121), 'numpy.log10', 'log10', (['(r * R)'], {}), '(r * R)\n', (9114, 9121), False, 'from numpy import pi, sin, cos, tan, sqrt, arctan2, arcsin, arctan, arccos, log10\n')] |
import re
import numpy as np
from numpy.core.einsumfunc import _parse_einsum_input
from paderbox.array.segment import segment_axis
__all__ = [
'split_complex_features',
'merge_complex_features',
'tbf_to_tbchw',
'morph',
]
def split_complex_features(X):
    """Stack the real and imaginary parts of a complex array along the feature axis.

    :param X: complex input array with T times B times F features
    :return: real output array with T times B times 2*F features
        (real parts first, imaginary parts second)
    """
    real_part = np.asarray(X.real)
    imag_part = np.asarray(X.imag)
    return np.concatenate((real_part, imag_part), axis=2)
def merge_complex_features(X):
    """Merge two stacked real halves back into one complex array.

    Inverse of ``split_complex_features``.

    :param X: real input array with T times B times 2*F features
    :return: complex output array with T times B times F features
    """
    half = X.shape[-1] // 2
    real_part = X[:, :, :half]
    imag_part = X[:, :, half:]
    return real_part + 1j * imag_part
def tbf_to_tbchw(x, left_context, right_context, step_width,
                 pad_mode='symmetric', pad_kwargs=None):
    """Transform data from TxBxF format to TxBxCxHxW format.

    Only relevant for training a neural network in frames mode.
    The abbreviations stand for:

    T: Time frames
    B: Batch size
    F: Feature size
    C: Channel (almost always 1)
    H: Height of the convolution filter
    W: Width of the convolution filter

    :param x: Data to be transformed
    :param left_context: Context size left of the current frame
    :param right_context: Context size right of the current frame
    :param step_width: Step width for the sliding window
    :param pad_mode: Mode for padding. See :numpy.pad for details
    :param pad_kwargs: Kwargs for the pad call
    :return: Transformed data
    """
    pad_kwargs = {} if pad_kwargs is None else pad_kwargs
    # Pad only along the time axis so every frame has full context
    padded = np.pad(
        x,
        ((left_context, right_context), (0, 0), (0, 0)),
        mode=pad_mode, **pad_kwargs)
    window_size = left_context + right_context + 1
    segmented = segment_axis(
        padded, window_size, step_width, axis=0, end='cut')
    # (T, W, B, F) -> (T, B, F, W) then insert the singleton channel axis
    return segmented.transpose(0, 2, 3, 1)[:, :, None, :, :]
def _normalize(op):
op = op.replace(',', '')
op = op.replace(' ', '')
op = ' '.join(c for c in op)
op = op.replace(' * ', '*')
op = op.replace('- >', '->')
op = op.replace('. . .', '...')
return op
def _shrinking_reshape(array, source, target):
source, target = source.split(), target.replace(' * ', '*').split()
if '...' in source:
assert '...' in target, (source, target)
independent_dims = array.ndim - len(source) + 1
import string
ascii_letters = [
s
for s in string.ascii_letters
if s not in source and s not in target
]
index = source.index('...')
source[index:index + 1] = ascii_letters[:independent_dims]
index = target.index('...')
target[index:index + 1] = ascii_letters[:independent_dims]
input_shape = {key: array.shape[index] for index, key in enumerate(source)}
output_shape = []
for t in target:
product = 1
if not t == '1':
t = t.split('*')
for t_ in t:
product *= input_shape[t_]
output_shape.append(product)
return array.reshape(output_shape)
def _expanding_reshape(array, source, target, **shape_hints):
    """Unflatten axes of ``array`` whose source tokens contain ``*``.

    For every source group like ``a*b`` the corresponding axis is split;
    sizes come from ``shape_hints`` (keyed by axis letter), with at most
    one unknown member per group resolved as ``-1``.  Arrays whose source
    contains no ``*`` are returned unchanged.
    """
    try: # Check number of inputs for unflatten operations
        assert len(re.sub(r'.\*', '', source.replace(' ', ''))) == array.ndim, \
            (array.shape, source, target)
    except AssertionError: # Check number of inputs for ellipses operations
        assert len(re.sub(r'(\.\.\.)|(.\*)', '', source.replace(' ', ''))) <= \
            array.ndim,(array.shape, source, target)
    def _get_source_grouping(source):
        """
        Gets axis as alphanumeric: one list of member names per source axis.
        """
        source = ' '.join(source)
        source = source.replace(' * ', '*')
        groups = source.split()
        groups = [group.split('*') for group in groups]
        return groups
    # Nothing to unflatten
    if '*' not in source:
        return array
    source, target = source.split(), target.replace(' * ', '*').split()
    if '...' in source:
        assert '...' in target, (source, target)
        # Replace the ellipsis with fresh single-letter axis names on both sides
        independent_dims = array.ndim - len(source) + 1
        import string
        ascii_letters = [
            s
            for s in string.ascii_letters
            if s not in source and s not in target
        ]
        index = source.index('...')
        source[index:index + 1] = ascii_letters[:independent_dims]
        index = target.index('...')
        target[index:index + 1] = ascii_letters[:independent_dims]
    target_shape = []
    for axis, group in enumerate(_get_source_grouping(source)):
        if len(group) == 1:
            # Ungrouped axis: keep its current size
            target_shape.append(array.shape[axis:axis + 1])
        else:
            # Grouped axis: sizes from shape_hints, at most one -1 wildcard
            shape_wildcard_remaining = True
            for member in group:
                if member in shape_hints:
                    target_shape.append([shape_hints[member]])
                else:
                    if shape_wildcard_remaining:
                        shape_wildcard_remaining = False
                        target_shape.append([-1])
                    else:
                        raise ValueError('Not enough shape hints provided.')
    target_shape = np.concatenate(target_shape, 0)
    array = array.reshape(target_shape)
    return array
def morph(operation, array, reduce=None, **shape_hints):
    """ This is an experimental version of a generalized reshape.

    The operation string (e.g. ``'t b f -> b t f'``) is applied in four
    stages: expand (``*`` groups are unflattened via ``shape_hints``),
    squeeze literal ``1`` axes, transpose/reduce via einsum (axes missing
    from the target are folded with ``reduce``), and a final shrinking
    reshape.  See test cases for examples.
    """
    operation = _normalize(operation)
    source, target = operation.split('->')
    # Expanding reshape: unflatten '*' groups
    array = _expanding_reshape(array, source, target, **shape_hints)
    # Initial squeeze: drop literal '1' axes (reversed so indices stay valid)
    squeeze_operation = operation.split('->')[0].split()
    for axis, op in reversed(list(enumerate(squeeze_operation))):
        if op == '1':
            array = np.squeeze(array, axis=axis)
    # Transpose: '1' and '*' no longer exist at this point, blank them out
    transposition_operation = operation.replace('1', ' ').replace('*', ' ')
    try:
        in_shape, out_shape, (array, ) = _parse_einsum_input([transposition_operation.replace(' ', ''), array])
        if len(set(in_shape) - set(out_shape)) > 0:
            # Axes missing from the target must be folded with `reduce`
            assert reduce is not None, ('Missing reduce function', reduce, transposition_operation)
            reduce_axis = tuple([i for i, s in enumerate(in_shape) if s not in out_shape])
            array = reduce(array, axis=reduce_axis)
            in_shape = ''.join([s for s in in_shape if s in out_shape])
        array = np.einsum(f'{in_shape}->{out_shape}', array)
    except ValueError as e:
        # Enrich einsum errors with the operation and shape for debugging
        msg = (
            f'op: {transposition_operation} ({in_shape}->{out_shape}), '
            f'shape: {np.shape(array)}'
        )
        if len(e.args) == 1:
            e.args = (e.args[0] + '\n\n' + msg,)
        else:
            print(msg)
        raise
    # Final reshape: merge axes / insert singletons per the target layout
    source = transposition_operation.split('->')[-1]
    target = operation.split('->')[-1]
    return _shrinking_reshape(array, source, target)
| [
"numpy.shape",
"paderbox.array.segment.segment_axis",
"numpy.asarray",
"numpy.squeeze",
"numpy.einsum",
"numpy.concatenate",
"numpy.pad"
] | [((1764, 1855), 'numpy.pad', 'np.pad', (['x', '((left_context, right_context), (0, 0), (0, 0))'], {'mode': 'pad_mode'}), '(x, ((left_context, right_context), (0, 0), (0, 0)), mode=pad_mode,\n **pad_kwargs)\n', (1770, 1855), True, 'import numpy as np\n'), ((5304, 5335), 'numpy.concatenate', 'np.concatenate', (['target_shape', '(0)'], {}), '(target_shape, 0)\n', (5318, 5335), True, 'import numpy as np\n'), ((6551, 6595), 'numpy.einsum', 'np.einsum', (['f"""{in_shape}->{out_shape}"""', 'array'], {}), "(f'{in_shape}->{out_shape}', array)\n", (6560, 6595), True, 'import numpy as np\n'), ((520, 538), 'numpy.asarray', 'np.asarray', (['X.real'], {}), '(X.real)\n', (530, 538), True, 'import numpy as np\n'), ((540, 558), 'numpy.asarray', 'np.asarray', (['X.imag'], {}), '(X.imag)\n', (550, 558), True, 'import numpy as np\n'), ((5922, 5950), 'numpy.squeeze', 'np.squeeze', (['array'], {'axis': 'axis'}), '(array, axis=axis)\n', (5932, 5950), True, 'import numpy as np\n'), ((1944, 2003), 'paderbox.array.segment.segment_axis', 'segment_axis', (['x', 'window_size', 'step_width'], {'axis': '(0)', 'end': '"""cut"""'}), "(x, window_size, step_width, axis=0, end='cut')\n", (1956, 2003), False, 'from paderbox.array.segment import segment_axis\n'), ((6735, 6750), 'numpy.shape', 'np.shape', (['array'], {}), '(array)\n', (6743, 6750), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import os
import math
import matplotlib.pyplot as plt
import pickle
import geopy
import geopy.distance as distance
def retrieve_and_save(countries, fns, out_dir, keys, sample=True):
    """Build a 1 km download grid around survey cluster locations, per country.

    For every cluster centre a 10 km x 10 km box is spanned and sampled on an
    11 x 11 grid (121 candidate points per cluster).  All candidates are then
    re-binned onto a country-wide ~1 km x 1 km grid; for every occupied cell
    the ids of the clusters falling into it are recorded.

    Parameters
    ----------
    countries : list of str
        Country names, used as sub-directory names under ``out_dir``.
    fns : list of str
        Space-separated CSV files (one per country) with ``lat``/``lon`` columns.
    out_dir : str
        Output root directory; must contain one sub-directory per country.
    keys : list of str
        Unused; kept for interface compatibility.
    sample : bool, optional
        Unused; kept for interface compatibility.

    Side effects
    ------------
    Per country, writes data.csv, candidate_download_locs.txt,
    candidate_download_locs.csv, nlats/nlons/clustering/counting .npy files
    and the pickled ``cluster_list``.
    """
    for idx, country in enumerate(countries):
        df = pd.read_csv(fns[idx], sep=' ')
        # Drop obviously invalid (0, 0) coordinates
        df = df[(df.lat != 0) & (df.lon != 0)]
        df.to_csv(out_dir + country + '/data.csv')

        # --- 11x11 grid of candidate download locations around each cluster
        loc_around = []
        loc_cluster = []
        for index, row in df.iterrows():
            center_lat = row['lat']
            center_lon = row['lon']
            start = geopy.Point(center_lat, center_lon)
            d5 = geopy.distance.great_circle(kilometers=5)
            end_north = d5.destination(point=start, bearing=0)
            end_east = d5.destination(point=start, bearing=90)
            end_south = d5.destination(point=start, bearing=180)
            end_west = d5.destination(point=start, bearing=270)
            top_left = geopy.Point(end_north.latitude, end_west.longitude)
            top_right = geopy.Point(end_north.latitude, end_east.longitude)
            bottom_left = geopy.Point(end_south.latitude, end_west.longitude)
            step_down = (bottom_left.latitude - top_left.latitude) / 10
            step_right = (top_right.longitude - top_left.longitude) / 10
            for yi in range(11):
                y = top_left.latitude + yi * step_down
                for xi in range(11):
                    x = top_left.longitude + xi * step_right
                    loc_around.append((y, x))
                    loc_cluster.append((row['lat'], row['lon']))
        with open(os.path.join(out_dir, country, 'candidate_download_locs.txt'), 'w') as f:
            for loc, cluster_loc in zip(loc_around, loc_cluster):
                f.write("%f %f %f %f\n" % (loc[0], loc[1], cluster_loc[0], cluster_loc[1]))
        lats = []
        lons = []
        # NOTE: this csv is rewritten with the clustered grid further below;
        # the loop is kept because it also collects the lats/lons lists.
        with open(os.path.join(out_dir, country, 'candidate_download_locs.csv'), 'w') as f:
            c = 0
            f.write("name,latitude,longitude\n")
            for loc, cluster_loc in zip(loc_around, loc_cluster):
                f.write("%s,%f,%f\n" % (country + "_" + str(c), loc[0], loc[1]))
                c += 1
                lats.append(loc[0])
                lons.append(loc[1])

        # --- country-wide bounding box over all candidate points
        nlats = np.array(lats)
        nlons = np.array(lons)
        nlats_min = np.min(nlats)
        nlats_max = np.max(nlats)
        nlons_min = np.min(nlons)
        nlons_max = np.max(nlons)
        np.save(os.path.join(out_dir, country, 'nlats'), nlats)
        np.save(os.path.join(out_dir, country, 'nlons'), nlons)
        print("Lats: %f - %f" % (nlats_min, nlats_max))
        print("Lons: %f - %f" % (nlons_min, nlons_max))

        # --- grid size: degrees of lat/lon that span ~1 km at the mid-point
        lats_dif = nlats_max - nlats_min
        lons_dif = nlons_max - nlons_min
        mid_point = geopy.Point((nlats_max + nlats_min) / 2, (nlons_max + nlons_min) / 2)
        d1 = geopy.distance.great_circle(kilometers=1)
        onek_lats = mid_point.latitude - d1.destination(point=mid_point, bearing=180).latitude
        onek_lons = mid_point.longitude - d1.destination(point=mid_point, bearing=270).longitude
        print("onek_lats: ", onek_lats)
        # FIX: this line used to be mislabelled "onek_lats"
        print("onek_lons: ", onek_lons)
        size_y = math.ceil(lats_dif / onek_lats)
        size_x = math.ceil(lons_dif / onek_lons)
        print("size_y: ", size_y)
        print("size_x: ", size_x)

        # --- count candidate points per 1 km cell
        # FIX: the deprecated ``np.int`` alias was removed in NumPy 1.24;
        # the builtin ``int`` is the documented replacement.
        np_counting = np.zeros((size_y, size_x), dtype=int)
        for lat, lon in zip(lats, lons):
            y_pos = int((nlats_max - lat) / onek_lats)
            x_pos = int((lon - nlons_min) / onek_lons)
            np_counting[y_pos, x_pos] += 1
        depth = np.max(np_counting)
        np_clustering = -np.ones((size_y, size_x, depth), dtype=int)
        print("Depth: ", depth)
        i = 0
        for lat, lon in zip(lats, lons):
            y_pos = int((nlats_max - lat) / onek_lats)
            x_pos = int((lon - nlons_min) / onek_lons)
            slot = 0
            while np_clustering[y_pos, x_pos, slot] != -1:
                slot += 1
            # 121 candidate points were generated per cluster, so i // 121
            # recovers the cluster id the point belongs to
            np_clustering[y_pos, x_pos, slot] = i // 121
            i += 1
        np.save(os.path.join(out_dir, country, 'clustering'), np_clustering)
        np.save(os.path.join(out_dir, country, 'counting'), np_counting)

        # --- final candidate list: one entry per occupied grid cell,
        # plus the list of cluster ids contained in each cell
        iid2hh = []
        with open(os.path.join(out_dir, country, 'candidate_download_locs.csv'), 'w') as f:
            f.write("name,latitude,longitude\n")
            c = 0
            for y in range(size_y):
                lat = nlats_max - y * onek_lats  # moving south
                for x in range(size_x):
                    lon = nlons_min + x * onek_lons  # moving east
                    if np_counting[y, x] > 0:
                        f.write("%s,%f,%f\n" % (country + "_" + str(c), lat, lon))
                        nhh = np_counting[y, x]  # number of points in the cell
                        iid2hh.append(list(np_clustering[y, x, :nhh]))
                        c += 1
        with open(os.path.join(out_dir, country, 'cluster_list'), 'wb') as filehandle:
            # store the cell -> cluster-id mapping as a binary stream
            pickle.dump(iid2hh, filehandle)
if __name__ == '__main__':
    ############################
    ############ LSMS ##########
    ############################
    # Script entry point: process the Malawi 2016 LSMS household survey.
    countries = ['malawi']
    fns = ['../data/output/LSMS/Malawi 2016 LSMS (Household).txt']
    out_dir = '../data/output/LSMS/'
    # NOTE: `keys` is currently unused by retrieve_and_save
    keys = ['lats', 'lons', 'expagg']
    retrieve_and_save(countries, fns, out_dir, keys)
| [
"math.ceil",
"numpy.ones",
"pandas.read_csv",
"pickle.dump",
"os.path.join",
"numpy.max",
"numpy.array",
"numpy.zeros",
"geopy.distance.great_circle",
"geopy.Point",
"numpy.min"
] | [((2394, 2408), 'numpy.array', 'np.array', (['lats'], {}), '(lats)\n', (2402, 2408), True, 'import numpy as np\n'), ((2419, 2433), 'numpy.array', 'np.array', (['lons'], {}), '(lons)\n', (2427, 2433), True, 'import numpy as np\n'), ((2448, 2461), 'numpy.min', 'np.min', (['nlats'], {}), '(nlats)\n', (2454, 2461), True, 'import numpy as np\n'), ((2476, 2489), 'numpy.max', 'np.max', (['nlats'], {}), '(nlats)\n', (2482, 2489), True, 'import numpy as np\n'), ((2504, 2517), 'numpy.min', 'np.min', (['nlons'], {}), '(nlons)\n', (2510, 2517), True, 'import numpy as np\n'), ((2532, 2545), 'numpy.max', 'np.max', (['nlons'], {}), '(nlons)\n', (2538, 2545), True, 'import numpy as np\n'), ((2868, 2937), 'geopy.Point', 'geopy.Point', (['((nlats_max + nlats_min) / 2)', '((nlons_max + nlons_min) / 2)'], {}), '((nlats_max + nlats_min) / 2, (nlons_max + nlons_min) / 2)\n', (2879, 2937), False, 'import geopy\n'), ((2945, 2986), 'geopy.distance.great_circle', 'geopy.distance.great_circle', ([], {'kilometers': '(1)'}), '(kilometers=1)\n', (2972, 2986), False, 'import geopy\n'), ((3244, 3275), 'math.ceil', 'math.ceil', (['(lats_dif / onek_lats)'], {}), '(lats_dif / onek_lats)\n', (3253, 3275), False, 'import math\n'), ((3285, 3316), 'math.ceil', 'math.ceil', (['(lons_dif / onek_lons)'], {}), '(lons_dif / onek_lons)\n', (3294, 3316), False, 'import math\n'), ((3451, 3491), 'numpy.zeros', 'np.zeros', (['(size_y, size_x)'], {'dtype': 'np.int'}), '((size_y, size_x), dtype=np.int)\n', (3459, 3491), True, 'import numpy as np\n'), ((3659, 3678), 'numpy.max', 'np.max', (['np_counting'], {}), '(np_counting)\n', (3665, 3678), True, 'import numpy as np\n'), ((277, 307), 'pandas.read_csv', 'pd.read_csv', (['fns[idx]'], {'sep': '""" """'}), "(fns[idx], sep=' ')\n", (288, 307), True, 'import pandas as pd\n'), ((2556, 2595), 'os.path.join', 'os.path.join', (['out_dir', 'country', '"""nlats"""'], {}), "(out_dir, country, 'nlats')\n", (2568, 2595), False, 'import os\n'), ((2614, 2653), 'os.path.join', 
'os.path.join', (['out_dir', 'country', '"""nlons"""'], {}), "(out_dir, country, 'nlons')\n", (2626, 2653), False, 'import os\n'), ((3699, 3745), 'numpy.ones', 'np.ones', (['(size_y, size_x, depth)'], {'dtype': 'np.int'}), '((size_y, size_x, depth), dtype=np.int)\n', (3706, 3745), True, 'import numpy as np\n'), ((4065, 4109), 'os.path.join', 'os.path.join', (['out_dir', 'country', '"""clustering"""'], {}), "(out_dir, country, 'clustering')\n", (4077, 4109), False, 'import os\n'), ((4136, 4178), 'os.path.join', 'os.path.join', (['out_dir', 'country', '"""counting"""'], {}), "(out_dir, country, 'counting')\n", (4148, 4178), False, 'import os\n'), ((4935, 4966), 'pickle.dump', 'pickle.dump', (['iid2hh', 'filehandle'], {}), '(iid2hh, filehandle)\n', (4946, 4966), False, 'import pickle\n'), ((580, 615), 'geopy.Point', 'geopy.Point', (['center_lat', 'center_lon'], {}), '(center_lat, center_lon)\n', (591, 615), False, 'import geopy\n'), ((628, 669), 'geopy.distance.great_circle', 'geopy.distance.great_circle', ([], {'kilometers': '(5)'}), '(kilometers=5)\n', (655, 669), False, 'import geopy\n'), ((926, 977), 'geopy.Point', 'geopy.Point', (['end_north.latitude', 'end_west.longitude'], {}), '(end_north.latitude, end_west.longitude)\n', (937, 977), False, 'import geopy\n'), ((997, 1048), 'geopy.Point', 'geopy.Point', (['end_north.latitude', 'end_east.longitude'], {}), '(end_north.latitude, end_east.longitude)\n', (1008, 1048), False, 'import geopy\n'), ((1070, 1121), 'geopy.Point', 'geopy.Point', (['end_south.latitude', 'end_west.longitude'], {}), '(end_south.latitude, end_west.longitude)\n', (1081, 1121), False, 'import geopy\n'), ((1144, 1195), 'geopy.Point', 'geopy.Point', (['end_south.latitude', 'end_east.longitude'], {}), '(end_south.latitude, end_east.longitude)\n', (1155, 1195), False, 'import geopy\n'), ((4220, 4281), 'os.path.join', 'os.path.join', (['out_dir', 'country', '"""candidate_download_locs.csv"""'], {}), "(out_dir, country, 
'candidate_download_locs.csv')\n", (4232, 4281), False, 'import os\n'), ((4817, 4863), 'os.path.join', 'os.path.join', (['out_dir', 'country', '"""cluster_list"""'], {}), "(out_dir, country, 'cluster_list')\n", (4829, 4863), False, 'import os\n'), ((1604, 1665), 'os.path.join', 'os.path.join', (['out_dir', 'country', '"""candidate_download_locs.txt"""'], {}), "(out_dir, country, 'candidate_download_locs.txt')\n", (1616, 1665), False, 'import os\n'), ((1865, 1926), 'os.path.join', 'os.path.join', (['out_dir', 'country', '"""candidate_download_locs.csv"""'], {}), "(out_dir, country, 'candidate_download_locs.csv')\n", (1877, 1926), False, 'import os\n')] |
# Autogenerated using symbolic_dynamics_n.py; don't edit!
from enum import IntEnum
import numpy as np
from numpy import sin, cos
from .dynamic_model_n import NBallDynamicModel
class StateIndex(IntEnum):
    """Positions of the model's coordinates and velocities in the state vector.

    NOTE: the original autogenerated version wrote ``= 0,`` etc. (trailing
    commas), which made every member value a 1-tuple that only produced the
    intended int through Enum's mixin-argument unpacking.  The plain integer
    assignments below yield identical member values without relying on that
    quirk.
    """
    ALPHA_1_IDX = 0       # angle alpha_1
    PHI_IDX = 1           # angle phi
    PSI_0_IDX = 2         # angle psi_0
    ALPHA_DOT_1_IDX = 3   # d/dt of alpha_1
    PHI_DOT_IDX = 4       # d/dt of phi
    PSI_DOT_0_IDX = 5     # d/dt of psi_0
    NUM_STATES = 6        # total length of the state vector
class DynamicModel(NBallDynamicModel):
    """Autogenerated dynamics of the model (see module header: don't edit).

    The ``x0 .. x29`` locals in each method are common subexpressions emitted
    by the symbolic code generator; they have no individual physical meaning.
    """
    def __init__(self, param, x0):
        # param: model parameter dict (keys like "r_0", "m_1", "theta_2", ...);
        # x0: initial state vector of length StateIndex.NUM_STATES
        super().__init__(StateIndex.NUM_STATES, param, x0)
    def computeOmegaDot(self, x, param, omega_cmd):
        """Solve the linear system A * omega_dot = b for the accelerations.

        Returns a 3x1 ndarray ordered as
        [alpha_ddot_1, phi_ddot, psi_ddot_0] (see computeContactForces,
        which indexes the result with StateIndex.ALPHA_1_IDX/PHI_IDX/PSI_0_IDX).
        """
        phi = x[StateIndex.PHI_IDX]
        psi_0 = x[StateIndex.PSI_0_IDX]
        alpha_dot_1 = x[StateIndex.ALPHA_DOT_1_IDX]
        phi_dot = x[StateIndex.PHI_DOT_IDX]
        psi_dot_0 = x[StateIndex.PSI_DOT_0_IDX]
        # generated common subexpressions
        x0 = param["r_1"]**2
        x1 = cos(phi)
        x2 = param["r_2"] * x1
        x3 = param["m_2"] * x2
        x4 = param["r_1"] * x3
        x5 = param["theta_0"] / param["r_0"]**2
        x6 = sin(phi)
        x7 = param["m_2"] * param["r_2"]**2
        x8 = -param["r_0"] - param["r_1"]
        x9 = sin(psi_0)
        x10 = x8 * x9
        x11 = param["r_2"] * x6
        x12 = param["m_2"] * x11
        x13 = cos(psi_0)
        x14 = x13 * x8 + x8
        x15 = x10 * x12 + x14 * x3
        x16 = param["r_1"] * x8
        x17 = param["r_0"] + param["r_1"]
        x18 = param["r_1"] * x14
        x19 = param["m_0"] * x16 + param["m_1"] * x18 + param["m_2"] * x18 - param["r_1"] * x17 * x5
        x20 = x8**2
        x21 = x9**2
        x22 = param["m_1"] * x17
        x23 = x14**2
        x24 = psi_dot_0**2
        x25 = param["m_1"] * x24
        x26 = phi_dot**2
        x27 = param["m_2"] * x24
        x28 = -x10 * x27 - x12 * x26
        x29 = param["g"] * param["m_2"] - x13 * x17 * x27 + x26 * x3
        # assemble the 3x3 mass matrix A and right-hand side b
        A = np.zeros((3, 3))
        b = np.zeros((3, 1))
        A[0, 0] = param["m_0"] * x0 + param["m_1"] * x0 + param["m_2"] * x0 + param["theta_1"] + x0 * x5 + x4
        A[0, 1] = param["theta_2"] + x1**2 * x7 + x4 + x6**2 * x7
        A[0, 2] = x15 + x19
        A[1, 0] = -1
        A[1, 1] = 1
        A[1, 2] = 0
        A[2, 0] = x19
        A[2, 1] = x15
        A[2, 2] = param["m_0"] * x20 + param["m_1"] * x23 + param["m_2"] * \
            x20 * x21 + param["m_2"] * x23 + x17**2 * x5 - x21 * x22 * x8
        b[0, 0] = -param["r_1"] * x28 - x11 * x29 + x16 * x25 * x9 - x2 * x28
        # first-order actuator model: commanded vs. actual relative speed
        b[1, 0] = (alpha_dot_1 + omega_cmd - phi_dot) / param["tau"]
        b[2, 0] = x10 * x14 * x25 - x10 * x29 - x10 * (param["g"] * param["m_1"] - x13 * x22 * x24) - x14 * x28
        return np.linalg.solve(A, b)
    def computeContactForces(self, x, param, omega_cmd):
        """Return the contact force vectors [F_0, F_1, F_2] (3x1 arrays each),
        evaluated at the accelerations computed by computeOmegaDot."""
        omega_dot = self.computeOmegaDot(x, param, omega_cmd)
        alpha_ddot_1 = omega_dot[StateIndex.ALPHA_1_IDX]
        phi_ddot = omega_dot[StateIndex.PHI_IDX]
        psi_ddot_0 = omega_dot[StateIndex.PSI_0_IDX]
        phi = x[StateIndex.PHI_IDX]
        psi_0 = x[StateIndex.PSI_0_IDX]
        phi_dot = x[StateIndex.PHI_DOT_IDX]
        psi_dot_0 = x[StateIndex.PSI_DOT_0_IDX]
        # generated common subexpressions
        x0 = alpha_ddot_1 * param["r_1"]
        x1 = -param["r_0"] - param["r_1"]
        x2 = psi_ddot_0 * x1
        x3 = cos(psi_0)
        x4 = psi_ddot_0 * (x1 * x3 + x1)
        x5 = sin(psi_0)
        x6 = param["m_1"] * x5
        x7 = psi_dot_0**2
        x8 = x1 * x7
        x9 = cos(phi)
        x10 = param["m_2"] * param["r_2"]
        x11 = phi_ddot * x10
        x12 = sin(phi)
        x13 = phi_dot**2 * x10
        x14 = param["m_2"] * x5
        x15 = param["m_2"] * x0 + param["m_2"] * x4 + x11 * x9 - x12 * x13 - x14 * x8
        x16 = param["m_1"] * x0 + param["m_1"] * x4 + x15 - x6 * x8
        x17 = param["r_0"] + param["r_1"]
        x18 = x17 * x3 * x7
        x19 = param["g"] * param["m_2"] - param["m_2"] * x18 + x11 * x12 + x13 * x9 + x14 * x2
        x20 = param["g"] * param["m_1"] - param["m_1"] * x18 - psi_ddot_0 * x17 * x6 + x19
        F_0 = np.zeros((3, 1))
        F_1 = np.zeros((3, 1))
        F_2 = np.zeros((3, 1))
        F_0[0, 0] = param["m_0"] * x0 + param["m_0"] * x2 + x16
        F_0[1, 0] = param["g"] * param["m_0"] + x20
        F_0[2, 0] = 0
        F_1[0, 0] = x16
        F_1[1, 0] = x20
        F_1[2, 0] = 0
        F_2[0, 0] = x15
        F_2[1, 0] = x19
        F_2[2, 0] = 0
        return [F_0, F_1, F_2]
    def computePositions(self, x, param):
        """Return the position vectors [r_OS_0, r_OS_1, r_OS_2] (3x1 each);
        presumably body-centre positions in the world frame — derived from the
        generated kinematics, verify against the model derivation."""
        alpha_1 = x[StateIndex.ALPHA_1_IDX]
        phi = x[StateIndex.PHI_IDX]
        psi_0 = x[StateIndex.PSI_0_IDX]
        # generated common subexpressions
        x0 = alpha_1 * param["r_1"] - param["r_0"] * psi_0 - param["r_1"] * psi_0
        x1 = param["r_0"] + param["r_1"]
        x2 = x0 - x1 * sin(psi_0)
        x3 = param["r_0"] + x1 * cos(psi_0)
        r_OS_0 = np.zeros((3, 1))
        r_OS_1 = np.zeros((3, 1))
        r_OS_2 = np.zeros((3, 1))
        r_OS_0[0, 0] = x0
        r_OS_0[1, 0] = param["r_0"]
        r_OS_0[2, 0] = 0
        r_OS_1[0, 0] = x2
        r_OS_1[1, 0] = x3
        r_OS_1[2, 0] = 0
        r_OS_2[0, 0] = param["r_2"] * sin(phi) + x2
        r_OS_2[1, 0] = -param["r_2"] * cos(phi) + x3
        r_OS_2[2, 0] = 0
        return [r_OS_0, r_OS_1, r_OS_2]
    def computeBallAngles(self, x, param):
        """Return [alpha], a 1x2 array with the rotation angles of the two
        balls derived from the rolling constraint."""
        alpha_1 = x[StateIndex.ALPHA_1_IDX]
        psi_0 = x[StateIndex.PSI_0_IDX]
        alpha = np.zeros((1, 2))
        alpha[0, 0] = (-alpha_1 * param["r_1"] + param["r_0"] * psi_0 + param["r_1"] * psi_0) / param["r_0"]
        alpha[0, 1] = alpha_1
        return [alpha]
| [
"numpy.sin",
"numpy.zeros",
"numpy.linalg.solve",
"numpy.cos"
] | [((802, 810), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (805, 810), False, 'from numpy import sin, cos\n'), ((965, 973), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (968, 973), False, 'from numpy import sin, cos\n'), ((1073, 1083), 'numpy.sin', 'sin', (['psi_0'], {}), '(psi_0)\n', (1076, 1083), False, 'from numpy import sin, cos\n'), ((1185, 1195), 'numpy.cos', 'cos', (['psi_0'], {}), '(psi_0)\n', (1188, 1195), False, 'from numpy import sin, cos\n'), ((1797, 1813), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1805, 1813), True, 'import numpy as np\n'), ((1826, 1842), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (1834, 1842), True, 'import numpy as np\n'), ((2577, 2598), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (2592, 2598), True, 'import numpy as np\n'), ((3171, 3181), 'numpy.cos', 'cos', (['psi_0'], {}), '(psi_0)\n', (3174, 3181), False, 'from numpy import sin, cos\n'), ((3236, 3246), 'numpy.sin', 'sin', (['psi_0'], {}), '(psi_0)\n', (3239, 3246), False, 'from numpy import sin, cos\n'), ((3338, 3346), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (3341, 3346), False, 'from numpy import sin, cos\n'), ((3432, 3440), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (3435, 3440), False, 'from numpy import sin, cos\n'), ((3928, 3944), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (3936, 3944), True, 'import numpy as np\n'), ((3959, 3975), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (3967, 3975), True, 'import numpy as np\n'), ((3990, 4006), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (3998, 4006), True, 'import numpy as np\n'), ((4697, 4713), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (4705, 4713), True, 'import numpy as np\n'), ((4731, 4747), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (4739, 4747), True, 'import numpy as np\n'), ((4765, 4781), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (4773, 4781), True, 'import 
numpy as np\n'), ((5260, 5276), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (5268, 5276), True, 'import numpy as np\n'), ((4625, 4635), 'numpy.sin', 'sin', (['psi_0'], {}), '(psi_0)\n', (4628, 4635), False, 'from numpy import sin, cos\n'), ((4669, 4679), 'numpy.cos', 'cos', (['psi_0'], {}), '(psi_0)\n', (4672, 4679), False, 'from numpy import sin, cos\n'), ((4984, 4992), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (4987, 4992), False, 'from numpy import sin, cos\n'), ((5037, 5045), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (5040, 5045), False, 'from numpy import sin, cos\n')] |
#!/usr/bin/env python
"""
TODO: Modify module doc.
"""
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "4/12/14"
import inspect
import itertools
import numpy as np
from pymatgen import Lattice
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pymatgen.util.plotting_utils import get_publication_quality_plot
class SpaceGroupVisualizer(object):
    """Visualize a space group by plotting the orbit of a small probe motif
    under all symmetry operations, projected onto the a-b plane.
    """

    def __init__(self):
        pass

    def plot(self, sg):
        """Plot the symmetry orbit of a spiral probe motif for space group *sg*.

        Parameters
        ----------
        sg : object
            A space group object exposing ``crystal_system`` and
            ``symmetry_ops`` (e.g. ``pymatgen.symmetry.groups.SpaceGroup``).
        """
        cs = sg.crystal_system
        # Arbitrary representative lattice parameters; only the subset
        # accepted by the chosen Lattice constructor is actually used.
        params = {
            "a": 10,
            "b": 12,
            "c": 14,
            "alpha": 20,
            "beta": 30,
            "gamma": 40
        }
        # Lattice has a `rhombohedral` convenience constructor, not `trigonal`.
        cs = "rhombohedral" if cs == "Trigonal" else cs
        func = getattr(Lattice, cs.lower())
        # BUGFIX: inspect.getargspec was removed in Python 3.11;
        # prefer getfullargspec when it exists.
        if hasattr(inspect, "getfullargspec"):
            argnames = inspect.getfullargspec(func).args
        else:
            argnames = inspect.getargspec(func).args
        kw = {k: params[k] for k in argnames}
        lattice = func(**kw)
        plt.figure(figsize=(10, 10))
        # Draw the projected unit-cell outline (a and b edges plus closure).
        for i in range(2):
            plt.plot([0, lattice.matrix[i][0]], [0, lattice.matrix[i][1]],
                     'k-')
        plt.plot([lattice.matrix[0][0], lattice.matrix[0][0]],
                 [0, lattice.matrix[1][1]],
                 'k-')
        plt.plot([0, lattice.matrix[0][0]],
                 [lattice.matrix[1][1], lattice.matrix[1][1]],
                 'k-')
        # Probe motif: a tiny spiral near the origin, in fractional coords.
        radius = np.arange(0, 0.02, 0.02 / 100)
        theta = np.arange(0, 4 * np.pi, 4 * np.pi / 100) - np.pi / 2
        x = radius * np.cos(theta) + 0.025
        y = radius * np.sin(theta) + 0.025
        z = 0.001 * theta + 0.02
        # Homogeneous coordinates (x, y, z, 1) so 4x4 affine ops apply.
        # BUGFIX: np.array(zip(...)) yields a useless 0-d object array on
        # Python 3; column_stack builds the intended (npts, 4) float array
        # on both Python 2 and 3.
        d = np.column_stack([x, y, z, np.ones_like(x)])
        for op in sg.symmetry_ops:
            dd = np.dot(op, d.T).T
            # Replicate the transformed motif into the home cell and its
            # 8 in-plane neighbors so the whole plotted cell is covered.
            for tx, ty in itertools.product((-1, 0, 1), (-1, 0, 1)):
                ddd = dd[:, 0:3] + np.array([tx, ty, 0])[None, :]
                color = "r" if 0.5 < ddd[0, 2] > 1 else "b"
                coords = lattice.get_cartesian_coords(ddd[:, 0:3])
                plt.plot(coords[:, 0], coords[:, 1], color + "-")
        max_l = max(params['a'], params['b'])
        lim = [-max_l * 0.1, max_l * 1.1]
        plt.xlim(lim)
        plt.ylim(lim)
        plt.tight_layout()
        plt.show()
if __name__ == "__main__":
    # Demo: draw the symmetry orbit for the orthorhombic group Pnma.
    from symmetry.groups import SpaceGroup
    space_group = SpaceGroup("Pnma")
    SpaceGroupVisualizer().plot(space_group)
| [
"symmetry.groups.SpaceGroup",
"matplotlib.pyplot.plot",
"itertools.product",
"inspect.getargspec",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.dot",
"numpy.cos",
"matplotlib.pyplot.tight_layout",
"numpy.sin",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.arange",
"matplo... | [((2553, 2571), 'symmetry.groups.SpaceGroup', 'SpaceGroup', (['"""Pnma"""'], {}), "('Pnma')\n", (2563, 2571), False, 'from symmetry.groups import SpaceGroup\n'), ((1011, 1039), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1021, 1039), True, 'import matplotlib.pyplot as plt\n'), ((1229, 1321), 'matplotlib.pyplot.plot', 'plt.plot', (['[lattice.matrix[0][0], lattice.matrix[0][0]]', '[0, lattice.matrix[1][1]]', '"""k-"""'], {}), "([lattice.matrix[0][0], lattice.matrix[0][0]], [0, lattice.matrix[1\n ][1]], 'k-')\n", (1237, 1321), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1455), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, lattice.matrix[0][0]]', '[lattice.matrix[1][1], lattice.matrix[1][1]]', '"""k-"""'], {}), "([0, lattice.matrix[0][0]], [lattice.matrix[1][1], lattice.matrix[1\n ][1]], 'k-')\n", (1371, 1455), True, 'import matplotlib.pyplot as plt\n'), ((1506, 1536), 'numpy.arange', 'np.arange', (['(0)', '(0.02)', '(0.02 / 100)'], {}), '(0, 0.02, 0.02 / 100)\n', (1515, 1536), True, 'import numpy as np\n'), ((2391, 2404), 'matplotlib.pyplot.xlim', 'plt.xlim', (['lim'], {}), '(lim)\n', (2399, 2404), True, 'import matplotlib.pyplot as plt\n'), ((2413, 2426), 'matplotlib.pyplot.ylim', 'plt.ylim', (['lim'], {}), '(lim)\n', (2421, 2426), True, 'import matplotlib.pyplot as plt\n'), ((2435, 2453), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2451, 2453), True, 'import matplotlib.pyplot as plt\n'), ((2462, 2472), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2470, 2472), True, 'import matplotlib.pyplot as plt\n'), ((1131, 1199), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, lattice.matrix[i][0]]', '[0, lattice.matrix[i][1]]', '"""k-"""'], {}), "([0, lattice.matrix[i][0]], [0, lattice.matrix[i][1]], 'k-')\n", (1139, 1199), True, 'import matplotlib.pyplot as plt\n'), ((1553, 1593), 'numpy.arange', 'np.arange', (['(0)', '(4 * np.pi)', '(4 * np.pi / 100)'], {}), '(0, 4 
* np.pi, 4 * np.pi / 100)\n', (1562, 1593), True, 'import numpy as np\n'), ((1862, 1903), 'itertools.product', 'itertools.product', (['(-1, 0, 1)', '(-1, 0, 1)'], {}), '((-1, 0, 1), (-1, 0, 1))\n', (1879, 1903), False, 'import itertools\n'), ((1623, 1636), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1629, 1636), True, 'import numpy as np\n'), ((1661, 1674), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1667, 1674), True, 'import numpy as np\n'), ((1818, 1833), 'numpy.dot', 'np.dot', (['op', 'd.T'], {}), '(op, d.T)\n', (1824, 1833), True, 'import numpy as np\n'), ((2114, 2163), 'matplotlib.pyplot.plot', 'plt.plot', (['coords[:, 0]', 'coords[:, 1]', "(color + '-')"], {}), "(coords[:, 0], coords[:, 1], color + '-')\n", (2122, 2163), True, 'import matplotlib.pyplot as plt\n'), ((918, 942), 'inspect.getargspec', 'inspect.getargspec', (['func'], {}), '(func)\n', (936, 942), False, 'import inspect\n'), ((1940, 1961), 'numpy.array', 'np.array', (['[tx, ty, 0]'], {}), '([tx, ty, 0])\n', (1948, 1961), True, 'import numpy as np\n')] |
""" Module providing unit-testing for the `~halotools.mock_observables.tpcf` function.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import warnings
import pytest
from astropy.utils.misc import NumpyRNGContext
from .locate_external_unit_testing_data import tpcf_corrfunc_comparison_files_exist
from ..tpcf import tpcf
from ....custom_exceptions import HalotoolsError
slow = pytest.mark.slow
__all__ = ('test_tpcf_auto', 'test_tpcf_cross', 'test_tpcf_estimators',
'test_tpcf_sample_size_limit', 'test_tpcf_randoms',
'test_tpcf_period_API', 'test_tpcf_cross_consistency_w_auto')
fixed_seed = 43
TPCF_CORRFUNC_FILES_EXIST = tpcf_corrfunc_comparison_files_exist()
@slow
def test_tpcf_auto():
    """Auto-correlation smoke test: ``tpcf`` must return a single 1-d array
    both with explicit randoms (no PBCs) and with PBCs as analytic randoms.
    """
    with NumpyRNGContext(fixed_seed):
        points = np.random.random((100, 3))
        random_points = np.random.random((100, 3))
    boxsize = np.array([1.0, 1.0, 1.0])
    bins = np.linspace(0.001, 0.3, 5)
    cell_size = [bins.max()] * 3

    # Case 1: explicit randoms, open boundaries.
    xi = tpcf(points, bins, sample2=None,
        randoms=random_points, period=None, estimator='Natural',
        approx_cell1_size=cell_size, approx_cellran_size=cell_size)
    assert xi.ndim == 1, "More than one correlation function returned erroneously."

    # Case 2: periodic boundaries supply the randoms analytically.
    xi = tpcf(points, bins, sample2=None,
        randoms=None, period=boxsize, estimator='Natural',
        approx_cell1_size=cell_size, num_threads=1)
    assert xi.ndim == 1, "More than one correlation function returned erroneously."
@slow
def test_tpcf_cross():
    """Cross-correlation smoke test: with ``do_auto=False``, ``tpcf`` must
    return only the single cross-correlation function, with and without
    explicit randoms.
    """
    with NumpyRNGContext(fixed_seed):
        pts_a = np.random.random((100, 3))
        pts_b = np.random.random((100, 3))
        rand_pts = np.random.random((100, 3))
    boxsize = np.array([1.0, 1.0, 1.0])
    bins = np.linspace(0.001, 0.3, 5)
    cell_size = [bins.max()] * 3

    # Case 1: explicit randoms, open boundaries.
    xi_cross = tpcf(pts_a, bins, sample2=pts_b,
        randoms=rand_pts, period=None,
        estimator='Natural', do_auto=False,
        approx_cell1_size=cell_size)
    assert xi_cross.ndim == 1, "More than one correlation function returned erroneously."

    # Case 2: periodic boundaries supply the randoms analytically.
    xi_cross = tpcf(pts_a, bins, sample2=pts_b,
        randoms=None, period=boxsize,
        estimator='Natural', do_auto=False,
        approx_cell1_size=cell_size)
    assert xi_cross.ndim == 1, "More than one correlation function returned erroneously."
@slow
def test_tpcf_estimators():
    """Verify that every supported estimator returns the expected trio of
    correlation functions (auto-1, cross, auto-2).

    DRY fix: the five previously copy-pasted ``tpcf`` calls differed only in
    the estimator name, so they are collapsed into a single loop; inputs and
    assertions are unchanged.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((100, 3))
        sample2 = np.random.random((100, 3))
        randoms = np.random.random((100, 3))
    rbins = np.linspace(0.001, 0.3, 5)
    rmax = rbins.max()
    estimators = ('Natural', 'Davis-Peebles', 'Hewett', 'Hamilton', 'Landy-Szalay')
    for estimator in estimators:
        result = tpcf(sample1, rbins, sample2=sample2,
            randoms=randoms, period=None,
            estimator=estimator,
            approx_cell1_size=[rmax, rmax, rmax],
            approx_cellran_size=[rmax, rmax, rmax])
        assert len(result) == 3, "wrong number of correlation functions returned erroneously."
@slow
def test_tpcf_randoms():
    """Exercise the valid randoms/PBCs combinations and verify that the one
    invalid combination (no PBCs and no randoms) raises a ValueError.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((100, 3))
        sample2 = np.random.random((100, 3))
        randoms = np.random.random((100, 3))
    period = np.array([1.0, 1.0, 1.0])
    rbins = np.linspace(0.001, 0.3, 5)
    rmax = rbins.max()
    # No PBCs w/ randoms
    result_1 = tpcf(sample1, rbins, sample2=sample2,
        randoms=randoms, period=None,
        estimator='Natural',
        approx_cell1_size=[rmax, rmax, rmax],
        approx_cellran_size=[rmax, rmax, rmax])
    # PBCs w/o randoms
    result_2 = tpcf(sample1, rbins, sample2=sample2,
        randoms=None, period=period,
        estimator='Natural',
        approx_cell1_size=[rmax, rmax, rmax],
        approx_cellran_size=[rmax, rmax, rmax])
    # PBCs w/ randoms
    result_3 = tpcf(sample1, rbins, sample2=sample2,
        randoms=randoms, period=period,
        estimator='Natural',
        approx_cell1_size=[rmax, rmax, rmax],
        approx_cellran_size=[rmax, rmax, rmax])
    # No PBCs and no randoms should throw an error.
    with pytest.raises(ValueError) as err:
        tpcf(sample1, rbins, sample2=sample2,
            randoms=None, period=None,
            estimator='Natural',
            approx_cell1_size=[rmax, rmax, rmax],
            approx_cellran_size=[rmax, rmax, rmax])
    substr = "If no PBCs are specified, randoms must be provided."
    assert substr in err.value.args[0]
    # Each valid combination returns the (auto-1, cross, auto-2) trio.
    assert len(result_1) == 3, "wrong number of correlation functions returned erroneously."
    assert len(result_2) == 3, "wrong number of correlation functions returned erroneously."
    assert len(result_3) == 3, "wrong number of correlation functions returned erroneously."
@slow
def test_tpcf_period_API():
    """Verify the accepted forms of the ``period`` argument: a length-3 array
    and a scalar both work; a period with a negative entry raises ValueError.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((1000, 3))
        sample2 = np.random.random((100, 3))
        randoms = np.random.random((100, 3))
    period = np.array([1.0, 1.0, 1.0])
    rbins = np.linspace(0.001, 0.3, 5)
    rmax = rbins.max()
    # period passed as a length-3 array
    result_1 = tpcf(sample1, rbins, sample2=sample2,
        randoms=randoms, period=period,
        estimator='Natural',
        approx_cell1_size=[rmax, rmax, rmax])
    # period passed as a scalar (interpreted as a cubic box)
    period = 1.0
    result_2 = tpcf(sample1, rbins, sample2=sample2,
        randoms=randoms, period=period,
        estimator='Natural',
        approx_cell1_size=[rmax, rmax, rmax])
    # should throw an error. period must be positive!
    period = np.array([1.0, 1.0, -1.0])
    with pytest.raises(ValueError) as err:
        tpcf(sample1, rbins, sample2=sample2,
            randoms=randoms, period=period,
            estimator='Natural',
            approx_cell1_size=[rmax, rmax, rmax])
    substr = "All values must bounded positive numbers."
    assert substr in err.value.args[0]
    assert len(result_1) == 3, "wrong number of correlation functions returned erroneously."
    assert len(result_2) == 3, "wrong number of correlation functions returned erroneously."
@slow
def test_tpcf_cross_consistency_w_auto():
    """Verify that cross-correlation mode agrees with auto-correlation mode.

    The auto-correlations of ``sample1`` and ``sample2`` computed one at a
    time must equal the corresponding auto terms returned when both samples
    are passed together. The check runs twice: once with PBCs as analytic
    randoms and once with an explicit randoms sample. DRY fix: the two
    previously duplicated code paths are collapsed into a single loop.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((200, 3))
        sample2 = np.random.random((100, 3))
        randoms = np.random.random((300, 3))
    period = np.array([1.0, 1.0, 1.0])
    rbins = np.linspace(0.001, 0.3, 5)
    rmax = rbins.max()
    # First pass: PBCs only (randoms=None); second pass: explicit randoms.
    for randoms_arg in (None, randoms):
        result1 = tpcf(sample1, rbins, sample2=None,
            randoms=randoms_arg, period=period,
            estimator='Natural',
            approx_cell1_size=[rmax, rmax, rmax])
        result2 = tpcf(sample2, rbins, sample2=None,
            randoms=randoms_arg, period=period,
            estimator='Natural',
            approx_cell1_size=[rmax, rmax, rmax])
        result1_p, result12, result2_p = tpcf(sample1, rbins, sample2=sample2,
            randoms=randoms_arg, period=period,
            estimator='Natural',
            approx_cell1_size=[rmax, rmax, rmax])
        assert np.allclose(result1, result1_p), "cross mode and auto mode are not the same"
        assert np.allclose(result2, result2_p), "cross mode and auto mode are not the same"
def test_RR_precomputed_exception_handling1():
    """Passing ``RR_precomputed`` without ``NR_precomputed`` must raise."""
    with NumpyRNGContext(fixed_seed):
        data1 = np.random.random((1000, 3))
        data2 = np.random.random((100, 3))
        rand_pts = np.random.random((100, 3))
    box = np.array([1.0, 1.0, 1.0])
    bins = np.linspace(0.001, 0.3, 5)
    search_length = bins.max()
    cell = [search_length, search_length, search_length]
    with pytest.raises(HalotoolsError) as err:
        # NR_precomputed is deliberately omitted
        tpcf(data1, bins, sample2=data2,
            randoms=rand_pts, period=box,
            estimator='Natural',
            approx_cell1_size=cell,
            RR_precomputed=search_length)
    substr = "``RR_precomputed`` and ``NR_precomputed`` arguments, or neither\n"
    assert substr in err.value.args[0]
def test_RR_precomputed_exception_handling2():
    """A ``RR_precomputed`` array whose length disagrees with ``rbins``
    must raise a HalotoolsError.
    """
    with NumpyRNGContext(fixed_seed):
        data1 = np.random.random((1000, 3))
        data2 = np.random.random((100, 3))
        rand_pts = np.random.random((100, 3))
    box = np.array([1.0, 1.0, 1.0])
    bins = np.linspace(0.001, 0.3, 5)
    cell = [bins.max()] * 3
    too_short_rr = bins[:-2]  # deliberately the wrong length
    with pytest.raises(HalotoolsError) as err:
        tpcf(data1, bins, sample2=data2,
            randoms=rand_pts, period=box,
            estimator='Natural',
            approx_cell1_size=cell,
            RR_precomputed=too_short_rr, NR_precomputed=rand_pts.shape[0])
    substr = "\nLength of ``RR_precomputed`` must match length of ``rbins``\n"
    assert substr in err.value.args[0]
def test_RR_precomputed_exception_handling3():
    """A ``NR_precomputed`` value that disagrees with the actual number of
    randoms must raise a HalotoolsError.
    """
    with NumpyRNGContext(fixed_seed):
        data1 = np.random.random((1000, 3))
        data2 = np.random.random((100, 3))
        rand_pts = np.random.random((100, 3))
    box = np.array([1.0, 1.0, 1.0])
    bins = np.linspace(0.001, 0.3, 5)
    cell = [bins.max()] * 3
    valid_rr = bins[:-1]  # correct length, so only NR_precomputed is at fault
    wrong_nr = 5  # the randoms sample actually holds 100 points
    with pytest.raises(HalotoolsError) as err:
        tpcf(data1, bins, sample2=data2,
            randoms=rand_pts, period=box,
            estimator='Natural',
            approx_cell1_size=cell,
            RR_precomputed=valid_rr, NR_precomputed=wrong_nr)
    substr = "the value of NR_precomputed must agree with the number of randoms"
    assert substr in err.value.args[0]
@slow
def test_RR_precomputed_natural_estimator_auto():
    """ Strategy here is as follows. First, we adopt the same setup
    with randomly generated points as used in the rest of the test suite.
    First, we just compute the tpcf in the normal way.
    Then we break apart the tpcf innards so that we can
    compute RR in the exact same way that it is computed within tpcf.
    We will then pass in this RR using the RR_precomputed keyword,
    and verify that the tpcf computed in this second way gives
    exactly the same results as if we did not pre-compute RR.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((1000, 3))
        sample2 = sample1
        randoms = np.random.random((100, 3))
    period = np.array([1.0, 1.0, 1.0])
    rbins = np.linspace(0.001, 0.3, 5)
    rmax = rbins.max()
    approx_cell1_size = [rmax, rmax, rmax]
    approx_cell2_size = approx_cell1_size
    approx_cellran_size = [rmax, rmax, rmax]
    # reference: the ordinary tpcf call with the Natural estimator
    normal_result = tpcf(
        sample1, rbins, sample2=sample2,
        randoms=randoms, period=period,
        estimator='Natural',
        approx_cell1_size=approx_cell1_size,
        approx_cellran_size=approx_cellran_size)
    # The following quantities are computed inside the
    # tpcf namespace. We reproduce them here because they are
    # necessary inputs to the _random_counts and _pair_counts
    # functions called by tpcf
    _sample1_is_sample2 = True
    PBCs = True
    num_threads = 1
    # do_DD is unused below; kept only to mirror the tpcf internals
    do_DD, do_DR, do_RR = True, True, True
    do_auto, do_cross = True, False
    from ..tpcf import _random_counts, _pair_counts
    # count data pairs
    D1D1, D1D2, D2D2 = _pair_counts(
        sample1, sample2, rbins, period,
        num_threads, do_auto, do_cross, _sample1_is_sample2,
        approx_cell1_size, approx_cell2_size)
    # count random pairs
    D1R, D2R, RR = _random_counts(
        sample1, sample2, randoms, rbins, period,
        PBCs, num_threads, do_RR, do_DR, _sample1_is_sample2,
        approx_cell1_size, approx_cell2_size, approx_cellran_size)
    N1 = len(sample1)
    NR = len(randoms)
    factor = N1*N1/(NR*NR)
    def mult(x, y):
        return x*y
    # Natural estimator: xi = DD/RR * (NR/N1)**2 - 1
    xi_11 = mult(1.0/factor, D1D1/RR) - 1.0
    # The following assertion implies that the RR
    # computed within this testing namespace is the same RR
    # as computed in the tpcf namespace
    assert np.all(xi_11 == normal_result)
    # Now we will pass in the above RR as an argument
    # and verify that we get an identical tpcf
    result_with_RR_precomputed = tpcf(
        sample1, rbins, sample2=sample2,
        randoms=randoms, period=period,
        estimator='Natural',
        approx_cell1_size=approx_cell1_size,
        approx_cellran_size=approx_cellran_size,
        RR_precomputed=RR,
        NR_precomputed=NR)
    assert np.all(result_with_RR_precomputed == normal_result)
@slow
def test_RR_precomputed_Landy_Szalay_estimator_auto():
    """ Strategy here is as follows. First, we adopt the same setup
    with randomly generated points as used in the rest of the test suite.
    First, we just compute the tpcf in the normal way.
    Then we break apart the tpcf innards so that we can
    compute RR in the exact same way that it is computed within tpcf.
    We will then pass in this RR using the RR_precomputed keyword,
    and verify that the tpcf computed in this second way gives
    exactly the same results as if we did not pre-compute RR.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((1000, 3))
        sample2 = sample1
        randoms = np.random.random((100, 3))
    period = np.array([1.0, 1.0, 1.0])
    rbins = np.linspace(0.001, 0.3, 5)
    rmax = rbins.max()
    approx_cell1_size = [rmax, rmax, rmax]
    approx_cell2_size = approx_cell1_size
    approx_cellran_size = [rmax, rmax, rmax]
    # reference: the ordinary tpcf call with the Landy-Szalay estimator
    normal_result = tpcf(
        sample1, rbins, sample2=sample2,
        randoms=randoms, period=period,
        estimator='Landy-Szalay',
        approx_cell1_size=approx_cell1_size,
        approx_cellran_size=approx_cellran_size)
    # The following quantities are computed inside the
    # tpcf namespace. We reproduce them here because they are
    # necessary inputs to the _random_counts and _pair_counts
    # functions called by tpcf
    _sample1_is_sample2 = True
    PBCs = True
    num_threads = 1
    # do_DD is unused below; kept only to mirror the tpcf internals
    do_DD, do_DR, do_RR = True, True, True
    do_auto, do_cross = True, False
    from ..tpcf import _random_counts, _pair_counts
    # count data pairs
    D1D1, D1D2, D2D2 = _pair_counts(
        sample1, sample2, rbins, period,
        num_threads, do_auto, do_cross, _sample1_is_sample2,
        approx_cell1_size, approx_cell2_size)
    # count random pairs
    D1R, D2R, RR = _random_counts(
        sample1, sample2, randoms, rbins, period,
        PBCs, num_threads, do_RR, do_DR, _sample1_is_sample2,
        approx_cell1_size, approx_cell2_size, approx_cellran_size)
    ND1 = len(sample1)
    ND2 = len(sample2)
    NR1 = len(randoms)
    NR2 = len(randoms)
    factor1 = ND1*ND2/(NR1*NR2)
    factor2 = ND1*NR2/(NR1*NR2)
    def mult(x, y):
        return x*y
    # Landy-Szalay estimator: xi = (DD - 2*DR + RR) / RR, with count normalization
    xi_11 = mult(1.0/factor1, D1D1/RR) - mult(1.0/factor2, 2.0*D1R/RR) + 1.0
    # # The following assertion implies that the RR
    # # computed within this testing namespace is the same RR
    # # as computed in the tpcf namespace
    assert np.all(xi_11 == normal_result)
    # Now we will pass in the above RR as an argument
    # and verify that we get an identical tpcf
    result_with_RR_precomputed = tpcf(
        sample1, rbins, sample2=sample2,
        randoms=randoms, period=period,
        estimator='Landy-Szalay',
        approx_cell1_size=approx_cell1_size,
        approx_cellran_size=approx_cellran_size,
        RR_precomputed=RR,
        NR_precomputed=NR1)
    assert np.all(result_with_RR_precomputed == normal_result)
def test_tpcf_raises_exception_for_non_monotonic_rbins():
    """Separation bins that decrease must be rejected with a TypeError.

    Cleanup: the return value inside ``pytest.raises`` was previously bound
    to an unused local (flake8 F841); the binding is dropped.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((1000, 3))
    period = np.array([1.0, 1.0, 1.0])
    # rbins runs from 10 down to 0.3: monotonically decreasing, hence invalid
    rbins = np.linspace(10, 0.3, 5)
    with pytest.raises(TypeError) as err:
        tpcf(sample1, rbins, period=period)
    substr = "Input separation bins must be a monotonically increasing"
    assert substr in err.value.args[0]
def test_tpcf_raises_exception_for_large_search_length():
    """A search length too large for the simulation box must be rejected.

    Cleanup: the return value inside ``pytest.raises`` was previously bound
    to an unused local (flake8 F841); the binding is dropped.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((1000, 3))
    period = np.array([1.0, 1.0, 1.0])
    # rbins.max() = 0.5 is too large relative to the unit period
    rbins = np.linspace(0.1, 0.5, 5)
    with pytest.raises(ValueError) as err:
        tpcf(sample1, rbins, period=period)
    substr = "Either decrease your search length or use a larger simulation"
    assert substr in err.value.args[0]
def test_tpcf_raises_exception_for_incompatible_data_shapes():
    """A ``sample2`` that is not of shape (Npts, 3) must be rejected.

    Cleanup: the return value inside ``pytest.raises`` was previously bound
    to an unused local (flake8 F841); the binding is dropped.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((1000, 3))
        sample2 = np.random.random((1000, 2))  # only 2 columns: invalid
    period = np.array([1.0, 1.0, 1.0])
    rbins = np.linspace(0.1, 0.3, 5)
    with pytest.raises(TypeError) as err:
        tpcf(sample1, rbins, sample2=sample2, period=period)
    substr = "Input sample of points must be a Numpy ndarray of shape (Npts, 3)."
    assert substr in err.value.args[0]
def test_tpcf_raises_exception_for_bad_do_auto_instructions():
    """A non-boolean ``do_auto`` argument must be rejected with a ValueError.

    Cleanup: the return value inside ``pytest.raises`` was previously bound
    to an unused local (flake8 F841); the binding is dropped.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((1000, 3))
        sample2 = np.random.random((1000, 3))
    period = np.array([1.0, 1.0, 1.0])
    rbins = np.linspace(0.1, 0.3, 5)
    with pytest.raises(ValueError) as err:
        tpcf(sample1, rbins, sample2=sample2, period=period,
            do_auto='<NAME>')
    substr = "`do_auto` and `do_cross` keywords must be boolean-valued."
    assert substr in err.value.args[0]
def test_tpcf_raises_exception_for_unavailable_estimator():
    """An unrecognized estimator name must be rejected with a ValueError.

    Cleanup: removed an unused ``sample2`` fixture and the unused
    ``normal_result`` binding inside ``pytest.raises`` (flake8 F841);
    neither was referenced by the assertion.
    """
    with NumpyRNGContext(fixed_seed):
        sample1 = np.random.random((1000, 3))
    period = np.array([1.0, 1.0, 1.0])
    rbins = np.linspace(0.1, 0.3, 5)
    with pytest.raises(ValueError) as err:
        tpcf(sample1, rbins, period=period,
            estimator='<NAME>')
    substr = "is not in the list of available estimators:"
    assert substr in err.value.args[0]
@pytest.mark.skipif('not TPCF_CORRFUNC_FILES_EXIST')
def test_tpcf_vs_corrfunc():
    """Compare the halotools ``tpcf`` result against pre-computed reference
    results from the external Corrfunc code; skipped unless the comparison
    data files are present on disk.
    """
    msg = ("This unit-test compares the tpcf results from halotools \n"
        "against the results derived from the Corrfunc code managed by \n"
        "Manodeep Sinha. ")
    # the same helper that gated the skipif also returns the file paths
    __, aph_fname1, aph_fname2, aph_fname3, deep_fname1, deep_fname2 = (
        tpcf_corrfunc_comparison_files_exist(return_fnames=True))
    # first column of the reference files holds the Corrfunc xi values
    sinha_sample1_xi = np.load(deep_fname1)[:, 0]
    sinha_sample2_xi = np.load(deep_fname2)[:, 0]
    sample1 = np.load(aph_fname1)
    sample2 = np.load(aph_fname2)
    rbins = np.load(aph_fname3)
    halotools_result1 = tpcf(sample1, rbins, period=250.0)
    assert np.allclose(halotools_result1, sinha_sample1_xi, rtol=1e-5), msg
    halotools_result2 = tpcf(sample2, rbins, period=250.0)
    assert np.allclose(halotools_result2, sinha_sample2_xi, rtol=1e-5), msg
| [
"numpy.allclose",
"astropy.utils.misc.NumpyRNGContext",
"numpy.random.random",
"numpy.array",
"numpy.linspace",
"pytest.raises",
"pytest.mark.skipif",
"numpy.all",
"numpy.load"
] | [((21047, 21098), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""not TPCF_CORRFUNC_FILES_EXIST"""'], {}), "('not TPCF_CORRFUNC_FILES_EXIST')\n", (21065, 21098), False, 'import pytest\n'), ((971, 996), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (979, 996), True, 'import numpy as np\n'), ((1009, 1035), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (1020, 1035), True, 'import numpy as np\n'), ((2016, 2041), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (2024, 2041), True, 'import numpy as np\n'), ((2054, 2080), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (2065, 2080), True, 'import numpy as np\n'), ((3031, 3057), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (3042, 3057), True, 'import numpy as np\n'), ((5159, 5184), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (5167, 5184), True, 'import numpy as np\n'), ((5197, 5223), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (5208, 5223), True, 'import numpy as np\n'), ((7094, 7119), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (7102, 7119), True, 'import numpy as np\n'), ((7132, 7158), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (7143, 7158), True, 'import numpy as np\n'), ((7678, 7704), 'numpy.array', 'np.array', (['[1.0, 1.0, -1.0]'], {}), '([1.0, 1.0, -1.0])\n', (7686, 7704), True, 'import numpy as np\n'), ((8539, 8564), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (8547, 8564), True, 'import numpy as np\n'), ((8577, 8603), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (8588, 8603), True, 'import numpy as np\n'), ((9342, 9373), 'numpy.allclose', 'np.allclose', (['result1', 'result1_p'], {}), '(result1, 
result1_p)\n', (9353, 9373), True, 'import numpy as np\n'), ((9430, 9461), 'numpy.allclose', 'np.allclose', (['result2', 'result2_p'], {}), '(result2, result2_p)\n', (9441, 9461), True, 'import numpy as np\n'), ((10227, 10258), 'numpy.allclose', 'np.allclose', (['result1', 'result1_p'], {}), '(result1, result1_p)\n', (10238, 10258), True, 'import numpy as np\n'), ((10315, 10346), 'numpy.allclose', 'np.allclose', (['result2', 'result2_p'], {}), '(result2, result2_p)\n', (10326, 10346), True, 'import numpy as np\n'), ((10628, 10653), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (10636, 10653), True, 'import numpy as np\n'), ((10666, 10692), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (10677, 10692), True, 'import numpy as np\n'), ((11366, 11391), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (11374, 11391), True, 'import numpy as np\n'), ((11404, 11430), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (11415, 11430), True, 'import numpy as np\n'), ((12177, 12202), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (12185, 12202), True, 'import numpy as np\n'), ((12215, 12241), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (12226, 12241), True, 'import numpy as np\n'), ((13489, 13514), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (13497, 13514), True, 'import numpy as np\n'), ((13527, 13553), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (13538, 13553), True, 'import numpy as np\n'), ((15117, 15147), 'numpy.all', 'np.all', (['(xi_11 == normal_result)'], {}), '(xi_11 == normal_result)\n', (15123, 15147), True, 'import numpy as np\n'), ((15559, 15610), 'numpy.all', 'np.all', (['(result_with_RR_precomputed == normal_result)'], {}), '(result_with_RR_precomputed == normal_result)\n', 
(15565, 15610), True, 'import numpy as np\n'), ((16366, 16391), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (16374, 16391), True, 'import numpy as np\n'), ((16404, 16430), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.3)', '(5)'], {}), '(0.001, 0.3, 5)\n', (16415, 16430), True, 'import numpy as np\n'), ((18123, 18153), 'numpy.all', 'np.all', (['(xi_11 == normal_result)'], {}), '(xi_11 == normal_result)\n', (18129, 18153), True, 'import numpy as np\n'), ((18571, 18622), 'numpy.all', 'np.all', (['(result_with_RR_precomputed == normal_result)'], {}), '(result_with_RR_precomputed == normal_result)\n', (18577, 18622), True, 'import numpy as np\n'), ((18780, 18805), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (18788, 18805), True, 'import numpy as np\n'), ((18818, 18841), 'numpy.linspace', 'np.linspace', (['(10)', '(0.3)', '(5)'], {}), '(10, 0.3, 5)\n', (18829, 18841), True, 'import numpy as np\n'), ((19213, 19238), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (19221, 19238), True, 'import numpy as np\n'), ((19251, 19275), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.5)', '(5)'], {}), '(0.1, 0.5, 5)\n', (19262, 19275), True, 'import numpy as np\n'), ((19704, 19729), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (19712, 19729), True, 'import numpy as np\n'), ((19742, 19766), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.3)', '(5)'], {}), '(0.1, 0.3, 5)\n', (19753, 19766), True, 'import numpy as np\n'), ((20216, 20241), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (20224, 20241), True, 'import numpy as np\n'), ((20254, 20278), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.3)', '(5)'], {}), '(0.1, 0.3, 5)\n', (20265, 20278), True, 'import numpy as np\n'), ((20747, 20772), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (20755, 20772), True, 'import numpy as np\n'), ((20785, 
20809), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.3)', '(5)'], {}), '(0.1, 0.3, 5)\n', (20796, 20809), True, 'import numpy as np\n'), ((21574, 21593), 'numpy.load', 'np.load', (['aph_fname1'], {}), '(aph_fname1)\n', (21581, 21593), True, 'import numpy as np\n'), ((21608, 21627), 'numpy.load', 'np.load', (['aph_fname2'], {}), '(aph_fname2)\n', (21615, 21627), True, 'import numpy as np\n'), ((21640, 21659), 'numpy.load', 'np.load', (['aph_fname3'], {}), '(aph_fname3)\n', (21647, 21659), True, 'import numpy as np\n'), ((21731, 21791), 'numpy.allclose', 'np.allclose', (['halotools_result1', 'sinha_sample1_xi'], {'rtol': '(1e-05)'}), '(halotools_result1, sinha_sample1_xi, rtol=1e-05)\n', (21742, 21791), True, 'import numpy as np\n'), ((21867, 21927), 'numpy.allclose', 'np.allclose', (['halotools_result2', 'sinha_sample2_xi'], {'rtol': '(1e-05)'}), '(halotools_result2, sinha_sample2_xi, rtol=1e-05)\n', (21878, 21927), True, 'import numpy as np\n'), ((839, 866), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (854, 866), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((886, 912), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (902, 912), True, 'import numpy as np\n'), ((931, 957), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (947, 957), True, 'import numpy as np\n'), ((1839, 1866), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (1854, 1866), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((1886, 1912), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (1902, 1912), True, 'import numpy as np\n'), ((1931, 1957), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (1947, 1957), True, 'import numpy as np\n'), ((1976, 2002), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (1992, 2002), True, 
'import numpy as np\n'), ((2855, 2882), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (2870, 2882), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((2902, 2928), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (2918, 2928), True, 'import numpy as np\n'), ((2947, 2973), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (2963, 2973), True, 'import numpy as np\n'), ((2992, 3018), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (3008, 3018), True, 'import numpy as np\n'), ((4982, 5009), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (4997, 5009), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((5029, 5055), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (5045, 5055), True, 'import numpy as np\n'), ((5074, 5100), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (5090, 5100), True, 'import numpy as np\n'), ((5119, 5145), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (5135, 5145), True, 'import numpy as np\n'), ((6167, 6192), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6180, 6192), False, 'import pytest\n'), ((6916, 6943), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (6931, 6943), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((6963, 6990), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (6979, 6990), True, 'import numpy as np\n'), ((7009, 7035), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (7025, 7035), True, 'import numpy as np\n'), ((7054, 7080), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (7070, 7080), True, 'import numpy as np\n'), ((7714, 7739), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7727, 7739), False, 'import pytest\n'), ((8362, 8389), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (8377, 8389), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((8409, 8435), 'numpy.random.random', 'np.random.random', (['(200, 3)'], {}), '((200, 3))\n', (8425, 8435), True, 'import numpy as np\n'), ((8454, 8480), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (8470, 8480), True, 'import numpy as np\n'), ((8499, 8525), 'numpy.random.random', 'np.random.random', (['(300, 3)'], {}), '((300, 3))\n', (8515, 8525), True, 'import numpy as np\n'), ((10450, 10477), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (10465, 10477), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((10497, 10524), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (10513, 10524), True, 'import numpy as np\n'), ((10543, 10569), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (10559, 10569), True, 'import numpy as np\n'), ((10588, 10614), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (10604, 10614), True, 'import numpy as np\n'), ((10752, 10781), 'pytest.raises', 'pytest.raises', (['HalotoolsError'], {}), '(HalotoolsError)\n', (10765, 10781), False, 'import pytest\n'), ((11188, 11215), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (11203, 11215), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((11235, 11262), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (11251, 11262), True, 'import numpy as np\n'), ((11281, 11307), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (11297, 11307), True, 'import numpy as np\n'), ((11326, 11352), 'numpy.random.random', 
'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (11342, 11352), True, 'import numpy as np\n'), ((11534, 11563), 'pytest.raises', 'pytest.raises', (['HalotoolsError'], {}), '(HalotoolsError)\n', (11547, 11563), False, 'import pytest\n'), ((11999, 12026), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (12014, 12026), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((12046, 12073), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (12062, 12073), True, 'import numpy as np\n'), ((12092, 12118), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (12108, 12118), True, 'import numpy as np\n'), ((12137, 12163), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (12153, 12163), True, 'import numpy as np\n'), ((12330, 12359), 'pytest.raises', 'pytest.raises', (['HalotoolsError'], {}), '(HalotoolsError)\n', (12343, 12359), False, 'import pytest\n'), ((13330, 13357), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (13345, 13357), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((13377, 13404), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (13393, 13404), True, 'import numpy as np\n'), ((13449, 13475), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (13465, 13475), True, 'import numpy as np\n'), ((16207, 16234), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (16222, 16234), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((16254, 16281), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (16270, 16281), True, 'import numpy as np\n'), ((16326, 16352), 'numpy.random.random', 'np.random.random', (['(100, 3)'], {}), '((100, 3))\n', (16342, 16352), True, 'import numpy as np\n'), ((18692, 18719), 
'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (18707, 18719), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((18739, 18766), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (18755, 18766), True, 'import numpy as np\n'), ((18852, 18876), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (18865, 18876), False, 'import pytest\n'), ((19125, 19152), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (19140, 19152), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((19172, 19199), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (19188, 19199), True, 'import numpy as np\n'), ((19286, 19311), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19299, 19311), False, 'import pytest\n'), ((19570, 19597), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (19585, 19597), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((19617, 19644), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (19633, 19644), True, 'import numpy as np\n'), ((19663, 19690), 'numpy.random.random', 'np.random.random', (['(1000, 2)'], {}), '((1000, 2))\n', (19679, 19690), True, 'import numpy as np\n'), ((19777, 19801), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (19790, 19801), False, 'import pytest\n'), ((20082, 20109), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (20097, 20109), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((20129, 20156), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (20145, 20156), True, 'import numpy as np\n'), ((20175, 20202), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (20191, 20202), True, 'import numpy 
as np\n'), ((20289, 20314), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (20302, 20314), False, 'import pytest\n'), ((20613, 20640), 'astropy.utils.misc.NumpyRNGContext', 'NumpyRNGContext', (['fixed_seed'], {}), '(fixed_seed)\n', (20628, 20640), False, 'from astropy.utils.misc import NumpyRNGContext\n'), ((20660, 20687), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (20676, 20687), True, 'import numpy as np\n'), ((20706, 20733), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (20722, 20733), True, 'import numpy as np\n'), ((20820, 20845), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (20833, 20845), False, 'import pytest\n'), ((21482, 21502), 'numpy.load', 'np.load', (['deep_fname1'], {}), '(deep_fname1)\n', (21489, 21502), True, 'import numpy as np\n'), ((21532, 21552), 'numpy.load', 'np.load', (['deep_fname2'], {}), '(deep_fname2)\n', (21539, 21552), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import utils
import pdb
# Validation split shared by kappa()/roni() below; utils.load_dataset is a
# project helper returning a dict with 'X' (features) and 'y' (labels).
data = utils.load_dataset("credittest")
Xvalid, yvalid = data['X'], data['y']
def kappa(ww, delta):
    """Cohen's kappa agreement between the predictions of ``ww`` and ``ww + delta``.

    Both weight vectors are scored by sign(Xvalid . w) on the module-level
    validation set; chance agreement is hard-coded to 0.5.
    """
    base = np.array(ww)
    perturbed = np.array(base + delta)
    preds_base = np.sign(np.dot(Xvalid, base))
    preds_perturbed = np.sign(np.dot(Xvalid, perturbed))
    p_observed = np.sum(preds_base == preds_perturbed) / float(yvalid.size)
    p_chance = 0.5
    return (p_observed - p_chance) / (1 - p_chance)
def roni(ww, delta):
    """RONI (Reject On Negative Impact): validation-error change caused by ``delta``.

    Returns error(ww + delta) - error(ww) on the module-level validation set;
    a positive value means the perturbation degrades accuracy.
    """
    weights = np.array(ww)
    perturbed = np.array(weights + delta)
    n = float(yvalid.size)
    err_before = np.sum(np.sign(np.dot(Xvalid, weights)) != yvalid) / n
    err_after = np.sum(np.sign(np.dot(Xvalid, perturbed)) != yvalid) / n
    return err_after - err_before
# Returns the indices of the rows that should be used in Krum
def krum(deltas, clip):
    """Select the (n - clip) updates with the lowest Krum scores.

    ``deltas`` is interpreted as an array of shape (n_updates, d); the
    ``clip`` highest-scoring (most outlying) updates are discarded and the
    indices of the rest are returned.
    """
    updates = np.array(deltas)
    n = len(updates)
    keep = n - clip
    scores = get_krum_scores(updates, keep)
    good_idx = np.argpartition(scores, keep)[:keep]
    print(good_idx)  # debug output kept: behavior-preserving rewrite
    return good_idx
# return np.mean(deltas[good_idx], axis=0)
def get_krum_scores(X, groupsize):
    """Krum score of every row of X.

    A row's score is the sum of its squared Euclidean distances to its
    nearest (groupsize - 2) other rows: column 0 of each sorted distance row
    is the zero self-distance and is skipped.
    """
    sq_norms = np.sum(X ** 2, axis=1)
    # Pairwise squared distances via ||a||^2 + ||b||^2 - 2 * a.b
    pairwise = sq_norms[:, None] + sq_norms[None, :] - 2 * np.dot(X, X.T)
    ordered = np.sort(pairwise, axis=1)
    # Skip the self-distance, sum the next (groupsize - 2) smallest distances.
    # astype(float) matches the original float64 output buffer (np.zeros).
    return np.sum(ordered[:, 1:(groupsize - 1)], axis=1).astype(float)
if __name__ == "__main__":
    # Drop into the interactive debugger when run as a script; the functions
    # above are meant to be exercised by hand from the pdb prompt.
    pdb.set_trace()
"numpy.argpartition",
"numpy.sort",
"numpy.array",
"numpy.dot",
"numpy.sum",
"pdb.set_trace",
"utils.load_dataset"
] | [((83, 115), 'utils.load_dataset', 'utils.load_dataset', (['"""credittest"""'], {}), "('credittest')\n", (101, 115), False, 'import utils\n'), ((191, 203), 'numpy.array', 'np.array', (['ww'], {}), '(ww)\n', (199, 203), True, 'import numpy as np\n'), ((258, 278), 'numpy.array', 'np.array', (['(ww + delta)'], {}), '(ww + delta)\n', (266, 278), True, 'import numpy as np\n'), ((460, 472), 'numpy.array', 'np.array', (['ww'], {}), '(ww)\n', (468, 472), True, 'import numpy as np\n'), ((527, 547), 'numpy.array', 'np.array', (['(ww + delta)'], {}), '(ww + delta)\n', (535, 547), True, 'import numpy as np\n'), ((902, 918), 'numpy.array', 'np.array', (['deltas'], {}), '(deltas)\n', (910, 918), True, 'import numpy as np\n'), ((1486, 1501), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1499, 1501), False, 'import pdb\n'), ((223, 241), 'numpy.dot', 'np.dot', (['Xvalid', 'ww'], {}), '(Xvalid, ww)\n', (229, 241), True, 'import numpy as np\n'), ((299, 318), 'numpy.dot', 'np.dot', (['Xvalid', 'ww2'], {}), '(Xvalid, ww2)\n', (305, 318), True, 'import numpy as np\n'), ((331, 352), 'numpy.sum', 'np.sum', (['(yhat == yhat2)'], {}), '(yhat == yhat2)\n', (337, 352), True, 'import numpy as np\n'), ((492, 510), 'numpy.dot', 'np.dot', (['Xvalid', 'ww'], {}), '(Xvalid, ww)\n', (498, 510), True, 'import numpy as np\n'), ((568, 587), 'numpy.dot', 'np.dot', (['Xvalid', 'ww2'], {}), '(Xvalid, ww2)\n', (574, 587), True, 'import numpy as np\n'), ((602, 624), 'numpy.sum', 'np.sum', (['(yhat != yvalid)'], {}), '(yhat != yvalid)\n', (608, 624), True, 'import numpy as np\n'), ((660, 683), 'numpy.sum', 'np.sum', (['(yhat2 != yvalid)'], {}), '(yhat2 != yvalid)\n', (666, 683), True, 'import numpy as np\n'), ((987, 1020), 'numpy.argpartition', 'np.argpartition', (['scores', '(n - clip)'], {}), '(scores, n - clip)\n', (1002, 1020), True, 'import numpy as np\n'), ((1314, 1328), 'numpy.dot', 'np.dot', (['X', 'X.T'], {}), '(X, X.T)\n', (1320, 1328), True, 'import numpy as np\n'), ((1240, 1262), 
'numpy.sum', 'np.sum', (['(X ** 2)'], {'axis': '(1)'}), '(X ** 2, axis=1)\n', (1246, 1262), True, 'import numpy as np\n'), ((1272, 1294), 'numpy.sum', 'np.sum', (['(X ** 2)'], {'axis': '(1)'}), '(X ** 2, axis=1)\n', (1278, 1294), True, 'import numpy as np\n'), ((1390, 1411), 'numpy.sort', 'np.sort', (['distances[i]'], {}), '(distances[i])\n', (1397, 1411), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_predict,cross_val_score,train_test_split
from sklearn.metrics import classification_report,confusion_matrix,roc_curve,auc,precision_recall_curve,roc_curve
import pickle
#raw_df = pd.read_csv("/home/terrence/CODING/Python/MODELS/Credit_Union_PDs/default_data.csv", encoding="latin-1")
myfile = "/home/terrence/CODING/Python/MODELS/Credit_Union_PDs/Test Variables READY.xlsx"
raw_df = pd.read_excel(myfile, sheet_name = 'Data', header = 0)
print(raw_df.shape)
#raw_df.dropna(inplace = True)
#print(raw_df.shape)
#print(raw_df.columns.values)
'''
[u'Loan Number' u'Loan Type Description' u'Balance' u'Loan Term' u'Interest Rate' u'Origination Date' u'Origination Month'
u'Most Recent Credit Score' u'AmountFunded' u'MonthlyIncomeBaseSalary' u'TotalMonthlyIncome' u'MonthlyIncomeOther'
u'Collateral Current Valuation' u'LTV' u'Number of Days Delinquent' u'Days Late T or F' u'Balance.1' u'Days 11-15 Delinquent'
u'Days 16-20 Delinquent' u'Days 21-29 Delinquent' u'Days 30-44 Delinquent' u'Days 45-59 Delinquent' u'Days 60-179 Delinquent'
u'Days 180-359 Days Delinquent' u'Days 360+ Delinquent' u'Days Delinquent T or F' u'Grade Overall' u'Original Loan Amount'
u'Current Credit Limit' u'Maturity Date' u'Maturity Month' u'Original Credit Score' u'LTV-Original' u'Probability of Default'
u'Branch' u'Loan Officer' u'Underwriter' u'Loan Type Code' u'Loan Category' u'Auto Dealer' u'Primary Customer City' u'Status'
u'Updated Credit Score' u'Original Interest Rate' u'LTV (Effective)' u'LTV-Original (Effective)' u'LTV-Original Total Commitments'
u'LTV-Total Commitments' u'LTV-Total Commitments (Effective)' u'LTV-Total Commitments-Original (Effective)'
u'Grade by Most Recent Credit Score' u'Grade by Cerdit Score (ORIGINAL)' u'GRADE BY CREDIT SCORE (UPDATED)' u'JointTotalMonthlyIncome'
u'JointProfessionMonths' u'JointCity' u'JointApplicantType' u'JointMonthlyIncomeBaseSalary' u'JointMonthlyIncomeOther'
u'JointMonthlyIncomeOtherDescription1' u'JointOccupation' u'IndCity' u'IndMonthlyIncomeBaseSalary' u'IndMonthlyIncomeOther'
u'IndTotalMonthlyIncome' u'IndMonthlyIncomeOtherDescription1' u'PaymentAmount' u'PaymentFrequency' u'Insurance' u'DueDay1' u'DueDay2'
u'PaymentMethodText' u'SymitarPurposeCode' u'ApprovedLTV' u'FundedLTV' u'PaymentToIncome' u'NumberOfOpenRevolvingAccounts' u'AmountApproved'
u'AmountFunded.1' u'AmountOwedToLender' u'DOB' u'DOB.1' u'DOB.2' u'AGE' u'AGE of BORROWER' u'JointDOB' u'Year' u'Year.1' u'AGE OF JOINT'
u'AGE OF JOINT.1' u'IndDOB' u'YEAR' u'YEAR.1' u'AGE.1' u'AGE of IND' u'AllButThisDebtToIncomeFund' u'AllButThisDebtToIncomeUW'
u'EstimatedMonthlyPayment' u'TotalDebtToIncomeFund' u'TotalDebtToIncomeUW' u'TotalUnsecureBalance' u'TotalExistingLoanAmount' u'APR'
u'IsHighRiskConsumerLoan' u'IsAdvanceRequest' u'IsWorkoutLoan' u'LoanPaymentFrequency' u'PaymentType' u'Rate']
'''
# Binary target: 1 if the loan is more than 11 days delinquent, else 0.
raw_df['label'] = raw_df['Number of Days Delinquent'].map(lambda x : 1 if int(x) > 11 else 0)
print(raw_df.shape)
# NaN audit: each print reports whether a candidate feature still contains
# NaNs; columns known to have gaps are imputed with the column mean below.
#print(raw_df['Loan Type Description'].mean())
print(np.any(np.isnan(raw_df['Loan Type Description'])))
#print(raw_df['Balance'].mean())
print(np.any(np.isnan(raw_df['Balance'])))
#print(raw_df['Loan Term'].mean())
print(np.any(np.isnan(raw_df['Loan Term'])))
#print(raw_df['LTV'].mean())
print(np.any(np.isnan(raw_df['LTV'])))
#print(raw_df['label'].sum())
print(np.any(np.isnan(raw_df['label'])))
print("\n\n")
#print(raw_df['Interest Rate'].mean())
print(np.any(np.isnan(raw_df['Interest Rate'])))
#print(raw_df['Origination Month'].mean())
print(np.any(np.isnan(raw_df['Origination Month'])))
#print(raw_df['Most Recent Credit Score'].mean())
print(np.any(np.isnan(raw_df['Most Recent Credit Score'])))
#print(raw_df['AmountFunded'].mean())
# Mean imputation for income/funding columns with missing values.
raw_df['AmountFunded'] = raw_df['AmountFunded'].fillna(raw_df['AmountFunded'].mean())
print(np.any(np.isnan(raw_df['AmountFunded'])))
#print(raw_df['MonthlyIncomeBaseSalary'].mean())
raw_df['MonthlyIncomeBaseSalary'] = raw_df['MonthlyIncomeBaseSalary'].fillna(raw_df['MonthlyIncomeBaseSalary'].mean())
print(np.any(np.isnan(raw_df['MonthlyIncomeBaseSalary'])))
#print(raw_df['TotalMonthlyIncome'].mean())
raw_df['TotalMonthlyIncome'] = raw_df['TotalMonthlyIncome'].fillna(raw_df['TotalMonthlyIncome'].mean())
print(np.any(np.isnan(raw_df['TotalMonthlyIncome'])))
#print(raw_df['MonthlyIncomeOther'].mean())
raw_df['MonthlyIncomeOther'] = raw_df['MonthlyIncomeOther'].fillna(raw_df['MonthlyIncomeOther'].mean())
print(np.any(np.isnan(raw_df['MonthlyIncomeOther'])))
#print(raw_df['Collateral Current Valuation'].mean())
print(np.any(np.isnan(raw_df['Collateral Current Valuation'])))
print("\n\n")
# Sentinel-value imputation (-99999) was tried and abandoned; kept commented.
#raw_df['Balance'] = raw_df['Balance'].fillna(-99999)
print(np.any(np.isnan(raw_df['Balance'])))
#raw_df['Grade Overall'] = raw_df['Grade Overall'].fillna(-99999)
print(np.any(np.isnan(raw_df['Grade Overall'])))
#raw_df['Current Credit Limit'] = raw_df['Current Credit Limit'].fillna(-99999)
print(np.any(np.isnan(raw_df['Current Credit Limit'])))
#raw_df['Loan Type Code'] = raw_df['Loan Type Code'].fillna(-99999)
print(np.any(np.isnan(raw_df['Loan Type Code'])))
#raw_df['Status'] = raw_df['Status'].fillna(-99999)
print(np.any(np.isnan(raw_df['Status'])))
raw_df['Insurance'] = raw_df['Insurance'].fillna(raw_df['Insurance'].mean())
print(np.any(np.isnan(raw_df['Insurance'])))
raw_df['NumberOfOpenRevolvingAccounts'] = raw_df['NumberOfOpenRevolvingAccounts'].fillna(raw_df['NumberOfOpenRevolvingAccounts'].mean())
print(np.any(np.isnan(raw_df['NumberOfOpenRevolvingAccounts'])))
raw_df['APR'] = raw_df['APR'].fillna(raw_df['APR'].mean())
print(np.any(np.isnan(raw_df['APR'])))
#raw_df['PaymentToIncome'] = raw_df['PaymentToIncome'].fillna(raw_df['PaymentToIncome'].mean())
#print(np.any(np.isnan(raw_df['PaymentToIncome'])))
raw_df['AmountOwedToLender'] = raw_df['AmountOwedToLender'].fillna(raw_df['AmountOwedToLender'].mean())
print(np.any(np.isnan(raw_df['AmountOwedToLender'])))
#raw_df['AGE of BORROWER'] = raw_df['AGE of BORROWER'].fillna(raw_df['AGE of BORROWER'].mean())
#print(np.any(np.isnan(raw_df['AGE of BORROWER'])))
raw_df['LoanPaymentFrequency'] = raw_df['LoanPaymentFrequency'].fillna(raw_df['LoanPaymentFrequency'].mean())
print(np.any(np.isnan(raw_df['LoanPaymentFrequency'])))
raw_df['Rate'] = raw_df['Rate'].fillna(raw_df['Rate'].mean())
print(np.any(np.isnan(raw_df['Rate'])))
#df1 = pd.concat([raw_df['Loan Type Description'], raw_df['Balance'], raw_df['Loan Term'],raw_df['LTV'], raw_df['label']],axis =1)
# Modeling frame: selected numeric features plus the binary label.
# NOTE(review): 'Balance' appears twice in this column list, so df1 carries a
# duplicated column (and a duplicated coefficient downstream) -- confirm intent.
df1 = raw_df[['Loan Type Description','Balance','Loan Term','Interest Rate','Origination Month','Most Recent Credit Score',
'AmountFunded','MonthlyIncomeBaseSalary', 'TotalMonthlyIncome','MonthlyIncomeOther','Collateral Current Valuation','LTV',
'Balance','Grade Overall','Current Credit Limit','Loan Type Code','Loan Category','Status','Updated Credit Score',
'Original Interest Rate','Grade by Cerdit Score (ORIGINAL)','GRADE BY CREDIT SCORE (UPDATED)','Insurance',
'NumberOfOpenRevolvingAccounts','AmountOwedToLender','APR','LoanPaymentFrequency','Rate','label']]
print(df1.shape)
print(df1.head(4))
#df1 = df1.reset_index()
print(np.any(np.isnan(df1)))
print(np.all(np.isfinite(df1)))
# y_CU: the credit union's own PD estimate, used later for RMSE comparison.
y_CU = raw_df['Probability of Default']
y = df1.label
X = df1.drop("label", axis =1)
print(X.shape)
RANDOM_SEED = 42
LABELS = ["non-delinguent", "delinguent"]
print(df1.shape)
print(df1.isnull().values.any())
print(df1.head(3))
# Visualize class imbalance: delinquent loans are the (rare) positive class.
fig11 = plt.figure()
count_classes = pd.value_counts(df1['label'], sort = True)
count_classes.plot(kind = 'bar', rot=0)
plt.title("delinguency distribution")
plt.xticks(range(2), LABELS)
plt.xlabel("Class")
plt.ylabel("Frequency")
plt.show()
fig11.savefig("Class distribution.pdf")
#fig11.savefig("Class distribution.png")
print(df1['label'].value_counts())
#from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve
# 75/25 split (sklearn default) with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
from imblearn.over_sampling import SMOTE
# NOTE(review): the name 'os' shadows the stdlib os module; fit_sample is the
# pre-0.4 imblearn API (renamed fit_resample later).
os = SMOTE(random_state=0)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
columns = X_train.columns
# SMOTE is applied to the training split only; the test split stays untouched.
os_data_X,os_data_y=os.fit_sample(X_train, y_train)
os_data_X = pd.DataFrame(data=os_data_X,columns=columns )
os_data_y= pd.DataFrame(data=os_data_y,columns=['y'])
# we can Check the numbers of our data
print("length of X data is ",len(X))
print("length of oversampled data is ",len(os_data_X))
print("Number of no delinguent in oversampled data",len(os_data_y[os_data_y['y']==0]))
print("Number of delinguent",len(os_data_y[os_data_y['y']==1]))
print("Proportion of no delinguent data in oversampled data is ",len(os_data_y[os_data_y['y']==0])/len(os_data_X))
print("Proportion of delinguent data in oversampled data is ",len(os_data_y[os_data_y['y']==1])/len(os_data_X))
# Replace the training split with its balanced, oversampled version.
X_train = os_data_X
y_train = os_data_y
from sklearn.linear_model import LogisticRegression
# Sweep the positive-class weight (1..10000) and overlay PR/ROC curves, one
# color per weight, to pick a cost-sensitive setting by eye.
fig12 = plt.figure(figsize=(15,8))
ax1 = fig12.add_subplot(1,2,1)
ax1.set_xlim([-0.05,1.05])
ax1.set_ylim([-0.05,1.05])
ax1.set_xlabel('Recall')
ax1.set_ylabel('Precision')
ax1.set_title('PR Curve')
ax2 = fig12.add_subplot(1,2,2)
ax2.set_xlim([-0.05,1.05])
ax2.set_ylim([-0.05,1.05])
ax2.set_xlabel('False Positive Rate')
ax2.set_ylabel('True Positive Rate')
ax2.set_title('ROC Curve')
for w,k in zip([1,5,10,20,50,100,10000],'bgrcmykw'):
    lr_model = LogisticRegression(class_weight={0:1,1:w})
    lr_model.fit(X_train,y_train)
    #lr_model.fit(os_data_X,os_data_y)
    pred_prob = lr_model.predict_proba(X_test)[:,1]
    p,r,_ = precision_recall_curve(y_test,pred_prob)
    tpr,fpr,_ = roc_curve(y_test,pred_prob)
    ax1.plot(r,p,c=k,label=w)
    ax2.plot(tpr,fpr,c=k,label=w)
ax1.legend(loc='lower left')
ax2.legend(loc='lower left')
plt.show()
fig12.savefig("log_reg_weights.pdf")
#fig12.savefig("log_reg_weights.png")
#lr = LogisticRegression(class_weight='balanced')
#lr = LogisticRegression(class_weight={0:1,1:28})
# Final model: unweighted logistic regression on the SMOTE-balanced split.
lr = LogisticRegression()
lr = lr.fit(X_train, y_train)
# Export intercept + coefficients alongside their variable names.
params = np.append(lr.intercept_,lr.coef_)
#params = np.append(lr.coef_)
#print(params)
var1 = np.append("Intercept",X.columns)
print(var1)
#coeff1 = pd.DataFrame({'Variable':var1,'Coeffient':params})
coeff1 = pd.DataFrame({'Coeffient':params, 'Variable':var1})
print(coeff1.shape)
print(coeff1.head(16))
coeff1.to_csv("Model_Coefficients.csv")
lr_predicted = lr.predict(X_test)
confusion = confusion_matrix(y_test, lr_predicted)
print(lr.score(X_test,y_test))
print("Number of mislabeled points out of a total %d points : %d" % (X_test.shape[0],(y_test != lr_predicted).sum()))
print("\n\n")
print(confusion)
# Held-out metrics; AUC here is computed from hard 0/1 predictions, not
# probabilities, so it understates the probabilistic AUC reported later.
y_pred = lr.predict(X_test)
acc = accuracy_score(y_test,y_pred)
prec = precision_score(y_test,y_pred)
rec = recall_score(y_test,y_pred)
f1 = f1_score(y_test, y_pred)
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
auc1 = auc(fpr,tpr)
print("\n\n")
print("Number of mislabeled points out of a total %d points : %d" % (X_test.shape[0],(y_test != y_pred).sum()))
print("\n\n")
print("Logistic accuracy:" ,acc)
print("Logistic precision:" ,prec)
print("Logistic recall:" ,rec)
print("Logistic f1 ratio:" ,f1)
print("Logistic AUC:" ,auc1)
#y_proba_lr = lr.fit(X_train, y_train).predict_proba(X_test)
# PD estimates for the FULL dataset (train + test), used for the CU comparison.
y_proba_lr = lr.fit(X_train, y_train).predict_proba(X)
print(y_proba_lr[:,1])
from sklearn.model_selection import cross_val_score
# Run each 5-fold cross-validation ONCE and reuse the scores. The original
# code fitted every CV twice -- once inside print() and once again for the
# mean/std -- doubling the training work for identical printed output.
# accuracy is the default scoring metric
scores_acc = cross_val_score(lr, X_train, y_train, cv=5)
print('Cross-validation (accuracy)', scores_acc)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores_acc.mean(), scores_acc.std() * 2))
# use AUC as scoring metric
scores_auc = cross_val_score(lr, X_train, y_train, cv=5, scoring = 'roc_auc')
print('Cross-validation (AUC)', scores_auc)
print("AUC: %0.2f (+/- %0.2f)" % (scores_auc.mean(), scores_auc.std() * 2))
# use recall as scoring metric
scores_rec = cross_val_score(lr, X_train, y_train, cv=5, scoring = 'recall')
print('Cross-validation (recall)', scores_rec)
print("Recall: %0.2f (+/- %0.2f)" % (scores_rec.mean(), scores_rec.std() * 2))
scores_prec = cross_val_score(lr, X_train, y_train, cv=5, scoring = 'precision')
print('Cross-validation (precision)', scores_prec)
print("precision: %0.2f (+/- %0.2f)" % (scores_prec.mean(), scores_prec.std() * 2))
import seaborn as sns
# Annotated confusion-matrix heatmap for the held-out test split.
#cm = pd.crosstab(y_test, y_pred, rownames = 'True', colnames = 'predicted', margins = False)
cm = confusion_matrix(y_test, lr_predicted)
ax= plt.subplot()
sns.heatmap(cm, annot=True, ax = ax); #annot=True to annotate cells
# labels, title and ticks
ax.set_xlabel('Predicted labels');ax.set_ylabel('True labels');
ax.set_title('Confusion Matrix');
ax.xaxis.set_ticklabels(['non-delinguent', 'delinguent']); ax.yaxis.set_ticklabels(['non-delinguent', 'delinguent'])
plt.show()
#ax.savefig("confusion_matrix.pdf")
#ax.savefig("confusion_matrix.png")
# Score-based (not hard-label) evaluation: PR curve, ROC curve, and export of
# the model's PD estimates for comparison with the credit union's figures.
y_scores_lr = lr.decision_function(X_test)
# ### Precision-recall curves
from sklearn.metrics import precision_recall_curve
precision, recall, thresholds = precision_recall_curve(y_test, y_scores_lr)
# Mark the operating point whose decision threshold is closest to zero.
closest_zero = np.argmin(np.abs(thresholds))
closest_zero_p = precision[closest_zero]
closest_zero_r = recall[closest_zero]
plt.figure()
plt.xlim([0.0, 1.01])
plt.ylim([0.0, 1.01])
plt.plot(precision, recall, label='Precision-Recall Curve')
plt.plot(closest_zero_p, closest_zero_r, 'o', markersize = 12, fillstyle = 'none', c='r', mew=3)
plt.xlabel('Precision', fontsize=16)
plt.ylabel('Recall', fontsize=16)
plt.axes().set_aspect('equal')
plt.show()
fpr_lr, tpr_lr, _ = roc_curve(y_test, y_scores_lr)
roc_auc_lr = auc(fpr_lr, tpr_lr)
fig13 = plt.figure()
plt.xlim([-0.01, 1.00])
plt.ylim([-0.01, 1.01])
plt.plot(fpr_lr, tpr_lr, lw=3, label='Logistic Reg ROC curve (area = {:0.2f})'.format(roc_auc_lr))
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('ROC curve (delinguency classifier)', fontsize=16)
plt.legend(loc='lower right', fontsize=13)
plt.plot([0, 1], [0, 1], color='navy', lw=3, linestyle='--')
plt.axes().set_aspect('equal')
plt.show()
fig13.savefig("ROC_curve_1.pdf")
#fig1.savefig("ROC_curve_1.png")
print(y_proba_lr[:,1])
# RMSE between the credit union's PD column and this model's PD estimates.
err = y_CU - y_proba_lr[:,1]
rmse_err = np.sqrt(np.mean(err**2))
print(rmse_err)
prob = y_proba_lr[:,1]
prob2 = pd.DataFrame({'probability':prob})
print(prob2.shape)
print(prob2.head(6))
prob2.to_csv("predicted_probability.csv")
# Persist the fitted logistic-regression model. A context manager guarantees
# the file handle is closed even if pickling raises (the original paired a
# bare open() with a manual close() that a failure would skip).
with open("log_reg_Credit_Union_PDS_model.pickle", "wb") as save_classifier:
    pickle.dump(lr, save_classifier)
    #cPickle.dump(model, save_classifier)
    ##dill.dump(model, save_classifier)
print("hoora!")
#classifier_f = open("log_reg_Credit_Union_PDS_model.pickle","rb")
#model = pickle.load(classifier_f)
#classifier_f.close()
#https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html#sphx-glr-auto-examples-model-selection-plot-roc-py
#https://towardsdatascience.com/building-a-logistic-regression-in-python-step-by-step-becd4d56c9c8
#https://github.com/susanli2016/Machine-Learning-with-Python/blob/master/Logistic%20Regression%20balanced.ipynb
# Second ROC plot (decision-function scores), styled after the sklearn example.
y_score = lr.decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
fig14 =plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic (ROC) curve')
plt.legend(loc="lower right")
plt.show()
fig14.savefig("ROC_curve_2.pdf")
#fig.savefig("ROC_curve_2.png")
#++++++++++++++++++++++++++++++++++++++++ LGD +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Load modules and data
import statsmodels.api as sm
# Instantiate a gamma family model with the default link function.
# LGD side-model: gamma-family GLM fitted on the (SMOTE-oversampled) training
# split. NOTE(review): y_train here is the binary delinquency label, not a
# loss-given-default amount -- confirm a Gamma response is intended.
gamma_model = sm.GLM(y_train, X_train, family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
| [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"pandas.value_counts",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_curve",
"numpy.isfinite",
"pandas.read_excel",
"statsmodels.api.families.Gamma",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotl... | [((520, 570), 'pandas.read_excel', 'pd.read_excel', (['myfile'], {'sheet_name': '"""Data"""', 'header': '(0)'}), "(myfile, sheet_name='Data', header=0)\n", (533, 570), True, 'import pandas as pd\n'), ((7507, 7519), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7517, 7519), True, 'import matplotlib.pyplot as plt\n'), ((7536, 7576), 'pandas.value_counts', 'pd.value_counts', (["df1['label']"], {'sort': '(True)'}), "(df1['label'], sort=True)\n", (7551, 7576), True, 'import pandas as pd\n'), ((7619, 7656), 'matplotlib.pyplot.title', 'plt.title', (['"""delinguency distribution"""'], {}), "('delinguency distribution')\n", (7628, 7656), True, 'import matplotlib.pyplot as plt\n'), ((7686, 7705), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Class"""'], {}), "('Class')\n", (7696, 7705), True, 'import matplotlib.pyplot as plt\n'), ((7706, 7729), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (7716, 7729), True, 'import matplotlib.pyplot as plt\n'), ((7730, 7740), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7738, 7740), True, 'import matplotlib.pyplot as plt\n'), ((8148, 8186), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(0)'}), '(X, y, random_state=0)\n', (8164, 8186), False, 'from sklearn.model_selection import train_test_split\n'), ((8236, 8257), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'random_state': '(0)'}), '(random_state=0)\n', (8241, 8257), False, 'from imblearn.over_sampling import SMOTE\n'), ((8438, 8483), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'os_data_X', 'columns': 'columns'}), '(data=os_data_X, columns=columns)\n', (8450, 8483), True, 'import pandas as pd\n'), ((8495, 8538), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'os_data_y', 'columns': "['y']"}), "(data=os_data_y, columns=['y'])\n", (8507, 8538), True, 'import pandas as pd\n'), ((9151, 9178), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (9161, 9178), True, 'import matplotlib.pyplot as plt\n'), ((9996, 10006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10004, 10006), True, 'import matplotlib.pyplot as plt\n'), ((10188, 10208), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (10206, 10208), False, 'from sklearn.linear_model import LogisticRegression\n'), ((10248, 10282), 'numpy.append', 'np.append', (['lr.intercept_', 'lr.coef_'], {}), '(lr.intercept_, lr.coef_)\n', (10257, 10282), True, 'import numpy as np\n'), ((10335, 10368), 'numpy.append', 'np.append', (['"""Intercept"""', 'X.columns'], {}), "('Intercept', X.columns)\n", (10344, 10368), True, 'import numpy as np\n'), ((10451, 10504), 'pandas.DataFrame', 'pd.DataFrame', (["{'Coeffient': params, 'Variable': var1}"], {}), "({'Coeffient': params, 'Variable': var1})\n", (10463, 10504), True, 'import pandas as pd\n'), ((10633, 10671), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'lr_predicted'], {}), '(y_test, lr_predicted)\n', (10649, 10671), False, 'from sklearn.metrics import confusion_matrix\n'), ((10888, 10918), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (10902, 10918), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((10925, 10956), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (10940, 10956), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((10962, 10990), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (10974, 10990), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((10995, 11019), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', 
(11003, 11019), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((11043, 11068), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (11052, 11068), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((11076, 11089), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (11079, 11089), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((11723, 11766), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lr', 'X_train', 'y_train'], {'cv': '(5)'}), '(lr, X_train, y_train, cv=5)\n', (11738, 11766), False, 'from sklearn.model_selection import cross_val_score\n'), ((11988, 12050), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lr', 'X_train', 'y_train'], {'cv': '(5)', 'scoring': '"""roc_auc"""'}), "(lr, X_train, y_train, cv=5, scoring='roc_auc')\n", (12003, 12050), False, 'from sklearn.model_selection import cross_val_score\n'), ((12274, 12335), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lr', 'X_train', 'y_train'], {'cv': '(5)', 'scoring': '"""recall"""'}), "(lr, X_train, y_train, cv=5, scoring='recall')\n", (12289, 12335), False, 'from sklearn.model_selection import cross_val_score\n'), ((12538, 12602), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lr', 'X_train', 'y_train'], {'cv': '(5)', 'scoring': '"""precision"""'}), "(lr, X_train, y_train, cv=5, scoring='precision')\n", (12553, 12602), False, 'from sklearn.model_selection import cross_val_score\n'), ((12815, 12853), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'lr_predicted'], {}), '(y_test, lr_predicted)\n', (12831, 12853), False, 'from sklearn.metrics import confusion_matrix\n'), ((12859, 12872), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (12870, 12872), True, 
'import matplotlib.pyplot as plt\n'), ((12873, 12907), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'ax': 'ax'}), '(cm, annot=True, ax=ax)\n', (12884, 12907), True, 'import seaborn as sns\n'), ((13185, 13195), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13193, 13195), True, 'import matplotlib.pyplot as plt\n'), ((13428, 13471), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_test', 'y_scores_lr'], {}), '(y_test, y_scores_lr)\n', (13450, 13471), False, 'from sklearn.metrics import precision_recall_curve\n'), ((13597, 13609), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13607, 13609), True, 'import matplotlib.pyplot as plt\n'), ((13610, 13631), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.01]'], {}), '([0.0, 1.01])\n', (13618, 13631), True, 'import matplotlib.pyplot as plt\n'), ((13632, 13653), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.01]'], {}), '([0.0, 1.01])\n', (13640, 13653), True, 'import matplotlib.pyplot as plt\n'), ((13654, 13713), 'matplotlib.pyplot.plot', 'plt.plot', (['precision', 'recall'], {'label': '"""Precision-Recall Curve"""'}), "(precision, recall, label='Precision-Recall Curve')\n", (13662, 13713), True, 'import matplotlib.pyplot as plt\n'), ((13714, 13811), 'matplotlib.pyplot.plot', 'plt.plot', (['closest_zero_p', 'closest_zero_r', '"""o"""'], {'markersize': '(12)', 'fillstyle': '"""none"""', 'c': '"""r"""', 'mew': '(3)'}), "(closest_zero_p, closest_zero_r, 'o', markersize=12, fillstyle=\n 'none', c='r', mew=3)\n", (13722, 13811), True, 'import matplotlib.pyplot as plt\n'), ((13811, 13847), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Precision"""'], {'fontsize': '(16)'}), "('Precision', fontsize=16)\n", (13821, 13847), True, 'import matplotlib.pyplot as plt\n'), ((13848, 13881), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Recall"""'], {'fontsize': '(16)'}), "('Recall', fontsize=16)\n", (13858, 13881), True, 'import matplotlib.pyplot as plt\n'), ((13913, 
13923), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13921, 13923), True, 'import matplotlib.pyplot as plt\n'), ((13945, 13975), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_scores_lr'], {}), '(y_test, y_scores_lr)\n', (13954, 13975), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((13989, 14008), 'sklearn.metrics.auc', 'auc', (['fpr_lr', 'tpr_lr'], {}), '(fpr_lr, tpr_lr)\n', (13992, 14008), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((14018, 14030), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14028, 14030), True, 'import matplotlib.pyplot as plt\n'), ((14031, 14053), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.01, 1.0]'], {}), '([-0.01, 1.0])\n', (14039, 14053), True, 'import matplotlib.pyplot as plt\n'), ((14055, 14078), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.01, 1.01]'], {}), '([-0.01, 1.01])\n', (14063, 14078), True, 'import matplotlib.pyplot as plt\n'), ((14178, 14224), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {'fontsize': '(16)'}), "('False Positive Rate', fontsize=16)\n", (14188, 14224), True, 'import matplotlib.pyplot as plt\n'), ((14225, 14270), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {'fontsize': '(16)'}), "('True Positive Rate', fontsize=16)\n", (14235, 14270), True, 'import matplotlib.pyplot as plt\n'), ((14271, 14331), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC curve (delinguency classifier)"""'], {'fontsize': '(16)'}), "('ROC curve (delinguency classifier)', fontsize=16)\n", (14280, 14331), True, 'import matplotlib.pyplot as plt\n'), ((14332, 14374), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'fontsize': '(13)'}), "(loc='lower right', fontsize=13)\n", (14342, 14374), True, 'import matplotlib.pyplot as plt\n'), ((14375, 14435), 'matplotlib.pyplot.plot', 'plt.plot', 
(['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': '(3)', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=3, linestyle='--')\n", (14383, 14435), True, 'import matplotlib.pyplot as plt\n'), ((14467, 14477), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14475, 14477), True, 'import matplotlib.pyplot as plt\n'), ((14681, 14716), 'pandas.DataFrame', 'pd.DataFrame', (["{'probability': prob}"], {}), "({'probability': prob})\n", (14693, 14716), True, 'import pandas as pd\n'), ((14869, 14901), 'pickle.dump', 'pickle.dump', (['lr', 'save_classifier'], {}), '(lr, save_classifier)\n', (14880, 14901), False, 'import pickle\n'), ((15587, 15613), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_score'], {}), '(y_test, y_score)\n', (15596, 15613), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((15624, 15637), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (15627, 15637), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((15646, 15658), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15656, 15658), True, 'import matplotlib.pyplot as plt\n'), ((15666, 15760), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': '"""darkorange"""', 'lw': 'lw', 'label': "('ROC curve (area = %0.2f)' % roc_auc)"}), "(fpr, tpr, color='darkorange', lw=lw, label=\n 'ROC curve (area = %0.2f)' % roc_auc)\n", (15674, 15760), True, 'import matplotlib.pyplot as plt\n'), ((15765, 15826), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (15773, 15826), True, 'import matplotlib.pyplot as plt\n'), ((15827, 15847), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (15835, 15847), True, 'import matplotlib.pyplot as plt\n'), ((15848, 15869), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (15856, 15869), True, 'import matplotlib.pyplot as plt\n'), ((15870, 15903), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (15880, 15903), True, 'import matplotlib.pyplot as plt\n'), ((15904, 15936), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (15914, 15936), True, 'import matplotlib.pyplot as plt\n'), ((15937, 15995), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver operating characteristic (ROC) curve"""'], {}), "('Receiver operating characteristic (ROC) curve')\n", (15946, 15995), True, 'import matplotlib.pyplot as plt\n'), ((15996, 16025), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (16006, 16025), True, 'import matplotlib.pyplot as plt\n'), ((16026, 16036), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16034, 16036), True, 'import matplotlib.pyplot as plt\n'), ((9599, 9648), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'class_weight': '{(0): 1, (1): w}'}), '(class_weight={(0): 1, (1): w})\n', (9617, 9648), False, 'from sklearn.linear_model import LogisticRegression\n'), ((9780, 9821), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_test', 'pred_prob'], {}), '(y_test, pred_prob)\n', (9802, 9821), False, 'from sklearn.metrics import precision_recall_curve\n'), ((9837, 9865), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'pred_prob'], {}), '(y_test, pred_prob)\n', (9846, 9865), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve\n'), ((11665, 11708), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lr', 'X_train', 'y_train'], {'cv': '(5)'}), '(lr, X_train, y_train, cv=5)\n', (11680, 11708), False, 'from sklearn.model_selection import cross_val_score\n'), 
((11909, 11971), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lr', 'X_train', 'y_train'], {'cv': '(5)', 'scoring': '"""roc_auc"""'}), "(lr, X_train, y_train, cv=5, scoring='roc_auc')\n", (11924, 11971), False, 'from sklearn.model_selection import cross_val_score\n'), ((12196, 12257), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lr', 'X_train', 'y_train'], {'cv': '(5)', 'scoring': '"""recall"""'}), "(lr, X_train, y_train, cv=5, scoring='recall')\n", (12211, 12257), False, 'from sklearn.model_selection import cross_val_score\n'), ((12456, 12520), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lr', 'X_train', 'y_train'], {'cv': '(5)', 'scoring': '"""precision"""'}), "(lr, X_train, y_train, cv=5, scoring='precision')\n", (12471, 12520), False, 'from sklearn.model_selection import cross_val_score\n'), ((13497, 13515), 'numpy.abs', 'np.abs', (['thresholds'], {}), '(thresholds)\n', (13503, 13515), True, 'import numpy as np\n'), ((14616, 14633), 'numpy.mean', 'np.mean', (['(err ** 2)'], {}), '(err ** 2)\n', (14623, 14633), True, 'import numpy as np\n'), ((3147, 3188), 'numpy.isnan', 'np.isnan', (["raw_df['Loan Type Description']"], {}), "(raw_df['Loan Type Description'])\n", (3155, 3188), True, 'import numpy as np\n'), ((3237, 3264), 'numpy.isnan', 'np.isnan', (["raw_df['Balance']"], {}), "(raw_df['Balance'])\n", (3245, 3264), True, 'import numpy as np\n'), ((3315, 3344), 'numpy.isnan', 'np.isnan', (["raw_df['Loan Term']"], {}), "(raw_df['Loan Term'])\n", (3323, 3344), True, 'import numpy as np\n'), ((3389, 3412), 'numpy.isnan', 'np.isnan', (["raw_df['LTV']"], {}), "(raw_df['LTV'])\n", (3397, 3412), True, 'import numpy as np\n'), ((3458, 3483), 'numpy.isnan', 'np.isnan', (["raw_df['label']"], {}), "(raw_df['label'])\n", (3466, 3483), True, 'import numpy as np\n'), ((3554, 3587), 'numpy.isnan', 'np.isnan', (["raw_df['Interest Rate']"], {}), "(raw_df['Interest Rate'])\n", (3562, 3587), True, 'import numpy as np\n'), 
((3646, 3683), 'numpy.isnan', 'np.isnan', (["raw_df['Origination Month']"], {}), "(raw_df['Origination Month'])\n", (3654, 3683), True, 'import numpy as np\n'), ((3749, 3793), 'numpy.isnan', 'np.isnan', (["raw_df['Most Recent Credit Score']"], {}), "(raw_df['Most Recent Credit Score'])\n", (3757, 3793), True, 'import numpy as np\n'), ((3933, 3965), 'numpy.isnan', 'np.isnan', (["raw_df['AmountFunded']"], {}), "(raw_df['AmountFunded'])\n", (3941, 3965), True, 'import numpy as np\n'), ((4149, 4192), 'numpy.isnan', 'np.isnan', (["raw_df['MonthlyIncomeBaseSalary']"], {}), "(raw_df['MonthlyIncomeBaseSalary'])\n", (4157, 4192), True, 'import numpy as np\n'), ((4356, 4394), 'numpy.isnan', 'np.isnan', (["raw_df['TotalMonthlyIncome']"], {}), "(raw_df['TotalMonthlyIncome'])\n", (4364, 4394), True, 'import numpy as np\n'), ((4558, 4596), 'numpy.isnan', 'np.isnan', (["raw_df['MonthlyIncomeOther']"], {}), "(raw_df['MonthlyIncomeOther'])\n", (4566, 4596), True, 'import numpy as np\n'), ((4666, 4714), 'numpy.isnan', 'np.isnan', (["raw_df['Collateral Current Valuation']"], {}), "(raw_df['Collateral Current Valuation'])\n", (4674, 4714), True, 'import numpy as np\n'), ((4798, 4825), 'numpy.isnan', 'np.isnan', (["raw_df['Balance']"], {}), "(raw_df['Balance'])\n", (4806, 4825), True, 'import numpy as np\n'), ((4907, 4940), 'numpy.isnan', 'np.isnan', (["raw_df['Grade Overall']"], {}), "(raw_df['Grade Overall'])\n", (4915, 4940), True, 'import numpy as np\n'), ((5036, 5076), 'numpy.isnan', 'np.isnan', (["raw_df['Current Credit Limit']"], {}), "(raw_df['Current Credit Limit'])\n", (5044, 5076), True, 'import numpy as np\n'), ((5160, 5194), 'numpy.isnan', 'np.isnan', (["raw_df['Loan Type Code']"], {}), "(raw_df['Loan Type Code'])\n", (5168, 5194), True, 'import numpy as np\n'), ((5262, 5288), 'numpy.isnan', 'np.isnan', (["raw_df['Status']"], {}), "(raw_df['Status'])\n", (5270, 5288), True, 'import numpy as np\n'), ((5381, 5410), 'numpy.isnan', 'np.isnan', (["raw_df['Insurance']"], {}), 
"(raw_df['Insurance'])\n", (5389, 5410), True, 'import numpy as np\n'), ((5563, 5612), 'numpy.isnan', 'np.isnan', (["raw_df['NumberOfOpenRevolvingAccounts']"], {}), "(raw_df['NumberOfOpenRevolvingAccounts'])\n", (5571, 5612), True, 'import numpy as np\n'), ((5687, 5710), 'numpy.isnan', 'np.isnan', (["raw_df['APR']"], {}), "(raw_df['APR'])\n", (5695, 5710), True, 'import numpy as np\n'), ((5980, 6018), 'numpy.isnan', 'np.isnan', (["raw_df['AmountOwedToLender']"], {}), "(raw_df['AmountOwedToLender'])\n", (5988, 6018), True, 'import numpy as np\n'), ((6294, 6334), 'numpy.isnan', 'np.isnan', (["raw_df['LoanPaymentFrequency']"], {}), "(raw_df['LoanPaymentFrequency'])\n", (6302, 6334), True, 'import numpy as np\n'), ((6413, 6437), 'numpy.isnan', 'np.isnan', (["raw_df['Rate']"], {}), "(raw_df['Rate'])\n", (6421, 6437), True, 'import numpy as np\n'), ((7218, 7231), 'numpy.isnan', 'np.isnan', (['df1'], {}), '(df1)\n', (7226, 7231), True, 'import numpy as np\n'), ((7247, 7263), 'numpy.isfinite', 'np.isfinite', (['df1'], {}), '(df1)\n', (7258, 7263), True, 'import numpy as np\n'), ((13882, 13892), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (13890, 13892), True, 'import matplotlib.pyplot as plt\n'), ((14436, 14446), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (14444, 14446), True, 'import matplotlib.pyplot as plt\n'), ((16388, 16407), 'statsmodels.api.families.Gamma', 'sm.families.Gamma', ([], {}), '()\n', (16405, 16407), True, 'import statsmodels.api as sm\n')] |
import pandas as pd
import numpy as np
from tqdm import tqdm
from dateutil.relativedelta import relativedelta
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.multioutput import RegressorChain
from sklearn.metrics import fbeta_score, mean_squared_error, r2_score
from sklearn.preprocessing import StandardScaler
from vespid.data.neo4j_tools import Nodes, Relationships
from vespid.models.static_communities import get_cluster_data
from vespid.models.static_communities import jaccard_coefficient
from vespid.models.static_communities import cosine_similarities
from vespid import setup_logger
# Module-level logger (configured by the project's vespid.setup_logger helper)
logger = setup_logger(__name__)
class DynamicCommunities():
    '''
    Class designed to track, over an entire dynamic graph,
    the birth, death, merging, splitting, or simple
    continuation of dynamic communities.
    '''
    # Event-type labels written into the `event_type` column of the
    # DataFrames produced by the flag_*_events() methods.
    DEATH = 'death'  # time-t cluster has no similar successor at t+1
    BIRTH = 'birth'  # t+1 cluster has no similar predecessor at t
    SPLIT = 'split'  # one t cluster maps to 2+ clusters at t+1
    MERGE = 'merge'  # 2+ t clusters map to one cluster at t+1
    CONTINUATION = 'continuation'  # 1:1 mapping, size roughly stable
    CONTRACTION = 'contraction'  # 1:1 mapping, size shrank beyond threshold
    EXPANSION = 'expansion'  # 1:1 mapping, size grew beyond threshold
def __init__(
self,
graph,
start_year,
end_year,
window_size=3,
similarity_threshold=0.95,
similarity_scoring='embeddings',
size_change_threshold=0.1,
expire_cycles=3
):
'''
Parameters
----------
graph: Neo4jConnectionHandler object. The graph of interest.
start_year: int. Indicates the beginning year from which data will
be pulled for community building.
end_year: int. Same as start year, but defines the end of the period
of interest. Inclusive.
window_size: int. Number of years to include in a single analytical frame.
Note that this is currently not being used.
similarity_threshold: float in range [0.01, 0.99].
Dictates the minimum similarity score required
between C_t and C_(t+1) to indicate connected
clusters. Note that recommended value for membership-type
scoring is 0.1. For embeddings-type scoring,
recommended value is 0.95.
similarity_scoring: str. Can be one of ['embeddings', 'membership'].
Indicates what you want to compare in order to detect cluster
evolution events.
'embeddings': Use BERT/SPECTER/GraphSAGE/whatever vector
embeddings of the nodes to assess their similarity scores.
The actual similarity mechanism to be used is cosine similarity.
This is most directly useful when you need the embeddings to
make the cluster comparisons over time stateful, e.g.
if your static clustering approach is based on a graph
at a fixed time window `t` such that nodes
that existed before that window began aren't included
in the solution.
'membership': Use actual membership (e.g. unique node IDs) vectors
of each cluster to assess changes. Uses Jaccard similarity as
the metric. This really only works when the static clustering
solutions come from a cumulative graph in which the graph at
time `t` is the result of all graph information prior to `t`.
size_change_threshold: float in range [0.01, 0.99].
Dictates the minimum change in size of a cluster
from t to t+1 to indicate that it has expanded
or contracted.
expire_cycles: int. Number of timesteps a cluster
should be missing from the timeline before declaring
it dead.
'''
self.graph = graph
self.start_year = start_year
self.end_year = end_year
self.window_size = window_size
if similarity_scoring not in ['embeddings', 'membership']:
raise ValueError(f"``embeddings`` received an invalid value of '{self.embeddings}'")
self.similarity_threshold = similarity_threshold
self.similarity_scoring = similarity_scoring
self.size_change_threshold = size_change_threshold
self.expire_cycles = expire_cycles
self._c_t1_column = 'C_t'
self._c_t2_column = 'C_(t+1)'
self._event_column = 'event_type'
def __repr__(self):
output = {k:v for k,v in self.__dict__.items() if k not in ['graph', 'jaccard_scores'] and k[0] != '_'}
return str(output)
def clusters_over_time(self):
'''
Gets the cluster labels associated with each year of the
graph and maps them to the list of papers in that cluster
for that year, generating useful cluster-level metadata
along the way.
Parameters
----------
None.
Returns
-------
pandas DataFrame describing each cluster found in each time window
(e.g. cluster0_2016).
'''
dfs = []
#TODO: change queries below if we stop putting the year in the cluster ID attribute name
for year in tqdm(range(self.start_year, self.end_year + 1),
desc='Pulling down year-by-year data from Neo4j'):
if self.similarity_scoring == 'membership':
query = f"""
MATCH (p:Publication)
WHERE p.clusterID IS NOT NULL
AND p.publicationDate.year = {year}
RETURN toInteger(p.clusterID) AS ClusterLabel,
{year} AS Year,
COUNT(p) AS ClusterSize,
COLLECT(ID(p)) AS Papers
ORDER BY ClusterLabel ASC
"""
dfs.append(self.graph.cypher_query_to_dataframe(query,
verbose=False))
elif self.similarity_scoring == 'embeddings':
dfs.append(get_cluster_data(year, self.graph))
else:
raise ValueError(f"``embeddings`` received an invalid value of '{self.embeddings}'")
output = pd.concat(dfs, ignore_index=True)
return output
    def track_cluster_similarity(self, clusters_over_time=None):
        '''
        Computes the Jaccard coefficient for consecutive year
        pairs (e.g. 2017-2018) pairwise between each year's clusters
        (e.g. cluster0_2017 compared to cluster1_2018) to determine
        how similar each cluster in year t is those in year t+1.

        Parameters
        ----------
        clusters_over_time: pandas DataFrame that is equivalent
            to the output of DynamicCommunities.clusters_over_time().
            If not None, this will be used as the pre-computed result
            of running that method. If None, clusters_over_time() will
            be run.

            Useful for saving time if you already have the pre-computed
            result in memory.

        Returns
        -------
        pandas DataFrame that is the result of self.clusters_over_time().

        Also, a dict of the form {integer_t+1_year: pandas DataFrame}, wherein
        the DataFrame has rows representing each cluster in the year t
        and columns representing each cluster in t+1, with the values
        reflective the Jaccard coefficient of similarity between each
        cluster pair is written to self.similarity_scores. Note that keys
        are the t+1 year value, so output[2018] covers the 2017-2018
        comparison.
        '''
        # Allow a pre-computed cluster table to be passed in to skip the
        # (expensive) Neo4j pulls
        if clusters_over_time is None:
            df_clusters_over_time = self.clusters_over_time()
        else:
            df_clusters_over_time = clusters_over_time

        results = {}
        #TODO: make this robust to start year > end year
        #TODO: combine with to_dataframe functionality so we can loop only once
        for year in tqdm(range(self.start_year, self.end_year),
                         desc='Calculating cluster similarity scores for each t/t+1 pair'):
            # Setup DataFrames for t and t+1 that have all cluster labels in them
            df_t1 = df_clusters_over_time[
                df_clusters_over_time['Year'] == year
            ].set_index('ClusterLabel')
            df_t2 = df_clusters_over_time[
                df_clusters_over_time['Year'] == year + 1
            ].set_index('ClusterLabel')

            if self.similarity_scoring == 'membership':
                # This will produce np.nan for any cluster label not present in a given year
                # (the index-aligned join pads missing labels with NaN)
                df_years = pd.DataFrame({
                    year: df_t1['Papers'],
                    year + 1: df_t2['Papers']
                })

                # shape is (max_cluster_num, max_cluster_num)
                # Form of [[cluster0_year0 to cluster0_year1], [cluster1_year0 to cluster0_year1], [cluster2_year0 to cluster0_year1],
                # [cluster0_year0 to cluster1_year1], [cluster1_year0 to cluster1_year1], [cluster2_year0 to cluster1_year1],
                # [cluster0_year0 to cluster2_year1], [], []]
                scores = np.full((df_years.shape[0], df_years.shape[0]), np.nan)

                #TODO: make this more efficient by avoiding loops!
                # Go through each C_t vs. C_(t+1) pair and calculate Jaccard coefficient
                for i, papers_past in enumerate(df_years[year]):
                    # Check for nulls (NaN padding from the join above is not
                    # a list, so isinstance doubles as a null check)
                    if isinstance(papers_past, list):
                        for j, papers_current in enumerate(df_years[year + 1]):
                            if isinstance(papers_current, list):
                                scores[i][j] = jaccard_coefficient(papers_past, papers_current)

                # Label rows as time-t clusters, columns as t+1 clusters, then
                # drop past, then future, clusters that don't exist (all null)
                results[year + 1] = pd.DataFrame(
                    scores,
                    index=[f"cluster{i}_{year}" for i in range(scores.shape[1])],
                    columns=[f"cluster{i}_{year + 1}" for i in range(scores.shape[0])]
                ).dropna(how='all', axis=0).dropna(how='all', axis=1)

            elif self.similarity_scoring == 'embeddings':
                #TODO: consider plugging this straight into scoring function if memory is tight
                t1 = df_clusters_over_time.loc[
                    df_clusters_over_time['Year'] == year,
                    'ClusterEmbedding'
                ]
                t2 = df_clusters_over_time.loc[
                    df_clusters_over_time['Year'] == year + 1,
                    'ClusterEmbedding'
                ]
                # Pairwise cosine similarity matrix: rows = t clusters,
                # columns = t+1 clusters
                scores = cosine_similarities(t1, t2)
                results[year + 1] = pd.DataFrame(
                    scores,
                    index=[f"cluster{i}_{year}" for i in range(scores.shape[0])],
                    columns=[f"cluster{i}_{year + 1}" for i in range(scores.shape[1])]
                )

        # Cache the per-year score matrices for the flag_*_events methods
        self.similarity_scores = results
        return df_clusters_over_time
def _format_events(self, events):
'''
Reformats events DataFrame to be consistent output
Parameters
----------
events : pandas DataFrame
Original output of any given flagging method
Returns
-------
pandas DataFrame
Formatted output with consistent column order, etc.
'''
return events[[
self._c_t1_column,
self._c_t2_column,
self._event_column
]]
    def flag_merge_events(self, year):
        '''
        Given a set of C_t (cluster at time t) to C_(t+1)
        similarity scores and a threshold to dictate what
        clusters are similar enough to be connected to one
        another through time, return the ones that appear
        to be the result of a merge event.

        Parameters
        ----------
        year: int. Year `t` that should be compared to
            year t+1.

        Returns
        -------
        pandas DataFrame tracking the type of event, the focal cluster
        (e.g. the result of a merge or the cause of a split) and the
        parents/children of the focal cluster (for merging and splitting,
        resp.), if any.
        '''
        # Boolean matrix: True where a C_t (row) and C_(t+1) (column) pair
        # is similar enough to be considered connected
        above_threshold_scores = (self.similarity_scores[year] >= self.similarity_threshold)
        # Column sums = how many time-t parents each t+1 cluster connects to
        num_matching_past_clusters = (above_threshold_scores).sum(axis=0)
        # A t+1 cluster with 2+ connected parents is the product of a merge
        resulting_merged_clusters = num_matching_past_clusters[num_matching_past_clusters >= 2].index.tolist()

        if np.any(num_matching_past_clusters > 1):
            # For each merged column, collect the row labels (parents) that
            # are True. Wrapping the list in another list keeps pandas from
            # expanding it into multiple rows; .iloc[0] unwraps it again,
            # giving {C_(t+1)_label: [parent labels]}.
            merge_results = above_threshold_scores[resulting_merged_clusters]\
            .apply(lambda column: [column[column].index.tolist()])\
            .iloc[0].to_dict()

            # Reshape the mapping into one event row per merged cluster
            output = pd.DataFrame([merge_results])\
            .transpose().reset_index(drop=False).rename(columns={
                'index': self._c_t2_column,
                0: self._c_t1_column
            })
            output[self._event_column] = self.MERGE
            return self._format_events(output)
        else:
            # No t+1 cluster had multiple parents: no merges this year
            return pd.DataFrame()
def flag_split_events(self, year):
'''
Given a set of C_t (cluster at time t) to C_(t+1)
similarity scores and a threshold to dictate what
clusters are similar enough to be connected to one
another through time, return the ones that appear
to be the result of a split event.
Parameters
----------
year: int. Year `t` that should be compared to
year t+1.
Returns
-------
pandas DataFrame tracking the type of event, the focal cluster
(e.g. the result of a merge or the cause of a split) and the
parents/children of the focal cluster (for merging and splitting,
resp.), if any.
'''
above_threshold_scores = (self.similarity_scores[year] >= self.similarity_threshold)
num_matching_current_clusters = (above_threshold_scores).sum(axis=1)
resulting_split_clusters = num_matching_current_clusters[num_matching_current_clusters >= 2].index.tolist()
if np.any(num_matching_current_clusters > 1):
# For each row AKA C_t cluster that qualified as being above threshold in 2+ cases,
# pull out the column names for the C_(t+1) clusters that are its children
merge_results = above_threshold_scores.loc[resulting_split_clusters]\
.apply(lambda row: row[row].index.tolist(),
axis=1).to_dict()
output = pd.DataFrame([merge_results])\
.transpose().reset_index(drop=False).rename(columns={
'index': self._c_t1_column,
0: self._c_t2_column
})
output[self._event_column] = self.SPLIT
return self._format_events(output)
else:
return pd.DataFrame()
def flag_birth_events(self, year):
'''
Given a set of C_t (cluster at time t) to C_(t+1)
similarity scores and a threshold to dictate what
clusters are similar enough to be connected to one
another through time, return the ones that appear
to have been created for the first time in t+1.
Parameters
----------
year: int. Year `t` that should be compared to
year t+1.
Returns
-------
pandas DataFrame tracking the type of event, the focal cluster
(e.g. the result of a merge or the cause of a split) and the
parents/children of the focal cluster (for merging and splitting,
resp.), if any.
'''
# The question: do any t+1 clusters have no t cluster they are similar to?
# Put in terms of the jaccard_scores DataFrame structure: any column for C_(t+1)
# that is all < similarity_threshold?
above_threshold_scores = (self.similarity_scores[year] >= self.similarity_threshold)
num_matching_current_clusters = (above_threshold_scores).sum(axis=0)
resulting_birth_clusters = num_matching_current_clusters[num_matching_current_clusters < 1].index.tolist()
if np.any(num_matching_current_clusters < 1):
output = pd.DataFrame({
self._c_t1_column: np.nan,
self._c_t2_column: resulting_birth_clusters,
self._event_column: self.BIRTH
})
return self._format_events(output)
else:
return pd.DataFrame()
def flag_death_events(self, year):
'''
Given a set of C_t (cluster at time t) to C_(t+1)
similarity scores and a threshold to dictate what
clusters are similar enough to be connected to one
another through time, return the ones that appear
to have not continued in any form into t+1.
Parameters
----------
year: int. Year `t` that should be compared to
year t+1.
Returns
-------
pandas DataFrame tracking the type of event, the focal cluster
(e.g. the result of a merge or the cause of a split) and the
parents/children of the focal cluster (for merging and splitting,
resp.), if any.
'''
# The question: do any t+1 clusters have no t cluster they are similar to?
# Put in terms of the jaccard_scores DataFrame structure: any column for C_(t+1)
# that is all < similarity_threshold?
above_threshold_scores = (self.similarity_scores[year] >= self.similarity_threshold)
num_matching_current_clusters = (above_threshold_scores).sum(axis=1)
resulting_dead_clusters = num_matching_current_clusters[num_matching_current_clusters < 1].index.tolist()
if np.any(num_matching_current_clusters < 1):
output = pd.DataFrame({
self._c_t1_column: resulting_dead_clusters,
self._c_t2_column: np.nan,
self._event_column: self.DEATH
})
return self._format_events(output)
else:
return pd.DataFrame()
def flag_continuity_events(self, year, cluster_metadata, other_events):
'''
Given a set of C_t (cluster at time t) to C_(t+1)
similarity scores and a threshold to dictate what
clusters are similar enough to be connected to one
another through time, return the ones that appear
to have continued on as a single cluster into t+1,
but that have increased above the relative change
threshold.
Parameters
----------
year: int. Year `t` that should be compared to
year t+1.
cluster_metadata: pandas DataFrame with columns
['ClusterLabel', 'Year', 'ClusterSize'].
other_events: pandas DataFrame of split/merge/etc.
events that can be used to determine what clusters
are left and thus likely continuity events.
Returns
-------
pandas DataFrame tracking the type of event, the focal cluster
(e.g. the result of a merge or the cause of a split) and the
parents/children of the focal cluster (for merging and splitting,
resp.), if any.
'''
above_threshold_scores = (self.similarity_scores[year] >= self.similarity_threshold)
# Find clusters that qualify as very similar to one another
# Need to check that there's only one-to-one mapping from t to t+1
num_matching_t1_clusters = (above_threshold_scores).sum(axis=1)
num_matching_t2_clusters = (above_threshold_scores).sum(axis=0)
if np.any(num_matching_t1_clusters == 1) \
and np.any(num_matching_t2_clusters == 1) \
and not other_events.empty:
# There were other flagged events, so we need to skip them
# Expand cluster columns so we have 1:1 C_t to C_(t+1) mappings
events_expanded = other_events\
.explode(self._c_t1_column)\
.explode(self._c_t2_column)
# Drop any C_t that are part of another event already
num_matching_t1_clusters.drop(
labels=events_expanded[self._c_t1_column],
errors='ignore',
inplace=True
)
# No more events to investigate?
if num_matching_t1_clusters.empty:
return pd.DataFrame()
# Identify clusters at time `t` that only match one cluster in time `t+1`
continued_clusters = num_matching_t1_clusters[num_matching_t1_clusters == 1]\
.index.tolist()
# Make a dict mapping {C_t: C_(t+1)}
continuity_mapping = above_threshold_scores.loc[continued_clusters]\
.apply(lambda row: row[row].index.tolist()[0],
axis=1).to_dict()
# Put it all into an events-record format
events = pd.DataFrame([continuity_mapping])\
.transpose().reset_index(drop=False).rename(columns={
'index': self._c_t1_column,
0: self._c_t2_column
})
# Make sure everything gets flagged as continuing
# if it made it this far, only change flag if needed
events[self._event_column] = self.CONTINUATION
# Get an events-records-friendly cluster label
cluster_metadata['C_t_label'] = 'cluster' + cluster_metadata['ClusterLabel'].astype(str) \
+ "_" + cluster_metadata['Year'].astype(str)
# Match cluster sizes to the cluster labels
cluster_label_columns = [self._c_t1_column, self._c_t2_column]
for column in cluster_label_columns:
events = events.merge(
cluster_metadata[['C_t_label', 'ClusterSize']],
how='left',
left_on=column,
right_on='C_t_label',
sort=False
).rename(columns={'ClusterSize': column + '_size'})\
.drop(columns=['C_t_label'])
# bool Series indicating if expansion has occurred at or above our threshold
expanded = events[f"{cluster_label_columns[0]}_size"] * (1 + self.size_change_threshold) \
<= events[f"{cluster_label_columns[1]}_size"]
events.loc[expanded, self._event_column] = self.EXPANSION
contracted = events[f"{cluster_label_columns[0]}_size"] * (1 - self.size_change_threshold) \
>= events[f"{cluster_label_columns[1]}_size"]
events.loc[contracted, self._event_column] = self.CONTRACTION
return self._format_events(events)
else:
return pd.DataFrame()
def to_dataframe(self, clusters_over_time=None):
'''
Produces a tabular record of the various dynamic community events.
Does so for all t/t+1 pairs of time windows.
Parameters
----------
clusters_over_time: pandas DataFrame that is equivalent
to the output of DynamicCommunities.clusters_over_time().
If not None, this will be used as the pre-computed result
of running that method. If None, clusters_over_time() will
be run.
Useful for saving time if you already have the pre-computed
result in memory.
Returns
-------
pandas DataFrame with relevant columns for each event.
'''
# Get the data
if clusters_over_time is None:
df_clusters_over_time = self.track_cluster_similarity()
else:
df_clusters_over_time = self.track_cluster_similarity(clusters_over_time)
all_events = []
step_size = -1 if self.start_year > self.end_year else 1
for year in tqdm(range(self.start_year + 1, self.end_year + 1, step_size),
desc="Identifying events over each consecutive pair of years"):
merges = self.flag_merge_events(year)
splits = self.flag_split_events(year)
births = self.flag_birth_events(year)
deaths = self.flag_death_events(year)
events = pd.concat([
births,
splits,
merges,
deaths
],
ignore_index=True)
# Continuity events are the only ones left after these, but
# need knowledge of other flagged events to work properly
continuity = self.flag_continuity_events(year, df_clusters_over_time, events)
events = events.append(continuity, ignore_index=True)
if events.empty:
raise RuntimeError("No community events detected...")
elif self._missing_cluster_events(year, events):
raise RuntimeError("Some clusters were not accounted for")
all_events.append(events)
return pd.concat(all_events, ignore_index=True)
def _missing_cluster_events(self, year, events):
'''
Detects if any clusters have not been accounted for
in the events logging.
Parameters
----------
year : int
Year of interest.
events : pandas DataFrame
Results of flagging methods combined together
Returns
-------
bool
True if there were any missing clusters detected,
False otherwise.
'''
events_expanded = events.explode('C_t').explode('C_(t+1)')
C_t1 = self.similarity_scores[year].index
C_t2 = self.similarity_scores[year].columns
missing_C_t1 = C_t1[~C_t1.isin(events_expanded['C_t'])]
missing_C_t2 = C_t2[~C_t2.isin(events_expanded['C_(t+1)'])]
return not missing_C_t1.empty or not missing_C_t2.empty
def export_to_neo4j(self, clusters_over_time=None):
    '''
    Takes Knowledge nodes generated and pushes them to a target graph,
    along with edges between the member nodes and Knowledge nodes.

    Parameters
    ----------
    clusters_over_time : pandas DataFrame, optional
        This is equivalent to the output of
        DynamicCommunities.clusters_over_time().
        If not None, this will be used as the pre-computed result
        of running that method. If None, clusters_over_time() will
        be run.
        Useful for saving time if you already have the pre-computed
        result in memory, by default None

    Returns
    -------
    3-tuple
        (clusters_over_time DataFrame with node IDs and born/died flags
        assigned, the Nodes object for the LanguageCluster nodes, and the
        list of Relationships objects that were exported)
    '''
    if clusters_over_time is None:
        clusters_over_time = self.clusters_over_time()
    else:
        # Copy so the caller's pre-computed DataFrame isn't mutated
        clusters_over_time = clusters_over_time.copy()
    # Set start and end dates to be 1/1/YEAR and
    # 12/31/YEAR, resp., to allow for future
    # non-year-long time windows to be used
    clusters_over_time['start_date'] = pd.to_datetime(
        clusters_over_time['Year'], format="%Y")
    clusters_over_time['end_date'] = pd.to_datetime(
        clusters_over_time['start_date'].dt.date
        + relativedelta(years=1, days=-1))
    # Query Neo4j for Knowledge nodes, if any, and pull down
    # the max ID so we can increment off of that for our IDs
    query = """
    MATCH (n:LanguageCluster)
    RETURN MAX(n.id)
    """
    max_node_id = self.graph.cypher_query_to_dataframe(
        query, verbose=False).iloc[0, 0]
    # MAX() returns null (None here) when the graph has no
    # LanguageCluster nodes yet, in which case we start from 0
    if max_node_id is not None:
        starting_node_id = max_node_id + 1
    else:
        starting_node_id = 0
    # BUGFIX: the stop value must be offset by the starting ID.
    # range(starting_node_id, len(...)) is only correct when
    # starting_node_id == 0; otherwise it yields the wrong number of
    # IDs (length-mismatch on assignment) and would collide with
    # existing node IDs.
    clusters_over_time['id'] = range(
        starting_node_id,
        starting_node_id + len(clusters_over_time)
    )
    # Get the events records
    events_all_years = self.to_dataframe(clusters_over_time=clusters_over_time)
    # Birth or death: make sure proper node gets this label!
    # Drop nulls to make it so we retain index while still getting only
    # relevant rows
    birth_events = events_all_years[events_all_years['event_type'] == 'birth']
    birth_index = clusters_over_time.merge(
        birth_events[self._c_t2_column],
        how='left',
        left_on='C_t_label',
        right_on=self._c_t2_column
    ).dropna(subset=[self._c_t2_column]).index
    clusters_over_time['born'] = False
    clusters_over_time.loc[birth_index, 'born'] = True
    # Same left-merge + dropna trick for deaths: keeps the original
    # index while selecting only rows with a matching death event
    death_events = events_all_years[events_all_years['event_type'] == 'death']
    death_index = clusters_over_time.merge(
        death_events[self._c_t1_column],
        how='left',
        left_on='C_t_label',
        right_on=self._c_t1_column
    ).dropna(subset=[self._c_t1_column]).index
    clusters_over_time['died'] = False
    clusters_over_time.loc[death_index, 'died'] = True
    # TODO: figure out how to re-do this such that we are setting the
    # birth/death properties only, since LanguageCluster nodes should
    # already exist at this point in analysis
    properties = pd.DataFrame([
        ['ClusterLabel', 'label', np.nan],  # this will be specific to its year
        ['start_date', 'startDate', 'datetime'],
        ['end_date', 'endDate', 'datetime'],
        ['ClusterKeyphrases', 'keyphrases', 'string[]'],
        ['ClusterEmbedding', 'embedding', 'float[]'],
        ['born', 'born', 'boolean'],
        ['died', 'died', 'boolean']
    ], columns=['old', 'new', 'type'])
    # 'type' is in case we do CSV saving, but not necessary right now
    properties['type'] = np.nan
    knowledge_nodes = Nodes(
        parent_label='LanguageCluster',
        data=clusters_over_time,
        id_column='id',
        reference='knowledge',
        properties=properties,
        additional_labels=None
    )
    # Get the edges
    # Drop birth and death, no edges there
    events = events_all_years[
        ~events_all_years['event_type'].isin(['birth', 'death'])].copy()
    # Map event types to planned Neo4j relationship types
    # Also set all expansion and contraction events to just continuation
    events['event_type'] = events['event_type'].replace({
        'continuation': 'CONTINUES_AS',
        'expansion': 'CONTINUES_AS',
        'contraction': 'CONTINUES_AS',
        'split': 'SPLITS_INTO',
        'merge': 'MERGES_INTO'
    }).values
    # One row per (t1 cluster, t2 cluster) pair
    events = events.explode('C_t').explode('C_(t+1)')
    # Map Knowledge IDs to events
    # First t1
    events = events.merge(
        clusters_over_time[['id', 'C_t_label']],
        left_on=self._c_t1_column,
        right_on='C_t_label',
        how='left'
    ).drop(columns=['C_t_label']).rename(columns={'id': 'id_t1'})
    # Now t2
    events = events.merge(
        clusters_over_time[['id', 'C_t_label']],
        left_on=self._c_t2_column,
        right_on='C_t_label',
        how='left'
    ).drop(columns=['C_t_label']).rename(columns={'id': 'id_t2'})
    events['similarity_threshold'] = self.similarity_threshold
    events['similarity_scoring'] = self.similarity_scoring
    all_edges = []
    properties = pd.DataFrame([
        ['similarity_threshold', 'similarityThreshold', 'float'],
        ['similarity_scoring', 'similarityMethod', np.nan]
    ], columns=['old', 'new', 'type'])
    # One Relationships object per relationship type
    # (loop variable renamed from `type` so the builtin isn't shadowed)
    for rel_type in events['event_type'].unique():
        all_edges.append(Relationships(
            type=rel_type,
            data=events[events['event_type'] == rel_type],
            start_node=knowledge_nodes,
            id_column_start='id_t1',
            end_node=knowledge_nodes,
            id_column_end='id_t2',  # Need this so they don't both try to use 'id'
            properties=properties
        ))
    # Connect papers to their Knowledge node
    logger.debug("Creating Knowledge node constraint if it doesn't exist...")
    query = "CREATE CONSTRAINT clusters IF NOT EXISTS ON (n:LanguageCluster) ASSERT n.id IS UNIQUE"
    _ = self.graph.cypher_query_to_dataframe(query, verbose=False)
    # TODO: consider removing all of this, as this should be done upstream
    query = """
    MATCH (p:Publication)
    WHERE NOT (p)-[:IS_CLUSTER_MEMBER_OF]-(:LanguageCluster)
    AND p.clusterID IS NOT NULL
    AND toInteger(p.clusterID) > -1
    RETURN p.id AS id_paper, 'cluster' + p.clusterID + '_' + p.publicationDate.year AS C_t_label
    """
    papers_to_knowledge = self.graph.cypher_query_to_dataframe(query, verbose=False)
    # Merge C_t_label on to nodes we have to get IDs
    papers_to_knowledge = papers_to_knowledge.merge(
        clusters_over_time[['C_t_label', 'id']],
        how='left',
        on='C_t_label'
    ).rename(columns={'id': 'id_cluster'})
    papers = Nodes(
        parent_label='Publication',
        data=papers_to_knowledge,
        id_column='id_paper',
        reference='paper'
    )
    all_edges.append(Relationships(
        type='IS_CLUSTER_MEMBER_OF',
        data=papers_to_knowledge,
        start_node=papers,
        id_column_start='id_paper',
        end_node=knowledge_nodes,
        id_column_end='id_cluster',
        properties=None  # TODO: add info about clustering pipeline model version used
    ))
    # Export Knowledge nodes
    knowledge_nodes.export_to_neo4j(self.graph, batch_size=1_000)
    # Export all edges
    for edges in all_edges:
        edges.export_to_neo4j(self.graph, batch_size=1_000)
    return clusters_over_time, knowledge_nodes, all_edges
def process_data_for_event_modeling(
        graph,
        features_query,
        targets_query=None,
        time_variable='Year',
        sort_key_t1='KnowledgeID_t1',
        sort_key_t2='KnowledgeID_t2'
):
    '''
    Given existing Knowledge nodes in a Neo4j graph, pull down the dynamic
    events data via the edges between Knowledge nodes and vectorize the
    event counts for predictive modeling of event evolutions. Also merges
    the vectorized event information (AKA the modeling targets) with a custom
    feature query result to generate the full dataset and returns it all
    sorted by time (ascending) for easy time-aware train/test splitting.

    Parameters
    ----------
    graph : Neo4jConnectionHandler
        The graph providing the feature and target data
    features_query : str
        Cypher query to generate the input features for modeling.
        Must return a 'KnowledgeID' column to merge targets onto.
    targets_query : str, optional
        Cypher query to generate the prediction targets. If None, a
        default query over LanguageCluster nodes and their inter-cluster
        edges is used, by default None
    time_variable : str, optional
        Variable name used in `features_query` to describe the time window
        used for generating the cluster events, by default 'Year'
    sort_key_t1 : str, optional
        Column identifying the time-t cluster, by default 'KnowledgeID_t1'
    sort_key_t2 : str, optional
        Column identifying the time-t+1 cluster, by default 'KnowledgeID_t2'

    Returns
    -------
    pandas DataFrame
        DataFrame with identifier columns and feature + target columns.
        Target columns are called the same as the edge types between
        `Knowledge` nodes (e.g. 'SPLITS_INTO').
    '''
    if targets_query is None:
        targets_query = """
        MATCH (n:LanguageCluster)
        OPTIONAL MATCH (n)-[e]->(n2:LanguageCluster)
        RETURN
        DISTINCT 'cluster' + toString(n.label) + '_' + toString(n.startDate.year) AS cluster_t1,
        n.id AS KnowledgeID_t1,
        n.startDate.year AS Year1,
        n.born AS born_t1, n.died AS died_t1,
        type(e) AS event_type,
        'cluster' + toString(n2.label) + '_' + toString(n2.startDate.year) AS cluster_t2,
        n2.id AS KnowledgeID_t2,
        n2.startDate.year AS Year2,
        n2.born AS born_t2, n2.died AS died_t2
        ORDER BY cluster_t1 ASC
        """
    # Neo4j nulls come back as None; normalize to NaN for pandas ops
    df_events = graph.cypher_query_to_dataframe(targets_query)\
        .replace({None: np.nan})
    # Transform so we have a new row for each death event
    # Valid events include death at time t2 OR birth + death at time t1,
    # but death at time t1 alone is over-counting
    # As we don't default to assuming a cluster is born in the first year of
    # analysis, need to find min year of clusters so we can use that as a
    # parameter for tracking deaths in that year
    death_criteria = (
        (df_events['born_t1']) & (df_events['died_t1'])
    ) | (  # TODO: is this over-counting? We only want t1 stuff don't we?
        df_events['died_t2']
    ) | (
        (df_events['died_t1']) & (df_events['Year1'] == df_events['Year1'].min())
    )
    death_events = df_events[death_criteria].copy()
    death_events['event_type'] = 'DIES'
    # For clusters born in t1 and dead in t2, keep their clusterN_year labels
    # as they are
    # BUT for those that have an event associated with them,
    # need to generate new event records
    # with DIES type and cluster_t2 in cluster_t1 position
    death_events.loc[
        death_events['died_t2'] == True,
        sort_key_t1
    ] = death_events.loc[
        death_events['died_t2'] == True,
        sort_key_t2
    ]
    death_events[sort_key_t2] = np.nan
    # Make sure we drop duplicates t1 clusters, since split/merge events
    # can generate a bunch of extra death events for a single t2 cluster
    death_events.drop_duplicates(subset=[sort_key_t1], inplace=True)
    # Merge death events with normal events
    # BUGFIX: DataFrame.append() was removed in pandas 2.0; pd.concat()
    # is the supported, behaviorally-equivalent replacement
    df_events = pd.concat([df_events, death_events], ignore_index=True)\
        .drop(columns=['born_t1', 'died_t1', 'born_t2', 'died_t2'])
    # Get rid of event_types that are null,
    # as these are likely birth + death at t1 holdovers
    df_events.dropna(subset=['event_type'], inplace=True)
    ordered_columns = [
        'SPLITS_INTO',
        'MERGES_INTO',
        'CONTINUES_AS',
        'DIES'
    ]
    # Vectorize the records so we have one row per cluster at t1
    df_events = pd.get_dummies(
        df_events[[sort_key_t1, 'event_type']],
        columns=['event_type'],
        prefix='',
        prefix_sep=''
    ).groupby(sort_key_t1).sum()[ordered_columns]
    df_events.columns.name = 'events_at_t2'
    # Generate the features
    features = graph.cypher_query_to_dataframe(features_query)
    # Combine features and labels to make sure we align the data row-wise
    # properly. Should preserve sort order
    data = features.merge(
        df_events,
        how='right',
        left_on='KnowledgeID',
        right_on=sort_key_t1
    ).reset_index(drop=True)
    return data
def temporal_train_test_split(
        data,
        feature_columns,
        target_columns=None,
        train_fraction=0.6,
        time_variable='Year'
):
    '''
    Splits a time-stamped dataset into train and test partitions,
    landing as close to the requested training fraction as possible
    without ever splitting a single time window across the two sets.

    Note: the observations are *not* shuffled.

    Parameters
    ----------
    data : pandas DataFrame
        The features and targets to be split
    feature_columns : list of str
        The columns to be considered features
    target_columns : list of str, optional
        The column(s) to be considered the target(s).
        If None, will assume they are the 4 main predictive class types
        (splitting, merging, continuation, death), by default None
    train_fraction : float, optional
        Fraction of `data` that should be in the training set. Note that this
        value is not guaranteed, given the constraint that a time window may
        not be broken up, by default 0.6
    time_variable : str, optional
        Name of column in `data` to use for grouping by time window,
        by default 'Year'

    Returns
    -------
    4-tuple of pandas DataFrames of the form (X_train, X_test, y_train, y_test)
        The training and testing features (X) and targets (y)
    '''
    if target_columns is None:
        target_columns = [
            'SPLITS_INTO',
            'MERGES_INTO',
            'CONTINUES_AS',
            'DIES'
        ]
    # Share of total observations contributed by each time window
    window_counts = data.groupby(time_variable).count().iloc[:, 0]
    window_fractions = window_counts / window_counts.sum()
    # Latest year whose cumulative share still fits in the training set
    max_train_year = window_fractions.index[
        window_fractions.cumsum() <= train_fraction
    ].max()
    # First row index belonging to the test partition
    test_start_index = data[data[time_variable] > max_train_year].index.min()
    train_rows = slice(None, test_start_index - 1)
    test_rows = slice(test_start_index, None)
    X_train = data.loc[train_rows, feature_columns]
    y_train = data.loc[train_rows, target_columns]
    X_test = data.loc[test_rows, feature_columns]
    y_test = data.loc[test_rows, target_columns]
    # Report on how well the shapes match to our goals
    goal_train_percent = train_fraction * 100
    realized_train_percent = round(X_train.shape[0] / len(data) * 100, 2)
    logger.info("As a result of trying to stay as close to "
                f"{goal_train_percent}% of training data as possible without "
                "splitting data within a time window, "
                f"{realized_train_percent}% of the observations are in "
                "the training set")
    return X_train, X_test, y_train, y_test
def model_binary_events(
        X_train,
        X_test,
        y_train,
        y_test,
        beta=1.0,
        return_averaged_score=False,
        model=RandomForestClassifier,
        **model_kwargs
):
    '''
    Models cluster evolution events as binary 0/1 (AKA split(s) did/did not
    occur) vectors and reports on f1-scores, feature importances, and other
    useful model evaluation items.

    Parameters
    ----------
    X_train : numpy array or pandas DataFrame
        Features to train the model on
    X_test : numpy array or pandas DataFrame
        Held-out features for model testing
    y_train : numpy array or pandas DataFrame
        Target(s) to train the model on
    y_test : numpy array or pandas DataFrame
        Target(s) for model testing
    beta : float between 0.0 and inf, optional
        Beta value to use for f-beta score calculations, by default 1.0
    return_averaged_score : bool, optional
        If True, return a 2-tuple of form (model, f-beta-score),
        by default False
    model : sklearn-compatible estimator, optional
        Callable estimator for model training and testing,
        by default RandomForestClassifier

    Returns
    -------
    Same type as that of the `model` input or optionally 2-tuple as dictated
    by the `return_averaged_score` param
        Trained estimator object of the type defined in `model` parameter
        and optionally also a float f-beta-score
    '''
    # Any event count > 0 is treated as "the event occurred"
    y_train_binarized = (y_train > 0).astype(int).reset_index(drop=True)
    y_test_binarized = (y_test > 0).astype(int).reset_index(drop=True)
    # Fit the scaler on training data only to avoid test-set leakage
    scaler = StandardScaler()
    X_train_scaled = pd.DataFrame(
        scaler.fit_transform(X_train),
        columns=X_train.columns
    )
    X_test_scaled = pd.DataFrame(
        scaler.transform(X_test),
        columns=X_test.columns
    )
    clf = model(**model_kwargs)
    clf.fit(X_train_scaled, y_train_binarized)
    predictions = pd.DataFrame(clf.predict(X_test_scaled), columns=y_test.columns)
    logger.info(f"y_train events summary: \n{y_train_binarized.sum()}\n")
    # BUGFIX: removed a stray ']' that was corrupting this log message
    logger.info(f"y_test events summary: \n{y_test_binarized.sum()}\n")
    logger.info(f"predictions events summary: \n{predictions.sum()}\n")
    # Always report plain f1 (beta=1.0) as a consistent baseline...
    f1_score = fbeta_score(y_test_binarized, predictions, beta=1.0, average='micro')
    f1_scores = pd.DataFrame([
        fbeta_score(y_test_binarized, predictions, beta=1.0, average=None)
    ], columns=y_test.columns)
    logger.info(f"f1_scores: \n{f1_scores}")
    logger.info(f"Micro-average f1-score = {f1_score}")
    # ...plus the caller-requested f-beta score
    f_beta_score = fbeta_score(
        y_test_binarized, predictions, beta=beta, average='micro'
    )
    f_beta_scores = pd.DataFrame([
        fbeta_score(y_test_binarized, predictions, beta=beta, average=None)
    ], columns=y_test.columns)
    logger.info(f"f_{beta}-scores: \n{f_beta_scores}")
    logger.info(f"Micro-average f_{beta}-score = {f_beta_score}")
    # NOTE(review): feature_names_in_/feature_importances_ assume a
    # tree-style estimator fit on a DataFrame — confirm for custom `model`s
    feature_importances = pd.DataFrame({
        'name': clf.feature_names_in_,
        'importance': clf.feature_importances_
    })
    logger.info(f"feature_importances: \n{feature_importances}")
    # How often were these predictions off from the logic we know?
    default_target_columns = [
        'SPLITS_INTO',
        'MERGES_INTO',
        'CONTINUES_AS',
        'DIES'
    ]
    # Make sure we have all the columns needed
    if predictions.columns.isin(default_target_columns).sum() == len(default_target_columns):
        # Pairs of events that shouldn't co-occur for one cluster
        split_cont = ((predictions['SPLITS_INTO'] > 0) & (predictions['CONTINUES_AS'] > 0)).sum()
        split_die = ((predictions['SPLITS_INTO'] > 0) & (predictions['DIES'] > 0)).sum()
        merge_cont = ((predictions['MERGES_INTO'] > 0) & (predictions['CONTINUES_AS'] > 0)).sum()
        merge_die = ((predictions['MERGES_INTO'] > 0) & (predictions['DIES'] > 0)).sum()
        cont_die = ((predictions['CONTINUES_AS'] > 0) & (predictions['DIES'] > 0)).sum()
        logic_violations = pd.DataFrame({
            "split_continue": [split_cont],
            "split_die": [split_die],
            "merge_continue": [merge_cont],
            "merge_die": [merge_die],
            "continue_die": [cont_die]
        })
        logger.info("Counts of events that shouldn't co-occur but are predicted "
                    f"to do so anyway: \n\n{logic_violations}")
    else:
        logger.info("Skipping logical event violations checking as not all "
                    "expected target columns are present")
    if return_averaged_score:
        return clf, f_beta_score
    else:
        return clf
def model_continuous_events(
        X_train,
        X_test,
        y_train,
        y_test,
        return_averaged_score=False,
        model=RandomForestRegressor,
        **model_kwargs
):
    '''
    Models cluster evolution events as count vectors
    (AKA there were 10 split(s)) and reports on RMSEs, feature importances,
    and other useful model evaluation items.

    Parameters
    ----------
    X_train : numpy array or pandas DataFrame
        Features to train the model on
    X_test : numpy array or pandas DataFrame
        Held-out features for model testing
    y_train : numpy array or pandas DataFrame
        Target(s) to train the model on
    y_test : numpy array or pandas DataFrame
        Target(s) for model testing
    return_averaged_score : bool, optional
        If True, return a 2-tuple of form (model, variance-averaged R^2),
        by default False
    model : sklearn-compatible estimator, optional
        Callable estimator for model training and testing,
        by default RandomForestRegressor

    Returns
    -------
    RegressorChain or 2-tuple
        Either returns the trained RegressorChain wrapping the `model`
        estimator or returns a tuple as defined by the
        `return_averaged_score` parameter.
    '''
    # Fit the scaler on training data only to avoid test-set leakage
    scaler = StandardScaler()
    X_train_scaled = pd.DataFrame(
        scaler.fit_transform(X_train),
        columns=X_train.columns
    )
    X_test_scaled = pd.DataFrame(
        scaler.transform(X_test),
        columns=X_test.columns
    )
    regressor = model(**model_kwargs)
    # Chaining allows us to account for correlations between targets
    chained_regressor = RegressorChain(regressor)
    chained_regressor.fit(X_train_scaled, y_train)
    predictions = pd.DataFrame(chained_regressor.predict(X_test_scaled), columns=y_test.columns)
    logger.info(f"y_train events summary: \n{y_train.describe()}\n")
    # BUGFIX: log message ended with ']n' where '\n' was intended
    logger.info(f"y_test events summary: \n{y_test.describe()}\n")
    logger.info(f"predictions events summary: \n{predictions.describe()}\n")
    rmse = mean_squared_error(
        y_test,
        predictions,
        multioutput='uniform_average',
        squared=False
    )
    rmses = pd.DataFrame(
        [
            mean_squared_error(
                y_test,
                predictions,
                multioutput='raw_values',
                squared=False
            )
        ],
        columns=y_test.columns
    )
    logger.info(f"RMSEs across targets: \n{rmses}\n")
    logger.info(f"Uniform-average RMSE = {rmse}")
    r2_uniform = r2_score(
        y_test,
        predictions,
        multioutput='uniform_average'
    )
    r2_variance_weighted = r2_score(
        y_test,
        predictions,
        multioutput='variance_weighted'
    )
    r2s = pd.DataFrame(
        [
            r2_score(
                y_test,
                predictions,
                multioutput='raw_values'
            )
        ],
        columns=y_test.columns
    )
    logger.info(f"R^2s across targets: \n{r2s}\n")
    logger.info(f"Uniform-average R^2 = {r2_uniform}")
    logger.info(f"Target-variance-weighted-average R^2 = {r2_variance_weighted}")
    if return_averaged_score:
        return chained_regressor, r2_variance_weighted
    else:
        return chained_regressor
def predict_multilabel_proba_formatted(model, features_test):
    '''
    Converts the native shape of multi-label predict_proba() output from
    sklearn into one that is a tad more intuitive.

    Parameters
    ----------
    model : scikit-learn estimator
        Trained estimator that must have the `predict_proba()` method
        and an `n_outputs_` attribute.
    features_test : numpy array or pandas DataFrame
        Held-out input features for the model to predict on. Must have the
        same number, scaling style, etc. of the features used for model
        training.

    Returns
    -------
    numpy array of shape (n_samples, n_labels, n_classes)
        Provides probability values for every class (e.g. in the case of
        binary classification, 0/1 AKA n_classes = 2) and for every
        label/output in a multi-label modeling setup.
        So, for a dataset with 100 observations, modeling a binary outcome,
        across 3 outputs (e.g. wind speed, temperature, and humidity), the
        shape of the output would be (100, 3, 2).
    '''
    if model.n_outputs_ < 2:
        # Single-output models return one array; nothing to reshape.
        # BUGFIX: Logger.warn() is a deprecated alias of Logger.warning()
        logger.warning(f"This model only outputs {model.n_outputs_} labels, so "
                       "just returning the raw output of predict_proba()...")
        return model.predict_proba(features_test)
    else:
        # sklearn returns a list of (n_samples, n_classes) arrays, one per
        # label; stack them and swap axes 0/1 to put samples first
        return np.swapaxes(np.array(model.predict_proba(features_test)), 0, 1)
"vespid.data.neo4j_tools.Relationships",
"dateutil.relativedelta.relativedelta",
"vespid.models.static_communities.cosine_similarities",
"vespid.models.static_communities.get_cluster_data",
"vespid.setup_logger",
"vespid.models.static_communities.jaccard_coefficient",
"sklearn.multioutput.RegressorChain... | [((642, 664), 'vespid.setup_logger', 'setup_logger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'from vespid import setup_logger\n'), ((43773, 43789), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (43787, 43789), False, 'from sklearn.preprocessing import StandardScaler\n'), ((44420, 44489), 'sklearn.metrics.fbeta_score', 'fbeta_score', (['y_test_binarized', 'predictions'], {'beta': '(1.0)', 'average': '"""micro"""'}), "(y_test_binarized, predictions, beta=1.0, average='micro')\n", (44431, 44489), False, 'from sklearn.metrics import fbeta_score, mean_squared_error, r2_score\n'), ((44756, 44826), 'sklearn.metrics.fbeta_score', 'fbeta_score', (['y_test_binarized', 'predictions'], {'beta': 'beta', 'average': '"""micro"""'}), "(y_test_binarized, predictions, beta=beta, average='micro')\n", (44767, 44826), False, 'from sklearn.metrics import fbeta_score, mean_squared_error, r2_score\n'), ((45139, 45229), 'pandas.DataFrame', 'pd.DataFrame', (["{'name': clf.feature_names_in_, 'importance': clf.feature_importances_}"], {}), "({'name': clf.feature_names_in_, 'importance': clf.\n feature_importances_})\n", (45151, 45229), True, 'import pandas as pd\n'), ((48056, 48072), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (48070, 48072), False, 'from sklearn.preprocessing import StandardScaler\n'), ((48433, 48458), 'sklearn.multioutput.RegressorChain', 'RegressorChain', (['regressor'], {}), '(regressor)\n', (48447, 48458), False, 'from sklearn.multioutput import RegressorChain\n'), ((48841, 48930), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'predictions'], {'multioutput': '"""uniform_average"""', 'squared': '(False)'}), "(y_test, predictions, multioutput='uniform_average',\n squared=False)\n", (48859, 48930), False, 'from sklearn.metrics import fbeta_score, mean_squared_error, r2_score\n'), ((49356, 49416), 'sklearn.metrics.r2_score', 
'r2_score', (['y_test', 'predictions'], {'multioutput': '"""uniform_average"""'}), "(y_test, predictions, multioutput='uniform_average')\n", (49364, 49416), False, 'from sklearn.metrics import fbeta_score, mean_squared_error, r2_score\n'), ((49476, 49538), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'predictions'], {'multioutput': '"""variance_weighted"""'}), "(y_test, predictions, multioutput='variance_weighted')\n", (49484, 49538), False, 'from sklearn.metrics import fbeta_score, mean_squared_error, r2_score\n'), ((6064, 6097), 'pandas.concat', 'pd.concat', (['dfs'], {'ignore_index': '(True)'}), '(dfs, ignore_index=True)\n', (6073, 6097), True, 'import pandas as pd\n'), ((12685, 12723), 'numpy.any', 'np.any', (['(num_matching_past_clusters > 1)'], {}), '(num_matching_past_clusters > 1)\n', (12691, 12723), True, 'import numpy as np\n'), ((14424, 14465), 'numpy.any', 'np.any', (['(num_matching_current_clusters > 1)'], {}), '(num_matching_current_clusters > 1)\n', (14430, 14465), True, 'import numpy as np\n'), ((16526, 16567), 'numpy.any', 'np.any', (['(num_matching_current_clusters < 1)'], {}), '(num_matching_current_clusters < 1)\n', (16532, 16567), True, 'import numpy as np\n'), ((18169, 18210), 'numpy.any', 'np.any', (['(num_matching_current_clusters < 1)'], {}), '(num_matching_current_clusters < 1)\n', (18175, 18210), True, 'import numpy as np\n'), ((25596, 25636), 'pandas.concat', 'pd.concat', (['all_events'], {'ignore_index': '(True)'}), '(all_events, ignore_index=True)\n', (25605, 25636), True, 'import pandas as pd\n'), ((27603, 27658), 'pandas.to_datetime', 'pd.to_datetime', (["clusters_over_time['Year']"], {'format': '"""%Y"""'}), "(clusters_over_time['Year'], format='%Y')\n", (27617, 27658), True, 'import pandas as pd\n'), ((29820, 30149), 'pandas.DataFrame', 'pd.DataFrame', (["[['ClusterLabel', 'label', np.nan], ['start_date', 'startDate', 'datetime'],\n ['end_date', 'endDate', 'datetime'], ['ClusterKeyphrases', 'keyphrases',\n 'string[]'], 
['ClusterEmbedding', 'embedding', 'float[]'], ['born',\n 'born', 'boolean'], ['died', 'died', 'boolean']]"], {'columns': "['old', 'new', 'type']"}), "([['ClusterLabel', 'label', np.nan], ['start_date', 'startDate',\n 'datetime'], ['end_date', 'endDate', 'datetime'], ['ClusterKeyphrases',\n 'keyphrases', 'string[]'], ['ClusterEmbedding', 'embedding', 'float[]'],\n ['born', 'born', 'boolean'], ['died', 'died', 'boolean']], columns=[\n 'old', 'new', 'type'])\n", (29832, 30149), True, 'import pandas as pd\n'), ((30409, 30562), 'vespid.data.neo4j_tools.Nodes', 'Nodes', ([], {'parent_label': '"""LanguageCluster"""', 'data': 'clusters_over_time', 'id_column': '"""id"""', 'reference': '"""knowledge"""', 'properties': 'properties', 'additional_labels': 'None'}), "(parent_label='LanguageCluster', data=clusters_over_time, id_column=\n 'id', reference='knowledge', properties=properties, additional_labels=None)\n", (30414, 30562), False, 'from vespid.data.neo4j_tools import Nodes, Relationships\n'), ((32079, 32244), 'pandas.DataFrame', 'pd.DataFrame', (["[['similarity_threshold', 'similarityThreshold', 'float'], [\n 'similarity_scoring', 'similarityMethod', np.nan]]"], {'columns': "['old', 'new', 'type']"}), "([['similarity_threshold', 'similarityThreshold', 'float'], [\n 'similarity_scoring', 'similarityMethod', np.nan]], columns=['old',\n 'new', 'type'])\n", (32091, 32244), True, 'import pandas as pd\n'), ((33840, 33945), 'vespid.data.neo4j_tools.Nodes', 'Nodes', ([], {'parent_label': '"""Publication"""', 'data': 'papers_to_knowledge', 'id_column': '"""id_paper"""', 'reference': '"""paper"""'}), "(parent_label='Publication', data=papers_to_knowledge, id_column=\n 'id_paper', reference='paper')\n", (33845, 33945), False, 'from vespid.data.neo4j_tools import Nodes, Relationships\n'), ((46143, 46309), 'pandas.DataFrame', 'pd.DataFrame', (["{'split_continue': [split_cont], 'split_die': [split_die], 'merge_continue':\n [merge_cont], 'merge_die': [merge_die], 'continue_die': 
[cont_die]}"], {}), "({'split_continue': [split_cont], 'split_die': [split_die],\n 'merge_continue': [merge_cont], 'merge_die': [merge_die],\n 'continue_die': [cont_die]})\n", (46155, 46309), True, 'import pandas as pd\n'), ((13323, 13337), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13335, 13337), True, 'import pandas as pd\n'), ((15204, 15218), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15216, 15218), True, 'import pandas as pd\n'), ((16590, 16712), 'pandas.DataFrame', 'pd.DataFrame', (['{self._c_t1_column: np.nan, self._c_t2_column: resulting_birth_clusters,\n self._event_column: self.BIRTH}'], {}), '({self._c_t1_column: np.nan, self._c_t2_column:\n resulting_birth_clusters, self._event_column: self.BIRTH})\n', (16602, 16712), True, 'import pandas as pd\n'), ((16852, 16866), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16864, 16866), True, 'import pandas as pd\n'), ((18233, 18354), 'pandas.DataFrame', 'pd.DataFrame', (['{self._c_t1_column: resulting_dead_clusters, self._c_t2_column: np.nan,\n self._event_column: self.DEATH}'], {}), '({self._c_t1_column: resulting_dead_clusters, self._c_t2_column:\n np.nan, self._event_column: self.DEATH})\n', (18245, 18354), True, 'import pandas as pd\n'), ((18494, 18508), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (18506, 18508), True, 'import pandas as pd\n'), ((20093, 20130), 'numpy.any', 'np.any', (['(num_matching_t1_clusters == 1)'], {}), '(num_matching_t1_clusters == 1)\n', (20099, 20130), True, 'import numpy as np\n'), ((20145, 20182), 'numpy.any', 'np.any', (['(num_matching_t2_clusters == 1)'], {}), '(num_matching_t2_clusters == 1)\n', (20151, 20182), True, 'import numpy as np\n'), ((23304, 23318), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (23316, 23318), True, 'import pandas as pd\n'), ((24803, 24865), 'pandas.concat', 'pd.concat', (['[births, splits, merges, deaths]'], {'ignore_index': '(True)'}), '([births, splits, merges, deaths], ignore_index=True)\n', (24812, 
24865), True, 'import pandas as pd\n'), ((34033, 34227), 'vespid.data.neo4j_tools.Relationships', 'Relationships', ([], {'type': '"""IS_CLUSTER_MEMBER_OF"""', 'data': 'papers_to_knowledge', 'start_node': 'papers', 'id_column_start': '"""id_paper"""', 'end_node': 'knowledge_nodes', 'id_column_end': '"""id_cluster"""', 'properties': 'None'}), "(type='IS_CLUSTER_MEMBER_OF', data=papers_to_knowledge,\n start_node=papers, id_column_start='id_paper', end_node=knowledge_nodes,\n id_column_end='id_cluster', properties=None)\n", (34046, 34227), False, 'from vespid.data.neo4j_tools import Nodes, Relationships\n'), ((44529, 44595), 'sklearn.metrics.fbeta_score', 'fbeta_score', (['y_test_binarized', 'predictions'], {'beta': '(1.0)', 'average': 'None'}), '(y_test_binarized, predictions, beta=1.0, average=None)\n', (44540, 44595), False, 'from sklearn.metrics import fbeta_score, mean_squared_error, r2_score\n'), ((44884, 44951), 'sklearn.metrics.fbeta_score', 'fbeta_score', (['y_test_binarized', 'predictions'], {'beta': 'beta', 'average': 'None'}), '(y_test_binarized, predictions, beta=beta, average=None)\n', (44895, 44951), False, 'from sklearn.metrics import fbeta_score, mean_squared_error, r2_score\n'), ((49016, 49101), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'predictions'], {'multioutput': '"""raw_values"""', 'squared': '(False)'}), "(y_test, predictions, multioutput='raw_values', squared=False\n )\n", (49034, 49101), False, 'from sklearn.metrics import fbeta_score, mean_squared_error, r2_score\n'), ((49617, 49672), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'predictions'], {'multioutput': '"""raw_values"""'}), "(y_test, predictions, multioutput='raw_values')\n", (49625, 49672), False, 'from sklearn.metrics import fbeta_score, mean_squared_error, r2_score\n'), ((8555, 8621), 'pandas.DataFrame', 'pd.DataFrame', (["{year: df_t1['Papers'], (year + 1): df_t2['Papers']}"], {}), "({year: df_t1['Papers'], (year + 1): df_t2['Papers']})\n", 
(8567, 8621), True, 'import pandas as pd\n'), ((9105, 9160), 'numpy.full', 'np.full', (['(df_years.shape[0], df_years.shape[0])', 'np.nan'], {}), '((df_years.shape[0], df_years.shape[0]), np.nan)\n', (9112, 9160), True, 'import numpy as np\n'), ((20903, 20917), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (20915, 20917), True, 'import pandas as pd\n'), ((27758, 27789), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(1)', 'days': '(-1)'}), '(years=1, days=-1)\n', (27771, 27789), False, 'from dateutil.relativedelta import relativedelta\n'), ((32351, 32552), 'vespid.data.neo4j_tools.Relationships', 'Relationships', ([], {'type': 'type', 'data': "events[events['event_type'] == type]", 'start_node': 'knowledge_nodes', 'id_column_start': '"""id_t1"""', 'end_node': 'knowledge_nodes', 'id_column_end': '"""id_t2"""', 'properties': 'properties'}), "(type=type, data=events[events['event_type'] == type],\n start_node=knowledge_nodes, id_column_start='id_t1', end_node=\n knowledge_nodes, id_column_end='id_t2', properties=properties)\n", (32364, 32552), False, 'from vespid.data.neo4j_tools import Nodes, Relationships\n'), ((10718, 10745), 'vespid.models.static_communities.cosine_similarities', 'cosine_similarities', (['t1', 't2'], {}), '(t1, t2)\n', (10737, 10745), False, 'from vespid.models.static_communities import cosine_similarities\n'), ((5890, 5924), 'vespid.models.static_communities.get_cluster_data', 'get_cluster_data', (['year', 'self.graph'], {}), '(year, self.graph)\n', (5906, 5924), False, 'from vespid.models.static_communities import get_cluster_data\n'), ((38865, 38974), 'pandas.get_dummies', 'pd.get_dummies', (["df_events[[sort_key_t1, 'event_type']]"], {'columns': "['event_type']", 'prefix': '""""""', 'prefix_sep': '""""""'}), "(df_events[[sort_key_t1, 'event_type']], columns=[\n 'event_type'], prefix='', prefix_sep='')\n", (38879, 38974), True, 'import pandas as pd\n'), ((9667, 9715), 
'vespid.models.static_communities.jaccard_coefficient', 'jaccard_coefficient', (['papers_past', 'papers_current'], {}), '(papers_past, papers_current)\n', (9686, 9715), False, 'from vespid.models.static_communities import jaccard_coefficient\n'), ((12980, 13009), 'pandas.DataFrame', 'pd.DataFrame', (['[merge_results]'], {}), '([merge_results])\n', (12992, 13009), True, 'import pandas as pd\n'), ((14861, 14890), 'pandas.DataFrame', 'pd.DataFrame', (['[merge_results]'], {}), '([merge_results])\n', (14873, 14890), True, 'import pandas as pd\n'), ((21448, 21482), 'pandas.DataFrame', 'pd.DataFrame', (['[continuity_mapping]'], {}), '([continuity_mapping])\n', (21460, 21482), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
"""Investigates the distribution for link Markov models.
The link Markov model is based on the linking behavior of a given
linkograph. Since the links in a linkograph are determined by
ontology, the transition model is depending on the ontology. This
script creates a sequence of random Markov models and considers how
the link markov models might be used to profile them.
"""
import argparse # For command line parsing.
import numpy as np # For matrices.
import time # For getting the time to use as a random seed.
import math # For modf.
import json # For manipulating json files.
import matplotlib.pyplot as plt # For graphing.
import markov.Model as markel
def genSingleOntologyStats(ontNext, ontLink, minLinkoSize,
maxLinkoSize, stepLinkoSize, modelNum,
runNum, precision=2, seeds=None):
"""Generate the stats on link models for a given ontology.
inputs:
ontNext: ontology used to generate Markov model that create the
next state.
ontLink: ontology used for constructing linkographs.
minLinkoSize: the minimun number of nodes in the linkographs to
consider.
maxLinkoSize: the maximum number of nodes in the linkographs to
consider. Note that the max is not included to match pythons
convertions on lists and ranges.
stepLinkoSize: the step size between minLinkoSize to maxLinkoSize
for the number of linkographs to Consider.
modelNum: the number of models.
runNum: the number of linkographs to consider for each linkograph
size.
precision: the number of decimals places to use for the Markov
models.
seeds: a list of seeds to use for the generated next Markov
models. The size of the list should be the same as the number of
runs.
output:
a modelNum x number_of_linkographs array that records the
Frobenius norm of the average Markov model for each model and each
linkograph size. The (i, j) entry uses i-th model and the n-th
size linkograph, constructs runNum number of linkographs of that
size, finds the average link Markov model, and records the norm of
this average.
"""
linkoSizes = range(minLinkoSize, maxLinkoSize, stepLinkoSize)
ontSize = len(ontNext)
absClasses = list(ontNext.keys())
absClasses.sort()
results = np.zeros((modelNum, len(linkoSizes)))
if seeds is None:
seeds = [time.time()*i for i in range(modelNum)]
models = []
# Create the generating models
for i in range(modelNum):
m = markel.genModelFromOntology(ontology=ontNext,
precision=2,
seed=seeds[i])
# Storing the model and the current state
models.append(m)
# For each size linkograph, generate the runNum links and
# caculate the needed statistics.
for size in linkoSizes:
print('size: {0}'.format(size))
for modelIndex, m in enumerate(models):
linkModels = np.zeros((ontSize, ontSize, runNum))
for i in range(runNum):
# Randomize the initial state
m.state = m.random.randint(1, len(m.absClasses)) - 1
linko = m.genLinkograph(size, ontology=ontLink)
newModel = markel.genModelFromLinko(linko,
precision=precision,
ontology=None,
seed=None,
method='link_predictor',
linkNum=1)
linkModels[:, :, i] = newModel.tMatrix
# Find the matrix norm for the average.
index = (size - minLinkoSize)//stepLinkoSize
norm = np.linalg.norm(np.mean(linkModels, axis=-1),
ord='fro')
results[modelIndex][index] = norm
return results
def genLinkMarkov(linkoSize, model, precision=2, timeSize=7):
"""Generates a link Markov from model generated linkograph.
inputs:
linkoSize: the size of linkograph to base the link Markov model
off of.
model: the Markov model to use. Note that the model must have an
ontology in order to generate the linkgraphs.
precicision: the number of decimal places to use for the
link Markov model.
timeSize = the size of integers to use for seeding the random
number generator of the returned Markov model.
output:
A link Markov model based off a linkoSize linkograph generated by
the provided Markov model.
"""
seed = int(math.modf(time.time())[0]*(10**timeSize))
# generate the linkograph
linko = model.genLinkograph(linkoSize)
# create the link model
model = genModelFromLinko(linko, precision=precision,
ontology=model.ontology, seed=seed,
method='link_predictor', linkNum=1)
return model
if __name__ == '__main__':
info = "Investigates the distribution of link markov models."
parser = argparse.ArgumentParser(description=info)
parser.add_argument('ontNext', metavar='ONTOLOGY_NEXT.json',
nargs=1,
help='the ontology file for producing.')
parser.add_argument('ontLink', metavar='ONTOLOGY_LINK.json',
nargs=1,
help='the ontology file for learning.')
parser.add_argument('-m', '--minimum', type=int, default = 2,
help='minimum size of linkographs.')
parser.add_argument('-M', '--maximum', type=int, default = 100,
help='maximum size of linkographs.')
parser.add_argument('-s', '--step', type=int, default = 1,
help='step size of linkographs.')
parser.add_argument('-n', '--modelNum', type=int, default = 100,
help='number of generating models.')
parser.add_argument('-r', '--runs', type=int, default = 100,
help='the number of runs.')
parser.add_argument('-p', '--precision', type=int, default = 2,
help='the number of runs.')
args = parser.parse_args()
# Extract the ontology
ontNext = None
with open(args.ontNext[0], 'r') as ontNextFile:
ontNext = json.load(ontNextFile)
ontLink = None
with open(args.ontLink[0], 'r') as ontLinkFile:
ontLink = json.load(ontLinkFile)
seed = int(math.modf(time.time())[0]*(10**7))
results = genSingleOntologyStats(ontNext=ontNext,
ontLink=ontLink,
minLinkoSize=args.minimum,
maxLinkoSize=args.maximum,
stepLinkoSize=args.step,
modelNum=args.modelNum,
runNum=args.runs,
precision=args.precision)
absClasses = list(ontNext.keys())
absClasses.sort()
linkoSizes = range(args.minimum, args.maximum, args.step)
plt.figure(1)
for norms in results:
plt.plot(linkoSizes, norms)
plt.xlabel('Size of Linkographs')
plt.ylabel('Matrix norms')
plt.title('Matrix Norms for Difference Markov Models')
plt.show()
| [
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"markov.Model.genModelFromOntology",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"markov.Model.genModelFromLinko",
"matplotlib.pyplot.figure",
"numpy.zeros",
"json.load",
"matplotlib.pyplot.title",
"time.time",
"m... | [((5197, 5238), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'info'}), '(description=info)\n', (5220, 5238), False, 'import argparse\n'), ((7254, 7267), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (7264, 7267), True, 'import matplotlib.pyplot as plt\n'), ((7336, 7369), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Size of Linkographs"""'], {}), "('Size of Linkographs')\n", (7346, 7369), True, 'import matplotlib.pyplot as plt\n'), ((7374, 7400), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Matrix norms"""'], {}), "('Matrix norms')\n", (7384, 7400), True, 'import matplotlib.pyplot as plt\n'), ((7405, 7459), 'matplotlib.pyplot.title', 'plt.title', (['"""Matrix Norms for Difference Markov Models"""'], {}), "('Matrix Norms for Difference Markov Models')\n", (7414, 7459), True, 'import matplotlib.pyplot as plt\n'), ((7465, 7475), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7473, 7475), True, 'import matplotlib.pyplot as plt\n'), ((2572, 2645), 'markov.Model.genModelFromOntology', 'markel.genModelFromOntology', ([], {'ontology': 'ontNext', 'precision': '(2)', 'seed': 'seeds[i]'}), '(ontology=ontNext, precision=2, seed=seeds[i])\n', (2599, 2645), True, 'import markov.Model as markel\n'), ((6460, 6482), 'json.load', 'json.load', (['ontNextFile'], {}), '(ontNextFile)\n', (6469, 6482), False, 'import json\n'), ((6573, 6595), 'json.load', 'json.load', (['ontLinkFile'], {}), '(ontLinkFile)\n', (6582, 6595), False, 'import json\n'), ((7303, 7330), 'matplotlib.pyplot.plot', 'plt.plot', (['linkoSizes', 'norms'], {}), '(linkoSizes, norms)\n', (7311, 7330), True, 'import matplotlib.pyplot as plt\n'), ((3047, 3083), 'numpy.zeros', 'np.zeros', (['(ontSize, ontSize, runNum)'], {}), '((ontSize, ontSize, runNum))\n', (3055, 3083), True, 'import numpy as np\n'), ((2438, 2449), 'time.time', 'time.time', ([], {}), '()\n', (2447, 2449), False, 'import time\n'), ((3330, 3449), 'markov.Model.genModelFromLinko', 
'markel.genModelFromLinko', (['linko'], {'precision': 'precision', 'ontology': 'None', 'seed': 'None', 'method': '"""link_predictor"""', 'linkNum': '(1)'}), "(linko, precision=precision, ontology=None, seed=\n None, method='link_predictor', linkNum=1)\n", (3354, 3449), True, 'import markov.Model as markel\n'), ((3905, 3933), 'numpy.mean', 'np.mean', (['linkModels'], {'axis': '(-1)'}), '(linkModels, axis=-1)\n', (3912, 3933), True, 'import numpy as np\n'), ((4741, 4752), 'time.time', 'time.time', ([], {}), '()\n', (4750, 4752), False, 'import time\n'), ((6622, 6633), 'time.time', 'time.time', ([], {}), '()\n', (6631, 6633), False, 'import time\n')] |
import pytest
from py_scripts.singleton_calling_utils import get_good_idxs, get_singleton_idx, levenshtein, reformat_genotypes, normalize_var
import numpy as np
def test_reformat_genotypes(genotype_array):
assert np.array_equal(reformat_genotypes(genotype_array), np.array([0, 1, 1, 2]))
def test_reformat_genotypes(genotype_array_with_unk):
assert np.array_equal(reformat_genotypes(genotype_array_with_unk), np.array([-1, 1, -1, 2]))
@pytest.mark.parametrize('invara,invarb,outvar', [
('TCC', 'CCC', ('T', 'C')),
('CAGGGGG', 'CTGGGGG', ('A', 'T')),
('GAT', 'GAT', ('N', 'N')),
])
def test_normalize_var(invara, invarb, outvar):
assert normalize_var(invara, invarb) == outvar
@pytest.mark.parametrize('gt,dp,gq,idxs', [
(
np.array([0, 0, 2, 0]),
np.array([20, 4, 99, 33]),
np.array([15, 9, 22, 4]),
np.array([0, 2]),
),
(
np.array([0, -1, -1, 1]),
np.array([20, -1, -1, 47]),
np.array([15, -1, -1, 23]),
np.array([0, 3]),
),
(
np.array([0, 0, 0, 0]),
np.array([55, 99, 10, 34]),
np.array([5, 9, 23, 56]),
np.array([3]),
),
])
def test_get_good_idxs(gt, dp, gq, idxs):
assert np.array_equal(get_good_idxs(gt, dp, gq), idxs)
@pytest.mark.parametrize('seqa,seqb,dist', [
('AAA', 'ATA', 1),
('ATTTT', 'CAAAA', 5),
])
def test_levenshtein(seqa, seqb, dist):
assert levenshtein(seqa, seqb) == dist
@pytest.mark.parametrize('gt,rd,ad,idx', [
(
np.array([0, 0, 2, 0]),
np.array([0, 0, 12, 0]),
np.array([15, 9, 0, 18]),
2,
),
(
np.array([0, 0, 2, 1]),
np.array([0, 0, 12, 4]),
np.array([15, 9, 0, 8]),
None,
),
(
np.array([0, 0, 0, 1]),
np.array([0, 0, 6, 10]),
np.array([15, 9, 1, 0]),
3,
),
])
def test_get_singleton_idx(gt, rd, ad, idx):
assert get_singleton_idx(gt, rd, ad) == idx | [
"py_scripts.singleton_calling_utils.normalize_var",
"py_scripts.singleton_calling_utils.reformat_genotypes",
"py_scripts.singleton_calling_utils.levenshtein",
"pytest.mark.parametrize",
"numpy.array",
"py_scripts.singleton_calling_utils.get_singleton_idx",
"py_scripts.singleton_calling_utils.get_good_id... | [((449, 594), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invara,invarb,outvar"""', "[('TCC', 'CCC', ('T', 'C')), ('CAGGGGG', 'CTGGGGG', ('A', 'T')), ('GAT',\n 'GAT', ('N', 'N'))]"], {}), "('invara,invarb,outvar', [('TCC', 'CCC', ('T', 'C')),\n ('CAGGGGG', 'CTGGGGG', ('A', 'T')), ('GAT', 'GAT', ('N', 'N'))])\n", (472, 594), False, 'import pytest\n'), ((1281, 1370), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seqa,seqb,dist"""', "[('AAA', 'ATA', 1), ('ATTTT', 'CAAAA', 5)]"], {}), "('seqa,seqb,dist', [('AAA', 'ATA', 1), ('ATTTT',\n 'CAAAA', 5)])\n", (1304, 1370), False, 'import pytest\n'), ((234, 268), 'py_scripts.singleton_calling_utils.reformat_genotypes', 'reformat_genotypes', (['genotype_array'], {}), '(genotype_array)\n', (252, 268), False, 'from py_scripts.singleton_calling_utils import get_good_idxs, get_singleton_idx, levenshtein, reformat_genotypes, normalize_var\n'), ((270, 292), 'numpy.array', 'np.array', (['[0, 1, 1, 2]'], {}), '([0, 1, 1, 2])\n', (278, 292), True, 'import numpy as np\n'), ((375, 418), 'py_scripts.singleton_calling_utils.reformat_genotypes', 'reformat_genotypes', (['genotype_array_with_unk'], {}), '(genotype_array_with_unk)\n', (393, 418), False, 'from py_scripts.singleton_calling_utils import get_good_idxs, get_singleton_idx, levenshtein, reformat_genotypes, normalize_var\n'), ((420, 444), 'numpy.array', 'np.array', (['[-1, 1, -1, 2]'], {}), '([-1, 1, -1, 2])\n', (428, 444), True, 'import numpy as np\n'), ((665, 694), 'py_scripts.singleton_calling_utils.normalize_var', 'normalize_var', (['invara', 'invarb'], {}), '(invara, invarb)\n', (678, 694), False, 'from py_scripts.singleton_calling_utils import get_good_idxs, get_singleton_idx, levenshtein, reformat_genotypes, normalize_var\n'), ((1245, 1270), 'py_scripts.singleton_calling_utils.get_good_idxs', 'get_good_idxs', (['gt', 'dp', 'gq'], {}), '(gt, dp, gq)\n', (1258, 1270), False, 'from 
py_scripts.singleton_calling_utils import get_good_idxs, get_singleton_idx, levenshtein, reformat_genotypes, normalize_var\n'), ((1429, 1452), 'py_scripts.singleton_calling_utils.levenshtein', 'levenshtein', (['seqa', 'seqb'], {}), '(seqa, seqb)\n', (1440, 1452), False, 'from py_scripts.singleton_calling_utils import get_good_idxs, get_singleton_idx, levenshtein, reformat_genotypes, normalize_var\n'), ((1935, 1964), 'py_scripts.singleton_calling_utils.get_singleton_idx', 'get_singleton_idx', (['gt', 'rd', 'ad'], {}), '(gt, rd, ad)\n', (1952, 1964), False, 'from py_scripts.singleton_calling_utils import get_good_idxs, get_singleton_idx, levenshtein, reformat_genotypes, normalize_var\n'), ((765, 787), 'numpy.array', 'np.array', (['[0, 0, 2, 0]'], {}), '([0, 0, 2, 0])\n', (773, 787), True, 'import numpy as np\n'), ((797, 822), 'numpy.array', 'np.array', (['[20, 4, 99, 33]'], {}), '([20, 4, 99, 33])\n', (805, 822), True, 'import numpy as np\n'), ((832, 856), 'numpy.array', 'np.array', (['[15, 9, 22, 4]'], {}), '([15, 9, 22, 4])\n', (840, 856), True, 'import numpy as np\n'), ((866, 882), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (874, 882), True, 'import numpy as np\n'), ((905, 929), 'numpy.array', 'np.array', (['[0, -1, -1, 1]'], {}), '([0, -1, -1, 1])\n', (913, 929), True, 'import numpy as np\n'), ((939, 965), 'numpy.array', 'np.array', (['[20, -1, -1, 47]'], {}), '([20, -1, -1, 47])\n', (947, 965), True, 'import numpy as np\n'), ((975, 1001), 'numpy.array', 'np.array', (['[15, -1, -1, 23]'], {}), '([15, -1, -1, 23])\n', (983, 1001), True, 'import numpy as np\n'), ((1011, 1027), 'numpy.array', 'np.array', (['[0, 3]'], {}), '([0, 3])\n', (1019, 1027), True, 'import numpy as np\n'), ((1050, 1072), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (1058, 1072), True, 'import numpy as np\n'), ((1082, 1108), 'numpy.array', 'np.array', (['[55, 99, 10, 34]'], {}), '([55, 99, 10, 34])\n', (1090, 1108), True, 'import numpy as np\n'), 
((1118, 1142), 'numpy.array', 'np.array', (['[5, 9, 23, 56]'], {}), '([5, 9, 23, 56])\n', (1126, 1142), True, 'import numpy as np\n'), ((1152, 1165), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (1160, 1165), True, 'import numpy as np\n'), ((1520, 1542), 'numpy.array', 'np.array', (['[0, 0, 2, 0]'], {}), '([0, 0, 2, 0])\n', (1528, 1542), True, 'import numpy as np\n'), ((1552, 1575), 'numpy.array', 'np.array', (['[0, 0, 12, 0]'], {}), '([0, 0, 12, 0])\n', (1560, 1575), True, 'import numpy as np\n'), ((1585, 1609), 'numpy.array', 'np.array', (['[15, 9, 0, 18]'], {}), '([15, 9, 0, 18])\n', (1593, 1609), True, 'import numpy as np\n'), ((1643, 1665), 'numpy.array', 'np.array', (['[0, 0, 2, 1]'], {}), '([0, 0, 2, 1])\n', (1651, 1665), True, 'import numpy as np\n'), ((1675, 1698), 'numpy.array', 'np.array', (['[0, 0, 12, 4]'], {}), '([0, 0, 12, 4])\n', (1683, 1698), True, 'import numpy as np\n'), ((1708, 1731), 'numpy.array', 'np.array', (['[15, 9, 0, 8]'], {}), '([15, 9, 0, 8])\n', (1716, 1731), True, 'import numpy as np\n'), ((1768, 1790), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (1776, 1790), True, 'import numpy as np\n'), ((1800, 1823), 'numpy.array', 'np.array', (['[0, 0, 6, 10]'], {}), '([0, 0, 6, 10])\n', (1808, 1823), True, 'import numpy as np\n'), ((1833, 1856), 'numpy.array', 'np.array', (['[15, 9, 1, 0]'], {}), '([15, 9, 1, 0])\n', (1841, 1856), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""Very basic MIDI synthesizer.
This does the same as midi_sine.py, but it uses NumPy and block
processing. It is therefore much more efficient. But there are still
many allocations and dynamically growing and shrinking data structures.
"""
import jack
import numpy as np
import threading
# First 4 bits of status byte:
NOTEON = 0x9
NOTEOFF = 0x8
attack_seconds = 0.01
release_seconds = 0.2
attack = None
release = None
fs = None
voices = {}
client = jack.Client('MIDI-Sine-NumPy')
midiport = client.midi_inports.register('midi_in')
audioport = client.outports.register('audio_out')
event = threading.Event()
def m2f(note):
"""Convert MIDI note number to frequency in Hertz.
See https://en.wikipedia.org/wiki/MIDI_Tuning_Standard.
"""
return 2 ** ((note - 69) / 12) * 440
def update_envelope(envelope, begin, target, vel):
"""Helper function to calculate envelopes.
envelope: array of velocities, will be mutated
begin: sample index where ramp begins
target: sample index where *vel* shall be reached
vel: final velocity value
If the ramp goes beyond the blocksize, it is supposed to be
continued in the next block.
A reference to *envelope* is returned, as well as the (unchanged)
*vel* and the target index of the following block where *vel* shall
be reached.
"""
blocksize = len(envelope)
old_vel = envelope[begin]
slope = (vel - old_vel) / (target - begin + 1)
ramp = np.arange(min(target, blocksize) - begin) + 1
envelope[begin:target] = ramp * slope + old_vel
if target < blocksize:
envelope[target:] = vel
target = 0
else:
target -= blocksize
return envelope, vel, target
@client.set_process_callback
def process(blocksize):
"""Main callback."""
# Step 1: Update/delete existing voices from previous block
# Iterating over a copy because items may be deleted:
for pitch in list(voices):
envelope, vel, target = voices[pitch]
if any([vel, target]):
envelope[0] = envelope[-1]
voices[pitch] = update_envelope(envelope, 0, target, vel)
else:
del voices[pitch]
# Step 2: Create envelopes from the MIDI events of the current block
for offset, data in midiport.incoming_midi_events():
if len(data) == 3:
status, pitch, vel = bytes(data)
# MIDI channel number is ignored!
status >>= 4
if status == NOTEON and vel > 0:
try:
envelope, _, _ = voices[pitch]
except KeyError:
envelope = np.zeros(blocksize)
voices[pitch] = update_envelope(
envelope, offset, offset + attack, vel)
elif status in (NOTEON, NOTEOFF):
# NoteOff velocity is ignored!
try:
envelope, _, _ = voices[pitch]
except KeyError:
print('NoteOff without NoteOn (ignored)')
continue
voices[pitch] = update_envelope(
envelope, offset, offset + release, 0)
else:
pass # ignore
else:
pass # ignore
# Step 3: Create sine tones, apply envelopes, add to output buffer
buf = audioport.get_array()
buf.fill(0)
for pitch, (envelope, _, _) in voices.items():
t = (np.arange(blocksize) + client.last_frame_time) / fs
tone = np.sin(2 * np.pi * m2f(pitch) * t)
buf += tone * envelope / 127
@client.set_samplerate_callback
def samplerate(samplerate):
global fs, attack, release
fs = samplerate
attack = int(attack_seconds * fs)
release = int(release_seconds * fs)
voices.clear()
@client.set_shutdown_callback
def shutdown(status, reason):
print('JACK shutdown:', reason, status)
event.set()
with client:
print('Press Ctrl+C to stop')
try:
event.wait()
except KeyboardInterrupt:
print('\nInterrupted by user')
| [
"threading.Event",
"numpy.zeros",
"numpy.arange",
"jack.Client"
] | [((482, 512), 'jack.Client', 'jack.Client', (['"""MIDI-Sine-NumPy"""'], {}), "('MIDI-Sine-NumPy')\n", (493, 512), False, 'import jack\n'), ((622, 639), 'threading.Event', 'threading.Event', ([], {}), '()\n', (637, 639), False, 'import threading\n'), ((3458, 3478), 'numpy.arange', 'np.arange', (['blocksize'], {}), '(blocksize)\n', (3467, 3478), True, 'import numpy as np\n'), ((2657, 2676), 'numpy.zeros', 'np.zeros', (['blocksize'], {}), '(blocksize)\n', (2665, 2676), True, 'import numpy as np\n')] |
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import pytest
from .. import CylindricalGrid
from ..boundaries.local import NeumannBC
def test_cylindrical_grid():
""" test simple cylindrical grid """
for periodic in [True, False]:
grid = CylindricalGrid(4, (-1, 2), (8, 9), periodic_z=periodic)
msg = str(grid)
assert grid.dim == 3
assert grid.numba_type == "f8[:, :]"
assert grid.shape == (8, 9)
assert grid.length == pytest.approx(3)
assert grid.discretization[0] == pytest.approx(0.5)
assert grid.discretization[1] == pytest.approx(1 / 3)
np.testing.assert_array_equal(grid.discretization, np.array([0.5, 1 / 3]))
assert not grid.uniform_cell_volumes
assert grid.volume == pytest.approx(np.pi * 4 ** 2 * 3)
assert grid.volume == pytest.approx(grid.integrate(1))
rs, zs = grid.axes_coords
np.testing.assert_allclose(rs, np.linspace(0.25, 3.75, 8))
np.testing.assert_allclose(zs, np.linspace(-1 + 1 / 6, 2 - 1 / 6, 9))
# random points
c = np.random.randint(8, size=(6, 2))
c1 = grid.point_to_cell(grid.cell_to_point(c))
np.testing.assert_almost_equal(c, c1, err_msg=msg)
assert grid.contains_point(grid.get_random_point())
assert grid.contains_point(grid.get_random_point(1.49))
assert "laplace" in grid.operators
def test_cylindrical_to_cartesian():
""" test conversion of cylindrical grid to Cartesian """
from ...fields import ScalarField
from .. import CartesianGrid
expr_cyl = "cos(z / 2) / (1 + r**2)"
expr_cart = expr_cyl.replace("r**2", "(x**2 + y**2)")
z_range = (-np.pi, 2 * np.pi)
grid_cyl = CylindricalGrid(10, z_range, (16, 33))
pf_cyl = ScalarField.from_expression(grid_cyl, expression=expr_cyl)
grid_cart = CartesianGrid([[-7, 7], [-6, 7], z_range], [16, 16, 16])
pf_cart1 = pf_cyl.interpolate_to_grid(grid_cart)
pf_cart2 = ScalarField.from_expression(grid_cart, expression=expr_cart)
np.testing.assert_allclose(pf_cart1.data, pf_cart2.data, atol=0.1)
def test_setting_domain_cylindrical():
""" test various versions of settings bcs for cylindrical grids """
grid = CylindricalGrid(1, [0, 1], [2, 2], periodic_z=False)
grid.get_boundary_conditions("natural")
grid.get_boundary_conditions(["no-flux", "no-flux"])
with pytest.raises(ValueError):
grid.get_boundary_conditions(["no-flux"])
with pytest.raises(ValueError):
grid.get_boundary_conditions(["no-flux"] * 3)
with pytest.raises(RuntimeError):
grid.get_boundary_conditions(["no-flux", "periodic"])
grid = CylindricalGrid(1, [0, 1], [2, 2], periodic_z=True)
grid.get_boundary_conditions("natural")
grid.get_boundary_conditions(["no-flux", "periodic"])
with pytest.raises(RuntimeError):
grid.get_boundary_conditions(["no-flux", "no-flux"])
@pytest.mark.parametrize("periodic", [True, False])
def test_polar_conversion(periodic):
""" test conversion to polar coordinates """
grid = CylindricalGrid(1, [-1, 1], [5, 5], periodic_z=periodic)
dists = grid.polar_coordinates_real([0, 0, 0])
assert np.all(0.09 <= dists)
assert np.any(dists < 0.11)
assert np.all(dists <= np.sqrt(2))
assert np.any(dists > 0.8 * np.sqrt(2))
def test_setting_boundary_conditions():
""" test setting some boundary conditions """
grid = CylindricalGrid(1, [0, 1], 3)
b_inner = NeumannBC(grid, 0, upper=False)
assert grid.get_boundary_conditions("natural")[0].low == b_inner
assert grid.get_boundary_conditions({"value": 2})[0].low != b_inner
| [
"pytest.approx",
"numpy.sqrt",
"numpy.testing.assert_allclose",
"numpy.any",
"pytest.mark.parametrize",
"numpy.random.randint",
"numpy.testing.assert_almost_equal",
"pytest.raises",
"numpy.array",
"numpy.linspace",
"numpy.all"
] | [((2947, 2997), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""periodic"""', '[True, False]'], {}), "('periodic', [True, False])\n", (2970, 2997), False, 'import pytest\n'), ((2058, 2124), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pf_cart1.data', 'pf_cart2.data'], {'atol': '(0.1)'}), '(pf_cart1.data, pf_cart2.data, atol=0.1)\n', (2084, 2124), True, 'import numpy as np\n'), ((3214, 3235), 'numpy.all', 'np.all', (['(0.09 <= dists)'], {}), '(0.09 <= dists)\n', (3220, 3235), True, 'import numpy as np\n'), ((3247, 3267), 'numpy.any', 'np.any', (['(dists < 0.11)'], {}), '(dists < 0.11)\n', (3253, 3267), True, 'import numpy as np\n'), ((1103, 1136), 'numpy.random.randint', 'np.random.randint', (['(8)'], {'size': '(6, 2)'}), '(8, size=(6, 2))\n', (1120, 1136), True, 'import numpy as np\n'), ((1200, 1250), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['c', 'c1'], {'err_msg': 'msg'}), '(c, c1, err_msg=msg)\n', (1230, 1250), True, 'import numpy as np\n'), ((2412, 2437), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2425, 2437), False, 'import pytest\n'), ((2498, 2523), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2511, 2523), False, 'import pytest\n'), ((2588, 2615), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2601, 2615), False, 'import pytest\n'), ((2854, 2881), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2867, 2881), False, 'import pytest\n'), ((492, 508), 'pytest.approx', 'pytest.approx', (['(3)'], {}), '(3)\n', (505, 508), False, 'import pytest\n'), ((550, 568), 'pytest.approx', 'pytest.approx', (['(0.5)'], {}), '(0.5)\n', (563, 568), False, 'import pytest\n'), ((610, 630), 'pytest.approx', 'pytest.approx', (['(1 / 3)'], {}), '(1 / 3)\n', (623, 630), False, 'import pytest\n'), ((690, 712), 'numpy.array', 'np.array', (['[0.5, 1 / 3]'], {}), '([0.5, 1 / 3])\n', (698, 712), True, 
'import numpy as np\n'), ((789, 822), 'pytest.approx', 'pytest.approx', (['(np.pi * 4 ** 2 * 3)'], {}), '(np.pi * 4 ** 2 * 3)\n', (802, 822), False, 'import pytest\n'), ((960, 986), 'numpy.linspace', 'np.linspace', (['(0.25)', '(3.75)', '(8)'], {}), '(0.25, 3.75, 8)\n', (971, 986), True, 'import numpy as np\n'), ((1027, 1064), 'numpy.linspace', 'np.linspace', (['(-1 + 1 / 6)', '(2 - 1 / 6)', '(9)'], {}), '(-1 + 1 / 6, 2 - 1 / 6, 9)\n', (1038, 1064), True, 'import numpy as np\n'), ((3295, 3305), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3302, 3305), True, 'import numpy as np\n'), ((3339, 3349), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3346, 3349), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
""" Simple 2 input mixer node. This takes two inputs of potentially different sizes,
weights them and then sums them to form an output.
"""
import asyncio
import queue
import labgraph as lg
import numpy as np
from ...messages.generic_signal_sample import SignalSampleMessage
SHORT_SLEEP_SECS = 0.001
class MixerTwoInputConfig(lg.Config):
# These are NxL and NxR matrices (for L,R inputs, same dimension N outputs)
left_weights: np.ndarray
right_weights: np.ndarray
class MixerTwoInputNode(lg.Node):
IN_LEFT_SAMPLE_TOPIC = lg.Topic(SignalSampleMessage)
IN_RIGHT_SAMPLE_TOPIC = lg.Topic(SignalSampleMessage)
OUT_SAMPLE_TOPIC = lg.Topic(SignalSampleMessage)
def setup(self) -> None:
# Check the weights are correctly dimensioned to be summed on output
if self.config.left_weights.shape[0] != self.config.right_weights.shape[0]:
raise ValueError("Mismatch in left-right output dimensions")
self._left_in = queue.Queue()
self._right_in = queue.Queue()
self._shutdown = asyncio.Event()
def cleanup(self) -> None:
self._shutdown.set()
@lg.subscriber(IN_LEFT_SAMPLE_TOPIC)
def left_input(self, in_sample: SignalSampleMessage) -> None:
# Check in the input dimensions against left weights
if in_sample.sample.shape[0] != self.config.left_weights.shape[1]:
raise ValueError("Mismatch in left input dimension")
self._left_in.put(in_sample)
@lg.subscriber(IN_RIGHT_SAMPLE_TOPIC)
def right_input(self, in_sample: SignalSampleMessage) -> None:
# Check in the input dimensions against right weights
if in_sample.sample.shape[0] != self.config.right_weights.shape[1]:
raise ValueError("Mismatch in right input dimension")
self._right_in.put(in_sample)
@lg.publisher(OUT_SAMPLE_TOPIC)
async def mix_samples(self) -> lg.AsyncPublisher:
while not self._shutdown.is_set():
while self._left_in.empty() or self._right_in.empty():
await asyncio.sleep(SHORT_SLEEP_SECS)
left = self._left_in.get()
right = self._right_in.get()
mixed_output = np.dot(self.config.left_weights, left.sample) + np.dot(
self.config.right_weights, right.sample
)
# I am just using the left timetamp for this, since I don't
# know what else makes sense!
out_sample = SignalSampleMessage(
timestamp=left.timestamp, sample=mixed_output
)
yield self.OUT_SAMPLE_TOPIC, out_sample
| [
"asyncio.Event",
"numpy.dot",
"labgraph.publisher",
"asyncio.sleep",
"queue.Queue",
"labgraph.Topic",
"labgraph.subscriber"
] | [((630, 659), 'labgraph.Topic', 'lg.Topic', (['SignalSampleMessage'], {}), '(SignalSampleMessage)\n', (638, 659), True, 'import labgraph as lg\n'), ((688, 717), 'labgraph.Topic', 'lg.Topic', (['SignalSampleMessage'], {}), '(SignalSampleMessage)\n', (696, 717), True, 'import labgraph as lg\n'), ((741, 770), 'labgraph.Topic', 'lg.Topic', (['SignalSampleMessage'], {}), '(SignalSampleMessage)\n', (749, 770), True, 'import labgraph as lg\n'), ((1220, 1255), 'labgraph.subscriber', 'lg.subscriber', (['IN_LEFT_SAMPLE_TOPIC'], {}), '(IN_LEFT_SAMPLE_TOPIC)\n', (1233, 1255), True, 'import labgraph as lg\n'), ((1566, 1602), 'labgraph.subscriber', 'lg.subscriber', (['IN_RIGHT_SAMPLE_TOPIC'], {}), '(IN_RIGHT_SAMPLE_TOPIC)\n', (1579, 1602), True, 'import labgraph as lg\n'), ((1918, 1948), 'labgraph.publisher', 'lg.publisher', (['OUT_SAMPLE_TOPIC'], {}), '(OUT_SAMPLE_TOPIC)\n', (1930, 1948), True, 'import labgraph as lg\n'), ((1059, 1072), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1070, 1072), False, 'import queue\n'), ((1098, 1111), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1109, 1111), False, 'import queue\n'), ((1137, 1152), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (1150, 1152), False, 'import asyncio\n'), ((2274, 2319), 'numpy.dot', 'np.dot', (['self.config.left_weights', 'left.sample'], {}), '(self.config.left_weights, left.sample)\n', (2280, 2319), True, 'import numpy as np\n'), ((2322, 2369), 'numpy.dot', 'np.dot', (['self.config.right_weights', 'right.sample'], {}), '(self.config.right_weights, right.sample)\n', (2328, 2369), True, 'import numpy as np\n'), ((2135, 2166), 'asyncio.sleep', 'asyncio.sleep', (['SHORT_SLEEP_SECS'], {}), '(SHORT_SLEEP_SECS)\n', (2148, 2166), False, 'import asyncio\n')] |
import numpy as np
import csv
import os
import util
# Reproducible random numbers
np.random.seed(0)
# Prefix
path = 'results/hw2/'
if not os.path.exists(path):
os.mkdir(path)
reportLines = []
# 100 x 100 children
res = util.measurement(path, taskId = 'hw2_1', childrenPerLevel = (100, 100), initSecPrice = [1, 100, 100, 100], logNormalMean = 2, logNormalSigma = 1)
reportLines.append(res)
# 100 x 100 children
res = util.measurement(path, taskId = 'hw2_2', childrenPerLevel = (100, 100), initSecPrice = [1, 100, 100, 100], logNormalMean = 0, logNormalSigma = 2)
reportLines.append(res)
# csv report
with open(path + 'report.csv','w') as report:
csvWriter = csv.writer(report)
csvWriter.writerow(['Option price', 'Wall time', 'CPU Time', 'Lognormal Mean', 'Lognormal Sigma', 'Files'])
for row in reportLines:
csvWriter.writerow(row)
| [
"os.path.exists",
"util.measurement",
"csv.writer",
"os.mkdir",
"numpy.random.seed"
] | [((85, 102), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (99, 102), True, 'import numpy as np\n'), ((233, 372), 'util.measurement', 'util.measurement', (['path'], {'taskId': '"""hw2_1"""', 'childrenPerLevel': '(100, 100)', 'initSecPrice': '[1, 100, 100, 100]', 'logNormalMean': '(2)', 'logNormalSigma': '(1)'}), "(path, taskId='hw2_1', childrenPerLevel=(100, 100),\n initSecPrice=[1, 100, 100, 100], logNormalMean=2, logNormalSigma=1)\n", (249, 372), False, 'import util\n'), ((432, 571), 'util.measurement', 'util.measurement', (['path'], {'taskId': '"""hw2_2"""', 'childrenPerLevel': '(100, 100)', 'initSecPrice': '[1, 100, 100, 100]', 'logNormalMean': '(0)', 'logNormalSigma': '(2)'}), "(path, taskId='hw2_2', childrenPerLevel=(100, 100),\n initSecPrice=[1, 100, 100, 100], logNormalMean=0, logNormalSigma=2)\n", (448, 571), False, 'import util\n'), ((143, 163), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (157, 163), False, 'import os\n'), ((169, 183), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (177, 183), False, 'import os\n'), ((679, 697), 'csv.writer', 'csv.writer', (['report'], {}), '(report)\n', (689, 697), False, 'import csv\n')] |
from __future__ import division
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import Imputer, MinMaxScaler, StandardScaler, OneHotEncoder
from sklearn.feature_selection import VarianceThreshold
import pandas as pd
from clumpy.unique_threshold import UniqueThreshold
from clumpy.datasets.utils import ordinal_columns, continous_columns, categorical_columns
def column_atleast_2d(array):
    """Ensure ``array`` is a column matrix: 1-d input becomes shape (n, 1).

    Arrays that already have two or more dimensions are returned unchanged
    (same object, no copy).
    """
    return array.reshape(-1, 1) if array.ndim == 1 else array
def fit_encode_1d(y, strategy='frequency'):
    """Determine the ordinal level ordering for a single feature column.

    Parameters
    ----------
    y : array-like
        Values of a single feature column (any shape; flattened internally).
    strategy : str
        'frequency' orders the unique levels from least to most frequent;
        any other value keeps the sorted order produced by ``np.unique``.

    Returns
    -------
    levels : ndarray
        Unique levels of ``y`` in the chosen order.
    """
    # np.unique flattens its input, so no column reshape is needed, and
    # return_counts computes all level frequencies in one pass instead of
    # one np.sum scan per level.
    levels, counts = np.unique(np.asarray(y), return_counts=True)
    if strategy == 'frequency':
        levels = levels[np.argsort(counts)]
    return levels
def transform_encode_1d(y, fit_levels):
    """Map each value in ``y`` to the index of its level in ``fit_levels``.

    Parameters
    ----------
    y : array-like
        Values to encode.
    fit_levels : array-like
        Level ordering produced by ``fit_encode_1d``; may be unsorted
        (e.g. frequency ordering).

    Returns
    -------
    ndarray of shape (n, 1)
        Integer position of each value of ``y`` within ``fit_levels``.

    Raises
    ------
    ValueError
        If ``y`` contains labels that are not in ``fit_levels``.
    """
    fit_levels = np.asarray(fit_levels)
    levels = np.unique(y)
    if len(np.intersect1d(levels, fit_levels)) < len(levels):
        diff = np.setdiff1d(levels, fit_levels)
        raise ValueError("y contains new labels: %s" % str(diff))
    # Fix for the old FIXME: np.searchsorted requires a sorted array, but
    # fit_levels is generally *not* sorted.  Search through a sorter
    # permutation and map the sorted positions back to original indices.
    sorter = np.argsort(fit_levels)
    positions = np.searchsorted(fit_levels, np.ravel(y), sorter=sorter)
    return sorter[positions].reshape(-1, 1)
def inverse_encode_1d(y, fit_levels):
    """Map encoded integer indices in ``y`` back to the original levels.

    Uses vectorized fancy indexing instead of ``np.vectorize``; unlike
    ``np.vectorize`` with an uninferable output type, this also handles
    empty inputs.

    Returns
    -------
    ndarray of shape (n, 1)
        ``fit_levels`` value for each index in ``y``.
    """
    fit_levels = np.asarray(fit_levels)
    indices = np.asarray(y, dtype=int).ravel()
    return fit_levels[indices].reshape(-1, 1)
class OrdinalEncoder(BaseEstimator, TransformerMixin):
    """Encode each column of X as ordinal integers.

    The integer assigned to a level is its position in the per-column level
    ordering produced by ``fit_encode_1d`` (by default ordered as requested
    by ``strategy``).
    """

    def __init__(self, strategy='random'):
        self.strategy = strategy
        self.level_map = []

    def fit(self, X):
        """Learn the level ordering of every column of X."""
        X = column_atleast_2d(X)
        self.n_features_ = X.shape[1]
        # Bug fix: ``xrange`` exists only on Python 2; ``range`` behaves the
        # same here and works on both Python 2 and 3.
        self.level_map = [
            fit_encode_1d(X[:, column_idx], strategy=self.strategy) for
            column_idx in range(self.n_features_)]
        return self

    def transform(self, X):
        """Encode X column-wise using the orderings learned by ``fit``."""
        X = column_atleast_2d(X)
        if X.shape[1] != self.n_features_:
            raise ValueError("Different number of features at transform time.",
                             " n_features_transform= %d" % X.shape[1],
                             " and n_features_fit= %d" % self.n_features_)
        return np.hstack([transform_encode_1d(X[:, column_idx], levels) for
                          column_idx, levels in enumerate(self.level_map)])

    def inverse_transform(self, X):
        """Map encoded integers back to the original per-column levels."""
        X = column_atleast_2d(X)
        if X.shape[1] != self.n_features_:
            raise ValueError("Different number of features at transform time.",
                             " n_features_transform= %d" % X.shape[1],
                             " and n_features_fit= %d" % self.n_features_)
        encoding = np.hstack([inverse_encode_1d(X[:, column_idx], levels) for
                              column_idx, levels in enumerate(self.level_map)])
        return encoding
class ArbitraryImputer(BaseEstimator, TransformerMixin):
    """Replace every non-finite entry (NaN/inf) with a fixed value, in place."""

    def __init__(self, impute_value):
        self.impute_value = impute_value

    def fit(self, X):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        """Overwrite NaN/inf entries of X with ``impute_value`` (mutates X)."""
        finite = np.isfinite(X)
        if not finite.all():
            np.putmask(X, ~finite, self.impute_value)
        return X
def median_impute(X, strategy='median'):
    """Impute NaN entries of X column-wise via sklearn's ``Imputer``."""
    imputer = Imputer(strategy=strategy, missing_values='NaN')
    imputed = imputer.fit_transform(X)
    return imputed
def scale_values(X, strategy='standardize'):
    """Scale the columns of X.

    ``strategy`` is one of 'standardize' (zero mean, unit variance),
    'center' (zero mean only) or 'minmax' (rescale to [0, 1]).
    """
    if strategy == 'standardize':
        return StandardScaler().fit_transform(X)
    if strategy == 'center':
        return StandardScaler(with_mean=True, with_std=False).fit_transform(X)
    if strategy == 'minmax':
        return MinMaxScaler().fit_transform(X)
    raise ValueError('Unrecognized scaling strategy `%s`.' % strategy)
def remove_low_variance(X, threshold=0.0):
    """Drop columns whose variance does not exceed ``threshold``."""
    return VarianceThreshold(threshold=threshold).fit_transform(X)
def remove_low_info(X, max_frequency=0.99):
    """Drop columns with too many unique values (near-unique identifiers)."""
    return UniqueThreshold(max_frequency=max_frequency).fit_transform(X)
def encode_values(X, strategy='onehot'):
    """Turn a categorical DataFrame into a numeric ndarray.

    'onehot' produces one indicator column per level (plus an explicit NaN
    indicator column); 'none' simply returns the underlying values.
    """
    if strategy == 'onehot':
        return pd.get_dummies(X, dummy_na=True).values
    if strategy == 'none':
        return X.values
    raise ValueError('Unrecognized encoding strategy `%s`.' % strategy)
def process_continous(X):
    """Preprocess continuous numeric columns: impute -> filter -> scale."""
    # missing value imputation
    imputed = median_impute(X, strategy='median')
    # drop constant / near-constant columns
    informative = remove_low_variance(imputed)
    # zero mean, unit variance
    scaled = scale_values(informative, strategy='standardize')
    return scaled.astype(np.float64)
def process_ordinal(X):
    """Preprocess ordinal numeric columns: impute -> filter -> scale."""
    # missing value imputation
    imputed = median_impute(X, strategy='median')
    # drop near-unique columns first, then constant ones
    informative = remove_low_info(imputed)
    informative = remove_low_variance(informative)
    # zero mean, unit variance
    scaled = scale_values(informative, strategy='standardize')
    return scaled.astype(np.float64)
def process_categorical(X):
    """Preprocess categorical columns: one-hot encode, then filter."""
    # encode categoricals as numeric indicator columns
    encoded = encode_values(X, strategy='onehot')
    # drop near-unique and constant indicator columns
    encoded = remove_low_info(encoded)
    encoded = remove_low_variance(encoded)
    # scaling intentionally disabled for indicator columns
    #X = scale_values(X, strategy='center')
    return encoded.astype(np.float64)
def process_data(df):
    """Split ``df`` by column kind and run the matching preprocessing pipeline.

    Returns ``(continous_X, ordinal_X, categorical_X)``; an entry is None
    when the frame has no columns of that kind.
    """
    # categorize columns
    cat_cols = categorical_columns(df)
    ord_cols = ordinal_columns(df)
    num_cols = continous_columns(df)
    # pre-process each group independently
    continous_X, ordinal_X, categorical_X = None, None, None
    if cat_cols:
        categorical_X = process_categorical(df[cat_cols])
    if ord_cols:
        ordinal_X = process_ordinal(df[ord_cols].values)
    if num_cols:
        continous_X = process_continous(df[num_cols].values)
    return continous_X, ordinal_X, categorical_X
#if num_preprocessing == 'standardize':
# scaler = StandardScaler()
#elif num_preprocessing == 'minmax':
# scaler = MinMaxScaler()
#else:
# scaler = None
#if categorical_columns:
# num_columns = [col for col in X.columns if
# col not in categorical_columns]
# if cat_preprocessing == 'ordinal':
# cat_X = OrdinalEncoder().fit_transform(X[categorical_columns].values)
# else:
# dummy_data = pd.get_dummies(X[categorical_columns], columns=categorical_columns, dummy_na=True)
# categorical_columns = dummy_data.columns.tolist()
# cat_X = dummy_data.values
# if num_columns:
# num_X = imputer.fit_transform(X[num_columns].values)
# if scaler:
# num_X = scaler.fit_transform(num_X)
# return np.hstack((num_X, cat_X)), num_columns, categorical_columns
# return cat_X, [], categorical_columns
#else:
# num_X = imputer.fit_transform(X.values)
# if scaler:
# num_X = scaler.fit_transform(num_X)
# return num_X, X.columns.tolist(), []
| [
"numpy.intersect1d",
"sklearn.feature_selection.VarianceThreshold",
"numpy.unique",
"clumpy.datasets.utils.categorical_columns",
"numpy.searchsorted",
"sklearn.preprocessing.Imputer",
"numpy.putmask",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.argsort",
"clumpy.datasets.utils.or... | [((613, 625), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (622, 625), True, 'import numpy as np\n'), ((892, 904), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (901, 904), True, 'import numpy as np\n'), ((3158, 3206), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'strategy': 'strategy', 'missing_values': '"""NaN"""'}), "(strategy=strategy, missing_values='NaN')\n", (3165, 3206), False, 'from sklearn.preprocessing import Imputer, MinMaxScaler, StandardScaler, OneHotEncoder\n'), ((3740, 3778), 'sklearn.feature_selection.VarianceThreshold', 'VarianceThreshold', ([], {'threshold': 'threshold'}), '(threshold=threshold)\n', (3757, 3778), False, 'from sklearn.feature_selection import VarianceThreshold\n'), ((3955, 3999), 'clumpy.unique_threshold.UniqueThreshold', 'UniqueThreshold', ([], {'max_frequency': 'max_frequency'}), '(max_frequency=max_frequency)\n', (3970, 3999), False, 'from clumpy.unique_threshold import UniqueThreshold\n'), ((5414, 5437), 'clumpy.datasets.utils.categorical_columns', 'categorical_columns', (['df'], {}), '(df)\n', (5433, 5437), False, 'from clumpy.datasets.utils import ordinal_columns, continous_columns, categorical_columns\n'), ((5457, 5476), 'clumpy.datasets.utils.ordinal_columns', 'ordinal_columns', (['df'], {}), '(df)\n', (5472, 5476), False, 'from clumpy.datasets.utils import ordinal_columns, continous_columns, categorical_columns\n'), ((5498, 5519), 'clumpy.datasets.utils.continous_columns', 'continous_columns', (['df'], {}), '(df)\n', (5515, 5519), False, 'from clumpy.datasets.utils import ordinal_columns, continous_columns, categorical_columns\n'), ((982, 1014), 'numpy.setdiff1d', 'np.setdiff1d', (['levels', 'fit_levels'], {}), '(levels, fit_levels)\n', (994, 1014), True, 'import numpy as np\n'), ((2991, 3005), 'numpy.isfinite', 'np.isfinite', (['X'], {}), '(X)\n', (3002, 3005), True, 'import numpy as np\n'), ((3341, 3357), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), 
'()\n', (3355, 3357), False, 'from sklearn.preprocessing import Imputer, MinMaxScaler, StandardScaler, OneHotEncoder\n'), ((730, 748), 'numpy.sum', 'np.sum', (['(y == level)'], {}), '(y == level)\n', (736, 748), True, 'import numpy as np\n'), ((794, 817), 'numpy.argsort', 'np.argsort', (['frequencies'], {}), '(frequencies)\n', (804, 817), True, 'import numpy as np\n'), ((916, 950), 'numpy.intersect1d', 'np.intersect1d', (['levels', 'fit_levels'], {}), '(levels, fit_levels)\n', (930, 950), True, 'import numpy as np\n'), ((1092, 1122), 'numpy.searchsorted', 'np.searchsorted', (['fit_levels', 'y'], {}), '(fit_levels, y)\n', (1107, 1122), True, 'import numpy as np\n'), ((3018, 3030), 'numpy.all', 'np.all', (['mask'], {}), '(mask)\n', (3024, 3030), True, 'import numpy as np\n'), ((3044, 3083), 'numpy.putmask', 'np.putmask', (['X', '(~mask)', 'self.impute_value'], {}), '(X, ~mask, self.impute_value)\n', (3054, 3083), True, 'import numpy as np\n'), ((3406, 3452), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(True)', 'with_std': '(False)'}), '(with_mean=True, with_std=False)\n', (3420, 3452), False, 'from sklearn.preprocessing import Imputer, MinMaxScaler, StandardScaler, OneHotEncoder\n'), ((4124, 4156), 'pandas.get_dummies', 'pd.get_dummies', (['X'], {'dummy_na': '(True)'}), '(X, dummy_na=True)\n', (4138, 4156), True, 'import pandas as pd\n'), ((3501, 3515), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3513, 3515), False, 'from sklearn.preprocessing import Imputer, MinMaxScaler, StandardScaler, OneHotEncoder\n')] |
#!/usr/bin/python
# -*- coding:utf-8 -*-
# @author : east
# @time : 2021/7/13 10:34
# @file : interval.py
# @project : fepy
# software : PyCharm
import numpy as np
# Functions
# ---------
def area(*points):
    """Return the elementwise interval lengths ``pr - pl``.

    ``points`` must contain exactly two endpoint arrays of equal size,
    each at most one-dimensional.
    """
    pl, pr = np.asarray(points)
    if pl.ndim > 1 or pr.ndim > 1:
        raise ValueError('Input a needs to be a (N, 1) Matrix.')
    if pl.size != pr.size:
        raise ValueError('Input matrices need to have same raw.')
    return pr - pl
| [
"numpy.asarray"
] | [((233, 251), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (243, 251), True, 'import numpy as np\n')] |
__doc__ = """Image chunk class"""
import numpy as np
from chunkflow.chunk import Chunk
class AffinityMap(Chunk):
    """
    a chunk of affinity map. It has x,y,z three channels with single precision.
    """

    def __init__(self, array, voxel_offset=None):
        # A valid affinity map is a 4D float32 array with 3 channels.
        assert array.ndim == 4
        assert np.issubdtype(array.dtype, np.float32)
        assert array.shape[0] == 3
        super().__init__(array, voxel_offset=voxel_offset)

    def quantize(self):
        """Convert the last (Z affinity) channel into a uint8 image chunk."""
        z_affinity = self[-1, :, :, :]
        quantized = (z_affinity * 255).astype(np.uint8)
        return Chunk(quantized, voxel_offset=self.voxel_offset, voxel_size=self.voxel_size)
"numpy.issubdtype",
"chunkflow.chunk.Chunk"
] | [((308, 346), 'numpy.issubdtype', 'np.issubdtype', (['array.dtype', 'np.float32'], {}), '(array.dtype, np.float32)\n', (321, 346), True, 'import numpy as np\n'), ((654, 726), 'chunkflow.chunk.Chunk', 'Chunk', (['image'], {'voxel_offset': 'self.voxel_offset', 'voxel_size': 'self.voxel_size'}), '(image, voxel_offset=self.voxel_offset, voxel_size=self.voxel_size)\n', (659, 726), False, 'from chunkflow.chunk import Chunk\n')] |
#!/usr/bin/env python
import numpy as np
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint, TrafficLightArray
from std_msgs.msg import Int32
from scipy.spatial import KDTree
import yaml
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 5.0
class WaypointUpdater(object):
    """Publishes the next LOOKAHEAD_WPS waypoints ahead of the vehicle.

    Subscribes to the car pose, the static base waypoints and the index of
    the next red-light stop line; republishes a short horizon of waypoints
    whose target velocities decelerate the car toward any upcoming stop line.
    """

    def __init__(self):
        rospy.init_node('waypoint_updater')
        # ROS Subscribers
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        #rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cheat)
        # ROS Publisher for Final Waypoints
        self.final_waypoints_pub = rospy.Publisher('/final_waypoints', Lane, queue_size=1)
        # Member variables for WaypointUpdater
        self.pose = None              # latest PoseStamped from /current_pose
        self.base_waypoints = None    # full static Lane from /base_waypoints
        self.waypoints_2d = None      # [[x, y], ...] used to build the KD tree
        self.waypoint_tree = None     # scipy KDTree over waypoints_2d
        self.stopline_wp_idx = -1     # -1 means "no red light ahead"
        self.traffic_lights = []
        self.stop_line_positions = []
        self.config = yaml.load(rospy.get_param("/traffic_light_config"))
        self.loop()

    def loop(self):
        """Main publishing loop; runs at 50 Hz until ROS shutdown."""
        rate = rospy.Rate(50)
        # Wait for base waypoints to be read into KD Tree
        # NOTE(review): this guard runs only once -- if the waypoints are
        # still missing after one sleep, the while loop starts anyway (the
        # `if self.pose and self.base_waypoints` check below keeps it safe).
        if not self.base_waypoints or not self.waypoint_tree:
            rospy.loginfo("No waypoints received - skipping loop iteration.")
            rate.sleep()
        # Operate loop at 50hz
        while not rospy.is_shutdown():
            if self.pose and self.base_waypoints:
                # Getting the closest waypoint to the vehicle
                closest_waypoint_idx = self.get_closest_waypoint_idx()
                # Publishing waypoints over ROS
                rospy.loginfo("Publishing Waypoints")
                self.publish_waypoints(closest_waypoint_idx)
            rate.sleep()

    def get_closest_waypoint_idx(self):
        """Return the index of the nearest base waypoint *ahead* of the car."""
        # Getting current position of the vehicle
        x = self.pose.pose.position.x
        y = self.pose.pose.position.y
        # Finding closest waypoint to vehicle in KD Tree
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        # Extracting the closest coordinate and the coordinate from the previous
        # index
        closest_coord = self.waypoints_2d[closest_idx]
        prev_coord = self.waypoints_2d[closest_idx - 1]
        # Constructing hyperplane throught the closest coordinates; the sign
        # of the dot product below tells whether the car is past that point.
        cl_vect = np.array(closest_coord)
        prev_vect = np.array(prev_coord)
        pos_vect = np.array([x, y])
        # Value to determine which side of hyperplane vehicle is on
        val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
        # If waypoint is behind the vehicle, get the next waypoint
        if (val > 0):
            closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        return closest_idx

    def publish_waypoints(self, closest_idx):
        """Publish the lane returned by generate_lane().

        NOTE(review): ``closest_idx`` is unused here; generate_lane()
        recomputes it internally.
        """
        final_lane = self.generate_lane()
        self.final_waypoints_pub.publish(final_lane)

    def generate_lane(self):
        """Build the Lane message for the next LOOKAHEAD_WPS waypoints."""
        # Creating Lane Object
        lane = Lane()
        # Getting index for closest and farthest waypoints
        closest_idx = self.get_closest_waypoint_idx()
        farthest_idx = closest_idx + LOOKAHEAD_WPS
        # Adding relevant waypoints to base_lane
        base_lane = self.base_waypoints.waypoints[closest_idx : farthest_idx]
        # Decelerate only when a red-light stop line falls inside the horizon.
        if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
            lane.waypoints = base_lane
        else:
            lane.waypoints = self.decelerate_waypoints(base_lane, closest_idx)
        ''' This code is used for testing without perception
        for light in self.traffic_lights:
            if light[0] > closest_idx and light[0] < farthest_idx and light[1] == 0:
                self.stopline_wp_idx = light[0]
                lane.waypoints = self.decelerate_waypoints(base_lane, closest_idx)
                return lane
            else:
                lane.waypoints = base_lane
        '''
        return lane

    def decelerate_waypoints(self, waypoints, closest_idx):
        """Return copies of ``waypoints`` with velocities ramped down to 0
        at the stop line (sqrt deceleration profile)."""
        # Creating temp list to hold modified waypoints
        temp = []
        # Iterate through waypoints and change their target velocity values
        for i, wp in enumerate(waypoints):
            p = Waypoint()
            p.pose = wp.pose
            # Offset of 3 waypoints -- presumably so the car nose stops
            # behind the line; TODO confirm against vehicle geometry.
            stop_idx = max(self.stopline_wp_idx - closest_idx - 3, 0)
            dist = self.distance(waypoints, i, stop_idx)
            # Calculating new velocities
            vel = math.sqrt(MAX_DECEL * dist)
            if vel < 1.0:
                vel = 0
            # Only update target velocity if it is lower than the value in the map
            p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
            temp.append(p)
        return temp

    def pose_cb(self, msg):
        """Callback for /current_pose: cache the latest pose."""
        # Get pose from ROS message
        self.pose = msg

    def waypoints_cb(self, waypoints):
        """Callback for /base_waypoints: cache them and build the KD tree."""
        rospy.loginfo("Initial Base Waypoints Received")
        # Getting waypoints from ROS message
        self.base_waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        """Callback for /traffic_waypoint: index of the next red stop line."""
        self.stopline_wp_idx = msg.data

    ''' This code is used for testing without traffic light detection
    def get_closest_waypoint_idx_to_tl(self, x, y):
        # Finding closest waypoint to vehicle in KD Tree
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        # Extracting the closest coordinate and the coordinate from the previous
        # index
        closest_coord = self.waypoints_2d[closest_idx]
        prev_coord = self.waypoints_2d[closest_idx - 1]
        # Constructing hyperplane throught the closest coordinates
        cl_vect = np.array(closest_coord)
        prev_vect = np.array(prev_coord)
        pos_vect = np.array([x, y])
        val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
        # If waypoint is behind the vehicle, get the next waypoint
        if (val > 0):
            closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        return closest_idx

    def traffic_cheat(self, msg):
        if not self.traffic_lights:
            self.stop_line_positions = self.config['stop_line_positions']
            for light in msg.lights:
                min_idx = -1
                min_dist = 999999999999
                for i, pos in enumerate(self.stop_line_positions):
                    dist = math.sqrt( pow(pos[0] - light.pose.pose.position.x, 2) +
                                      pow(pos[1] - light.pose.pose.position.y, 2))
                    if dist < min_dist:
                        min_dist = dist
                        min_idx = i
                idx = self.get_closest_waypoint_idx_to_tl(self.stop_line_positions[min_idx][0],
                                                          self.stop_line_positions[min_idx][1])
                state = light.state
                self.traffic_lights.append([idx, state])
        else:
            for i, light in enumerate(msg.lights):
                self.traffic_lights[i][1] = light.state
    '''

    def get_waypoint_velocity(self, waypoint):
        """Return the target linear velocity stored in a waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target linear velocity of waypoints[waypoint] in place."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Cumulative Euclidean path length from waypoint wp1 to wp2."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
if __name__ == '__main__':
    # Node construction blocks inside WaypointUpdater.loop() until shutdown.
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
| [
"rospy.logerr",
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_param",
"scipy.spatial.KDTree",
"math.sqrt",
"numpy.array",
"numpy.dot",
"rospy.Rate",
"styx_msgs.msg.Waypoint",
"rospy.Publisher",
"rospy.loginfo",
"styx_msgs.msg.Lane"
] | [((1036, 1071), 'rospy.init_node', 'rospy.init_node', (['"""waypoint_updater"""'], {}), "('waypoint_updater')\n", (1051, 1071), False, 'import rospy\n'), ((1099, 1159), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (1115, 1159), False, 'import rospy\n'), ((1164, 1224), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (1180, 1224), False, 'import rospy\n'), ((1229, 1290), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/traffic_waypoint"""', 'Int32', 'self.traffic_cb'], {}), "('/traffic_waypoint', Int32, self.traffic_cb)\n", (1245, 1290), False, 'import rospy\n'), ((1451, 1506), 'rospy.Publisher', 'rospy.Publisher', (['"""/final_waypoints"""', 'Lane'], {'queue_size': '(1)'}), "('/final_waypoints', Lane, queue_size=1)\n", (1466, 1506), False, 'import rospy\n'), ((1879, 1893), 'rospy.Rate', 'rospy.Rate', (['(50)'], {}), '(50)\n', (1889, 1893), False, 'import rospy\n'), ((3018, 3041), 'numpy.array', 'np.array', (['closest_coord'], {}), '(closest_coord)\n', (3026, 3041), True, 'import numpy as np\n'), ((3058, 3078), 'numpy.array', 'np.array', (['prev_coord'], {}), '(prev_coord)\n', (3066, 3078), True, 'import numpy as np\n'), ((3094, 3110), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (3102, 3110), True, 'import numpy as np\n'), ((3186, 3233), 'numpy.dot', 'np.dot', (['(cl_vect - prev_vect)', '(pos_vect - cl_vect)'], {}), '(cl_vect - prev_vect, pos_vect - cl_vect)\n', (3192, 3233), True, 'import numpy as np\n'), ((3607, 3613), 'styx_msgs.msg.Lane', 'Lane', ([], {}), '()\n', (3611, 3613), False, 'from styx_msgs.msg import Lane, Waypoint, TrafficLightArray\n'), ((5295, 5343), 'rospy.loginfo', 'rospy.loginfo', (['"""Initial Base Waypoints Received"""'], {}), "('Initial Base Waypoints Received')\n", (5308, 5343), False, 'import rospy\n'), ((1790, 1830), 
'rospy.get_param', 'rospy.get_param', (['"""/traffic_light_config"""'], {}), "('/traffic_light_config')\n", (1805, 1830), False, 'import rospy\n'), ((2012, 2077), 'rospy.loginfo', 'rospy.loginfo', (['"""No waypoints received - skipping loop iteration."""'], {}), "('No waypoints received - skipping loop iteration.')\n", (2025, 2077), False, 'import rospy\n'), ((2143, 2162), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2160, 2162), False, 'import rospy\n'), ((4713, 4723), 'styx_msgs.msg.Waypoint', 'Waypoint', ([], {}), '()\n', (4721, 4723), False, 'from styx_msgs.msg import Lane, Waypoint, TrafficLightArray\n'), ((4917, 4944), 'math.sqrt', 'math.sqrt', (['(MAX_DECEL * dist)'], {}), '(MAX_DECEL * dist)\n', (4926, 4944), False, 'import math\n'), ((5605, 5630), 'scipy.spatial.KDTree', 'KDTree', (['self.waypoints_2d'], {}), '(self.waypoints_2d)\n', (5611, 5630), False, 'from scipy.spatial import KDTree\n'), ((7679, 7744), 'math.sqrt', 'math.sqrt', (['((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)'], {}), '((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n', (7688, 7744), False, 'import math\n'), ((7981, 8035), 'rospy.logerr', 'rospy.logerr', (['"""Could not start waypoint updater node."""'], {}), "('Could not start waypoint updater node.')\n", (7993, 8035), False, 'import rospy\n'), ((2373, 2410), 'rospy.loginfo', 'rospy.loginfo', (['"""Publishing Waypoints"""'], {}), "('Publishing Waypoints')\n", (2386, 2410), False, 'import rospy\n')] |
import torch
from torch import Tensor
from typing import List, Tuple
from numpy import ndarray
import numpy as np
from .config import SentenceTransformerConfig
from .models import TransformerModel
from tqdm import tqdm
class SentenceEncoder:
    """
    Wrapper for the encoding and embedding of sentences with Sentence Transformers
    """
    def __init__(self, transformer_model: TransformerModel, transformer_model_config: SentenceTransformerConfig):
        """
        Creates a new encoder with the given model and config
        :param transformer_model:
            the model that encodes and embeds sentences
        :param transformer_model_config:
            the config for the embedding and encoding
        """
        self.do_lower_case = transformer_model_config.do_lower_case
        self.max_seq_length = transformer_model_config.max_seq_length
        # Prefer GPU when available; the model and all batches are moved there.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = transformer_model
        self.model.to(self.device)
        # Inference only: disable dropout etc.
        self.model.eval()
        self.tokenizer = transformer_model.tokenizer_model

    def get_sentence_embeddings(self, sentences: List[str], batch_size: int = 32, show_progress_bar: bool = None) -> List[ndarray]:
        """
        Computes the Sentence BERT embeddings for the sentences
        :param sentences:
            the sentences to embed
        :param batch_size:
            the batch size used for the computation
        :param show_progress_bar:
            if truthy, wrap the batch iterator in a tqdm progress bar
        :return:
            a list with ndarrays of the embeddings for each sentence,
            in the same order as the input
        """
        all_embeddings = []
        # Sort by sentence length so each batch pads to a similar length;
        # the original order is restored at the end.
        length_sorted_idx = np.argsort([len(sen) for sen in sentences])
        iterator = range(0, len(sentences), batch_size)
        if show_progress_bar:
            iterator = tqdm(iterator, desc="Batches")
        for batch_idx in iterator:
            batch_tokens = []
            batch_input_ids = []
            batch_segment_ids = []
            batch_input_masks = []
            batch_start = batch_idx
            batch_end = min(batch_start+batch_size, len(sentences))
            longest_seq = 0
            # First pass: tokenize and find the longest sequence in the batch
            for idx in length_sorted_idx[batch_start: batch_end]:
                sentence = sentences[idx]
                if self.do_lower_case:
                    sentence = sentence.lower()
                tokens = self.tokenizer.tokenize(sentence)
                longest_seq = max(longest_seq, len(tokens))
                batch_tokens.append(tokens)
            # Second pass: pad every sequence to the batch maximum
            for tokens in batch_tokens:
                input_ids, segment_ids, input_mask = self.model.get_sentence_features(tokens, longest_seq)
                batch_input_ids.append(input_ids)
                batch_segment_ids.append(segment_ids)
                batch_input_masks.append(input_mask)
            batch_input_ids = torch.tensor(batch_input_ids, dtype=torch.long).to(self.device)
            batch_segment_ids = torch.tensor(batch_segment_ids, dtype=torch.long).to(self.device)
            batch_input_masks = torch.tensor(batch_input_masks, dtype=torch.long).to(self.device)
            # No gradients needed for pure inference
            with torch.no_grad():
                embeddings = self.model.get_sentence_representation(batch_input_ids, batch_segment_ids,
                                                                    batch_input_masks)
            embeddings = embeddings.to('cpu').numpy()
            all_embeddings.extend(embeddings)
        # Undo the length sort so embeddings align with the input sentences
        reverting_order = np.argsort(length_sorted_idx)
        all_embeddings = [all_embeddings[idx] for idx in reverting_order]
        return all_embeddings

    def smart_batching_collate(self, batch: List[Tuple[List[List[str]], Tensor]]) \
            -> Tuple[List[Tensor], List[Tensor], List[Tensor], Tensor]:
        """
        Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
        :param batch:
            a batch from a SmartBatchingDataset; each item pairs a list of
            tokenized texts with a label tensor
        :return:
            a batch of tensors for the model: (inputs, segments, masks, labels),
            with one tensor per text position in the first three lists
        """
        # Number of texts per example (e.g. 2 for sentence pairs)
        num_texts = len(batch[0][0])
        labels = []
        paired_texts = [[] for _ in range(num_texts)]
        max_seq_len = [0] * num_texts
        # Collect texts column-wise and track per-column max length,
        # capped at self.max_seq_length.
        for tokens, label in batch:
            labels.append(label)
            for i in range(num_texts):
                paired_texts[i].append(tokens[i])
                max_seq_len[i] = min(max(max_seq_len[i], len(tokens[i])), self.max_seq_length)
        inputs = [[] for _ in range(num_texts)]
        segments = [[] for _ in range(num_texts)]
        masks = [[] for _ in range(num_texts)]
        # Pad every text of every example to its column's max length
        for texts in zip(*paired_texts):
            features = [self.model.get_sentence_features(text, max_len) for text , max_len in zip(texts, max_seq_len)]
            for i in range(num_texts):
                inputs[i].append(features[i][0])
                segments[i].append(features[i][1])
                masks[i].append(features[i][2])
        tensor_labels = torch.stack(labels)
        tensor_inputs = [torch.tensor(input_ids, dtype=torch.long) for input_ids in inputs]
        tensor_masks = [torch.tensor(mask_ids, dtype=torch.long) for mask_ids in masks]
        tensor_segments = [torch.tensor(segment_ids, dtype=torch.long) for segment_ids in segments]
        return tensor_inputs, tensor_segments, tensor_masks, tensor_labels
| [
"torch.stack",
"tqdm.tqdm",
"numpy.argsort",
"torch.tensor",
"torch.cuda.is_available",
"torch.no_grad"
] | [((3422, 3451), 'numpy.argsort', 'np.argsort', (['length_sorted_idx'], {}), '(length_sorted_idx)\n', (3432, 3451), True, 'import numpy as np\n'), ((4887, 4906), 'torch.stack', 'torch.stack', (['labels'], {}), '(labels)\n', (4898, 4906), False, 'import torch\n'), ((1770, 1800), 'tqdm.tqdm', 'tqdm', (['iterator'], {'desc': '"""Batches"""'}), "(iterator, desc='Batches')\n", (1774, 1800), False, 'from tqdm import tqdm\n'), ((4932, 4973), 'torch.tensor', 'torch.tensor', (['input_ids'], {'dtype': 'torch.long'}), '(input_ids, dtype=torch.long)\n', (4944, 4973), False, 'import torch\n'), ((5023, 5063), 'torch.tensor', 'torch.tensor', (['mask_ids'], {'dtype': 'torch.long'}), '(mask_ids, dtype=torch.long)\n', (5035, 5063), False, 'import torch\n'), ((5114, 5157), 'torch.tensor', 'torch.tensor', (['segment_ids'], {'dtype': 'torch.long'}), '(segment_ids, dtype=torch.long)\n', (5126, 5157), False, 'import torch\n'), ((913, 938), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (936, 938), False, 'import torch\n'), ((3079, 3094), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3092, 3094), False, 'import torch\n'), ((2801, 2848), 'torch.tensor', 'torch.tensor', (['batch_input_ids'], {'dtype': 'torch.long'}), '(batch_input_ids, dtype=torch.long)\n', (2813, 2848), False, 'import torch\n'), ((2897, 2946), 'torch.tensor', 'torch.tensor', (['batch_segment_ids'], {'dtype': 'torch.long'}), '(batch_segment_ids, dtype=torch.long)\n', (2909, 2946), False, 'import torch\n'), ((2995, 3044), 'torch.tensor', 'torch.tensor', (['batch_input_masks'], {'dtype': 'torch.long'}), '(batch_input_masks, dtype=torch.long)\n', (3007, 3044), False, 'import torch\n')] |
from zipfile import ZipFile
import numpy as np
def generate_report(task_a, task_b, task_c, save=True, zipname="res.zip"):
    """Format per-sample predictions as "a_bbb_ccc" lines.

    Parameters
    ----------
    task_a : ndarray
        1-d integer class predictions (1-based; shifted to 0-based here).
    task_b, task_c : ndarray
        2-d binary prediction matrices, one row per sample.
    save : bool
        If True (exactly), write the report to answer.txt and zip it.
    zipname : str
        Name of the zip archive to create when saving.

    Returns
    -------
    str
        The full report text.
    """
    report = ""
    task_a = task_a - 1
    for i in range(task_a.shape[0]):
        # np.array2string with an empty separator yields e.g. "[101]";
        # [1:-1] strips the brackets.
        task_b_str = np.array2string(task_b[i], separator="")[1:-1]
        task_c_str = np.array2string(task_c[i], separator="")[1:-1]
        report += "{}_{}_{}\n".format(task_a[i], task_b_str, task_c_str)
    if save is True:
        # Bug fix: the file handle was never closed, so answer.txt could be
        # zipped before its contents were flushed to disk.
        with open("answer.txt", 'w') as answer_file:
            print(report, file=answer_file)
        with ZipFile(zipname, 'w') as myzip:
            myzip.write("answer.txt")
    return report
def generate_reports(res_a, res_b, res_c, model_name):
    """Generate one zipped report per configuration; return the last report."""
    configs = ["text only", "image only", "deep cca", "concatenated"]
    last_report = None
    for config in configs:
        zip_name = "res_{}_{}.zip".format(model_name, "_".join(config.split()))
        last_report = generate_report(
            res_a[config]["pred_cls_test"],
            res_b[config]["pred_cls_test"],
            res_c[config]["pred_cls_test"],
            zipname=zip_name,
        )
    return last_report
| [
"numpy.array2string",
"zipfile.ZipFile"
] | [((222, 262), 'numpy.array2string', 'np.array2string', (['task_b[i]'], {'separator': '""""""'}), "(task_b[i], separator='')\n", (237, 262), True, 'import numpy as np\n'), ((290, 330), 'numpy.array2string', 'np.array2string', (['task_c[i]'], {'separator': '""""""'}), "(task_c[i], separator='')\n", (305, 330), True, 'import numpy as np\n'), ((497, 518), 'zipfile.ZipFile', 'ZipFile', (['zipname', '"""w"""'], {}), "(zipname, 'w')\n", (504, 518), False, 'from zipfile import ZipFile\n')] |
from check_defn import *
import numpy as np
import random
def train(word_list, query_list, ensemble, iterations,l_rate,matrix,lamb):
    """Train the spell-correction weight vector with batched gradient steps.

    For each iteration, runs ``forward`` on one randomly chosen query per
    word in the current batch, accumulates cost/accuracy, then applies the
    correction from ``backward`` to ``matrix``.  Returns the updated matrix.
    """
    #counter is used to go to next ensemble
    counter = 0
    total_data = len(word_list)
    for iteration in range(iterations):
        #To keep the cost
        cost = 0
        #To store the entries for back propagation
        back = []
        #For keeping the accuracy
        accuracy = 0
        for q in range(ensemble):
            query_no = (q+counter)%total_data
            select_len = len(query_list[query_no])
            # NOTE(review): randint(1,100) % select_len is a biased sampler
            # and only covers all indices when select_len <= 100 -- consider
            # random.randrange(select_len) instead.
            select = random.randint(1,100)%select_len
            query = query_list[query_no][select]
            #Gives a query for forward propagation
            (cost_minimum, found_word) = forward(query, word_list,matrix,lamb)
            cost+=cost_minimum
            #Backpropagation
            back.append((query, found_word, word_list[query_no]))
            if found_word == word_list[query_no]:
                accuracy+=1
        cost = cost/ensemble
        accuracy = accuracy*100/ensemble
        print('Iteration:',iteration,'\tAccuracy:',accuracy,'%', '\tCost:',cost)
        (correction,cost) = backward(back, matrix,lamb)
        print('\tSecondary Cost:',cost)
        # Learning-rate step scaled down as accuracy rises.
        matrix = matrix - l_rate*correction/(((accuracy+1)**2)*ensemble)
        counter+=ensemble
    return matrix
#Training dataset is wikipedia.txt
# Each line has the form "word: query1 query2 ..." mapping a correct word
# to its misspelled queries.
# NOTE(review): fp is never closed -- consider a `with` block.
fp = open('wikipedia.txt','r')
#To store the words
word_list = []
#To store the queries
query_list = []
#Reads the data in the file
data = fp.readline()
while(data!=''):
    data = data.split(':')
    word_list.append(data[0])
    query_list.append(data[1].split())
    data = fp.readline()
#Initializes a matrix of all entries one of proper length
# (length inferred from the feature vector of a sample cost computation)
matrix = np.ones(len(create_vector(cost('a','a'))))
#Learning Rate : learning_rate
learning_rate = 1
#Ensemble or Batch size : ensemble_size
ensemble_size = len(word_list)//16
#Iterations : iterations
iterations = 32
#Lambda : lamb_da
lamb_da = 0
print('Learning rate:',learning_rate, 'Ensemble Size:', ensemble_size, 'Iterations:',iterations,'Lambda:',lamb_da)
matrix = train(word_list,query_list, ensemble_size, iterations, learning_rate, matrix, lamb_da)
# Persist the trained weights in both binary and text form.
np.save('matrix.npy', matrix)
np.savetxt('matrix.txt', matrix)
| [
"numpy.savetxt",
"random.randint",
"numpy.save"
] | [((2275, 2304), 'numpy.save', 'np.save', (['"""matrix.npy"""', 'matrix'], {}), "('matrix.npy', matrix)\n", (2282, 2304), True, 'import numpy as np\n'), ((2306, 2338), 'numpy.savetxt', 'np.savetxt', (['"""matrix.txt"""', 'matrix'], {}), "('matrix.txt', matrix)\n", (2316, 2338), True, 'import numpy as np\n'), ((603, 625), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (617, 625), False, 'import random\n')] |
# -*- coding: utf-8 -*-
"""
DTQPy_M
Create sequences for Mayer terms
Contributor: <NAME> (AthulKrishnaSundarrajan on Github)
Primary Contributor: <NAME> (danielrherber on Github)
"""
# import inbuilt libraries
import numpy as np
from numpy.matlib import repmat
# import DTQPy specific functions
from dtqpy.src.utilities.DTQPy_tmultiprod import DTQPy_tmultiprod
from dtqpy.src.DTQPy_getQPIndex import DTQPy_getQPIndex
def DTQPy_M(Mfull,internal,opts):
    """Assemble sparse-triplet (row, column, value) sequences for Mayer terms.

    Parameters
    ----------
    Mfull : list
        Mayer term structures; each element exposes ``.left``, ``.right``
        (variable-group selectors, 0 meaning "no variable") and ``.matrix``.
    internal : object
        Problem internals; only ``.nt`` (number of time points), ``.IN``
        (index sets per variable group) and ``.I_stored`` are used here.
    opts : object
        Options structure (unused here; kept for a uniform module API).

    Returns
    -------
    tuple of ndarray
        ``(Isav, Jsav, Vsav)`` triplet sequences.
    """
    # extract variables
    nt = internal.nt; IN = internal.IN; I_stored = internal.I_stored

    # Collect triplet chunks in Python lists and concatenate once at the end.
    # The original called np.append inside the inner loop, which copies the
    # whole array every time (O(n^2) overall). Seeding each list with an empty
    # float array reproduces np.append's dtype promotion and yields the same
    # empty float arrays when no Mayer term contributes entries.
    I_parts = [np.array([])]
    J_parts = [np.array([])]
    V_parts = [np.array([])]

    # go through each Mayer term
    for k in range(len(Mfull)):
        # obtain current substructure
        Mleft = Mfull[k].left
        Mright = Mfull[k].right
        Mmatrix = Mfull[k].matrix
        # evaluate the (possibly time-varying) matrix at t = 0
        Mt = DTQPy_tmultiprod(Mmatrix,[],np.array([0]))
        # index sets; a left/right selector of 0 denotes a singleton
        if Mleft != 0:
            R = IN[Mleft-1]
        else:
            R = np.array([0])
        if Mright != 0:
            C = IN[Mright-1]
        else:
            C = np.array([0])
        # determine locations and matrix values at these points
        for i in range(len(R)):
            for j in range(len(C)):
                # get current matrix value
                Mv = Mt[:,i,j]
                if Mv.any():
                    # hessian index sequence
                    r = DTQPy_getQPIndex(R[i],Mleft,0,nt,I_stored)
                    c = DTQPy_getQPIndex(C[j],Mright,0,nt,I_stored)
                    # stage the triplet; np.ravel matches np.append's
                    # flattening of scalar or array values
                    I_parts.append(np.ravel(r))
                    J_parts.append(np.ravel(c))
                    V_parts.append(np.ravel(Mv))

    Isav = np.concatenate(I_parts)
    Jsav = np.concatenate(J_parts)
    Vsav = np.concatenate(V_parts)
    return Isav,Jsav,Vsav
| [
"numpy.append",
"numpy.array",
"dtqpy.src.DTQPy_getQPIndex.DTQPy_getQPIndex"
] | [((600, 612), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (608, 612), True, 'import numpy as np\n'), ((621, 633), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (629, 633), True, 'import numpy as np\n'), ((642, 654), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (650, 654), True, 'import numpy as np\n'), ((942, 955), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (950, 955), True, 'import numpy as np\n'), ((1047, 1060), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1055, 1060), True, 'import numpy as np\n'), ((1182, 1195), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1190, 1195), True, 'import numpy as np\n'), ((1568, 1614), 'dtqpy.src.DTQPy_getQPIndex.DTQPy_getQPIndex', 'DTQPy_getQPIndex', (['R[i]', 'Mleft', '(0)', 'nt', 'I_stored'], {}), '(R[i], Mleft, 0, nt, I_stored)\n', (1584, 1614), False, 'from dtqpy.src.DTQPy_getQPIndex import DTQPy_getQPIndex\n'), ((1635, 1682), 'dtqpy.src.DTQPy_getQPIndex.DTQPy_getQPIndex', 'DTQPy_getQPIndex', (['C[j]', 'Mright', '(0)', 'nt', 'I_stored'], {}), '(C[j], Mright, 0, nt, I_stored)\n', (1651, 1682), False, 'from dtqpy.src.DTQPy_getQPIndex import DTQPy_getQPIndex\n'), ((1756, 1774), 'numpy.append', 'np.append', (['Isav', 'r'], {}), '(Isav, r)\n', (1765, 1774), True, 'import numpy as np\n'), ((1801, 1819), 'numpy.append', 'np.append', (['Jsav', 'c'], {}), '(Jsav, c)\n', (1810, 1819), True, 'import numpy as np\n'), ((1846, 1865), 'numpy.append', 'np.append', (['Vsav', 'Mv'], {}), '(Vsav, Mv)\n', (1855, 1865), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import os
import math
#import utm
import shapefile as shp
import seaborn as sns
from collections import OrderedDict
import geopandas as gpd
from geopy.distance import distance
import argparse
# PRIMARY DATA SOURCE
# https://s3.amazonaws.com/capitalbikeshare-data/index.html
# - - - - - - - - - - - - - - - -
# - - CHECK OUT MY DOCSTRINGS!!!
# - - NumPy/SciPy Docstring Format
# - - - - - - - - - - - - - - - -
def pd_csv_group(data_folder,num=-1):
    """Read the csv files in a directory into a single concatenated DataFrame.

    Parameters
    ----------
    data_folder : str
        Path (including trailing separator) to a directory containing ONLY
        csv data files; file names are appended directly to this string.
    num : int, optional
        Number of csv files to read and integrate into the primary dataframe.
        Defaults to -1, meaning "read every file in the directory".

    Returns
    -------
    DataFrame
        Rows of the csv files read, stacked with a fresh integer index.
    """
    # List the directory once so the same snapshot drives both the limit
    # and the iteration (the original listed it twice).
    files = os.listdir(data_folder)
    file_count = len(files) if num == -1 else num
    df_list = []
    print("stacking dataframes....")
    for file_num, file_name in enumerate(files):
        print(f'appending df #{file_num+1}...')
        df_list.append(pd.read_csv(data_folder + file_name))
        # stop once the requested number of files has been read
        if file_num == file_count - 1:
            break
    # single concat/report path instead of the duplicated in-loop copy
    data = pd.concat(df_list, axis=0, ignore_index=True, sort=False)
    print(f'{len(data)/1e6:0.2}M rows of data with {len(data.columns)} features/columns derived from {len(df_list)} CSV files. ')
    return data
def lifetime(duration):
    """Break a duration given in seconds into days, hours, minutes, seconds.

    Parameters
    ----------
    duration (int):
        The duration (in seconds) to be transformed into a dictionary.

    Returns
    -------
    dict
        a dictionary in the form of dict('days':int(), 'hours':int(), 'minutes':int(),'seconds':int())
    """
    # peel off each unit with divmod: whole days first, then hours and
    # minutes out of the remainder; what is left is seconds
    days, remainder = divmod(duration, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    return {'days': days, 'hours': hours, 'minutes': minutes, 'seconds': seconds}
def freq_dict(lst):
    """Count occurrences of each unique value in the given list.

    Parameters
    ----------
    lst (list)
        a list of items to determine number of occurances for.

    Returns
    -------
    dict
        a dictionary of the format dict('unique_value_n': int(), 'unique_value_(n+1)': int(), ... )

    Notes
    -----
    BUGFIX: the previous implementation initialized a first sighting to 0
    instead of 1, so every reported count was one less than the true number
    of occurrences (contradicting this docstring). Counts are now exact.
    """
    dct = dict()
    for item in lst:
        # .get covers both the first sighting (0 + 1) and every repeat
        dct[item] = dct.get(item, 0) + 1
    return dct
def series_freq_dict(df, column_name):
    """Build an hour-of-day frequency dictionary for a datetime column.

    Extracts the ``.hour`` attribute from every value in ``df[column_name]``
    and tallies the hours with ``freq_dict()``.

    Parameters
    ----------
    df (DataFrame)
        the dataframe object
    column_name (str)
        name of column in dataframe. *** Must contain ONLY datetime() objects.

    Returns
    -------
    dict()
        a frequency dictionary from freq_dict()
    """
    hour_values = list(map(lambda ts: ts.hour, df[column_name].values))
    return freq_dict(hour_values)
class BikeReport(object):
    """Creates an instance of the BikeReport object.

    Attributes
    ----------
    bike_number (str)
        bike-specific identification number. Example, "W32432".
    duration (dict)
        dictionary representation of the duration of bike service life
        ('days'/'hours'/'minutes'/'seconds'), produced by the module-level
        lifetime() helper from the summed 'Duration' seconds.
    trips (int)
        total number of recorded trips for this bike.

    Parameters
    ----------
    df (DataFrame)
        dataframe that contains the bike of interest.
    bike_number (str)
        bike-specific identification number. Example, "W32432".
    """
    def __init__(self, df, bike_number):
        self.bike_number = bike_number
        # sum this bike's ride seconds, then convert to a d/h/m/s dict via
        # the module-level lifetime() function
        self.duration = lifetime(df[df['Bike number'] ==bike_number].agg({'Duration':'sum'}).Duration )
        # row count for this bike == number of recorded trips
        self.trips = df[df['Bike number'] ==bike_number].count()[0]
    def __repr__(self):
        return f'<BikeReport.obj>\n\tBikeNumber:{self.bike_number}\n\tServiceLifetime:{self.duration}\n\tTotalTrips:{self.trips}'
    def lifetime(self):
        # NOTE(review): this method treats self.duration as a number of
        # seconds, but __init__ stores a dict there — calling it would raise
        # TypeError. It appears to be dead code duplicating the module-level
        # lifetime(); confirm before removing.
        dct = {}
        dct['days'] = self.duration//86400
        dct['hours']= self.duration%86400//3600
        dct['minutes'] = (self.duration%86400)%3600//60
        dct['seconds'] = (self.duration%86400)%3600%60
        return dct
def time_filter(df, colname, start_time, end_time):
    """Return a filtered copy of ``df`` keeping rows whose ``colname`` time
    lies in the inclusive range [start_time, end_time].

    Parameters
    ----------
    df (dataframe)
        dataframe object to apply filter to.
    colname (str)
        name of column in given dataframe to filter through; values are
        expected to be datetime.time() objects.
    start_time (datetime.time)
        lower bound (inclusive) of the filter.
    end_time (datetime.time)
        upper bound (inclusive) of the filter.

    Returns
    -------
    copy
        a filtered copy of the given dataframe, or None (with a console
        message) when either bound is not a datetime.time() object.
    """
    # isinstance is the idiomatic type check; the original compared
    # type() objects and never validated end_time at all.
    if not isinstance(start_time, dt.time) or not isinstance(end_time, dt.time):
        print('Error: Given start time must be datetime.time() obj.')
        return None
    in_window = (df[colname] >= start_time) & (df[colname] <= end_time)
    return df[in_window].copy()
def station_super_dict(df,popular_stations_df):
    """Build a per-station hour-of-day ride-frequency dictionary.

    Performs series_freq_dict() for each station in popular_stations_df to
    get the 'by hour' checkout frequency of each station.

    Parameters
    ----------
    df (dataframe)
        primary dataframe of all bikeshare transactions; must contain
        'ADDRESS' and 'Start time' columns.
    popular_stations_df (dataframe)
        dataframe representing bike stations of interest.

    Returns
    -------
    dict()
        a dictionary keyed by station address, whose values are the
        series_freq_dict() hour histograms for that station.
    """
    station_time_hist=dict()
    station_groups = df.groupby('ADDRESS')
    # - - - build the super dict
    for station in popular_stations_df.ADDRESS.values:
        # get_group raises KeyError for an unknown key; the original used a
        # bare except, which would also mask unrelated failures.
        try:
            station_by_hour = station_groups.get_group(station)
        except KeyError:
            # some station names carry a trailing space in the ADDRESS
            # column's values; retry with it appended before giving up.
            station_by_hour = station_groups.get_group(station+' ')
        # - - - The super-dict's keys are the station names, and the values
        # are that station's hourly checkout histogram.
        station_time_hist[station] = series_freq_dict(station_by_hour, 'Start time')
    return station_time_hist
def read_shapefile(sf):
    """Read a shape file into a pandas dataframe object.

    Parameters
    ----------
    sf (shapefile object)
        a shape file

    Returns
    -------
    dataframe
        one row per record, with a trailing 'coords' column holding each
        shape's point list.
    """
    # the first field is the deletion flag; the remaining names become columns
    column_names = [field[0] for field in sf.fields[1:]]
    frame = pd.DataFrame(columns=column_names, data=sf.records())
    # attach each shape's coordinate list alongside its record
    return frame.assign(coords=[shape.points for shape in sf.shapes()])
def plot_popstations(popstations_df, name):
    """Given a dataframe of bike stations of interest, plot the locations of those stations.

    Parameters
    ----------
    popstations_df (dataframe)
        dataframe of bike stations of interest (up to 10 rows; plotted on a 5x2 grid).
    name (str)
        string representing "Morning", "Afternoon", or "Evening" time. Used in the title of the plot.

    Returns
    -------
    None
        produces the plot.

    Notes
    -----
    NOTE(review): relies on the module-level global `df` for the full
    transaction data — only safe to call from the __main__ script in this
    file. Confirm before reusing elsewhere.
    """
    fig = plt.figure(figsize=(10,15))
    plt.style.use('ggplot')
    # hourly checkout histogram per station (uses the global df)
    station_time_hist=station_super_dict(df, popstations_df)
    for i in range(len(popstations_df)):
        ax = fig.add_subplot(5,2,i+1)
        st_name = popstations_df.ADDRESS[i]
        d = station_time_hist[st_name]
        # keys are likely out of order in regards to ride count - fix with OrderedDict()
        d = OrderedDict(sorted(d.items(), key=lambda t: t[0]))
        x = [i[0] for i in list(d.items())]
        y = [i[1] for i in list(d.items())]
        # as these are the most popular stations in the am, lets look only at the am data (4pm-10am) *** including 9:59am ***
        ax.bar(x[4:10], y[4:10], color = 'b', width=0.8)
        ax.set_title(f'{st_name}')
        # y-limit is computed once from the first station and applied to
        # every subplot so the panels share a comparable scale
        if i==0:
            ylim = 1.2* max(d.values())
        ax.set_ylim(0,ylim)
    fig.suptitle(f'Top Ten Capital Bikeshare Stations \n Bike Rentals Per Hour in the {name}',fontsize=18)
    plt.subplots_adjust(hspace=0.5)
def plot_geomap(popstation, daytime_rides, daytime,hardstop=False, metrolines=False, metrostations=False,title=None):
    """Plot the bike stations and lines from start to end for bike rides.

    Parameters
    ----------
    popstation (dataframe or row)
        dataframe of bike stations of interest, or a single station row
        (the two cases are distinguished at runtime below).
    daytime_rides (dataframe)
        dataframe of morning_rides, afternoon_rides, or evening_rides.
    daytime (str)
        string of `"Morning"`, `"Afternoon"`, or `"Evening"`. Used in title of plot.
    hardstop (int)
        limit the number of rides to look at in daytime_rides. Mostly for testing on subsets. Defaults to `hardstop=False`.
    metrolines (bool)
        load and plot the geometry for the metro rail routes
    metrostations (bool)
        load and plot the geometry for the metro rail stations
    title (str, optional)
        custom plot title; a default is generated when omitted.

    Returns
    -------
    None
        Produces a GeoDataFrame plot.

    Notes
    -----
    NOTE(review): reads shapefiles from hard-coded relative paths and relies
    on the module-level global `station_locations`; only safe to call from
    this file's __main__ script with the expected working directory.
    """
    # - - - READ IN SHAPE FILE FOR BORDER OF DC
    # data source: https://opendata.dc.gov/datasets/23246020d6894453bdfcee00956df818_41
    wash_shp_path = '../misc/Washington_DC_Boundary/Washington_DC_Boundary.shp'
    gpd_washborder = gpd.read_file(wash_shp_path)
    # - - - READ IN SHAPE FILE FOR STREET MAP OF DC
    street_shp_path = '../misc/Street_Centerlines/Street_Centerlines.shp'
    gpd_street = gpd.read_file(street_shp_path)
    # - - - PLOT DC STREET LINES AND BORDER POLYGON
    plt.style.use('ggplot')
    fig,ax = plt.subplots(figsize=(15,15))
    gpd_street.geometry.plot(ax = ax, color='k', linewidth=.25 )
    gpd_washborder.geometry.plot(ax = ax, color = 'grey', linewidth =.5, alpha = .3)
    # - - - if kwarg 'metro' is not set to False
    if metrolines:
        metro_lines = gpd.read_file('../misc/Metro_Lines/Metro_Lines.shp')
        c = metro_lines.NAME.values
        # each metro line's NAME doubles as its plot color
        for num in range(len(metro_lines)-1):
            c= metro_lines.NAME[num]
            gpd.GeoSeries(metro_lines.iloc[num].geometry).plot(ax = ax, color = c, label=f'Metro Rail: {c.capitalize()} Line')
        ax.legend()
    if metrostations:
        metro_stations = gpd.read_file('../misc/Metro_Stations_in_DC/Metro_Stations_in_DC.shp')
        metro_stations.geometry.plot(ax = ax, color = 'w', label = 'Metro Rail Stations',zorder=4)
        ax.legend()
    # if there are fewer rows than the declared 'hardstop', change hardstop to False
    hardstop_cap = daytime_rides.size
    if hardstop > hardstop_cap:
        print(f'Given number of bikeshare transactions to plot (hardstop = {hardstop}) exceeds number available {hardstop_cap}!\nPlotting up to {hardstop_cap} transactions.')
        hardstop = False
    # handle the plotting of the popstation based on it's datatype (if multiple stations were passed in or only one was passed in)
    popstation_is_dataframe = 'DataFrame' in str(type(popstation))
    if popstation_is_dataframe:
        # handle dataframe popstation
        ax.scatter(x =popstation.LONGITUDE.values, y=popstation.LATITUDE.values, color = 'b', s=100, zorder = 5, alpha=1, marker="*",label = f'Popular in {daytime}')
        # for the plotting of rides at that station we must handle the terminal number values being a single or multiple valued array/list.
        terminals = [x for x in popstation.TERMINAL_NUMBER.values]
    else:
        # single-station case: look its display name up in the global table
        station_name= station_locations[station_locations['TERMINAL_NUMBER'] == popstation.TERMINAL_NUMBER].ADDRESS.values[0]
        ax.scatter(x =popstation.LONGITUDE, y=popstation.LATITUDE, color = 'b', s=100, zorder = 5, alpha=1, marker="*",label = f'{station_name}')
        # for the plotting of rides at that station we must handle the terminal number values being a single or multiple valued array/list.
        terminals = [popstation.TERMINAL_NUMBER]
    # TODO: Refactor to optimize for runtime complexity. O(n) is probably not as good as it could be. Need help here.
    # - - - NETWORK PLOT OF WHERE THE CUSTOMERS OF MORNING RIDES GO WITHIN DC
    for i,ride in daytime_rides.iterrows():
        # looking at only the most popular stations and where the customers go.
        if ride.TERMINAL_NUMBER in terminals:
            # locate the destination station's coordinates in the global table
            mask = station_locations['TERMINAL_NUMBER'].values == ride['End station number']
            x1 = ride.LONGITUDE
            x2 = station_locations[mask].LONGITUDE
            y1 = ride.LATITUDE
            y2 = station_locations[mask].LATITUDE
            # sometimes the starting station is not in the station_locations df (outdated station locations? new stations?)
            # if this happens, the length of the returned x2 series will be zero
            if len(list(x2.values)) > 0 and len(list(y2.values)) > 0:
                X= [x1,x2 ]
                X_float = [float(x) for x in X]
                Y = [y1,y2 ]
                Y_float = [float(y) for y in Y]
                # faint red segment from origin to destination, X marks the end
                ax.plot(X_float,Y_float,'r',linewidth=.5, alpha=.1)
                ax.scatter(X_float[1], Y_float[1], color = 'k', marker="X",alpha=.1)
        if hardstop:
            if i > hardstop:
                break
    # - - - make it sexy
    ax.set_xlim(-77.13,-76.90)
    ax.set_ylim(38.79,39)
    plt.xlabel('Longitude ($^\circ$West)')
    plt.ylabel('Latitude ($^\circ$North)')
    if title:
        ax.set_title(title, fontsize=20)
    else:
        ax.set_title(f'Capital Bikeshare \n 10 Highest Performing Stations during the {daytime}', fontsize=20)
    plt.legend()
def print_args(args):
    """Echo all parsed command-line argument values to the console.

    Parameters
    ----------
    args (argparse object)
        contains all args and their values.

    Returns
    -------
    None
        prints to console.
    """
    # - - - Output the args for visual verification. Especially useful during testing.
    print('ARG STATUS \n')
    print(f'--barchart \t{args.barchart}')
    print(f'--geoplot \t{args.geoplot}')
    # a dflim of 0 means "no file limit" and is reported as -1
    dflim = args.dflim if args.dflim != 0 else -1
    print(f'dflim \t{dflim}')
    print('-'*72)
class StationStats(object):
    """Per-station usage statistics for a single Capital Bikeshare station.

    Parameters
    ----------
    df (DataFrame)
        bikeshare transaction data; must contain 'TERMINAL_NUMBER',
        'Start date', 'Start time', and 'End time' columns.
    station_terminal_number
        terminal number of the bike station of interest.

    Attributes
    ----------
    station_id : the station's terminal number.
    rides : DataFrame of all transactions that started at this station.
    rates : nested dict, day name -> '{h}hr_rates' -> list of per-calendar-day
        ride rates for that hour (see calc_station_rates).
    """
    def __init__(self, df, station_terminal_number):
        self.station_id = station_terminal_number
        self.rides = df[df['TERMINAL_NUMBER']==station_terminal_number]
        # BUGFIX: this assignment previously sat at class-body level (after
        # calc_station_rates), where `self` and `df` are undefined names;
        # it belongs here in __init__.
        self.rates = self.calc_station_rates(df, station_terminal_number)
    def calc_station_rates(self, df, station_terminal_number):
        ''' Gets the ride rate of a given station by hour for each day Mon-Sun. Returned as a dictionary of dictionaries'''
        # define keys for the nested dictionaries
        days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
        hours = [f'{x}hr_rates' for x in range(24)]
        # super dict (dict of dicts): one bucket list per (day, hour) pair
        dct = {day: {hour_key: list() for hour_key in hours} for day in days}
        # filter df by station number of interest, keeping only the columns we need
        terminal_number_mask = df['TERMINAL_NUMBER'] == station_terminal_number
        cols_we_care_about = ['Start date', 'TERMINAL_NUMBER', 'Start time', 'End time']
        station_rides = df[terminal_number_mask][cols_we_care_about].copy()
        station_rides['Start date'] = station_rides['Start date'].apply(lambda x: str(x))
        # extract and separate the date and the hour-of-day from the timestamp
        station_rides['Timestamp_date'] = station_rides['Start date'].map(lambda x: x.split(' ')[0])
        station_rides['Timestamp_hour'] = station_rides['Start time'].map(lambda x: str(x.hour))
        # all rows are now for the same station and only the start hour is
        # needed, so drop the redundant columns
        station_rides.drop(columns=['Start time', 'End time', 'TERMINAL_NUMBER'], inplace=True)
        # for each calendar day, the "rate" for an hour is the size of the
        # group of rides whose start falls in that hour
        for datename, dategroup in station_rides.groupby('Start date'):
            dayname = pd.Timestamp(datename).day_name()
            for hr, grp in dategroup.groupby('Timestamp_hour'):
                dct[dayname][f'{hr}hr_rates'].append(grp.size)
        return dct
    def info(self, daystring):
        ''' returns dataframe of stats for the given bike station's mean, median, and varience of useage by hour for the given day in 'daystring'. '''
        # hours with no observations report 0 for every statistic
        data = [{'MEAN': (round(np.mean(val), 3) if len(val) > 0 else 0),
                 'MEDIAN': (round(np.median(val), 3) if len(val) > 0 else 0),
                 'VARIANCE': (round(np.var(val), 3) if len(val) > 0 else 0)}
                for val in self.rates[daystring].values()]
        return pd.DataFrame(data, index = self.rates[daystring].keys() )
    def kde(self, colname= 'MEDIAN'):
        ''' Joint KDE plot (day-of-week vs hour-of-day) of this station's utilization; returns the seaborn JointGrid. '''
        print('working kde plot...')
        station_df = self.rides[['Start date', 'TERMINAL_NUMBER']].copy()
        station_df['dayofweek'] = pd.to_datetime(station_df['Start date'].values).dayofweek
        station_df['hour'] = pd.to_datetime(station_df['Start date'].values).hour
        x = station_df['dayofweek']
        y = station_df['hour']
        g = sns.jointplot(x,y,kind='kde',color='blue',xlim=(-0.5,6.5),ylim=(0,23), space=0)
        tics = list(range(0,25,2))
        g.ax_joint.set_yticks(tics)
        g.fig.suptitle(f"{colname.capitalize()} Bike Station Utilization \n {self.rides['Start station'].values[0]}") # can also get the figure from plt.gcf()
        g.set_axis_labels('Day of Week','Time of Day (0-24)' )
        g.ax_joint.set_xticklabels(['','Mon','Tue','Wen','Thu','Fri','Sut','Sun'])
        print(' ... done')
        return g
def plot_geoms(lines=False, metrostations=False, bikestations=False, title=None):
    """Plot the DC street map and border, optionally with metro/bike layers.

    Parameters
    ----------
    lines (bool)
        load and plot the metro rail route geometries.
    metrostations (bool)
        load and plot the metro rail station geometries.
    bikestations (bool)
        scatter the bikeshare station locations (uses the module-level
        global `station_locations`).
    title (str, optional)
        custom plot title; a default is generated when omitted.

    Returns
    -------
    matplotlib Axes
        the axes the layers were drawn on.

    Notes
    -----
    NOTE(review): reads shapefiles from hard-coded relative paths; only
    safe to call with this project's expected working directory.
    """
    # - - - READ IN SHAPE FILE FOR BORDER OF DC
    # data source: https://opendata.dc.gov/datasets/23246020d6894453bdfcee00956df818_41
    gpd_washborder = gpd.read_file('../misc/Washington_DC_Boundary/Washington_DC_Boundary.shp')
    # - - - READ IN SHAPE FILE FOR STREET MAP OF DC
    gpd_street = gpd.read_file('../misc/Street_Centerlines/Street_Centerlines.shp')
    # - - - PLOT DC STREET LINES AND BORDER POLYGON
    plt.style.use('ggplot')
    fig,ax = plt.subplots(figsize=(10,10))
    gpd_street.geometry.plot(ax = ax, color='k', linewidth=.25 )
    gpd_washborder.geometry.plot(ax = ax, color = 'grey', linewidth =.5, alpha = .3)
    # - - - if kwarg 'metro' is not set to False
    if lines:
        metro_lines = gpd.read_file('../misc/Metro_Lines/Metro_Lines.shp')
        c = metro_lines.NAME.values
        # each metro line's NAME doubles as its plot color
        for num in range(len(metro_lines)-1):
            c= metro_lines.NAME[num]
            gpd.GeoSeries(metro_lines.iloc[num].geometry).plot(ax = ax, color = c, label=f'Metro Rail: {c.capitalize()} Line')
        ax.legend()
    if metrostations:
        metro_stations = gpd.read_file('../misc/Metro_Stations_in_DC/Metro_Stations_in_DC.shp')
        metro_stations.geometry.plot(ax = ax, color = 'w', label = 'Metro Rail Stations',zorder=4)
        ax.legend()
    if bikestations:
        ax.scatter(station_locations.LONGITUDE.values,station_locations.LATITUDE.values, c='b',alpha=.4, marker='o',label='Captial Bikeshare Bikestations')
    ax.set_xlim(-77.13,-76.90)
    ax.set_ylim(38.79,39)
    plt.xlabel('Longitude ($^\circ$West)')
    plt.ylabel('Latitude ($^\circ$North)')
    ax.legend()
    if title:
        ax.set_title(title, fontsize=20)
    else:
        ax.set_title(f'Capital Bikeshare And Metro Rail Stations', fontsize=20)
    return ax
def popular_stations(df,time_start,time_stop, top_n=10):
    """Return the top_n most popular checkout stations in a time window.

    Parameters
    ----------
    df (data frame)
        data frame containing the bike checkin/checkout transactions.
    time_start (str)
        military-time string such as "0500", "1830", or "2215"
        (inclusive lower bound of the window).
    time_stop (str)
        military-time string for the inclusive upper bound.
    top_n (int)
        number of stations to return; defaults to 10.

    Returns
    -------
    data frame
        Columns: TERMINAL_NUMBER, RIDE_COUNT, plus the location columns
        (LATITUDE, LONGITUDE, ADDRESS) merged in from the module-level
        station_locations dataframe.
    """
    # recast the military-time strings into datetime.time() bounds; the
    # upper bound is padded to :59 seconds so the window is inclusive
    window_open = dt.time(int(time_start[0:2]), int(time_start[2:4]), 0)
    window_close = dt.time(int(time_stop[0:2]), int(time_stop[2:4]), 59)
    # keep only the rides that began inside the window
    rides_in_window = time_filter(df, 'Start time', window_open, window_close)
    # count checkouts per station and keep the busiest top_n
    ride_counts = rides_in_window.groupby('TERMINAL_NUMBER').size()
    ride_counts = ride_counts.reset_index().rename(columns={0: 'RIDE_COUNT'})
    busiest = ride_counts.sort_values(by='RIDE_COUNT', ascending=False)[0:top_n]
    # join in the station coordinates for plotting
    return busiest.merge(station_locations, on='TERMINAL_NUMBER', how='left')
def bikestations_near_railstations(max_distance=200, showplot=False):
    '''Find bike stations within max_distance meters of any Metro rail station.

    Parameters
    ----------
    max_distance (int)
        radius in meters around each rail station; defaults to 200.
    showplot (bool)
        when True, draw the base map and a dashed segment from each
        flagged bike station to the nearby rail station.

    Returns
    -------
    tuple
        (filtered_stations_df, distances, lineplot):
        filtered_stations_df - copy of the module-level station_locations
        restricted to bike stations with at least one rail station within
        max_distance; distances - dict keyed by rail station name whose
        values are lists of ((lon, lat, terminal), meters) tuples;
        lineplot - the last matplotlib line artist drawn (None when
        showplot is False or nothing qualified).
    '''
    # (lon, lat, terminal_number) triples for every bike station (global table)
    bikestation_coords = list(zip(station_locations['LONGITUDE'].values,station_locations['LATITUDE'].values, station_locations['TERMINAL_NUMBER'].values))
    metro_stations = gpd.read_file('../misc/Metro_Stations_in_DC/Metro_Stations_in_DC.shp')
    # (lon, lat, name) triples for every rail station
    rail_coords = list(zip(metro_stations.geometry.x,metro_stations.geometry.y, metro_stations.NAME))
    ''' for each Metro rail station, we'll determine which bike stations are less than 200m away.
    A dictionary with keys as rail stations will have values that are a list of tuples -> (bikestation_terminal_number, distance from rail station)
    '''
    distances = dict()
    if showplot:
        plot_geoms(lines=True, metrostations=True,bikestations=True)
    flagged_bikestations=list()
    lineplot = None
    # NOTE(review): O(bikes x rails) pairwise distance scan — acceptable at
    # this data size but worth a spatial index if the datasets grow.
    for bs in bikestation_coords:
        for rs in rail_coords:
            # the distance function requires lat/long in reversed order than what I have it, so "reversed(X_coords)" is now implemented.
            dist = int(distance(reversed(bs[0:2]),reversed(rs[0:2])).m)
            if dist <= max_distance:
                flagged_bikestations.append(bs[2])
                # first hit for this rail station creates its list (EAFP)
                try:
                    distances[rs[2]].append((bs,dist))
                except KeyError as err:
                    distances[rs[2]]=list()
                    distances[rs[2]].append((bs,dist))
                if showplot:
                    x = [bs[0], rs[0]]
                    y = [bs[1], rs[1]]
                    lineplot =plt.plot(x,y,'g--',linewidth=.85)
                    plt.scatter(bs[0], bs[1],color='r')
    # keep only the flagged bike stations and renumber the index from zero
    filtered_stations_df = station_locations[station_locations.TERMINAL_NUMBER.isin(flagged_bikestations)].copy()
    filtered_stations_df.reset_index(inplace=True)
    filtered_stations_df.drop('index', axis=1,inplace=True)
    return filtered_stations_df,distances,lineplot
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - MAIN - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
# - - - Argparse
parser = argparse.ArgumentParser()
parser.add_argument('--barchart', help = 'activate barcharts', type=bool, default = False)
parser.add_argument('--geoplot', help='activate geographic data map', type = bool, default = False)
# parser.add_argument('--testgeo', help='activate geographic data map', type = bool, default = False)
parser.add_argument('--dflim', help = 'limit the number of files used to build main df', type=int, default = 0)
args = parser.parse_args()
# - - - Parse the arguments into variable names
show_barchart = args.barchart
show_geomap = args.geoplot
# testgeo = args.testgeo
dflim = args.dflim
# - - - Print to console the args for visual verification
print_args(args)
# - - -Define the folder containing only data files (csv or txt)
data_folder = "../data/"
df = pd_csv_group(data_folder, dflim)
# - - - Program appears to hang while handling the remaining code base. Output a "I am thinking" status.
print('Doing data science...')
print('# - - - FEATURE AND DATA ENGINEERING - - - #')
# the locations of the bike stations in lat/long are found in another dataset from Open Data DC:
# https://opendata.dc.gov/datasets/capital-bike-share-locations;
# detailed description of data from this file can be found here:
# https://www.arcgis.com/sharing/rest/content/items/a1f7acf65795451d89f0a38565a975b3/info/metadata/metadata.xml?format=default&output=html
station_locations_df = pd.read_csv('../misc/Capital_Bike_Share_Locations.csv')
# taking only relevant information from the data
station_locations = station_locations_df[['TERMINAL_NUMBER', 'LATITUDE', 'LONGITUDE','ADDRESS']].copy()
# we can now merge the new bikestation locations dataframe into the primary dataframe
df=df.merge(station_locations, left_on='Start station number', right_on='TERMINAL_NUMBER')
# - - - Create 'start time' and 'end time' columns after recasting to datetime objects.
df['Start time'] =[x.time() for x in pd.to_datetime((df['Start date']))]
df['End time'] =[x.time() for x in pd.to_datetime((df['End date']))]
print('# - - - DATA CLEANING - - - #')
# drop unnecessary columns
df.drop(['End date', 'Start station', 'End station', 'Member type'], axis = 1, inplace=True)
# drop redundant time from 'start date' col
df['Start date'] = df['Start date'].apply(lambda x: x.split(' ')[0])
df['Start date'] = df['Start date'].apply(lambda x: dt.date( int(x.split('-')[0]), int(x.split('-')[1]), int(x.split('-')[2]) ))
df.drop('Start station number', axis=1, inplace=True)
# - - - CLASS OBJECT INSTANTIATION: BIKEREPORT()
# - - - Which bikes (by bike number) have been used the most (by duration)?
print('# - - - BUILDING BIKE REPORT OBJECT - - - #')
most_used_bikes_10 = df[['Bike number', 'Duration']].groupby('Bike number').agg(sum).sort_values(by='Duration', ascending = False)[:10]
# - - - Generate reports for each of the top ten most used bikes.
show_bike_reports = False
if show_bike_reports:
for i in range(9):
br = BikeReport(df, most_used_bikes_10.iloc[i].name)
print(br)
# ADDRESS THE BUSINESS QUESTIONS
# What are the most popular bike stations for starting a ride in :
# 1) the morning (4am-9am)?
# 2) the afternoon (9am-3pm)?
# 1) the morning (3pm-Midnight)?
# TODO [COMPLETE]: Define a function that returns the popular morning/afternoon/evening bike stations given a start string and stop string of military time.
print('# - - - DETERMINING POPULAR BIKE STATIONS BY TIME OF DAY - - - #')
popular_morning_stations = popular_stations(df, "0400", "0900",top_n=10)
popular_afternoon_stations = popular_stations(df, "0900", "1500",top_n=10)
popular_evening_stations = popular_stations(df, "1500", "2359",top_n=10)
if show_barchart:
print('# - - - GENERATING BAR CHART OF POPULAR STATION RENTAL VOLUME - - - #')
# - - - select a style
plt.style.use('fivethirtyeight')
fig,ax = plt.subplots(figsize=(20,10))
# - - - define the data to plot
layer1 = np.array(popular_morning_stations.RIDE_COUNT.values)
layer2 = np.array(popular_afternoon_stations.RIDE_COUNT.values)
layer3 = np.array(popular_evening_stations.RIDE_COUNT.values)
labels_mor = [popular_morning_stations.ADDRESS.values]
labels_aft = [popular_afternoon_stations.ADDRESS.values]
labels_eve = [popular_evening_stations.ADDRESS.values]
# - - - build the bar plot
width = 0.8
xlocations = np.array(range(len(layer1)))
# (adding subsequent layers to build a stacked bar chart)
ax.bar(xlocations, layer3+layer2+layer1, width, label = 'Evening Rides', color = 'y', align = 'center')
ax.bar(xlocations, layer2+layer1, width, label = 'Afternoon Rides', color = 'b', align = 'center')
ax.bar(xlocations, layer1, width, label = 'Morning Rides', color = 'r', align = 'center')
# - - - make it sexy
ax.set_xticks(ticks=xlocations)
ax.set_xticklabels(labels_mor[0], rotation=0)
for tick in ax.xaxis.get_major_ticks()[1::2]:
tick.set_pad(35)
ax.set_xlabel("Station Name/Location")
ax.set_ylabel("Two-Year Ride Count")
ax.yaxis.grid(True)
ax.legend(loc='best', prop={'size':'small'})
ax.set_title("Top 10 Popular Bike Stations by Time of Day")
fig.tight_layout(pad=1)
popstations = list()
popstations.append(list(popular_morning_stations.TERMINAL_NUMBER.values))
popstations.append(list(popular_afternoon_stations.TERMINAL_NUMBER.values))
popstations.append(list(popular_evening_stations.TERMINAL_NUMBER.values))
popstations = [item for sublist in popstations for item in sublist] #thanks stack overflow
popstations_df = station_locations[station_locations['TERMINAL_NUMBER'].isin(popstations)]
plot_geoms(title='Highest Volume Bike Stations')
for s in popstations:
#sx = s['LATITUDE']
slat = station_locations[station_locations['TERMINAL_NUMBER']==s]['LATITUDE'].values[0]
slong = station_locations[station_locations['TERMINAL_NUMBER']==s]['LONGITUDE'].values[0]
#print(slat,slong)
sname = station_locations[station_locations['TERMINAL_NUMBER']==s]['ADDRESS'].values[0]
plt.scatter(slong,slat,marker='o',label = sname)
#plt.legend()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.show(block=False)
# ------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------
# Now build a dictionary for each station where the keys are the time(by hour)
# and the values are the counts of rides for that hour for that station.
# Each station's dictionary will all be stored in a 'super dictionary'
if show_barchart:
print('# - - - PLOTTING POPULAR STATIONS BY TIME OF DAY - - - #')
station_time_hist2_pop_morn_stations = station_super_dict(df, popular_morning_stations)
station_time_hist2_pop_aft_stations = station_super_dict(df, popular_afternoon_stations)
station_time_hist2_pop_eve_stations = station_super_dict(df, popular_evening_stations)
# These barcharts need some serious devine intervention... :/
plot_popstations(popular_morning_stations, 'Morning')
plot_popstations(popular_afternoon_stations, 'Afternoon')
plot_popstations(popular_evening_stations, 'Evening')
# - - - -
# Statistical Analysis:
# Are the bike stations that are "close" to Metro rail stations used more
# in terms of bikes checked out over the year?
print('# - - - DETERMINING DISTANCE FROM EACH RAIL STATIONS BIKE STATIONS - - - #')
# we need a distance formula for WTG coords. Enter geopy.distance FTW!
# source: https://janakiev.com/blog/gps-points-distance-python/
# for every rail station, find the bike stations that are less than 200m away.
bikestation_prox_railstation_df, distances_dict, pltimg = bikestations_near_railstations(max_distance =200, showplot=True)
# The "bikestation_prox_railstation_df" effectively contains a filtered copy of the "station_locations" dataframe describing all bike stations.
# We can look at the median, mean, and IQR for each of these bike stations over (initially) the last year.
'''
In: len(station_locations)
Out: 578
- Of the complete set of bike stations, how many are near a rail station?
In: len(bikestation_prox_railstation)
Out: 54
- What is the difference between these two groups in terms of utilization over a year?
'''
# lets look at the primary DF and filter by bike stations that have a rail station nearby (given by bikestation_prox_railstation_df)
print('# - - - FILTERING MAIN DATAFRAME FOR BIKE STATIONS WITHIN 200m OF RAIL STATIONS - - - #')
df_filtered_for_proximate_railstations = df[df['TERMINAL_NUMBER'].isin(bikestation_prox_railstation_df['TERMINAL_NUMBER'])]
df_time_filtered2019 = df_filtered_for_proximate_railstations[df_filtered_for_proximate_railstations['Start date'].between(dt.date(2018,10,31),dt.date(2019,12,31),inclusive=True)]
'''
HYPOTHESIS TESTING CHECKPOINT:
We have two samples: bike stations near a rail station and bike stations that are not.
For each group, we want to get a mean of the sums of bike checkouts over the course of a day.
mean(sum(bike checkouts in a day) for day in data range)
'''
# of all bike stations(terminals), they are either "close to rail station" or not
all_terminal_numbers = set(df.TERMINAL_NUMBER)
terminals_near_rail = set(df_time_filtered2019.TERMINAL_NUMBER)
terminals_not_near_rail = all_terminal_numbers - terminals_near_rail
'''
TUESDAY NIGHT:
Lets avoid the "sum of bikes checked out per day" and instead focus on "sum of bikes checked out per station group"
which gives us the total bike checkouts for each of the two groups over a one-year time span.
Now, since the sample sizes are drastically different (~10x different), we can divide by sample size and get an average
representing transaction count per station for each group. The question here: is the average transaction count per
bike station greater for those stations near Metro Rail (subway) stations or those with no rail station nearby?
'''
print('# - - - DETERMIMING RATIO OF RENTAL VOLUME BETWEEN "NEAR RAIL" AND "NOT NEAR RAIL" BIKE STATIONS - - - #')
transaction_total_not_near_rail = df[df['TERMINAL_NUMBER'].isin(terminals_not_near_rail)].size
total_stations_not_near_rail = len(terminals_not_near_rail)
transaction_total_near_rail = df[df['TERMINAL_NUMBER'].isin(terminals_near_rail)].size
total_stations_near_rail = len(terminals_near_rail)
# Ratio: (bike rentals near a rail station) to (bike rentals not near a rail station)
station_group_ratio = transaction_total_near_rail / transaction_total_not_near_rail
# >>> 0.268351
# Which would be unremarkable if there were also 0.26 as many bike stations near rail stations as not. BUT...!
# when we divide by sample size for each group we get
station_groups_ratio_per_station=(transaction_total_near_rail/total_stations_near_rail) / (transaction_total_not_near_rail/total_stations_not_near_rail)
# >>> 2.62781
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
ax2 = fig.add_subplot(1,2,2)
rental_count_by_station_cat = (transaction_total_near_rail,transaction_total_not_near_rail)
rental_count_per_station_count_per_station_cat = (transaction_total_near_rail/total_stations_near_rail,transaction_total_not_near_rail/total_stations_not_near_rail)
tick_loc = (1,2)
ax1.bar(tick_loc, rental_count_by_station_cat)
ax1.set_xticks(ticks=tick_loc)
ax1.set_xticklabels(('Near Rail','Not Near Rail'))
ax1.set_title('Rental Count by Station Category')
ax2.bar(tick_loc, rental_count_per_station_count_per_station_cat)
ax2.set_xticks(ticks=tick_loc)
ax2.set_xticklabels(('Near Rail','Not Near Rail'))
ax2.set_title('Rental Count Per Station by Station Category')
plt.show(block=False)
print('# - - - PLOTTING THE METRO STATION MAP AND 2019 BIKE STATIONS CLOSE TO RAIL - - - #')
# show the bike stations that are close (within 200m) to a rail station
plot_geoms(lines=True, metrostations=True,bikestations=False)
x = df_time_filtered2019.LONGITUDE.values
y = df_time_filtered2019.LATITUDE.values
plt.scatter(x,y,color='r',marker ="D", label="Bike Stations Near Rail")
plt.legend()
plt.show(block=False)
# show the utilization of each of the "near rail" bike stations over time
# This plot shows the seasonal cycle throug the 9 years of data, highlighting the dramatic drop in bikeshare rentals during the winter.
print('# - - - DETERMINING THE WEEKLY VOLUME OF "NEAR RAIL" BIKE STATIONS ACROSS ALL OF DATASET (2010-2019) - - - #')
stations_near_rail_df = df[df['TERMINAL_NUMBER'].isin(terminals_near_rail)]
weekly_sum_of_rentals_by_station_df = pd.DataFrame(index = [f'{yr}-{wknum}' for yr in range(2010,2020) for wknum in range(1,54) ],
columns=[str(x) for x in sorted(list(terminals_near_rail))],data=0)
for station in sorted(list(terminals_near_rail)):
station_df = stations_near_rail_df[stations_near_rail_df['TERMINAL_NUMBER'] == station].copy().sort_values(by='Start date')
station_df['Start date ordinal'] = station_df['Start date'].apply(lambda x: x.toordinal())
grpby_dt = station_df.groupby('Start date ordinal')
print(f'Aggregating daily usage for Station {station}')
for daynum,group_df in grpby_dt:
weeknum = dt.date.isocalendar(dt.date.fromordinal(daynum))[1]
yr =dt.date.isocalendar(dt.date.fromordinal(daynum))[0]
index_for_df = f'{yr}-{weeknum}'
weekly_sum_of_rentals_by_station_df[str(station)].loc[index_for_df]+=group_df.size
# Since there are so many lines on top of each other, lets look at just a few that are close to each other.
# There are two stations near each other. Lets see how their bike rental activity compares over time.
print('# - - - COMPARING RENTAL VOLUME OF BIKE STATIONS IN CLOSE PROXIMITY OVER ALL OF DATASET TIME RANGE - - - #')
station_terminal_pair1 = ['31650','31208']
station_terminal_pair2 = ['31254','31291']
station_terminal_pair3 = ['31124','31105']
def compare_bikestations(station_terminal_pair, wk_start=0):
'''station_terminal_pair: two station terminal numbers to compare visually
wk - week number from 0 to 529'''
station1 = station_locations[station_locations['TERMINAL_NUMBER']==int(station_terminal_pair[0])]
station2 = station_locations[station_locations['TERMINAL_NUMBER']==int(station_terminal_pair[1])]
station1_name = station_locations[station_locations['TERMINAL_NUMBER']==int(station_terminal_pair[0])].ADDRESS.values[0]
station2_name = station_locations[station_locations['TERMINAL_NUMBER']==int(station_terminal_pair[1])].ADDRESS.values[0]
# plot the geometry data for metrolines and metro stations with only two "close proximity" bike stations to compare rental volume
plot_geoms(lines=True, metrostations=True, bikestations=False)
plt.scatter(station1.LONGITUDE, station1.LATITUDE,color='r',marker='o', label=station1.ADDRESS.values)
plt.scatter(station2.LONGITUDE, station2.LATITUDE,color='b',marker='o', label=station2.ADDRESS.values)
plt.legend()
plt.show(block=False)
fig,ax = plt.subplots()
ax = weekly_sum_of_rentals_by_station_df[station_terminal_pair][wk_start::].plot()
ax.legend((station1_name,station2_name))
plt.title('Comparing Rental Volume of Two Nearby Bike Stations')
#ax.set_xticklabels((weekly_sum_of_rentals_by_station_df.index.values[0:-1:13]))
ax.set_xlabel('YEAR - #Week')
ax.set_ylabel('Weekly Ride Count per Station')
plt.show(block=False)
compare_bikestations(station_terminal_pair1,wk_start=250)
compare_bikestations(station_terminal_pair2,wk_start=250)
compare_bikestations(station_terminal_pair3,wk_start=250)
# - - - End of program
print('...done \n') | [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.array",
"pandas.to_datetime",
"numpy.mean",
"os.listdir",
"datetime.time",
"geopandas.read_file",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"datetime.date.fromordinal",... | [((1744, 1801), 'pandas.concat', 'pd.concat', (['df_list'], {'axis': '(0)', 'ignore_index': '(True)', 'sort': '(False)'}), '(df_list, axis=0, ignore_index=True, sort=False)\n', (1753, 1801), True, 'import pandas as pd\n'), ((7590, 7632), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'fields', 'data': 'records'}), '(columns=fields, data=records)\n', (7602, 7632), True, 'import pandas as pd\n'), ((8128, 8156), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 15)'}), '(figsize=(10, 15))\n', (8138, 8156), True, 'import matplotlib.pyplot as plt\n'), ((8160, 8183), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (8173, 8183), True, 'import matplotlib.pyplot as plt\n'), ((9068, 9099), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (9087, 9099), True, 'import matplotlib.pyplot as plt\n'), ((10243, 10271), 'geopandas.read_file', 'gpd.read_file', (['wash_shp_path'], {}), '(wash_shp_path)\n', (10256, 10271), True, 'import geopandas as gpd\n'), ((10420, 10450), 'geopandas.read_file', 'gpd.read_file', (['street_shp_path'], {}), '(street_shp_path)\n', (10433, 10450), True, 'import geopandas as gpd\n'), ((10509, 10532), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (10522, 10532), True, 'import matplotlib.pyplot as plt\n'), ((10546, 10576), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (10558, 10576), True, 'import matplotlib.pyplot as plt\n'), ((14323, 14362), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitude ($^\\\\circ$West)"""'], {}), "('Longitude ($^\\\\circ$West)')\n", (14333, 14362), True, 'import matplotlib.pyplot as plt\n'), ((14366, 14405), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latitude ($^\\\\circ$North)"""'], {}), "('Latitude ($^\\\\circ$North)')\n", (14376, 14405), True, 'import matplotlib.pyplot as 
plt\n'), ((14585, 14597), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14595, 14597), True, 'import matplotlib.pyplot as plt\n'), ((19863, 19937), 'geopandas.read_file', 'gpd.read_file', (['"""../misc/Washington_DC_Boundary/Washington_DC_Boundary.shp"""'], {}), "('../misc/Washington_DC_Boundary/Washington_DC_Boundary.shp')\n", (19876, 19937), True, 'import geopandas as gpd\n'), ((20012, 20078), 'geopandas.read_file', 'gpd.read_file', (['"""../misc/Street_Centerlines/Street_Centerlines.shp"""'], {}), "('../misc/Street_Centerlines/Street_Centerlines.shp')\n", (20025, 20078), True, 'import geopandas as gpd\n'), ((20137, 20160), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (20150, 20160), True, 'import matplotlib.pyplot as plt\n'), ((20174, 20204), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (20186, 20204), True, 'import matplotlib.pyplot as plt\n'), ((24001, 24071), 'geopandas.read_file', 'gpd.read_file', (['"""../misc/Metro_Stations_in_DC/Metro_Stations_in_DC.shp"""'], {}), "('../misc/Metro_Stations_in_DC/Metro_Stations_in_DC.shp')\n", (24014, 24071), True, 'import geopandas as gpd\n'), ((25970, 25995), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (25993, 25995), False, 'import argparse\n'), ((27464, 27519), 'pandas.read_csv', 'pd.read_csv', (['"""../misc/Capital_Bike_Share_Locations.csv"""'], {}), "('../misc/Capital_Bike_Share_Locations.csv')\n", (27475, 27519), True, 'import pandas as pd\n'), ((32497, 32570), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.0)\n", (32507, 32570), True, 'import matplotlib.pyplot as plt\n'), ((32575, 32596), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (32583, 32596), True, 'import matplotlib.pyplot as 
plt\n'), ((38234, 38246), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (38244, 38246), True, 'import matplotlib.pyplot as plt\n'), ((39027, 39048), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (39035, 39048), True, 'import matplotlib.pyplot as plt\n'), ((39395, 39468), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'color': '"""r"""', 'marker': '"""D"""', 'label': '"""Bike Stations Near Rail"""'}), "(x, y, color='r', marker='D', label='Bike Stations Near Rail')\n", (39406, 39468), True, 'import matplotlib.pyplot as plt\n'), ((39471, 39483), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (39481, 39483), True, 'import matplotlib.pyplot as plt\n'), ((39488, 39509), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (39496, 39509), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1285), 'os.listdir', 'os.listdir', (['data_folder'], {}), '(data_folder)\n', (1272, 1285), False, 'import os\n'), ((1300, 1331), 'pandas.read_csv', 'pd.read_csv', (['(data_folder + file)'], {}), '(data_folder + file)\n', (1311, 1331), True, 'import pandas as pd\n'), ((10820, 10872), 'geopandas.read_file', 'gpd.read_file', (['"""../misc/Metro_Lines/Metro_Lines.shp"""'], {}), "('../misc/Metro_Lines/Metro_Lines.shp')\n", (10833, 10872), True, 'import geopandas as gpd\n'), ((11191, 11261), 'geopandas.read_file', 'gpd.read_file', (['"""../misc/Metro_Stations_in_DC/Metro_Stations_in_DC.shp"""'], {}), "('../misc/Metro_Stations_in_DC/Metro_Stations_in_DC.shp')\n", (11204, 11261), True, 'import geopandas as gpd\n'), ((19120, 19211), 'seaborn.jointplot', 'sns.jointplot', (['x', 'y'], {'kind': '"""kde"""', 'color': '"""blue"""', 'xlim': '(-0.5, 6.5)', 'ylim': '(0, 23)', 'space': '(0)'}), "(x, y, kind='kde', color='blue', xlim=(-0.5, 6.5), ylim=(0, 23\n ), space=0)\n", (19133, 19211), True, 'import seaborn as sns\n'), ((20443, 20495), 'geopandas.read_file', 'gpd.read_file', 
(['"""../misc/Metro_Lines/Metro_Lines.shp"""'], {}), "('../misc/Metro_Lines/Metro_Lines.shp')\n", (20456, 20495), True, 'import geopandas as gpd\n'), ((20814, 20884), 'geopandas.read_file', 'gpd.read_file', (['"""../misc/Metro_Stations_in_DC/Metro_Stations_in_DC.shp"""'], {}), "('../misc/Metro_Stations_in_DC/Metro_Stations_in_DC.shp')\n", (20827, 20884), True, 'import geopandas as gpd\n'), ((21260, 21299), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitude ($^\\\\circ$West)"""'], {}), "('Longitude ($^\\\\circ$West)')\n", (21270, 21299), True, 'import matplotlib.pyplot as plt\n'), ((21307, 21346), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latitude ($^\\\\circ$North)"""'], {}), "('Latitude ($^\\\\circ$North)')\n", (21317, 21346), True, 'import matplotlib.pyplot as plt\n'), ((30040, 30072), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (30053, 30072), True, 'import matplotlib.pyplot as plt\n'), ((30090, 30120), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (30102, 30120), True, 'import matplotlib.pyplot as plt\n'), ((30186, 30238), 'numpy.array', 'np.array', (['popular_morning_stations.RIDE_COUNT.values'], {}), '(popular_morning_stations.RIDE_COUNT.values)\n', (30194, 30238), True, 'import numpy as np\n'), ((30256, 30310), 'numpy.array', 'np.array', (['popular_afternoon_stations.RIDE_COUNT.values'], {}), '(popular_afternoon_stations.RIDE_COUNT.values)\n', (30264, 30310), True, 'import numpy as np\n'), ((30328, 30380), 'numpy.array', 'np.array', (['popular_evening_stations.RIDE_COUNT.values'], {}), '(popular_evening_stations.RIDE_COUNT.values)\n', (30336, 30380), True, 'import numpy as np\n'), ((32426, 32475), 'matplotlib.pyplot.scatter', 'plt.scatter', (['slong', 'slat'], {'marker': '"""o"""', 'label': 'sname'}), "(slong, slat, marker='o', label=sname)\n", (32437, 32475), True, 'import matplotlib.pyplot as plt\n'), ((42280, 42388), 
'matplotlib.pyplot.scatter', 'plt.scatter', (['station1.LONGITUDE', 'station1.LATITUDE'], {'color': '"""r"""', 'marker': '"""o"""', 'label': 'station1.ADDRESS.values'}), "(station1.LONGITUDE, station1.LATITUDE, color='r', marker='o',\n label=station1.ADDRESS.values)\n", (42291, 42388), True, 'import matplotlib.pyplot as plt\n'), ((42391, 42499), 'matplotlib.pyplot.scatter', 'plt.scatter', (['station2.LONGITUDE', 'station2.LATITUDE'], {'color': '"""b"""', 'marker': '"""o"""', 'label': 'station2.ADDRESS.values'}), "(station2.LONGITUDE, station2.LATITUDE, color='b', marker='o',\n label=station2.ADDRESS.values)\n", (42402, 42499), True, 'import matplotlib.pyplot as plt\n'), ((42502, 42514), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (42512, 42514), True, 'import matplotlib.pyplot as plt\n'), ((42523, 42544), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (42531, 42544), True, 'import matplotlib.pyplot as plt\n'), ((42571, 42585), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (42583, 42585), True, 'import matplotlib.pyplot as plt\n'), ((42735, 42799), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparing Rental Volume of Two Nearby Bike Stations"""'], {}), "('Comparing Rental Volume of Two Nearby Bike Stations')\n", (42744, 42799), True, 'import matplotlib.pyplot as plt\n'), ((42993, 43014), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (43001, 43014), True, 'import matplotlib.pyplot as plt\n'), ((1018, 1041), 'os.listdir', 'os.listdir', (['data_folder'], {}), '(data_folder)\n', (1028, 1041), False, 'import os\n'), ((1515, 1572), 'pandas.concat', 'pd.concat', (['df_list'], {'axis': '(0)', 'ignore_index': '(True)', 'sort': '(False)'}), '(df_list, axis=0, ignore_index=True, sort=False)\n', (1524, 1572), True, 'import pandas as pd\n'), ((5525, 5541), 'datetime.time', 'dt.time', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (5532, 5541), True, 'import datetime as 
dt\n'), ((18892, 18939), 'pandas.to_datetime', 'pd.to_datetime', (["station_df['Start date'].values"], {}), "(station_df['Start date'].values)\n", (18906, 18939), True, 'import pandas as pd\n'), ((18979, 19026), 'pandas.to_datetime', 'pd.to_datetime', (["station_df['Start date'].values"], {}), "(station_df['Start date'].values)\n", (18993, 19026), True, 'import pandas as pd\n'), ((28005, 28037), 'pandas.to_datetime', 'pd.to_datetime', (["df['Start date']"], {}), "(df['Start date'])\n", (28019, 28037), True, 'import pandas as pd\n'), ((28085, 28115), 'pandas.to_datetime', 'pd.to_datetime', (["df['End date']"], {}), "(df['End date'])\n", (28099, 28115), True, 'import pandas as pd\n'), ((35924, 35945), 'datetime.date', 'dt.date', (['(2018)', '(10)', '(31)'], {}), '(2018, 10, 31)\n', (35931, 35945), True, 'import datetime as dt\n'), ((35944, 35965), 'datetime.date', 'dt.date', (['(2019)', '(12)', '(31)'], {}), '(2019, 12, 31)\n', (35951, 35965), True, 'import datetime as dt\n'), ((11004, 11049), 'geopandas.GeoSeries', 'gpd.GeoSeries', (['metro_lines.iloc[num].geometry'], {}), '(metro_lines.iloc[num].geometry)\n', (11017, 11049), True, 'import geopandas as gpd\n'), ((20627, 20672), 'geopandas.GeoSeries', 'gpd.GeoSeries', (['metro_lines.iloc[num].geometry'], {}), '(metro_lines.iloc[num].geometry)\n', (20640, 20672), True, 'import geopandas as gpd\n'), ((25320, 25357), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""g--"""'], {'linewidth': '(0.85)'}), "(x, y, 'g--', linewidth=0.85)\n", (25328, 25357), True, 'import matplotlib.pyplot as plt\n'), ((25374, 25410), 'matplotlib.pyplot.scatter', 'plt.scatter', (['bs[0]', 'bs[1]'], {'color': '"""r"""'}), "(bs[0], bs[1], color='r')\n", (25385, 25410), True, 'import matplotlib.pyplot as plt\n'), ((40662, 40689), 'datetime.date.fromordinal', 'dt.date.fromordinal', (['daynum'], {}), '(daynum)\n', (40681, 40689), True, 'import datetime as dt\n'), ((40731, 40758), 'datetime.date.fromordinal', 'dt.date.fromordinal', (['daynum'], 
{}), '(daynum)\n', (40750, 40758), True, 'import datetime as dt\n'), ((17839, 17861), 'pandas.Timestamp', 'pd.Timestamp', (['datename'], {}), '(datename)\n', (17851, 17861), True, 'import pandas as pd\n'), ((18337, 18349), 'numpy.mean', 'np.mean', (['val'], {}), '(val)\n', (18344, 18349), True, 'import numpy as np\n'), ((18406, 18420), 'numpy.median', 'np.median', (['val'], {}), '(val)\n', (18415, 18420), True, 'import numpy as np\n'), ((18479, 18490), 'numpy.var', 'np.var', (['val'], {}), '(val)\n', (18485, 18490), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""
sys.argv[1] - input database file
sys.argv[2] - output mat file
Composed by <NAME> @THU_IVG
Last revision: <NAME> @THU_IVG @Oct 3rd, 2019 CST
"""
import json
import scipy.io as sio
import numpy as np
import itertools
import sys
db_f = sys.argv[1]
with open(db_f) as f:
database = json.load(f)["database"]
steps = list(sorted(set(itertools.chain.from_iterable(
int(an["id"]) for an in itertools.chain.from_iterable(
v["annotation"] for v in database.values())))))
min_id = steps[0]
nb_step = len(steps)
init_dist = np.zeros((nb_step,))
frequency_mat = np.zeros((nb_step, nb_step))
for v in database:
if database[v]["subset"]!="training":
continue
for i, an in enumerate(database[v]["annotation"]):
if i==0:
init_dist[int(an["id"])-min_id] += 1
else:
frequency_mat[int(pan["id"])-min_id, int(an["id"])-min_id] += 1
pan = an
normalized_init_dist = init_dist/np.sum(init_dist)
frequency_mat_sum = np.sum(frequency_mat, axis=1)
normalized_frequency_mat = np.copy(frequency_mat)
mask = frequency_mat_sum!=0
normalized_frequency_mat[mask] /= frequency_mat_sum[mask][:, None]
zero_position = np.where(np.logical_not(mask))[0]
normalized_frequency_mat[zero_position, zero_position] = 1.
sio.savemat(sys.argv[2], {
"init_dist": init_dist,
"frequency_mat": frequency_mat,
"normalized_init_dist": normalized_init_dist,
"normalized_frequency_mat": normalized_frequency_mat,
})
| [
"numpy.copy",
"scipy.io.savemat",
"numpy.logical_not",
"numpy.sum",
"numpy.zeros",
"json.load"
] | [((563, 583), 'numpy.zeros', 'np.zeros', (['(nb_step,)'], {}), '((nb_step,))\n', (571, 583), True, 'import numpy as np\n'), ((600, 628), 'numpy.zeros', 'np.zeros', (['(nb_step, nb_step)'], {}), '((nb_step, nb_step))\n', (608, 628), True, 'import numpy as np\n'), ((1009, 1038), 'numpy.sum', 'np.sum', (['frequency_mat'], {'axis': '(1)'}), '(frequency_mat, axis=1)\n', (1015, 1038), True, 'import numpy as np\n'), ((1066, 1088), 'numpy.copy', 'np.copy', (['frequency_mat'], {}), '(frequency_mat)\n', (1073, 1088), True, 'import numpy as np\n'), ((1296, 1486), 'scipy.io.savemat', 'sio.savemat', (['sys.argv[2]', "{'init_dist': init_dist, 'frequency_mat': frequency_mat,\n 'normalized_init_dist': normalized_init_dist,\n 'normalized_frequency_mat': normalized_frequency_mat}"], {}), "(sys.argv[2], {'init_dist': init_dist, 'frequency_mat':\n frequency_mat, 'normalized_init_dist': normalized_init_dist,\n 'normalized_frequency_mat': normalized_frequency_mat})\n", (1307, 1486), True, 'import scipy.io as sio\n'), ((970, 987), 'numpy.sum', 'np.sum', (['init_dist'], {}), '(init_dist)\n', (976, 987), True, 'import numpy as np\n'), ((314, 326), 'json.load', 'json.load', (['f'], {}), '(f)\n', (323, 326), False, 'import json\n'), ((1209, 1229), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (1223, 1229), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.