code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import matplotlib.pyplot as plt
from numpy import sum, array
from numpy.random import randint, choice
class MonteCarlo(object):
    """A simple Metropolis Monte Carlo implementation for the diffusion model.

    Parameters
    ----------
    energy : callable
        Function mapping a density array to a scalar energy.
    density : array of positive integers
        Number of particles at each site (length >= 2, not all zero).
    temperature : float
        Strictly positive temperature used in the Metropolis acceptance rule.
    itermax : int
        Number of moves performed per call to ``step``.
    """
    def __init__(self, energy, density, temperature=1, itermax=1000):
        from numpy import any, array
        density = array(density)
        self.itermax = itermax

        if temperature == 0:
            raise NotImplementedError(
                "Zero temperature not implemented")
        if temperature < 0e0:
            raise ValueError(
                "Negative temperature makes no sense")

        if len(density) < 2:
            raise ValueError("Density is too short")
        # of the right kind (integer). Unless it is zero length,
        # in which case type does not matter.
        if density.dtype.kind != 'i' and len(density) > 0:
            raise TypeError("Density should be an array of *integers*.")
        # and the right values (positive or null)
        if any(density < 0):
            raise ValueError("Density should be an array of "
                             "*positive* integers.")
        if density.ndim != 1:
            raise ValueError("Density should be a *1-dimensional* "
                             "array of positive integers.")
        if sum(density) == 0:
            raise ValueError("Density is empty.")

        # Keep the energy function on the instance: `step` must use it
        # rather than relying on a module-level `energy` (original bug).
        self.energy = energy
        self.current_energy = energy(density)
        self.temperature = temperature
        self.density = density

    def random_direction(self):
        """Return -1 or +1 with equal probability."""
        return choice([-1, 1])

    def random_agent(self, density):
        """Pick one particle uniformly at random; return the site it occupies."""
        # Particle index
        particle = randint(sum(density))
        current = 0
        for location, n in enumerate(density):
            current += n
            if current > particle:
                break
        return location

    def change_density(self, density):
        """Move one randomly chosen particle one site left or right."""
        location = self.random_agent(density)
        # Nothing to move from an empty site: return an unchanged copy.
        if(density[location]-1 < 0):
            return array(density)
        # Particles reflect at the boundaries; otherwise move randomly.
        if location == 0:
            direction = 1
        elif location == len(density) - 1:
            direction = -1
        else:
            direction = self.random_direction()
        # Now make change
        result = array(density)
        result[location] -= 1
        result[location + direction] += 1
        return result

    def accept_change(self, prior, successor):
        """Metropolis criterion: accept downhill moves always, uphill moves
        with Boltzmann probability exp(-dE / T)."""
        from numpy import exp
        from numpy.random import uniform
        if successor <= prior:
            return True
        return exp(-(successor - prior) / self.temperature) > uniform()

    def step(self):
        """Run ``itermax`` Monte Carlo moves; return (energy, density)."""
        iteration = 0
        while iteration < self.itermax:
            new_density = self.change_density(self.density)
            # Use the energy function given at construction time (the
            # original accidentally referenced a module-level `energy`).
            new_energy = self.energy(new_density)
            accept = self.accept_change(self.current_energy, new_energy)
            if accept:
                self.density, self.current_energy = new_density, new_energy
            iteration += 1
        return self.current_energy, self.density
def energy(density, coefficient=1):
    """ Energy associated with the diffusion model
        :Parameters:
        density: array of positive integers
            Number of particles at each position i in the array/geometry
    """
    from numpy import array, any, sum
    # Normalise the input to an ndarray before validating it.
    arr = array(density)
    # Must hold integers, unless the array is empty (then type is moot).
    if arr.dtype.kind != 'i' and len(arr) > 0:
        raise TypeError("Density should be an array of *integers*.")
    # Occupation numbers cannot be negative.
    if any(arr < 0):
        raise ValueError("Density should be an array" +
                         "of *positive* integers.")
    if arr.ndim != 1:
        raise ValueError("Density should be an a *1-dimensional*" +
                         "array of positive integers.")
    # Pairwise interaction energy: c/2 * sum_i n_i (n_i - 1)
    return 0.5 * coefficient * sum(arr * (arr - 1))
| [
"numpy.random.uniform",
"numpy.sum",
"numpy.any",
"numpy.array",
"numpy.exp",
"numpy.random.choice"
] | [((3444, 3458), 'numpy.array', 'array', (['density'], {}), '(density)\n', (3449, 3458), False, 'from numpy import any, array\n'), ((3734, 3750), 'numpy.any', 'any', (['(density < 0)'], {}), '(density < 0)\n', (3737, 3750), False, 'from numpy import any, array\n'), ((305, 319), 'numpy.array', 'array', (['density'], {}), '(density)\n', (310, 319), False, 'from numpy import any, array\n'), ((974, 990), 'numpy.any', 'any', (['(density < 0)'], {}), '(density < 0)\n', (977, 990), False, 'from numpy import any, array\n'), ((1507, 1522), 'numpy.random.choice', 'choice', (['[-1, 1]'], {}), '([-1, 1])\n', (1513, 1522), False, 'from numpy.random import randint, choice\n'), ((2261, 2275), 'numpy.array', 'array', (['density'], {}), '(density)\n', (2266, 2275), False, 'from numpy import any, array\n'), ((4042, 4070), 'numpy.sum', 'sum', (['(density * (density - 1))'], {}), '(density * (density - 1))\n', (4045, 4070), False, 'from numpy import array, any, sum\n'), ((1281, 1293), 'numpy.sum', 'sum', (['density'], {}), '(density)\n', (1284, 1293), False, 'from numpy import array, any, sum\n'), ((1613, 1625), 'numpy.sum', 'sum', (['density'], {}), '(density)\n', (1616, 1625), False, 'from numpy import array, any, sum\n'), ((2018, 2032), 'numpy.array', 'array', (['density'], {}), '(density)\n', (2023, 2032), False, 'from numpy import any, array\n'), ((2631, 2675), 'numpy.exp', 'exp', (['(-(successor - prior) / self.temperature)'], {}), '(-(successor - prior) / self.temperature)\n', (2634, 2675), False, 'from numpy import exp\n'), ((2678, 2687), 'numpy.random.uniform', 'uniform', ([], {}), '()\n', (2685, 2687), False, 'from numpy.random import uniform\n')] |
# coding: utf-8
import numpy as np
from kerasy.ML.decomposition import PCA, UMAP, tSNE
from kerasy.datasets import mnist
from kerasy.utils import cluster_accuracy
num_mnist = 300  # number of MNIST samples drawn for the tests
n_components = 5  # dimensionality of the learned embedding
epochs = 10  # training epochs for the iterative embeddings
seed = 123  # RNG seed so the tests are reproducible
def get_test_data():
    """Return the first `num_mnist` MNIST digits, flattened, with their labels."""
    (x_train, y_train), _ = mnist.load_data()
    images = x_train[:num_mnist].reshape(num_mnist, -1)
    labels = y_train[:num_mnist]
    return images, labels
def _test_decomposition(model, **kwargs):
    """Fit `model` on MNIST and check that each class is more compact around
    its own centroid than the remaining samples are (within-class variance
    must not exceed between-class variance)."""
    x_train, y_train = get_test_data()
    if hasattr(model, "fit_transform"):
        embedded = model.fit_transform(x_train, **kwargs)
    else:
        model.fit(x_train, **kwargs)
        embedded = model.transform(x_train)
    # Some decompositions yield complex components; keep the real part only.
    embedded = embedded.real
    for label in np.unique(y_train):
        in_class = embedded[y_train == label]
        out_class = embedded[y_train != label]
        centroid = np.mean(in_class, axis=0)
        var_within = np.square(in_class - centroid).sum(axis=1).mean()
        var_outside = np.square(out_class - centroid).sum(axis=1).mean()
        assert var_outside >= var_within
def test_pca():
    """PCA embedding must separate the MNIST classes."""
    _test_decomposition(PCA(n_components=n_components))
def test_tsne():
    """t-SNE embedding must separate the MNIST classes."""
    tsne_params = dict(
        initial_momentum=0.5,
        final_momoentum=0.8,  # (sic) keyword spelling expected by kerasy's tSNE
        eta=500,
        min_gain=0.1,
        tol=1e-05,
        prec_max_iter=50,
        random_state=seed,
    )
    _test_decomposition(
        tSNE(**tsne_params),
        n_components=n_components,
        epochs=epochs,
        verbose=1,
    )
def test_umap():
    """UMAP embedding must separate the MNIST classes."""
    umap_params = dict(
        min_dist=0.1,
        spread=1.0,
        sigma_iter=40,
        sigma_init=1.0,
        sigma_tol=1e-5,
        sigma_lower=0,
        sigma_upper=np.inf,
        random_state=seed,
    )
    _test_decomposition(
        UMAP(**umap_params),
        n_components=n_components,
        epochs=epochs,
        init_lr=1,
        verbose=-1,
    )
| [
"kerasy.datasets.mnist.load_data",
"numpy.square",
"kerasy.ML.decomposition.PCA",
"kerasy.ML.decomposition.tSNE",
"numpy.mean",
"kerasy.ML.decomposition.UMAP",
"numpy.unique"
] | [((270, 287), 'kerasy.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (285, 287), False, 'from kerasy.datasets import mnist\n'), ((746, 764), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (755, 764), True, 'import numpy as np\n'), ((1094, 1124), 'kerasy.ML.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (1097, 1124), False, 'from kerasy.ML.decomposition import PCA, UMAP, tSNE\n'), ((1186, 1309), 'kerasy.ML.decomposition.tSNE', 'tSNE', ([], {'initial_momentum': '(0.5)', 'final_momoentum': '(0.8)', 'eta': '(500)', 'min_gain': '(0.1)', 'tol': '(1e-05)', 'prec_max_iter': '(50)', 'random_state': 'seed'}), '(initial_momentum=0.5, final_momoentum=0.8, eta=500, min_gain=0.1, tol=\n 1e-05, prec_max_iter=50, random_state=seed)\n', (1190, 1309), False, 'from kerasy.ML.decomposition import PCA, UMAP, tSNE\n'), ((1519, 1656), 'kerasy.ML.decomposition.UMAP', 'UMAP', ([], {'min_dist': '(0.1)', 'spread': '(1.0)', 'sigma_iter': '(40)', 'sigma_init': '(1.0)', 'sigma_tol': '(1e-05)', 'sigma_lower': '(0)', 'sigma_upper': 'np.inf', 'random_state': 'seed'}), '(min_dist=0.1, spread=1.0, sigma_iter=40, sigma_init=1.0, sigma_tol=\n 1e-05, sigma_lower=0, sigma_upper=np.inf, random_state=seed)\n', (1523, 1656), False, 'from kerasy.ML.decomposition import PCA, UMAP, tSNE\n'), ((783, 831), 'numpy.mean', 'np.mean', (['x_transformed[y_train == label]'], {'axis': '(0)'}), '(x_transformed[y_train == label], axis=0)\n', (790, 831), True, 'import numpy as np\n'), ((867, 918), 'numpy.square', 'np.square', (['(x_transformed[y_train == label] - center)'], {}), '(x_transformed[y_train == label] - center)\n', (876, 918), True, 'import numpy as np\n'), ((964, 1015), 'numpy.square', 'np.square', (['(x_transformed[y_train != label] - center)'], {}), '(x_transformed[y_train != label] - center)\n', (973, 1015), True, 'import numpy as np\n')] |
import leg_controllers.model as model
import leg_controllers.hopper as hopper
from leg_controllers.designs import Params
import numpy as np
class Observer():
    """
    An implementation of a Kalman filter for the template dynamics.
    The filter is provided the template state measurement, and uses
    the template dynamics to produce an estimate of the template state.
    """
    def __init__(self,params: Params,Q,R,damping_fit):
        self.params = params
        # Q: process-noise covariance; R: measurement-noise covariance.
        self.Q = Q
        self.R = R
        # Prior (pre-measurement) and posterior (post-measurement) estimates
        # of the 2-dimensional template state, with their covariances.
        self.x_pri = np.zeros(2)
        self.x_post = np.zeros(2)
        self.P_pri = np.zeros((2,2))
        self.P_post = np.zeros((2,2))
        # Dimensionless damping ratio derived from the fitted damping
        # coefficient; used in the continuous-time dynamics matrix below.
        self.zeta = damping_fit/(2*model.m_body*hopper.omega)
    def initialize(self, x, P):
        """Seed both prior and posterior with the same state and covariance."""
        self.x_post = x
        self.x_pri = x
        self.P_post = P
        self.P_pri = P
    def prior_update(self, u, dt):
        """Propagate the estimate one step of size `dt` under input `u`."""
        # needs to be discrete time
        _A = np.array([
            [0., 1.],
            [-hopper.omega**2, -2*self.zeta*hopper.omega]
        ])
        # First-order (Euler) discretization of the continuous dynamics.
        A = np.eye(2)+dt*_A
        self.x_pri = A@self.x_post + dt*np.array([0,1])*(u-model.g)
        self.P_pri = A@self.P_post@A.T+self.Q
    def posterior_update(self, y):
        """Correct the prior with measurement `y` (full-state measurement,
        i.e. the measurement matrix is the identity)."""
        # innovation
        xres = y - self.x_pri
        Pres = self.P_pri + self.R
        # Kalman gain.
        K = self.P_pri@np.linalg.inv(Pres)
        self.x_post = self.x_pri+K@xres
self.P_post = (np.eye(2)-K)@self.P_pri | [
"numpy.array",
"numpy.eye",
"numpy.linalg.inv",
"numpy.zeros"
] | [((526, 537), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (534, 537), True, 'import numpy as np\n'), ((560, 571), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (568, 571), True, 'import numpy as np\n'), ((593, 609), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (601, 609), True, 'import numpy as np\n'), ((631, 647), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (639, 647), True, 'import numpy as np\n'), ((921, 996), 'numpy.array', 'np.array', (['[[0.0, 1.0], [-hopper.omega ** 2, -2 * self.zeta * hopper.omega]]'], {}), '([[0.0, 1.0], [-hopper.omega ** 2, -2 * self.zeta * hopper.omega]])\n', (929, 996), True, 'import numpy as np\n'), ((1035, 1044), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1041, 1044), True, 'import numpy as np\n'), ((1310, 1329), 'numpy.linalg.inv', 'np.linalg.inv', (['Pres'], {}), '(Pres)\n', (1323, 1329), True, 'import numpy as np\n'), ((1393, 1402), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1399, 1402), True, 'import numpy as np\n'), ((1091, 1107), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1099, 1107), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
=============================
Elekta phantom data with LCMV
=============================
"""
# authors: Amit & Alex & Eric
from __future__ import print_function
import numpy as np
import mne
from phantom_helpers import (get_data, get_fwd, actual_pos, maxfilter_options,
dipole_amplitudes, dipole_indices, plot_errors)
# Localization error (mm) for every (SSS setting, amplitude, dipole) triple.
errors = np.empty(
    (len(maxfilter_options), len(dipole_amplitudes), len(dipole_indices)))
src, fwd = get_fwd()
for ui, mf in enumerate(maxfilter_options):
    for ai, dipole_amplitude in enumerate(dipole_amplitudes):
        print(('Processing : %4d nAm : SSS=%s'
               % (dipole_amplitude, mf)).ljust(40), end='')
        for di, dipole_idx in enumerate(dipole_indices):
            epochs, evoked, cov, sphere = get_data(
                dipole_idx, dipole_amplitude, mf)
            # Do LCMV
            data_cov = mne.compute_covariance(epochs, tmin=0.)
            stc = mne.beamformer.lcmv(
                evoked, fwd, cov, data_cov, reg=0.01, pick_ori='max-power')
            # Peak vertex of the source estimate (mean power over time)
            # is taken as the estimated dipole location.
            idx_max = np.argmax(np.mean(stc.data, axis=1))
            vertno_max = stc.vertices[idx_max]
            pos = src[0]['rr'][vertno_max]
            # Distance to ground truth, converted from m to mm.
            errors[ui, ai, di] = 1e3 * np.linalg.norm(
                pos - actual_pos[dipole_idx - 1])
            # Guard: runs with amplitude below 1000 nAm must localize
            # within 20 mm, otherwise abort.
            if dipole_amplitude < 1000 and errors[ui, ai, di] > 20:
                raise RuntimeError
            print(np.round(errors[ui, ai], 1))
plot_errors(errors, 'lcmv')
| [
"phantom_helpers.get_fwd",
"mne.beamformer.lcmv",
"phantom_helpers.plot_errors",
"phantom_helpers.get_data",
"mne.compute_covariance",
"numpy.mean",
"numpy.linalg.norm",
"numpy.round"
] | [((486, 495), 'phantom_helpers.get_fwd', 'get_fwd', ([], {}), '()\n', (493, 495), False, 'from phantom_helpers import get_data, get_fwd, actual_pos, maxfilter_options, dipole_amplitudes, dipole_indices, plot_errors\n'), ((1470, 1497), 'phantom_helpers.plot_errors', 'plot_errors', (['errors', '"""lcmv"""'], {}), "(errors, 'lcmv')\n", (1481, 1497), False, 'from phantom_helpers import get_data, get_fwd, actual_pos, maxfilter_options, dipole_amplitudes, dipole_indices, plot_errors\n'), ((809, 851), 'phantom_helpers.get_data', 'get_data', (['dipole_idx', 'dipole_amplitude', 'mf'], {}), '(dipole_idx, dipole_amplitude, mf)\n', (817, 851), False, 'from phantom_helpers import get_data, get_fwd, actual_pos, maxfilter_options, dipole_amplitudes, dipole_indices, plot_errors\n'), ((914, 954), 'mne.compute_covariance', 'mne.compute_covariance', (['epochs'], {'tmin': '(0.0)'}), '(epochs, tmin=0.0)\n', (936, 954), False, 'import mne\n'), ((972, 1051), 'mne.beamformer.lcmv', 'mne.beamformer.lcmv', (['evoked', 'fwd', 'cov', 'data_cov'], {'reg': '(0.01)', 'pick_ori': '"""max-power"""'}), "(evoked, fwd, cov, data_cov, reg=0.01, pick_ori='max-power')\n", (991, 1051), False, 'import mne\n'), ((1440, 1467), 'numpy.round', 'np.round', (['errors[ui, ai]', '(1)'], {}), '(errors[ui, ai], 1)\n', (1448, 1467), True, 'import numpy as np\n'), ((1101, 1126), 'numpy.mean', 'np.mean', (['stc.data'], {'axis': '(1)'}), '(stc.data, axis=1)\n', (1108, 1126), True, 'import numpy as np\n'), ((1257, 1305), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos - actual_pos[dipole_idx - 1])'], {}), '(pos - actual_pos[dipole_idx - 1])\n', (1271, 1305), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun May 28 11:36:17 2017
Calculate steady state output of a linear recurrent network
@author: <NAME>
"""
import numpy as np
from numpy import linalg as LA
W = np.matrix('0.6 0.1 0.1 0.1 0.1; 0.1 0.6 0.1 0.1 0.1; 0.1 0.1 0.6 0.1 0.1;\
0.1 0.1 0.1 0.6 0.1; 0.1 0.1 0.1 0.1 0.6') # weight matrix
print("W = %s" %(W))
u = np.matrix('0.6; 0.5; 0.6; 0.2; 0.1') # static input vector
print("u = %s" %(u))
M = np.matrix('-0.75 0 0.75 0.75 0; 0 -0.75 0 0.75 0.75; 0.75 0 -0.75 0 0.75;\
0.75 0.75 0 -0.75 0; 0 0.75 0.75 0 -0.75') # recurrent weight matrix
print("M = %s" %(M))
h = W*u
ev, e = LA.eig(M)
# Calculate coefficients of steady state output vector
# v = sum_i(c_i*e_i)
c = np.empty([ev.size])
v_ss = np.zeros([ev.size,1])
for i in range(ev.size):
c[i] = h.T*e[:,i]/(1 - ev[i])
v_ss += c[i]*e[:,i]
print("v_ss = %s" %(v_ss)) | [
"numpy.matrix",
"numpy.zeros",
"numpy.linalg.eig",
"numpy.empty"
] | [((202, 341), 'numpy.matrix', 'np.matrix', (['"""0.6 0.1 0.1 0.1 0.1; 0.1 0.6 0.1 0.1 0.1; 0.1 0.1 0.6 0.1 0.1; 0.1 0.1 0.1 0.6 0.1; 0.1 0.1 0.1 0.1 0.6"""'], {}), "(\n '0.6 0.1 0.1 0.1 0.1; 0.1 0.6 0.1 0.1 0.1; 0.1 0.1 0.6 0.1 0.1; 0.1 0.1 0.1 0.6 0.1; 0.1 0.1 0.1 0.1 0.6'\n )\n", (211, 341), True, 'import numpy as np\n'), ((378, 414), 'numpy.matrix', 'np.matrix', (['"""0.6; 0.5; 0.6; 0.2; 0.1"""'], {}), "('0.6; 0.5; 0.6; 0.2; 0.1')\n", (387, 414), True, 'import numpy as np\n'), ((463, 602), 'numpy.matrix', 'np.matrix', (['"""-0.75 0 0.75 0.75 0; 0 -0.75 0 0.75 0.75; 0.75 0 -0.75 0 0.75; 0.75 0.75 0 -0.75 0; 0 0.75 0.75 0 -0.75"""'], {}), "(\n '-0.75 0 0.75 0.75 0; 0 -0.75 0 0.75 0.75; 0.75 0 -0.75 0 0.75; 0.75 0.75 0 -0.75 0; 0 0.75 0.75 0 -0.75'\n )\n", (472, 602), True, 'import numpy as np\n'), ((662, 671), 'numpy.linalg.eig', 'LA.eig', (['M'], {}), '(M)\n', (668, 671), True, 'from numpy import linalg as LA\n'), ((753, 772), 'numpy.empty', 'np.empty', (['[ev.size]'], {}), '([ev.size])\n', (761, 772), True, 'import numpy as np\n'), ((780, 802), 'numpy.zeros', 'np.zeros', (['[ev.size, 1]'], {}), '([ev.size, 1])\n', (788, 802), True, 'import numpy as np\n')] |
"""Sample patients from the CSV file of cBioPortal gene variants.
USAGE: python sample_patients.py <MUTATION_CSV_FILE> <OUTPUT_CSV_FILE> <OPTIONAL_NUMBER_OF_PATIENTS>
Corresponding Author: <NAME>
Affiliation: Stanford
Date: March 1, 2022
"""
import csv
import sys
from typing import Optional
import numpy as np
import pandas as pd
def sample_patient_records(
    csv_path: str, out_path: str, patient_column: int = 24,
    n_patients: int = 1000, n_row_skip: int = 2
):
    """Copy the variant rows of the first `n_patients` patients to a new CSV.

    Rows 1 .. n_row_skip-1 are dropped, row n_row_skip is written out as the
    header, and data rows are copied until rows for `n_patients` distinct
    patients have been written.

    Parameters
    ----------
    csv_path : str
        Path to CSV file containing variant data
    out_path : str
        Path at which to save CSV file containing variant data from a subset
        of patients
    patient_column : int
        Column index identifying patient identifiers in CSV file of variant
        data (default = 24)
    n_patients : int
        Number of patients to sample from variant data CSV file
        (default = 1000)
    n_row_skip : int
        Number of rows corresponding to header information in variant CSV
        file (default = 2)
    """
    count = 0
    patient_set = set()
    # newline='' is required by the csv module so that row endings are
    # handled correctly (avoids blank lines on Windows).
    with open(csv_path, "r", newline="") as read_file, \
            open(out_path, "w", newline="") as write_file:
        reader = csv.reader(read_file)
        writer = csv.writer(write_file, delimiter=',')
        for i, row in enumerate(reader, start=1):
            if i < n_row_skip:
                # Leading metadata rows before the header are dropped.
                continue
            if i == n_row_skip:
                writer.writerow(row)  # header row
                continue
            if row[patient_column] not in patient_set:
                patient_set.add(row[patient_column])
                count += 1
                # The (n_patients + 1)-th distinct patient marks the cutoff.
                if count > n_patients:
                    print("Done")
                    return
            writer.writerow(row)
def get_patient_mutation_data(
    csv_path: str, patient_id: str, patient_column: int = 24,
    n_row_skip: Optional[int] = 1, ind_header_row: Optional[int] = 1
) -> pd.DataFrame:
    """Load a table of mutation data associated with a single patient.

    Rows for a patient are assumed to be contiguous in the file: scanning
    stops at the first non-matching row after the patient was found.

    Parameters
    ----------
    csv_path : str
        Path to CSV file containing variant data
    patient_id : str
        Patient ID for which the mutation data is requested
    patient_column : Optional[int]
        Column index identifying patient identifiers in CSV file of variant
        data (default = 24)
    n_row_skip : Optional[int]
        Number of rows corresponding to header information in variant CSV
        file (default = 1)
    ind_header_row : Optional[int]
        Index of the header row, starting from 0 (default = 1)

    Returns
    -------
    pd.DataFrame
        Table of mutation data associated with the specific patient of
        interest (empty if the patient does not appear in the file)
    """
    headers = None
    records = []
    found = False
    with open(csv_path, "r", newline="") as read_file:
        reader = csv.reader(read_file)
        for i, row in enumerate(reader, start=1):
            if i <= n_row_skip:
                continue
            if (i - 1) == ind_header_row:
                headers = np.array(row)
            if row[patient_column] == patient_id:
                if not found:
                    print(f"Patient '{patient_id}' found!")
                    found = True
                records.append(
                    {headers[ind]: row[ind] for ind in range(len(row))}
                )
            elif found:
                # Patient rows are contiguous: stop at the first non-match.
                break
    # Build the frame once instead of concatenating row-by-row (the
    # original pd.concat-in-a-loop was quadratic and left `df` undefined
    # when the header row was never reached).
    return pd.DataFrame(records, columns=headers)
if __name__ == "__main__":
    # Extract the records of interest:
    # USAGE: python sample_patients.py <IN_CSV> <OUT_CSV> [N_PATIENTS]
    csv_file = sys.argv[1]
    out_file = sys.argv[2]
    # Fall back to the function's default when no count is given on the
    # command line (the original left `n_patients` undefined in that case,
    # raising NameError).
    n_patients = int(sys.argv[3]) if len(sys.argv) > 3 else 1000
    sample_patient_records(
        csv_path=csv_file, out_path=out_file, n_patients=n_patients
    )
"pandas.DataFrame",
"csv.reader",
"csv.writer",
"numpy.array",
"pandas.concat"
] | [((3065, 3086), 'csv.reader', 'csv.reader', (['read_file'], {}), '(read_file)\n', (3075, 3086), False, 'import csv\n'), ((1350, 1371), 'csv.reader', 'csv.reader', (['read_file'], {}), '(read_file)\n', (1360, 1371), False, 'import csv\n'), ((1393, 1430), 'csv.writer', 'csv.writer', (['write_file'], {'delimiter': '""","""'}), "(write_file, delimiter=',')\n", (1403, 1430), False, 'import csv\n'), ((3296, 3309), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (3304, 3309), True, 'import numpy as np\n'), ((3331, 3360), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'headers'}), '(columns=headers)\n', (3343, 3360), True, 'import pandas as pd\n'), ((3706, 3730), 'pandas.concat', 'pd.concat', (['[df, new_row]'], {}), '([df, new_row])\n', (3715, 3730), True, 'import pandas as pd\n')] |
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
import numpy as np
import time
class Cross_Validation:
    """
    This class contains utility methods to compute Cross Validation
    """

    @staticmethod
    def partition(vector, fold, k):
        """Split `vector` into a training and a validation portion.

        vector: data to partition (numpy array or scipy sparse matrix)
        fold:   index of the portion to use as the validation set (integer)
        k:      number of folds; the validation set has size/k rows (integer)
        Returns (training, validation), or the string
        "Error, unexpected data type" for unsupported inputs (kept for
        backward compatibility with existing callers).
        """
        size = vector.shape[0]
        # Integer division keeps the slice bounds integral under Python 3
        # (the original used `/`, which yields floats and breaks slicing).
        start = (size // k) * fold
        end = (size // k) * (fold + 1)
        validation = vector[start:end]
        if isinstance(vector, np.ndarray):
            training = np.concatenate((vector[:start], vector[end:]))
        elif "sparse" in str(type(vector)):
            # Sparse matrices cannot be np.concatenate'd: select every row
            # outside the validation slice with a boolean mask instead.
            # (The original compared str(type(...)) against the Python 2
            # class repr, which never matched under Python 3.)
            mask = np.ones(vector.shape[0], dtype=bool)
            mask[start:end] = False
            training = vector[mask]
        else:
            return "Error, unexpected data type"
        return training, validation

    @staticmethod
    def K_Fold_Cross_Validation(learner, k, examples, labels, random_split=False):
        """Run k-fold cross-validation with `learner`.

        learner:      a scikit-learn style classifier
        k:            total number of folds (integer)
        examples:     the Bag of Words (sparse matrix of integers)
        labels:       labels for each sample (list of integers)
        random_split: use random splits instead of contiguous folds (boolean)
        Returns (train_folds_score, validation_folds_score): accuracy lists.
        """
        train_folds_score = []
        validation_folds_score = []
        # Shuffle once so contiguous folds are not biased by the input order.
        examples, labels = shuffle(examples, labels, random_state=int(time.time()))
        for fold in range(0, k):
            if random_split:
                training_set, validation_set, training_labels, validation_labels = \
                    train_test_split(examples, labels, test_size=1. / k,
                                     random_state=int(time.time() + fold))
            else:
                training_set, validation_set = Cross_Validation.partition(examples, fold, k)
                training_labels, validation_labels = Cross_Validation.partition(labels, fold, k)
            learner.fit(training_set, training_labels)
            training_predicted = learner.predict(training_set)
            validation_predicted = learner.predict(validation_set)
            train_folds_score.append(
                metrics.accuracy_score(training_labels, training_predicted))
            validation_folds_score.append(
                metrics.accuracy_score(validation_labels, validation_predicted))
        return train_folds_score, validation_folds_score

    @staticmethod
    def Shuffle_Cross_Validation(learner, iters, examples, labels, test_size=0.1):
        """Run `iters` random train/test splits with `learner`.

        learner:   a scikit-learn style classifier
        iters:     number of iterations of Cross-Validation
        examples:  the Bag of Words (sparse matrix of integers)
        labels:    labels for each sample (list of integers)
        test_size: test set size for each iteration (float between 0 and 1)
        Returns (train_iter_score, validation_iter_score): accuracy lists.
        """
        train_iter_score = []
        validation_iter_score = []
        for iter in range(0, iters):
            training_set, validation_set, training_labels, validation_labels = \
                train_test_split(examples, labels, test_size=test_size,
                                 random_state=int(time.time() + iter))
            learner.fit(training_set, training_labels)
            training_predicted = learner.predict(training_set)
            validation_predicted = learner.predict(validation_set)
            train_iter_score.append(
                metrics.accuracy_score(training_labels, training_predicted))
            validation_iter_score.append(
                metrics.accuracy_score(validation_labels, validation_predicted))
        return train_iter_score, validation_iter_score
""" Takes the following parameters as an input:
vector: score results (list of list of floats)
Returns:
the average and the standard deviation (lists of floats)
"""
@staticmethod
def average_and_std_deviation(vector):
return [np.average(ts) for ts in vector], [np.std(ts) for ts in vector]
| [
"numpy.average",
"numpy.std",
"sklearn.metrics.accuracy_score",
"numpy.ones",
"time.time",
"numpy.concatenate"
] | [((1099, 1135), 'numpy.ones', 'np.ones', (['vector.shape[0]'], {'dtype': 'bool'}), '(vector.shape[0], dtype=bool)\n', (1106, 1135), True, 'import numpy as np\n'), ((1289, 1335), 'numpy.concatenate', 'np.concatenate', (['(vector[:start], vector[end:])'], {}), '((vector[:start], vector[end:]))\n', (1303, 1335), True, 'import numpy as np\n'), ((2944, 3003), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['training_labels', 'training_predicted'], {}), '(training_labels, training_predicted)\n', (2966, 3003), False, 'from sklearn import metrics\n'), ((3047, 3110), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['validation_labels', 'validation_predicted'], {}), '(validation_labels, validation_predicted)\n', (3069, 3110), False, 'from sklearn import metrics\n'), ((4329, 4388), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['training_labels', 'training_predicted'], {}), '(training_labels, training_predicted)\n', (4351, 4388), False, 'from sklearn import metrics\n'), ((4431, 4494), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['validation_labels', 'validation_predicted'], {}), '(validation_labels, validation_predicted)\n', (4453, 4494), False, 'from sklearn import metrics\n'), ((4826, 4840), 'numpy.average', 'np.average', (['ts'], {}), '(ts)\n', (4836, 4840), True, 'import numpy as np\n'), ((4861, 4871), 'numpy.std', 'np.std', (['ts'], {}), '(ts)\n', (4867, 4871), True, 'import numpy as np\n'), ((2246, 2257), 'time.time', 'time.time', ([], {}), '()\n', (2255, 2257), False, 'import time\n'), ((4089, 4100), 'time.time', 'time.time', ([], {}), '()\n', (4098, 4100), False, 'import time\n'), ((2495, 2506), 'time.time', 'time.time', ([], {}), '()\n', (2504, 2506), False, 'import time\n')] |
import random
import cv2
import numpy as np
from PIL import Image
from time import sleep
# Tetris game class
class Tetris:
    '''Tetris game class'''

    # BOARD
    # Cell states: empty, a settled block, and the currently falling piece.
    MAP_EMPTY = 0
    MAP_BLOCK = 1
    MAP_PLAYER = 2
    BOARD_WIDTH = 10
    BOARD_HEIGHT = 20

    # Piece id -> rotation (degrees) -> four (x, y) block offsets.
    TETROMINOS = {
        0: { # I
            0: [(0, 0), (1, 0), (2, 0), (3, 0)],
            90: [(1, 0), (1, 1), (1, 2), (1, 3)],
            180: [(3, 0), (2, 0), (1, 0), (0, 0)],
            270: [(1, 3), (1, 2), (1, 1), (1, 0)],
        },
        1: { # T
            0: [(1, 0), (0, 1), (1, 1), (2, 1)],
            90: [(0, 1), (1, 2), (1, 1), (1, 0)],
            180: [(1, 2), (2, 1), (1, 1), (0, 1)],
            270: [(2, 1), (1, 0), (1, 1), (1, 2)],
        },
        2: { # L
            0: [(1, 0), (1, 1), (1, 2), (2, 2)],
            90: [(0, 1), (1, 1), (2, 1), (2, 0)],
            180: [(1, 2), (1, 1), (1, 0), (0, 0)],
            270: [(2, 1), (1, 1), (0, 1), (0, 2)],
        },
        3: { # J
            0: [(1, 0), (1, 1), (1, 2), (0, 2)],
            90: [(0, 1), (1, 1), (2, 1), (2, 2)],
            180: [(1, 2), (1, 1), (1, 0), (2, 0)],
            270: [(2, 1), (1, 1), (0, 1), (0, 0)],
        },
        4: { # Z
            0: [(0, 0), (1, 0), (1, 1), (2, 1)],
            90: [(0, 2), (0, 1), (1, 1), (1, 0)],
            180: [(2, 1), (1, 1), (1, 0), (0, 0)],
            270: [(1, 0), (1, 1), (0, 1), (0, 2)],
        },
        5: { # S
            0: [(2, 0), (1, 0), (1, 1), (0, 1)],
            90: [(0, 0), (0, 1), (1, 1), (1, 2)],
            180: [(0, 1), (1, 1), (1, 0), (2, 0)],
            270: [(1, 2), (1, 1), (0, 1), (0, 0)],
        },
        6: { # O
            0: [(1, 0), (2, 0), (1, 1), (2, 1)],
            90: [(1, 0), (2, 0), (1, 1), (2, 1)],
            180: [(1, 0), (2, 0), (1, 1), (2, 1)],
            270: [(1, 0), (2, 0), (1, 1), (2, 1)],
        }
    }

    # Render colors (RGB) for each cell state, keyed by MAP_* value.
    COLORS = {
        0: (255, 255, 255),
        1: (247, 64, 99),
        2: (0, 167, 247),
    }
    def __init__(self):
        # A new instance starts immediately with a fresh game.
        self.reset()
def reset(self):
'''Resets the game, returning the current state'''
self.board = [[0] * Tetris.BOARD_WIDTH for _ in range(Tetris.BOARD_HEIGHT)]
self.game_over = False
self.bag = list(range(len(Tetris.TETROMINOS)))
random.shuffle(self.bag)
self.next_piece = self.bag.pop()
self._new_round()
self.score = 0
return self._get_board_props(self.board)
def _get_rotated_piece(self):
'''Returns the current piece, including rotation'''
return Tetris.TETROMINOS[self.current_piece][self.current_rotation]
def _get_complete_board(self):
'''Returns the complete board, including the current piece'''
piece = self._get_rotated_piece()
piece = [np.add(x, self.current_pos) for x in piece]
board = [x[:] for x in self.board]
for x, y in piece:
board[y][x] = Tetris.MAP_PLAYER
return board
    def get_game_score(self):
        '''Returns the current game score.

        Each block placed counts as one.
        For lines cleared, the reward is BOARD_WIDTH * lines_cleared ^ 2.
        '''
        return self.score
def _new_round(self):
'''Starts a new round (new piece)'''
# Generate new bag with the pieces
if len(self.bag) == 0:
self.bag = list(range(len(Tetris.TETROMINOS)))
random.shuffle(self.bag)
self.current_piece = self.next_piece
self.next_piece = self.bag.pop()
self.current_pos = [3, 0]
self.current_rotation = 0
if self._check_collision(self._get_rotated_piece(), self.current_pos):
self.game_over = True
def _check_collision(self, piece, pos):
'''Check if there is a collision between the current piece and the board'''
for x, y in piece:
x += pos[0]
y += pos[1]
if x < 0 or x >= Tetris.BOARD_WIDTH \
or y < 0 or y >= Tetris.BOARD_HEIGHT \
or self.board[y][x] == Tetris.MAP_BLOCK:
return True
return False
def _rotate(self, angle):
'''Change the current rotation'''
r = self.current_rotation + angle
if r == 360:
r = 0
if r < 0:
r += 360
elif r > 360:
r -= 360
self.current_rotation = r
def _add_piece_to_board(self, piece, pos):
'''Place a piece in the board, returning the resulting board'''
board = [x[:] for x in self.board]
for x, y in piece:
board[y + pos[1]][x + pos[0]] = Tetris.MAP_BLOCK
return board
def _clear_lines(self, board):
'''Clears completed lines in a board'''
# Check if lines can be cleared
lines_to_clear = [index for index, row in enumerate(
board) if sum(row) == Tetris.BOARD_WIDTH]
if lines_to_clear:
board = [row for index, row in enumerate(board) if index not in lines_to_clear]
# Add new lines at the top
for _ in lines_to_clear:
board.insert(0, [0 for _ in range(Tetris.BOARD_WIDTH)])
return len(lines_to_clear), board
def _number_of_holes(self, board):
'''Number of holes in the board (empty square with at least one block above it)'''
holes = 0
for col in zip(*board):
i = 0
while i < Tetris.BOARD_HEIGHT and col[i] != Tetris.MAP_BLOCK:
i += 1
holes += len([x for x in col[i + 1:] if x == Tetris.MAP_EMPTY])
return holes
def _bumpiness(self, board):
'''Sum of the differences of heights between pair of columns'''
total_bumpiness = 0
max_bumpiness = 0
min_ys = []
for col in zip(*board):
i = 0
while i < Tetris.BOARD_HEIGHT and col[i] != Tetris.MAP_BLOCK:
i += 1
min_ys.append(i)
for i in range(len(min_ys) - 1):
bumpiness = abs(min_ys[i] - min_ys[i + 1])
max_bumpiness = max(bumpiness, max_bumpiness)
total_bumpiness += abs(min_ys[i] - min_ys[i + 1])
return total_bumpiness, max_bumpiness
def _height(self, board):
'''Sum and maximum height of the board'''
sum_height = 0
max_height = 0
min_height = Tetris.BOARD_HEIGHT
for col in zip(*board):
i = 0
while i < Tetris.BOARD_HEIGHT and col[i] == Tetris.MAP_EMPTY:
i += 1
height = Tetris.BOARD_HEIGHT - i
sum_height += height
if height > max_height:
max_height = height
elif height < min_height:
min_height = height
return sum_height, max_height, min_height
    def _get_board_props(self, board):
        '''Get properties of the board'''
        # Clear any completed lines first so the remaining metrics describe
        # the post-clear board.
        lines, board = self._clear_lines(board)
        holes = self._number_of_holes(board)
        total_bumpiness, max_bumpiness = self._bumpiness(board)
        sum_height, max_height, min_height = self._height(board)
        # Feature vector: [lines cleared, holes, total bumpiness, total height]
        return [lines, holes, total_bumpiness, sum_height]
def get_next_states(self):
'''Get all possible next states'''
states = {}
piece_id = self.current_piece
if piece_id == 6:
rotations = [0]
elif piece_id == 0:
rotations = [0, 90]
else:
rotations = [0, 90, 180, 270]
# For all rotations
for rotation in rotations:
piece = Tetris.TETROMINOS[piece_id][rotation]
min_x = min([p[0] for p in piece])
max_x = max([p[0] for p in piece])
# For all positions
for x in range(-min_x, Tetris.BOARD_WIDTH - max_x):
pos = [x, 0]
# Drop piece
while not self._check_collision(piece, pos):
pos[1] += 1
pos[1] -= 1
# Valid move
if pos[1] >= 0:
board = self._add_piece_to_board(piece, pos)
states[(x, rotation)] = self._get_board_props(board)
return states
    def get_state_size(self):
        '''Length of the state vector produced by _get_board_props:
        (lines cleared, holes, total bumpiness, sum of heights).'''
        return 4
    def play(self, x, rotation, render=False, render_delay=None):
        '''Execute one move: drop the current piece at column ``x`` with the
        given rotation.

        Args:
            x: Horizontal position to drop the piece at.
            rotation: Rotation (in degrees) to apply to the piece.
            render: If True, render the board after each drop step.
            render_delay: Optional pause (seconds) between rendered frames.

        Returns:
            A ``(score, game_over)`` tuple: the reward earned by the move
            and whether the game ended after it.
        '''
        self.current_pos = [x, 0]
        self.current_rotation = rotation
        # Drop piece one row at a time until it would collide, then back up
        # one row to the last valid position.
        while not self._check_collision(self._get_rotated_piece(), self.current_pos):
            if render:
                self.render()
                if render_delay:
                    sleep(render_delay)
            self.current_pos[1] += 1
        self.current_pos[1] -= 1
        # Update board and calculate score: 1 point for placing the piece
        # plus a quadratic bonus for lines cleared at once.
        self.board = self._add_piece_to_board(self._get_rotated_piece(), self.current_pos)
        lines_cleared, self.board = self._clear_lines(self.board)
        score = 1 + (lines_cleared ** 2) * Tetris.BOARD_WIDTH
        self.score += score
        # Start new round; this may end the game if the new piece cannot spawn.
        self._new_round()
        if self.game_over:
            # Penalty for losing the game.
            score -= 2
        return score, self.game_over
def render(self):
'''Renders the current board'''
img = [Tetris.COLORS[p] for row in self._get_complete_board() for p in row]
img = np.array(img).reshape(Tetris.BOARD_HEIGHT, Tetris.BOARD_WIDTH, 3).astype(np.uint8)
img = img[..., ::-1] # Convert RRG to BGR (used by cv2)
img = Image.fromarray(img, 'RGB')
img = img.resize((Tetris.BOARD_WIDTH * 25, Tetris.BOARD_HEIGHT * 25), Image.NEAREST)
img = np.array(img)
cv2.putText(img, str(self.score), (22, 22), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)
cv2.imshow('image', np.array(img))
cv2.waitKey(1)
| [
"cv2.waitKey",
"random.shuffle",
"time.sleep",
"numpy.array",
"PIL.Image.fromarray",
"numpy.add"
] | [((2387, 2411), 'random.shuffle', 'random.shuffle', (['self.bag'], {}), '(self.bag)\n', (2401, 2411), False, 'import random\n'), ((9906, 9933), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (9921, 9933), False, 'from PIL import Image\n'), ((10043, 10056), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (10051, 10056), True, 'import numpy as np\n'), ((10206, 10220), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (10217, 10220), False, 'import cv2\n'), ((2900, 2927), 'numpy.add', 'np.add', (['x', 'self.current_pos'], {}), '(x, self.current_pos)\n', (2906, 2927), True, 'import numpy as np\n'), ((3540, 3564), 'random.shuffle', 'random.shuffle', (['self.bag'], {}), '(self.bag)\n', (3554, 3564), False, 'import random\n'), ((10182, 10195), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (10190, 10195), True, 'import numpy as np\n'), ((9039, 9058), 'time.sleep', 'sleep', (['render_delay'], {}), '(render_delay)\n', (9044, 9058), False, 'from time import sleep\n'), ((9742, 9755), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9750, 9755), True, 'import numpy as np\n')] |
"""Word/Symbol level next step prediction using Recurrent Highway Networks.
To run:
$ python rhn_train.py
"""
from __future__ import absolute_import, division, print_function
from copy import deepcopy
import time
import os
import numpy as np
import tensorflow as tf
from sacred import Experiment
from rhn import Model
from data.reader import data_iterator
ex = Experiment('rhn_prediction')  # sacred experiment: config, CLI commands and run logging
logging = tf.logging  # alias to TensorFlow's logging module
class Config:
    """Plain attribute container; populated from the sacred config in get_config()."""
    pass
C = Config()  # module-level singleton holding the active hyperparameter set
@ex.config
def hyperparameters():
    """Default sacred configuration.

    Every local variable assigned here becomes a named config entry that
    can be overridden from the command line.
    """
    data_path = 'data'
    dataset = 'ptb'
    init_scale = 0.04
    init_bias = -2.0
    num_layers = 1
    depth = 4  # the recurrence depth
    learning_rate = 0.2
    lr_decay = 1.02
    weight_decay = 1e-7
    max_grad_norm = 10
    num_steps = 35
    hidden_size = 1000
    max_epoch = 20
    max_max_epoch = 500
    batch_size = 20
    drop_x = 0.25
    drop_i = 0.75
    drop_h = 0.25
    drop_o = 0.75
    tied = True
    load_model = ''
    mc_steps = 0
    # Vocabulary size follows the chosen corpus (word level for PTB,
    # byte/character level for enwik8 and text8).
    if dataset == 'ptb':
        vocab_size = 10000
    elif dataset == 'enwik8':
        vocab_size = 205
    elif dataset == 'text8':
        vocab_size = 27
    else:
        raise AssertionError("Unsupported dataset! Only 'ptb',",
                             "'enwik8' and 'text8' are currently supported.")
@ex.named_config
def ptb_sota():
    """Best-known hyperparameters for word-level Penn Treebank."""
    data_path = 'data'
    dataset = 'ptb'
    init_scale = 0.04
    init_bias = -2.0
    num_layers = 1
    depth = 10
    learning_rate = 0.2
    lr_decay = 1.02
    weight_decay = 1e-7
    max_grad_norm = 10
    num_steps = 35
    hidden_size = 830
    max_epoch = 20
    max_max_epoch = 500
    batch_size = 20
    drop_x = 0.25
    drop_i = 0.75
    drop_h = 0.25
    drop_o = 0.75
    tied = True
    vocab_size = 10000
@ex.named_config
def enwik8_sota():
    """Best-known hyperparameters for byte-level enwik8."""
    # test BPC 1.27
    data_path = 'data'
    dataset = 'enwik8'
    init_scale = 0.04
    init_bias = -4.0
    num_layers = 1
    depth = 10
    learning_rate = 0.2
    lr_decay = 1.03
    weight_decay = 1e-7
    max_grad_norm = 10
    num_steps = 50
    hidden_size = 1500
    max_epoch = 5
    max_max_epoch = 500
    batch_size = 128
    drop_x = 0.10
    drop_i = 0.40
    drop_h = 0.10
    drop_o = 0.40
    tied = False
    vocab_size = 205
@ex.named_config
def text8_sota():
    """Best-known hyperparameters for character-level text8."""
    # test BPC 1.27
    data_path = 'data'
    dataset = 'text8'
    init_scale = 0.04
    init_bias = -4.0
    num_layers = 1
    depth = 10
    learning_rate = 0.2
    lr_decay = 1.03
    weight_decay = 1e-7
    max_grad_norm = 10
    num_steps = 50
    hidden_size = 1500
    max_epoch = 5
    max_max_epoch = 500
    batch_size = 128
    drop_x = 0.10
    drop_i = 0.40
    drop_h = 0.10
    drop_o = 0.40
    tied = False
    vocab_size = 27
@ex.capture
def get_config(_config):
    """Copy the sacred config dict onto the module-level ``C`` object so
    hyperparameters are reachable as attributes, and return it."""
    C.__dict__ = dict(_config)
    return C
def get_data(data_path, dataset):
    """Load the requested corpus.

    Args:
        data_path: Directory containing the raw dataset files.
        dataset: One of 'ptb', 'enwik8' or 'text8'.

    Returns:
        (reader, raw_data): the reader module used and the raw
        train/valid/test data tuple it produced.

    Raises:
        ValueError: If ``dataset`` is not a supported corpus.  (The
            original code fell through and crashed with a NameError on
            ``reader`` instead.)
    """
    if dataset == 'ptb':
        from tensorflow.models.rnn.ptb import reader
        raw_data = reader.ptb_raw_data(data_path)
    elif dataset == 'enwik8':
        from data import reader
        raw_data = reader.enwik8_raw_data(data_path)
    elif dataset == 'text8':
        from data import reader
        raw_data = reader.text8_raw_data(data_path)
    else:
        raise ValueError("Unsupported dataset %r: expected 'ptb', 'enwik8' or 'text8'." % (dataset,))
    return reader, raw_data
def get_noise(x, m, drop_x, drop_i, drop_h, drop_o):
    """Sample variational-dropout masks for one batch.

    Draws inverted-dropout keep-masks (scaled by 1/keep_prob) for the
    embedding input, the recurrent input, the hidden state and the output.
    Repeated tokens within a sequence share the same embedding mask, so a
    word is dropped consistently across time steps.

    Returns:
        (noise_x, noise_i, noise_h, noise_o) as float32 arrays.
    """
    def sample_mask(shape, keep_prob):
        # All-ones (no dropout) when keep_prob == 1; otherwise a Bernoulli
        # mask scaled so its expected value stays 1.
        if keep_prob < 1.0:
            bern = np.random.random_sample(shape) < keep_prob
            return bern.astype(np.float32) / keep_prob
        return np.ones(shape, dtype=np.float32)

    keep_x = 1.0 - drop_x
    noise_x = sample_mask((m.batch_size, m.num_steps, 1), keep_x)
    if keep_x < 1.0:
        # Tie the mask of every repeated token to its earlier occurrence.
        for b in range(m.batch_size):
            for n1 in range(m.num_steps):
                for n2 in range(n1 + 1, m.num_steps):
                    if x[b][n2] == x[b][n1]:
                        noise_x[b][n2][0] = noise_x[b][n1][0]
                        break
    noise_i = sample_mask((m.batch_size, m.in_size, m.num_layers), 1.0 - drop_i)
    noise_h = sample_mask((m.batch_size, m.size, m.num_layers), 1.0 - drop_h)
    noise_o = sample_mask((m.batch_size, 1, m.size), 1.0 - drop_o)
    return noise_x, noise_i, noise_h, noise_o
def run_epoch(session, m, data, eval_op, config, verbose=False):
    """Run the model over ``data`` for one epoch and return its perplexity.

    Args:
        session: Active TensorFlow session.
        m: Model instance (training or evaluation variant).
        data: Flat token sequence to iterate over.
        eval_op: Extra op run each step (the train op, or tf.no_op()).
        config: Config providing the drop_* rates for the dropout masks.
        verbose: If True, print perplexity/speed roughly 10x per epoch.

    Returns:
        exp(mean per-step cost), i.e. the perplexity over the epoch.
    """
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    # Initial RNN state, one tensor per layer.
    state = [x.eval() for x in m.initial_state]
    for step, (x, y) in enumerate(data_iterator(data, m.batch_size, m.num_steps)):
        # Fresh variational-dropout masks for every batch.
        noise_x, noise_i, noise_h, noise_o = get_noise(x, m, config.drop_x, config.drop_i, config.drop_h, config.drop_o)
        feed_dict = {m.input_data: x, m.targets: y,
                     m.noise_x: noise_x, m.noise_i: noise_i, m.noise_h: noise_h, m.noise_o: noise_o}
        # Carry the RNN state across batches (truncated BPTT).
        feed_dict.update({m.initial_state[i]: state[i] for i in range(m.num_layers)})
        cost, state, _ = session.run([m.cost, m.final_state, eval_op], feed_dict)
        costs += cost
        iters += m.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" % (step * 1.0 / epoch_size, np.exp(costs / iters),
                                                             iters * m.batch_size / (time.time() - start_time)))
    return np.exp(costs / iters)
@ex.command
def evaluate(data_path, dataset, load_model):
    """Evaluate a saved checkpoint on the validation and test sets.

    Builds the model variants in one shared variable scope, restores the
    checkpoint at ``load_model``, and reports batched validation
    perplexity plus full (non-batched, one token at a time) validation
    and test perplexity.
    """
    ex.commands["print_config"]()
    print("Evaluating model:", load_model)
    reader, (train_data, valid_data, test_data, _) = get_data(data_path, dataset)
    config = get_config()
    val_config = deepcopy(config)
    test_config = deepcopy(config)
    # Evaluation uses no dropout at all.
    val_config.drop_x = test_config.drop_x = 0.0
    val_config.drop_i = test_config.drop_i = 0.0
    val_config.drop_h = test_config.drop_h = 0.0
    val_config.drop_o = test_config.drop_o = 0.0
    # Non-batched evaluation walks the data one token at a time.
    test_config.batch_size = test_config.num_steps = 1
    with tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
        # Build the training model first so the shared variables exist ...
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            _ = Model(is_training=True, config=config)
        # ... then the evaluation models reuse them.
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = Model(is_training=False, config=val_config)
            mtest = Model(is_training=False, config=test_config)
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        saver.restore(session, load_model)
        print("Testing on batched Valid ...")
        valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op(), config=val_config)
        print("Valid Perplexity (batched): %.3f, Bits: %.3f" % (valid_perplexity, np.log2(valid_perplexity)))
        print("Testing on non-batched Valid ...")
        valid_perplexity = run_epoch(session, mtest, valid_data, tf.no_op(), config=test_config, verbose=True)
        print("Full Valid Perplexity: %.3f, Bits: %.3f" % (valid_perplexity, np.log2(valid_perplexity)))
        print("Testing on non-batched Test ...")
        test_perplexity = run_epoch(session, mtest, test_data, tf.no_op(), config=test_config, verbose=True)
        print("Full Test Perplexity: %.3f, Bits: %.3f" % (test_perplexity, np.log2(test_perplexity)))
def run_mc_epoch(seed, session, m, data, eval_op, config, mc_steps, verbose=False):
    """Run the model with noise on the given data multiple times for MC evaluation.

    Each accepted Monte-Carlo sample keeps dropout active (one mask per
    sample, drawn at step 0 and reused for the whole pass), records the
    per-step probabilities to ``probs/<seed>_<i>.npy`` and accumulates
    them; the final perplexity is computed from the averaged probabilities.
    Samples whose perplexity is >= 500 are rejected and redrawn.
    """
    n_steps = len(data)
    all_probs = np.array([0.0]*n_steps)
    sum_probs = np.array([0.0]*n_steps)
    mc_i = 1
    print("Total MC steps to do:", mc_steps)
    if not os.path.isdir('./probs'):
        print('Creating probs directory')
        os.mkdir('./probs')
    while mc_i <= mc_steps:
        print("MC sample number:", mc_i)
        epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
        start_time = time.time()
        costs = 0.0
        iters = 0
        state = [x.eval() for x in m.initial_state]
        for step, (x, y) in enumerate(data_iterator(data, m.batch_size, m.num_steps)):
            # Draw the dropout masks once per MC sample and keep them fixed.
            if step == 0:
                noise_x, noise_i, noise_h, noise_o = get_noise(x, m, config.drop_x, config.drop_i, config.drop_h, config.drop_o)
            feed_dict = {m.input_data: x, m.targets: y,
                         m.noise_x: noise_x, m.noise_i: noise_i, m.noise_h: noise_h, m.noise_o: noise_o}
            feed_dict.update({m.initial_state[i]: state[i] for i in range(m.num_layers)})
            cost, state, _ = session.run([m.cost, m.final_state, eval_op], feed_dict)
            costs += cost
            iters += m.num_steps
            # Per-step probability of the target under this noise sample.
            all_probs[step] = np.exp(-cost)
            if verbose and step % (epoch_size // 10) == 10:
                print("%.3f perplexity: %.3f speed: %.0f wps" % (step * 1.0 / epoch_size, np.exp(costs / iters),
                                                                 iters * m.batch_size / (time.time() - start_time)))
        perplexity = np.exp(costs / iters)
        print("Perplexity:", perplexity)
        if perplexity < 500:
            savefile = 'probs/' + str(seed) + '_' + str(mc_i)
            print("Accepted. Saving to:", savefile)
            np.save(savefile, all_probs)
            sum_probs += all_probs
            mc_i += 1
    # Perplexity of the MC-averaged probabilities (clipped for numerical safety).
    return np.exp(np.mean(-np.log(np.clip(sum_probs/mc_steps, 1e-10, 1-1e-10))))
@ex.command
def evaluate_mc(data_path, dataset, load_model, mc_steps, seed):
    """Evaluate a saved checkpoint on the test set using MC-dropout averaging.

    Restores ``load_model`` and runs ``run_mc_epoch`` with ``mc_steps``
    Monte-Carlo samples over the non-batched test data.
    """
    ex.commands['print_config']()
    print("MC Evaluation of model:", load_model)
    assert mc_steps > 0
    reader, (train_data, valid_data, test_data, _) = get_data(data_path, dataset)
    config = get_config()
    val_config = deepcopy(config)
    test_config = deepcopy(config)
    # Non-batched evaluation walks the data one token at a time.
    test_config.batch_size = test_config.num_steps = 1
    with tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            _ = Model(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            _ = Model(is_training=False, config=val_config)
            mtest = Model(is_training=False, config=test_config)
        # BUG FIX: the original called the deprecated
        # tf.initialize_all_variables() without .run(), which only built the
        # init op but never executed it. Run the modern initializer instead,
        # matching evaluate() and main().
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        saver.restore(session, load_model)
        print("Testing on non-batched Test ...")
        test_perplexity = run_mc_epoch(seed, session, mtest, test_data, tf.no_op(), test_config, mc_steps, verbose=True)
        print("Full Test Perplexity: %.3f, Bits: %.3f" % (test_perplexity, np.log2(test_perplexity)))
@ex.automain
def main(data_path, dataset, seed, _run):
    """Train the RHN model, checkpointing on best batched validation
    perplexity, then report full (non-batched) valid/test perplexity of
    the best checkpoint.

    Returns:
        The best batched validation perplexity (sacred's result value).
    """
    ex.commands['print_config']()
    np.random.seed(seed)
    reader, (train_data, valid_data, test_data, _) = get_data(data_path, dataset)
    config = get_config()
    val_config = deepcopy(config)
    test_config = deepcopy(config)
    # Evaluation configs use no dropout; the test config is also non-batched.
    val_config.drop_x = test_config.drop_x = 0.0
    val_config.drop_i = test_config.drop_i = 0.0
    val_config.drop_h = test_config.drop_h = 0.0
    val_config.drop_o = test_config.drop_o = 0.0
    test_config.batch_size = test_config.num_steps = 1
    with tf.Graph().as_default(), tf.Session() as session:
        tf.set_random_seed(seed)
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
        # Training model owns the variables; evaluation models reuse them.
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            mtrain = Model(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = Model(is_training=False, config=val_config)
            mtest = Model(is_training=False, config=test_config)
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        # Epoch-indexed perplexity logs; index 0 is a sentinel (np.inf).
        trains, vals, tests, best_val = [np.inf], [np.inf], [np.inf], np.inf
        for i in range(config.max_max_epoch):
            # Exponential LR decay, starting after max_epoch warm epochs.
            lr_decay = config.lr_decay ** max(i - config.max_epoch + 1, 0.0)
            mtrain.assign_lr(session, config.learning_rate / lr_decay)
            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(mtrain.lr)))
            train_perplexity = run_epoch(session, mtrain, train_data, mtrain.train_op, config=config,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f, Bits: %.3f" % (i + 1, train_perplexity, np.log2(train_perplexity)))
            valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op(), config=val_config)
            print("Epoch: %d Valid Perplexity (batched): %.3f, Bits: %.3f" % (i + 1, valid_perplexity, np.log2(valid_perplexity)))
            test_perplexity = run_epoch(session, mvalid, test_data, tf.no_op(), config=val_config)
            print("Epoch: %d Test Perplexity (batched): %.3f, Bits: %.3f" % (i + 1, test_perplexity, np.log2(test_perplexity)))
            trains.append(train_perplexity)
            vals.append(valid_perplexity)
            tests.append(test_perplexity)
            # Checkpoint whenever batched validation perplexity improves.
            if valid_perplexity < best_val:
                best_val = valid_perplexity
                print("Best Batched Valid Perplexity improved to %.03f" % best_val)
                save_path = saver.save(session, './' + dataset + "_" + str(seed) + "_best_model.ckpt")
                print("Saved to:", save_path)
            _run.info['epoch_nr'] = i + 1
            _run.info['nr_parameters'] = mtrain.nvars.item()
            _run.info['logs'] = {'train_perplexity': trains, 'valid_perplexity': vals, 'test_perplexity': tests}
        print("Training is over.")
        best_val_epoch = np.argmin(vals)
        print("Best Batched Validation Perplexity %.03f (Bits: %.3f) was at Epoch %d" %
              (vals[best_val_epoch], np.log2(vals[best_val_epoch]), best_val_epoch))
        print("Training Perplexity at this Epoch was %.03f, Bits: %.3f" %
              (trains[best_val_epoch], np.log2(trains[best_val_epoch])))
        print("Batched Test Perplexity at this Epoch was %.03f, Bits: %.3f" %
              (tests[best_val_epoch], np.log2(tests[best_val_epoch])))
        _run.info['best_val_epoch'] = best_val_epoch
        _run.info['best_valid_perplexity'] = vals[best_val_epoch]
        # Restore the best checkpoint and run the slow non-batched evaluation.
        with tf.Session() as sess:
            saver.restore(sess, './' + dataset + "_" + str(seed) + "_best_model.ckpt")
            print("Testing on non-batched Valid ...")
            valid_perplexity = run_epoch(sess, mtest, valid_data, tf.no_op(), config=test_config, verbose=True)
            print("Full Valid Perplexity: %.3f, Bits: %.3f" % (valid_perplexity, np.log2(valid_perplexity)))
            print("Testing on non-batched Test ...")
            test_perplexity = run_epoch(sess, mtest, test_data, tf.no_op(), config=test_config, verbose=True)
            print("Full Test Perplexity: %.3f, Bits: %.3f" % (test_perplexity, np.log2(test_perplexity)))
        _run.info['full_best_valid_perplexity'] = valid_perplexity
        _run.info['full_test_perplexity'] = test_perplexity
        return vals[best_val_epoch]
| [
"os.mkdir",
"numpy.random.seed",
"numpy.random.random_sample",
"data.reader.data_iterator",
"data.reader.enwik8_raw_data",
"numpy.ones",
"numpy.argmin",
"numpy.clip",
"numpy.exp",
"data.reader.text8_raw_data",
"tensorflow.random_uniform_initializer",
"data.reader.ptb_raw_data",
"tensorflow.v... | [((366, 394), 'sacred.Experiment', 'Experiment', (['"""rhn_prediction"""'], {}), "('rhn_prediction')\n", (376, 394), False, 'from sacred import Experiment\n'), ((4410, 4421), 'time.time', 'time.time', ([], {}), '()\n', (4419, 4421), False, 'import time\n'), ((5313, 5334), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (5319, 5334), True, 'import numpy as np\n'), ((5634, 5650), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (5642, 5650), False, 'from copy import deepcopy\n'), ((5667, 5683), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (5675, 5683), False, 'from copy import deepcopy\n'), ((7425, 7450), 'numpy.array', 'np.array', (['([0.0] * n_steps)'], {}), '([0.0] * n_steps)\n', (7433, 7450), True, 'import numpy as np\n'), ((7463, 7488), 'numpy.array', 'np.array', (['([0.0] * n_steps)'], {}), '([0.0] * n_steps)\n', (7471, 7488), True, 'import numpy as np\n'), ((9493, 9509), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (9501, 9509), False, 'from copy import deepcopy\n'), ((9526, 9542), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (9534, 9542), False, 'from copy import deepcopy\n'), ((10479, 10499), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10493, 10499), True, 'import numpy as np\n'), ((10620, 10636), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (10628, 10636), False, 'from copy import deepcopy\n'), ((10653, 10669), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (10661, 10669), False, 'from copy import deepcopy\n'), ((2697, 2727), 'data.reader.ptb_raw_data', 'reader.ptb_raw_data', (['data_path'], {}), '(data_path)\n', (2716, 2727), False, 'from data import reader\n'), ((3476, 3533), 'numpy.ones', 'np.ones', (['(m.batch_size, m.num_steps, 1)'], {'dtype': 'np.float32'}), '((m.batch_size, m.num_steps, 1), dtype=np.float32)\n', (3483, 3533), True, 'import numpy as np\n'), ((3694, 3760), 'numpy.ones', 'np.ones', 
(['(m.batch_size, m.in_size, m.num_layers)'], {'dtype': 'np.float32'}), '((m.batch_size, m.in_size, m.num_layers), dtype=np.float32)\n', (3701, 3760), True, 'import numpy as np\n'), ((3917, 3980), 'numpy.ones', 'np.ones', (['(m.batch_size, m.size, m.num_layers)'], {'dtype': 'np.float32'}), '((m.batch_size, m.size, m.num_layers), dtype=np.float32)\n', (3924, 3980), True, 'import numpy as np\n'), ((4126, 4178), 'numpy.ones', 'np.ones', (['(m.batch_size, 1, m.size)'], {'dtype': 'np.float32'}), '((m.batch_size, 1, m.size), dtype=np.float32)\n', (4133, 4178), True, 'import numpy as np\n'), ((4526, 4572), 'data.reader.data_iterator', 'data_iterator', (['data', 'm.batch_size', 'm.num_steps'], {}), '(data, m.batch_size, m.num_steps)\n', (4539, 4572), False, 'from data.reader import data_iterator\n'), ((5933, 5945), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5943, 5945), True, 'import tensorflow as tf\n'), ((5976, 6044), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-config.init_scale)', 'config.init_scale'], {}), '(-config.init_scale, config.init_scale)\n', (6005, 6044), True, 'import tensorflow as tf\n'), ((6416, 6432), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6430, 6432), True, 'import tensorflow as tf\n'), ((7550, 7574), 'os.path.isdir', 'os.path.isdir', (['"""./probs"""'], {}), "('./probs')\n", (7563, 7574), False, 'import os\n'), ((7618, 7637), 'os.mkdir', 'os.mkdir', (['"""./probs"""'], {}), "('./probs')\n", (7626, 7637), False, 'import os\n'), ((7784, 7795), 'time.time', 'time.time', ([], {}), '()\n', (7793, 7795), False, 'import time\n'), ((8782, 8803), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (8788, 8803), True, 'import numpy as np\n'), ((9603, 9615), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9613, 9615), True, 'import tensorflow as tf\n'), ((9646, 9714), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-config.init_scale)', 
'config.init_scale'], {}), '(-config.init_scale, config.init_scale)\n', (9675, 9714), True, 'import tensorflow as tf\n'), ((10029, 10058), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (10056, 10058), True, 'import tensorflow as tf\n'), ((10071, 10087), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10085, 10087), True, 'import tensorflow as tf\n'), ((10944, 10956), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10954, 10956), True, 'import tensorflow as tf\n'), ((10973, 10997), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (10991, 10997), True, 'import tensorflow as tf\n'), ((11016, 11084), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-config.init_scale)', 'config.init_scale'], {}), '(-config.init_scale, config.init_scale)\n', (11045, 11084), True, 'import tensorflow as tf\n'), ((11463, 11479), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (11477, 11479), True, 'import tensorflow as tf\n'), ((13157, 13172), 'numpy.argmin', 'np.argmin', (['vals'], {}), '(vals)\n', (13166, 13172), True, 'import numpy as np\n'), ((2799, 2832), 'data.reader.enwik8_raw_data', 'reader.enwik8_raw_data', (['data_path'], {}), '(data_path)\n', (2821, 2832), False, 'from data import reader\n'), ((6054, 6117), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'None', 'initializer': 'initializer'}), "('model', reuse=None, initializer=initializer)\n", (6071, 6117), True, 'import tensorflow as tf\n'), ((6129, 6167), 'rhn.Model', 'Model', ([], {'is_training': '(True)', 'config': 'config'}), '(is_training=True, config=config)\n', (6134, 6167), False, 'from rhn import Model\n'), ((6177, 6240), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('model', reuse=True, initializer=initializer)\n", (6194, 6240), True, 'import tensorflow as tf\n'), ((6257, 
6300), 'rhn.Model', 'Model', ([], {'is_training': '(False)', 'config': 'val_config'}), '(is_training=False, config=val_config)\n', (6262, 6300), False, 'from rhn import Model\n'), ((6315, 6359), 'rhn.Model', 'Model', ([], {'is_training': '(False)', 'config': 'test_config'}), '(is_training=False, config=test_config)\n', (6320, 6359), False, 'from rhn import Model\n'), ((6577, 6587), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (6585, 6587), True, 'import tensorflow as tf\n'), ((6822, 6832), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (6830, 6832), True, 'import tensorflow as tf\n'), ((7074, 7084), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (7082, 7084), True, 'import tensorflow as tf\n'), ((7909, 7955), 'data.reader.data_iterator', 'data_iterator', (['data', 'm.batch_size', 'm.num_steps'], {}), '(data, m.batch_size, m.num_steps)\n', (7922, 7955), False, 'from data.reader import data_iterator\n'), ((8483, 8496), 'numpy.exp', 'np.exp', (['(-cost)'], {}), '(-cost)\n', (8489, 8496), True, 'import numpy as np\n'), ((8974, 9002), 'numpy.save', 'np.save', (['savefile', 'all_probs'], {}), '(savefile, all_probs)\n', (8981, 9002), True, 'import numpy as np\n'), ((9724, 9787), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'None', 'initializer': 'initializer'}), "('model', reuse=None, initializer=initializer)\n", (9741, 9787), True, 'import tensorflow as tf\n'), ((9799, 9837), 'rhn.Model', 'Model', ([], {'is_training': '(True)', 'config': 'config'}), '(is_training=True, config=config)\n', (9804, 9837), False, 'from rhn import Model\n'), ((9847, 9910), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('model', reuse=True, initializer=initializer)\n", (9864, 9910), True, 'import tensorflow as tf\n'), ((9922, 9965), 'rhn.Model', 'Model', ([], {'is_training': '(False)', 'config': 'val_config'}), '(is_training=False, config=val_config)\n', (9927, 9965), False, 
'from rhn import Model\n'), ((9980, 10024), 'rhn.Model', 'Model', ([], {'is_training': '(False)', 'config': 'test_config'}), '(is_training=False, config=test_config)\n', (9985, 10024), False, 'from rhn import Model\n'), ((10241, 10251), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (10249, 10251), True, 'import tensorflow as tf\n'), ((11094, 11157), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'None', 'initializer': 'initializer'}), "('model', reuse=None, initializer=initializer)\n", (11111, 11157), True, 'import tensorflow as tf\n'), ((11174, 11212), 'rhn.Model', 'Model', ([], {'is_training': '(True)', 'config': 'config'}), '(is_training=True, config=config)\n', (11179, 11212), False, 'from rhn import Model\n'), ((11222, 11285), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('model', reuse=True, initializer=initializer)\n", (11239, 11285), True, 'import tensorflow as tf\n'), ((11302, 11345), 'rhn.Model', 'Model', ([], {'is_training': '(False)', 'config': 'val_config'}), '(is_training=False, config=val_config)\n', (11307, 11345), False, 'from rhn import Model\n'), ((11360, 11404), 'rhn.Model', 'Model', ([], {'is_training': '(False)', 'config': 'test_config'}), '(is_training=False, config=test_config)\n', (11365, 11404), False, 'from rhn import Model\n'), ((13740, 13752), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13750, 13752), True, 'import tensorflow as tf\n'), ((2903, 2935), 'data.reader.text8_raw_data', 'reader.text8_raw_data', (['data_path'], {}), '(data_path)\n', (2924, 2935), False, 'from data import reader\n'), ((6364, 6397), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6395, 6397), True, 'import tensorflow as tf\n'), ((10919, 10929), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10927, 10929), True, 'import tensorflow as tf\n'), ((11410, 11443), 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11441, 11443), True, 'import tensorflow as tf\n'), ((12137, 12147), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (12145, 12147), True, 'import tensorflow as tf\n'), ((12356, 12366), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (12364, 12366), True, 'import tensorflow as tf\n'), ((13953, 13963), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (13961, 13963), True, 'import tensorflow as tf\n'), ((14208, 14218), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (14216, 14218), True, 'import tensorflow as tf\n'), ((6686, 6711), 'numpy.log2', 'np.log2', (['valid_perplexity'], {}), '(valid_perplexity)\n', (6693, 6711), True, 'import numpy as np\n'), ((6941, 6966), 'numpy.log2', 'np.log2', (['valid_perplexity'], {}), '(valid_perplexity)\n', (6948, 6966), True, 'import numpy as np\n'), ((7191, 7215), 'numpy.log2', 'np.log2', (['test_perplexity'], {}), '(test_perplexity)\n', (7198, 7215), True, 'import numpy as np\n'), ((9081, 9128), 'numpy.clip', 'np.clip', (['(sum_probs / mc_steps)', '(1e-10)', '(1 - 1e-10)'], {}), '(sum_probs / mc_steps, 1e-10, 1 - 1e-10)\n', (9088, 9128), True, 'import numpy as np\n'), ((10361, 10385), 'numpy.log2', 'np.log2', (['test_perplexity'], {}), '(test_perplexity)\n', (10368, 10385), True, 'import numpy as np\n'), ((13290, 13319), 'numpy.log2', 'np.log2', (['vals[best_val_epoch]'], {}), '(vals[best_val_epoch])\n', (13297, 13319), True, 'import numpy as np\n'), ((13443, 13474), 'numpy.log2', 'np.log2', (['trains[best_val_epoch]'], {}), '(trains[best_val_epoch])\n', (13450, 13474), True, 'import numpy as np\n'), ((13585, 13615), 'numpy.log2', 'np.log2', (['tests[best_val_epoch]'], {}), '(tests[best_val_epoch])\n', (13592, 13615), True, 'import numpy as np\n'), ((3141, 3196), 'numpy.random.random_sample', 'np.random.random_sample', (['(m.batch_size, m.num_steps, 1)'], {}), '((m.batch_size, m.num_steps, 1))\n', (3164, 3196), True, 'import 
numpy as np\n'), ((3569, 3633), 'numpy.random.random_sample', 'np.random.random_sample', (['(m.batch_size, m.in_size, m.num_layers)'], {}), '((m.batch_size, m.in_size, m.num_layers))\n', (3592, 3633), True, 'import numpy as np\n'), ((3795, 3856), 'numpy.random.random_sample', 'np.random.random_sample', (['(m.batch_size, m.size, m.num_layers)'], {}), '((m.batch_size, m.size, m.num_layers))\n', (3818, 3856), True, 'import numpy as np\n'), ((4015, 4065), 'numpy.random.random_sample', 'np.random.random_sample', (['(m.batch_size, 1, m.size)'], {}), '((m.batch_size, 1, m.size))\n', (4038, 4065), True, 'import numpy as np\n'), ((5173, 5194), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (5179, 5194), True, 'import numpy as np\n'), ((12044, 12069), 'numpy.log2', 'np.log2', (['train_perplexity'], {}), '(train_perplexity)\n', (12051, 12069), True, 'import numpy as np\n'), ((12265, 12290), 'numpy.log2', 'np.log2', (['valid_perplexity'], {}), '(valid_perplexity)\n', (12272, 12290), True, 'import numpy as np\n'), ((12482, 12506), 'numpy.log2', 'np.log2', (['test_perplexity'], {}), '(test_perplexity)\n', (12489, 12506), True, 'import numpy as np\n'), ((14074, 14099), 'numpy.log2', 'np.log2', (['valid_perplexity'], {}), '(valid_perplexity)\n', (14081, 14099), True, 'import numpy as np\n'), ((14327, 14351), 'numpy.log2', 'np.log2', (['test_perplexity'], {}), '(test_perplexity)\n', (14334, 14351), True, 'import numpy as np\n'), ((8633, 8654), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (8639, 8654), True, 'import numpy as np\n'), ((5275, 5286), 'time.time', 'time.time', ([], {}), '()\n', (5284, 5286), False, 'import time\n'), ((8737, 8748), 'time.time', 'time.time', ([], {}), '()\n', (8746, 8748), False, 'import time\n')] |
"""
- Version 1.3: Due to the current Rush/Cheese Meta, I implemented a more defensive build order and in return deactivated
the 2-Base Immortal BO.
- Version 1.4: Switched from randomly chosen build orders to scouting based build order. Yet, still not completely with
a neural network but with basic rules, provided by a neural network.
- Version 1.5: Added a simple neural network to chose build orders based on scouting information.
Local tests with hundreds of games revealed that win rates compared to random choosing increased from 44% to 71%.
Bots used locally: YoBot, Tyr, Tyrz, 5minBot, BlinkerBot, NaughtyBot, SarsaBot, SeeBot, ramu,
Micromachine, Kagamine, AviloBot, EarlyAggro, Voidstar, ReeBot
- Version 1.6: Adapted early game rush defense in order to deal better with 12 pools (e.g. by CheeZerg).
Trained a new neural network with 730 games against the newest versions of most bots available.
Also refined scouting on 4 player maps and tuned the late game emergency strategy to prevent ties.
- Version 1.6.1: Bugfixes and new Model
- Version 1.7: Added a One-Base defence into Void-Ray build in order to deal with other very aggressive builds
- Version 1.7.1: Bugfixes and improved Voidray micro
- Version 1.7.2: Newly trained model
- Version 1.7.3 - 4: Small Bugfixes
- Version 1.7.5: Slightly improved Rush defence
- Version 1.8: Improved scouting with more scouting parameters, new model and various bug fixes / small improvements
- Version 1.9: Improved building placement and attack priorities. Oracle harass for Stargate build
- Version 2.0: Updated to Python 3.7.4 and to Burny's Python-sc2 from 20.09.2019
- Version 2.1: Switched to game_step = 4. Added a Random Forest Classifier and a manual BO-Choice to the chat to compare the results with those of the DNN
               Tried to increase survivability of the scout
- Version 3.0: Complete rewrite of MadAI in the sharpy-sc2 framework developed by Infy & merfolk. Initially implemented 3 basic strategies, i.e.
4-Gate, 2-Base Robo and Defensive build, randomly chosen in order to gather training data
- Version 3.1: Many minor improvements due to issues revealed in ladder replays. Addition of an automatic adaptive gateway unit selector,
               based on a counter table. This should ensure that the gateway units are always the best composition with regard to the enemy units.
- Version 3.2: Added the Skytoss build with an early Oracle Harass and follow-up Voidrays with Chargelots
- Version 3.3: Added the (rather messy) neural network and random forest classifier from MadAI 2.1 for build order choices
- Version 3.3.1: Changed the Skytoss BO from Voidrays to Tempests
- Version 3.4: Account for losses with a specific build order by having separate models trained with lost games.
               Computation of the final prediction by subtracting win prediction - loss prediction for both models and
               all four build orders. Then taking the build order with the highest results. If an overall prediction for
               a specific build order has a positive value, it is more likely to win with that, while if it has a negative value
               it is more likely to lose with it.
"""
from sc2 import BotAI, UnitTypeId, AbilityId, Race
from sc2.unit import Unit
from sc2.position import Point2
from sc2.constants import RALLY_UNITS
from sc2.ids.upgrade_id import UpgradeId
from sharpy.managers.roles import UnitTask
from sharpy.knowledges import KnowledgeBot
from sharpy.plans import BuildOrder, Step, SequentialList, StepBuildGas
from sharpy.plans.acts import *
from sharpy.plans.acts.protoss import *
from sharpy.plans.require import *
from sharpy.plans.tactics import *
from sharpy.plans.tactics.protoss import *
from sharpy.managers.manager_base import ManagerBase
from typing import List
import random
import numpy as np
import time
import pickle
import keras
class GetScoutingData(ActBase):
    """Collect a 64-feature scouting snapshot and choose MadAI's build order.

    The snapshot (``scout_data``) mixes counts of known enemy structures and
    units, x+y position sums of key early-game buildings, and economy / army
    power estimates from the game analyzer.  With ``use_model`` enabled the
    build order is the argmax of the combined DNN and random-forest
    win-minus-loss predictions; otherwise it is drawn uniformly at random
    (training-data gathering mode).
    """

    # Chat labels for the four build orders, in prediction-index order.
    BUILD_NAMES = ("2-Base Robo", "4-Gate Proxy", "Rush Defend", "Skytoss")

    def __init__(self):
        super().__init__()
        self.build_order = -1   # -1 = not decided yet, otherwise 0..3
        self.scout_data = []    # raw 64-feature scouting vector
        self.use_model = False  # False => random build order (training mode)
        if self.use_model:
            # Win and loss models: a Keras DNN pair and a random-forest pair.
            self.model = keras.models.load_model("MadAI/MadAI_06_03_2020")
            self.model_loss = keras.models.load_model("MadAI/MadAI_06_03_2020_loss")
            self.RF_model = pickle.load(open('MadAI/MadAI_RF_06_03_2020.sav', 'rb'))
            self.RF_model_loss = pickle.load(open('MadAI/MadAI_RF_06_03_2020_loss.sav', 'rb'))
        self.choice_data = []   # all-scalar model input derived from scout_data

    async def start(self, knowledge: 'Knowledge'):
        await super().start(knowledge)

    def _count(self, type_id) -> int:
        """Number of known enemy units/structures of ``type_id``."""
        return len(self.knowledge.known_enemy_units(type_id))

    def _pos_sum(self, type_id, index: int):
        """x + y of the ``index``-th known enemy unit of ``type_id``, or 0 if absent."""
        units = self.knowledge.known_enemy_units(type_id)
        if len(units) > index:
            position = units[index].position
            return position[0] + position[1]
        return 0

    def _collect_scout_data(self) -> None:
        """Fill ``self.scout_data`` with the 64 scouting features (order is fixed)."""
        enemy_rush = 1 if self.knowledge.possible_rush_detected else 0
        # Structure/unit types whose known count is a feature (order matters:
        # it must match the column order the models were trained on).
        counted_types = [
            UnitTypeId.NEXUS, UnitTypeId.PYLON, UnitTypeId.GATEWAY,
            UnitTypeId.CYBERNETICSCORE, UnitTypeId.ASSIMILATOR, UnitTypeId.PHOTONCANNON,
            UnitTypeId.BUNKER, UnitTypeId.FORGE, UnitTypeId.COMMANDCENTER,
            UnitTypeId.ORBITALCOMMAND, UnitTypeId.SUPPLYDEPOT, UnitTypeId.SUPPLYDEPOTLOWERED,
            UnitTypeId.BARRACKS, UnitTypeId.TECHLAB, UnitTypeId.REACTOR,
            UnitTypeId.REFINERY, UnitTypeId.FACTORY, UnitTypeId.HATCHERY,
            UnitTypeId.SPINECRAWLER, UnitTypeId.SPAWNINGPOOL, UnitTypeId.ROACHWARREN,
            UnitTypeId.EXTRACTOR,
        ]
        # Army unit types counted through the enemy units manager (order matters).
        army_types = [
            UnitTypeId.ZEALOT, UnitTypeId.STALKER, UnitTypeId.MARINE,
            UnitTypeId.REAPER, UnitTypeId.ZERGLING, UnitTypeId.ROACH,
        ]
        # (type, nth-building) pairs whose x+y position sum is a feature.
        position_features = [
            (UnitTypeId.PYLON, 0), (UnitTypeId.PYLON, 1), (UnitTypeId.PYLON, 2),
            (UnitTypeId.GATEWAY, 0), (UnitTypeId.GATEWAY, 1),
            (UnitTypeId.FORGE, 0),
            (UnitTypeId.PHOTONCANNON, 0), (UnitTypeId.PHOTONCANNON, 1),
            (UnitTypeId.PHOTONCANNON, 2), (UnitTypeId.PHOTONCANNON, 3),
            (UnitTypeId.SUPPLYDEPOT, 0), (UnitTypeId.SUPPLYDEPOT, 1), (UnitTypeId.SUPPLYDEPOT, 2),
            (UnitTypeId.SUPPLYDEPOTLOWERED, 0), (UnitTypeId.SUPPLYDEPOTLOWERED, 1),
            (UnitTypeId.SUPPLYDEPOTLOWERED, 2),
            (UnitTypeId.BUNKER, 0),
            (UnitTypeId.BARRACKS, 0), (UnitTypeId.BARRACKS, 1), (UnitTypeId.BARRACKS, 2),
            (UnitTypeId.FACTORY, 0),
            (UnitTypeId.SPAWNINGPOOL, 0),
            (UnitTypeId.SPINECRAWLER, 0), (UnitTypeId.SPINECRAWLER, 1),
        ]
        analyzer = self.knowledge.game_analyzer
        predicter = self.knowledge.enemy_army_predicter
        self.scout_data = (
            [
                self.knowledge.enemy_start_location,  # Point2; reduced to x+y for the models
                enemy_rush,
                self.knowledge.enemy_units_manager.enemy_worker_count,
            ]
            + [self._count(t) for t in counted_types]
            + [self.knowledge.enemy_units_manager.unit_count(t) for t in army_types]
            + [self._pos_sum(t, i) for t, i in position_features]
            + [
                analyzer.enemy_mineral_income,
                analyzer.enemy_gas_income,
                analyzer.enemy_power.power,
                analyzer.enemy_predict_power.power,
                analyzer.our_power.power,
                predicter.own_value,
                predicter.enemy_value,
                predicter.enemy_mined_minerals,
                predicter.enemy_mined_gas,
            ]
        )

    def _choose_with_models(self):
        """Return ``(chat_message, build_order_index)`` from the trained models.

        The score of each build order is the mean of (DNN win - DNN loss) and
        (RF win - RF loss) probabilities, scaled to percent; a positive score
        means the build is more likely to win than lose.
        """
        # All model features must be scalar: replace the start location (a
        # Point2) by its x + y sum; the remaining 63 features are used as-is.
        self.choice_data = [self.scout_data[0][0] + self.scout_data[0][1]] + list(self.scout_data[1:64])
        dnn_input = np.array(self.choice_data).reshape(-1, 64, 1)
        prediction = self.model.predict(dnn_input)
        prediction_loss = self.model_loss.predict(dnn_input)
        rf_prediction = self.RF_model.predict_proba([self.choice_data])
        rf_prediction_loss = self.RF_model_loss.predict_proba([self.choice_data])
        combined = [
            round((prediction[0][i] - prediction_loss[0][i]
                   + rf_prediction[0][i] - rf_prediction_loss[0][i]) * 100 / 2, 2)
            for i in range(4)
        ]
        # Chat line: "<name>: [dnn_win - dnn_loss / rf_win - rf_loss / combined]"
        message = "; ".join(
            "{}: [{} - {} / {} - {} / {}]".format(
                name,
                round(prediction[0][i] * 100, 2),
                round(prediction_loss[0][i] * 100, 2),
                round(rf_prediction[0][i] * 100, 2),
                round(rf_prediction_loss[0][i] * 100, 2),
                combined[i],
            )
            for i, name in enumerate(self.BUILD_NAMES)
        )
        return message, int(np.argmax(combined))

    async def execute(self) -> bool:
        """Choose the build order on the first run; a no-op afterwards."""
        if self.build_order != -1:
            return True
        self._collect_scout_data()
        if self.use_model:
            message, self.build_order = self._choose_with_models()
            await self.ai.chat_send(message)
        else:
            # Training mode: pick one of the four builds at random.
            self.build_order = random.randrange(0, 4)
        if 0 <= self.build_order <= 3:
            await self.ai.chat_send(
                "(glhf) MadAI v3.4: {} BO chosen!".format(self.BUILD_NAMES[self.build_order])
            )
        else:
            await self.ai.chat_send("(glhf) MadAI v3.4: No BO chosen! PANIC!")
        return True
class Dt_Harass(ActBase):
    """Dark templar harass tactic.

    Sends the first DT to the enemy's furthest expansion, later DTs (in
    pairs) to the enemy mineral line and main, and aborts the harass as soon
    as enemy detection (or static defence implying detection) is seen.  Once
    aborted, surviving DTs regroup at the gather point and are merged into
    archons two at a time.
    """
    #TODO: Let only the first DT walk to base and the rest attack the closest enemy, just as in the old MadBot
    def __init__(self):
        super().__init__()
        self.dts_detected = False  # set once enemy detection / static defence is seen
        self.already_merging_tags: List[int] = []  # DTs already ordered into an archon merge
        self.main_dt_tag: List[int] = []  # DTs that already received a harass assignment
        self.first_dts = False  # True after the very first DT was sent out
    async def execute(self) -> bool:
        # Abort the harass when the enemy (likely) has detection: a ready DT
        # with shields below 60 suggests it is being hit while revealed;
        # cannons/spines/turrets/overseers indicate detection directly.
        if (self.cache.own(UnitTypeId.DARKTEMPLAR).ready and not self.dts_detected and self.cache.own(UnitTypeId.DARKTEMPLAR).ready.random.shield < 60) or \
                (len(self.knowledge.known_enemy_units(UnitTypeId.PHOTONCANNON)) > 0 and not self.dts_detected) or \
                (len(self.knowledge.known_enemy_units(UnitTypeId.SPINECRAWLER)) > 0 and not self.dts_detected) or \
                (len(self.knowledge.known_enemy_units(UnitTypeId.MISSILETURRET)) > 0 and not self.dts_detected) or \
                (len(self.knowledge.known_enemy_units(UnitTypeId.OVERSEER)) > 0 and not self.dts_detected):
            # Don't even start the harass if the enemy has some sort of detection
            self.dts_detected = True
            for dt in self.cache.own(UnitTypeId.DARKTEMPLAR):
                # Get back to the gather point to be morphed to Archons savely
                self.do(dt.move(self.knowledge.gather_point))
            print('DTs detected!!')
        # Start dark templar attack
        if not self.dts_detected:
            if self.cache.own(UnitTypeId.DARKTEMPLAR).exists:
                if not self.first_dts:
                    # First DT rallies to the enemy's furthest expansion's mineral line.
                    dt1 = self.cache.own(UnitTypeId.DARKTEMPLAR)[0]
                    self.main_dt_tag.append(dt1.tag)
                    self.do(
                        dt1(
                            RALLY_UNITS,
                            self.knowledge.expansion_zones[-1].mineral_line_center,
                        )
                    )
                    self.knowledge.roles.set_task(UnitTask.Reserved, dt1)
                    self.first_dts = True
                else:
                    # Later, unassigned DTs are sent out one or two at a time.
                    dts = self.cache.own(UnitTypeId.DARKTEMPLAR).ready.tags_not_in(self.main_dt_tag)
                    if dts.amount == 1:
                        # Single DT: harass the second-furthest expansion's mineral line.
                        exe_dt = dts[0]
                        self.do(exe_dt.attack(self.knowledge.expansion_zones[-2].mineral_line_center))
                        self.knowledge.roles.set_task(UnitTask.Reserved, exe_dt)
                    elif dts.amount >= 2:
                        # Pair: one to the mineral line, one into the enemy main.
                        dts = dts.random_group_of(2)
                        exe_dt = dts[0]
                        attack_dt = dts[1]
                        self.do(exe_dt.attack(self.knowledge.expansion_zones[-2].mineral_line_center))
                        self.do(attack_dt.attack(self.knowledge.enemy_main_zone.center_location))
                        self.knowledge.roles.set_task(UnitTask.Reserved, exe_dt)
                        self.knowledge.roles.set_task(UnitTask.Reserved, attack_dt)
                        self.main_dt_tag.append(exe_dt.tag)
                        self.main_dt_tag.append(attack_dt.tag)
        else:
            if len(self.ai.units(UnitTypeId.DARKTEMPLAR).ready.closer_than(10, self.knowledge.gather_point)) >= 2:
                # Only morph Archons when its safe, i.e. at the current gather point
                templars = self.cache.own(UnitTypeId.DARKTEMPLAR).ready.tags_not_in(self.already_merging_tags)
                if templars.amount > 1:
                    # Merge the two closest not-yet-merging DTs into an archon.
                    unit: Unit = templars[0]
                    self.already_merging_tags.append(unit.tag)
                    target: Unit = templars.tags_not_in(self.already_merging_tags).closest_to(unit)
                    self.already_merging_tags.append(target.tag)
                    self.knowledge.roles.set_task(UnitTask.Reserved, unit)
                    self.knowledge.roles.set_task(UnitTask.Reserved, target)
                    self.knowledge.print(f"[ARCHON] merging {str(unit.tag)} and {str(target.tag)}")
                    # MORPH_ARCHON must be issued as a raw two-unit protocol
                    # command; the high-level unit API does not expose it.
                    from s2clientprotocol import raw_pb2 as raw_pb
                    from s2clientprotocol import sc2api_pb2 as sc_pb
                    command = raw_pb.ActionRawUnitCommand(
                        ability_id=AbilityId.MORPH_ARCHON.value,
                        unit_tags=[unit.tag, target.tag],
                        queue_command=False
                    )
                    action = raw_pb.ActionRaw(unit_command=command)
                    await self.ai._client._execute(action=sc_pb.RequestAction(
                        actions=[sc_pb.Action(action_raw=action)]
                    ))
        return True
class Oracle_Harass(ActBase):
    """Single-oracle worker harass.

    The first oracle flies via a map-edge waypoint to a staging point behind
    the enemy main, then alternates between attacking workers with the pulsar
    beam on while healthy and retreating with the beam off when shields or
    energy run low.
    """
    def __init__(self):
        super().__init__()
        self.harass_started = False  # True once the oracle has been sent out
        self.do_something_after_travel = 0  # game time when harass actions may begin
    async def execute(self) -> bool:
        if len(self.ai.units(UnitTypeId.ORACLE)) >= 1 and not self.harass_started:
            # Staging point 25 units behind the enemy start location.
            self.save_target_main = self.knowledge.enemy_start_location.towards(self.knowledge.ai.game_info.map_center,
                                                                                -25)
            # print('X:', self.knowledge.ai.game_info.map_center[0] - self.knowledge.ai.start_location[0], 'Y:',
            # self.knowledge.ai.game_info.map_center[1] - self.knowledge.ai.start_location[1])
            # Pick a map-edge waypoint on our side of the map so the oracle
            # travels along the border rather than across the middle.
            if self.knowledge.ai.game_info.map_center[0] - self.knowledge.ai.start_location[0] < 0:
                self.safe_spot1 = 1
            else:
                self.safe_spot1 = (self.knowledge.ai.game_info.map_center[0] * 2) - 1
            if self.knowledge.ai.game_info.map_center[1] - self.knowledge.ai.start_location[1] > 0:
                self.safe_spot2 = 1
            else:
                self.safe_spot2 = (self.knowledge.ai.game_info.map_center[1] * 2) - 1
            or1 = self.ai.units(UnitTypeId.ORACLE)[0]
            self.knowledge.roles.set_task(UnitTask.Reserved, or1)
            self.do(or1.move(Point2((self.safe_spot1, self.safe_spot2))))
            self.do(or1.move(self.save_target_main, queue=True))
            self.harass_started = True
            # Allow 50 seconds of travel time before micro kicks in.
            self.do_something_after_travel = self.ai.time + 50
        elif len(self.ai.units(UnitTypeId.ORACLE)) >= 1 and self.harass_started:
            if self.ai.time > self.do_something_after_travel:
                or1 = self.ai.units(UnitTypeId.ORACLE)[0]
                self.knowledge.roles.set_task(UnitTask.Reserved, or1)
                attack_target_main = self.ai.enemy_start_locations[0].towards(self.ai.game_info.map_center, -5)
                save_target_main = self.ai.enemy_start_locations[0].towards(self.ai.game_info.map_center, -25)
                if or1.shield_percentage > 0.5 and or1.energy_percentage > 0.25:
                    # Healthy and with beam energy: hunt the closest worker,
                    # or attack into the main when no workers are visible.
                    workers = self.knowledge.ai.enemy_units.of_type({UnitTypeId.DRONE, UnitTypeId.PROBE, UnitTypeId.SCV})
                    if workers:
                        self.do(or1.attack(workers.closest_to(or1.position)))
                        self.ai.do(or1(AbilityId.BEHAVIOR_PULSARBEAMON, queue=True))
                    else:
                        self.do(or1.attack(attack_target_main))
                        self.ai.do(or1(AbilityId.BEHAVIOR_PULSARBEAMON, queue=True))
                        # self.do(or1(BUILD_STASISTRAP, attack_target_main))
                        # self.do_something_after_trap1 = self.time + 20
                        # self.do_something_after_trap2 = self.time + 10
                elif or1.shield_percentage < 0.1 or or1.energy_percentage < 0.02:
                    # Low shields or out of energy: beam off and retreat.
                    self.do(or1(AbilityId.BEHAVIOR_PULSARBEAMOFF))
                    self.ai.do(or1.move(save_target_main, queue=True))
                    print('Moving out again')
        return True
class MadAI(KnowledgeBot):
def __init__(self):
super().__init__("MadAI")
self.proxy_location = None
self.train_data = []
self.scout = GetScoutingData()
    async def start(self, knowledge: 'Knowledge'):
        """Delegate framework start-up to the KnowledgeBot base; no extra setup."""
        await super().start(knowledge)
def on_end(self, game_result):
print("OnGameEnd() was called.")
if str(game_result) == "Result.Victory":
result = 1
else:
result = 0
self.train_data.append(
[
result,
self.scout.build_order,
self.scout.scout_data[0][0]+self.scout.scout_data[0][1],
self.scout.scout_data[1],
self.scout.scout_data[2],
self.scout.scout_data[3],
self.scout.scout_data[4],
self.scout.scout_data[5],
self.scout.scout_data[6],
self.scout.scout_data[7],
self.scout.scout_data[8],
self.scout.scout_data[9],
self.scout.scout_data[10],
self.scout.scout_data[11],
self.scout.scout_data[12],
self.scout.scout_data[13],
self.scout.scout_data[14],
self.scout.scout_data[15],
self.scout.scout_data[16],
self.scout.scout_data[17],
self.scout.scout_data[18],
self.scout.scout_data[19],
self.scout.scout_data[20],
self.scout.scout_data[21],
self.scout.scout_data[22],
self.scout.scout_data[23],
self.scout.scout_data[24],
self.scout.scout_data[25],
self.scout.scout_data[26],
self.scout.scout_data[27],
self.scout.scout_data[28],
self.scout.scout_data[29],
self.scout.scout_data[30],
self.scout.scout_data[31],
self.scout.scout_data[32],
self.scout.scout_data[33],
self.scout.scout_data[34],
self.scout.scout_data[35],
self.scout.scout_data[36],
self.scout.scout_data[37],
self.scout.scout_data[38],
self.scout.scout_data[39],
self.scout.scout_data[40],
self.scout.scout_data[41],
self.scout.scout_data[42],
self.scout.scout_data[43],
self.scout.scout_data[44],
self.scout.scout_data[45],
self.scout.scout_data[46],
self.scout.scout_data[47],
self.scout.scout_data[48],
self.scout.scout_data[49],
self.scout.scout_data[50],
self.scout.scout_data[51],
self.scout.scout_data[52],
self.scout.scout_data[53],
self.scout.scout_data[54],
self.scout.scout_data[55],
self.scout.scout_data[56],
self.scout.scout_data[57],
self.scout.scout_data[58],
self.scout.scout_data[59],
self.scout.scout_data[60],
self.scout.scout_data[61],
self.scout.scout_data[62],
self.scout.scout_data[63],
]
)
print(self.train_data)
np.save("data/{}_first.npy".format(str(int(time.time()))), np.array(self.train_data))
    async def create_plan(self) -> BuildOrder:
        """Build the master plan: a common opening (probes, pylon, gateway,
        gas, cybernetics core, worker scout), the scouting act that sets
        ``self.scout.build_order``, one of four build-order branches selected
        by that value, and always-on defense / power / worker management."""
        # Common Start Build Order
        #TODO: Implement more BOs
        #TODO: Build second pylon at reaper ramp against Terran
        #TODO: Ignore Larva and Eggs even more?
        #TODO: Reenable Defence when Retreating
        #TODO: Ignore Hallucinations
        #TODO: Add time depended scouting variables, e.g. hatch before pool, etc.
        #TODO: Use the Phoenix-Scout-Info to make the attack trigger more flexible, based on the power difference
        #TODO: Position Rallypoint behind natural wall on Discobloodbath
        #TODO: Move the builder probe towards the expansion already before minerals are at 400 just as it is done in BuildPosition
        #TODO: Keep the units together better, i.e. not get lured out when defending or fight buildings while the other half of the army is fighting units somewhere close
        #TODO: Defence against a single attacking worker is not functioning as intended
        return BuildOrder([
            # Chrono probe production until 19 probes (once a pylon is ready).
            Step(None, ChronoUnitProduction(UnitTypeId.PROBE, UnitTypeId.NEXUS),
                 skip=RequiredUnitExists(UnitTypeId.PROBE, 19, include_pending=True), skip_until=RequiredUnitReady(UnitTypeId.PYLON, 1)),
            SequentialList([
                ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 14),
                GridBuilding(UnitTypeId.PYLON, 1),
                # Worker scout after the first pylon, until a build order is chosen.
                Step(RequiredUnitExists(UnitTypeId.PYLON, 1, include_pending=False), WorkerScout(), skip=RequireCustom(lambda k: self.scout.build_order >= 0)),
                ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 15),
                GridBuilding(UnitTypeId.GATEWAY, 1),
                ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 16),
                StepBuildGas(1),
                ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 18),
                GridBuilding(UnitTypeId.PYLON, 2),
                ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 20),
                StepBuildGas(2),
                ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 21),
                GridBuilding(UnitTypeId.CYBERNETICSCORE, 1),
                ProtossUnit(UnitTypeId.ZEALOT, 1),
                ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 23),
                # Gather scouting data / choose the build once the core exists.
                Step(None, self.scout, skip_until=RequiredUnitExists(UnitTypeId.CYBERNETICSCORE, 1))
            ]),
            # Exactly one branch runs, depending on the chosen build order.
            Step(lambda k: self.scout.build_order == 0, self.two_base_robo()),
            Step(lambda k: self.scout.build_order == 1, self.four_gate()),
            Step(lambda k: self.scout.build_order == 2, self.defend_dt()),
            Step(lambda k: self.scout.build_order == 3, self.skytoss()),
            SequentialList([
                Step(None, PlanZoneDefense(), skip=RequiredUnitReady(UnitTypeId.PROBE, 23)),
                RestorePower(),
                PlanDistributeWorkers(),
                Step(None, PlanZoneGather(), skip=RequiredUnitReady(UnitTypeId.PROBE, 23))
            ])
        ])
    def four_gate(self) -> ActBase:
        """4-gate: warpgate research, up to four gateways, a forward pylon,
        constant gateway unit production, and an attack at 16 army supply."""
        #TODO: Follow-up BO
        # Forward pylon placement: 50/50 either behind the enemy natural's
        # mineral line (not vs Zerg) or pushed out from the map center
        # towards the enemy main.
        random_location = random.randrange(0, 2)
        if random_location == 0 and not self.knowledge.enemy_race == Race.Zerg:
            natural = self.knowledge.expansion_zones[-3]
            pylon_pos: Point2 = natural.mineral_line_center.towards(
                self.knowledge.expansion_zones[-3].behind_mineral_position_center, -5)
        else:
            pylon_pos = self.knowledge.ai.game_info.map_center.towards(self.knowledge.ai.enemy_start_locations[0],
                                                                       17).position
        return BuildOrder([
            SequentialList([
                GridBuilding(UnitTypeId.GATEWAY, 2),
                BuildOrder(
                    [
                        AutoPylon(),
                        ActTech(UpgradeId.WARPGATERESEARCH, UnitTypeId.CYBERNETICSCORE),
                        ProtossUnit(UnitTypeId.STALKER, 1, priority=True),
                        GridBuilding(UnitTypeId.GATEWAY, 3),
                        # Place the forward pylon once warpgate research hits
                        # 40%, unless the research has already finished.
                        Step(RequiredTechReady(UpgradeId.WARPGATERESEARCH, 0.4), BuildPosition(UnitTypeId.PYLON, pylon_pos,
                                                                                              exact=False, only_once=True), skip=RequiredTechReady(UpgradeId.WARPGATERESEARCH)),
                        GridBuilding(UnitTypeId.GATEWAY, 4),
                        [
                            # A couple of sentries once enough stalkers exist,
                            # then generic gateway unit production.
                            Step(None, ProtossUnit(UnitTypeId.SENTRY, 1),
                                 skip_until=RequiredUnitExists(UnitTypeId.STALKER, 2, include_pending=True)),
                            Step(None, ProtossUnit(UnitTypeId.SENTRY, 2),
                                 skip_until=RequiredUnitExists(UnitTypeId.STALKER, 6, include_pending=True)),
                            Step(None, GateUnit()),
                        ],
                    ])
            ]),
            SequentialList([
                ChronoTech(AbilityId.RESEARCH_WARPGATE, UnitTypeId.CYBERNETICSCORE),
                ChronoUnitProduction(UnitTypeId.STALKER, UnitTypeId.GATEWAY),
            ]),
            SequentialList([
                # Stop Defending when attacking, i.e. Base-Trade
                Step(None, PlanZoneDefense(), skip=RequiredTechReady(UpgradeId.WARPGATERESEARCH)),
                PlanZoneGather(),
                # Step(RequiredUnitReady(UnitTypeId.GATEWAY, 4), PlanZoneGather()),
                PlanZoneAttack(16),
                PlanFinishEnemy(),
            ])
        ])
    def two_base_robo(self) -> ActBase:
        """2-base robo: expand, immortal production with chargelot support, a
        warp prism, and an attack once charge is (nearly) researched."""
        #TODO: Archons as follow-up after first push (ActArchon)
        # Forward pylon at the map center, placed late (third immortal ready).
        pylon_pos = self.knowledge.ai.game_info.map_center.position
        attack = PlanZoneAttack(12)
        attack.enemy_power_multiplier = 0.8  # Attack even if it might be a bad idea
        return BuildOrder([
            SequentialList([
                ActExpand(2),
                BuildOrder(
                    [
                        ActTech(UpgradeId.WARPGATERESEARCH, UnitTypeId.CYBERNETICSCORE),
                        ProtossUnit(UnitTypeId.STALKER, 1),
                        Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 24)),
                    ]),
                GridBuilding(UnitTypeId.ROBOTICSFACILITY, 1),
                # Sentries are only built until charge finishes researching.
                Step(None, ProtossUnit(UnitTypeId.SENTRY, 1), skip=RequiredTechReady(UpgradeId.CHARGE)),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 25)),
                GridBuilding(UnitTypeId.PYLON, 3),
                GridBuilding(UnitTypeId.GATEWAY, 2),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 26)),
                BuildOrder(
                    [
                        Step(None, ProtossUnit(UnitTypeId.SENTRY, 2), skip=RequiredTechReady(UpgradeId.CHARGE)),
                        Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 30)),
                        GridBuilding(UnitTypeId.PYLON, 4),
                        SequentialList([
                            # Scout with a hallucinated phoenix once a sentry is ready.
                            Step(RequiredUnitReady(UnitTypeId.SENTRY, 1), HallucinatedPhoenixScout()),
                            Step(RequiredUnitReady(UnitTypeId.SENTRY, 1), PlanHallucination()),
                        ])
                    ]),
                Step(RequiredUnitReady(UnitTypeId.ROBOTICSFACILITY, 1), ProtossUnit(UnitTypeId.IMMORTAL, 1)),
                Step(None, ProtossUnit(UnitTypeId.ZEALOT, 3), skip=RequiredTechReady(UpgradeId.CHARGE)),
                GridBuilding(UnitTypeId.GATEWAY, 3),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 32)),
                Step(RequiredUnitReady(UnitTypeId.IMMORTAL, 1), ProtossUnit(UnitTypeId.OBSERVER, 1)),
                StepBuildGas(3),
                Step(RequiredUnitReady(UnitTypeId.CYBERNETICSCORE, 1), GridBuilding(UnitTypeId.TWILIGHTCOUNCIL, 1)),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 34)),
                Step(RequiredUnitReady(UnitTypeId.ROBOTICSFACILITY, 1), ProtossUnit(UnitTypeId.IMMORTAL, 2)),
                Step(None, ProtossUnit(UnitTypeId.SENTRY, 4), skip=RequiredTechReady(UpgradeId.CHARGE)),
                GridBuilding(UnitTypeId.GATEWAY, 4),
                Step(RequiredUnitReady(UnitTypeId.IMMORTAL, 1), ActTech(UpgradeId.CHARGE, UnitTypeId.TWILIGHTCOUNCIL)),
                StepBuildGas(4),
                Step(RequiredUnitReady(UnitTypeId.IMMORTAL, 3), BuildPosition(UnitTypeId.PYLON, pylon_pos, exact=False, only_once=True)),
            ]),
            BuildOrder(
                [
                    AutoPylon(),
                    Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 44)),
                    Step(RequiredUnitReady(UnitTypeId.IMMORTAL, 3),
                         ProtossUnit(UnitTypeId.WARPPRISM, 1, priority=True)),
                    Step(RequiredUnitReady(UnitTypeId.ROBOTICSFACILITY, 1),
                         ProtossUnit(UnitTypeId.IMMORTAL, 20, priority=True)),
                    Step(RequiredUnitReady(UnitTypeId.ROBOTICSFACILITY, 1), ProtossUnit(UnitTypeId.ZEALOT, 7),
                         skip=RequiredUnitExists(UnitTypeId.TWILIGHTCOUNCIL, 1)),
                    Step(RequiredUnitReady(UnitTypeId.IMMORTAL, 1), GateUnit()),
                ]),
            SequentialList([
                ChronoTech(AbilityId.RESEARCH_WARPGATE, UnitTypeId.CYBERNETICSCORE),
                ChronoUnitProduction(UnitTypeId.IMMORTAL, UnitTypeId.ROBOTICSFACILITY),
                Step(RequiredUnitReady(UnitTypeId.TWILIGHTCOUNCIL, 1), ChronoAnyTech(0),
                     skip=RequiredUnitReady(UnitTypeId.IMMORTAL, 3)),
            ]),
            SequentialList([
                # Stop Defending when attacking, i.e. Base-Trade
                Step(None, PlanZoneDefense(), skip=RequiredTechReady(UpgradeId.CHARGE, 0.9)),
                PlanZoneGather(),
                Step(RequiredTechReady(UpgradeId.CHARGE, 0.9), attack),
                PlanFinishEnemy(),
            ])
        ])
def defend_dt(self) -> ActBase:
        """Dark Templar build: turtle behind cannons and shield batteries while
        teching to a Dark Shrine, harass with DTs, then push out once Charge
        completes.

        Returns:
            ActBase: composed BuildOrder driving tech, production, chrono
            usage and army control for this plan.
        """
        #TODO: Proxy-Pylon for DTs only, Follow-Up
        #TODO: Give DTs something to do if everything is dead near them
        # Forward pylon for reinforcing warp-ins goes to the map center.
        pylon_pos = self.knowledge.ai.game_info.map_center.position
        # Static defense vs Zerg is offset from the natural's mineral line
        # (negative distance = away from the behind-minerals point); against
        # other races it goes just below the own main ramp.
        if self.knowledge.enemy_race == Race.Zerg:
            defensive_position1 = self.knowledge.expansion_zones[1].mineral_line_center.towards(
                self.knowledge.expansion_zones[1].behind_mineral_position_center, -12)
            defensive_position2 = self.knowledge.expansion_zones[1].mineral_line_center.towards(
                self.knowledge.expansion_zones[1].behind_mineral_position_center, -10)
        else:
            defensive_position1 = self.knowledge.base_ramp.top_center.towards(self.knowledge.base_ramp.bottom_center, -4)
            defensive_position2 = self.knowledge.base_ramp.top_center.towards(self.knowledge.base_ramp.bottom_center, -5)
        attack = PlanZoneAttack(10)
        attack.retreat_multiplier = 0.5 # All in
        attack.enemy_power_multiplier = 0.7 # Attack even if it might be a bad idea
        return BuildOrder([
            # Tech and static-defense sequence up to the Dark Shrine.
            SequentialList([
                GridBuilding(UnitTypeId.FORGE, 1),
                BuildOrder(
                    [
                        Step(RequiredUnitReady(UnitTypeId.CYBERNETICSCORE, 1),
                            BuildPosition(UnitTypeId.SHIELDBATTERY, defensive_position1, exact=False, only_once=True)),
                        ActTech(UpgradeId.WARPGATERESEARCH, UnitTypeId.CYBERNETICSCORE),
                        ProtossUnit(UnitTypeId.STALKER, 1),
                    ]),
                Step(RequiredUnitReady(UnitTypeId.FORGE, 1),
                    BuildPosition(UnitTypeId.PHOTONCANNON, defensive_position1, exact=False, only_once=True)),
                Step(RequiredUnitReady(UnitTypeId.FORGE, 1),
                    BuildPosition(UnitTypeId.PHOTONCANNON, defensive_position2, exact=False, only_once=True)),
                Step(RequiredUnitReady(UnitTypeId.CYBERNETICSCORE, 1), GridBuilding(UnitTypeId.TWILIGHTCOUNCIL, 1)),
                Step(None, ProtossUnit(UnitTypeId.SENTRY, 1), skip=RequiredUnitReady(UnitTypeId.DARKSHRINE, 1)),
                GridBuilding(UnitTypeId.PYLON, 3),
                GridBuilding(UnitTypeId.GATEWAY, 2),
                Step(RequiredUnitReady(UnitTypeId.TWILIGHTCOUNCIL, 1), GridBuilding(UnitTypeId.DARKSHRINE, 1)),
                BuildOrder(
                    [
                        Step(None, ProtossUnit(UnitTypeId.SENTRY, 2), skip=RequiredUnitReady(UnitTypeId.DARKSHRINE, 1)),
                        Step(None, ProtossUnit(UnitTypeId.ZEALOT, 3), skip=RequiredUnitReady(UnitTypeId.DARKSHRINE, 1)),
                        GridBuilding(UnitTypeId.GATEWAY, 3),
                        SequentialList([
                            Step(RequiredUnitReady(UnitTypeId.SENTRY, 1), HallucinatedPhoenixScout()),
                            Step(RequiredUnitReady(UnitTypeId.SENTRY, 1), PlanHallucination()),
                        ])
                    ]),
            ]),
            # Continuous production (evaluated in parallel with the above).
            [
                AutoPylon(),
                # NOTE(review): in the `skip=` below, `or` between two requirement
                # objects is evaluated at construction time and returns the first
                # (always truthy) object, so the ARCHON condition is dead code —
                # likely wants a combined "any of" requirement. Confirm intent.
                Step(RequiredUnitReady(UnitTypeId.DARKSHRINE, 1), ProtossUnit(UnitTypeId.DARKTEMPLAR, 3, priority=True),
                    skip_until=RequiredUnitReady(UnitTypeId.DARKSHRINE, 1), skip=(RequiredUnitReady(UnitTypeId.DARKTEMPLAR, 1) or RequiredUnitExists(UnitTypeId.ARCHON, 1))),
                Step(RequiredUnitReady(UnitTypeId.DARKSHRINE, 1), GateUnit()),
                Step(RequiredUnitReady(UnitTypeId.DARKSHRINE, 1), ProtossUnit(UnitTypeId.SENTRY, 5)),
            ],
            # Upgrade follow-ups once the first DT is out.
            SequentialList([
                Step(RequiredUnitReady(UnitTypeId.DARKTEMPLAR, 1), ActTech(UpgradeId.CHARGE, UnitTypeId.TWILIGHTCOUNCIL)),
                Step(RequiredUnitReady(UnitTypeId.DARKTEMPLAR, 1), ActTech(UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1, UnitTypeId.FORGE)),
                Step(RequiredTechReady(UpgradeId.CHARGE, 0.1),
                     BuildPosition(UnitTypeId.PYLON, pylon_pos, exact=False, only_once=True))
            ]),
            # Chronoboost priorities.
            SequentialList([
                ChronoTech(AbilityId.RESEARCH_WARPGATE, UnitTypeId.CYBERNETICSCORE),
                ChronoUnitProduction(UnitTypeId.STALKER, UnitTypeId.GATEWAY),
                ChronoAnyTech(0)
            ]),
            # Army control: defend, harass with DTs, attack when Charge finishes.
            SequentialList([
                PlanZoneDefense(),
                Step(None, PlanZoneGather(), skip=RequiredUnitReady(UnitTypeId.DARKSHRINE, 1)),
                Step(RequiredUnitReady(UnitTypeId.DARKSHRINE, 1), Dt_Harass()), # skip=RequiredTechReady(UpgradeId.CHARGE)),
                Step(RequiredTechReady(UpgradeId.CHARGE, 0.8), PlanZoneGather()),
                Step(RequiredTechReady(UpgradeId.CHARGE), attack),
                PlanFinishEnemy(),
            ])
        ])
def skytoss(self) -> ActBase:
        """Stargate build: fast expand into Fleet Beacon Tempests backed by
        Zealots, attacking once +1 air weapons is nearly finished.

        Returns:
            ActBase: composed BuildOrder for economy, tech, production,
            chrono usage and army control.
        """
        #TODO: Follow-up
        #TODO: Don't suicide the Oracle if there are units already waiting
        #TODO: Strange freezing of Units and Groups after the first attack
        #TODO: Switch to Tempests/Carrier and see how they perform
        # Pylon / shield-battery spot derived from the natural's mineral line
        # (negative distance = offset away from the behind-minerals point).
        natural_pylon_pos = self.knowledge.expansion_zones[1].mineral_line_center.towards(
            self.knowledge.expansion_zones[1].behind_mineral_position_center, -12)
        attack = PlanZoneAttack(12)
        attack.enemy_power_multiplier = 0.8 # Attack even if it might be a bad idea
        return BuildOrder([
            # Scripted opening: expand, Stargate tech, first Tempests, upgrades.
            SequentialList([
                ActExpand(2),
                ProtossUnit(UnitTypeId.STALKER, 1),
                BuildPosition(UnitTypeId.PYLON, natural_pylon_pos, exact=False, only_once=True),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 24)),
                GridBuilding(UnitTypeId.STARGATE, 1),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 25)),
                Step(None, ProtossUnit(UnitTypeId.SENTRY, 1), skip=RequiredUnitReady(UnitTypeId.TEMPEST, 1)),
                BuildPosition(UnitTypeId.SHIELDBATTERY, natural_pylon_pos, exact=False, only_once=True),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 26)),
                ProtossUnit(UnitTypeId.ZEALOT, 2),
                BuildOrder(
                    [
                        Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 28)),
                        SequentialList([
                            Step(RequiredUnitReady(UnitTypeId.SENTRY, 1), HallucinatedPhoenixScout()),
                            Step(RequiredUnitReady(UnitTypeId.SENTRY, 1), PlanHallucination()),
                        ])
                    ]),
                Step(RequiredUnitReady(UnitTypeId.STARGATE, 1), GridBuilding(UnitTypeId.FLEETBEACON, 1)),
                ActTech(UpgradeId.WARPGATERESEARCH, UnitTypeId.CYBERNETICSCORE),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 30)),
                ProtossUnit(UnitTypeId.ZEALOT, 3),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 32)),
                Step(RequiredUnitReady(UnitTypeId.FLEETBEACON, 1), ProtossUnit(UnitTypeId.TEMPEST, 1)),
                StepBuildGas(3),
                ProtossUnit(UnitTypeId.ZEALOT, 4),
                Step(RequiredUnitReady(UnitTypeId.CYBERNETICSCORE, 1), GridBuilding(UnitTypeId.TWILIGHTCOUNCIL, 1)),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 34)),
                Step(RequiredUnitReady(UnitTypeId.STARGATE, 1), ProtossUnit(UnitTypeId.TEMPEST, 2)),
                StepBuildGas(4),
                GridBuilding(UnitTypeId.STARGATE, 2),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 36)),
                Step(RequiredUnitReady(UnitTypeId.TEMPEST, 1), ActTech(UpgradeId.PROTOSSAIRWEAPONSLEVEL1, UnitTypeId.CYBERNETICSCORE)),
                ProtossUnit(UnitTypeId.ZEALOT, 5),
                Step(RequiredUnitExists(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 38)),
                Step(RequiredUnitReady(UnitTypeId.STARGATE, 1), ProtossUnit(UnitTypeId.TEMPEST, 3)),
                Step(RequiredUnitReady(UnitTypeId.TWILIGHTCOUNCIL, 1), ActTech(UpgradeId.CHARGE, UnitTypeId.TWILIGHTCOUNCIL)),
                GridBuilding(UnitTypeId.GATEWAY, 2),
                Step(RequiredMinerals(500), GridBuilding(UnitTypeId.GATEWAY, 3)),
            ]),
            # Steady-state production and air upgrades once the opening is done.
            BuildOrder(
                [
                    Step(RequiredUnitReady(UnitTypeId.NEXUS, 2), AutoPylon()),
                    Step(RequiredUnitReady(UnitTypeId.NEXUS, 2), ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 44)),
                    Step(RequiredUnitReady(UnitTypeId.TEMPEST, 1), ProtossUnit(UnitTypeId.TEMPEST, 20, priority=True)),
                    Step(RequiredUnitReady(UnitTypeId.STARGATE, 2), ProtossUnit(UnitTypeId.ZEALOT, 50)),
                    Step(RequiredTechReady(UpgradeId.PROTOSSAIRWEAPONSLEVEL1),
                         ActTech(UpgradeId.PROTOSSAIRWEAPONSLEVEL2, UnitTypeId.CYBERNETICSCORE)),
                    Step(RequiredTechReady(UpgradeId.PROTOSSAIRWEAPONSLEVEL2),
                         ActTech(UpgradeId.PROTOSSAIRWEAPONSLEVEL3, UnitTypeId.CYBERNETICSCORE)),
                ]),
            # Chronoboost priorities.
            SequentialList([
                ChronoUnitProduction(UnitTypeId.STALKER, UnitTypeId.GATEWAY),
                ChronoUnitProduction(UnitTypeId.TEMPEST, UnitTypeId.STARGATE),
                ChronoAnyTech(0)
            ]),
            # Army control: defend/gather, attack at ~90% of +1 air weapons.
            SequentialList([
                PlanZoneDefense(),
                PlanZoneGather(),
                # Step(RequiredUnitReady(UnitTypeId.ORACLE, 1), Oracle_Harass()),
                Step(RequiredTechReady(UpgradeId.PROTOSSAIRWEAPONSLEVEL1, 0.9), attack),
                PlanFinishEnemy(),
            ])
        ])
def two_base_stalker(self) -> ActBase:
        """Two-base Blink Stalker build: expand, mass Stalkers with a couple
        of Sentries off up to seven gateways, research Blink, and attack once
        four Stalkers are out.

        Returns:
            ActBase: composed BuildOrder for economy, production, chrono
            usage and army control.
        """
        #TODO: Adapt Unit Composition, Improve Timings, Hallu-Phoenix-Scout
        # NOTE(review): named `natural` but indexes expansion_zones[-3]; the
        # zone ordering is not visible here — confirm this is the intended
        # expansion and not an enemy-side zone.
        natural = self.knowledge.expansion_zones[-3]
        pylon_pos: Point2 = natural.behind_mineral_position_center
        return BuildOrder([
            SequentialList([
                ActExpand(2),
                BuildOrder(
                    [
                        AutoPylon(),
                        ActTech(UpgradeId.WARPGATERESEARCH, UnitTypeId.CYBERNETICSCORE),
                        # Worker production and gas timing.
                        [
                            Step(RequiredUnitExists(UnitTypeId.NEXUS, 2),
                                ActUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS, 44)),
                            StepBuildGas(3, skip=RequiredGas(300)),
                            StepBuildGas(4, skip=RequiredGas(200)),
                        ],
                        # Army mix: sprinkle Sentries in, then mass Stalkers.
                        [
                            Step(None, ProtossUnit(UnitTypeId.SENTRY, 1),
                                skip_until=RequiredUnitExists(UnitTypeId.STALKER, 2, include_pending=True)),
                            Step(None, ProtossUnit(UnitTypeId.SENTRY, 2),
                                skip_until=RequiredUnitExists(UnitTypeId.STALKER, 6, include_pending=True)),
                            Step(None, ProtossUnit(UnitTypeId.STALKER, 100)),
                        ],
                        [
                            Step(RequiredUnitReady(UnitTypeId.GATEWAY, 3),
                                BuildPosition(UnitTypeId.PYLON, pylon_pos, exact=False, only_once=True))
                        ],
                        # Gateway count ramps up around Twilight Council / Blink.
                        SequentialList([
                            GridBuilding(UnitTypeId.GATEWAY, 4),
                            Step(RequiredUnitReady(UnitTypeId.CYBERNETICSCORE, 1),
                                GridBuilding(UnitTypeId.TWILIGHTCOUNCIL, 1)),
                            GridBuilding(UnitTypeId.GATEWAY, 6),
                            Step(RequiredUnitReady(UnitTypeId.TWILIGHTCOUNCIL, 1),
                                ActTech(UpgradeId.BLINKTECH, UnitTypeId.TWILIGHTCOUNCIL)),
                            GridBuilding(UnitTypeId.GATEWAY, 7),
                        ]),
                    ])
            ]),
            # Chronoboost priorities.
            SequentialList([
                ChronoTech(AbilityId.RESEARCH_WARPGATE, UnitTypeId.CYBERNETICSCORE),
                ChronoTech(AbilityId.RESEARCH_BLINK, UnitTypeId.TWILIGHTCOUNCIL),
                ChronoUnitProduction(UnitTypeId.STALKER, UnitTypeId.GATEWAY),
            ]),
            # Army control: gather at 4 gateways, attack at 4 Stalkers.
            SequentialList([
                Step(RequiredUnitReady(UnitTypeId.GATEWAY, 4), PlanZoneGather()),
                Step(RequiredUnitReady(UnitTypeId.STALKER, 4), PlanZoneAttack(12)),
                PlanFinishEnemy(),
            ])
        ])
class LadderBot(MadAI):
    """Ladder entry point for MadAI, locked to the Protoss race."""

    @property
    def my_race(self):
        """Race this bot registers as."""
        race = Race.Protoss
        return race
| [
"keras.models.load_model",
"sc2.position.Point2",
"s2clientprotocol.raw_pb2.ActionRawUnitCommand",
"time.time",
"s2clientprotocol.sc2api_pb2.Action",
"random.randrange",
"numpy.array",
"s2clientprotocol.raw_pb2.ActionRaw",
"sharpy.plans.StepBuildGas"
] | [((39181, 39203), 'random.randrange', 'random.randrange', (['(0)', '(2)'], {}), '(0, 2)\n', (39197, 39203), False, 'import random\n'), ((4148, 4197), 'keras.models.load_model', 'keras.models.load_model', (['"""MadAI/MadAI_06_03_2020"""'], {}), "('MadAI/MadAI_06_03_2020')\n", (4171, 4197), False, 'import keras\n'), ((4228, 4282), 'keras.models.load_model', 'keras.models.load_model', (['"""MadAI/MadAI_06_03_2020_loss"""'], {}), "('MadAI/MadAI_06_03_2020_loss')\n", (4251, 4282), False, 'import keras\n'), ((36135, 36160), 'numpy.array', 'np.array', (['self.train_data'], {}), '(self.train_data)\n', (36143, 36160), True, 'import numpy as np\n'), ((24098, 24120), 'random.randrange', 'random.randrange', (['(0)', '(4)'], {}), '(0, 4)\n', (24114, 24120), False, 'import random\n'), ((29077, 29204), 's2clientprotocol.raw_pb2.ActionRawUnitCommand', 'raw_pb.ActionRawUnitCommand', ([], {'ability_id': 'AbilityId.MORPH_ARCHON.value', 'unit_tags': '[unit.tag, target.tag]', 'queue_command': '(False)'}), '(ability_id=AbilityId.MORPH_ARCHON.value,\n unit_tags=[unit.tag, target.tag], queue_command=False)\n', (29104, 29204), True, 'from s2clientprotocol import raw_pb2 as raw_pb\n'), ((29324, 29362), 's2clientprotocol.raw_pb2.ActionRaw', 'raw_pb.ActionRaw', ([], {'unit_command': 'command'}), '(unit_command=command)\n', (29340, 29362), True, 'from s2clientprotocol import raw_pb2 as raw_pb\n'), ((30882, 30924), 'sc2.position.Point2', 'Point2', (['(self.safe_spot1, self.safe_spot2)'], {}), '((self.safe_spot1, self.safe_spot2))\n', (30888, 30924), False, 'from sc2.position import Point2\n'), ((19038, 19064), 'numpy.array', 'np.array', (['self.choice_data'], {}), '(self.choice_data)\n', (19046, 19064), True, 'import numpy as np\n'), ((36119, 36130), 'time.time', 'time.time', ([], {}), '()\n', (36128, 36130), False, 'import time\n'), ((37886, 37901), 'sharpy.plans.StepBuildGas', 'StepBuildGas', (['(1)'], {}), '(1)\n', (37898, 37901), False, 'from sharpy.plans import BuildOrder, Step, 
SequentialList, StepBuildGas\n'), ((38100, 38115), 'sharpy.plans.StepBuildGas', 'StepBuildGas', (['(2)'], {}), '(2)\n', (38112, 38115), False, 'from sharpy.plans import BuildOrder, Step, SequentialList, StepBuildGas\n'), ((43927, 43942), 'sharpy.plans.StepBuildGas', 'StepBuildGas', (['(3)'], {}), '(3)\n', (43939, 43942), False, 'from sharpy.plans import BuildOrder, Step, SequentialList, StepBuildGas\n'), ((44577, 44592), 'sharpy.plans.StepBuildGas', 'StepBuildGas', (['(4)'], {}), '(4)\n', (44589, 44592), False, 'from sharpy.plans import BuildOrder, Step, SequentialList, StepBuildGas\n'), ((53568, 53583), 'sharpy.plans.StepBuildGas', 'StepBuildGas', (['(3)'], {}), '(3)\n', (53580, 53583), False, 'from sharpy.plans import BuildOrder, Step, SequentialList, StepBuildGas\n'), ((53982, 53997), 'sharpy.plans.StepBuildGas', 'StepBuildGas', (['(4)'], {}), '(4)\n', (53994, 53997), False, 'from sharpy.plans import BuildOrder, Step, SequentialList, StepBuildGas\n'), ((29475, 29506), 's2clientprotocol.sc2api_pb2.Action', 'sc_pb.Action', ([], {'action_raw': 'action'}), '(action_raw=action)\n', (29487, 29506), True, 'from s2clientprotocol import sc2api_pb2 as sc_pb\n')] |
import unittest
from random import Random
import numpy as np
from infinitd_server.game_config import CellPos, Row, Col
from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState
from infinitd_server.paths import makePathMap, compressPath, PathMap, pathExists
def emptyBattleground(rows: int, cols: int):
    """Build a BattlegroundState with a rows x cols grid of empty tower slots."""
    grid = [[None] * cols for _ in range(rows)]
    return BattlegroundState(towers=BgTowersState(grid))
class TestGetRandomPath(unittest.TestCase):
    """Tests for PathMap.getRandomPath on diagonal start/end layouts.

    The original verification loop was copy-pasted into all three tests;
    it is factored into _assertValidPath so each test states only its
    battleground and the expected path length.
    """

    def _assertValidPath(self, path, start, end, expectedLen):
        """Assert path has the expected length and endpoints, and that every
        step moves at most one row and one column from the previous cell."""
        self.assertEqual(len(path), expectedLen)
        self.assertEqual(path[0], start)
        self.assertEqual(path[-1], end)
        prevElem = path[0]
        for elem in path[1:]:
            self.assertGreaterEqual(elem.row, prevElem.row - 1)
            self.assertLessEqual(elem.row, prevElem.row + 1)
            self.assertGreaterEqual(elem.col, prevElem.col - 1)
            self.assertLessEqual(elem.col, prevElem.col + 1)
            prevElem = elem

    def test_diagonal2(self):
        battleground = emptyBattleground(2, 2)
        start = CellPos(Row(0), Col(0))
        end = CellPos(Row(1), Col(1))
        pathMap = makePathMap(battleground, start, end)
        self.assertIsNotNone(pathMap)
        for i in range(10):
            with self.subTest(seed=i):
                path = pathMap.getRandomPath(start, Random(i)) # pytype: disable=attribute-error
                self._assertValidPath(path, start, end, 3)

    def test_diagonal5(self):
        battleground = emptyBattleground(5, 5)
        start = CellPos(Row(0), Col(0))
        end = CellPos(Row(4), Col(4))
        pathMap = makePathMap(battleground, start, end)
        self.assertIsNotNone(pathMap)
        for i in range(10):
            with self.subTest(seed=i):
                path = pathMap.getRandomPath(start, Random(i)) # pytype: disable=attribute-error
                self._assertValidPath(path, start, end, 9)

    def test_diagonal5_with_obstacles(self):
        battleground = emptyBattleground(5, 5)
        # Block the center 2x2 square; a shortest path still exists around it.
        battleground.towers.towers[2][2] = BgTowerState(0)
        battleground.towers.towers[2][3] = BgTowerState(0)
        battleground.towers.towers[3][2] = BgTowerState(0)
        battleground.towers.towers[3][3] = BgTowerState(0)
        start = CellPos(Row(0), Col(0))
        end = CellPos(Row(4), Col(4))
        pathMap = makePathMap(battleground, start, end)
        self.assertIsNotNone(pathMap)
        for i in range(10):
            with self.subTest(seed=i):
                path = pathMap.getRandomPath(start, Random(i)) # pytype: disable=attribute-error
                self._assertValidPath(path, start, end, 9)
class TestPathExists(unittest.TestCase):
    """Tests for pathExists on tiny battlegrounds."""

    @staticmethod
    def _pos(row, col):
        """Shorthand for building a CellPos."""
        return CellPos(Row(row), Col(col))

    def test_startBlocked(self):
        battleground = emptyBattleground(2, 2)
        battleground.towers.towers[0][0] = BgTowerState(0)
        self.assertFalse(pathExists(battleground, self._pos(0, 0), self._pos(1, 1)))

    def test_endBlocked(self):
        battleground = emptyBattleground(2, 2)
        battleground.towers.towers[1][1] = BgTowerState(0)
        self.assertFalse(pathExists(battleground, self._pos(0, 0), self._pos(1, 1)))

    def test_noPath(self):
        battleground = emptyBattleground(2, 2)
        # Both neighbors of the start are towers, cutting the board in two.
        battleground.towers.towers[0][1] = BgTowerState(0)
        battleground.towers.towers[1][0] = BgTowerState(0)
        self.assertFalse(pathExists(battleground, self._pos(0, 0), self._pos(1, 1)))

    def test_oneStepPath(self):
        battleground = emptyBattleground(2, 2)
        self.assertTrue(pathExists(battleground, self._pos(0, 0), self._pos(1, 1)))

    def test_multiStepPath(self):
        battleground = emptyBattleground(2, 3)
        battleground.towers.towers[0][1] = BgTowerState(0)
        self.assertTrue(pathExists(battleground, self._pos(0, 0), self._pos(0, 2)))

    def test_multiplePaths(self):
        battleground = emptyBattleground(3, 3)
        battleground.towers.towers[1][1] = BgTowerState(0)
        self.assertTrue(pathExists(battleground, self._pos(0, 0), self._pos(2, 2)))

    def test_manyPaths(self):
        battleground = emptyBattleground(3, 3)
        self.assertTrue(pathExists(battleground, self._pos(0, 0), self._pos(2, 2)))
class TestMakePathMap(unittest.TestCase):
    """Tests for makePathMap: distance grids and unreachable configurations."""

    @staticmethod
    def _pos(row, col):
        """Shorthand for building a CellPos."""
        return CellPos(Row(row), Col(col))

    def test_startBlocked(self):
        battleground = emptyBattleground(2, 2)
        battleground.towers.towers[0][0] = BgTowerState(0)
        self.assertIsNone(makePathMap(battleground, self._pos(0, 0), self._pos(1, 1)))

    def test_endBlocked(self):
        battleground = emptyBattleground(2, 2)
        battleground.towers.towers[1][1] = BgTowerState(0)
        self.assertIsNone(makePathMap(battleground, self._pos(0, 0), self._pos(1, 1)))

    def test_noPath(self):
        battleground = emptyBattleground(2, 2)
        battleground.towers.towers[0][1] = BgTowerState(0)
        battleground.towers.towers[1][0] = BgTowerState(0)
        self.assertIsNone(makePathMap(battleground, self._pos(0, 0), self._pos(1, 1)))

    def test_oneStepPath(self):
        battleground = emptyBattleground(2, 2)
        pathMap = makePathMap(battleground, self._pos(0, 0), self._pos(0, 1))
        # Unreachable cells carry -1.
        expected = np.asarray([[0, 1], [-1, -1]])
        np.testing.assert_array_equal(pathMap.dists, expected)

    def test_multiStepPath(self):
        battleground = emptyBattleground(2, 3)
        battleground.towers.towers[0][1] = BgTowerState(0)
        pathMap = makePathMap(battleground, self._pos(0, 0), self._pos(0, 2))
        expected = np.asarray([[0, -1, 4], [1, 2, 3]])
        np.testing.assert_array_equal(pathMap.dists, expected)

    def test_multiplePaths(self):
        battleground = emptyBattleground(3, 3)
        battleground.towers.towers[1][1] = BgTowerState(0)
        pathMap = makePathMap(battleground, self._pos(0, 0), self._pos(2, 2))
        expected = np.asarray([[0, 1, 2], [1, -1, 3], [2, 3, 4]])
        np.testing.assert_array_equal(pathMap.dists, expected)

    def test_manyPaths(self):
        battleground = emptyBattleground(3, 3)
        pathMap = makePathMap(battleground, self._pos(0, 0), self._pos(2, 2))
        expected = np.asarray([[0, 1, 2], [1, 2, 3], [2, 3, 4]])
        np.testing.assert_array_equal(pathMap.dists, expected)
class TestCompressPath(unittest.TestCase):
    """Tests for compressPath: collinear runs collapse to their endpoints."""

    @staticmethod
    def _cell(row, col):
        """Shorthand for building a CellPos."""
        return CellPos(Row(row), Col(col))

    def test_twoNodePaths(self):
        # A two-node path is already as short as it can get.
        horizontal = [self._cell(0, 0), self._cell(0, 1)]
        vertical = [self._cell(0, 0), self._cell(1, 0)]
        self.assertListEqual(compressPath(horizontal), horizontal)
        self.assertListEqual(compressPath(vertical), vertical)

    def test_singleChainPath(self):
        horizontal = [self._cell(0, c) for c in range(3)]
        vertical = [self._cell(r, 0) for r in range(4)]
        self.assertListEqual(compressPath(horizontal),
                             [self._cell(0, 0), self._cell(0, 2)])
        self.assertListEqual(compressPath(vertical),
                             [self._cell(0, 0), self._cell(3, 0)])

    def test_twoCorners(self):
        path = [self._cell(0, 0), self._cell(0, 1), self._cell(0, 2),
                self._cell(1, 2), self._cell(1, 3)]
        # Only the corner cells and the endpoints survive compression.
        expected = [self._cell(0, 0), self._cell(0, 2),
                    self._cell(1, 2), self._cell(1, 3)]
        self.assertListEqual(compressPath(path), expected)
| [
"random.Random",
"numpy.asarray",
"infinitd_server.paths.makePathMap",
"infinitd_server.battleground_state.BgTowerState",
"infinitd_server.game_config.Col",
"infinitd_server.paths.compressPath",
"infinitd_server.game_config.Row"
] | [((665, 702), 'infinitd_server.paths.makePathMap', 'makePathMap', (['battleground', 'start', 'end'], {}), '(battleground, start, end)\n', (676, 702), False, 'from infinitd_server.paths import makePathMap, compressPath, PathMap, pathExists\n'), ((1685, 1722), 'infinitd_server.paths.makePathMap', 'makePathMap', (['battleground', 'start', 'end'], {}), '(battleground, start, end)\n', (1696, 1722), False, 'from infinitd_server.paths import makePathMap, compressPath, PathMap, pathExists\n'), ((2667, 2682), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (2679, 2682), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((2726, 2741), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (2738, 2741), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((2785, 2800), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (2797, 2800), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((2844, 2859), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (2856, 2859), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((2956, 2993), 'infinitd_server.paths.makePathMap', 'makePathMap', (['battleground', 'start', 'end'], {}), '(battleground, start, end)\n', (2967, 2993), False, 'from infinitd_server.paths import makePathMap, compressPath, PathMap, pathExists\n'), ((3967, 3982), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (3979, 3982), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((4207, 4222), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (4219, 4222), 
False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((4443, 4458), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (4455, 4458), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((4502, 4517), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (4514, 4517), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((4926, 4941), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (4938, 4941), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((5168, 5183), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (5180, 5183), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((5630, 5645), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (5642, 5645), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((5899, 5914), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (5911, 5914), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((6164, 6179), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (6176, 6179), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((6223, 6238), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (6235, 6238), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((6769, 6784), 'infinitd_server.battleground_state.BgTowerState', 
'BgTowerState', (['(0)'], {}), '(0)\n', (6781, 6784), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((7109, 7124), 'infinitd_server.battleground_state.BgTowerState', 'BgTowerState', (['(0)'], {}), '(0)\n', (7121, 7124), False, 'from infinitd_server.battleground_state import BattlegroundState, BgTowersState, BgTowerState\n'), ((7852, 7871), 'infinitd_server.paths.compressPath', 'compressPath', (['path1'], {}), '(path1)\n', (7864, 7871), False, 'from infinitd_server.paths import makePathMap, compressPath, PathMap, pathExists\n'), ((7891, 7910), 'infinitd_server.paths.compressPath', 'compressPath', (['path2'], {}), '(path2)\n', (7903, 7910), False, 'from infinitd_server.paths import makePathMap, compressPath, PathMap, pathExists\n'), ((8286, 8305), 'infinitd_server.paths.compressPath', 'compressPath', (['path1'], {}), '(path1)\n', (8298, 8305), False, 'from infinitd_server.paths import makePathMap, compressPath, PathMap, pathExists\n'), ((8325, 8344), 'infinitd_server.paths.compressPath', 'compressPath', (['path2'], {}), '(path2)\n', (8337, 8344), False, 'from infinitd_server.paths import makePathMap, compressPath, PathMap, pathExists\n'), ((8736, 8754), 'infinitd_server.paths.compressPath', 'compressPath', (['path'], {}), '(path)\n', (8748, 8754), False, 'from infinitd_server.paths import makePathMap, compressPath, PathMap, pathExists\n'), ((593, 599), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (596, 599), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((601, 607), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (604, 607), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((631, 637), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (634, 637), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((639, 645), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (642, 645), 
False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((1613, 1619), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (1616, 1619), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((1621, 1627), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (1624, 1627), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((1651, 1657), 'infinitd_server.game_config.Row', 'Row', (['(4)'], {}), '(4)\n', (1654, 1657), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((1659, 1665), 'infinitd_server.game_config.Col', 'Col', (['(4)'], {}), '(4)\n', (1662, 1665), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((2884, 2890), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (2887, 2890), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((2892, 2898), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (2895, 2898), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((2922, 2928), 'infinitd_server.game_config.Row', 'Row', (['(4)'], {}), '(4)\n', (2925, 2928), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((2930, 2936), 'infinitd_server.game_config.Col', 'Col', (['(4)'], {}), '(4)\n', (2933, 2936), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6612, 6642), 'numpy.asarray', 'np.asarray', (['[[0, 1], [-1, -1]]'], {}), '([[0, 1], [-1, -1]])\n', (6622, 6642), True, 'import numpy as np\n'), ((6947, 6982), 'numpy.asarray', 'np.asarray', (['[[0, -1, 4], [1, 2, 3]]'], {}), '([[0, -1, 4], [1, 2, 3]])\n', (6957, 6982), True, 'import numpy as np\n'), ((7286, 7332), 'numpy.asarray', 'np.asarray', (['[[0, 1, 2], [1, -1, 3], [2, 3, 4]]'], {}), '([[0, 1, 2], [1, -1, 3], [2, 3, 4]])\n', (7296, 7332), True, 'import numpy as np\n'), ((7574, 7619), 'numpy.asarray', 'np.asarray', (['[[0, 1, 2], [1, 2, 3], [2, 3, 4]]'], {}), '([[0, 1, 2], [1, 2, 3], 
[2, 3, 4]])\n', (7584, 7619), True, 'import numpy as np\n'), ((5699, 5705), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (5702, 5705), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5707, 5713), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (5710, 5713), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5724, 5730), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (5727, 5730), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5732, 5738), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (5735, 5738), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5968, 5974), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (5971, 5974), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5976, 5982), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (5979, 5982), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5993, 5999), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (5996, 5999), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6001, 6007), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (6004, 6007), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6292, 6298), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (6295, 6298), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6300, 6306), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (6303, 6306), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6317, 6323), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (6320, 6323), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6325, 6331), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (6328, 6331), False, 'from 
infinitd_server.game_config import CellPos, Row, Col\n'), ((6503, 6509), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (6506, 6509), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6511, 6517), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (6514, 6517), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6528, 6534), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (6531, 6534), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6536, 6542), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (6539, 6542), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6838, 6844), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (6841, 6844), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6846, 6852), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (6849, 6852), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6863, 6869), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (6866, 6869), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((6871, 6877), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (6874, 6877), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7178, 7184), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (7181, 7184), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7186, 7192), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (7189, 7192), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7203, 7209), 'infinitd_server.game_config.Row', 'Row', (['(2)'], {}), '(2)\n', (7206, 7209), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7211, 7217), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (7214, 7217), False, 'from 
infinitd_server.game_config import CellPos, Row, Col\n'), ((7465, 7471), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (7468, 7471), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7473, 7479), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (7476, 7479), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7490, 7496), 'infinitd_server.game_config.Row', 'Row', (['(2)'], {}), '(2)\n', (7493, 7496), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7498, 7504), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (7501, 7504), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7723, 7729), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (7726, 7729), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7731, 7737), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (7734, 7737), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7748, 7754), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (7751, 7754), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7756, 7762), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (7759, 7762), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7790, 7796), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (7793, 7796), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7798, 7804), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (7801, 7804), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7815, 7821), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (7818, 7821), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((7823, 7829), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (7826, 7829), False, 'from 
infinitd_server.game_config import CellPos, Row, Col\n'), ((8066, 8072), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8069, 8072), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8074, 8080), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (8077, 8080), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8091, 8097), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8094, 8097), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8099, 8105), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (8102, 8105), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8116, 8122), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8119, 8122), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8124, 8130), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (8127, 8130), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8158, 8164), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8161, 8164), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8166, 8172), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (8169, 8172), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8183, 8189), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (8186, 8189), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8191, 8197), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (8194, 8197), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8208, 8214), 'infinitd_server.game_config.Row', 'Row', (['(2)'], {}), '(2)\n', (8211, 8214), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8216, 8222), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (8219, 8222), False, 'from 
infinitd_server.game_config import CellPos, Row, Col\n'), ((8249, 8255), 'infinitd_server.game_config.Row', 'Row', (['(3)'], {}), '(3)\n', (8252, 8255), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8257, 8263), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (8260, 8263), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8584, 8590), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8587, 8590), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8592, 8598), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (8595, 8598), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8609, 8615), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8612, 8615), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8617, 8623), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (8620, 8623), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8634, 8640), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8637, 8640), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8642, 8648), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (8645, 8648), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8675, 8681), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (8678, 8681), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8683, 8689), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (8686, 8689), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8700, 8706), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (8703, 8706), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8708, 8714), 'infinitd_server.game_config.Col', 'Col', (['(3)'], {}), '(3)\n', (8711, 8714), False, 'from 
infinitd_server.game_config import CellPos, Row, Col\n'), ((861, 870), 'random.Random', 'Random', (['i'], {}), '(i)\n', (867, 870), False, 'from random import Random\n'), ((1881, 1890), 'random.Random', 'Random', (['i'], {}), '(i)\n', (1887, 1890), False, 'from random import Random\n'), ((3152, 3161), 'random.Random', 'Random', (['i'], {}), '(i)\n', (3158, 3161), False, 'from random import Random\n'), ((4042, 4048), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (4045, 4048), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4050, 4056), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (4053, 4056), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4067, 4073), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (4070, 4073), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4075, 4081), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (4078, 4081), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4282, 4288), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (4285, 4288), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4290, 4296), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (4293, 4296), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4307, 4313), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (4310, 4313), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4315, 4321), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (4318, 4321), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4577, 4583), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (4580, 4583), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4585, 4591), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (4588, 4591), 
False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4602, 4608), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (4605, 4608), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4610, 4616), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (4613, 4616), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4758, 4764), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (4761, 4764), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4766, 4772), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (4769, 4772), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4783, 4789), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (4786, 4789), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((4791, 4797), 'infinitd_server.game_config.Col', 'Col', (['(1)'], {}), '(1)\n', (4794, 4797), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5000, 5006), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (5003, 5006), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5008, 5014), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (5011, 5014), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5025, 5031), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (5028, 5031), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5033, 5039), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (5036, 5039), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5242, 5248), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (5245, 5248), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5250, 5256), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (5253, 5256), False, 
'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5267, 5273), 'infinitd_server.game_config.Row', 'Row', (['(2)'], {}), '(2)\n', (5270, 5273), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5275, 5281), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (5278, 5281), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5421, 5427), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (5424, 5427), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5429, 5435), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (5432, 5435), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5446, 5452), 'infinitd_server.game_config.Row', 'Row', (['(2)'], {}), '(2)\n', (5449, 5452), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((5454, 5460), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (5457, 5460), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8394, 8400), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8397, 8400), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8402, 8408), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (8405, 8408), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8419, 8425), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8422, 8425), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8427, 8433), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (8430, 8433), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8485, 8491), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8488, 8491), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8493, 8499), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (8496, 8499), False, 'from 
infinitd_server.game_config import CellPos, Row, Col\n'), ((8510, 8516), 'infinitd_server.game_config.Row', 'Row', (['(3)'], {}), '(3)\n', (8513, 8516), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8518, 8524), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (8521, 8524), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8819, 8825), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8822, 8825), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8827, 8833), 'infinitd_server.game_config.Col', 'Col', (['(0)'], {}), '(0)\n', (8830, 8833), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8844, 8850), 'infinitd_server.game_config.Row', 'Row', (['(0)'], {}), '(0)\n', (8847, 8850), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8852, 8858), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (8855, 8858), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8869, 8875), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (8872, 8875), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8877, 8883), 'infinitd_server.game_config.Col', 'Col', (['(2)'], {}), '(2)\n', (8880, 8883), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8914, 8920), 'infinitd_server.game_config.Row', 'Row', (['(1)'], {}), '(1)\n', (8917, 8920), False, 'from infinitd_server.game_config import CellPos, Row, Col\n'), ((8922, 8928), 'infinitd_server.game_config.Col', 'Col', (['(3)'], {}), '(3)\n', (8925, 8928), False, 'from infinitd_server.game_config import CellPos, Row, Col\n')] |
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env_pixel
import mujoco_py
from mujoco_py.mjlib import mjlib
from skimage import color
from skimage import transform
class PusherEnvPixel(mujoco_env_pixel.MujocoEnvPixel, utils.EzPickle):
    """Pusher task whose observations are stacks of rendered grayscale frames.

    Instead of joint positions/velocities, ``_get_obs`` returns an 84x84x4
    uint8 buffer holding the four most recent downsampled camera frames,
    with the newest frame in channel 0.
    """

    def __init__(self):
        # Frame history buffer: 84x84 spatial, 4 most-recent frames stacked
        # along the last axis.
        self.memory = np.empty([84, 84, 4], dtype=np.uint8)
        utils.EzPickle.__init__(self)
        mujoco_env_pixel.MujocoEnvPixel.__init__(self, 'pusher.xml', 5)

    def _step(self, a):
        """Apply action `a`, step the simulation, and return (obs, reward, done, info).

        The reward combines the object-to-goal distance, a control-magnitude
        penalty, and an arm-to-object proximity bonus.
        """
        vec_1 = self.get_body_com("object") - self.get_body_com("tips_arm")
        vec_2 = self.get_body_com("object") - self.get_body_com("goal")
        reward_near = - np.linalg.norm(vec_1)
        reward_dist = - np.linalg.norm(vec_2)
        reward_ctrl = - np.square(a).sum()
        reward = reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near
        self.do_simulation(a, self.frame_skip)
        ob = self._get_obs()
        done = False  # episode termination is handled by a time limit elsewhere
        return ob, reward, done, dict(reward_dist=reward_dist,
                reward_ctrl=reward_ctrl)

    def viewer_setup(self):
        """Point the free camera at the scene from a fixed distance."""
        self.viewer.cam.trackbodyid = -1
        self.viewer.cam.distance = 4.0

    def reset_model(self):
        """Randomize the cylinder position (away from the goal) and reset state."""
        qpos = self.init_qpos
        self.goal_pos = np.asarray([0, 0])
        # Rejection-sample a cylinder position sufficiently far from the goal.
        while True:
            self.cylinder_pos = np.concatenate([
                    self.np_random.uniform(low=-0.3, high=0, size=1),
                    self.np_random.uniform(low=-0.2, high=0.2, size=1)])
            if np.linalg.norm(self.cylinder_pos - self.goal_pos) > 0.17:
                break
        qpos[-4:-2] = self.cylinder_pos
        qpos[-2:] = self.goal_pos
        qvel = self.init_qvel + self.np_random.uniform(low=-0.005,
                high=0.005, size=self.model.nv)
        qvel[-4:] = 0  # cylinder and goal start at rest
        self.set_state(qpos, qvel)
        return self._get_obs()

    def _get_obs(self):
        """Render the camera, convert to 84x84 grayscale, and push onto the frame stack."""
        data = self._get_viewer().get_image()
        rawByteImg = data[0]
        width = data[1]
        height = data[2]
        # np.fromstring is deprecated for binary input; np.frombuffer is the
        # supported equivalent (and avoids an extra copy).
        tmp = np.frombuffer(rawByteImg, dtype=np.uint8)
        img = np.reshape(tmp, [height, width, 3])
        img = np.flipud(img)  # renderer returns the image vertically flipped
        gray = color.rgb2gray(img)  # float image in [0, 1]
        gray_resized = transform.resize(gray, (84, 84))
        # Shift history: channel 0 is newest, channel 3 oldest.
        self.memory[:, :, 1:] = self.memory[:, :, 0:3]
        self.memory[:, :, 0] = gray_resized * 255  # rescale [0,1] floats to uint8
        return self.memory
| [
"gym.envs.mujoco.mujoco_env_pixel.MujocoEnvPixel.__init__",
"skimage.color.rgb2gray",
"numpy.empty",
"numpy.asarray",
"numpy.square",
"numpy.flipud",
"skimage.transform.resize",
"numpy.reshape",
"numpy.linalg.norm",
"numpy.fromstring",
"gym.utils.EzPickle.__init__"
] | [((313, 350), 'numpy.empty', 'np.empty', (['[84, 84, 4]'], {'dtype': 'np.uint8'}), '([84, 84, 4], dtype=np.uint8)\n', (321, 350), True, 'import numpy as np\n'), ((356, 385), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (379, 385), False, 'from gym import utils\n'), ((394, 457), 'gym.envs.mujoco.mujoco_env_pixel.MujocoEnvPixel.__init__', 'mujoco_env_pixel.MujocoEnvPixel.__init__', (['self', '"""pusher.xml"""', '(5)'], {}), "(self, 'pusher.xml', 5)\n", (434, 457), False, 'from gym.envs.mujoco import mujoco_env_pixel\n'), ((1230, 1248), 'numpy.asarray', 'np.asarray', (['[0, 0]'], {}), '([0, 0])\n', (1240, 1248), True, 'import numpy as np\n'), ((1998, 2039), 'numpy.fromstring', 'np.fromstring', (['rawByteImg'], {'dtype': 'np.uint8'}), '(rawByteImg, dtype=np.uint8)\n', (2011, 2039), True, 'import numpy as np\n'), ((2054, 2089), 'numpy.reshape', 'np.reshape', (['tmp', '[height, width, 3]'], {}), '(tmp, [height, width, 3])\n', (2064, 2089), True, 'import numpy as np\n'), ((2104, 2118), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (2113, 2118), True, 'import numpy as np\n'), ((2146, 2165), 'skimage.color.rgb2gray', 'color.rgb2gray', (['img'], {}), '(img)\n', (2160, 2165), False, 'from skimage import color\n'), ((2207, 2239), 'skimage.transform.resize', 'transform.resize', (['gray', '(84, 84)'], {}), '(gray, (84, 84))\n', (2223, 2239), False, 'from skimage import transform\n'), ((656, 677), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_1'], {}), '(vec_1)\n', (670, 677), True, 'import numpy as np\n'), ((702, 723), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_2'], {}), '(vec_2)\n', (716, 723), True, 'import numpy as np\n'), ((1476, 1525), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.cylinder_pos - self.goal_pos)'], {}), '(self.cylinder_pos - self.goal_pos)\n', (1490, 1525), True, 'import numpy as np\n'), ((748, 760), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (757, 760), True, 'import numpy as np\n')] |
"""
Dataset object for PubChem BioAssay dataset. The data is given in the DeepChem package
(https://github.com/deepchem/deepchem), and the details can be found here:
https://deepchem.readthedocs.io/en/latest/api_reference/moleculenet.html?highlight=PCBA#pcba-datasets.
"""
import os
import random
import json
import gzip
from typing import Tuple, Optional, Dict
import numpy as np
import pandas
from sklearn.metrics import roc_auc_score, average_precision_score
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
import deepchem as dc
from meta.datasets import BaseDataset
from meta.datasets.utils import get_split, slice_second_dim
from meta.train.loss import MultiTaskLoss
# Total number of bioassay targets (tasks) in the full PCBA dataset.
TOTAL_TASKS = 128
# Source of the raw compressed-CSV PCBA data in the DeepChem repository.
DOWNLOAD_URL = "https://github.com/deepchem/deepchem/raw/master/datasets/pcba.csv.gz"
RAW_DATA_FNAME = "pcba.csv.gz"
# Preprocessing settings. A copy is saved next to the processed data and
# compared against this dict at load time, so stale data is detected.
DATASET_CONFIG = {
    "feature_size": 2048,  # length of the ECFP fingerprint vector per molecule
    "train_split": 0.9,  # fraction of molecules assigned to the training set
    "ecfp_radius": 4,  # radius of the circular fingerprint featurizer
}
# Per-class label counts across all tasks — presumably [negative, positive];
# TODO(review): confirm these counts against the processed labels.
CLASS_SAMPLES = np.array([30269634, 427685])
# Inverse-frequency class weights for the cross-entropy loss: the rarer
# class receives the larger weight.
CLASS_WEIGHTS = 1.0 - CLASS_SAMPLES / np.sum(CLASS_SAMPLES)
class PCBA(Dataset, BaseDataset):
    """ PyTorch wrapper for the PCBA dataset.

    Inputs are 2048-bit ECFP fingerprints (float32 0/1 vectors); labels are
    per-task values in {0, 1} with -1 marking unlabeled (task, molecule)
    pairs. Raw data is preprocessed once and cached as numpy arrays.
    """
    def __init__(
        self,
        root: str,
        num_tasks: int,
        data_tasks: Optional[int] = None,
        train: bool = True,
    ) -> None:
        """
        Init function for PCBA.
        Parameters
        ----------
        root : str
            Path to folder containing dataset files.
        num_tasks : int
            Number of tasks for instance of PCBA. Should be between 1 and `TOTAL_TASKS`.
            For each input molecule, only the labels from the first `num_tasks` tasks
            are loaded.
        data_tasks : Optional[int]
            Only use data points that are labeled for at least one of the first
            `data_tasks` tasks. This can be used to ensure that a comparison of training
            on e.g. 128 tasks vs. 32 tasks is using the same data, by setting
            `data_tasks = 32`. If None, this will be set to `num_tasks`.
        train : bool
            Whether to load training set. Otherwise, test set is loaded.

        Raises
        ------
        ValueError
            If neither raw nor processed data exists under `root`, or if the
            saved dataset config does not match `DATASET_CONFIG`.
        """
        # Check that parameter values are valid.
        assert 1 <= num_tasks <= TOTAL_TASKS
        assert data_tasks is None or 1 <= data_tasks <= TOTAL_TASKS
        Dataset.__init__(self)
        BaseDataset.__init__(self)
        # Store data settings.
        self.num_tasks = num_tasks
        self.data_tasks = data_tasks if data_tasks is not None else self.num_tasks
        self.root = root
        # Raw CSV lives one level above the processed-data folder.
        self.raw_data_path = os.path.join(os.path.dirname(root), RAW_DATA_FNAME)
        self.train = train
        self.split = "train" if self.train else "test"
        # Set static dataset properties.
        self.input_size = DATASET_CONFIG["feature_size"]
        self.output_size = 2
        self.loss_cls = MultiTaskLoss
        # One weighted cross-entropy loss per task; ignore_index=-1 skips
        # unlabeled entries, matching the -1 fill value used in preprocess().
        self.loss_kwargs = {
            "task_losses": [
                {
                    "loss": nn.CrossEntropyLoss(
                        weight=torch.as_tensor(CLASS_WEIGHTS, dtype=torch.float32),
                        ignore_index=-1,
                        reduction="mean",
                    ),
                    "output_slice": slice_second_dim(t),
                    "label_slice": slice_second_dim(t, to_long=True),
                }
                for t in range(self.num_tasks)
            ],
        }
        self.criterion_kwargs = {"train": {"train": True}, "eval": {"train": False}}
        # Average precision is tracked for both splits; per-task loss weights
        # are logged (train only) but not shown or maximized.
        self.extra_metrics = {
            "AP": {"maximize": True, "train": True, "eval": True, "show": True},
            **{
                f"loss_weight_{t}": {
                    "maximize": None,
                    "train": True,
                    "eval": False,
                    "show": False,
                }
                for t in range(self.num_tasks)
            },
        }
        # Preprocess data if necessary.
        if not os.path.isdir(self.root):
            if not os.path.isfile(self.raw_data_path):
                raise ValueError(
                    f"Neither raw data nor processed dataset exist in folder"
                    f" '{root}'. Download the raw data from:\n    {DOWNLOAD_URL}\n"
                    f"and place it in '{root}'."
                )
            self.preprocess()
        # Load data.
        self.inputs = np.load(self.data_path(train=self.train, inp=True))
        self.labels = np.load(self.data_path(train=self.train, inp=False))
        assert len(self.inputs) == len(self.labels)
        original_dataset_size = len(self.inputs)
        # Remove datapoints that aren't labeled for any of the chosen subset of tasks.
        good_idxs = np.any(self.labels[:, : self.data_tasks] != -1, axis=1).nonzero()[0]
        self.inputs = self.inputs[good_idxs]
        self.labels = self.labels[good_idxs]
        self.dataset_size = len(self.inputs)
        if self.dataset_size != original_dataset_size:
            removed = original_dataset_size - self.dataset_size
            print(
                f"Removing {removed} datapoints from PCBA {self.split} that aren't"
                f" labeled for first {self.data_tasks} tasks. {self.dataset_size}"
                f" {self.split} datapoints remaining."
            )
        # Remove labels from tasks not being trained on.
        self.labels = self.labels[:, : self.num_tasks]
        # Load dataset config to ensure that the config of loaded data matches the
        # current config.
        config_path = self.config_path()
        with open(config_path, "r") as config_file:
            loaded_config = json.load(config_file)
        if DATASET_CONFIG != loaded_config:
            raise ValueError(
                f"Config of loaded PCBA dataset ({config_path}) doesn't match current"
                f" PCBA config (hard-coded in {os.path.relpath(__file__)})."
                " To run training, you must either:\n"
                " (1) Change the current PCBA config to match the loaded config.\n"
                " (2) Delete the processed PCBA data to regenerate it with new config."
            )
    def __len__(self) -> int:
        """
        Number of data points in dataset. For PCBA, this is the total number of input
        molecules. However, most input molecules don't contain labels for many of the
        targets (tasks).
        """
        return self.dataset_size
    def __getitem__(self, index: int) -> Tuple[np.ndarray, np.ndarray]:
        """ Return dataset item with input `index` as an (input, labels) pair. """
        return self.inputs[index], self.labels[index]
    def preprocess(self) -> None:
        """
        Preprocesses the raw PCBA data from the DeepChem repository:
        - Uncompress the raw CSV file
        - Fill in missing label values with -1
        - Featurize input molecules with Extended Connectivity Circular Fingerprints
        - Randomly split into a training and testing set
        - Save inputs and labels to disk as numpy arrays

        NOTE(review): the split is not seeded, so re-running preprocessing
        produces a different split — presumably fine since it runs only once.
        """
        print(
            "Processing raw PCBA data."
            " This only needs to be done once, but will take 5-10 minutes."
        )
        # Uncompress the raw PCBA data.
        with gzip.open(self.raw_data_path, "rb") as csv_file:
            dataframe = pandas.read_csv(csv_file)
        # Fill in missing label values.
        dataframe.fillna(value=-1, inplace=True)
        # Featurize input molecules and check that they were computed without errors.
        # ECFP features must be binary; any other value signals a featurizer error.
        featurizer = dc.feat.CircularFingerprint(
            size=self.input_size, radius=DATASET_CONFIG["ecfp_radius"]
        )
        features = featurizer.featurize(dataframe["smiles"])
        assert np.logical_or(features == 0, features == 1).all()
        # Drop unneeded columns.
        dataframe.drop(labels=["mol_id", "smiles"], axis=1, inplace=True)
        # Randomly split into training and testing set.
        dataset_size = len(dataframe)
        train_size = round(dataset_size * DATASET_CONFIG["train_split"])
        assert 0 < train_size < dataset_size
        idxs = list(range(dataset_size))
        random.shuffle(idxs)
        train_idxs = idxs[:train_size]
        test_idxs = idxs[train_size:]
        train_input = features[train_idxs].astype(dtype=np.float32)
        test_input = features[test_idxs].astype(dtype=np.float32)
        train_label = dataframe.iloc[train_idxs].to_numpy(dtype=np.float32)
        test_label = dataframe.iloc[test_idxs].to_numpy(dtype=np.float32)
        # Save results as numpy arrays.
        os.makedirs(self.root)
        np.save(self.data_path(train=True, inp=True), train_input)
        np.save(self.data_path(train=True, inp=False), train_label)
        np.save(self.data_path(train=False, inp=True), test_input)
        np.save(self.data_path(train=False, inp=False), test_label)
        print(f"train input shape: {train_input.shape}")
        print(f"train label shape: {train_label.shape}")
        print(f"test input shape: {test_input.shape}")
        print(f"test label shape: {test_label.shape}")
        # Save out dataset config.
        with open(self.config_path(), "w") as config_file:
            json.dump(DATASET_CONFIG, config_file, indent=4)
    def data_path(self, train: bool, inp: bool) -> str:
        """ Names for dataset files, e.g. ``<root>/train_input.npy``. """
        split = "train" if train else "test"
        name = "input" if inp else "label"
        return os.path.join(self.root, f"{split}_{name}.npy")
    def config_path(self) -> str:
        """ Path of the JSON file holding the config the data was built with. """
        return os.path.join(self.root, "dataset_config.json")
    def compute_metrics(
        self,
        outputs: torch.Tensor,
        labels: torch.Tensor,
        criterion: nn.Module = None,
        train: bool = True,
    ) -> Dict[str, float]:
        """ Compute training/testing metrics from `outputs` and `labels`.

        `criterion` is only accessed when `train` is True (to read the
        current multi-task loss weights), so it may be None at eval time.
        """
        split = get_split(train)
        # Compute average precision.
        metrics = {f"{split}_AP": PCBA_avg_precision(outputs, labels)}
        # Add loss weights to metrics, if this is a training step.
        if train:
            loss_weights = criterion.loss_weighter.loss_weights
            metrics.update(
                {
                    f"{split}_loss_weight_{t}": float(loss_weights[t])
                    for t in range(self.num_tasks)
                }
            )
        return metrics
def PCBA_ROC_AUC(outputs: torch.Tensor, labels: torch.Tensor) -> float:
    """Area under the ROC curve over all labeled (molecule, task) entries.

    `outputs` holds per-task two-class logits; `labels` holds 0/1 targets,
    where any other value (e.g. -1) marks an unlabeled entry and is excluded.
    """
    # Positive-class probability for every (sample, task) entry, flattened.
    positive_probs = F.softmax(outputs, dim=2)[:, :, 1].reshape(-1)
    targets = labels.reshape(-1)
    # Keep only entries that are actually labeled 0 or 1.
    labeled = torch.logical_or(targets == 0, targets == 1)
    y_score = positive_probs[labeled].detach().cpu().numpy()
    y_true = targets[labeled].detach().cpu().long().numpy()
    return roc_auc_score(y_true, y_score, average="samples")
def PCBA_avg_precision(outputs: torch.Tensor, labels: torch.Tensor) -> float:
    """Average precision (AP) over all labeled (molecule, task) entries.

    `outputs` holds per-task two-class logits; `labels` holds 0/1 targets,
    where any other value (e.g. -1) marks an unlabeled entry and is excluded.
    """
    # Positive-class probability for every (sample, task) entry, flattened.
    positive_probs = F.softmax(outputs, dim=2)[:, :, 1].reshape(-1)
    targets = labels.reshape(-1)
    # Keep only entries that are actually labeled 0 or 1.
    labeled = torch.logical_or(targets == 0, targets == 1)
    y_score = positive_probs[labeled].detach().cpu().numpy()
    y_true = targets[labeled].detach().cpu().long().numpy()
    return average_precision_score(y_true, y_score, average="samples")
| [
"numpy.sum",
"pandas.read_csv",
"random.shuffle",
"os.path.isfile",
"os.path.join",
"deepchem.feat.CircularFingerprint",
"os.path.dirname",
"torch.utils.data.Dataset.__init__",
"meta.datasets.utils.get_split",
"sklearn.metrics.average_precision_score",
"json.dump",
"torch.logical_or",
"sklea... | [((977, 1005), 'numpy.array', 'np.array', (['[30269634, 427685]'], {}), '([30269634, 427685])\n', (985, 1005), True, 'import numpy as np\n'), ((10530, 10555), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(2)'}), '(outputs, dim=2)\n', (10539, 10555), True, 'import torch.nn.functional as F\n'), ((10661, 10713), 'torch.logical_or', 'torch.logical_or', (['(flat_labels == 0)', '(flat_labels == 1)'], {}), '(flat_labels == 0, flat_labels == 1)\n', (10677, 10713), False, 'import torch\n'), ((10857, 10918), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['valid_labels', 'valid_outputs'], {'average': '"""samples"""'}), "(valid_labels, valid_outputs, average='samples')\n", (10870, 10918), False, 'from sklearn.metrics import roc_auc_score, average_precision_score\n'), ((11135, 11160), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(2)'}), '(outputs, dim=2)\n', (11144, 11160), True, 'import torch.nn.functional as F\n'), ((11266, 11318), 'torch.logical_or', 'torch.logical_or', (['(flat_labels == 0)', '(flat_labels == 1)'], {}), '(flat_labels == 0, flat_labels == 1)\n', (11282, 11318), False, 'import torch\n'), ((11462, 11533), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['valid_labels', 'valid_outputs'], {'average': '"""samples"""'}), "(valid_labels, valid_outputs, average='samples')\n", (11485, 11533), False, 'from sklearn.metrics import roc_auc_score, average_precision_score\n'), ((1044, 1065), 'numpy.sum', 'np.sum', (['CLASS_SAMPLES'], {}), '(CLASS_SAMPLES)\n', (1050, 1065), True, 'import numpy as np\n'), ((2316, 2338), 'torch.utils.data.Dataset.__init__', 'Dataset.__init__', (['self'], {}), '(self)\n', (2332, 2338), False, 'from torch.utils.data import Dataset\n'), ((2347, 2373), 'meta.datasets.BaseDataset.__init__', 'BaseDataset.__init__', (['self'], {}), '(self)\n', (2367, 2373), False, 'from meta.datasets import BaseDataset\n'), ((7492, 7584), 'deepchem.feat.CircularFingerprint', 
'dc.feat.CircularFingerprint', ([], {'size': 'self.input_size', 'radius': "DATASET_CONFIG['ecfp_radius']"}), "(size=self.input_size, radius=DATASET_CONFIG[\n 'ecfp_radius'])\n", (7519, 7584), True, 'import deepchem as dc\n'), ((8098, 8118), 'random.shuffle', 'random.shuffle', (['idxs'], {}), '(idxs)\n', (8112, 8118), False, 'import random\n'), ((8529, 8551), 'os.makedirs', 'os.makedirs', (['self.root'], {}), '(self.root)\n', (8540, 8551), False, 'import os\n'), ((9403, 9449), 'os.path.join', 'os.path.join', (['self.root', 'f"""{split}_{name}.npy"""'], {}), "(self.root, f'{split}_{name}.npy')\n", (9415, 9449), False, 'import os\n'), ((9500, 9546), 'os.path.join', 'os.path.join', (['self.root', '"""dataset_config.json"""'], {}), "(self.root, 'dataset_config.json')\n", (9512, 9546), False, 'import os\n'), ((9835, 9851), 'meta.datasets.utils.get_split', 'get_split', (['train'], {}), '(train)\n', (9844, 9851), False, 'from meta.datasets.utils import get_split, slice_second_dim\n'), ((2591, 2612), 'os.path.dirname', 'os.path.dirname', (['root'], {}), '(root)\n', (2606, 2612), False, 'import os\n'), ((3950, 3974), 'os.path.isdir', 'os.path.isdir', (['self.root'], {}), '(self.root)\n', (3963, 3974), False, 'import os\n'), ((5624, 5646), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (5633, 5646), False, 'import json\n'), ((7195, 7230), 'gzip.open', 'gzip.open', (['self.raw_data_path', '"""rb"""'], {}), "(self.raw_data_path, 'rb')\n", (7204, 7230), False, 'import gzip\n'), ((7268, 7293), 'pandas.read_csv', 'pandas.read_csv', (['csv_file'], {}), '(csv_file)\n', (7283, 7293), False, 'import pandas\n'), ((9153, 9201), 'json.dump', 'json.dump', (['DATASET_CONFIG', 'config_file'], {'indent': '(4)'}), '(DATASET_CONFIG, config_file, indent=4)\n', (9162, 9201), False, 'import json\n'), ((3995, 4029), 'os.path.isfile', 'os.path.isfile', (['self.raw_data_path'], {}), '(self.raw_data_path)\n', (4009, 4029), False, 'import os\n'), ((7678, 7721), 'numpy.logical_or', 
'np.logical_or', (['(features == 0)', '(features == 1)'], {}), '(features == 0, features == 1)\n', (7691, 7721), True, 'import numpy as np\n'), ((3229, 3248), 'meta.datasets.utils.slice_second_dim', 'slice_second_dim', (['t'], {}), '(t)\n', (3245, 3248), False, 'from meta.datasets.utils import get_split, slice_second_dim\n'), ((3285, 3318), 'meta.datasets.utils.slice_second_dim', 'slice_second_dim', (['t'], {'to_long': '(True)'}), '(t, to_long=True)\n', (3301, 3318), False, 'from meta.datasets.utils import get_split, slice_second_dim\n'), ((4702, 4756), 'numpy.any', 'np.any', (['(self.labels[:, :self.data_tasks] != -1)'], {'axis': '(1)'}), '(self.labels[:, :self.data_tasks] != -1, axis=1)\n', (4708, 4756), True, 'import numpy as np\n'), ((5855, 5880), 'os.path.relpath', 'os.path.relpath', (['__file__'], {}), '(__file__)\n', (5870, 5880), False, 'import os\n'), ((3034, 3085), 'torch.as_tensor', 'torch.as_tensor', (['CLASS_WEIGHTS'], {'dtype': 'torch.float32'}), '(CLASS_WEIGHTS, dtype=torch.float32)\n', (3049, 3085), False, 'import torch\n')] |
import numpy as np
from scipy import stats
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_absolute_percentage_error
from sklearn.metrics import roc_auc_score, average_precision_score, f1_score, brier_score_loss, confusion_matrix
from .base_verification import *
from scipy.special import ndtr
import itertools
def generalized_roc(predicted, observed):
    """Generalized (multi-class) ROC score, vectorized.

    Over all ordered sample pairs whose observed categories differ, counts
    the fraction of pairs whose predicted class probabilities rank the two
    samples in the correct order (ties count as half).

    Parameters
    ----------
    predicted : ndarray, shape (samples, classes) -- class probabilities.
    observed : ndarray, shape (samples, classes) -- one-hot observations.

    Returns
    -------
    Scalar score in [0, 1].  Returns np.asarray([np.nan]) on NaN input and
    np.asarray([1]) when no discordant pairs exist, mirroring
    generalized_roc_slow.
    """
    samples, classes = predicted.shape
    # Guard against NaNs, consistent with generalized_roc_slow.
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    observed_cats = np.squeeze(np.argmax(observed, axis=1))
    cat_comps = np.array(np.meshgrid(observed_cats, observed_cats)).T.reshape(-1,2)
    ndxs = np.array(np.meshgrid(np.arange(samples), np.arange(samples))).T.reshape(-1,2)
    # Keep only ordered pairs whose observed category strictly increases.
    pairs = ndxs[cat_comps[:,0] < cat_comps[:,1]]
    if pairs.shape[0] == 0:
        # No comparable pairs: perfect score by convention (see slow version);
        # previously this fell through to a division by zero.
        return np.asarray([1])
    predictions1 = predicted[pairs[:,0], :]
    predictions2 = predicted[pairs[:,1], :]
    denominators = np.ones((predictions1.shape[0], 1)) - np.sum(predictions1 * predictions2, axis=-1).reshape(-1,1)
    numerators = np.zeros((predictions1.shape[0], 1))
    for i in range(classes-1):
        # P(sample1's class < sample2's class) accumulated per class pair.
        numerators += np.sum(predictions1[:,i].reshape(-1,1) * predictions2[:, (i+1):], axis=-1).reshape(-1,1)
    hit_scores = numerators / denominators
    hit_scores[hit_scores < 0.5] = 0
    hit_scores[hit_scores > 0.5] = 1
    hit_scores[hit_scores == 0.5] = 0.5
    return np.sum(hit_scores) / pairs.shape[0]
def generalized_roc_slow(predicted, observed):
    """Reference O(n^2) implementation of the generalized ROC score.

    Same contract as generalized_roc, built with explicit loops: returns
    np.asarray([np.nan]) on NaN input, np.asarray([1]) when there are no
    discordant pairs, otherwise the fraction of correctly ordered pairs.
    """
    n_samples, n_classes = predicted.shape
    observed = np.argmax(observed, axis=1)
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    # Every ordered pair [low-category sample, high-category sample].
    ordered_pairs = [
        [j, i]
        for i, j in itertools.permutations(range(n_samples), 2)
        if observed[i] > observed[j]
    ]
    ordered_pairs = np.asarray(ordered_pairs)
    if len(ordered_pairs) == 0:
        return np.asarray([1])
    low_preds = predicted[ordered_pairs[:, 0], :]
    high_preds = predicted[ordered_pairs[:, 1], :]
    numerators = np.zeros((low_preds.shape[0], 1))
    for i in range(n_classes - 1):
        for j in range(i + 1, n_classes):
            numerators += low_preds[:, i].reshape(-1, 1) * high_preds[:, j].reshape(-1, 1)
    denominators = np.ones((low_preds.shape[0], 1))
    for i in range(n_classes):
        denominators -= low_preds[:, i].reshape(-1, 1) * high_preds[:, i].reshape(-1, 1)
    hit_scores = numerators.astype(float) / denominators.astype(float)
    hit_scores[hit_scores < 0.5] = 0
    hit_scores[hit_scores > 0.5] = 1
    hit_scores[hit_scores == 0.5] = 0.5
    return np.squeeze(np.sum(hit_scores) / float(ordered_pairs.shape[0]))
def log_likelihood(predicted, observed):
    """Gaussian log-likelihood of the residuals ``predicted - observed``."""
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    n, k = predicted.shape
    res = np.squeeze(predicted - observed)
    rss = res.dot(res)
    # -n/2 * (1 + ln(2*pi)) - n/2 * ln(RSS / n)
    return np.squeeze(-(n * 1/2) * (1 + np.log(2 * np.pi)) - (n / 2) * np.log(rss / n))
def akaike_information_criterion(predicted, observed):
    """AIC = 2k - 2*ln(L), with k = number of columns of ``predicted``."""
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    n, k = predicted.shape
    ll = log_likelihood(predicted, observed)
    return np.squeeze(2 * k - 2 * ll)
def brier_score(predicted, observed):
    """Per-class Brier score: one value per probability column."""
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan] * predicted.shape[1])
    scores = []
    for col in range(predicted.shape[1]):
        scores.append(brier_score_loss(observed[:, col].reshape(-1, 1),
                                       predicted[:, col].reshape(-1, 1)))
    return np.asarray(scores)
def rank_probability_score(predicted, observed):
    """Mean squared difference between predicted and observed class vectors.

    NOTE(review): the textbook ranked probability score is computed on
    *cumulative* distributions (as continuous_rank_probability_score below
    does); this version compares the raw probability vectors directly --
    confirm that is intentional before relying on the name.
    """
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    return np.squeeze(np.mean(np.sum((observed-predicted)**2, axis=-1), axis=0))
def continuous_rank_probability_score(predicted, observed):
    """RPS on cumulative distributions: mean over samples of the squared
    distance between cumulative predicted and observed class vectors."""
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    cum_obs = np.cumsum(observed, axis=1)
    cum_pred = np.cumsum(predicted, axis=1)
    squared_dist = np.sum((cum_pred - cum_obs) ** 2, axis=1)
    return np.squeeze(np.nanmean(squared_dist, axis=0))
def ignorance(predicted, observed):
    """Mean negative log probability assigned to the observed class.

    ``predicted`` holds class probabilities, ``observed`` one-hot labels.
    """
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    obs_classes = np.argmax(observed, axis=1)
    rows = np.arange(predicted.shape[0])
    # Pick, for each sample, the log probability of its observed class.
    picked = np.log(predicted)[rows, obs_classes]
    return np.squeeze(-np.sum(picked) / float(predicted.shape[0]))
def hansen_kuiper(predicted, observed):
    """Hansen-Kuipers (true skill) score per class.

    For each class i: hit rate minus false-alarm rate, computed from the
    confusion matrix of argmax predictions vs. argmax observations.
    Classes for which the score is undefined yield NaN.
    """
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan for i in range(predicted.shape[1]) ])
    cm = confusion_matrix(np.squeeze(np.argmax(observed, axis=1)), np.squeeze(np.argmax(predicted, axis=1)), labels=[i for i in range(observed.shape[1])])
    ret = []
    for i in range(observed.shape[1]):
        try:
            total = np.sum(cm[i,:])
            hits = cm[i,i]
            misses = total - hits
            negs = np.delete(cm, i, axis=0)
            false_alarms = np.sum(negs[:,i])
            correct_negatives = np.sum(negs) - false_alarms
            # Hit rate minus false-alarm rate.
            hansen_kuiper_score = ( float(hits) / float(hits + misses) ) - (float(false_alarms) / float(false_alarms + correct_negatives))
            ret.append(hansen_kuiper_score)
        except ZeroDivisionError:
            # Fix: narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt etc.).  Only an empty class row/column can
            # make the ratios above fail.
            ret.append(np.nan)
    return np.squeeze(np.asarray(ret))
def kendalls_tau(predicted,observed):
    """Kendall rank correlation of the (independently sorted) inputs.

    NOTE(review): both arrays are sorted *independently* (descending) before
    the correlation is computed, which makes the result ~1 regardless of the
    pairing between predicted and observed values.  If a genuine rank
    correlation of the paired values is intended, this should likely be
    ``stats.kendalltau(predicted, observed)`` -- confirm before changing.
    """
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    predicted, observed = np.squeeze(predicted), np.squeeze(observed)
    return stats.kendalltau(predicted[predicted.argsort()][::-1].astype(float), observed[observed.argsort()][::-1].astype(float))
def bayesian_information_criterion(predicted, observed):
    """BIC = k*ln(n) - 2*ln(L)."""
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    n, k = predicted.shape
    ll = log_likelihood(predicted, observed)
    return k * np.log(n) - 2 * ll
def point_biserial_correlation(predicted, observed):
    """Point-biserial correlation between each predicted column and the
    matching (dichotomous) observed column.

    Returns an array of (correlation, p-value) pairs, one row per column.
    """
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    rs = []
    for i in range(predicted.shape[1]):
        # Fix: index columns, not rows -- the loop runs over shape[1],
        # but the original indexed observed[i] / predicted[i] (rows).
        rs.append(stats.pointbiserialr(np.squeeze(observed[:, i]),
                                        np.squeeze(predicted[:, i])))
    return np.asarray(rs)
def index_of_agreement(predicted, observed):
    """Willmott's (1981) index of agreement.

    d = 1 - sum((O - P)^2) / sum((|P - mean(O)| + |O - mean(O)|)^2)

    Fix: the potential-error denominator is centred on the *observed* mean,
    per Willmott's definition -- the original centred both deviation terms
    on the predicted mean.
    """
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    obs_mean = np.nanmean(observed)
    potential_error = np.sum((np.abs(predicted - obs_mean) + np.abs(observed - obs_mean)) ** 2)
    return 1 - np.sum((observed - predicted) ** 2) / potential_error
def nash_sutcliffe_efficiency(predicted, observed):
    """Nash-Sutcliffe model efficiency coefficient (1.0 = perfect fit).

    ``predicted`` is the modelled series, ``observed`` the measurements.
    """
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    residual_ss = np.sum((predicted - observed) ** 2)
    variance_ss = np.sum((observed - observed.mean()) ** 2)
    return 1 - residual_ss / variance_ss
def normalized_nash_sutcliffe_efficiency(predicted, observed):
    """Nash-Sutcliffe efficiency rescaled to (0, 1] via 1 / (2 - NSE)."""
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    nse = nash_sutcliffe_efficiency(predicted, observed)
    return 1.0 / (2 - nse)
def kling_gupta_efficiency(predicted, observed, sr=1, sa=1, sb=1):
    """Kling-Gupta efficiency: 1 minus the (weighted) Euclidean distance of
    (r, alpha, beta) from the ideal point (1, 1, 1).

    alpha = std ratio, beta = mean ratio, r = Pearson correlation;
    sr/sa/sb weight the three components.
    """
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    pred_flat = np.squeeze(predicted).astype(float)
    obs_flat = np.squeeze(observed).astype(float)
    alpha = predicted.std() / observed.std()    # variability ratio
    beta = predicted.mean() / observed.mean()   # bias ratio
    r, _ = stats.pearsonr(pred_flat, obs_flat)     # linear correlation
    return 1 - np.sqrt((sr * (r - 1.0)) ** 2 + (sa * (alpha - 1.0)) ** 2 + (sb * (beta - 1.0)) ** 2)
def kling_gupta_components(predicted, observed, sr=1, sa=1, sb=1, component='all' ):
    """Return the requested Kling-Gupta component(s).

    component: 'a' -> std ratio (alpha), 'b' -> mean ratio (beta),
    'r' -> Pearson correlation, 'all' -> array [alpha, beta, r].
    """
    if np.isnan(np.min(predicted)) or np.isnan(np.min(observed)):
        return np.asarray([np.nan])
    assert component in ['r', 'a', 'b', 'all'], 'invalid component {}'.format(component)
    if component == 'a':
        return predicted.std() / observed.std()
    if component == 'b':
        return predicted.mean() / observed.mean()
    pred_flat = np.squeeze(predicted).astype(float)
    obs_flat = np.squeeze(observed).astype(float)
    r = stats.pearsonr(pred_flat, obs_flat)[0]
    if component == 'r':
        return r
    return np.asarray([predicted.std() / observed.std(),
                       predicted.mean() / observed.mean(),
                       r])
# Convenience collections of the metric callables defined above, so callers
# can iterate over every applicable score for a task type.
flat_classification_metrics = [point_biserial_correlation, hansen_kuiper, ignorance, continuous_rank_probability_score, rank_probability_score, brier_score, generalized_roc ]
flat_regression_metrics = [kling_gupta_components, kling_gupta_efficiency, normalized_nash_sutcliffe_efficiency, nash_sutcliffe_efficiency, index_of_agreement, kendalls_tau, bayesian_information_criterion, akaike_information_criterion, log_likelihood ]
| [
"numpy.meshgrid",
"numpy.sum",
"numpy.log",
"numpy.argmax",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.cumsum",
"numpy.min",
"numpy.arange",
"numpy.nanmean",
"numpy.squeeze",
"numpy.delete",
"numpy.sqrt"
] | [((897, 933), 'numpy.zeros', 'np.zeros', (['(predictions1.shape[0], 1)'], {}), '((predictions1.shape[0], 1))\n', (905, 933), True, 'import numpy as np\n'), ((1352, 1379), 'numpy.argmax', 'np.argmax', (['observed'], {'axis': '(1)'}), '(observed, axis=1)\n', (1361, 1379), True, 'import numpy as np\n'), ((1606, 1623), 'numpy.asarray', 'np.asarray', (['pairs'], {}), '(pairs)\n', (1616, 1623), True, 'import numpy as np\n'), ((1766, 1802), 'numpy.zeros', 'np.zeros', (['(predictions1.shape[0], 1)'], {}), '((predictions1.shape[0], 1))\n', (1774, 1802), True, 'import numpy as np\n'), ((1984, 2019), 'numpy.ones', 'np.ones', (['(predictions1.shape[0], 1)'], {}), '((predictions1.shape[0], 1))\n', (1991, 2019), True, 'import numpy as np\n'), ((2540, 2572), 'numpy.squeeze', 'np.squeeze', (['(predicted - observed)'], {}), '(predicted - observed)\n', (2550, 2572), True, 'import numpy as np\n'), ((2902, 2929), 'numpy.squeeze', 'np.squeeze', (['(-2 * ll + 2 * k)'], {}), '(-2 * ll + 2 * k)\n', (2912, 2929), True, 'import numpy as np\n'), ((3622, 3649), 'numpy.cumsum', 'np.cumsum', (['observed'], {'axis': '(1)'}), '(observed, axis=1)\n', (3631, 3649), True, 'import numpy as np\n'), ((3663, 3691), 'numpy.cumsum', 'np.cumsum', (['predicted'], {'axis': '(1)'}), '(predicted, axis=1)\n', (3672, 3691), True, 'import numpy as np\n'), ((3979, 4006), 'numpy.argmax', 'np.argmax', (['observed'], {'axis': '(1)'}), '(observed, axis=1)\n', (3988, 4006), True, 'import numpy as np\n'), ((4015, 4032), 'numpy.log', 'np.log', (['predicted'], {}), '(predicted)\n', (4021, 4032), True, 'import numpy as np\n'), ((5833, 5847), 'numpy.asarray', 'np.asarray', (['rs'], {}), '(rs)\n', (5843, 5847), True, 'import numpy as np\n'), ((445, 472), 'numpy.argmax', 'np.argmax', (['observed'], {'axis': '(1)'}), '(observed, axis=1)\n', (454, 472), True, 'import numpy as np\n'), ((786, 821), 'numpy.ones', 'np.ones', (['(predictions1.shape[0], 1)'], {}), '((predictions1.shape[0], 1))\n', (793, 821), True, 'import numpy 
as np\n'), ((1220, 1238), 'numpy.sum', 'np.sum', (['hit_scores'], {}), '(hit_scores)\n', (1226, 1238), True, 'import numpy as np\n'), ((1452, 1472), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (1462, 1472), True, 'import numpy as np\n'), ((1654, 1669), 'numpy.asarray', 'np.asarray', (['[1]'], {}), '([1])\n', (1664, 1669), True, 'import numpy as np\n'), ((2481, 2501), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (2491, 2501), True, 'import numpy as np\n'), ((2807, 2827), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (2817, 2827), True, 'import numpy as np\n'), ((3357, 3377), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (3367, 3377), True, 'import numpy as np\n'), ((3589, 3609), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (3599, 3609), True, 'import numpy as np\n'), ((3946, 3966), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (3956, 3966), True, 'import numpy as np\n'), ((4949, 4964), 'numpy.asarray', 'np.asarray', (['ret'], {}), '(ret)\n', (4959, 4964), True, 'import numpy as np\n'), ((5077, 5097), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (5087, 5097), True, 'import numpy as np\n'), ((5121, 5142), 'numpy.squeeze', 'np.squeeze', (['predicted'], {}), '(predicted)\n', (5131, 5142), True, 'import numpy as np\n'), ((5144, 5164), 'numpy.squeeze', 'np.squeeze', (['observed'], {}), '(observed)\n', (5154, 5164), True, 'import numpy as np\n'), ((5423, 5443), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (5433, 5443), True, 'import numpy as np\n'), ((5673, 5693), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (5683, 5693), True, 'import numpy as np\n'), ((6011, 6031), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (6021, 6031), True, 'import numpy as np\n'), ((6425, 6445), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (6435, 6445), True, 
'import numpy as np\n'), ((6734, 6754), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (6744, 6754), True, 'import numpy as np\n'), ((7063, 7083), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (7073, 7083), True, 'import numpy as np\n'), ((7277, 7367), 'numpy.sqrt', 'np.sqrt', (['((sr * (r - 1.0)) ** 2 + (sa * (alpha - 1.0)) ** 2 + (sb * (beta - 1.0)) ** 2)'], {}), '((sr * (r - 1.0)) ** 2 + (sa * (alpha - 1.0)) ** 2 + (sb * (beta - \n 1.0)) ** 2)\n', (7284, 7367), True, 'import numpy as np\n'), ((7618, 7638), 'numpy.asarray', 'np.asarray', (['[np.nan]'], {}), '([np.nan])\n', (7628, 7638), True, 'import numpy as np\n'), ((1393, 1410), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (1399, 1410), True, 'import numpy as np\n'), ((1424, 1440), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (1430, 1440), True, 'import numpy as np\n'), ((2323, 2341), 'numpy.sum', 'np.sum', (['hit_scores'], {}), '(hit_scores)\n', (2329, 2341), True, 'import numpy as np\n'), ((2422, 2439), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (2428, 2439), True, 'import numpy as np\n'), ((2453, 2469), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (2459, 2469), True, 'import numpy as np\n'), ((2748, 2765), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (2754, 2765), True, 'import numpy as np\n'), ((2779, 2795), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (2785, 2795), True, 'import numpy as np\n'), ((2986, 3003), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (2992, 3003), True, 'import numpy as np\n'), ((3017, 3033), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (3023, 3033), True, 'import numpy as np\n'), ((3298, 3315), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (3304, 3315), True, 'import numpy as np\n'), ((3329, 3345), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (3335, 3345), True, 'import numpy as np\n'), ((3405, 
3449), 'numpy.sum', 'np.sum', (['((observed - predicted) ** 2)'], {'axis': '(-1)'}), '((observed - predicted) ** 2, axis=-1)\n', (3411, 3449), True, 'import numpy as np\n'), ((3530, 3547), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (3536, 3547), True, 'import numpy as np\n'), ((3561, 3577), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (3567, 3577), True, 'import numpy as np\n'), ((3722, 3765), 'numpy.sum', 'np.sum', (['((predicted - observed) ** 2)'], {'axis': '(1)'}), '((predicted - observed) ** 2, axis=1)\n', (3728, 3765), True, 'import numpy as np\n'), ((3887, 3904), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (3893, 3904), True, 'import numpy as np\n'), ((3918, 3934), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (3924, 3934), True, 'import numpy as np\n'), ((4220, 4237), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (4226, 4237), True, 'import numpy as np\n'), ((4251, 4267), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (4257, 4267), True, 'import numpy as np\n'), ((4370, 4397), 'numpy.argmax', 'np.argmax', (['observed'], {'axis': '(1)'}), '(observed, axis=1)\n', (4379, 4397), True, 'import numpy as np\n'), ((4411, 4439), 'numpy.argmax', 'np.argmax', (['predicted'], {'axis': '(1)'}), '(predicted, axis=1)\n', (4420, 4439), True, 'import numpy as np\n'), ((4552, 4568), 'numpy.sum', 'np.sum', (['cm[i, :]'], {}), '(cm[i, :])\n', (4558, 4568), True, 'import numpy as np\n'), ((4621, 4645), 'numpy.delete', 'np.delete', (['cm', 'i'], {'axis': '(0)'}), '(cm, i, axis=0)\n', (4630, 4645), True, 'import numpy as np\n'), ((4664, 4682), 'numpy.sum', 'np.sum', (['negs[:, i]'], {}), '(negs[:, i])\n', (4670, 4682), True, 'import numpy as np\n'), ((5018, 5035), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (5024, 5035), True, 'import numpy as np\n'), ((5049, 5065), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (5055, 5065), True, 'import numpy as np\n'), ((5364, 
5381), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (5370, 5381), True, 'import numpy as np\n'), ((5395, 5411), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (5401, 5411), True, 'import numpy as np\n'), ((5535, 5544), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (5541, 5544), True, 'import numpy as np\n'), ((5614, 5631), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (5620, 5631), True, 'import numpy as np\n'), ((5645, 5661), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (5651, 5661), True, 'import numpy as np\n'), ((5952, 5969), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (5958, 5969), True, 'import numpy as np\n'), ((5983, 5999), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (5989, 5999), True, 'import numpy as np\n'), ((6044, 6079), 'numpy.sum', 'np.sum', (['((observed - predicted) ** 2)'], {}), '((observed - predicted) ** 2)\n', (6050, 6079), True, 'import numpy as np\n'), ((6366, 6383), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (6372, 6383), True, 'import numpy as np\n'), ((6397, 6413), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (6403, 6413), True, 'import numpy as np\n'), ((6458, 6493), 'numpy.sum', 'np.sum', (['((predicted - observed) ** 2)'], {}), '((predicted - observed) ** 2)\n', (6464, 6493), True, 'import numpy as np\n'), ((6675, 6692), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (6681, 6692), True, 'import numpy as np\n'), ((6706, 6722), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (6712, 6722), True, 'import numpy as np\n'), ((7004, 7021), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (7010, 7021), True, 'import numpy as np\n'), ((7035, 7051), 'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (7041, 7051), True, 'import numpy as np\n'), ((7559, 7576), 'numpy.min', 'np.min', (['predicted'], {}), '(predicted)\n', (7565, 7576), True, 'import numpy as np\n'), ((7590, 7606), 
'numpy.min', 'np.min', (['observed'], {}), '(observed)\n', (7596, 7606), True, 'import numpy as np\n'), ((824, 868), 'numpy.sum', 'np.sum', (['(predictions1 * predictions2)'], {'axis': '(-1)'}), '(predictions1 * predictions2, axis=-1)\n', (830, 868), True, 'import numpy as np\n'), ((4705, 4717), 'numpy.sum', 'np.sum', (['negs'], {}), '(negs)\n', (4711, 4717), True, 'import numpy as np\n'), ((5773, 5796), 'numpy.squeeze', 'np.squeeze', (['observed[i]'], {}), '(observed[i])\n', (5783, 5796), True, 'import numpy as np\n'), ((5798, 5822), 'numpy.squeeze', 'np.squeeze', (['predicted[i]'], {}), '(predicted[i])\n', (5808, 5822), True, 'import numpy as np\n'), ((7192, 7213), 'numpy.squeeze', 'np.squeeze', (['predicted'], {}), '(predicted)\n', (7202, 7213), True, 'import numpy as np\n'), ((7229, 7249), 'numpy.squeeze', 'np.squeeze', (['observed'], {}), '(observed)\n', (7239, 7249), True, 'import numpy as np\n'), ((496, 537), 'numpy.meshgrid', 'np.meshgrid', (['observed_cats', 'observed_cats'], {}), '(observed_cats, observed_cats)\n', (507, 537), True, 'import numpy as np\n'), ((2610, 2627), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2616, 2627), True, 'import numpy as np\n'), ((584, 602), 'numpy.arange', 'np.arange', (['samples'], {}), '(samples)\n', (593, 602), True, 'import numpy as np\n'), ((604, 622), 'numpy.arange', 'np.arange', (['samples'], {}), '(samples)\n', (613, 622), True, 'import numpy as np\n'), ((7901, 7922), 'numpy.squeeze', 'np.squeeze', (['predicted'], {}), '(predicted)\n', (7911, 7922), True, 'import numpy as np\n'), ((7938, 7958), 'numpy.squeeze', 'np.squeeze', (['observed'], {}), '(observed)\n', (7948, 7958), True, 'import numpy as np\n'), ((8082, 8103), 'numpy.squeeze', 'np.squeeze', (['predicted'], {}), '(predicted)\n', (8092, 8103), True, 'import numpy as np\n'), ((8119, 8139), 'numpy.squeeze', 'np.squeeze', (['observed'], {}), '(observed)\n', (8129, 8139), True, 'import numpy as np\n'), ((6106, 6127), 'numpy.nanmean', 
'np.nanmean', (['predicted'], {}), '(predicted)\n', (6116, 6127), True, 'import numpy as np\n'), ((6150, 6171), 'numpy.nanmean', 'np.nanmean', (['predicted'], {}), '(predicted)\n', (6160, 6171), True, 'import numpy as np\n')] |
import numpy as np
from stats import (
questions_complexity,
player2idx,
)
from scipy import sparse
def team_answer_estimation(
    dataset: dict,
    player_to_idx: dict,
    prediction: list,
) -> list:
    """Aggregate per-player predictions into per-team answer probabilities.

    ``prediction`` rows must be ordered exactly as the (tournament, team,
    question, known-player) tuples are visited here.  For every question a
    team faced, the probability that the team answers is
    1 - prod(per-player values); that value is repeated once per known
    player so the output stays aligned with the prediction rows.
    """
    row = 0
    estimates = []
    for tournament in dataset:
        for team in dataset[tournament]['teams']:
            for _answer in team['mask']:
                known = [p for p in team['players'] if p in player_to_idx]
                per_player = []
                for _ in known:
                    per_player.append(prediction[row, 0])
                    row += 1
                team_prob = 1. - np.prod(per_player)
                estimates.extend([team_prob] * len(known))
    return np.array(estimates)
def create_test_prediction(
    dataset: dict,
    player_to_idx: dict,
    model,
    n_features: int,
) -> tuple:
    """Predict, per tournament and team, P(team answers) vs. observed rate.

    ``model`` must expose ``predict_proba``; column 0 of its output is read
    as the per-player miss probability.  Teams with no known players are
    skipped.  Returns (predictions, ground_truths) as nested lists indexed
    by tournament then team.
    """
    all_preds, all_gts = [], []
    for tournament in dataset:
        tour_preds, tour_gts = [], []
        for team in dataset[tournament]['teams']:
            idxs = [
                player_to_idx[player]
                for player in team['players']
                if player in player_to_idx
            ]
            if not idxs:
                continue  # no features available for this team
            X = sparse.lil_matrix(
                arg1=(len(idxs), n_features),
                dtype=int,
            )
            X[range(len(idxs)), idxs] = 1
            # P(at least one player answers) = 1 - prod(per-player misses).
            miss_prob = model.predict_proba(X)[..., 0].prod()
            tour_preds.append(1. - miss_prob)
            tour_gts.append(sum(team['mask']) / len(team['mask']))
        all_preds.append(tour_preds)
        all_gts.append(tour_gts)
    return all_preds, all_gts
def create_train_matrix_baseline(
    dataset: dict,
    n_team: int=3,
) -> tuple:
    """Build the one-hot (player + question) design matrix and answer vector.

    One row per (player, question) occurrence: the first ``len(player_to_idx)``
    columns one-hot encode the player, the remaining columns one-hot encode
    the global question position.

    Returns (sparse design matrix, answer array, player_to_idx).
    Fix: dropped the unused ``complexity_feature`` list and the unused
    ``idx_to_player`` mapping.
    """
    player_to_idx, _ = player2idx(dataset, n_team)
    n_players = len(player_to_idx)
    player_cols, question_cols, answers = [], [], []
    question_position = 0
    for tournament in dataset:
        # All teams in a tournament answer the same question set.
        n_questions = len(dataset[tournament]['teams'][0]['mask'])
        for team in dataset[tournament]['teams']:
            for i, answer in enumerate(team['mask']):
                for player in team['players']:
                    if player in player_to_idx:
                        player_cols.append(player_to_idx[player])
                        question_cols.append(n_players + question_position + i)
                        answers.append(answer)
        question_position += n_questions
    questions = sparse.lil_matrix(
        arg1=(len(player_cols), n_players + question_position),
        dtype=int,
    )
    rows = range(len(player_cols))
    questions[rows, player_cols] = 1.
    questions[rows, question_cols] = 1.
    return questions, np.array(answers), player_to_idx
| [
"stats.player2idx",
"numpy.array",
"numpy.prod"
] | [((757, 773), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (765, 773), True, 'import numpy as np\n'), ((2025, 2052), 'stats.player2idx', 'player2idx', (['dataset', 'n_team'], {}), '(dataset, n_team)\n', (2035, 2052), False, 'from stats import questions_complexity, player2idx\n'), ((3059, 3076), 'numpy.array', 'np.array', (['answers'], {}), '(answers)\n', (3067, 3076), True, 'import numpy as np\n'), ((661, 678), 'numpy.prod', 'np.prod', (['per_team'], {}), '(per_team)\n', (668, 678), True, 'import numpy as np\n')] |
from hypothesis import given
from hypothesis.strategies import composite, integers
import mf2
import numpy as np
import pandas as pd
from pytest import approx
from scipy.special import binom
from sklearn.metrics import mean_squared_error
import xarray as xr
from pyprojroot import here
import sys
module_path = str(here('scripts/experiments'))
if module_path not in sys.path:
sys.path.append(module_path)
import experiments as exp
import multiLevelCoSurrogates as mlcs
def prepare_DoE(func, nh=3, nl=5):
    """Create a reproducible bi-fidelity DoE scaled to ``func``'s domain."""
    np.random.seed(20160501)  # fixed seed so the DoE is reproducible
    doe = mlcs.bi_fidelity_doe(func.ndim, nh, nl)
    return exp.scale_to_function(func, doe)
def get_experiment_subsampled_EG(func, DoE, instances):
    """Build an error grid by training one hierarchical model per instance.

    For each (num_high, num_low, rep) instance, sub-samples the DoE, trains
    a multi-fidelity model, and records the cross-validated and training
    MSE of the high-fidelity hierarchical model.  Returns an xarray Dataset
    indexed by (n_high, n_low, rep, model).  Results are deterministic
    because the seed is derived from each instance.
    """
    results = []
    for i, (num_high, num_low, rep) in enumerate(instances):
        mlcs.set_seed_by_instance(num_high, num_low, rep)
        # Create sub-sampled Multi-Fidelity DoE in- and output according to instance specification
        train, test = mlcs.split_bi_fidelity_doe(DoE, num_high, num_low)
        train_high_y, train_low_y = func.high(train.high), \
                                    func.low(train.low)
        # Create an archive from the MF-function and MF-DoE data
        archive = mlcs.CandidateArchive.from_multi_fidelity_function(func, ndim=func.ndim)
        archive.addcandidates(train.low, train_low_y, fidelity='low')
        archive.addcandidates(train.high, train_high_y, fidelity='high')
        # (Automatically) Create the hierarchical model
        mfbo = mlcs.MultiFidelityBO(func, archive, scaling='off', kernel='Matern')
        mses = mfbo.getMSE()
        test_high_y = func.high(test.high)
        # Cross-validated MSE on the held-out high-fidelity points.
        cv_mses = mean_squared_error(test_high_y, mfbo.models['high'].predict(test.high))
        # Store the results
        results.append((num_high, num_low, rep, 'high_hier', cv_mses, mses[0]))
    columns = ['n_high', 'n_low', 'rep', 'model', 'mses', 'orig_mses']
    tmp_df = pd.DataFrame.from_records(results, columns=columns, index=columns[:4])
    return xr.Dataset.from_dataframe(tmp_df)
def get_subsampled_protoEG(archive, num_reps):
    """Build a ProtoEG over ``archive`` and populate its error grid."""
    proto_eg = mlcs.ProtoEG(archive, num_reps=num_reps)
    proto_eg.subsample_errorgrid()
    return proto_eg
@composite
def valid_subsample_spec(draw):
    """Hypothesis strategy for (num_high, num_low, max_high, max_low) with
    1 <= num_high < max_high <= max_low and num_high <= num_low <= max_low."""
    budget_high = draw(integers(min_value=2, max_value=1_000))
    budget_low = draw(integers(min_value=budget_high, max_value=10_000))
    chosen_high = draw(integers(min_value=1, max_value=budget_high - 1))
    chosen_low = draw(integers(min_value=chosen_high, max_value=budget_low))
    return chosen_high, chosen_low, budget_high, budget_low
@given(valid_subsample_spec())
def test_calc_reuse_fraction_high(spec):
    """calculate_reuse_fraction (fidelity='high') must equal
    C(max_high, num_high) / C(max_high + 1, num_high)."""
    num_high, num_low, max_high, max_low = spec
    peg = mlcs.ProtoEG(archive=mlcs.CandidateArchive(ndim=0))
    part1 = binom(max_high, num_high)
    part2 = binom(max_high + 1, num_high)
    # Huge binomials overflow to inf; skip those draws.
    if not (np.isfinite(part1) and np.isfinite(part2)):
        return  # invalid input that cannot be tested
    true_fraction = part1 / part2
    fraction = peg.calculate_reuse_fraction(num_high, num_low, fidelity='high',
                                            max_high=max_high, max_low=max_low)
    assert fraction == approx(true_fraction)
@given(valid_subsample_spec())
def test_calc_reuse_fraction_low(spec):
    """calculate_reuse_fraction (fidelity='low') must equal
    C(max_low - num_high, num_low - num_high)
    / C(max_low + 1 - num_high, num_low - num_high)."""
    num_high, num_low, max_high, max_low = spec
    peg = mlcs.ProtoEG(archive=mlcs.CandidateArchive(ndim=0))
    part1 = binom(max_low - num_high, num_low - num_high)
    part2 = binom(max_low+1 - num_high, num_low - num_high)
    # Huge binomials overflow to inf; skip those draws.
    if not (np.isfinite(part1) and np.isfinite(part2)):
        return  # invalid input that cannot be tested
    true_fraction = part1 / part2
    fraction = peg.calculate_reuse_fraction(num_high, num_low, fidelity='low',
                                            max_high=max_high, max_low=max_low)
    assert fraction == approx(true_fraction)
def test_experiment():
    """Regression test: the reference experiment reproduces a pinned MSE.

    Deterministic because prepare_DoE and set_seed_by_instance fix all seeds.
    """
    func = mf2.currin
    num_reps = 1
    DoE = prepare_DoE(func)
    spec = mlcs.InstanceSpec(len(DoE[0])-1, len(DoE[1]), num_reps=num_reps)
    instances = list(spec.instances)
    eg2 = get_experiment_subsampled_EG(func, DoE, instances=instances)
    # Value pinned from a known-good run.
    np.testing.assert_allclose(
        [0.27112833049506163],
        eg2['mses'].sel(model='high_hier').values.flatten()[0],
    )
def test_protoEG_subsample_errorgrid_create():
    """ProtoEG's subsampled error grid must reproduce the same pinned MSE as
    the reference experiment implementation (see test_experiment)."""
    func = mf2.currin
    num_reps = 1
    DoE_high, DoE_low = prepare_DoE(func)
    archive = mlcs.CandidateArchive.from_multi_fidelity_function(func)
    archive.addcandidates(DoE_high, func.high(DoE_high), fidelity='high')
    archive.addcandidates(DoE_low, func.low(DoE_low), fidelity='low')
    proto_eg = get_subsampled_protoEG(archive, num_reps)
    eg1 = proto_eg.error_grid
    # Same pinned regression value as in test_experiment.
    np.testing.assert_allclose(
        [0.27112833049506163],
        eg1['mses'].sel(model='high_hier').values.flatten()[0],
    )
def test_protoEG_subsample_errorgrid_update_low():
    """Adding a new low-fidelity sample must grow the grid's n_low axis."""
    func = mf2.currin
    num_reps = 1
    DoE_high, DoE_low = prepare_DoE(func)
    archive = mlcs.CandidateArchive.from_multi_fidelity_function(func)
    archive.addcandidates(DoE_high, func.high(DoE_high), fidelity='high')
    archive.addcandidates(DoE_low, func.low(DoE_low), fidelity='low')
    proto_eg = get_subsampled_protoEG(archive, num_reps)
    np.random.seed(0)
    # Any fresh point in the unit cube works; scaling is not relevant here.
    new_sample = np.random.rand(1,func.ndim)
    archive.addcandidate(candidate=new_sample.flatten(), fitness=func.low(new_sample), fidelity='low')
    prev_coords = proto_eg.error_grid.coords
    proto_eg.update_errorgrid_with_sample(new_sample, fidelity='low')
    assert len(proto_eg.error_grid.coords['n_low'].values) > len(prev_coords['n_low'].values)
def test_protoEG_subsample_errorgrid_update_high():
    """Promoting a low-only sample to high fidelity must grow the grid's
    n_high axis."""
    func = mf2.currin
    num_reps = 1
    DoE_high, DoE_low = prepare_DoE(func)
    archive = mlcs.CandidateArchive.from_multi_fidelity_function(func)
    archive.addcandidates(DoE_high, func.high(DoE_high), fidelity='high')
    archive.addcandidates(DoE_low, func.low(DoE_low), fidelity='low')
    proto_eg = get_subsampled_protoEG(archive, num_reps)
    np.random.seed(0)
    # Pick a point that exists only at low fidelity and add its high value.
    non_high = set(tuple(c) for c in DoE_low) - set(tuple(c) for c in DoE_high)
    new_sample = np.array(next(iter(non_high))).reshape(1, -1)  # just take 1 element
    archive.addcandidate(candidate=new_sample.flatten(), fitness=func.high(new_sample), fidelity='high')
    prev_coords = proto_eg.error_grid.coords
    proto_eg.update_errorgrid_with_sample(new_sample, fidelity='high')
    assert len(proto_eg.error_grid.coords['n_high'].values) > len(prev_coords['n_high'].values)
| [
"sys.path.append",
"scipy.special.binom",
"numpy.random.seed",
"multiLevelCoSurrogates.CandidateArchive.from_multi_fidelity_function",
"multiLevelCoSurrogates.bi_fidelity_doe",
"experiments.scale_to_function",
"numpy.isfinite",
"multiLevelCoSurrogates.set_seed_by_instance",
"multiLevelCoSurrogates.M... | [((316, 343), 'pyprojroot.here', 'here', (['"""scripts/experiments"""'], {}), "('scripts/experiments')\n", (320, 343), False, 'from pyprojroot import here\n'), ((381, 409), 'sys.path.append', 'sys.path.append', (['module_path'], {}), '(module_path)\n', (396, 409), False, 'import sys\n'), ((517, 541), 'numpy.random.seed', 'np.random.seed', (['(20160501)'], {}), '(20160501)\n', (531, 541), True, 'import numpy as np\n'), ((593, 632), 'multiLevelCoSurrogates.bi_fidelity_doe', 'mlcs.bi_fidelity_doe', (['func.ndim', 'nh', 'nl'], {}), '(func.ndim, nh, nl)\n', (613, 632), True, 'import multiLevelCoSurrogates as mlcs\n'), ((643, 680), 'experiments.scale_to_function', 'exp.scale_to_function', (['func', 'init_DoE'], {}), '(func, init_DoE)\n', (664, 680), True, 'import experiments as exp\n'), ((1981, 2051), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['results'], {'columns': 'columns', 'index': 'columns[:4]'}), '(results, columns=columns, index=columns[:4])\n', (2006, 2051), True, 'import pandas as pd\n'), ((2063, 2096), 'xarray.Dataset.from_dataframe', 'xr.Dataset.from_dataframe', (['tmp_df'], {}), '(tmp_df)\n', (2088, 2096), True, 'import xarray as xr\n'), ((2156, 2196), 'multiLevelCoSurrogates.ProtoEG', 'mlcs.ProtoEG', (['archive'], {'num_reps': 'num_reps'}), '(archive, num_reps=num_reps)\n', (2168, 2196), True, 'import multiLevelCoSurrogates as mlcs\n'), ((2791, 2816), 'scipy.special.binom', 'binom', (['max_high', 'num_high'], {}), '(max_high, num_high)\n', (2796, 2816), False, 'from scipy.special import binom\n'), ((2829, 2858), 'scipy.special.binom', 'binom', (['(max_high + 1)', 'num_high'], {}), '(max_high + 1, num_high)\n', (2834, 2858), False, 'from scipy.special import binom\n'), ((3407, 3452), 'scipy.special.binom', 'binom', (['(max_low - num_high)', '(num_low - num_high)'], {}), '(max_low - num_high, num_low - num_high)\n', (3412, 3452), False, 'from scipy.special import binom\n'), ((3465, 3514), 
'scipy.special.binom', 'binom', (['(max_low + 1 - num_high)', '(num_low - num_high)'], {}), '(max_low + 1 - num_high, num_low - num_high)\n', (3470, 3514), False, 'from scipy.special import binom\n'), ((4422, 4478), 'multiLevelCoSurrogates.CandidateArchive.from_multi_fidelity_function', 'mlcs.CandidateArchive.from_multi_fidelity_function', (['func'], {}), '(func)\n', (4472, 4478), True, 'import multiLevelCoSurrogates as mlcs\n'), ((4994, 5050), 'multiLevelCoSurrogates.CandidateArchive.from_multi_fidelity_function', 'mlcs.CandidateArchive.from_multi_fidelity_function', (['func'], {}), '(func)\n', (5044, 5050), True, 'import multiLevelCoSurrogates as mlcs\n'), ((5257, 5274), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5271, 5274), True, 'import numpy as np\n'), ((5292, 5320), 'numpy.random.rand', 'np.random.rand', (['(1)', 'func.ndim'], {}), '(1, func.ndim)\n', (5306, 5320), True, 'import numpy as np\n'), ((5784, 5840), 'multiLevelCoSurrogates.CandidateArchive.from_multi_fidelity_function', 'mlcs.CandidateArchive.from_multi_fidelity_function', (['func'], {}), '(func)\n', (5834, 5840), True, 'import multiLevelCoSurrogates as mlcs\n'), ((6047, 6064), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6061, 6064), True, 'import numpy as np\n'), ((842, 891), 'multiLevelCoSurrogates.set_seed_by_instance', 'mlcs.set_seed_by_instance', (['num_high', 'num_low', 'rep'], {}), '(num_high, num_low, rep)\n', (867, 891), True, 'import multiLevelCoSurrogates as mlcs\n'), ((1014, 1064), 'multiLevelCoSurrogates.split_bi_fidelity_doe', 'mlcs.split_bi_fidelity_doe', (['DoE', 'num_high', 'num_low'], {}), '(DoE, num_high, num_low)\n', (1040, 1064), True, 'import multiLevelCoSurrogates as mlcs\n'), ((1266, 1338), 'multiLevelCoSurrogates.CandidateArchive.from_multi_fidelity_function', 'mlcs.CandidateArchive.from_multi_fidelity_function', (['func'], {'ndim': 'func.ndim'}), '(func, ndim=func.ndim)\n', (1316, 1338), True, 'import multiLevelCoSurrogates as 
mlcs\n'), ((1554, 1621), 'multiLevelCoSurrogates.MultiFidelityBO', 'mlcs.MultiFidelityBO', (['func', 'archive'], {'scaling': '"""off"""', 'kernel': '"""Matern"""'}), "(func, archive, scaling='off', kernel='Matern')\n", (1574, 1621), True, 'import multiLevelCoSurrogates as mlcs\n'), ((2305, 2342), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': '(2)', 'max_value': '(1000)'}), '(min_value=2, max_value=1000)\n', (2313, 2342), False, 'from hypothesis.strategies import composite, integers\n'), ((2364, 2409), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': 'max_high', 'max_value': '(10000)'}), '(min_value=max_high, max_value=10000)\n', (2372, 2409), False, 'from hypothesis.strategies import composite, integers\n'), ((2432, 2477), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': '(1)', 'max_value': '(max_high - 1)'}), '(min_value=1, max_value=max_high - 1)\n', (2440, 2477), False, 'from hypothesis.strategies import composite, integers\n'), ((2496, 2543), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': 'num_high', 'max_value': 'max_low'}), '(min_value=num_high, max_value=max_low)\n', (2504, 2543), False, 'from hypothesis.strategies import composite, integers\n'), ((3189, 3210), 'pytest.approx', 'approx', (['true_fraction'], {}), '(true_fraction)\n', (3195, 3210), False, 'from pytest import approx\n'), ((3842, 3863), 'pytest.approx', 'approx', (['true_fraction'], {}), '(true_fraction)\n', (3848, 3863), False, 'from pytest import approx\n'), ((2747, 2776), 'multiLevelCoSurrogates.CandidateArchive', 'mlcs.CandidateArchive', ([], {'ndim': '(0)'}), '(ndim=0)\n', (2768, 2776), True, 'import multiLevelCoSurrogates as mlcs\n'), ((2872, 2890), 'numpy.isfinite', 'np.isfinite', (['part1'], {}), '(part1)\n', (2883, 2890), True, 'import numpy as np\n'), ((2895, 2913), 'numpy.isfinite', 'np.isfinite', (['part2'], {}), '(part2)\n', (2906, 2913), True, 'import numpy as np\n'), ((3363, 3392), 
'multiLevelCoSurrogates.CandidateArchive', 'mlcs.CandidateArchive', ([], {'ndim': '(0)'}), '(ndim=0)\n', (3384, 3392), True, 'import multiLevelCoSurrogates as mlcs\n'), ((3526, 3544), 'numpy.isfinite', 'np.isfinite', (['part1'], {}), '(part1)\n', (3537, 3544), True, 'import numpy as np\n'), ((3549, 3567), 'numpy.isfinite', 'np.isfinite', (['part2'], {}), '(part2)\n', (3560, 3567), True, 'import numpy as np\n')] |
import os
from mrcnn.utils import Dataset
from skimage.color import gray2rgb
import pandas as pd
import pydicom
import numpy as np
class PneumoniaDataset(Dataset):
    """Mask R-CNN dataset wrapper for the RSNA Pneumonia Detection DICOM data.

    Reads images from ``stage_2_{mode}_images/*.dcm`` and bounding-box
    annotations from ``stage_2_train_labels.csv`` under ``data_dir``.
    """

    def __init__(self, data_dir, mode):
        # mode selects the image sub-directory suffix.
        assert mode in ['dev', 'val', 'train', 'test']
        # NOTE(review): mrcnn Dataset.__init__ signature is (self, class_map=None);
        # the original passed `self` as class_map, which was almost certainly
        # unintended (class_map is unused upstream, so this is behavior-neutral).
        super().__init__()
        self.data_dir = data_dir
        self.mode = mode
        self.img_path = os.path.join(self.data_dir, 'stage_2_{}_images'.format(self.mode))
        self.train_labels_df = pd.read_csv(os.path.join(self.data_dir, 'stage_2_train_labels.csv'))
        self.train_labels_dict = self.create_train_labels_dict()
        self.add_class('Pneumonia', 1, 'Pneumonia')
        self.add_all_images()

    def add_all_images(self):
        """Register every .dcm file in the image directory with mrcnn."""
        img_fn_list = [x for x in os.listdir(self.img_path) if x.endswith('.dcm')]
        for idx, img_fn in enumerate(img_fn_list):
            patient_id = img_fn.split('.')[0]  # filename stem is the patient id
            self.add_image(source="Pneumonia",
                           image_id=idx,
                           path=os.path.join(self.img_path, img_fn),
                           patient_id=patient_id)

    def create_train_labels_dict(self):
        """Group label rows by patientId.

        Returns
        -------
        dict
            patientId -> list of {'x', 'y', 'width', 'height', 'Target'} dicts
            (coordinates may be NaN for Target == 0 rows).
        """
        train_labels_dict = {}
        for _, row in self.train_labels_df.iterrows():
            # setdefault replaces the original if/else duplication.
            train_labels_dict.setdefault(row.patientId, []).append({
                'x': row.x,
                'y': row.y,
                'width': row.width,
                'height': row.height,
                'Target': row.Target,
            })
        return train_labels_dict

    def image_reference(self, image_id):
        """Return the image path used to identify the image when printing."""
        return self.image_info[image_id]['path']

    def load_image(self, image_id):
        """Read a DICOM file and return its pixel data replicated to RGB."""
        path = self.image_info[image_id]['path']
        # dcmread is the documented pydicom API; read_file was deprecated and
        # removed in pydicom 3.0.
        img = pydicom.dcmread(path).pixel_array
        img = gray2rgb(img)
        return img

    def load_mask(self, image_id):
        """Build instance masks for one image.

        Returns
        -------
        (ndarray, ndarray)
            (1024, 1024, n) boolean masks and an (n,) int32 class-id array
            (1 where a pneumonia box exists, 0 for negative rows).
        """
        patient_id = self.image_info[image_id]['patient_id']
        full_pdict = self.train_labels_dict[patient_id]
        n_mask = len(full_pdict)
        mask = np.zeros((1024, 1024, n_mask))
        class_mask = np.zeros(n_mask)
        for idx_mask, pdict in enumerate(full_pdict):
            if int(pdict['Target']) == 1:
                # Box coordinates are only valid (non-NaN) for positive rows.
                x, y = int(pdict['x']), int(pdict['y'])
                width, height = int(pdict['width']), int(pdict['height'])
                mask[y:(y + height), x:(x + width), idx_mask] = 1.
                class_mask[idx_mask] = 1.
        # np.bool was removed in NumPy 1.24; the builtin bool dtype is the
        # supported spelling.
        return mask.astype(bool), class_mask.astype(np.int32)
| [
"pydicom.read_file",
"numpy.zeros",
"skimage.color.gray2rgb",
"os.path.join",
"os.listdir"
] | [((2177, 2190), 'skimage.color.gray2rgb', 'gray2rgb', (['img'], {}), '(img)\n', (2185, 2190), False, 'from skimage.color import gray2rgb\n'), ((2411, 2441), 'numpy.zeros', 'np.zeros', (['(1024, 1024, n_mask)'], {}), '((1024, 1024, n_mask))\n', (2419, 2441), True, 'import numpy as np\n'), ((2463, 2479), 'numpy.zeros', 'np.zeros', (['n_mask'], {}), '(n_mask)\n', (2471, 2479), True, 'import numpy as np\n'), ((485, 540), 'os.path.join', 'os.path.join', (['self.data_dir', '"""stage_2_train_labels.csv"""'], {}), "(self.data_dir, 'stage_2_train_labels.csv')\n", (497, 540), False, 'import os\n'), ((2127, 2150), 'pydicom.read_file', 'pydicom.read_file', (['path'], {}), '(path)\n', (2144, 2150), False, 'import pydicom\n'), ((755, 780), 'os.listdir', 'os.listdir', (['self.img_path'], {}), '(self.img_path)\n', (765, 780), False, 'import os\n'), ((1021, 1056), 'os.path.join', 'os.path.join', (['self.img_path', 'img_fn'], {}), '(self.img_path, img_fn)\n', (1033, 1056), False, 'import os\n')] |
import collections
import itertools
import time
import numpy as np
from ..exceptions import ValidationError
from ..models import AlignmentMill
from ..physical_constants import constants
from ..testing import compare_values
from ..util import distance_matrix, linear_sum_assignment, random_rotation_matrix, uno, which_import
def _nre(Z, geom):
"""Nuclear repulsion energy"""
nre = 0.0
for at1 in range(geom.shape[0]):
for at2 in range(at1):
dist = np.linalg.norm(geom[at1] - geom[at2])
nre += Z[at1] * Z[at2] / dist
return nre
def _pseudo_nre(Zhash, geom):
"""Pseudo nuclear repulsion energy where non-physical Z contrived from `Zhash`."""
Zidx = list(set(sorted(Zhash)))
pZ = [Zidx.index(z) for z in Zhash]
return _nre(pZ, geom)
def B787(
    cgeom,
    rgeom,
    cuniq,
    runiq,
    do_plot=False,
    verbose=1,
    atoms_map=False,
    run_resorting=False,
    mols_align=False,
    run_to_completion=False,
    algorithm="hungarian_uno",
    uno_cutoff=1.0e-3,
    run_mirror=False,
):
    """Use Kabsch algorithm to find best alignment of geometry `cgeom` onto
    `rgeom` while sampling atom mappings restricted by `runiq` and `cuniq`.

    Parameters
    ----------
    rgeom : ndarray of float
        (nat, 3) array of reference/target/unchanged geometry. Assumed [a0]
        for RMSD purposes.
    cgeom : ndarray of float
        (nat, 3) array of concern/changeable geometry. Assumed [a0] for RMSD
        purposes. Must have same nat, units, and atom content as rgeom.
    runiq : ndarray of str
        (nat,) array indicating which rows (atoms) in `rgeom` are shuffleable
        without changing the molecule. Generally hashes of element symbol and
        mass are used, but could be as simple as ['C', 'H', 'H', 'D', 'H'] for
        monodeuterated methane.
    cuniq : ndarray of str
        (nat,) array indicating which rows (atoms) in `cgeom` are shuffleable.
        See `runiq` for more details. Strings and count in `cuniq` must match
        `runiq`. That is, `sorted(cuniq) == sorted(runiq)`.
    do_plot : bool, optional
        Pops up a mpl plot showing before, after, and ref geometries.
    verbose : int, optional
        Quantity of printing. 0 to silence.
    atoms_map : bool, optional
        Whether atom1 of rgeom already corresponds to atom1 of cgeom and so on.
        If `True`, no resorting will be run, parameters `runiq` and `cuniq`
        may be passed as `None`, and much time will be saved.
    run_resorting : bool, optional
        Run the resorting machinery even if unnecessary because `atoms_map=True`.
    mols_align : bool or float, optional
        Whether ref_mol and concern_mol have identical geometries by eye
        (barring orientation or atom mapping) and expected final RMSD = 0.
        If `True`, procedure is truncated when RMSD condition met, saving time.
        If float, convcrit at which search for minimium truncates.
    run_to_completion : bool, optional
        Run reorderings to completion (past RMSD = 0) even if unnecessary because
        `mols_align=True`. Used to test worst-case timings.
    algorithm : {'hungarian_uno', 'permutative'}, optional
        When `atoms_map=False`, screening algorithm for plausible atom mappings.
        `permutative` suitable only for small systems.
    uno_cutoff : float, optional
        TODO
    run_mirror : bool, optional
        Run alternate geometries potentially allowing best match to `rgeom`
        from mirror image of `cgeom`. Only run if system confirmed to
        be nonsuperimposable upon mirror reflection.

    Returns
    -------
    float, tuple
        First item is RMSD [A] between `rgeom` and the optimally aligned
        geometry computed.
        Second item is a AlignmentMill with fields
        (shift, rotation, atommap, mirror) that prescribe the transformation
        from `cgeom` and the optimally aligned geometry.
    """
    # validation
    if rgeom.shape != cgeom.shape or rgeom.shape[1] != 3:
        raise ValidationError("""natom doesn't match: {} != {}""".format(rgeom.shape, cgeom.shape))
    nat = rgeom.shape[0]
    if atoms_map and runiq is None and cuniq is None:
        # hashes are irrelevant when the 1-to-1 mapping is already known
        runiq = np.array([""] * nat)
        cuniq = np.array([""] * nat)
    if sorted(runiq) != sorted(cuniq):
        raise ValidationError("""atom subclasses unequal:\n {}\n {}""".format(runiq, cuniq))

    if run_mirror:
        # use aligner to check if system and its (xz-plane) mirror image are
        # superimposible and hence whether its worth doubling the number of Kabsch
        # runs below to check for mirror-image matches
        mcgeom = np.copy(cgeom)
        mcgeom[:, 1] *= -1.0
        exact = 1.0e-6
        # recursive self-call with mirror checking disabled to avoid recursion
        mrmsd, msolution = B787(
            mcgeom,
            cgeom,
            cuniq,
            cuniq,
            do_plot=False,
            verbose=0,
            atoms_map=False,
            mols_align=exact,
            run_mirror=False,
            uno_cutoff=0.1,
        )
        superimposable = mrmsd < exact
        if verbose >= 1 and superimposable:
            print(
                "Not testing for mirror-image matches (despite `run_mirror`) since system and its mirror are superimposable"
            )

    # initialization
    best_rmsd = 100.0  # [A]
    ocount = 0
    hold_solution = None
    run_resorting = run_resorting or not atoms_map
    # a_convergence: RMSD threshold below which the ordering search may stop early
    if mols_align is True:
        a_convergence = 1.0e-3
    elif mols_align is False:
        a_convergence = 0.0
    else:
        a_convergence = mols_align

    # initial presentation
    atomfmt2 = """ {} {:16.8f} {:16.8f} {:16.8f}"""
    if verbose >= 2:
        print("<<< Reference:")
        for at, _ in enumerate(runiq):
            print(atomfmt2.format(runiq[at][:6], *rgeom[at]))
        print("<<< Concern:")
        for at, _ in enumerate(cuniq):
            print(atomfmt2.format(cuniq[at][:6], *cgeom[at]))

    # start_rmsd is nonsense if not atoms_map
    start_rmsd = np.linalg.norm(cgeom - rgeom) * constants.bohr2angstroms / np.sqrt(nat)
    if verbose >= 1:
        print("Start RMSD = {:8.4f} [A] (naive)".format(start_rmsd))

    def _plausible_atom_orderings_wrapper(
        runiq, cuniq, rgeom, cgeom, run_resorting, algorithm="hungarian_uno", verbose=1, uno_cutoff=1.0e-3
    ):
        """Wrapper to _plausible_atom_orderings that bypasses it (`run_resorting=False`) when
        atoms of R & C known to be ordered. Easier to put logic here because _plausible is generator.
        """
        if run_resorting:
            return _plausible_atom_orderings(
                runiq, cuniq, rgeom, cgeom, algorithm=algorithm, verbose=verbose, uno_cutoff=uno_cutoff
            )
        else:
            # atoms already correspond 1-to-1: only the identity ordering is needed
            return [np.arange(rgeom.shape[0])]

    # Kabsch search: fit every plausible atom ordering, keep the lowest-RMSD alignment.
    t0 = time.time()
    tc = 0.0
    for ordering in _plausible_atom_orderings_wrapper(
        runiq, cuniq, rgeom, cgeom, run_resorting, algorithm=algorithm, verbose=verbose, uno_cutoff=uno_cutoff
    ):
        t1 = time.time()
        ocount += 1
        npordd = np.asarray(ordering)
        _, RR, TT = kabsch_align(rgeom, cgeom[npordd, :], weight=None)

        temp_solution = AlignmentMill(shift=TT, rotation=RR, atommap=npordd, mirror=False)
        tgeom = temp_solution.align_coordinates(cgeom, reverse=False)
        if verbose >= 4:
            print("temp geom diff\n", tgeom - rgeom)
        temp_rmsd = np.linalg.norm(tgeom - rgeom) * constants.bohr2angstroms / np.sqrt(rgeom.shape[0])
        temp_rmsd = np.around(temp_rmsd, decimals=8)
        t2 = time.time()
        tc += t2 - t1

        if temp_rmsd < best_rmsd:
            best_rmsd = temp_rmsd
            hold_solution = temp_solution
            if verbose >= 1:
                print("<<< trial {:8} {} yields RMSD {} >>>".format(ocount, npordd, temp_rmsd))
            if not run_to_completion and best_rmsd < a_convergence:
                break
        else:
            if verbose >= 3:
                print(" trial {:8} {} yields RMSD {}".format(ocount, npordd, temp_rmsd))

        if run_mirror and not superimposable:
            # also fit the y-reflected candidate geometry under the same ordering
            t1 = time.time()
            ocount += 1
            icgeom = np.copy(cgeom)
            icgeom[:, 1] *= -1.0
            _, RR, TT = kabsch_align(rgeom, icgeom[npordd, :], weight=None)

            temp_solution = AlignmentMill(shift=TT, rotation=RR, atommap=npordd, mirror=True)
            tgeom = temp_solution.align_coordinates(cgeom, reverse=False)
            if verbose >= 4:
                print("temp geom diff\n", tgeom - rgeom)
            temp_rmsd = np.linalg.norm(tgeom - rgeom) * constants.bohr2angstroms / np.sqrt(rgeom.shape[0])
            temp_rmsd = np.around(temp_rmsd, decimals=8)
            t2 = time.time()
            tc += t2 - t1

            if temp_rmsd < best_rmsd:
                best_rmsd = temp_rmsd
                hold_solution = temp_solution
                if verbose >= 1:
                    print("<<< trial {:8}m {} yields RMSD {} >>>".format(ocount - 1, npordd, temp_rmsd))
                if not run_to_completion and best_rmsd < a_convergence:
                    break
            else:
                if verbose >= 3:
                    print(" trial {:8}m {} yields RMSD {}".format(ocount - 1, npordd, temp_rmsd))

    t3 = time.time()
    if verbose >= 1:
        print("Total time [s] for {:6} iterations: {:.3}".format(ocount, t3 - t0))
        print("Hungarian time [s] for atom ordering: {:.3}".format(t3 - t0 - tc))
        print("Kabsch time [s] for mol alignment: {:.3}".format(tc))

    # rebuild the winning aligned geometry and double-check its RMSD
    ageom, auniq = hold_solution.align_mini_system(cgeom, cuniq, reverse=False)
    final_rmsd = np.linalg.norm(ageom - rgeom) * constants.bohr2angstroms / np.sqrt(nat)
    assert abs(best_rmsd - final_rmsd) < 1.0e-3

    if verbose >= 1:
        print("Final RMSD = {:8.4f} [A]".format(final_rmsd))
        print("Mirror match:", hold_solution.mirror)
        print(hold_solution)

    # final presentation & plotting
    if verbose >= 2:
        print("<<< Aligned:")
        for at, hsh in enumerate(auniq):
            print(atomfmt2.format(auniq[at][:6], *ageom[at]))
        print("<<< Aligned Diff:")
        for at, hsh in enumerate(auniq):
            print(atomfmt2.format(auniq[at][:6], *[ageom[at][i] - rgeom[at][i] for i in range(3)]))

    if do_plot:
        # TODO Missing import
        plot_coord(ref=rgeom, cand=ageom, orig=cgeom, comment="Final RMSD = {:8.4f}".format(final_rmsd))

    # sanity checks
    assert compare_values(
        _pseudo_nre(cuniq, cgeom),
        _pseudo_nre(auniq, ageom),
        "D: concern_mol-->returned_mol pNRE uncorrupted",
        atol=1.0e-4,
        quiet=(verbose < 2),
    )

    if mols_align is True:
        assert compare_values(
            _pseudo_nre(runiq, rgeom),
            _pseudo_nre(auniq, ageom),
            "D: concern_mol-->returned_mol pNRE matches ref_mol",
            atol=1.0e-4,
            quiet=(verbose < 2),
        )
        assert compare_values(
            rgeom, ageom, "D: concern_mol-->returned_mol geometry matches ref_mol", atol=1.0e-4, quiet=(verbose < 2)
        )
        assert compare_values(0.0, final_rmsd, "D: null RMSD", atol=1.0e-4, quiet=(verbose < 2))

    return final_rmsd, hold_solution
def _plausible_atom_orderings(ref, current, rgeom, cgeom, algorithm="hungarian_uno", verbose=1, uno_cutoff=1.0e-3):
    """Generate candidate atom orderings of `current` onto `ref`.

    Parameters
    ----------
    ref : list
        Hashes encoding distinguishable non-coord characteristics of reference
        molecule. Namely, atomic symbol, mass, basis sets?.
    current : list
        Hashes encoding distinguishable non-coord characteristics of trial
        molecule. Namely, atomic symbol, mass, basis sets?.

    Returns
    -------
    iterator of tuples
    """
    if sorted(ref) != sorted(current):
        raise ValidationError(
            """ref and current can't map to each other.\n""" + "R: " + str(ref) + "\nC: " + str(current)
        )

    # group atom indices by hash, separately for reference and current
    where = collections.defaultdict(list)
    for iuq, uq in enumerate(ref):
        where[uq].append(iuq)
    cwhere = collections.defaultdict(list)
    for iuq, uq in enumerate(current):
        cwhere[uq].append(iuq)

    # connect: per atom class, tuple of ref indices -> tuple of current indices
    connect = collections.OrderedDict()
    for k in where:
        connect[tuple(where[k])] = tuple(cwhere[k])

    def filter_permutative(rgp, cgp):
        """Original atom ordering generator for like subset of atoms (e.g., all carbons).
        Relies on permutation. Filtering depends on similarity of structure (see `atol` parameter).
        Only suitable for total system size up to about 20 atoms.
        """
        if verbose >= 1:
            print("""Space: {} <--> {}""".format(rgp, cgp))
        # compare chained neighbor distances of each permutation against the reference
        bnbn = [rrdistmat[first, second] for first, second in zip(rgp, rgp[1:])]
        for pm in itertools.permutations(cgp):
            cncn = [ccdistmat[first, second] for first, second in zip(pm, pm[1:])]
            if np.allclose(bnbn, cncn, atol=1.0):
                if verbose >= 1:
                    print("Candidate:", rgp, "<--", pm)
                yield pm

    def filter_hungarian_uno(rgp, cgp):
        """Hungarian algorithm on cost matrix based off headless (all Z same w/i space anyways) NRE.
        Having found _a_ solution and the reduced cost matrix, this still isn't likely to produce
        atom rearrangement fit for Kabsch b/c internal coordinate cost matrix doesn't nail down
        distance-equivalent atoms with different Cartesian coordinates like Cartesian-distance-matrix
        cost matrix does. So, form a bipartite graph from all essentially-zero connections between
        ref and concern and run Uno algorithm to enumerate them.
        """
        if verbose >= 1:
            print("""Space: {} <--> {}""".format(rgp, cgp))

        # formulate cost matrix from internal (not Cartesian) layouts of R & C
        npcgp = np.array(cgp)
        submatCC = ccnremat[np.ix_(cgp, cgp)]
        submatRR = rrnremat[np.ix_(rgp, rgp)]
        sumCC = 100.0 * np.sum(submatCC, axis=0)  # cost mat small if not scaled, this way like Z=Neon
        sumRR = 100.0 * np.sum(submatRR, axis=0)
        cost = np.zeros((len(cgp), len(rgp)))
        for j in range(cost.shape[1]):
            for i in range(cost.shape[0]):
                cost[i, j] = (sumCC[i] - sumRR[j]) ** 2
        if verbose >= 2:
            print("Cost:\n", cost)
        costcopy = np.copy(cost)  # other one gets manipulated by hungarian call

        # find _a_ best match btwn R & C atoms through Kuhn-Munkres (Hungarian) algorithm
        # * linear_sum_assigment call is exactly like `scipy.optimize.linear_sum_assignment(cost)` only with extra return
        t00 = time.time()
        (row_ind, col_ind), reducedcost = linear_sum_assignment(cost, return_cost=True)
        ptsCR = list(zip(row_ind, col_ind))
        ptsCR = sorted(ptsCR, key=lambda tup: tup[1])
        sumCR = costcopy[row_ind, col_ind].sum()
        t01 = time.time()
        if verbose >= 2:
            print("Reduced cost:\n", cost)
        if verbose >= 1:
            print("Hungarian time [s] for space: {:.3}".format(t01 - t00))

        # find _all_ best matches btwn R & C atoms through Uno algorithm, seeded from Hungarian sol'n
        edges = np.argwhere(reducedcost < uno_cutoff)
        gooduns = uno(edges, ptsCR)
        t02 = time.time()
        if verbose >= 1:
            print("Uno time [s] for space: {:.3}".format(t02 - t01))

        for gu in gooduns:
            gu2 = gu[:]
            gu2.sort(key=lambda x: x[1])  # resorts match into (r, c) = (info, range)
            subans = [p[0] for p in gu2]  # compacted to subans/lap format
            ans = tuple(npcgp[np.array(subans)])
            if verbose >= 3:
                print("Best Candidate ({:6.3}):".format(sumCR), rgp, "<--", ans, " from", cgp, subans)
            yield ans

    if algorithm == "permutative":
        ccdistmat = distance_matrix(cgeom, cgeom)
        rrdistmat = distance_matrix(rgeom, rgeom)
        algofn = filter_permutative

    if algorithm == "hungarian_uno":
        ccdistmat = distance_matrix(cgeom, cgeom)
        rrdistmat = distance_matrix(rgeom, rgeom)
        # reciprocal distances play the role of a headless NRE; self-terms zeroed
        with np.errstate(divide="ignore"):
            ccnremat = np.reciprocal(ccdistmat)
            rrnremat = np.reciprocal(rrdistmat)
        ccnremat[ccnremat == np.inf] = 0.0
        rrnremat[rrnremat == np.inf] = 0.0
        algofn = filter_hungarian_uno

        # Ensure (optional dependency) networkx exists
        if not which_import("networkx", return_bool=True):
            raise ModuleNotFoundError(
                """Python module networkx not found. Solve by installing it: `conda install networkx` or `pip install networkx`"""
            )  # pragma: no cover

    # collect candidate atom orderings from algofn for each of the atom classes,
    # recombine the classes with each other in every permutation (could maybe
    # add Hungarian here, too) as generator back to permutation_kabsch
    for cpmut in itertools.product(*itertools.starmap(algofn, connect.items())):
        atpat = [None] * len(ref)
        for igp, group in enumerate(cpmut):
            for iidx, idx in enumerate(list(connect.keys())[igp]):
                atpat[idx] = group[iidx]
        yield atpat
def kabsch_align(rgeom, cgeom, weight=None):
    """Optimal translation and rotation aligning `cgeom` onto `rgeom` (Kabsch).

    Minimizes the residual norm || R - U * C ||. Both geometries are
    (nat, 3) arrays in [a0] with 1-to-1 atom ordering.

    Parameters
    ----------
    rgeom : ndarray of float
        (nat, 3) reference/unchanged geometry.
    cgeom : ndarray of float
        (nat, 3) changeable geometry; same natom, units, and ordering as `rgeom`.
    weight : ndarray of float, optional
        (nat,) weights applied to both geometries (nothing to do with atom
        masses; weighting is not recommended, see upstream notes).

    Returns
    -------
    float, ndarray, ndarray
        RMSD [A] between `rgeom` and the optimally aligned geometry, the
        (3, 3) rotation matrix, and the (3,) translation vector [a0].

    Author: dsirianni
    """
    if weight is None:
        wts = np.ones(rgeom.shape[0])
    elif isinstance(weight, (list, np.ndarray)):
        wts = np.asarray(weight)
    else:
        raise ValidationError(f"""Unrecognized argument type {type(weight)} for kwarg 'weight'.""")

    nat = rgeom.shape[0]
    # Identical inputs can still yield a mixed non-identity translation/rotation
    # from the solver, so short-circuit to the exact identity transform.
    if np.allclose(rgeom, cgeom):
        return 0.0, np.identity(3), np.zeros(3)

    # Center both geometries on their (unit-weight) centroids and apply weights.
    ref_centroid = rgeom.sum(axis=0) / nat
    cnd_centroid = cgeom.sum(axis=0) / nat
    root_w = np.sqrt(wts[:, None])
    ref = np.subtract(rgeom, ref_centroid) * root_w
    cnd = np.subtract(cgeom, cnd_centroid) * root_w

    rotation = kabsch_quaternion(cnd.T, ref.T)  # U
    shift = cnd_centroid - rotation.dot(ref_centroid)

    aligned = cnd.dot(rotation)
    rmsd = np.linalg.norm(ref - aligned) * constants.bohr2angstroms / np.sqrt(np.sum(wts))
    return rmsd, rotation, shift
def kabsch_quaternion(P, Q):
"""Computes the optimal rotation matrix U which mapping a set of points P
onto the set of points Q according to the minimization of || Q - U * P ||,
using the unit quaternion formulation of the Kabsch algorithm.
Arguments:
<np.ndarray> P := MxN array. M=dimension of space, N=number of points.
<np.ndarray> Q := MxN array. M=dimension of space, N=number of points.
Returns:
<np.ndarray> U := Optimal MxM rotation matrix mapping P onto Q.
Author: dsirianni
"""
# Form covariance matrix
cov = Q.dot(P.T)
# Form the quaternion transformation matrix F
F = np.zeros((4, 4))
# diagonal
F[0, 0] = cov[0, 0] + cov[1, 1] + cov[2, 2]
F[1, 1] = cov[0, 0] - cov[1, 1] - cov[2, 2]
F[2, 2] = -cov[0, 0] + cov[1, 1] - cov[2, 2]
F[3, 3] = -cov[0, 0] - cov[1, 1] + cov[2, 2]
# Upper & lower triangle
F[1, 0] = F[0, 1] = cov[1, 2] - cov[2, 1]
F[2, 0] = F[0, 2] = cov[2, 0] - cov[0, 2]
F[3, 0] = F[0, 3] = cov[0, 1] - cov[1, 0]
F[2, 1] = F[1, 2] = cov[0, 1] + cov[1, 0]
F[3, 1] = F[1, 3] = cov[0, 2] + cov[2, 0]
F[3, 2] = F[2, 3] = cov[1, 2] + cov[2, 1]
# Compute ew, ev of F
ew, ev = np.linalg.eigh(F)
# Construct optimal rotation matrix from leading ev
q = ev[:, -1]
U = np.zeros((3, 3))
U[0, 0] = q[0] ** 2 + q[1] ** 2 - q[2] ** 2 - q[3] ** 2
U[0, 1] = 2 * (q[1] * q[2] - q[0] * q[3])
U[0, 2] = 2 * (q[1] * q[3] + q[0] * q[2])
U[1, 0] = 2 * (q[1] * q[2] + q[0] * q[3])
U[1, 1] = q[0] ** 2 - q[1] ** 2 + q[2] ** 2 - q[3] ** 2
U[1, 2] = 2 * (q[2] * q[3] - q[0] * q[1])
U[2, 0] = 2 * (q[1] * q[3] - q[0] * q[2])
U[2, 1] = 2 * (q[2] * q[3] + q[0] * q[1])
U[2, 2] = q[0] ** 2 - q[1] ** 2 - q[2] ** 2 + q[3] ** 2
return U
def compute_scramble(nat, do_resort=True, do_shift=True, do_rotate=True, deflection=1.0, do_mirror=False):
    """Generate a random or directed translation, rotation, and atom shuffling.

    Parameters
    ----------
    nat : int
        Number of atoms for which to prepare an atom mapping.
    do_resort : bool or array-like, optional
        `True` shuffles the atoms randomly, `False` leaves 1st atom 1st, etc.,
        and a (nat,) index array (e.g., [2, 1, 0]) applies that exact shuffle.
    do_shift : bool or array-like, optional
        `True` draws a random shift on [-3, 3) per dimension, `False` keeps
        the current origin, and a (3,) vector applies that exact shift.
    do_rotate : bool or array-like, optional
        `True` draws a random 3D rotation per the algorithm of Arvo, `False`
        keeps the current orientation, and a (3, 3) matrix applies that
        exact rotation.
    deflection : float, optional
        If `do_rotate`, how random a rotation: 0.0 is no change, 0.1 is a
        small perturbation, 1.0 is completely random.
    do_mirror : bool, optional
        Whether to set the mirror-reflection instruction. Changes the
        identity of the molecule, so off by default.

    Returns
    -------
    tuple
        AlignmentMill with fields (shift, rotation, atommap, mirror)
        as requested: identity, random, or specified.
    """
    # Atom mapping: random permutation, identity, or caller-specified indices.
    atommap = np.arange(nat)
    if do_resort is True:
        np.random.shuffle(atommap)
    elif do_resort is not False:
        atommap = np.array(do_resort)
        assert atommap.shape == (nat,)

    # Translation: random in [-3, 3)^3, zero, or caller-specified vector.
    if do_shift is True:
        shift = 6 * np.random.random_sample((3,)) - 3
    elif do_shift is False:
        shift = np.zeros((3,))
    else:
        shift = np.array(do_shift)
        assert shift.shape == (3,)

    # Rotation: random (Arvo), identity, or caller-specified matrix.
    if do_rotate is True:
        rotation = random_rotation_matrix(deflection=deflection)
    elif do_rotate is False:
        rotation = np.identity(3)
    else:
        rotation = np.array(do_rotate)
        assert rotation.shape == (3, 3)

    return AlignmentMill(shift=shift, rotation=rotation, atommap=atommap, mirror=do_mirror)
| [
"numpy.sum",
"numpy.random.random_sample",
"numpy.allclose",
"numpy.reciprocal",
"numpy.ones",
"collections.defaultdict",
"numpy.around",
"numpy.arange",
"numpy.linalg.norm",
"numpy.copy",
"itertools.permutations",
"numpy.identity",
"numpy.random.shuffle",
"numpy.asarray",
"numpy.argwher... | [((6800, 6811), 'time.time', 'time.time', ([], {}), '()\n', (6809, 6811), False, 'import time\n'), ((9304, 9315), 'time.time', 'time.time', ([], {}), '()\n', (9313, 9315), False, 'import time\n'), ((11996, 12025), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (12019, 12025), False, 'import collections\n'), ((12105, 12134), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (12128, 12134), False, 'import collections\n'), ((12220, 12245), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (12243, 12245), False, 'import collections\n'), ((19455, 19472), 'numpy.allclose', 'np.allclose', (['R', 'C'], {}), '(R, C)\n', (19466, 19472), True, 'import numpy as np\n'), ((19672, 19697), 'numpy.subtract', 'np.subtract', (['R', 'Rcentroid'], {}), '(R, Rcentroid)\n', (19683, 19697), True, 'import numpy as np\n'), ((19706, 19731), 'numpy.subtract', 'np.subtract', (['C', 'Ccentroid'], {}), '(C, Ccentroid)\n', (19717, 19731), True, 'import numpy as np\n'), ((19742, 19761), 'numpy.sqrt', 'np.sqrt', (['w[:, None]'], {}), '(w[:, None])\n', (19749, 19761), True, 'import numpy as np\n'), ((19771, 19790), 'numpy.sqrt', 'np.sqrt', (['w[:, None]'], {}), '(w[:, None])\n', (19778, 19790), True, 'import numpy as np\n'), ((20642, 20658), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (20650, 20658), True, 'import numpy as np\n'), ((21213, 21230), 'numpy.linalg.eigh', 'np.linalg.eigh', (['F'], {}), '(F)\n', (21227, 21230), True, 'import numpy as np\n'), ((21314, 21330), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (21322, 21330), True, 'import numpy as np\n'), ((23279, 23293), 'numpy.arange', 'np.arange', (['nat'], {}), '(nat)\n', (23288, 23293), True, 'import numpy as np\n'), ((4230, 4250), 'numpy.array', 'np.array', (["([''] * nat)"], {}), "([''] * nat)\n", (4238, 4250), True, 'import numpy as np\n'), ((4267, 4287), 'numpy.array', 'np.array', (["([''] * nat)"], 
{}), "([''] * nat)\n", (4275, 4287), True, 'import numpy as np\n'), ((4678, 4692), 'numpy.copy', 'np.copy', (['cgeom'], {}), '(cgeom)\n', (4685, 4692), True, 'import numpy as np\n'), ((6069, 6081), 'numpy.sqrt', 'np.sqrt', (['nat'], {}), '(nat)\n', (6076, 6081), True, 'import numpy as np\n'), ((7011, 7022), 'time.time', 'time.time', ([], {}), '()\n', (7020, 7022), False, 'import time\n'), ((7060, 7080), 'numpy.asarray', 'np.asarray', (['ordering'], {}), '(ordering)\n', (7070, 7080), True, 'import numpy as np\n'), ((7515, 7547), 'numpy.around', 'np.around', (['temp_rmsd'], {'decimals': '(8)'}), '(temp_rmsd, decimals=8)\n', (7524, 7547), True, 'import numpy as np\n'), ((7561, 7572), 'time.time', 'time.time', ([], {}), '()\n', (7570, 7572), False, 'import time\n'), ((9731, 9743), 'numpy.sqrt', 'np.sqrt', (['nat'], {}), '(nat)\n', (9738, 9743), True, 'import numpy as np\n'), ((12814, 12841), 'itertools.permutations', 'itertools.permutations', (['cgp'], {}), '(cgp)\n', (12836, 12841), False, 'import itertools\n'), ((13890, 13903), 'numpy.array', 'np.array', (['cgp'], {}), '(cgp)\n', (13898, 13903), True, 'import numpy as np\n'), ((14411, 14424), 'numpy.copy', 'np.copy', (['cost'], {}), '(cost)\n', (14418, 14424), True, 'import numpy as np\n'), ((14700, 14711), 'time.time', 'time.time', ([], {}), '()\n', (14709, 14711), False, 'import time\n'), ((14961, 14972), 'time.time', 'time.time', ([], {}), '()\n', (14970, 14972), False, 'import time\n'), ((15268, 15305), 'numpy.argwhere', 'np.argwhere', (['(reducedcost < uno_cutoff)'], {}), '(reducedcost < uno_cutoff)\n', (15279, 15305), True, 'import numpy as np\n'), ((15356, 15367), 'time.time', 'time.time', ([], {}), '()\n', (15365, 15367), False, 'import time\n'), ((19180, 19203), 'numpy.ones', 'np.ones', (['rgeom.shape[0]'], {}), '(rgeom.shape[0])\n', (19187, 19203), True, 'import numpy as np\n'), ((23328, 23357), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_elord'], {}), '(rand_elord)\n', (23345, 23357), True, 
'import numpy as np\n'), ((484, 521), 'numpy.linalg.norm', 'np.linalg.norm', (['(geom[at1] - geom[at2])'], {}), '(geom[at1] - geom[at2])\n', (498, 521), True, 'import numpy as np\n'), ((6010, 6039), 'numpy.linalg.norm', 'np.linalg.norm', (['(cgeom - rgeom)'], {}), '(cgeom - rgeom)\n', (6024, 6039), True, 'import numpy as np\n'), ((7471, 7494), 'numpy.sqrt', 'np.sqrt', (['rgeom.shape[0]'], {}), '(rgeom.shape[0])\n', (7478, 7494), True, 'import numpy as np\n'), ((8125, 8136), 'time.time', 'time.time', ([], {}), '()\n', (8134, 8136), False, 'import time\n'), ((8182, 8196), 'numpy.copy', 'np.copy', (['cgeom'], {}), '(cgeom)\n', (8189, 8196), True, 'import numpy as np\n'), ((8692, 8724), 'numpy.around', 'np.around', (['temp_rmsd'], {'decimals': '(8)'}), '(temp_rmsd, decimals=8)\n', (8701, 8724), True, 'import numpy as np\n'), ((8742, 8753), 'time.time', 'time.time', ([], {}), '()\n', (8751, 8753), False, 'import time\n'), ((9672, 9701), 'numpy.linalg.norm', 'np.linalg.norm', (['(ageom - rgeom)'], {}), '(ageom - rgeom)\n', (9686, 9701), True, 'import numpy as np\n'), ((12941, 12974), 'numpy.allclose', 'np.allclose', (['bnbn', 'cncn'], {'atol': '(1.0)'}), '(bnbn, cncn, atol=1.0)\n', (12952, 12974), True, 'import numpy as np\n'), ((13932, 13948), 'numpy.ix_', 'np.ix_', (['cgp', 'cgp'], {}), '(cgp, cgp)\n', (13938, 13948), True, 'import numpy as np\n'), ((13978, 13994), 'numpy.ix_', 'np.ix_', (['rgp', 'rgp'], {}), '(rgp, rgp)\n', (13984, 13994), True, 'import numpy as np\n'), ((14020, 14044), 'numpy.sum', 'np.sum', (['submatCC'], {'axis': '(0)'}), '(submatCC, axis=0)\n', (14026, 14044), True, 'import numpy as np\n'), ((14123, 14147), 'numpy.sum', 'np.sum', (['submatRR'], {'axis': '(0)'}), '(submatRR, axis=0)\n', (14129, 14147), True, 'import numpy as np\n'), ((16220, 16248), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (16231, 16248), True, 'import numpy as np\n'), ((16273, 16297), 'numpy.reciprocal', 'np.reciprocal', 
(['ccdistmat'], {}), '(ccdistmat)\n', (16286, 16297), True, 'import numpy as np\n'), ((16321, 16345), 'numpy.reciprocal', 'np.reciprocal', (['rrdistmat'], {}), '(rrdistmat)\n', (16334, 16345), True, 'import numpy as np\n'), ((19267, 19285), 'numpy.asarray', 'np.asarray', (['weight'], {}), '(weight)\n', (19277, 19285), True, 'import numpy as np\n'), ((19567, 19581), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (19578, 19581), True, 'import numpy as np\n'), ((19583, 19594), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (19591, 19594), True, 'import numpy as np\n'), ((19903, 19924), 'numpy.linalg.norm', 'np.linalg.norm', (['(R - C)'], {}), '(R - C)\n', (19917, 19924), True, 'import numpy as np\n'), ((19962, 19971), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (19968, 19971), True, 'import numpy as np\n'), ((23431, 23450), 'numpy.array', 'np.array', (['do_resort'], {}), '(do_resort)\n', (23439, 23450), True, 'import numpy as np\n'), ((23627, 23641), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (23635, 23641), True, 'import numpy as np\n'), ((23673, 23691), 'numpy.array', 'np.array', (['do_shift'], {}), '(do_shift)\n', (23681, 23691), True, 'import numpy as np\n'), ((23876, 23890), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (23887, 23890), True, 'import numpy as np\n'), ((23922, 23941), 'numpy.array', 'np.array', (['do_rotate'], {}), '(do_rotate)\n', (23930, 23941), True, 'import numpy as np\n'), ((6763, 6788), 'numpy.arange', 'np.arange', (['rgeom.shape[0]'], {}), '(rgeom.shape[0])\n', (6772, 6788), True, 'import numpy as np\n'), ((7412, 7441), 'numpy.linalg.norm', 'np.linalg.norm', (['(tgeom - rgeom)'], {}), '(tgeom - rgeom)\n', (7426, 7441), True, 'import numpy as np\n'), ((8644, 8667), 'numpy.sqrt', 'np.sqrt', (['rgeom.shape[0]'], {}), '(rgeom.shape[0])\n', (8651, 8667), True, 'import numpy as np\n'), ((23544, 23573), 'numpy.random.random_sample', 'np.random.random_sample', (['(3,)'], {}), '((3,))\n', (23567, 23573), True, 
'import numpy as np\n'), ((8585, 8614), 'numpy.linalg.norm', 'np.linalg.norm', (['(tgeom - rgeom)'], {}), '(tgeom - rgeom)\n', (8599, 8614), True, 'import numpy as np\n'), ((15720, 15736), 'numpy.array', 'np.array', (['subans'], {}), '(subans)\n', (15728, 15736), True, 'import numpy as np\n')] |
from spektral.layers import GraphConv, ChebConv, EdgeConditionedConv, GraphAttention, GraphConvSkip, ARMAConv, APPNP, \
GraphSageConv
from keras import backend as K, Model, Input
import numpy as np
import tensorflow as tf
# Data-mode identifiers: a single graph, a batch of equally-sized graphs,
# or mixed mode (one shared adjacency, batched node features).
SINGLE, BATCH, MIXED = 1, 2, 3 # Single, batch, mixed
# Keys used in the TESTS registry below.
LAYER_K_, MODES_K_, KWARGS_K_ = 'layer', 'modes', 'kwargs'
# Registry of layer classes to test, the modes each supports, and the
# constructor kwargs to instantiate them with.  The special kwarg
# 'edges': True flags layers that additionally consume edge features.
TESTS = [
    {
        LAYER_K_: GraphConv,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8}
    },
    {
        LAYER_K_: ChebConv,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8}
    },
    {
        LAYER_K_: GraphSageConv,
        MODES_K_: [SINGLE],
        KWARGS_K_: {'channels': 8}
    },
    {
        LAYER_K_: EdgeConditionedConv,
        MODES_K_: [BATCH],
        KWARGS_K_: {'channels': 8, 'edges': True}
    },
    {
        LAYER_K_: GraphAttention,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8}
    },
    {
        LAYER_K_: GraphConvSkip,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8}
    },
    {
        LAYER_K_: ARMAConv,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'T': 2, 'K': 2, 'recurrent': True}
    },
    {
        LAYER_K_: APPNP,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'mlp_channels': 16, 'H': 2}
    }
]
# Shared TF1-style session used to initialise and run all test models.
sess = K.get_session()
batch_size = 32
N = 11   # nodes per graph
F = 7    # node feature dimension
S = 3    # edge feature dimension
A = np.ones((N, N))                 # dense all-ones adjacency matrix
X = np.random.normal(size=(N, F))   # random node features
E = np.random.normal(size=(N, N, S))  # random edge features
def _test_single_mode(layer, **kwargs):
    """Run `layer` in single mode (one graph) and check the output shape."""
    adj_in = Input(shape=(None,))
    feat_in = Input(shape=(F,))
    out_tensor = layer(**kwargs)([feat_in, adj_in])
    model = Model([feat_in, adj_in], out_tensor)
    sess.run(tf.global_variables_initializer())
    result = sess.run(model.output, feed_dict={feat_in: X, adj_in: A})
    assert result.shape == (N, kwargs['channels'])
def _test_batch_mode(layer, **kwargs):
    """Run `layer` in batch mode (stack of identical graphs) and check shapes.

    The harness-only kwarg 'edges' (not a layer argument) requests an extra
    edge-feature input of shape (N, N, S).
    """
    adj_batch = np.stack([A] * batch_size)
    feat_batch = np.stack([X] * batch_size)
    adj_in = Input(shape=(N, N))
    feat_in = Input(shape=(N, F))
    model_inputs = [feat_in, adj_in]
    feed = {feat_in: feat_batch, adj_in: adj_batch}
    if kwargs.get('edges'):
        kwargs.pop('edges')  # drop the flag before instantiating the layer
        edge_batch = np.stack([E] * batch_size)
        edge_in = Input(shape=(N, N, S))
        model_inputs.append(edge_in)
        feed[edge_in] = edge_batch
    out_tensor = layer(**kwargs)(model_inputs)
    model = Model(model_inputs, out_tensor)
    sess.run(tf.global_variables_initializer())
    result = sess.run(model.output, feed_dict=feed)
    assert result.shape == (batch_size, N, kwargs['channels'])
def _test_mixed_mode(layer, **kwargs):
    """Run `layer` in mixed mode: one shared adjacency, batched features."""
    feat_batch = np.stack([X] * batch_size)
    adj_in = Input(shape=(N,))
    feat_in = Input(shape=(N, F))
    model_inputs = [feat_in, adj_in]
    feed = {feat_in: feat_batch, adj_in: A}
    out_tensor = layer(**kwargs)(model_inputs)
    model = Model(model_inputs, out_tensor)
    sess.run(tf.global_variables_initializer())
    result = sess.run(model.output, feed_dict=feed)
    assert result.shape == (batch_size, N, kwargs['channels'])
def _test_get_config(layer, **kwargs):
if kwargs.get('edges'):
kwargs.pop('edges')
layer_instance = layer(**kwargs)
config = layer_instance.get_config()
assert layer(**config)
def test_layers():
    """Run every registered layer through each of its supported data modes."""
    for spec in TESTS:
        layer_cls = spec[LAYER_K_]
        layer_kwargs = spec[KWARGS_K_]
        for mode in spec[MODES_K_]:
            if mode == SINGLE:
                _test_single_mode(layer_cls, **layer_kwargs)
            elif mode == BATCH:
                _test_batch_mode(layer_cls, **layer_kwargs)
            elif mode == MIXED:
                _test_mixed_mode(layer_cls, **layer_kwargs)
            # Config round-trip is exercised once per supported mode.
            _test_get_config(layer_cls, **layer_kwargs)
| [
"numpy.stack",
"keras.Input",
"keras.Model",
"keras.backend.get_session",
"tensorflow.global_variables_initializer",
"numpy.ones",
"numpy.random.normal"
] | [((1378, 1393), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (1391, 1393), True, 'from keras import backend as K, Model, Input\n'), ((1434, 1449), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (1441, 1449), True, 'import numpy as np\n'), ((1454, 1483), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, F)'}), '(size=(N, F))\n', (1470, 1483), True, 'import numpy as np\n'), ((1488, 1520), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, N, S)'}), '(size=(N, N, S))\n', (1504, 1520), True, 'import numpy as np\n'), ((1574, 1594), 'keras.Input', 'Input', ([], {'shape': '(None,)'}), '(shape=(None,))\n', (1579, 1594), False, 'from keras import backend as K, Model, Input\n'), ((1606, 1623), 'keras.Input', 'Input', ([], {'shape': '(F,)'}), '(shape=(F,))\n', (1611, 1623), False, 'from keras import backend as K, Model, Input\n'), ((1716, 1743), 'keras.Model', 'Model', (['[X_in, A_in]', 'output'], {}), '([X_in, A_in], output)\n', (1721, 1743), False, 'from keras import backend as K, Model, Input\n'), ((1966, 1992), 'numpy.stack', 'np.stack', (['([A] * batch_size)'], {}), '([A] * batch_size)\n', (1974, 1992), True, 'import numpy as np\n'), ((2007, 2033), 'numpy.stack', 'np.stack', (['([X] * batch_size)'], {}), '([X] * batch_size)\n', (2015, 2033), True, 'import numpy as np\n'), ((2046, 2065), 'keras.Input', 'Input', ([], {'shape': '(N, N)'}), '(shape=(N, N))\n', (2051, 2065), False, 'from keras import backend as K, Model, Input\n'), ((2077, 2096), 'keras.Input', 'Input', ([], {'shape': '(N, F)'}), '(shape=(N, F))\n', (2082, 2096), False, 'from keras import backend as K, Model, Input\n'), ((2458, 2479), 'keras.Model', 'Model', (['inputs', 'output'], {}), '(inputs, output)\n', (2463, 2479), False, 'from keras import backend as K, Model, Input\n'), ((2705, 2731), 'numpy.stack', 'np.stack', (['([X] * batch_size)'], {}), '([X] * batch_size)\n', (2713, 2731), True, 'import numpy as np\n'), ((2744, 2761), 'keras.Input', 'Input', 
([], {'shape': '(N,)'}), '(shape=(N,))\n', (2749, 2761), False, 'from keras import backend as K, Model, Input\n'), ((2773, 2792), 'keras.Input', 'Input', ([], {'shape': '(N, F)'}), '(shape=(N, F))\n', (2778, 2792), False, 'from keras import backend as K, Model, Input\n'), ((2946, 2967), 'keras.Model', 'Model', (['inputs', 'output'], {}), '(inputs, output)\n', (2951, 2967), False, 'from keras import backend as K, Model, Input\n'), ((1758, 1791), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1789, 1791), True, 'import tensorflow as tf\n'), ((2245, 2271), 'numpy.stack', 'np.stack', (['([E] * batch_size)'], {}), '([E] * batch_size)\n', (2253, 2271), True, 'import numpy as np\n'), ((2287, 2309), 'keras.Input', 'Input', ([], {'shape': '(N, N, S)'}), '(shape=(N, N, S))\n', (2292, 2309), False, 'from keras import backend as K, Model, Input\n'), ((2494, 2527), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2525, 2527), True, 'import tensorflow as tf\n'), ((2982, 3015), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3013, 3015), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# coding: utf-8
import os
import csv
import pdb
import librosa
import ml_metrics
import numpy as np
import scipy as sp
import soundfile as sf
import IPython.display as ipd
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate
from sklearn.model_selection import KFold
from scipy.spatial.distance import euclidean
from itertools import permutations, combinations, product
# Parameters
num_iterations = 5
modes = ['Random','Heuristic','CAE-B-Original','CAE-B','CAE','CAE-SL','CAE-DL','CAE-SDL']
# Calculate rankings: rankings[md, it, i, j] is the 0-based rank at which
# imitation j of group i retrieves its matching reference j under mode md.
rankings = np.zeros((len(modes), num_iterations, 14, 18))
for md in range(len(modes)):
    mode = modes[md]
    for it in range(num_iterations):
        # Load (or synthesise, for the Random baseline) the reference and
        # imitation embeddings for this mode/iteration.
        if mode == 'Heuristic':
            embeddings_ref = np.load('data/processed/' + mode + '/Dataset_Ref_Heuristic.npy')
            embeddings_imi_pre = np.load('data/processed/' + mode + '/Dataset_Imi_Heuristic.npy')
        elif mode == 'Random':
            # Deterministic per-iteration seeds keep the baseline reproducible.
            np.random.seed(it)
            embeddings_ref = np.random.rand(18, 32)
            np.random.seed(42 + it)
            embeddings_imi_pre = np.random.rand(252, 32)
        else:
            embeddings_ref = np.load('data/processed/' + mode + '/embeddings_ref_' + mode + '_' + str(it) + '.npy')
            embeddings_imi_pre = np.load('data/processed/' + mode + '/embeddings_imi_' + mode + '_' + str(it) + '.npy')
        if mode == 'Heuristic':
            # Standardise each heuristic feature using stats over refs + imitations.
            embeddings_all = np.vstack((embeddings_ref, embeddings_imi_pre))
            for n in range(embeddings_ref.shape[1]):
                mean = np.mean(embeddings_all[:, n])
                std = np.std(embeddings_all[:, n])
                embeddings_ref[:, n] = (embeddings_ref[:, n] - mean) / (std + 1e-16)
                embeddings_imi_pre[:, n] = (embeddings_imi_pre[:, n] - mean) / (std + 1e-16)
        # Regroup the flat imitation list into 14 groups of 18 imitations:
        # 13 full slices plus the remaining tail (the tail append must sit
        # OUTSIDE the loop, otherwise the groups would be ragged/overlapping).
        embeddings_imi = []
        for n in range(13):
            embeddings_imi.append(embeddings_imi_pre[n*18:(n+1)*18])
        embeddings_imi.append(embeddings_imi_pre[(n+1)*18:])
        embeddings_imi = np.array(embeddings_imi)
        # Pairwise Euclidean distances between every reference and imitation.
        distances = np.zeros((14, 18, 18))
        for i in range(14):
            for j in range(18):
                for k in range(18):
                    embeddings_ref_sample = embeddings_ref[j]
                    embeddings_imi_sample = embeddings_imi[i, k]
                    distances[i, j, k] = euclidean(embeddings_ref_sample, embeddings_imi_sample)
        # Rank of the matching imitation for each reference.
        for i in range(14):
            for j in range(18):
                # BUG FIX: the original reused the name 'rankings_raw' for this
                # per-query temp, silently clobbering the module-level
                # preallocated array of the same name; use a local name instead.
                order = np.argsort(distances[i, j])
                rankings[md, it, i, j] = np.where(order == j)[0][0]
# Calculate average precision
# NOTE(review): despite the header above, this computes the Mean Reciprocal
# Rank (MRR): ranks are 0-based, hence the +1 before taking reciprocals.
reciprocal_ranks = np.zeros((len(modes),num_iterations))
for md in range(len(modes)):
    for it in range(num_iterations):
        reciprocal_ranks[md,it] = np.mean(np.reciprocal(rankings[md,it]+1))
    mean = np.round(np.mean(reciprocal_ranks[md]),3)
    # 95% CI half-width; the 1.96/sqrt(n) factor is applied inside np.std,
    # which is mathematically equivalent to scaling the std afterwards
    # (std(c*x) == c*std(x)) — but verify that this placement was intended.
    ci95 = np.round(np.std(reciprocal_ranks[md]*(1.96/(num_iterations**(0.5)))),3)
    print('MRR ' + modes[md] + ': ' + str(mean) + ' +- ' + str(ci95))
# Plot ranking curve: for each mode, the cumulative fraction of queries whose
# matching imitation is retrieved at rank <= r, averaged over iterations.
colours = ['black','purple','yellow','grey','cyan','orange','lime','red']
plt.figure()
for md in range(len(modes)):
    mode = modes[md]
    # rank_curve accumulates the cumulative fraction over all iterations;
    # it is divided by num_iterations when plotted below.
    rank_curve = np.zeros(18)
    for it in range(num_iterations):
        accumulator = 0
        for rank in range(18):
            count_rank = np.count_nonzero(rankings[md,it]==rank)
            # cumulative count up to and including this rank, as a fraction
            rank_curve[rank] += (count_rank+accumulator)/rankings[md,it].size
            accumulator += count_rank
    plt.scatter(np.arange(18)+1,rank_curve/num_iterations,marker='D',edgecolor='black',s=150,c=colours[md],label=mode)
plt.legend()
plt.show() | [
"numpy.load",
"matplotlib.pyplot.show",
"numpy.count_nonzero",
"numpy.random.seed",
"scipy.spatial.distance.euclidean",
"numpy.std",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.reciprocal",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"numpy.arange",
"n... | [((3343, 3355), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3353, 3355), True, 'import matplotlib.pyplot as plt\n'), ((3828, 3840), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3838, 3840), True, 'import matplotlib.pyplot as plt\n'), ((3841, 3851), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3849, 3851), True, 'import matplotlib.pyplot as plt\n'), ((3423, 3435), 'numpy.zeros', 'np.zeros', (['(18)'], {}), '(18)\n', (3431, 3435), True, 'import numpy as np\n'), ((2179, 2203), 'numpy.array', 'np.array', (['embeddings_imi'], {}), '(embeddings_imi)\n', (2187, 2203), True, 'import numpy as np\n'), ((2255, 2277), 'numpy.zeros', 'np.zeros', (['(14, 18, 18)'], {}), '((14, 18, 18))\n', (2263, 2277), True, 'import numpy as np\n'), ((3061, 3090), 'numpy.mean', 'np.mean', (['reciprocal_ranks[md]'], {}), '(reciprocal_ranks[md])\n', (3068, 3090), True, 'import numpy as np\n'), ((3114, 3175), 'numpy.std', 'np.std', (['(reciprocal_ranks[md] * (1.96 / num_iterations ** 0.5))'], {}), '(reciprocal_ranks[md] * (1.96 / num_iterations ** 0.5))\n', (3120, 3175), True, 'import numpy as np\n'), ((929, 993), 'numpy.load', 'np.load', (["('data/processed/' + mode + '/Dataset_Ref_Heuristic.npy')"], {}), "('data/processed/' + mode + '/Dataset_Ref_Heuristic.npy')\n", (936, 993), True, 'import numpy as np\n'), ((1027, 1091), 'numpy.load', 'np.load', (["('data/processed/' + mode + '/Dataset_Imi_Heuristic.npy')"], {}), "('data/processed/' + mode + '/Dataset_Imi_Heuristic.npy')\n", (1034, 1091), True, 'import numpy as np\n'), ((1603, 1650), 'numpy.vstack', 'np.vstack', (['(embeddings_ref, embeddings_imi_pre)'], {}), '((embeddings_ref, embeddings_imi_pre))\n', (1612, 1650), True, 'import numpy as np\n'), ((3007, 3042), 'numpy.reciprocal', 'np.reciprocal', (['(rankings[md, it] + 1)'], {}), '(rankings[md, it] + 1)\n', (3020, 3042), True, 'import numpy as np\n'), ((3553, 3595), 'numpy.count_nonzero', 'np.count_nonzero', (['(rankings[md, it] == rank)'], 
{}), '(rankings[md, it] == rank)\n', (3569, 3595), True, 'import numpy as np\n'), ((3725, 3738), 'numpy.arange', 'np.arange', (['(18)'], {}), '(18)\n', (3734, 3738), True, 'import numpy as np\n'), ((1133, 1151), 'numpy.random.seed', 'np.random.seed', (['it'], {}), '(it)\n', (1147, 1151), True, 'import numpy as np\n'), ((1181, 1203), 'numpy.random.rand', 'np.random.rand', (['(18)', '(32)'], {}), '(18, 32)\n', (1195, 1203), True, 'import numpy as np\n'), ((1215, 1238), 'numpy.random.seed', 'np.random.seed', (['(42 + it)'], {}), '(42 + it)\n', (1229, 1238), True, 'import numpy as np\n'), ((1270, 1293), 'numpy.random.rand', 'np.random.rand', (['(252)', '(32)'], {}), '(252, 32)\n', (1284, 1293), True, 'import numpy as np\n'), ((1726, 1755), 'numpy.mean', 'np.mean', (['embeddings_all[:, n]'], {}), '(embeddings_all[:, n])\n', (1733, 1755), True, 'import numpy as np\n'), ((1777, 1805), 'numpy.std', 'np.std', (['embeddings_all[:, n]'], {}), '(embeddings_all[:, n])\n', (1783, 1805), True, 'import numpy as np\n'), ((2714, 2741), 'numpy.argsort', 'np.argsort', (['distances[i, j]'], {}), '(distances[i, j])\n', (2724, 2741), True, 'import numpy as np\n'), ((2537, 2592), 'scipy.spatial.distance.euclidean', 'euclidean', (['embeddings_ref_sample', 'embeddings_imi_sample'], {}), '(embeddings_ref_sample, embeddings_imi_sample)\n', (2546, 2592), False, 'from scipy.spatial.distance import euclidean\n'), ((2779, 2806), 'numpy.where', 'np.where', (['(rankings_raw == j)'], {}), '(rankings_raw == j)\n', (2787, 2806), True, 'import numpy as np\n')] |
# general
import logging
import json
import os
import random
import math
from collections import defaultdict, Counter
import glob
import shutil, io, base64
from typing import OrderedDict
# general package
from natsort import natsorted
import pandas as pd
import numpy as np
import regex as re
import h5py
# image
import skimage
from skimage import measure as sk_measure
from adjustText import adjust_text
# processing
import ctypes
import subprocess
import dill as pickle
#vis
import dabest
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
#methods
import umap
import hdbscan
import diffxpy.api as de
import anndata
from scipy import ndimage, stats
from scipy.spatial.distance import squareform, pdist
import scipy.cluster as spc
from scipy.cluster.vq import kmeans2
from sklearn import cluster, decomposition
from fcmeans import FCM
from .imzml import IMZMLExtract
from .regions import SpectraRegion, RegionClusterer
#web/html
import jinja2
# applications
import progressbar
def makeProgressBar():
    """Return a ProgressBar showing a bar, percentage and adaptive ETA."""
    widget_list = [progressbar.Bar(), ' ', progressbar.Percentage(), ' ', progressbar.AdaptiveETA()]
    return progressbar.ProgressBar(widgets=widget_list)
import abc
import networkx as nx
class RegionModel:
    """Directed graph of weighted adjacency relations between region labels."""

    def __init__(self, no_relation_weight=0, bi_directional=True) -> None:
        """
        Args:
            no_relation_weight: score returned for pairs without a relation.
            bi_directional: if True, every added relation is mirrored.
        """
        self.no_relation_weight = no_relation_weight
        self.bi_directional = bi_directional
        self.relations = nx.DiGraph()

    def from_image(self, filepath, mapping=None, diagonal=True):
        """Populate relations from a labelled region image stored as .npy.

        Args:
            filepath: path to a 2D numpy array of region labels.
            mapping: optional dict mapping raw image labels to region ids;
                defaults to the identity mapping over the observed labels.
            diagonal: also treat the lower-right diagonal neighbour as adjacent.

        Raises:
            ValueError: if the image contains labels not covered by `mapping`.
        """
        regImg = np.load(filepath)
        if mapping is None:
            mapping = {x: x for x in np.unique(regImg)}  # identity mapping
        if not set(np.unique(regImg)).issubset([x for x in mapping]):
            # BUG FIX: the original raised a bare ValueError with no message.
            raise ValueError("mapping does not cover all labels present in the image")
        adjacencyCounter = Counter()
        for i in range(0, regImg.shape[0]):
            for j in range(0, regImg.shape[1]):
                curFieldRegion = regImg[i, j]
                otherRegions = []
                # right neighbour
                if i + 1 < regImg.shape[0]:
                    otherRegions.append(regImg[i + 1, j])
                # bottom neighbour
                if j + 1 < regImg.shape[1]:
                    otherRegions.append(regImg[i, j + 1])
                # lower-right diagonal neighbour
                if diagonal and i + 1 < regImg.shape[0] and j + 1 < regImg.shape[1]:
                    otherRegions.append(regImg[i + 1, j + 1])
                for oRegion in otherRegions:
                    adjacencyCounter[(mapping[curFieldRegion], mapping[oRegion])] += 1
                    adjacencyCounter[(mapping[oRegion], mapping[curFieldRegion])] += 1
        for interaction in adjacencyCounter:
            self.add_relation(interaction[0], interaction[1], weight=1)

    def add_relation(self, src, tgt, weight=1.0):
        """Add a weighted relation src -> tgt (and tgt -> src if bi-directional)."""
        self.relations.add_edge(src, tgt, weight=weight)
        if self.bi_directional:
            self.relations.add_edge(tgt, src, weight=weight)

    def get_score(self, src, tgt):
        """Return the relation weight for (src, tgt), or no_relation_weight."""
        if (src, tgt) in self.relations.edges:
            return self.relations.edges[(src, tgt)]["weight"]
        return self.no_relation_weight

    def plot_model(self):
        """Draw the relation graph; labels and colours reflect weights."""
        plt.figure()
        # BUG FIX: nodes never receive a 'weight' attribute (only edges do),
        # so the original direct lookup raised KeyError; default to 0.
        labels = {n: "{} ({})".format(n, self.relations.nodes[n].get('weight', 0)) for n in self.relations.nodes}
        colors = [self.relations.nodes[n].get('weight', 0) for n in self.relations.nodes]
        edgeColors = [self.relations.edges[e]['weight'] for e in self.relations.edges]
        # BUG FIX: the networkx keyword is 'edge_color', not 'edge_colors'.
        nx.draw(self.relations, with_labels=True, labels=labels, node_color=colors, edge_color=edgeColors)
        plt.show()
        plt.close()
class RegionEmbedding(metaclass=abc.ABCMeta):
    """Abstract base class for dimensionality-reduction embeddings of a SpectraRegion."""
    def __init__(self, region:SpectraRegion) -> None:
        # Region whose spectra will be embedded.
        self.region = region
        # Filled by fit_transform(): matrix of embedded spectra.
        self.embedded_matrix = None
        self.logger = None
        self.__set_logger()
    def __set_logger(self):
        # One logger per concrete embedding class (named after the subclass).
        self.logger = logging.getLogger(self.methodname())
        self.logger.setLevel(logging.INFO)
        # NOTE(review): the handler is only attached when the *root* logger
        # has no handlers (logging.getLogger() is the root, not self.logger) —
        # confirm this guard is intended.
        if not logging.getLogger().hasHandlers():
            consoleHandler = logging.StreamHandler()
            consoleHandler.setLevel(logging.INFO)
            self.logger.addHandler(consoleHandler)
            formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
            consoleHandler.setFormatter(formatter)
    @classmethod
    def __subclasshook__(cls, subclass):
        # Structural (duck-typed) subclass check: anything providing
        # fit_transform(), embedding() and a 'region' attribute qualifies.
        return (hasattr(subclass, 'fit_transform') and callable(subclass.fit_transform) and
                hasattr(subclass, 'embedding') and callable(subclass.embedding) and
                hasattr(subclass, 'region')
                )
    def methodname(self):
        """Return the name of the concrete embedding class (used for logging)."""
        return self.__class__.__name__
    @abc.abstractmethod
    def embedding(self) -> np.array:
        """Returns the final embedding for given region
        Raises:
            NotImplementedError: (abstract class)
        Returns:
            np.array: embedding with one vector per pixel of the region
        """
        raise NotImplementedError
    @abc.abstractmethod
    def fit_transform(self, verbose:bool=False) -> np.array:
        """
        Fits the embedding to the region's spectra and returns the embedding.
        Args:
            verbose (bool, optional): Verbose output. Defaults to False.
        Raises:
            NotImplementedError: (abstract class)
        Returns:
            np.array: embedded matrix of shape (n_samples, dimensions)
        """
        raise NotImplementedError
class PCAEmbedding(RegionEmbedding):
    """Principal-component embedding of a SpectraRegion's spectra."""

    def __init__(self, region: SpectraRegion, dimensions: int=2) -> None:
        """
        Args:
            region: region whose spectra are embedded.
            dimensions: number of principal components to keep.
        """
        super().__init__(region)
        self.dimensions = dimensions
        self.idx2coord = None         # sample row index -> (x, y) pixel coordinate
        self.embedding_object = None  # fitted sklearn PCA instance

    def fit_transform(self, verbose: bool = False) -> np.array:
        """Fit PCA on the region's spectra and return the embedded matrix.

        BUG FIX: previously returned None despite the `-> np.array` annotation
        and the RegionEmbedding contract; now returns the
        (n_samples, dimensions) embedding.
        """
        elem_matrix, self.idx2coord = self.region.prepare_elem_matrix()
        # elem_matrix dims: (n_samples, n_features)
        self.logger.info("PCA reduction")
        self.embedding_object = decomposition.PCA(
            n_components=self.dimensions,
            random_state=42,
        )
        self.logger.info("PCA fit+transform")
        self.embedded_matrix = self.embedding_object.fit_transform(elem_matrix)
        return self.embedded_matrix

    def embedding(self) -> np.array:
        """Return the embedding as an image-shaped array (rows, cols, dimensions)."""
        outArray = np.zeros((self.region.region_array.shape[0], self.region.region_array.shape[1], self.dimensions))
        for idx in self.idx2coord:
            (x, y) = self.idx2coord[idx]
            outArray[x, y, :] = self.embedded_matrix[idx]
        return outArray

    def covariances(self):
        """Return the data covariance estimated by the fitted PCA model."""
        return self.embedding_object.get_covariance()

    def loading(self):
        """Return PCA loadings as a DataFrame indexed by m/z values.

        See https://scentellegher.github.io/machine-learning/2020/01/27/pca-loadings-sklearn.html
        """
        computedPCs = ["PC{}".format(x) for x in range(1, self.dimensions+1)]  # 1-based PC names
        loadings = pd.DataFrame(self.embedding_object.components_.T, columns=computedPCs, index=self.region.idx2mass)
        return loadings

    def explained_variance_ratio(self):
        """Return the fraction of variance explained by each component."""
        return self.embedding_object.explained_variance_ratio_

    def plot_embedding(self):
        """Scatter-plot the first two embedding dimensions."""
        dimExplained = self.embedding_object.explained_variance_ratio_
        reductionName = "PCA"
        plt.figure(figsize=(12, 12))
        plt.scatter(self.embedded_matrix[:, 0], self.embedded_matrix[:, 1])
        plt.xlabel("{} dim1 ({:.2})".format(reductionName, dimExplained[0]))
        plt.ylabel("{} dim2 ({:.2})".format(reductionName, dimExplained[1]))
        plt.gca().set_aspect('equal', adjustable='box')
        plt.legend(bbox_to_anchor=(0, -0.2, 1, 0), loc="upper left", mode="expand", ncol=2)
        plt.show()
        plt.close()
class UMAPEmbedding(RegionEmbedding):
    """UMAP (optionally densMAP) embedding of a SpectraRegion's spectra."""

    def __init__(self, region: SpectraRegion, dimensions: int=2) -> None:
        """
        Args:
            region: region whose spectra are embedded.
            dimensions: number of embedding dimensions.
        """
        super().__init__(region)
        self.dimensions = dimensions
        self.idx2coord = None         # sample row index -> (x, y) pixel coordinate
        self.embedding_object = None  # fitted UMAP instance

    def fit_transform(self, verbose: bool = False, densmap: bool=False, n_neighbours: int=10, min_dist: float=0) -> np.array:
        """Fit UMAP on the region's spectra and return the embedded matrix.

        Args:
            verbose: verbose output (currently unused).
            densmap: use the density-preserving densMAP variant.
            n_neighbours: UMAP n_neighbors parameter.
            min_dist: UMAP min_dist parameter.

        BUG FIX: previously returned None despite the `-> np.array` annotation
        and the RegionEmbedding contract; now returns the
        (n_samples, dimensions) embedding.
        """
        elem_matrix, self.idx2coord = self.region.prepare_elem_matrix()
        # elem_matrix dims: (n_samples, n_features)
        self.logger.info("UMAP reduction")
        self.embedding_object = umap.UMAP(
            densmap=densmap,
            n_neighbors=n_neighbours,
            min_dist=min_dist,
            n_components=self.dimensions,
            random_state=42,
        )
        self.embedded_matrix = self.embedding_object.fit_transform(elem_matrix)
        return self.embedded_matrix

    def embedding(self) -> np.array:
        """Return the embedding as an image-shaped array (rows, cols, dimensions)."""
        outArray = np.zeros((self.region.region_array.shape[0], self.region.region_array.shape[1], self.dimensions))
        for idx in self.idx2coord:
            (x, y) = self.idx2coord[idx]
            outArray[x, y, :] = self.embedded_matrix[idx]
        return outArray
class UMAP_DBSCAN_Clusterer(RegionClusterer):
    """Segment a SpectraRegion into clusters.

    NOTE(review): despite the name, this implementation currently performs
    plain k-means on the raw spectra — no UMAP projection and no DBSCAN step
    appear here (it is identical to KMeansClusterer). Confirm whether the
    name or the implementation is out of date.
    """
    def __init__(self, region: SpectraRegion) -> None:
        super().__init__(region)
        # Copy of the region's m/z axis.
        self.matrix_mz = np.copy(self.region.idx2mass)
        # Label image produced by fit().
        self.segmented = None
    def fit(self, num_target_clusters: int, max_iterations: int = 100, verbose: bool = False):
        """Cluster all spectra into num_target_clusters groups.

        Note: max_iterations and verbose are currently unused.
        """
        elem_matrix, idx2ij = self.region.prepare_elem_matrix()
        kmeans = cluster.KMeans(n_clusters=num_target_clusters, random_state=0).fit(elem_matrix)
        if hasattr(kmeans, 'labels_'):
            y_pred = kmeans.labels_.astype(int)
        else:
            y_pred = kmeans.predict(elem_matrix)
        # Scatter the flat label vector back into image coordinates.
        clusts = np.zeros((self.region.region_array.shape[0], self.region.region_array.shape[1]))
        for idx, ypred in enumerate(y_pred):
            clusts[idx2ij[idx]] = y_pred[idx]
        self.segmented = clusts
    def transform(self, num_target_clusters: int, verbose: bool = False) -> np.array:
        """Return the segmentation computed by fit()."""
        return self.segmented
    def segmentation(self) -> np.array:
        """Return the segmentation computed by fit()."""
        return self.segmented
class FuzzyCMeansClusterer(RegionClusterer):
    """Segment a SpectraRegion using fuzzy c-means clustering."""

    def __init__(self, region: SpectraRegion) -> None:
        super().__init__(region)
        # Copy of the region's m/z axis.
        self.matrix_mz = np.copy(self.region.idx2mass)
        # Label image produced by fit().
        self.segmented = None

    def fit(self, num_target_clusters: int, max_iterations: int = 100, verbose: bool = False):
        """Cluster all spectra into num_target_clusters fuzzy clusters.

        Note: max_iterations and verbose are currently unused.
        """
        elem_matrix, idx2ij = self.region.prepare_elem_matrix()
        model = FCM(n_clusters=num_target_clusters).fit(elem_matrix)
        labels = model.predict(elem_matrix)
        # Scatter the flat label vector back into image coordinates.
        label_image = np.zeros((self.region.region_array.shape[0], self.region.region_array.shape[1]))
        for sample_idx, label in enumerate(labels):
            label_image[idx2ij[sample_idx]] = label
        self.segmented = label_image

    def transform(self, num_target_clusters: int, verbose: bool = False) -> np.array:
        """Return the segmentation computed by fit()."""
        return self.segmented

    def segmentation(self) -> np.array:
        """Return the segmentation computed by fit()."""
        return self.segmented
class KMeansClusterer(RegionClusterer):
    """Segment a SpectraRegion with hard k-means clustering."""

    def __init__(self, region: SpectraRegion) -> None:
        super().__init__(region)
        # Copy of the region's m/z axis.
        self.matrix_mz = np.copy(self.region.idx2mass)
        # Label image produced by fit().
        self.segmented = None

    def fit(self, num_target_clusters: int, max_iterations: int = 100, verbose: bool = False):
        """Cluster all spectra into num_target_clusters groups.

        Note: max_iterations and verbose are currently unused.
        """
        elem_matrix, idx2ij = self.region.prepare_elem_matrix()
        fitted = cluster.KMeans(n_clusters=num_target_clusters, random_state=0).fit(elem_matrix)
        if hasattr(fitted, 'labels_'):
            labels = fitted.labels_.astype(int)
        else:
            labels = fitted.predict(elem_matrix)
        # Scatter the flat label vector back into image coordinates.
        label_image = np.zeros((self.region.region_array.shape[0], self.region.region_array.shape[1]))
        for sample_idx, label in enumerate(labels):
            label_image[idx2ij[sample_idx]] = label
        self.segmented = label_image

    def transform(self, num_target_clusters: int, verbose: bool = False) -> np.array:
        """Return the segmentation computed by fit()."""
        return self.segmented

    def segmentation(self) -> np.array:
        """Return the segmentation computed by fit()."""
        return self.segmented
class ShrunkenCentroidClusterer(RegionClusterer):
    def __init__(self, region: SpectraRegion, delta=0.2) -> None:
        """Nearest-shrunken-centroid style segmentation of a SpectraRegion.

        Args:
            region: spectra region to segment.
            delta: shrinkage threshold; per-mass t-statistics with
                absolute value <= delta are shrunk to zero.
        """
        super().__init__(region)
        self.delta = delta
        # Per-iteration results, populated by fit().
        self.results = None
        # Copy of the region's m/z axis.
        self.matrix_mz = np.copy(self.region.idx2mass)
def _get_overall_centroid(self, spectra):
#Calculate the overall centroid
n = spectra.shape[0]*spectra.shape[1]
return np.sum(spectra, axis=(0,1))/n
def _get_segments(self, image):
cluster2coords = defaultdict(list)
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
clusterID = int(image[i, j])
cluster2coords[clusterID].append((i,j))
return cluster2coords
    def _get_seg_centroids(self, segments, spectra_orig):
        """Return each segment's centroid (mean spectrum over its pixels).

        Args:
            segments: dict mapping segment id -> list of (row, col) pixels.
            spectra_orig: (rows, cols, mz) spectra cube.

        Returns:
            defaultdict mapping segment id -> mean spectrum (mz,).
        """
        #Calculate the segment centroids
        # Default factory yields a zero spectrum for unseen segments.
        seg_cent = defaultdict(lambda : np.zeros( (spectra_orig.shape[2],)))
        for s in sorted(segments):
            # NOTE(review): allCoordIntensities is assigned but never used.
            allCoordIntensities = []
            for coord in segments[s]:
                #allCoordIntensities.append(spectra_orig[coord])
                seg_cent[s] += spectra_orig[coord]
            n = len(segments[s])
            seg_cent[s] = seg_cent[s] / n
        return seg_cent
    def _get_all_s_vec(self, segments, spectra_orig, seg_centroids, verbose=False):
        """Return the pooled within-class standard deviation per m/z channel.

        Computes s_i = sqrt( 1/(n-K) * sum_k sum_{x in k} (x_i - c_{k,i})^2 )
        over all segments k, where n is the pixel count and K the number of
        segments.

        Args:
            segments: dict mapping segment id -> list of (row, col) pixels.
            spectra_orig: (rows, cols, mz) spectra cube.
            seg_centroids: dict mapping segment id -> centroid spectrum.
            verbose: print the number of pixels visited.

        Returns:
            np.array of shape (mz,) with the pooled standard deviations.
        """
        n = spectra_orig.shape[0]*spectra_orig.shape[1]
        K = len(segments.keys())
        seenCoords = 0
        curS = np.zeros((spectra_orig.shape[2],))
        for seg in segments:
            seg_centroid = seg_centroids[seg]
            coordinates = segments[seg]
            for coord in coordinates:
                seenCoords += 1
                # Squared deviation of this pixel from its segment centroid.
                curS += np.multiply(spectra_orig[coord]-seg_centroid, spectra_orig[coord]-seg_centroid)
        curS = (1.0/(n-K)) * curS
        curS = np.sqrt(curS)
        if verbose:
            print(seenCoords, "of",n )
        return curS
    def _get_shr_centroids(self, segments, spectra_orig, seg_centroids, overall_centroid, verbose=False):
        """Compute shrunken centroids and t-statistics per segment (Tibshirani-style).

        For each segment k and channel i the standardised difference
        d_ik = (c_{k,i} - c_i) / (m_k * (s_i + s_0)) is soft-thresholded by
        self.delta, and the shrunken centroid is rebuilt from the overall
        centroid plus the thresholded difference.

        Args:
            segments: dict mapping segment id -> list of (row, col) pixels.
            spectra_orig: (rows, cols, mz) spectra cube.
            seg_centroids: dict mapping segment id -> centroid spectrum.
            overall_centroid: mean spectrum over all pixels.
            verbose: print diagnostic statistics.

        Returns:
            tuple (seg_shr_cent, seg_tstat_cent): dicts mapping segment id to
            shrunken centroid and to the thresholded t-statistics d'_ik.
        """
        #Calculate the segment shrunken centroids
        seg_shr_cent = dict()
        seg_tstat_cent = dict()
        n = spectra_orig.shape[0]*spectra_orig.shape[1]
        # NOTE(review): K is the maximum segment id, not the segment count,
        # and is unused below — verify.
        K = np.max(list(segments.keys()))
        s_list = self._get_all_s_vec(segments, spectra_orig, seg_centroids, verbose=verbose)
        # s_0: median of the pooled stds, a regulariser against tiny s_i.
        s_0 = np.median(s_list)
        if verbose:
            ovrAllCentroid = np.copy(overall_centroid)
            ovrAllCentroid[ovrAllCentroid<=0] = 0
            ovrAllCentroid[ovrAllCentroid>0] = 1
            print("Selected fields OvrAll Centroid:", sum(ovrAllCentroid), "of", len(ovrAllCentroid))
        for seg in sorted(segments):
            seg_centroid = seg_centroids[seg]
            coordinates = segments[seg]
            if verbose:
                print("seg centroid", seg_centroid)
            # m_k = sqrt(1/n_k + 1/n): standardisation factor for segment k.
            m = math.sqrt((1/len(coordinates)) + (1/n))
            shr_centroid = np.zeros(seg_centroid.shape)
            tstat_centroid = np.zeros(seg_centroid.shape)
            if verbose:
                segCentroid = np.copy(seg_centroids[seg])
                segCentroid[segCentroid <= 0] = 0
                segCentroid[segCentroid > 0] = 1
                print("Selected fields Seg Centroids", seg, ":", sum(segCentroid), "of", len(segCentroid), "with s0=", s_0, "and m=", m)
            for mz in range(spectra_orig.shape[2]):
                d_ik = (seg_centroid[mz] - overall_centroid[mz])/(m*(s_list[mz]+s_0))
                # Soft-thresholding: shrink |d_ik| by delta, clamp at zero.
                dp_ik = np.sign(d_ik)*max(0, (abs(d_ik)-self.delta)) #where + means positive part (t+ = t if t 0 and zero otherwise)
                #only d_ik > delta will result in change!
                tstat_centroid[mz] = dp_ik
                #shr_centroid[mz] = seg_centroid[mz] + m*(s_list[mz]+s_0)*dp_ik was used, but checking literature it should be
                shr_centroid[mz] = overall_centroid[mz] + m*(s_list[mz]+s_0)*dp_ik
                if shr_centroid[mz] < 0:
                    pass
                # it's a centroid and therefore a possible element of class spectrum!
                #shr_centroid[mz] = 0
                #if shr_centroid[mz] < 0 and seg_centroid[mz] > 0:
                #    print(seg, mz, seg_centroid[mz], d_ik, dp_ik, shr_centroid[mz])
            # Guard against division artefacts (s_i + s_0 == 0 etc.).
            shr_centroid = np.nan_to_num(shr_centroid, copy=True, nan=0.0, posinf=0.0, neginf=0.0)
            seg_shr_cent[seg] = shr_centroid
            seg_tstat_cent[seg] = tstat_centroid
        allShrCentroid = np.zeros((spectra_orig.shape[2],))
        for seg in sorted(seg_shr_cent):
            allShrCentroid += seg_shr_cent[seg]
            if verbose:
                shrCentroid = np.copy(seg_shr_cent[seg])
                shrCentroid[shrCentroid <= 0] = 0
                shrCentroid[shrCentroid > 0] = 1
                print("Selected fields Shr Centroids", seg, ":", sum(shrCentroid), "of", len(shrCentroid))
                # Five-number summary (plus mean) of the t-statistics.
                fiveNumSummary = (
                    np.min(seg_tstat_cent[seg]),
                    np.quantile(seg_tstat_cent[seg], 0.25),
                    np.median(seg_tstat_cent[seg]),
                    np.quantile(seg_tstat_cent[seg], 0.75),
                    np.max(seg_tstat_cent[seg]),
                    np.mean(seg_tstat_cent[seg])
                )
                print("t stats:", fiveNumSummary)
        if verbose:
            allShrCentroid[allShrCentroid <= 0] = 0
            allShrCentroid[allShrCentroid > 0] = 1
            print("Selected fields over all Shr Centroids", sum(allShrCentroid), "of", len(allShrCentroid))
        return seg_shr_cent, seg_tstat_cent
def _plot_segment_centroids(matrix_shr_centroids, matrix_global_centroid, matrix_segments, matrix, matrix_mz, ylim, addSpecs=[], xlim=(-500, 1000)):
oldFigSize = plt.rcParams["figure.figsize"]
plt.rcParams["figure.figsize"] = (20,30)
fig, ax = plt.subplots(1, len(matrix_shr_centroids)+1+ len(addSpecs))
for sidx, seg in enumerate(sorted(matrix_shr_centroids)):
usePixels = matrix_segments[seg]
if len(usePixels) > 200:
usePixels = random.sample(list(matrix_segments[seg]), 200)
for px in usePixels:
ax[sidx].plot(matrix[px], matrix_mz, alpha=0.01, color="blue")
ax[sidx].plot(matrix_shr_centroids[seg], matrix_mz, color="black")
ax[sidx].set_title("Segment: {}".format(seg))
ax[sidx].set_xlim(xlim)
ax[sidx].set_ylim(ylim)
ax[len(matrix_shr_centroids)].plot(matrix_global_centroid,matrix_mz, color="black")
ax[len(matrix_shr_centroids)].set_title("Segment: {}".format("global"))
ax[len(matrix_shr_centroids)].set_xlim(xlim)
ax[len(matrix_shr_centroids)].set_ylim(ylim)
for asi, addSpec in enumerate(addSpecs):
ax[len(matrix_shr_centroids)+1+asi].plot(matrix[addSpec],matrix_mz, color="black")
ax[len(matrix_shr_centroids)+1+asi].set_title("Segment: {}".format(addSpec))
ax[len(matrix_shr_centroids)+1+asi].set_xlim(xlim)
ax[len(matrix_shr_centroids)+1+asi].set_ylim(ylim)
plt.show()
plt.close()
plt.rcParams["figure.figsize"] = oldFigSize
    def _plotTStatistics( self, tStat, mzValues, plotRange=None ):
        """Plot each segment's t-statistic vector against mzValues.

        Args:
            tStat: dict mapping segment id -> t-statistic vector.
            mzValues: x-axis values for the plot.
            plotRange: optional (min, max) x-axis limits.
        """
        plt.figure()
        for x in tStat:
            plt.plot(mzValues, tStat[x], label=str(x))
        if not plotRange is None:
            plt.xlim(plotRange)
        plt.legend()
        plt.show()
        plt.close()
    def plot_segment_centroid(self, iteration=-1, mz_range=(200,620), intensity_range=(-1,5)):
        """Plot the shrunken centroids of a fitted iteration.

        Args:
            iteration: iteration index into the stored results (-1 = last).
            mz_range: m/z axis plot limits.
            intensity_range: intensity axis plot limits.
        """
        resultDict = self._get_iteration_data(iteration)
        # NOTE(review): despite its name, mzValues holds a copy of the
        # spectra cube (region_array) and is passed as the `matrix`
        # argument below — the name is misleading; verify.
        mzValues = np.copy(self.region.region_array)
        self._plot_segment_centroids(resultDict["centroids"], resultDict["global_centroid"], resultDict["segmentation"], mzValues, self.matrix_mz, mz_range, xlim=intensity_range)
    def plot_t_statistics(self, iteration=-1, mz_range=None):
        """Plot the per-segment t-statistics of a fitted iteration.

        Args:
            iteration: iteration index into the stored results (-1 = last).
            mz_range: optional (min, max) x-axis limits.
        """
        resultDict = self._get_iteration_data(iteration)
        # NOTE(review): mzValues is a copy of the 3D spectra cube but is
        # passed as the x-axis values to _plotTStatistics; this looks like it
        # should be the m/z axis (e.g. self.matrix_mz) — TODO confirm.
        mzValues = np.copy(self.region.region_array)
        self._plotTStatistics(resultDict["centroids_tstats"], mzValues, mz_range)
def _distance_tibschirani(self, matrix, pxCoord, centroid, sqSStats, centroidProbability):
specDiff = matrix[pxCoord]-centroid
sigma = np.divide(np.multiply(specDiff, specDiff), sqSStats)
sigma = np.nan_to_num(sigma, copy=True, nan=0.0, posinf=0.0, neginf=0.0)
sigma = sum(sigma)
sigma = sigma - 2*math.log(centroidProbability)
return sigma
    def _get_new_clusters_func(self, orig_segments, segments, spectra_orig, seg_centroids, shr_seg_centroids, print_area=5, distance_func=None, verbose=False):
        """Reassign every pixel to the segment with the smallest distance score.

        Args:
            orig_segments: previous label image (only used for verbose output).
            segments: dict mapping segment id -> list of pixel coordinates.
            spectra_orig: (rows, cols, mz) spectra cube.
            seg_centroids: dict segment id -> unshrunken centroid.
            shr_seg_centroids: dict segment id -> shrunken centroid.
            print_area: half-width of the central window printed when verbose.
            distance_func: callable(matrix, coord, centroid, sqSStats, prior)
                returning a score; required (asserted non-None).
            verbose: print diagnostics and plot a label histogram.

        Returns:
            tuple (new_matrix, allMaxSigmas): the new label image and the flat
            list of all computed scores.
        """
        assert(not distance_func is None)
        #Calculate the segment membership probability
        new_matrix = np.zeros((spectra_orig.shape[0], spectra_orig.shape[1]))
        n = spectra_orig.shape[0] * spectra_orig.shape[1]
        s_list = self._get_all_s_vec(segments, spectra_orig, seg_centroids)
        s_0 = np.median(s_list)
        sigmas = list()
        # Precompute (s_i + s_0)^2, shared by all distance evaluations.
        sSum = s_list+s_0
        sSumSq = np.multiply(sSum, sSum)
        oldSegmentCount = 0
        allMaxSigmas = []
        takenClusters = []
        # Window around the image centre used for verbose diagnostics.
        printXlow = (orig_segments.shape[0]/2) - print_area
        printXhi = (orig_segments.shape[0]/2) + print_area
        printYlow = (orig_segments.shape[1]/2)-print_area
        printYhi = (orig_segments.shape[1]/2)+print_area
        allShrCentroid = np.zeros((spectra_orig.shape[2],))
        for seg in sorted(shr_seg_centroids):
            allShrCentroid += shr_seg_centroids[seg]
        # Binary mask of channels used by at least one shrunken centroid.
        allShrCentroid[allShrCentroid <= 0] = 0
        allShrCentroid[allShrCentroid > 0] = 1
        if verbose:
            print("Total field considered", sum(allShrCentroid), "of", len(allShrCentroid))
            for seg in sorted(segments):
                print("Segment", seg, "elements", len(segments[seg]), "of all", n, len(segments[seg])/n)
        for i in range(spectra_orig.shape[0]):
            for j in range(spectra_orig.shape[1]):
                # NOTE(review): `spectrum` is assigned but never used.
                spectrum = spectra_orig[(i,j)]
                sigmas = dict()
                for seg in sorted(segments):
                    shr_seg_centroid = shr_seg_centroids[seg]
                    coordinates = segments[seg]
                    # Score against this segment; len(coords)/n is the prior.
                    sigma = distance_func(spectra_orig, (i,j), shr_seg_centroid, sSumSq, len(coordinates)/n)
                    sigmas[seg] = sigma
                allMaxSigmas += [sigmas[seg] for seg in segments]
                #this very likely becomes 0 and SHOULD NOT be used for class assignment!
                #summed_probabilities = sum([math.exp(-0.5*sigmas[cluster]) for cluster in sorted(sigmas)])
                if verbose:
                    if (printXlow<=i<=printXhi and printYlow<=j<=printYhi) or (i,j) in [(22,26)]:# or lower_probability == 0:
                        for seg in sorted(sigmas):
                            print("[PS]", i,j, seg, sigmas[seg], math.exp(-0.5*sigmas[seg]),2*math.log(len(segments[seg])/n))
                        #print([(cluster,math.exp(-0.5*sigmas[cluster])/summed_probabilities) for cluster in sorted(sigmas)], lower_probability)
                # Assign the pixel to the segment with the minimal score.
                minSigma = min(sigmas.values())
                if len(sigmas) == 0:
                    print("sigmas is empty!", sigmas, (i,j))
                minSigmaClass = [x for x in sigmas if sigmas[x] == minSigma]
                if len(minSigmaClass) == 0:
                    print("minSigmaClass Empty", i, j)
                    print(sigmas)
                    print(minSigma)
                minSigmaClass = minSigmaClass[0]
                new_matrix[i][j] = minSigmaClass
                takenClusters.append(minSigmaClass)
        if verbose:
            plt.hist(takenClusters, bins=len(set(takenClusters)))
            plt.show()
            plt.close()
        if verbose:
            # NOTE(review): oldSegmentCount is never incremented above.
            print("Old segments taken:", oldSegmentCount, "of", spectra_orig.shape[0]*spectra_orig.shape[1] )
        return new_matrix, allMaxSigmas
def fit(self, num_target_clusters: int, max_iterations: int = 100, verbose: bool = False):
    """Run iterative shrunken-centroid segmentation.

    Starts from a k-means segmentation, then repeatedly recomputes shrunken
    centroids and reassigns pixels until the segmentation stops changing or
    ``max_iterations`` is reached. All intermediate results are stored in
    ``self.results`` keyed by iteration number.

    :param num_target_clusters: number of clusters for the initial k-means
    :param max_iterations: upper bound on refinement iterations
    :param verbose: print per-segment diagnostics each iteration
    """
    matrix = np.copy(self.region.region_array)
    # initial segmentation via plain k-means on the same region
    shr_segmented = KMeansClusterer(self.region).fit_transform(num_target_clusters=num_target_clusters, verbose=verbose)
    iteration = 0
    self.results = OrderedDict()
    # iteration 0 holds the k-means seed; centroid fields are filled from iteration 1 on
    self.results[iteration] = {'segmentation': shr_segmented, 'centroids': None, 'centroids_tstats': None, 'global_centroid': None}
    progBar = progressbar.ProgressBar(widgets=[
        progressbar.Bar(), ' ', progressbar.Percentage(), ' ', progressbar.AdaptiveETA()
    ])
    for iteration in progBar(range(1, max_iterations+1)):
        #iteration += 1
        matrix_global_centroid = self._get_overall_centroid(matrix)
        matrix_segments = self._get_segments(shr_segmented)
        matrix_seg_centroids = self._get_seg_centroids(matrix_segments, matrix)
        matrix_shr_centroids, matrix_tstat_centroids = self._get_shr_centroids(matrix_segments, matrix, matrix_seg_centroids, matrix_global_centroid)
        # reassign each pixel to the nearest shrunken centroid
        shr_segmented, ams = self._call_new_clusters(shr_segmented, matrix_segments, matrix, matrix_seg_centroids, matrix_shr_centroids)
        if verbose:
            for x in sorted(matrix_segments):
                print("SegLen2", x, len(matrix_segments[x]))
        self.results[iteration] = {'segmentation': shr_segmented, 'centroids': matrix_shr_centroids, 'centroids_tstats': matrix_tstat_centroids, 'global_centroid': matrix_global_centroid}
        # convergence: stop once two consecutive segmentations are identical
        if self._matrixEqual(self.results[iteration]['segmentation'], self.results[iteration-1]['segmentation']):
            print("Finishing iterations due to same result after", iteration, "iterations")
            break
def _call_new_clusters(self, shr_segmented, matrix_segments, matrix, matrix_seg_centroids, matrix_shr_centroids):
    """Reassign clusters using the Tibshirani-style centroid distance."""
    return self._get_new_clusters_func(shr_segmented, matrix_segments, matrix,
                                       matrix_seg_centroids, matrix_shr_centroids,
                                       print_area=0,
                                       distance_func=self._distance_tibschirani)
def _matrixEqual(self, mat1, mat2):
comparison = mat1 == mat2
equal_arrays = comparison.all()
return equal_arrays
def _get_iteration_data(self, iteration):
resultKeys = list(self.results.keys())
desiredKey = resultKeys[iteration]
resultDict = self.results[desiredKey]
return resultDict
def segmentation(self) -> np.array:
    """Return the segmentation matrix from the last completed iteration."""
    return self._get_iteration_data(-1)["segmentation"]
def transform(self, num_target_clusters: int, verbose: bool = False) -> np.array:
    """Return the current segmentation.

    ``num_target_clusters`` is accepted for interface compatibility but has
    no effect here; a warning is printed when ``verbose`` is set.
    """
    if verbose:
        print("Warning: num_target_clusters not applicable to this method")
    return self.segmentation()
class SARegionClusterer(ShrunkenCentroidClusterer):
    """Spatially-aware variant of the shrunken-centroid clusterer.

    The distance of a pixel to a centroid is a Gaussian-weighted sum of
    squared spectral distances over the pixel's neighbourhood.
    """

    def __init__(self, region: SpectraRegion, delta=0.2, radius=2) -> None:
        """
        :param region: spectra region to cluster
        :param delta: shrinkage parameter forwarded to the base class
        :param radius: neighbourhood radius (in pixels) for spatial weighting
        """
        super().__init__(region, delta=delta)
        self.radius = radius

    def _distance_sa(self, matrix, pxCoord, centroid, sqSStats, centroidProbability, radius=2):
        """Spatially-aware distance of pixel ``pxCoord`` to ``centroid``.

        ``sqSStats`` and ``centroidProbability`` are accepted only for
        interface compatibility with the other distance functions.
        """
        distance = 0
        sigma = (2*radius+1)/4
        for i in range(-radius, radius+1):
            for j in range(-radius, radius+1):
                neighbor = (pxCoord[0]+i, pxCoord[1]+j)
                if neighbor[0] < 0 or neighbor[1] < 0:
                    #invalid coord
                    # TODO implement some working padding!
                    continue
                if neighbor[0] >= matrix.shape[0] or neighbor[1] >= matrix.shape[1]:
                    #invalid coord
                    # TODO implement some working padding!
                    continue
                # Gaussian weight exp(-(i^2+j^2) / (2*sigma^2)).
                # Fix: the previous code computed math.exp(-i**2-j**2)/(2*sigma**2),
                # i.e. divided OUTSIDE the exponential; the normalisation belongs
                # inside the exponent (cf. _distance_sasa_alpha in SASARegionClusterer,
                # which places the same 2*sigma**2 term inside math.exp).
                weight = math.exp((-i**2 - j**2) / (2*sigma**2))
                specDiff = np.linalg.norm(matrix[neighbor]-centroid) ** 2
                distance += weight * specDiff
        return np.sqrt(distance)

    def _call_new_clusters(self, shr_segmented, matrix_segments, matrix, matrix_seg_centroids, matrix_shr_centroids):
        """Delegate reassignment to the generic routine with the SA distance."""
        shr_segmented, ams = self._get_new_clusters_func(
            shr_segmented, matrix_segments, matrix, matrix_seg_centroids, matrix_shr_centroids,
            print_area=0,
            distance_func=lambda matrix, pxCoord, centroid, sqSStats, centroidProbability: self._distance_sa(
                matrix, pxCoord, centroid, sqSStats, centroidProbability, radius=self.radius))
        return shr_segmented, ams
class SASARegionClusterer(ShrunkenCentroidClusterer):
    """Structurally-adaptive spatially-aware variant of the shrunken-centroid
    clusterer: neighbourhood weights additionally adapt to how similar each
    neighbour's spectrum is to the centre pixel's spectrum."""

    def __init__(self, region: SpectraRegion, delta=0.2, radius=2) -> None:
        # radius: neighbourhood half-width (in pixels) used by _distance_sasa
        super().__init__(region, delta=delta)
        self.radius = radius

    def _spectra_dist_sasa(self, spec1, spec2):
        """Euclidean (2-norm) distance between two spectra."""
        return np.linalg.norm(spec1-spec2)

    def _distance_sasa_beta(self, xpos, npos, matrix, radius, lambda_):
        """Similarity factor: decays as the neighbour spectrum at ``npos``
        diverges from the centre spectrum at ``xpos`` (scaled by ``lambda_``)."""
        postFactor = (self._spectra_dist_sasa(matrix[npos], matrix[xpos]) / lambda_) ** 2
        return 1.0/math.exp(0.25 * postFactor)

    def _distance_sasa_alpha(self, matrix, xpos, npos, dpos, radius, lambda_):
        """Combined weight for neighbour ``npos`` at offset ``dpos`` from ``xpos``:
        spatial (Gaussian-like) factor times the adaptive similarity factor."""
        sigma = (2*radius+1)/4.0
        # NOTE(review): i**2 - j**2 (difference, not negated sum) looks suspicious —
        # a Gaussian spatial weight would use -(i**2 + j**2); confirm against the
        # SASA reference. Also both branches below reduce to the same value
        # exp(alpha_pre_top / alpha_pre_bottom), so the sign split appears redundant.
        alpha_pre_top = (dpos[0]**2)-(dpos[1]**2)
        alpha_pre_bottom = (2*(sigma**2))
        if alpha_pre_top > 0:
            alpha_pre = math.exp( alpha_pre_top / alpha_pre_bottom )
        else:
            alpha_pre = 1.0 / math.exp( abs(alpha_pre_top) / alpha_pre_bottom )
        alpha_post = self._distance_sasa_beta(xpos, npos, matrix, radius, lambda_)
        return alpha_pre * alpha_post

    def _distance_sasa(self, matrix, pxCoord, centroid, sqSStats, centroidProbability, radius=2):
        """Adaptive spatially-aware distance of pixel ``pxCoord`` to ``centroid``.

        First pass derives the adaptation scale ``lambda_`` from the spread of
        neighbour-to-centre spectral distances; second pass accumulates the
        alpha-weighted squared distances to the centroid.
        ``sqSStats`` and ``centroidProbability`` are unused (interface compatibility).
        """
        allDeltas = []
        for i in range(-radius, radius+1):
            for j in range(-radius, radius+1):
                neighbor = (pxCoord[0]+i, pxCoord[1]+j)
                if neighbor[0] < 0 or neighbor[1] < 0:
                    #invalid coord
                    # TODO implement some working padding!
                    continue
                if neighbor[0] >= matrix.shape[0] or neighbor[1] >= matrix.shape[1]:
                    #invalid coord
                    # TODO implement some working padding!
                    continue
                delta_ij_xy = self._spectra_dist_sasa(matrix[neighbor], matrix[pxCoord]) # 2-norm
                allDeltas.append(delta_ij_xy)
        minDelta = np.min(allDeltas)
        allDeltaHats = [x-minDelta for x in allDeltas]
        lambda_ = 0.5 * np.max(allDeltaHats)
        # guard against a uniform neighbourhood (all deltas equal -> lambda 0)
        if lambda_ == 0:
            lambda_ = 1
        distance = 0
        for i in range(-radius, radius+1):
            for j in range(-radius, radius+1):
                neighbor = (pxCoord[0]+i, pxCoord[1]+j)
                dpos = (i,j)
                if neighbor[0] < 0 or neighbor[1] < 0:
                    #invalid coord
                    # TODO implement some working padding!
                    continue
                if neighbor[0] >= matrix.shape[0] or neighbor[1] >= matrix.shape[1]:
                    #invalid coord
                    # TODO implement some working padding!
                    continue
                specDiff = np.linalg.norm(matrix[neighbor]-centroid) ** 2 # 2-norm squared
                alpha_ij = self._distance_sasa_alpha(matrix, pxCoord, neighbor, dpos, radius, lambda_)
                distance += alpha_ij * specDiff
        return np.sqrt(distance)

    def _call_new_clusters(self, shr_segmented, matrix_segments, matrix, matrix_seg_centroids, matrix_shr_centroids):
        """Delegate reassignment to the generic routine with the SASA distance."""
        shr_segmented, ams = self._get_new_clusters_func(shr_segmented, matrix_segments, matrix, matrix_seg_centroids, matrix_shr_centroids, print_area=0, distance_func=lambda matrix, pxCoord, centroid, sqSStats, centroidProbability: self._distance_sasa(matrix, pxCoord,
        centroid, sqSStats, centroidProbability, radius=self.radius))
        return shr_segmented, ams
| [
"numpy.load",
"numpy.sum",
"numpy.nan_to_num",
"typing.OrderedDict",
"collections.defaultdict",
"logging.Formatter",
"matplotlib.pyplot.figure",
"progressbar.Percentage",
"numpy.linalg.norm",
"numpy.mean",
"matplotlib.pyplot.gca",
"numpy.unique",
"pandas.DataFrame",
"numpy.multiply",
"nu... | [((1433, 1445), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1443, 1445), True, 'import networkx as nx\n'), ((1530, 1547), 'numpy.load', 'np.load', (['filepath'], {}), '(filepath)\n', (1537, 1547), True, 'import numpy as np\n'), ((1764, 1773), 'collections.Counter', 'Counter', ([], {}), '()\n', (1771, 1773), False, 'from collections import defaultdict, Counter\n'), ((3145, 3157), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3155, 3157), True, 'import matplotlib.pyplot as plt\n'), ((3446, 3549), 'networkx.draw', 'nx.draw', (['self.relations'], {'with_labels': '(True)', 'labels': 'labels', 'node_color': 'colors', 'edge_colors': 'edgeColors'}), '(self.relations, with_labels=True, labels=labels, node_color=colors,\n edge_colors=edgeColors)\n', (3453, 3549), True, 'import networkx as nx\n'), ((3554, 3564), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3562, 3564), True, 'import matplotlib.pyplot as plt\n'), ((3573, 3584), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3582, 3584), True, 'import matplotlib.pyplot as plt\n'), ((5980, 6044), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {'n_components': 'self.dimensions', 'random_state': '(42)'}), '(n_components=self.dimensions, random_state=42)\n', (5997, 6044), False, 'from sklearn import cluster, decomposition\n'), ((6273, 6375), 'numpy.zeros', 'np.zeros', (['(self.region.region_array.shape[0], self.region.region_array.shape[1], self\n .dimensions)'], {}), '((self.region.region_array.shape[0], self.region.region_array.shape\n [1], self.dimensions))\n', (6281, 6375), True, 'import numpy as np\n'), ((6859, 6961), 'pandas.DataFrame', 'pd.DataFrame', (['self.embedding_object.components_.T'], {'columns': 'computedPCs', 'index': 'self.region.idx2mass'}), '(self.embedding_object.components_.T, columns=computedPCs,\n index=self.region.idx2mass)\n', (6871, 6961), True, 'import pandas as pd\n'), ((7230, 7258), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (7240, 7258), True, 'import matplotlib.pyplot as plt\n'), ((7269, 7336), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.embedded_matrix[:, 0]', 'self.embedded_matrix[:, 1]'], {}), '(self.embedded_matrix[:, 0], self.embedded_matrix[:, 1])\n', (7280, 7336), True, 'import matplotlib.pyplot as plt\n'), ((7557, 7644), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0, -0.2, 1, 0)', 'loc': '"""upper left"""', 'mode': '"""expand"""', 'ncol': '(2)'}), "(bbox_to_anchor=(0, -0.2, 1, 0), loc='upper left', mode='expand',\n ncol=2)\n", (7567, 7644), True, 'import matplotlib.pyplot as plt\n'), ((7650, 7660), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7658, 7660), True, 'import matplotlib.pyplot as plt\n'), ((7669, 7680), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7678, 7680), True, 'import matplotlib.pyplot as plt\n'), ((8265, 8387), 'umap.UMAP', 'umap.UMAP', ([], {'densmap': 'densmap', 'n_neighbors': 'n_neighbours', 'min_dist': 'min_dist', 'n_components': 'self.dimensions', 'random_state': '(42)'}), '(densmap=densmap, n_neighbors=n_neighbours, min_dist=min_dist,\n n_components=self.dimensions, random_state=42)\n', (8274, 8387), False, 'import umap\n'), ((8602, 8704), 'numpy.zeros', 'np.zeros', (['(self.region.region_array.shape[0], self.region.region_array.shape[1], self\n .dimensions)'], {}), '((self.region.region_array.shape[0], self.region.region_array.shape\n [1], self.dimensions))\n', (8610, 8704), True, 'import numpy as np\n'), ((9030, 9059), 'numpy.copy', 'np.copy', (['self.region.idx2mass'], {}), '(self.region.idx2mass)\n', (9037, 9059), True, 'import numpy as np\n'), ((9525, 9610), 'numpy.zeros', 'np.zeros', (['(self.region.region_array.shape[0], self.region.region_array.shape[1])'], {}), '((self.region.region_array.shape[0], self.region.region_array.shape[1])\n )\n', (9533, 9610), True, 'import numpy as np\n'), ((10083, 10112), 'numpy.copy', 'np.copy', 
(['self.region.idx2mass'], {}), '(self.region.idx2mass)\n', (10090, 10112), True, 'import numpy as np\n'), ((10480, 10565), 'numpy.zeros', 'np.zeros', (['(self.region.region_array.shape[0], self.region.region_array.shape[1])'], {}), '((self.region.region_array.shape[0], self.region.region_array.shape[1])\n )\n', (10488, 10565), True, 'import numpy as np\n'), ((11039, 11068), 'numpy.copy', 'np.copy', (['self.region.idx2mass'], {}), '(self.region.idx2mass)\n', (11046, 11068), True, 'import numpy as np\n'), ((11534, 11619), 'numpy.zeros', 'np.zeros', (['(self.region.region_array.shape[0], self.region.region_array.shape[1])'], {}), '((self.region.region_array.shape[0], self.region.region_array.shape[1])\n )\n', (11542, 11619), True, 'import numpy as np\n'), ((12161, 12190), 'numpy.copy', 'np.copy', (['self.region.idx2mass'], {}), '(self.region.idx2mass)\n', (12168, 12190), True, 'import numpy as np\n'), ((12431, 12448), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12442, 12448), False, 'from collections import defaultdict, Counter\n'), ((13415, 13449), 'numpy.zeros', 'np.zeros', (['(spectra_orig.shape[2],)'], {}), '((spectra_orig.shape[2],))\n', (13423, 13449), True, 'import numpy as np\n'), ((13818, 13831), 'numpy.sqrt', 'np.sqrt', (['curS'], {}), '(curS)\n', (13825, 13831), True, 'import numpy as np\n'), ((14338, 14355), 'numpy.median', 'np.median', (['s_list'], {}), '(s_list)\n', (14347, 14355), True, 'import numpy as np\n'), ((16540, 16574), 'numpy.zeros', 'np.zeros', (['(spectra_orig.shape[2],)'], {}), '((spectra_orig.shape[2],))\n', (16548, 16574), True, 'import numpy as np\n'), ((19302, 19312), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19310, 19312), True, 'import matplotlib.pyplot as plt\n'), ((19321, 19332), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19330, 19332), True, 'import matplotlib.pyplot as plt\n'), ((19463, 19475), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19473, 19475), 
True, 'import matplotlib.pyplot as plt\n'), ((19632, 19644), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19642, 19644), True, 'import matplotlib.pyplot as plt\n'), ((19653, 19663), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19661, 19663), True, 'import matplotlib.pyplot as plt\n'), ((19672, 19683), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19681, 19683), True, 'import matplotlib.pyplot as plt\n'), ((19873, 19906), 'numpy.copy', 'np.copy', (['self.region.region_array'], {}), '(self.region.region_array)\n', (19880, 19906), True, 'import numpy as np\n'), ((20241, 20274), 'numpy.copy', 'np.copy', (['self.region.region_array'], {}), '(self.region.region_array)\n', (20248, 20274), True, 'import numpy as np\n'), ((20584, 20648), 'numpy.nan_to_num', 'np.nan_to_num', (['sigma'], {'copy': '(True)', 'nan': '(0.0)', 'posinf': '(0.0)', 'neginf': '(0.0)'}), '(sigma, copy=True, nan=0.0, posinf=0.0, neginf=0.0)\n', (20597, 20648), True, 'import numpy as np\n'), ((21038, 21094), 'numpy.zeros', 'np.zeros', (['(spectra_orig.shape[0], spectra_orig.shape[1])'], {}), '((spectra_orig.shape[0], spectra_orig.shape[1]))\n', (21046, 21094), True, 'import numpy as np\n'), ((21243, 21260), 'numpy.median', 'np.median', (['s_list'], {}), '(s_list)\n', (21252, 21260), True, 'import numpy as np\n'), ((21329, 21352), 'numpy.multiply', 'np.multiply', (['sSum', 'sSum'], {}), '(sSum, sSum)\n', (21340, 21352), True, 'import numpy as np\n'), ((21699, 21733), 'numpy.zeros', 'np.zeros', (['(spectra_orig.shape[2],)'], {}), '((spectra_orig.shape[2],))\n', (21707, 21733), True, 'import numpy as np\n'), ((24455, 24488), 'numpy.copy', 'np.copy', (['self.region.region_array'], {}), '(self.region.region_array)\n', (24462, 24488), True, 'import numpy as np\n'), ((24665, 24678), 'typing.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24676, 24678), False, 'from typing import OrderedDict\n'), ((28348, 28365), 'numpy.sqrt', 'np.sqrt', (['distance'], {}), 
'(distance)\n', (28355, 28365), True, 'import numpy as np\n'), ((29143, 29172), 'numpy.linalg.norm', 'np.linalg.norm', (['(spec1 - spec2)'], {}), '(spec1 - spec2)\n', (29157, 29172), True, 'import numpy as np\n'), ((30733, 30750), 'numpy.min', 'np.min', (['allDeltas'], {}), '(allDeltas)\n', (30739, 30750), True, 'import numpy as np\n'), ((31751, 31768), 'numpy.sqrt', 'np.sqrt', (['distance'], {}), '(distance)\n', (31758, 31768), True, 'import numpy as np\n'), ((4020, 4043), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (4041, 4043), False, 'import logging\n'), ((4171, 4241), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)s %(levelname)s: %(message)s"""'], {}), "('%(asctime)s %(name)s %(levelname)s: %(message)s')\n", (4188, 4241), False, 'import logging\n'), ((12339, 12367), 'numpy.sum', 'np.sum', (['spectra'], {'axis': '(0, 1)'}), '(spectra, axis=(0, 1))\n', (12345, 12367), True, 'import numpy as np\n'), ((14408, 14433), 'numpy.copy', 'np.copy', (['overall_centroid'], {}), '(overall_centroid)\n', (14415, 14433), True, 'import numpy as np\n'), ((14934, 14962), 'numpy.zeros', 'np.zeros', (['seg_centroid.shape'], {}), '(seg_centroid.shape)\n', (14942, 14962), True, 'import numpy as np\n'), ((14992, 15020), 'numpy.zeros', 'np.zeros', (['seg_centroid.shape'], {}), '(seg_centroid.shape)\n', (15000, 15020), True, 'import numpy as np\n'), ((16347, 16418), 'numpy.nan_to_num', 'np.nan_to_num', (['shr_centroid'], {'copy': '(True)', 'nan': '(0.0)', 'posinf': '(0.0)', 'neginf': '(0.0)'}), '(shr_centroid, copy=True, nan=0.0, posinf=0.0, neginf=0.0)\n', (16360, 16418), True, 'import numpy as np\n'), ((19603, 19622), 'matplotlib.pyplot.xlim', 'plt.xlim', (['plotRange'], {}), '(plotRange)\n', (19611, 19622), True, 'import matplotlib.pyplot as plt\n'), ((20525, 20556), 'numpy.multiply', 'np.multiply', (['specDiff', 'specDiff'], {}), '(specDiff, specDiff)\n', (20536, 20556), True, 'import numpy as np\n'), ((24126, 24136), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24134, 24136), True, 'import matplotlib.pyplot as plt\n'), ((24149, 24160), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24158, 24160), True, 'import matplotlib.pyplot as plt\n'), ((29355, 29382), 'math.exp', 'math.exp', (['(0.25 * postFactor)'], {}), '(0.25 * postFactor)\n', (29363, 29382), False, 'import math\n'), ((29646, 29688), 'math.exp', 'math.exp', (['(alpha_pre_top / alpha_pre_bottom)'], {}), '(alpha_pre_top / alpha_pre_bottom)\n', (29654, 29688), False, 'import math\n'), ((30830, 30850), 'numpy.max', 'np.max', (['allDeltaHats'], {}), '(allDeltaHats)\n', (30836, 30850), True, 'import numpy as np\n'), ((1086, 1103), 'progressbar.Bar', 'progressbar.Bar', ([], {}), '()\n', (1101, 1103), False, 'import progressbar\n'), ((1110, 1134), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (1132, 1134), False, 'import progressbar\n'), ((1141, 1166), 'progressbar.AdaptiveETA', 'progressbar.AdaptiveETA', ([], {}), '()\n', (1164, 1166), False, 'import progressbar\n'), ((7501, 7510), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7508, 7510), True, 'import matplotlib.pyplot as plt\n'), ((9276, 9338), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'num_target_clusters', 'random_state': '(0)'}), '(n_clusters=num_target_clusters, random_state=0)\n', (9290, 9338), False, 'from sklearn import cluster, decomposition\n'), ((10326, 10361), 'fcmeans.FCM', 'FCM', ([], {'n_clusters': 'num_target_clusters'}), '(n_clusters=num_target_clusters)\n', (10329, 10361), False, 'from fcmeans import FCM\n'), ((11285, 11347), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'num_target_clusters', 'random_state': '(0)'}), '(n_clusters=num_target_clusters, random_state=0)\n', (11299, 11347), False, 'from sklearn import cluster, decomposition\n'), ((12812, 12846), 'numpy.zeros', 'np.zeros', (['(spectra_orig.shape[2],)'], {}), '((spectra_orig.shape[2],))\n', (12820, 
12846), True, 'import numpy as np\n'), ((13668, 13755), 'numpy.multiply', 'np.multiply', (['(spectra_orig[coord] - seg_centroid)', '(spectra_orig[coord] - seg_centroid)'], {}), '(spectra_orig[coord] - seg_centroid, spectra_orig[coord] -\n seg_centroid)\n', (13679, 13755), True, 'import numpy as np\n'), ((15078, 15105), 'numpy.copy', 'np.copy', (['seg_centroids[seg]'], {}), '(seg_centroids[seg])\n', (15085, 15105), True, 'import numpy as np\n'), ((16719, 16745), 'numpy.copy', 'np.copy', (['seg_shr_cent[seg]'], {}), '(seg_shr_cent[seg])\n', (16726, 16745), True, 'import numpy as np\n'), ((20703, 20732), 'math.log', 'math.log', (['centroidProbability'], {}), '(centroidProbability)\n', (20711, 20732), False, 'import math\n'), ((1613, 1630), 'numpy.unique', 'np.unique', (['regImg'], {}), '(regImg)\n', (1622, 1630), True, 'import numpy as np\n'), ((3955, 3974), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3972, 3974), False, 'import logging\n'), ((15510, 15523), 'numpy.sign', 'np.sign', (['d_ik'], {}), '(d_ik)\n', (15517, 15523), True, 'import numpy as np\n'), ((17025, 17052), 'numpy.min', 'np.min', (['seg_tstat_cent[seg]'], {}), '(seg_tstat_cent[seg])\n', (17031, 17052), True, 'import numpy as np\n'), ((17090, 17128), 'numpy.quantile', 'np.quantile', (['seg_tstat_cent[seg]', '(0.25)'], {}), '(seg_tstat_cent[seg], 0.25)\n', (17101, 17128), True, 'import numpy as np\n'), ((17166, 17196), 'numpy.median', 'np.median', (['seg_tstat_cent[seg]'], {}), '(seg_tstat_cent[seg])\n', (17175, 17196), True, 'import numpy as np\n'), ((17234, 17272), 'numpy.quantile', 'np.quantile', (['seg_tstat_cent[seg]', '(0.75)'], {}), '(seg_tstat_cent[seg], 0.75)\n', (17245, 17272), True, 'import numpy as np\n'), ((17310, 17337), 'numpy.max', 'np.max', (['seg_tstat_cent[seg]'], {}), '(seg_tstat_cent[seg])\n', (17316, 17337), True, 'import numpy as np\n'), ((17375, 17403), 'numpy.mean', 'np.mean', (['seg_tstat_cent[seg]'], {}), '(seg_tstat_cent[seg])\n', (17382, 17403), True, 'import 
numpy as np\n'), ((24877, 24894), 'progressbar.Bar', 'progressbar.Bar', ([], {}), '()\n', (24892, 24894), False, 'import progressbar\n'), ((24901, 24925), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (24923, 24925), False, 'import progressbar\n'), ((24932, 24957), 'progressbar.AdaptiveETA', 'progressbar.AdaptiveETA', ([], {}), '()\n', (24955, 24957), False, 'import progressbar\n'), ((28173, 28199), 'math.exp', 'math.exp', (['(-i ** 2 - j ** 2)'], {}), '(-i ** 2 - j ** 2)\n', (28181, 28199), False, 'import math\n'), ((28234, 28277), 'numpy.linalg.norm', 'np.linalg.norm', (['(matrix[neighbor] - centroid)'], {}), '(matrix[neighbor] - centroid)\n', (28248, 28277), True, 'import numpy as np\n'), ((31515, 31558), 'numpy.linalg.norm', 'np.linalg.norm', (['(matrix[neighbor] - centroid)'], {}), '(matrix[neighbor] - centroid)\n', (31529, 31558), True, 'import numpy as np\n'), ((1656, 1673), 'numpy.unique', 'np.unique', (['regImg'], {}), '(regImg)\n', (1665, 1673), True, 'import numpy as np\n'), ((23273, 23301), 'math.exp', 'math.exp', (['(-0.5 * sigmas[seg])'], {}), '(-0.5 * sigmas[seg])\n', (23281, 23301), False, 'import math\n')] |
from copy import deepcopy
from functools import partial
from random import choice, randint, random, sample
from typing import Any, Callable, List, TYPE_CHECKING, Union
import numpy as np
from fedot.core.composer.constraint import constraint_function
from fedot.core.log import Log
from fedot.core.optimisers.gp_comp.gp_operators import random_graph
from fedot.core.optimisers.gp_comp.individual import Individual
from fedot.core.optimisers.graph import OptGraph, OptNode
from fedot.core.optimisers.opt_history import ParentOperator
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.utils import ComparableEnum as Enum, DEFAULT_PARAMS_STUB
if TYPE_CHECKING:
from fedot.core.optimisers.gp_comp.gp_optimiser import GraphGenerationParams
# Upper bound on attempts to produce a constraint-satisfying mutated graph.
MAX_NUM_OF_ATTEMPTS = 100
# Upper bound on mutation cycles (not referenced in the visible code).
MAX_MUT_CYCLES = 5
# Probability of reusing one randomly chosen mutation type for all iterations
# within a single _adapt_and_apply_mutations call.
STATIC_MUTATION_PROBABILITY = 0.7
class MutationTypesEnum(Enum):
    """Available mutation operators.

    Fix: ``single_add``, ``single_change`` and ``single_drop`` previously had
    trailing commas, which made their values 1-tuples (``('single_add',)``)
    instead of plain strings like every other member.
    """
    simple = 'simple'
    growth = 'growth'
    local_growth = 'local_growth'
    reduce = 'reduce'
    single_add = 'single_add'
    single_change = 'single_change'
    single_drop = 'single_drop'
    single_edge = 'single_edge'
    none = 'none'
class MutationStrengthEnum(Enum):
    """Mutation strength levels; the value scales the per-node mutation
    probability in get_mutation_prob."""
    weak = 0.2
    mean = 1.0
    strong = 5.0
def get_mutation_prob(mut_id, node):
    """ Function returns mutation probability for certain node in the graph
    :param mut_id: MutationStrengthEnum mean weak or strong mutation
    :param node: root node of the graph
    :return mutation_prob: mutation probability
    """
    if mut_id in list(MutationStrengthEnum):
        # strength is attenuated by the node's distance from the primary level
        return mut_id.value / (node.distance_to_primary_level + 1)
    # fallback probability for unknown strength identifiers
    return 0.7
def _will_mutation_be_applied(mutation_prob, mutation_type) -> bool:
    """Decide whether a mutation should actually be applied.

    ``random()`` is always consumed first (as in the original expression) so
    the RNG stream stays identical regardless of the mutation type.
    """
    roll_passed = random() <= mutation_prob
    return roll_passed and mutation_type != MutationTypesEnum.none
def _adapt_and_apply_mutations(new_graph: Any, mutation_prob: float, types: List[Union[MutationTypesEnum, Callable]],
                               num_mut: int, requirements, params: 'GraphGenerationParams', max_depth: int):
    """
    Apply ``num_mut`` mutations in sequence, adapting the graph representation
    as needed.

    With probability STATIC_MUTATION_PROBABILITY one randomly chosen mutation
    type is reused for every iteration; otherwise a fresh type is drawn each
    time. A custom (callable) mutation is applied at most once.
    """
    use_static_type = random() < STATIC_MUTATION_PROBABILITY
    static_type = choice(types)
    applied_mutation_names = []
    for _ in range(num_mut):
        current_type = static_type if use_static_type else choice(types)
        custom = isinstance(current_type, Callable)
        # custom mutations operate on the domain graph, built-in ones on OptGraph
        if custom:
            new_graph = params.adapter.restore(new_graph)
        elif not isinstance(new_graph, OptGraph):
            new_graph = params.adapter.adapt(new_graph)
        new_graph = _apply_mutation(new_graph=new_graph, mutation_prob=mutation_prob,
                                    mutation_type=current_type, is_custom_mutation=custom,
                                    requirements=requirements, params=params, max_depth=max_depth)
        applied_mutation_names.append(str(current_type))
        if not isinstance(new_graph, OptGraph):
            new_graph = params.adapter.adapt(new_graph)
        if custom:
            # custom mutation occurs once
            break
    return new_graph, applied_mutation_names
def _apply_mutation(new_graph: Any, mutation_prob: float, mutation_type: Union[MutationTypesEnum, Callable],
                    is_custom_mutation: bool, requirements, params: 'GraphGenerationParams', max_depth: int):
    """
    Apply a single mutation operator to an already-adapted graph.

    :raises ValueError: if ``mutation_type`` is neither registered in
        ``mutation_by_type`` nor a custom callable (``MutationTypesEnum.none``
        is silently skipped).
    """
    if not _will_mutation_be_applied(mutation_prob, mutation_type):
        return new_graph
    if mutation_type in mutation_by_type or is_custom_mutation:
        mutation_func = mutation_type if is_custom_mutation else mutation_by_type[mutation_type]
        new_graph = mutation_func(new_graph, requirements=requirements,
                                  params=params,
                                  max_depth=max_depth)
    elif mutation_type != MutationTypesEnum.none:
        raise ValueError(f'Required mutation type is not found: {mutation_type}')
    return new_graph
def mutation(types: List[Union[MutationTypesEnum, Callable]], params: 'GraphGenerationParams',
             ind: Individual, requirements, log: Log,
             max_depth: int = None, add_to_history=True) -> Any:
    """ Function apply mutation operator to graph

    Tries up to MAX_NUM_OF_ATTEMPTS times to produce a graph satisfying the
    composer constraints; if every attempt fails, a copy of the original
    individual is returned unchanged.

    :param types: mutation types (or custom callables) to choose from
    :param params: graph generation parameters (adapter, rules, ...)
    :param ind: individual whose graph is mutated
    :param requirements: composer requirements (mutation_prob, max_depth, ...)
    :param log: logger
    :param max_depth: optional override for requirements.max_depth
    :param add_to_history: whether to record a ParentOperator per applied mutation
    """
    max_depth = max_depth if max_depth else requirements.max_depth
    mutation_prob = requirements.mutation_prob
    for _ in range(MAX_NUM_OF_ATTEMPTS):
        new_graph = deepcopy(ind.graph)
        # number of mutations drawn from a lognormal distribution, at least 1
        num_mut = max(int(round(np.random.lognormal(0, sigma=0.5))), 1)
        new_graph, mutation_names = _adapt_and_apply_mutations(new_graph=new_graph, mutation_prob=mutation_prob,
                                                               types=types, num_mut=num_mut,
                                                               requirements=requirements, params=params,
                                                               max_depth=max_depth)
        is_correct_graph = constraint_function(new_graph, params)
        if is_correct_graph:
            # Fix: the Individual was previously constructed twice when
            # add_to_history was set; a single construction suffices.
            new_individual = Individual(new_graph)
            if add_to_history:
                # NOTE(review): this aliases ind.parent_operators, so the
                # appends below also mutate the source individual's history —
                # confirm whether a copy was intended.
                new_individual.parent_operators = ind.parent_operators
                for mutation_name in mutation_names:
                    new_individual.parent_operators.append(
                        ParentOperator(operator_type='mutation',
                                       operator_name=str(mutation_name),
                                       parent_objects=[params.adapter.restore_as_template(ind.graph)]))
            return new_individual
    log.debug('Number of mutation attempts exceeded. '
              'Please check composer requirements for correctness.')
    return deepcopy(ind)
def simple_mutation(graph: Any, requirements, **kwargs) -> Any:
    """
    Walk the tree from the root and, with per-node probability
    ``node_mutation_probability``, replace each node's operation with a
    randomly chosen one (secondary for internal nodes, primary for leaves).
    """
    node_mutation_probability = get_mutation_prob(mut_id=requirements.mutation_strength,
                                                  node=graph.root_node)

    def _mutate_recursively(node: Any) -> None:
        if node.nodes_from:
            if random() < node_mutation_probability:
                replacement = OptNode(content={'name': choice(requirements.secondary),
                                                 'params': DEFAULT_PARAMS_STUB},
                                       nodes_from=node.nodes_from)
                graph.update_node(node, replacement)
            for parent in node.nodes_from:
                _mutate_recursively(parent)
        elif random() < node_mutation_probability:
            replacement = OptNode(content={'name': choice(requirements.primary),
                                             'params': DEFAULT_PARAMS_STUB})
            graph.update_node(node, replacement)

    _mutate_recursively(graph.root_node)
    return graph
def single_edge_mutation(graph: Any, max_depth, *args, **kwargs):
    """Add one new edge between two randomly chosen nodes, avoiding cycles
    and duplicate edges; revert to the untouched graph if the new edge pushes
    the depth beyond ``max_depth``."""
    backup = deepcopy(graph)
    for _ in range(MAX_NUM_OF_ATTEMPTS):
        if len(graph.nodes) < 2 or graph.depth > max_depth:
            return graph
        source, target = sample(graph.nodes, 2)
        descendant_ids = [n.descriptive_id for n in source.ordered_subnodes_hierarchy()]
        would_cycle = target.descriptive_id in descendant_ids
        already_connected = target.nodes_from is not None and source in target.nodes_from
        if not would_cycle and not already_connected:
            graph.operator.connect_nodes(source, target)
            break
    if graph.depth > max_depth:
        return backup
    return graph
def _add_intermediate_node(graph: Any, requirements, params, node_to_mutate):
    """Insert a new secondary node between ``node_to_mutate`` and its parents."""
    parent_names = [str(n.content['name']) for n in node_to_mutate.nodes_from]
    candidates = params.advisor.propose_parent(str(node_to_mutate.content['name']),
                                               parent_names,
                                               requirements.secondary)
    if not candidates:
        return graph
    new_node = OptNode(content={'name': choice(candidates),
                                 'params': DEFAULT_PARAMS_STUB})
    # rewire: the new node inherits the old parents and becomes the sole parent
    new_node.nodes_from = node_to_mutate.nodes_from
    node_to_mutate.nodes_from = [new_node]
    graph.nodes.append(new_node)
    return graph
def _add_separate_parent_node(graph: Any, requirements, params, node_to_mutate):
    """Attach between one and three new primary nodes as additional parents
    of ``node_to_mutate``."""
    candidates = params.advisor.propose_parent(str(node_to_mutate.content['name']), None,
                                               requirements.primary)
    if not candidates:
        return graph
    for attempt in range(randint(1, 3)):
        if attempt == len(candidates):
            # cannot add more parents than there are candidate operations
            break
        new_parent = OptNode(content={'name': choice(candidates),
                                         'params': DEFAULT_PARAMS_STUB})
        if node_to_mutate.nodes_from:
            node_to_mutate.nodes_from.append(new_parent)
        else:
            node_to_mutate.nodes_from = [new_parent]
        graph.nodes.append(new_parent)
    return graph
def _add_as_child(graph: Any, requirements, params, node_to_mutate):
    """Append a new secondary node as a child of ``node_to_mutate``."""
    child = OptNode(content={'name': choice(requirements.secondary),
                             'params': DEFAULT_PARAMS_STUB})
    child.nodes_from = [node_to_mutate]
    # former children of the node are redirected to the new child
    graph.operator.actualise_old_node_children(node_to_mutate, child)
    graph.nodes.append(child)
    return graph
def single_add_mutation(graph: Any, requirements, params, max_depth, *args, **kwargs):
    """
    Add a single node to the graph next to a randomly chosen node, using a
    randomly selected strategy (child / separate parent / intermediate).
    """
    if graph.depth >= max_depth:
        # add mutation is not possible
        return graph
    target = choice(graph.nodes)
    strategies = [_add_as_child, _add_separate_parent_node]
    if target.nodes_from:
        # intermediate insertion only works for nodes that have parents
        strategies.append(_add_intermediate_node)
    chosen_strategy = choice(strategies)
    return chosen_strategy(graph, requirements, params, target)
def single_change_mutation(graph: Any, requirements, params, *args, **kwargs):
    """
    Replace the operation of one randomly chosen node with another suitable
    operation (secondary for internal nodes, primary for leaves).
    """
    old_node = choice(graph.nodes)
    parents = old_node.nodes_from
    candidates = requirements.secondary if old_node.nodes_from else requirements.primary
    if params.advisor:
        candidates = params.advisor.propose_change(current_operation_id=str(old_node.content['name']),
                                                   possible_operations=candidates)
    if not candidates:
        return graph
    replacement = OptNode(content={'name': choice(candidates),
                                     'params': DEFAULT_PARAMS_STUB})
    replacement.nodes_from = parents
    graph.nodes = [replacement if n == old_node else n for n in graph.nodes]
    graph.operator.actualise_old_node_children(old_node, replacement)
    return graph
def single_drop_mutation(graph: Any, *args, **kwargs):
    """Remove one randomly selected node from the graph.

    A node wrapping a data-source operation is dropped together with every
    node that belongs exclusively to that source branch; for an ordinary
    node the parents of the removed node are re-attached to its children.
    """
    doomed = choice(graph.nodes)
    # TODO replace as workaround
    operation = doomed.content['name']
    is_data_source = (hasattr(operation, 'operation_type') and
                      'data_source' in operation.operation_type)
    if is_data_source:
        branch = [n for n in graph.nodes
                  if operation.operation_type in n.descriptive_id and
                  n.descriptive_id.count('data_source') == 1]
        for branch_node in branch:
            graph.delete_node(branch_node)
        graph.delete_node(doomed)
    else:
        graph.delete_node(doomed)
        if doomed.nodes_from:
            for child in graph.operator.node_children(doomed):
                if child.nodes_from:
                    child.nodes_from.extend(doomed.nodes_from)
                else:
                    child.nodes_from = doomed.nodes_from
    return graph
def _tree_growth(graph: Any, requirements, params, max_depth: int, local_growth=True):
    """
    This mutation selects a random node in a tree, generates new subtree,
    and replaces the selected node's subtree.

    :param max_depth: depth constraint for the resulting tree
    :param local_growth: if True the new subtree may not exceed the depth of
        the subtree it replaces; otherwise only the overall constraint applies
    """
    # pick a random layer, then a random node from that layer
    random_layer_in_graph = randint(0, graph.depth - 1)
    node_from_graph = choice(graph.operator.nodes_from_layer(random_layer_in_graph))
    if local_growth:
        # a leaf is always replaced by a single primary node; an inner,
        # non-root node is replaced by a primary node with probability 1/2
        is_primary_node_selected = (not node_from_graph.nodes_from) or (
                node_from_graph.nodes_from and
                node_from_graph != graph.root_node
                and randint(0, 1))
    else:
        # coin flip, but only when the node's distance from the root leaves
        # no room for a subtree inside the max_depth budget
        is_primary_node_selected = \
            randint(0, 1) and \
            not graph.operator.distance_to_root_level(node_from_graph) < max_depth
    if is_primary_node_selected:
        new_subtree = OptNode(content={'name': choice(requirements.primary),
                                  'params': DEFAULT_PARAMS_STUB})
    else:
        # compute the remaining depth budget for the generated subtree
        if local_growth:
            max_depth = node_from_graph.distance_to_primary_level
        else:
            max_depth = max_depth - graph.operator.distance_to_root_level(node_from_graph)
        new_subtree = random_graph(params=params, requirements=requirements,
                                  max_depth=max_depth).root_node
    graph.update_subtree(node_from_graph, new_subtree)
    return graph
def growth_mutation(graph: Any, requirements, params, max_depth: int, local_growth=True) -> Any:
    """Grow the graph either by a single node or by a whole random subtree.

    With probability 0.5 a single node is inserted (simple growth); otherwise
    a randomly generated subtree replaces an existing one.

    :param local_growth: if true then maximal depth of new subtree equals depth
        of the tree located in the selected random node; if false then the
        previous depth of the selected node doesn't affect the new subtree
        depth, which only has to satisfy the parent tree's depth constraint
    """
    use_simple_growth = random() > 0.5
    if use_simple_growth:
        # simple growth: one node can be added
        return single_add_mutation(graph, requirements, params, max_depth)
    # advanced growth: several nodes can be added at once
    return _tree_growth(graph, requirements, params, max_depth, local_growth)
def reduce_mutation(graph: OptGraph, requirements, **kwargs) -> OptGraph:
    """Shrink the graph by removing a random non-root subtree.

    If deleting the subtree would leave some child below the minimal arity,
    the selected node is replaced by a random primary node instead.
    """
    if len(graph.nodes) == 1:
        return graph  # nothing left to reduce
    removable = [n for n in graph.nodes if n is not graph.root_node]
    victim = choice(removable)
    children = graph.operator.node_children(victim)
    deletion_keeps_arity = all(len(child.nodes_from) - 1 >= requirements.min_arity
                               for child in children)
    if deletion_keeps_arity:
        graph.delete_subtree(victim)
    else:
        substitute = OptNode(content={'name': choice(requirements.primary),
                                         'params': DEFAULT_PARAMS_STUB})
        graph.update_subtree(victim, substitute)
    return graph
# Registry mapping each mutation type to its implementation.  Both growth
# variants share one function, pre-configured with functools.partial to
# toggle the locality of the subtree depth limit.
mutation_by_type = {
    MutationTypesEnum.simple: simple_mutation,
    MutationTypesEnum.growth: partial(growth_mutation, local_growth=False),
    MutationTypesEnum.local_growth: partial(growth_mutation, local_growth=True),
    MutationTypesEnum.reduce: reduce_mutation,
    MutationTypesEnum.single_add: single_add_mutation,
    MutationTypesEnum.single_edge: single_edge_mutation,
    MutationTypesEnum.single_drop: single_drop_mutation,
    MutationTypesEnum.single_change: single_change_mutation,
}
| [
"functools.partial",
"copy.deepcopy",
"random.randint",
"fedot.core.composer.constraint.constraint_function",
"random.sample",
"fedot.core.optimisers.gp_comp.gp_operators.random_graph",
"fedot.core.optimisers.gp_comp.individual.Individual",
"random.choice",
"random.random",
"numpy.random.lognormal... | [((2347, 2360), 'random.choice', 'choice', (['types'], {}), '(types)\n', (2353, 2360), False, 'from random import choice, randint, random, sample\n'), ((6055, 6068), 'copy.deepcopy', 'deepcopy', (['ind'], {}), '(ind)\n', (6063, 6068), False, 'from copy import deepcopy\n'), ((7510, 7525), 'copy.deepcopy', 'deepcopy', (['graph'], {}), '(graph)\n', (7518, 7525), False, 'from copy import deepcopy\n'), ((10276, 10295), 'random.choice', 'choice', (['graph.nodes'], {}), '(graph.nodes)\n', (10282, 10295), False, 'from random import choice, randint, random, sample\n'), ((10478, 10507), 'random.choice', 'choice', (['single_add_strategies'], {}), '(single_add_strategies)\n', (10484, 10507), False, 'from random import choice, randint, random, sample\n'), ((10757, 10776), 'random.choice', 'choice', (['graph.nodes'], {}), '(graph.nodes)\n', (10763, 10776), False, 'from random import choice, randint, random, sample\n'), ((11609, 11628), 'random.choice', 'choice', (['graph.nodes'], {}), '(graph.nodes)\n', (11615, 11628), False, 'from random import choice, randint, random, sample\n'), ((12747, 12774), 'random.randint', 'randint', (['(0)', '(graph.depth - 1)'], {}), '(0, graph.depth - 1)\n', (12754, 12774), False, 'from random import choice, randint, random, sample\n'), ((15140, 15153), 'random.choice', 'choice', (['nodes'], {}), '(nodes)\n', (15146, 15153), False, 'from random import choice, randint, random, sample\n'), ((15725, 15769), 'functools.partial', 'partial', (['growth_mutation'], {'local_growth': '(False)'}), '(growth_mutation, local_growth=False)\n', (15732, 15769), False, 'from functools import partial\n'), ((15807, 15850), 'functools.partial', 'partial', (['growth_mutation'], {'local_growth': '(True)'}), '(growth_mutation, local_growth=True)\n', (15814, 15850), False, 'from functools import partial\n'), ((2281, 2289), 'random.random', 'random', ([], {}), '()\n', (2287, 2289), False, 'from random import choice, randint, random, sample\n'), 
((4737, 4756), 'copy.deepcopy', 'deepcopy', (['ind.graph'], {}), '(ind.graph)\n', (4745, 4756), False, 'from copy import deepcopy\n'), ((5253, 5291), 'fedot.core.composer.constraint.constraint_function', 'constraint_function', (['new_graph', 'params'], {}), '(new_graph, params)\n', (5272, 5291), False, 'from fedot.core.composer.constraint import constraint_function\n'), ((7689, 7711), 'random.sample', 'sample', (['graph.nodes', '(2)'], {}), '(graph.nodes, 2)\n', (7695, 7711), False, 'from random import choice, randint, random, sample\n'), ((9190, 9203), 'random.randint', 'randint', (['(1)', '(3)'], {}), '(1, 3)\n', (9197, 9203), False, 'from random import choice, randint, random, sample\n'), ((14371, 14379), 'random.random', 'random', ([], {}), '()\n', (14377, 14379), False, 'from random import choice, randint, random, sample\n'), ((2505, 2518), 'random.choice', 'choice', (['types'], {}), '(types)\n', (2511, 2518), False, 'from random import choice, randint, random, sample\n'), ((5350, 5371), 'fedot.core.optimisers.gp_comp.individual.Individual', 'Individual', (['new_graph'], {}), '(new_graph)\n', (5360, 5371), False, 'from fedot.core.optimisers.gp_comp.individual import Individual\n'), ((13146, 13159), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (13153, 13159), False, 'from random import choice, randint, random, sample\n'), ((13658, 13733), 'fedot.core.optimisers.gp_comp.gp_operators.random_graph', 'random_graph', ([], {'params': 'params', 'requirements': 'requirements', 'max_depth': 'max_depth'}), '(params=params, requirements=requirements, max_depth=max_depth)\n', (13670, 13733), False, 'from fedot.core.optimisers.gp_comp.gp_operators import random_graph\n'), ((1856, 1864), 'random.random', 'random', ([], {}), '()\n', (1862, 1864), False, 'from random import choice, randint, random, sample\n'), ((5436, 5457), 'fedot.core.optimisers.gp_comp.individual.Individual', 'Individual', (['new_graph'], {}), '(new_graph)\n', (5446, 5457), False, 'from 
fedot.core.optimisers.gp_comp.individual import Individual\n'), ((6471, 6479), 'random.random', 'random', ([], {}), '()\n', (6477, 6479), False, 'from random import choice, randint, random, sample\n'), ((6933, 6941), 'random.random', 'random', ([], {}), '()\n', (6939, 6941), False, 'from random import choice, randint, random, sample\n'), ((8614, 8632), 'random.choice', 'choice', (['candidates'], {}), '(candidates)\n', (8620, 8632), False, 'from random import choice, randint, random, sample\n'), ((9738, 9768), 'random.choice', 'choice', (['requirements.secondary'], {}), '(requirements.secondary)\n', (9744, 9768), False, 'from random import choice, randint, random, sample\n'), ((11192, 11210), 'random.choice', 'choice', (['candidates'], {}), '(candidates)\n', (11198, 11210), False, 'from random import choice, randint, random, sample\n'), ((13072, 13085), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (13079, 13085), False, 'from random import choice, randint, random, sample\n'), ((4789, 4822), 'numpy.random.lognormal', 'np.random.lognormal', (['(0)'], {'sigma': '(0.5)'}), '(0, sigma=0.5)\n', (4808, 4822), True, 'import numpy as np\n'), ((9308, 9326), 'random.choice', 'choice', (['candidates'], {}), '(candidates)\n', (9314, 9326), False, 'from random import choice, randint, random, sample\n'), ((13329, 13357), 'random.choice', 'choice', (['requirements.primary'], {}), '(requirements.primary)\n', (13335, 13357), False, 'from random import choice, randint, random, sample\n'), ((15450, 15478), 'random.choice', 'choice', (['requirements.primary'], {}), '(requirements.primary)\n', (15456, 15478), False, 'from random import choice, randint, random, sample\n'), ((6567, 6597), 'random.choice', 'choice', (['requirements.secondary'], {}), '(requirements.secondary)\n', (6573, 6597), False, 'from random import choice, randint, random, sample\n'), ((7027, 7055), 'random.choice', 'choice', (['requirements.primary'], {}), '(requirements.primary)\n', (7033, 7055), 
False, 'from random import choice, randint, random, sample\n')] |
# (c) 2018-2021, <NAME> @ ETH Zurich
# Computer-assisted Applications in Medicine (CAiM) Group, Prof. <NAME>
import numpy as np
import os
from data_read.imarisfiles import ImarisFiles
from skimage.transform import resize
from utils.im_processing import interp3
from data_read.spots import Spots
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Patch:
    """Patch extraction helper for 3D microscopy volumes stored on disk.

    Computes, in both the file's native voxel grid (``*_in``) and the desired
    output grid (``*_vox``), the patch size, overlap and padding needed to
    tile a volume, and offers utilities to map spot coordinates between the
    patch frame and the world frame.

    Currently only Imaris ``.ims`` files are supported.
    """
    def __init__(self, filename, out_psize=None, overlap_vox=None, padsize_vox=None, patchsize_vox=None,
                 lChannels=None, lChannelsGT=None, padtype='constant', z_minstep=10):
        self.filename = filename
        def checkInput(pin, fillz=1):
            # Normalise a scalar / 1-, 2- or 3-element input to a 3-element
            # array; a 2-element input is prefixed with `fillz` as the z value.
            pout = pin
            try:
                vaux = pout[0]
            except TypeError:
                pout = np.repeat(pout, 3)
            if len(pout) == 1:
                pout = np.repeat(pout, 3)
            elif len(pout) == 2:
                pout = np.array([fillz, pin[0], pin[1]])
            else:
                pout = np.array(pin)
            return pout
        # in_parameters
        self.filetype = os.path.splitext(filename)[1]
        self.patchsize_vox = checkInput(patchsize_vox)
        if self.filetype == '.ims':
            self.imfile = ImarisFiles(self.filename)
            # flip so axis order is (z, y, x) — presumably Imaris stores
            # (x, y, z); TODO confirm against ImarisFiles
            self.in_imsize = np.flip(self.imfile.imsize, 0)
            self.in_psize = np.flip(self.imfile.voxelSize, 0)
            if len(self.patchsize_vox) == 2:
                self.in_psize = self.in_psize[1::]
        else:
            raise Exception('File extension not recognized')
        if not out_psize:
            out_psize = self.in_psize
        self.padtype = padtype
        self.lChannels = lChannels
        self.lChannelsGT = lChannelsGT
        self.out_psize = checkInput(out_psize)
        self.overlap_vox = checkInput(overlap_vox, 0)
        # overlap expressed in input-grid voxels
        self.overlap_in = np.round(self.overlap_vox * self.out_psize / self.in_psize).astype(np.uint16)
        self.z_minstep = z_minstep
        if padsize_vox is None: # If overlap has been calculated to correctly stitch the sample, padsize should be the same
            self.padsize_vox = (np.round(self.overlap_vox / 2)).astype(np.uint16)
        else:
            self.padsize_vox = checkInput(padsize_vox)
        self.padsize_in = np.round(self.padsize_vox * self.out_psize / self.in_psize).astype(np.uint16)
        self.patchsize_in = np.round(self.patchsize_vox * self.out_psize / self.in_psize).astype(np.uint16)
        self.patchsize_in = [1 if x == 1 else self.patchsize_in[c] for c, x in
                             enumerate(self.patchsize_vox)]  # to avoid problems in 2D
        # Corrections for z
        self.patchsize_in[0] = max(self.patchsize_in[0], 1)
        if self.overlap_in[0] == self.patchsize_in[0]:
            # NOTE(review): this builds a Warning object but never raises or
            # emits it (warnings.warn was probably intended) — confirm.
            Warning("Overlap equals patchsize in z. Decreasing overlap... check if the result makes sense!")
            self.overlap_in[0] -= 1
        # NOTE(review): `schannels` is computed but never used in this class.
        schannels = set([x.lower() for x in self.lChannelsGT]) if not (self.lChannelsGT is None) else set()
        self.npatches = np.ceil(self.in_imsize / (self.patchsize_in - self.padsize_in * 2)).astype(np.uint16)
        self.patchsize_crop = self.patchsize_vox - self.padsize_vox * 2
    def getAnnotated_Ind(self):
        """Return z-slice indices that carry annotations in all GT channels.

        If every slice is annotated, returns slices at every ``z_minstep``
        instead of all of them.
        """
        zInd = np.empty(shape=[self.in_imsize[0], len(self.lChannelsGT)])
        for nc, channel in enumerate(self.lChannelsGT):
            vol = self.imfile.getVolume((channel,))
            # a slice counts as annotated when it has any nonzero content
            zInd[:, nc] = vol[..., 0].sum(1).sum(1) > 0
        if (zInd == 1).all():  # all slices are annotated
            fInd = np.arange(self.z_minstep, self.in_imsize[0], self.z_minstep)
        else:
            fInd = np.where(zInd.all(1))[0]
        return fInd
    def getPatchIndexes(self, zInd=None, do_zInd=True):
        """Compute patch bounds as ``[lbz, ubz, lby, uby, lbx, ubx]`` lists.

        Bounds may be negative or exceed the image size by ``padsize_in``;
        it is the consumer's job to pad accordingly.
        """
        # vIndex = [lbx, ubx, lby, uby, lbz, ubz]
        if do_zInd:
            zInd = self.getAnnotated_Ind()
        def calc1Dind(ldim, ndim):
            # 1D bounds along axis `ndim` of length `ldim`: z patches are
            # centred on annotated slices, other axes tile with overlap.
            lInd = []
            if ndim == 0 and zInd is not None:
                for zz in zInd:
                    if self.patchsize_in[ndim] % 2:
                        lb = zz - self.patchsize_in[ndim] // 2
                        ub = zz + self.patchsize_in[ndim] // 2 + 1
                    else:
                        lb = zz - self.patchsize_in[ndim] // 2
                        ub = zz + self.patchsize_in[ndim] // 2
                    lInd.append([lb, ub])
            else:
                blim = False
                maxub = ldim + self.padsize_in[ndim]
                minub = -1 * self.padsize_in[ndim]
                lb = minub
                ub = lb + self.patchsize_in[ndim]
                lInd.append([lb, ub])
                if ub >= maxub:
                    blim = True
                while not blim:
                    lb = ub - self.overlap_in[ndim]
                    ub = lb + self.patchsize_in[ndim]
                    if ub == ldim:
                        blim = True
                    elif ub > maxub:
                        # clamp the last patch to end exactly at the padded edge
                        ub = maxub
                        lb = maxub - self.patchsize_in[ndim]
                        blim = True
                    lInd.append([lb, ub])
            return lInd
        ind_aux = []
        for ndim, ldim in enumerate(self.in_imsize):
            ind_aux.append(calc1Dind(ldim, ndim))
        # cartesian product of the per-axis bounds
        vIndex = []
        for lb1, ub1 in ind_aux[0]:
            for lb2, ub2 in ind_aux[1]:
                for lb3, ub3 in ind_aux[2]:
                    vIndex.append([lb1, ub1, lb2, ub2, lb3, ub3])
        return vIndex
    def getPatch_spots(self, channel=None, ind=None, spot_rad=None):
        """Return spot coordinates (patch frame, µm) inside the bounds ``ind``."""
        channel = self.lChannelsGT if channel is None else channel
        cspot = self.imfile.getSpotsObj(channel, spot_rad=spot_rad)
        cspot.set_newlimits_vox(ind)
        return cspot.X_um0
    def transform_spotframe_um0(self, X0, ind):
        """Map patch-frame spot coordinates ``X0`` into image-frame µm.

        Negative lower bounds (padding) are clipped to 0; the column order of
        the result is reversed relative to the input.
        """
        if len(X0) == 0:
            return X0
        else:
            ind_aux = [max(x, 0) for x in [ind[0], ind[2], ind[4]]]
            Xf_aux = X0 * self.out_psize + ind_aux * self.in_psize
            Xf = Xf_aux[:, ::-1]
            return Xf
    def spot_gtmatch(self, X0, channel=None, ind=None, spot_rad=None):
        """Match predicted spots ``X0`` against GT spots within patch ``ind``."""
        Xgt_um = self.getPatch_spots(channel=channel, ind=ind, spot_rad=spot_rad)
        Xf_um = self.transform_spotframe_um0(X0, ind)
        metrics = Spots.gt_match_um(Xgt=Xgt_um, Xpred=Xf_um, rmatch=spot_rad)
        return metrics
    def transform_spotframe_um(self, X0, ind=None):
        """Map patch-frame coordinates to world µm (image frame plus origin)."""
        Xf_um0 = self.transform_spotframe_um0(X0, ind)
        Xf_um = Xf_um0 + self.imfile.imExtends[0]
        return Xf_um
    def dataset_norm(self, X):
        """Return a float64 copy of ``X`` z-scored per channel with file stats."""
        Xout = X.copy().astype(np.float64)
        # if (pconfig['dataset_type'] in ['synthesis']) and (Xout.shape[-1] > len(self.lChannels)):
        #     lChannels = self.lChannels + self.lChannelsGT
        # else:
        lChannels = self.lChannels
        for cnum, cname in enumerate(lChannels):
            Xout[..., cnum] = (Xout[..., cnum] - self.imfile.get_stat(cname, 'mean')) / self.imfile.get_stat(cname,
                                                                                                            'std')
        return Xout
    def patch_resize(self, lPatch_orig, dim_mode=3):
        """Resample a patch to ``patchsize_vox``.

        ``dim_mode=3`` interpolates in 3D; ``dim_mode=2`` resizes each z-slice
        independently in 2D. Other values return the uninitialised buffer.
        """
        lPatch = np.empty(shape=list(self.patchsize_vox) + [lPatch_orig.shape[-1]], dtype=lPatch_orig.dtype)
        if dim_mode == 3:
            for nc in range(lPatch_orig.shape[-1]):
                lPatch[..., nc] = interp3(lPatch_orig[..., nc],
                                          [self.patchsize_vox[0], self.patchsize_vox[1], self.patchsize_vox[2]],
                                          interp_method='linear')
        elif dim_mode == 2:
            for nc in range(lPatch_orig.shape[-1]):
                for zz in range(lPatch_orig.shape[0]):
                    lPatch[zz, ..., nc] = resize(lPatch_orig[zz, ..., nc],
                                                 [self.patchsize_vox[1], self.patchsize_vox[2]], order=1,
                                                 mode='reflect', preserve_range=True)
        return lPatch
    def padded_inds(self):
        """Shrink each patch index in ``self.aInd`` by the padding margins.

        NOTE(review): relies on ``self.aInd`` being assigned elsewhere — it is
        not set in ``__init__`` within this class; confirm against callers.
        """
        aInds = self.aInd
        # Equivalent size of the cropped patch in original resolution
        size_orig = np.round(self.patchsize_crop * self.out_psize / self.in_psize).astype(np.uint16)
        size_orig = [1 if x == 1 else size_orig[c] for c, x in enumerate(self.patchsize_vox)]  # to avoid problems in 2D
        # round to closest even
        # size_orig = np.array([np.round(x / 2.) * 2 for x in size_orig], dtype=np.uint16)
        # size_orig = size_orig if len(self.patchsize_vox) > 2 else np.array([1] + list(size_orig), dtype=np.uint16)
        pad_aux = np.array(self.patchsize_in) - size_orig
        pad_orig = pad_aux // 2
        # We compensate the left border in uneven compensation of pad
        pad_orig0 = [x // 2 + 1 if x % 2 else x // 2 for x in pad_aux]
        if self.patchsize_vox[0] == 1:
            pad_orig[0] = 0
            pad_orig0[0] = 0
        aInds_pad = [None] * len(aInds)
        for c, ind in enumerate(aInds):
            v_aux = np.empty(shape=len(pad_orig0 * 2), dtype=np.int32)
            # even positions are lower bounds (+pad), odd are upper bounds (-pad)
            v_aux[0::2] = np.array(ind)[0::2] + pad_orig0
            v_aux[1::2] = np.array(ind)[1::2] - pad_orig
            if min(v_aux) < 0:
                logger.warning("Indices smaller than 0 for padded indices in sample {}, patch {}".format(
                    self.filename, str(c)))
            aInds_pad[c] = v_aux
        return aInds_pad
    def ind2pos(self, ind):
        """Convert a patch index ``ind`` into its integer grid position per axis."""
        # patchsize_eff = self.patchsize_in - self.padsize_in * 2
        overlap_aux = np.array([x + 1 if (x > 0) else x for x in self.overlap_in])
        patchsize_eff = self.patchsize_in - overlap_aux
        ndims = int(len(ind) / 2)
        pos = np.empty(shape=ndims, dtype=np.uint16)
        for dim in range(ndims):
            dind = ind[dim * 2:dim * 2 + 2]
            sind = dind[0] + self.padsize_in[dim]
            # pos_aux = max((sind - 1), 0) / patchsize_eff[dim]
            if patchsize_eff[dim] == 1:
                pos_aux = sind
            else:
                pos_aux = sind / (patchsize_eff[dim] + 1)
            # only the last (clamped) patch on an axis may be off-grid
            assert (dind[1] >= self.in_imsize[dim] or (pos_aux % 1) == 0)
            pos[dim] = pos_aux
        return pos
    def h5resize(self, ngroup, new_size=None):
        # Not implemented yet.
        if new_size is None:
            pass  # todo: take the one from the original image
        pass
    def create_spots(self, X, spotrad_um=None):
        """Wrap world-frame coordinates ``X`` into a Spots object for this image."""
        return Spots(X_um_rw=X, imExtends=self.imfile.imExtends, voxelSize=self.imfile.voxelSize,
                     imsize=self.imfile.imsize, radius_um=spotrad_um)
| [
"data_read.imarisfiles.ImarisFiles",
"numpy.flip",
"numpy.ceil",
"data_read.spots.Spots",
"numpy.empty",
"data_read.spots.Spots.gt_match_um",
"utils.im_processing.interp3",
"numpy.array",
"os.path.splitext",
"numpy.arange",
"skimage.transform.resize",
"numpy.round",
"logging.getLogger",
"n... | [((326, 353), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (343, 353), False, 'import logging\n'), ((6300, 6359), 'data_read.spots.Spots.gt_match_um', 'Spots.gt_match_um', ([], {'Xgt': 'Xgt_um', 'Xpred': 'Xf_um', 'rmatch': 'spot_rad'}), '(Xgt=Xgt_um, Xpred=Xf_um, rmatch=spot_rad)\n', (6317, 6359), False, 'from data_read.spots import Spots\n'), ((9594, 9654), 'numpy.array', 'np.array', (['[(x + 1 if x > 0 else x) for x in self.overlap_in]'], {}), '([(x + 1 if x > 0 else x) for x in self.overlap_in])\n', (9602, 9654), True, 'import numpy as np\n'), ((9760, 9798), 'numpy.empty', 'np.empty', ([], {'shape': 'ndims', 'dtype': 'np.uint16'}), '(shape=ndims, dtype=np.uint16)\n', (9768, 9798), True, 'import numpy as np\n'), ((10478, 10614), 'data_read.spots.Spots', 'Spots', ([], {'X_um_rw': 'X', 'imExtends': 'self.imfile.imExtends', 'voxelSize': 'self.imfile.voxelSize', 'imsize': 'self.imfile.imsize', 'radius_um': 'spotrad_um'}), '(X_um_rw=X, imExtends=self.imfile.imExtends, voxelSize=self.imfile.\n voxelSize, imsize=self.imfile.imsize, radius_um=spotrad_um)\n', (10483, 10614), False, 'from data_read.spots import Spots\n'), ((1097, 1123), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1113, 1123), False, 'import os\n'), ((1244, 1270), 'data_read.imarisfiles.ImarisFiles', 'ImarisFiles', (['self.filename'], {}), '(self.filename)\n', (1255, 1270), False, 'from data_read.imarisfiles import ImarisFiles\n'), ((1300, 1330), 'numpy.flip', 'np.flip', (['self.imfile.imsize', '(0)'], {}), '(self.imfile.imsize, 0)\n', (1307, 1330), True, 'import numpy as np\n'), ((1359, 1392), 'numpy.flip', 'np.flip', (['self.imfile.voxelSize', '(0)'], {}), '(self.imfile.voxelSize, 0)\n', (1366, 1392), True, 'import numpy as np\n'), ((3556, 3616), 'numpy.arange', 'np.arange', (['self.z_minstep', 'self.in_imsize[0]', 'self.z_minstep'], {}), '(self.z_minstep, self.in_imsize[0], self.z_minstep)\n', (3565, 3616), True, 'import numpy as 
np\n'), ((8663, 8690), 'numpy.array', 'np.array', (['self.patchsize_in'], {}), '(self.patchsize_in)\n', (8671, 8690), True, 'import numpy as np\n'), ((860, 878), 'numpy.repeat', 'np.repeat', (['pout', '(3)'], {}), '(pout, 3)\n', (869, 878), True, 'import numpy as np\n'), ((1861, 1920), 'numpy.round', 'np.round', (['(self.overlap_vox * self.out_psize / self.in_psize)'], {}), '(self.overlap_vox * self.out_psize / self.in_psize)\n', (1869, 1920), True, 'import numpy as np\n'), ((2276, 2335), 'numpy.round', 'np.round', (['(self.padsize_vox * self.out_psize / self.in_psize)'], {}), '(self.padsize_vox * self.out_psize / self.in_psize)\n', (2284, 2335), True, 'import numpy as np\n'), ((2382, 2443), 'numpy.round', 'np.round', (['(self.patchsize_vox * self.out_psize / self.in_psize)'], {}), '(self.patchsize_vox * self.out_psize / self.in_psize)\n', (2390, 2443), True, 'import numpy as np\n'), ((3050, 3117), 'numpy.ceil', 'np.ceil', (['(self.in_imsize / (self.patchsize_in - self.padsize_in * 2))'], {}), '(self.in_imsize / (self.patchsize_in - self.padsize_in * 2))\n', (3057, 3117), True, 'import numpy as np\n'), ((7424, 7552), 'utils.im_processing.interp3', 'interp3', (['lPatch_orig[..., nc]', '[self.patchsize_vox[0], self.patchsize_vox[1], self.patchsize_vox[2]]'], {'interp_method': '"""linear"""'}), "(lPatch_orig[..., nc], [self.patchsize_vox[0], self.patchsize_vox[1],\n self.patchsize_vox[2]], interp_method='linear')\n", (7431, 7552), False, 'from utils.im_processing import interp3\n'), ((8202, 8264), 'numpy.round', 'np.round', (['(self.patchsize_crop * self.out_psize / self.in_psize)'], {}), '(self.patchsize_crop * self.out_psize / self.in_psize)\n', (8210, 8264), True, 'import numpy as np\n'), ((787, 805), 'numpy.repeat', 'np.repeat', (['pout', '(3)'], {}), '(pout, 3)\n', (796, 805), True, 'import numpy as np\n'), ((935, 968), 'numpy.array', 'np.array', (['[fillz, pin[0], pin[1]]'], {}), '([fillz, pin[0], pin[1]])\n', (943, 968), True, 'import numpy as np\n'), ((1010, 
1023), 'numpy.array', 'np.array', (['pin'], {}), '(pin)\n', (1018, 1023), True, 'import numpy as np\n'), ((2131, 2161), 'numpy.round', 'np.round', (['(self.overlap_vox / 2)'], {}), '(self.overlap_vox / 2)\n', (2139, 2161), True, 'import numpy as np\n'), ((9149, 9162), 'numpy.array', 'np.array', (['ind'], {}), '(ind)\n', (9157, 9162), True, 'import numpy as np\n'), ((9207, 9220), 'numpy.array', 'np.array', (['ind'], {}), '(ind)\n', (9215, 9220), True, 'import numpy as np\n'), ((7810, 7941), 'skimage.transform.resize', 'resize', (['lPatch_orig[zz, ..., nc]', '[self.patchsize_vox[1], self.patchsize_vox[2]]'], {'order': '(1)', 'mode': '"""reflect"""', 'preserve_range': '(True)'}), "(lPatch_orig[zz, ..., nc], [self.patchsize_vox[1], self.patchsize_vox\n [2]], order=1, mode='reflect', preserve_range=True)\n", (7816, 7941), False, 'from skimage.transform import resize\n')] |
# -*- coding: utf-8 -*-
# @author: songjie
# @email: <EMAIL>
# @date: 2021/03/24
# SJ编程规范
# 命名:
# 1. 见名思意,变量的名字必须准确反映它的含义和内容
# 2. 遵循当前语言的变量命名规则
# 3. 不要对不同使用目的的变量使用同一个变量名
# 4. 同个项目不要使用不同名称表述同个东西
# 5. 函数/方法 使用动词+名词组合,其它使用名词组合
# 设计原则:
# 1. KISS原则: Keep it simple and stupid !
# 2. SOLID原则: S: 单一职责 O: 开闭原则 L: 迪米特法则 I: 接口隔离原则 D: 依赖倒置原则
#
import calendar
from itertools import product
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
def build_shop_features(shops_df: pd.DataFrame):
    """Build per-shop features: encoded city plus hand-crafted coordinates.

    :param shops_df: frame with ``shop_id`` and ``shop_name`` columns
    :return: frame with shop_id, city_code, coordinates and country part
    """
    # the city is the first token of the shop name; fix one known typo
    shops_df['city'] = shops_df['shop_name'].apply(lambda x: x.split()[0].lower())
    shops_df.loc[shops_df.city == '!якутск', 'city'] = 'якутск'
    shops_df['city_code'] = LabelEncoder().fit_transform(shops_df['city'])
    # (latitude, longitude, country part); zeros mark virtual "cities"
    coords = {
        'якутск': (62.028098, 129.732555, 4),
        'адыгея': (44.609764, 40.100516, 3),
        'балашиха': (55.8094500, 37.9580600, 1),
        'волжский': (53.4305800, 50.1190000, 3),
        'вологда': (59.2239000, 39.8839800, 2),
        'воронеж': (51.6720400, 39.1843000, 3),
        'выездная': (0, 0, 0),
        'жуковский': (55.5952800, 38.1202800, 1),
        'интернет-магазин': (0, 0, 0),
        'казань': (55.7887400, 49.1221400, 4),
        'калуга': (54.5293000, 36.2754200, 4),
        'коломна': (55.0794400, 38.7783300, 4),
        'красноярск': (56.0183900, 92.8671700, 4),
        'курск': (51.7373300, 36.1873500, 3),
        'москва': (55.7522200, 37.6155600, 1),
        'мытищи': (55.9116300, 37.7307600, 1),
        'н.новгород': (56.3286700, 44.0020500, 4),
        'новосибирск': (55.0415000, 82.9346000, 4),
        'омск': (54.9924400, 73.3685900, 4),
        'ростовнадону': (47.2313500, 39.7232800, 3),
        'спб': (59.9386300, 30.3141300, 2),
        'самара': (53.2000700, 50.1500000, 4),
        'сергиев': (56.3000000, 38.1333300, 4),
        'сургут': (61.2500000, 73.4166700, 4),
        'томск': (56.4977100, 84.9743700, 4),
        'тюмень': (57.1522200, 65.5272200, 4),
        'уфа': (54.7430600, 55.9677900, 4),
        'химки': (55.8970400, 37.4296900, 1),
        'цифровой': (0, 0, 0),
        'чехов': (55.1477000, 37.4772800, 4),
        'ярославль': (57.6298700, 39.8736800, 2),
    }
    shops_df['city_coord_1'] = shops_df['city'].apply(lambda c: coords[c][0])
    shops_df['city_coord_2'] = shops_df['city'].apply(lambda c: coords[c][1])
    shops_df['country_part'] = shops_df['city'].apply(lambda c: coords[c][2])
    return shops_df[['shop_id', 'city_code', 'city_coord_1', 'city_coord_2', 'country_part']]
def build_item_features(items_df: pd.DataFrame, item_cats_df: pd.DataFrame):
    """Build item features: encoded category and coarse "common" category.

    :param items_df: frame with ``item_id`` and ``item_category_id``
    :param item_cats_df: frame with ``item_category_id`` and ``item_category_name``
    :return: frame with item_id and the two encoded category columns
    """
    # normalise a few category names that would otherwise form spurious groups
    category_fixes = {
        'Чистые носители (штучные)': 'Чистые носители',
        'Чистые носители (шпиль)': 'Чистые носители',
        'PC ': 'Аксессуары',
        'Служебные': 'Служебные '
    }
    merged = pd.merge(items_df, item_cats_df, on='item_category_id')
    # coarse category: everything before the first dash of the full name
    merged['item_category'] = merged['item_category_name'].apply(lambda name: name.split('-')[0])
    merged['item_category'] = merged['item_category'].apply(lambda name: category_fixes.get(name, name))
    merged['item_category_common'] = LabelEncoder().fit_transform(merged['item_category'])
    merged['item_category_code'] = LabelEncoder().fit_transform(merged['item_category_name'])
    return merged[['item_id', 'item_category_common', 'item_category_code']]
def count_days(date_block_num):
    """Return calendar features for the month encoded by ``date_block_num``.

    Block 0 corresponds to January 2013; each increment advances one month.

    NOTE(review): the original author flagged this logic as questionable —
    the first element counts Sundays only, not whole weekends; confirm intent.

    :return: tuple (Sundays in month, days in month, month number 1-12)
    """
    years_offset, month_index = divmod(date_block_num, 12)
    year = 2013 + years_offset
    month = month_index + 1
    # monthcalendar pads missing days with 0, so week[6] != 0 means the week
    # actually contains a Sunday of this month
    sundays = sum(1 for week in calendar.monthcalendar(year, month) if week[6] != 0)
    days_in_month = calendar.monthrange(year, month)[1]
    return sundays, days_in_month, month
def build_feature_matrix(train_df: pd.DataFrame):
    """Create the (shop, item, month) grid with clipped monthly sales counts.

    For every month the full cartesian product of the shops and items seen in
    that month is generated; daily sales are summed per month, missing pairs
    are filled with 0 and the target is clipped to [0, 20].

    :param train_df: raw sales with date_block_num/shop_id/item_id/item_cnt_day
    :return: grid frame with an ``item_cnt_month`` float16 column
    """
    index_cols = ['shop_id', 'item_id', 'date_block_num']
    block_grids = []
    for block_num in train_df['date_block_num'].unique():
        in_block = train_df[train_df['date_block_num'] == block_num]
        shops = in_block['shop_id'].unique()
        items = in_block['item_id'].unique()
        grid = list(product(shops, items, [block_num]))
        block_grids.append(np.array(grid, dtype='int32'))
    matrix = pd.DataFrame(np.vstack(block_grids), columns=index_cols, dtype=np.int32)
    # aggregate daily sales into monthly totals
    monthly = train_df.groupby(['date_block_num', 'shop_id', 'item_id']).agg({'item_cnt_day': ['sum']})
    monthly.columns = ['item_cnt_month']
    monthly.reset_index(inplace=True)
    matrix = pd.merge(matrix, monthly, on=index_cols, how='left')
    matrix['item_cnt_month'] = (matrix['item_cnt_month']
                                .fillna(0)
                                .clip(0, 20)
                                .astype(np.float16))
    return matrix
def build_date_features(feature_matrix_df: pd.DataFrame):
    """Attach calendar features (Sunday count, month length) per block.

    :param feature_matrix_df: grid with a ``date_block_num`` column
    :return: same frame with ``weeknd_count`` and ``days_in_month`` added
    """
    # precompute once per possible block (34 months + 1) instead of per row
    calendar_info = {block: count_days(block) for block in range(35)}
    feature_matrix_df['weeknd_count'] = feature_matrix_df['date_block_num'].apply(
        lambda block: calendar_info[block][0])
    feature_matrix_df['days_in_month'] = feature_matrix_df['date_block_num'].apply(
        lambda block: calendar_info[block][1])
    return feature_matrix_df
def build_interaction_features(feature_matrix_df: pd.DataFrame):
    """Add shop/item interaction flags.

    ``item_first_interaction`` marks the first month an item appears anywhere;
    ``shop_item_sold_before`` marks months after the first recorded sale of
    the item in that particular shop (blocks after block 0 only).

    :return: (enriched matrix, first-item-block frame, first-shop-item frame)
    """
    # first month in which each item occurs at all
    first_item_block_df = feature_matrix_df.groupby(['item_id'])['date_block_num'].min().reset_index()
    first_item_block_df['item_first_interaction'] = 1
    # first month (after block 0) with the item present in a given shop
    first_shop_item_buy_block_df = (feature_matrix_df[feature_matrix_df['date_block_num'] > 0]
                                    .groupby(['shop_id', 'item_id'])['date_block_num'].min().reset_index())
    first_shop_item_buy_block_df['first_date_block_num'] = first_shop_item_buy_block_df['date_block_num']
    enriched = pd.merge(feature_matrix_df,
                        first_item_block_df[['item_id', 'date_block_num', 'item_first_interaction']],
                        on=['item_id', 'date_block_num'], how='left')
    enriched = pd.merge(enriched,
                        first_shop_item_buy_block_df[['item_id', 'shop_id', 'first_date_block_num']],
                        on=['item_id', 'shop_id'], how='left')
    # 100 is an "effectively never" sentinel, far beyond the 34 training blocks
    enriched['first_date_block_num'] = enriched['first_date_block_num'].fillna(100)
    enriched['shop_item_sold_before'] = (enriched['first_date_block_num'] <
                                         enriched['date_block_num']).astype('int8')
    enriched.drop(['first_date_block_num'], axis=1, inplace=True)
    enriched['item_first_interaction'] = enriched['item_first_interaction'].fillna(0).astype('int8')
    enriched['shop_item_sold_before'] = enriched['shop_item_sold_before'].fillna(0).astype('int8')
    return enriched, first_item_block_df, first_shop_item_buy_block_df
def build_lag_features(feature_matrix_df: pd.DataFrame, lags, col):
    """Add lagged copies of ``col`` shifted by each offset in ``lags``.

    For every lag ``i`` a column ``<col>_lag_<i>`` is added holding the value
    of ``col`` for the same (shop, item) pair ``i`` blocks earlier; pairs
    without history get NaN (cast to float16).

    :param feature_matrix_df: grid with date_block_num/shop_id/item_id and ``col``
    :param lags: iterable of positive integer lag offsets
    :param col: name of the column to lag
    :return: the frame with the new lag columns appended
    """
    base = feature_matrix_df[['date_block_num', 'shop_id', 'item_id', col]]
    for i in lags:
        shifted = base.copy()
        lag_col = col + '_lag_' + str(i)
        shifted.columns = ['date_block_num', 'shop_id', 'item_id', lag_col]
        # shifting the block index forward aligns past values with later rows
        shifted['date_block_num'] += i
        # BUG FIX: the merge result was previously assigned to a throwaway
        # local `df`, so the lag columns never reached the returned frame.
        feature_matrix_df = pd.merge(feature_matrix_df, shifted,
                                     on=['date_block_num', 'shop_id', 'item_id'], how='left')
        feature_matrix_df[lag_col] = feature_matrix_df[lag_col].astype('float16')
    return feature_matrix_df
# Join keys used by the average-price merges in build_avg_shop_item_price_features.
AVG_ITEM_PRICE_COLS = ['item_id', 'date_block_num']
AVG_SHOP_ITEM_PRICE_COLS = ['shop_id', 'item_id', 'date_block_num']
def build_avg_shop_item_price_features(feature_matrix_df: pd.DataFrame, date_block_item_shop_avg_price_df, date_block_item_avg_price_df):
    """Add lagged relative price deviation of each shop from the item average.

    Merges the precomputed per-item and per-shop-item average prices, derives
    their relative difference, keeps only its 1/2/3-block lags and drops the
    intermediate columns.

    :param feature_matrix_df: the (shop, item, month) grid
    :param date_block_item_shop_avg_price_df: frame with ``avg_shop_price``
    :param date_block_item_avg_price_df: frame with ``avg_item_price``
    :return: grid with ``item_shop_price_avg_lag_{1,2,3}`` columns
    """
    enriched = pd.merge(feature_matrix_df, date_block_item_avg_price_df, on=AVG_ITEM_PRICE_COLS, how='left')
    enriched = pd.merge(enriched, date_block_item_shop_avg_price_df, on=AVG_SHOP_ITEM_PRICE_COLS, how='left')
    enriched['avg_shop_price'] = enriched['avg_shop_price'].fillna(0).astype(np.float16)
    enriched['avg_item_price'] = enriched['avg_item_price'].fillna(0).astype(np.float16)
    # relative deviation of the shop price from the overall item price
    enriched['item_shop_price_avg'] = (enriched['avg_shop_price'] -
                                       enriched['avg_item_price']) / enriched['avg_item_price']
    enriched['item_shop_price_avg'] = enriched['item_shop_price_avg'].fillna(0)
    enriched = build_lag_features(enriched, [1, 2, 3], 'item_shop_price_avg')
    enriched.drop(['avg_shop_price', 'avg_item_price', 'item_shop_price_avg'], axis=1, inplace=True)
    return enriched
def _add_lagged_target_mean_enc(feature_matrix_df, group_cols, enc_col):
    """Mean-encode ``item_cnt_month`` over ``group_cols`` as ``enc_col`` and keep only its 1-3 month lags.

    The contemporaneous encoding column is dropped after lagging so the
    current month's target never leaks into the features.
    """
    means = (feature_matrix_df.groupby(group_cols)['item_cnt_month']
             .mean()
             .reset_index()
             .rename(columns={"item_cnt_month": enc_col}, errors="raise"))
    feature_matrix_df = pd.merge(feature_matrix_df, means, on=group_cols, how='left')
    feature_matrix_df[enc_col] = feature_matrix_df[enc_col].fillna(0).astype(np.float16)
    feature_matrix_df = build_lag_features(feature_matrix_df, [1, 2, 3], enc_col)
    feature_matrix_df.drop([enc_col], axis=1, inplace=True)
    return feature_matrix_df
def build_target_enc_features(feature_matrix_df: pd.DataFrame):
    """Add lagged mean target encodings of item_cnt_month for item, item/city and item/shop.

    :param feature_matrix_df: frame with 'date_block_num', 'item_id', 'city_code',
        'shop_id' and the target column 'item_cnt_month'
    :return: frame with the lagged encoding columns appended
    """
    # Per-item target encoding for last 3 months
    feature_matrix_df = _add_lagged_target_mean_enc(
        feature_matrix_df, ['date_block_num', 'item_id'], 'item_target_enc')
    # Target encoding for item/city for last 3 months
    feature_matrix_df = _add_lagged_target_mean_enc(
        feature_matrix_df, ['date_block_num', 'item_id', 'city_code'], 'item_loc_target_enc')
    # Target encoding for item/shop for last 3 months
    feature_matrix_df = _add_lagged_target_mean_enc(
        feature_matrix_df, ['date_block_num', 'item_id', 'shop_id'], 'item_shop_target_enc')
    return feature_matrix_df
def _add_lagged_new_item_avg(feature_matrix_df, group_cols, enc_col):
    """Average ``item_cnt_month`` of first-interaction (new) items over ``group_cols``; keep only its 1-3 month lags.

    The contemporaneous average is dropped after lagging to avoid target leakage.
    """
    new_items = feature_matrix_df[feature_matrix_df['item_first_interaction'] == 1]
    means = (new_items.groupby(group_cols)['item_cnt_month']
             .mean()
             .reset_index()
             .rename(columns={"item_cnt_month": enc_col}, errors="raise"))
    feature_matrix_df = pd.merge(feature_matrix_df, means, on=group_cols, how='left')
    feature_matrix_df[enc_col] = feature_matrix_df[enc_col].fillna(0).astype(np.float16)
    feature_matrix_df = build_lag_features(feature_matrix_df, [1, 2, 3], enc_col)
    feature_matrix_df.drop([enc_col], axis=1, inplace=True)
    return feature_matrix_df
def build_extra_interaction_features(feature_matrix_df: pd.DataFrame):
    """Add lagged average sales of newly introduced items, per category and per category/shop.

    :param feature_matrix_df: frame with 'item_first_interaction',
        'item_category_code', 'shop_id', 'date_block_num' and 'item_cnt_month'
    :return: frame with the lagged new-item average columns appended
    """
    # For new items add avg category sales for last 3 months
    feature_matrix_df = _add_lagged_new_item_avg(
        feature_matrix_df, ['date_block_num', 'item_category_code'], 'new_item_cat_avg')
    # For new items add avg category sales in a separate store for last 3 months
    feature_matrix_df = _add_lagged_new_item_avg(
        feature_matrix_df, ['date_block_num', 'item_category_code', 'shop_id'], 'new_item_shop_cat_avg')
    return feature_matrix_df
def lag_feature_adv(feature_matrix_df, lags, col):
    """Add 'adjacent item' lag features.

    For each lag ``i`` in ``lags``, a float16 column named
    ``col + '_lag_' + str(i) + '_adv'`` holds the value of ``col`` that the
    item with the next-higher item_id had ``i`` date blocks earlier in the
    same shop (NaN when that neighbour has no such observation).
    """
    base = feature_matrix_df[['date_block_num', 'shop_id', 'item_id', col]]
    for lag in lags:
        adv_col = col + '_lag_' + str(lag) + '_adv'
        shifted = base.copy()
        shifted.columns = ['date_block_num', 'shop_id', 'item_id', adv_col]
        # Push the block number forward so the merge lines up the value from
        # ``lag`` blocks ago, and shift item_id down so each row borrows the
        # value of item_id + 1.
        shifted['date_block_num'] += lag
        shifted['item_id'] -= 1
        feature_matrix_df = pd.merge(feature_matrix_df, shifted,
                                     on=['date_block_num', 'shop_id', 'item_id'], how='left')
        feature_matrix_df[adv_col] = feature_matrix_df[adv_col].astype('float16')
    return feature_matrix_df
| [
"calendar.monthcalendar",
"pandas.merge",
"sklearn.preprocessing.LabelEncoder",
"itertools.product",
"calendar.monthrange",
"numpy.vstack"
] | [((3137, 3192), 'pandas.merge', 'pd.merge', (['items_df', 'item_cats_df'], {'on': '"""item_category_id"""'}), "(items_df, item_cats_df, on='item_category_id')\n", (3145, 3192), True, 'import pandas as pd\n'), ((4910, 4971), 'pandas.merge', 'pd.merge', (['feature_matrix_df', 'group'], {'on': 'index_cols', 'how': '"""left"""'}), "(feature_matrix_df, group, on=index_cols, how='left')\n", (4918, 4971), True, 'import pandas as pd\n'), ((6282, 6441), 'pandas.merge', 'pd.merge', (['feature_matrix_df', "first_item_block_df[['item_id', 'date_block_num', 'item_first_interaction']]"], {'on': "['item_id', 'date_block_num']", 'how': '"""left"""'}), "(feature_matrix_df, first_item_block_df[['item_id',\n 'date_block_num', 'item_first_interaction']], on=['item_id',\n 'date_block_num'], how='left')\n", (6290, 6441), True, 'import pandas as pd\n'), ((6458, 6606), 'pandas.merge', 'pd.merge', (['feature_matrix_df', "first_shop_item_buy_block_df[['item_id', 'shop_id', 'first_date_block_num']]"], {'on': "['item_id', 'shop_id']", 'how': '"""left"""'}), "(feature_matrix_df, first_shop_item_buy_block_df[['item_id',\n 'shop_id', 'first_date_block_num']], on=['item_id', 'shop_id'], how='left')\n", (6466, 6606), True, 'import pandas as pd\n'), ((8269, 8367), 'pandas.merge', 'pd.merge', (['feature_matrix_df', 'date_block_item_avg_price_df'], {'on': 'AVG_ITEM_PRICE_COLS', 'how': '"""left"""'}), "(feature_matrix_df, date_block_item_avg_price_df, on=\n AVG_ITEM_PRICE_COLS, how='left')\n", (8277, 8367), True, 'import pandas as pd\n'), ((8387, 8495), 'pandas.merge', 'pd.merge', (['feature_matrix_df', 'date_block_item_shop_avg_price_df'], {'on': 'AVG_SHOP_ITEM_PRICE_COLS', 'how': '"""left"""'}), "(feature_matrix_df, date_block_item_shop_avg_price_df, on=\n AVG_SHOP_ITEM_PRICE_COLS, how='left')\n", (8395, 8495), True, 'import pandas as pd\n'), ((9770, 9868), 'pandas.merge', 'pd.merge', (['feature_matrix_df', 'item_id_target_mean'], {'on': "['date_block_num', 'item_id']", 'how': '"""left"""'}), 
"(feature_matrix_df, item_id_target_mean, on=['date_block_num',\n 'item_id'], how='left')\n", (9778, 9868), True, 'import pandas as pd\n'), ((10531, 10642), 'pandas.merge', 'pd.merge', (['feature_matrix_df', 'item_id_target_mean'], {'on': "['date_block_num', 'item_id', 'city_code']", 'how': '"""left"""'}), "(feature_matrix_df, item_id_target_mean, on=['date_block_num',\n 'item_id', 'city_code'], how='left')\n", (10539, 10642), True, 'import pandas as pd\n'), ((11329, 11438), 'pandas.merge', 'pd.merge', (['feature_matrix_df', 'item_id_target_mean'], {'on': "['date_block_num', 'item_id', 'shop_id']", 'how': '"""left"""'}), "(feature_matrix_df, item_id_target_mean, on=['date_block_num',\n 'item_id', 'shop_id'], how='left')\n", (11337, 11438), True, 'import pandas as pd\n'), ((12290, 12399), 'pandas.merge', 'pd.merge', (['feature_matrix_df', 'item_id_target_mean'], {'on': "['date_block_num', 'item_category_code']", 'how': '"""left"""'}), "(feature_matrix_df, item_id_target_mean, on=['date_block_num',\n 'item_category_code'], how='left')\n", (12298, 12399), True, 'import pandas as pd\n'), ((13161, 13281), 'pandas.merge', 'pd.merge', (['feature_matrix_df', 'item_id_target_mean'], {'on': "['date_block_num', 'item_category_code', 'shop_id']", 'how': '"""left"""'}), "(feature_matrix_df, item_id_target_mean, on=['date_block_num',\n 'item_category_code', 'shop_id'], how='left')\n", (13169, 13281), True, 'import pandas as pd\n'), ((4004, 4036), 'calendar.monthrange', 'calendar.monthrange', (['year', 'month'], {}), '(year, month)\n', (4023, 4036), False, 'import calendar\n'), ((4619, 4647), 'numpy.vstack', 'np.vstack', (['feature_matrix_df'], {}), '(feature_matrix_df)\n', (4628, 4647), True, 'import numpy as np\n'), ((7778, 7875), 'pandas.merge', 'pd.merge', (['feature_matrix_df', 'shifted'], {'on': "['date_block_num', 'shop_id', 'item_id']", 'how': '"""left"""'}), "(feature_matrix_df, shifted, on=['date_block_num', 'shop_id',\n 'item_id'], how='left')\n", (7786, 7875), True, 
'import pandas as pd\n'), ((14083, 14180), 'pandas.merge', 'pd.merge', (['feature_matrix_df', 'shifted'], {'on': "['date_block_num', 'shop_id', 'item_id']", 'how': '"""left"""'}), "(feature_matrix_df, shifted, on=['date_block_num', 'shop_id',\n 'item_id'], how='left')\n", (14091, 14180), True, 'import pandas as pd\n'), ((780, 794), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (792, 794), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3447, 3461), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3459, 3461), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3540, 3554), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3552, 3554), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3933, 3968), 'calendar.monthcalendar', 'calendar.monthcalendar', (['year', 'month'], {}), '(year, month)\n', (3955, 3968), False, 'import calendar\n'), ((4517, 4562), 'itertools.product', 'product', (['*[cur_shops, cur_items, [block_num]]'], {}), '(*[cur_shops, cur_items, [block_num]])\n', (4524, 4562), False, 'from itertools import product\n')] |
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ********************************************** MappingProjection ****************************************************
"""
.. _Mapping_Overview:
Overview
--------
A MappingProjection transmits the `value <OutputState.value>` of an `OutputState` of one `ProcessingMechanism
<ProcessingMechanism>` (its `sender <MappingProjection.sender>`) to the `InputState` of another (its `receiver
<MappingProjection.receiver>`). The default `function <MappingProjection.function>` for a MappingProjection is
`LinearMatrix`, which uses the MappingProjection's `matrix <MappingProjection.matrix>` attribute to transform the
value received from its `sender <MappingProjection.sender>` and provide the result to its `receiver
<MappingProjection.receiver>`.
.. _Mapping_Creation:
Creating a MappingProjection
-----------------------------
A MappingProjection can be created in any of the ways that can be used to create a `Projection <Projection_Creation>`
(see `Projection_Sender` and `Projection_Receiver` for specifying its `sender <MappingProjection.sender>` and
`receiver <MappingProjection.receiver>` attributes, respectively), or simply by `specifying it by its matrix parameter
<Mapping_Matrix_Specification>` wherever a `Projection can be specified <Projection_Specification>`.
MappingProjections are also generated automatically in the following circumstances, using a value for its `matrix
<MappingProjection.matrix>` parameter appropriate to the circumstance:
* by a `Composition`, when two adjacent `Mechanisms <Mechanism>` in its `pathway <Process.pathway>` do not already
have a Projection assigned between them (`AUTO_ASSIGN_MATRIX` is used as the `matrix <MappingProjection.matrix>`
specification, which determines the appropriate matrix by context);
..
* by an `ObjectiveMechanism`, from each `OutputState` listed in its `monitored_output_states
<ObjectiveMechanism.monitored_output_states>` attribute to the corresponding `InputState` of the ObjectiveMechanism
(`AUTO_ASSIGN_MATRIX` is used as the `matrix <MappingProjection.matrix>` specification, which determines the
appropriate matrix by context);
..
* by a `LearningMechanism`, between it and the other components required to implement learning
(see `LearningMechanism_Learning_Configurations` for details);
..
* by a `ControlMechanism <ControlMechanism>`, from the *OUTCOME* `OutputState` of the `ObjectiveMechanism` that `it
creates <ControlMechanism_ObjectiveMechanism>` to its *OUTCOME* `InputState`, and from the `OutputStates
<OutputState>` listed in the ObjectiveMechanism's `monitored_output_states <ObjectiveMechanism.monitored_output_states>`
attribute to the ObjectiveMechanism's `primary InputState <InputState_Primary>` (as described above; an
`IDENTITY_MATRIX` is used for all of these).
.. _Mapping_Matrix_Specification:
*Specifying the Matrix Parameter*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When a MappingProjection is created automatically, its `matrix <MappingProjection.matrix>` attribute is generally
assigned using `AUTO_ASSIGN_MATRIX`, which determines its size by context: an `IDENTITY_MATRIX` is used if the
`sender <MappingProjection.sender>` and `receiver <MappingProjection.receiver>` are of equal length; otherwise a
`FULL_CONNECTIVITY_MATRIX` (all 1's) is used (see `below <Mapping_Replace_Identity_Matrix>` for special handling of
`IDENTITY_MATRIX`).
When a MappingProjection is created explicitly, the **matrix** argument of its constructor can be used to specify
its `matrix <MappingProjection.matrix>` parameter. This is used by the MappingProjection's `function
<MappingProjection.function>` to transform the input from its `sender <MappingProjection.sender>` into the `value
<MappingProjection.value>` provided to its `receiver <MappingProjection.receiver>`. It can be specified in any of the
following ways:
* **List, array or matrix** -- if it is a list, each item must be a list or 1d np.array of numbers; otherwise,
it must be a 2d np.array or np.matrix. In each case, the outer dimension (outer list items, array axis 0,
or matrix rows) corresponds to the elements of the `sender <MappingProjection.sender>`, and the inner dimension
(inner list items, array axis 1, or matrix columns) corresponds to the weighting of the contribution that a
given `sender <MappingProjection.sender>` makes to the `receiver <MappingProjection.receiver>` (the number of which
must match the length of the receiver's `variable <InputState.variable>`).
.. _Matrix_Keywords:
* **Matrix keyword** -- used to specify a standard type of matrix without having to specify its individual
values, or to allow the type of matrix to be determined by context; any of the `matrix keywords
<Keywords.MatrixKeywords>` can be used.
..
* **Random matrix function** (`random_matrix <Utilities.random_matrix>`) -- a convenience function
that provides more flexibility than `RANDOM_CONNECTIVITY_MATRIX`. It generates a random matrix sized for a
**sender** and **receiver**, with random numbers drawn from a uniform distribution within a specified **range** and
with a specified **offset**.
.. _MappingProjection_Tuple_Specification:
* **Tuple** -- used to specify the `matrix <MappingProjection.matrix>` along with a specification for learning;
The tuple must have two items: the first can be any of the specifications described above; the second must be
a `learning specification <MappingProjection_Learning_Tuple_Specification>`.
.. _MappingProjection_Learning_Specification:
*Specifying Learning*
~~~~~~~~~~~~~~~~~~~~~
A MappingProjection is specified for learning in any of the following ways:
* in the **matrix** argument of the MappingProjection's constructor, using the `tuple format
<MappingProjection_Tuple_Specification>` described above;
..
* specifying the MappingProjection (or its *MATRIX* `ParameterState`) as the `receiver
<LearningProjection.receiver>` of a `LearningProjection`;
..
* specifying the MappingProjection (or its *MATRIX* `ParameterState`) in the **projections** argument of
the constructor for a `LearningSignal <LearningSignal_Specification>`
..
* specifying the MappingProjection (or its *MATRIX* `ParameterState`) in the **learning_signals** argument of
the constructor for a `LearningMechanism <LearningSignal_Specification>`
..
* specifying a MappingProjection in the `pathway <Process.pathway>` for a `Process`,
using the `tuple format <MappingProjection_Learning_Tuple_Specification>` to include a learning specification;
..
* `specifying learning <Process_Learning_Sequence>` for a `Process`, which assigns `LearningProjections
<LearningProjection>` to all of the MappingProjections in the Process' `pathway <Process.pathway>`;
See `LearningMechanism` documentation for an overview of `learning components <LearningMechanism_Overview>` and a
detailed description of `LearningMechanism_Learning_Configurations`; see `MappingProjection_Learning` below for a
description of how learning modifies a MappingProjection.
.. _MappingProjection_Learning_Tuple_Specification:
Specifying Learning in a Tuple
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A tuple can be used to specify learning for a MappingProjection in the **matrix** `argument of its constructor
<Mapping_Matrix_Specification>` or in the `pathway of a Process <Process_Projections>`. In both cases,
the second item of the tuple must be a learning specification, which can be any of the following:
* an existing `LearningProjection`, `LearningSignal`, or a constructor for one -- the specified Component is used, and
defaults are automatically created for the other `learning Components <LearningMechanism_Learning_Configurations>`;
..
* a reference to the LearningProjection or LearningSignal class, or the keyword *LEARNING* or *LEARNING_PROJECTION* --
a default set of `learning Components <LearningMechanism_Learning_Configurations>` is automatically created.
.. _MappingProjection_Deferred_Initialization:
*Deferred Initialization*
~~~~~~~~~~~~~~~~~~~~~~~~~
When a MappingProjection is created, its full initialization is `deferred <Component_Deferred_Init>` until its
`sender <MappingProjection.sender>` and `receiver <MappingProjection.receiver>` have been fully specified. This
allows a MappingProjection to be created before its `sender <MappingProjection.sender>` and/or `receiver
<MappingProjection.receiver>` have been created (e.g., before them in a script), by calling its constructor without
specifying its **sender** or **receiver** arguments. However, for the MappingProjection to be operational,
initialization must be completed by calling its `deferred_init` method. This is not necessary if the MappingProjection
is specified in the `pathway <Process.pathway>` of `Process`, or anywhere else that its `sender
<MappingProjection.sender>` and `receiver <MappingProjection.receiver>` can be determined by context.
.. _Mapping_Structure:
Structure
---------
In addition to its `sender <MappingProjection.sender>`, `receiver <MappingProjection.receiver>`, and `function
<MappingProjection.function>`, a MappingProjection has the following characteristic attributes:
.. _Mapping_Matrix:
* `matrix <MappingProjection.matrix>` parameter - used by the MappingProjection's `function
<MappingProjection.function>` to carry out a matrix transformation of its input, that is then provided to its
`receiver <MappingProjection.receiver>`. It can be specified in a variety of ways, as described `above
<Mapping_Matrix_Specification>` (see `below <Mapping_Replace_Identity_Matrix>` for special handling of
`IDENTITY_MATRIX`).
.. _Mapping_Matrix_Dimensionality
* **Matrix Dimensionality** -- this must match the dimensionality of the MappingProjection's `sender
<MappingProjection.sender>` and `receiver <MappingProjection.receiver>`. For a standard 2d "weight" matrix (i.e.,
one that maps a 1d array from its `sender <MappingProjection.sender>` to a 1d array of its `receiver
<MappingProjection.receiver>`), the dimensionality of the sender is the number of rows and of the receiver
the number of columns. More generally, the sender dimensionality is the number of outer dimensions (i.e.,
starting with axis 0 of numpy array) equal to the number of dimensions of its `sender <MappingProjection.sender>`'s
`value <State_Base.value>`, and the receiver dimensionality is the number of inner dimensions equal to its
`receiver <MappingProjection.receiver>`'s `variable <MappingProjection.variable>` (equal to the dimensionality of
the matrix minus its sender dimensionality).
.. _Mapping_Replace_Identity_Matrix:
|
* **Handling of IDENTITY_MATRIX** -- for efficiency of processing, if `matrix <MappingProjection.matrix>` is
assigned the `IDENTITY_MATRIX`, `function <MappingProjection.function>` is replaced by the `Identity` Function,
which simply copies `variable <MappingProjection.variable>` to `value <MappingProjection.value>` (avoiding any
matrix computations). In this case, since the `Identity` Function does not have a matrix parameter, the `matrix
<MappingProjection.matrix>` attribute of the MappingProjection simply returns the string "IdentityMatrix", and its
matrix ParameterState is not operational. This behavior can be suppressed by setting the `suppress_identity_function
<MappingProjection.suppress_identity_function>` to True, in which case the IDENTITY_MATRIX is treated as any other.
.. _Mapping_Matrix_ParameterState:
* *MATRIX* `ParameterState` - this receives any `LearningProjections <LearningProjection>` that are assigned to the
MappingProjection (see `MappingProjection_Learning_Specification` above), and updates the current value of the
MappingProjection's `matrix <MappingProjection.matrix>` parameter in response to `learning
<LearningMechanism>`. The `function <ParameterState.function>` of a *MATRIX* ParameterState is an
`AccumulatorIntegrator`, which accumulates the weight changes received from the LearningProjections
that project to it (see `MappingProjection_Learning` below). This can be replaced by any function that defines an
*ADDITIVE_PARAM* `modulatory parameter <ModulatorySignal_Modulation>`), and that takes as its input an array or
matrix and returns one of the same size.
.. _Mapping_Weight_Exponent:
* `weight <MappingProjection.weight>` and `exponent <MappingProjection.exponent>` - applied to the `value
<MappingProjection.value>` of the MappingProjection before it is combined with other MappingProjections
to the same `InputState` to determine its `value <InputState.value>` (see description under `Projection
<Projection_Weight_Exponent>` for additional details).
.. note::
The `weight <MappingProjection.weight>` and `exponent <MappingProjection.exponent>` attributes of a
MappingProjection are distinct from those of the `InputState` to which it projects. It is also important
to recognize that, as noted under `Projection <Projection_Weight_Exponent>`, they are not normalized,
and thus contribute to the magnitude of the InputState's `variable <InputState.variable>` and therefore its
relationship to that of other InputStates that may belong to the same Mechanism.
.. _Mapping_Execution:
Execution
---------
A MappingProjection uses its `function <MappingProjection.function>` and `matrix <MappingProjection.matrix>`
parameter to transform its `sender <MappingProjection.sender>` into a form suitable for the `variable
<InputState.variable>` of its `receiver <MappingProjection.receiver>`. A MappingProjection cannot be executed
directly. It is executed when the `InputState` to which it projects (i.e., its `receiver
<MappingProjection.receiver>`) is updated; that occurs when the InputState's owner `Mechanism <Mechanism>` is executed.
When executed, the MappingProjection's *MATRIX* `ParameterState` updates its `matrix <MappingProjection.matrix>`
parameter based on any `LearningProjection(s)` it receives (listed in the ParameterState's `mod_afferents
<ParameterState.mod_afferents>` attribute). This brings into effect any changes that occurred due to `learning
<MappingProjection_Learning>`. Since this does not occur until the Mechanism that receives the MappingProjection
is executed (in accord with :ref:`Lazy Evaluation <LINK>`), any changes due to learning do not take effect, and are not
observable (e.g., through inspection of the `matrix <MappingProjection.matrix>` attribute or the
`value <ParameterState.value>` of its ParameterState) until the next `TRIAL` of execution (see :ref:`Lazy Evaluation`
for an explanation of "lazy" updating).
.. _MappingProjection_Learning:
*Learning*
~~~~~~~~~~
Learning modifies the `matrix <MappingProjection.matrix>` parameter of a MappingProjection, under the influence
of one or more `LearningProjections <LearningProjection>` that project to its *MATRIX* `ParameterState`.
This conforms to the general procedures for modulation used by `ModulatoryProjections <ModulatoryProjection>`.
A LearningProjection `modulates <LearningSignal_Modulation>` the `function <ParameterState.function>` of the
*MATRIX* ParameterState, which is responsible for keeping a record of the value of the MappingProjection's matrix,
and providing it to the MappingProjection's `function <MappingProjection.function>` (usually `LinearMatrix`). By
default, the function for the *MATRIX* ParameterState is an `AccumulatorIntegrator`. A LearningProjection
modulates it by assigning the value of its `additive_param <AccumulatorIntegrator.additive_param>` (`increment
<AccumulatorIntegrator.increment>`), which is added to its `previous_value <AccumulatorIntegrator.previous_value>`
attribute each time it is executed. The result is that each time the MappingProjection is executed, and in turn
executes its *MATRIX* ParameterState, the `weight changes <LearningProjection_Structure>` conveyed to the
MappingProjection from any LearningProjection(s) are added to the record of the matrix kept by the *MATRIX*
ParameterState's `AccumulatorIntegrator` function in its `previous_value <AccumulatorIntegrator.previous_value>`
attribute. This is then the value of the matrix used by the MappingProjection's `LinearMatrix` function when it is
executed. It is important to note that the accumulated weight changes received by a MappingProjection from its
LearningProjection(s) are stored by the *MATRIX* ParameterState's function, and not the MappingProjection's `matrix
<MappingProjection.matrix>` parameter itself; the latter stores the original value of the matrix before learning (that
is, its unmodulated value, conforming to the general protocol for `modulation <ModulatorySignal_Modulation>` in
PsyNeuLink). The most recent value of the matrix used by the MappingProjection is stored in the `value
<ParameterState.value>` of its *MATRIX* ParameterState. As noted `above <Mapping_Execution>`, however, this does not
reflect any changes due to learning on the current `TRIAL` of execution; those are assigned to the ParameterState's
`value <ParameterState.value>` when it executes, which does not occur until the `Mechanism <Mechanism>` that receives
the MappingProjection is executed in the next `TRIAL` of execution (see :ref:`Lazy Evaluation <LINK>` for an explanation
of "lazy" updating)
.. _Mapping_Class_Reference:
Class Reference
---------------
"""
import inspect
import numpy as np
import typecheck as tc
import warnings
from psyneulink.core.components.component import parameter_keywords
from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import AccumulatorIntegrator
from psyneulink.core.components.functions.transferfunctions import LinearMatrix, get_matrix, Identity
from psyneulink.core.components.projections.pathway.pathwayprojection import PathwayProjection_Base
from psyneulink.core.components.projections.projection import ProjectionError, Projection_Base, projection_keywords
from psyneulink.core.components.states.outputstate import OutputState
from psyneulink.core.globals.keywords import AUTO_ASSIGN_MATRIX, DEFAULT_MATRIX, FULL_CONNECTIVITY_MATRIX, FUNCTION, FUNCTION_PARAMS, HOLLOW_MATRIX, IDENTITY_MATRIX, INPUT_STATE, LEARNING, LEARNING_PROJECTION, MAPPING_PROJECTION, MATRIX, OUTPUT_STATE, PROCESS_INPUT_STATE, PROJECTION_SENDER, SYSTEM_INPUT_STATE, VALUE
from psyneulink.core.globals.log import ContextFlags
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.preferences.componentpreferenceset import is_pref_set
from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel
__all__ = [
'MappingError', 'MappingProjection',
]
# Register this Projection type's keyword so it is recognized wherever
# component and projection keyword sets are consulted.
parameter_keywords.update({MAPPING_PROJECTION})
projection_keywords.update({MAPPING_PROJECTION})
class MappingError(Exception):
    """Exception raised for MappingProjection-specific errors.

    The offending value/message is retained in ``error_value`` for callers
    that inspect it directly.
    """

    def __init__(self, error_value):
        # Pass the value through to Exception so str(), repr() and tracebacks
        # show the message (the previous override swallowed it, leaving
        # str(exc) empty).
        super().__init__(error_value)
        self.error_value = error_value
def _mapping_projection_matrix_getter(owning_component=None, execution_id=None):
    '''Get matrix parameter for MappingProjection

    If the MappingProjection is using the Identity function (for efficiency), its function has no
    matrix parameter, so return the IDENTITY_MATRIX keyword instead.

    # IMPLEMENTATION NOTE:
    #    This is for consistency of interpretation of matrix parameter on MappingProjection;
    #    It is OK to do this, even though the MappingProjection's function doesn't actually have a matrix parameter
    #    since, if any attempt is made to modify it by assigning a new one, the MappingProjection's original function
    #    (stored in _original_function) is restored.
    '''
    try:
        return owning_component.function.parameters.matrix.get(execution_id)
    # If MappingProjection uses Identity function, it doesn't have a matrix parameter, so return Identity matrix
    except AttributeError:
        # FIX: error message previously read "that is does not use"
        assert isinstance(owning_component.function, Identity), \
            f'PROGRAM ERROR: AttributeError getting {MATRIX} parameter for {MappingProjection.__name__} ' \
            f'({owning_component.name}) that does not use {Identity.__name__}'
        return IDENTITY_MATRIX
# # MODIFIED 5/24/19 OLD:
# def _mapping_projection_matrix_setter(value, owning_component=None, execution_id=None):
# value = np.array(value)
# owning_component.function.parameters.matrix.set(value, execution_id)
# # KDM 11/13/18: not sure that below is correct to do here, probably is better to do this in a "reinitialize" type method
# # but this is needed for Kalanthroff model to work correctly (though untested, it is in Scripts/Models)
# owning_component.parameter_states["matrix"].function.parameters.previous_value.set(value, execution_id)
# return value
# MODIFIED 5/24/19 NEW: [JDC]
def _mapping_projection_matrix_setter(value, owning_component=None, execution_id=None):
    '''Assign matrix parameter for MappingProjection

    If value is an identity matrix and the MappingProjection's matrix ParameterState has no
    modulatory projections, then, for efficiency, assign the Identity Function, which simply
    passes the variable of the MappingProjection through as its value.
    '''
    matrix = np.array(value)

    current_function = owning_component.parameters.function.get(execution_id)
    current_function_variable = current_function.parameters.variable.get(execution_id)

    # Determine whether or not to use Identity Function:
    # - only a square 2d matrix can be an identity matrix (the ndim check also guards
    #   against non-2d values, which previously crashed the ``rows, cols = matrix.shape``
    #   unpacking with a ValueError);
    # - the optimization is only valid if nothing modulates the matrix and the
    #   suppress_identity_function option is off.
    _use_identity_function = (matrix.ndim == 2
                              and matrix.shape[0] == matrix.shape[1]
                              and (matrix == np.identity(matrix.shape[0])).all()
                              and len(owning_component._parameter_states[MATRIX].mod_afferents) == 0
                              and not owning_component.parameters.suppress_identity_function.get(execution_id))

    if _use_identity_function:
        # If Identity should be used and is not already, store the current function in
        # _original_function (so it can be restored later) and assign Identity.
        if not isinstance(current_function, Identity):
            owning_component._original_function = current_function
            owning_component.parameters.function.set(Identity(default_variable=current_function_variable),
                                                     execution_id)
    else:
        # If Identity function is currently in use, restore function from _original_function
        if isinstance(current_function, Identity):
            owning_component.parameters.function.set(owning_component._original_function, execution_id)
            owning_component._original_function = None
        # Assign matrix to the (restored) function
        owning_component.function.parameters.matrix.set(matrix, execution_id)
        # KDM 11/13/18: not sure that below is correct to do here; probably better done in a
        # "reinitialize"-type method, but this is needed for the Kalanthroff model to work
        # correctly (though untested, it is in Scripts/Models)
        owning_component.parameter_states["matrix"].function.parameters.previous_value.set(matrix, execution_id)

    return matrix
class MappingProjection(PathwayProjection_Base):
"""
MappingProjection( \
sender=None, \
receiver=None, \
matrix=DEFAULT_MATRIX, \
weight=None, \
exponent=None, \
function=LinearMatrix, \
suppress_identity_function=False, \
params=None, \
name=None, \
prefs=None)
Implements a Projection that transmits the output of one Mechanism to the input of another.
COMMENT:
Description:
The MappingProjection class is a type in the Projection category of Component.
It implements a Projection that takes the value of an OutputState of one Mechanism, transforms it as
necessary, and provides it to the inputState of another ProcessingMechanism.
Its function conveys (and possibly transforms) the OutputState.value of a sender
to the InputState.value of a receiver.
IMPLEMENTATION NOTE:
AUGMENT SO THAT SENDER CAN BE A Mechanism WITH MULTIPLE OUTPUT STATES, IN WHICH CASE:
RECEIVER MUST EITHER BE A MECHANISM WITH SAME NUMBER OF INPUT STATES AS SENDER HAS OUTPUTSTATES
(FOR WHICH SENDER OUTPUTSTATE IS MAPPED TO THE CORRESPONDING RECEIVER INPUT STATE
USING THE SAME MAPPING_PROJECTION MATRIX, OR AN ARRAY OF THEM)
OR BOTH MUST BE 1D ARRAYS (I.E., SINGLE VECTOR)
SHOULD BE CHECKED IN OVERRIDE OF _validate_variable
THEN HANDLED IN _instantiate_sender and _instantiate_receiver
Class attributes:
+ className = MAPPING_PROJECTION
+ componentType = PROJECTION
+ paramClassDefaults (dict)
paramClassDefaults.update({
FUNCTION:LinearMatrix,
FUNCTION_PARAMS: {
# LinearMatrix.kwReceiver: receiver.value,
LinearMatrix.MATRIX: LinearMatrix.DEFAULT_MATRIX},
PROJECTION_SENDER: INPUT_STATE, # Assigned to class ref in __init__ module
})
+ classPreference (PreferenceSet): MappingPreferenceSet, instantiated in __init__()
+ classPreferenceLevel (PreferenceLevel): PreferenceLevel.TYPE
Class methods:
function (executes function specified in params[FUNCTION]
COMMENT
Arguments
---------
sender : Optional[OutputState or Mechanism]
specifies the source of the Projection's input. If a `Mechanism <Mechanism>` is specified, its
`primary OutputState <OutputState_Primary>` will be used. If it is not specified, it will be assigned in
the context in which the Projection is used, or its initialization will be `deferred
<MappingProjection_Deferred_Initialization>`.
receiver: Optional[InputState or Mechanism]
specifies the destination of the Projection's output. If a `Mechanism <Mechanism>` is specified, its
`primary InputState <InputState_Primary>` will be used. If it is not specified, it will be assigned in
the context in which the Projection is used, or its initialization will be `deferred
<MappingProjection_Deferred_Initialization>`.
weight : number : default None
specifies the value by which to multiply the MappingProjection's `value <MappingProjection.value>`
before combining it with others (see `weight <MappingProjection.weight>` for additional details).
exponent : number : default None
specifies the value by which to exponentiate the MappingProjection's `value <MappingProjection.value>`
before combining it with others (see `exponent <MappingProjection.exponent>` for additional details).
function : function : default LinearMatrix
specifies function used to transform `variable <MappingProjection.variable>` into `value
<MappingProjection.value>`; must be a `TransferFunction` that takes an input of the same shape as
`variable <MappingProjection.variable>`.
suppress_identity_function : bool : default False
specifies whether `function <MappingProjection.function>` is replaced by the `Identity` Function
for efficiency if `matrix <MappingProjection.matrix>` is the `IDENTITY_MATRIX` (see `Handling of IDENTITY_MATRIX
<Mapping_Replace_Identity_Matrix>` for details).
matrix : list, np.ndarray, np.matrix, function or keyword : default DEFAULT_MATRIX
the matrix used by `function <MappingProjection.function>` (default: `LinearCombination`) to transform the
value of the `sender <MappingProjection.sender>` into a form suitable for the `variable <InputState.variable>`
of its `receiver <MappingProjection.receiver>`.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterState_Specification>` that can be used to specify the parameters for
the Projection, its function, and/or a custom function and its parameters. By default, it contains an entry for
the Projection's default assignment (`LinearCombination`). Values specified for parameters in the dictionary
override any assigned to those parameters in arguments of the constructor.
name : str : default see MappingProjection `name <MappingProjection.name>`
specifies the name of the MappingProjection.
prefs : PreferenceSet or specification dict : default State.classPreferences
specifies the `PreferenceSet` for the MappingProjection; see `prefs <MappingProjection.prefs>` for details.
Attributes
----------
componentType : MAPPING_PROJECTION
variable : ndarray
input to MappingProjection, received from `value <OutputState.value>` of `sender <MappingProjection.sender>`.
sender : OutputState
the `OutputState` of the `Mechanism <Mechanism>` that is the source of the Projection's input
receiver: InputState
the `InputState` of the `Mechanism <Mechanism>` that is the destination of the Projection's output.
matrix : 2d np.array
the matrix used by `function <MappingProjection.function>` to transform the input from the MappingProjection's
`sender <MappingProjection.sender>` into the value provided to its `receiver <MappingProjection.receiver>`.
has_learning_projection : bool : None
identifies the `LearningProjection` assigned to the MappingProjection's `MATRIX` `ParameterState
<ParameterState>`.
function : function
determines function used to transform `variable <MappingProjection.variable>` into `value
<MappingProjection.value>`.
suppress_identity_function : bool : default False
determines whether `function <MappingProjection.function>` is replaced by the `Identity` Function
for efficiency if `matrix <MappingProjection.matrix>` is the `IDENTITY_MATRIX` (see `Handling of IDENTITY_MATRIX
<Mapping_Replace_Identity_Matrix>` for details).
learning_mechanism : LearningMechanism
source of the `learning signal <LearningSignal>` that determines the changes to the `matrix
<MappingProjection.matrix>` when `learning <LearningMechanism>` is used.
value : ndarray
output of MappingProjection, sent to `variable <InputState.variable>` of `receiver
<MappingProjection.receiver>`.
weight : number
multiplies `value <MappingProjection.value>` of the MappingProjection after applying `exponent
<MappingProjection.exponent>`, and before combining with any others that project to the same `InputState` to
determine that InputState's `variable <InputState.variable>` (see `description above
<Mapping_Weight_Exponent>` for details).
exponent : number
exponentiates the `value <MappingProjection.value>` of the MappingProjection, before applying `weight
<MappingProjection.weight>`, and before combining it with any others that project to the same
`InputState` to determine that InputState's `variable <InputState.variable>` (see `description above
<Mapping_Weight_Exponent>` for details).
name : str
the name of the MappingProjection. If the specified name is the name of an existing MappingProjection,
it is appended with an indexed suffix, incremented for each MappingProjection with the same base name (see
`Naming`). If the name is not specified in the **name** argument of its constructor, a default name is
assigned using the following format:
'MappingProjection from <sender Mechanism>[<OutputState>] to <receiver Mechanism>[InputState]'
(for example, ``'MappingProjection from my_mech_1[OutputState-0] to my_mech2[InputState-0]'``).
If either the `sender <MappingProjection.sender>` or `receiver <MappingProjection.receiver>` has not yet been
assigned (the MappingProjection is in `deferred initialization <MappingProjection_Deferred_Initialization>`),
then the parenthesized name of class is used in place of the unassigned attribute
(for example, if the `sender <MappingProjection.sender>` has not yet been specified:
``'MappingProjection from (OutputState-0) to my_mech2[InputState-0]'``).
prefs : PreferenceSet or specification dict
the `PreferenceSet` for the MappingProjection; if it is not specified in the **prefs** argument of the
constructor, a default is assigned using `classPreferences` defined in __init__.py (see :doc:`PreferenceSet
<LINK>` for details).
"""
componentType = MAPPING_PROJECTION
className = componentType
suffix = " " + className
    class Parameters(PathwayProjection_Base.Parameters):
        """
        Attributes
        ----------
        function
            see `function <MappingProjection.function>`
            :default value: `LinearMatrix`
            :type: `Function`
        suppress_identity_function
            see `suppress_identity_function <MappingProjection.suppress_identity_function>`
            :default value: False
            :type: bool
        matrix
            see `matrix <MappingProjection.matrix>`
            :default value: `AUTO_ASSIGN_MATRIX`
            :type: str
        """
        # # MODIFIED 5/24/19 OLD:
        function = Parameter(LinearMatrix, stateful=False, loggable=False)
        # MODIFIED 5/24/19 NEW: [JDC]
        # function = Parameter(LinearMatrix, stateful=True, loggable=False)
        suppress_identity_function = Parameter(False, stateful=True, loggable=False)
        # MODIFIED 5/24/19 END
        # matrix is modulable (it can be targeted by a LearningProjection) and uses
        # a custom getter/setter pair to keep it in sync with the function's matrix.
        matrix = Parameter(DEFAULT_MATRIX,
                           modulable=True,
                           getter=_mapping_projection_matrix_getter,
                           setter=_mapping_projection_matrix_setter)
    classPreferenceLevel = PreferenceLevel.TYPE
    @property
    def _loggable_items(self):
        """Return the items (the Projection's ParameterStates) that can be logged."""
        # States and afferent Projections are loggable for a Mechanism
        #     - this allows the value of InputStates and OutputStates to be logged
        #     - for MappingProjections, this logs the value of the Projection's matrix parameter
        #     - for ModulatoryProjections, this logs the value of the Projection
        # IMPLEMENTATION NOTE: this needs to be a property as that is expected by Log.loggable_items
        return list(self.parameter_states)
    class sockets:
        # Legal State types to which each end of a MappingProjection may connect.
        sender=[OUTPUT_STATE, PROCESS_INPUT_STATE, SYSTEM_INPUT_STATE]
        receiver=[INPUT_STATE]

    # Start from the Projection_Base defaults, specializing FUNCTION and the sender type.
    paramClassDefaults = Projection_Base.paramClassDefaults.copy()
    paramClassDefaults.update({FUNCTION: LinearMatrix,
                               PROJECTION_SENDER: OutputState,
                               })
    @tc.typecheck
    def __init__(self,
                 sender=None,
                 receiver=None,
                 weight=None,
                 exponent=None,
                 matrix=DEFAULT_MATRIX,
                 function=None,
                 suppress_identity_function=False,
                 params=None,
                 name=None,
                 prefs:is_pref_set=None):
        """Construct a MappingProjection.

        If either sender or receiver is not yet known, initialization is deferred
        (ContextFlags.DEFERRED_INIT) until State.instantiate_projection_to_state()
        supplies the missing end.
        """
        # Assign args to params and functionParams dicts
        # Assign matrix to function_params for use as matrix param of MappingProjection.function
        # (7/12/17 CW) this is a PATCH to allow the user to set matrix as an np.matrix... I still don't know why
        # it wasn't working.
        if isinstance(matrix, (np.matrix, list)):
            matrix = np.array(matrix)
        params = self._assign_args_to_param_dicts(function_params={MATRIX: matrix},
                                                  suppress_identity_function=suppress_identity_function,
                                                  params=params)
        # Learning bookkeeping; populated later if a LearningProjection is attached.
        self.learning_mechanism = None
        self.has_learning_projection = None
        # If sender or receiver has not been assigned, defer init to State.instantiate_projection_to_state()
        if sender is None or receiver is None:
            self.context.initialization_status = ContextFlags.DEFERRED_INIT
        # Validate sender (as variable) and params, and assign to variable and paramInstanceDefaults
        super().__init__(sender=sender,
                         receiver=receiver,
                         weight=weight,
                         exponent=exponent,
                         function=function,
                         params=params,
                         name=name,
                         prefs=prefs,
                         context=ContextFlags.CONSTRUCTOR)
    def _instantiate_parameter_states(self, function=None, context=None):
        """Instantiate ParameterStates, then assign an AccumulatorIntegrator as the
        function of the MATRIX ParameterState (initialized with the current matrix).
        """
        super()._instantiate_parameter_states(function=function, context=context)

        # FIX: UPDATE FOR LEARNING
        # FIX: UPDATE WITH MODULATION_MODS
        # FIX: MOVE THIS TO MappingProjection.__init__;
        # FIX: AS IT IS, OVER-WRITES USER ASSIGNMENT OF FUNCTION IN params dict FOR MappingProjection
        matrix = get_matrix(self._parameter_states[MATRIX].value)
        # NOTE: initial_rate is currently unused (the rate arg below is commented out).
        initial_rate = matrix * 0.0

        self._parameter_states[MATRIX].function = AccumulatorIntegrator(owner=self._parameter_states[MATRIX],
                                                                        default_variable=matrix,
                                                                        initializer=matrix,
                                                                        # rate=initial_rate
                                                                        )

        # # Assign ParameterState the same Log as the MappingProjection, so that its entries are accessible to Mechanisms
        # self._parameter_states[MATRIX].log = self.log
    def _instantiate_receiver(self, context=None):
        """Determine matrix needed to map from sender to receiver

        Assign specification to self.matrix_spec attribute
        Assign matrix to self.matrix attribute
        """
        self.reshapedWeightMatrix = False

        # Get sender and receiver lengths
        # Note: if either is a scalar, manually set length to 1 to avoid TypeError in call to len()
        try:
            mapping_input_len = len(self.defaults.variable)
        except TypeError:
            mapping_input_len = 1
        try:
            receiver_len = self.receiver.socket_width
        except TypeError:
            receiver_len = 1

        # Compare length of MappingProjection output and receiver's variable to be sure matrix has proper dimensions
        try:
            mapping_output_len = len(self.defaults.value)
        except TypeError:
            mapping_output_len = 1

        # FIX: CONVERT ALL REFS TO paramsCurrent[FUNCTION_PARAMS][MATRIX] TO self.matrix (CHECK THEY'RE THE SAME)
        # FIX: CONVERT ALL REFS TO matrix_spec TO self._matrix_spec
        # FIX: CREATE @PROPERTY FOR self._learning_spec AND ASSIGN IN INIT??
        # FIX: HOW DOES mapping_output_len RELATE TO receiver_len?/
        # Auto-assignment: square mappings get the identity matrix; otherwise full connectivity.
        if self._matrix_spec is AUTO_ASSIGN_MATRIX:
            if mapping_input_len == receiver_len:
                self._matrix_spec = IDENTITY_MATRIX
            else:
                self._matrix_spec = FULL_CONNECTIVITY_MATRIX

        # Length of the output of the Projection doesn't match the length of the receiving input state
        #    so consider reshaping the matrix
        if mapping_output_len != receiver_len:
            # Build the error/warning message fragments.
            if 'projection' in self.name or 'Projection' in self.name:
                projection_string = ''
            else:
                projection_string = 'projection'
            if all(string in self.name for string in {'from', 'to'}):
                states_string = ''
            else:
                states_string = "from \'{}\' OuputState of \'{}\' to \'{}\'".format(self.sender.name,
                                                                                   self.sender.owner.name,
                                                                                   self.receiver.owner.name)
            if not isinstance(self._matrix_spec, str):
                # An explicit (non-keyword) matrix cannot be silently reshaped.
                # if all(string in self.name for string in {'from', 'to'}):
                raise ProjectionError("Width ({}) of the {} of \'{}{}\'{} "
                                      "does not match the length of its \'{}\' InputState ({})".
                                      format(mapping_output_len,
                                             VALUE,
                                             self.name,
                                             projection_string,
                                             states_string,
                                             self.receiver.name,
                                             receiver_len))
            elif self._matrix_spec == IDENTITY_MATRIX or self._matrix_spec == HOLLOW_MATRIX:
                # Identity matrix is not reshapable
                raise ProjectionError("Output length ({}) of \'{}{}\' from {} to Mechanism \'{}\'"
                                      " must equal length of it InputState ({}) to use {}".
                                      format(mapping_output_len,
                                             self.name,
                                             projection_string,
                                             self.sender.name,
                                             self.receiver.owner.name,
                                             receiver_len,
                                             self._matrix_spec))
            else:
                # Flag that matrix is being reshaped
                self.reshapedWeightMatrix = True
                if self.prefs.verbosePref:
                    print("Length ({}) of the output of {}{} does not match the length ({}) "
                          "of the InputState for the receiver {}; the width of the matrix (number of columns); "
                          "the width of the matrix (number of columns) will be adjusted to accomodate the receiver".
                          format(mapping_output_len,
                                 self.name,
                                 projection_string,
                                 receiver_len,
                                 self.receiver.owner.name))

                self.matrix = get_matrix(self._matrix_spec, mapping_input_len, receiver_len, context=context)

                # Since matrix shape has changed, output of self.function may have changed, so update value
                self._instantiate_value(context=context)

        super()._instantiate_receiver(context=context)
    def _execute(self, variable=None, execution_id=None, runtime_params=None, context=None):
        """Update ParameterStates (unless the Identity function is in use) and execute.

        When learning is enabled on the owning composition, the parameter states are
        first updated in the LEARNING phase before the PROCESSING-phase update.
        """
        self.parameters.context.get(execution_id).execution_phase = ContextFlags.PROCESSING
        self.parameters.context.get(execution_id).string = context
        # If function is Identity Function, no need to update ParameterStates, as matrix is not used
        if not isinstance(self.function, Identity):
            if (hasattr(self.context, "composition") and
                    hasattr(self.context.composition, "learning_enabled") and
                    self.context.composition.learning_enabled):
                self.parameters.context.get(execution_id).execution_phase = ContextFlags.LEARNING
                self._update_parameter_states(execution_id=execution_id, runtime_params=runtime_params, context=context)
                self.parameters.context.get(execution_id).execution_phase = ContextFlags.PROCESSING
            # NOTE(review): when learning is enabled the parameter states are updated
            # twice (once per phase) — confirm this double update is intentional.
            self._update_parameter_states(execution_id=execution_id, runtime_params=runtime_params, context=context)
        value = super()._execute(
            variable=variable,
            execution_id=execution_id,
            runtime_params=runtime_params,
            context=context
        )
        return value
    @property
    def _matrix_spec(self):
        """Returns matrix specification in self.paramsCurrent[FUNCTION_PARAMS][MATRIX]

        Returns matrix param for MappingProjection, getting second item if it is
        an unnamed (matrix, projection) tuple
        """
        # Delegates tuple handling to the shared helper.
        return self._get_param_value_from_tuple(self.paramsCurrent[FUNCTION_PARAMS][MATRIX])
@_matrix_spec.setter
def _matrix_spec(self, value):
"""Assign matrix specification for self.paramsCurrent[FUNCTION_PARAMS][MATRIX]
Assigns matrix param for MappingProjection, assigning second item if it is
a 2-item tuple or unnamed (matrix, projection) tuple
"""
# Specification is a two-item tuple, so validate that 2nd item is:
# *LEARNING* or *LEARNING_PROJECTION* keyword, LearningProjection subclass, or instance of a LearningPojection
from psyneulink.core.components.projections.modulatory.learningprojection import LearningProjection
if (isinstance(self.paramsCurrent[FUNCTION_PARAMS][MATRIX], tuple) and
len(self.paramsCurrent[FUNCTION_PARAMS][MATRIX]) is 2 and
(self.paramsCurrent[FUNCTION_PARAMS][MATRIX][1] in {LEARNING, LEARNING_PROJECTION}
or isinstance(self.paramsCurrent[FUNCTION_PARAMS][MATRIX][1], LearningProjection) or
(inspect.isclass(self.paramsCurrent[FUNCTION_PARAMS][MATRIX][1]) and
issubclass(self.paramsCurrent[FUNCTION_PARAMS][MATRIX][1], LearningProjection)))
):
self.paramsCurrent[FUNCTION_PARAMS].__additem__(MATRIX,
(value, self.paramsCurrent[FUNCTION_PARAMS][MATRIX][1]))
else:
self.paramsCurrent[FUNCTION_PARAMS].__additem__(MATRIX, value)
    @property
    def logPref(self):
        """Logging preference for the Projection (delegates to self.prefs)."""
        return self.prefs.logPref

    # Always assign matrix Parameter state the same logPref as the MappingProjection
    @logPref.setter
    def logPref(self, setting):
        self.prefs.logPref = setting
        self.parameter_states[MATRIX].logPref = setting
| [
"psyneulink.core.components.functions.transferfunctions.Identity",
"psyneulink.core.components.component.parameter_keywords.update",
"psyneulink.core.components.functions.statefulfunctions.integratorfunctions.AccumulatorIntegrator",
"inspect.isclass",
"psyneulink.core.components.functions.transferfunctions.... | [((19377, 19424), 'psyneulink.core.components.component.parameter_keywords.update', 'parameter_keywords.update', (['{MAPPING_PROJECTION}'], {}), '({MAPPING_PROJECTION})\n', (19402, 19424), False, 'from psyneulink.core.components.component import parameter_keywords\n'), ((19425, 19473), 'psyneulink.core.components.projections.projection.projection_keywords.update', 'projection_keywords.update', (['{MAPPING_PROJECTION}'], {}), '({MAPPING_PROJECTION})\n', (19451, 19473), False, 'from psyneulink.core.components.projections.projection import ProjectionError, Projection_Base, projection_keywords\n'), ((22021, 22036), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (22029, 22036), True, 'import numpy as np\n'), ((36173, 36214), 'psyneulink.core.components.projections.projection.Projection_Base.paramClassDefaults.copy', 'Projection_Base.paramClassDefaults.copy', ([], {}), '()\n', (36212, 36214), False, 'from psyneulink.core.components.projections.projection import ProjectionError, Projection_Base, projection_keywords\n'), ((34943, 34998), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['LinearMatrix'], {'stateful': '(False)', 'loggable': '(False)'}), '(LinearMatrix, stateful=False, loggable=False)\n', (34952, 34998), False, 'from psyneulink.core.globals.parameters import Parameter\n'), ((35150, 35197), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['(False)'], {'stateful': '(True)', 'loggable': '(False)'}), '(False, stateful=True, loggable=False)\n', (35159, 35197), False, 'from psyneulink.core.globals.parameters import Parameter\n'), ((35246, 35381), 'psyneulink.core.globals.parameters.Parameter', 'Parameter', (['DEFAULT_MATRIX'], {'modulable': '(True)', 'getter': '_mapping_projection_matrix_getter', 'setter': '_mapping_projection_matrix_setter'}), '(DEFAULT_MATRIX, modulable=True, getter=\n _mapping_projection_matrix_getter, setter=_mapping_projection_matrix_setter\n 
)\n', (35255, 35381), False, 'from psyneulink.core.globals.parameters import Parameter\n'), ((38612, 38660), 'psyneulink.core.components.functions.transferfunctions.get_matrix', 'get_matrix', (['self._parameter_states[MATRIX].value'], {}), '(self._parameter_states[MATRIX].value)\n', (38622, 38660), False, 'from psyneulink.core.components.functions.transferfunctions import LinearMatrix, get_matrix, Identity\n'), ((38748, 38856), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.AccumulatorIntegrator', 'AccumulatorIntegrator', ([], {'owner': 'self._parameter_states[MATRIX]', 'default_variable': 'matrix', 'initializer': 'matrix'}), '(owner=self._parameter_states[MATRIX],\n default_variable=matrix, initializer=matrix)\n', (38769, 38856), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import AccumulatorIntegrator\n'), ((37124, 37140), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (37132, 37140), True, 'import numpy as np\n'), ((22908, 22960), 'psyneulink.core.components.functions.transferfunctions.Identity', 'Identity', ([], {'default_variable': 'current_function_variable'}), '(default_variable=current_function_variable)\n', (22916, 22960), False, 'from psyneulink.core.components.functions.transferfunctions import LinearMatrix, get_matrix, Identity\n'), ((22345, 22362), 'numpy.identity', 'np.identity', (['rows'], {}), '(rows)\n', (22356, 22362), True, 'import numpy as np\n'), ((43961, 44040), 'psyneulink.core.components.functions.transferfunctions.get_matrix', 'get_matrix', (['self._matrix_spec', 'mapping_input_len', 'receiver_len'], {'context': 'context'}), '(self._matrix_spec, mapping_input_len, receiver_len, context=context)\n', (43971, 44040), False, 'from psyneulink.core.components.functions.transferfunctions import LinearMatrix, get_matrix, Identity\n'), ((46884, 46947), 'inspect.isclass', 'inspect.isclass', (['self.paramsCurrent[FUNCTION_PARAMS][MATRIX][1]'], {}), 
'(self.paramsCurrent[FUNCTION_PARAMS][MATRIX][1])\n', (46899, 46947), False, 'import inspect\n')] |
# encoding = utf-8
import requests
import json
import jieba
from pandas import DataFrame
import pandas as pd
import numpy as np
def getnews(pages):
    """Fetch Sina domestic-news headlines.

    Retrieves `pages` pages (100 titles each) from the Sina rolling-news JSON
    API, stores the titles in the module-level list ``newsbag``, and returns it.
    """
    global newsbag
    newsbag = []
    for page in range(1, pages + 1):
        raw_url = 'http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=gnxw&cat_2==gdxw1||=gatxw||=zs-pl||=mtjj&level==1||=2&show_ext=1&show_all=10&show_num=100&tag=1&format=json&page={}&callback=newsloadercallback&_=1487824946231'
        url = raw_url.format(page)
        res = requests.get(url)
        # The response is JSONP; strip the "newsloadercallback(...);" wrapper.
        # NOTE: lstrip/rstrip strip character *sets*, not prefixes — fragile but
        # works for this fixed wrapper.
        jd = json.loads(res.text.lstrip(' newsloadercallback(').rstrip(');'))
        diclist = jd['result']['data']
        for ent in diclist:
            newsbag.append(ent['title'])
        # (removed a dead `continue` that was the last statement of the loop)
    return newsbag
def cutseg():
    """Segment every title in ``newsbag`` with jieba.

    Results accumulate in the module-level list ``seg_list``, which is also
    returned.
    """
    global seg_list
    seg_list = []
    for title in newsbag:
        # extend() appends in place; the original rebuilt the whole list with
        # `seg_list = seg_list + title_list` on every iteration (quadratic).
        seg_list.extend(jieba.cut(title))
    return seg_list
# Interactive driver: fetch titles, segment them, and count place-name mentions.
print('欢迎使用新浪国内新闻标题分词!')
pages = int(input("你想查询(返回输入值的10倍):"))
getnews(pages)
cutseg()

local_list = []
newslocal_list = []
# Load the gazetteer of place names (one per line).
# NOTE(review): 'D:\local.txt' only works because '\l' is not a recognized
# escape sequence; a raw string (r'D:\local.txt') would be safer.
with open('D:\local.txt', 'r', encoding='utf-8') as reader:
    for local in reader.readlines():
        local = local.strip('\n')
        local_list.append(local)
    reader.close()  # redundant: the with-statement already closes the file

# Keep only the segments that are known place names.
for i in seg_list:
    if i in local_list :
        newslocal_list.append(i)
    else:
        continue

# Count the frequency of each place name and export to Excel.
local_set = ()  # NOTE: pointless tuple init; immediately rebound to a set below
count_local_list = []
final_count_local = []
local_set = set(newslocal_list)
for local in local_set:
    count_local_list.append(newslocal_list.count(local))
final_count_local = list(zip(local_set,count_local_list))
dataNumPy = np.asarray(final_count_local)
DF1 = pd.DataFrame(dataNumPy,columns=['地名','出现次数'])
DF1.to_excel('3000条.xlsx')
print('Done!')
print('Done!') | [
"pandas.DataFrame",
"numpy.asarray",
"jieba.cut",
"requests.get"
] | [((1689, 1718), 'numpy.asarray', 'np.asarray', (['final_count_local'], {}), '(final_count_local)\n', (1699, 1718), True, 'import numpy as np\n'), ((1726, 1773), 'pandas.DataFrame', 'pd.DataFrame', (['dataNumPy'], {'columns': "['地名', '出现次数']"}), "(dataNumPy, columns=['地名', '出现次数'])\n", (1738, 1773), True, 'import pandas as pd\n'), ((525, 542), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (537, 542), False, 'import requests\n'), ((901, 913), 'jieba.cut', 'jieba.cut', (['i'], {}), '(i)\n', (910, 913), False, 'import jieba\n')] |
#!/usr/bin/env python
###############################################################################
#  - FILE: benchmark_linegraph
#  - DESC: Render benchmark scores as a line graph
###############################################################################

import sys
import os
import numpy as np
import cv2 as cv
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt

# Parse commandline args: every positional argument is a results file;
# "-o <folder>" overrides the default output folder.
pwd = os.getcwd()
out_folder = "/Users/Devreckas/Google-Drive/Wheeler-Labs/Personal_Work/fb-pruner/data-vis/alpha-linegraphs/"
filenames = []
# (removed unused `eval = ""`, which also shadowed the builtin eval)

if len(sys.argv) >= 2:
    i = 1
    while i < len(sys.argv):
        if sys.argv[i] == "-o":
            i += 1
            # Guard against a trailing "-o" with no folder (was an IndexError).
            if i >= len(sys.argv):
                print("Usage: <results_filename1> <results_filename2> ... -o <out_folder>")
                sys.exit(1)
            out_folder = sys.argv[i]
        else:
            filenames.append(sys.argv[i])
        i += 1
else:
    print("Usage: <results_filename1> <results_filename2> ... -o <out_folder>")
    # BUG FIX: EXIT_SUCCESS is a C idiom and was never defined here, so this
    # line raised NameError; exit with status 0 as the name intended.
    sys.exit(0)
# Read each results file and render a two-panel line graph per file.
# Expected whitespace-delimited columns per line:
#   hmm_path fasta_path viterbi fwd bck cloud_fwd cloud_bck alpha beta
#   perc_cells perc_wind q_len t_len
for filename in filenames:
    hmm_name = []
    fasta_name = []
    fa_name = []
    viterbi = []
    fwd = []
    bck = []
    cloud_fwd = []
    cloud_bck = []
    alpha = []
    beta = []
    perc_cells = []
    perc_wind = []
    perc_tot = []
    q_len = []
    t_len = []

    line_cnt = 0
    with open(filename, "r") as fp:
        for line in fp:
            line = line.split()
            print("line:", line)
            # Keep only the basename of the hmm / fasta paths.
            name = line[0].split("/")
            name = name[len(name)-1]
            hmm_name.append(name)
            name = line[1].split("/")
            name = name[len(name)-1]
            fasta_name.append(name)
            viterbi.append(float(line[2]))
            fwd.append(float(line[3]))
            bck.append(float(line[4]))
            cloud_fwd.append(float(line[5]))
            cloud_bck.append(float(line[6]))
            alpha.append(float(line[7]))
            beta.append(int(line[8]))
            # Percentages are plotted on a log10 scale.
            perc_cells.append(np.log10(float(line[9])))
            perc_wind.append(np.log10(float(line[10])))
            # NOTE(review): log10(1) is always 0, so perc_tot is a constant
            # baseline at 100% — confirm this is intentional.
            perc_tot.append(np.log10(1))
            q_len.append(int(line[11]))
            t_len.append(int(line[12]))
            line_cnt += 1
    print(line_cnt)

    # Render results: top panel = scores vs alpha, bottom panel = percent of cells.
    plt.subplot(2, 1, 1)
    title = "{} || {} \n QUERY: {}, TARGET: {}".format(
        hmm_name[0], fasta_name[0], q_len[0], t_len[0])
    plt.title(title)
    plt.plot(alpha, viterbi, 'r--', label="viterbi score")
    # plt.plot(alpha, alpha, 'b--', label="alpha score")
    plt.plot(alpha, fwd, 'b--', label="forward score")
    # plt.plot(alpha, fwd, 'b--', label="backward score")
    plt.plot(alpha, cloud_fwd, 'g--', label="cloud-fwd score")
    # plt.plot(alpha, cloud_bck, 'g--', label="cloud-bck score")
    plt.ylabel("score")
    plt.xticks(alpha)

    plt.subplot(2, 1, 2)
    plt.plot(alpha, perc_cells, 'k--', label="percent cells computed")
    plt.plot(alpha, perc_tot, 'k-', label="total percent")
    plt.ylabel("percent of cells computed")
    # Label the log-scale y axis with the underlying powers of ten.
    y_ticks = list(range(0, -4, -1))
    y_vals = [pow(10, y) for y in y_ticks]
    plt.yticks(y_ticks, y_vals)
    plt.xticks(alpha)

    # Derive the output file name from the fasta basename (minus extension).
    fasta_name = fasta_name[0].split(".")
    out_file = ""
    for i in range(len(fasta_name)-1):
        out_file += fasta_name[i]
    plt.xlabel('alpha pruning threshold')
    out_dest = "{}/{}.jpg".format(out_folder, out_file)
    print("saving figure to: '{}'... ".format(out_dest))
    plt.savefig(out_dest)
    # plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"os.getcwd",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.pyplot.xticks",
"sys.exit",
"matplotlib.pyplot.xlabel"
] | [((427, 438), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (436, 438), False, 'import os\n'), ((886, 908), 'sys.exit', 'sys.exit', (['EXIT_SUCCESS'], {}), '(EXIT_SUCCESS)\n', (894, 908), False, 'import sys\n'), ((2174, 2194), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2185, 2194), True, 'import matplotlib.pyplot as plt\n'), ((2311, 2327), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2320, 2327), True, 'import matplotlib.pyplot as plt\n'), ((2332, 2386), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha', 'viterbi', '"""r--"""'], {'label': '"""viterbi score"""'}), "(alpha, viterbi, 'r--', label='viterbi score')\n", (2340, 2386), True, 'import matplotlib.pyplot as plt\n'), ((2448, 2498), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha', 'fwd', '"""b--"""'], {'label': '"""forward score"""'}), "(alpha, fwd, 'b--', label='forward score')\n", (2456, 2498), True, 'import matplotlib.pyplot as plt\n'), ((2561, 2619), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha', 'cloud_fwd', '"""g--"""'], {'label': '"""cloud-fwd score"""'}), "(alpha, cloud_fwd, 'g--', label='cloud-fwd score')\n", (2569, 2619), True, 'import matplotlib.pyplot as plt\n'), ((2689, 2708), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""score"""'], {}), "('score')\n", (2699, 2708), True, 'import matplotlib.pyplot as plt\n'), ((2713, 2730), 'matplotlib.pyplot.xticks', 'plt.xticks', (['alpha'], {}), '(alpha)\n', (2723, 2730), True, 'import matplotlib.pyplot as plt\n'), ((2736, 2756), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (2747, 2756), True, 'import matplotlib.pyplot as plt\n'), ((2761, 2827), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha', 'perc_cells', '"""k--"""'], {'label': '"""percent cells computed"""'}), "(alpha, perc_cells, 'k--', label='percent cells computed')\n", (2769, 2827), True, 'import matplotlib.pyplot as plt\n'), ((2832, 2886), 'matplotlib.pyplot.plot', 'plt.plot', 
(['alpha', 'perc_tot', '"""k-"""'], {'label': '"""total percent"""'}), "(alpha, perc_tot, 'k-', label='total percent')\n", (2840, 2886), True, 'import matplotlib.pyplot as plt\n'), ((2891, 2930), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""percent of cells computed"""'], {}), "('percent of cells computed')\n", (2901, 2930), True, 'import matplotlib.pyplot as plt\n'), ((3015, 3042), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y_ticks', 'y_vals'], {}), '(y_ticks, y_vals)\n', (3025, 3042), True, 'import matplotlib.pyplot as plt\n'), ((3047, 3064), 'matplotlib.pyplot.xticks', 'plt.xticks', (['alpha'], {}), '(alpha)\n', (3057, 3064), True, 'import matplotlib.pyplot as plt\n'), ((3203, 3240), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""alpha pruning threshold"""'], {}), "('alpha pruning threshold')\n", (3213, 3240), True, 'import matplotlib.pyplot as plt\n'), ((3359, 3380), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_dest'], {}), '(out_dest)\n', (3370, 3380), True, 'import matplotlib.pyplot as plt\n'), ((1999, 2010), 'numpy.log10', 'np.log10', (['(1)'], {}), '(1)\n', (2007, 2010), True, 'import numpy as np\n')] |
"""Trajectory interpolation utility module.
Provides two main interpolation methods:
- linear
- minimum jerk
"""
from enum import Enum
from typing import Callable, Optional
import numpy as np
InterpolationFunc = Callable[[float], np.ndarray]
def linear(
    starting_position: np.ndarray,
    goal_position: np.ndarray,
    duration: float,
) -> InterpolationFunc:
    """Build a linear interpolation profile from start to goal over *duration*."""
    displacement = goal_position - starting_position

    def interpolate(t: float) -> np.ndarray:
        # Position advances proportionally to elapsed time.
        return starting_position + displacement * t / duration

    return interpolate
def minimum_jerk(
    starting_position: np.ndarray,
    goal_position: np.ndarray,
    duration: float,
    starting_velocity: Optional[np.ndarray] = None,
    starting_acceleration: Optional[np.ndarray] = None,
    final_velocity: Optional[np.ndarray] = None,
    final_acceleration: Optional[np.ndarray] = None,
) -> InterpolationFunc:
    """Build the minimum jerk interpolation profile from start to goal.

    Unspecified boundary velocities/accelerations default to zero
    (rest-to-rest motion).
    """
    if starting_velocity is None:
        starting_velocity = np.zeros(starting_position.shape)
    if starting_acceleration is None:
        starting_acceleration = np.zeros(starting_position.shape)
    if final_velocity is None:
        final_velocity = np.zeros(goal_position.shape)
    if final_acceleration is None:
        final_acceleration = np.zeros(goal_position.shape)

    # The first three quintic coefficients are fixed by the initial state.
    c0 = starting_position
    c1 = starting_velocity
    c2 = starting_acceleration / 2

    d1, d2, d3, d4, d5 = (duration ** power for power in range(1, 6))

    # The final state imposes a 3x3 linear system on the remaining coefficients.
    lhs = np.array((
        (d3, d4, d5),
        (3 * d2, 4 * d3, 5 * d4),
        (6 * d1, 12 * d2, 20 * d3),
    ))
    rhs = np.array((
        goal_position - c0 - (c1 * d1) - (c2 * d2),
        final_velocity - c1 - (2 * c2 * d1),
        final_acceleration - (2 * c2),
    ))
    c3, c4, c5 = np.linalg.solve(lhs, rhs)

    coefficients = (c0, c1, c2, c3, c4, c5)

    def interpolate(t: float) -> np.ndarray:
        # Evaluate the quintic polynomial sum(c_i * t**i).
        return np.sum([
            coef * t ** power
            for power, coef in enumerate(coefficients)
        ], axis=0)

    return interpolate
class InterpolationMode(Enum):
    """Interpolation mode enumeration.

    Associates each mode name with the factory building the matching
    interpolation function (``linear`` or ``minimum_jerk``).
    """
    # NOTE(review): plain function objects assigned in an Enum body are
    # descriptors and are normally treated as methods rather than members;
    # projects usually wrap them in functools.partial to get real members.
    # Confirm how callers access LINEAR/MINIMUM_JERK before relying on
    # Enum member semantics here.
    LINEAR: Callable[[np.ndarray, np.ndarray, float], InterpolationFunc] = linear
    MINIMUM_JERK: Callable[[np.ndarray, np.ndarray, float], InterpolationFunc] = minimum_jerk
| [
"numpy.zeros",
"numpy.linalg.solve",
"numpy.array"
] | [((1581, 1659), 'numpy.array', 'np.array', (['((d3, d4, d5), (3 * d2, 4 * d3, 5 * d4), (6 * d1, 12 * d2, 20 * d3))'], {}), '(((d3, d4, d5), (3 * d2, 4 * d3, 5 * d4), (6 * d1, 12 * d2, 20 * d3)))\n', (1589, 1659), True, 'import numpy as np\n'), ((1698, 1816), 'numpy.array', 'np.array', (['(goal_position - a0 - a1 * d1 - a2 * d2, final_velocity - a1 - 2 * a2 * d1,\n final_acceleration - 2 * a2)'], {}), '((goal_position - a0 - a1 * d1 - a2 * d2, final_velocity - a1 - 2 *\n a2 * d1, final_acceleration - 2 * a2))\n', (1706, 1816), True, 'import numpy as np\n'), ((1859, 1880), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (1874, 1880), True, 'import numpy as np\n'), ((1101, 1134), 'numpy.zeros', 'np.zeros', (['starting_position.shape'], {}), '(starting_position.shape)\n', (1109, 1134), True, 'import numpy as np\n'), ((1205, 1238), 'numpy.zeros', 'np.zeros', (['starting_position.shape'], {}), '(starting_position.shape)\n', (1213, 1238), True, 'import numpy as np\n'), ((1295, 1324), 'numpy.zeros', 'np.zeros', (['goal_position.shape'], {}), '(goal_position.shape)\n', (1303, 1324), True, 'import numpy as np\n'), ((1389, 1418), 'numpy.zeros', 'np.zeros', (['goal_position.shape'], {}), '(goal_position.shape)\n', (1397, 1418), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 21:16:47 2019
@author: self-driver
"""
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
import math
'''
x = [3, 4.5, 7, 9]
SizeOfX = len(x)
paremeter = []
i = 1
while i < SizeOfX:
data = np.zeros((n * (SizeOfX)))
for j in range(n-1):
data[n*i+j+1] = -x[i]**j*(j+1)
data[n*(i-1)+j+1] = x[i]**j*(j+1)
paremeter.append(data)
i = i+1
'''
order = 4
para_a = []
para_x = []
para = []
for i in range(order):
    # Build the i-th rows of the exponent matrix (data_x) and of the
    # coefficient matrices (data_a for the plain derivative factors,
    # data for the factors evaluated with an extra 2**(j-i) scaling).
    data_x = np.zeros(order + 1)
    data_a = np.zeros(order + 1)
    data = np.zeros(order + 1)
    for j in range(i, order + 1):
        # Columns below i stay zero: the i-th derivative removes them.
        data_x[j] = j - i
        data_a[j] = math.factorial(j) / math.factorial(j - i)
        data[j] = 2 ** (j - i) * math.factorial(j) / math.factorial(j - i)
    para_x.append(data_x)
    para_a.append(data_a)
    para.append(data)
# Display the resulting matrices row by row.
print('para_a :')
for row in para_a:
    print(row)
print('para_x :')
for row in para_x:
    print(row)
print('para :')
for row in para:
    print(row)
| [
"numpy.zeros",
"math.factorial"
] | [((584, 603), 'numpy.zeros', 'np.zeros', (['(order + 1)'], {}), '(order + 1)\n', (592, 603), True, 'import numpy as np\n'), ((615, 634), 'numpy.zeros', 'np.zeros', (['(order + 1)'], {}), '(order + 1)\n', (623, 634), True, 'import numpy as np\n'), ((644, 663), 'numpy.zeros', 'np.zeros', (['(order + 1)'], {}), '(order + 1)\n', (652, 663), True, 'import numpy as np\n'), ((738, 755), 'math.factorial', 'math.factorial', (['j'], {}), '(j)\n', (752, 755), False, 'import math\n'), ((756, 777), 'math.factorial', 'math.factorial', (['(j - i)'], {}), '(j - i)\n', (770, 777), False, 'import math\n'), ((824, 845), 'math.factorial', 'math.factorial', (['(j - i)'], {}), '(j - i)\n', (838, 845), False, 'import math\n'), ((806, 823), 'math.factorial', 'math.factorial', (['j'], {}), '(j)\n', (820, 823), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 29 12:35:19 2019
@author: HCHO
"""
from DouglasPeucker import *
import sys
import os
import math
import pandas as pd
import numpy as np
#命令行输入车道路径,如python LaneTracking.py lane/1.txt
#后面按代码规范修改下
if __name__ == "__main__":
    # Command-line usage: python LaneTracking.py lane/1.txt
    # lane_path = sys.argv[1]
    # print(lane_path)
    # Hard-coded input file used for testing.
    lane_path = "data\\lane\\NovAtel2019-11-22-11-35-10.txt"
    father_path = os.path.dirname(lane_path)
    list_Lane = os.listdir(father_path)
    # Run the Douglas-Peucker simplification only when no ".after" result
    # file already exists next to the input.
    is_DouglasPecker_processing = True
    for item in list_Lane:
        if "after" in item:
            is_DouglasPecker_processing = False
    if is_DouglasPecker_processing:
        outputResult(lane_path, 1)
    after_lane_path = lane_path.replace('.txt', '.after.txt')
    # after_lane columns: id, timestamp, lon, lat, gauss_Y, gauss_X, height, index
    after_lane = pd.read_csv(after_lane_path, header=None, delim_whitespace=True, dtype='double')
    # Build a 10 m x 10 m grid index over the Gauss plane coordinates.
    x_max = after_lane[4].max()
    x_min = after_lane[4].min()
    y_max = after_lane[5].max()
    y_min = after_lane[5].min()
    x_difference = x_max - x_min
    y_difference = y_max - y_min
    x_index_num = math.ceil(x_difference / 10)
    y_index_num = math.ceil(y_difference / 10)
    # Cell id = floor((x - x_min) / 10) + ceil((y - y_min) / 10) * x_index_num
    after_lane_np = after_lane.values
    index_row = np.floor((after_lane_np[:, 4] - x_min) / 10) + np.ceil((after_lane_np[:, 5] - y_min) / 10) * x_index_num
    after_lane[8] = index_row
    # Fetch every point falling into one grid cell (cell 32 as a test query).
    current_grid = after_lane[after_lane[8] == 32].copy()
    # Query position (placeholder: the first trajectory point).
    # x = position.x
    # y = position.y
    x = after_lane.loc[0, 4]
    y = after_lane.loc[0, 5]
    # Euclidean distance from the query position to each point of the cell.
    current_grid[9] = ((current_grid[4] - x) ** 2 + (current_grid[5] - y) ** 2) ** 0.5
    current_point_id = current_grid[9].idxmin()
    # Select a [id - 10, id + 50] window of points around the nearest point,
    # clamped to the valid row range of the table.
    min_point_id = max(current_point_id - 10, 0)
    if current_point_id + 50 <= after_lane.shape[0]:
        max_point_id = current_point_id + 50
    else:
        # BUG FIX: the original used lane_path.shape[0], but lane_path is a
        # str; the intended upper bound is the row count of after_lane.
        max_point_id = after_lane.shape[0]
| [
"numpy.ceil",
"math.ceil",
"pandas.read_csv",
"os.path.dirname",
"numpy.floor",
"os.listdir"
] | [((425, 451), 'os.path.dirname', 'os.path.dirname', (['lane_path'], {}), '(lane_path)\n', (440, 451), False, 'import os\n'), ((468, 491), 'os.listdir', 'os.listdir', (['father_path'], {}), '(father_path)\n', (478, 491), False, 'import os\n'), ((870, 955), 'pandas.read_csv', 'pd.read_csv', (['after_lane_path'], {'header': 'None', 'delim_whitespace': '(True)', 'dtype': '"""double"""'}), "(after_lane_path, header=None, delim_whitespace=True, dtype='double'\n )\n", (881, 955), True, 'import pandas as pd\n'), ((1157, 1185), 'math.ceil', 'math.ceil', (['(x_difference / 10)'], {}), '(x_difference / 10)\n', (1166, 1185), False, 'import math\n'), ((1200, 1228), 'math.ceil', 'math.ceil', (['(y_difference / 10)'], {}), '(y_difference / 10)\n', (1209, 1228), False, 'import math\n'), ((1343, 1387), 'numpy.floor', 'np.floor', (['((after_lane_np[:, 4] - x_min) / 10)'], {}), '((after_lane_np[:, 4] - x_min) / 10)\n', (1351, 1387), True, 'import numpy as np\n'), ((1383, 1426), 'numpy.ceil', 'np.ceil', (['((after_lane_np[:, 5] - y_min) / 10)'], {}), '((after_lane_np[:, 5] - y_min) / 10)\n', (1390, 1426), True, 'import numpy as np\n')] |
#!/ur/bin/python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
iBEAt study T2* model fit
2021
"""
import numpy as np
import pydicom
from scipy.optimize import curve_fit
def read_and_sort_echo_times(fname, lstFilesDCM):
    """Provide a sorted list of DICOM datasets from the sorted T2* echo times (TE).

    Args
    ----
    fname (pathlib.PosixPath): unused; kept for backward compatibility
        (each path in lstFilesDCM is read directly).
    lstFilesDCM (list): list of dicom files to process

    Returns
    -------
    echo_times (list): sorted list of echo times
    slice_sorted_echo_time (list): DICOM datasets reordered to match the
        sorted echo times.
    """
    echo_times = []
    files = []
    for dcm_path in lstFilesDCM:
        # Read each DICOM once and reuse the dataset for both the TE list
        # and the dataset list (the original read every file twice).
        dataset = pydicom.dcmread(dcm_path)
        echo_times.append(dataset.EchoTime)
        files.append(dataset)
    # Permutation that sorts the echo times, applied to the datasets too.
    sort_index = np.argsort(echo_times)
    echo_times.sort()
    slice_sorted_echo_time = [files[sort_index[i]] for i in range(len(files))]
    return echo_times, slice_sorted_echo_time
def exp_func(TE, S0, T2star):
    """Mono-exponential decay model used for T2*-fitting.

    Args
    ----
    TE (int): echo time(s) for the T2* mapping sequence.

    Returns
    -------
    numpy.ndarray: modelled signal ``S0 * exp(-TE / T2star)``.
    """
    decay = np.exp(-TE / T2star)
    return S0 * decay
def T2star_fitting(images_to_be_fitted, echo_times):
    """Fit the mono-exponential model and return the fit plus S0 and T2*.

    Args
    ----
    images_to_be_fitted (numpy.ndarray): pixel values for the time-series
        (one value per TE) with shape [x, :]
    echo_times (list): list of TE times

    Returns
    -------
    fit (list): signal model fit per pixel
    S0 (numpy.float64): fitted parameter 'S0' per pixel
    T2star (numpy.float64): fitted parameter 'T2*' (ms) per pixel.
    """
    # Bounds keep S0 positive and T2* inside a 10-100 ms window.
    lower_bounds = [0, 10]
    upper_bounds = [np.inf, 100]
    initial_guess = [np.max(images_to_be_fitted), 50]
    popt, _ = curve_fit(
        exp_func,
        xdata=echo_times,
        ydata=images_to_be_fitted,
        p0=initial_guess,
        bounds=(lower_bounds, upper_bounds),
        method='trf',
    )
    S0, T2star = popt[0], popt[1]
    fit = [exp_func(te, S0, T2star) for te in echo_times]
    return fit, S0, T2star
def main(images_to_be_fitted, signal_model_parameters):
    """Model-fit T2* at the single-pixel level.

    Args
    ----
    images_to_be_fitted (numpy.ndarray): pixel values for the time-series
        (one value per TE) with shape [x, :]
    signal_model_parameters (list): TE times as a list

    Returns
    -------
    fit (list): signal model fit per pixel
    fitted_parameters (list): fitted parameters [S0, T2star].
    """
    echo_times = signal_model_parameters
    fit, S0, T2star = T2star_fitting(images_to_be_fitted, echo_times)
    return fit, [S0, T2star]
| [
"pydicom.dcmread",
"scipy.optimize.curve_fit",
"numpy.argsort",
"numpy.max",
"numpy.exp"
] | [((842, 864), 'numpy.argsort', 'np.argsort', (['echo_times'], {}), '(echo_times)\n', (852, 864), True, 'import numpy as np\n'), ((2190, 2308), 'scipy.optimize.curve_fit', 'curve_fit', (['exp_func'], {'xdata': 'echo_times', 'ydata': 'images_to_be_fitted', 'p0': 'initial_guess', 'bounds': '(lb, ub)', 'method': '"""trf"""'}), "(exp_func, xdata=echo_times, ydata=images_to_be_fitted, p0=\n initial_guess, bounds=(lb, ub), method='trf')\n", (2199, 2308), False, 'from scipy.optimize import curve_fit\n'), ((708, 730), 'pydicom.dcmread', 'pydicom.dcmread', (['fname'], {}), '(fname)\n', (723, 730), False, 'import pydicom\n'), ((1552, 1572), 'numpy.exp', 'np.exp', (['(-TE / T2star)'], {}), '(-TE / T2star)\n', (1558, 1572), True, 'import numpy as np\n'), ((2139, 2166), 'numpy.max', 'np.max', (['images_to_be_fitted'], {}), '(images_to_be_fitted)\n', (2145, 2166), True, 'import numpy as np\n'), ((799, 821), 'pydicom.dcmread', 'pydicom.dcmread', (['fname'], {}), '(fname)\n', (814, 821), False, 'import pydicom\n')] |
import os
import numpy as np
from sklearn.externals import joblib
from . import features
from .features import loudness, mfcc, onsetflux, onsetcsd, onsethfc
feature_modules = [features.loudness, features.mfcc, features.onsetflux, features.onsetcsd, features.onsethfc]
class DownbeatTracker:
    """Detects the downbeat locations given the beat locations and audio."""

    def __init__(self):
        # Pre-trained downbeat classifier and feature scaler shipped next to
        # this module.
        self.model = joblib.load(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'model.pkl'))
        self.scaler = joblib.load(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'scaler.pkl'))

    def trimAudio(self, audio, beats):
        """Return (start, end) beat-interval indices of the non-silent part.

        RMS energy is computed per beat interval; leading/trailing intervals
        whose RMS stays below a fraction of the smoothed maximum are trimmed.
        """
        beats = np.array(beats) * 44100  # Beats in samples (44.1 kHz assumed)
        rms = []
        for i in range(len(beats) - 1):
            rms.append(np.sqrt(np.mean(np.square(audio[int(beats[i]): int(beats[i + 1])]))))

        def adaptive_mean(x, N):
            # Moving average over N intervals.
            return np.convolve(x, [1.0] * int(N), mode='same') / N

        rms_adaptive = adaptive_mean(rms, 4)
        rms_adaptive_max = max(rms_adaptive)
        start, end = 0, 0
        ratios = [.9, .8, .7, .6, .5, .4, .3, .2, .1]
        # NOTE(review): every ratio overwrites start/end, so the values
        # returned come from the last (most permissive) ratio, 0.1. Kept
        # as-is to preserve the original behaviour.
        for ratio in ratios:
            for i in range(len(rms)):
                if rms[i] > ratio * rms_adaptive_max:
                    start = i
                    break
            for i in range(len(rms)):
                if rms[len(rms) - i - 1] > ratio * rms_adaptive_max:
                    end = len(rms) - i - 1
                    break
        return start, end

    def getFeaturesForAudio(self, input_features):
        """Compute the per-beat feature matrix fed to the classifier."""
        FRAME_INDEXER_MIN = 4
        FRAME_INDEXER_MAX = len(input_features['beats']) - 9
        trim_start_beat, trim_end_beat = self.trimAudio(input_features['audio'], input_features['beats'])
        # BUG FIX: np.max/np.min were called with two scalars, which numpy
        # interprets as (array, axis) and raises; the intent is the builtin
        # two-argument max/min to clamp the indexer range.
        indexer_start = max(FRAME_INDEXER_MIN, trim_start_beat)
        indexer_end = min(FRAME_INDEXER_MAX, trim_end_beat)
        frame_indexer = range(indexer_start, indexer_end)
        # Concatenate the outputs of every feature module column-wise.
        features_cur_file = None
        for module in feature_modules:
            absolute_feature_submatrix = module.feature_allframes(input_features, frame_indexer)
            if features_cur_file is None:
                features_cur_file = absolute_feature_submatrix
            else:
                features_cur_file = np.append(features_cur_file, absolute_feature_submatrix, axis=1)
        return features_cur_file, trim_start_beat

    def track(self, audio, beats, fft_mag, fft_phase, onset_curve):
        """Track the downbeats of the given audio file.

        Returns every fourth beat starting at the most likely downbeat
        (a 4/4 meter is assumed).
        """
        input_features = {
            'audio': audio,
            'beats': beats,
            'fft_mag': fft_mag,
            'fft_ang': fft_phase,
            'onset_curve': onset_curve,
        }
        features, trim_start_beat = self.getFeaturesForAudio(input_features)
        probas = self.model.predict_log_proba(features)
        # Accumulate log-probabilities of the four possible downbeat phases
        # by rotating each frame's prediction into a common reference frame.
        sum_log_probas = np.array([[0, 0, 0, 0]], dtype='float64')
        permuted_row = [0] * 4
        for j, row in zip(np.array(range(len(probas))) % 4, probas):
            permuted_row[:4 - j] = row[j:]
            permuted_row[4 - j:] = row[:j]
            sum_log_probas = sum_log_probas + permuted_row
        downbeatIndex = ((4 - np.argmax(sum_log_probas)) + trim_start_beat) % 4
        return beats[downbeatIndex::4]
| [
"numpy.argmax",
"os.path.dirname",
"numpy.append",
"numpy.max",
"numpy.min",
"numpy.array"
] | [((1802, 1844), 'numpy.max', 'np.max', (['FRAME_INDEXER_MIN', 'trim_start_beat'], {}), '(FRAME_INDEXER_MIN, trim_start_beat)\n', (1808, 1844), True, 'import numpy as np\n'), ((1867, 1907), 'numpy.min', 'np.min', (['FRAME_INDEXER_MAX', 'trim_end_beat'], {}), '(FRAME_INDEXER_MAX, trim_end_beat)\n', (1873, 1907), True, 'import numpy as np\n'), ((2911, 2952), 'numpy.array', 'np.array', (['[[0, 0, 0, 0]]'], {'dtype': '"""float64"""'}), "([[0, 0, 0, 0]], dtype='float64')\n", (2919, 2952), True, 'import numpy as np\n'), ((673, 688), 'numpy.array', 'np.array', (['beats'], {}), '(beats)\n', (681, 688), True, 'import numpy as np\n'), ((2294, 2358), 'numpy.append', 'np.append', (['features_cur_file', 'absolute_feature_submatrix'], {'axis': '(1)'}), '(features_cur_file, absolute_feature_submatrix, axis=1)\n', (2303, 2358), True, 'import numpy as np\n'), ((469, 494), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (484, 494), False, 'import os\n'), ((574, 599), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (589, 599), False, 'import os\n'), ((3251, 3276), 'numpy.argmax', 'np.argmax', (['sum_log_probas'], {}), '(sum_log_probas)\n', (3260, 3276), True, 'import numpy as np\n')] |
"""rio-tiler colormap functions and classes."""
import os
import pathlib
import re
from typing import Dict, List, Sequence, Tuple, Union
import attr
import numpy
from .constants import NumType
from .errors import (
ColorMapAlreadyRegistered,
InvalidColorFormat,
InvalidColorMapName,
InvalidFormat,
)
try:
from importlib.resources import files as resources_files # type: ignore
except ImportError:
# Try backported to PY<39 `importlib_resources`.
from importlib_resources import files as resources_files # type: ignore
# 256-entry fully transparent colormap, usable as a template.
EMPTY_COLORMAP: Dict = {i: [0, 0, 0, 0] for i in range(256)}
# Map colormap name (file stem) -> path of the bundled .npy lookup table.
DEFAULT_CMAPS_FILES = {
    f.stem: str(f) for f in (resources_files(__package__) / "cmap_data").glob("*.npy")  # type: ignore
}
# Optional user directory of extra .npy colormaps; entries with the same
# stem override the bundled ones.
USER_CMAPS_DIR = os.environ.get("COLORMAP_DIRECTORY", None)
if USER_CMAPS_DIR:
    DEFAULT_CMAPS_FILES.update(
        {f.stem: str(f) for f in pathlib.Path(USER_CMAPS_DIR).glob("*.npy")}
    )
def _update_alpha(cmap: Dict, idx: Sequence[int], alpha: int = 0) -> None:
"""Update the alpha value of a colormap index."""
if isinstance(idx, int):
idx = (idx,)
for i in idx:
cmap[i] = cmap[i][0:3] + [alpha]
def _remove_value(cmap: Dict, idx: Sequence[int]) -> None:
"""Remove value from a colormap dict."""
if isinstance(idx, int):
idx = (idx,)
for i in idx:
cmap.pop(i, None)
def _update_cmap(cmap: Dict, values: Dict) -> None:
"""Update a colormap dict."""
for i, color in values.items():
if len(color) == 3:
color += [255]
cmap[i] = color
# From https://github.com/mojodna/marblecutter/blob/5b9040ba6c83562a465eabdbb6e8959e6a8bf041/marblecutter/utils.py#L35
def make_lut(colormap: Dict) -> numpy.ndarray:
    """Create a lookup table numpy.ndarray from a GDAL RGBA Color Table dictionary.

    Args:
        colormap (dict): GDAL RGBA Color Table dictionary.

    Returns:
        numpy.ndarray: (256, 4) uint8 colormap lookup table; entries absent
        from the input stay [0, 0, 0, 0].
    """
    lookup = numpy.zeros(shape=(256, 4), dtype=numpy.uint8)
    for value, rgba in colormap.items():
        lookup[int(value)] = rgba
    return lookup
def apply_cmap(
    data: numpy.ndarray, colormap: Union[Dict, Sequence]
) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """Apply colormap on data.

    Args:
        data (numpy ndarray): 1D image array to translate to RGB.
        colormap (dict or sequence): GDAL RGBA Color Table dictionary, or an
            intervals colormap sequence.

    Returns:
        tuple: Data (numpy.ndarray) and Mask (numpy.ndarray) values.

    Raises:
        InvalidFormat: If data is not a 1 band dataset (1, col, row).
    """
    if data.shape[0] > 1:
        raise InvalidFormat("Source data must be 1 band")

    # Interval colormaps are handled by their dedicated code path.
    if isinstance(colormap, Sequence):
        return apply_intervals_cmap(data, colormap)

    # A lookup table only has 256 slots, so a colormap with more entries or
    # with keys >= 256 must go through the slower per-key discrete path.
    if len(colormap) > 256 or max(colormap) >= 256:
        return apply_discrete_cmap(data, colormap)

    rgba = make_lut(colormap)[data[0], :]
    rgba = numpy.transpose(rgba, [2, 0, 1])

    # Cast to uint8 when every channel fits the 0-255 range.
    if rgba.min() >= 0 and rgba.max() <= 255:
        rgba = rgba.astype("uint8")

    return rgba[:-1], rgba[-1]
def apply_discrete_cmap(
    data: numpy.ndarray, colormap: Dict
) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """Apply discrete colormap.

    Args:
        data (numpy ndarray): 1D image array to translate to RGB.
        colormap (dict): discrete colormap mapping pixel value -> RGBA color.

    Returns:
        tuple: Data (numpy.ndarray) and Alpha band (numpy.ndarray).

    Examples:
        >>> data = numpy.random.randint(0, 3, size=(1, 256, 256))
        cmap = {
            0: [0, 0, 0, 0],
            1: [255, 255, 255, 255],
            2: [255, 0, 0, 255],
            3: [255, 255, 0, 255],
        }
        data, mask = apply_discrete_cmap(data, cmap)
        assert data.shape == (3, 256, 256)
    """
    rgba = numpy.zeros((data.shape[1], data.shape[2], 4), dtype=numpy.uint8)
    # Paint each pixel value with its associated color.
    for value, color in colormap.items():
        rgba[data[0] == value] = color
    rgba = numpy.transpose(rgba, [2, 0, 1])

    # Cast to uint8 when every channel fits the 0-255 range.
    if rgba.min() >= 0 and rgba.max() <= 255:
        rgba = rgba.astype("uint8")

    return rgba[:-1], rgba[-1]
def apply_intervals_cmap(
    data: numpy.ndarray, colormap: Sequence[Sequence[Sequence[NumType]]]
) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """Apply intervals colormap.

    Args:
        data (numpy ndarray): 1D image array to translate to RGB.
        colormap (Sequence): intervals and colors in the form
            [([min, max], [r, g, b, a]), ...]; each interval is half-open
            (min inclusive, max exclusive).

    Returns:
        tuple: Data (numpy.ndarray) and Alpha band (numpy.ndarray).

    Examples:
        >>> data = numpy.random.randint(0, 3, size=(1, 256, 256))
        cmap = [
            ([0, 1], [0, 0, 0, 0]),
            ([1, 2], [255, 255, 255, 255]),
            ([2, 3], [255, 0, 0, 255]),
            ([3, 4], [255, 255, 0, 255]),
        ]
        data, mask = apply_intervals_cmap(data, cmap)
        assert data.shape == (3, 256, 256)
    """
    rgba = numpy.zeros((data.shape[1], data.shape[2], 4), dtype=numpy.uint8)
    for bounds, color in colormap:
        inside = (data[0] >= bounds[0]) & (data[0] < bounds[1])
        rgba[inside] = color
    rgba = numpy.transpose(rgba, [2, 0, 1])

    # Cast to uint8 when every channel fits the 0-255 range.
    if rgba.min() >= 0 and rgba.max() <= 255:
        rgba = rgba.astype("uint8")

    return rgba[:-1], rgba[-1]
def parse_color(rgba: Union[Sequence[int], str]) -> Tuple[int, int, int, int]:
    """Parse an RGB/RGBA color into a rio-tiler compatible RGBA tuple.

    Args:
        rgba (str or sequence of int): HEX encoded string (#RGB, #RGBA,
            #RRGGBB or #RRGGBBAA) or a sequence of 3/4 RGB(A) integers.

    Returns:
        tuple: (R, G, B, A) values; a missing alpha defaults to 255.

    Examples:
        >>> parse_color("#FFF")
        (255, 255, 255, 255)
        >>> parse_color("#FF0000FF")
        (255, 0, 0, 255)
        >>> parse_color([255, 255, 255])
        (255, 255, 255, 255)
    """
    if isinstance(rgba, str):
        if re.match("^#[a-fA-F0-9]{3,4}$", rgba):
            # Short form: one hex digit per channel, doubled to get 0-255.
            factor = 2
            hex_pattern = (
                r"^#"
                r"(?P<red>[a-fA-F0-9])"
                r"(?P<green>[a-fA-F0-9])"
                r"(?P<blue>[a-fA-F0-9])"
                r"(?P<alpha>[a-fA-F0-9])?"
                r"$"
            )
        elif re.match("^#([a-fA-F0-9][a-fA-F0-9]){3,4}$", rgba):
            # Long form: two hex digits per channel.
            factor = 1
            hex_pattern = (
                r"^#"
                r"(?P<red>[a-fA-F0-9][a-fA-F0-9])"
                r"(?P<green>[a-fA-F0-9][a-fA-F0-9])"
                r"(?P<blue>[a-fA-F0-9][a-fA-F0-9])"
                r"(?P<alpha>[a-fA-F0-9][a-fA-F0-9])?"
                r"$"
            )
        else:
            raise InvalidColorFormat(f"Invalid color format: {rgba}")

        groups = re.match(hex_pattern, rgba).groupdict()
        rgba = [int(digits * factor, 16) for digits in groups.values() if digits is not None]

    if len(rgba) > 4 or len(rgba) < 3:
        raise InvalidColorFormat(f"Invalid color format: {rgba}")

    color = tuple(rgba)
    if len(color) == 3:
        color += (255,)

    return color  # type: ignore
@attr.s(frozen=True)
class ColorMaps:
    """Default Colormaps holder.

    Attributes:
        data (dict): registered colormaps, mapping a name either to a
            colormap dict or to the path of a ``.npy`` file holding a
            (256, 4) uint8 lookup table. Defaults to
            `rio_tiler.colormap.DEFAULT_CMAPS_FILES`.
    """

    data: Dict[str, Union[str, Dict]] = attr.ib(
        default=attr.Factory(lambda: DEFAULT_CMAPS_FILES)
    )

    def get(self, name: str) -> Dict:
        """Fetch a colormap.

        Args:
            name (str): colormap name.

        Returns:
            dict: colormap dictionary.

        Raises:
            InvalidColorMapName: if no colormap is registered under ``name``.
        """
        cmap = self.data.get(name, None)
        if cmap is None:
            raise InvalidColorMapName(f"Invalid colormap name: {name}")

        if isinstance(cmap, str):
            # File-backed colormap: load the (256, 4) uint8 lookup table and
            # expand it into a {value: [R, G, B, A]} dict.
            colormap = numpy.load(cmap)
            assert colormap.shape == (256, 4)
            assert colormap.dtype == numpy.uint8
            return {idx: value.tolist() for idx, value in enumerate(colormap)}

        return cmap

    def list(self) -> List[str]:
        """List registered Colormaps.

        Returns:
            list: list of colormap names.
        """
        return list(self.data)

    def register(
        self, custom_cmap: Dict[str, Union[str, Dict]], overwrite: bool = False,
    ) -> "ColorMaps":
        """Register a custom colormap.

        Args:
            custom_cmap (dict): custom colormap(s) to register.
            overwrite (bool): overwrite an existing colormap with the same
                name (default: False).

        Returns:
            ColorMaps: a new holder including the custom colormap(s).

        Raises:
            ColorMapAlreadyRegistered: if a name is already registered and
                ``overwrite`` is False.

        Examples:
            >>> cmap = cmap.register({"acmap": {0: [0, 0, 0, 0]}})
            >>> cmap = cmap.register({"acmap": "acmap.npy"})
        """
        for name in custom_cmap:
            if not overwrite and name in self.data:
                # BUG FIX: the original message said "force=True", but the
                # keyword argument is actually named "overwrite".
                raise ColorMapAlreadyRegistered(
                    f"{name} is already registered. Use overwrite=True to overwrite."
                )
        return ColorMaps({**self.data, **custom_cmap})


cmap = ColorMaps()  # noqa
| [
"numpy.load",
"attr.Factory",
"attr.s",
"numpy.transpose",
"numpy.zeros",
"re.match",
"importlib_resources.files",
"os.environ.get",
"pathlib.Path"
] | [((763, 805), 'os.environ.get', 'os.environ.get', (['"""COLORMAP_DIRECTORY"""', 'None'], {}), "('COLORMAP_DIRECTORY', None)\n", (777, 805), False, 'import os\n'), ((7628, 7647), 'attr.s', 'attr.s', ([], {'frozen': '(True)'}), '(frozen=True)\n', (7634, 7647), False, 'import attr\n'), ((1985, 2031), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(256, 4)', 'dtype': 'numpy.uint8'}), '(shape=(256, 4), dtype=numpy.uint8)\n', (1996, 2031), False, 'import numpy\n'), ((3247, 3279), 'numpy.transpose', 'numpy.transpose', (['data', '[2, 0, 1]'], {}), '(data, [2, 0, 1])\n', (3262, 3279), False, 'import numpy\n'), ((4228, 4293), 'numpy.zeros', 'numpy.zeros', (['(data.shape[1], data.shape[2], 4)'], {'dtype': 'numpy.uint8'}), '((data.shape[1], data.shape[2], 4), dtype=numpy.uint8)\n', (4239, 4293), False, 'import numpy\n'), ((4371, 4402), 'numpy.transpose', 'numpy.transpose', (['res', '[2, 0, 1]'], {}), '(res, [2, 0, 1])\n', (4386, 4402), False, 'import numpy\n'), ((5467, 5532), 'numpy.zeros', 'numpy.zeros', (['(data.shape[1], data.shape[2], 4)'], {'dtype': 'numpy.uint8'}), '((data.shape[1], data.shape[2], 4), dtype=numpy.uint8)\n', (5478, 5532), False, 'import numpy\n'), ((5628, 5659), 'numpy.transpose', 'numpy.transpose', (['res', '[2, 0, 1]'], {}), '(res, [2, 0, 1])\n', (5643, 5659), False, 'import numpy\n'), ((6479, 6516), 're.match', 're.match', (['"""^#[a-fA-F0-9]{3,4}$"""', 'rgba'], {}), "('^#[a-fA-F0-9]{3,4}$', rgba)\n", (6487, 6516), False, 'import re\n'), ((7276, 7303), 're.match', 're.match', (['hex_pattern', 'rgba'], {}), '(hex_pattern, rgba)\n', (7284, 7303), False, 'import re\n'), ((6805, 6855), 're.match', 're.match', (['"""^#([a-fA-F0-9][a-fA-F0-9]){3,4}$"""', 'rgba'], {}), "('^#([a-fA-F0-9][a-fA-F0-9]){3,4}$', rgba)\n", (6813, 6855), False, 'import re\n'), ((7871, 7913), 'attr.Factory', 'attr.Factory', (['(lambda : DEFAULT_CMAPS_FILES)'], {}), '(lambda : DEFAULT_CMAPS_FILES)\n', (7883, 7913), False, 'import attr\n'), ((8307, 8323), 'numpy.load', 'numpy.load', 
(['cmap'], {}), '(cmap)\n', (8317, 8323), False, 'import numpy\n'), ((669, 697), 'importlib_resources.files', 'resources_files', (['__package__'], {}), '(__package__)\n', (684, 697), True, 'from importlib_resources import files as resources_files\n'), ((890, 918), 'pathlib.Path', 'pathlib.Path', (['USER_CMAPS_DIR'], {}), '(USER_CMAPS_DIR)\n', (902, 918), False, 'import pathlib\n')] |
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import matplotlib.pyplot as plt
import vtk as v
from scipy.spatial import Delaunay
from vtk.numpy_interface import dataset_adapter as dsa
from vtk.numpy_interface import algorithms as alg
def numpy2PolyData(data):
    """Wrap a numpy point array into a vtkPolyData of vertex glyphs."""
    points = v.vtkPoints()
    points.SetData(dsa.numpyTovtkDataArray(data))
    poly = v.vtkPolyData()
    poly.SetPoints(points)
    # One vertex cell per point so the data renders as a point cloud.
    glyphs = v.vtkVertexGlyphFilter()
    glyphs.SetInputData(poly)
    glyphs.Update()
    return glyphs.GetOutput()
def writeVTK(pd, fileName):
    """Write a VTK dataset to *fileName* using the legacy dataset writer."""
    writer = v.vtkDataSetWriter()
    writer.SetFileName(fileName)
    writer.SetInputData(pd)
    writer.Write()
def triangulate(sphereXyz):
    """
    Generates a triangle mesh for a spherical point cloud.

    Builds the convex hull of the (rounded) points with vtkDelaunay3D,
    extracts its surface, and returns the triangle connectivity expressed
    in indices of the ORIGINAL point array (not the surface-filter ids).
    Returns the flat vtkCellArray data, which interleaves each triangle's
    point count (3) with its three point ids.
    """
    # Rounding to 2 decimals — presumably to merge nearly-coincident
    # points before triangulating; TODO confirm.
    sphereXyz = np.around(sphereXyz,decimals=2)
    sphereArr = dsa.numpyTovtkDataArray( sphereXyz, name='SpherePts' )
    pts = v.vtkPoints()
    pts.SetData( sphereArr )
    sphere = v.vtkPolyData()
    sphere.SetPoints( pts )
    # Store the original point ids (the surface filter renumbers points).
    idf = v.vtkIdFilter()
    idf.SetIdsArrayName('PointIds')
    idf.PointIdsOn()
    idf.SetInputData( sphere )
    # Delaunay3D to make a convex hull
    d3d = v.vtkDelaunay3D()
    d3d.SetInputConnection( idf.GetOutputPort() )
    # Extract the surface
    surf = v.vtkDataSetSurfaceFilter()
    surf.SetInputConnection( d3d.GetOutputPort() )
    surf.Update()
    # Now make a new cell array mapping to the old ids
    polyCells = v.vtkCellArray()
    sphereCells = surf.GetOutput().GetPolys()
    sphereCells.InitTraversal()
    origIds = surf.GetOutput().GetPointData().GetArray('PointIds')
    ptIds = v.vtkIdList()
    # For every surface triangle, translate its three (renumbered) point
    # ids back to the original indices via the 'PointIds' array.
    while( sphereCells.GetNextCell( ptIds ) ):
        polyCells.InsertNextCell(3)
        polyCells.InsertCellPoint( int(origIds.GetTuple1( ptIds.GetId(0) )) )
        polyCells.InsertCellPoint( int(origIds.GetTuple1( ptIds.GetId(1) )) )
        polyCells.InsertCellPoint( int(origIds.GetTuple1( ptIds.GetId(2) )) )
    connectivity = dsa.vtkDataArrayToVTKArray( polyCells.GetData() )
    return connectivity
# Read T=7 points
rd = v.vtkPolyDataReader()
rd.SetFileName('T7.vtk')
rd.Update()
pdVtk = rd.GetOutput()
pd = dsa.WrapDataObject( pdVtk )
pts = pd.Points
# Number of points in the input cloud.
N = pts.shape[0]
# Get the points on a sphere
# NOTE(review): alg.norm usually returns per-point magnitudes, yet the
# result is indexed as an (N, 3) array below — confirm the intended
# operation (normalization vs. norm).
sphere = alg.norm( pts )
# Set the center of the sphere to origin
center = np.mean(sphere,axis=0)
sphere -= center
# Triangle connectivity from the 3D convex-hull pipeline.
conn = triangulate(sphere)
# Convert all points to spherical coordinates
theta = np.arctan2(sphere[:,1],sphere[:,0])
phi = np.arctan2(np.sqrt(sphere[:,0]**2 + sphere[:,1]**2),sphere[:,2])
print('Theta =')
print(theta)
print('Phi =')
print(phi)
# Duplicate the points shifted by -2*pi in theta so triangles wrapping
# across the seam can be drawn; a 2D Delaunay of (theta, phi) serves as
# the comparison mesh in the upper subplot.
phi = np.append(phi, phi)
theta = np.append(theta, theta - 2*np.pi)
conn2 = Delaunay(np.hstack([theta[:,np.newaxis],phi[:,np.newaxis]]))
fig, (ax1,ax2) = plt.subplots(2,1,sharex=True)
ax1.plot(theta,phi,'.')
ax1.triplot(theta,phi,triangles=conn2.simplices)
ax1.axvline(x=-np.pi)
ax1.set_ylabel(r'$\phi$')
ax2.plot(theta,phi,'.')
ax2.triplot(theta,phi,triangles=conn)
ax2.axvline(x=-np.pi)
ax2.set_xlabel(r'$\theta$')
ax2.set_ylabel(r'$\phi$')
plt.tight_layout()
plt.show()
| [
"vtk.vtkPolyDataReader",
"numpy.arctan2",
"vtk.vtkPoints",
"numpy.around",
"numpy.mean",
"vtk.numpy_interface.dataset_adapter.WrapDataObject",
"matplotlib.pyplot.tight_layout",
"vtk.numpy_interface.algorithms.norm",
"vtk.vtkIdList",
"numpy.append",
"vtk.numpy_interface.dataset_adapter.numpyTovtk... | [((16, 49), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (39, 49), False, 'import warnings\n'), ((2092, 2113), 'vtk.vtkPolyDataReader', 'v.vtkPolyDataReader', ([], {}), '()\n', (2111, 2113), True, 'import vtk as v\n'), ((2179, 2204), 'vtk.numpy_interface.dataset_adapter.WrapDataObject', 'dsa.WrapDataObject', (['pdVtk'], {}), '(pdVtk)\n', (2197, 2204), True, 'from vtk.numpy_interface import dataset_adapter as dsa\n'), ((2279, 2292), 'vtk.numpy_interface.algorithms.norm', 'alg.norm', (['pts'], {}), '(pts)\n', (2287, 2292), True, 'from vtk.numpy_interface import algorithms as alg\n'), ((2346, 2369), 'numpy.mean', 'np.mean', (['sphere'], {'axis': '(0)'}), '(sphere, axis=0)\n', (2353, 2369), True, 'import numpy as np\n'), ((2469, 2507), 'numpy.arctan2', 'np.arctan2', (['sphere[:, 1]', 'sphere[:, 0]'], {}), '(sphere[:, 1], sphere[:, 0])\n', (2479, 2507), True, 'import numpy as np\n'), ((2638, 2657), 'numpy.append', 'np.append', (['phi', 'phi'], {}), '(phi, phi)\n', (2647, 2657), True, 'import numpy as np\n'), ((2666, 2701), 'numpy.append', 'np.append', (['theta', '(theta - 2 * np.pi)'], {}), '(theta, theta - 2 * np.pi)\n', (2675, 2701), True, 'import numpy as np\n'), ((2788, 2819), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)'}), '(2, 1, sharex=True)\n', (2800, 2819), True, 'import matplotlib.pyplot as plt\n'), ((3077, 3095), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3093, 3095), True, 'import matplotlib.pyplot as plt\n'), ((3096, 3106), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3104, 3106), True, 'import matplotlib.pyplot as plt\n'), ((297, 326), 'vtk.numpy_interface.dataset_adapter.numpyTovtkDataArray', 'dsa.numpyTovtkDataArray', (['data'], {}), '(data)\n', (320, 326), True, 'from vtk.numpy_interface import dataset_adapter as dsa\n'), ((339, 352), 'vtk.vtkPoints', 'v.vtkPoints', ([], 
{}), '()\n', (350, 352), True, 'import vtk as v\n'), ((386, 401), 'vtk.vtkPolyData', 'v.vtkPolyData', ([], {}), '()\n', (399, 401), True, 'import vtk as v\n'), ((436, 460), 'vtk.vtkVertexGlyphFilter', 'v.vtkVertexGlyphFilter', ([], {}), '()\n', (458, 460), True, 'import vtk as v\n'), ((572, 592), 'vtk.vtkDataSetWriter', 'v.vtkDataSetWriter', ([], {}), '()\n', (590, 592), True, 'import vtk as v\n'), ((781, 813), 'numpy.around', 'np.around', (['sphereXyz'], {'decimals': '(2)'}), '(sphereXyz, decimals=2)\n', (790, 813), True, 'import numpy as np\n'), ((829, 881), 'vtk.numpy_interface.dataset_adapter.numpyTovtkDataArray', 'dsa.numpyTovtkDataArray', (['sphereXyz'], {'name': '"""SpherePts"""'}), "(sphereXyz, name='SpherePts')\n", (852, 881), True, 'from vtk.numpy_interface import dataset_adapter as dsa\n'), ((894, 907), 'vtk.vtkPoints', 'v.vtkPoints', ([], {}), '()\n', (905, 907), True, 'import vtk as v\n'), ((950, 965), 'vtk.vtkPolyData', 'v.vtkPolyData', ([], {}), '()\n', (963, 965), True, 'import vtk as v\n'), ((1040, 1055), 'vtk.vtkIdFilter', 'v.vtkIdFilter', ([], {}), '()\n', (1053, 1055), True, 'import vtk as v\n'), ((1194, 1211), 'vtk.vtkDelaunay3D', 'v.vtkDelaunay3D', ([], {}), '()\n', (1209, 1211), True, 'import vtk as v\n'), ((1300, 1327), 'vtk.vtkDataSetSurfaceFilter', 'v.vtkDataSetSurfaceFilter', ([], {}), '()\n', (1325, 1327), True, 'import vtk as v\n'), ((1469, 1485), 'vtk.vtkCellArray', 'v.vtkCellArray', ([], {}), '()\n', (1483, 1485), True, 'import vtk as v\n'), ((1643, 1656), 'vtk.vtkIdList', 'v.vtkIdList', ([], {}), '()\n', (1654, 1656), True, 'import vtk as v\n'), ((2522, 2568), 'numpy.sqrt', 'np.sqrt', (['(sphere[:, 0] ** 2 + sphere[:, 1] ** 2)'], {}), '(sphere[:, 0] ** 2 + sphere[:, 1] ** 2)\n', (2529, 2568), True, 'import numpy as np\n'), ((2718, 2771), 'numpy.hstack', 'np.hstack', (['[theta[:, np.newaxis], phi[:, np.newaxis]]'], {}), '([theta[:, np.newaxis], phi[:, np.newaxis]])\n', (2727, 2771), True, 'import numpy as np\n')] |
from __future__ import division, print_function
from hscom import __common__
(print, print_, print_on, print_off,
rrr, profile) = __common__.init(__name__, '[extern_feat]')
# Standard
import os
import sys
from os.path import dirname, realpath, join
# Scientific
import numpy as np
OLD_HESAFF = False or '--oldhesaff' in sys.argv
if '--newhesaff' in sys.argv:
OLD_HESAFF = False
def reload_module():
import imp
import sys
imp.reload(sys.modules[__name__])
EXE_EXT = {'win32': '.exe', 'darwin': '.mac', 'linux2': '.ln'}[sys.platform]
if not '__file__' in vars():
__file__ = os.path.realpath('extern_feat.py')
EXE_PATH = realpath(dirname(__file__))
if not os.path.exists(EXE_PATH):
EXE_PATH = realpath('tpl/extern_feat')
if not os.path.exists(EXE_PATH):
EXE_PATH = realpath('hotspotter/tpl/extern_feat')
HESAFF_EXE = join(EXE_PATH, 'hesaff' + EXE_EXT)
INRIA_EXE = join(EXE_PATH, 'compute_descriptors' + EXE_EXT)
KPTS_DTYPE = np.float64
DESC_DTYPE = np.uint8
#---------------------------------------
# Define precompute functions
def precompute(rchip_fpath, feat_fpath, dict_args, compute_fn):
# Calls the function which reads the chip and computes features
kpts, desc = compute_fn(rchip_fpath, dict_args)
# Saves the features to the feature cache dir
np.savez(feat_fpath, kpts, desc)
return kpts, desc
def precompute_hesaff(rchip_fpath, feat_fpath, dict_args):
return precompute(rchip_fpath, feat_fpath, dict_args, compute_hesaff)
#---------------------------------------
# Work functions which call the external feature detectors
# Helper function to call commands
#try:
from hstpl.extern_feat import pyhesaff
def detect_kpts(rchip_fpath, dict_args):
kpts, desc = pyhesaff.detect_kpts(rchip_fpath, **dict_args)
return kpts, desc
#print('[extern_feat] new hessaff is available')
#except ImportError as ex:
#print('[extern_feat] new hessaff is not available: %r' % ex)
#if '--strict' in sys.argv:
#raise
#try:
#from hstpl.extern_feat import pyhesaffexe
#def detect_kpts_old(rchip_fpath, dict_args):
#kpts, desc = pyhesaffexe.detect_kpts(rchip_fpath, **dict_args)
#return kpts, desc
#print('[extern_feat] old hessaff is available')
#except ImportError as ex:
#print('[extern_feat] old hessaff is not available: %r' % ex)
#if '--strict' in sys.argv:
#raise
#if OLD_HESAFF:
#detect_kpts = detect_kpts_old
#print('[extern_feat] using: old hessian affine')
#else:
#detect_kpts = detect_kpts_new
#print('[extern_feat] using: new pyhesaff')
#----
def compute_hesaff(rchip_fpath, dict_args):
return detect_kpts(rchip_fpath, dict_args)
| [
"imp.reload",
"os.path.realpath",
"os.path.dirname",
"os.path.exists",
"hstpl.extern_feat.pyhesaff.detect_kpts",
"numpy.savez",
"os.path.join",
"hscom.__common__.init"
] | [((131, 173), 'hscom.__common__.init', '__common__.init', (['__name__', '"""[extern_feat]"""'], {}), "(__name__, '[extern_feat]')\n", (146, 173), False, 'from hscom import __common__\n'), ((849, 883), 'os.path.join', 'join', (['EXE_PATH', "('hesaff' + EXE_EXT)"], {}), "(EXE_PATH, 'hesaff' + EXE_EXT)\n", (853, 883), False, 'from os.path import dirname, realpath, join\n'), ((897, 944), 'os.path.join', 'join', (['EXE_PATH', "('compute_descriptors' + EXE_EXT)"], {}), "(EXE_PATH, 'compute_descriptors' + EXE_EXT)\n", (901, 944), False, 'from os.path import dirname, realpath, join\n'), ((441, 474), 'imp.reload', 'imp.reload', (['sys.modules[__name__]'], {}), '(sys.modules[__name__])\n', (451, 474), False, 'import imp\n'), ((598, 632), 'os.path.realpath', 'os.path.realpath', (['"""extern_feat.py"""'], {}), "('extern_feat.py')\n", (614, 632), False, 'import os\n'), ((653, 670), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (660, 670), False, 'from os.path import dirname, realpath, join\n'), ((679, 703), 'os.path.exists', 'os.path.exists', (['EXE_PATH'], {}), '(EXE_PATH)\n', (693, 703), False, 'import os\n'), ((720, 747), 'os.path.realpath', 'realpath', (['"""tpl/extern_feat"""'], {}), "('tpl/extern_feat')\n", (728, 747), False, 'from os.path import dirname, realpath, join\n'), ((755, 779), 'os.path.exists', 'os.path.exists', (['EXE_PATH'], {}), '(EXE_PATH)\n', (769, 779), False, 'import os\n'), ((796, 834), 'os.path.realpath', 'realpath', (['"""hotspotter/tpl/extern_feat"""'], {}), "('hotspotter/tpl/extern_feat')\n", (804, 834), False, 'from os.path import dirname, realpath, join\n'), ((1303, 1335), 'numpy.savez', 'np.savez', (['feat_fpath', 'kpts', 'desc'], {}), '(feat_fpath, kpts, desc)\n', (1311, 1335), True, 'import numpy as np\n'), ((1735, 1781), 'hstpl.extern_feat.pyhesaff.detect_kpts', 'pyhesaff.detect_kpts', (['rchip_fpath'], {}), '(rchip_fpath, **dict_args)\n', (1755, 1781), False, 'from hstpl.extern_feat import pyhesaff\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""SpeciesEvolver tests."""
from collections import Counter
import numpy as np
import pandas as pd
import pytest
from landlab import RasterModelGrid
from landlab.components import SpeciesEvolver
from landlab.components.species_evolution.base_taxon import Taxon
class TestTaxon(Taxon):
"""Taxon to test SpeciesEvolver."""
def __init__(self, parent=None):
super(TestTaxon, self).__init__()
self.parent = parent
@property
def range_mask(self):
return np.array(
[False, False, False, True, True, True, False, False, False]
)
def _evolve(self, dt, stage, record):
if stage == 1:
self._extant = False
TestTaxon(parent=self)
return stage < 1
@pytest.fixture()
def zone_example_grid():
return RasterModelGrid((3, 3), 1)
def test_track_taxa_and_component_attributes(zone_example_grid):
se = SpeciesEvolver(zone_example_grid)
# Introduce multiple taxa.
taxa = [TestTaxon(), TestTaxon()]
se.track_taxa(taxa)
# Introduce a single taxon.
taxon = TestTaxon()
se.track_taxa(taxon)
# Test attributes at initial time step.
expected_df = pd.DataFrame({
'appeared': [0, 0, 0],
'latest_time': [0, 0, 0],
'extant': [True, True, True]},
index=[0, 1, 2]
)
expected_df.index.name = 'uid'
pd.testing.assert_frame_equal(
se.taxa_data_frame, expected_df, check_like=True
)
expected_df = pd.DataFrame({
'time': [0],
'taxa': [3]
})
pd.testing.assert_frame_equal(
se.record_data_frame, expected_df, check_like=True
)
# Test attributes at a later time.
se.run_one_step(10)
expected_df = pd.DataFrame({
'appeared': [0, 0, 0, 10, 10, 10],
'latest_time': [10, 10, 10, 10, 10, 10],
'extant': [False, False, False, True, True, True]},
index=[0, 1, 2, 3, 4, 5]
)
pd.testing.assert_frame_equal(
se.taxa_data_frame, expected_df, check_like=True
)
expected_df = pd.DataFrame({
'time': [0, 10],
'taxa': [3, 3]
})
pd.testing.assert_frame_equal(
se.record_data_frame, expected_df, check_like=True
)
def test_get_taxon_objects(zone_example_grid):
se = SpeciesEvolver(zone_example_grid)
introduced_taxa = [TestTaxon(), TestTaxon()]
se.track_taxa(introduced_taxa)
se.run_one_step(10)
se.run_one_step(10)
# Test no parameters.
queried_taxa = se.get_taxon_objects()
np.testing.assert_equal(
Counter(queried_taxa), Counter(se._taxa['object'])
)
# Test `time` parameter.
queried_taxa = se.get_taxon_objects(time=0)
np.testing.assert_equal(
Counter(queried_taxa), Counter(introduced_taxa)
)
queried_taxa = se.get_taxon_objects(time=10)
ids = [s.uid for s in queried_taxa]
expected_ids = [0, 1, 2, 3]
np.testing.assert_equal(Counter(ids), Counter(expected_ids))
np.testing.assert_raises(ValueError, se.get_taxon_objects, time=5)
np.testing.assert_raises(ValueError, se.get_taxon_objects, time=11)
# Test `extant_at_latest_time` parameter.
queried_taxa = se.get_taxon_objects(extant_at_latest_time=True)
ids = [s.uid for s in queried_taxa]
expected_ids = [4, 5]
np.testing.assert_equal(Counter(ids), Counter(expected_ids))
# Test `ancestor` parameter.
queried_taxa = se.get_taxon_objects(ancestor=1)
ids = [s.uid for s in queried_taxa]
expected_ids = [3, 5]
np.testing.assert_equal(Counter(ids), Counter(expected_ids))
queried_taxa = se.get_taxon_objects(ancestor=5)
np.testing.assert_equal(queried_taxa, [])
queried_taxa = se.get_taxon_objects(ancestor=6)
np.testing.assert_equal(queried_taxa, [])
# Test multiple parameters.
queried_taxa = se.get_taxon_objects(ancestor=1, time=10)
ids = [s.uid for s in queried_taxa]
expected_ids = [3]
np.testing.assert_equal(Counter(ids), Counter(expected_ids))
def test_taxa_richness_field(zone_example_grid):
mg = zone_example_grid
se = SpeciesEvolver(mg)
expected_field = np.zeros(mg.number_of_nodes)
np.testing.assert_array_equal(
mg.at_node['taxa__richness'], expected_field
)
introduced_taxa = [TestTaxon(), TestTaxon()]
se.track_taxa(introduced_taxa)
se.run_one_step(10)
expected_field = np.array([0, 0, 0, 2, 2, 2, 0, 0, 0])
np.testing.assert_array_equal(
mg.at_node['taxa__richness'], expected_field
)
| [
"pandas.DataFrame",
"pandas.testing.assert_frame_equal",
"landlab.components.SpeciesEvolver",
"landlab.RasterModelGrid",
"numpy.testing.assert_raises",
"numpy.testing.assert_array_equal",
"pytest.fixture",
"numpy.zeros",
"numpy.array",
"numpy.testing.assert_equal",
"collections.Counter"
] | [((796, 812), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (810, 812), False, 'import pytest\n'), ((849, 875), 'landlab.RasterModelGrid', 'RasterModelGrid', (['(3, 3)', '(1)'], {}), '((3, 3), 1)\n', (864, 875), False, 'from landlab import RasterModelGrid\n'), ((952, 985), 'landlab.components.SpeciesEvolver', 'SpeciesEvolver', (['zone_example_grid'], {}), '(zone_example_grid)\n', (966, 985), False, 'from landlab.components import SpeciesEvolver\n'), ((1226, 1341), 'pandas.DataFrame', 'pd.DataFrame', (["{'appeared': [0, 0, 0], 'latest_time': [0, 0, 0], 'extant': [True, True, True]}"], {'index': '[0, 1, 2]'}), "({'appeared': [0, 0, 0], 'latest_time': [0, 0, 0], 'extant': [\n True, True, True]}, index=[0, 1, 2])\n", (1238, 1341), True, 'import pandas as pd\n'), ((1414, 1493), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['se.taxa_data_frame', 'expected_df'], {'check_like': '(True)'}), '(se.taxa_data_frame, expected_df, check_like=True)\n', (1443, 1493), True, 'import pandas as pd\n'), ((1527, 1567), 'pandas.DataFrame', 'pd.DataFrame', (["{'time': [0], 'taxa': [3]}"], {}), "({'time': [0], 'taxa': [3]})\n", (1539, 1567), True, 'import pandas as pd\n'), ((1594, 1680), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['se.record_data_frame', 'expected_df'], {'check_like': '(True)'}), '(se.record_data_frame, expected_df, check_like\n =True)\n', (1623, 1680), True, 'import pandas as pd\n'), ((1774, 1950), 'pandas.DataFrame', 'pd.DataFrame', (["{'appeared': [0, 0, 0, 10, 10, 10], 'latest_time': [10, 10, 10, 10, 10, 10],\n 'extant': [False, False, False, True, True, True]}"], {'index': '[0, 1, 2, 3, 4, 5]'}), "({'appeared': [0, 0, 0, 10, 10, 10], 'latest_time': [10, 10, 10,\n 10, 10, 10], 'extant': [False, False, False, True, True, True]}, index=\n [0, 1, 2, 3, 4, 5])\n", (1786, 1950), True, 'import pandas as pd\n'), ((1984, 2063), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', 
(['se.taxa_data_frame', 'expected_df'], {'check_like': '(True)'}), '(se.taxa_data_frame, expected_df, check_like=True)\n', (2013, 2063), True, 'import pandas as pd\n'), ((2097, 2144), 'pandas.DataFrame', 'pd.DataFrame', (["{'time': [0, 10], 'taxa': [3, 3]}"], {}), "({'time': [0, 10], 'taxa': [3, 3]})\n", (2109, 2144), True, 'import pandas as pd\n'), ((2171, 2257), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['se.record_data_frame', 'expected_df'], {'check_like': '(True)'}), '(se.record_data_frame, expected_df, check_like\n =True)\n', (2200, 2257), True, 'import pandas as pd\n'), ((2325, 2358), 'landlab.components.SpeciesEvolver', 'SpeciesEvolver', (['zone_example_grid'], {}), '(zone_example_grid)\n', (2339, 2358), False, 'from landlab.components import SpeciesEvolver\n'), ((3018, 3084), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError', 'se.get_taxon_objects'], {'time': '(5)'}), '(ValueError, se.get_taxon_objects, time=5)\n', (3042, 3084), True, 'import numpy as np\n'), ((3089, 3156), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError', 'se.get_taxon_objects'], {'time': '(11)'}), '(ValueError, se.get_taxon_objects, time=11)\n', (3113, 3156), True, 'import numpy as np\n'), ((3679, 3720), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['queried_taxa', '[]'], {}), '(queried_taxa, [])\n', (3702, 3720), True, 'import numpy as np\n'), ((3778, 3819), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['queried_taxa', '[]'], {}), '(queried_taxa, [])\n', (3801, 3819), True, 'import numpy as np\n'), ((4131, 4149), 'landlab.components.SpeciesEvolver', 'SpeciesEvolver', (['mg'], {}), '(mg)\n', (4145, 4149), False, 'from landlab.components import SpeciesEvolver\n'), ((4172, 4200), 'numpy.zeros', 'np.zeros', (['mg.number_of_nodes'], {}), '(mg.number_of_nodes)\n', (4180, 4200), True, 'import numpy as np\n'), ((4205, 4280), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', 
(["mg.at_node['taxa__richness']", 'expected_field'], {}), "(mg.at_node['taxa__richness'], expected_field)\n", (4234, 4280), True, 'import numpy as np\n'), ((4426, 4463), 'numpy.array', 'np.array', (['[0, 0, 0, 2, 2, 2, 0, 0, 0]'], {}), '([0, 0, 0, 2, 2, 2, 0, 0, 0])\n', (4434, 4463), True, 'import numpy as np\n'), ((4468, 4543), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["mg.at_node['taxa__richness']", 'expected_field'], {}), "(mg.at_node['taxa__richness'], expected_field)\n", (4497, 4543), True, 'import numpy as np\n'), ((540, 610), 'numpy.array', 'np.array', (['[False, False, False, True, True, True, False, False, False]'], {}), '([False, False, False, True, True, True, False, False, False])\n', (548, 610), True, 'import numpy as np\n'), ((2599, 2620), 'collections.Counter', 'Counter', (['queried_taxa'], {}), '(queried_taxa)\n', (2606, 2620), False, 'from collections import Counter\n'), ((2622, 2649), 'collections.Counter', 'Counter', (["se._taxa['object']"], {}), "(se._taxa['object'])\n", (2629, 2649), False, 'from collections import Counter\n'), ((2772, 2793), 'collections.Counter', 'Counter', (['queried_taxa'], {}), '(queried_taxa)\n', (2779, 2793), False, 'from collections import Counter\n'), ((2795, 2819), 'collections.Counter', 'Counter', (['introduced_taxa'], {}), '(introduced_taxa)\n', (2802, 2819), False, 'from collections import Counter\n'), ((2976, 2988), 'collections.Counter', 'Counter', (['ids'], {}), '(ids)\n', (2983, 2988), False, 'from collections import Counter\n'), ((2990, 3011), 'collections.Counter', 'Counter', (['expected_ids'], {}), '(expected_ids)\n', (2997, 3011), False, 'from collections import Counter\n'), ((3367, 3379), 'collections.Counter', 'Counter', (['ids'], {}), '(ids)\n', (3374, 3379), False, 'from collections import Counter\n'), ((3381, 3402), 'collections.Counter', 'Counter', (['expected_ids'], {}), '(expected_ids)\n', (3388, 3402), False, 'from collections import Counter\n'), ((3585, 3597), 
'collections.Counter', 'Counter', (['ids'], {}), '(ids)\n', (3592, 3597), False, 'from collections import Counter\n'), ((3599, 3620), 'collections.Counter', 'Counter', (['expected_ids'], {}), '(expected_ids)\n', (3606, 3620), False, 'from collections import Counter\n'), ((4006, 4018), 'collections.Counter', 'Counter', (['ids'], {}), '(ids)\n', (4013, 4018), False, 'from collections import Counter\n'), ((4020, 4041), 'collections.Counter', 'Counter', (['expected_ids'], {}), '(expected_ids)\n', (4027, 4041), False, 'from collections import Counter\n')] |
import matplotlib.pyplot as plt
import numpy as np
import os, glob
#nBins = int(input('Give number of bins: '))
nBins = 100
datasetSize = 50000
error = 0.03
datafiles = [a for a in os.listdir() if a.endswith('50000.txt') and not(a.startswith('time'))]
n = len(datafiles)
print(datafiles)
data = {}
for i, file in enumerate(datafiles):
with open(file) as f:
data[file] = np.array([int(line) for line in f.readlines()])
m = np.mean(data[file])
data[file] = (data[file]-m)/m
colors = ['black', 'red', 'green', 'blue']
for i in range(n):
dat = data[datafiles[i]]
print(dat.shape)
print(datafiles[i])
plt.scatter(dat, np.full_like(dat, fill_value=i), color='black', alpha=0.3, label=datafiles[i][:-4])
plt.axvline(x=-error)
plt.axvline(x=error)
#plt.xlim([-error-0.01,error+0.01])
plt.yticks([0,1,2,3,4,5,6], ['Tab1Perm', 'Poly2', 'Poly3', 'MultShift', 'TwistTab', 'MixedTab', 'Murmur'])
#plt.legend()
plt.savefig('pic.png')
#
#
# i = 0
# n = 0
# for fl in glob.glob("*.txt"):
# n += 1
# print(str(n) + " folders/colors.")
# color=iter(plt.cm.rainbow(np.linspace(0,1,n))) #set of n colors
# for fl in glob.glob("*.txt"):
# print(fl)
# i += 1
# data = []
#
# with open(fl) as f:
# for line in f:
# tmp = int(line)
# data.append((datasetSize-tmp)/datasetSize)
#
# # evaluate the histogram
# values, base = np.histogram(data, bins='auto')
# values = [x/len(data) for x in values]
# #evaluate the cumulative
# cumulative = np.cumsum(values)
# # plot the cumulative function
# c=next(color)
# plt.plot(base[:-1], cumulative, c=c, label=fl)
# # plt.xlim([-0.01, 0])
# # plt.ylim([0, 0.01])
# #plot the survival function
# #plt.plot(base[:-1], len(data)-cumulative, c='green')
#
# plt.legend()
# plt.show()
| [
"matplotlib.pyplot.axvline",
"numpy.full_like",
"matplotlib.pyplot.yticks",
"numpy.mean",
"os.listdir",
"matplotlib.pyplot.savefig"
] | [((747, 768), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(-error)'}), '(x=-error)\n', (758, 768), True, 'import matplotlib.pyplot as plt\n'), ((769, 789), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'error'}), '(x=error)\n', (780, 789), True, 'import matplotlib.pyplot as plt\n'), ((826, 942), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 1, 2, 3, 4, 5, 6]', "['Tab1Perm', 'Poly2', 'Poly3', 'MultShift', 'TwistTab', 'MixedTab', 'Murmur']"], {}), "([0, 1, 2, 3, 4, 5, 6], ['Tab1Perm', 'Poly2', 'Poly3',\n 'MultShift', 'TwistTab', 'MixedTab', 'Murmur'])\n", (836, 942), True, 'import matplotlib.pyplot as plt\n'), ((947, 969), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pic.png"""'], {}), "('pic.png')\n", (958, 969), True, 'import matplotlib.pyplot as plt\n'), ((183, 195), 'os.listdir', 'os.listdir', ([], {}), '()\n', (193, 195), False, 'import os, glob\n'), ((446, 465), 'numpy.mean', 'np.mean', (['data[file]'], {}), '(data[file])\n', (453, 465), True, 'import numpy as np\n'), ((662, 693), 'numpy.full_like', 'np.full_like', (['dat'], {'fill_value': 'i'}), '(dat, fill_value=i)\n', (674, 693), True, 'import numpy as np\n')] |
"""Class decorators and other helpers."""
import numpy as np
import inspect
from vectorbt.utils import checks
from vectorbt.utils.config import merge_dicts
def get_kwargs(func):
"""Get names and default values of keyword arguments from the signature of `func`."""
return {
k: v.default
for k, v in inspect.signature(func).parameters.items()
if v.default is not inspect.Parameter.empty
}
def add_nb_methods(nb_funcs, module_name=None):
"""Class decorator to wrap Numba functions methods of an accessor class.
`nb_funcs` should contain tuples of Numba functions, whether they are reducing, and optionally `index_or_name`.
Requires the instance to have attribute `wrapper` of type `vectorbt.base.array_wrapper.ArrayWrapper`."""
def wrapper(cls):
for info in nb_funcs:
checks.assert_type(info, tuple)
if len(info) == 3:
nb_func, is_reducing, name_or_index = info
elif len(info) == 2:
nb_func, is_reducing = info
name_or_index = None
else:
raise ValueError("Each tuple should have either length 2 or 3")
def nb_method(self,
*args,
_nb_func=nb_func,
_is_reducing=is_reducing,
_name_or_index=name_or_index,
wrap_kwargs=None,
**kwargs):
default_kwargs = get_kwargs(nb_func)
wrap_kwargs = merge_dicts({}, wrap_kwargs)
if '_1d' in _nb_func.__name__:
# One-dimensional array as input
a = _nb_func(self.to_1d_array(), *args, **{**default_kwargs, **kwargs})
if _is_reducing:
return self.wrapper.wrap_reduced(a, name_or_index=_name_or_index, **wrap_kwargs)
return self.wrapper.wrap(a, **wrap_kwargs)
else:
# Two-dimensional array as input
a = _nb_func(self.to_2d_array(), *args, **{**default_kwargs, **kwargs})
if _is_reducing:
return self.wrapper.wrap_reduced(a, name_or_index=_name_or_index, **wrap_kwargs)
return self.wrapper.wrap(a, **wrap_kwargs)
# Replace the function's signature with the original one
sig = inspect.signature(nb_func)
self_arg = tuple(inspect.signature(nb_method).parameters.values())[0]
sig = sig.replace(parameters=(self_arg,) + tuple(sig.parameters.values())[1:])
nb_method.__signature__ = sig
if module_name is not None:
nb_method.__doc__ = f"See `{module_name}.{nb_func.__name__}`"
else:
nb_method.__doc__ = f"See `{nb_func.__name__}`"
setattr(cls, nb_func.__name__.replace('_1d', '').replace('_nb', ''), nb_method)
return cls
return wrapper
def add_binary_magic_methods(np_funcs, translate_func):
"""Class decorator to add binary magic methods using NumPy to the class."""
def wrapper(cls):
for fname, np_func in np_funcs:
def magic_func(self, other, np_func=np_func):
return translate_func(self, other, np_func)
setattr(cls, fname, magic_func)
return cls
return wrapper
def add_unary_magic_methods(np_funcs, translate_func):
"""Class decorator to add unary magic methods using NumPy to the class."""
def wrapper(cls):
for fname, np_func in np_funcs:
def magic_func(self, np_func=np_func):
return translate_func(self, np_func)
setattr(cls, fname, magic_func)
return cls
return wrapper
binary_magic_methods = [
# comparison ops
('__eq__', np.equal),
('__ne__', np.not_equal),
('__lt__', np.less),
('__gt__', np.greater),
('__le__', np.less_equal),
('__ge__', np.greater_equal),
# arithmetic ops
('__add__', np.add),
('__sub__', np.subtract),
('__mul__', np.multiply),
('__pow__', np.power),
('__mod__', np.mod),
('__floordiv__', np.floor_divide),
('__truediv__', np.true_divide),
('__radd__', lambda x, y: np.add(y, x)),
('__rsub__', lambda x, y: np.subtract(y, x)),
('__rmul__', lambda x, y: np.multiply(y, x)),
('__rpow__', lambda x, y: np.power(y, x)),
('__rmod__', lambda x, y: np.mod(y, x)),
('__rfloordiv__', lambda x, y: np.floor_divide(y, x)),
('__rtruediv__', lambda x, y: np.true_divide(y, x)),
# mask ops
('__and__', np.bitwise_and),
('__or__', np.bitwise_or),
('__xor__', np.bitwise_xor),
('__rand__', lambda x, y: np.bitwise_and(y, x)),
('__ror__', lambda x, y: np.bitwise_or(y, x)),
('__rxor__', lambda x, y: np.bitwise_xor(y, x))
]
unary_magic_methods = [
('__neg__', np.negative),
('__pos__', np.positive),
('__abs__', np.absolute),
('__invert__', np.invert)
]
| [
"numpy.multiply",
"numpy.subtract",
"numpy.true_divide",
"numpy.power",
"numpy.floor_divide",
"numpy.bitwise_xor",
"numpy.mod",
"vectorbt.utils.checks.assert_type",
"inspect.signature",
"numpy.bitwise_and",
"numpy.add",
"vectorbt.utils.config.merge_dicts",
"numpy.bitwise_or"
] | [((846, 877), 'vectorbt.utils.checks.assert_type', 'checks.assert_type', (['info', 'tuple'], {}), '(info, tuple)\n', (864, 877), False, 'from vectorbt.utils import checks\n'), ((2449, 2475), 'inspect.signature', 'inspect.signature', (['nb_func'], {}), '(nb_func)\n', (2466, 2475), False, 'import inspect\n'), ((4298, 4310), 'numpy.add', 'np.add', (['y', 'x'], {}), '(y, x)\n', (4304, 4310), True, 'import numpy as np\n'), ((4343, 4360), 'numpy.subtract', 'np.subtract', (['y', 'x'], {}), '(y, x)\n', (4354, 4360), True, 'import numpy as np\n'), ((4393, 4410), 'numpy.multiply', 'np.multiply', (['y', 'x'], {}), '(y, x)\n', (4404, 4410), True, 'import numpy as np\n'), ((4443, 4457), 'numpy.power', 'np.power', (['y', 'x'], {}), '(y, x)\n', (4451, 4457), True, 'import numpy as np\n'), ((4490, 4502), 'numpy.mod', 'np.mod', (['y', 'x'], {}), '(y, x)\n', (4496, 4502), True, 'import numpy as np\n'), ((4540, 4561), 'numpy.floor_divide', 'np.floor_divide', (['y', 'x'], {}), '(y, x)\n', (4555, 4561), True, 'import numpy as np\n'), ((4598, 4618), 'numpy.true_divide', 'np.true_divide', (['y', 'x'], {}), '(y, x)\n', (4612, 4618), True, 'import numpy as np\n'), ((4763, 4783), 'numpy.bitwise_and', 'np.bitwise_and', (['y', 'x'], {}), '(y, x)\n', (4777, 4783), True, 'import numpy as np\n'), ((4815, 4834), 'numpy.bitwise_or', 'np.bitwise_or', (['y', 'x'], {}), '(y, x)\n', (4828, 4834), True, 'import numpy as np\n'), ((4867, 4887), 'numpy.bitwise_xor', 'np.bitwise_xor', (['y', 'x'], {}), '(y, x)\n', (4881, 4887), True, 'import numpy as np\n'), ((1563, 1591), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (1574, 1591), False, 'from vectorbt.utils.config import merge_dicts\n'), ((326, 349), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (343, 349), False, 'import inspect\n'), ((2505, 2533), 'inspect.signature', 'inspect.signature', (['nb_method'], {}), '(nb_method)\n', (2522, 2533), False, 'import inspect\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from eggroll.core.constants import StoreTypes
from eggroll.core.utils import time_now
from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, \
get_cluster_context, get_standalone_context, get_default_options
def get_value(roll_pair):
return list(sorted(roll_pair.get_all(), key=lambda x: x[0]))
class TestRollPairBase(unittest.TestCase):
def setUp(self):
self.ctx = get_debug_test_context()
def tearDown(self) -> None:
print("stop test session")
#self.ctx.get_session().stop()
@staticmethod
def store_opts(**kwargs):
opts = {'total_partitions': 1}
opts.update(kwargs)
return opts
def assertUnOrderListEqual(self, list1, list2):
self.assertEqual(sorted(list1), sorted(list2))
@staticmethod
def str_generator(include_key=True, row_limit=10, key_suffix_size=0, value_suffix_size=0):
for i in range(row_limit):
if include_key:
yield str(i) + "s"*key_suffix_size, str(i) + "s"*value_suffix_size
else:
yield str(i) + "s"*value_suffix_size
def test_parallelize_include_key(self):
rp = self.ctx.parallelize(self.str_generator(True),
options=self.store_opts(include_key=True))
self.assertUnOrderListEqual(self.str_generator(True), rp.get_all())
def test_parallelize(self):
rp = self.ctx.parallelize(self.str_generator(False), options=self.store_opts(include_key=False))
print(rp)
print(list(rp.get_all()))
self.assertUnOrderListEqual(self.str_generator(False), (v for k,v in rp.get_all()))
def test_serdes(self):
rp = self.ctx.load("ns12020","n_serdes", self.store_opts(serdes="EMPTY"))
rp.put_all((b"a",b"b") for k in range(10))
print(list(rp.get_all()))
print(rp.count())
def test_put(self):
rp = self.ctx.load('ns12020', f'test_put_{time_now()}')
object = b'1' * 10
rp.put(b'k1', object)
rp.destroy()
def test_get(self):
rp = self.ctx.parallelize(self.str_generator())
for i in range(10):
self.assertEqual(str(i), rp.get(str(i)))
def test_put_get(self):
rp = self.ctx.load('ns12020', f'test_put_get_{time_now()}')
length = (2 << 10) - 10
k = b'k'
v = b'1' * length
rp.put(k, v)
v1 = rp.get(k)
print(f'length: {len(v1)}')
self.assertEqual(len(v1), length)
self.assertEquals(v, v1)
def test_count(self):
rp = self.ctx.parallelize(self.str_generator(row_limit=11))
self.assertEqual(11, rp.count())
def test_put_all(self):
rp = self.ctx.load("ns12020","n1")
data = [("k1","v1"),("k2","v2"),("k3","v3"),("k4","v4"),("k5","v5"),("k6","v6")]
rp.put_all(data)
self.assertUnOrderListEqual(data, rp.get_all())
def test_cleanup(self):
rp = self.ctx.load("ns168","n1")
data = [("k1","v1"),("k2","v2"),("k3","v3"),("k4","v4"),("k5","v5"),("k6","v6")]
rp.put_all(data)
rp1 = self.ctx.load("ns168","n111")
rp1.put_all(data)
self.ctx.cleanup(namespace='ns168', name='n11*')
def test_cleanup_namespace(self):
namespace = 'ns180'
rp = self.ctx.load(namespace,"n1")
data = [("k1","v1"),("k2","v2"),("k3","v3"),("k4","v4"),("k5","v5"),("k6","v6")]
rp.put_all(data)
rp1 = self.ctx.load(namespace,"n111")
rp1.put_all(data)
rp2 = self.ctx.parallelize(data, options={'namespace': namespace})
self.ctx.cleanup(namespace=namespace, name='*')
def test_cleanup_namespace_specified_store_type(self):
namespace = 'ns181'
rp = self.ctx.load(namespace,"n1")
data = [("k1","v1"),("k2","v2"),("k3","v3"),("k4","v4"),("k5","v5"),("k6","v6")]
rp.put_all(data)
rp1 = self.ctx.parallelize(data, options={'namespace': namespace})
self.ctx.cleanup(namespace=namespace,
name='*',
options={'store_type': StoreTypes.ROLLPAIR_IN_MEMORY})
def test_map(self):
rp = self.ctx.parallelize(self.str_generator())
rp2 = rp.map(lambda k,v: (k + "_1", v))
self.assertUnOrderListEqual(((k + "_1", v) for k, v in self.str_generator()), rp2.get_all())
def test_reduce(self):
options = self.store_opts()
#options['total_partitions'] = 10
rp = self.ctx.parallelize([(i, i) for i in range(1, 7)], options)
#data = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]
#rp.put_all(data)
print(list(rp.get_all()))
print(rp.count())
from operator import add
result = rp.reduce(add)
print(f'reduce result: {result}')
self.assertEqual(result, 21)
def test_reduce_numpy(self):
import numpy as np
rp = self.ctx.load('ns12020', 'testNumpyReduce', self.store_opts())
rp.put('0', np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
rp.put('1', np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]))
rp.put('2', np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]]))
rp.put('3', np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3]]))
rp.put('4', np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4]]))
rp.put('5', np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5]]))
#rp.put('6', None)
result = rp.reduce(lambda x, y: x + y)
print(result)
self.assertEqual(result[0][-1], 15)
def test_aggregate(self):
from operator import mul, add
options = self.store_opts()
#options['total_partitions'] = 10
data1 = self.ctx.parallelize([(i, i) for i in range(1, 7)], options)
print(data1.get_partitions())
h2 = data1.aggregate(zero_value=1, seq_op=mul, comb_op=add)
print("aggregate result: ", h2)
#self.assertEqual(h2, 25)
self.assertEqual(h2, 720)
def test_join_self(self):
options = get_default_options()
left_rp = self.ctx.load("ns12020", "testJoinLeft2020", options=options).put_all([('a', 1), ('b', 4)], options={"include_key": True})
print(list(left_rp.join(left_rp, lambda v1, v2: v1 + v2).get_all()))
self.assertEqual(get_value(left_rp.join(left_rp, lambda v1, v2: v1 + v2)), [('a', 2), ('b', 8)])
def test_delete(self):
options = get_default_options()
options['include_key'] = True
data = [("k1", "v1"), ("k2", "v2"), ("k3", "v3"), ("k4", "v4")]
table = self.ctx.load('ns1', 'test_delete_one', options=options).put_all(data, options=options)
print("before delete:{}".format(list(table.get_all())))
table.delete("k1")
print("after delete:{}".format(list(table.get_all())))
self.assertEqual(get_value(table), ([("k2", "v2"), ("k3", "v3"), ("k4", "v4")]))
def test_destroy(self):
options = get_default_options()
options['include_key'] = True
data = [("k1", "v1"), ("k2", "v2"), ("k3", "v3"), ("k4", "v4")]
table = self.ctx.load('ns12020020618', 'test_destroy', options=options)#.put_all(data, options=options)
print("before destroy:{}".format(list(table.get_all())))
table.destroy()
# TODO:1: table which has been destroyed cannot get_all, should raise exception
#print("after destroy:{}".format(list(table.get_all())))
self.assertEqual(table.count(), 0)
def test_destroy_simple(self):
options = get_default_options()
options['include_key'] = True
table = self.ctx.load('ns1', 'test_destroy', options=options)
table.destroy()
def test_take(self):
options = get_default_options()
options['keys_only'] = True
options['include_key'] = False
table = self.ctx.load('ns1', 'test_take', options=options).put_all(range(10), options=options)
print(table.take(n=3, options=options))
self.assertEqual(table.take(n=3, options=options), [0, 1, 2])
options_kv = get_default_options()
options_kv['keys_only'] = False
options_kv['include_key'] = False
table = self.ctx.load('ns1', 'test_take_kv', options=options_kv).put_all(range(10), options=options_kv)
print(table.take(n=3, options=options_kv))
self.assertEqual(table.take(n=3, options=options_kv), [(0, 0), (1, 1), (2, 2)])
def test_first(self):
    """first() returns the first key (keys_only) or kv pair."""
    # Keys-only variant.
    opts = get_default_options()
    opts['keys_only'] = True
    opts['include_key'] = False
    table = self.ctx.load('ns1', 'test_take', options=opts).put_all(range(10), options=opts)
    print(table.first(options=opts))
    self.assertEqual(table.first(options=opts), 0)
    # Key-value variant.
    kv_opts = get_default_options()
    kv_opts['include_key'] = False
    kv_opts['keys_only'] = False
    table = self.ctx.load('ns12020', 'test_take_kv', options=kv_opts).put_all(range(10), options=kv_opts)
    print(table.first(options=kv_opts))
    self.assertEqual(table.first(options=kv_opts), (0, 0))
def test_map_values(self):
    """map_values applies the mapper to each value, keys untouched."""
    opts = get_default_options()
    opts['include_key'] = False
    rp = self.ctx.load("ns12020", "test_map_values", options=opts).put_all(range(10), options=opts)
    res = rp.map_values(lambda v: str(v) + 'map_values')
    print(list(res.get_all()))
    # Same expected pairs as before, generated instead of spelled out.
    expected = [(i, f'{i}map_values') for i in range(10)]
    self.assertEqual(get_value(res), expected)
def test_map_partitions(self):
    """map_partitions should emit two derived kv pairs per input record.

    For each input (k, v) the mapper yields ("{k}_{v}_0", v**2) and
    ("{k}_{v}_1", v**3); the output is checked by key lookups and a full
    sorted comparison.
    """
    options = get_default_options()
    options['total_partitions'] = 12
    data = [(str(i), i) for i in range(10)]
    rp = self.ctx.load("ns1", "test_map_partitions", options=options).put_all(data, options={"include_key": True})

    def func(pairs):  # renamed from `iter` to avoid shadowing the builtin
        ret = []
        for k, v in pairs:
            ret.append((f"{k}_{v}_0", v ** 2))
            ret.append((f"{k}_{v}_1", v ** 3))
        return ret

    table = rp.map_partitions(func)
    self.assertEqual(table.get("6_6_0"), 36)
    self.assertEqual(table.get("0_0_1"), 0)
    self.assertEqual(table.get("1_1_0"), 1)
    self.assertEqual(sorted(table.get_all(), key=lambda x: x[0]),
                     [('0_0_0', 0), ('0_0_1', 0), ('1_1_0', 1), ('1_1_1', 1), ('2_2_0', 4), ('2_2_1', 8),
                      ('3_3_0', 9), ('3_3_1', 27), ('4_4_0', 16), ('4_4_1', 64), ('5_5_0', 25),
                      ('5_5_1', 125), ('6_6_0', 36), ('6_6_1', 216), ('7_7_0', 49), ('7_7_1', 343),
                      ('8_8_0', 64), ('8_8_1', 512), ('9_9_0', 81), ('9_9_1', 729)])
def test_collapse_partitions(self):
    """collapse_partitions folds each partition's records into one value.

    With 5 records in a single effective partition the result is one
    (last_key, [all pairs]) entry.
    """
    options = get_default_options()
    options['include_key'] = False
    rp = self.ctx.load("ns1", "test_collapse_partitions", options=options).put_all(range(5), options=options)

    def f(iterator):
        collected = []  # renamed from `sum` to avoid shadowing the builtin
        for k, v in iterator:
            collected.append((k, v))
        return collected

    print(list(rp.collapse_partitions(f).get_all()))
    self.assertEqual(get_value(rp.collapse_partitions(f)), [(4, [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)])])
def test_filter(self):
    """filter keeps only records whose value is odd."""
    opts = get_default_options()
    opts['include_key'] = False
    rp = self.ctx.load("ns1", "test_filter", options=opts).put_all(range(5), options=opts)
    is_odd = lambda k, v: v % 2 != 0
    print(list(rp.filter(is_odd).get_all()))
    self.assertEqual(get_value(rp.filter(is_odd)), [(1, 1), (3, 3)])
def test_flatMap(self):
    """flat_map emits k records per input key k, so 0+1+2+3+4 = 10 total."""
    opts = get_default_options()
    opts['include_key'] = False
    rp = self.ctx.load("ns1", "test_flat_map", options=opts).put_all(range(5), options=opts)
    import random

    def foo(k, v):
        # Offset each emitted pair by a random 5-digit salt.
        r = random.randint(10000, 99999)
        return [(k + r + i, v + r + i) for i in range(0, k)]

    print(list(rp.flat_map(foo).get_all()))
    self.assertEqual(rp.flat_map(foo).count(), 10)
def test_glom(self):
    """glom gathers each partition's records into a single list value."""
    opts = get_default_options()
    opts['include_key'] = False
    rp = self.ctx.load("ns1", "test_glom", options=opts).put_all(range(5), options=opts)
    print(list(rp.glom().get_all()))
    self.assertEqual(get_value(rp.glom()), [(4, [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)])])
def test_join(self):
    """join keeps keys present on both sides and merges their values."""
    opts = get_default_options()
    left = self.ctx.load("ns1", "testJoinLeft", options=opts).put_all(
        [('a', 1), ('b', 4), ('d', 6), ('e', 0)], options={"include_key": True})
    right = self.ctx.load("ns1", "testJoinRight", options=opts).put_all(
        [('a', 2), ('c', 4), ('d', 1), ('f', 0), ('g', 1)], options={"include_key": True})
    merge = lambda v1, v2: v1 + v2
    print(list(left.join(right, merge).get_all()))
    # A commutative merge function makes the join symmetric.
    self.assertEqual(get_value(left.join(right, merge)), [('a', 3), ('d', 7)])
    self.assertEqual(get_value(right.join(left, merge)), [('a', 3), ('d', 7)])
def test_join_diff_partitions(self):
    # Joins must work when left/right tables have different partition
    # counts (10 vs 5), in both directions.
    options_left = get_default_options()
    options_right = get_default_options()
    options_left['total_partitions'] = 10
    options_right['total_partitions'] = 5
    left_rp = self.ctx.load("ns1", "testJoinLeft_10p_6", options=options_left).put_all([('a', 1), ('b', 4), ('d', 6), ('e', 0), ('f', 3), ('g', 12), ('h', 13), ('i', 14), ('j', 15), ('k', 16), ('l', 17)],
                                                                                       options={"include_key": True})
    right_rp = self.ctx.load("ns1", "testJoinRight_5p_6", options=options_right).put_all([('a', 2), ('c', 4), ('d', 1), ('f', 0), ('g', 1)],
                                                                                         options={"include_key": True})
    print(f'left:{get_value(left_rp)}, right:{get_value(right_rp)}')
    print('111', get_value(left_rp.join(right_rp, lambda v1, v2: v1 + v2)))
    print('222', get_value(right_rp.join(left_rp, lambda v1, v2: v1 + v2)))
    # Common keys are a, d, f, g; the join must be symmetric.
    self.assertEqual(get_value(left_rp.join(right_rp, lambda v1, v2: v1 + v2)), [('a', 3), ('d', 7), ('f', 3), ('g', 13)])
    self.assertEqual(get_value(right_rp.join(left_rp, lambda v1, v2: v1 + v2)), [('a', 3), ('d', 7), ('f', 3), ('g', 13)])
    # Second round with the datasets swapped between partition layouts.
    # NOTE(review): store names say 10p/5p but options_right/options_left are
    # reused as-is (5 and 10 partitions respectively) — confirm this mismatch
    # between name and option is intentional.
    right_rp = self.ctx.load("ns1", "testJoinRight_10p_7", options=options_right).put_all([('a', 1), ('b', 4), ('d', 6), ('e', 0), ('f', 3), ('g', 12), ('h', 13), ('i', 14), ('j', 15), ('k', 16), ('l', 17)],
                                                                                          options={"include_key": True})
    left_rp = self.ctx.load("ns1", "testJoinLeft_5p_7", options=options_left).put_all([('a', 2), ('c', 4), ('d', 1), ('f', 0), ('g', 1)],
                                                                                      options={"include_key": True})
    print(f'left:{get_value(left_rp)}, right:{get_value(right_rp)}')
    print('333', get_value(left_rp.join(right_rp, lambda v1, v2: v1 + v2)))
    self.assertEqual(get_value(left_rp.join(right_rp, lambda v1, v2: v1 + v2)), [('a', 3), ('d', 7), ('f', 3), ('g', 13)])
def test_sample(self):
    """sample(0.1, seed=81) over 100 rows should land near 10 records."""
    opts = get_default_options()
    opts['include_key'] = False
    rp = self.ctx.load("ns1", "testSample", options=opts).put_all(range(100), options=opts)
    sampled = rp.sample(0.1, 81).count()
    self.assertEqual(6 <= sampled <= 14, True)
def test_subtract_by_key(self):
    """subtract_by_key drops every key that also exists in the right table."""
    opts = get_default_options()
    opts['total_partitions'] = 1
    opts['include_key'] = False
    left = self.ctx.load("namespace20201", "testSubtractByKeyLeft202013", options=opts).put_all(range(10), options=opts)
    right = self.ctx.load("namespace2020131", "testSubtractByKeyRight202013", options=opts).put_all(range(5), options=opts)
    # Keys 0-4 are removed, 5-9 survive.
    self.assertEqual(list(left.subtract_by_key(right).get_all()), [(5, 5), (6, 6), (7, 7), (8, 8), (9, 9)])
    print(list(left.subtract_by_key(right).get_all()))
def test_subtract_diff_partitions(self):
    # subtract_by_key must work across tables with different partition
    # counts (10 vs 5), in both directions.
    options_left = get_default_options()
    options_right = get_default_options()
    options_left['total_partitions'] = 10
    options_right['total_partitions'] = 5
    left_rp = self.ctx.load("ns1", "testSubtractLeft_10p_6", options=options_left).put_all([('a', 1), ('b', 4), ('d', 6), ('e', 0), ('f', 3), ('g', 12), ('h', 13), ('i', 14), ('j', 15), ('k', 16), ('l', 17)],
                                                                                           options={"include_key": True})
    right_rp = self.ctx.load("ns1", "testSubtractRight_5p_6", options=options_right).put_all([('a', 2), ('c', 4), ('d', 1), ('f', 0), ('g', 1)],
                                                                                             options={"include_key": True})
    print(f'left:{get_value(left_rp)}, right:{get_value(right_rp)}')
    print('111', get_value(left_rp.subtract_by_key(right_rp)))
    print('222', get_value(right_rp.subtract_by_key(left_rp)))
    # left - right keeps left-only keys; right - left keeps right-only keys.
    self.assertEqual(get_value(left_rp.subtract_by_key(right_rp)), [('b', 4), ('e', 0), ('h', 13), ('i', 14), ('j', 15), ('k', 16), ('l', 17)])
    self.assertEqual(get_value(right_rp.subtract_by_key(left_rp)), [('c', 4)])
    # Second round with the datasets swapped between partition layouts.
    # NOTE(review): store names say 10p/5p but options_right/options_left are
    # reused as-is (5 and 10 partitions respectively) — confirm this mismatch
    # between name and option is intentional.
    right_rp = self.ctx.load("ns1", "testSubtractRight_10p_7", options=options_right).put_all([('a', 1), ('b', 4), ('d', 6), ('e', 0), ('f', 3), ('g', 12), ('h', 13), ('i', 14), ('j', 15), ('k', 16), ('l', 17)],
                                                                                              options={"include_key": True})
    left_rp = self.ctx.load("ns1", "testSubtractLeft_5p_7", options=options_left).put_all([('a', 2), ('c', 4), ('d', 1), ('f', 0), ('g', 1)],
                                                                                          options={"include_key": True})
    print(f'left:{get_value(left_rp)}, right:{get_value(right_rp)}')
    print('333', get_value(left_rp.subtract_by_key(right_rp)))
    self.assertEqual(get_value(left_rp.subtract_by_key(right_rp)), [('c', 4)])
def test_save_as_more_partition(self):
    """save_as can repartition a table up to more (2) partitions."""
    import time
    rp = self.ctx.parallelize(range(10), options={'include_key': False})
    saved = rp.save_as(f'test_name_{time.monotonic()}', 'test_ns', 2)
    self.assertEqual(saved.get_partitions(), 2)
    self.assertUnOrderListEqual(list(rp.get_all()), list(saved.get_all()))
def test_save_as_less_partition(self):
    """save_as can repartition a 10-partition table down to 2."""
    import time
    rp = self.ctx.parallelize(range(10), options={'include_key': False, 'total_partitions': 10})
    saved = rp.save_as(f'test_name_{time.monotonic()}', 'test_ns', 2)
    self.assertEqual(saved.get_partitions(), 2)
    self.assertUnOrderListEqual(list(rp.get_all()), list(saved.get_all()))
def test_save_as_equal_partition(self):
    """save_as with the same partition count (2 -> 2) preserves data."""
    import time
    rp = self.ctx.parallelize(range(10), options={'include_key': False, 'total_partitions': 2})
    saved = rp.save_as(f'test_name_{time.monotonic()}', 'test_ns', 2)
    self.assertEqual(saved.get_partitions(), 2)
    self.assertUnOrderListEqual(list(rp.get_all()), list(saved.get_all()))
@staticmethod
def gen_data(self):
    """Return the list [1, 2, ..., 1999999] used as bulk test input.

    NOTE(review): declared @staticmethod yet takes a ``self`` parameter;
    the unused parameter is kept so existing positional callers keep
    working — confirm call sites before removing it.
    """
    # list(range(...)) replaces the manual append loop: same values, C speed.
    return list(range(1, 2000000))
@staticmethod
def gen_kv(self):
    """Return an iterator of [i, i] pairs for i in 1..1999999.

    NOTE(review): @staticmethod with an unused ``self`` parameter, kept
    for positional-call compatibility — confirm call sites.
    """
    return ([i, i] for i in range(1, 2000000))
def test_union(self):
    # Exercises union of a values-only table with a kv table; matching
    # keys are merged with the provided function. No assertions — this
    # test only submits the jobs and prints the results.
    options = get_default_options()
    options['include_key'] = False
    left_rp = self.ctx.load("ns1202010", "testUnionLeft2020", options=options).put_all([1, 2, 3], options=options)
    print(left_rp)
    options['include_key'] = True
    options['total_partitions'] = 1
    # NOTE(review): this put_all passes no options= argument even though
    # include_key was just set to True on `options` — confirm whether the
    # default applies here or the argument was accidentally dropped.
    right_rp = self.ctx.load("ns12020101", "testUnionRight2020", options=options).put_all([(1, 1), (2, 2), (3, 3)])
    print(right_rp)
    print(list(left_rp.union(right_rp, lambda v1, v2: v1 + v2).get_all()))
    # Second round on a fresh namespace with an explicit single partition.
    options = get_default_options()
    options['total_partitions'] = 1
    options['include_key'] = False
    left_rp = self.ctx.load("namespace20200110", "testUnionLeft2020", options=options).put_all([1, 2, 3], options=options)
    print("left:", left_rp)
    options['include_key'] = True
    right_rp = self.ctx.load("namespace20200110", "testUnionRight2020", options=options).put_all([(1, 1), (2, 2), (3, 3)], options=options)
    print("right:", right_rp)
    print("left:", list(left_rp.get_all()))
    print("right:", list(right_rp.get_all()))
    print(list(left_rp.union(right_rp, lambda v1, v2: v1 + v2).get_all()))
def test_union_diff_partitions(self):
    # union must work across tables with different partition counts
    # (10 vs 5); unmatched keys pass through, matched keys are merged.
    options_left = get_default_options()
    options_right = get_default_options()
    options_left['total_partitions'] = 10
    options_right['total_partitions'] = 5
    left_rp = self.ctx.load("ns1", "testUniontLeft_10p_6", options=options_left).put_all([('a', 1), ('b', 4), ('d', 6), ('e', 0), ('f', 3), ('g', 12), ('h', 13), ('i', 14), ('j', 15), ('k', 16), ('l', 17)],
                                                                                         options={"include_key": True})
    right_rp = self.ctx.load("ns1", "testUniontRight_5p_6", options=options_right).put_all([('a', 2), ('c', 4), ('d', 1), ('f', 0), ('g', 1)],
                                                                                           options={"include_key": True})
    print(f'left:{get_value(left_rp)}, right:{get_value(right_rp)}')
    print('111', get_value(left_rp.union(right_rp, lambda v1, v2: v1 + v2)))
    print('222', get_value(right_rp.union(left_rp, lambda v1, v2: v1 + v2)))
    # A commutative merge makes the union symmetric.
    self.assertEqual(get_value(left_rp.union(right_rp, lambda v1, v2: v1 + v2)),
                     [('a', 3), ('b', 4), ('c', 4), ('d', 7), ('e', 0), ('f', 3), ('g', 13), ('h', 13), ('i', 14), ('j', 15), ('k', 16), ('l', 17)])
    self.assertEqual(get_value(right_rp.union(left_rp, lambda v1, v2: v1 + v2)),
                     [('a', 3), ('b', 4), ('c', 4), ('d', 7), ('e', 0), ('f', 3), ('g', 13), ('h', 13), ('i', 14), ('j', 15), ('k', 16), ('l', 17)])
    # Second round with the datasets swapped between partition layouts.
    # NOTE(review): store names say 10p/5p but options_right/options_left are
    # reused as-is (5 and 10 partitions respectively) — confirm this mismatch
    # between name and option is intentional.
    right_rp = self.ctx.load("ns1", "testUniontRight_10p_7", options=options_right).put_all([('a', 1), ('b', 4), ('d', 6), ('e', 0), ('f', 3), ('g', 12), ('h', 13), ('i', 14), ('j', 15), ('k', 16), ('l', 17)],
                                                                                            options={"include_key": True})
    left_rp = self.ctx.load("ns1", "testUniontLeft_5p_7", options=options_left).put_all([('a', 2), ('c', 4), ('d', 1), ('f', 0), ('g', 1)],
                                                                                        options={"include_key": True})
    print(f'left:{get_value(left_rp)}, right:{get_value(right_rp)}')
    print('333', get_value(left_rp.union(right_rp, lambda v1, v2: v1 + v2)))
    self.assertEqual(get_value(left_rp.union(right_rp, lambda v1, v2: v1 + v2)),
                     [('a', 3), ('b', 4), ('c', 4), ('d', 7), ('e', 0), ('f', 3), ('g', 13), ('h', 13), ('i', 14), ('j', 15), ('k', 16), ('l', 17)])
class TestRollPairMultiPartition(TestRollPairBase):
    """Re-runs the base roll-pair suite against a 3-partition store."""

    def setUp(self):
        self.ctx = get_debug_test_context()

    @staticmethod
    def store_opts(**kwargs):
        # Base options for every store in this suite: 3 partitions.
        opts = {'total_partitions': 3}
        opts.update(kwargs)
        return opts

    @staticmethod
    def str_generator(include_key=True, row_limit=100, key_suffix_size=0, value_suffix_size=0):
        return TestRollPairBase.str_generator(include_key, row_limit, key_suffix_size, value_suffix_size)

    def test_put_all(self):
        st_opts = self.store_opts(include_key=True)
        rp = self.ctx.load("test_roll_pair", "TestRollPairMultiPartition", options=self.store_opts())
        row_limit = 3
        rp.put_all(self.str_generator(row_limit=row_limit))
        self.assertUnOrderListEqual(self.str_generator(include_key=True, row_limit=row_limit), rp.get_all())
        self.assertEqual(st_opts["total_partitions"], rp.get_partitions())

    def test_count(self):
        # BUG FIX: this class previously defined test_count twice; the first
        # definition (asserting an exact count of 10000) was dead code because
        # this later definition shadowed it. The dead definition is removed;
        # the surviving behavior (delegating to the base suite) is unchanged.
        super().test_count()

    def test_reduce_numpy(self):
        super().test_reduce_numpy()

    def test_parallelize_include_key(self):
        st_opts = self.store_opts(include_key=True)
        rp = self.ctx.parallelize(self.str_generator(True), st_opts)
        self.assertUnOrderListEqual(self.str_generator(True), rp.get_all())
        self.assertEqual(st_opts["total_partitions"], rp.get_partitions())

    def test_reduce(self):
        super().test_reduce()

    def test_aggregate(self):
        from operator import mul, add
        data1 = self.ctx.parallelize([(i, i) for i in range(1, 7)], self.store_opts())
        print(data1.get_partitions())
        # seq_op multiplies within partitions, comb_op adds across them.
        h2 = data1.aggregate(zero_value=1, seq_op=mul, comb_op=add)
        print(f"aggregate result: {h2}")
        self.assertEqual(h2, 32)
class TestRollPairStandalone(TestRollPairBase):
    """Runs the inherited suite once against a shared standalone context."""

    ctx = None

    @classmethod
    def setUpClass(cls) -> None:
        # One context for the whole class; released in tearDownClass.
        cls.ctx = get_standalone_context()

    @classmethod
    def tearDownClass(cls) -> None:
        cls.ctx.get_session().stop()

    def setUp(self):
        pass

    def tearDown(self) -> None:
        pass
class TestRollPairClusterEverySession(TestRollPairBase):
    """Runs against a fresh cluster session that is stopped after each test."""

    def setUp(self):
        self.ctx = get_cluster_context()

    @staticmethod
    def store_opts(**kwargs):
        # Cluster stores default to 10 partitions.
        opts = {'total_partitions': 10}
        opts.update(kwargs)
        return opts

    def test_get_and_stop_and_kill_session(self):
        """stop()/kill() on an already-stopped session must not raise."""
        session = self.ctx.get_session()
        session_id = session.get_session_id()  # renamed from `id`: avoid shadowing the builtin
        session.stop()
        from eggroll.core.session import ErSession
        dead_session = ErSession(session_id)
        dead_session.stop()
        dead_session = ErSession(session_id)
        dead_session.kill()

    def tearDown(self) -> None:
        self.ctx.get_session().stop()
class TestRollPairCluster(TestRollPairBase):
    # Runs the inherited suite against a real cluster context configured
    # with 10 processors per node; the shared context is created once in
    # setUpClass and stopped in tearDownClass.
    ctx = None

    @classmethod
    def setUpClass(cls) -> None:
        opts = {"eggroll.session.processors.per.node": "10"}
        #opts = {}
        cls.ctx = get_cluster_context(options=opts)

    def setUp(self):
        pass

    @staticmethod
    def store_opts(**kwargs):
        # Cluster stores default to 10 partitions.
        opts = {'total_partitions': 10}
        opts.update(kwargs)
        return opts

    def test_aggregate(self):
        from operator import mul, add
        data1 = self.ctx.parallelize([(i, i) for i in range(1, 7)], self.store_opts())
        print(data1.get_partitions())
        h2 = data1.aggregate(zero_value=1, seq_op=mul, comb_op=add)
        print("aggregate result: ", h2)
        # note that if there is no data in a partition then the zero value will be sent, thus 21 + 4 * 1 = 25
        self.assertEqual(h2, 25)

    def test_map_values(self):
        super().test_map_values()

    def test_reduce(self):
        # Sum of values 1..6 is 21.
        rp = self.ctx.parallelize([(i, i) for i in range(1, 7)], self.store_opts())
        #data = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]
        #rp.put_all(data)
        print("all: ", list(rp.get_all()))
        print("count: ", rp.count())
        from operator import add
        result = rp.reduce(add)
        print(f'reduce result: {result}')
        self.assertEqual(result, 21)

    def test_save_as_equal_partition(self):
        super().test_save_as_equal_partition()

    def test_save_as_less_partition(self):
        super().test_save_as_less_partition()

    def test_save_as_more_partition(self):
        super().test_save_as_more_partition()

    def test_join_diff_partitions(self):
        super().test_join_diff_partitions()

    def test_empty(self):
        # Placeholder so the cluster suite always has at least one cheap test.
        pass

    def tearDown(self) -> None:
        pass

    @classmethod
    def tearDownClass(cls) -> None:
        cls.ctx.get_session().stop()
| [
"eggroll.roll_pair.test.roll_pair_test_assets.get_cluster_context",
"random.randint",
"eggroll.core.session.ErSession",
"eggroll.roll_pair.test.roll_pair_test_assets.get_standalone_context",
"eggroll.roll_pair.test.roll_pair_test_assets.get_default_options",
"numpy.array",
"eggroll.core.utils.time_now",... | [((1081, 1105), 'eggroll.roll_pair.test.roll_pair_test_assets.get_debug_test_context', 'get_debug_test_context', ([], {}), '()\n', (1103, 1105), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((6738, 6759), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (6757, 6759), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((7129, 7150), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (7148, 7150), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((7655, 7676), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (7674, 7676), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((8238, 8259), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (8257, 8259), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((8436, 8457), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (8455, 8457), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((8776, 8797), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (8795, 8797), False, 'from 
eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((9176, 9197), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (9195, 9197), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((9500, 9521), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (9519, 9521), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((9883, 9904), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (9902, 9904), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((10531, 10552), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (10550, 10552), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((11707, 11728), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (11726, 11728), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((12236, 12257), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (12255, 12257), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((12601, 12622), 
'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (12620, 12622), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((13141, 13162), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (13160, 13162), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((13482, 13503), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (13501, 13503), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((14177, 14198), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (14196, 14198), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((14223, 14244), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (14242, 14244), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((16336, 16357), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (16355, 16357), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((16624, 16645), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (16643, 16645), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import 
get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((17248, 17269), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (17267, 17269), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((17294, 17315), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (17313, 17315), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((20633, 20654), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (20652, 20654), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((21156, 21177), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (21175, 21177), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((21876, 21897), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (21895, 21897), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((21922, 21943), 'eggroll.roll_pair.test.roll_pair_test_assets.get_default_options', 'get_default_options', ([], {}), '()\n', (21941, 21943), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((24458, 24482), 'eggroll.roll_pair.test.roll_pair_test_assets.get_debug_test_context', 
'get_debug_test_context', ([], {}), '()\n', (24480, 24482), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((26529, 26553), 'eggroll.roll_pair.test.roll_pair_test_assets.get_standalone_context', 'get_standalone_context', ([], {}), '()\n', (26551, 26553), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((26825, 26846), 'eggroll.roll_pair.test.roll_pair_test_assets.get_cluster_context', 'get_cluster_context', ([], {}), '()\n', (26844, 26846), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((27213, 27226), 'eggroll.core.session.ErSession', 'ErSession', (['id'], {}), '(id)\n', (27222, 27226), False, 'from eggroll.core.session import ErSession\n'), ((27279, 27292), 'eggroll.core.session.ErSession', 'ErSession', (['id'], {}), '(id)\n', (27288, 27292), False, 'from eggroll.core.session import ErSession\n'), ((27603, 27636), 'eggroll.roll_pair.test.roll_pair_test_assets.get_cluster_context', 'get_cluster_context', ([], {'options': 'opts'}), '(options=opts)\n', (27622, 27636), False, 'from eggroll.roll_pair.test.roll_pair_test_assets import get_debug_test_context, get_cluster_context, get_standalone_context, get_default_options\n'), ((5667, 5712), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n', (5675, 5712), True, 'import numpy as np\n'), ((5744, 5789), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (5752, 5789), True, 'import numpy as np\n'), ((5821, 5866), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]])\n', (5829, 5866), True, 'import numpy as 
np\n'), ((5898, 5943), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3]])\n', (5906, 5943), True, 'import numpy as np\n'), ((5975, 6020), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4]])\n', (5983, 6020), True, 'import numpy as np\n'), ((6052, 6097), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5]])\n', (6060, 6097), True, 'import numpy as np\n'), ((12851, 12879), 'random.randint', 'random.randint', (['(10000)', '(99999)'], {}), '(10000, 99999)\n', (12865, 12879), False, 'import random\n'), ((2623, 2633), 'eggroll.core.utils.time_now', 'time_now', ([], {}), '()\n', (2631, 2633), False, 'from eggroll.core.utils import time_now\n'), ((2960, 2970), 'eggroll.core.utils.time_now', 'time_now', ([], {}), '()\n', (2968, 2970), False, 'from eggroll.core.utils import time_now\n'), ((19464, 19480), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (19478, 19480), False, 'import time\n'), ((19825, 19841), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (19839, 19841), False, 'import time\n'), ((20186, 20202), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (20200, 20202), False, 'import time\n')] |
from snsql import *
import pandas as pd
import numpy as np
privacy = Privacy(epsilon=3.0, delta=0.1)
class TestPreAggregatedSuccess:
    """Input-format checks for the ``pre_aggregated`` argument of execute.

    The same pre-computed aggregate rows are fed to the reader as a plain
    list, a pandas DataFrame, numpy arrays, and Spark DataFrame/RDD; each
    test then checks that the first (sorted) group key comes back as '1'.
    The shared fixture, query, and reader acquisition were previously
    duplicated verbatim in every method; they are extracted into private
    helpers here (public test method names and behavior unchanged).
    """

    # Header row + one pre-aggregated row per sex value.
    _ROWS = [
        ('keycount', 'sex', 'count_star'),
        (1000, 2, 2000),
        (1000, 1, 2000)
    ]
    _QUERY = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'

    @classmethod
    def _frame(cls):
        """Return the fixture as a fresh DataFrame with proper column names."""
        df = pd.DataFrame(data=cls._ROWS[1:], index=None)
        df.columns = cls._ROWS[0]
        return df

    @staticmethod
    def _reader(test_databases, engine):
        """Private reader for PUMS_pid, or None if the engine is unavailable."""
        return test_databases.get_private_reader(
            privacy=privacy,
            database="PUMS_pid",
            engine=engine
        )

    def test_list_success(self, test_databases):
        # pass in properly formatted list (copied so the fixture stays pristine)
        priv = self._reader(test_databases, "pandas")
        if priv:
            res = priv.execute(self._QUERY, pre_aggregated=list(self._ROWS))
            assert str(res[1][0]) == '1'  # it's sorted

    def test_pandas_success(self, test_databases):
        # pass in properly formatted dataframe
        priv = self._reader(test_databases, "pandas")
        if priv:
            res = priv.execute(self._QUERY, pre_aggregated=self._frame())
            assert str(res[1][0]) == '1'  # it's sorted

    def test_pandas_success_df(self, test_databases):
        # execute_df returns a dataframe; index the result by column name
        priv = self._reader(test_databases, "pandas")
        if priv:
            res = priv.execute_df(self._QUERY, pre_aggregated=self._frame())
            assert str(res['sex'][0]) == '1'  # it's sorted

    def test_np_ndarray_success(self, test_databases):
        # 2-D ndarray built from the named dataframe (header row dropped)
        priv = self._reader(test_databases, "pandas")
        if priv:
            res = priv.execute(self._QUERY, pre_aggregated=self._frame().to_numpy())
            assert str(res[1][0]) == '1'  # it's sorted

    def test_np_array_success(self, test_databases):
        # raw np.array built straight from the data rows (no column names)
        priv = self._reader(test_databases, "pandas")
        if priv:
            res = priv.execute(self._QUERY, pre_aggregated=np.array(self._ROWS[1:]))
            assert str(res[1][0]) == '1'  # it's sorted

    def test_spark_df_success(self, test_databases):
        # Spark DataFrame input via the reader's own session
        priv = self._reader(test_databases, "spark")
        if priv:
            pre_aggregated = priv.reader.api.createDataFrame(self._ROWS[1:], self._ROWS[0])
            res = priv.execute(self._QUERY, pre_aggregated=pre_aggregated)
            res = test_databases.to_tuples(res)
            assert str(res[1][0]) == '1'  # it's sorted

    def test_spark_df_success_df(self, test_databases):
        # Spark DataFrame input, dataframe-shaped result
        priv = self._reader(test_databases, "spark")
        if priv:
            pre_aggregated = priv.reader.api.createDataFrame(self._ROWS[1:], self._ROWS[0])
            res = priv.execute_df(self._QUERY, pre_aggregated=pre_aggregated)
            assert str(res['sex'][0]) == '1'  # it's sorted

    def test_spark_rdd_success(self, test_databases):
        # Spark RDD input (DataFrame downgraded to its .rdd)
        priv = self._reader(test_databases, "spark")
        if priv:
            pre_aggregated = priv.reader.api.createDataFrame(self._ROWS[1:], self._ROWS[0]).rdd
            res = priv.execute(self._QUERY, pre_aggregated=pre_aggregated)
            res = test_databases.to_tuples(res)
            assert str(res[1][0]) == '1'  # it's sorted
class TestPreAggregatedColumnFail:
    """pre_aggregated inputs with a wrong column layout must be rejected."""
    def _assert_rejected(self, priv, query, pre_aggregated):
        # Shared check: execute() must raise ValueError for malformed input.
        try:
            _ = priv.execute(query, pre_aggregated=pre_aggregated)
        except ValueError:
            return
        raise AssertionError("execute should have raised an exception")
    def test_list_col_fail(self, test_databases):
        # wrong column names in the header row of a plain list
        pre_aggregated = [
            ('count_star', 'sex', 'count_age'),
            (1000, 2, 2000),
            (1000, 1, 2000)
        ]
        query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
        priv = test_databases.get_private_reader(
            privacy=privacy,
            database="PUMS_pid",
            engine="pandas"
        )
        if priv:
            self._assert_rejected(priv, query, pre_aggregated)
    def test_pandas_col_fail(self, test_databases):
        # wrong column names on a pandas dataframe
        pre_aggregated = [
            ('count_star', 'sex', 'count_age'),
            (1000, 2, 2000),
            (1000, 1, 2000)
        ]
        colnames = pre_aggregated[0]
        pre_aggregated = pd.DataFrame(data=pre_aggregated[1:], index=None)
        pre_aggregated.columns = colnames
        priv = test_databases.get_private_reader(
            privacy=privacy,
            database="PUMS_pid",
            engine="pandas"
        )
        if priv:
            query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
            self._assert_rejected(priv, query, pre_aggregated)
    def test_pandas_col_fail_2(self, test_databases):
        # too few columns for the query
        pre_aggregated = [
            ('sex', 'count_star'),
            (2, 2000),
            (1, 2000)
        ]
        colnames = pre_aggregated[0]
        pre_aggregated = pd.DataFrame(data=pre_aggregated[1:], index=None)
        pre_aggregated.columns = colnames
        priv = test_databases.get_private_reader(
            privacy=privacy,
            database="PUMS_pid",
            engine="pandas"
        )
        query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
        if priv:
            self._assert_rejected(priv, query, pre_aggregated)
    def test_spark_df_col_fail(self, test_databases):
        # wrong grouping column ('age' instead of 'sex') on a spark dataframe
        pre_aggregated = [
            ('keycount', 'age', 'count_star'),
            (1000, 2, 2000),
            (1000, 1, 2000)
        ]
        priv = test_databases.get_private_reader(
            privacy=privacy,
            database="PUMS_pid",
            engine="spark"
        )
        if priv:
            pre_aggregated = priv.reader.api.createDataFrame(pre_aggregated[1:], pre_aggregated[0])
            query = 'SELECT sex, COUNT(*) AS n, COUNT(*) AS foo FROM PUMS.PUMS GROUP BY sex ORDER BY sex'
            self._assert_rejected(priv, query, pre_aggregated)
| [
"pandas.DataFrame",
"numpy.array"
] | [((1107, 1156), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'pre_aggregated[1:]', 'index': 'None'}), '(data=pre_aggregated[1:], index=None)\n', (1119, 1156), True, 'import pandas as pd\n'), ((1902, 1951), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'pre_aggregated[1:]', 'index': 'None'}), '(data=pre_aggregated[1:], index=None)\n', (1914, 1951), True, 'import pandas as pd\n'), ((2705, 2754), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'pre_aggregated[1:]', 'index': 'None'}), '(data=pre_aggregated[1:], index=None)\n', (2717, 2754), True, 'import pandas as pd\n'), ((3513, 3541), 'numpy.array', 'np.array', (['pre_aggregated[1:]'], {}), '(pre_aggregated[1:])\n', (3521, 3541), True, 'import numpy as np\n'), ((7415, 7464), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'pre_aggregated[1:]', 'index': 'None'}), '(data=pre_aggregated[1:], index=None)\n', (7427, 7464), True, 'import pandas as pd\n'), ((8278, 8327), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'pre_aggregated[1:]', 'index': 'None'}), '(data=pre_aggregated[1:], index=None)\n', (8290, 8327), True, 'import pandas as pd\n')] |
import message_queue as mq
# -*- coding: utf-8 -*-
# NOTE(review): a coding declaration only takes effect on line 1 or 2 of a
# file (PEP 263); after an import it is inert.
print("Loading HaasoscopeLib.py")
from HaasoscopeOversampleLib import HaasoscopeOversample as hos
from serial import Serial, SerialException
from struct import unpack
import numpy as np
import time, json, os
import matplotlib
from const import *
import threading
# Platform detection: os.uname() does not exist on Windows (AttributeError),
# and an arm/aarch machine string is taken to mean a Raspberry Pi.
mearm=False
mewin=False
try:
    print((os.uname()))
    if os.uname()[4].startswith("arm") or os.uname()[4].startswith("aarch"):
        print("On a raspberry pi?")
        mearm=True
except AttributeError:
    mewin=True
    print("Not on Linux?")
dofast=False #do the fast way of redrawing, just the specific things that could have likely changed, only works well on Windows?
if mewin:
    dofast=True
    matplotlib.use('Qt4Agg')
#to print possible backends
#import matplotlib.rcsetup as rcsetup
#print rcsetup.interactive_bk
import matplotlib.pyplot as plt
print(("matplotlib backend is",plt.get_backend()))
#disable some default key mappings
#keymap.fullscreen : f, ctrl+f       # toggling
#keymap.home : h, r, home            # home or reset mnemonic
#keymap.back : left, c, backspace    # forward / backward keys to enable
#keymap.forward : right, v           #   left handed quick navigation
#keymap.pan : p                      # pan mnemonic
#keymap.zoom : o                     # zoom mnemonic
#keymap.save : s                     # saving current figure
#keymap.quit : ctrl+w, cmd+w         # close the current figure
#keymap.grid : g                     # switching on/off a grid in current axes
#keymap.yscale : l                   # toggle scaling of y-axes ('log'/'linear')
#keymap.xscale : L, k                # toggle scaling of x-axes ('log'/'linear')
#keymap.all_axes : a                 # enable all axes
# Free up the keys above so the oscilloscope UI can use them as hotkeys.
plt.rcParams['keymap.fullscreen'] = ''
plt.rcParams['keymap.home'] = ''
plt.rcParams['keymap.back'] = ''
plt.rcParams['keymap.forward'] = ''
plt.rcParams['keymap.pan'] = ''
plt.rcParams['keymap.zoom'] = ''
#plt.rcParams['keymap.save'] = ''
#plt.rcParams['keymap.quit'] = ''
plt.rcParams['keymap.grid'] = ''
#plt.rcParams['keymap.yscale'] = ''
plt.rcParams['keymap.xscale'] = ''
plt.rcParams['keymap.all_axes'] = ''
from scipy.signal import resample
import serial.tools.list_ports
import scipy.optimize
enable_ripyl=False # set to True to use ripyl serial decoding... have to get it from https://github.com/kevinpt/ripyl and then install it first!
if enable_ripyl:
    import ripyl.util.plot as rplot
    from collections import OrderedDict
    import ripyl.streaming as stream
    import ripyl.protocol.uart as uart
class Haasoscope():
    def construct(self, hos):
        """One-time initialization of scope state (used in place of __init__).

        Wires up the message queue, serial/USB settings, display and trigger
        flags, lock-in parameters, and the cached IO-expander register images
        (a20/b20/a21/b21).
        """
        self.hos = hos
        self.mq_adapter = mq.Adapter('main_queue')
        self.mq_publisher = mq.Publisher(self.mq_adapter)
        self.serialdelaytimerwait=100 #150 # 600 # delay (in 2 us steps) between each 32 bytes of serial output (set to 600 for some slow USB serial setups, but 0 normally)
        if mearm: self.serialdelaytimerwait=600
        self.brate = 1500000 #serial baud rate #1500000 #115200 #921600
        self.sertimeout = 3.0 #time to wait for serial response #3.0, HAAS_NUM_BYTES*8*10.0/brate, or None
        self.serport="" # the name of the serial port on your computer, connected to Haasoscope, like /dev/ttyUSB0 or COM8, leave blank to detect automatically!
        self.usbport=[] # the names of the USB2 ports on your computer, connected to Haasoscope, leave blank to detect automatically!
        self.usbser=[]
        self.otherlines = []
        self.texts = []
#        self.xdata=np.arange(HAAS_NUM_SAMPLES)
#        self.xdata2=np.arange(HAAS_NUM_SAMPLES*2) # for oversampling
#        self.xdata4=np.arange(HAAS_NUM_SAMPLES*4) # for over-oversampling
        self.ydata = []
#        ysampdatat=np.zeros(HAAS_NSAMP*len(HAAS_MAX10ADCCHANS)); self.ysampdata=np.reshape(ysampdatat,(len(HAAS_MAX10ADCCHANS),HAAS_NSAMP))
#        self.xsampdata=np.arange(HAAS_NSAMP)
        self.paused=False
        self.getone=False
        self.average=False #will average every 2 samples
        self.fallingedge=True #trigger on falling edge
        self.dogrid=True #redraw the grid
        self.chanforscreen=0 #channel to draw on the mini-display
        self.triggertimethresh=1 #samples for which the trigger must be over/under threshold
        self.dofft=False #drawing the FFT plot
        self.dousb=False #whether to use USB2 output
        self.dogetotherdata=False # whether to read other calculated data like TDC
        self.domaindrawing=True # whether to update the main window data and redraw it
        self.selectedchannel=0 #what channel some actions apply to
        self.selectedmax10channel=0 #what max10 channel is selected
        self.dohighres=False #whether to do averaging during downsampling or not (turned on by default during startup, and off again during shutdown)
        self.autocalibchannel=-1 #which channel we are auto-calibrating
        self.autocalibgainac=0 #which stage of gain and acdc we are auto-calibrating
        self.recordedchannellength=250 #number of events to overlay in the 2d persist plot
        self.chtext = "Ch." #the text in the legend for each channel
        self.db = False #debugging #True #False
        self.dolockin=False # read lockin info
        self.dolockinplot=True # plot the lockin info
        self.lockinanalyzedataboard=0 # the board to analyze lockin info from
        self.debuglockin=False #debugging of lockin calculations #True #False
        self.reffreq = 0.008 #MHz of reference signal on chan 3 for lockin calculations
        self.refsinchan = 3 #the channel number of the ref input signal (for auto reffreq calculation via sin fit)
        self.autorearm=False #whether to automatically rearm the trigger after each event, or wait for a signal from software
        self.xscaling=1.e0 # for the x-axis scale
        self.rollingtrigger=True #rolling auto trigger at 5 Hz
        self.dologicanalyzer=False #whether to send logic analyzer data
        self.thread_running=True
        # serializes access to self.ser across the command-sending methods
        self._lock = threading.Lock()
        #These hold the state of the IO expanders
        self.a20= int('f0',16) # oversamp (set bits 0,1 to 0 to send 0->2 and 1->3) / gain (set second char to 0 for low gain)
        self.b20= int('0f',16) # shdn (set first char to 0 to turn on) / ac coupling (set second char to f for DC, 0 for AC)
        self.a21= int('00',16) # leds (on is 1)
        self.b21= int('00',16)# free pins
def tellrolltrig(self,rt):
#tell them to roll the trigger (a self-trigger each ~second), or not
self.rollingtrigger = rt
frame=[]
if rt: frame.append(101)
else: frame.append(102)
with self._lock:
self.ser.write(frame)
def tellsamplesmax10adc(self, nsamp):
#tell it the number of samples to use for the 1MHz internal Max10 ADC
frame=[]
frame.append(120)
frame.extend(bytearray.fromhex('{:04x}'.format(nsamp)))
with self._lock:
self.ser.write(frame)
if self.db: print(("Nsamp for max10 ADC is ",256*frame[1]+1*frame[2]," HAAS_NSAMP:",HAAS_NSAMP))
def settriggerpoint(self,tp):
#tell it the trigger point
frame=[]
frame.append(121)
offset=5 #small offset due to drawing and delay
myb=bytearray.fromhex('{:04x}'.format(tp+offset))
frame.extend(myb)
with self._lock:
self.ser.write(frame)
print(("Trigger point is",256*myb[0]+1*myb[1]-offset))
def tellsamplessend(self, num_samples_to_send):
#tell it the number of samples to send
frame=[]
frame.append(122)
# Either 0 for all, or num_samples*pow(2,HAAS_SENDINCREMENT)
frame.extend(bytearray.fromhex('{:04x}'.format(num_samples_to_send)))
with self._lock:
self.ser.write(frame)
print(("num samples is",256*frame[1]+1*frame[2]))
def telllockinnumtoshift(self,numtoshift):
#tell it the number of samples to shift when calculating 90deg outofphase sum for lockin
frame=[]
frame.append(138)
myb=bytearray.fromhex('{:04x}'.format(numtoshift))
frame.extend(myb)
with self._lock:
self.ser.write(frame)
if self.db: print(("lockinnumtoshift is",256*myb[0]+1*myb[1]))
def tellserialdelaytimerwait(self):
#tell it the number of microseconds to wait between every 32 (64?) bytes of serial output (for some slow USB serial setups)
frame=[]
frame.append(135)
frame.extend(bytearray.fromhex('{:04x}'.format(self.serialdelaytimerwait)))
with self._lock:
self.ser.write(frame)
print(("serialdelaytimerwait is",256*frame[1]+1*frame[2]))
def tellbytesskip(self, HAAS_SENDINCREMENT):
#tell it the number of bytes to skip after each send, log2
frame=[]
frame.append(123)
frame.append(HAAS_SENDINCREMENT)
with self._lock:
self.ser.write(frame)
print(("123 send increment is",HAAS_SENDINCREMENT))
def setlogicanalyzer(self, dologicanalyzer):
#tell it start/stop doing logic analyzer
self.dologicanalyzer = dologicanalyzer
frame=[]
frame.append(145)
if self.dologicanalyzer:
frame.append(5)
else:
frame.append(4)
with self._lock:
self.ser.write(frame)
print(("dologicanalyzer is now",self.dologicanalyzer))
    # lowest firmware version seen across all connected boards (class default 255)
    minfirmwareversion=255
    def getfirmwareversion(self, board):
        """Query board `board` for its firmware version byte (command 147).

        Returns 0 when the board does not answer (firmware versions < 5
        do not implement command 147).
        """
        #get the firmware version of a board
        oldtime=time.time()
        frame=[]
        frame.append(30+board) #make the next board active (serial_passthrough 0)
        frame.append(147) #request the firmware version byte
        with self._lock:
            self.ser.write(frame)
            self.ser.timeout=0.1; rslt = self.ser.read(1); self.ser.timeout=self.sertimeout # reduce the serial timeout temporarily, since the old firmware versions will return nothing for command 147
        byte_array = unpack('%dB'%len(rslt),rslt)
        firmwareversion=0
        if len(byte_array)>0: firmwareversion=byte_array[0]
        print(("got firmwareversion",firmwareversion,"for board",board,"in",round((time.time()-oldtime)*1000.,2),"ms"))
        return firmwareversion # is 0 if not found (firmware version <5)
def telltickstowait(self,ds):
#tell it the number of clock ticks to wait, log2, between sending bytes
frame=[]
frame.append(125)
frame.append(ds)
with self._lock:
self.ser.write(frame)
if self.db: print(("clockbitstowait is",ds))
def tellminidisplaychan(self,ch):
#tell it the channel to show on the mini-display
frame=[]
frame.append(126)
frame.append(ch)
with self._lock:
self.ser.write(frame)
print(("chanforscreen is",ch))
def settriggerthresh(self,tp):
#tell it the trigger threshold
tp=255-tp # need to flip it due to op amp
frame=[]
frame.append(127)
frame.append(tp)
with self._lock:
self.ser.write(frame)
print(("Trigger threshold is",tp))
def settriggerthresh2(self,tp):
#tell it the high trigger threshold (must be below this to trigger)
tp=255-tp # need to flip it due to op amp
frame=[]
frame.append(140)
frame.append(tp)
with self._lock:
self.ser.write(frame)
print(("Trigger high threshold is",tp))
def settriggertype(self,tp):
#tell it the trigger type: rising edge, falling edge, either, ...
frame=[]
frame.append(128)
frame.append(tp)
with self._lock:
self.ser.write(frame)
if self.db: print(("Trigger type is",tp))
def settriggertime(self,ttt):
#tell it the trigger time over/under threshold required
# if ttt>HAAS_NUM_SAMPLES and ttt>10:
usedownsamplefortriggertot=True
if usedownsamplefortriggertot: ttt+=pow(2,12) #set bit [HAAS_RAM_WIDTH] (max) = 1
frame=[]
frame.append(129)
frame.extend(bytearray.fromhex('{:04x}'.format(ttt)))
with self._lock:
self.ser.write(frame)
print(("129 trigger time over/under thresh now",256*frame[1]+1*frame[2]-pow(2,12),"and usedownsamplefortriggertot is",usedownsamplefortriggertot))
def getfirmchan(self,chan):
theboard = HAAS_NUM_BOARD-1-int(chan/HAAS_NUM_CHAN_PER_BOARD)
chanonboard = chan%HAAS_NUM_CHAN_PER_BOARD
firmchan=theboard*HAAS_NUM_CHAN_PER_BOARD+chanonboard
return firmchan # the channels are numbered differently in the firmware
def tellSPIsetup(self,what):
time.sleep(.01) #pause to make sure other SPI writng is done
frame=[]
frame.append(131)
myb=bytearray.fromhex('06 10') #default
#SPIsenddata[14:8]=7'h08;//Common mode bias voltages
#SPIsenddata[7:0]=8'b00000000;//off //0x00
#SPIsenddata[7:0]=8'b11111111;//on 0.45V //0xff
#SPIsenddata[7:0]=8'b11001100;//on 0.9V //0xcc
#SPIsenddata[7:0]=8'b10111011;//on 1.35V //0xbb
if what==0: myb=bytearray.fromhex('08 00') #not connected, 0.9V
if what==1: myb=bytearray.fromhex('08 ff') #0.45V
if what==2: myb=bytearray.fromhex('08 dd') #0.75V
if what==3: myb=bytearray.fromhex('08 cc') #0.9V
if what==4: myb=bytearray.fromhex('08 99') #1.05V
if what==5: myb=bytearray.fromhex('08 aa') #1.2V
if what==6: myb=bytearray.fromhex('08 bb') #1.35V
#SPIsenddata[14:8]=7'h06; //Clock Divide/Data Format/Test Pattern
#SPIsenddata[7:0]=8'b01010000;//do test pattern in offset binary // 0x50
#SPIsenddata[7:0]=8'b00010000;//do offset binary //0x10
if what==10: myb=bytearray.fromhex('06 50') #test pattern output
if what==11: myb=bytearray.fromhex('06 10') #offset binary output + no clock divide
if what==12: myb=bytearray.fromhex('06 11') #offset binary output + divide clock by 2
if what==13: myb=bytearray.fromhex('06 12') #offset binary output + divide clock by 4
if what==20: myb=bytearray.fromhex('04 00') #50 Ohm termination chA (default)
if what==21: myb=bytearray.fromhex('05 00') #50 Ohm termination chB (default)
if what==22: myb=bytearray.fromhex('04 1b') #150 Ohm termination chA
if what==23: myb=bytearray.fromhex('05 1b') #150 Ohm termination chB
if what==24: myb=bytearray.fromhex('04 24') #300 Ohm termination chA
if what==25: myb=bytearray.fromhex('05 24') #300 Ohm termination chB
if what==30: myb=bytearray.fromhex('01 02') #multiplexed, with chA first
if what==31: myb=bytearray.fromhex('01 06') #multiplexed, with chB first
if what==32: myb=bytearray.fromhex('01 00') # not multiplexed output
frame.extend(myb)
with self._lock:
self.ser.write(frame)
print(("tell SPI setup: 131 ",myb[0],myb[1]))
time.sleep(.01) #pause to make sure other SPI writng is done
# testBit() returns a nonzero result, 2**offset, if the bit at 'offset' is one.
def testBit(self,int_type, offset):
mask = 1 << offset
return(int_type & mask)
# setBit() returns an integer with the bit at 'offset' set to 1.
def setBit(self,int_type, offset):
mask = 1 << offset
return(int_type | mask)
# clearBit() returns an integer with the bit at 'offset' cleared.
def clearBit(self,int_type, offset):
mask = ~(1 << offset)
return(int_type & mask)
# toggleBit() returns an integer with the bit at 'offset' inverted, 0 -> 1 and 1 -> 0.
def toggleBit(self,int_type, offset):
mask = 1 << offset
return(int_type ^ mask)
def sendi2c(self,whattosend,board=200):
db2=False
time.sleep(.02)
frame=[]
frame.append(136)
myb=bytearray.fromhex(whattosend)
frame.append(len(myb)-1)
frame.extend(myb)
# pad with extra bytes since the command expects a total of 5 bytes (numtosend, addr, and 3 more bytes)
for b in np.arange(4-len(myb)):
frame.append(255)
frame.append(board)
with self._lock:
self.ser.write(frame) #200 (default) will address message to all boards, otherwise only the given board ID will listen
time.sleep(.02)
if db2: print("sendi2c frame:",unpack('%dB' % len(frame), frame))
    def setupi2c(self):
        """Initialize both IO expanders and push the cached register images.

        Order matters: ports are configured as outputs first, then driven with
        the current a20/b20/a21 (and, for old firmware, b21) values. With
        firmware >= 15, port B of IOexp 2 is instead configured as inputs with
        pull-ups enabled.
        """
        self.sendi2c("20 00 00") #port A on IOexp 1 are outputs
        self.sendi2c("20 01 00") #port B on IOexp 1 are outputs
        self.sendi2c("21 00 00") #port A on IOexp 2 are outputs
        self.sendi2c("20 12 "+ ('%0*x' % (2,self.a20)) ) #port A of IOexp 1
        self.sendi2c("20 13 "+ ('%0*x' % (2,self.b20)) ) #port B of IOexp 1
        self.sendi2c("21 12 "+ ('%0*x' % (2,self.a21)) ) #port A of IOexp 2
        if self.minfirmwareversion<15:
            self.sendi2c("21 01 00") #port B on IOexp 2 are outputs
            self.sendi2c("21 13 "+ ('%0*x' % (2,self.b21)) ) #port B of IOexp 2
        else:
            self.sendi2c("21 01 ff") #port B on IOexp 2 are inputs!
            self.sendi2c("21 0d ff") #port B on IOexp 2 enable pull-up resistors!
        #print "portB on IOexp2 are inputs now"
        #print "initialized all i2c ports and set to starting values"
def shutdownadcs(self):
self.b20= int('ff',16) # shdn (set first char to f to turn off) / ac coupling (?)
self.sendi2c("20 13 "+ ('%0*x' % (2,self.b20)) ) #port B of IOexp 1
print("shut down adcs")
    def testi2c(self):
        """Debug helper: exercise one of several i2c paths, chosen by `dotest`.

        dotest=0 blinks all of IOexp 1, 1 sweeps the DAC, 2 toggles LED 3,
        3 toggles pin E24 B7. NOTE(review): setdac is defined elsewhere in
        this file — confirm its signature before enabling dotest=1.
        """
        print("test i2c")
        dotest=3 # what to test
        if dotest==0:
            # IO expander 1
            self.sendi2c("20 12 ff") #turn on all port A of IOexp 1 (12 means A, ff is which of the 8 bits to turn on)
            self.sendi2c("20 13 ff") #turn on all port B of IOexp 1 (13 means B, ff is which of the 8 bits to turn on)
            time.sleep(3)
            self.sendi2c("20 12 00") #turn off all port A of IOexp 1
            self.sendi2c("20 13 00") #turn off all port B of IOexp 1
        elif dotest==1:
            #Test the DAC
            self.setdac(0,0)
            time.sleep(3)
            self.setdac(0,1200)
        elif dotest==2:
            #toggle led 3, at 0x21 a0
            self.a21=self.toggleBit(self.a21,3); self.sendi2c("21 12 "+ ('%0*x' % (2,self.a21)) )
        elif dotest==3:
            #toggle pin E24 B7, at 0x21 b7
            self.b21=self.toggleBit(self.b21,7); self.sendi2c("21 13 "+ ('%0*x' % (2,self.b21)) )
    def toggledousb(self):#toggle whether to read over FT232H USB or not
        """Toggle event readout between serial and USB2 (command 137).

        NOTE(review): self.telltickstowait() is called without its required
        `ds` argument (the method is declared as telltickstowait(self, ds)),
        so this call will raise TypeError — confirm the intended value.
        """
        if len(self.usbser)==0:
            self.dousb=False
            print("usb2 connection not available")
        else:
            self.dousb = not self.dousb
            frame=[]
            frame.append(137)
            with self._lock:
                self.ser.write(frame)
            print(("dousb toggled to",self.dousb))
            if self.dousb: print(("rate theoretically",round(4000000./(HAAS_NUM_BYTES*HAAS_NUM_BOARD+len(HAAS_MAX10ADCCHANS)*HAAS_NSAMP),2),"Hz over USB2"))
            self.telltickstowait()
def togglehighres(self):#toggle whether to do highres averaging during downsampling or not
frame=[]
frame.append(143)
with self._lock:
self.ser.write(frame)
self.dohighres = not self.dohighres
print(("143 do highres is",self.dohighres))
def toggleuseexttrig(self):#toggle whether to use the external trigger input or not
frame=[]
frame.append(144)
with self._lock:
self.ser.write(frame)
def settriggerchan(self,firmchan):
#tell it to trigger or not trigger on a given channel
frame=[]
frame.append(130)
frame.append(firmchan)
with self._lock:
self.ser.write(frame)
def toggleautorearm(self):
self.autorearm = not self.autorearm
frame=[]
#tell it to toggle the auto rearm of the tirgger after readout
frame.append(139)
# prime the trigger one last time
frame.append(100)
with self._lock:
self.ser.write(frame)
if self.db: print((time.time()-self.oldtime,"priming trigger"))
print(("Autorearm is now:",self.autorearm))
def getID(self, n):
debug3=True
frame=[]
frame.append(30+n)
frame.append(142)
with self._lock:
self.ser.write(frame)
num_other_bytes = 8
rslt = self.ser.read(num_other_bytes)
return rslt
    def togglesupergainchan(self,chan):
        """Toggle the x100 "super gain" stage for a channel and refresh the legend.

        supergain[chan]==0 means the x100 stage is ON, 1 means normal gain;
        the legend label also reflects the x10 stage (self.gain). All plot
        updates are skipped when no matplotlib figure is open.
        """
        if len(plt.get_fignums())>0: origline,legline,channum = self.lined[chan]
        if self.supergain[chan]==1:
            self.supergain[chan]=0 #x100 super gain on!
            if len(plt.get_fignums())>0:
                if self.gain[chan]==1:
                    origline.set_label(self.chtext+str(chan)+" x100")
                    self.leg.get_texts()[chan].set_text(self.chtext+str(chan)+" x100")
                else:
                    origline.set_label(self.chtext+str(chan)+" x1000")
                    self.leg.get_texts()[chan].set_text(self.chtext+str(chan)+" x1000")
        else:
            self.supergain[chan]=1 #normal gain
            if len(plt.get_fignums())>0:
                if self.gain[chan]==1:
                    origline.set_label(self.chtext+str(chan))
                    self.leg.get_texts()[chan].set_text(self.chtext+str(chan))
                else:
                    origline.set_label(self.chtext+str(chan)+" x10")
                    self.leg.get_texts()[chan].set_text(self.chtext+str(chan)+" x10")
        # setdacvalue() acts on self.selectedchannel, so select this channel first
        self.selectedchannel=chan
        self.setdacvalue()
        if len(plt.get_fignums())>0: self.figure.canvas.draw()
        print(("Supergain switched for channel",chan,"to",self.supergain[chan]))
    def tellswitchgain(self,chan):
        """Toggle the x10 gain stage of a channel in firmware (command 134).

        gain[chan]==0 means x10 gain is ON, 1 means low gain; the legend label
        also reflects the x100 super-gain stage (self.supergain). Plot updates
        are skipped when no matplotlib figure is open.
        """
        #tell it to switch the gain of a channel
        frame=[]
        frame.append(134)
        firmchan=self.getfirmchan(chan)
        frame.append(firmchan)
        with self._lock:
            self.ser.write(frame)
        if len(plt.get_fignums())>0: origline,legline,channum = self.lined[chan]
        if self.gain[chan]==1:
            self.gain[chan]=0 # x10 gain on!
            if len(plt.get_fignums())>0:
                if self.supergain[chan]==1:
                    origline.set_label(self.chtext+str(chan)+" x10")
                    self.leg.get_texts()[chan].set_text(self.chtext+str(chan)+" x10")
                else:
                    origline.set_label(self.chtext+str(chan)+" x1000")
                    self.leg.get_texts()[chan].set_text(self.chtext+str(chan)+" x1000")
        else:
            self.gain[chan]=1 #low gain
            if len(plt.get_fignums())>0:
                if self.supergain[chan]==1:
                    origline.set_label(self.chtext+str(chan))
                    self.leg.get_texts()[chan].set_text(self.chtext+str(chan))
                else:
                    origline.set_label(self.chtext+str(chan)+" x100")
                    self.leg.get_texts()[chan].set_text(self.chtext+str(chan)+" x100")
        self.selectedchannel=chan # needed for setdacvalue
        self.setdacvalue()
        if len(plt.get_fignums())>0: self.figure.canvas.draw()
        print(("Gain switched for channel",chan,"to",self.gain[chan]))
def toggleoversamp(self,firmchan):
#tell it to toggle oversampling for this channel
frame=[]
frame.append(141)
frame.append(firmchan)
with self._lock:
self.ser.write(frame)
    def overoversamp(self):
        """Toggle over-oversampling (x4) on channel 0 of the selected board.

        Requires plain oversampling to already be enabled on channels 0 and 1;
        dooversample values: 0 off, 1 oversampling, 9 over-oversampling.
        NOTE(review): togglechannel is defined elsewhere in this file.
        """
        if self.selectedchannel%4: print("over-oversampling only for channel 0 of a board!")
        elif self.dooversample[self.selectedchannel]==0 or self.dooversample[self.selectedchannel+1]==0: print("for over-oversampling, first do oversampling on channels 0 and 1 of the board")
        elif self.dooversample[self.selectedchannel]==1: self.dooversample[self.selectedchannel]=9; self.togglechannel(self.selectedchannel+1,True); print("over-oversampling")
        elif self.dooversample[self.selectedchannel]==9: self.dooversample[self.selectedchannel]=1; print("no more over-oversampling")
def resetchans(self):
for chan in np.arange(HAAS_NUM_BOARD*HAAS_NUM_CHAN_PER_BOARD):
if self.gain[chan]==0:
self.tellswitchgain(chan) # set all gains back to low gain
# if self.trigsactive[chan]==0:
# TODO fix this
# self.settriggerchan(chan) # set all trigger channels back to active
if self.dooversample[chan]:
self.oversamp(chan) # set all channels back to no oversampling
def setbacktoserialreadout(self):
if self.dousb:
frame=[]
frame.append(137)
with self._lock:
self.ser.write(frame)
self.dousb=False
print(("dousb set back to",self.dousb))
def telldownsample(self,ds):
#tell it the amount to downsample, log2... so 0 (none), 1(factor 2), 2(factor 4), etc.
frame=[]
frame.append(124)
frame.append(ds)
with self._lock:
self.ser.write(frame)
    def adjustvertical(self,up,amount=10):
        """Nudge the selected channel's DAC level up or down by `amount`.

        The step is scaled by modifier keys (shift x5, control /10) and by the
        channel's gain mode. NOTE(review): amount/=10 is float division in
        Python 3, so steps can become fractional — confirm intended.
        """
        if self.keyShift: amount*=5
        if self.keyControl: amount/=10
        #print "amount is",amount
        if self.gain[self.selectedchannel]: amount*=10 #low gain
        if self.supergain[self.selectedchannel]==0 and self.acdc[self.selectedchannel]: amount=max(1,amount/10) #super gain
        #print "now amount is",amount
        if up:
            self.chanlevel[self.selectedchannel] = self.chanlevel[self.selectedchannel] - amount
        else:
            self.chanlevel[self.selectedchannel] = self.chanlevel[self.selectedchannel] + amount
        self.rememberdacvalue()
        self.setdacvalue()
    def rememberdacvalue(self):
        """Store the selected channel's current DAC level in the matching cache.

        One of eight per-channel arrays is chosen by the channel's gain
        (low/high), super-gain, and AC/DC coupling flags, so the level can be
        restored when the channel returns to that configuration.
        """
        #remember current dac level for the future to the right daclevel, depending on other settings
        if self.gain[self.selectedchannel]: # low gain
            if self.supergain[self.selectedchannel]:
                if self.acdc[self.selectedchannel]: self.lowdaclevel[self.selectedchannel]=self.chanlevel[self.selectedchannel]
                else: self.lowdaclevelac[self.selectedchannel]=self.chanlevel[self.selectedchannel]
            else: #supergain
                if self.acdc[self.selectedchannel]: self.lowdaclevelsuper[self.selectedchannel]=self.chanlevel[self.selectedchannel] #dc super gain
                else: self.lowdaclevelsuperac[self.selectedchannel]=self.chanlevel[self.selectedchannel]
        else: # high gain
            if self.supergain[self.selectedchannel]:
                if self.acdc[self.selectedchannel]: self.highdaclevel[self.selectedchannel]=self.chanlevel[self.selectedchannel]
                else: self.highdaclevelac[self.selectedchannel]=self.chanlevel[self.selectedchannel]
            else: #supergain
                if self.acdc[self.selectedchannel]: self.highdaclevelsuper[self.selectedchannel]=self.chanlevel[self.selectedchannel] #dc super gain
                else: self.highdaclevelsuperac[self.selectedchannel]=self.chanlevel[self.selectedchannel]
    def setacdc(self):
        """Toggle AC/DC coupling for the selected channel via IOexp 1 port B.

        The b20 register image is rebuilt from the acdc flags of all four
        channels on the affected board and sent only to that board.
        """
        chan=self.selectedchannel
        theboard = HAAS_NUM_BOARD-1-int(chan/HAAS_NUM_CHAN_PER_BOARD)
        chanonboard = chan%HAAS_NUM_CHAN_PER_BOARD
        print(("toggling acdc for chan",chan,"which is chan",chanonboard,"on board",theboard))
        self.acdc[int(chan)] = not self.acdc[int(chan)]
        self.b20= int('00',16) # shdn (set first char to 0 to turn on) / ac coupling (set second char to f for DC, 0 for AC)
        for c in range(0,4):
            realchan = (HAAS_NUM_BOARD-1-theboard)*HAAS_NUM_CHAN_PER_BOARD+c
            if self.acdc[int(realchan)]:
                self.b20 = self.toggleBit(self.b20,int(c)) # 1 is dc, 0 is ac
                if self.db: print(("toggling bit",c,"for chan",realchan))
        self.sendi2c("20 13 "+ ('%0*x' % (2,self.b20)), theboard) #port B of IOexp 1, only for the selected board
        self.setdacvalue()
        self.drawtext()
def storecalib(self):
cwd = os.getcwd()
print(("current directory is",cwd))
for board in range(0,HAAS_NUM_BOARD):
self.storecalibforboard(board)
def storecalibforboard(self,board):
sc = board*HAAS_NUM_CHAN_PER_BOARD
print(("storing calibrations for board",board,", channels",sc,"-",sc+4))
c = dict(
boardID=self.uniqueID[board],
lowdaclevels=self.lowdaclevel[sc : sc+4].tolist(),
highdaclevels=self.highdaclevel[sc : sc+4].tolist(),
lowdaclevelssuper=self.lowdaclevelsuper[sc : sc+4].tolist(),
highdaclevelssuper=self.highdaclevelsuper[sc : sc+4].tolist(),
lowdaclevelsac=self.lowdaclevelac[sc : sc+4].tolist(),
highdaclevelsac=self.highdaclevelac[sc : sc+4].tolist(),
lowdaclevelssuperac=self.lowdaclevelsuperac[sc : sc+4].tolist(),
highdaclevelssuperac=self.highdaclevelsuperac[sc : sc+4].tolist(),
firmwareversion=self.minfirmwareversion
)
#print json.dumps(c,indent=4)
fname = "calib/calib_"+self.uniqueID[board]+".json.txt"
json.dump(c,open(fname,'w'),indent=4)
print(("wrote",fname))
#called when sampling is changed, to reset some things
def prepareforsamplechange(self):
self.recordedchannel=[]
if self.doxyplot:
plt.close(self.figxy)
if self.recorddata:
plt.close(self.fig2d)
#will grab the next keys as input
keyResample=False
keysettriggertime=False
keySPI=False
keyi2c=False
keyLevel=False
keyShift=False
keyAlt=False
keyControl=False
    def fittosin(self,xdatanew, ydatanew, chan):
        """Fit a sine to one channel's data and return the fitted frequency.

        Returns 0 when the fit covariance is too large (fit failed). Also
        prints the phase difference relative to the previous good fit, tracked
        in self.oldchanphase.
        """
        res = self.fit_sin(xdatanew, ydatanew)
        phase=res['phase']*180./np.pi
        if res['amp']<0.: phase+=180.
        print(("Chan:",chan, "cov=",res['maxcov'], "amp=",abs(res['amp']), "phase=",phase, "offset=", res['offset'], res['freq']*1000000./self.xscaling,'kHz'))
        if res['maxcov']<1e-4:
            if self.oldchanphase>=0.:
                diff=phase-self.oldchanphase
                if diff<0: diff+=360
                print(("phase diff=",diff))
            self.oldchanphase=phase
            return res['freq']
        else: print("sin fit failed!"); return 0;
#For finding the frequency of a reference sin wave signal, for lockin calculations
def fit_sin(self,tt, yy):
'''Fit sin to the input time sequence, and return fitting parameters "amp", "omega", "phase", "offset", "freq", "period" and "fitfunc"'''
tt = np.array(tt)
yy = np.array(yy)
ff = np.fft.fftfreq(len(tt), (tt[1]-tt[0])) # assume uniform spacing
Fyy = abs(np.fft.fft(yy))
guess_freq = abs(ff[np.argmax(Fyy[1:])+1]) # excluding the zero frequency "peak", which is related to offset
guess_amp = np.std(yy) * 2.**0.5
guess_offset = np.mean(yy)
guess = np.array([guess_amp, 2.*np.pi*guess_freq, 0., guess_offset])
def sinfunc(t, A, w, p, c): return A * np.sin(w*t + p) + c
popt, pcov = scipy.optimize.curve_fit(sinfunc, tt, yy, p0=guess)
A, w, p, c = popt
f = w/(2.*np.pi)
fitfunc = lambda t: A * np.sin(w*t + p) + c
return {"amp": A, "omega": w, "phase": p, "offset": c, "freq": f, "period": 1./f, "fitfunc": fitfunc, "maxcov": np.max(pcov), "rawres": (guess,popt,pcov)}
    def autocalibrate(self,thechan,ydatanew):
        """One step of the DAC auto-calibration state machine for one channel.

        Nudges the channel's DAC level until the event average is near zero
        (coarse step while outside `tol`, fine step while outside `tol2`),
        then advances self.autocalibchannel. After all channels finish,
        self.autocalibgainac steps through the AC/DC and gain combinations
        (0->1 toggles acdc, 1->2 toggles gain, 2->3 toggles acdc back) and
        finally restores gains (and, for old firmware, super gain) and resets
        both counters.
        """
        self.selectedchannel=thechan
        avg = np.average(ydatanew)
        #print avg
        gotonext=False
        tol = 1.0
        tol2 = 0.25
        if self.supergain[self.selectedchannel] or self.gain[self.selectedchannel]: # normal gain or low gain
            tol = 0.3
            tol2 = 0.02
        if avg>0+tol:
            self.adjustvertical(False,10)
        elif avg<0-tol:
            self.adjustvertical(True,10)
        elif avg>0+tol2:
            self.adjustvertical(False,1)
        elif avg<0-tol2:
            self.adjustvertical(True,1)
        else: gotonext=True
        if self.chanlevel[self.selectedchannel]==0: gotonext=True
        if gotonext:
            #go to the next channel, unless we're at the end of all channels
            self.autocalibchannel=self.autocalibchannel+1
            if self.autocalibchannel==HAAS_NUM_CHAN_PER_BOARD*HAAS_NUM_BOARD:
                self.autocalibgainac=self.autocalibgainac+1
                if self.autocalibgainac==1:
                    self.autocalibchannel=0
                    for chan in range(HAAS_NUM_CHAN_PER_BOARD*HAAS_NUM_BOARD):
                        self.selectedchannel=chan
                        self.setacdc()
                elif self.autocalibgainac==2:
                    self.autocalibchannel=0
                    for chan in range(HAAS_NUM_CHAN_PER_BOARD*HAAS_NUM_BOARD):
                        self.selectedchannel=chan
                        self.tellswitchgain(chan)
                elif self.autocalibgainac==3:
                    self.autocalibchannel=0
                    for chan in range(HAAS_NUM_CHAN_PER_BOARD*HAAS_NUM_BOARD):
                        self.selectedchannel=chan
                        self.setacdc()
                else:
                    self.autocalibchannel=-1 #all done
                    self.autocalibgainac=0
                    for chan in range(HAAS_NUM_CHAN_PER_BOARD*HAAS_NUM_BOARD):
                        self.selectedchannel=chan
                        self.tellswitchgain(chan)
                        if self.minfirmwareversion<15: self.togglesupergainchan(chan)
                    print("done with autocalibration \a") # beep!
def handle_main_close(self,evt):
plt.close('all')
def handle_xy_close(self,evt):
self.drawnxy=False
self.doxyplot=False
def handle_persist_close(self,evt):
self.drawn2d=False
self.recorddata=False
def handle_fft_close(self,evt):
self.dofft=False
self.fftdrawn=False
def handle_lockin_close(self,evt):
self.dolockinplot=False
self.lockindrawn=False
def getotherdata(self,board):
debug3=True
frame=[]
frame.append(132)
self.ser.write(frame)
num_other_bytes = 1
rslt = self.ser.read(num_other_bytes)
if len(rslt)==num_other_bytes:
byte_array = unpack('%dB'%len(rslt),rslt) #Convert serial data to array of numbers
if debug3: print(("\n delay counter data",byte_array[0],"from board",board))
#if debug3: print "other data",bin(byte_array[0])
else: print(("getotherdata asked for",num_other_bytes,"delay counter bytes and got",len(rslt)))
frame=[]
frame.append(133)
self.ser.write(frame)
num_other_bytes = 1
rslt = self.ser.read(num_other_bytes)
if len(rslt)==num_other_bytes:
byte_array = unpack('%dB'%len(rslt),rslt) #Convert serial data to array of numbers
if debug3: print((" carry counter data",byte_array[0],"from board",board))
#if debug3: print "other data",bin(byte_array[0])
else: print(("getotherdata asked for",num_other_bytes,"carry counter bytes and got",len(rslt)))
def to_int(self,n): # takes a 32 bit decimal number in two's complement and converts to a binary and then to a signed integer
bin = '{0:32b}'.format(n)
x = int(bin, 2)
if bin[0] == '1': # "sign bit", big-endian
x -= 2**len(bin)
return x
def lockinanalyzedata(self,board):
if self.lockinanalyzedataboard!=board: return False
y2 = self.ydata[2] # channel 2 signal
y3 = self.ydata[3] # channel 3 signal
meany2=np.sum(y2)/HAAS_NUM_SAMPLES
meany3=np.sum(y3)/HAAS_NUM_SAMPLES
y2 = y2-meany2
y3 = y3-meany3
y3shifted = np.roll(y3,self.numtoshift)
res1=y2*y3
res2=y2*y3shifted
r1m=np.sum(res1)
r2m=np.sum(res2)
#print r1m,r2m
r1m/=4096.
r2m/=4096.
ampl = np.sqrt(r1m*r1m+r2m*r2m)
phase = 180.*np.arctan2(r2m,r1m)/np.pi
if self.debuglockin:
print(("no window: ",r1m.round(2), r2m.round(2), self.numtoshift, meany2.round(1),meany3.round(1)))
print((ampl.round(2), phase.round(2), "<------ offline no window"))
lowerwindowedge = self.numtoshift+1
upperwindowedge = HAAS_NUM_SAMPLES-self.numtoshift
if self.debuglockin:
self.ydata[0]= y3shifted+127 # to see on screen, alter self.ydata here
self.ydata[0][0:lowerwindowedge] = np.zeros((lowerwindowedge,), dtype=np.int)+127
self.ydata[0][upperwindowedge:HAAS_NUM_SAMPLES] = np.zeros((HAAS_NUM_SAMPLES-upperwindowedge,), dtype=np.int)+127
y2window = y2[lowerwindowedge:upperwindowedge]
y3window = y3[lowerwindowedge:upperwindowedge]
y3shiftedwindow = y3shifted[lowerwindowedge:upperwindowedge]
res1window=y2window*y3window
res2window=y2window*y3shiftedwindow
r1mwindow=np.sum(res1window)
r2mwindow=np.sum(res2window)
if self.debuglockin: print(("window:",r1mwindow,r2mwindow))
r1mwindow/=4096.
r2mwindow/=4096.
amplwindow = np.sqrt(r1mwindow*r1mwindow+r2mwindow*r2mwindow)
phasewindow = 180.*np.arctan2(r2mwindow,r1mwindow)/np.pi
if self.debuglockin:
print(("with window:",r1mwindow.round(2), r2mwindow.round(2), self.numtoshift, meany2.round(1),meany3.round(1)))
print((amplwindow.round(2), phasewindow.round(2), "<------ offline with window"))
meany2float=np.mean(self.ydata[2])
meany3float=np.mean(self.ydata[3])
y3shiftedfloat = np.roll(self.ydata[3]-meany3float,self.numtoshift)
y2windowfloat = self.ydata[2][lowerwindowedge:upperwindowedge]-meany2float
y3windowfloat = self.ydata[3][lowerwindowedge:upperwindowedge]-meany3float
y3shiftedwindowfloat = y3shiftedfloat[lowerwindowedge:upperwindowedge]
res1windowfloat=y2windowfloat*y3windowfloat
res2windowfloat=y2windowfloat*y3shiftedwindowfloat
r1mwindowfloat=np.sum(res1windowfloat)
r2mwindowfloat=np.sum(res2windowfloat)
#print "windowfloat:",r1mwindowfloat,r2mwindowfloat
r1mwindowfloat/=4096.
r2mwindowfloat/=4096.
amplwindowfloat = np.sqrt(r1mwindowfloat*r1mwindowfloat+r2mwindowfloat*r2mwindowfloat)
phasewindowfloat = 180.*np.arctan2(r2mwindowfloat,r1mwindowfloat)/np.pi
if self.debuglockin:
print(("float with window:",r1mwindowfloat.round(2), r2mwindowfloat.round(2), self.numtoshift, meany2.round(1),meany3.round(1)))
print((amplwindowfloat.round(2), phasewindowfloat.round(2), "<------ offline with window float\n"))
self.lockinampo = amplwindowfloat
self.lockinphaseo = phasewindowfloat
    def getlockindata(self,board):
        """Read the FPGA's on-board lock-in result (16 bytes) from *board*.

        The reply packs four little-endian 32-bit words: the two correlator
        sums (two's complement, decoded with to_int) and the channel 2/3
        means.  Stores amplitude/phase in self.lockinamp / self.lockinphase
        when *board* is the lock-in board.
        """
        rslt = self.ser.read(16)
        byte_array = unpack('%dB'%len(rslt),rslt) #Convert serial data to array of numbers
        if len(rslt)==16:
            # Assemble little-endian 32-bit words from the 16 raw bytes.
            r1_fpga = (256*256*256*byte_array[3]+256*256*byte_array[2]+256*byte_array[1]+byte_array[0])
            r2_fpga = (256*256*256*byte_array[7]+256*256*byte_array[6]+256*byte_array[5]+byte_array[4])
            # Correlator sums are signed -- reinterpret two's complement.
            r1_fpga = self.to_int(r1_fpga)
            r2_fpga = self.to_int(r2_fpga)
            # Channel means (printed for debugging only below).
            mean_c2 = (256*256*256*byte_array[11]+256*256*byte_array[10]+256*byte_array[9]+byte_array[8])
            mean_c3 = (256*256*256*byte_array[15]+256*256*byte_array[14]+256*byte_array[13]+byte_array[12])
            if self.debuglockin:
                print((byte_array[0:4], r1_fpga))
                print((byte_array[4:8], r2_fpga))
                print((byte_array[8:12], mean_c2))
                print((byte_array[12:16], mean_c3))
            # Same 1/4096 scaling as the offline analysis in lockinanalyzedata.
            r1_fpga/=4096.
            r2_fpga/=4096.
            ampl_fpga = np.sqrt(r1_fpga*r1_fpga+r2_fpga*r2_fpga)
            phase_fpga = 180.*np.arctan2(r2_fpga,r1_fpga)/np.pi
            if self.lockinanalyzedataboard==board:
                self.lockinamp = ampl_fpga
                self.lockinphase = phase_fpga
            if False:  # debug print, disabled
                print((ampl_fpga.round(2), phase_fpga.round(2), "<------ fpga "))
        else: print(("getdata asked for",16,"lockin bytes and got",len(rslt),"from board",board))
usbsermap=[]
    def makeusbsermap(self): # figure out which board is connected to which USB 2 connection
        """Probe each board over serial and record which USB2 link returns its data.

        Fills self.usbsermap (board index -> index into self.usbser).
        Returns False when there are fewer USB connections than boards.
        """
        self.usbsermap=np.zeros(HAAS_NUM_BOARD, dtype=int)
        if len(self.usbser)<HAAS_NUM_BOARD:
            print("Not a USB2 connection for each board!")
            return False
        if len(self.usbser)>1:
            for usb in np.arange(HAAS_NUM_BOARD): self.usbser[usb].timeout=.5 # lower the timeout on the connections, temporarily
            foundusbs=[]
            for bn in np.arange(HAAS_NUM_BOARD):
                # Command 100 then 10+bn: ask board bn to send an event.
                frame=[]
                frame.append(100)
                frame.append(10+bn)
                with self._lock:
                    self.ser.write(frame)
                    for usb in np.arange(len(self.usbser)):
                        if not usb in foundusbs: # it's not already known that this usb connection is assigned to a board
                            rslt = self.usbser[usb].read(HAAS_NUM_BYTES) # try to get data from the board
                            if len(rslt)==HAAS_NUM_BYTES:
                                # This usb link produced a full event -> it belongs to board bn.
                                self.usbsermap[bn]=usb
                                foundusbs.append(usb) # remember that we already have figured out which board this usb connection is for, so we don't bother trying again for another board
                                break # already found which board this usb connection is used for, so bail out
            for usb in np.arange(HAAS_NUM_BOARD): self.usbser[usb].timeout=self.sertimeout # put back the timeout on the connections
        print(("usbsermap is",self.usbsermap))
        return True
timedout = False
def getdata(self,board):
frame=[]
frame.append(10+board)
self.ser.write(frame)
if self.db: print((time.time()-self.oldtime,"asked for data from board",board))
if self.dolockin: self.getlockindata(board)
if self.dousb:
#try:
rslt = self.usbser[self.usbsermap[board]].read(HAAS_NUM_BYTES)
#usbser.flushInput() #just in case
#except serial.SerialException: pass
else:
rslt = self.ser.read(HAAS_NUM_BYTES)
#ser.flushInput() #just in case
if self.db: print((time.time()-self.oldtime,"getdata wanted",HAAS_NUM_BYTES,"bytes and got",len(rslt),"from board",board))
byte_array = unpack('%dB'%len(rslt),rslt) #Convert serial data to array of numbers
if len(rslt)==HAAS_NUM_BYTES:
self.timedout = False
db2=False #True
if db2: print((byte_array[1:11]))
self.ydata=np.reshape(byte_array,(HAAS_NUM_CHAN_PER_BOARD,HAAS_NUM_SAMPLES))
self.hos.TryOversample(board, self.ydata)
# if self.dooversample[HAAS_NUM_CHAN_PER_BOARD*(HAAS_NUM_BOARD-board-1)]: self.oversample(0,2)
# if self.dooversample[HAAS_NUM_CHAN_PER_BOARD*(HAAS_NUM_BOARD-board-1)+1]: self.oversample(1,3)
# if self.dooversample[HAAS_NUM_CHAN_PER_BOARD*(HAAS_NUM_BOARD-board-1)]==9: self.overoversample(0,1)
if self.average:
for c in np.arange(HAAS_NUM_CHAN_PER_BOARD):
for i in np.arange(HAAS_NUM_SAMPLES/2):
val=(self.ydata[c][2*i]+self.ydata[c][2*i+1])/2
self.ydata[c][2*i]=val; self.ydata[c][2*i+1]=val;
else:
self.timedout = True
if not self.db and self.rollingtrigger: print(("getdata asked for",HAAS_NUM_BYTES,"bytes and got",len(rslt),"from board",board))
if len(rslt)>0 and self.rollingtrigger: print((byte_array[0:10]))
if self.dologicanalyzer:
#get extra logic analyzer data, if needed
logicbytes=HAAS_NUM_BYTES/4
if self.dousb:
#try:
rslt = self.usbser[self.usbsermap[board]].read(logicbytes)
#usbser.flushInput() #just in case
#except serial.SerialException: pass
else:
rslt = self.ser.read(logicbytes)
#ser.flushInput() #just in case
if self.db: print((time.time()-self.oldtime,"getdata wanted",logicbytes,"logic bytes and got",len(rslt),"from board",board))
byte_array = unpack('%dB'%len(rslt),rslt) #Convert serial data to array of numbers
if len(rslt)==logicbytes:
db2=False #True
if db2: print((byte_array[1:11]))
self.ydatalogic=np.reshape(byte_array,(1,HAAS_NUM_SAMPLES))
else:
if not self.db and self.rollingtrigger: print(("getdata asked for",HAAS_NUM_BYTES,"logic bytes and got",len(rslt),"from board",board))
if len(rslt)>0 and self.rollingtrigger: print((byte_array[0:10]))
    def getmax10adc(self,bn):
        """Read the slow (1 MHz) Max10 ADC channels belonging to board *bn*.

        Each channel reply is HAAS_NSAMP 12-bit samples sent as two bytes
        (low byte first); results land in successive rows of
        self.ysampdata, indexed by self.max10adcchan.
        """
        # Channels configured for this board (pairs of (board, chan)).
        chansthisboard = [(x,y) for (x,y) in HAAS_MAX10ADCCHANS if x==bn]
        if self.db: print((time.time()-self.oldtime,"getting",chansthisboard))
        for chans in chansthisboard:
            chan=chans[1]
            #chan: 110=ain1, 111=pin6, ..., 118=pin14, 119=temp
            frame=[]
            frame.append(chan)
            self.ser.write(frame)
            if self.db: print((time.time()-self.oldtime,"getting max10adc chan",chan,"for bn",bn))
            rslt = self.ser.read(HAAS_NSAMP*2) #read N bytes (2 per sample)
            if self.db: print((time.time()-self.oldtime,"getmax10adc got bytes:",len(rslt)))
            if len(rslt)!=(HAAS_NSAMP*2):
                print((time.time()-self.oldtime,"getmax10adc got bytes:",len(rslt),"for board",bn,"and chan",chan))
                return
            byte_array = unpack('%dB'%len(rslt),rslt) #Convert serial data to array of numbers
            db2=False #True #False
            # Combine byte pairs into 12-bit codes, then scale down by 16.
            # NOTE(review): the in-place /=16 assumes self.ysampdata rows have a
            # dtype that accepts true division -- confirm where ysampdata is made.
            self.ysampdata[self.max10adcchan-1]=np.add(np.multiply(256,byte_array[1:2*HAAS_NSAMP:2]),byte_array[0:2*HAAS_NSAMP:2])
            self.ysampdata[self.max10adcchan-1]/=16
            if db2:
                for samp in np.arange(10):
                    code=256*byte_array[1+2*samp]+byte_array[2*samp]
                    self.ysampdata[self.max10adcchan-1][samp]=code/16
                    if chan==119:
                        # Channel 119 is the temperature sensor; polynomial converts code to C.
                        temp=-3.056e-4*code*code+1.763*code-2325.049
                        print((samp,chan,code,round(temp,1),"C",round(temp*1.8+32,1),"F"))
                    else: print((samp,chan,code,round( (3.3*code)/pow(2,12) ,4),"V"))
            # TODO: Add drawing the plots.
            self.max10adcchan+=1
oldtime=time.time()
oldtime2=time.time()
def rearm(self):
if self.db: print((time.time()-self.oldtime,"priming trigger"))
frame=[]
frame.append(100)
self.ser.write(frame)
    def getchannels(self):
        """Read one event from every board and publish it to the message queue.

        Per board: fetch the fast-ADC event, the slow Max10 ADC channels,
        optional extra data (TDC, FFT, lock-in), then publish the ydata.
        Roughly once per second it also triggers a measurement-text redraw
        and (firmware >= 15) polls the DPDT switch positions.
        Returns True, or False when lock-in debugging is misconfigured.
        """
        self.max10adcchan=1
        for bn in np.arange(HAAS_NUM_BOARD):
            if self.db: print((time.time()-self.oldtime,"getting board",bn))
            self.getdata(bn) #this sets all boards before this board into serial passthrough mode, so this and following calls for data will go to this board and then travel back over serial
            self.getmax10adc(bn) # get data from 1 MHz Max10 ADC channels
            if self.dogetotherdata: self.getotherdata(bn) # get other data, like TDC info, or other bytes
            if self.dofft: self.plot_fft(bn) #do the FFT plot
            if self.dolockin and self.debuglockin:
                # Offline lock-in debugging only works with full-length events.
                if HAAS_SENDINCREMENT==0: self.lockinanalyzedata(bn)
                else: print("you need to set HAAS_SENDINCREMENT = 0 first before debugging lockin info"); return False
            if self.dolockin and self.dolockinplot: self.plot_lockin()
            # Hand the fresh event to the GUI via the message queue.
            msg = mq.Message({
                'id': MSG_ID_YDATA,
                'ydata': self.ydata,
                'bn': bn
            })
            self.mq_publisher.publish(msg)
            if self.db: print((time.time()-self.oldtime,"done with board",bn))
        if self.domaindrawing and self.domeasure:
            # Throttle the measurement-text redraw to about once per second.
            thetime=time.time()
            elapsedtime=thetime-self.oldtime
            if elapsedtime>1.0:
                msg = mq.Message({
                    'id': MSG_ID_DRAWTEXT
                })
                self.mq_publisher.publish(msg)
                self.oldtime=thetime
        if self.minfirmwareversion>=15: #v9.0 and up
            # Poll the DPDT switch positions about once per second.
            thetime2=time.time()
            elapsedtime=thetime2-self.oldtime2
            if elapsedtime>1.0:
                if not self.havereadswitchdata: self.switchpos = [0] * HAAS_NUM_BOARD
                for b in range(HAAS_NUM_BOARD): self.getswitchdata(b) #gets the dpdt switch positions
                self.havereadswitchdata=True
                self.oldtime2=thetime2
        return True
#get the positions of the dpdt switches from IO expander 2B, and then take action (v9.0 and up!)
havereadswitchdata=False
    def getswitchdata(self,board):
        """Read the DPDT switch byte for *board* from IO expander 2B and react.

        Bits 0-3 are the 50 Ohm / 1 MOhm termination switches, bits 4-7 the
        super/normal-gain switches for the board's four channels.  When a
        gain bit changed (or on the first ever read), the software gain
        state is toggled to match the physical switch.
        """
        #for i in range(2): #twice because the first time just reads it into the board's fpga
        frame=[]
        frame.append(30+board)
        frame.append(146)
        frame.append(33)
        frame.append(board)
        self.ser.write(frame)
        rslt = self.ser.read(1)
        if len(rslt)>0:# and i==1:
            byte_array = unpack('%dB'%len(rslt),rslt)
            newswitchpos=byte_array[0]
            # Only act when the byte changed, or on the very first read.
            if newswitchpos!=self.switchpos[board] or not self.havereadswitchdata:
                for b in range(8):
                    if self.testBit(newswitchpos,b) != self.testBit(self.switchpos[board],b) or not self.havereadswitchdata:
                        #switch 0-3 is 50/1M Ohm termination on channels 0-3, on is 1M, off is 50
                        #switch 4-7 is super/normal gain on channels 0-3, on is super, off is normal
                        if b>=4:
                            # Map switch bit to global channel number (boards are ordered back to front).
                            thechan=b-4+(HAAS_NUM_BOARD-board-1)*HAAS_NUM_CHAN_PER_BOARD
                            # Bring the software supergain flag in line with the physical switch.
                            if self.supergain[thechan] and self.testBit(newswitchpos,b)>0:
                                self.togglesupergainchan(thechan)
                            if not self.supergain[thechan] and not self.testBit(newswitchpos,b)>0:
                                self.togglesupergainchan(thechan)
                self.switchpos[board] = newswitchpos
    def thread_function(self):
        """Acquisition loop run on the background reader thread.

        While thread_running stays True, (re)arms the trigger and reads all
        channels.  The work is done under self._lock so serial commands from
        other threads cannot interleave with an event readout; the short
        sleep lets those threads grab the lock between events.
        """
        while self.thread_running:
            with self._lock:
                if not self.autorearm:
                    self.rearm()
                self.getchannels()
            time.sleep(0.01)
    #initialization
    def init(self):
        """Initialize the board chain: firmware query, ADC/SPI setup, readout mode.

        Sends the board-count handshake, finds the minimum firmware version
        across boards (which gates several features), configures sample
        counts, trigger, SPI/ADC settings and I2C, then switches to USB2
        readout when available.  Returns True on success, False when the
        USB-to-board mapping could not be established.
        """
        frame=[]
        frame.append(0)
        frame.append(20+(HAAS_NUM_BOARD-1))
        self.ser.write(frame)
        for b in range(HAAS_NUM_BOARD):
            firmwareversion = self.getfirmwareversion(b)
            if firmwareversion<self.minfirmwareversion: self.minfirmwareversion=firmwareversion
        print(("minimum firmwareversion of all boards is",self.minfirmwareversion))
        self.maxdownsample=15 # slowest I can run
        if self.minfirmwareversion>=5: #updated firmware
            self.maxdownsample=15 +(12-HAAS_RAM_WIDTH) # slowest I can run (can add 12-HAAS_RAM_WIDTH when using newer firmware)
        self.tellsamplessend(HAAS_NUM_SAMPLES*pow(2,HAAS_SENDINCREMENT))
        self.tellsamplesmax10adc(HAAS_NSAMP)
        self.tellbytesskip(HAAS_SENDINCREMENT)
        self.togglehighres()
        self.settriggertime(self.triggertimethresh)
        self.tellserialdelaytimerwait()
        self.tellSPIsetup(0) #0.9V CM but not connected
        self.tellSPIsetup(11) #offset binary output
        self.tellSPIsetup(24) #300 Ohm termination ChA
        self.tellSPIsetup(25) #300 Ohm termination ChB
        self.tellSPIsetup(32) # non-multiplexed output (less noise)
        self.setupi2c() # sets all ports to be outputs
        self.toggledousb() # switch to USB2 connection for readout of events, if available
        if self.dousb:
            if not self.makeusbsermap(): return False # figure out which usb connection has which board's data
        self.domeasure=self.domaindrawing #by default we will calculate measurements if we are drawing
        return True
def StartDataThread(self):
self.x = threading.Thread(target=self.thread_function)
self.x.start()
self.thread_running=True
    #cleanup
    def cleanup(self):
        """Shut down the reader thread, restore board state, close connections.

        Stops the acquisition thread first so no readout can race the
        teardown, then disables auto-rearm, high-res and logic-analyzer
        modes, shuts down the ADCs and closes every serial/USB port.
        Serial failures are reported but not raised.
        """
        if self.thread_running:
            self.thread_running=False
            self.x.join()
        try:
            if self.autorearm: self.toggleautorearm()
        except SerialException:
            print("failed to talk to board when cleaning up!")
            print("bye bye from serial")
            return
        try:
            self.setbacktoserialreadout()
            self.resetchans()
            if self.dohighres: self.togglehighres()
            if self.dologicanalyzer: self.setlogicanalyzer(False)
            if self.serport!="" and hasattr(self,'ser'):
                self.shutdownadcs()
                for p in self.usbser: p.close()
                self.ser.close()
        except SerialException:
            print("failed to talk to board when cleaning up!")
    #For setting up serial and USB connections
    def setup_connections(self):
        """Discover and open the control serial port and the USB2 data ports.

        Scans the available COM ports, auto-detects the control port by its
        CH340 USB id ('1A86:7523') and the per-board USB2 data ports by
        description, then opens them.  Returns True on success, False when
        no control port could be opened.
        """
        # Effective baud rate given the inter-chunk delay the firmware inserts.
        # NOTE(review): adjustedbrate is computed but never used below -- confirm
        # whether it is read elsewhere or is leftover from a removed print.
        adjustedbrate=1./(1./self.brate+2.*self.serialdelaytimerwait*1.e-6/(32.*11.)) # delay of 2*serialdelaytimerwait microseconds every 32*11 bits
        ports = list(serial.tools.list_ports.comports()); ports.sort(reverse=True)
        autofindusbports = len(self.usbport)==0
        # NOTE(review): 'or True' makes this listing unconditional -- looks like
        # a debugging leftover; confirm before removing.
        if self.serport=="" or True:
            for port_no, description, address in ports: print((port_no,":",description,":",address))
        for port_no, description, address in ports:
            if self.serport=="":
                if '1A86:7523' in address or '1a86:7523' in address: self.serport = port_no
            if autofindusbports:
                if "USB Serial" in description or "Haasoscope" in description: self.usbport.append(port_no)
        if self.serport!="":
            try:
                self.ser = Serial(self.serport,self.brate,timeout=self.sertimeout,stopbits=2)
            except SerialException:
                print(("Could not open",self.serport,"!")); return False
            print(("connected serial to",self.serport,", timeout",self.sertimeout,"seconds"))
        else: self.ser=""
        for p in self.usbport:
            self.usbser.append(Serial(p,timeout=self.sertimeout))
            print(("connected USBserial to",p,", timeout",self.sertimeout,"seconds"))
        if self.serport=="": print("No serial COM port opened!"); return False
        return True
| [
"numpy.sum",
"numpy.arctan2",
"numpy.argmax",
"numpy.mean",
"matplotlib.pyplot.get_backend",
"numpy.arange",
"numpy.sin",
"serial.Serial",
"numpy.multiply",
"numpy.std",
"matplotlib.pyplot.close",
"numpy.fft.fft",
"os.uname",
"message_queue.Adapter",
"threading.Lock",
"numpy.max",
"n... | [((725, 749), 'matplotlib.use', 'matplotlib.use', (['"""Qt4Agg"""'], {}), "('Qt4Agg')\n", (739, 749), False, 'import matplotlib\n'), ((47662, 47673), 'time.time', 'time.time', ([], {}), '()\n', (47671, 47673), False, 'import time, json, os\n'), ((47687, 47698), 'time.time', 'time.time', ([], {}), '()\n', (47696, 47698), False, 'import time, json, os\n'), ((355, 365), 'os.uname', 'os.uname', ([], {}), '()\n', (363, 365), False, 'import time, json, os\n'), ((909, 926), 'matplotlib.pyplot.get_backend', 'plt.get_backend', ([], {}), '()\n', (924, 926), True, 'import matplotlib.pyplot as plt\n'), ((2663, 2687), 'message_queue.Adapter', 'mq.Adapter', (['"""main_queue"""'], {}), "('main_queue')\n", (2673, 2687), True, 'import message_queue as mq\n'), ((2716, 2745), 'message_queue.Publisher', 'mq.Publisher', (['self.mq_adapter'], {}), '(self.mq_adapter)\n', (2728, 2745), True, 'import message_queue as mq\n'), ((6038, 6054), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (6052, 6054), False, 'import threading\n'), ((9607, 9618), 'time.time', 'time.time', ([], {}), '()\n', (9616, 9618), False, 'import time, json, os\n'), ((12775, 12791), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (12785, 12791), False, 'import time, json, os\n'), ((15918, 15934), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (15928, 15934), False, 'import time, json, os\n'), ((24480, 24531), 'numpy.arange', 'np.arange', (['(HAAS_NUM_BOARD * HAAS_NUM_CHAN_PER_BOARD)'], {}), '(HAAS_NUM_BOARD * HAAS_NUM_CHAN_PER_BOARD)\n', (24489, 24531), True, 'import numpy as np\n'), ((28415, 28426), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (28424, 28426), False, 'import time, json, os\n'), ((30977, 30989), 'numpy.array', 'np.array', (['tt'], {}), '(tt)\n', (30985, 30989), True, 'import numpy as np\n'), ((31003, 31015), 'numpy.array', 'np.array', (['yy'], {}), '(yy)\n', (31011, 31015), True, 'import numpy as np\n'), ((31312, 31323), 'numpy.mean', 'np.mean', (['yy'], {}), '(yy)\n', 
(31319, 31323), True, 'import numpy as np\n'), ((31340, 31406), 'numpy.array', 'np.array', (['[guess_amp, 2.0 * np.pi * guess_freq, 0.0, guess_offset]'], {}), '([guess_amp, 2.0 * np.pi * guess_freq, 0.0, guess_offset])\n', (31348, 31406), True, 'import numpy as np\n'), ((31907, 31927), 'numpy.average', 'np.average', (['ydatanew'], {}), '(ydatanew)\n', (31917, 31927), True, 'import numpy as np\n'), ((34095, 34111), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (34104, 34111), True, 'import matplotlib.pyplot as plt\n'), ((36246, 36274), 'numpy.roll', 'np.roll', (['y3', 'self.numtoshift'], {}), '(y3, self.numtoshift)\n', (36253, 36274), True, 'import numpy as np\n'), ((36331, 36343), 'numpy.sum', 'np.sum', (['res1'], {}), '(res1)\n', (36337, 36343), True, 'import numpy as np\n'), ((36356, 36368), 'numpy.sum', 'np.sum', (['res2'], {}), '(res2)\n', (36362, 36368), True, 'import numpy as np\n'), ((36445, 36475), 'numpy.sqrt', 'np.sqrt', (['(r1m * r1m + r2m * r2m)'], {}), '(r1m * r1m + r2m * r2m)\n', (36452, 36475), True, 'import numpy as np\n'), ((37452, 37470), 'numpy.sum', 'np.sum', (['res1window'], {}), '(res1window)\n', (37458, 37470), True, 'import numpy as np\n'), ((37489, 37507), 'numpy.sum', 'np.sum', (['res2window'], {}), '(res2window)\n', (37495, 37507), True, 'import numpy as np\n'), ((37647, 37701), 'numpy.sqrt', 'np.sqrt', (['(r1mwindow * r1mwindow + r2mwindow * r2mwindow)'], {}), '(r1mwindow * r1mwindow + r2mwindow * r2mwindow)\n', (37654, 37701), True, 'import numpy as np\n'), ((38029, 38051), 'numpy.mean', 'np.mean', (['self.ydata[2]'], {}), '(self.ydata[2])\n', (38036, 38051), True, 'import numpy as np\n'), ((38072, 38094), 'numpy.mean', 'np.mean', (['self.ydata[3]'], {}), '(self.ydata[3])\n', (38079, 38094), True, 'import numpy as np\n'), ((38120, 38173), 'numpy.roll', 'np.roll', (['(self.ydata[3] - meany3float)', 'self.numtoshift'], {}), '(self.ydata[3] - meany3float, self.numtoshift)\n', (38127, 38173), True, 'import numpy 
as np\n'), ((38550, 38573), 'numpy.sum', 'np.sum', (['res1windowfloat'], {}), '(res1windowfloat)\n', (38556, 38573), True, 'import numpy as np\n'), ((38597, 38620), 'numpy.sum', 'np.sum', (['res2windowfloat'], {}), '(res2windowfloat)\n', (38603, 38620), True, 'import numpy as np\n'), ((38767, 38841), 'numpy.sqrt', 'np.sqrt', (['(r1mwindowfloat * r1mwindowfloat + r2mwindowfloat * r2mwindowfloat)'], {}), '(r1mwindowfloat * r1mwindowfloat + r2mwindowfloat * r2mwindowfloat)\n', (38774, 38841), True, 'import numpy as np\n'), ((40971, 41006), 'numpy.zeros', 'np.zeros', (['HAAS_NUM_BOARD'], {'dtype': 'int'}), '(HAAS_NUM_BOARD, dtype=int)\n', (40979, 41006), True, 'import numpy as np\n'), ((47940, 47965), 'numpy.arange', 'np.arange', (['HAAS_NUM_BOARD'], {}), '(HAAS_NUM_BOARD)\n', (47949, 47965), True, 'import numpy as np\n'), ((54149, 54194), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.thread_function'}), '(target=self.thread_function)\n', (54165, 54194), False, 'import threading\n'), ((15070, 15086), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (15080, 15086), False, 'import time, json, os\n'), ((16456, 16472), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (16466, 16472), False, 'import time, json, os\n'), ((18074, 18087), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (18084, 18087), False, 'import time, json, os\n'), ((29768, 29789), 'matplotlib.pyplot.close', 'plt.close', (['self.figxy'], {}), '(self.figxy)\n', (29777, 29789), True, 'import matplotlib.pyplot as plt\n'), ((29830, 29851), 'matplotlib.pyplot.close', 'plt.close', (['self.fig2d'], {}), '(self.fig2d)\n', (29839, 29851), True, 'import matplotlib.pyplot as plt\n'), ((31113, 31127), 'numpy.fft.fft', 'np.fft.fft', (['yy'], {}), '(yy)\n', (31123, 31127), True, 'import numpy as np\n'), ((31268, 31278), 'numpy.std', 'np.std', (['yy'], {}), '(yy)\n', (31274, 31278), True, 'import numpy as np\n'), ((31766, 31778), 'numpy.max', 'np.max', (['pcov'], {}), '(pcov)\n', 
(31772, 31778), True, 'import numpy as np\n'), ((36109, 36119), 'numpy.sum', 'np.sum', (['y2'], {}), '(y2)\n', (36115, 36119), True, 'import numpy as np\n'), ((36152, 36162), 'numpy.sum', 'np.sum', (['y3'], {}), '(y3)\n', (36158, 36162), True, 'import numpy as np\n'), ((40362, 40408), 'numpy.sqrt', 'np.sqrt', (['(r1_fpga * r1_fpga + r2_fpga * r2_fpga)'], {}), '(r1_fpga * r1_fpga + r2_fpga * r2_fpga)\n', (40369, 40408), True, 'import numpy as np\n'), ((41189, 41214), 'numpy.arange', 'np.arange', (['HAAS_NUM_BOARD'], {}), '(HAAS_NUM_BOARD)\n', (41198, 41214), True, 'import numpy as np\n'), ((41343, 41368), 'numpy.arange', 'np.arange', (['HAAS_NUM_BOARD'], {}), '(HAAS_NUM_BOARD)\n', (41352, 41368), True, 'import numpy as np\n'), ((42528, 42553), 'numpy.arange', 'np.arange', (['HAAS_NUM_BOARD'], {}), '(HAAS_NUM_BOARD)\n', (42537, 42553), True, 'import numpy as np\n'), ((43684, 43751), 'numpy.reshape', 'np.reshape', (['byte_array', '(HAAS_NUM_CHAN_PER_BOARD, HAAS_NUM_SAMPLES)'], {}), '(byte_array, (HAAS_NUM_CHAN_PER_BOARD, HAAS_NUM_SAMPLES))\n', (43694, 43751), True, 'import numpy as np\n'), ((48805, 48868), 'message_queue.Message', 'mq.Message', (["{'id': MSG_ID_YDATA, 'ydata': self.ydata, 'bn': bn}"], {}), "({'id': MSG_ID_YDATA, 'ydata': self.ydata, 'bn': bn})\n", (48815, 48868), True, 'import message_queue as mq\n'), ((49201, 49212), 'time.time', 'time.time', ([], {}), '()\n', (49210, 49212), False, 'import time, json, os\n'), ((49608, 49619), 'time.time', 'time.time', ([], {}), '()\n', (49617, 49619), False, 'import time, json, os\n'), ((51990, 52006), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (52000, 52006), False, 'import time, json, os\n'), ((18317, 18330), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (18327, 18330), False, 'import time, json, os\n'), ((20822, 20839), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (20837, 20839), True, 'import matplotlib.pyplot as plt\n'), ((21934, 21951), 
'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (21949, 21951), True, 'import matplotlib.pyplot as plt\n'), ((22336, 22353), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (22351, 22353), True, 'import matplotlib.pyplot as plt\n'), ((23459, 23476), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (23474, 23476), True, 'import matplotlib.pyplot as plt\n'), ((36491, 36511), 'numpy.arctan2', 'np.arctan2', (['r2m', 'r1m'], {}), '(r2m, r1m)\n', (36501, 36511), True, 'import numpy as np\n'), ((37001, 37043), 'numpy.zeros', 'np.zeros', (['(lowerwindowedge,)'], {'dtype': 'np.int'}), '((lowerwindowedge,), dtype=np.int)\n', (37009, 37043), True, 'import numpy as np\n'), ((37110, 37171), 'numpy.zeros', 'np.zeros', (['(HAAS_NUM_SAMPLES - upperwindowedge,)'], {'dtype': 'np.int'}), '((HAAS_NUM_SAMPLES - upperwindowedge,), dtype=np.int)\n', (37118, 37171), True, 'import numpy as np\n'), ((37723, 37755), 'numpy.arctan2', 'np.arctan2', (['r2mwindow', 'r1mwindow'], {}), '(r2mwindow, r1mwindow)\n', (37733, 37755), True, 'import numpy as np\n'), ((38868, 38910), 'numpy.arctan2', 'np.arctan2', (['r2mwindowfloat', 'r1mwindowfloat'], {}), '(r2mwindowfloat, r1mwindowfloat)\n', (38878, 38910), True, 'import numpy as np\n'), ((44188, 44222), 'numpy.arange', 'np.arange', (['HAAS_NUM_CHAN_PER_BOARD'], {}), '(HAAS_NUM_CHAN_PER_BOARD)\n', (44197, 44222), True, 'import numpy as np\n'), ((45550, 45595), 'numpy.reshape', 'np.reshape', (['byte_array', '(1, HAAS_NUM_SAMPLES)'], {}), '(byte_array, (1, HAAS_NUM_SAMPLES))\n', (45560, 45595), True, 'import numpy as np\n'), ((46876, 46924), 'numpy.multiply', 'np.multiply', (['(256)', 'byte_array[1:2 * HAAS_NSAMP:2]'], {}), '(256, byte_array[1:2 * HAAS_NSAMP:2])\n', (46887, 46924), True, 'import numpy as np\n'), ((47052, 47065), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (47061, 47065), True, 'import numpy as np\n'), ((49312, 49347), 'message_queue.Message', 'mq.Message', 
(["{'id': MSG_ID_DRAWTEXT}"], {}), "({'id': MSG_ID_DRAWTEXT})\n", (49322, 49347), True, 'import message_queue as mq\n'), ((56233, 56302), 'serial.Serial', 'Serial', (['self.serport', 'self.brate'], {'timeout': 'self.sertimeout', 'stopbits': '(2)'}), '(self.serport, self.brate, timeout=self.sertimeout, stopbits=2)\n', (56239, 56302), False, 'from serial import Serial, SerialException\n'), ((56591, 56625), 'serial.Serial', 'Serial', (['p'], {'timeout': 'self.sertimeout'}), '(p, timeout=self.sertimeout)\n', (56597, 56625), False, 'from serial import Serial, SerialException\n'), ((375, 385), 'os.uname', 'os.uname', ([], {}), '()\n', (383, 385), False, 'import time, json, os\n'), ((410, 420), 'os.uname', 'os.uname', ([], {}), '()\n', (418, 420), False, 'import time, json, os\n'), ((20999, 21016), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (21014, 21016), True, 'import matplotlib.pyplot as plt\n'), ((21479, 21496), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (21494, 21496), True, 'import matplotlib.pyplot as plt\n'), ((22497, 22514), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (22512, 22514), True, 'import matplotlib.pyplot as plt\n'), ((22972, 22989), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (22987, 22989), True, 'import matplotlib.pyplot as plt\n'), ((31157, 31175), 'numpy.argmax', 'np.argmax', (['Fyy[1:]'], {}), '(Fyy[1:])\n', (31166, 31175), True, 'import numpy as np\n'), ((31450, 31467), 'numpy.sin', 'np.sin', (['(w * t + p)'], {}), '(w * t + p)\n', (31456, 31467), True, 'import numpy as np\n'), ((31626, 31643), 'numpy.sin', 'np.sin', (['(w * t + p)'], {}), '(w * t + p)\n', (31632, 31643), True, 'import numpy as np\n'), ((40437, 40465), 'numpy.arctan2', 'np.arctan2', (['r2_fpga', 'r1_fpga'], {}), '(r2_fpga, r1_fpga)\n', (40447, 40465), True, 'import numpy as np\n'), ((44253, 44284), 'numpy.arange', 'np.arange', (['(HAAS_NUM_SAMPLES / 2)'], {}), 
'(HAAS_NUM_SAMPLES / 2)\n', (44262, 44284), True, 'import numpy as np\n'), ((20393, 20404), 'time.time', 'time.time', ([], {}), '()\n', (20402, 20404), False, 'import time, json, os\n'), ((42861, 42872), 'time.time', 'time.time', ([], {}), '()\n', (42870, 42872), False, 'import time, json, os\n'), ((43320, 43331), 'time.time', 'time.time', ([], {}), '()\n', (43329, 43331), False, 'import time, json, os\n'), ((45977, 45988), 'time.time', 'time.time', ([], {}), '()\n', (45986, 45988), False, 'import time, json, os\n'), ((47748, 47759), 'time.time', 'time.time', ([], {}), '()\n', (47757, 47759), False, 'import time, json, os\n'), ((10258, 10269), 'time.time', 'time.time', ([], {}), '()\n', (10267, 10269), False, 'import time, json, os\n'), ((45197, 45208), 'time.time', 'time.time', ([], {}), '()\n', (45206, 45208), False, 'import time, json, os\n'), ((46273, 46284), 'time.time', 'time.time', ([], {}), '()\n', (46282, 46284), False, 'import time, json, os\n'), ((46448, 46459), 'time.time', 'time.time', ([], {}), '()\n', (46457, 46459), False, 'import time, json, os\n'), ((46575, 46586), 'time.time', 'time.time', ([], {}), '()\n', (46584, 46586), False, 'import time, json, os\n'), ((47998, 48009), 'time.time', 'time.time', ([], {}), '()\n', (48007, 48009), False, 'import time, json, os\n'), ((49083, 49094), 'time.time', 'time.time', ([], {}), '()\n', (49092, 49094), False, 'import time, json, os\n')] |
"""
思路:
输入一个json,
把他的字段改成poly,
再集成成segementation字段,
然后输出一个json字段
"""
import itertools
import json
import numpy as np
def bezier_to_poly(bezier):
    """Sample two cubic Bezier curves into a flat polygon coordinate list.

    *bezier* holds 16 numbers: 2 curves x 4 control points x (x, y).
    Returns a flat list of 80 floats -- 20 (x, y) samples along the first
    curve followed by 20 samples along the second.
    """
    u = np.linspace(0, 1, 20)
    # Rows of `ctrl` are (curve0 x, curve0 y, curve1 x, curve1 y); the four
    # columns are the control points of each coordinate polyline.
    ctrl = np.array(bezier).reshape(2, 4, 2).transpose(0, 2, 1).reshape(4, 4)
    # Cubic Bernstein basis evaluated at every sample parameter, shape (20, 4).
    basis = np.stack([(1 - u) ** 3,
                      3 * u * (1 - u) ** 2,
                      3 * u ** 2 * (1 - u),
                      u ** 3], axis=1)
    samples = basis @ ctrl.T  # (20, 4): columns are x0, y0, x1, y1
    flat = np.concatenate((samples[:, :2], samples[:, 2:]), axis=None)
    return flat.tolist()
resut_source_path = r"/home/wengkangming/map_file/AdelaiDet/output/batext/totaltext/attn_R_50/inference/text_results.json"
result_output = r"/home/wengkangming/map_file/AdelaiDet/output/batext/totaltext/attn_R_50/inference/coco_format_text_result.json"
def result_bezier_pts_to_segementation(resut_source_path, result_output):
    """Flatten each record's "polys" into a COCO-style "segmentation".

    Reads the JSON list at *resut_source_path*, sets
    record["segmentation"] = [flattened polys] on every record, and
    writes the updated list to *result_output*.
    """
    with open(resut_source_path, "r") as src:
        records = json.load(src)
    for record in records:
        flat = [coord for poly in record["polys"] for coord in poly]
        record["segmentation"] = [flat]
    with open(result_output, "w") as dst:
        json.dump(records, dst)
"""
对输出结果的poly字段,转为segmentation
"""
result_bezier_pts_to_segementation(resut_source_path, result_output)
| [
"json.dump",
"numpy.outer",
"json.load",
"numpy.array",
"numpy.linspace",
"itertools.chain.from_iterable",
"numpy.concatenate"
] | [((201, 222), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (212, 222), True, 'import numpy as np\n'), ((237, 253), 'numpy.array', 'np.array', (['bezier'], {}), '(bezier)\n', (245, 253), True, 'import numpy as np\n'), ((569, 626), 'numpy.concatenate', 'np.concatenate', (['(points[:, :2], points[:, 2:])'], {'axis': 'None'}), '((points[:, :2], points[:, 2:]), axis=None)\n', (583, 626), True, 'import numpy as np\n'), ((524, 554), 'numpy.outer', 'np.outer', (['(u ** 3)', 'bezier[:, 3]'], {}), '(u ** 3, bezier[:, 3])\n', (532, 554), True, 'import numpy as np\n'), ((1096, 1108), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1105, 1108), False, 'import json\n'), ((1416, 1434), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (1425, 1434), False, 'import json\n'), ((459, 503), 'numpy.outer', 'np.outer', (['(3 * u ** 2 * (1 - u))', 'bezier[:, 2]'], {}), '(3 * u ** 2 * (1 - u), bezier[:, 2])\n', (467, 503), True, 'import numpy as np\n'), ((339, 375), 'numpy.outer', 'np.outer', (['((1 - u) ** 3)', 'bezier[:, 0]'], {}), '((1 - u) ** 3, bezier[:, 0])\n', (347, 375), True, 'import numpy as np\n'), ((394, 438), 'numpy.outer', 'np.outer', (['(3 * u * (1 - u) ** 2)', 'bezier[:, 1]'], {}), '(3 * u * (1 - u) ** 2, bezier[:, 1])\n', (402, 438), True, 'import numpy as np\n'), ((1218, 1254), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['polys'], {}), '(polys)\n', (1247, 1254), False, 'import itertools\n')] |
#! /usr/bin/env python
import sys
import xacro
import numpy as np
import xml.etree.ElementTree as ET
import xml.dom.minidom
def xacro_macro(doc, name, params):
    """Append a <xacro:macro> element to *doc* and return it."""
    attrib = {'name': name, 'params': params}
    return ET.SubElement(doc, 'xacro:macro', attrib)
def xacro_include(doc, filename):
    """Append a <xacro:include> element to *doc* and return it."""
    return ET.SubElement(doc, 'xacro:include', {'filename': filename})
def xacro_property(doc, name, value):
    """Append a <xacro:property> element to *doc* and return it."""
    attrib = {'name': name, 'value': value}
    return ET.SubElement(doc, 'xacro:property', attrib)
def xacro_origin(doc, xyz, rpy):
    """Append an <origin xyz=... rpy=...> element to *doc* and return it."""
    return ET.SubElement(doc, 'origin', {'xyz': xyz, 'rpy': rpy})
def xacro_axis(doc, xyz):
    """Append an <axis xyz=...> element to *doc* and return it."""
    return ET.SubElement(doc, 'axis', {'xyz': xyz})
def xacro_limit(doc, effort, upper, lower, velocity):
    """Append a <limit> element with effort/position/velocity bounds."""
    attrib = {'effort': effort,
              'lower': lower,
              'upper': upper,
              'velocity': velocity}
    return ET.SubElement(doc, 'limit', attrib)
def kuka_link_macro(doc, link_name, color, origin=None):
    """
    Append a <xacro:kuka_link> macro call to *doc* and return it.

    :param doc: parent XML element
    :param link_name: value of the link_name macro argument
    :param color: value of the color macro argument
    :param origin: (xyz, rpy) string pair; defaults to the zero pose.
        (The previous mutable-list default has been replaced by None to
        avoid the shared-mutable-default pitfall; behavior is unchanged.)
    """
    if origin is None:
        origin = ('0 0 0', '0 0 0')
    link = ET.SubElement(doc, 'xacro:kuka_link', {'link_name' : link_name, 'color' : color})
    xacro_origin(link, origin[0], origin[1])
    return link
def kuka_joint_macro(doc, joint_name, parent_link, child_link, origin, axis, limit=None):
    """Append a <xacro:kuka_joint> macro call with origin, axis and optional limit."""
    attrib = {'joint_name': joint_name,
              'parent_link': parent_link,
              'child_link': child_link}
    joint = ET.SubElement(doc, 'xacro:kuka_joint', attrib)
    xacro_origin(joint, origin['xyz'], origin['rpy'])
    xacro_axis(joint, axis['xyz'])
    if limit is None:
        return
    xacro_limit(joint, limit['effort'], limit['upper'],
                limit['lower'], limit['velocity'])
def add_revolute_joint(doc, joint_name, parent_link, child_link, origin,
                       axis, limit=None):
    """Append a prefixed <joint type="revolute"> with parent/child links,
    origin, axis and an optional <limit> element."""
    prefixed = "${prefix}"
    joint_attrib = {'name': prefixed + joint_name, 'type': 'revolute'}
    joint = ET.SubElement(doc, 'joint', joint_attrib)
    xacro_origin(joint, origin['xyz'], origin['rpy'])
    ET.SubElement(joint, 'parent', {'link': prefixed + parent_link})
    ET.SubElement(joint, 'child', {'link': prefixed + child_link})
    xacro_axis(joint, axis['xyz'])
    if limit is None:
        return
    xacro_limit(joint, limit['effort'], limit['upper'],
                limit['lower'], limit['velocity'])
def add_fixed_joint(doc, joint_name, parent_link, child_link, origin):
    """Append a prefixed <joint type="fixed"> rigidly connecting two links."""
    prefixed = "${prefix}"
    joint = ET.SubElement(doc, 'joint', {'name': prefixed + joint_name, 'type': 'fixed'})
    xacro_origin(joint, origin['xyz'], origin['rpy'])
    ET.SubElement(joint, 'parent', {'link': prefixed + parent_link})
    ET.SubElement(joint, 'child', {'link': prefixed + child_link})
def add_link(doc, link_name, color=None, origin=None, do_visual=False, do_collision=False):
    """
    Append a prefixed <link> element, optionally with visual and collision
    mesh geometry referencing the package's STL files.

    :param doc: parent XML element
    :param link_name: link name (also used to locate the STL file)
    :param color: optional material name looked up in `kuka_colors`
    :param origin: optional {'xyz': ..., 'rpy': ...} pose for the meshes
    :param do_visual: add a <visual> mesh element
    :param do_collision: add a <collision> mesh element
    """
    link = ET.SubElement(doc, 'link', {'name' : "${prefix}" + link_name})
    # Visual mesh
    if do_visual:
        visual = ET.SubElement(link, 'visual')
        if origin is not None:
            xacro_origin(visual, origin['xyz'], origin['rpy'])
        geometry = ET.SubElement(visual, 'geometry')
        mesh = ET.SubElement(geometry, 'mesh',
            {'filename' : "package://" + "${package_name}" + "/meshes/" \
            + "${robot_type}" + "/visual/{link_name}.stl".format(link_name=link_name),
            'scale' : "${mesh_scale}"})
        if color is not None:
            material = ET.SubElement(visual, 'material', {'name' : color})
            # NOTE: rebinds `color` to the new element; harmless since it is
            # not used again below.
            color = ET.SubElement(material, 'color', {'rgba' : kuka_colors[color]})
    # Collision mesh
    if do_collision:
        collision = ET.SubElement(link, 'collision')
        if origin is not None:
            xacro_origin(collision, origin['xyz'], origin['rpy'])
        geometry = ET.SubElement(collision, 'geometry')
        # NOTE(review): the collision mesh reuses the "/visual/" STL path —
        # confirm whether a dedicated "/collision/" mesh was intended.
        mesh = ET.SubElement(geometry, 'mesh',
            {'filename' : "package://" + "${package_name}" + "/meshes/" \
            + "${robot_type}" + "/visual/{link_name}.stl".format(link_name=link_name),
            'scale' : "${mesh_scale}"})
def add_motor(doc, robot_definition, parent_link_number, motor_number):
    """
    Attach a motor link (visual + collision mesh) rigidly to a parent link.

    Motor mesh origins are only defined for parent links 1 and 3; any other
    value now raises ValueError instead of the original NameError caused by
    `motor_origin` being left unbound.

    :param doc: parent XML element
    :param robot_definition: object carrying the arm dimensions (L01Z, ...)
    :param parent_link_number: link the motor is mounted on (1 or 3)
    :param motor_number: index used to name the motor link/joint
    :raises ValueError: if parent_link_number is not 1 or 3
    """
    robot = robot_definition
    if parent_link_number == 1:
        motor_origin = {'xyz' : '0 0 {}'.format(-robot.L01Z), 'rpy' : '0 0 0'}
    elif parent_link_number == 3:
        motor_origin = {'xyz' : '{0} 0 {1}'.format(-robot.L12X, -(robot.L01Z+robot.L23Z) ), 'rpy' : '0 0 0'}
    else:
        raise ValueError(
            "add_motor only supports parent links 1 and 3, got {}".format(parent_link_number))
    add_link(doc,
             "link_{0}_motor{1}".format(parent_link_number, motor_number),
             'kuka_black', motor_origin, True, True)
    add_fixed_joint(doc,
                    "joint_{0}_motor{1}".format(parent_link_number, motor_number),
                    "link_{0}".format(parent_link_number),
                    'link_{0}_motor{1}'.format(parent_link_number, motor_number),
                    {'xyz' : '0 0 0', 'rpy' : '0 0 0'})
def kuka_motor_macro(doc, parent_link, motor_number, origin):
    """
    Append a <xacro:kuka_motor> macro call to *doc* and return it.

    Bug fix: the body referenced the undefined name `parent_link_number`
    (raising NameError on every call); the parameter is `parent_link`.

    :param doc: parent XML element
    :param parent_link: link number the motor is attached to
    :param motor_number: motor index on that link
    :param origin: {'xyz': ..., 'rpy': ...} pose of the motor
    """
    motor = ET.SubElement(doc, 'xacro:kuka_motor',
                          {'parent_link_number' : parent_link,
                           'motor_number' : motor_number})
    xacro_origin(motor, origin['xyz'], origin['rpy'])
    return motor
def create_kuka_robot_xacro(robot_definition):
    """
    Build the xacro <robot> document for a 6-axis KUKA arm and pretty-print it.

    :param robot_definition: object carrying the arm geometry (L01Z, L12X, ...),
        mesh/package metadata, per-joint names, colors, limits and motors.
    """
    robot = robot_definition
    doc = ET.Element('robot', {'xmlns:xacro' : 'http://www.ros.org/wiki/xacro'})
    xacro_property(doc, 'mesh_scale', robot.mesh_scale)
    xacro_property(doc, 'package_name', robot.package_name)
    xacro_property(doc, 'robot_type', robot.robot_type)
    robot_macro = xacro_macro(doc, 'kuka_{0}'.format(robot.robot_type), "prefix")
    # Joint poses expressed relative to the previous joint frame.
    joint_origins = [
        # Joint 1 (A1)
        {'xyz' : '{x} {y} {z}'.format(x=0, y=0, z=robot.L01Z), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        # Joint 2 (A2)
        {'xyz' : '{x} {y} {z}'.format(x=robot.L12X, y=0, z=0), 'rpy' : '{r} {p} {y}'.format(r=0, p=np.deg2rad(90), y=0)},
        # Joint 3 (A3)
        {'xyz' : '{x} {y} {z}'.format(x=0, y=0, z=robot.L23Z), 'rpy' : '{r} {p} {y}'.format(r=0, p=np.deg2rad(-90), y=0)},
        # Joint 4 (A4)
        {'xyz' : '{x} {y} {z}'.format(x=robot.L34X, y=0, z=robot.L34Z), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        # Joint 5 (A5)
        {'xyz' : '{x} {y} {z}'.format(x=0, y=0, z=0), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        # Joint 6 (A6)
        {'xyz' : '{x} {y} {z}'.format(x=robot.L56X, y=0, z=0), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)}]
    # Mesh origins that undo the accumulated joint offsets for each link.
    link_origins = [
        {'xyz' : '{x} {y} {z}'.format(x=0, y=0, z=0), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=0, y=0, z=-robot.L01Z), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=-robot.L12X, y=0, z=-robot.L01Z), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=-robot.L12X, y=0, z=-(robot.L01Z+robot.L23Z)), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=-(robot.L12X+robot.L34X), y=0, z=-(robot.L01Z+robot.L23Z+robot.L34Z)), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=-(robot.L12X+robot.L34X), y=0, z=-(robot.L01Z+robot.L23Z+robot.L34Z)), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=-(robot.L12X+robot.L34X+robot.L56X), y=0, z=-(robot.L01Z+robot.L23Z+robot.L34Z)), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)}]
    # 6 rotation axes for the 6 revolute joints
    joint_rotation_axes = [
        # Joint 1 (A1)
        {'xyz' : '0 0 -1'},
        # Joint 2 (A2)
        {'xyz' : '0 1 0'},
        # Joint 3 (A3)
        {'xyz' : '0 1 0'},
        # Joint 4 (A4)
        {'xyz' : '-1 0 0'},
        # Joint 5 (A5)
        {'xyz' : '0 1 0'},
        # Joint 6 (A6)
        {'xyz' : '-1 0 0'}]
    n = 6
    for i in range(n+1):
        # add links
        add_link(robot_macro, robot.link_names[i], robot.link_colors[i], link_origins[i], True, True)
    for i in range(n):
        # add joints
        add_revolute_joint(robot_macro,
                           joint_name = robot.joint_names[i],
                           parent_link = robot.link_names[i],
                           child_link = robot.link_names[i+1],
                           origin = joint_origins[i],
                           axis = joint_rotation_axes[i],
                           limit=robot.limits[i])
        # add motors
        if robot.motors[i] is not None:
            for motor_number in robot.motors[i]:
                add_motor(robot_macro, robot, i, motor_number)
    # add end effector frame
    add_link(robot_macro, robot.link_names[-1])
    add_fixed_joint(robot_macro, robot.joint_names[-1],
                    robot.link_names[-2], robot.link_names[-1], {'xyz' : '0 0 0', 'rpy' : '0 0 0'})
    # Serialize and pretty-print; the function currently prints instead of
    # returning the document (see commented-out `return doc`).
    doc_string = ET.tostring(doc)
    doc = xml.dom.minidom.parseString(doc_string)
    #xacro.eval_self_contained(doc)
    print(doc.toprettyxml(indent='  '))
    #return doc
kuka_colors = {'kuka_orange' : '1.0 0.5 0.0 1.0', 'kuka_black' : '0.05 0.05. 0.05 1.0'}
def get_joint_origins():
    """
    Return the six joint origin poses ({'xyz': ..., 'rpy': ...}) for A1..A6.

    NOTE(review): L01Z, L12X, L23Z, L34X, L34Z and L56X are read as free
    (module-level) names but are not defined anywhere in this file — calling
    this raises NameError unless they are injected as globals.  They
    presumably mirror the `robot.*` attributes used in
    create_kuka_robot_xacro(); confirm before use.
    """
    joint_origins = [
        # Joint 1 (A1)
        {'xyz' : '{x} {y} {z}'.format(x=0, y=0, z=L01Z), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        # Joint 2 (A2)
        {'xyz' : '{x} {y} {z}'.format(x=L12X, y=0, z=0), 'rpy' : '{r} {p} {y}'.format(r=0, p=np.deg2rad(90), y=0)},
        # Joint 3 (A3)
        {'xyz' : '{x} {y} {z}'.format(x=0, y=0, z=L23Z), 'rpy' : '{r} {p} {y}'.format(r=0, p=np.deg2rad(-90), y=0)},
        # Joint 4 (A4)
        {'xyz' : '{x} {y} {z}'.format(x=L34X, y=0, z=L34Z), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        # Joint 5 (A5)
        {'xyz' : '{x} {y} {z}'.format(x=0, y=0, z=0), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        # Joint 6 (A6)
        {'xyz' : '{x} {y} {z}'.format(x=L56X, y=0, z=0), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)}]
    return joint_origins
def get_link_origins():
    """
    Return the seven link mesh origin poses ({'xyz': ..., 'rpy': ...}).

    NOTE(review): like get_joint_origins(), this reads free module-level
    names (L01Z, L12X, L23Z, L34X, L34Z, L56X) that are not defined in this
    file — calling it as-is raises NameError.
    """
    link_origins = [
        {'xyz' : '{x} {y} {z}'.format(x=0, y=0, z=0), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=0, y=0, z=-L01Z), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=-L12X, y=0, z=-L01Z), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=-L12X, y=0, z=-(L01Z+L23Z)), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=-(L12X+L34X), y=0, z=-(L01Z+L23Z+L34Z)), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=-(L12X+L34X), y=0, z=-(L01Z+L23Z+L34Z)), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)},
        {'xyz' : '{x} {y} {z}'.format(x=-(L12X+L34X+L56X), y=0, z=-(L01Z+L23Z+L34Z)), 'rpy' : '{r} {p} {y}'.format(r=0, p=0, y=0)}]
    return link_origins
def get_joint_rotation_axes():
    """Return the rotation axis ({'xyz': unit-vector string}) for joints A1..A6."""
    axes = ['0 0 -1',   # Joint 1 (A1)
            '0 1 0',    # Joint 2 (A2)
            '0 1 0',    # Joint 3 (A3)
            '-1 0 0',   # Joint 4 (A4)
            '0 1 0',    # Joint 5 (A5)
            '-1 0 0']   # Joint 6 (A6)
    return [{'xyz': axis} for axis in axes]
if __name__ == '__main__':
    # Generation entry point is currently disabled; the commented code below
    # shows how the xacro document used to be written to disk and
    # pretty-printed.
    pass
    #doc = create_kuka_robot_xacro()
    #tree = ET.ElementTree(doc)
    #tree.write('../urdf/kr5arc_macro.xacro')
    #import xml.dom.minidom
    #newdoc = open('../urdf/kr5arc_macro.xacro')
    #while(newdoc.closed):
    #pass
    #dom = xml.dom.minidom.parse(newdoc)
    #print(dom.toprettyxml())
    #newdoc.close()
| [
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.tostring",
"numpy.deg2rad",
"xml.etree.ElementTree.SubElement"
] | [((174, 241), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""xacro:macro"""', "{'name': name, 'params': params}"], {}), "(doc, 'xacro:macro', {'name': name, 'params': params})\n", (187, 241), True, 'import xml.etree.ElementTree as ET\n'), ((290, 349), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""xacro:include"""', "{'filename': filename}"], {}), "(doc, 'xacro:include', {'filename': filename})\n", (303, 349), True, 'import xml.etree.ElementTree as ET\n'), ((401, 469), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""xacro:property"""', "{'name': name, 'value': value}"], {}), "(doc, 'xacro:property', {'name': name, 'value': value})\n", (414, 469), True, 'import xml.etree.ElementTree as ET\n'), ((517, 571), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""origin"""', "{'xyz': xyz, 'rpy': rpy}"], {}), "(doc, 'origin', {'xyz': xyz, 'rpy': rpy})\n", (530, 571), True, 'import xml.etree.ElementTree as ET\n'), ((612, 652), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""axis"""', "{'xyz': xyz}"], {}), "(doc, 'axis', {'xyz': xyz})\n", (625, 652), True, 'import xml.etree.ElementTree as ET\n'), ((720, 825), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""limit"""', "{'effort': effort, 'lower': lower, 'upper': upper, 'velocity': velocity}"], {}), "(doc, 'limit', {'effort': effort, 'lower': lower, 'upper':\n upper, 'velocity': velocity})\n", (733, 825), True, 'import xml.etree.ElementTree as ET\n'), ((1029, 1108), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""xacro:kuka_link"""', "{'link_name': link_name, 'color': color}"], {}), "(doc, 'xacro:kuka_link', {'link_name': link_name, 'color': color})\n", (1042, 1108), True, 'import xml.etree.ElementTree as ET\n'), ((1275, 1399), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""xacro:kuka_joint"""', "{'joint_name': joint_name, 'parent_link': parent_link, 'child_link': child_link\n }"], 
{}), "(doc, 'xacro:kuka_joint', {'joint_name': joint_name,\n 'parent_link': parent_link, 'child_link': child_link})\n", (1288, 1399), True, 'import xml.etree.ElementTree as ET\n'), ((1860, 1947), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""joint"""', "{'name': '${prefix}' + joint_name, 'type': 'revolute'}"], {}), "(doc, 'joint', {'name': '${prefix}' + joint_name, 'type':\n 'revolute'})\n", (1873, 1947), True, 'import xml.etree.ElementTree as ET\n'), ((2044, 2111), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['joint', '"""parent"""', "{'link': '${prefix}' + parent_link}"], {}), "(joint, 'parent', {'link': '${prefix}' + parent_link})\n", (2057, 2111), True, 'import xml.etree.ElementTree as ET\n'), ((2117, 2182), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['joint', '"""child"""', "{'link': '${prefix}' + child_link}"], {}), "(joint, 'child', {'link': '${prefix}' + child_link})\n", (2130, 2182), True, 'import xml.etree.ElementTree as ET\n'), ((2444, 2529), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""joint"""', "{'name': '${prefix}' + joint_name, 'type': 'fixed'}"], {}), "(doc, 'joint', {'name': '${prefix}' + joint_name, 'type': 'fixed'}\n )\n", (2457, 2529), True, 'import xml.etree.ElementTree as ET\n'), ((2584, 2651), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['joint', '"""parent"""', "{'link': '${prefix}' + parent_link}"], {}), "(joint, 'parent', {'link': '${prefix}' + parent_link})\n", (2597, 2651), True, 'import xml.etree.ElementTree as ET\n'), ((2657, 2722), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['joint', '"""child"""', "{'link': '${prefix}' + child_link}"], {}), "(joint, 'child', {'link': '${prefix}' + child_link})\n", (2670, 2722), True, 'import xml.etree.ElementTree as ET\n'), ((2829, 2890), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""link"""', "{'name': '${prefix}' + link_name}"], {}), "(doc, 'link', {'name': '${prefix}' + link_name})\n", (2842, 
2890), True, 'import xml.etree.ElementTree as ET\n'), ((5031, 5147), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['doc', '"""xacro:kuka_motor"""', "{'parent_link_number': parent_link_number, 'motor_number': motor_number}"], {}), "(doc, 'xacro:kuka_motor', {'parent_link_number':\n parent_link_number, 'motor_number': motor_number})\n", (5044, 5147), True, 'import xml.etree.ElementTree as ET\n'), ((5358, 5427), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""robot"""', "{'xmlns:xacro': 'http://www.ros.org/wiki/xacro'}"], {}), "('robot', {'xmlns:xacro': 'http://www.ros.org/wiki/xacro'})\n", (5368, 5427), True, 'import xml.etree.ElementTree as ET\n'), ((8890, 8906), 'xml.etree.ElementTree.tostring', 'ET.tostring', (['doc'], {}), '(doc)\n', (8901, 8906), True, 'import xml.etree.ElementTree as ET\n'), ((2945, 2974), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['link', '"""visual"""'], {}), "(link, 'visual')\n", (2958, 2974), True, 'import xml.etree.ElementTree as ET\n'), ((3088, 3121), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['visual', '"""geometry"""'], {}), "(visual, 'geometry')\n", (3101, 3121), True, 'import xml.etree.ElementTree as ET\n'), ((3669, 3701), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['link', '"""collision"""'], {}), "(link, 'collision')\n", (3682, 3701), True, 'import xml.etree.ElementTree as ET\n'), ((3818, 3854), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['collision', '"""geometry"""'], {}), "(collision, 'geometry')\n", (3831, 3854), True, 'import xml.etree.ElementTree as ET\n'), ((3471, 3521), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['visual', '"""material"""', "{'name': color}"], {}), "(visual, 'material', {'name': color})\n", (3484, 3521), True, 'import xml.etree.ElementTree as ET\n'), ((3543, 3605), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['material', '"""color"""', "{'rgba': kuka_colors[color]}"], {}), "(material, 'color', {'rgba': kuka_colors[color]})\n", 
(3556, 3605), True, 'import xml.etree.ElementTree as ET\n'), ((5962, 5976), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (5972, 5976), True, 'import numpy as np\n'), ((6107, 6122), 'numpy.deg2rad', 'np.deg2rad', (['(-90)'], {}), '(-90)\n', (6117, 6122), True, 'import numpy as np\n'), ((9429, 9443), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (9439, 9443), True, 'import numpy as np\n'), ((9568, 9583), 'numpy.deg2rad', 'np.deg2rad', (['(-90)'], {}), '(-90)\n', (9578, 9583), True, 'import numpy as np\n')] |
from PIL import Image
import os
import json
import random
import torchvision.transforms.functional as FT
import torch
import math
import numpy as np
# Run on GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Some constants
# ITU-R BT.601-style RGB->Y weights used by the 'y-channel' conversion below.
rgb_weights = torch.FloatTensor([65.481, 128.553, 24.966]).to(device)
# ImageNet channel statistics: (3, 1, 1) for unbatched 3-D tensors, and
# (1, 3, 1, 1) on `device` for batched 4-D tensors.
imagenet_mean = torch.FloatTensor([0.485, 0.456, 0.406]).unsqueeze(1).unsqueeze(2)
imagenet_std = torch.FloatTensor([0.229, 0.224, 0.225]).unsqueeze(1).unsqueeze(2)
imagenet_mean_cuda = torch.FloatTensor([0.485, 0.456, 0.406]).to(device).unsqueeze(0).unsqueeze(2).unsqueeze(3)
imagenet_std_cuda = torch.FloatTensor([0.229, 0.224, 0.225]).to(device).unsqueeze(0).unsqueeze(2).unsqueeze(3)
def _large_enough_images(folder, min_size):
    """Yield paths of images in *folder* whose width and height are both >= min_size."""
    for name in os.listdir(folder):
        img_path = os.path.join(folder, name)
        # Context manager closes the underlying file handle; the original
        # left every opened image's handle dangling.
        with Image.open(img_path, mode='r') as img:
            if img.width >= min_size and img.height >= min_size:
                yield img_path
def create_data_lists(train_folders, test_folders, min_size, output_folder):
    """
    Create lists for images in the training set and each of the test sets.

    :param train_folders: folders containing the training images; these will be merged
    :param test_folders: folders containing the test images; each test folder will form its own test set
    :param min_size: minimum width and height of images to be considered
    :param output_folder: save data lists here
    """
    print("\nCreating data lists... this may take some time.\n")
    # The identical folder-scanning loop was duplicated for train and test;
    # both now share _large_enough_images().
    train_images = []
    for d in train_folders:
        train_images.extend(_large_enough_images(d, min_size))
    print("There are %d images in the training data.\n" % len(train_images))
    with open(os.path.join(output_folder, 'train_images.json'), 'w') as j:
        json.dump(train_images, j)
    for d in test_folders:
        test_name = d.split("/")[-1]
        test_images = list(_large_enough_images(d, min_size))
        print("There are %d images in the %s test data.\n" % (len(test_images), test_name))
        with open(os.path.join(output_folder, test_name + '_test_images.json'), 'w') as j:
            json.dump(test_images, j)
    print("JSONS containing lists of Train and Test images have been saved to %s\n" % output_folder)
def convert_image(img, source, target):
    """
    Convert an image from a source format to a target format.

    :param img: image (PIL image for 'pil' source; torch tensor otherwise)
    :param source: source format, one of 'pil' (PIL image), '[0, 1]' or '[-1, 1]' (pixel value ranges)
    :param target: target format, one of 'pil' (PIL image), '[0, 255]', '[0, 1]', '[-1, 1]' (pixel value ranges),
                   'imagenet-norm' (pixel values standardized by imagenet mean and std.),
                   'y-channel' (luminance channel Y in the YCbCr color format, used to calculate PSNR and SSIM)
    :return: converted image
    """
    assert source in {'pil', '[0, 1]', '[-1, 1]'}, "Cannot convert from source format %s!" % source
    assert target in {'pil', '[0, 255]', '[0, 1]', '[-1, 1]', 'imagenet-norm',
                      'y-channel'}, "Cannot convert to target format %s!" % target
    # Convert from source to [0, 1]
    if source == 'pil':
        img = FT.to_tensor(img)
    elif source == '[0, 1]':
        pass  # already in [0, 1]
    elif source == '[-1, 1]':
        img = (img + 1.) / 2.
    # Convert from [0, 1] to target
    if target == 'pil':
        img = FT.to_pil_image(img)
    elif target == '[0, 255]':
        img = 255. * img
    elif target == '[0, 1]':
        pass  # already in [0, 1]
    elif target == '[-1, 1]':
        img = 2. * img - 1.
    elif target == 'imagenet-norm':
        # 3-D (C, H, W) uses CPU stats; 4-D batched tensors use the stats
        # pre-moved to `device` (so batched input is expected on `device`).
        if img.ndimension() == 3:
            img = (img - imagenet_mean) / imagenet_std
        elif img.ndimension() == 4:
            img = (img - imagenet_mean_cuda) / imagenet_std_cuda
    elif target == 'y-channel':
        # Based on definitions at https://github.com/xinntao/BasicSR/wiki/Color-conversion-in-SR
        # torch.dot() does not work the same way as numpy.dot()
        # So, use torch.matmul() to find the dot product between the last dimension of an 4-D tensor and a 1-D tensor
        # Note: requires a 4-D batched tensor (permute over 4 dims) and crops
        # a 4-pixel border before computing luminance.
        img = torch.matmul(255. * img.permute(0, 2, 3, 1)[:, 4:-4, 4:-4, :], rgb_weights) / 255. + 16.
    return img
class ImageTransforms(object):
    """
    Image transformation pipeline: crops an HR patch from a source image and
    produces the matching bicubic-downsampled LR patch.
    """
    def __init__(self, split, crop_size, scaling_factor, lr_img_type, hr_img_type):
        """
        :param split: 'train' (random crop), 'val', or anything else
            (largest center crop divisible by the scaling factor)
        :param crop_size: crop size of HR images
        :param scaling_factor: LR images will be downsampled from the HR images by this factor
        :param lr_img_type: the target format for the LR image; see convert_image() above for available formats
        :param hr_img_type: the target format for the HR image; see convert_image() above for available formats
        """
        self.split = split.lower()
        self.crop_size = crop_size
        self.scaling_factor = scaling_factor
        self.lr_img_type = lr_img_type
        self.hr_img_type = hr_img_type
    def __call__(self, img):
        """
        :param img: a PIL source image from which the HR image will be cropped, and then downsampled to create the LR image
        :return: LR and HR images in the specified format
        """
        # Important!! padding
        # Ensure the image is at least crop_size in both dimensions.
        if img.width <= self.crop_size or img.height <= self.crop_size:
            img = padding(img, self.crop_size)
        # Crop
        if self.split == 'train':
            # Pick a random crop offset; fall back to 0 when there is (almost)
            # no slack.  Note the random offset never equals 0 otherwise.
            if img.width - self.crop_size <= 1:
                left = 0
            else:
                left = random.randint(1, img.width - self.crop_size)
            if img.height - self.crop_size <= 1:
                top = 0
            else:
                top = random.randint(1, img.height - self.crop_size)
            # Take a random fixed-size crop of the image, which will serve as the high-resolution (HR) image
            # left = random.randint(1, img.width - self.crop_size)
            # top = random.randint(1, img.height - self.crop_size)
            right = left + self.crop_size
            bottom = top + self.crop_size
            hr_img = img.crop((left, top, right, bottom))
        elif self.split == 'val':
            # NOTE(review): identical to the else branch below — center crop
            # whose dimensions are divisible by the scaling factor.
            x_remainder = img.width % self.scaling_factor
            y_remainder = img.height % self.scaling_factor
            left = x_remainder // 2
            top = y_remainder // 2
            right = left + (img.width - x_remainder)
            bottom = top + (img.height - y_remainder)
            hr_img = img.crop((left, top, right, bottom))
        else:
            # Take the largest possible center-crop of it such that its dimensions are perfectly divisible by the scaling factor
            x_remainder = img.width % self.scaling_factor
            y_remainder = img.height % self.scaling_factor
            left = x_remainder // 2
            top = y_remainder // 2
            right = left + (img.width - x_remainder)
            bottom = top + (img.height - y_remainder)
            hr_img = img.crop((left, top, right, bottom))
        # Downsize this crop to obtain a low-resolution version of it
        lr_img = hr_img.resize((int(hr_img.width / self.scaling_factor), int(hr_img.height / self.scaling_factor)),
                               Image.BICUBIC)
        # Sanity check
        assert hr_img.width == lr_img.width * self.scaling_factor and hr_img.height == lr_img.height * self.scaling_factor
        # Convert the LR and HR image to the required type
        lr_img = convert_image(lr_img, source='pil', target=self.lr_img_type)
        hr_img = convert_image(hr_img, source='pil', target=self.hr_img_type)
        return lr_img, hr_img
class AverageMeter(object):
    """Running tally of a metric: latest value, sum, count and mean."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def clip_gradient(optimizer, grad_clip):
    """
    Clamp every parameter gradient held by *optimizer* to [-grad_clip, grad_clip]
    in place, preventing gradient explosion during backpropagation.

    :param optimizer: optimizer with the gradients to be clipped
    :param grad_clip: clip value
    """
    params = (p for group in optimizer.param_groups for p in group['params'])
    for p in params:
        if p.grad is not None:
            p.grad.data.clamp_(-grad_clip, grad_clip)
def save_checkpoint(state, filename):
    """
    Save model checkpoint.

    :param state: checkpoint contents (anything torch.save can serialize)
    :param filename: destination path for the serialized checkpoint
    """
    torch.save(state, filename)
def adjust_learning_rate(optimizer, shrink_factor):
    """
    Multiply every parameter group's learning rate by *shrink_factor*.

    :param optimizer: optimizer whose learning rate must be shrunk.
    :param shrink_factor: factor in interval (0, 1) to multiply learning rate with.
    """
    print("\nDECAYING learning rate.")
    for group in optimizer.param_groups:
        group['lr'] *= shrink_factor
    print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))
def padding(img, crop_size):
    """
    Zero-pad *img* so both of its sides are at least *crop_size*.

    Bug fix: the previous symmetric floor-division padding ([wp, hp, wp, hp])
    lost one pixel whenever the deficit was odd, so the padded image could
    still be smaller than crop_size; the right/bottom pads now absorb the
    remainder.
    """
    w, h = img.size
    max_h = np.max([h, crop_size])
    max_w = np.max([w, crop_size])
    hp = int((max_h - h) / 2)
    wp = int((max_w - w) / 2)
    # FT.pad order: [left, top, right, bottom].
    pad = [wp, hp, int(max_w - w) - wp, int(max_h - h) - hp]
return FT.pad(img, pad, 0, 'constant') | [
"json.dump",
"torchvision.transforms.functional.to_tensor",
"random.randint",
"torch.FloatTensor",
"torchvision.transforms.functional.pad",
"PIL.Image.open",
"torch.save",
"torchvision.transforms.functional.to_pil_image",
"numpy.max",
"torch.cuda.is_available",
"os.path.join",
"os.listdir"
] | [((8802, 8829), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (8812, 8829), False, 'import torch\n'), ((9393, 9415), 'numpy.max', 'np.max', (['[h, crop_size]'], {}), '([h, crop_size])\n', (9399, 9415), True, 'import numpy as np\n'), ((9428, 9450), 'numpy.max', 'np.max', (['[w, crop_size]'], {}), '([w, crop_size])\n', (9434, 9450), True, 'import numpy as np\n'), ((9549, 9580), 'torchvision.transforms.functional.pad', 'FT.pad', (['img', 'pad', '(0)', '"""constant"""'], {}), "(img, pad, 0, 'constant')\n", (9555, 9580), True, 'import torchvision.transforms.functional as FT\n'), ((182, 207), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (205, 207), False, 'import torch\n'), ((252, 296), 'torch.FloatTensor', 'torch.FloatTensor', (['[65.481, 128.553, 24.966]'], {}), '([65.481, 128.553, 24.966])\n', (269, 296), False, 'import torch\n'), ((1315, 1328), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (1325, 1328), False, 'import os\n'), ((1692, 1718), 'json.dump', 'json.dump', (['train_images', 'j'], {}), '(train_images, j)\n', (1701, 1718), False, 'import json\n'), ((1830, 1843), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (1840, 1843), False, 'import os\n'), ((3297, 3314), 'torchvision.transforms.functional.to_tensor', 'FT.to_tensor', (['img'], {}), '(img)\n', (3309, 3314), True, 'import torchvision.transforms.functional as FT\n'), ((3515, 3535), 'torchvision.transforms.functional.to_pil_image', 'FT.to_pil_image', (['img'], {}), '(img)\n', (3530, 3535), True, 'import torchvision.transforms.functional as FT\n'), ((1353, 1371), 'os.path.join', 'os.path.join', (['d', 'i'], {}), '(d, i)\n', (1365, 1371), False, 'import os\n'), ((1390, 1420), 'PIL.Image.open', 'Image.open', (['img_path'], {'mode': '"""r"""'}), "(img_path, mode='r')\n", (1400, 1420), False, 'from PIL import Image\n'), ((1623, 1671), 'os.path.join', 'os.path.join', (['output_folder', '"""train_images.json"""'], {}), "(output_folder, 
'train_images.json')\n", (1635, 1671), False, 'import os\n'), ((1868, 1886), 'os.path.join', 'os.path.join', (['d', 'i'], {}), '(d, i)\n', (1880, 1886), False, 'import os\n'), ((1905, 1935), 'PIL.Image.open', 'Image.open', (['img_path'], {'mode': '"""r"""'}), "(img_path, mode='r')\n", (1915, 1935), False, 'from PIL import Image\n'), ((2241, 2266), 'json.dump', 'json.dump', (['test_images', 'j'], {}), '(test_images, j)\n', (2250, 2266), False, 'import json\n'), ((324, 364), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (341, 364), False, 'import torch\n'), ((406, 446), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (423, 446), False, 'import torch\n'), ((2156, 2216), 'os.path.join', 'os.path.join', (['output_folder', "(test_name + '_test_images.json')"], {}), "(output_folder, test_name + '_test_images.json')\n", (2168, 2216), False, 'import os\n'), ((5733, 5778), 'random.randint', 'random.randint', (['(1)', '(img.width - self.crop_size)'], {}), '(1, img.width - self.crop_size)\n', (5747, 5778), False, 'import random\n'), ((5892, 5938), 'random.randint', 'random.randint', (['(1)', '(img.height - self.crop_size)'], {}), '(1, img.height - self.crop_size)\n', (5906, 5938), False, 'import random\n'), ((494, 534), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (511, 534), False, 'import torch\n'), ((605, 645), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (622, 645), False, 'import torch\n')] |
import argparse
import os
import ndreg
from ndreg import preprocessor, util
import numpy as np
import SimpleITK as sitk
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
import time
import requests
# Geometry constants shared by the registration helpers below.
# Bug fix: this whole block was defined twice in a row; the exact duplicate
# re-assignments have been removed (the bound values are unchanged).
dimension = 3
vectorComponentType = sitk.sitkFloat32
vectorType = sitk.sitkVectorFloat32
affine = sitk.AffineTransform(dimension)
identityAffine = list(affine.GetParameters())
identityDirection = identityAffine[0:9]
zeroOrigin = [0] * dimension
zeroIndex = [0] * dimension
# Map boss/ndstore datatype names to SimpleITK pixel types.
ndToSitkDataTypes = {'uint8': sitk.sitkUInt8,
                     'uint16': sitk.sitkUInt16,
                     'uint32': sitk.sitkUInt32,
                     'float32': sitk.sitkFloat32,
                     'uint64': sitk.sitkUInt64}
# Map SimpleITK pixel types to numpy dtypes.
sitkToNpDataTypes = {sitk.sitkUInt8: np.uint8,
                     sitk.sitkUInt16: np.uint16,
                     sitk.sitkUInt32: np.uint32,
                     sitk.sitkInt8: np.int8,
                     sitk.sitkInt16: np.int16,
                     sitk.sitkInt32: np.int32,
                     sitk.sitkFloat32: np.float32,
                     sitk.sitkFloat64: np.float64,
                     }
# Boss Stuff:
def setup_experiment_boss(remote, collection, experiment):
    """
    Get experiment and coordinate frame information from the boss.

    :param remote: BossRemote connection
    :param collection: collection name
    :param experiment: experiment name
    :return: (experiment setup resource, actual coordinate frame resource),
        or None if the lookup failed (the error is printed).
    """
    exp_setup = ExperimentResource(experiment, collection)
    try:
        exp_actual = remote.get_project(exp_setup)
        coord_setup = CoordinateFrameResource(exp_actual.coord_frame)
        coord_actual = remote.get_project(coord_setup)
        return (exp_setup, coord_actual)
    except Exception as e:
        # Bug fix: Python 3 exceptions have no `.message` attribute, so the
        # old `print(e.message)` raised AttributeError and masked the error.
        print(e)
def setup_channel_boss(
        remote,
        collection,
        experiment,
        channel,
        channel_type='image',
        datatype='uint16'):
    """
    Look up a channel, plus its experiment and coordinate frame, on the Boss.

    Parameters
    ----------
    remote : BossRemote used for the project lookups.
    collection, experiment, channel : str, Boss resource names.
    channel_type : str, Boss channel type (default 'image').
    datatype : str, voxel datatype (default 'uint16').

    Returns
    -------
    (exp_setup, coord_actual, chan_actual) on success; None if the channel
    lookup raises (the error is printed).
    """
    (exp_setup, coord_actual) = setup_experiment_boss(
        remote, collection, experiment)
    chan_setup = ChannelResource(
        channel,
        collection,
        experiment,
        channel_type,
        datatype=datatype)
    try:
        chan_actual = remote.get_project(chan_setup)
        return (exp_setup, coord_actual, chan_actual)
    except Exception as e:
        # Bug fix: Python 3 exceptions have no `.message` attribute
        # (removed with PEP 352); print the exception object itself.
        print(e)
# Note: The following functions assume an anisotropic dataset. This is generally a bad assumption. These
# functions are stopgaps until proper coordinate frame at resulution
# support exists in intern.
def get_xyz_extents(rmt, ch_rsc, res=0, iso=True):
    """Query the Boss downsample endpoint for a channel's x/y/z extents and
    voxel spacing at the requested resolution level."""
    base_url = 'https://api.boss.neurodata.io/v1/'
    url = base_url + \
        '/downsample/{}?iso={}'.format(ch_rsc.get_cutout_route(), iso)
    auth_headers = {'Authorization': 'Token ' + rmt.token_project}
    payload = requests.get(url, headers=auth_headers).json()
    res_key = '{}'.format(res)
    extent = payload['extent'][res_key]
    # Each axis range starts at zero and ends at the reported extent.
    x_range, y_range, z_range = ([0, extent[axis]] for axis in range(3))
    spacing = payload['voxel_size'][res_key]
    return (x_range, y_range, z_range, spacing)
def get_offset_boss(coord_frame, res=0, isotropic=False):
    """Return the [x, y, z] start offset of *coord_frame* scaled to a
    resolution level; z is only downscaled for isotropic channels."""
    scale = 2. ** res
    x_off = int(coord_frame.x_start / scale)
    y_off = int(coord_frame.y_start / scale)
    if isotropic:
        z_off = int(coord_frame.z_start / scale)
    else:
        z_off = coord_frame.z_start
    return [x_off, y_off, z_off]
def get_image_size_boss(coord_frame, res=0, isotropic=False):
    """Return the [x, y, z] stop extents of *coord_frame* scaled to a
    resolution level; z is only downscaled for isotropic channels."""
    scale = 2. ** res
    x_size = int(coord_frame.x_stop / scale)
    y_size = int(coord_frame.y_stop / scale)
    if isotropic:
        z_size = int(coord_frame.z_stop / scale)
    else:
        z_size = coord_frame.z_stop
    return [x_size, y_size, z_size]
def imgDownload_boss(
        remote,
        channel_resource,
        coordinate_frame_resource,
        resolution=0,
        size=[],
        start=[],
        isotropic=False):
    """
    Download image with given token from given server at given resolution.
    If channel isn't specified the first channel is downloaded.

    Parameters
    ----------
    remote : BossRemote used to fetch the cutout.
    channel_resource : ChannelResource naming the channel to download.
    coordinate_frame_resource : CoordinateFrameResource providing voxel
        sizes, units, and extents.
    resolution : int, Boss resolution level (0 = native).
    size : list, optional [x, y, z] upper bounds; computed from the
        coordinate frame when empty.  NOTE(review): mutable default, but it
        is only compared and rebound here, never mutated.
    start : list, optional [x, y, z] lower bounds; computed when empty.
    isotropic : bool, whether the downsample hierarchy also halves z.

    Returns
    -------
    SimpleITK image with identity direction and spacing set in millimeters
    (collapsed to 2-D if the volume has a single slice).
    """
    # TODO: Fix size and start parameters
    voxel_unit = coordinate_frame_resource.voxel_unit
    voxel_units = ('nanometers', 'micrometers', 'millimeters', 'centimeters')
    # Per-unit conversion factors to millimeters, aligned with voxel_units.
    factor_divide = (1e-6, 1e-3, 1, 10)
    fact_div = factor_divide[voxel_units.index(voxel_unit)]
    spacingBoss = [
        coordinate_frame_resource.x_voxel_size,
        coordinate_frame_resource.y_voxel_size,
        coordinate_frame_resource.z_voxel_size]
    spacing = [x * fact_div for x in spacingBoss]  # Convert spacing to mm
    if isotropic:
        # Isotropic downsampling scales all three axes per level.
        spacing = [x * 2**resolution for x in spacing]
    else:
        spacing[0] = spacing[0] * 2**resolution
        spacing[1] = spacing[1] * 2**resolution
        # z spacing unchanged since not isotropic
    if size == []:
        size = get_image_size_boss(
            coordinate_frame_resource, resolution, isotropic)
    if start == []:
        start = get_offset_boss(
            coordinate_frame_resource, resolution, isotropic)
    # if isotropic:
    # x_range, y_range, z_range, spacing = get_xyz_extents(
    # remote, channel_resource, res=resolution, iso=isotropic)
    # size[2] = 200
    # dataType = metadata['channels'][channel]['datatype']
    dataType = channel_resource.datatype
    # Download all image data from specified channel
    array = remote.get_cutout(
        channel_resource, resolution, [
            start[0], size[0]], [
            start[1], size[1]], [
            start[2], size[2]])
    # Cast downloaded image to server's data type
    # convert numpy array to sitk image
    img = sitk.Cast(sitk.GetImageFromArray(array), ndToSitkDataTypes[dataType])
    # Reverse axes order
    # img = sitk.PermuteAxesImageFilter().Execute(img,range(dimension-1,-1,-1))
    img.SetDirection(identityDirection)
    img.SetSpacing(spacing)
    # Convert to 2D if only one slice
    img = util.imgCollapseDimension(img)
    return img
# NOTE(review): this redefines get_offset_boss, shadowing the identical
# definition earlier in the module; the duplicate should be removed once
# callers are verified.
def get_offset_boss(coord_frame, res=0, isotropic=False):
    """Duplicate of the earlier get_offset_boss: [x, y, z] start offset
    scaled to a resolution level (z only scaled when isotropic)."""
    return [
        int(coord_frame.x_start / (2.**res)),
        int(coord_frame.y_start / (2.**res)),
        int(coord_frame.z_start / (2.**res)) if isotropic else coord_frame.z_start]
def create_channel_resource(rmt, chan_name, coll_name, exp_name, type='image',
                base_resolution=0, sources=None, datatype='uint16', new_channel=True):
    """
    Build a ChannelResource and (by default) create it on the Boss.

    Parameters
    ----------
    rmt : BossRemote used to create the project.
    chan_name, coll_name, exp_name : str, Boss resource names.
    type : str, channel type (default 'image').
    base_resolution : int, base resolution of the channel.
    sources : list of source channel names, or None for no sources.
    datatype : str, voxel datatype (default 'uint16').
    new_channel : bool; when True the channel is created on the Boss and
        the created resource is returned, otherwise the local resource.

    Returns
    -------
    ChannelResource (created remotely when new_channel is True).
    """
    # Bug fix: the default was a shared mutable list (`sources=[]`); use
    # None as the sentinel and build a fresh list per call.
    if sources is None:
        sources = []
    channel_resource = ChannelResource(chan_name, coll_name, exp_name, type=type,
                    base_resolution=base_resolution, sources=sources, datatype=datatype)
    if new_channel:
        new_rsc = rmt.create_project(channel_resource)
        return new_rsc
    return channel_resource
def upload_to_boss(rmt, data, channel_resource, resolution=0, chunk_size=16):
    """
    Upload a 3-D numpy volume to the Boss in chunks along the first axis.

    Parameters
    ----------
    rmt : BossRemote used to post the cutouts.
    data : 3-D numpy array indexed as (z, y, x) — the x/y cutout ranges are
        taken from shape[2]/shape[1].
    channel_resource : ChannelResource to write into.
    resolution : int, resolution level of the upload.
    chunk_size : int, number of z-slices per cutout (default 16, the value
        previously hard-coded).
    """
    Z_LOC = 0  # axis index of z in the (z, y, x) volume
    size = data.shape
    for i in range(0, size[Z_LOC], chunk_size):
        # Clamp the final chunk to the top of the volume.
        last_z = min(i + chunk_size, size[Z_LOC])
        print(resolution, [0, size[2]], [0, size[1]], [i, last_z])
        rmt.create_cutout(channel_resource, resolution,
                          [0, size[2]], [0, size[1]], [i, last_z],
                          np.asarray(data[i:last_z, :, :], order='C'))
def download_image(rmt, collection, experiment, channel, res=0, isotropic=True, ara_res=None):
    """Convenience wrapper: resolve a channel on the Boss, then download it
    as a SimpleITK image at the requested resolution level."""
    _, coord_rsc, chan_rsc = setup_channel_boss(rmt, collection, experiment, channel)
    return imgDownload_boss(rmt, chan_rsc, coord_rsc, resolution=res, isotropic=isotropic)
def main():
    """
    CLI entry point: download a channel from the Boss, run bias-field
    correction on it (via ndreg.preprocessor), and upload the corrected
    volume to a new '<channel>_bias_corrected' channel.
    """
    # NOTE(review): t_start_overall is never read afterwards; kept as-is.
    t_start_overall = time.time()
    parser = argparse.ArgumentParser(description='Register a brain in the BOSS and upload it back in a new experiment.')
    parser.add_argument('--collection', help='Name of collection to upload tif stack to', type=str)
    parser.add_argument('--experiment', help='Name of experiment to upload tif stack to', type=str)
    parser.add_argument('--channel', help='Name of channel to upload tif stack to. Default is new channel will be created unless otherwise specified. See --new_channel', type=str)
    parser.add_argument('--config', help='Path to configuration file with Boss API token. Default: ~/.intern/intern.cfg', default=os.path.expanduser('~/.intern/intern.cfg'))
    parser.add_argument('--scale', help='Scale at which to perform the bias correction. Default is 0.1 meaning 1/10th the size of the original image', default=0.1, type=float)
    args = parser.parse_args()
    # mm to um conversion factor
    mm_to_um = 1000.0
    # download image
    rmt = BossRemote(cfg_file_or_dict=args.config)
    # resolution level from 0-6
    resolution_image = 0
    image_isotropic = True
    # downloading image
    print('downloading experiment: {}, channel: {}...'.format(args.experiment, args.channel))
    t1 = time.time()
    img = download_image(rmt, args.collection, args.experiment, args.channel, res=resolution_image, isotropic=image_isotropic)
    print("time to download image at res {} um: {} seconds".format(img.GetSpacing()[0] * mm_to_um, time.time()-t1))
    print("correcting bias in image...")
    t1 = time.time()
    # scale = 0.05 # scale at which to perform bias correction
    img_bc = preprocessor.correct_bias_field(img, scale=args.scale, niters=[100,100,100,100])
    print("time to correct bias in image at res {} um: {} seconds".format(img.GetSpacing()[0] * mm_to_um * (1.0/args.scale), time.time()-t1))
    # print("upsampling bias to match original size of image...")
    # ch_rsc_og = create_channel_resource(rmt, args.channel, args.collection, args.experiment, new_channel=False)
    # meta = get_xyz_extents(rmt, ch_rsc_og)
    # spacing = np.array(meta[-1])/mm_to_um
    # x_size = meta[0][1]
    # y_size = meta[1][1]
    # z_size = meta[2][1]
    # size = (x_size, y_size, z_size)
    #
    # # recover bias
    # bias_ds = img_bc / sitk.Cast(img, img_bc.GetPixelID())
    #
    # # Upsample bias
    # bias = imgResample(bias_ds, spacing=spacing, size=img.GetSize())
    #
    # # apply bias to image
    # img_bc = sitk.Cast(img, sitk.sitkFloat32) * sitk.Cast(bias, sitk.sitkFloat32)
    #
    print("uploading bias corrected image back to the BOSS")
    new_channel = args.channel + '_bias_corrected'
    ch_rsc_bc = create_channel_resource(rmt, new_channel, args.collection, args.experiment, datatype='uint16', type='image')
    # Cast to uint16 to match the datatype of the destination channel.
    upload_to_boss(rmt, sitk.GetArrayFromImage(img_bc).astype('uint16'), ch_rsc_bc)
# Script entry point: run the download / bias-correct / upload pipeline.
if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"SimpleITK.AffineTransform",
"numpy.asarray",
"SimpleITK.GetArrayFromImage",
"time.time",
"intern.remote.boss.BossRemote",
"requests.get",
"SimpleITK.GetImageFromArray",
"ndreg.util.imgCollapseDimension",
"ndreg.preprocessor.correct_bias_field",
"os.path.expanduser"
] | [((334, 365), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['dimension'], {}), '(dimension)\n', (354, 365), True, 'import SimpleITK as sitk\n'), ((608, 639), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['dimension'], {}), '(dimension)\n', (628, 639), True, 'import SimpleITK as sitk\n'), ((2964, 2997), 'requests.get', 'requests.get', (['ds'], {'headers': 'headers'}), '(ds, headers=headers)\n', (2976, 2997), False, 'import requests\n'), ((6014, 6044), 'ndreg.util.imgCollapseDimension', 'util.imgCollapseDimension', (['img'], {}), '(img)\n', (6039, 6044), False, 'from ndreg import preprocessor, util\n'), ((7675, 7686), 'time.time', 'time.time', ([], {}), '()\n', (7684, 7686), False, 'import time\n'), ((7700, 7812), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Register a brain in the BOSS and upload it back in a new experiment."""'}), "(description=\n 'Register a brain in the BOSS and upload it back in a new experiment.')\n", (7723, 7812), False, 'import argparse\n'), ((8658, 8698), 'intern.remote.boss.BossRemote', 'BossRemote', ([], {'cfg_file_or_dict': 'args.config'}), '(cfg_file_or_dict=args.config)\n', (8668, 8698), False, 'from intern.remote.boss import BossRemote\n'), ((8912, 8923), 'time.time', 'time.time', ([], {}), '()\n', (8921, 8923), False, 'import time\n'), ((9222, 9233), 'time.time', 'time.time', ([], {}), '()\n', (9231, 9233), False, 'import time\n'), ((9309, 9397), 'ndreg.preprocessor.correct_bias_field', 'preprocessor.correct_bias_field', (['img'], {'scale': 'args.scale', 'niters': '[100, 100, 100, 100]'}), '(img, scale=args.scale, niters=[100, 100, \n 100, 100])\n', (9340, 9397), False, 'from ndreg import preprocessor, util\n'), ((5731, 5760), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['array'], {}), '(array)\n', (5753, 5760), True, 'import SimpleITK as sitk\n'), ((7270, 7313), 'numpy.asarray', 'np.asarray', (['data[i:last_z, :, :]'], {'order': '"""C"""'}), "(data[i:last_z, :, :], 
order='C')\n", (7280, 7313), True, 'import numpy as np\n'), ((8318, 8360), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.intern/intern.cfg"""'], {}), "('~/.intern/intern.cfg')\n", (8336, 8360), False, 'import os\n'), ((9150, 9161), 'time.time', 'time.time', ([], {}), '()\n', (9159, 9161), False, 'import time\n'), ((9515, 9526), 'time.time', 'time.time', ([], {}), '()\n', (9524, 9526), False, 'import time\n'), ((10468, 10498), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img_bc'], {}), '(img_bc)\n', (10490, 10498), True, 'import SimpleITK as sitk\n')] |
''' Create the Sounding (Profile) Object '''
from __future__ import division
import numpy as np
import numpy.ma as ma
import getpass
from datetime import datetime
from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire
import sharppy.io.qc_tools as qc_tools
from sharppy.databases.sars import hail, supercell
from sharppy.databases.pwv import pwv_climo
from sharppy.sharptab.constants import MISSING
import logging
import warnings
def create_profile(**kwargs):
    '''
    This is a wrapper function for constructing Profile objects
    and objects that inherit from the Profile class. This will
    construct and return the appropriate Profile object
    based on the supplied keyword argument. If no profile keyword
    is supplied, it defaults to a basic Profile. This also requires
    that you pass through all the relevant keyword arguments for
    the constructors to the Profile objects and the objects that
    inherit from Profile.

    Parameters
    ----------
    Mandatory Keywords
        pres : array_like
            The pressure values (Hectopascals)
        hght : array_like
            The corresponding height values (Meters)
        tmpc : array_like
            The corresponding temperature values (Celsius)
        dwpc : array_like
            The corresponding dewpoint temperature values (Celsius)

    Optional Keyword Pairs (must use one or the other)
        wdir : array_like
            The direction from which the wind is blowing in meteorological degrees
        wspd : array_like
            The speed of the wind (kts)
        OR
        u : array_like
            The U-component of the direction from which the wind is blowing. (kts)
        v : array_like
            The V-component of the direction from which the wind is blowing. (kts)

    Optional Keywords
        missing : number, optional (default: sharppy.sharptab.constants.MISSING)
            The value of the missing flag used in the Profile objects
        profile : string, optional (default: 'default')
            The text identifier for the Profile to be generated. Valid options
            include ('default' | 'raw' | 'convective'). Default will construct a basic
            Profile, and convective will construct a ConvectiveProfile used for
            the SPC style GUI.
        omeg: array_like
            The corresponding vertical velocity values (Pa/s)

    Returns
    -------
    Profile : a basic Profile object
        This is the most basic and default object.
    OR
    ConvectiveProfile : a child of Profile
        This is the class used for the SPC GUI.

    Raises
    ------
    ValueError
        If the profile keyword is not one of 'default', 'raw', 'convective'.
    '''
    ## Get the user's input for which Profile to construct.
    ## Make the default the 'default' profile.
    profile = kwargs.get('profile', 'default')

    ## if the profile is default, pass the rest of the keyword
    ## arguments to the BasicProfile object and return it
    if profile == 'default':
        return BasicProfile(**kwargs)
    ## if the profile is raw, return a base profile object
    elif profile == 'raw':
        return Profile(**kwargs)
    ## if the profile is convective, pass the rest of the keyword
    ## arguments to the ConvectiveProfile object and return it
    elif profile == 'convective':
        return ConvectiveProfile(**kwargs)
    else:
        # Bug fix: an unrecognized profile string used to fall off the end
        # and silently return None; fail loudly so typos are caught.
        raise ValueError("Unknown profile type: '%s'" % profile)
class Profile(object):
    """
    Base sounding container: stores the raw vertical profile arrays
    (pressure, height, temperature, dewpoint, winds, and optionally omega)
    with missing values masked, but computes no derived indices.
    BasicProfile and ConvectiveProfile build on this class.
    """

    def __init__(self, **kwargs):
        """
        Create a raw Profile.

        Mandatory keywords: pres (hPa), hght (m), tmpc (C), dwpc (C) --
        equal-length 1-D arrays.  Wind may be given either as wdir/wspd
        (degrees / kts) or as u/v (kts).  Optional keywords: missing
        (missing-data flag), latitude, strictQC, omeg (Pa/s),
        tmp_stdev / dew_stdev, location, date.
        """
        ## set the missing variable
        self.missing = kwargs.get('missing', MISSING)
        self.profile = kwargs.get('profile')
        self.latitude = kwargs.get('latitude', ma.masked)
        self.strictQC = kwargs.get('strictQC', False)

        ## get the data and turn them into arrays
        self.pres = ma.asanyarray(kwargs.get('pres'), dtype=float)
        self.hght = ma.asanyarray(kwargs.get('hght'), dtype=float)
        self.tmpc = ma.asanyarray(kwargs.get('tmpc'), dtype=float)
        self.dwpc = ma.asanyarray(kwargs.get('dwpc'), dtype=float)

        assert self.pres.ndim == 1 and self.hght.ndim == 1 and self.tmpc.ndim == 1 and self.dwpc.ndim == 1,\
            "The dimensions of the pres, hght, tmpc, and dwpc arrays passed to the Profile object constructor are not all one dimensional."
        assert len(self.pres) > 1 and len(self.hght) > 1 and len(self.tmpc) > 1 and len(self.dwpc) > 1,\
            "The length of the pres, hght, tmpc, and dwpc arrays passed to Profile object constructor must all have a length greater than 1."
        assert len(self.pres) == len(self.hght) == len(self.tmpc) == len(self.dwpc),\
            "The pres, hght, tmpc, or dwpc arrays passed to the Profile object constructor must all have the same length."

        if np.ma.max(self.pres) <= 100:
            warnings.warn("The pressure values passed to the profile object are below 100 mb. This may cause some the SHARPpy routines not to behave as expected.")

        if 'wdir' in kwargs and 'wspd' in kwargs:
            self.wdir = ma.asanyarray(kwargs.get('wdir'), dtype=float)
            self.wspd = ma.asanyarray(kwargs.get('wspd'), dtype=float)
            assert len(self.wdir) == len(self.wspd) == len(self.pres), "The wdir and wspd arrays passed to the Profile constructor must have the same length as the pres array."
            assert self.wdir.ndim == 1 and self.wspd.ndim == 1, "The wdir and wspd arrays passed to the Profile constructor are not one dimensional."
            # u/v are derived later (e.g. by BasicProfile) from wdir/wspd.
            self.u = None
            self.v = None
        ## did the user provide the wind in u,v form?
        elif 'u' in kwargs and 'v' in kwargs:
            self.u = ma.asanyarray(kwargs.get('u'), dtype=float)
            self.v = ma.asanyarray(kwargs.get('v'), dtype=float)
            assert len(self.u) == len(self.v) == len(self.pres), "The u and v arrays passed to the Profile constructor must have the same length as the pres array."
            assert self.u.ndim == 1 and self.v.ndim == 1, "The wdir and wspd arrays passed to the Profile constructor are not one dimensional."
            # wdir/wspd are derived later from u/v.
            self.wdir = None
            self.wspd = None
        else:
            warnings.warn("No wind data (wdir/wspd or u/v) passed to the Profile object constructor. This may cause some of the SHARPpy routines to not behave as expected.")

        ## check if any standard deviation data was supplied
        if 'tmp_stdev' in kwargs:
            self.dew_stdev = ma.asanyarray(kwargs.get('dew_stdev'), dtype=float)
            self.tmp_stdev = ma.asanyarray(kwargs.get('tmp_stdev'), dtype=float)
        else:
            self.dew_stdev = None
            self.tmp_stdev = None

        if kwargs.get('omeg', None) is not None:
            ## get the omega data and turn into arrays
            self.omeg = ma.asanyarray(kwargs.get('omeg'))
            assert len(self.omeg) == len(self.pres), "Length of omeg array passed to constructor is not the same length as the pres array."
            assert self.omeg.ndim == 1, "omeg array is not one dimensional."
            assert len(self.omeg) > 1, "omeg array length must have a length greater than 1."
        else:
            self.omeg = None

        ## optional keyword argument for location
        self.location = kwargs.get('location', None)
        self.date = kwargs.get('date', None)

        if self.strictQC is True:
            self.checkDataIntegrity()

    @classmethod
    def copy(cls, prof, strictQC=False, **kwargs):
        '''
        Copies a profile object.  Extra keyword arguments override the
        copied fields; cls is the class being constructed (e.g.,
        ConvectiveProfile), so subclasses get copies of their own type.
        '''
        new_kwargs = dict( (k, prof.__dict__[k]) for k in [ 'pres', 'hght', 'tmpc', 'dwpc', 'omeg', 'location', 'date', 'latitude', 'strictQC', 'missing' ])
        if prof.u is not None and prof.v is not None:
            new_kwargs.update({'u':prof.u, 'v':prof.v})
        else:
            new_kwargs.update({'wspd':prof.wspd, 'wdir':prof.wdir})
        new_kwargs.update({'strictQC':strictQC})
        new_kwargs.update(kwargs)
        new_prof = cls(**new_kwargs)

        # Preserve any (possibly user-modified) storm motion on the copy.
        if hasattr(prof, 'srwind'):
            rmu, rmv, lmu, lmv = prof.srwind
            new_prof.set_srright(rmu, rmv)
            new_prof.set_srleft(lmu, lmv)
        return new_prof

    def toFile(self, file_name):
        """
        Write the profile to *file_name* in the SPC tabular sounding
        format.  Masked/invalid values are written as -9999.
        """
        def qc(val):
            # Substitute the SPC missing flag for any invalid value.
            return -9999. if not utils.QC(val) else val

        snd_loc = (" " * (4 - len(self.location))) + self.location
        now = datetime.utcnow()
        user = getpass.getuser()
        # Use a context manager so the file is closed even if a write fails
        # (the original left the handle open on exceptions).
        with open(file_name, 'w') as snd_file:
            snd_file.write("%TITLE%\n")
            snd_file.write("%s %s\n Saved by user: %s on %s UTC\n" % (snd_loc, self.date.strftime("%y%m%d/%H%M"), user, now.strftime('%Y%m%d/%H%M')))
            snd_file.write(" LEVEL HGHT TEMP DWPT WDIR WSPD\n")
            snd_file.write("-------------------------------------------------------------------\n")
            snd_file.write("%RAW%\n")
            for idx in range(self.pres.shape[0]):
                # Renamed from `str` to avoid shadowing the builtin.
                row = ""
                for col in ['pres', 'hght', 'tmpc', 'dwpc', 'wdir', 'wspd']:
                    row += "%8.2f, " % qc(self.__dict__[col][idx])
                snd_file.write(row[:-3] + "\n")
            snd_file.write("%END%\n")

    def checkDataIntegrity(self):
        """
        Run the strict QC checks on the profile arrays; each failing check
        raises via qc_tools.raiseError with a DataQualityException.
        """
        if not qc_tools.isHGHTValid(self.hght):
            qc_tools.raiseError("Invalid height data. Data has repeat height values or height does not increase as pressure decreases.", qc_tools.DataQualityException)
        if not qc_tools.isTMPCValid(self.tmpc):
            qc_tools.raiseError("Invalid temperature data. Profile contains a temperature value < -273.15 Celsius.", qc_tools.DataQualityException)
        if not qc_tools.isDWPCValid(self.dwpc):
            qc_tools.raiseError("Invalid dewpoint data. Profile contains a dewpoint value < -273.15 Celsius.", qc_tools.DataQualityException)
        if not qc_tools.isWSPDValid(self.wspd):
            qc_tools.raiseError("Invalid wind speed data. Profile contains a wind speed value < 0 knots.", qc_tools.DataQualityException)
        if not qc_tools.isWDIRValid(self.wdir):
            qc_tools.raiseError("Invalid wind direction data. Profile contains a wind direction < 0 degrees or >= 360 degrees.", qc_tools.DataQualityException)
class BasicProfile(Profile):
    '''
    The default data class for SHARPpy.
    All other data classes inherit from this class.
    This class holds the vertical data for pressure,
    height, temperature, dewpoint, and winds. This class
    has no indices computed.
    '''

    def __init__(self, **kwargs):
        '''
        Create the sounding data object

        Parameters
        ----------
        Mandatory Keywords
            pres : array_like
                The pressure values (Hectopaschals)
            hght : array_like
                The corresponding height values (Meters)
            tmpc : array_like
                The corresponding temperature values (Celsius)
            dwpc : array_like
                The corresponding dewpoint temperature values (Celsius)

        Optional Keyword Pairs (must use one or the other)
            wdir : array_like
                The direction from which the wind is blowing in
                meteorological degrees
            wspd : array_like
                The speed of the wind (kts)
            OR
            u : array_like
                The U-component of the direction from which the wind
                is blowing (kts)
            v : array_like
                The V-component of the direction from which the wind
                is blowing. (kts)

        Optional Keywords
            missing : number (default: sharppy.sharptab.constants.MISSING)
                The value of the missing flag
            location : string (default: None)
                The 3 character station identifier or 4 character
                WMO station ID for radiosonde locations. Used for
                the PWV database.
            strictQC : boolean
                A flag that indicates whether or not the strict quality control
                routines should be run on the profile upon construction.

        Returns
        -------
        prof: Profile object
        '''
        super(BasicProfile, self).__init__(**kwargs)
        self.strictQC = kwargs.get('strictQC', True)

        ## did the user provide the wind in vector form?
        if self.wdir is not None:
            self.wdir[self.wdir == self.missing] = ma.masked
            self.wspd[self.wspd == self.missing] = ma.masked
            # Mask each wind array wherever its partner is masked so the
            # pair stays consistent.
            self.wdir[self.wspd.mask] = ma.masked
            self.wspd[self.wdir.mask] = ma.masked
            self.u, self.v = utils.vec2comp(self.wdir, self.wspd)
        ## did the user provide the wind in u,v form?
        elif self.u is not None:
            self.u[self.u == self.missing] = ma.masked
            self.v[self.v == self.missing] = ma.masked
            self.u[self.v.mask] = ma.masked
            self.v[self.u.mask] = ma.masked
            self.wdir, self.wspd = utils.comp2vec(self.u, self.v)

        ## check if any standard deviation data was supplied
        if self.tmp_stdev is not None:
            self.dew_stdev[self.dew_stdev == self.missing] = ma.masked
            self.tmp_stdev[self.tmp_stdev == self.missing] = ma.masked
            self.dew_stdev.set_fill_value(self.missing)
            self.tmp_stdev.set_fill_value(self.missing)

        if self.omeg is not None:
            ## mask the missing omega values
            self.omeg[self.omeg == self.missing] = ma.masked
        else:
            self.omeg = ma.masked_all(len(self.hght))

        # QC Checks on the arrays passed to the constructor.
        qc_tools.areProfileArrayLengthEqual(self)

        ## mask the missing values
        self.pres[self.pres == self.missing] = ma.masked
        self.hght[self.hght == self.missing] = ma.masked
        self.tmpc[self.tmpc == self.missing] = ma.masked
        self.dwpc[self.dwpc == self.missing] = ma.masked

        self.logp = np.log10(self.pres.copy())
        self.vtmp = thermo.virtemp( self.pres, self.tmpc, self.dwpc )
        idx = np.ma.where(self.pres > 0)[0]
        # Where the dewpoint is masked, fall back to the dry temperature.
        self.vtmp[self.dwpc.mask[idx]] = self.tmpc[self.dwpc.mask[idx]]

        ## get the index of the top and bottom of the profile
        self.sfc = self.get_sfc()
        self.top = self.get_top()

        if self.strictQC is True:
            self.checkDataIntegrity()

        ## generate the derived thermodynamic profiles
        self.wetbulb = self.get_wetbulb_profile()
        self.thetae = self.get_thetae_profile()
        self.theta = self.get_theta_profile()
        self.wvmr = self.get_wvmr_profile()
        self.relh = self.get_rh_profile()

    def get_sfc(self):
        '''
        Convenience function to get the index of the surface. It is
        determined by finding the lowest level in which a temperature is
        reported.

        Returns
        -------
        Index of the surface
        '''
        return np.where(~self.tmpc.mask)[0].min()

    def get_top(self):
        '''
        Convenience function to get the index of the top of the profile.
        It is determined by finding the highest level in which a
        temperature is reported.  (Docstring fixed: it was previously a
        copy-paste of get_sfc.)

        Returns
        -------
        Index of the top of the profile
        '''
        return np.where(~self.tmpc.mask)[0].max()

    def get_wvmr_profile(self):
        '''
        Function to calculate the water vapor mixing ratio profile.

        Returns
        -------
        Array of water vapor mixing ratio profile
        '''
        wvmr = thermo.mixratio( self.pres, self.dwpc )
        wvmr[wvmr == self.missing] = ma.masked
        wvmr.set_fill_value(self.missing)
        return wvmr

    def get_wetbulb_profile(self):
        '''
        Function to calculate the wetbulb profile.

        Returns
        -------
        Array of wet bulb profile
        '''
        wetbulb = ma.empty(self.pres.shape[0])
        # Bug fix: iterate over the thermodynamic arrays rather than
        # len(self.v), which raised a TypeError when no wind data was
        # supplied (self.v is None in that case).
        for i in range(self.pres.shape[0]):
            wetbulb[i] = thermo.wetbulb( self.pres[i], self.tmpc[i], self.dwpc[i] )
        wetbulb[wetbulb == self.missing] = ma.masked
        wetbulb.set_fill_value(self.missing)
        return wetbulb

    def get_theta_profile(self):
        '''
        Function to calculate the theta profile.

        Returns
        -------
        Array of theta profile
        '''
        theta = ma.empty(self.pres.shape[0])
        # Bug fix: iterate over pres, not self.v (see get_wetbulb_profile).
        for i in range(self.pres.shape[0]):
            theta[i] = thermo.theta(self.pres[i], self.tmpc[i])
        theta[theta == self.missing] = ma.masked
        theta.set_fill_value(self.missing)
        theta = thermo.ctok(theta)
        return theta

    def get_thetae_profile(self):
        '''
        Function to calculate the theta-e profile.

        Returns
        -------
        Array of theta-e profile
        '''
        thetae = ma.empty(self.pres.shape[0])
        # Bug fix: iterate over pres, not self.v (see get_wetbulb_profile).
        for i in range(self.pres.shape[0]):
            thetae[i] = thermo.ctok( thermo.thetae(self.pres[i], self.tmpc[i], self.dwpc[i]) )
        thetae[thetae == self.missing] = ma.masked
        thetae.set_fill_value(self.missing)
        return thetae

    def get_rh_profile(self):
        '''
        Function to calculate the relative humidity profile

        Returns
        -------
        Array of the relative humidity profile
        '''
        rh = thermo.relh(self.pres, self.tmpc, self.dwpc)
        rh[rh == self.missing] = ma.masked
        rh.set_fill_value(self.missing)
        return rh
class ConvectiveProfile(BasicProfile):
'''
The Convective data class for SHARPPy. This is the class used
to generate the indices that are default for the SPC NSHARP display.
This class inherits from the Profile object.
'''
def __init__(self, **kwargs):
'''
Create the sounding data object
Parameters
----------
Mandatory Keywords
pres : array_like
The pressure values (Hectopaschals)
hght : array_like
The corresponding height values (Meters)
tmpc : array_like
The corresponding temperature values (Celsius)
dwpc : array_like
The corresponding dewpoint temperature values (Celsius)
Optional Keyword Pairs (must use one or the other)
wdir : array_like
The direction from which the wind is blowing in
meteorological degrees
wspd : array_like
The speed of the wind (kts)
OR
u : array_like
The U-component of the direction from which the wind
is blowing
v : array_like
The V-component of the direction from which the wind
is blowing.
missing : number, optional (default: sharppy.sharptab.constants.MISSING)
The value of the missing flag
location : string, optional (default: None)
The 3 character station identifier or 4 character
WMO station ID for radiosonde locations. Used for
the PWV database.
omeg : array_like, optional
List of the vertical velocity in pressure coordinates with height (Pascals/second)
Returns
-------
A profile object
'''
## call the constructor for Profile
super(ConvectiveProfile, self).__init__(**kwargs)
assert np.ma.max(self.pres) > 100, "ConvectiveProfile objects require that the minimum pressure passed in the data array is greater than 100 mb."
self.user_srwind = None
# Generate the fire weather paramters
logging.debug("Calling get_fire().")
dt = datetime.now()
self.get_fire()
logging.debug("get_fire() took: " + str((datetime.now() - dt)))
# Generate the winter inset/precipitation types
logging.debug("Calling get_precip().")
dt = datetime.now()
self.get_precip()
logging.debug("get_precip() took: " + str((datetime.now() - dt)))
## generate various parcels
logging.debug("Calling get_parcels().")
dt = datetime.now()
self.get_parcels()
logging.debug("get_parcels() took: " + str((datetime.now() - dt)))
## calculate thermodynamic window indices
logging.debug("Calling get_thermo().")
dt = datetime.now()
self.get_thermo()
logging.debug("get_thermo() took: " + str((datetime.now() - dt)))
## generate wind indices
logging.debug("Calling get_kinematics().")
dt = datetime.now()
self.get_kinematics()
logging.debug("get_kinematics() took: " + str((datetime.now() - dt)))
## get SCP, STP(cin), STP(fixed), SHIP
logging.debug("Calling get_severe().")
dt = datetime.now()
self.get_severe()
logging.debug("get_severe() took: " + str((datetime.now() - dt)))
## calculate the SARS database matches
logging.debug("Calling get_sars().")
dt = datetime.now()
self.get_sars()
logging.debug("get_sars() took: " + str((datetime.now() - dt)))
## get the precipitable water climatology
logging.debug("Calling get_PWV_loc().")
dt = datetime.now()
self.get_PWV_loc()
logging.debug("get_PWV_loc() took: " + str((datetime.now() - dt)))
## get the parcel trajectory
logging.debug("Calling get_traj().")
dt = datetime.now()
self.get_traj()
logging.debug("get_traj() took: " + str((datetime.now() - dt)))
## miscellaneous indices I didn't know where to put
logging.debug("Calling get_indices().")
dt = datetime.now()
self.get_indices()
logging.debug("get_indices() took: " + str((datetime.now() - dt)))
## get the possible watch type
logging.debug("Calling get_watch().")
dt = datetime.now()
self.get_watch()
logging.debug("get_watch() took: " + str((datetime.now() - dt)))
    def get_fire(self):
        '''
        Function to generate different indices and information
        regarding any fire weather in the sounding. This helps fill
        the data shown in the FIRE inset (Fosberg index, Haines
        indices, and PBL depth/moisture/wind statistics).

        Parameters
        ----------
        None

        Returns
        -------
        None
        '''
        self.fosberg = fire.fosberg(self)
        self.haines_hght = fire.haines_height(self)
        self.haines_low = fire.haines_low(self)
        self.haines_mid = fire.haines_mid(self)
        self.haines_high = fire.haines_high(self)
        # Planetary boundary layer top (pressure level) and derived stats.
        self.ppbl_top = params.pbl_top(self)
        self.sfc_rh = thermo.relh(self.pres[self.sfc], self.tmpc[self.sfc], self.dwpc[self.sfc])
        pres_sfc = self.pres[self.sfc]
        pres_1km = interp.pres(self, interp.to_msl(self, 1000.))
        # PBL depth in meters AGL.
        self.pbl_h = interp.to_agl(self, interp.hght(self, self.ppbl_top))
        self.rh01km = params.mean_relh(self, pbot=pres_sfc, ptop=pres_1km)
        self.pblrh = params.mean_relh(self, pbot=pres_sfc, ptop=self.ppbl_top)
        self.meanwind01km = winds.mean_wind(self, pbot=pres_sfc, ptop=pres_1km)
        self.meanwindpbl = winds.mean_wind(self, pbot=pres_sfc, ptop=self.ppbl_top)
        self.pblmaxwind = winds.max_wind(self, lower=0, upper=self.pbl_h)
        #self.pblmaxwind = [np.ma.masked, np.ma.masked]
        # Most-unstable parcel CAPE for the fire inset; presumably pres=500
        # is the search depth for the LPL -- confirm against
        # params.DefineParcel's flag=3 semantics.
        mulplvals = params.DefineParcel(self, flag=3, pres=500)
        mupcl = params.cape(self, lplvals=mulplvals)
        self.bplus_fire = mupcl.bplus
    def get_precip(self):
        '''
        Function to generate different indices and information
        regarding any precipitation in the sounding. This helps fill
        the data shown in the WINTER inset.

        Returns nothing, but sets the following
        variables:

        self.dgz_pbot, self.dgz_ptop : the dendretic growth zone (DGZ) top and bottom (mb)
        self.dgz_meanrh : DGZ mean relative humidity (%)
        self.dgz_pw : the preciptable water vapor in the DGZ (inches)
        self.dgz_meanq : the mean water vapor mixing ratio in the DGZ (g/kg)
        self.dgz_meanomeg : the mean omega in the DGZ (microbars/second)
        self.oprh : the OPRH variable (units don't mean anything)
        self.plevel, self.phase, self.tmp, self.st : the initial phase, level, temperature, and state of any precip in the sounding
        self.tpos, self.tneg, self.ttop, self.tbot : positive and negative temperature layers in the sounding
        self.wpos, self.wneg, self.wtop, self.wbot : positive and negative wetbulb layers in the soundings
        self.precip_type : the best guess precipitation type

        Parameters
        ----------
        None

        Returns
        -------
        None
        '''
        # Dendritic growth zone bounds and its mean moisture/lift stats.
        self.dgz_pbot, self.dgz_ptop = params.dgz(self)
        self.dgz_meanrh = params.mean_relh(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop)
        self.dgz_pw = params.precip_water(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop)
        self.dgz_meanq = params.mean_mixratio(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop)
        self.dgz_meanomeg = params.mean_omega(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop) * 10 # to microbars/sec
        self.oprh = self.dgz_meanomeg * self.dgz_pw * (self.dgz_meanrh/100.)

        # Initial precip phase, then the warm/cold layers used to refine it.
        self.plevel, self.phase, self.tmp, self.st = watch_type.init_phase(self)
        self.tpos, self.tneg, self.ttop, self.tbot = watch_type.posneg_temperature(self, start=self.plevel)
        self.wpos, self.wneg, self.wtop, self.wbot = watch_type.posneg_wetbulb(self, start=self.plevel)
        self.precip_type = watch_type.best_guess_precip(self, self.phase, self.plevel, self.tmp, self.tpos, self.tneg)
def get_parcels(self):
    '''
    Generate the various parcels and parcel traces.

    Returns nothing, but sets the following variables:

    self.mupcl : Most Unstable Parcel
    self.sfcpcl : Surface Based Parcel
    self.mlpcl : Mixed Layer Parcel
    self.fcstpcl : Forecast Surface Parcel
    self.ebottom : bottom pressure level of the effective inflow layer
    self.etop : top pressure level of the effective inflow layer
    self.ebotm : bottom, meters (agl), of the effective inflow layer
    self.etopm : top, meters (agl), of the effective inflow layer
    self.effpcl : effective-layer parcel (falls back to the surface parcel
                  when no effective inflow layer exists)

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    self.mupcl = params.parcelx( self, flag=3 )
    # If the most-unstable parcel originates at the surface, reuse it as
    # the surface parcel and skip one (expensive) parcel lift.
    if self.mupcl.lplvals.pres == self.pres[self.sfc]:
        self.sfcpcl = self.mupcl
    else:
        self.sfcpcl = params.parcelx( self, flag=1 )
    self.fcstpcl = params.parcelx( self, flag=2 )
    self.mlpcl = params.parcelx( self, flag=4 )
    self.usrpcl = params.Parcel()
    ## get the effective inflow layer data
    self.ebottom, self.etop = params.effective_inflow_layer( self, mupcl=self.mupcl )
    ## if there was no effective inflow layer, set the values to masked
    if self.etop is ma.masked or self.ebottom is ma.masked:
        self.ebotm = ma.masked; self.etopm = ma.masked
        self.effpcl = self.sfcpcl # Default to surface parcel, as in params.DefineProfile().
    ## otherwise, interpolate the heights given to above ground level
    else:
        self.ebotm = interp.to_agl(self, interp.hght(self, self.ebottom))
        self.etopm = interp.to_agl(self, interp.hght(self, self.etop))
        # The below code was adapted from params.DefineProfile()
        # Lifting one additional parcel probably won't slow the program too much.
        # It's just one more lift compared to all the lifts in the params.effective_inflow_layer() call.
        # Mean theta / mixing ratio over the inflow layer define a single
        # representative parcel at the layer's mid-level pressure.
        mtha = params.mean_theta(self, self.ebottom, self.etop)
        mmr = params.mean_mixratio(self, self.ebottom, self.etop)
        effpres = (self.ebottom+self.etop)/2.
        efftmpc = thermo.theta(1000., mtha, effpres)
        effdwpc = thermo.temp_at_mixrat(mmr, effpres)
        self.effpcl = params.parcelx(self, flag=5, pres=effpres, tmpc=efftmpc, dwpc=effdwpc) #This is the effective parcel.
def get_kinematics(self):
    '''
    Generate the numerous kinematic quantities used for display and
    calculations. It requires that the parcel calculations have already
    been called for the lcl to el shear and mean wind vectors, as well
    as indices that require an effective inflow layer.

    Sets layer shears, mean winds, Bunkers storm motion, storm-relative
    winds/helicity for both the right- and left-moving supercell vectors,
    and finally aliases the generic ``srw_*`` / ``srh*`` attributes to
    the right-mover values.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    sfc = self.pres[self.sfc]
    # Pressure levels (mb) for a set of fixed AGL heights, obtained in
    # one interpolation call.
    heights = np.array([1000., 3000., 4000., 5000., 6000., 8000., 9000.])
    p1km, p3km, p4km, p5km, p6km, p8km, p9km = interp.pres(self, interp.to_msl(self, heights))
    ## 1km and 6km winds
    self.wind1km = interp.vec(self, p1km)
    self.wind6km = interp.vec(self, p6km)
    ## calculate wind shear
    self.sfc_1km_shear = winds.wind_shear(self, pbot=sfc, ptop=p1km)
    self.sfc_3km_shear = winds.wind_shear(self, pbot=sfc, ptop=p3km)
    self.sfc_6km_shear = winds.wind_shear(self, pbot=sfc, ptop=p6km)
    self.sfc_8km_shear = winds.wind_shear(self, pbot=sfc, ptop=p8km)
    self.sfc_9km_shear = winds.wind_shear(self, pbot=sfc, ptop=p9km)
    self.lcl_el_shear = winds.wind_shear(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres)
    ## calculate mean wind
    self.mean_1km = utils.comp2vec(*winds.mean_wind(self, pbot=sfc, ptop=p1km))
    self.mean_3km = utils.comp2vec(*winds.mean_wind(self, pbot=sfc, ptop=p3km))
    self.mean_6km = utils.comp2vec(*winds.mean_wind(self, pbot=sfc, ptop=p6km))
    self.mean_8km = utils.comp2vec(*winds.mean_wind(self, pbot=sfc, ptop=p8km))
    self.mean_lcl_el = utils.comp2vec(*winds.mean_wind(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres))
    ## parameters that depend on the presence of an effective inflow layer
    if self.etop is ma.masked or self.ebottom is ma.masked:
        # No effective inflow layer: fall back to the non-parcel Bunkers
        # motion and mask/MISSING-fill every effective-layer quantity.
        self.etopm = ma.masked; self.ebotm = ma.masked
        self.bunkers = winds.non_parcel_bunkers_motion( self )
        if self.user_srwind is None:
            self.user_srwind = self.bunkers
        self.srwind = self.user_srwind
        self.eff_shear = [MISSING, MISSING]
        self.ebwd = [MISSING, MISSING, MISSING]
        self.ebwspd = MISSING
        self.mean_eff = [MISSING, MISSING, MISSING]
        self.mean_ebw = [MISSING, MISSING, MISSING]
        self.right_srw_eff = [MISSING, MISSING, MISSING]
        self.right_srw_ebw = [MISSING, MISSING, MISSING]
        self.right_esrh = [ma.masked, ma.masked, ma.masked]
        self.right_critical_angle = ma.masked
        self.left_srw_eff = [MISSING, MISSING, MISSING]
        self.left_srw_ebw = [MISSING, MISSING, MISSING]
        self.left_esrh = [ma.masked, ma.masked, ma.masked]
        self.left_critical_angle = ma.masked
    else:
        self.bunkers = params.bunkers_storm_motion(self, mupcl=self.mupcl, pbot=self.ebottom)
        if self.user_srwind is None:
            self.user_srwind = self.bunkers
        self.srwind = self.user_srwind
        # elh: pressure at the midpoint between the inflow-layer bottom
        # and the MU parcel's equilibrium level (half the storm depth).
        depth = ( self.mupcl.elhght - self.ebotm ) / 2
        elh = interp.pres(self, interp.to_msl(self, self.ebotm + depth))
        ## calculate mean wind
        self.mean_eff = winds.mean_wind(self, self.ebottom, self.etop )
        self.mean_ebw = winds.mean_wind(self, pbot=self.ebottom, ptop=elh )
        ## calculate wind shear of the effective layer
        self.eff_shear = winds.wind_shear(self, pbot=self.ebottom, ptop=self.etop)
        self.ebwd = winds.wind_shear(self, pbot=self.ebottom, ptop=elh)
        self.ebwspd = utils.mag( self.ebwd[0], self.ebwd[1] )
        ## calculate quantities relative to the right-mover vector
        self.right_srw_eff = winds.sr_wind(self, pbot=self.ebottom, ptop=self.etop, stu=self.srwind[0], stv=self.srwind[1] )
        self.right_srw_ebw = winds.sr_wind(self, pbot=self.ebottom, ptop=elh, stu=self.srwind[0], stv=self.srwind[1] )
        self.right_esrh = winds.helicity(self, self.ebotm, self.etopm, stu=self.srwind[0], stv=self.srwind[1])
        self.right_critical_angle = winds.critical_angle(self, stu=self.srwind[0], stv=self.srwind[1])
        ## calculate quantities relative to the left-mover vector
        self.left_srw_eff = winds.sr_wind(self, pbot=self.ebottom, ptop=self.etop, stu=self.srwind[2], stv=self.srwind[3] )
        self.left_srw_ebw = winds.sr_wind(self, pbot=self.ebottom, ptop=elh, stu=self.srwind[2], stv=self.srwind[3] )
        self.left_esrh = winds.helicity(self, self.ebotm, self.etopm, stu=self.srwind[2], stv=self.srwind[3])
        self.left_critical_angle = winds.critical_angle(self, stu=self.srwind[2], stv=self.srwind[3])
    ## calculate quantities relative to the right-mover vector
    self.right_srw_1km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p1km, stu=self.srwind[0], stv=self.srwind[1] ))
    self.right_srw_3km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p3km, stu=self.srwind[0], stv=self.srwind[1] ))
    self.right_srw_6km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p6km, stu=self.srwind[0], stv=self.srwind[1] ))
    self.right_srw_8km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p8km, stu=self.srwind[0], stv=self.srwind[1] ))
    self.right_srw_4_5km = utils.comp2vec(*winds.sr_wind(self, pbot=p4km, ptop=p5km, stu=self.srwind[0], stv=self.srwind[1] ))
    self.right_srw_lcl_el = utils.comp2vec(*winds.sr_wind(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres, stu=self.srwind[0], stv=self.srwind[1] ))
    # This is for the red, blue, and purple bars that appear on the SR Winds vs. Height plot
    self.right_srw_0_2km = winds.sr_wind(self, pbot=sfc, ptop=interp.pres(self, interp.to_msl(self, 2000.)), stu=self.srwind[0], stv=self.srwind[1])
    self.right_srw_4_6km = winds.sr_wind(self, pbot=interp.pres(self, interp.to_msl(self, 4000.)), ptop=p6km, stu=self.srwind[0], stv=self.srwind[1])
    self.right_srw_9_11km = winds.sr_wind(self, pbot=interp.pres(self, interp.to_msl(self, 9000.)), ptop=interp.pres(self, interp.to_msl(self, 11000.)), stu=self.srwind[0], stv=self.srwind[1])
    ## calculate quantities relative to the left-mover vector
    self.left_srw_1km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p1km, stu=self.srwind[2], stv=self.srwind[3] ))
    self.left_srw_3km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p3km, stu=self.srwind[2], stv=self.srwind[3] ))
    self.left_srw_6km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p6km, stu=self.srwind[2], stv=self.srwind[3] ))
    self.left_srw_8km = utils.comp2vec(*winds.sr_wind(self, pbot=sfc, ptop=p8km, stu=self.srwind[2], stv=self.srwind[3] ))
    self.left_srw_4_5km = utils.comp2vec(*winds.sr_wind(self, pbot=p4km, ptop=p5km, stu=self.srwind[2], stv=self.srwind[3] ))
    self.left_srw_lcl_el = utils.comp2vec(*winds.sr_wind(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres, stu=self.srwind[2], stv=self.srwind[3] ))
    # This is for the red, blue, and purple bars that appear on the SR Winds vs. Height plot
    self.left_srw_0_2km = winds.sr_wind(self, pbot=sfc, ptop=interp.pres(self, interp.to_msl(self, 2000.)), stu=self.srwind[2], stv=self.srwind[3])
    self.left_srw_4_6km = winds.sr_wind(self, pbot=interp.pres(self, interp.to_msl(self, 4000.)), ptop=p6km, stu=self.srwind[2], stv=self.srwind[3])
    self.left_srw_9_11km = winds.sr_wind(self, pbot=interp.pres(self, interp.to_msl(self, 9000.)), ptop=interp.pres(self, interp.to_msl(self, 11000.)), stu=self.srwind[2], stv=self.srwind[3])
    ## calculate upshear and downshear
    self.upshear_downshear = winds.mbe_vectors(self)
    self.right_srh1km = winds.helicity(self, 0, 1000., stu=self.srwind[0], stv=self.srwind[1])
    self.right_srh3km = winds.helicity(self, 0, 3000., stu=self.srwind[0], stv=self.srwind[1])
    self.left_srh1km = winds.helicity(self, 0, 1000., stu=self.srwind[2], stv=self.srwind[3])
    self.left_srh3km = winds.helicity(self, 0, 3000., stu=self.srwind[2], stv=self.srwind[3])
    # Generic (non-prefixed) attributes default to the right-mover values.
    self.srw_eff = self.right_srw_eff
    self.srw_ebw = self.right_srw_ebw
    self.esrh = self.right_esrh
    self.critical_angle = self.right_critical_angle
    self.srw_1km = self.right_srw_1km
    self.srw_3km = self.right_srw_3km
    self.srw_6km = self.right_srw_6km
    self.srw_8km = self.right_srw_8km
    self.srw_4_5km = self.right_srw_4_5km
    self.srw_lcl_el = self.right_srw_lcl_el
    self.srw_0_2km = self.right_srw_0_2km
    self.srw_4_6km = self.right_srw_4_6km
    self.srw_9_11km = self.right_srw_9_11km
    self.srh1km = self.right_srh1km
    self.srh3km = self.right_srh3km
def get_thermo(self):
    '''
    Generate thermodynamic indices.

    Returns nothing, but sets the following variables:

    self.k_idx - K Index, a severe weather index
    self.pwat - Precipitable Water Vapor (inches)
    self.lapserate_3km - 0 to 3km AGL lapse rate (C/km)
    self.lapserate_3_6km - 3 to 6km AGL lapse rate (C/km)
    self.lapserate_850_500 - 850 to 500mb lapse rate (C/km)
    self.lapserate_700_500 - 700 to 500mb lapse rate (C/km)
    self.max_lapse_rate_2_6 - maximum 2-6 km lapse rate
    self.convT - the Convective Temperature (F)
    self.maxT - the Maximum Forecast Surface Temp (F)
    self.mean_mixr - mean mixing ratio
    self.low_rh - low level mean relative humidity
    self.mid_rh - mid level mean relative humidity
    self.totals_totals - Totals Totals index, a severe weather index
    self.inf_temp_adv - inferred temperature advection

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    ## either get or calculate the indices, round to the nearest int, and
    ## convert them to strings.
    ## K Index
    self.k_idx = params.k_index( self )
    ## precipitable water
    self.pwat = params.precip_water( self )
    ## 0-3km agl lapse rate
    self.lapserate_3km = params.lapse_rate( self, 0., 3000., pres=False )
    ## 3-6km agl lapse rate
    self.lapserate_3_6km = params.lapse_rate( self, 3000., 6000., pres=False )
    ## 850-500mb lapse rate
    self.lapserate_850_500 = params.lapse_rate( self, 850., 500., pres=True )
    ## 700-500mb lapse rate
    self.lapserate_700_500 = params.lapse_rate( self, 700., 500., pres=True )
    ## 2-6 km max lapse rate
    self.max_lapse_rate_2_6 = params.max_lapse_rate( self )
    ## convective temperature (converted from C to F)
    self.convT = thermo.ctof( params.convective_temp( self ) )
    ## sounding forecast surface temperature (converted from C to F)
    self.maxT = thermo.ctof( params.max_temp( self ) )
    #fzl = str(int(self.sfcparcel.hght0c))
    ## 100mb mean mixing ratio
    self.mean_mixr = params.mean_mixratio( self )
    ## 150mb mean rh
    self.low_rh = params.mean_relh( self )
    # Mid-level RH over the layer from (sfc - 150mb) to (sfc - 350mb).
    self.mid_rh = params.mean_relh( self, pbot=(self.pres[self.sfc] - 150),
                                    ptop=(self.pres[self.sfc] - 350) )
    ## calculate the totals totals index
    self.totals_totals = params.t_totals( self )
    ## calculate the inferred temperature advection
    self.inf_temp_adv = params.inferred_temp_adv(self, lat=self.latitude)
def get_severe(self):
    '''
    Calculate special severe weather indices. Requires calling
    get_parcels() and get_kinematics() first.

    Returns nothing, but sets the following variables:

    self.right_stp_fixed - fixed layer significant tornado parameter (computed with SRH relative to the right-mover vector)
    self.left_stp_fixed - fixed layer significant tornado parameter (computed with SRH relative to the left-mover vector)
    self.right_stp_cin - effective layer significant tornado parameter (computed with SRH relative to the right-mover vector)
    self.left_stp_cin - effective layer significant tornado parameter (computed with SRH relative to the left-mover vector)
    self.right_scp - right moving supercell composite parameter
    self.left_scp - left moving supercell composite parameter
    self.stp_fixed, self.stp_cin, self.scp - hemisphere-appropriate
        aliases (left-mover values when self.latitude < 0)

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    wspd = utils.mag(self.sfc_6km_shear[0], self.sfc_6km_shear[1])
    self.right_stp_fixed = params.stp_fixed(self.sfcpcl.bplus, self.sfcpcl.lclhght, self.right_srh1km[0], utils.KTS2MS(wspd))
    self.left_stp_fixed = params.stp_fixed(self.sfcpcl.bplus, self.sfcpcl.lclhght, self.left_srh1km[0], utils.KTS2MS(wspd))
    self.sherbe = params.sherb(self, effective=True)
    # Effective-layer composites only exist when an effective inflow
    # layer was found; otherwise zero them out.
    if self.etop is np.ma.masked or self.ebottom is np.ma.masked:
        self.right_scp = 0.0; self.left_scp = 0.0
        self.right_stp_cin = 0.0; self.left_stp_cin = 0.0
    else:
        self.right_scp = params.scp( self.mupcl.bplus, self.right_esrh[0], utils.KTS2MS(self.ebwspd))
        self.left_scp = params.scp( self.mupcl.bplus, self.left_esrh[0], utils.KTS2MS(self.ebwspd))
        right_esrh = self.right_esrh[0]
        left_esrh = self.left_esrh[0]
        # In the southern hemisphere (latitude < 0) the ESRH sign is
        # flipped before computing STP, then the result is flipped back.
        if self.latitude < 0:
            right_esrh = -right_esrh
            left_esrh = -left_esrh
        self.right_stp_cin = params.stp_cin(self.mlpcl.bplus, right_esrh, utils.KTS2MS(self.ebwspd),
                                            self.mlpcl.lclhght, self.mlpcl.bminus)
        self.left_stp_cin = params.stp_cin(self.mlpcl.bplus, left_esrh, utils.KTS2MS(self.ebwspd),
                                           self.mlpcl.lclhght, self.mlpcl.bminus)
        if self.latitude < 0:
            self.right_stp_cin = -self.right_stp_cin
            self.left_stp_cin = -self.left_stp_cin
    # Expose the hemisphere-appropriate mover as the generic attribute.
    if self.latitude < 0:
        self.stp_fixed = self.left_stp_fixed
        self.stp_cin = self.left_stp_cin
        self.scp = self.left_scp
    else:
        self.stp_fixed = self.right_stp_fixed
        self.stp_cin = self.right_stp_cin
        self.scp = self.right_scp
def get_sars(self):
    '''
    Get the SARS analogues from the hail and supercell databases.
    Requires calling get_kinematics() and get_parcels() first.
    Also calculates the significant hail parameter.

    Returns nothing, but sets the following variables:

    self.right_matches / self.left_matches - matches from SARS HAIL for
        each mover (falls back to an empty match tuple on failure)
    self.right_supercell_matches / self.left_supercell_matches - matches
        from SARS SUPERCELL for each mover
    self.ship - significant hail parameter
    self.matches, self.supercell_matches - hemisphere-appropriate aliases

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    # Shear magnitudes converted from knots to m/s for the SARS lookups.
    sfc_6km_shear = utils.KTS2MS( utils.mag( self.sfc_6km_shear[0], self.sfc_6km_shear[1]) )
    sfc_3km_shear = utils.KTS2MS( utils.mag( self.sfc_3km_shear[0], self.sfc_3km_shear[1]) )
    sfc_9km_shear = utils.KTS2MS( utils.mag( self.sfc_9km_shear[0], self.sfc_9km_shear[1]) )
    h500t = interp.temp(self, 500.)
    lapse_rate = params.lapse_rate( self, 700., 500., pres=True )
    right_srh3km = self.right_srh3km[0]
    right_srh1km = self.right_srh1km[0]
    left_srh3km = self.left_srh3km[0]
    left_srh1km = self.left_srh1km[0]
    mucape = self.mupcl.bplus
    mlcape = self.mlpcl.bplus
    mllcl = self.mlpcl.lclhght
    mumr = thermo.mixratio(self.mupcl.pres, self.mupcl.dwpc)
    self.ship = params.ship(self)
    self.hail_database = 'sars_hail.txt'
    self.supercell_database = 'sars_supercell.txt'
    # SARS lookups are best-effort: any failure yields an empty match
    # tuple. Catch Exception (not bare except) so KeyboardInterrupt /
    # SystemExit still propagate.
    try:
        self.right_matches = hail(self.hail_database, mumr, mucape, h500t, lapse_rate, sfc_6km_shear,
                                  sfc_9km_shear, sfc_3km_shear, right_srh3km)
    except Exception:
        self.right_matches = ([], [], 0, 0, 0)
    try:
        # Left-mover SRH is negated for the lookup.
        self.left_matches = hail(self.hail_database, mumr, mucape, h500t, lapse_rate, sfc_6km_shear,
                                 sfc_9km_shear, sfc_3km_shear, -left_srh3km)
    except Exception:
        self.left_matches = ([], [], 0, 0, 0)
    try:
        self.right_supercell_matches = supercell(self.supercell_database, mlcape, mllcl, h500t, lapse_rate,
                                                 utils.MS2KTS(sfc_6km_shear), right_srh1km, utils.MS2KTS(sfc_3km_shear), utils.MS2KTS(sfc_9km_shear),
                                                 right_srh3km)
    except Exception:
        self.right_supercell_matches = ([], [], 0, 0, 0)
    try:
        self.left_supercell_matches = supercell(self.supercell_database, mlcape, mllcl, h500t, lapse_rate,
                                                utils.MS2KTS(sfc_6km_shear), -left_srh1km, utils.MS2KTS(sfc_3km_shear), utils.MS2KTS(sfc_9km_shear),
                                                -left_srh3km)
    except Exception:
        self.left_supercell_matches = ([], [], 0, 0, 0)
    # Southern-hemisphere soundings use the left-mover matches.
    if self.latitude < 0:
        self.supercell_matches = self.left_supercell_matches
        self.matches = self.left_matches
    else:
        self.supercell_matches = self.right_supercell_matches
        self.matches = self.right_matches
def get_watch(self):
    '''
    Determine the possible watch type for both storm-motion vectors
    and store the hemisphere-appropriate one.

    Returns nothing, but sets the following variables:

    self.right_watch_type - watch type for the right-moving supercell
    self.left_watch_type - watch type for the left-moving supercell
    self.watch_type - left-mover watch type when self.latitude < 0,
        otherwise the right-mover watch type

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    # possible_watch() returns a sequence; only the first entry is kept.
    self.right_watch_type = watch_type.possible_watch(self, use_left=False)[0]
    self.left_watch_type = watch_type.possible_watch(self, use_left=True)[0]
    # Negative latitude selects the left-mover result.
    self.watch_type = self.left_watch_type if self.latitude < 0 else self.right_watch_type
def get_traj(self):
    '''
    Compute the storm slinky profile using the trajectory model.

    Returns nothing, but sets the following variables:

    self.slinky_traj - the list containing the position vector for the updraft
    self.updraft_tilt - the updraft tilt (an angle) with respect to the horizon

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    parcel = self.mupcl
    slinky = params.parcelTraj(self, parcel)
    # Use an identity check (PEP 8): `== None` can misbehave on
    # array-like returns and is never what is meant here.
    if slinky is None:
        # No trajectory could be computed; mask both outputs.
        self.slinky_traj = ma.masked
        self.updraft_tilt = ma.masked
    else:
        self.slinky_traj = slinky[0]
        self.updraft_tilt = slinky[1]
def get_PWV_loc(self):
    '''
    Compute the location of the current PWV with respect to its
    sounding climatology from Bunkers, storing the result in
    self.pwv_flag.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    # The climatology is keyed by calendar month, taken from the
    # sounding's date.
    month_num = int(self.date.strftime('%m'))
    self.pwv_flag = pwv_climo(self, self.location, month=month_num)
def get_indices(self):
    '''
    Set additional indices that are included in the thermo window.

    Returns nothing, but sets:

    self.tei - theta-e index
    self.esp - enhanced stretching potential
    self.mmp - MCS maintenance probability
    self.wndg - wind damage parameter
    self.sig_severe - significant severe parameter
    self.dcape, self.dpcl_ttrace, self.dpcl_ptrace - downdraft CAPE and
        the downdraft parcel temperature/pressure traces
    self.drush - downrush temperature (F)
    self.mburst - microburst composite

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    self.tei = params.tei(self)
    self.esp = params.esp(self)
    self.mmp = params.mmp(self)
    self.wndg = params.wndg(self)
    self.sig_severe = params.sig_severe(self)
    self.dcape, self.dpcl_ttrace, self.dpcl_ptrace = params.dcape(self)
    # Downrush temp: last point of the downdraft parcel trace, C -> F.
    self.drush = thermo.ctof(self.dpcl_ttrace[-1])
    self.mburst = params.mburst(self)
def set_srleft(self, lm_u, lm_v):
    '''
    Set the u and v values of the left mover supercell storm motion
    vector, then recompute everything that depends on storm motion.

    Parameters
    ----------
    lm_u : number
        Left mover u-component of the storm motion vector
    lm_v : number
        Left mover v-component of the storm motion vector

    Returns
    -------
    None
    '''
    # Preserve the right-mover pair (first two entries) and splice in
    # the caller-supplied left-mover components.
    right_pair = self.user_srwind[:2]
    self.user_srwind = right_pair + (lm_u, lm_v)
    # Storm-relative kinematics and severe indices are now stale.
    self.get_kinematics()
    self.get_severe()
def set_srright(self, rm_u, rm_v):
    '''
    Set the u and v values of the right mover supercell storm motion
    vector, then recompute everything that depends on storm motion.

    Parameters
    ----------
    rm_u : number
        Right mover u-component of the storm motion vector
    rm_v : number
        Right mover v-component of the storm motion vector

    Returns
    -------
    None
    '''
    # Splice the caller-supplied right-mover components in front of the
    # preserved left-mover pair (last two entries).
    left_pair = self.user_srwind[2:]
    self.user_srwind = (rm_u, rm_v) + left_pair
    # Storm-relative kinematics and severe indices are now stale.
    self.get_kinematics()
    self.get_severe()
def reset_srm(self):
    '''
    Reset the storm motion vector to those found by the Bunkers
    algorithm (stored in self.bunkers by get_kinematics()).

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    self.user_srwind = self.bunkers
    # Recompute everything that depends on the storm-motion vector.
    self.get_kinematics()
    self.get_severe()
| [
"sharppy.sharptab.watch_type.possible_watch",
"sharppy.sharptab.winds.wind_shear",
"sharppy.sharptab.interp.vec",
"sharppy.sharptab.thermo.mixratio",
"sharppy.sharptab.params.k_index",
"sharppy.sharptab.winds.sr_wind",
"sharppy.sharptab.winds.helicity",
"sharppy.sharptab.params.mburst",
"sharppy.sha... | [((8481, 8498), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (8496, 8498), False, 'from datetime import datetime\n'), ((8545, 8562), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (8560, 8562), False, 'import getpass\n'), ((13734, 13775), 'sharppy.io.qc_tools.areProfileArrayLengthEqual', 'qc_tools.areProfileArrayLengthEqual', (['self'], {}), '(self)\n', (13769, 13775), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((14115, 14162), 'sharppy.sharptab.thermo.virtemp', 'thermo.virtemp', (['self.pres', 'self.tmpc', 'self.dwpc'], {}), '(self.pres, self.tmpc, self.dwpc)\n', (14129, 14162), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((16260, 16297), 'sharppy.sharptab.thermo.mixratio', 'thermo.mixratio', (['self.pres', 'self.dwpc'], {}), '(self.pres, self.dwpc)\n', (16275, 16297), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((16726, 16754), 'numpy.ma.empty', 'ma.empty', (['self.pres.shape[0]'], {}), '(self.pres.shape[0])\n', (16734, 16754), True, 'import numpy.ma as ma\n'), ((17296, 17324), 'numpy.ma.empty', 'ma.empty', (['self.pres.shape[0]'], {}), '(self.pres.shape[0])\n', (17304, 17324), True, 'import numpy.ma as ma\n'), ((17534, 17552), 'sharppy.sharptab.thermo.ctok', 'thermo.ctok', (['theta'], {}), '(theta)\n', (17545, 17552), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((17879, 17907), 'numpy.ma.empty', 'ma.empty', (['self.pres.shape[0]'], {}), '(self.pres.shape[0])\n', (17887, 17907), True, 'import numpy.ma as ma\n'), ((18430, 18474), 'sharppy.sharptab.thermo.relh', 'thermo.relh', (['self.pres', 'self.tmpc', 'self.dwpc'], {}), '(self.pres, self.tmpc, self.dwpc)\n', (18441, 18474), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((20749, 20785), 'logging.debug', 'logging.debug', (['"""Calling get_fire()."""'], 
{}), "('Calling get_fire().')\n", (20762, 20785), False, 'import logging\n'), ((20799, 20813), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (20811, 20813), False, 'from datetime import datetime\n'), ((20975, 21013), 'logging.debug', 'logging.debug', (['"""Calling get_precip()."""'], {}), "('Calling get_precip().')\n", (20988, 21013), False, 'import logging\n'), ((21027, 21041), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21039, 21041), False, 'from datetime import datetime\n'), ((21187, 21226), 'logging.debug', 'logging.debug', (['"""Calling get_parcels()."""'], {}), "('Calling get_parcels().')\n", (21200, 21226), False, 'import logging\n'), ((21240, 21254), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21252, 21254), False, 'from datetime import datetime\n'), ((21416, 21454), 'logging.debug', 'logging.debug', (['"""Calling get_thermo()."""'], {}), "('Calling get_thermo().')\n", (21429, 21454), False, 'import logging\n'), ((21468, 21482), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21480, 21482), False, 'from datetime import datetime\n'), ((21625, 21667), 'logging.debug', 'logging.debug', (['"""Calling get_kinematics()."""'], {}), "('Calling get_kinematics().')\n", (21638, 21667), False, 'import logging\n'), ((21681, 21695), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21693, 21695), False, 'from datetime import datetime\n'), ((21860, 21898), 'logging.debug', 'logging.debug', (['"""Calling get_severe()."""'], {}), "('Calling get_severe().')\n", (21873, 21898), False, 'import logging\n'), ((21912, 21926), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21924, 21926), False, 'from datetime import datetime\n'), ((22083, 22119), 'logging.debug', 'logging.debug', (['"""Calling get_sars()."""'], {}), "('Calling get_sars().')\n", (22096, 22119), False, 'import logging\n'), ((22133, 22147), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22145, 22147), False, 'from datetime 
import datetime\n'), ((22303, 22342), 'logging.debug', 'logging.debug', (['"""Calling get_PWV_loc()."""'], {}), "('Calling get_PWV_loc().')\n", (22316, 22342), False, 'import logging\n'), ((22356, 22370), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22368, 22370), False, 'from datetime import datetime\n'), ((22519, 22555), 'logging.debug', 'logging.debug', (['"""Calling get_traj()."""'], {}), "('Calling get_traj().')\n", (22532, 22555), False, 'import logging\n'), ((22569, 22583), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22581, 22583), False, 'from datetime import datetime\n'), ((22749, 22788), 'logging.debug', 'logging.debug', (['"""Calling get_indices()."""'], {}), "('Calling get_indices().')\n", (22762, 22788), False, 'import logging\n'), ((22802, 22816), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22814, 22816), False, 'from datetime import datetime\n'), ((22967, 23004), 'logging.debug', 'logging.debug', (['"""Calling get_watch()."""'], {}), "('Calling get_watch().')\n", (22980, 23004), False, 'import logging\n'), ((23018, 23032), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (23030, 23032), False, 'from datetime import datetime\n'), ((23479, 23497), 'sharppy.sharptab.fire.fosberg', 'fire.fosberg', (['self'], {}), '(self)\n', (23491, 23497), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((23525, 23549), 'sharppy.sharptab.fire.haines_height', 'fire.haines_height', (['self'], {}), '(self)\n', (23543, 23549), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((23576, 23597), 'sharppy.sharptab.fire.haines_low', 'fire.haines_low', (['self'], {}), '(self)\n', (23591, 23597), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((23624, 23645), 'sharppy.sharptab.fire.haines_mid', 'fire.haines_mid', (['self'], {}), '(self)\n', (23639, 23645), False, 'from 
sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((23673, 23695), 'sharppy.sharptab.fire.haines_high', 'fire.haines_high', (['self'], {}), '(self)\n', (23689, 23695), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((23720, 23740), 'sharppy.sharptab.params.pbl_top', 'params.pbl_top', (['self'], {}), '(self)\n', (23734, 23740), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((23763, 23837), 'sharppy.sharptab.thermo.relh', 'thermo.relh', (['self.pres[self.sfc]', 'self.tmpc[self.sfc]', 'self.dwpc[self.sfc]'], {}), '(self.pres[self.sfc], self.tmpc[self.sfc], self.dwpc[self.sfc])\n', (23774, 23837), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((24039, 24091), 'sharppy.sharptab.params.mean_relh', 'params.mean_relh', (['self'], {'pbot': 'pres_sfc', 'ptop': 'pres_1km'}), '(self, pbot=pres_sfc, ptop=pres_1km)\n', (24055, 24091), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((24113, 24170), 'sharppy.sharptab.params.mean_relh', 'params.mean_relh', (['self'], {'pbot': 'pres_sfc', 'ptop': 'self.ppbl_top'}), '(self, pbot=pres_sfc, ptop=self.ppbl_top)\n', (24129, 24170), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((24199, 24250), 'sharppy.sharptab.winds.mean_wind', 'winds.mean_wind', (['self'], {'pbot': 'pres_sfc', 'ptop': 'pres_1km'}), '(self, pbot=pres_sfc, ptop=pres_1km)\n', (24214, 24250), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((24278, 24334), 'sharppy.sharptab.winds.mean_wind', 'winds.mean_wind', (['self'], {'pbot': 'pres_sfc', 'ptop': 'self.ppbl_top'}), '(self, pbot=pres_sfc, ptop=self.ppbl_top)\n', (24293, 24334), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((24361, 24408), 
'sharppy.sharptab.winds.max_wind', 'winds.max_wind', (['self'], {'lower': '(0)', 'upper': 'self.pbl_h'}), '(self, lower=0, upper=self.pbl_h)\n', (24375, 24408), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((24485, 24528), 'sharppy.sharptab.params.DefineParcel', 'params.DefineParcel', (['self'], {'flag': '(3)', 'pres': '(500)'}), '(self, flag=3, pres=500)\n', (24504, 24528), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((24545, 24581), 'sharppy.sharptab.params.cape', 'params.cape', (['self'], {'lplvals': 'mulplvals'}), '(self, lplvals=mulplvals)\n', (24556, 24581), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((25902, 25918), 'sharppy.sharptab.params.dgz', 'params.dgz', (['self'], {}), '(self)\n', (25912, 25918), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((25945, 26007), 'sharppy.sharptab.params.mean_relh', 'params.mean_relh', (['self'], {'pbot': 'self.dgz_pbot', 'ptop': 'self.dgz_ptop'}), '(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop)\n', (25961, 26007), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((26030, 26095), 'sharppy.sharptab.params.precip_water', 'params.precip_water', (['self'], {'pbot': 'self.dgz_pbot', 'ptop': 'self.dgz_ptop'}), '(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop)\n', (26049, 26095), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((26121, 26187), 'sharppy.sharptab.params.mean_mixratio', 'params.mean_mixratio', (['self'], {'pbot': 'self.dgz_pbot', 'ptop': 'self.dgz_ptop'}), '(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop)\n', (26141, 26187), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((26435, 26462), 'sharppy.sharptab.watch_type.init_phase', 'watch_type.init_phase', (['self'], {}), 
'(self)\n', (26456, 26462), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((26516, 26570), 'sharppy.sharptab.watch_type.posneg_temperature', 'watch_type.posneg_temperature', (['self'], {'start': 'self.plevel'}), '(self, start=self.plevel)\n', (26545, 26570), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((26624, 26674), 'sharppy.sharptab.watch_type.posneg_wetbulb', 'watch_type.posneg_wetbulb', (['self'], {'start': 'self.plevel'}), '(self, start=self.plevel)\n', (26649, 26674), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((26702, 26798), 'sharppy.sharptab.watch_type.best_guess_precip', 'watch_type.best_guess_precip', (['self', 'self.phase', 'self.plevel', 'self.tmp', 'self.tpos', 'self.tneg'], {}), '(self, self.phase, self.plevel, self.tmp, self.\n tpos, self.tneg)\n', (26730, 26798), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((27586, 27614), 'sharppy.sharptab.params.parcelx', 'params.parcelx', (['self'], {'flag': '(3)'}), '(self, flag=3)\n', (27600, 27614), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((27807, 27835), 'sharppy.sharptab.params.parcelx', 'params.parcelx', (['self'], {'flag': '(2)'}), '(self, flag=2)\n', (27821, 27835), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((27859, 27887), 'sharppy.sharptab.params.parcelx', 'params.parcelx', (['self'], {'flag': '(4)'}), '(self, flag=4)\n', (27873, 27887), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((27912, 27927), 'sharppy.sharptab.params.Parcel', 'params.Parcel', ([], {}), '()\n', (27925, 27927), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((28010, 28063), 
'sharppy.sharptab.params.effective_inflow_layer', 'params.effective_inflow_layer', (['self'], {'mupcl': 'self.mupcl'}), '(self, mupcl=self.mupcl)\n', (28039, 28063), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((29807, 29873), 'numpy.array', 'np.array', (['[1000.0, 3000.0, 4000.0, 5000.0, 6000.0, 8000.0, 9000.0]'], {}), '([1000.0, 3000.0, 4000.0, 5000.0, 6000.0, 8000.0, 9000.0])\n', (29815, 29873), True, 'import numpy as np\n'), ((30018, 30040), 'sharppy.sharptab.interp.vec', 'interp.vec', (['self', 'p1km'], {}), '(self, p1km)\n', (30028, 30040), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30064, 30086), 'sharppy.sharptab.interp.vec', 'interp.vec', (['self', 'p6km'], {}), '(self, p6km)\n', (30074, 30086), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30148, 30191), 'sharppy.sharptab.winds.wind_shear', 'winds.wind_shear', (['self'], {'pbot': 'sfc', 'ptop': 'p1km'}), '(self, pbot=sfc, ptop=p1km)\n', (30164, 30191), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30221, 30264), 'sharppy.sharptab.winds.wind_shear', 'winds.wind_shear', (['self'], {'pbot': 'sfc', 'ptop': 'p3km'}), '(self, pbot=sfc, ptop=p3km)\n', (30237, 30264), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30294, 30337), 'sharppy.sharptab.winds.wind_shear', 'winds.wind_shear', (['self'], {'pbot': 'sfc', 'ptop': 'p6km'}), '(self, pbot=sfc, ptop=p6km)\n', (30310, 30337), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30367, 30410), 'sharppy.sharptab.winds.wind_shear', 'winds.wind_shear', (['self'], {'pbot': 'sfc', 'ptop': 'p8km'}), '(self, pbot=sfc, ptop=p8km)\n', (30383, 30410), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30440, 30483), 
'sharppy.sharptab.winds.wind_shear', 'winds.wind_shear', (['self'], {'pbot': 'sfc', 'ptop': 'p9km'}), '(self, pbot=sfc, ptop=p9km)\n', (30456, 30483), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30512, 30583), 'sharppy.sharptab.winds.wind_shear', 'winds.wind_shear', (['self'], {'pbot': 'self.mupcl.lclpres', 'ptop': 'self.mupcl.elpres'}), '(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres)\n', (30528, 30583), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((37144, 37167), 'sharppy.sharptab.winds.mbe_vectors', 'winds.mbe_vectors', (['self'], {}), '(self)\n', (37161, 37167), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((37196, 37267), 'sharppy.sharptab.winds.helicity', 'winds.helicity', (['self', '(0)', '(1000.0)'], {'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, 0, 1000.0, stu=self.srwind[0], stv=self.srwind[1])\n', (37210, 37267), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((37295, 37366), 'sharppy.sharptab.winds.helicity', 'winds.helicity', (['self', '(0)', '(3000.0)'], {'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, 0, 3000.0, stu=self.srwind[0], stv=self.srwind[1])\n', (37309, 37366), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((37393, 37464), 'sharppy.sharptab.winds.helicity', 'winds.helicity', (['self', '(0)', '(1000.0)'], {'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, 0, 1000.0, stu=self.srwind[2], stv=self.srwind[3])\n', (37407, 37464), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((37491, 37562), 'sharppy.sharptab.winds.helicity', 'winds.helicity', (['self', '(0)', '(3000.0)'], {'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, 0, 3000.0, stu=self.srwind[2], stv=self.srwind[3])\n', (37505, 
37562), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((39363, 39383), 'sharppy.sharptab.params.k_index', 'params.k_index', (['self'], {}), '(self)\n', (39377, 39383), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((39436, 39461), 'sharppy.sharptab.params.precip_water', 'params.precip_water', (['self'], {}), '(self)\n', (39455, 39461), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((39525, 39573), 'sharppy.sharptab.params.lapse_rate', 'params.lapse_rate', (['self', '(0.0)', '(3000.0)'], {'pres': '(False)'}), '(self, 0.0, 3000.0, pres=False)\n', (39542, 39573), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((39637, 39688), 'sharppy.sharptab.params.lapse_rate', 'params.lapse_rate', (['self', '(3000.0)', '(6000.0)'], {'pres': '(False)'}), '(self, 3000.0, 6000.0, pres=False)\n', (39654, 39688), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((39754, 39802), 'sharppy.sharptab.params.lapse_rate', 'params.lapse_rate', (['self', '(850.0)', '(500.0)'], {'pres': '(True)'}), '(self, 850.0, 500.0, pres=True)\n', (39771, 39802), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((39868, 39916), 'sharppy.sharptab.params.lapse_rate', 'params.lapse_rate', (['self', '(700.0)', '(500.0)'], {'pres': '(True)'}), '(self, 700.0, 500.0, pres=True)\n', (39885, 39916), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((39984, 40011), 'sharppy.sharptab.params.max_lapse_rate', 'params.max_lapse_rate', (['self'], {}), '(self)\n', (40005, 40011), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((40330, 40356), 'sharppy.sharptab.params.mean_mixratio', 'params.mean_mixratio', (['self'], {}), '(self)\n', 
(40350, 40356), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((40406, 40428), 'sharppy.sharptab.params.mean_relh', 'params.mean_relh', (['self'], {}), '(self)\n', (40422, 40428), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((40453, 40544), 'sharppy.sharptab.params.mean_relh', 'params.mean_relh', (['self'], {'pbot': '(self.pres[self.sfc] - 150)', 'ptop': '(self.pres[self.sfc] - 350)'}), '(self, pbot=self.pres[self.sfc] - 150, ptop=self.pres[self.\n sfc] - 350)\n', (40469, 40544), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((40632, 40653), 'sharppy.sharptab.params.t_totals', 'params.t_totals', (['self'], {}), '(self)\n', (40647, 40653), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((40740, 40789), 'sharppy.sharptab.params.inferred_temp_adv', 'params.inferred_temp_adv', (['self'], {'lat': 'self.latitude'}), '(self, lat=self.latitude)\n', (40764, 40789), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((41792, 41847), 'sharppy.sharptab.utils.mag', 'utils.mag', (['self.sfc_6km_shear[0]', 'self.sfc_6km_shear[1]'], {}), '(self.sfc_6km_shear[0], self.sfc_6km_shear[1])\n', (41801, 41847), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((42128, 42162), 'sharppy.sharptab.params.sherb', 'params.sherb', (['self'], {'effective': '(True)'}), '(self, effective=True)\n', (42140, 42162), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((44475, 44499), 'sharppy.sharptab.interp.temp', 'interp.temp', (['self', '(500.0)'], {}), '(self, 500.0)\n', (44486, 44499), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((44520, 44568), 'sharppy.sharptab.params.lapse_rate', 'params.lapse_rate', 
(['self', '(700.0)', '(500.0)'], {'pres': '(True)'}), '(self, 700.0, 500.0, pres=True)\n', (44537, 44568), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((44859, 44908), 'sharppy.sharptab.thermo.mixratio', 'thermo.mixratio', (['self.mupcl.pres', 'self.mupcl.dwpc'], {}), '(self.mupcl.pres, self.mupcl.dwpc)\n', (44874, 44908), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((44929, 44946), 'sharppy.sharptab.params.ship', 'params.ship', (['self'], {}), '(self)\n', (44940, 44946), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((46889, 46936), 'sharppy.sharptab.watch_type.possible_watch', 'watch_type.possible_watch', (['self'], {'use_left': '(False)'}), '(self, use_left=False)\n', (46914, 46936), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((47007, 47053), 'sharppy.sharptab.watch_type.possible_watch', 'watch_type.possible_watch', (['self'], {'use_left': '(True)'}), '(self, use_left=True)\n', (47032, 47053), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((47718, 47749), 'sharppy.sharptab.params.parcelTraj', 'params.parcelTraj', (['self', 'parcel'], {}), '(self, parcel)\n', (47735, 47749), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((48620, 48636), 'sharppy.sharptab.params.tei', 'params.tei', (['self'], {}), '(self)\n', (48630, 48636), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((48656, 48672), 'sharppy.sharptab.params.esp', 'params.esp', (['self'], {}), '(self)\n', (48666, 48672), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((48692, 48708), 'sharppy.sharptab.params.mmp', 'params.mmp', (['self'], {}), '(self)\n', (48702, 48708), False, 'from sharppy.sharptab 
import utils, winds, params, interp, thermo, watch_type, fire\n'), ((48729, 48746), 'sharppy.sharptab.params.wndg', 'params.wndg', (['self'], {}), '(self)\n', (48740, 48746), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((48773, 48796), 'sharppy.sharptab.params.sig_severe', 'params.sig_severe', (['self'], {}), '(self)\n', (48790, 48796), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((48854, 48872), 'sharppy.sharptab.params.dcape', 'params.dcape', (['self'], {}), '(self)\n', (48866, 48872), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((48894, 48927), 'sharppy.sharptab.thermo.ctof', 'thermo.ctof', (['self.dpcl_ttrace[-1]'], {}), '(self.dpcl_ttrace[-1])\n', (48905, 48927), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((48950, 48969), 'sharppy.sharptab.params.mburst', 'params.mburst', (['self'], {}), '(self)\n', (48963, 48969), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((4557, 4577), 'numpy.ma.max', 'np.ma.max', (['self.pres'], {}), '(self.pres)\n', (4566, 4577), True, 'import numpy as np\n'), ((4598, 4760), 'warnings.warn', 'warnings.warn', (['"""The pressure values passed to the profile object are below 100 mb. This may cause some the SHARPpy routines not to behave as expected."""'], {}), "(\n 'The pressure values passed to the profile object are below 100 mb. This may cause some the SHARPpy routines not to behave as expected.'\n )\n", (4611, 4760), False, 'import warnings\n'), ((9327, 9358), 'sharppy.io.qc_tools.isHGHTValid', 'qc_tools.isHGHTValid', (['self.hght'], {}), '(self.hght)\n', (9347, 9358), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((9372, 9538), 'sharppy.io.qc_tools.raiseError', 'qc_tools.raiseError', (['"""Invalid height data. 
Data has repeat height values or height does not increase as pressure decreases."""', 'qc_tools.DataQualityException'], {}), "(\n 'Invalid height data. Data has repeat height values or height does not increase as pressure decreases.'\n , qc_tools.DataQualityException)\n", (9391, 9538), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((9544, 9575), 'sharppy.io.qc_tools.isTMPCValid', 'qc_tools.isTMPCValid', (['self.tmpc'], {}), '(self.tmpc)\n', (9564, 9575), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((9589, 9734), 'sharppy.io.qc_tools.raiseError', 'qc_tools.raiseError', (['"""Invalid temperature data. Profile contains a temperature value < -273.15 Celsius."""', 'qc_tools.DataQualityException'], {}), "(\n 'Invalid temperature data. Profile contains a temperature value < -273.15 Celsius.'\n , qc_tools.DataQualityException)\n", (9608, 9734), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((9740, 9771), 'sharppy.io.qc_tools.isDWPCValid', 'qc_tools.isDWPCValid', (['self.dwpc'], {}), '(self.dwpc)\n', (9760, 9771), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((9785, 9924), 'sharppy.io.qc_tools.raiseError', 'qc_tools.raiseError', (['"""Invalid dewpoint data. Profile contains a dewpoint value < -273.15 Celsius."""', 'qc_tools.DataQualityException'], {}), "(\n 'Invalid dewpoint data. Profile contains a dewpoint value < -273.15 Celsius.'\n , qc_tools.DataQualityException)\n", (9804, 9924), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((9930, 9961), 'sharppy.io.qc_tools.isWSPDValid', 'qc_tools.isWSPDValid', (['self.wspd'], {}), '(self.wspd)\n', (9950, 9961), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((9975, 10109), 'sharppy.io.qc_tools.raiseError', 'qc_tools.raiseError', (['"""Invalid wind speed data. Profile contains a wind speed value < 0 knots."""', 'qc_tools.DataQualityException'], {}), "(\n 'Invalid wind speed data. 
Profile contains a wind speed value < 0 knots.',\n qc_tools.DataQualityException)\n", (9994, 10109), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((10116, 10147), 'sharppy.io.qc_tools.isWDIRValid', 'qc_tools.isWDIRValid', (['self.wdir'], {}), '(self.wdir)\n', (10136, 10147), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((10161, 10318), 'sharppy.io.qc_tools.raiseError', 'qc_tools.raiseError', (['"""Invalid wind direction data. Profile contains a wind direction < 0 degrees or >= 360 degrees."""', 'qc_tools.DataQualityException'], {}), "(\n 'Invalid wind direction data. Profile contains a wind direction < 0 degrees or >= 360 degrees.'\n , qc_tools.DataQualityException)\n", (10180, 10318), True, 'import sharppy.io.qc_tools as qc_tools\n'), ((12701, 12737), 'sharppy.sharptab.utils.vec2comp', 'utils.vec2comp', (['self.wdir', 'self.wspd'], {}), '(self.wdir, self.wspd)\n', (12715, 12737), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((14179, 14205), 'numpy.ma.where', 'np.ma.where', (['(self.pres > 0)'], {}), '(self.pres > 0)\n', (14190, 14205), True, 'import numpy as np\n'), ((16817, 16873), 'sharppy.sharptab.thermo.wetbulb', 'thermo.wetbulb', (['self.pres[i]', 'self.tmpc[i]', 'self.dwpc[i]'], {}), '(self.pres[i], self.tmpc[i], self.dwpc[i])\n', (16831, 16873), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((17385, 17425), 'sharppy.sharptab.thermo.theta', 'thermo.theta', (['self.pres[i]', 'self.tmpc[i]'], {}), '(self.pres[i], self.tmpc[i])\n', (17397, 17425), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((20521, 20541), 'numpy.ma.max', 'np.ma.max', (['self.pres'], {}), '(self.pres)\n', (20530, 20541), True, 'import numpy as np\n'), ((23914, 23941), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', '(1000.0)'], {}), '(self, 1000.0)\n', (23927, 23941), False, 'from sharppy.sharptab import utils, 
winds, params, interp, thermo, watch_type, fire\n'), ((23983, 24015), 'sharppy.sharptab.interp.hght', 'interp.hght', (['self', 'self.ppbl_top'], {}), '(self, self.ppbl_top)\n', (23994, 24015), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((26216, 26279), 'sharppy.sharptab.params.mean_omega', 'params.mean_omega', (['self'], {'pbot': 'self.dgz_pbot', 'ptop': 'self.dgz_ptop'}), '(self, pbot=self.dgz_pbot, ptop=self.dgz_ptop)\n', (26233, 26279), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((27753, 27781), 'sharppy.sharptab.params.parcelx', 'params.parcelx', (['self'], {'flag': '(1)'}), '(self, flag=1)\n', (27767, 27781), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((28888, 28936), 'sharppy.sharptab.params.mean_theta', 'params.mean_theta', (['self', 'self.ebottom', 'self.etop'], {}), '(self, self.ebottom, self.etop)\n', (28905, 28936), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((28955, 29006), 'sharppy.sharptab.params.mean_mixratio', 'params.mean_mixratio', (['self', 'self.ebottom', 'self.etop'], {}), '(self, self.ebottom, self.etop)\n', (28975, 29006), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((29079, 29114), 'sharppy.sharptab.thermo.theta', 'thermo.theta', (['(1000.0)', 'mtha', 'effpres'], {}), '(1000.0, mtha, effpres)\n', (29091, 29114), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((29136, 29171), 'sharppy.sharptab.thermo.temp_at_mixrat', 'thermo.temp_at_mixrat', (['mmr', 'effpres'], {}), '(mmr, effpres)\n', (29157, 29171), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((29198, 29268), 'sharppy.sharptab.params.parcelx', 'params.parcelx', (['self'], {'flag': '(5)', 'pres': 'effpres', 'tmpc': 'efftmpc', 
'dwpc': 'effdwpc'}), '(self, flag=5, pres=effpres, tmpc=efftmpc, dwpc=effdwpc)\n', (29212, 29268), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((29936, 29964), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', 'heights'], {}), '(self, heights)\n', (29949, 29964), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((31295, 31332), 'sharppy.sharptab.winds.non_parcel_bunkers_motion', 'winds.non_parcel_bunkers_motion', (['self'], {}), '(self)\n', (31326, 31332), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((32224, 32294), 'sharppy.sharptab.params.bunkers_storm_motion', 'params.bunkers_storm_motion', (['self'], {'mupcl': 'self.mupcl', 'pbot': 'self.ebottom'}), '(self, mupcl=self.mupcl, pbot=self.ebottom)\n', (32251, 32294), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((32626, 32672), 'sharppy.sharptab.winds.mean_wind', 'winds.mean_wind', (['self', 'self.ebottom', 'self.etop'], {}), '(self, self.ebottom, self.etop)\n', (32641, 32672), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((32702, 32752), 'sharppy.sharptab.winds.mean_wind', 'winds.mean_wind', (['self'], {'pbot': 'self.ebottom', 'ptop': 'elh'}), '(self, pbot=self.ebottom, ptop=elh)\n', (32717, 32752), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((32842, 32899), 'sharppy.sharptab.winds.wind_shear', 'winds.wind_shear', (['self'], {'pbot': 'self.ebottom', 'ptop': 'self.etop'}), '(self, pbot=self.ebottom, ptop=self.etop)\n', (32858, 32899), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((32924, 32975), 'sharppy.sharptab.winds.wind_shear', 'winds.wind_shear', (['self'], {'pbot': 'self.ebottom', 'ptop': 'elh'}), '(self, pbot=self.ebottom, ptop=elh)\n', 
(32940, 32975), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((33002, 33039), 'sharppy.sharptab.utils.mag', 'utils.mag', (['self.ebwd[0]', 'self.ebwd[1]'], {}), '(self.ebwd[0], self.ebwd[1])\n', (33011, 33039), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((33146, 33244), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'self.ebottom', 'ptop': 'self.etop', 'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, pbot=self.ebottom, ptop=self.etop, stu=self.srwind[0],\n stv=self.srwind[1])\n', (33159, 33244), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((33275, 33368), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'self.ebottom', 'ptop': 'elh', 'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, pbot=self.ebottom, ptop=elh, stu=self.srwind[0], stv=\n self.srwind[1])\n', (33288, 33368), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((33395, 33484), 'sharppy.sharptab.winds.helicity', 'winds.helicity', (['self', 'self.ebotm', 'self.etopm'], {'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, self.ebotm, self.etopm, stu=self.srwind[0], stv=self.\n srwind[1])\n', (33409, 33484), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((33520, 33586), 'sharppy.sharptab.winds.critical_angle', 'winds.critical_angle', (['self'], {'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, stu=self.srwind[0], stv=self.srwind[1])\n', (33540, 33586), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((33689, 33787), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'self.ebottom', 'ptop': 'self.etop', 'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, pbot=self.ebottom, ptop=self.etop, 
stu=self.srwind[2],\n stv=self.srwind[3])\n', (33702, 33787), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((33817, 33910), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'self.ebottom', 'ptop': 'elh', 'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, pbot=self.ebottom, ptop=elh, stu=self.srwind[2], stv=\n self.srwind[3])\n', (33830, 33910), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((33936, 34025), 'sharppy.sharptab.winds.helicity', 'winds.helicity', (['self', 'self.ebotm', 'self.etopm'], {'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, self.ebotm, self.etopm, stu=self.srwind[2], stv=self.\n srwind[3])\n', (33950, 34025), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((34060, 34126), 'sharppy.sharptab.winds.critical_angle', 'winds.critical_angle', (['self'], {'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, stu=self.srwind[2], stv=self.srwind[3])\n', (34080, 34126), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((40082, 40110), 'sharppy.sharptab.params.convective_temp', 'params.convective_temp', (['self'], {}), '(self)\n', (40104, 40110), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((40197, 40218), 'sharppy.sharptab.params.max_temp', 'params.max_temp', (['self'], {}), '(self)\n', (40212, 40218), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((41958, 41976), 'sharppy.sharptab.utils.KTS2MS', 'utils.KTS2MS', (['wspd'], {}), '(wspd)\n', (41970, 41976), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((42086, 42104), 'sharppy.sharptab.utils.KTS2MS', 'utils.KTS2MS', (['wspd'], {}), '(wspd)\n', (42098, 42104), False, 'from sharppy.sharptab import utils, winds, 
params, interp, thermo, watch_type, fire\n'), ((44206, 44261), 'sharppy.sharptab.utils.mag', 'utils.mag', (['self.sfc_6km_shear[0]', 'self.sfc_6km_shear[1]'], {}), '(self.sfc_6km_shear[0], self.sfc_6km_shear[1])\n', (44215, 44261), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((44303, 44358), 'sharppy.sharptab.utils.mag', 'utils.mag', (['self.sfc_3km_shear[0]', 'self.sfc_3km_shear[1]'], {}), '(self.sfc_3km_shear[0], self.sfc_3km_shear[1])\n', (44312, 44358), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((44400, 44455), 'sharppy.sharptab.utils.mag', 'utils.mag', (['self.sfc_9km_shear[0]', 'self.sfc_9km_shear[1]'], {}), '(self.sfc_9km_shear[0], self.sfc_9km_shear[1])\n', (44409, 44455), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((45094, 45214), 'sharppy.databases.sars.hail', 'hail', (['self.hail_database', 'mumr', 'mucape', 'h500t', 'lapse_rate', 'sfc_6km_shear', 'sfc_9km_shear', 'sfc_3km_shear', 'right_srh3km'], {}), '(self.hail_database, mumr, mucape, h500t, lapse_rate, sfc_6km_shear,\n sfc_9km_shear, sfc_3km_shear, right_srh3km)\n', (45098, 45214), False, 'from sharppy.databases.sars import hail, supercell\n'), ((45340, 45460), 'sharppy.databases.sars.hail', 'hail', (['self.hail_database', 'mumr', 'mucape', 'h500t', 'lapse_rate', 'sfc_6km_shear', 'sfc_9km_shear', 'sfc_3km_shear', '(-left_srh3km)'], {}), '(self.hail_database, mumr, mucape, h500t, lapse_rate, sfc_6km_shear,\n sfc_9km_shear, sfc_3km_shear, -left_srh3km)\n', (45344, 45460), False, 'from sharppy.databases.sars import hail, supercell\n'), ((6082, 6254), 'warnings.warn', 'warnings.warn', (['"""No wind data (wdir/wspd or u/v) passed to the Profile object constructor. This may cause some of the SHARPpy routines to not behave as expected."""'], {}), "(\n 'No wind data (wdir/wspd or u/v) passed to the Profile object constructor. 
This may cause some of the SHARPpy routines to not behave as expected.'\n )\n", (6095, 6254), False, 'import warnings\n'), ((13059, 13089), 'sharppy.sharptab.utils.comp2vec', 'utils.comp2vec', (['self.u', 'self.v'], {}), '(self.u, self.v)\n', (13073, 13089), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((17982, 18037), 'sharppy.sharptab.thermo.thetae', 'thermo.thetae', (['self.pres[i]', 'self.tmpc[i]', 'self.dwpc[i]'], {}), '(self.pres[i], self.tmpc[i], self.dwpc[i])\n', (17995, 18037), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((28497, 28528), 'sharppy.sharptab.interp.hght', 'interp.hght', (['self', 'self.ebottom'], {}), '(self, self.ebottom)\n', (28508, 28528), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((28575, 28603), 'sharppy.sharptab.interp.hght', 'interp.hght', (['self', 'self.etop'], {}), '(self, self.etop)\n', (28586, 28603), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30655, 30697), 'sharppy.sharptab.winds.mean_wind', 'winds.mean_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p1km'}), '(self, pbot=sfc, ptop=p1km)\n', (30670, 30697), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30739, 30781), 'sharppy.sharptab.winds.mean_wind', 'winds.mean_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p3km'}), '(self, pbot=sfc, ptop=p3km)\n', (30754, 30781), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30823, 30865), 'sharppy.sharptab.winds.mean_wind', 'winds.mean_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p6km'}), '(self, pbot=sfc, ptop=p6km)\n', (30838, 30865), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30907, 30949), 'sharppy.sharptab.winds.mean_wind', 'winds.mean_wind', (['self'], {'pbot': 'sfc', 
'ptop': 'p8km'}), '(self, pbot=sfc, ptop=p8km)\n', (30922, 30949), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((30994, 31064), 'sharppy.sharptab.winds.mean_wind', 'winds.mean_wind', (['self'], {'pbot': 'self.mupcl.lclpres', 'ptop': 'self.mupcl.elpres'}), '(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres)\n', (31009, 31064), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((32522, 32561), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', '(self.ebotm + depth)'], {}), '(self, self.ebotm + depth)\n', (32535, 32561), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((34240, 34325), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p1km', 'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, pbot=sfc, ptop=p1km, stu=self.srwind[0], stv=self.srwind[1]\n )\n', (34253, 34325), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((34368, 34453), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p3km', 'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, pbot=sfc, ptop=p3km, stu=self.srwind[0], stv=self.srwind[1]\n )\n', (34381, 34453), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((34496, 34581), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p6km', 'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, pbot=sfc, ptop=p6km, stu=self.srwind[0], stv=self.srwind[1]\n )\n', (34509, 34581), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((34624, 34709), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p8km', 'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, pbot=sfc, ptop=p8km, 
stu=self.srwind[0], stv=self.srwind[1]\n )\n', (34637, 34709), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((34754, 34840), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'p4km', 'ptop': 'p5km', 'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, pbot=p4km, ptop=p5km, stu=self.srwind[0], stv=self.\n srwind[1])\n', (34767, 34840), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((34886, 34999), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'self.mupcl.lclpres', 'ptop': 'self.mupcl.elpres', 'stu': 'self.srwind[0]', 'stv': 'self.srwind[1]'}), '(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres, stu=\n self.srwind[0], stv=self.srwind[1])\n', (34899, 34999), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((35709, 35794), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p1km', 'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, pbot=sfc, ptop=p1km, stu=self.srwind[2], stv=self.srwind[3]\n )\n', (35722, 35794), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((35836, 35921), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p3km', 'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, pbot=sfc, ptop=p3km, stu=self.srwind[2], stv=self.srwind[3]\n )\n', (35849, 35921), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((35963, 36048), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p6km', 'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, pbot=sfc, ptop=p6km, stu=self.srwind[2], stv=self.srwind[3]\n )\n', (35976, 36048), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((36090, 36175), 
'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'sfc', 'ptop': 'p8km', 'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, pbot=sfc, ptop=p8km, stu=self.srwind[2], stv=self.srwind[3]\n )\n', (36103, 36175), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((36219, 36305), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'p4km', 'ptop': 'p5km', 'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, pbot=p4km, ptop=p5km, stu=self.srwind[2], stv=self.\n srwind[3])\n', (36232, 36305), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((36350, 36463), 'sharppy.sharptab.winds.sr_wind', 'winds.sr_wind', (['self'], {'pbot': 'self.mupcl.lclpres', 'ptop': 'self.mupcl.elpres', 'stu': 'self.srwind[2]', 'stv': 'self.srwind[3]'}), '(self, pbot=self.mupcl.lclpres, ptop=self.mupcl.elpres, stu=\n self.srwind[2], stv=self.srwind[3])\n', (36363, 36463), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((42451, 42476), 'sharppy.sharptab.utils.KTS2MS', 'utils.KTS2MS', (['self.ebwspd'], {}), '(self.ebwspd)\n', (42463, 42476), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((42555, 42580), 'sharppy.sharptab.utils.KTS2MS', 'utils.KTS2MS', (['self.ebwspd'], {}), '(self.ebwspd)\n', (42567, 42580), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((42863, 42888), 'sharppy.sharptab.utils.KTS2MS', 'utils.KTS2MS', (['self.ebwspd'], {}), '(self.ebwspd)\n', (42875, 42888), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((43021, 43046), 'sharppy.sharptab.utils.KTS2MS', 'utils.KTS2MS', (['self.ebwspd'], {}), '(self.ebwspd)\n', (43033, 43046), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((45682, 45709), 
'sharppy.sharptab.utils.MS2KTS', 'utils.MS2KTS', (['sfc_6km_shear'], {}), '(sfc_6km_shear)\n', (45694, 45709), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((45725, 45752), 'sharppy.sharptab.utils.MS2KTS', 'utils.MS2KTS', (['sfc_3km_shear'], {}), '(sfc_3km_shear)\n', (45737, 45752), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((45754, 45781), 'sharppy.sharptab.utils.MS2KTS', 'utils.MS2KTS', (['sfc_9km_shear'], {}), '(sfc_9km_shear)\n', (45766, 45781), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((46033, 46060), 'sharppy.sharptab.utils.MS2KTS', 'utils.MS2KTS', (['sfc_6km_shear'], {}), '(sfc_6km_shear)\n', (46045, 46060), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((46076, 46103), 'sharppy.sharptab.utils.MS2KTS', 'utils.MS2KTS', (['sfc_3km_shear'], {}), '(sfc_3km_shear)\n', (46088, 46103), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((46105, 46132), 'sharppy.sharptab.utils.MS2KTS', 'utils.MS2KTS', (['sfc_9km_shear'], {}), '(sfc_9km_shear)\n', (46117, 46132), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((8375, 8388), 'sharppy.sharptab.utils.QC', 'utils.QC', (['val'], {}), '(val)\n', (8383, 8388), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((15358, 15383), 'numpy.where', 'np.where', (['(~self.tmpc.mask)'], {}), '(~self.tmpc.mask)\n', (15366, 15383), True, 'import numpy as np\n'), ((15797, 15822), 'numpy.where', 'np.where', (['(~self.tmpc.mask)'], {}), '(~self.tmpc.mask)\n', (15805, 15822), True, 'import numpy as np\n'), ((35178, 35205), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', '(2000.0)'], {}), '(self, 2000.0)\n', (35191, 35205), False, 'from sharppy.sharptab import utils, winds, 
params, interp, thermo, watch_type, fire\n'), ((35321, 35348), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', '(4000.0)'], {}), '(self, 4000.0)\n', (35334, 35348), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((35476, 35503), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', '(9000.0)'], {}), '(self, 9000.0)\n', (35489, 35503), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((35528, 35556), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', '(11000.0)'], {}), '(self, 11000.0)\n', (35541, 35556), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((36641, 36668), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', '(2000.0)'], {}), '(self, 2000.0)\n', (36654, 36668), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((36783, 36810), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', '(4000.0)'], {}), '(self, 4000.0)\n', (36796, 36810), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((36937, 36964), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', '(9000.0)'], {}), '(self, 9000.0)\n', (36950, 36964), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((36989, 37017), 'sharppy.sharptab.interp.to_msl', 'interp.to_msl', (['self', '(11000.0)'], {}), '(self, 11000.0)\n', (37002, 37017), False, 'from sharppy.sharptab import utils, winds, params, interp, thermo, watch_type, fire\n'), ((20887, 20901), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (20899, 20901), False, 'from datetime import datetime\n'), ((21119, 21133), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21131, 21133), False, 'from datetime import datetime\n'), ((21334, 21348), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', 
(21346, 21348), False, 'from datetime import datetime\n'), ((21560, 21574), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21572, 21574), False, 'from datetime import datetime\n'), ((21781, 21795), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21793, 21795), False, 'from datetime import datetime\n'), ((22004, 22018), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22016, 22018), False, 'from datetime import datetime\n'), ((22221, 22235), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22233, 22235), False, 'from datetime import datetime\n'), ((22450, 22464), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22462, 22464), False, 'from datetime import datetime\n'), ((22657, 22671), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22669, 22671), False, 'from datetime import datetime\n'), ((22896, 22910), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22908, 22910), False, 'from datetime import datetime\n'), ((23108, 23122), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (23120, 23122), False, 'from datetime import datetime\n')] |
__author__ = 'jlu96'
import numpy as np
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import LeaveOneOut
import collections
from lag_conversion import get_XY_lagged
import pandas as pd
import time
def fit_ols(X, Y, verbose=False,**kwargs):
    """
    Ordinary least squares fit (no regularization).

    X: n x p matrix
    Y: n x 1 matrix
    kwargs: ignored (accepted for signature compatibility with the
        other fit_ methods)
    return:
        Y_pred (n x 1), coef (p x 1), intercept, fit_result dict
    """
    assert X.shape[0] == Y.shape[0]
    n, p = X.shape[:2]
    model = LinearRegression()
    model.fit(X, Y)
    # sklearn stores coefficients as a flat/row array; force column shape.
    coef = model.coef_.reshape((p, 1))
    intercept = model.intercept_
    Y_pred, fit_result = compute_fit(X, Y, coef, intercept)
    if verbose:
        print("Diff in prediction")
        print(model.predict(X).shape)
        print(Y_pred.shape)
        print(Y_pred - np.reshape(model.predict(X), (n, 1)))
    return Y_pred, coef, intercept, fit_result
def fit_lasso(X, Y, verbose=False, max_iter = 50000, **kwargs):
    """
    Fit an L1-regularized (lasso) linear model.

    X: n x p matrix
    Y: n x 1 matrix
    kwargs["hyper"]: regularization strength alpha
    return:
        Y_pred (n x 1), coef (p x 1), intercept, fit_result dict
        (fit_result additionally carries the solver's duality gap)
    """
    assert X.shape[0] == Y.shape[0]
    p = X.shape[1]
    model = Lasso(alpha=kwargs["hyper"], max_iter=max_iter, selection='random')
    model.fit(X, Y)
    # sklearn stores coefficients as a flat array; force column shape.
    coef = model.coef_.reshape((p, 1))
    intercept = model.intercept_
    Y_pred, fit_result = compute_fit(X, Y, coef, intercept)
    fit_result["dual_gap"] = model.dual_gap_
    if verbose:
        print("Tolerance: ")
        print(model.tol)
        print("Duality gap: ")
        print(model.dual_gap_)
        print(coef)
    return Y_pred, coef, intercept, fit_result
def fit_ridge(X, Y, verbose=False, max_iter=50000, **kwargs):
    """
    Fit an L2-regularized (ridge) linear model.

    X: n x p matrix
    Y: n x 1 matrix
    kwargs["hyper"]: regularization strength alpha
    return:
        Y_pred (n x 1), coef (p x 1), intercept, fit_result dict
    """
    assert X.shape[0] == Y.shape[0]
    n, p = X.shape[:2]
    model = Ridge(alpha=kwargs["hyper"], max_iter=max_iter)
    model.fit(X, Y)
    # sklearn stores coefficients as a row array; force column shape.
    coef = model.coef_.reshape((p, 1))
    intercept = model.intercept_
    Y_pred, fit_result = compute_fit(X, Y, coef, intercept)
    if verbose:
        print("Diff in prediction")
        print(model.predict(X).shape)
        print(Y_pred.shape)
        print(Y_pred - np.reshape(model.predict(X), (n, 1)))
    return Y_pred, coef, intercept, fit_result
def fit_enet(X, Y, verbose=False, max_iter=30000, **kwargs):
    """
    Fit an elastic-net linear model.

    X: n x p matrix
    Y: n x 1 matrix
    kwargs["hyper"]: tuple (alpha, l1_ratio)

    Minimizes the objective function:
        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    return:
        Y_pred (n x 1), coef (p x 1), intercept, fit_result dict
        (fit_result additionally carries the solver's duality gap)
    """
    assert X.shape[0] == Y.shape[0]
    n, p = X.shape[:2]
    alpha, l1_ratio = kwargs["hyper"]
    model = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, max_iter=max_iter,
                       selection='random')
    model.fit(X, Y)
    # sklearn stores coefficients as a flat array; force column shape.
    coef = model.coef_.reshape((p, 1))
    intercept = model.intercept_
    Y_pred, fit_result = compute_fit(X, Y, coef, intercept)
    fit_result["dual_gap"] = model.dual_gap_
    if verbose:
        print("Diff in prediction")
        print(model.predict(X).shape)
        print(Y_pred.shape)
        print(Y_pred - np.reshape(model.predict(X), (n, 1)))
    return Y_pred, coef, intercept, fit_result
def durbin_watson(resid, lag=1, verbose=False):
    """
    Calculate the Durbin-Watson statistic at a given lag:

        DW = sum([resid_{i} - resid_{i-lag}]^2) / sum(resid_{i}^2)

    Around 2 is no correlation -- residuals well captured by model.
    Close to 1 is positive correlation.

    resid: residual array (flattened internally)
    lag: offset between the residuals being compared

    Fix: the original used np.diff(resid, n=lag), which applies the
    difference operator `lag` times (an iterated difference), not the
    lag-offset difference the formula above states. Behavior is
    unchanged for the default lag=1 (the only lag used by compute_fit's
    default dw_lags).
    """
    resid = resid.flatten()
    # resid_i - resid_{i-lag}: residuals compared `lag` positions apart
    lagged_diff = resid[lag:] - resid[:-lag]
    ssar = np.sum(lagged_diff ** 2)  # sum of squared lagged differences
    ssr = np.sum(resid ** 2)         # sum of squared residuals
    if verbose:
        print(lagged_diff, resid)
        print(ssar, ssr)
    return ssar * 1.0 / ssr
def predict(X, coef, fit_intercept):
    """Linear-model prediction: X @ coef + fit_intercept.

    X: n x p matrix, coef: p x o matrix; returns an n x o matrix.
    """
    assert X.shape[1] == coef.shape[0]
    return X.dot(coef) + fit_intercept
def compute_fit(X, Y, coef, fit_intercept, dw_lags=(1,)):
    """
    Evaluate the fit of a linear model.

    X: n x p matrix
    Y: n x o matrix
    coef: p x o coefficient matrix
    fit_intercept: intercept term added to X.dot(coef)
    dw_lags: iterable of lags at which to report the Durbin-Watson
        statistic. Default changed from the mutable list [1] to the
        immutable tuple (1,) to avoid the shared mutable-default
        pitfall; callers passing their own list are unaffected.

    return: (Y_pred, fit_result) where fit_result is an OrderedDict with
        r2, mse, sse, n, df and one "DW:Lag<k>" entry per requested lag.
    """
    assert X.shape[1] == coef.shape[0]
    assert X.shape[0] == Y.shape[0]
    assert Y.shape[1] == coef.shape[1]
    Y_pred = predict(X, coef, fit_intercept)
    resid = Y_pred - Y
    fit_result = collections.OrderedDict()
    fit_result["r2"] = r2_score(Y, Y_pred)
    fit_result["mse"] = mean_squared_error(Y, Y_pred)
    fit_result["sse"] = np.sum(resid ** 2)
    fit_result["n"] = Y.shape[0]
    # degrees of freedom = number of non-zero coefficients
    fit_result["df"] = len(np.nonzero(coef)[0])
    for dw_lag in dw_lags:
        fit_result["DW:Lag" + str(dw_lag)] = durbin_watson(resid, lag=dw_lag)
    return Y_pred, fit_result
def perform_test(X_matr, Y_matr, lag, fit_method, replace_row,
                 has_reps=False, bootstrap=False, seed=None, **kwargs):
    """
    Build the lagged design/response matrices and run a single timed fit.

    X_matr: n x T (x r) matrix of input genes
    Y_matr: 1 x T matrix of output gene
    lag: lag
    fit_method: one of the fit_ methods, e.g. fit_lasso
    replace_row: which row of X_matr holds Y_matr
    bootstrap: whether to bootstrap-resample
    seed: random seed for the bootstrap
    kwargs: forwarded to fit_method (e.g. hyper)
    return: X_t, Y_t, Y_pred, coef, intercept, fit_result
    """
    X_t, Y_t = get_XY_lagged(X_matr, Y_matr, lag, replace_row=replace_row,
                            has_reps=has_reps, bootstrap=bootstrap, seed=seed)
    start = time.time()
    Y_pred, coef, intercept, fit_result = fit_method(X_t, Y_t, **kwargs)
    # wall-clock fit time, rounded to milliseconds
    fit_result["time"] = round(time.time() - start, 3)
    return X_t, Y_t, Y_pred, coef, intercept, fit_result
def perform_test_random(X_matr, rand_X_matr, Y_matr, lag, fit_method, replace_row,
                 has_reps=False, verbose=False, bootstrap=False, seed=None, **kwargs):
    """
    Perform a single fit.
    X_matr: n x T (x r) matrix of input genes
    rand_X_matr: n x T (x r) matrix of randomized input genes, where randomized across time.
    Y_matr: 1 x T (x r) matrix of output gene
    lag: lag
    fit_method: one of the fit_ methods, e.g. fit_lasso
    hyper: hyperparams for calling test
    replace_rows: which row Y_matr is in in X. Set to None, otherwise. If it is inside, set the coefficients to zero.
    return: coef, intercept, fit_result. Note the 0, i * lag indices of the coef are for the output genes.

    For each predictor p (except replace_row), the fit is repeated with
    that predictor's time series replaced by its randomized version, and
    p's own coefficients from that fit are copied into the combined
    `coef` vector. fit_result reports the mean/std of the per-fit
    results across all of these fits.
    """
    ## Get X and Y lagged
    # iterate through all possible predictors
    n = X_matr.shape[0]
    T = X_matr.shape[1]
    # combined coefficient vector, filled predictor-by-predictor below
    coef = np.zeros(n * lag)
    # column p holds the full coefficient vector from the fit where
    # predictor p was randomized
    coef_temps = np.zeros( (n*lag, n))
    intercept_temps = np.zeros((1, n))
    # There will be m different fits. fit_result should just average all of the individual fit_results
    fit_result_temps = []
    # reference fit on the un-randomized data (used only for the
    # verbose comparison printout below)
    X_t_orig, Y_t_orig = get_XY_lagged(X_matr, Y_matr, lag, replace_row=replace_row,
                            has_reps=has_reps, bootstrap=bootstrap, seed=seed)
    Y_pred_orig, coef_orig, intercept_orig, fit_result_orig = fit_method(X_t_orig, Y_t_orig, **kwargs)
    for p in range(n):
        if p != replace_row:
            X_matr_temp = X_matr.copy()
            # replace the predictor row with the randomized row
            X_matr_temp[p] = rand_X_matr[p]
            # X_t_temp is a matrix of T - lag replace_rows where for each row j, i = j + lag-1
            #[A_i, B_i, .... Z_i, A_{i-1}, B_{i-1}, .... Z_{i-1}...... A_{i-lag+1}, B_{i-lag+1}, .... Z_{i-lag+1}
            X_t_temp, Y_t_temp = get_XY_lagged(X_matr_temp, Y_matr, lag, replace_row=replace_row,
                            has_reps=has_reps, bootstrap=bootstrap, seed=seed)
            # coef_temp is (A_i, B_i, .... Z_i, A_{i-1}, B_{i-1}, .... Z_{i-1}...... A_{i-lag+1}, B_{i-lag+1}, .... Z_{i-lag+1}, 1)
            # we only want the p, p + n,... p + (lag - 1) * n indices.
            t = time.time()
            Y_pred_temp, coef_temp, intercept_temp, fit_result_temp = fit_method(X_t_temp, Y_t_temp, **kwargs)
            fit_result_temp["time"] = round(time.time() - t, 3)
            # Not
            # copy predictor p's coefficients (one per lag, stride n)
            # into the combined vector
            coef[p: p + (lag - 1) * n + 1: n] = coef_temp[p : p + (lag - 1) * n + 1: n].flatten()
            # Store in the list of all coefs
            coef_temps[:, p] = coef_temp.flatten()
            intercept_temps[:, p] = intercept_temp
            fit_result_temps.append(fit_result_temp)
            if verbose:
                print("Original TS: ", X_matr[p])
                print("Randomized TS: ", X_matr_temp[p])
                print("Original X_t: ", X_t_orig[:T - lag, p:-1:n])
                print("Randomized X_t", X_t_temp[:T - lag, p:-1:n])
                print("Same Y_t?", (Y_t_orig == Y_t_temp).all())
                print("Orig coefs: ", coef_orig[p: lag * n + 1: n])
                print("Right around: ")
                print(coef_orig[p - 1: lag * n + 1: n])
                print(coef_orig[p + 1: lag * n + 1: n])
                print("Updated coefs: ", coef_temp[p: lag * n + 1: n])
                print("Right around: ")
                print(coef_temp[p - 1: lag * n + 1: n])
                print(coef_temp[p + 1: lag * n + 1: n])
                print("Updated coefs:", coef)
                print("Y_pred_temp", Y_pred_temp)
    coef = coef.reshape(n * lag, 1)
    # aggregate per-fit results: report each metric's mean and std
    fit_result_temps_df = pd.DataFrame(fit_result_temps)
    fit_result_std_dict = fit_result_temps_df.std()
    fit_result = fit_result_temps_df.mean().to_dict()
    keys = list(fit_result.keys())
    for key in keys:
        fit_result[key + "_mean"] = fit_result[key] # LEFT OFF HERE 1/25
        del fit_result[key]
        fit_result[key + "_std"] = fit_result_std_dict[key]
    # NOTE(review): coef_temp/intercept_temps come from the *last* loop
    # iteration; if the loop body never runs (n == 1 and
    # replace_row == 0), coef_temp is unbound and this raises NameError
    # -- confirm whether that case can occur for callers.
    return coef, fit_result, coef_temp, coef_temps, intercept_temps
def perform_loto_cv(X_matr, Y_matr, lag, fit_method, replace_row, verbose=False, has_reps=False,
                    **kwargs):
    """
    Perform leave-one-timepoint-out cross-validation.
    X_matr: n x T (x r) matrix of input genes, where r is # reps
    Y_matr: 1 x T (x r) matrix of output gene
    lag: lag
    fit_method: one of the fit_ methods, e.g. fit_lasso
    hyper: hyperparams for calling test
    replace_row: which row Y_matr is in in X
    return: fit_result

    Each lagged sample is held out once; the model is refit on the
    remaining samples and its prediction for the held-out sample is
    recorded. Metrics are computed over the pooled out-of-sample
    predictions.
    """
    X_t, Y_t = get_XY_lagged(X_matr, Y_matr, lag, replace_row=replace_row, has_reps=has_reps)
    T_test = X_t.shape[0]
    loo = LeaveOneOut().split(X_t)
    # pooled out-of-sample targets / predictions / model sizes
    Y_tests = np.zeros(T_test)
    Y_preds = np.zeros(T_test)
    dfs = np.zeros(T_test)
    for train_index, test_index in loo:
        X_train = X_t[train_index]
        Y_train = Y_t[train_index]
        X_test = X_t[test_index]
        Y_test = Y_t[test_index]
        _, coef, intercept, _ = fit_method(X_train, Y_train, **kwargs)
        Y_pred = predict(X_test, coef, intercept)
        Y_tests[test_index] = Y_test
        Y_preds[test_index] = Y_pred
        # record the number of non-zero coefficients for this fold
        dfs[test_index] = len(np.nonzero(coef)[0])
    mse = mean_squared_error(Y_tests, Y_preds)
    sse = np.sum((Y_tests - Y_preds)**2)
    avg_df = np.average(dfs)
    r2 = r2_score(Y_tests, Y_preds)
    # formula for adjusted R^2 : R^2_a = (n-1) R^2 / (n- k - 1) - k/(n - k -1)
    # note: this is a little hacky, since the dfs vary in between
    #r2_adj =
    # TODO - Jonathan
    fit_result = collections.OrderedDict()
    fit_result["n"] = T_test
    fit_result["lag"] = lag
    fit_result["mse"] = mse
    fit_result["sse"] = sse
    fit_result["avg_df"] = avg_df
    fit_result["r2"] = r2
    #fit_result["r2_adj"] = r2_adj
    if verbose:
        print("Y_tests: ", Y_tests)
        print("Y_preds: ", Y_preds)
        print(fit_result)
return fit_result | [
"pandas.DataFrame",
"numpy.sum",
"numpy.average",
"sklearn.linear_model.ElasticNet",
"sklearn.metrics.r2_score",
"numpy.zeros",
"time.time",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.Lasso",
"numpy.nonzero",
"numpy.diff",
"sklearn.model_selection.LeaveOneOut",
"numpy.res... | [((582, 600), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (598, 600), False, 'from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression\n'), ((635, 667), 'numpy.reshape', 'np.reshape', (['linreg.coef_', '(p, 1)'], {}), '(linreg.coef_, (p, 1))\n', (645, 667), True, 'import numpy as np\n'), ((1303, 1360), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'alpha', 'max_iter': 'max_iter', 'selection': '"""random"""'}), "(alpha=alpha, max_iter=max_iter, selection='random')\n", (1308, 1360), False, 'from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression\n'), ((1394, 1425), 'numpy.reshape', 'np.reshape', (['lasso.coef_', '(p, 1)'], {}), '(lasso.coef_, (p, 1))\n', (1404, 1425), True, 'import numpy as np\n'), ((2274, 2311), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'alpha', 'max_iter': 'max_iter'}), '(alpha=alpha, max_iter=max_iter)\n', (2279, 2311), False, 'from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression\n'), ((2345, 2376), 'numpy.reshape', 'np.reshape', (['ridge.coef_', '(p, 1)'], {}), '(ridge.coef_, (p, 1))\n', (2355, 2376), True, 'import numpy as np\n'), ((3163, 3249), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'alpha': 'alpha', 'l1_ratio': 'l1_ratio', 'max_iter': 'max_iter', 'selection': '"""random"""'}), "(alpha=alpha, l1_ratio=l1_ratio, max_iter=max_iter, selection=\n 'random')\n", (3173, 3249), False, 'from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression\n'), ((3299, 3329), 'numpy.reshape', 'np.reshape', (['enet.coef_', '(p, 1)'], {}), '(enet.coef_, (p, 1))\n', (3309, 3329), True, 'import numpy as np\n'), ((4093, 4111), 'numpy.sum', 'np.sum', (['(resid ** 2)'], {}), '(resid ** 2)\n', (4099, 4111), True, 'import numpy as np\n'), ((4681, 4706), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4704, 4706), False, 'import collections\n'), ((4730, 4749), 'sklearn.metrics.r2_score', 
'r2_score', (['Y', 'Y_pred'], {}), '(Y, Y_pred)\n', (4738, 4749), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((4774, 4803), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['Y', 'Y_pred'], {}), '(Y, Y_pred)\n', (4792, 4803), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((4828, 4846), 'numpy.sum', 'np.sum', (['(resid ** 2)'], {}), '(resid ** 2)\n', (4834, 4846), True, 'import numpy as np\n'), ((5619, 5734), 'lag_conversion.get_XY_lagged', 'get_XY_lagged', (['X_matr', 'Y_matr', 'lag'], {'replace_row': 'replace_row', 'has_reps': 'has_reps', 'bootstrap': 'bootstrap', 'seed': 'seed'}), '(X_matr, Y_matr, lag, replace_row=replace_row, has_reps=\n has_reps, bootstrap=bootstrap, seed=seed)\n', (5632, 5734), False, 'from lag_conversion import get_XY_lagged\n'), ((5768, 5779), 'time.time', 'time.time', ([], {}), '()\n', (5777, 5779), False, 'import time\n'), ((6832, 6849), 'numpy.zeros', 'np.zeros', (['(n * lag)'], {}), '(n * lag)\n', (6840, 6849), True, 'import numpy as np\n'), ((6868, 6890), 'numpy.zeros', 'np.zeros', (['(n * lag, n)'], {}), '((n * lag, n))\n', (6876, 6890), True, 'import numpy as np\n'), ((6912, 6928), 'numpy.zeros', 'np.zeros', (['(1, n)'], {}), '((1, n))\n', (6920, 6928), True, 'import numpy as np\n'), ((7085, 7200), 'lag_conversion.get_XY_lagged', 'get_XY_lagged', (['X_matr', 'Y_matr', 'lag'], {'replace_row': 'replace_row', 'has_reps': 'has_reps', 'bootstrap': 'bootstrap', 'seed': 'seed'}), '(X_matr, Y_matr, lag, replace_row=replace_row, has_reps=\n has_reps, bootstrap=bootstrap, seed=seed)\n', (7098, 7200), False, 'from lag_conversion import get_XY_lagged\n'), ((9633, 9663), 'pandas.DataFrame', 'pd.DataFrame', (['fit_result_temps'], {}), '(fit_result_temps)\n', (9645, 9663), True, 'import pandas as pd\n'), ((10567, 10645), 'lag_conversion.get_XY_lagged', 'get_XY_lagged', (['X_matr', 'Y_matr', 'lag'], {'replace_row': 'replace_row', 'has_reps': 'has_reps'}), '(X_matr, Y_matr, lag, 
replace_row=replace_row, has_reps=has_reps)\n', (10580, 10645), False, 'from lag_conversion import get_XY_lagged\n'), ((10726, 10742), 'numpy.zeros', 'np.zeros', (['T_test'], {}), '(T_test)\n', (10734, 10742), True, 'import numpy as np\n'), ((10757, 10773), 'numpy.zeros', 'np.zeros', (['T_test'], {}), '(T_test)\n', (10765, 10773), True, 'import numpy as np\n'), ((10784, 10800), 'numpy.zeros', 'np.zeros', (['T_test'], {}), '(T_test)\n', (10792, 10800), True, 'import numpy as np\n'), ((11241, 11277), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['Y_tests', 'Y_preds'], {}), '(Y_tests, Y_preds)\n', (11259, 11277), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((11288, 11320), 'numpy.sum', 'np.sum', (['((Y_tests - Y_preds) ** 2)'], {}), '((Y_tests - Y_preds) ** 2)\n', (11294, 11320), True, 'import numpy as np\n'), ((11332, 11347), 'numpy.average', 'np.average', (['dfs'], {}), '(dfs)\n', (11342, 11347), True, 'import numpy as np\n'), ((11357, 11383), 'sklearn.metrics.r2_score', 'r2_score', (['Y_tests', 'Y_preds'], {}), '(Y_tests, Y_preds)\n', (11365, 11383), False, 'from sklearn.metrics import r2_score, mean_squared_error\n'), ((11583, 11608), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (11606, 11608), False, 'import collections\n'), ((4314, 4329), 'numpy.dot', 'np.dot', (['X', 'coef'], {}), '(X, coef)\n', (4320, 4329), True, 'import numpy as np\n'), ((4058, 4079), 'numpy.diff', 'np.diff', (['resid'], {'n': 'lag'}), '(resid, n=lag)\n', (4065, 4079), True, 'import numpy as np\n'), ((4141, 4162), 'numpy.diff', 'np.diff', (['resid'], {'n': 'lag'}), '(resid, n=lag)\n', (4148, 4162), True, 'import numpy as np\n'), ((4907, 4923), 'numpy.nonzero', 'np.nonzero', (['coef'], {}), '(coef)\n', (4917, 4923), True, 'import numpy as np\n'), ((5884, 5895), 'time.time', 'time.time', ([], {}), '()\n', (5893, 5895), False, 'import time\n'), ((7789, 7909), 'lag_conversion.get_XY_lagged', 'get_XY_lagged', (['X_matr_temp', 
'Y_matr', 'lag'], {'replace_row': 'replace_row', 'has_reps': 'has_reps', 'bootstrap': 'bootstrap', 'seed': 'seed'}), '(X_matr_temp, Y_matr, lag, replace_row=replace_row, has_reps=\n has_reps, bootstrap=bootstrap, seed=seed)\n', (7802, 7909), False, 'from lag_conversion import get_XY_lagged\n'), ((8168, 8179), 'time.time', 'time.time', ([], {}), '()\n', (8177, 8179), False, 'import time\n'), ((10685, 10698), 'sklearn.model_selection.LeaveOneOut', 'LeaveOneOut', ([], {}), '()\n', (10696, 10698), False, 'from sklearn.model_selection import LeaveOneOut\n'), ((11207, 11223), 'numpy.nonzero', 'np.nonzero', (['coef'], {}), '(coef)\n', (11217, 11223), True, 'import numpy as np\n'), ((8348, 8359), 'time.time', 'time.time', ([], {}), '()\n', (8357, 8359), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Global settings for example scripts
"""
from pyplis.inout import find_test_data
from pyplis import __version__, LineOnImage
from numpy import subtract
from os.path import join
from optparse import OptionParser
# the pyplis version for which these scripts were written
SCRIPTS_VERSION = "0.12"
SAVEFIGS = 1 # save plots from this script in SAVE_DIR
DPI = 150 #pixel resolution for saving
FORMAT = "png" #format for saving
SCREENPRINT = 0 #show images on screen when executing script
# Image directory
# NOTE(review): os.path.join discards earlier components when a later one
# is absolute, so IMG_DIR resolves to the hard-coded local path, not to
# find_test_data() -- confirm this machine-specific override is intended.
IMG_DIR = join(find_test_data(), "/home/aky/software/pyplis-master/pyplis/data/testdata_minimal/images")
# Directory where results are stored
SAVE_DIR = join(".", "scripts_out")
#SAVE_DIR = r'D:/Dropbox/TEMP/jgliss_publications/pyplis/graphics/out_code/'
# Emission rate retrieval lines
#ORANGE LINE IN YOUNG PLUME
PCS1 = LineOnImage(345, 350, 450, 195, pyrlevel_def=1,
                line_id="young_plume", color="#e67300",
                normal_orientation="left")
#BLUE LINE IN AGED PLUME
PCS2 = LineOnImage(80, 10, 80, 270, pyrlevel_def=1,
                line_id="old_plume", color="#1a1aff",
                normal_orientation="left")
LINES = [PCS1, PCS2]
# command-line option parser shared by the example scripts
OPTPARSE = OptionParser(usage='')
OPTPARSE.add_option('--show', dest="show", default=SCREENPRINT)
from matplotlib import rcParams
rcParams.update({'font.size': 13})
def check_version():
    """Raise an Exception if the installed pyplis major.minor version
    differs from SCRIPTS_VERSION (the version these example scripts
    were written for)."""
    v_code = [int(x) for x in __version__.split(".")[:2]]
    v_scripts = [int(x) for x in SCRIPTS_VERSION.split(".")[:2]]
    # Direct list comparison replaces the confusing
    # `any(subtract(v_scripts, v_code)) != 0` -- any() already returns a
    # bool, so the `!= 0` and the numpy dependency added nothing; the
    # raise condition (major or minor differs) is identical.
    if v_code != v_scripts:
        raise Exception("Version conflict between pyplis installation (v%s) "
                        "and version of example scripts used (v%s). Please "
                        "update your pyplis installation or use the set of example "
                        "scripts corresponding to your installation. "
                        %(__version__, SCRIPTS_VERSION))
| [
"numpy.subtract",
"optparse.OptionParser",
"matplotlib.rcParams.update",
"pyplis.LineOnImage",
"pyplis.__version__.split",
"pyplis.inout.find_test_data",
"os.path.join"
] | [((674, 698), 'os.path.join', 'join', (['"""."""', '"""scripts_out"""'], {}), "('.', 'scripts_out')\n", (678, 698), False, 'from os.path import join\n'), ((845, 963), 'pyplis.LineOnImage', 'LineOnImage', (['(345)', '(350)', '(450)', '(195)'], {'pyrlevel_def': '(1)', 'line_id': '"""young_plume"""', 'color': '"""#e67300"""', 'normal_orientation': '"""left"""'}), "(345, 350, 450, 195, pyrlevel_def=1, line_id='young_plume',\n color='#e67300', normal_orientation='left')\n", (856, 963), False, 'from pyplis import __version__, LineOnImage\n'), ((1045, 1159), 'pyplis.LineOnImage', 'LineOnImage', (['(80)', '(10)', '(80)', '(270)'], {'pyrlevel_def': '(1)', 'line_id': '"""old_plume"""', 'color': '"""#1a1aff"""', 'normal_orientation': '"""left"""'}), "(80, 10, 80, 270, pyrlevel_def=1, line_id='old_plume', color=\n '#1a1aff', normal_orientation='left')\n", (1056, 1159), False, 'from pyplis import __version__, LineOnImage\n'), ((1241, 1263), 'optparse.OptionParser', 'OptionParser', ([], {'usage': '""""""'}), "(usage='')\n", (1253, 1263), False, 'from optparse import OptionParser\n'), ((1361, 1395), 'matplotlib.rcParams.update', 'rcParams.update', (["{'font.size': 13}"], {}), "({'font.size': 13})\n", (1376, 1395), False, 'from matplotlib import rcParams\n'), ((534, 550), 'pyplis.inout.find_test_data', 'find_test_data', ([], {}), '()\n', (548, 550), False, 'from pyplis.inout import find_test_data\n'), ((1552, 1579), 'numpy.subtract', 'subtract', (['v_scripts', 'v_code'], {}), '(v_scripts, v_code)\n', (1560, 1579), False, 'from numpy import subtract\n'), ((1448, 1470), 'pyplis.__version__.split', '__version__.split', (['"""."""'], {}), "('.')\n", (1465, 1470), False, 'from pyplis import __version__, LineOnImage\n')] |
# derivative.py - determine first and second derivative
import numpy as np
## ---- First derivative of y w.r.t x ----
def firstderivxy(x, y):
    """
    Function to determine first derivative of y w.r.t x (dy/dx) on a
    possibly non-uniform grid.

    Interior points use the central difference
        d[i] = (y[i+1] - y[i-1]) / (x[i+1] - x[i-1])
    and the end points use one-sided differences.

    Parameters
    ----------
    x : ndarray
    y : ndarray

    Output
    ------
    ndarray
        first derivative

    Reference
    ---------
    Implementation of MATLAB code from
    http://terpconnect.umd.edu/~toh/spectrum/functions.html

    Fixes vs. original: the left end point used points 1 and 2 instead
    of 0 and 1 (an off-by-one left over from MATLAB's 1-based indexing),
    and the interior used half a backward difference
    ((y[i]-y[i-1]) / (2*(x[i]-x[i-1]))) instead of a central difference,
    inconsistent with firstderiv() and the cited reference.
    """
    n = y.shape[0]
    d = np.zeros(n)
    # one-sided differences at the boundaries
    d[0] = (y[1] - y[0]) / (x[1] - x[0])
    d[n-1] = (y[n-1] - y[n-2]) / (x[n-1] - x[n-2])
    # central differences in the interior
    for i in range(1, n-1):
        d[i] = (y[i+1] - y[i-1]) / (x[i+1] - x[i-1])
    return d
## ---- Second derivative of y w.r.t. x ----
def secderivxy(x, y):
    """
    Second derivative of y w.r.t. x on a possibly non-uniform grid.

    For each interior point the forward and backward first differences
    are divided by half the surrounding interval; end points copy their
    nearest interior value.

    Parameters
    ----------
    x : ndarray
    y : ndarray

    Output
    ------
    ndarray
        Second derivative

    Reference
    ---------
    Implementation of MATLAB code from
    http://terpconnect.umd.edu/~toh/spectrum/functions.html
    """
    n = y.shape[0]
    d = np.zeros(n)
    for j in range(1, n - 1):
        left, mid, right = x[j - 1], x[j], x[j + 1]
        fwd = (y[j + 1] - y[j]) / (right - mid)   # forward slope
        bwd = (y[j] - y[j - 1]) / (mid - left)    # backward slope
        d[j] = (fwd - bwd) / ((right - left) / 2)
    # extend the boundary values from the nearest interior point
    d[0] = d[1]
    d[-1] = d[-2]
    return d
## ---- First derivative of signal ----
def firstderiv(signal):
    """
    First derivative of a uniformly sampled signal using the 2-point
    central difference; one-sided differences at the boundaries.

    Parameters
    ----------
    signal : ndarray

    Output
    ------
    ndarray
        First derivative

    Reference
    ---------
    Implementation of MATLAB code from
    http://terpconnect.umd.edu/~toh/spectrum/functions.html
    """
    n = signal.shape[0]
    d = np.zeros(n)
    # interior: central difference (vectorized form of the same arithmetic)
    d[1:-1] = (signal[2:] - signal[:-2]) / 2.0
    # boundaries: forward / backward differences
    d[0] = signal[1] - signal[0]
    d[n - 1] = signal[n - 1] - signal[n - 2]
    return d
## ---- Second Derivative of signal ----
def secderiv(signal):
"""
Function to determine second derivative of signal using 3-point central
difference.
Parameters
----------
signal : ndarray
Output
------
ndarray
Second derivative
Reference
---------
Implementation of MATLAB code from
http://terpconnect.umd.edu/~toh/spectrum/functions.html
"""
# Second derivative of vector using 3-point central difference.
n = signal.shape[0]
d = np.zeros(n)
for j in range(1, n-1):
d[j] = signal[j+1] - 2 * signal[j] + signal[j]
d[0] = d[1]
d[n-1] = d[n-2]
return d | [
"numpy.zeros"
] | [((525, 536), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (533, 536), True, 'import numpy as np\n'), ((1199, 1210), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1207, 1210), True, 'import numpy as np\n'), ((1902, 1913), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1910, 1913), True, 'import numpy as np\n'), ((2628, 2639), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2636, 2639), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from pybnn.bohamiann import Bohamiann
def f(x):
    """Toy regression target: a sinc oscillation peaking at x = 0.5."""
    scaled = 10 * x - 5
    return np.sinc(scaled)
# reproducible toy dataset: 20 random inputs in [0, 1) and their targets
rng = np.random.RandomState(42)
x = rng.rand(20)
y = f(x)
# dense grid for plotting the true function and the model's prediction
grid = np.linspace(0, 1, 200)
fvals = f(grid)
# show the training data against the true function
plt.plot(grid, fvals, "k--")
plt.plot(x, y, "ro")
plt.grid()
plt.xlim(0, 1)
plt.show()
# -- Train Model ---
model = Bohamiann(print_every_n_steps=1000)
model.train(x[:, None], y, num_steps=20000, num_burn_in_steps=2000, keep_every=50, lr=1e-2, verbose=True)
# -- Predict with Model ---
# m = predictive mean, v = predictive variance on the grid
m, v = model.predict(grid[:, None])
plt.plot(x, y, "ro")
plt.grid()
plt.plot(grid, fvals, "k--")
plt.plot(grid, m, "blue")
# shade 1, 2 and 3 standard-deviation bands around the mean
plt.fill_between(grid, m + np.sqrt(v), m - np.sqrt(v), color="orange", alpha=0.8)
plt.fill_between(grid, m + 2 * np.sqrt(v), m - 2 * np.sqrt(v), color="orange", alpha=0.6)
plt.fill_between(grid, m + 3 * np.sqrt(v), m - 3 * np.sqrt(v), color="orange", alpha=0.4)
plt.xlim(0, 1)
plt.xlabel(r"Input $x$")
plt.ylabel(r"Output $f(x)$")
plt.show()
# -- Get Prediction Samples --
# m, v, samples = model.predict(grid[:, None], return_individual_predictions=True)
# print(samples.shape)
# for sample in samples:
# plt.plot(grid, sample, "blue", alpha=0.2)
#
# plt.plot(x, y, "ro")
# plt.grid(True)
# plt.plot(grid, fvals, "k--")
# plt.xlim(0, 1)
# plt.xlabel(r"Input $x$")
# plt.ylabel(r"Output $f(x)$")
# plt.show() | [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.random.RandomState",
"numpy.sinc",
"matplotlib.use",
"pybnn.bohamiann.Bohamiann",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"numpy.sqrt"
] | [((37, 60), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (51, 60), False, 'import matplotlib\n'), ((177, 202), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (198, 202), True, 'import numpy as np\n'), ((238, 260), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(200)'], {}), '(0, 1, 200)\n', (249, 260), True, 'import numpy as np\n'), ((278, 306), 'matplotlib.pyplot.plot', 'plt.plot', (['grid', 'fvals', '"""k--"""'], {}), "(grid, fvals, 'k--')\n", (286, 306), True, 'import matplotlib.pyplot as plt\n'), ((307, 327), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""ro"""'], {}), "(x, y, 'ro')\n", (315, 327), True, 'import matplotlib.pyplot as plt\n'), ((328, 338), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (336, 338), True, 'import matplotlib.pyplot as plt\n'), ((339, 353), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (347, 353), True, 'import matplotlib.pyplot as plt\n'), ((355, 365), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (363, 365), True, 'import matplotlib.pyplot as plt\n'), ((397, 432), 'pybnn.bohamiann.Bohamiann', 'Bohamiann', ([], {'print_every_n_steps': '(1000)'}), '(print_every_n_steps=1000)\n', (406, 432), False, 'from pybnn.bohamiann import Bohamiann\n'), ((604, 624), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""ro"""'], {}), "(x, y, 'ro')\n", (612, 624), True, 'import matplotlib.pyplot as plt\n'), ((625, 635), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (633, 635), True, 'import matplotlib.pyplot as plt\n'), ((636, 664), 'matplotlib.pyplot.plot', 'plt.plot', (['grid', 'fvals', '"""k--"""'], {}), "(grid, fvals, 'k--')\n", (644, 664), True, 'import matplotlib.pyplot as plt\n'), ((665, 690), 'matplotlib.pyplot.plot', 'plt.plot', (['grid', 'm', '"""blue"""'], {}), "(grid, m, 'blue')\n", (673, 690), True, 'import matplotlib.pyplot as plt\n'), ((953, 967), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), 
'(0, 1)\n', (961, 967), True, 'import matplotlib.pyplot as plt\n'), ((968, 991), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Input $x$"""'], {}), "('Input $x$')\n", (978, 991), True, 'import matplotlib.pyplot as plt\n'), ((993, 1020), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Output $f(x)$"""'], {}), "('Output $f(x)$')\n", (1003, 1020), True, 'import matplotlib.pyplot as plt\n'), ((1022, 1032), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1030, 1032), True, 'import matplotlib.pyplot as plt\n'), ((150, 169), 'numpy.sinc', 'np.sinc', (['(x * 10 - 5)'], {}), '(x * 10 - 5)\n', (157, 169), True, 'import numpy as np\n'), ((718, 728), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (725, 728), True, 'import numpy as np\n'), ((734, 744), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (741, 744), True, 'import numpy as np\n'), ((804, 814), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (811, 814), True, 'import numpy as np\n'), ((824, 834), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (831, 834), True, 'import numpy as np\n'), ((894, 904), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (901, 904), True, 'import numpy as np\n'), ((914, 924), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (921, 924), True, 'import numpy as np\n')] |
# Python modules
import os
import math
import xml.etree.cElementTree as ElementTree
# 3rd party modules
import wx
import numpy as np
# Our modules
import vespa.simulation.constants as constants
import vespa.simulation.util_simulation_config as util_simulation_config
import vespa.simulation.dialog_mixed_metabolite_designer as dialog_mixed_metabolite_designer
import vespa.simulation.auto_gui.mixed_metabolite_output as mixed_metabolite_output
import vespa.common.mrs_experiment as mrs_experiment
import vespa.common.util.ppm as util_ppm
import vespa.common.util.xml_ as util_xml
import vespa.common.util.time_ as util_time
import vespa.common.util.misc as util_misc
import vespa.common.util.config as util_config
import vespa.common.util.export as util_export
import vespa.common.util.generic_spectral as util_generic_spectral
import vespa.common.constants as common_constants
import vespa.common.wx_gravy.common_dialogs as common_dialogs
import vespa.common.wx_gravy.util as wx_util
import vespa.common.mrs_prior as mrs_prior
import vespa.common.mrs_prior_metabolite as mrs_prior_metabolite
from wx.lib.agw.floatspin import FloatSpin, EVT_FLOATSPIN, FS_LEFT, FS_RIGHT, FS_CENTRE, FS_READONLY
from vespa.common.wx_gravy.widgets.floatspin_multiplier.floatspin_multiplier_base import FloatSpinMultiplier
from vespa.common.constants import Deflate
PI = math.pi
# This is the number of places to the right of the decimal displayed when
# a dim is a floating point number.
_SIGNIFICANT_DIGITS = 6
# Provenance banner prepended to every exported file's comment section.
_VESPA_FYI_COMMENT = """This file was generated by Vespa-Simulation. You \
can learn about and download Vespa here:
https://github.com/vespa-mrs/vespa/tree/main/vespa
"""
# Format used when the caller of DialogMixedMetaboliteOutput passes format=None.
_DEFAULT_OUTPUT_FORMAT = constants.ThirdPartyExportTypes.ANALYSIS_PRIOR
# _OUTPUT_ABBREVIATION_DISALLOWED lists the characters that we don't allow in
# metab abbreviations. The only reason we have any restrictions is because
# the abbreviations become filenames in some formats and Windows disallows
# all of these characters in filenames.
# ref: http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
# NOTE: the parentheses do not make this a tuple - it is a plain string,
# which works fine for 'c in _OUTPUT_ABBREVIATION_DISALLOWED' style tests.
_OUTPUT_ABBREVIATION_DISALLOWED = ('/*?|<>"\\')
# The message displayed when a metab name isn't kosher.
_INVALID_ABBREVIATION_MESSAGE = """Sorry, the abbreviation "%s" contains characters that \
Simulation Mixed Output doesn't allow.
Please don't use the following characters:
"""
_INVALID_ABBREVIATION_MESSAGE += _OUTPUT_ABBREVIATION_DISALLOWED
def _find_best_column_size(listctrl, column):
    """Return the optimal width (pixels) for a ListCtrl column.

    A ListCtrl column can be auto-sized to fit either its header or its
    widest content item; either one alone can clip the other. This measures
    both and returns the larger of the two. Note that it mutates the
    column width as a side effect of measuring (the caller is expected to
    set the final width afterwards).
    """
    measured = []
    for autosize_mode in (wx.LIST_AUTOSIZE, wx.LIST_AUTOSIZE_USEHEADER):
        listctrl.SetColumnWidth(column, autosize_mode)
        measured.append(listctrl.GetColumnWidth(column))
    return max(measured)
def _make_basis(vals, metabs_dict, npts, sw, apod, broad, field,
                resppm, singlet_flag=False):
    """
    Build one apodized complex time-domain basis FID for a single output
    line (a metabolite or a metabolite mixture).

    vals        - dict for one row of the dynamic output list; keys used
                  here: 'metabolite', 'abbr', 'scale', 'shift',
                  'range_start', 'range_end', 'mixture'
    metabs_dict - maps metabolite name -> dict with 'ppms', 'areas' and
                  'phases' numpy arrays (one entry per spectral line)
    npts, sw    - number of points and sweep width [Hz] of the FID
    apod, broad - apodization shape and linewidth passed to
                  util_generic_spectral.apodize()
    field       - spectrometer frequency [MHz], used for ppm -> Hz
    resppm      - on-resonance (center) ppm value
    singlet_flag- if True, append a unit-area, zero-phase singlet at
                  0.0 ppm (used as an LCModel reference peak)

    Returns a complex numpy array of length npts.
    """
    # This first section parses the vals dictionary to create
    # lists of values for ppm, areas and phases. This may be just
    # one metabolite's values or some mixture of them.
    ppms = np.array([],dtype='float')
    areas = np.array([],dtype='float')
    phases = np.array([],dtype='float')
    mname = vals["metabolite"]
    abbr = vals["abbr"]           # unused here; kept for parity with vals keys
    scale = vals["scale"]
    shift = vals["shift"]
    ppmstr = vals["range_start"]
    ppmend = vals["range_end"]
    # formula is a tuple of metabolite name and scale
    # factors, or it is None if not a mixture
    formula = vals["mixture"]
    if not formula:
        # single metabolite - apply global shift and scale
        # values to the ppms and areas respectively
        tmp = metabs_dict[mname]
        ppms = tmp['ppms'] + shift
        areas = tmp['areas'] * scale
        phases = tmp['phases']
    else:
        # metabolite mixture - apply global shift and scale
        # values as well as mixture scale value
        for mix in formula:
            tmp = metabs_dict[mix[0]]
            ppms = np.concatenate((ppms, (tmp['ppms'] + shift)))
            areas = np.concatenate((areas, (tmp['areas'] * scale * mix[1])))
            phases = np.concatenate((phases, tmp['phases']))
    # keep only lines strictly inside the (ppmstr, ppmend) window
    indx = ((ppms > ppmstr) & (ppms < ppmend)).nonzero()[0]
    if indx.size:
        ppms = ppms[indx]
        areas = areas[indx]
        phases = phases[indx]
    # NOTE(review): when NO line falls inside the window the arrays are left
    # unfiltered, so every line is output - confirm this fallback is intended
    # rather than producing an empty basis.
    # LCModel can optionally add a singlet reference peak at 0.0 ppm
    if singlet_flag:
        ppms = np.concatenate((ppms, np.array([0.0])))
        areas = np.concatenate((areas, np.array([1.0])))
        phases = np.concatenate((phases, np.array([0.0])))
    # Create basis functions for the metabolite
    td = 1.0/float(sw)                 # dwell time [s] (currently unused)
    nhalf = npts / 2                   # center point of the spectrum
    hpp = float(sw) / float(npts)      # Hz per point
    const1 = field/hpp                 # ppm offset -> points
    const2 = PI * 2 * hpp              # points -> angular frequency [rad/s]
    arr1 = np.ones(npts, dtype='float32')   # (unused)
    nlines = len(areas)
    xx = (np.arange(npts, dtype='float32') % npts)/sw   # time axis [s]
    # create apodization function
    lshape = util_generic_spectral.apodize(xx, apod, broad)
    # convert freq(ppm)/phase(deg) to freq(hz)/phase(rad)
    freq = (nhalf - (ppms - resppm) * const1) # ppm2pts
    freq = freq * const2 * 1j
    freq = np.repeat(freq, npts).reshape(nlines,npts)
    phas = areas * np.exp(phases * common_constants.DEGREES_TO_RADIANS * 1j)
    phas = np.repeat(phas, npts).reshape(nlines,npts)
    # calculate the FIDs for each line
    xx = np.tile(xx, nlines)
    xx.shape = nlines, npts
    xx = freq * xx
    xx = phas * np.exp(xx)
    # sum lines from each simulation into one apodized uber-FID
    xx = xx[0:nlines,:].sum(axis=0)
    res = xx * lshape
    return res
def _write_lines(filename, lines, force_unix_newlines=False):
"""Given a filename and a list of lines, joins those lines into a
newline-delimited string and writes it to filename. If the file
exists, it's truncated before writing.
The strings in the list of lines are converted to Unicode before
writing if they're not Unicode already. If any of the non-Unicode
strings contain non-ASCII, the conversion will fail. See:
https://vespa-mrs.github.io/vespa.io/development/project_dev/technical/ThePerilsOfStr.html?highlight=perils
By default, files are written with newline characters appropriate to the
current platform. That's CRLF (0x0d 0x0a) on Windows and LF (0x0a)
everywhere else. If the param force_unix_newlines is True, then this
function will write LF for line endings regardless of platform.
In practice, we leave force_unix_newlines at its default except when
generating LCModel files. Regardless of where they're generated,
they're only used under *nix.
"""
# Note that all_lines will be a Unicode string after this join()
# because anything returned from wx will be Unicode, and any
# non-Unicode string joined to a Unicode string is "promoted" to
# Unicode. Atfer the join we explicitly force the Unicode string
# to UTF-8 so that it will be safe for write().
lines = "\n".join(lines)
lines = lines.encode("utf-8")
if (wx.Platform == "__WXMSW__") and not force_unix_newlines:
lines = lines.replace("\n", "\r\n")
open(filename, "wb").write(lines)
#------------------------------------------------------------------------------
# Note. GUI Architecture/Style
#
# Many of the GUI components in Vespa are designed using the WxGlade
# application to speed up development times. The GUI components are designed
# interactively and users can preview the resultant window/panel/dialog, but
# while event functions can be specified, only stub functions with those
# names are created. The WxGlade files (with *.wxg extensions) are stored in
# the 'wxglade' subdirectory. The ouput of their code generation are stored
# in the 'auto_gui' subdirectory.
#
# To used these GUI classes, each one is inherited into a unique 'vespa'
# class, where program specific initialization and other functionality are
# written. Also, the original stub functions for widget event handlers are
# overloaded to provide program specific event handling.
#------------------------------------------------------------------------------
class DialogMixedMetaboliteOutput(mixed_metabolite_output.MyDialog):
"""
This dialog is used to output metabolite results and mixtures of
metabolite results to a variety of formats as governed by the format
selector widget.
The format parameter to __init__() must be one of the constants in
constants.ThirdPartyExportTypes.
In general - There must be at least one Experiment tab open in the
main application for this dialog to launch. If the only tab contains
a 'New' Experiment, it must be run at least once before this dialog
can be used. If more than one Experiment tab is open in the Notebook,
then the currently active one is the one whose results are output.
For the lcmodel and jmruitext formats, FID basis functions are created
at a specified number of points and spectral resolution. The default
values for these parameters are taken from the spectral resolution set
in the Visualize tab (passed into the dialog by the 'local' parameter)
but can be changed by the user in the relevant widgets if necessary.
Format specific details:
"lcmodel" format outputs all metabolites for one set of Experiment
loop index (loop1, loop2, loop3) values in the currently selected tab.
The loop values selected in the Visualize tab are used. The user must
select a directory and filename for output. The filename is used to
write a text description of how the LCModel RAW files are created.
All RAW files also contain a copy of this description above the actual
LCModel RAW header and data sections
"gava" format outputs all metabolites for all loop indices in the
Experiment in the currently selected tab. The user must select a
directory and filename for output. The filename is used for the gava
output file. This file also contains, above the results section, a text
description of how the gava results (and mixtures) are created.
"jmruitext" format outputs all metabolites for one set of Experiment
loop index (loop1, loop2, loop3) values in the currently selected tab.
The loop values selected in the Visualize tab are used. The user must
select a directory and filename for output. The filename is used to
write a text description of how the jMRUI Data Text files are created.
"midasxml" format outputs all metabolites for all loop indices in the
Experiment in the currently selected tab. The user must select a
directory and filename for output. The filename is used for the MIDAS XML
output file name. This file contains two nodes,
1) VESPA_SIMULATION_MIDAS_EXPORT - has the description of how the
metabolites and metabolite mixtures were output.
2) FITT_Generic_XML - contains the Experiment results.
In both nodes, there are multiple "comment" or "param" tags, respectively
which contain "name" and "value" attributes in which data is stored.
There is no data stored in the actual tag, just attributes. This type of
file is typically read into the MIDAS program to provide prior metabolite
information for the FITT2 application.
"""
def __init__(self, parent, experiment, local=None, format=None):
if not parent:
parent = wx.GetApp().GetTopWindow()
mixed_metabolite_output.MyDialog.__init__(self, parent)
#------------------------------------------------------------
# set up a local container for some parameter settings that are
# frequently referenced between processing objects for convenience
#------------------------------------------------------------
# class FakeDataset(object):
# pass
class FakeDataset(object):
def __init__(self):
self.spectral_dims = [2048,1,1,1]
self.raw_dims = [2048,1,1,1]
self.sw = 2500.0
self.hpp = self.sw / self.spectral_dims[0]
self.resppm = 4.7
self.frequency = 124.0
self.spectral_hpp = self.sw / self.spectral_dims[0]
self.raw_hpp = self.sw / self.raw_dims[0]
def ppm2pts(self, val, acq=False, rel=False):
dim0 = self.raw_dims[0] if acq else self.spectral_dims[0]
hpp = self.raw_hpp if acq else self.spectral_hpp
pts = self.frequency*val/hpp if rel else (dim0/2) - (self.frequency*(val-self.resppm)/hpp)
pts = np.where(pts > 0, pts, 0)
return pts
def ppm2hz(self, val, acq=False, rel=False):
hpp = self.raw_hpp if acq else self.spectral_hpp
ppm = self.pts2hz(self.ppm2pts(val)) if rel else self.ppm2pts(val, rel=rel) * hpp
return ppm
def pts2ppm(self, val, acq=False, rel=False):
dim0 = self.raw_dims[0] if acq else self.spectral_dims[0]
hpp = self.raw_hpp if acq else self.spectral_hpp
ppm = val*hpp/self.frequency if rel else (((dim0/2)-val)*(hpp/self.frequency))+self.resppm
return ppm
def pts2hz(self, val, acq=False, rel=False):
hpp = self.raw_hpp if acq else self.spectral_hpp
hz = val * hpp if rel else (self.ppm2pts(0.0) - val) * hpp
return hz
def hz2ppm(self, val, acq=False, rel=False):
hpp = self.raw_hpp if acq else self.spectral_hpp
val = self.pts2ppm(self.hz2pts(val)) if rel else self.pts2ppm(val / hpp)
return val
def hz2pts(self, val, acq=False, rel=False):
hpp = self.raw_hpp if acq else self.spectral_hpp
pts = val / hpp if rel else self.ppm2pts(0.0) - (val / hpp)
return pts
self.local = FakeDataset()
if not local:
self.local.spectral_dims = [2048,1,1,1]
self.local.raw_dims = [2048,1,1,1]
self.local.sw = 2500.0
self.local.hpp = self.local.sw / self.local.spectral_dims[0]
self.local.spectral_hpp = local.sw / local.spectral_dims[0]
self.local.raw_hpp = local.sw / local.spectral_dims[0]
self.local.resppm = 4.7
self.local.frequency = 124.0
else:
self.local.spectral_dims = local.spectral_dims
self.local.raw_dims = local.raw_dims
self.local.sw = local.sw
self.local.hpp = local.sw / local.spectral_dims[0]
self.local.spectral_hpp = local.sw / local.spectral_dims[0]
self.local.raw_hpp = local.sw / local.spectral_dims[0]
self.local.resppm = local.resppm
self.local.frequency = local.frequency
self.parent = parent
self.experiment = experiment
self.format = format if format else _DEFAULT_OUTPUT_FORMAT
self.local.apodization_value = common_constants.DEFAULT_LINEWIDTH
self.local.apodization_shape = 'gaussian'
self.final_prior = None
#------------------------------
# Initialize widget controls
self._initialize_controls()
self.Layout()
self.Fit()
self._improve_height()
# Under Linux, the focus is elsewhere (?) unless I manually
# set it to the list
self.ListLoop1.SetFocus()
self.Bind(wx.EVT_SIZE, self.on_size)
##### Event Handlers ######################################################
def on_list_select(self, event):
# This is called when the user makes a selection in any of the lists.
# Figure out which listctrl was selected
for i in range(1, 4):
listctrl = getattr(self, "ListLoop%d" % i)
if event.GetId() == listctrl.GetId():
break
# Update the heading to reflect what was selected
self._set_list_heading(listctrl, i - 1)
    def on_size(self, event):
        """Re-wrap the instruction label whenever the dialog is resized."""
        # Correct label wrapping if necessary.
        self._wrap_instructions()
def on_browse(self, event):
default_path = util_config.get_last_export_path()
if self.format in (constants.ThirdPartyExportTypes.LCMODEL,
constants.ThirdPartyExportTypes.JMRUI,
):
# These formats write a bunch of files and so we prompt users
# to select a directory rather than a single filename.
if self.format == constants.ThirdPartyExportTypes.LCMODEL:
filename = "lcmodel_output_summary.txt"
message = "LCModel Output Path"
if self.format == constants.ThirdPartyExportTypes.JMRUI:
filename = "jmrui-text_output_summary.txt"
message = "jMRUI Data Output Path"
path = common_dialogs.pickdir(message=message,
default_path=default_path)
if path:
self.LabelFilename.SetLabel(path)
elif self.format in (constants.ThirdPartyExportTypes.ANALYSIS_DIRECT,):
pass
else:
# These formats write a single file and so we prompt users
# to select single filename.
if self.format == constants.ThirdPartyExportTypes.GAVA:
default_filename = "gava_output.txt"
filter_ = "GAVA Mixed Output Filename (*.txt)|*.txt"
elif self.format == constants.ThirdPartyExportTypes.MIDAS_PRIOR:
default_filename = "midas_output.xml"
filter_ = "MIDAS Mixed Output Filename (*.xml)|*.xml"
elif self.format == constants.ThirdPartyExportTypes.ANALYSIS_PRIOR:
default_filename = "analysis_prior_output.xml"
filter_ = "Analysis Prior Mixed Output Filename (*.xml)|*.xml"
filename = common_dialogs.save_as(filetype_filter=filter_,
default_path=default_path,
default_filename=default_filename)
if filename:
self.LabelFilename.SetLabel(filename)
path, _ = os.path.split(filename)
else:
path = ""
util_config.set_last_export_path(path)
def on_sweep_width(self, event):
val = event.GetEventObject().GetValue()
self.FloatLcmSweepWidth.SetValue(val)
self.FloatMetrepSweepWidth.SetValue(val)
self.FloatJmruiSweepWidth.SetValue(val)
sw = self.FloatLcmSweepWidth.GetValue()
npts = self.SpinLcmDataPoints.GetValue()
self.dynamic_output_list.update_ppm_range(npts, sw)
def on_data_points(self, event):
val = event.GetEventObject().GetValue()
self.SpinLcmDataPoints.SetValue(val)
self.SpinMetrepDataPoints.SetValue(val)
self.SpinJmruiDataPoints.SetValue(val)
sw = self.FloatLcmSweepWidth.GetValue()
npts = self.SpinLcmDataPoints.GetValue()
self.dynamic_output_list.update_ppm_range(npts, sw)
def on_apodize(self, event):
val = event.GetEventObject().GetValue()
self.FloatLcmApodize.SetValue(val)
self.FloatMetrepApodize.SetValue(val)
self.FloatJmruiApodize.SetValue(val)
def on_lineshape(self, event):
index = event.GetEventObject().GetCurrentSelection()
self.ChoiceLcmLineshape.SetSelection(index)
self.ChoiceMetrepLineshape.SetSelection(index)
self.ChoiceJmruiLineshape.SetSelection(index)
    def on_select_all(self, event):
        """Check every row in the dynamic metabolite output list."""
        self.dynamic_output_list.select_all()
    def on_deselect_all(self, event):
        """Uncheck every row in the dynamic metabolite output list."""
        self.dynamic_output_list.deselect_all()
    def on_add_metabolite(self, event):
        """Append an empty row to the metabolite list and re-fit the dialog."""
        self.dynamic_output_list.add_row()
        self.Layout()
        self.Fit()
        # Height must be recalculated after wx processes the layout change.
        wx.CallAfter(self._improve_height)
    def on_remove_selected(self, event):
        """Remove all checked rows from the metabolite list and redraw."""
        self.dynamic_output_list.remove_checked_rows()
        self.Layout()
        self.Refresh()
def on_add_mixture(self, event):
names = self.dynamic_output_list.names
abbr = self.dynamic_output_list.get_abbr()
dialog = dialog_mixed_metabolite_designer.DialogMixedMetaboliteDesigner(self, names, abbr)
if dialog.ShowModal() == wx.ID_OK:
unique_name = dialog.unique_name
mixture = dialog.values
if mixture:
self.dynamic_output_list.add_row(unique_name, mixture)
self.Layout()
self.Fit()
# When the user chooses a new format, the dialog's content
# changes and we might have to readjust the height. On Gnome
# (but not OS X or Windows) we have to allow wx to process
# messages before calling self._improve_height(). If we just
# call it directly here, it won't calculate the correct
# height.
wx.CallAfter(self._improve_height)
def on_ok(self,event):
# Validate controls
msg = self._is_gui_valid()
if msg:
common_dialogs.message(msg, None, common_dialogs.I_OK)
else:
# All is well, do the format specific output
wx.BeginBusyCursor()
cmt = ["Vespa-Simulation Mixed Metabolite Output"]
cmt.append(_VESPA_FYI_COMMENT)
cmt.append("")
cmt.append("Output Path/Filename = " + self.LabelFilename.GetLabel().strip())
cmt.append("Output Loop Values = " + self._build_loops_string())
cmt.append("Output Comment")
cmt.append("-" * 75)
cmt += (self.TextComment.GetValue()).split('\n')
cmt.append("")
cmt.append("Experiment Information")
cmt.append("-" * 75)
cmt += (str(self.experiment)).split('\n')
cmt.append("")
cmt.append("Metabolite Formatting Information")
cmt.append("-" * 75)
lines = self.dynamic_output_list.get_values()
for line in lines:
s = "Name=%(metabolite)s Abbr=%(abbr)s Scale=%(scale)s " \
"Shift=%(shift)s PPM Start=%(range_start)s " \
"PPM End=%(range_end)s" % line
cmt.append(s)
if line["mixture"]:
mix_cmt = " Mixture of [metab*scale] = "
for i, mix_line in enumerate(line["mixture"]):
if i:
mix_cmt += " + "
mix_cmt += "%s*%f" % (mix_line[0], mix_line[1])
cmt.append(mix_cmt)
# Map the format to the appropriate function.
ExportTypes = constants.ThirdPartyExportTypes
function_map = {
ExportTypes.LCMODEL : self._do_output_lcmodel,
ExportTypes.ANALYSIS_PRIOR : self._do_output_analysis_prior,
ExportTypes.MIDAS_PRIOR : self._do_output_midasxml,
ExportTypes.JMRUI : self._do_output_jmruitext,
ExportTypes.GAVA : self._do_output_gava,
ExportTypes.ANALYSIS_DIRECT : self._do_output_analysis_direct,
}
function = function_map[self.format]
wx.EndBusyCursor()
try:
function(cmt, lines)
except IOError as xxx_todo_changeme:
(error_number, error_string) = xxx_todo_changeme.args
msg = """Exporting to "%s" failed. The operating system message is below --\n\n""" % filename
msg += error_string
else:
# FIXME - decide if we should allow user to return to dialog to
# output same data in another format without losing
# the work they did setting up the mixtures.
# we were successful, close dialog
self.Close()
if msg:
common_dialogs.message(msg, None, common_dialogs.I_OK)
##### Internal helper functions ##########################################
def _build_loops_string(self):
"""
When this dialog is called, some of the output formats need to know
what one set of metabolite results to output if the Experiment has
more than one settings in either loop1, loop2 or loop3. A dictionary
called 'loops' is passed in that contains the index of loop1/2/3
that is currently selected in the Visualize tab. Also send in are
the label for loop1/2/3 and actual loop 'value' for the given index.
"""
if self.ListLoop1.IsShown() and self.panel_grid_loop.IsShown():
label1 = self.LabelLoop1.GetLabel().strip()
index1 = wx_util.get_selected_item_indices(self.ListLoop1)[0]
else:
label1 = 'Loop1 Inactive'
index1 = 0
if self.ListLoop2.IsShown() and self.panel_grid_loop.IsShown():
label2 = self.LabelLoop2.GetLabel().strip()
index2 = wx_util.get_selected_item_indices(self.ListLoop2)[0]
else:
label2 = 'Loop2 Inactive'
index2 = 0
if self.ListLoop3.IsShown() and self.panel_grid_loop.IsShown():
label3 = self.LabelLoop3.GetLabel().strip()
index3 = wx_util.get_selected_item_indices(self.ListLoop3)[0]
else:
label3 = 'Loop3 Inactive'
index3 = 0
loops_string_label = '( '+label1+', '+label2+', '+label3+' )'
loops_string_index = '('+str(index1+1)+','+str(index2+1)+','+str(index3+1)+')'
return loops_string_index+' '+loops_string_label
    def _improve_height(self):
        """Grow the dialog (up to the usable display height) so the scrolled
        metabolite list shows all of its content without scrolling."""
        # No matter what format the user chooses, we use a scrolled window to
        # contain the list of metabs because with a fixed-sized (non-scrolled)
        # panel, experiments that contain a lot of metabs (20+) can cause
        # this dialog to exceed the display height. However, the scrolled
        # window has its own problem: it doesn't size itself intelligently.
        # It always has the same (small) default height.
        #
        # So here we check to see if the scrolled window is actually scrolled;
        # i.e. it contains content that it can't completely display in its
        # current area. If so, we increase the dialog's height exactly
        # enough to allow the scrolled window to expand so that the user
        # doesn't have to scroll to see the contents.
        _, display_height = wx.GetDisplaySize()
        dialog_width, dialog_height = self.GetSize()
        # Compare virtual height with real height.
        # delta is how much bigger it needs to be to display all of its
        # content without scrolling.
        _, v_height = self.ScrolledWindowDynamicList.GetVirtualSize()
        _, r_height = self.ScrolledWindowDynamicList.GetClientSize()
        delta = v_height - r_height
        # max_delta is the max we can increase the dialog height before it
        # exceeds the display area. Note that wx reports the raw display
        # area without accounting for things like the Windows taskbar or the
        # OS X dock. Actual space available for use by applications may be
        # less. To account for this we subtract a fudge factor of 132 pixels.
        # This is pretty arbitrary, although on my Mac laptop the OS X dock
        # and the top menu occupy 132 pixels and the dock is huge relative
        # to the Windows taskbar and Gnome's similar thingies, so
        # hopefully this will be sufficient everywhere.
        max_delta = (display_height - dialog_height) - 132
        delta = min(delta, max_delta)
        if delta > 0:
            self.SetSize( (dialog_width, dialog_height + delta) )
        self.Center()
    def _initialize_controls(self):
        """One-time GUI setup called from __init__: populates the loop lists
        from the experiment dims, builds the metabolite grid header, creates
        the dynamic output list, adds OK/Cancel, shows the panels relevant to
        self.format, and seeds the format-specific widgets with defaults."""
        # We set this to a default value and change it if we need to.
        self.LabelInstructions.SetLabel("There are no loops for this experiment.")
        #----------------------------------------------------------------------
        # set up Experiment Loop list controls
        # Make a shorthand reference for the labels above the lists
        self.heading_labels = [getattr(self, "LabelLoop%d" % i) for i
                                                            in range(1, 4)]
        # Fiddle with the grid sizer. We tell it that it should have flexible
        # rows (i.e. they can be of different height) but that the columns
        # should be inflexible (i.e. they shrink/grow, but they're always
        # equal to one another).
        # When there's just one or two loops, this arrangement looks better
        # than allowing the lists to grow all the way across the dialog.
        grid_sizer = self.ListLoop1.GetContainingSizer()
        grid_sizer.SetFlexibleDirection(wx.VERTICAL)
        grid_sizer.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_ALL)
        # All we need from the pulse seq is loop labels.
        loop_labels = self.experiment.pulse_sequence.loop_labels
        for i, dim in enumerate(self.experiment.dims):
            listctrl = getattr(self, "ListLoop%d" % (i + 1))
            label = getattr(self, "LabelLoop%d" % (i + 1))
            if dim == mrs_experiment.DEFAULT_LOOP:
                # This is an empty loop.
                listctrl.Hide()
                label.Hide()
            else:
                self.LabelInstructions.SetLabel("Select the experiment dimension (loop indices) you want to use.")
                # Build the ListCtrl columns, set label & font
                listctrl.ClearAll()
                listctrl.InsertColumn(0, "Index", wx.LIST_FORMAT_RIGHT)
                listctrl.InsertColumn(1, loop_labels[i])
                label.SetLabel("Loop %d" % (i + 1))
                self._set_list_heading(listctrl, i)
                # We use monospace font so that the padding we use (spaces)
                # and the numbers will line up correctly.
                wx_util.set_font_to_monospace(listctrl)
                # Figure out the width (in digits) of the max index value and
                # the max dim. This allows us to right justify the numbers
                # in the lists which makes them much easier to read.
                index_digits = len(str(len(dim)))
                value_digits = len("%d" % max(dim))
                is_int = all([int(value) == value for value in dim])
                if is_int:
                    # All dim values are ints
                    formatter = "%d"
                else:
                    # Some values are floats. Format them as such and account
                    # for the width of the digits to the right of the decimal.
                    formatter = "%." + str(_SIGNIFICANT_DIGITS) + "f"
                    value_digits += _SIGNIFICANT_DIGITS
                # Add some padding
                formatter = "  " + formatter
                # Populate the list
                for j, value in enumerate(dim):
                    listctrl.InsertItem(j, str(j + 1).rjust(index_digits))
                    listctrl.SetItem(j, 1, (formatter % value).rjust(value_digits))
                # Size the columns optimally
                listctrl.SetColumnWidth(0, _find_best_column_size(listctrl, 0))
                listctrl.SetColumnWidth(1, _find_best_column_size(listctrl, 1))
        #----------------------------------------------------------------------
        # sets up other widgets in mixed output dialog from values in the
        # experiment sent into the object initialization
        self.LabelStatus.SetLabel("")
        self.LabelFilename.SetLabel("")
        # The grid sizer for the metabs/mixed metabs list is marked with
        # a placeholder so that I can find it now (at runtime).
        placeholder = self.LabelMetaboliteListGridSizerPlaceholder
        self.MetaboliteGridSizer = placeholder.GetContainingSizer()
        parent = placeholder.GetParent()
        placeholder.Destroy()
        # Add headings to the first row of the grid sizer.
        self.MetaboliteGridSizer.Clear()
        self.MetaboliteGridSizer.SetRows(1)
        headings = (None, "Metabolite\nList", "Unique\nAbbreviation", "Scale",
                    "Frequency\nShift [ppm]", "Range Start\n[ppm]", "Range End\n[ppm]")
        for heading in headings:
            if heading:
                label = wx.StaticText(parent, label=heading, style=wx.ALIGN_CENTRE)
                self.MetaboliteGridSizer.Add(label, 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL)
            else:
                # None means "leave this header cell blank" (checkbox column).
                self.MetaboliteGridSizer.AddSpacer( 1 )
        # Columns 1 & 2 (the metab combobox and the metab abbreviation)
        # expand to fill whatever space is available.
        self.MetaboliteGridSizer.AddGrowableCol(1, 1)
        self.MetaboliteGridSizer.AddGrowableCol(2, 1)
        # Add widgets to dialog that wxGlade could not
        self.dynamic_output_list = DynamicOutputList(self.ScrolledWindowDynamicList,
                                                     self.MetaboliteGridSizer,
                                                     self.experiment.metabolites,
                                                     self.local)
        # We add the OK & Cancel buttons dynamically so that they're in the
        # right order under OS X, GTK, Windows, etc.
        self.ButtonOk, self.ButtonCancel = \
                    wx_util.add_ok_cancel(self, self.LabelOkCancelPlaceholder,
                                          self.on_ok)
        # set initial values in some of the widgets
        # shape index feeds the three lineshape Choice widgets below
        # (0 = gaussian, 1 = the other configured shape).
        if self.local.apodization_shape == 'gaussian':
            shape = 0
        else:
            shape = 1
        # All panels are hidden by default, and then we show the ones
        # appropriate for the selected format.
        for panel in (self.panel_grid_loop, self.panel_output_location,
                      self.panel_format_specific_parameters,
                      self.panel_parameters_lcmodel,
                      self.panel_parameters_metabolitereport,
                      self.panel_parameters_jmruitext,
                     ):
            panel.Hide()
        if self.format == constants.ThirdPartyExportTypes.LCMODEL:
            self.SetTitle("Third Party Export to LCModel")
            self.panel_grid_loop.Show()
            self.panel_output_location.Show()
            self.panel_format_specific_parameters.Show()
            self.panel_parameters_lcmodel.Show()
        elif self.format == constants.ThirdPartyExportTypes.GAVA:
            self.SetTitle("Third Party Export to GAVA Text")
            self.LabelInstructions.SetLabel("All loops will be saved for this format.")
            self.panel_output_location.Show()
        # FIXME PS - at present, this is the only code that references
        # the MetaboliteReport format. It's not implemented otherwise.
        # elif self.format == constants.ThirdPartyExportTypes.METABOLITEREPORT:
        #     self.SetTitle("Third Party Export to MetaboliteReport")
        #     self.panel_grid_loop.Show()
        #     self.panel_output_location.Show()
        #     self.panel_format_specific_parameters.Show()
        #     self.panel_parameters_metabolitereport.Show()
        elif self.format == constants.ThirdPartyExportTypes.JMRUI:
            self.SetTitle("Third Party Export to jMRUI Text")
            self.panel_grid_loop.Show()
            self.panel_output_location.Show()
            self.panel_format_specific_parameters.Show()
            self.panel_parameters_jmruitext.Show()
        elif self.format == constants.ThirdPartyExportTypes.MIDAS_PRIOR:
            self.SetTitle("Third Party Export to MIDAS XML")
            self.panel_grid_loop.Show()
            self.panel_output_location.Show()
        elif self.format == constants.ThirdPartyExportTypes.ANALYSIS_PRIOR:
            self.SetTitle("Third Party Export to Analysis Prior XML")
            self.panel_grid_loop.Show()
            self.panel_output_location.Show()
        elif self.format == constants.ThirdPartyExportTypes.ANALYSIS_DIRECT:
            self.SetTitle("Analysis Prior Selection ")
            self.panel_grid_loop.Show()
            # self.panel_output_location.Hide()
        #-----------------------------
        # LCModel panel defaults
        self.TextLcmFmtdat.SetValue('(2E16.6)')
        self.FloatLcmTramp.SetValue(1.0)
        self.FloatLcmVolume.SetValue(1.0)
        self.FloatLcmSweepWidth.SetRange(constants.SWEEP_WIDTH_MIN,constants.SWEEP_WIDTH_MAX)
        self.FloatLcmSweepWidth.SetValue(self.local.sw)
        self.SpinLcmDataPoints.SetRange(constants.SPECTRAL_POINTS_MIN,constants.SPECTRAL_POINTS_MAX)
        self.SpinLcmDataPoints.SetValue(self.local.spectral_dims[0])
        self.FloatLcmApodize.SetRange(constants.LINEWIDTH_MIN,constants.LINEWIDTH_MAX)
        self.FloatLcmApodize.SetValue(self.local.apodization_value)
        self.ChoiceLcmLineshape.SetSelection(shape)
        #-----------------------------
        # Metabolite Report panel defaults (format currently unimplemented)
        self.TextMetrepPulseq.SetValue('se')
        self.TextMetrepEcho.SetValue('030')
        self.FloatMetrepField.SetValue(3.0)
        self.SpinMetrepOffset.SetValue(10)
        self.FloatMetrepTime1.SetValue(0.0)
        self.FloatMetrepTime2.SetValue(0.0)
        self.FloatMetrepTime3.SetValue(0.0)
        self.FloatMetrepTimeMix.SetValue(0.0)
        self.FloatMetrepSweepWidth.SetRange(constants.SWEEP_WIDTH_MIN,constants.SWEEP_WIDTH_MAX)
        self.FloatMetrepSweepWidth.SetValue(self.local.sw)
        self.SpinMetrepDataPoints.SetRange(constants.SPECTRAL_POINTS_MIN,constants.SPECTRAL_POINTS_MAX)
        self.SpinMetrepDataPoints.SetValue(self.local.spectral_dims[0])
        self.FloatMetrepApodize.SetRange(constants.LINEWIDTH_MIN,constants.LINEWIDTH_MAX)
        self.FloatMetrepApodize.SetValue(self.local.apodization_value)
        self.ChoiceMetrepLineshape.SetSelection(shape)
        #-----------------------------
        # jMRUI panel defaults
        self.FloatJmruiSweepWidth.SetRange(constants.SWEEP_WIDTH_MIN,constants.SWEEP_WIDTH_MAX)
        self.FloatJmruiSweepWidth.SetValue(self.local.sw)
        self.SpinJmruiDataPoints.SetRange(constants.SPECTRAL_POINTS_MIN,constants.SPECTRAL_POINTS_MAX)
        self.SpinJmruiDataPoints.SetValue(self.local.spectral_dims[0])
        self.FloatJmruiApodize.SetRange(constants.LINEWIDTH_MIN,constants.LINEWIDTH_MAX)
        self.FloatJmruiApodize.SetValue(self.local.apodization_value)
        self.ChoiceJmruiLineshape.SetSelection(shape)
        # Seed the dynamic list's ppm range limits from the LCModel widgets
        # (all three panels are kept in sync by the on_* handlers).
        sw = self.FloatLcmSweepWidth.GetValue()
        npts = self.SpinLcmDataPoints.GetValue()
        self.dynamic_output_list.set_max_ppm_range(npts, sw)
def _is_gui_valid(self, check_for_unique_name=False):
    """
    Examines the contents of the GUI and determines whether or not
    what the user has entered is valid. If this method finds something
    it doesn't like, it returns a message string indicating the problem
    that's appropriate for displaying in a message box. Otherwise it
    returns None.

    NOTE(review): the check_for_unique_name parameter is not referenced
    in this body -- presumably kept for signature compatibility with a
    caller; confirm before removing.
    """
    # msg stays None while all checks pass; the first failing check sets
    # it, and every later stage is guarded by "if not msg".
    msg = None
    if self.panel_grid_loop.IsShown():
        # Ensure a dim is selected in each visible list.
        for i in range(1, 4):
            listctrl = getattr(self, "ListLoop%d" % i)
            if listctrl.IsShown():
                selections = wx_util.get_selected_item_indices(listctrl)
                if not selections:
                    msg = "Please select a value for loop %d." % i
                    break
    if not msg and self.format != constants.ThirdPartyExportTypes.ANALYSIS_DIRECT:
        # All outputs except ANALYSIS_DIRECT need a filename and path
        filename = self.LabelFilename.GetLabel()
        if not filename:
            msg = "Please use the Browse button to select an output destination."
        else:
            # Only the directory portion must exist; the file itself is
            # created at export time.
            path, _ = os.path.split(filename)
            if not os.path.exists(path) or not os.path.isdir(path):
                msg = """The path "%s" doesn't exist or is not a directory.""" % path
    if not msg:
        # not checking for parameters in spin controls or
        # float spin controls. We assume they should have
        # values and be ints or floats
        # check to ensure there are values in text controls
        if self.format == constants.ThirdPartyExportTypes.LCMODEL:
            if not self.TextLcmFmtdat.GetValue().strip():
                msg = "Please enter a value for FMTDAT."
        # FIXME PS - METABOLITEREPORT isn't implemented. Please implement
        # or remove.
        # elif self.format == constants.ThirdPartyExportTypes.METABOLITEREPORT:
        #     val = self.TextMetrepPulseq.GetValue().strip()
        #     if val == '':
        #         msg = "Please enter a value for Pulseq String."
        #     val = self.TextMetrepEcho.GetValue().strip()
        #     if val == '':
        #         msg = "Please enter a value for Echo String."
    if not msg:
        # Validate the per-metabolite abbreviations from the dynamic list.
        lines = self.dynamic_output_list.get_values()
        abbrs = []
        for line in lines:
            abbr = line["abbr"]
            if not abbr:
                msg = "Please enter an abbreviation for all metabolites in the list."
            if not msg:
                # be sure that no inappropriate characters are in abbreviations
                match = [c in _OUTPUT_ABBREVIATION_DISALLOWED for c in abbr]
                if any(match):
                    msg = _INVALID_ABBREVIATION_MESSAGE % abbr
            if msg:
                # No point in going through the other abbreviations.
                break
            else:
                abbrs.append(abbr)
        # make sure there are no duplicate output names, these are used
        # as output file names or unique identifiers in the formats
        if not msg:
            if len(set(abbrs)) != len(abbrs):
                msg = "There is at least one duplicate metabolite abbreviation in the output list."
    return msg
def _set_list_heading(self, listctrl, i):
    """Refresh the heading above loop list *i* to echo its selection.

    The selected value is repeated in the heading text because the
    selection highlight in the list control is nearly invisible on
    Windows 7; Win XP, OS X, and Linux/GTK render it fine.
    """
    selected = wx_util.get_selected_item_indices(listctrl)
    heading = "Loop %d" % (i + 1)
    if selected:
        value = listctrl.GetItem(selected[0], 1).GetText().strip()
        heading = "%s = %s" % (heading, value)
    heading_label = self.heading_labels[i]
    heading_label.SetLabel(heading)
    # Re-layout so the (centered) label text is repositioned.
    heading_label.GetContainingSizer().Layout()
def _wrap_instructions(self):
    """Re-wrap the instruction label's text to fit the dialog width."""
    instructions = self.LabelInstructions
    wx_util.wrap_label(instructions, self)
##### Methods for different output formats ####################
def _do_output_lcmodel(self, all_lines, output_list):
    """Write each metabolite/mixture in output_list as LCModel basis
    files: a time-domain <abbr>.RAW file and a frequency-domain
    <abbr>_freq.RAW file, plus a summary text file, all in the
    directory shown in the filename label.
    """
    # In LCMode headers all lines start with a space " "
    all_lines = [' '+line for line in all_lines]
    # Create a dictionary of all metabolites that exist
    # for this step (ie. loop1, loop2, loop3) of the Experiment
    nmet = len(self.experiment.metabolites)
    nstep1 = self.ListLoop1.GetItemCount()
    nstep2 = self.ListLoop2.GetItemCount()
    nstep3 = self.ListLoop3.GetItemCount()
    # An empty loop list still counts as a single (default) step at
    # index 0; otherwise use the user's selection in that list.
    if nstep1 == 0:
        nstep1 = 1
        index1 = 0
    else:
        index1 = wx_util.get_selected_item_indices(self.ListLoop1)[0]
    if nstep2 == 0:
        nstep2 = 1
        index2 = 0
    else:
        index2 = wx_util.get_selected_item_indices(self.ListLoop2)[0]
    if nstep3 == 0:
        nstep3 = 1
        index3 = 0
    else:
        index3 = wx_util.get_selected_item_indices(self.ListLoop3)[0]
    # Flatten (index1, index2, index3) into a single offset into the
    # simulations list; loop1 varies fastest.
    offset = index1 + index2*nstep1 + index3*nstep1*nstep2
    sims = self.experiment.simulations[(nmet*offset):(nmet*(offset+1))]
    metabs_dict = {}
    for sim in sims:
        metabs_dict[sim.metabolite.name] = sim.deflate(flavor=Deflate.DICTIONARY)
    # Retrieve header specific information from GUI and spectral
    # parameters for calculating the FIDS
    sw = self.FloatLcmSweepWidth.GetValue()
    npts = self.SpinLcmDataPoints.GetValue()
    apod = self.FloatLcmApodize.GetValue()
    b0 = self.experiment.b0
    fmtdat = self.TextLcmFmtdat.GetValue()
    tramp = str(self.FloatLcmTramp.GetValue())
    volume = str(self.FloatLcmVolume.GetValue())
    broad = self.ChoiceLcmLineshape.GetStringSelection()
    sw_str = str(sw)
    vsize = str(npts)
    apod_str = str(apod)
    b0_str = str(b0)
    if self.experiment.isotope == "1H":
        # should be 4.7
        resppm = common_constants.DEFAULT_PROTON_CENTER_PPM
    else:
        # should be 0.0
        resppm = common_constants.DEFAULT_XNUCLEI_CENTER_PPM
    singlet_flag = self.CheckLcmSinglet.IsChecked()
    # retrieve path from label
    path = self.LabelFilename.GetLabel()
    filename = os.path.join(path, "lcmodel_output_summary.txt")
    # create the LCModel common header
    lcbase = [" "]
    lcbase.append(" "+"=" * 75)
    lcbase.append(" This LCModel format RAW file was created using the ")
    lcbase.append(" Vespa-Simulation spectral simulation program using ")
    lcbase.append(" the following settings and parameters ")
    lcbase.append(" ")
    lcbase.append("   Sweep Width = "+sw_str  +" Hz ")
    lcbase.append("   Vector Size = "+vsize   +" points ")
    lcbase.append("   Apodization = "+apod_str+" Hz "+broad+" broadening")
    lcbase.append("   B0 Field    = "+b0_str  +" MHz ")
    lcbase.append("   Ref Singlet = "+str(singlet_flag))
    lcbase.append(" ")
    # save the Mixed Output and LCModel common headers out into
    # an informative text file in the directory that will contain
    # the LCModel RAW files.
    _write_lines(filename, all_lines + lcbase, True)
    # parse all of the metabolites in the dynamic metabolite list
    # into time and frequency FID data with LCModel headers
    for vals in output_list:
        abbr = vals["abbr"]
        # LCModel $NMID namelist header for the time-domain RAW file.
        lctime = [" $NMID ID='"+abbr+"'"]
        lctime.append(" FMTDAT='"+fmtdat+"'")
        lctime.append(" VOLUME="+volume)
        lctime.append(" TRAMP="+tramp)
        lctime.append(" $END")
        header_time = all_lines + lcbase + lctime
        time = _make_basis(vals, metabs_dict, npts, sw, apod,
                           broad, b0, resppm, singlet_flag)
        # Alternate the sign of every other point ("chop") before output.
        time = time * (((np.arange(npts)+1)%2)*2-1)      # chop data
        header_time += ["  %15.6E  %15.6E" % (z.real, z.imag) for z in time]
        header_time.append(" ")
        _write_lines(os.path.join(path, abbr + '.RAW'), header_time, True)
        # Second file: the same FID FFT'd into the frequency domain.
        lcfreq = [" This data has been FFTd to be in the FREQUENCY domain"]
        lcfreq.append(" ")
        lcfreq.append(" $NMID ID='"+abbr+"'")
        lcfreq.append(" FMTDAT='"+fmtdat+"'")
        lcfreq.append(" VOLUME="+volume)
        lcfreq.append(" TRAMP="+tramp)
        lcfreq.append(" $END")
        header_freq = all_lines + lcbase + lcfreq
        freq = np.fft.fft(time[:])
        header_freq += ["  %15.6E  %15.6E" % (z.real, z.imag) for z in freq]
        header_freq.append(" ")
        _write_lines(os.path.join(path, abbr + '_freq.RAW'), header_freq,
                     True)
def _do_output_gava(self, all_lines, output_list):
    """Write simulation results for every loop step in GAVA text format.

    The header lines are turned into GAVA comments (';' prefix), then
    for each experiment step and each metabolite/mixture row in the
    dynamic output list, one tab-delimited line per resonance
    (abbr, loop dims, line index, ppm, area, phase) is appended and the
    whole file is written to the path in the filename label.
    """
    # Add the Results section header
    all_lines.append("")
    all_lines.append("Simulation Spectral Results")
    all_lines.append("-" * 75)
    # In GAVA, comment lines start with ';'
    # Set all lines in the 'header' to be comment lines
    all_lines = [';'+line for line in all_lines]
    filename = self.LabelFilename.GetLabel()
    lines = []
    nmet = len(self.experiment.metabolites)
    nstep1 = self.ListLoop1.GetItemCount()
    nstep2 = self.ListLoop2.GetItemCount()
    nstep3 = self.ListLoop3.GetItemCount()
    # An empty loop list still represents a single (default) step.
    if nstep1 == 0:
        nstep1 = 1
    if nstep2 == 0:
        nstep2 = 1
    if nstep3 == 0:
        nstep3 = 1
    nsteps = nstep1 * nstep2 * nstep3
    # FIX: the step index was named 'i' and was shadowed/clobbered by the
    # inner resonance loop below; renamed to 'istep' for safety/clarity.
    for istep in range(nsteps):
        sims = self.experiment.simulations[(nmet*istep):(nmet*(istep+1))]
        # Create a dictionary of all metabolites that exist
        # for this step (ie. loop1, loop2, loop3) of the Experiment
        metabs_dict = {}
        for sim in sims:
            mdict = sim.deflate(flavor=Deflate.DICTIONARY)
            dims = [sim.metabolite.name,sim.dims[0],sim.dims[1],sim.dims[2]]
            metabs_dict[sim.metabolite.name] = [mdict,dims]
        # Use the dictionary of metabolite values to create text
        # output for only the metabolites and mixtures listed in
        # the dynamic list widget
        for vals in output_list:
            ppms   = np.array([],dtype='float')
            areas  = np.array([],dtype='float')
            phases = np.array([],dtype='float')
            mname  = vals["metabolite"]
            abbr   = vals["abbr"]
            scale  = vals["scale"]
            shift  = vals["shift"]
            ppmstr = vals["range_start"]
            ppmend = vals["range_end"]
            # formula is a tuple of metabolite name and scale
            # factors, or it is None if not a mixture
            formula = vals["mixture"]
            if not formula:
                # single metabolite - apply global shift and scale
                # values to the ppms and areas respectively
                tmp    = metabs_dict[mname][0]
                dims   = metabs_dict[mname][1]
                ppms   = tmp['ppms'] + shift
                areas  = tmp['areas'] * scale
                phases = tmp['phases']
            else:
                # metabolite mixture - apply global shift and scale
                # values as well as mixture scale value
                for mix in formula:
                    tmp    = metabs_dict[mix[0]][0]
                    dims   = metabs_dict[mix[0]][1]
                    ppms   = np.concatenate((ppms, (tmp['ppms'] + shift)))
                    areas  = np.concatenate((areas, (tmp['areas'] * scale * mix[1])))
                    phases = np.concatenate((phases, tmp['phases']))
            # sort for ppmstr and ppmend values
            indx = ((ppms > ppmstr) & (ppms < ppmend)).nonzero()[0]
            if indx.size:
                ppms   = ppms[indx]
                areas  = areas[indx]
                phases = phases[indx]
            # One tab-delimited output line per remaining resonance.
            for k in range(len(ppms)):
                line  = abbr + '\t' + str(dims[1]) + '\t' + \
                        str(dims[2]) + '\t' + str(dims[3]) + '\t'
                line += str(k) + '\t' + str(ppms[k]) + '\t' + \
                        str(areas[k]) + '\t' + str(phases[k])
                lines.append(line)
    all_lines += lines
    _write_lines(filename, all_lines)
def _do_output_jmruitext(self, all_lines, output_list):
    """Write each metabolite/mixture in output_list as a jMRUI text
    file (<abbr>.txt) containing the time-domain FID and its FFT,
    plus a summary text file, in the directory in the filename label.
    """
    # Create a dictionary of all metabolites that exist
    # for this step (ie. loop1, loop2, loop3) of the Experiment
    nmet = len(self.experiment.metabolites)
    nstep1 = self.ListLoop1.GetItemCount()
    nstep2 = self.ListLoop2.GetItemCount()
    nstep3 = self.ListLoop3.GetItemCount()
    # An empty loop list still counts as one (default) step at index 0;
    # otherwise use the user's selection in that list.
    if nstep1 == 0:
        nstep1 = 1
        index1 = 0
    else:
        index1 = wx_util.get_selected_item_indices(self.ListLoop1)[0]
    if nstep2 == 0:
        nstep2 = 1
        index2 = 0
    else:
        index2 = wx_util.get_selected_item_indices(self.ListLoop2)[0]
    if nstep3 == 0:
        nstep3 = 1
        index3 = 0
    else:
        index3 = wx_util.get_selected_item_indices(self.ListLoop3)[0]
    # Flatten the loop selections into one offset (loop1 varies fastest).
    offset = index1 + index2*nstep1 + index3*nstep1*nstep2
    sims = self.experiment.simulations[(nmet*offset):(nmet*(offset+1))]
    metabs_dict = {}
    for sim in sims:
        metabs_dict[sim.metabolite.name] = sim.deflate(flavor=Deflate.DICTIONARY)
    # Retrieve header specific information from GUI and spectral
    # parameters for calculating the FIDS
    sw     = self.FloatJmruiSweepWidth.GetValue()
    dwell  = 1000.0/float(sw)       # in [ms]
    npts   = self.SpinJmruiDataPoints.GetValue()
    apod   = self.FloatJmruiApodize.GetValue()
    b0     = float(self.experiment.b0) * 1e6
    broad  = self.ChoiceJmruiLineshape.GetStringSelection()
    sw_str    = str(sw)
    dwell_str = "%6.5E" % dwell
    vsize     = str(npts)
    apod_str  = str(apod)
    b0_str    = "%6.5E" % b0
    if self.experiment.isotope == "1H":
        # should be 4.7
        resppm = common_constants.DEFAULT_PROTON_CENTER_PPM
    else:
        # should be 0.0
        resppm = common_constants.DEFAULT_XNUCLEI_CENTER_PPM
    # retrieve path from label
    path = self.LabelFilename.GetLabel()
    filename = os.path.join(path, "jmrui-text_output_summary.txt")
    # save the Mixed Output and jMRUI common headers out into
    # an informative text file in the directory that will contain
    # the jMRUI text files.
    _write_lines(filename, all_lines)
    # parse all of the metabolites in the dynamic metabolite list
    # into time and frequency FID data with jMRUI headers
    for vals in output_list:
        abbr = vals["abbr"]
        filename_data = os.path.join(path, abbr+'.txt')
        jheader = ["jMRUI Data Textfile"]
        jheader.append(" ")
        jheader.append("Filename: "+abbr+".txt")
        jheader.append(" ")
        jheader.append("PointsInDataset: "+vsize)
        jheader.append("DatasetsInFile: 1")
        jheader.append("SamplingInterval: "+dwell_str)
        jheader.append("ZeroOrderPhase: 0E0")
        jheader.append("BeginTime: 0E0")
        jheader.append("TransmitterFrequency: "+b0_str)
        jheader.append("MagneticField: 0E0")
        jheader.append("TypeOfNucleus: 0E0")
        jheader.append("NameOfPatient: ")
        jheader.append("DateOfExperiment: ")
        jheader.append("Spectrometer: Vespa-Simulation")
        jheader.append("AdditionalInfo: see 'readme' file for Vespa-Simulation output synopsis")
        jheader.append(" ")
        jheader.append(" ")
        jheader.append("Signal and FFT ")
        jheader.append("sig(real)\tsig(imag)\tfft(real)\tfft(imag)")
        jheader.append("Signal 1 out of 1 in file")
        time = _make_basis(vals, metabs_dict, npts, sw, apod,
                           broad, b0/1e6, resppm, False)
        # Alternate the sign of every other point ("chop") before output.
        time = time * (((np.arange(npts)+1)%2)*2-1)      # chop data
        # to make data display correctly in jMRUI we need to apply
        # a complex conjugate to the FIDs.
        # FIX: was an element-by-element Python loop building
        # real - 1j*imag by hand; np.conj() is the vectorized equivalent.
        time = np.conj(time)
        freq = np.fft.fft(time[:])
        for a_time, a_freq in zip(time, freq):
            params = (a_time.real, a_time.imag, a_freq.real, a_freq.imag)
            jheader.append("%6.5E\t%6.5E\t%6.5E\t%6.5E" % params)
        _write_lines(filename_data, jheader)
def _do_output_midasxml(self, all_lines, output_list):
    """Write the selected loop step's resonance lines as a MIDAS-style
    XML file (FITT_Generic_XML) to the path in the filename label.
    MIDAS stores all data in attributes of 'param'/'comment' elements.
    """
    # stamp[0] is the date, stamp[1] the time (ISO timestamp split at 'T').
    stamp = util_time.now(util_time.ISO_TIMESTAMP_FORMAT).split('T')
    # MIDAS makes a somewhat odd use of XML in that all its data seems
    # to be saved into attributes of elements named 'param'
    #
    # Still, we agreed to support them so we will humour them.
    # first, let MIDAS know which node uses it (ie. FITT_Generic_XML)
    # second, drop our Vespa comment into its own little node
    root = ElementTree.Element("FITT_Generic_XML",
                                { "Creation_date" : stamp[0],
                                  "Creation_time" : stamp[1] })
    e = ElementTree.Element("VESPA_SIMULATION_MIDAS_EXPORT")
    for i,line in enumerate(all_lines):
        # Each header line becomes a numbered <comment> element.
        iline = "line%4.4i" % i
        util_xml.TextSubElement(e, "comment", "",
                                {'line':iline, 'value':line})
    root.append(e)
    # third, add all the Experiment lines in MIDAS style ...
    e = ElementTree.Element("PRIOR_METABOLITE_INFORMATION")
    # Create a dictionary of all metabolites that exist
    # for this step (ie. loop1, loop2, loop3) of the Experiment
    filename = self.LabelFilename.GetLabel()
    nmet = len(self.experiment.metabolites)
    nstep1 = self.ListLoop1.GetItemCount()
    nstep2 = self.ListLoop2.GetItemCount()
    nstep3 = self.ListLoop3.GetItemCount()
    # An empty loop list still counts as one (default) step at index 0.
    if nstep1 == 0:
        nstep1 = 1
        index1 = 0
    else:
        index1 = wx_util.get_selected_item_indices(self.ListLoop1)[0]
    if nstep2 == 0:
        nstep2 = 1
        index2 = 0
    else:
        index2 = wx_util.get_selected_item_indices(self.ListLoop2)[0]
    if nstep3 == 0:
        nstep3 = 1
        index3 = 0
    else:
        index3 = wx_util.get_selected_item_indices(self.ListLoop3)[0]
    # Flatten the loop selections into one offset (loop1 varies fastest).
    offset = index1 + index2*nstep1 + index3*nstep1*nstep2
    sims = self.experiment.simulations[(nmet*offset):(nmet*(offset+1))]
    lines = []
    irun = 1        # running index across all emitted 'param' elements
    # Create a dictionary of all metabolites that exist
    # for this step (ie. loop1, loop2, loop3) of the Experiment
    metabs_dict = {}
    for sim in sims:
        mdict = sim.deflate(flavor=Deflate.DICTIONARY)
        dims = [sim.metabolite.name] + sim.dims
        metabs_dict[sim.metabolite.name] = [mdict,dims]
    # Use the dictionary of metabolite values to create text
    # output for only the metabolites and mixtures listed in
    # the dynamic list widget
    for vals in output_list:
        ppms   = np.array([],dtype='float')
        areas  = np.array([],dtype='float')
        phases = np.array([],dtype='float')
        mname  = vals["metabolite"]
        abbr   = vals["abbr"]
        scale  = vals["scale"]
        shift  = vals["shift"]
        ppmstr = vals["range_start"]
        ppmend = vals["range_end"]
        # formula is a tuple of metabolite name and scale
        # factors, or it is None if not a mixture
        formula = vals["mixture"]
        if not formula:
            # single metabolite - apply global shift and scale
            # values to the ppms and areas respectively
            tmp    = metabs_dict[mname][0]
            dims   = metabs_dict[mname][1]
            ppms   = tmp['ppms'] + shift
            areas  = tmp['areas'] * scale
            phases = tmp['phases']
        else:
            # metabolite mixture - apply global shift and scale
            # values as well as mixture scale value
            for mix in formula:
                tmp    = metabs_dict[mix[0]][0]
                dims   = metabs_dict[mix[0]][1]
                ppms   = np.concatenate((ppms, (tmp['ppms'] + shift)))
                areas  = np.concatenate((areas, (tmp['areas'] * scale * mix[1])))
                phases = np.concatenate((phases, tmp['phases']))
        # sort for ppmstr and ppmend values
        indx = ((ppms > ppmstr) & (ppms < ppmend)).nonzero()[0]
        if indx.size:
            ppms   = ppms[indx]
            areas  = areas[indx]
            phases = phases[indx]
        for i in range(len(ppms)):
            pname = "fitt_PriorLine%5.5i" % irun
            # Create a line of output. abbr is already a string,
            # but everything else needs to be stringified.
            line = dims[1:] + [i, ppms[i], areas[i], phases[i]]
            line = [abbr] + list(map(str, line))
            # MIDAS packs the fields into one attribute, '++'-delimited.
            line = "++".join(line)
            util_xml.TextSubElement(e, "param", "",
                                    {'name':pname, 'value':line})
            irun += 1
    root.append(e)
    # Prettify the XML and stuff root into a tree and write
    util_xml.indent(root)
    tree = ElementTree.ElementTree(root)
    tree.write(filename, "utf-8")
def _do_output_analysis_direct(self, all_lines, output_list):
    """Build the prior for direct hand-off to Analysis (no file written)."""
    self._do_output_analysis_prior(all_lines, output_list,
                                   analysis_direct=True)
def _do_output_analysis_prior(self, all_lines, output_list, analysis_direct=False):
    """Build a Vespa-Analysis Prior object from the selected loop step.

    When analysis_direct is True, the prior is stashed on
    self.final_prior for the caller; otherwise it is exported to the
    filename shown in the filename label.
    """
    lines = "\n".join(all_lines)
    # Extract metabolites only for the specified Experiment loop indices
    nmet = len(self.experiment.metabolites)
    nstep1 = self.ListLoop1.GetItemCount()
    nstep2 = self.ListLoop2.GetItemCount()
    nstep3 = self.ListLoop3.GetItemCount()
    # An empty loop list still counts as one (default) step at index 0.
    if nstep1 == 0:
        nstep1 = 1
        index1 = 0
    else:
        index1 = wx_util.get_selected_item_indices(self.ListLoop1)[0]
    if nstep2 == 0:
        nstep2 = 1
        index2 = 0
    else:
        index2 = wx_util.get_selected_item_indices(self.ListLoop2)[0]
    if nstep3 == 0:
        nstep3 = 1
        index3 = 0
    else:
        index3 = wx_util.get_selected_item_indices(self.ListLoop3)[0]
    # Flatten the loop selections into one offset (loop1 varies fastest).
    offset = index1 + index2*nstep1 + index3*nstep1*nstep2
    sims = self.experiment.simulations[(nmet*offset):(nmet*(offset+1))]
    # Create a dictionary of all metabolites that exist
    # for this step (ie. loop1, loop2, loop3) of the Experiment
    metabs_dict = {}
    for sim in sims:
        mdict = sim.deflate(flavor=Deflate.DICTIONARY)
        dims = [sim.metabolite.name] + sim.dims
        metabs_dict[sim.metabolite.name] = [mdict,dims]
    prior = mrs_prior.Prior()
    prior.source    = 'experiment'
    prior.source_id = self.experiment.id
    prior.comment   = lines
    prior.nucleus   = self.experiment.isotope
    # Use the dictionary of metabolite values to create metabolite
    # objects for only the metabolites and mixtures listed in the
    # dynamic list widget
    for vals in output_list:
        ppms   = np.array([],dtype='float')
        areas  = np.array([],dtype='float')
        phases = np.array([],dtype='float')
        mname  = vals["metabolite"]
        abbr   = vals["abbr"]
        scale  = vals["scale"]
        shift  = vals["shift"]
        ppmstr = vals["range_start"]
        ppmend = vals["range_end"]
        nspins = vals["nspins"]
        # formula is a tuple of metabolite name and scale
        # factors, or it is None if not a mixture
        formula = vals["mixture"]
        if not formula:
            # single metabolite - apply global shift and scale
            # values to the ppms and areas respectively
            tmp    = metabs_dict[mname][0]
            dims   = metabs_dict[mname][1]
            ppms   = tmp['ppms'] + shift
            areas  = tmp['areas'] * scale
            phases = tmp['phases']
        else:
            # metabolite mixture - apply global shift and scale
            # values as well as mixture scale value
            for mix in formula:
                tmp    = metabs_dict[mix[0]][0]
                dims   = metabs_dict[mix[0]][1]
                ppms   = np.concatenate((ppms, (tmp['ppms'] + shift)))
                areas  = np.concatenate((areas, (tmp['areas'] * scale * mix[1])))
                phases = np.concatenate((phases, tmp['phases']))
        # sort for ppmstr and ppmend values
        indx = ((ppms > ppmstr) & (ppms < ppmend)).nonzero()[0]
        if indx.size:
            ppms   = ppms[indx]
            areas  = areas[indx]
            phases = phases[indx]
        met = mrs_prior_metabolite.PriorMetabolite()
        met.spins  = nspins
        met.dims   = [abbr, index1, index2, index3]
        # All lines start in group 0 (array of zeros shaped like ppms).
        met.group  = ppms * 0
        met.ppms   = ppms
        met.areas  = areas
        met.phases = phases
        prior.metabolites[met.name] = met
    if analysis_direct:
        self.final_prior = prior
    else:
        filename = self.LabelFilename.GetLabel()
        util_export.export(filename, [prior])
###############################################################################
##### Dynamic Output List Class ###############################################
class DynamicOutputList(object):
    """Manages the dynamic per-metabolite rows (checkbox, metab choice,
    abbreviation, scale, shift, ppm range) in the export dialog's grid
    sizer.

    Fixes vs. previous revision: `abbr_name == None` replaced with the
    idiomatic `is None`, and the duplicated range-update logic shared by
    update_ppm_range()/set_max_ppm_range() extracted into
    _apply_ppm_range().
    """

    def __init__(self, parent, metabolite_grid_sizer, metabolites, local):
        self.parent = parent
        self.local = local
        # We follow the wx CamelCaps naming convention for this wx object.
        self.MetaboliteGridSizer = metabolite_grid_sizer
        self.list_lines = []
        # PPM limits implied by the current spectral dims / sweep width.
        self.maxppm = self.local.pts2ppm(0)
        self.minppm = self.local.pts2ppm(self.local.spectral_dims[0]-1)
        self.names = [metabolite.name for metabolite in metabolites]
        self.nspins = [len(metabolite.spins) for metabolite in metabolites]
        # One row per metabolite to start with.
        for abbr_name in self.names:
            self.add_row(abbr_name)

    def add_row(self, abbr_name=None, mixture=None):
        """
        Adds a row to the end of the list. Mixture, if given, should be
        a list of 2-tuples of (mixture name, scale).
        """
        if abbr_name is None:
            abbr_name = self.names[0]
        # parse the mixture dialog values if necessary
        if mixture:
            # A mixture row shows a single '+'-joined pseudo-name; its
            # spin count is the sum over the component metabolites.
            nspins = 0
            names = []
            for line in mixture:
                names.append(line[0])
                nspins = nspins + self.nspins[self.names.index(line[0])]
            names = ['+'.join(names)]
        else:
            names = self.names
            nspins = self.nspins[self.names.index(abbr_name)]
        # create widgets to go into the line
        list_line = { }
        check = wx.CheckBox(self.parent)
        combo_metabolites = wx.Choice(self.parent, choices=names)
        abbr = wx.TextCtrl(self.parent)
        # I want this text control to expand horizontally as the dialog grows
        # so I set the wx.EXPAND flag on it when I add it to the sizer.
        # However, this also makes it expand vertically which makes it taller
        # than all of its neighbors.
        # To prevent that, I get its current height (before being added to the
        # sizer) and force that to be the max.
        _, height = abbr.GetSize()
        abbr.SetMaxSize( (-1, height) )
        scale = FloatSpinMultiplier(self.parent, increment=1.25, digits=5,
                                    style=wx.SP_ARROW_KEYS|wx.SP_WRAP|wx.TE_PROCESS_ENTER,
                                    agwStyle=FS_LEFT)
        shift  = FloatSpin(self.parent, agwStyle=FS_LEFT)
        ppmstr = FloatSpin(self.parent, agwStyle=FS_LEFT)
        ppmend = FloatSpin(self.parent, agwStyle=FS_LEFT)
        # keep a copy of panel and widgets to access later
        line = { "checkbox"     : check,
                 "metabolites"  : combo_metabolites,
                 "abbr"         : abbr,
                 "scale"        : scale,
                 "shift"        : shift,
                 "range_start"  : ppmstr,
                 "range_end"    : ppmend,
                 "mixture"      : mixture,
                 "nspins"       : nspins,    # just an FYI entry
               }
        # Add the controls to the grid sizer
        self.MetaboliteGridSizer.SetRows(self.MetaboliteGridSizer.GetRows() + 1)
        self.MetaboliteGridSizer.Add(line["checkbox"], 0, wx.ALIGN_CENTER_VERTICAL)
        for key in ("metabolites", "abbr", "scale", "shift", "range_start",
                    "range_end"):
            self.MetaboliteGridSizer.Add(line[key], 0, wx.EXPAND)
        # Configure the controls I just created
        combo_metabolites.SetMinSize((100, -1))
        # Selecting the correct string is done a bit oddly. The combobox holds
        # either a list of the experiment's metabs or a single item
        # representing a metab mixture. In the first case the abbreviated
        # name will match one of the metabs, otherwise it's correct to
        # select the 0th (and only) item in the combobox.
        i = combo_metabolites.FindString(abbr_name)
        if i == wx.NOT_FOUND:
            i = 0
        combo_metabolites.SetSelection(i)
        abbr.SetValue(abbr_name)
        # All of the floatspins have the same size.
        floatspin_size = wx.Size(90, -1)
        # Note. On these Spin and FloatSpin widgets, if the value you want to
        # set is outside the wxGlade standard range, you should make the
        # call to reset the range first and then set the value you want.
        scale.multiplier = 1.25
        scale.SetDigits(5)
        scale.SetIncrement(1.0)
        scale.SetRange(0.00001,100000.0)
        scale.SetValue(1.0)
        scale.SetMinSize(floatspin_size)
        shift.SetDigits(3)
        shift.SetIncrement(0.1)
        shift.SetRange(-1000.0,1000.0)
        shift.SetValue(0.0)
        shift.SetMinSize(floatspin_size)
        ppmstr.SetDigits(3)
        ppmstr.SetIncrement(0.1)
        ppmstr.SetRange(self.minppm,self.maxppm)
        ppmstr.SetValue(self.minppm)
        ppmstr.SetMinSize(floatspin_size)
        ppmend.SetDigits(3)
        ppmend.SetIncrement(0.1)
        ppmend.SetRange(self.minppm,self.maxppm)
        ppmend.SetValue(self.maxppm)
        ppmend.SetMinSize(floatspin_size)
        self.list_lines.append(line)
        self.parent.Layout()
        self.parent.Fit()

    def remove_checked_rows(self):
        """Destroy the widgets of every checked row and drop those rows."""
        # gather indices of all checked boxes
        checklist = []
        for i, line in enumerate(self.list_lines):
            if line["checkbox"].GetValue():
                checklist.append(i)
        # remove in reverse order so we don't invalidate later
        # indices by removing the ones preceding them in the list
        checklist.reverse()
        for i in checklist:
            # Each line is a dict of controls + mixture info
            for item in list(self.list_lines[i].values()):
                if hasattr(item, "Destroy"):
                    # It's a wx control
                    item.Destroy()
            del self.list_lines[i]
        # Reduce the # of rows in the grid sizer
        rows = self.MetaboliteGridSizer.GetRows()
        self.MetaboliteGridSizer.SetRows(rows - len(checklist))
        self.MetaboliteGridSizer.Layout()

    def get_values(self):
        """Return one value-dict per row (see get_line_values)."""
        return [self.get_line_values(line) for line in self.list_lines]

    def get_abbr(self):
        """Return the abbreviation string of every row, in row order."""
        vals = self.get_values()
        return [line['abbr'] for line in vals]

    def get_line_values(self, line):
        # Returns a dict containing the values of six of the controls in a
        # given line.
        # Also appended are the mixture values for a total of seven items
        # in the list.
        return { "metabolite"   : line["metabolites"].GetStringSelection(),
                 "abbr"         : line["abbr"].GetValue(),
                 "scale"        : line["scale"].GetValue(),
                 "shift"        : line["shift"].GetValue(),
                 "range_start"  : line["range_start"].GetValue(),
                 "range_end"    : line["range_end"].GetValue(),
                 "mixture"      : line["mixture"],
                 "nspins"       : line["nspins"],
               }

    def select_all(self):
        """Check every row's checkbox."""
        for line in self.list_lines:
            line["checkbox"].SetValue(True)

    def deselect_all(self):
        """Uncheck every row's checkbox."""
        for line in self.list_lines:
            line["checkbox"].SetValue(False)

    def _apply_ppm_range(self, npts, sw, set_values=False):
        """Recompute self.minppm/self.maxppm for (npts, sw) and push the
        new range onto every row's Start/End widgets.

        When set_values is True, also reset each widget's value to its
        range limit, nudged inward by a tiny epsilon. Note. For some
        npts, sw pairs, setting the Value to exactly the limit was
        rejected by SetRange even though it was the same float value --
        likely float-precision error inside the FloatSpin widget -- so
        we stay strictly inside the min/max range.

        The local spectral dims/sw are swapped in only temporarily for
        the pts2ppm() calls and restored afterwards.
        """
        save_dim0 = self.local.spectral_dims[0]
        save_sw = self.local.sw
        self.local.spectral_dims[0] = npts
        self.local.sw = sw
        self.maxppm = self.local.pts2ppm(0)
        self.minppm = self.local.pts2ppm(self.local.spectral_dims[0]-1)
        for line in self.list_lines:
            ppmstr = line['range_start']
            ppmend = line['range_end']
            ppmstr.SetRange(self.minppm,self.maxppm)
            ppmend.SetRange(self.minppm,self.maxppm)
            if set_values:
                ppmstr.SetValue(self.minppm + 0.00001)
                ppmend.SetValue(self.maxppm - 0.00001)
        self.local.spectral_dims[0] = save_dim0
        self.local.sw = save_sw

    def update_ppm_range(self, npts, sw):
        """Update the ppm range limits of every row for (npts, sw)."""
        self._apply_ppm_range(npts, sw, set_values=False)

    def set_max_ppm_range(self, npts, sw):
        """
        Given a sweep width, sw, number of spectral points, npts, we
        calculate the minimum and maximum PPM values allowed in the spectral
        range. We then set the Start and End of Range widgets to these
        values.
        """
        self._apply_ppm_range(npts, sw, set_values=True)
| [
"vespa.common.util.xml_.TextSubElement",
"numpy.ones",
"wx.CheckBox",
"wx.CallAfter",
"vespa.common.mrs_prior.Prior",
"wx.lib.agw.floatspin.FloatSpin",
"numpy.arange",
"numpy.tile",
"numpy.exp",
"xml.etree.cElementTree.Element",
"vespa.simulation.auto_gui.mixed_metabolite_output.MyDialog.__init_... | [((3325, 3352), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (3333, 3352), True, 'import numpy as np\n'), ((3365, 3392), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (3373, 3392), True, 'import numpy as np\n'), ((3405, 3432), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (3413, 3432), True, 'import numpy as np\n'), ((5113, 5143), 'numpy.ones', 'np.ones', (['npts'], {'dtype': '"""float32"""'}), "(npts, dtype='float32')\n", (5120, 5143), True, 'import numpy as np\n'), ((5274, 5320), 'vespa.common.util.generic_spectral.apodize', 'util_generic_spectral.apodize', (['xx', 'apod', 'broad'], {}), '(xx, apod, broad)\n', (5303, 5320), True, 'import vespa.common.util.generic_spectral as util_generic_spectral\n'), ((5707, 5726), 'numpy.tile', 'np.tile', (['xx', 'nlines'], {}), '(xx, nlines)\n', (5714, 5726), True, 'import numpy as np\n'), ((5542, 5601), 'numpy.exp', 'np.exp', (['(phases * common_constants.DEGREES_TO_RADIANS * 1.0j)'], {}), '(phases * common_constants.DEGREES_TO_RADIANS * 1.0j)\n', (5548, 5601), True, 'import numpy as np\n'), ((5790, 5800), 'numpy.exp', 'np.exp', (['xx'], {}), '(xx)\n', (5796, 5800), True, 'import numpy as np\n'), ((11854, 11909), 'vespa.simulation.auto_gui.mixed_metabolite_output.MyDialog.__init__', 'mixed_metabolite_output.MyDialog.__init__', (['self', 'parent'], {}), '(self, parent)\n', (11895, 11909), True, 'import vespa.simulation.auto_gui.mixed_metabolite_output as mixed_metabolite_output\n'), ((16908, 16942), 'vespa.common.util.config.get_last_export_path', 'util_config.get_last_export_path', ([], {}), '()\n', (16940, 16942), True, 'import vespa.common.util.config as util_config\n'), ((20749, 20783), 'wx.CallAfter', 'wx.CallAfter', (['self._improve_height'], {}), '(self._improve_height)\n', (20761, 20783), False, 'import wx\n'), ((21089, 
21174), 'vespa.simulation.dialog_mixed_metabolite_designer.DialogMixedMetaboliteDesigner', 'dialog_mixed_metabolite_designer.DialogMixedMetaboliteDesigner', (['self', 'names', 'abbr'], {}), '(self, names,\n abbr)\n', (21151, 21174), True, 'import vespa.simulation.dialog_mixed_metabolite_designer as dialog_mixed_metabolite_designer\n'), ((27663, 27682), 'wx.GetDisplaySize', 'wx.GetDisplaySize', ([], {}), '()\n', (27680, 27682), False, 'import wx\n'), ((34739, 34809), 'vespa.common.wx_gravy.util.add_ok_cancel', 'wx_util.add_ok_cancel', (['self', 'self.LabelOkCancelPlaceholder', 'self.on_ok'], {}), '(self, self.LabelOkCancelPlaceholder, self.on_ok)\n', (34760, 34809), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((44005, 44048), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['listctrl'], {}), '(listctrl)\n', (44038, 44048), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((44407, 44455), 'vespa.common.wx_gravy.util.wrap_label', 'wx_util.wrap_label', (['self.LabelInstructions', 'self'], {}), '(self.LabelInstructions, self)\n', (44425, 44455), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((46923, 46971), 'os.path.join', 'os.path.join', (['path', '"""lcmodel_output_summary.txt"""'], {}), "(path, 'lcmodel_output_summary.txt')\n", (46935, 46971), False, 'import os\n'), ((55674, 55725), 'os.path.join', 'os.path.join', (['path', '"""jmrui-text_output_summary.txt"""'], {}), "(path, 'jmrui-text_output_summary.txt')\n", (55686, 55725), False, 'import os\n'), ((58633, 58732), 'xml.etree.cElementTree.Element', 'ElementTree.Element', (['"""FITT_Generic_XML"""', "{'Creation_date': stamp[0], 'Creation_time': stamp[1]}"], {}), "('FITT_Generic_XML', {'Creation_date': stamp[0],\n 'Creation_time': stamp[1]})\n", (58652, 58732), True, 'import xml.etree.cElementTree as ElementTree\n'), ((58827, 58879), 'xml.etree.cElementTree.Element', 'ElementTree.Element', (['"""VESPA_SIMULATION_MIDAS_EXPORT"""'], {}), 
"('VESPA_SIMULATION_MIDAS_EXPORT')\n", (58846, 58879), True, 'import xml.etree.cElementTree as ElementTree\n'), ((59201, 59252), 'xml.etree.cElementTree.Element', 'ElementTree.Element', (['"""PRIOR_METABOLITE_INFORMATION"""'], {}), "('PRIOR_METABOLITE_INFORMATION')\n", (59220, 59252), True, 'import xml.etree.cElementTree as ElementTree\n'), ((63376, 63397), 'vespa.common.util.xml_.indent', 'util_xml.indent', (['root'], {}), '(root)\n', (63391, 63397), True, 'import vespa.common.util.xml_ as util_xml\n'), ((63414, 63443), 'xml.etree.cElementTree.ElementTree', 'ElementTree.ElementTree', (['root'], {}), '(root)\n', (63437, 63443), True, 'import xml.etree.cElementTree as ElementTree\n'), ((65089, 65106), 'vespa.common.mrs_prior.Prior', 'mrs_prior.Prior', ([], {}), '()\n', (65104, 65106), True, 'import vespa.common.mrs_prior as mrs_prior\n'), ((69506, 69530), 'wx.CheckBox', 'wx.CheckBox', (['self.parent'], {}), '(self.parent)\n', (69517, 69530), False, 'import wx\n'), ((69559, 69596), 'wx.Choice', 'wx.Choice', (['self.parent'], {'choices': 'names'}), '(self.parent, choices=names)\n', (69568, 69596), False, 'import wx\n'), ((69612, 69636), 'wx.TextCtrl', 'wx.TextCtrl', (['self.parent'], {}), '(self.parent)\n', (69623, 69636), False, 'import wx\n'), ((70130, 70270), 'vespa.common.wx_gravy.widgets.floatspin_multiplier.floatspin_multiplier_base.FloatSpinMultiplier', 'FloatSpinMultiplier', (['self.parent'], {'increment': '(1.25)', 'digits': '(5)', 'style': '(wx.SP_ARROW_KEYS | wx.SP_WRAP | wx.TE_PROCESS_ENTER)', 'agwStyle': 'FS_LEFT'}), '(self.parent, increment=1.25, digits=5, style=wx.\n SP_ARROW_KEYS | wx.SP_WRAP | wx.TE_PROCESS_ENTER, agwStyle=FS_LEFT)\n', (70149, 70270), False, 'from vespa.common.wx_gravy.widgets.floatspin_multiplier.floatspin_multiplier_base import FloatSpinMultiplier\n'), ((70355, 70395), 'wx.lib.agw.floatspin.FloatSpin', 'FloatSpin', (['self.parent'], {'agwStyle': 'FS_LEFT'}), '(self.parent, agwStyle=FS_LEFT)\n', (70364, 70395), False, 'from 
wx.lib.agw.floatspin import FloatSpin, EVT_FLOATSPIN, FS_LEFT, FS_RIGHT, FS_CENTRE, FS_READONLY\n'), ((70413, 70453), 'wx.lib.agw.floatspin.FloatSpin', 'FloatSpin', (['self.parent'], {'agwStyle': 'FS_LEFT'}), '(self.parent, agwStyle=FS_LEFT)\n', (70422, 70453), False, 'from wx.lib.agw.floatspin import FloatSpin, EVT_FLOATSPIN, FS_LEFT, FS_RIGHT, FS_CENTRE, FS_READONLY\n'), ((70471, 70511), 'wx.lib.agw.floatspin.FloatSpin', 'FloatSpin', (['self.parent'], {'agwStyle': 'FS_LEFT'}), '(self.parent, agwStyle=FS_LEFT)\n', (70480, 70511), False, 'from wx.lib.agw.floatspin import FloatSpin, EVT_FLOATSPIN, FS_LEFT, FS_RIGHT, FS_CENTRE, FS_READONLY\n'), ((72131, 72146), 'wx.Size', 'wx.Size', (['(90)', '(-1)'], {}), '(90, -1)\n', (72138, 72146), False, 'import wx\n'), ((4241, 4284), 'numpy.concatenate', 'np.concatenate', (["(ppms, tmp['ppms'] + shift)"], {}), "((ppms, tmp['ppms'] + shift))\n", (4255, 4284), True, 'import numpy as np\n'), ((4309, 4363), 'numpy.concatenate', 'np.concatenate', (["(areas, tmp['areas'] * scale * mix[1])"], {}), "((areas, tmp['areas'] * scale * mix[1]))\n", (4323, 4363), True, 'import numpy as np\n'), ((4387, 4426), 'numpy.concatenate', 'np.concatenate', (["(phases, tmp['phases'])"], {}), "((phases, tmp['phases']))\n", (4401, 4426), True, 'import numpy as np\n'), ((5182, 5214), 'numpy.arange', 'np.arange', (['npts'], {'dtype': '"""float32"""'}), "(npts, dtype='float32')\n", (5191, 5214), True, 'import numpy as np\n'), ((5480, 5501), 'numpy.repeat', 'np.repeat', (['freq', 'npts'], {}), '(freq, npts)\n', (5489, 5501), True, 'import numpy as np\n'), ((5611, 5632), 'numpy.repeat', 'np.repeat', (['phas', 'npts'], {}), '(phas, npts)\n', (5620, 5632), True, 'import numpy as np\n'), ((17632, 17698), 'vespa.common.wx_gravy.common_dialogs.pickdir', 'common_dialogs.pickdir', ([], {'message': 'message', 'default_path': 'default_path'}), '(message=message, default_path=default_path)\n', (17654, 17698), True, 'import vespa.common.wx_gravy.common_dialogs as 
common_dialogs\n'), ((22038, 22092), 'vespa.common.wx_gravy.common_dialogs.message', 'common_dialogs.message', (['msg', 'None', 'common_dialogs.I_OK'], {}), '(msg, None, common_dialogs.I_OK)\n', (22060, 22092), True, 'import vespa.common.wx_gravy.common_dialogs as common_dialogs\n'), ((22176, 22196), 'wx.BeginBusyCursor', 'wx.BeginBusyCursor', ([], {}), '()\n', (22194, 22196), False, 'import wx\n'), ((24346, 24364), 'wx.EndBusyCursor', 'wx.EndBusyCursor', ([], {}), '()\n', (24362, 24364), False, 'import wx\n'), ((49232, 49251), 'numpy.fft.fft', 'np.fft.fft', (['time[:]'], {}), '(time[:])\n', (49242, 49251), True, 'import numpy as np\n'), ((56175, 56208), 'os.path.join', 'os.path.join', (['path', "(abbr + '.txt')"], {}), "(path, abbr + '.txt')\n", (56187, 56208), False, 'import os\n'), ((57791, 57810), 'numpy.fft.fft', 'np.fft.fft', (['time[:]'], {}), '(time[:])\n', (57801, 57810), True, 'import numpy as np\n'), ((58972, 59045), 'vespa.common.util.xml_.TextSubElement', 'util_xml.TextSubElement', (['e', '"""comment"""', '""""""', "{'line': iline, 'value': line}"], {}), "(e, 'comment', '', {'line': iline, 'value': line})\n", (58995, 59045), True, 'import vespa.common.util.xml_ as util_xml\n'), ((60949, 60976), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (60957, 60976), True, 'import numpy as np\n'), ((60997, 61024), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (61005, 61024), True, 'import numpy as np\n'), ((61045, 61072), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (61053, 61072), True, 'import numpy as np\n'), ((65515, 65542), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (65523, 65542), True, 'import numpy as np\n'), ((65563, 65590), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (65571, 65590), True, 'import numpy as np\n'), ((65611, 65638), 
'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (65619, 65638), True, 'import numpy as np\n'), ((67289, 67327), 'vespa.common.mrs_prior_metabolite.PriorMetabolite', 'mrs_prior_metabolite.PriorMetabolite', ([], {}), '()\n', (67325, 67327), True, 'import vespa.common.mrs_prior_metabolite as mrs_prior_metabolite\n'), ((67748, 67785), 'vespa.common.util.export.export', 'util_export.export', (['filename', '[prior]'], {}), '(filename, [prior])\n', (67766, 67785), True, 'import vespa.common.util.export as util_export\n'), ((4767, 4782), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (4775, 4782), True, 'import numpy as np\n'), ((4826, 4841), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (4834, 4841), True, 'import numpy as np\n'), ((4885, 4900), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (4893, 4900), True, 'import numpy as np\n'), ((13088, 13113), 'numpy.where', 'np.where', (['(pts > 0)', 'pts', '(0)'], {}), '(pts > 0, pts, 0)\n', (13096, 13113), True, 'import numpy as np\n'), ((18706, 18819), 'vespa.common.wx_gravy.common_dialogs.save_as', 'common_dialogs.save_as', ([], {'filetype_filter': 'filter_', 'default_path': 'default_path', 'default_filename': 'default_filename'}), '(filetype_filter=filter_, default_path=default_path,\n default_filename=default_filename)\n', (18728, 18819), True, 'import vespa.common.wx_gravy.common_dialogs as common_dialogs\n'), ((19111, 19149), 'vespa.common.util.config.set_last_export_path', 'util_config.set_last_export_path', (['path'], {}), '(path)\n', (19143, 19149), True, 'import vespa.common.util.config as util_config\n'), ((21882, 21916), 'wx.CallAfter', 'wx.CallAfter', (['self._improve_height'], {}), '(self._improve_height)\n', (21894, 21916), False, 'import wx\n'), ((25074, 25128), 'vespa.common.wx_gravy.common_dialogs.message', 'common_dialogs.message', (['msg', 'None', 'common_dialogs.I_OK'], {}), '(msg, None, common_dialogs.I_OK)\n', (25096, 25128), True, 
'import vespa.common.wx_gravy.common_dialogs as common_dialogs\n'), ((25877, 25926), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop1'], {}), '(self.ListLoop1)\n', (25910, 25926), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((26163, 26212), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop2'], {}), '(self.ListLoop2)\n', (26196, 26212), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((26441, 26490), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop3'], {}), '(self.ListLoop3)\n', (26474, 26490), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((31234, 31273), 'vespa.common.wx_gravy.util.set_font_to_monospace', 'wx_util.set_font_to_monospace', (['listctrl'], {}), '(listctrl)\n', (31263, 31273), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((33698, 33757), 'wx.StaticText', 'wx.StaticText', (['parent'], {'label': 'heading', 'style': 'wx.ALIGN_CENTRE'}), '(parent, label=heading, style=wx.ALIGN_CENTRE)\n', (33711, 33757), False, 'import wx\n'), ((41238, 41261), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (41251, 41261), False, 'import os\n'), ((45151, 45200), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop1'], {}), '(self.ListLoop1)\n', (45184, 45200), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((45324, 45373), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop2'], {}), '(self.ListLoop2)\n', (45357, 45373), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((45492, 45541), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop3'], {}), '(self.ListLoop3)\n', (45525, 45541), True, 'import vespa.common.wx_gravy.util as wx_util\n'), 
((48761, 48794), 'os.path.join', 'os.path.join', (['path', "(abbr + '.RAW')"], {}), "(path, abbr + '.RAW')\n", (48773, 48794), False, 'import os\n'), ((49395, 49433), 'os.path.join', 'os.path.join', (['path', "(abbr + '_freq.RAW')"], {}), "(path, abbr + '_freq.RAW')\n", (49407, 49433), False, 'import os\n'), ((51158, 51185), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (51166, 51185), True, 'import numpy as np\n'), ((51210, 51237), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (51218, 51237), True, 'import numpy as np\n'), ((51262, 51289), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float"""'}), "([], dtype='float')\n", (51270, 51289), True, 'import numpy as np\n'), ((53992, 54041), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop1'], {}), '(self.ListLoop1)\n', (54025, 54041), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((54165, 54214), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop2'], {}), '(self.ListLoop2)\n', (54198, 54214), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((54333, 54382), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop3'], {}), '(self.ListLoop3)\n', (54366, 54382), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((58178, 58223), 'vespa.common.util.time_.now', 'util_time.now', (['util_time.ISO_TIMESTAMP_FORMAT'], {}), '(util_time.ISO_TIMESTAMP_FORMAT)\n', (58191, 58223), True, 'import vespa.common.util.time_ as util_time\n'), ((59747, 59796), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop1'], {}), '(self.ListLoop1)\n', (59780, 59796), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((59920, 59969), 'vespa.common.wx_gravy.util.get_selected_item_indices', 
'wx_util.get_selected_item_indices', (['self.ListLoop2'], {}), '(self.ListLoop2)\n', (59953, 59969), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((60088, 60137), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop3'], {}), '(self.ListLoop3)\n', (60121, 60137), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((64159, 64208), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop1'], {}), '(self.ListLoop1)\n', (64192, 64208), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((64332, 64381), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop2'], {}), '(self.ListLoop2)\n', (64365, 64381), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((64500, 64549), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['self.ListLoop3'], {}), '(self.ListLoop3)\n', (64533, 64549), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((11818, 11829), 'wx.GetApp', 'wx.GetApp', ([], {}), '()\n', (11827, 11829), False, 'import wx\n'), ((19014, 19037), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (19027, 19037), False, 'import os\n'), ((40679, 40722), 'vespa.common.wx_gravy.util.get_selected_item_indices', 'wx_util.get_selected_item_indices', (['listctrl'], {}), '(listctrl)\n', (40712, 40722), True, 'import vespa.common.wx_gravy.util as wx_util\n'), ((62174, 62217), 'numpy.concatenate', 'np.concatenate', (["(ppms, tmp['ppms'] + shift)"], {}), "((ppms, tmp['ppms'] + shift))\n", (62188, 62217), True, 'import numpy as np\n'), ((62250, 62304), 'numpy.concatenate', 'np.concatenate', (["(areas, tmp['areas'] * scale * mix[1])"], {}), "((areas, tmp['areas'] * scale * mix[1]))\n", (62264, 62304), True, 'import numpy as np\n'), ((62336, 62375), 'numpy.concatenate', 'np.concatenate', (["(phases, tmp['phases'])"], {}), 
"((phases, tmp['phases']))\n", (62350, 62375), True, 'import numpy as np\n'), ((63126, 63197), 'vespa.common.util.xml_.TextSubElement', 'util_xml.TextSubElement', (['e', '"""param"""', '""""""', "{'name': pname, 'value': line}"], {}), "(e, 'param', '', {'name': pname, 'value': line})\n", (63149, 63197), True, 'import vespa.common.util.xml_ as util_xml\n'), ((66790, 66833), 'numpy.concatenate', 'np.concatenate', (["(ppms, tmp['ppms'] + shift)"], {}), "((ppms, tmp['ppms'] + shift))\n", (66804, 66833), True, 'import numpy as np\n'), ((66866, 66920), 'numpy.concatenate', 'np.concatenate', (["(areas, tmp['areas'] * scale * mix[1])"], {}), "((areas, tmp['areas'] * scale * mix[1]))\n", (66880, 66920), True, 'import numpy as np\n'), ((66952, 66991), 'numpy.concatenate', 'np.concatenate', (["(phases, tmp['phases'])"], {}), "((phases, tmp['phases']))\n", (66966, 66991), True, 'import numpy as np\n'), ((41285, 41305), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (41299, 41305), False, 'import os\n'), ((41313, 41332), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (41326, 41332), False, 'import os\n'), ((52495, 52538), 'numpy.concatenate', 'np.concatenate', (["(ppms, tmp['ppms'] + shift)"], {}), "((ppms, tmp['ppms'] + shift))\n", (52509, 52538), True, 'import numpy as np\n'), ((52575, 52629), 'numpy.concatenate', 'np.concatenate', (["(areas, tmp['areas'] * scale * mix[1])"], {}), "((areas, tmp['areas'] * scale * mix[1]))\n", (52589, 52629), True, 'import numpy as np\n'), ((52665, 52704), 'numpy.concatenate', 'np.concatenate', (["(phases, tmp['phases'])"], {}), "((phases, tmp['phases']))\n", (52679, 52704), True, 'import numpy as np\n'), ((48577, 48592), 'numpy.arange', 'np.arange', (['npts'], {}), '(npts)\n', (48586, 48592), True, 'import numpy as np\n'), ((57519, 57534), 'numpy.arange', 'np.arange', (['npts'], {}), '(npts)\n', (57528, 57534), True, 'import numpy as np\n')] |
# from future.utils import iteritems
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
def find_in_path(name, path):
    """Return the absolute path of *name* located on *path*, or None.

    *path* is an os.pathsep-separated list of directories searched in
    order, mirroring how a shell resolves $PATH entries.
    Adapted from http://code.activestate.com/recipes/52224
    """
    for directory in path.split(os.pathsep):
        candidate = pjoin(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def locate_cuda():
    """Locate the CUDA environment on the system.

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    The CUDAHOME environment variable takes precedence; otherwise the
    toolkit root is derived from the location of 'nvcc' on $PATH.
    Raises EnvironmentError when nvcc or any expected subdirectory is
    missing.
    """
    if 'CUDAHOME' in os.environ:
        # Explicit override: trust the user-supplied toolkit root.
        cuda_home = os.environ['CUDAHOME']
        nvcc_path = pjoin(cuda_home, 'bin', 'nvcc')
    else:
        # Fall back to searching $PATH for the nvcc binary.
        nvcc_path = find_in_path('nvcc', os.environ['PATH'])
        if nvcc_path is None:
            raise EnvironmentError('The nvcc binary could not be '
                'located in your $PATH. Either add it to your path, '
                'or set $CUDAHOME')
        # nvcc lives in <home>/bin, so the toolkit root is two levels up.
        cuda_home = os.path.dirname(os.path.dirname(nvcc_path))

    cudaconfig = {'home': cuda_home, 'nvcc': nvcc_path,
                  'include': pjoin(cuda_home, 'include'),
                  'lib64': pjoin(cuda_home, 'lib64')}
    # Sanity-check every component before handing the config back.
    for component, component_path in cudaconfig.items():
        if not os.path.exists(component_path):
            raise EnvironmentError('The CUDA %s path could not be '
                'located in %s' % (component, component_path))
    return cudaconfig
def customize_compiler_for_nvcc(self):
    """Inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of
    taking the OO route, this monkey-patches the compiler *instance*:
    .cu sources are compiled with nvcc (using the 'nvcc' entry of
    extra_compile_args), everything else keeps the default compiler
    (using the 'gcc' entry).

    ``self`` is the distutils compiler object passed in by
    custom_build_ext.build_extensions below.
    """
    # Tell the compiler it can processes .cu
    self.src_extensions.append('.cu')
    # Save references to the default compiler_so and _comple methods
    # so the patched _compile can restore/delegate to them.
    default_compiler_so = self.compiler_so
    super = self._compile  # NOTE: shadows the builtin 'super' in this scope
    # Now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        # Dispatch on the source file's extension.
        if os.path.splitext(src)[1] == '.cu':
            # use the cuda compiler for .cu files; relies on the
            # module-level CUDA dict produced by locate_cuda()
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1
            # translated from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        super(obj, src, ext, cc_args, postargs, pp_opts)
    # Reset the default compiler_so, which we might have changed for cuda
    self.compiler_so = default_compiler_so
    # Inject our redefined _compile method into the class
    self._compile = _compile
# Run the customize_compiler
class custom_build_ext(build_ext):
    """build_ext subclass that patches the compiler so .cu sources are
    routed to nvcc (see customize_compiler_for_nvcc) before building."""
    def build_extensions(self):
        # Patch the compiler instance first, then run the normal build.
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
# Resolved at import time; raises EnvironmentError if the CUDA toolkit
# cannot be found on this machine.
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
# (get_numpy_include() was the very old spelling; modern numpy only
# provides get_include().)
try:
    numpy_include = numpy.get_include()
except AttributeError:
    numpy_include = numpy.get_numpy_include()
# Single extension mixing a CUDA source with a Cython wrapper; the
# per-compiler flag dict below is consumed by customize_compiler_for_nvcc.
ext = Extension('gpuadder',
        sources = ['src/manager.cu', 'wrapper.pyx'],
        library_dirs = [CUDA['lib64']],
        libraries = ['cudart'],
        language = 'c++',
        runtime_library_dirs = [CUDA['lib64']],
        # This syntax is specific to this build system
        # we're only going to use certain compiler args with nvcc
        # and not with gcc the implementation of this trick is in
        # customize_compiler()
        extra_compile_args= {
            'gcc': [],
            'nvcc': [
                '-arch=sm_30', '--ptxas-options=-v', '-c',
                '--compiler-options', "'-fPIC'"
            ]
        },
        include_dirs = [numpy_include, CUDA['include'], 'src']
        )
setup(name = 'gpuadder',
      # Random metadata. there's more you can supply
      author = '<NAME>',
      version = '0.1',
      ext_modules = [ext],
      # Inject our custom trigger so build_ext uses the nvcc-aware compiler
      cmdclass = {'build_ext': custom_build_ext},
      # Since the package has c code, the egg cannot be zipped
      zip_safe = False)
| [
"os.path.abspath",
"setuptools.setup",
"os.path.dirname",
"numpy.get_numpy_include",
"os.path.exists",
"distutils.extension.Extension",
"numpy.get_include",
"Cython.Distutils.build_ext.build_extensions",
"os.path.splitext",
"os.path.join"
] | [((3754, 4117), 'distutils.extension.Extension', 'Extension', (['"""gpuadder"""'], {'sources': "['src/manager.cu', 'wrapper.pyx']", 'library_dirs': "[CUDA['lib64']]", 'libraries': "['cudart']", 'language': '"""c++"""', 'runtime_library_dirs': "[CUDA['lib64']]", 'extra_compile_args': '{\'gcc\': [], \'nvcc\': [\'-arch=sm_30\', \'--ptxas-options=-v\', \'-c\',\n \'--compiler-options\', "\'-fPIC\'"]}', 'include_dirs': "[numpy_include, CUDA['include'], 'src']"}), '(\'gpuadder\', sources=[\'src/manager.cu\', \'wrapper.pyx\'],\n library_dirs=[CUDA[\'lib64\']], libraries=[\'cudart\'], language=\'c++\',\n runtime_library_dirs=[CUDA[\'lib64\']], extra_compile_args={\'gcc\': [],\n \'nvcc\': [\'-arch=sm_30\', \'--ptxas-options=-v\', \'-c\',\n \'--compiler-options\', "\'-fPIC\'"]}, include_dirs=[numpy_include, CUDA[\n \'include\'], \'src\'])\n', (3763, 4117), False, 'from distutils.extension import Extension\n'), ((4488, 4623), 'setuptools.setup', 'setup', ([], {'name': '"""gpuadder"""', 'author': '"""<NAME>"""', 'version': '"""0.1"""', 'ext_modules': '[ext]', 'cmdclass': "{'build_ext': custom_build_ext}", 'zip_safe': '(False)'}), "(name='gpuadder', author='<NAME>', version='0.1', ext_modules=[ext],\n cmdclass={'build_ext': custom_build_ext}, zip_safe=False)\n", (4493, 4623), False, 'from setuptools import setup\n'), ((3657, 3676), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3674, 3676), False, 'import numpy\n'), ((393, 409), 'os.path.join', 'pjoin', (['dir', 'name'], {}), '(dir, name)\n', (398, 409), True, 'from os.path import join as pjoin\n'), ((421, 444), 'os.path.exists', 'os.path.exists', (['binpath'], {}), '(binpath)\n', (435, 444), False, 'import os\n'), ((979, 1005), 'os.path.join', 'pjoin', (['home', '"""bin"""', '"""nvcc"""'], {}), "(home, 'bin', 'nvcc')\n", (984, 1005), True, 'from os.path import join as pjoin\n'), ((1446, 1468), 'os.path.join', 'pjoin', (['home', '"""include"""'], {}), "(home, 'include')\n", (1451, 1468), True, 'from os.path 
import join as pjoin\n'), ((1497, 1517), 'os.path.join', 'pjoin', (['home', '"""lib64"""'], {}), "(home, 'lib64')\n", (1502, 1517), True, 'from os.path import join as pjoin\n'), ((3496, 3528), 'Cython.Distutils.build_ext.build_extensions', 'build_ext.build_extensions', (['self'], {}), '(self)\n', (3522, 3528), False, 'from Cython.Distutils import build_ext\n'), ((3720, 3745), 'numpy.get_numpy_include', 'numpy.get_numpy_include', ([], {}), '()\n', (3743, 3745), False, 'import numpy\n'), ((465, 489), 'os.path.abspath', 'os.path.abspath', (['binpath'], {}), '(binpath)\n', (480, 489), False, 'import os\n'), ((1347, 1368), 'os.path.dirname', 'os.path.dirname', (['nvcc'], {}), '(nvcc)\n', (1362, 1368), False, 'import os\n'), ((1576, 1593), 'os.path.exists', 'os.path.exists', (['v'], {}), '(v)\n', (1590, 1593), False, 'import os\n'), ((2680, 2701), 'os.path.splitext', 'os.path.splitext', (['src'], {}), '(src)\n', (2696, 2701), False, 'import os\n')] |
"""
The constants `_MAX_SPEED` and `_BALL_SHAPE` are absolutely immutable (i.e.,
the game engine was built assuming these values to be immutable). Some of the
remaining constants may be changed with caution. Proceed at your own risk.
"""
import numpy as np
###############################################################################
#
# DO NOT CHANGE !!!
# -----------------
# These constants determine the maximum radius up to which graceful dynamics
# are guaranteed without too many visual artifacts. Recommended value: 2.
#
_MAX_SPEED = 2
_BALL_SHAPE = np.array([1, 1])  # presumably (height, width) in pixels -- confirm with engine
###############################################################################
###############################################################################
# Maximum number of pixels per entity. Knowing this in advance allows us to
# optimize object creation and tracking.
###############################################################################
MAX_NZIS_PER_ENTITY = 100
# Gameplay tuning knobs; changeable with caution per the module docstring.
ALLOW_BOUNCE_AGAINST_PHYSICS = False
BOUNCE_STOCHASTICITY = 0.25
CORRUPT_RENDERED_IMAGE = False
DEBUGGING = False
DEFAULT_BRICK_REWARD = 1
DEFAULT_BRICK_SHAPE = np.array([8, 4])  # per-brick size; [0] is used as the width below
DEFAULT_NUM_BRICKS_COLS = 11
DEFAULT_NUM_BRICKS_ROWS = 6
DEFAULT_WALL_THICKNESS = 3
# Playfield width = two side walls + one full row of bricks.
DEFAULT_WIDTH = (DEFAULT_WALL_THICKNESS * 2 +
                 DEFAULT_NUM_BRICKS_COLS *
                 DEFAULT_BRICK_SHAPE[0])
DEFAULT_HEIGHT = int(1.25 * DEFAULT_WIDTH)
DEFAULT_PADDLE_SHAPE = np.array([int(DEFAULT_WIDTH * .25), 4])  # paddle spans 25% of the width
# Velocities of the form (+-k, 0) for k = 1.._MAX_SPEED, i.e. purely
# horizontal motion -- presumably excluded so the ball always has a
# vertical component; confirm in the engine.
EXCLUDED_VELOCITIES = frozenset(
    {(u*(v+1), 0) for u in (-1, 1) for v in range(_MAX_SPEED)})
NUM_BALLS = 1
NUM_LIVES = 3  # float('inf') is useful for effortless debugging
PADDLE_SPEED = 2
# Distribution over 2*PADDLE_SPEED + 1 outcomes; only the last two entries
# carry mass (0.90 / 0.10). Presumably indices map to signed paddle speeds
# -- TODO confirm how the engine samples from this.
PADDLE_SPEED_DISTRIBUTION = np.zeros((2 * PADDLE_SPEED + 1,))
PADDLE_SPEED_DISTRIBUTION[-1] = 0.90
PADDLE_SPEED_DISTRIBUTION[-2] = 0.10
PADDLE_STARTING_POSITION = (None, None)  # (None, None) presumably lets the engine choose
REWARD_UPON_BALL_LOSS = -1
REWARD_UPON_NO_BRICKS_LEFT = 0
STARTING_BALL_MOVEMENT_RADIUS = 1
###############################################################################
# Color constants (RGB tuples matching the classic Atari Breakout palette)
###############################################################################
CLASSIC_BACKGROUND_COLOR = (0, 0, 0)
CLASSIC_BALL_COLOR = (200, 72, 73)
CLASSIC_BRICK_COLORS = [(66, 72, 200), (72, 160, 72), (163, 162, 42),
                        (180, 122, 48), (198, 108, 58), (200, 72, 73)]
CLASSIC_PADDLE_COLOR = (200, 72, 73)
CLASSIC_WALL_COLOR = (142, 142, 142)
DEFAULT_PADDLE_COLOR = CLASSIC_BALL_COLOR
| [
"numpy.zeros",
"numpy.array"
] | [((572, 588), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (580, 588), True, 'import numpy as np\n'), ((1135, 1151), 'numpy.array', 'np.array', (['[8, 4]'], {}), '([8, 4])\n', (1143, 1151), True, 'import numpy as np\n'), ((1684, 1717), 'numpy.zeros', 'np.zeros', (['(2 * PADDLE_SPEED + 1,)'], {}), '((2 * PADDLE_SPEED + 1,))\n', (1692, 1717), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from torch.utils.data import BatchSampler, SubsetRandomSampler
from advattack.data_handling.cifar.cifar_dataset import Cifar10Dataset
from advattack.data_handling.dataset_repository import DatasetRepository
from advattack.data_handling.dataset_loader import DatasetLoader
from advattack.models.nn.ff_net import FFNet
from advattack.models.model_repository import ModelRepository
from torchvision import transforms
import numpy as np
from advattack.util.tensorboard import TensorboardWrapper
from advattack.util.tensorboard import TensorboardMode
# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device: " + str(device))
# Training hyperparameters.
batch_size = 50
learning_rate = 0.001
epochs = 300
dataset_class = Cifar10Dataset
model_class = FFNet
# instantiate model
tensorboard_writer = TensorboardWrapper.get_summary_writer(dataset_identifier=dataset_class.get_dataset_identifier(), model_identifier=model_class.get_model_identifier(), mode=TensorboardMode.TRAIN)
# Fully-connected net: 32*32*3 CIFAR-10 input, eleven hidden layers of 250
# units, 10-class output.
model_config = {"layer_config": np.array([32*32*3, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 10],).flatten().tolist(),
                "tensorboard_writer": tensorboard_writer
                }
# NLLLoss expects log-probabilities -- presumably FFNet ends with
# log_softmax; confirm in the model class.
loss_function = nn.NLLLoss()
model = model_class(**model_config).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# generate training set
# NOTE(review): (0.1307, 0.3081) are the canonical single-channel MNIST
# mean/std, applied here to 3-channel CIFAR-10 -- confirm this is intentional.
feature_transform_fun = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
# load dataset
dataset = DatasetRepository.get_dataset(dataset_class, feature_transform_fun=feature_transform_fun)
# Fixed-seed 80/20 train/validation split for reproducibility.
train_indices, valid_indices = dataset.get_train_and_validation_set_indices(train_valid_split_ratio=0.8, seed=2)
train_loader = DatasetLoader(dataset, batch_sampler=BatchSampler(sampler=SubsetRandomSampler(train_indices),
                                                                   batch_size=batch_size, drop_last=False))
valid_loader = DatasetLoader(dataset, batch_sampler=BatchSampler(sampler=SubsetRandomSampler(valid_indices),
                                                                   batch_size=batch_size, drop_last=False))
# train model
model.train_model(train_loader=train_loader, valid_loader=valid_loader, optimizer=optimizer, loss_function=loss_function, epochs=epochs, device=device)
# save model to disk
ModelRepository.store_model(model=model, dataset_class=dataset_class)
# load model from disk (round-trip check that persistence works)
model = ModelRepository.get_model(model_class=model_class, dataset_class=dataset_class)
| [
"advattack.models.model_repository.ModelRepository.get_model",
"torch.nn.NLLLoss",
"torch.cuda.is_available",
"numpy.array",
"advattack.models.model_repository.ModelRepository.store_model",
"torch.utils.data.SubsetRandomSampler",
"torchvision.transforms.Normalize",
"advattack.data_handling.dataset_rep... | [((1219, 1231), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (1229, 1231), True, 'import torch.nn as nn\n'), ((1518, 1612), 'advattack.data_handling.dataset_repository.DatasetRepository.get_dataset', 'DatasetRepository.get_dataset', (['dataset_class'], {'feature_transform_fun': 'feature_transform_fun'}), '(dataset_class, feature_transform_fun=\n feature_transform_fun)\n', (1547, 1612), False, 'from advattack.data_handling.dataset_repository import DatasetRepository\n'), ((2340, 2409), 'advattack.models.model_repository.ModelRepository.store_model', 'ModelRepository.store_model', ([], {'model': 'model', 'dataset_class': 'dataset_class'}), '(model=model, dataset_class=dataset_class)\n', (2367, 2409), False, 'from advattack.models.model_repository import ModelRepository\n'), ((2442, 2521), 'advattack.models.model_repository.ModelRepository.get_model', 'ModelRepository.get_model', ([], {'model_class': 'model_class', 'dataset_class': 'dataset_class'}), '(model_class=model_class, dataset_class=dataset_class)\n', (2467, 2521), False, 'from advattack.models.model_repository import ModelRepository\n'), ((617, 642), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (640, 642), False, 'import torch\n'), ((1419, 1440), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1438, 1440), False, 'from torchvision import transforms\n'), ((1446, 1488), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (1466, 1488), False, 'from torchvision import transforms\n'), ((1795, 1829), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (1814, 1829), False, 'from torch.utils.data import BatchSampler, SubsetRandomSampler\n'), ((2010, 2044), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['valid_indices'], {}), '(valid_indices)\n', (2029, 2044), False, 'from 
torch.utils.data import BatchSampler, SubsetRandomSampler\n'), ((1043, 1130), 'numpy.array', 'np.array', (['[32 * 32 * 3, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 10]'], {}), '([32 * 32 * 3, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, \n 250, 10])\n', (1051, 1130), True, 'import numpy as np\n')] |
"""
Library Features:
Name: lib_ef_io_generic
Author(s): <NAME> (<EMAIL>)
Date: '20210104'
Version: '1.0.0'
"""
#######################################################################################
# Libraries
import logging
import tempfile
import os
import json
import pickle
import re
import pandas as pd
import numpy as np
from copy import deepcopy
from scipy.io import loadmat
import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to save data info in json format
def save_file_json(file_name, file_data_dict, file_attrs=None, file_indent=4):
    """Dump time-series datasets to a JSON file.

    :param file_name: destination path of the JSON file
    :param file_data_dict: {variable_name: {timestamp: value}} mapping;
        timestamps are assumed datetime-like (strftime is called on
        them) -- TODO confirm with callers
    :param file_attrs: optional {attr_name: value} metadata merged into
        the output at top level
    :param file_indent: indentation passed to json.dumps (default 4)
    :raises RuntimeError: if a dataset value is not a dict
    """
    file_workspace = {}
    for file_key, file_value in file_data_dict.items():
        if not isinstance(file_value, dict):
            logging.error(' ===> Error in getting datasets')
            raise RuntimeError('Datasets case not implemented yet')
        file_time_list = []
        file_data_list = []
        for file_time_step, file_data_step in file_value.items():
            file_time_list.append(file_time_step.strftime('%Y-%m-%d %H:%M'))
            file_data_list.append(file_data_step)
        # The time axis is written once, from the first variable; it is
        # assumed shared by all series -- NOTE(review): not validated here.
        if 'time' not in file_workspace:
            file_workspace['time'] = file_time_list
        file_workspace[file_key] = file_data_list

    if file_attrs is not None:
        file_workspace.update(file_attrs)

    file_data = json.dumps(file_workspace, indent=file_indent, ensure_ascii=False, sort_keys=False)
    with open(file_name, "w", encoding='utf-8') as file_handle:
        file_handle.write(file_data)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Create default dataframe
def create_default_dframe(df_columns, df_shape, df_nodata=0.0):
    """Create a DataFrame of shape *df_shape* filled with *df_nodata*.

    :param df_columns: column labels for the resulting DataFrame
    :param df_shape: array shape, e.g. (n_rows, n_cols); any shape
        accepted by numpy works (the previous implementation's
        ``[:, :]`` assignment only supported 2D shapes)
    :param df_nodata: constant fill value (default 0.0)
    :return: pandas.DataFrame of dtype float64
    """
    # np.full initializes in one pass (instead of zeros-then-assign);
    # dtype is pinned to float64 to match the original np.zeros behaviour
    # even when df_nodata is an int.
    df_data = np.full(df_shape, df_nodata, dtype='float64')
    df_obj = pd.DataFrame(data=df_data, columns=df_columns)
    return df_obj
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read csv file
def read_file_csv(file_name, file_sep=';', file_skiprows=0, tag_time_dim='time'):
    """Read a delimited forecast file into a DataFrame.

    Column names are stripped of surrounding blanks, spurious 'Unnamed'
    columns are dropped, commas are removed from string cell values
    (presumably separator cleanup), and the Italian header
    'data previsione' is renamed to *tag_time_dim*.
    """
    dframe = pd.read_table(file_name, sep=file_sep, skiprows=file_skiprows)
    dframe.columns = dframe.columns.str.strip()
    keep_mask = ~dframe.columns.str.contains('^Unnamed')
    dframe = dframe.loc[:, keep_mask]
    dframe = dframe.replace(to_replace=',', value='', regex=True)
    dframe = dframe.rename(columns={'data previsione': tag_time_dim})
    return dframe
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read mat file
def read_file_mat(file_name, var_name='vm'):
    """Load variable *var_name* from a MATLAB .mat file.

    Returns the variable's data, or None (with a warning logged) when
    the variable is not present in the file.
    """
    mat_workspace = loadmat(file_name)
    if var_name not in mat_workspace:
        logging.warning(' ===> Variable not found in mat file. Return none value')
        return None
    return mat_workspace[var_name]
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create a tmp name
def create_filename_tmp(prefix='tmp_', suffix='.tiff', folder=None):
if folder is None:
folder = '/tmp'
with tempfile.NamedTemporaryFile(dir=folder, prefix=prefix, suffix=suffix, delete=False) as tmp:
temp_file_name = tmp.name
return temp_file_name
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get file settings in json format
def read_file_settings(file_name_settings):
if os.path.exists(file_name_settings):
with open(file_name_settings) as file_handle:
data_settings = json.load(file_handle)
else:
logging.error(' ===> Error in reading algorithm settings file')
raise IOError('File not found')
return data_settings
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read data obj
def read_obj(filename):
if os.path.exists(filename):
data = pickle.load(open(filename, "rb"))
else:
data = None
return data
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to write data obj
def write_obj(filename, data):
if os.path.exists(filename):
os.remove(filename)
with open(filename, 'wb') as handle:
pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
# -------------------------------------------------------------------------------------
| [
"pandas.DataFrame",
"tempfile.NamedTemporaryFile",
"logging.error",
"os.remove",
"pickle.dump",
"json.load",
"scipy.io.loadmat",
"logging.warning",
"numpy.zeros",
"os.path.exists",
"json.dumps",
"pandas.read_table"
] | [((1617, 1704), 'json.dumps', 'json.dumps', (['file_workspace'], {'indent': 'file_indent', 'ensure_ascii': '(False)', 'sort_keys': '(False)'}), '(file_workspace, indent=file_indent, ensure_ascii=False,\n sort_keys=False)\n', (1627, 1704), False, 'import json\n'), ((2150, 2174), 'numpy.zeros', 'np.zeros', ([], {'shape': 'df_shape'}), '(shape=df_shape)\n', (2158, 2174), True, 'import numpy as np\n'), ((2219, 2265), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'df_data', 'columns': 'df_columns'}), '(data=df_data, columns=df_columns)\n', (2231, 2265), True, 'import pandas as pd\n'), ((2590, 2652), 'pandas.read_table', 'pd.read_table', (['file_name'], {'sep': 'file_sep', 'skiprows': 'file_skiprows'}), '(file_name, sep=file_sep, skiprows=file_skiprows)\n', (2603, 2652), True, 'import pandas as pd\n'), ((3244, 3262), 'scipy.io.loadmat', 'loadmat', (['file_name'], {}), '(file_name)\n', (3251, 3262), False, 'from scipy.io import loadmat\n'), ((4244, 4278), 'os.path.exists', 'os.path.exists', (['file_name_settings'], {}), '(file_name_settings)\n', (4258, 4278), False, 'import os\n'), ((4767, 4791), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (4781, 4791), False, 'import os\n'), ((5131, 5155), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (5145, 5155), False, 'import os\n'), ((3363, 3437), 'logging.warning', 'logging.warning', (['""" ===> Variable not found in mat file. Return none value"""'], {}), "(' ===> Variable not found in mat file. 
Return none value')\n", (3378, 3437), False, 'import logging\n'), ((3818, 3905), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'dir': 'folder', 'prefix': 'prefix', 'suffix': 'suffix', 'delete': '(False)'}), '(dir=folder, prefix=prefix, suffix=suffix,\n delete=False)\n', (3845, 3905), False, 'import tempfile\n'), ((4403, 4466), 'logging.error', 'logging.error', (['""" ===> Error in reading algorithm settings file"""'], {}), "(' ===> Error in reading algorithm settings file')\n", (4416, 4466), False, 'import logging\n'), ((5165, 5184), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (5174, 5184), False, 'import os\n'), ((5234, 5293), 'pickle.dump', 'pickle.dump', (['data', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (5245, 5293), False, 'import pickle\n'), ((1347, 1395), 'logging.error', 'logging.error', (['""" ===> Error in getting datasets"""'], {}), "(' ===> Error in getting datasets')\n", (1360, 1395), False, 'import logging\n'), ((4362, 4384), 'json.load', 'json.load', (['file_handle'], {}), '(file_handle)\n', (4371, 4384), False, 'import json\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestLogicalAnd(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_test_op()
@property
def fp16_enabled(self):
return False
def set_test_op(self):
self.op = paddle.fluid.layers.logical_and
def set_op_attrs(self):
self.attrs = {}
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
y = paddle.static.data(name=self.feed_list[1],
shape=self.feed_shape[1],
dtype=self.feed_dtype[1])
out = self.op(x, y, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
self.run_op_test(exec_mode)
def run_test_base(self):
for m in IPUOpTest.ExecutionMode:
if not self.skip_mode(m):
self.build_model()
self.run_model(m)
self.check()
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed_fp32.values()]
self.feed_list = list(self.feed_fp32.keys())
self.feed_dtype = ['bool', 'bool']
def set_data_feed0(self):
x = np.random.choice([True, False], size=(1, 3, 5, 5))
y = np.random.choice([True, False], size=(1, 3, 5, 5))
self.feed_fp32 = {
"x": x.astype('bool'),
"y": y.astype('bool'),
}
self.set_feed_attr()
def test_case0(self):
self.set_data_feed0()
self.set_op_attrs()
self.run_test_base()
class TestLogicalOr(TestLogicalAnd):
def set_test_op(self):
self.op = paddle.fluid.layers.logical_or
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"paddle.static.data",
"paddle.is_compiled_with_ipu",
"numpy.random.choice"
] | [((2679, 2694), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2692, 2694), False, 'import unittest\n'), ((1250, 1349), 'paddle.static.data', 'paddle.static.data', ([], {'name': 'self.feed_list[0]', 'shape': 'self.feed_shape[0]', 'dtype': 'self.feed_dtype[0]'}), '(name=self.feed_list[0], shape=self.feed_shape[0], dtype=\n self.feed_dtype[0])\n', (1268, 1349), False, 'import paddle\n'), ((1419, 1518), 'paddle.static.data', 'paddle.static.data', ([], {'name': 'self.feed_list[1]', 'shape': 'self.feed_shape[1]', 'dtype': 'self.feed_dtype[1]'}), '(name=self.feed_list[1], shape=self.feed_shape[1], dtype=\n self.feed_dtype[1])\n', (1437, 1518), False, 'import paddle\n'), ((2166, 2216), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {'size': '(1, 3, 5, 5)'}), '([True, False], size=(1, 3, 5, 5))\n', (2182, 2216), True, 'import numpy as np\n'), ((2229, 2279), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {'size': '(1, 3, 5, 5)'}), '([True, False], size=(1, 3, 5, 5))\n', (2245, 2279), True, 'import numpy as np\n'), ((772, 801), 'paddle.is_compiled_with_ipu', 'paddle.is_compiled_with_ipu', ([], {}), '()\n', (799, 801), False, 'import paddle\n')] |
"""
.. module:: analysis
:platform: Unix, Windows
:synopsis: Unified Free Energy Dynamics with OpenMM
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import itertools
import numpy as np
from collections import namedtuple
from scipy import stats
from simtk import openmm
from ufedmm.ufedmm import _standardized, _get_energy_function, _get_parameters
class _RBFContext(openmm.Context):
def __init__(self, variables, variances, centers, weights, platform, properties):
num_particles = len(variables)//3 + 1
coordinates = [f'{x}{i+1}' for i in range(num_particles) for x in 'xyz']
exponents = []
for v, variance, x in zip(variables, variances, coordinates):
if v.periodic: # von Mises
factor = 2*np.pi/v._range
exponents.append(f'{1.0/(variance*factor**2)}*(cos({factor}*({v.id}-{x}))-1)')
else: # Gauss
exponents.append(f'(-{0.5/variance})*({v.id}-{x})^2')
expression = f'weight*exp({"+".join(exponents)})'
force = openmm.CustomCompoundBondForce(num_particles, expression)
force.addPerBondParameter('weight')
for v in variables:
force.addGlobalParameter(v.id, 0)
force.addEnergyParameterDerivative(v.id)
system = openmm.System()
positions = []
for i, (center, weight) in enumerate(zip(centers, weights)):
for position in np.resize(center, (num_particles, 3)):
system.addParticle(1.0)
positions.append(openmm.Vec3(*position))
force.addBond(range(i*num_particles, (i+1)*num_particles), [weight])
system.addForce(force)
integrator = openmm.CustomIntegrator(0)
super().__init__(system, integrator, platform, properties)
self.parameters = [v.id for v in variables]
self.setPositions(positions)
class FreeEnergyAnalyzer(object):
"""
Calculate free energy landscapes from UFED simulation results.
Parameters
----------
ufed : :class:`~ufedmm.ufedmm.UnifiedFreeEnergyDynamics`
The UFED object.
dataframe : pandas.DataFrame
A data frame containing sampled sets of collective variables and driver parameters.
"""
def __init__(self, ufed, dataframe):
self._ufed = ufed
self._dataframe = dataframe
self._bias_variables = filter(lambda v: v.sigma is not None, self._ufed.variables)
def metadynamics_bias_free_energy(self):
"""
Returns a Python function which, in turn, receives the values of extended-space variables
and returns the energy estimated from a Metadynamics bias potential reconstructed from the
simulation data.
Returns
-------
function
The free energy function.
"""
Variable = namedtuple('Variable', 'sigma factor periodic centers')
variables = [
Variable(v.sigma, 2*np.pi/v._range, v.periodic, self._dataframe[v.id].values)
for v in self._bias_variables
]
try:
heights = self._dataframe['Height (kJ/mole)'].values
except KeyError:
heights = self._ufed.height
def free_energy(*position):
exponents = 0.0
for v, x in zip(variables, position):
if v.periodic:
exponents += (np.cos(v.factor*(v.centers - x)) - 1.0)/(v.factor*v.sigma)**2
else:
exponents += -0.5*((v.centers - x)/v.sigma)**2
return -np.sum(heights*np.exp(exponents))
return np.vectorize(free_energy)
def centers_and_mean_forces(self, bins, min_count=1, adjust_centers=False):
"""
Performs binned statistics with the UFED simulation data.
Parameters
----------
bins : list(int) or int
The number of bins in each direction. If a single integer is passed, then the same
number of bins will be considered for all directions.
Keyword Args
------------
min_count : int, default=1
The miminum number of hits for any bin to be considered in the analysis.
adjust_centers : bool, default=False
Whether to consider the center of a bin as the mean value of the its sampled
internal points instead of its geometric center.
Returns
-------
centers : list(numpy.array)
A list of Numpy arrays, each one containing the values of an extended-space
variable at the centers of all bins that satisfy the minimum-count criterion.
mean_forces : list(numpy.array)
A list of Numpy arrays, each one containing the mean forces in the direction of
an extended-space variable.
"""
variables = self._ufed.variables
sample = [self._dataframe[v.id] for v in variables]
forces = self._compute_forces()
ranges = [(v.min_value, v.max_value) for v in variables]
counts = stats.binned_statistic_dd(sample, [], statistic='count', bins=bins, range=ranges)
index = np.where(counts.statistic.flatten() >= min_count)
n = len(variables)
if adjust_centers:
means = stats.binned_statistic_dd(sample, sample + forces, bins=bins, range=ranges)
centers = [means.statistic[i].flatten()[index] for i in range(n)]
mean_forces = [means.statistic[n+i].flatten()[index] for i in range(n)]
else:
means = stats.binned_statistic_dd(sample, forces, bins=bins, range=ranges)
bin_centers = [0.5*(edges[1:] + edges[:-1]) for edges in counts.bin_edges]
center_points = np.stack([np.array(point) for point in itertools.product(*bin_centers)])
centers = [center_points[:, i][index] for i in range(n)]
mean_forces = [statistic.flatten()[index] for statistic in means.statistic]
return centers, mean_forces
def mean_force_free_energy(self, centers, mean_forces, sigma, platform_name='Reference', properties={}):
"""
Returns Python functions for evaluating the potential of mean force and their originating
mean forces as a function of the collective variables.
Parameters
----------
centers : list(numpy.array)
The bin centers.
mean_forces : list(numpy.array)
The mean forces.
sigmas : float or unit.Quantity or list
The standard deviation of kernels.
Keyword Args
------------
platform_name : string, default='Reference'
The name of the OpenMM Platform to be used for potential and mean-force evaluations.
properties : dict, default={}
A set of values for platform-specific properties. Keys are the property names.
Returns
-------
potential : function
A Python function whose arguments are collective variable values and whose result
is the potential of mean force at that values.
mean_force : function
A Python function whose arguments are collective variable values and whose result
is the mean force at those values.
"""
variables = self._ufed.variables
n = len(variables)
try:
variances = [_standardized(value)**2 for value in sigma]
except TypeError:
variances = [_standardized(sigma)**2]*n
exponent = []
derivative = []
for v, variance in zip(variables, variances):
if v.periodic: # von Mises
factor = 2*np.pi/v._range
exponent.append(lambda x: (np.cos(factor*x)-1.0)/(factor*factor*variance))
derivative.append(lambda x: -np.sin(factor*x)/(factor*variance))
else: # Gauss
exponent.append(lambda x: -0.5*x**2/variance)
derivative.append(lambda x: -x/variance)
def kernel(x):
return np.exp(np.sum(exponent[i](x[i]) for i in range(n)))
def gradient(x, i):
return kernel(x)*derivative[i](x[i])
grid_points = [np.array(xc) for xc in zip(*centers)]
coefficients = []
for i in range(n):
for x in grid_points:
coefficients.append(np.array([gradient(x-xc, i) for xc in grid_points]))
M = np.vstack(coefficients)
F = -np.hstack(mean_forces)
A, _, _, _ = np.linalg.lstsq(M, F, rcond=None)
platform = openmm.Platform.getPlatformByName(platform_name)
context = _RBFContext(variables, variances, grid_points, A, platform, properties)
minimum = 0.0
def potential(*x):
for parameter, value in zip(context.parameters, x):
context.setParameter(parameter, value)
state = context.getState(getEnergy=True)
return state.getPotentialEnergy()._value - minimum
def mean_force(*x):
for parameter, value in zip(context.parameters, x):
context.setParameter(parameter, value)
state = context.getState(getParameterDerivatives=True)
return -state.getEnergyParameterDerivatives()._value
minimum = np.min([potential(*x) for x in grid_points])
return np.vectorize(potential), np.vectorize(mean_force)
def _compute_forces(self):
variables = self._ufed.variables
collective_variables = [colvar.id for v in variables for colvar in v.colvars]
extended_variables = [v.id for v in variables]
all_variables = collective_variables + extended_variables
force = openmm.CustomCVForce(_get_energy_function(variables))
for key, value in _get_parameters(variables).items():
force.addGlobalParameter(key, value)
for variable in all_variables:
force.addGlobalParameter(variable, 0)
for xv in extended_variables:
force.addEnergyParameterDerivative(xv)
system = openmm.System()
system.addForce(force)
system.addParticle(0)
platform = openmm.Platform.getPlatformByName('Reference')
context = openmm.Context(system, openmm.CustomIntegrator(0), platform)
context.setPositions([openmm.Vec3(0, 0, 0)])
n = len(self._dataframe.index)
forces = [np.empty(n) for xv in extended_variables]
for j, row in self._dataframe.iterrows():
for variable in all_variables:
context.setParameter(variable, row[variable])
state = context.getState(getParameterDerivatives=True)
derivatives = state.getEnergyParameterDerivatives()
for i, xv in enumerate(extended_variables):
forces[i][j] = -derivatives[xv]
return forces
class Analyzer(FreeEnergyAnalyzer):
"""
UFED Analyzer.
.. warning::
This class is obsolete and will be discontinued. Use :class:`FreeEnergyAnalyzer` instead.
Parameters
----------
ufed : :class:`~ufedmm.ufedmm.UnifiedFreeEnergyDynamics`
The UFED object.
dataframe : pandas.DataFrame
A data frame containing sampled sets of collective variables and driver parameters.
bins : int or list(int)
The number of bins in each direction.
Keyword Args
------------
min_count : int, default=1
The miminum number of hits for a given bin to be considered in the analysis.
adjust_centers : bool, default=False
Whether to consider the center of a bin as the mean value of the its sampled
internal points istead of its geometric center.
"""
def __init__(self, ufed, dataframe, bins, min_count=1, adjust_centers=False):
super().__init__(ufed, dataframe)
try:
self._bins = [bin for bin in bins]
except TypeError:
self._bins = [bins]*len(ufed.variables)
self._min_count = min_count
self._adjust_centers = adjust_centers
def free_energy_functions(self, sigma=None, factor=8):
"""
Returns Python functions for evaluating the potential of mean force and their originating
mean forces as a function of the collective variables.
Keyword Args
------------
sigma : float or unit.Quantity, default=None
The standard deviation of kernels. If this is `None`, then values will be
determined from the distances between nodes.
factor : float, default=8
If ``sigma`` is not explicitly provided, then it will be computed as
``sigma = factor*range/bins`` for each direction.
Returns
-------
potential : function
A Python function whose arguments are collective variable values and whose result
is the potential of mean force at that values.
mean_force : function
A Python function whose arguments are collective variable values and whose result
is the mean force at that values regarding a given direction. Such direction must
be defined through a keyword argument `dir`, whose default value is `0` (meaning
the direction of the first collective variable).
"""
self.centers, self.mean_forces = self.centers_and_mean_forces(
self._bins,
self._min_count,
self._adjust_centers,
)
variables = self._ufed.variables
if sigma is None:
sigmas = [factor*v._range/bin for v, bin in zip(variables, self._bins)]
else:
try:
sigmas = [_standardized(value) for value in sigma]
except TypeError:
sigmas = [_standardized(sigma)]*len(variables)
return self.mean_force_free_energy(self.centers, self.mean_forces, sigmas)
| [
"numpy.resize",
"numpy.empty",
"numpy.sin",
"numpy.exp",
"ufedmm.ufedmm._get_parameters",
"scipy.stats.binned_statistic_dd",
"itertools.product",
"ufedmm.ufedmm._standardized",
"numpy.vectorize",
"simtk.openmm.CustomCompoundBondForce",
"numpy.hstack",
"numpy.cos",
"simtk.openmm.Vec3",
"num... | [((1042, 1099), 'simtk.openmm.CustomCompoundBondForce', 'openmm.CustomCompoundBondForce', (['num_particles', 'expression'], {}), '(num_particles, expression)\n', (1072, 1099), False, 'from simtk import openmm\n'), ((1289, 1304), 'simtk.openmm.System', 'openmm.System', ([], {}), '()\n', (1302, 1304), False, 'from simtk import openmm\n'), ((1695, 1721), 'simtk.openmm.CustomIntegrator', 'openmm.CustomIntegrator', (['(0)'], {}), '(0)\n', (1718, 1721), False, 'from simtk import openmm\n'), ((2858, 2913), 'collections.namedtuple', 'namedtuple', (['"""Variable"""', '"""sigma factor periodic centers"""'], {}), "('Variable', 'sigma factor periodic centers')\n", (2868, 2913), False, 'from collections import namedtuple\n'), ((3622, 3647), 'numpy.vectorize', 'np.vectorize', (['free_energy'], {}), '(free_energy)\n', (3634, 3647), True, 'import numpy as np\n'), ((5109, 5195), 'scipy.stats.binned_statistic_dd', 'stats.binned_statistic_dd', (['sample', '[]'], {'statistic': '"""count"""', 'bins': 'bins', 'range': 'ranges'}), "(sample, [], statistic='count', bins=bins, range=\n ranges)\n", (5134, 5195), False, 'from scipy import stats\n'), ((8541, 8564), 'numpy.vstack', 'np.vstack', (['coefficients'], {}), '(coefficients)\n', (8550, 8564), True, 'import numpy as np\n'), ((8622, 8655), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['M', 'F'], {'rcond': 'None'}), '(M, F, rcond=None)\n', (8637, 8655), True, 'import numpy as np\n'), ((8676, 8724), 'simtk.openmm.Platform.getPlatformByName', 'openmm.Platform.getPlatformByName', (['platform_name'], {}), '(platform_name)\n', (8709, 8724), False, 'from simtk import openmm\n'), ((10167, 10182), 'simtk.openmm.System', 'openmm.System', ([], {}), '()\n', (10180, 10182), False, 'from simtk import openmm\n'), ((10263, 10309), 'simtk.openmm.Platform.getPlatformByName', 'openmm.Platform.getPlatformByName', (['"""Reference"""'], {}), "('Reference')\n", (10296, 10309), False, 'from simtk import openmm\n'), ((1425, 1462), 'numpy.resize', 
'np.resize', (['center', '(num_particles, 3)'], {}), '(center, (num_particles, 3))\n', (1434, 1462), True, 'import numpy as np\n'), ((5332, 5407), 'scipy.stats.binned_statistic_dd', 'stats.binned_statistic_dd', (['sample', '(sample + forces)'], {'bins': 'bins', 'range': 'ranges'}), '(sample, sample + forces, bins=bins, range=ranges)\n', (5357, 5407), False, 'from scipy import stats\n'), ((5604, 5670), 'scipy.stats.binned_statistic_dd', 'stats.binned_statistic_dd', (['sample', 'forces'], {'bins': 'bins', 'range': 'ranges'}), '(sample, forces, bins=bins, range=ranges)\n', (5629, 5670), False, 'from scipy import stats\n'), ((8314, 8326), 'numpy.array', 'np.array', (['xc'], {}), '(xc)\n', (8322, 8326), True, 'import numpy as np\n'), ((8578, 8600), 'numpy.hstack', 'np.hstack', (['mean_forces'], {}), '(mean_forces)\n', (8587, 8600), True, 'import numpy as np\n'), ((9459, 9482), 'numpy.vectorize', 'np.vectorize', (['potential'], {}), '(potential)\n', (9471, 9482), True, 'import numpy as np\n'), ((9484, 9508), 'numpy.vectorize', 'np.vectorize', (['mean_force'], {}), '(mean_force)\n', (9496, 9508), True, 'import numpy as np\n'), ((9827, 9858), 'ufedmm.ufedmm._get_energy_function', '_get_energy_function', (['variables'], {}), '(variables)\n', (9847, 9858), False, 'from ufedmm.ufedmm import _standardized, _get_energy_function, _get_parameters\n'), ((10351, 10377), 'simtk.openmm.CustomIntegrator', 'openmm.CustomIntegrator', (['(0)'], {}), '(0)\n', (10374, 10377), False, 'from simtk import openmm\n'), ((10500, 10511), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (10508, 10511), True, 'import numpy as np\n'), ((9886, 9912), 'ufedmm.ufedmm._get_parameters', '_get_parameters', (['variables'], {}), '(variables)\n', (9901, 9912), False, 'from ufedmm.ufedmm import _standardized, _get_energy_function, _get_parameters\n'), ((10419, 10439), 'simtk.openmm.Vec3', 'openmm.Vec3', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (10430, 10439), False, 'from simtk import openmm\n'), ((1537, 
1559), 'simtk.openmm.Vec3', 'openmm.Vec3', (['*position'], {}), '(*position)\n', (1548, 1559), False, 'from simtk import openmm\n'), ((5796, 5811), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (5804, 5811), True, 'import numpy as np\n'), ((7494, 7514), 'ufedmm.ufedmm._standardized', '_standardized', (['value'], {}), '(value)\n', (7507, 7514), False, 'from ufedmm.ufedmm import _standardized, _get_energy_function, _get_parameters\n'), ((13862, 13882), 'ufedmm.ufedmm._standardized', '_standardized', (['value'], {}), '(value)\n', (13875, 13882), False, 'from ufedmm.ufedmm import _standardized, _get_energy_function, _get_parameters\n'), ((3587, 3604), 'numpy.exp', 'np.exp', (['exponents'], {}), '(exponents)\n', (3593, 3604), True, 'import numpy as np\n'), ((5825, 5856), 'itertools.product', 'itertools.product', (['*bin_centers'], {}), '(*bin_centers)\n', (5842, 5856), False, 'import itertools\n'), ((3401, 3435), 'numpy.cos', 'np.cos', (['(v.factor * (v.centers - x))'], {}), '(v.factor * (v.centers - x))\n', (3407, 3435), True, 'import numpy as np\n'), ((7589, 7609), 'ufedmm.ufedmm._standardized', '_standardized', (['sigma'], {}), '(sigma)\n', (7602, 7609), False, 'from ufedmm.ufedmm import _standardized, _get_energy_function, _get_parameters\n'), ((13959, 13979), 'ufedmm.ufedmm._standardized', '_standardized', (['sigma'], {}), '(sigma)\n', (13972, 13979), False, 'from ufedmm.ufedmm import _standardized, _get_energy_function, _get_parameters\n'), ((7842, 7860), 'numpy.cos', 'np.cos', (['(factor * x)'], {}), '(factor * x)\n', (7848, 7860), True, 'import numpy as np\n'), ((7935, 7953), 'numpy.sin', 'np.sin', (['(factor * x)'], {}), '(factor * x)\n', (7941, 7953), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.python.framework import ops
from sklearn import metrics
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
def RandomForest(X_train, Y_train, X_test, Y_test) :
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(max_depth = 5, random_state=0)
clf.fit(X_train, Y_train)
Y_pred_test=clf.predict(X_test)
Y_pred_train=clf.predict(X_train)
Y_pred_probs = clf.predict_proba(X_test)
Y_pred_probs = Y_pred_probs[:,1]
fpr, tpr, thresholds = roc_curve(Y_test, Y_pred_probs)
return metrics.roc_auc_score(Y_test,Y_pred_probs),Y_pred_test,Y_pred_train,fpr,tpr,thresholds
#SVM_LinearSVC
def SVM(X_train, Y_train, X_test, Y_test) :
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
svm = LinearSVC()
clf = CalibratedClassifierCV(svm)
clf.fit(X_train,Y_train)
Y_pred_test = clf.predict(X_test)
Y_pred_train=clf.predict(X_train)
Y_pred_probs = clf.predict_proba(X_test)
Y_pred_probs = Y_pred_probs[:,1]
# generate_roc_curve(Y_test,Y_pred_test)
fpr, tpr, thresholds = roc_curve(Y_test, Y_pred_probs)
return metrics.roc_auc_score(Y_test,Y_pred_probs),Y_pred_test,Y_pred_train,fpr,tpr,thresholds
#1D-CNN
def make_feature_count_nine(data):
x = data.shape[1]
if x==9:
return data
y = data.shape[0]
b = np.zeros((y,9-x))
adjusted_data = np.hstack((data,b))
return adjusted_data
# For making sure that input data has 9 columns
def CNN_1D(X_train, Y_train, X_test, Y_test, EPOCHS = 50) :
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
X_train = make_feature_count_nine(X_train)
X_test = make_feature_count_nine(X_test)
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
print(X_train.shape)
print(X_test.shape)
model = models.Sequential()
model.add(layers.Conv1D(filters=4,kernel_size=2,strides=1,padding='same',activation='relu',input_shape=(9,1)))
model.add(layers.AveragePooling1D())
model.add(layers.Conv1D(8,2,activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(1,activation='sigmoid'))
model.compile(loss = 'binary_crossentropy',optimizer = "adam",metrics = [tf.keras.metrics.AUC()])
# model.summary()
history = model.fit(X_train, Y_train, epochs=EPOCHS,
validation_split = 0.2, verbose=0)
loss, auc = model.evaluate(X_test,Y_test, verbose=0)
# print("Testing set AUC: {:5.2f} ".format(auc))
Y_pred_probs = model.predict_proba(X_test)
Y_pred_train=model.predict(X_train).flatten()
Y_pred_test=model.predict(X_test).flatten()
fpr, tpr, thresholds = roc_curve(Y_test, Y_pred_probs)
# generate_roc_curve(Y_test,Y_pred)
return auc,Y_pred_test,Y_pred_train,fpr,tpr,thresholds
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0:
print('')
print('.', end='')
#Neural Network Function
def NN(X_train, Y_train, X_test, Y_test,EPOCHS) :
model = keras.Sequential([
layers.Dense(18,kernel_regularizer=keras.regularizers.l2(0.001), activation='relu', input_shape=[X_train.shape[1]]),
layers.Dense(15,kernel_regularizer=keras.regularizers.l2(0.001), activation='relu'),
layers.Dense(10,kernel_regularizer=keras.regularizers.l2(0.001), activation='relu'),
layers.Dense(5,kernel_regularizer=keras.regularizers.l2(0.001), activation='relu'),
layers.Dense(1,activation='sigmoid')
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss=tf.keras.losses.Poisson(),
optimizer=optimizer,
metrics=[tf.keras.metrics.AUC()])
history = model.fit(X_train, Y_train, epochs=EPOCHS,
validation_split = 0.2, verbose=0)
loss, auc = model.evaluate(X_test,Y_test, verbose=0)
# print("Testing set AUC: {:5.2f} ".format(auc))
Y_pred_test=model.predict(X_test).flatten()
Y_pred_train=model.predict(X_train).flatten()
Y_pred_probs = model.predict_proba(X_test)
# print(Y_pred_probs.shape)
# Y_pred_probs = Y_pred_probs[:,1]
fpr, tpr, thresholds = roc_curve(Y_test, Y_pred_probs)
# auc_test = metrics.roc_auc_score(Y_test,Y_pred_probs)
# print(auc_test)
# generate_roc_curve(Y_test,Y_pred)
return auc,Y_pred_test,Y_pred_train,fpr,tpr,thresholds
def NN_ensemble(X_train, Y_train, X_test, Y_test,EPOCHS) :
model = keras.Sequential([
layers.Dense(5,kernel_regularizer=keras.regularizers.l2(0.001), activation='relu', input_shape=[X_train.shape[1]]),
layers.Dense(2,kernel_regularizer=keras.regularizers.l2(0.001), activation='relu'),
layers.Dense(1,activation='sigmoid')
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss=tf.keras.losses.Poisson(),
optimizer=optimizer,
metrics=[tf.keras.metrics.AUC()])
history = model.fit(X_train, Y_train, epochs=EPOCHS,
validation_split = 0.2, verbose=0)
loss, auc = model.evaluate(X_test,Y_test, verbose=0)
# print("Testing set AUC: {:5.2f} ".format(auc))
# Y_pred=model.predict(X_test).flatten()
Y_pred_probs = model.predict_proba(X_test)
fpr, tpr, thresholds = roc_curve(Y_test, Y_pred_probs)
# generate_roc_curve(Y_test,Y_pred)
return auc, fpr, tpr, thresholds
# K- nearest neighbors
def KNN(X_train,Y_train,X_test,Y_test) :
from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier()
neigh.fit(X_train, Y_train)
Y_pred_test=neigh.predict(X_test)
Y_pred_train=neigh.predict(X_train)
# generate_roc_curve(Y_test,Y_pred)
Y_pred_probs = neigh.predict_proba(X_test)
# generate_roc_curve(Y_test,Y_pred_test)
Y_pred_probs = Y_pred_probs[:,1]
fpr, tpr, thresholds = roc_curve(Y_test, Y_pred_probs)
return metrics.roc_auc_score(Y_test,Y_pred_probs),Y_pred_test,Y_pred_train,fpr,tpr,thresholds
# Naive Bayes
def NB(X_train,Y_train,X_test,Y_test) :
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(X_train,Y_train)
Y_pred_test = clf.predict(X_test)
Y_pred_train = clf.predict(X_train)
Y_pred_probs = clf.predict_proba(X_test)
# generate_roc_curve(Y_test,Y_pred_test)
Y_pred_probs = Y_pred_probs[:,1]
fpr, tpr, thresholds = roc_curve(Y_test, Y_pred_probs)
return metrics.roc_auc_score(Y_test,Y_pred_probs),Y_pred_test,Y_pred_train,fpr,tpr,thresholds
# Logistic Regression
def Logistic_Regression(X_train,Y_train,X_test,Y_test) :
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(X_train,Y_train)
Y_pred_test = clf.predict(X_test)
Y_pred_train = clf.predict(X_train)
Y_pred_probs = clf.predict_proba(X_test)
Y_pred_probs = Y_pred_probs[:,1]
# generate_roc_curve(Y_test,Y_pred_test)
fpr, tpr, thresholds = roc_curve(Y_test, Y_pred_probs)
return metrics.roc_auc_score(Y_test,Y_pred_probs),Y_pred_test,Y_pred_train,fpr,tpr,thresholds
# XGBoost
def XGBoosting(X_train, Y_train, X_test, Y_test):
    """Fit an XGBoost classifier and evaluate it on the test split.

    Returns a 6-tuple: (test ROC AUC, test class predictions, train class
    predictions, fpr, tpr, thresholds).
    """
    from xgboost import XGBClassifier

    booster = XGBClassifier()
    booster.fit(X_train, Y_train)
    Y_pred_test = booster.predict(X_test)
    Y_pred_train = booster.predict(X_train)
    # Probability of the positive class on the test set.
    pos_probs = booster.predict_proba(X_test)[:, 1]
    fpr, tpr, thresholds = roc_curve(Y_test, pos_probs)
    return (metrics.roc_auc_score(Y_test, pos_probs),
            Y_pred_test, Y_pred_train, fpr, tpr, thresholds)
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.regularizers.l2",
"xgboost.XGBClassifier",
"sklearn.svm.LinearSVC",
"sklearn.ensemble.RandomForestClassifier",
"tensorflow.keras.metrics.... | [((401, 452), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(5)', 'random_state': '(0)'}), '(max_depth=5, random_state=0)\n', (423, 452), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((670, 701), 'sklearn.metrics.roc_curve', 'roc_curve', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (679, 701), False, 'from sklearn.metrics import roc_curve\n'), ((974, 985), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (983, 985), False, 'from sklearn.svm import LinearSVC\n'), ((996, 1023), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['svm'], {}), '(svm)\n', (1018, 1023), False, 'from sklearn.calibration import CalibratedClassifierCV\n'), ((1284, 1315), 'sklearn.metrics.roc_curve', 'roc_curve', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (1293, 1315), False, 'from sklearn.metrics import roc_curve\n'), ((1533, 1553), 'numpy.zeros', 'np.zeros', (['(y, 9 - x)'], {}), '((y, 9 - x))\n', (1541, 1553), True, 'import numpy as np\n'), ((1569, 1589), 'numpy.hstack', 'np.hstack', (['(data, b)'], {}), '((data, b))\n', (1578, 1589), True, 'import numpy as np\n'), ((2089, 2108), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (2106, 2108), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2894, 2925), 'sklearn.metrics.roc_curve', 'roc_curve', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (2903, 2925), False, 'from sklearn.metrics import roc_curve\n'), ((3778, 3812), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', (['(0.001)'], {}), '(0.001)\n', (3805, 3812), True, 'import tensorflow as tf\n'), ((4435, 4466), 'sklearn.metrics.roc_curve', 'roc_curve', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (4444, 4466), False, 'from sklearn.metrics import roc_curve\n'), ((5035, 5069), 'tensorflow.keras.optimizers.RMSprop', 
'tf.keras.optimizers.RMSprop', (['(0.001)'], {}), '(0.001)\n', (5062, 5069), True, 'import tensorflow as tf\n'), ((5563, 5594), 'sklearn.metrics.roc_curve', 'roc_curve', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (5572, 5594), False, 'from sklearn.metrics import roc_curve\n'), ((5807, 5829), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (5827, 5829), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((6137, 6168), 'sklearn.metrics.roc_curve', 'roc_curve', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (6146, 6168), False, 'from sklearn.metrics import roc_curve\n'), ((6382, 6394), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (6392, 6394), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((6657, 6688), 'sklearn.metrics.roc_curve', 'roc_curve', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (6666, 6688), False, 'from sklearn.metrics import roc_curve\n'), ((6935, 6955), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (6953, 6955), False, 'from sklearn.linear_model import LogisticRegression\n'), ((7218, 7249), 'sklearn.metrics.roc_curve', 'roc_curve', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (7227, 7249), False, 'from sklearn.metrics import roc_curve\n'), ((7457, 7472), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '()\n', (7470, 7472), False, 'from xgboost import XGBClassifier\n'), ((7734, 7765), 'sklearn.metrics.roc_curve', 'roc_curve', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (7743, 7765), False, 'from sklearn.metrics import roc_curve\n'), ((718, 761), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (739, 761), False, 'from sklearn import metrics\n'), ((1327, 1370), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', 
(1348, 1370), False, 'from sklearn import metrics\n'), ((2121, 2230), 'tensorflow.keras.layers.Conv1D', 'layers.Conv1D', ([], {'filters': '(4)', 'kernel_size': '(2)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(9, 1)'}), "(filters=4, kernel_size=2, strides=1, padding='same',\n activation='relu', input_shape=(9, 1))\n", (2134, 2230), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2234, 2259), 'tensorflow.keras.layers.AveragePooling1D', 'layers.AveragePooling1D', ([], {}), '()\n', (2257, 2259), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2273, 2311), 'tensorflow.keras.layers.Conv1D', 'layers.Conv1D', (['(8)', '(2)'], {'activation': '"""relu"""'}), "(8, 2, activation='relu')\n", (2286, 2311), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2323, 2339), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2337, 2339), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2353, 2390), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2365, 2390), False, 'from tensorflow.keras import datasets, layers, models\n'), ((6180, 6223), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (6201, 6223), False, 'from sklearn import metrics\n'), ((6700, 6743), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (6721, 6743), False, 'from sklearn import metrics\n'), ((7261, 7304), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (7282, 7304), False, 'from sklearn import metrics\n'), ((7777, 7820), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['Y_test', 'Y_pred_probs'], {}), '(Y_test, Y_pred_probs)\n', (7798, 7820), False, 'from sklearn import 
metrics\n'), ((3712, 3749), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3724, 3749), False, 'from tensorflow.keras import datasets, layers, models\n'), ((3837, 3862), 'tensorflow.keras.losses.Poisson', 'tf.keras.losses.Poisson', ([], {}), '()\n', (3860, 3862), True, 'import tensorflow as tf\n'), ((4969, 5006), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4981, 5006), False, 'from tensorflow.keras import datasets, layers, models\n'), ((5094, 5119), 'tensorflow.keras.losses.Poisson', 'tf.keras.losses.Poisson', ([], {}), '()\n', (5117, 5119), True, 'import tensorflow as tf\n'), ((2466, 2488), 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {}), '()\n', (2486, 2488), True, 'import tensorflow as tf\n'), ((3930, 3952), 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {}), '()\n', (3950, 3952), True, 'import tensorflow as tf\n'), ((5187, 5209), 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {}), '()\n', (5207, 5209), True, 'import tensorflow as tf\n'), ((3344, 3372), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['(0.001)'], {}), '(0.001)\n', (3365, 3372), False, 'from tensorflow import keras\n'), ((3469, 3497), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['(0.001)'], {}), '(0.001)\n', (3490, 3497), False, 'from tensorflow import keras\n'), ((3562, 3590), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['(0.001)'], {}), '(0.001)\n', (3583, 3590), False, 'from tensorflow import keras\n'), ((3654, 3682), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['(0.001)'], {}), '(0.001)\n', (3675, 3682), False, 'from tensorflow import keras\n'), ((4787, 4815), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['(0.001)'], {}), '(0.001)\n', (4808, 4815), False, 'from tensorflow import keras\n'), ((4911, 4939), 
'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['(0.001)'], {}), '(0.001)\n', (4932, 4939), False, 'from tensorflow import keras\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["plot_bpt"]
import numpy as np
import pkg_resources
from .kewley import (NII_OIII_agn_lim, NII_OIII_sf_lim,
OI_OIII_agn_lim, SII_OIII_agn_lim)
import matplotlib.pyplot as plt
import matplotlib.colors as mpl_colors
from matplotlib import cm as cmx
def load_spec():
    """Load the packaged SDSS line-strength table and derive log line ratios.

    Reads ``data/sdss_data_ls.npz`` shipped with the package, selects the
    rows whose ``lineindex_cln`` equals 4 or 5, and returns a dict holding
    every raw array from the file plus derived log10 line-ratio arrays
    (``log_NII_Ha``, ``log_OIII_Hb``, ...), the Balmer ratio ``HaHb`` and
    the ``R23`` index, all restricted to the selected rows.

    :returns: dict mapping key name -> ndarray
    """
    linefile = pkg_resources.resource_filename(__name__, "data/sdss_data_ls.npz")
    data = np.load(linefile)
    # Rows used for the derived quantities (lineindex_cln of 4 or 5).
    i, = np.where((data['lineindex_cln'] == 4) | (data['lineindex_cln'] == 5))
    # Copy every raw array over.  .items() replaces the Python-2-only
    # .iteritems(), which does not exist on NpzFile under Python 3.
    outdata = dict(data.items())

    def logify(a, b):
        """Return log10(a/b) with divide/invalid warnings silenced,
        or None if either operand is None."""
        if a is None or b is None:
            return None
        # Temporarily silence numpy floating-point warnings, then restore
        # the previous error state (np.seterr(all=None) left them silenced
        # globally in the original).
        old_settings = np.seterr(all="ignore")
        try:
            return np.log10(a / b)
        finally:
            np.seterr(**old_settings)

    # Computed once here (the original assigned this key twice identically).
    outdata['log_OIII_OII'] = logify(data['strength_OIII'][i], data['strength_OII'][i])
    outdata['log_NII_OII'] = logify(data['strength_NII'][i], data['strength_OII'][i])
    outdata['log_OIII_Hb'] = logify(data['strength_OIII'][i], data['strength_Hb'][i])
    outdata['log_OIIIb_Hb'] = logify(data['strength_OIIIb'][i], data['strength_Hb'][i])
    outdata['log_NII_Ha'] = logify(data['strength_NII'][i], data['strength_Ha'][i])
    outdata['log_NIIb_Ha'] = logify(data['strength_NIIb'][i], data['strength_Ha'][i])
    outdata['log_SII_Ha'] = logify(data['strength_SII'][i], data['strength_Ha'][i])
    outdata['log_OI_Ha'] = logify(data['strength_OI'][i], data['strength_Ha'][i])
    outdata['log_OIa_Ha'] = logify(data['strength_OIa'][i], data['strength_Ha'][i])
    outdata['log_OII_Ha'] = logify(data['strength_OII'][i], data['strength_Ha'][i])
    outdata['HaHb'] = data['strength_Ha'][i]/data['strength_Hb'][i]
    outdata['R23'] = np.log10((data['strength_OII'][i] + data['strength_OIII'][i])/data['strength_Hb'][i])
    return outdata
def get_line_ratio(data, line_ratio, **kwargs):
    """Map a line-ratio identifier onto the (x, y) dict keys for a BPT plot.

    :param data: unused; kept for interface compatibility with callers
    :param line_ratio: one of 'NII', 'NIIb', 'SII', 'OI', 'OIa', 'OII', 'R23'
    :param kwargs: ``both_OIII=True`` forces the y axis key to 'log_OIII_Hb'
    :returns: (xratio, yratio) key names into the dict from load_spec()
    """
    if line_ratio == 'OII':
        # NII/OII versus OIII/OII diagram.
        xratio, yratio = 'log_NII_OII', 'log_OIII_OII'
    elif line_ratio == 'R23':
        xratio, yratio = 'R23', 'log_OIII_OII'
    elif line_ratio == 'OI':
        xratio, yratio = 'log_OIa_Ha', 'log_OIIIb_Hb'
    else:
        xratio = 'log_{}_Ha'.format(line_ratio)
        # Identifiers ending in 'a'/'b' pair with the 'log_OIIIb_Hb' axis.
        if line_ratio[-1] in ('a', 'b'):
            yratio = 'log_OIIIb_Hb'
        else:
            yratio = 'log_OIII_Hb'
    # Applies to every branch above, matching the original control flow.
    if kwargs.get('both_OIII', False):
        yratio = 'log_OIII_Hb'
    return xratio, yratio
def plot_bpt(var_label, ax=None, color_code=False, line_ratio='NIIb', **kwargs):
    '''
    Plot a BPT-style line-ratio diagram from the packaged SDSS data.

    Example: sdss.plot_bpt(True)
    SDSS data generated with astroML.fetch_corrected_sdss_spectra()

    :param var_label: if truthy, label the data series (default label 'SDSS',
                      override with kwargs['lab']); otherwise hide the legend entry
    :param ax: matplotlib axes to draw on; a new figure/axes is created if None
    :param color_code: if True, colour points by kwargs['color_by']
                       ('bpt' classification index, or 'HaHb' Balmer ratio)
    :param line_ratio: which diagram to draw; one of
                       'NII', 'NIIb', 'SII', 'OI', 'OIa', 'OII', 'R23'
    :param kwargs: forwarded to get_line_ratio (e.g. both_OIII)
    '''
    assert line_ratio in ['NII','NIIb','SII','OI', 'OIa', 'OII', 'R23']
    if var_label:
        lab = kwargs.get('lab', 'SDSS')
    else:
        lab = '__nolegend__'
    data = load_spec()
    lineindex_cln = 'lineindex_cln'
    # NOTE(review): load_spec already restricted the ratio arrays to rows with
    # lineindex_cln 4/5, but data[lineindex_cln] is the unfiltered array; il
    # re-derives the selection for the colour values -- confirm the indexing
    # of data[lineindex_cln][il] lines up with data[xratio]/data[yratio].
    il = np.where((data['lineindex_cln'] == 4) | (data['lineindex_cln'] == 5))
    xratio, yratio = get_line_ratio(data, line_ratio, **kwargs)
    if ax is None:
        plt.figure()
        ax = plt.gca()
    if color_code:
        color_by = kwargs.get('color_by', 'bpt')
        if color_by == 'bpt':
            # Colour each point by its classification index.
            ax.scatter(data[xratio], data[yratio],
                       c=data[lineindex_cln][il], s=9, lw=0,
                       label=lab)
        elif color_by == 'HaHb':
            # Colour by the Balmer ratio, clipping extreme values (> 15).
            gi, = np.where(data[color_by] <= 15.)
            sM = retColors(data[color_by][gi], cname='gist_heat')
            for g in gi:
                # Only the first point carries the legend label.
                if g == gi[0]:
                    plab = lab
                else:
                    plab = '__nolegend__'
                ax.plot(data[xratio][g], data[yratio][g],
                        color=sM.to_rgba(data[color_by][g]),
                        marker='.', markersize=6, label=plab)
            fig = plt.gcf()
            cb = fig.colorbar(sM)
            cb.set_label(r'$H \alpha / H\beta$')
    else:
        # Plain monochrome scatter.
        ax.plot(data[xratio], data[yratio], 'o',
                markersize=2.0, color='k', alpha=0.5, label=lab)
    # Overlay the Kewley demarcation curves and fix axis limits per diagram.
    if line_ratio[0] == 'N':
        NII_OIII_agn_lim(ax=ax)
        NII_OIII_sf_lim(ax=ax)
        ax.set_xlim(-2.0, 1.0)
        ax.set_ylim(-1.2, 1.5)
    if line_ratio[0] == 'S':
        SII_OIII_agn_lim(ax=ax)
        ax.set_xlim(-2.0, 0.3)
        ax.set_ylim(-1.2, 1.5)
    if (line_ratio == 'OI' or line_ratio == 'OIa'):
        OI_OIII_agn_lim(ax=ax)
        ax.set_xlim(-2.0, 0.0)
        ax.set_ylim(-1.2, 1.5)
    if (line_ratio == 'OII'):
        ax.set_ylim(-2.0, 1.0)
        ax.set_xlim(-1.3, 1.3)
    return
def retColors(vals, cname='CMRmap', minv=0.05, maxv=0.8, cmap=None,
              set_bad_vals=False, return_cNorm=False, logNorm=False):
    '''
    Build a ScalarMappable over a truncated slice of a colormap.

    sM = get_colors(arr, cname='jet', minv=0.0, maxv=1.0)
    sM = get_colors(arr, cmap=cubehelix.cmap())

    :param vals: array whose min/max set the normalisation range
    :param cname: colormap name looked up when ``cmap`` is not given
    :param minv, maxv: fraction of the base colormap to keep
    :param cmap: explicit base colormap, overrides ``cname``
    :param set_bad_vals: paint masked/bad values white
    :param return_cNorm: also return the Normalize instance
    :param logNorm: use a logarithmic instead of linear normalisation
    '''
    base = plt.get_cmap(cname) if cmap is None else cmap
    # Sample the [minv, maxv] slice of the base map into a new colormap.
    truncated = mpl_colors.LinearSegmentedColormap.from_list(
        'trunc({0}, {1:.2f}, {2:.2f})'.format(base.name, minv, maxv),
        base(np.linspace(minv, maxv, 100)))
    if set_bad_vals:
        truncated.set_bad('white', alpha=1.0)
    norm_cls = mpl_colors.LogNorm if logNorm else mpl_colors.Normalize
    cNorm = norm_cls(vmin=vals.min(), vmax=vals.max())
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=truncated)
    if return_cNorm:
        return scalarMap, cNorm
    scalarMap.set_array(vals)
    return scalarMap
| [
"numpy.load",
"matplotlib.pyplot.get_cmap",
"numpy.seterr",
"matplotlib.cm.ScalarMappable",
"pkg_resources.resource_filename",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.linspace",
"matplotlib.pyplot.gca",
"numpy.log10",
"matplotlib.pyplot.gcf"
] | [((468, 534), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""data/sdss_data_ls.npz"""'], {}), "(__name__, 'data/sdss_data_ls.npz')\n", (499, 534), False, 'import pkg_resources\n'), ((546, 563), 'numpy.load', 'np.load', (['linefile'], {}), '(linefile)\n', (553, 563), True, 'import numpy as np\n'), ((573, 642), 'numpy.where', 'np.where', (["((data['lineindex_cln'] == 4) | (data['lineindex_cln'] == 5))"], {}), "((data['lineindex_cln'] == 4) | (data['lineindex_cln'] == 5))\n", (581, 642), True, 'import numpy as np\n'), ((1988, 2080), 'numpy.log10', 'np.log10', (["((data['strength_OII'][i] + data['strength_OIII'][i]) / data['strength_Hb'][i])"], {}), "((data['strength_OII'][i] + data['strength_OIII'][i]) / data[\n 'strength_Hb'][i])\n", (1996, 2080), True, 'import numpy as np\n'), ((3319, 3388), 'numpy.where', 'np.where', (["((data['lineindex_cln'] == 4) | (data['lineindex_cln'] == 5))"], {}), "((data['lineindex_cln'] == 4) | (data['lineindex_cln'] == 5))\n", (3327, 3388), True, 'import numpy as np\n'), ((5730, 5775), 'matplotlib.cm.ScalarMappable', 'cmx.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'new_cmap'}), '(norm=cNorm, cmap=new_cmap)\n', (5748, 5775), True, 'from matplotlib import cm as cmx\n'), ((3481, 3493), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3491, 3493), True, 'import matplotlib.pyplot as plt\n'), ((3507, 3516), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3514, 3516), True, 'import matplotlib.pyplot as plt\n'), ((5304, 5323), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cname'], {}), '(cname)\n', (5316, 5323), True, 'import matplotlib.pyplot as plt\n'), ((840, 863), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (849, 863), True, 'import numpy as np\n'), ((888, 903), 'numpy.log10', 'np.log10', (['(a / b)'], {}), '(a / b)\n', (896, 903), True, 'import numpy as np\n'), ((914, 933), 'numpy.seterr', 'np.seterr', ([], {'all': 'None'}), 
'(all=None)\n', (923, 933), True, 'import numpy as np\n'), ((5451, 5479), 'numpy.linspace', 'np.linspace', (['minv', 'maxv', '(100)'], {}), '(minv, maxv, 100)\n', (5462, 5479), True, 'import numpy as np\n'), ((3812, 3844), 'numpy.where', 'np.where', (['(data[color_by] <= 15.0)'], {}), '(data[color_by] <= 15.0)\n', (3820, 3844), True, 'import numpy as np\n'), ((4264, 4273), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4271, 4273), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import math
from RootSift import RootSIFT
import img_tags
def get_img_res(test_img):
    """Locate ``test_img`` among the 84 reference images in
    ``Main_Database/SIT1stfloor`` using RootSIFT descriptors and FLANN
    matching, and return ``[best_index, tag]`` where the tag is looked up
    in ``img_tags.img_tags``.

    On matching failure a list starting with -1 is returned.
    NOTE(review): the failure returns have 3 elements while the success
    return has 2 -- callers must handle both shapes.
    """
    # `count` is incremented while loading but never read afterwards.
    count=0
    match_error = False
    # Descriptors of the query image (RootSIFT = SIFT + Hellinger kernel).
    sift = cv2.xfeatures2d.SIFT_create()
    kp2, des2 = sift.detectAndCompute(test_img,None)
    rs = RootSIFT()
    kp2, des2 = rs.compute(test_img,kp2)
    # Load the 84 database images from disk.
    train_img = []
    for i in range(0,84):
        fp = "Main_Database/SIT1stfloor/"+str(i)+".jpg"
        frame = cv2.imread(fp)
        train_img.append(frame)
        print("All images read")
        count+=1
    # find the keypoints and descriptors with SIFT
    # Pass 1: score every database image against the query.
    score_list=[]
    img_count=0
    for img in train_img :
        img_count+=1
        # gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        kp1, des1 = sift.detectAndCompute(img,None)
        kp1, des1 = rs.compute(img,kp1)
        # FLANN parameters
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 2)
        search_params = dict(checks=50)   # or pass empty dictionary
        flann = cv2.FlannBasedMatcher(index_params,search_params)
        try:
            # match_error is flipped around knnMatch so the finally block
            # can tell whether the call raised.
            match_error = True
            matches = flann.knnMatch(des1,des2,k=2)
            match_error = False
        except:
            print("No location found")
        finally:
            if (match_error):
                return [-1, "Could not locate.", -1]
        # Need to draw only good matches, so create a mask
        matchesMask = [[0,0] for i in range(len(matches))]
        a=1.5 # arbitrary positive real number to calculate the score
        # ratio test as per Lowe's paper
        Dlist=[]
        for i,(m,n) in enumerate(matches):
            if m.distance < 0.7*n.distance:
                matchesMask[i]=[1,0]
                # score.append(math.exp(-a*m.distance))
                Dlist.append(m.distance)
        # sum_score=sum(score)
        # avg_score=sum_score/(len(score)+1)
        # Score: number of ratio-test survivors divided by the worst
        # surviving distance (0.0 when nothing survives).
        if len(Dlist)==0 :
            score=0.0
        else:
            score=len(Dlist)/(max(Dlist)+0.0001)
        #print("img :",img_count," score :",score)
        # avg_score_list.append(avg_score)
        score_list.append(score)
    score_list=np.array(score_list)
    # score_list2 = sorted(score_list,reverse=True)
    # Indices of the 15 best-scoring images, best first.
    max_indices=score_list.argsort()[-15:][::-1]
    # mx_scores=[]
    # for ind in max_indices :
    #     mx_scores.append(score_list[ind])
    # mx_scores=np.array(mx_scores)
    score_list2 = score_list[max_indices]
    # print(max_indices)
    print('==== CALC 1 BEST MATCH ',max_indices[0])
    # test_gray is computed but never used afterwards.
    test_gray = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
    # cv2.imshow('test image',test_img)
    # cv2.waitKey(1000)
    # Pass 2: re-score the 15 candidates.
    SSIM_score=[]
    avg_filter_score=[]
    print('Max indices ', max_indices)
    for t in max_indices :
        kp2_1,des2_1=sift.detectAndCompute(train_img[t],None)
        kp2_1, des2_1 = rs.compute(train_img[t],kp2_1)
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 2)
        search_params = dict(checks=50)   # or pass empty dictionary
        flann = cv2.FlannBasedMatcher(index_params,search_params)
        try:
            matches = flann.knnMatch(des2_1,des2,k=2)
        except cv2.error as e:
            print('Error caught')
            return [-1, "Could not localize", 0]
        # Need to draw only good matches, so create a mask
        matchesMask = [[0,0] for i in range(len(matches))]
        a=0.5 # arbitrary positive real number to calculate the score
        # ratio test as per Lowe's paper
        filter_score=[]
        #print('Enumerate ',enumerate(matches))
        # for i,(m,n) in enumerate(matches):
            #print('m distance ',m.distance, ' n distance ',n.distance)
            # if m.distance < 0.7 * n.distance:
            #     # print('FS Coming')
            #     matchesMask[i]=[1,0]
            #     filter_score.append(math.exp(-a*m.distance))
        # NOTE(review): the ratio-test loop above is commented out, so
        # filter_score is always empty and every appended average is 0.0;
        # np.argmax below therefore returns the first candidate, i.e. the
        # pass-1 winner. Confirm whether pass 2 was meant to be active.
        avg_filter_score.append((sum(filter_score)/(len(filter_score)+0.0001)))
    avg_filter_score=np.array(avg_filter_score)
    # tot_score=np.add(avg_filter_score,mx_scores)
    #print(avg_filter_score)
    best_matched=np.argmax(avg_filter_score)
    print('==== CALC 2 ', avg_filter_score)
    #print(best_matched)
    # cv2.imshow('best image',train_img[max_indices[best_matched]])
    # cv2.waitKey(3000)
    # cv2.destroyAllWindows()
    # sorted_avg_score(_i) are computed but never used afterwards.
    sorted_avg_score_i = avg_filter_score.argsort()[::-1]
    sorted_avg_score = avg_filter_score[sorted_avg_score_i]
    target_img_index = max_indices[best_matched]
    print('==== IMG INDEX ', target_img_index)
    #max_score = avg_filter_score
    tag = 'Location Info. ' + img_tags.img_tags[str(target_img_index)]
    return [target_img_index, tag]
| [
"numpy.argmax",
"cv2.cvtColor",
"cv2.FlannBasedMatcher",
"cv2.imread",
"numpy.array",
"RootSift.RootSIFT",
"cv2.xfeatures2d.SIFT_create"
] | [((198, 227), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (225, 227), False, 'import cv2\n'), ((294, 304), 'RootSift.RootSIFT', 'RootSIFT', ([], {}), '()\n', (302, 304), False, 'from RootSift import RootSIFT\n'), ((2259, 2279), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (2267, 2279), True, 'import numpy as np\n'), ((2650, 2692), 'cv2.cvtColor', 'cv2.cvtColor', (['test_img', 'cv2.COLOR_BGR2GRAY'], {}), '(test_img, cv2.COLOR_BGR2GRAY)\n', (2662, 2692), False, 'import cv2\n'), ((4176, 4202), 'numpy.array', 'np.array', (['avg_filter_score'], {}), '(avg_filter_score)\n', (4184, 4202), True, 'import numpy as np\n'), ((4307, 4334), 'numpy.argmax', 'np.argmax', (['avg_filter_score'], {}), '(avg_filter_score)\n', (4316, 4334), True, 'import numpy as np\n'), ((465, 479), 'cv2.imread', 'cv2.imread', (['fp'], {}), '(fp)\n', (475, 479), False, 'import cv2\n'), ((1080, 1130), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (1101, 1130), False, 'import cv2\n'), ((3176, 3226), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (3197, 3226), False, 'import cv2\n')] |
# Smoke-test script for vessel_tracking.geometry: runs each helper on
# hand-built inputs and prints the results for manual inspection.
import sys
import os
sys.path.append(os.path.abspath('..'))
from vessel_tracking import geometry
import numpy as np
# Orthogonal-vector and perpendicular-plane helpers on two sample vectors.
v1 = np.array([1,0,0])
v2 = np.array([1,1,1])
print("orth: {}, {}".format(v1,geometry.orth(v1)))
print("orth: {}, {}".format(v2,geometry.orth(v2)))
print("perpendicular_plane: {}, {}".format(v1,geometry.perpendicular_plane(v1)))
print("perpendicular_plane: {}, {}".format(v2,geometry.perpendicular_plane(v2)))
#test projection
# Project point p into the plane through origin o spanned by q1, q2.
p = np.array([[1,1,1]])
o = np.array([0,0,0])
q1 = np.array([1,0,0])
q2 = np.array([0,1,0])
coeffs = geometry.project_points_into_plane(p,o,q1,q2)
print("project_points_into_plane {} {} {} {} {}".format(p,o,q1,q2,coeffs))
# Same projection with a shifted plane origin.
p = np.array([[1,1,1]])
o = np.array([2,2,2])
q1 = np.array([1,0,0])
q2 = np.array([0,1,0])
coeffs = geometry.project_points_into_plane(p,o,q1,q2)
print("project_points_into_plane {} {} {} {} {}".format(p,o,q1,q2,coeffs))
# Multiple points at once.
p = np.array([[1,1,1], [-1,-1,-1]])
o = np.array([2,2,2])
q1 = np.array([1,0,0])
q2 = np.array([0,1,0])
coeffs = geometry.project_points_into_plane(p,o,q1,q2)
print("project_points_into_plane {} {} {} {} {}".format(p,o,q1,q2,coeffs))
# Circle fits through three 2-D points (unit circle, radius 2, offset circle).
X = np.array(
    [
        [1,0,],
        [0,1,],
        [-1,0,]
    ]
)
print("fit_circle_3 {} {}".format(X, geometry.fit_circle_3(X)))
X = np.array(
    [
        [2,0],
        [0,2],
        [-2,0]
    ]
)
print("fit_circle_3 {} {}".format(X, geometry.fit_circle_3(X)))
X = np.array(
    [
        [4,0],
        [2,2],
        [0,0]
    ]
)
print("fit_circle_3 {} {}".format(X, geometry.fit_circle_3(X)))
# Cylinder fit through three 3-D points around axis d through origin o.
X = np.array(
    [
        [1,0,0.5],
        [0,1,-1],
        [np.sqrt(0.5), np.sqrt(0.5),1]
    ]
)
o = np.array([0,0,0])
d = np.array([0,0,1])
print("fit_cylinder_3 {} {} {} {}".format(X, o, d, geometry.fit_cylinder_3(X,o,d)))
# Same points, axis origin shifted along d.
X = np.array(
    [
        [1,0,0.5],
        [0,1,-1],
        [np.sqrt(0.5), np.sqrt(0.5),1]
    ]
)
o = np.array([0,0,2])
d = np.array([0,0,1])
print("fit_cylinder_3 {} {} {} {}".format(X, o, d, geometry.fit_cylinder_3(X,o,d)))
# Oblique axis: points are built explicitly from axis o/d and in-plane
# basis q1, q2 (q2 chosen perpendicular to d).
o = np.array([1,1,1])
d = np.array([0,1,1])
q1 = np.array([1,0,0])
q2 = np.array([0,-np.sqrt(0.5), np.sqrt(0.5)])
X = np.zeros((3,3))
X[0,:] = o + 0.5*d + 3*q1 + 4*q2
X[1,:] = o - 0.5*d + 5*q1
X[2,:] = o + 2*d - 4*q1 - 3*q2
print("fit_cylinder_3 {} {} {} {}".format(X, o, d, geometry.fit_cylinder_3(X,o,d)))
print("distance_in_plane {} {} {} {}".format(X,o,d, geometry.distance_in_plane(X,o,d)))
| [
"vessel_tracking.geometry.orth",
"os.path.abspath",
"vessel_tracking.geometry.fit_cylinder_3",
"vessel_tracking.geometry.perpendicular_plane",
"numpy.zeros",
"vessel_tracking.geometry.distance_in_plane",
"numpy.array",
"vessel_tracking.geometry.fit_circle_3",
"vessel_tracking.geometry.project_points... | [((123, 142), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (131, 142), True, 'import numpy as np\n'), ((146, 165), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (154, 165), True, 'import numpy as np\n'), ((454, 475), 'numpy.array', 'np.array', (['[[1, 1, 1]]'], {}), '([[1, 1, 1]])\n', (462, 475), True, 'import numpy as np\n'), ((479, 498), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (487, 498), True, 'import numpy as np\n'), ((502, 521), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (510, 521), True, 'import numpy as np\n'), ((525, 544), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (533, 544), True, 'import numpy as np\n'), ((553, 601), 'vessel_tracking.geometry.project_points_into_plane', 'geometry.project_points_into_plane', (['p', 'o', 'q1', 'q2'], {}), '(p, o, q1, q2)\n', (587, 601), False, 'from vessel_tracking import geometry\n'), ((681, 702), 'numpy.array', 'np.array', (['[[1, 1, 1]]'], {}), '([[1, 1, 1]])\n', (689, 702), True, 'import numpy as np\n'), ((706, 725), 'numpy.array', 'np.array', (['[2, 2, 2]'], {}), '([2, 2, 2])\n', (714, 725), True, 'import numpy as np\n'), ((729, 748), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (737, 748), True, 'import numpy as np\n'), ((752, 771), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (760, 771), True, 'import numpy as np\n'), ((780, 828), 'vessel_tracking.geometry.project_points_into_plane', 'geometry.project_points_into_plane', (['p', 'o', 'q1', 'q2'], {}), '(p, o, q1, q2)\n', (814, 828), False, 'from vessel_tracking import geometry\n'), ((908, 943), 'numpy.array', 'np.array', (['[[1, 1, 1], [-1, -1, -1]]'], {}), '([[1, 1, 1], [-1, -1, -1]])\n', (916, 943), True, 'import numpy as np\n'), ((945, 964), 'numpy.array', 'np.array', (['[2, 2, 2]'], {}), '([2, 2, 2])\n', (953, 964), True, 'import numpy as np\n'), ((968, 987), 
'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (976, 987), True, 'import numpy as np\n'), ((991, 1010), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (999, 1010), True, 'import numpy as np\n'), ((1019, 1067), 'vessel_tracking.geometry.project_points_into_plane', 'geometry.project_points_into_plane', (['p', 'o', 'q1', 'q2'], {}), '(p, o, q1, q2)\n', (1053, 1067), False, 'from vessel_tracking import geometry\n'), ((1147, 1182), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [-1, 0]]'], {}), '([[1, 0], [0, 1], [-1, 0]])\n', (1155, 1182), True, 'import numpy as np\n'), ((1257, 1292), 'numpy.array', 'np.array', (['[[2, 0], [0, 2], [-2, 0]]'], {}), '([[2, 0], [0, 2], [-2, 0]])\n', (1265, 1292), True, 'import numpy as np\n'), ((1364, 1398), 'numpy.array', 'np.array', (['[[4, 0], [2, 2], [0, 0]]'], {}), '([[4, 0], [2, 2], [0, 0]])\n', (1372, 1398), True, 'import numpy as np\n'), ((1543, 1562), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1551, 1562), True, 'import numpy as np\n'), ((1565, 1584), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1573, 1584), True, 'import numpy as np\n'), ((1746, 1765), 'numpy.array', 'np.array', (['[0, 0, 2]'], {}), '([0, 0, 2])\n', (1754, 1765), True, 'import numpy as np\n'), ((1768, 1787), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1776, 1787), True, 'import numpy as np\n'), ((1878, 1897), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (1886, 1897), True, 'import numpy as np\n'), ((1900, 1919), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (1908, 1919), True, 'import numpy as np\n'), ((1924, 1943), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (1932, 1943), True, 'import numpy as np\n'), ((1994, 2010), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2002, 2010), True, 'import numpy as np\n'), ((37, 58), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", 
(52, 58), False, 'import os\n'), ((196, 213), 'vessel_tracking.geometry.orth', 'geometry.orth', (['v1'], {}), '(v1)\n', (209, 213), False, 'from vessel_tracking import geometry\n'), ((247, 264), 'vessel_tracking.geometry.orth', 'geometry.orth', (['v2'], {}), '(v2)\n', (260, 264), False, 'from vessel_tracking import geometry\n'), ((314, 346), 'vessel_tracking.geometry.perpendicular_plane', 'geometry.perpendicular_plane', (['v1'], {}), '(v1)\n', (342, 346), False, 'from vessel_tracking import geometry\n'), ((395, 427), 'vessel_tracking.geometry.perpendicular_plane', 'geometry.perpendicular_plane', (['v2'], {}), '(v2)\n', (423, 427), False, 'from vessel_tracking import geometry\n'), ((1225, 1249), 'vessel_tracking.geometry.fit_circle_3', 'geometry.fit_circle_3', (['X'], {}), '(X)\n', (1246, 1249), False, 'from vessel_tracking import geometry\n'), ((1332, 1356), 'vessel_tracking.geometry.fit_circle_3', 'geometry.fit_circle_3', (['X'], {}), '(X)\n', (1353, 1356), False, 'from vessel_tracking import geometry\n'), ((1438, 1462), 'vessel_tracking.geometry.fit_circle_3', 'geometry.fit_circle_3', (['X'], {}), '(X)\n', (1459, 1462), False, 'from vessel_tracking import geometry\n'), ((1635, 1667), 'vessel_tracking.geometry.fit_cylinder_3', 'geometry.fit_cylinder_3', (['X', 'o', 'd'], {}), '(X, o, d)\n', (1658, 1667), False, 'from vessel_tracking import geometry\n'), ((1838, 1870), 'vessel_tracking.geometry.fit_cylinder_3', 'geometry.fit_cylinder_3', (['X', 'o', 'd'], {}), '(X, o, d)\n', (1861, 1870), False, 'from vessel_tracking import geometry\n'), ((1974, 1986), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (1981, 1986), True, 'import numpy as np\n'), ((2152, 2184), 'vessel_tracking.geometry.fit_cylinder_3', 'geometry.fit_cylinder_3', (['X', 'o', 'd'], {}), '(X, o, d)\n', (2175, 2184), False, 'from vessel_tracking import geometry\n'), ((2238, 2273), 'vessel_tracking.geometry.distance_in_plane', 'geometry.distance_in_plane', (['X', 'o', 'd'], {}), '(X, o, d)\n', (2264, 
2273), False, 'from vessel_tracking import geometry\n'), ((1504, 1516), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (1511, 1516), True, 'import numpy as np\n'), ((1518, 1530), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (1525, 1530), True, 'import numpy as np\n'), ((1707, 1719), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (1714, 1719), True, 'import numpy as np\n'), ((1721, 1733), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (1728, 1733), True, 'import numpy as np\n'), ((1960, 1972), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (1967, 1972), True, 'import numpy as np\n')] |
"""
Interpolator
============
cloud2cloud implementation and API.
"""
import numpy as np
from scipy import spatial
SMALL = 1e-16
class CloudInterpolator:
    """Precomputed nearest-neighbour weights between a source and a target cloud."""

    def __init__(self, source, target, limitsource=None, stencil=4, function=None):
        """
        Build the neighbour index and weight table.

        :param source: ndarray(dim_msh, points_sce) or tuple(ndarray(points_sce),...)
        :param target: ndarray(dim_msh, points_tgt) or tuple(ndarray(points_tgt),...)
        :param limitsource: the maximum number of points to use for the interpolation
        :param stencil: the number of neighbours to use for the interpolation
        :param function: determine the coefficient to give to each neighbours from their distance (default is linear)
        """
        if len(source) != len(target):
            print("Warning: source and target dim mismatch")
        total_points = source[0].size
        # Subsample the source cloud so at most `limitsource` points remain.
        self.skipper = 1
        if limitsource is not None and total_points > limitsource:
            self.skipper = 1 + (total_points - 1) // limitsource
        # Reshape to (points, dim) for the KD-tree, subsampling on the way.
        if isinstance(source, np.ndarray):
            cloud = np.stack(source[:, ::self.skipper], axis=1)
        else:
            cloud = np.stack([axis[::self.skipper] for axis in source], axis=1)
        queries = np.stack(target, axis=1)
        dists, self.index = spatial.cKDTree(cloud).query(queries, k=stencil)
        if stencil == 1:
            # Pure nearest-neighbour lookup: unit weights.
            self.index = self.index[:, None]
            self.weight = np.ones((self.index.size, 1))
            return
        if function is not None:
            dists = function(dists)
        # Inverse-distance weights, clamped away from zero, row-normalised.
        inverse = np.reciprocal(np.maximum(dists, 1e-16))
        self.weight = inverse / np.sum(inverse, axis=1)[:, None]

    def interp(self, data):
        """
        Interpolate data bewteen the source and target meshes.

        :param data: ndarray(points_sce, \\*shape_val)
        :returns: ndarray(points_tgt, \\*shape_val)
        """
        # Gather neighbour values, apply the precomputed weights, reduce.
        gathered = data[::self.skipper, ...][self.index]
        trailing = (1,) * (data.ndim - 1)
        gathered *= self.weight.reshape(self.weight.shape + trailing)
        return gathered.sum(axis=1)
def cloud2cloud(source_msh, source_val, target_msh, unroll_axis=None, verbose=False, **kwargs):
    r"""
    Interpolate source_val between source_msh and target_msh.
    :param source_msh: ndarray(\*shape_sce, dim_msh)
    :param source_val: ndarray(\*shape_sce, \*shape_val)
    :param target_msh: ndarray(\*shape_tgt, dim_msh)
    :param unroll_axis: if not None, interpolate one slice of this shape_val axis at a
        time, which bounds the memory used by the gather in interp()
    :param verbose: if True print shape information
    :param kwargs: key word arguments forwarded to CloudInterpolator
    :returns: ndarray(\*shape_tgt, \*shape_val)
    """
    # Note: the docstring is now a raw string; the previous non-raw "\*" escapes are
    # invalid escape sequences and raise warnings on recent Python versions.
    *shp_sce, dim_sce = source_msh.shape
    *shp_tgt, dim_tgt = target_msh.shape
    shp_sce, shp_tgt = tuple(shp_sce), tuple(shp_tgt)
    # The trailing axes of source_val (beyond the mesh axes) form the value shape.
    shp_val = source_val.shape[len(shp_sce):]
    n_p_sce = np.prod(shp_sce)
    n_p_tgt = np.prod(shp_tgt)
    if verbose:
        if dim_sce != dim_tgt:
            print("Warning: source and target dim mismatch")
        if source_val.shape[:len(shp_sce)] != shp_sce:
            print("Warning: Source and data mismatch")
        print("dim_sce:", dim_sce)
        print("dim_tgt:", dim_tgt)  # bug fix: previously printed dim_sce under this label
        print("shp_sce:", shp_sce)
        print("shp_tgt:", shp_tgt)
        print("shp_val:", shp_val)
        print("n_p_sce:", n_p_sce)
        print("n_p_tgt:", n_p_tgt)
    # Flatten the mesh axes so the interpolator sees plain point clouds.
    source_val = source_val.reshape(n_p_sce, *shp_val)
    source_msh = source_msh.reshape(n_p_sce, dim_sce)
    target_msh = target_msh.reshape(n_p_tgt, dim_tgt)
    if verbose:
        print("new shp_sce:", source_msh.shape)
        print("new shp_tgt:", target_msh.shape)
        print("new shp_val:", source_val.shape)
    base = CloudInterpolator(source_msh.T, target_msh.T, **kwargs)
    if verbose and base.skipper > 1:
        print("skipper:", base.skipper)
    if unroll_axis is None:
        return base.interp(source_val).reshape(*shp_tgt, *shp_val)
    else:
        axis_size = shp_val[unroll_axis]
        shp_partial_val = tuple(_ for i, _ in enumerate(shp_val) if i != unroll_axis)
        shp_partial_tgt = (slice(None),) * len(shp_tgt)
        shp_partial_pts = (slice(None),)
        result = np.empty((*shp_tgt, *shp_val))
        if verbose:
            print(axis_size, shp_val, shp_partial_val)
        # Interpolate one slice along the unrolled axis at a time.
        for i in range(axis_size):
            partial_indexes = tuple(i if j == unroll_axis else slice(None) for j in range(len(shp_val)))
            if verbose:
                print(i, partial_indexes, shp_partial_tgt + partial_indexes, shp_partial_pts + partial_indexes)
            result[shp_partial_tgt + partial_indexes] = base.interp(
                source_val[shp_partial_pts + partial_indexes]).reshape(*shp_tgt, *shp_partial_val)
        return result
| [
"numpy.stack",
"numpy.sum",
"numpy.maximum",
"numpy.empty",
"numpy.ones",
"scipy.spatial.cKDTree",
"numpy.prod"
] | [((2652, 2668), 'numpy.prod', 'np.prod', (['shp_sce'], {}), '(shp_sce)\n', (2659, 2668), True, 'import numpy as np\n'), ((2680, 2696), 'numpy.prod', 'np.prod', (['shp_tgt'], {}), '(shp_tgt)\n', (2687, 2696), True, 'import numpy as np\n'), ((1250, 1274), 'numpy.stack', 'np.stack', (['target'], {'axis': '(1)'}), '(target, axis=1)\n', (1258, 1274), True, 'import numpy as np\n'), ((1286, 1309), 'scipy.spatial.cKDTree', 'spatial.cKDTree', (['source'], {}), '(source)\n', (1301, 1309), False, 'from scipy import spatial\n'), ((1976, 2000), 'numpy.sum', 'np.sum', (['estimate'], {'axis': '(1)'}), '(estimate, axis=1)\n', (1982, 2000), True, 'import numpy as np\n'), ((3814, 3844), 'numpy.empty', 'np.empty', (['(*shp_tgt, *shp_val)'], {}), '((*shp_tgt, *shp_val))\n', (3822, 3844), True, 'import numpy as np\n'), ((1118, 1161), 'numpy.stack', 'np.stack', (['source[:, ::self.skipper]'], {'axis': '(1)'}), '(source[:, ::self.skipper], axis=1)\n', (1126, 1161), True, 'import numpy as np\n'), ((1181, 1238), 'numpy.stack', 'np.stack', (['[axe[::self.skipper] for axe in source]'], {'axis': '(1)'}), '([axe[::self.skipper] for axe in source], axis=1)\n', (1189, 1238), True, 'import numpy as np\n'), ((1436, 1465), 'numpy.ones', 'np.ones', (['(self.index.size, 1)'], {}), '((self.index.size, 1))\n', (1443, 1465), True, 'import numpy as np\n'), ((1565, 1589), 'numpy.maximum', 'np.maximum', (['dists', 'SMALL'], {}), '(dists, SMALL)\n', (1575, 1589), True, 'import numpy as np\n'), ((1602, 1623), 'numpy.sum', 'np.sum', (['dists'], {'axis': '(1)'}), '(dists, axis=1)\n', (1608, 1623), True, 'import numpy as np\n')] |
import os
import numpy as np
from strategies import RSI
from send_order_signal import SendOrderSignal
from binance.client import Client
from datetime import datetime
import time
signal = SendOrderSignal()
client = signal.get_client()

# Fetch the last two hours of one-minute ETH/GBP candles.
historicalData = client.get_historical_klines(
    'ETHGBP',
    Client.KLINE_INTERVAL_1MINUTE,
    '120 mins ago UTC'
)

# RSI strategy parameters.
config = {
    'period': 14,
    'overbought_limit': 70,
    'oversold_limit': 30
}
coinsOwned = False

closes = []
# Bug fixes: the output file is now closed deterministically via a context manager,
# and each record is terminated with a newline (previously every row was concatenated
# onto a single line and the handle was never closed).
with open('tmp.csv', 'w+') as f:
    for data in historicalData:
        print(data[6])
        # data[6] is the candle close timestamp in milliseconds; split into whole
        # seconds and the millisecond remainder for formatting.
        s, ms = divmod(int(data[6]), 1000)
        timestamp = '%s.%03d' % (time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(s)), ms)
        close = float(data[4])
        closes.append(close)
        # Recompute the RSI over the growing history of closes at every step.
        rsi = RSI().apply_indicator(np.array(closes), config, coinsOwned)
        f.write(f"{timestamp}|{close}|{rsi['results']['RSI Value']}|{rsi['decision']}\n")
| [
"strategies.RSI",
"time.gmtime",
"numpy.array",
"send_order_signal.SendOrderSignal"
] | [((188, 205), 'send_order_signal.SendOrderSignal', 'SendOrderSignal', ([], {}), '()\n', (203, 205), False, 'from send_order_signal import SendOrderSignal\n'), ((756, 772), 'numpy.array', 'np.array', (['closes'], {}), '(closes)\n', (764, 772), True, 'import numpy as np\n'), ((734, 739), 'strategies.RSI', 'RSI', ([], {}), '()\n', (737, 739), False, 'from strategies import RSI\n'), ((651, 665), 'time.gmtime', 'time.gmtime', (['s'], {}), '(s)\n', (662, 665), False, 'import time\n')] |
import cv2
import numpy as np
image1 = cv2.imread("log_3.png")
image2 = cv2.imread("log_4.png")

def logical_OR(img1, img2):
    """Compute the pixelwise bitwise OR of img1 and img2 and write it to output_or.png.

    Bug fix: the function previously ignored its img1/img2 parameters and always
    operated on the module-level globals image1/image2.
    """
    image_output = np.bitwise_or(img1, img2)
    cv2.imwrite('output_or.png', image_output)

if __name__ == "__main__":
    logical_OR(image1, image2)
"cv2.imread",
"numpy.bitwise_or",
"cv2.imwrite"
] | [((41, 64), 'cv2.imread', 'cv2.imread', (['"""log_3.png"""'], {}), "('log_3.png')\n", (51, 64), False, 'import cv2\n'), ((74, 97), 'cv2.imread', 'cv2.imread', (['"""log_4.png"""'], {}), "('log_4.png')\n", (84, 97), False, 'import cv2\n'), ((145, 174), 'numpy.bitwise_or', 'np.bitwise_or', (['image1', 'image2'], {}), '(image1, image2)\n', (158, 174), True, 'import numpy as np\n'), ((178, 220), 'cv2.imwrite', 'cv2.imwrite', (['"""output_or.png"""', 'image_output'], {}), "('output_or.png', image_output)\n", (189, 220), False, 'import cv2\n')] |
import torch
import torch.nn.functional as F
import torch.optim as optim
import end2you.utils as utils
import copy
import os
import numpy as np
import logging
import shutil
import sys
sys.path.append("..")
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from pathlib import Path
from tqdm import tqdm
from .losses import Losses
from end2you.base import BasePhase
from end2you.utils import Params
from end2you.evaluation import MetricProvider
from end2you.base_process import BaseProcess
class Trainer(BasePhase):
    """Runs the training/validation loop and checkpoints the best model."""

    def __init__(self,
                 loss:Losses,
                 evaluator:MetricProvider,
                 data_providers:dict,
                 summary_writers:dict,
                 root_dir:str,
                 model:nn.Module,
                 ckpt_path:str,
                 optimizer:torch.optim,
                 params:Params):
        """ Initialize trainer object class.
        Args:
            loss (Losses): The loss function to use.
            evaluator (MetricProvider): The evaluation function to use.
            data_providers (dict): The training/evaluation data providers.
            summary_writers (dict): The training/evaluation summary writers.
            root_dir (str): Directory path to save output files (e.g. models)
            model (nn.Module): Instance of a model to train.
            ckpt_path (str): Path to pre-train model.
            optimizer (torch.optim): Instance of an optimizer to use.
            params (Params): Rest of training parameters.
        """
        # During validation every batch contributes to the epoch summaries.
        params.valid.dict['save_summary_steps'] = 1
        self.params = params
        self.root_dir = Path(params.root_dir)
        self.root_dir.mkdir(parents=True, exist_ok=True)
        self.loss_fn = loss.loss_fn
        self.loss_name = loss.loss_name.upper()
        self.eval_fn = evaluator.eval_fn
        self.metric = evaluator.metric_name
        self.provider = data_providers
        self.summary_writer = summary_writers
        BaseProcess.set_logger('training.log')
        super().__init__(model, ckpt_path, optimizer)

    def start_training(self):
        """ Method that performs the training of the model.
        """
        logging.info("Starting training!")
        best_score = float('-inf')
        # When resuming from a checkpoint, start from its recorded validation score.
        if self.ckpt_path:
            ckpt = self.load_checkpoint()
            best_score = ckpt['validation_score']
            logging.info(f'Model\'s score: {best_score}')
        save_ckpt_path = self.root_dir / 'model'
        for epoch in range(self.params.train.num_epochs):
            # Run one epoch
            logging.info("Epoch {}/{}".format(epoch + 1, self.params.train.num_epochs))
            # compute number of batches in one epoch (one full pass over the training set)
            self._epoch_process(is_training=True)
            # Evaluate for one epoch on validation set
            with torch.no_grad():
                val_score = self._epoch_process(is_training=False)
            is_best = val_score >= best_score
            model_info = {
                'validation_score': val_score,
                'metric_name': f'{self.metric}',
                'loss_name': f'{self.loss_name}'
            }
            dict2save = {'epoch': epoch + 1,
                         'state_dict': self.model.state_dict(),
                         'optim_dict' : self.optimizer.state_dict()}
            dict2save.update(model_info)
            # Save weights
            self.save_checkpoint(dict2save,
                                 is_best=is_best,
                                 checkpoint=save_ckpt_path)
            # If best model save it.
            if is_best:
                logging.info(f"- Found new best model with mean {self.metric}: {val_score:05.3f}")
                best_score = val_score
                # Write best.txt file
                self._write_bestscore(best_score)
                # Save best val metrics in a json file in the model directory
                best_json_path = str(self.root_dir / "best_valid_scores.json")
                self._save_dict_to_json({self.metric:val_score}, str(best_json_path))
            # Save latest val metrics in a json file in the model directory
            last_json_path = str(self.root_dir / "last_valid_scores.json")
            self._save_dict_to_json({self.metric:val_score}, str(last_json_path))

    def _write_bestscore(self, best_score:float):
        """ Method to write best current score to a file.
        Args:
            best_score (float): Best score to save to `best_score.txt` file.
        """
        # Fix: use a context manager so the handle is closed even if write() raises
        # (previously the file was opened and closed manually with no error handling).
        # Annotation corrected from str to float: callers pass the numeric val_score.
        with open(str(self.root_dir / "best_score.txt"), "w+") as f:
            f.write(f"Best score: {best_score}")

    def save_checkpoint(self, state:dict, is_best:bool, checkpoint:str):
        """ Saves model and training parameters at checkpoint + 'last.pth.tar'.
        If is_best==True, also saves checkpoint + 'best.pth.tar'
        Args:
            state (dict): contains model's state_dict, and some other info of the model.
            is_best (bool): True if it is the best model seen till now.
            checkpoint (str): Folder to save model.
        """
        checkpoint = Path(checkpoint)
        checkpoint.mkdir(exist_ok=True)
        filepath = checkpoint / 'last.pth.tar'
        torch.save(state, str(filepath))
        if is_best:
            shutil.copyfile(filepath, str(checkpoint / 'best.pth.tar'))

    def _epoch_process(self, is_training:bool):
        """ Perform one epoch of training or evaluation.
            Depends on the argument `is_training`.
        """
        params = self.params.train if is_training else self.params.valid
        process = 'train' if is_training else 'valid'
        writer = self.summary_writer[process]
        provider = self.provider[process]
        label_names = provider.dataset._get_label_names()
        # DataParallel wraps the model, so its attributes live on `.module`.
        num_outs = self.model.num_outs if not isinstance(
            self.model, nn.DataParallel) else self.model.module.num_outs
        self.model.train(is_training)
        # summary for current training loop and a running average object for loss
        mean_loss = 0.0
        # Store all labels/predictions
        batch_labels = {str(x):[] for x in provider.dataset.label_names}
        batch_preds = {str(x):[] for x in provider.dataset.label_names}
        batch_masks = []
        bar_string = 'Training' if process == 'train' else 'Validating'
        # Use tqdm for progress bar
        with tqdm(total=len(provider)) as bar:
            bar.set_description(f'{bar_string} model')
            for n_iter, (model_input, labels, masked_samples, _) in enumerate(provider):
                input_dtype = model_input[0].dtype if isinstance(model_input, list) \
                    else model_input.dtype
                if is_training:
                    self.optimizer.zero_grad()
                # move to GPU if available
                if params.cuda:
                    model_input = [
                        x.cuda() for x in model_input] if isinstance(model_input, list) \
                        else model_input.cuda()
                    labels = labels.cuda()
                predictions = self.model(model_input)
                total_loss = nn.Parameter(
                    torch.tensor(0.0, dtype=input_dtype),
                    requires_grad=is_training)
                # Accumulate one loss term per label; when there is a single label the
                # slice spans all num_outs outputs.
                for o, name in enumerate(label_names):
                    sl = o
                    el = o + 1 if len(label_names) > 1 else o + num_outs
                    label_loss = self.loss_fn(
                        predictions[...,sl:el], labels[...,sl:el], masked_samples)
                    total_loss = total_loss + label_loss
                    # Write label summary
                    if n_iter % params.save_summary_steps == 0:
                        writer.add_scalar(f'{self.loss_name}_loss_{name}/', label_loss)
                total_loss /= num_outs
                mean_loss += total_loss
                if is_training:
                    total_loss.backward()
                    self.optimizer.step()
                # Evaluate summaries once in a while
                if n_iter % params.save_summary_steps == 0:
                    batch_loss = total_loss.item()
                    np_preds = predictions.data.cpu().numpy()
                    np_labels = labels.data.cpu().numpy()
                    batch_masks.extend(masked_samples)
                    for o, name in enumerate(label_names):
                        sl = o
                        el = o + 1 if len(label_names) > 1 else o + num_outs
                        batch_preds[name].extend(np_preds[...,sl:el])
                        batch_labels[name].extend(np_labels[...,sl:el])
                bar.set_postfix({self.loss_name+' loss':'{:05.3f}'.format(total_loss.item())})
                bar.update()
        scores = {}
        for i, name in enumerate(label_names):
            scores[name] = self.eval_fn(batch_preds[name], batch_labels[name], batch_masks)
        epoch_summaries = [scores]
        # Reseting parameters of the data provider
        provider.dataset.reset()
        # compute mean of all metrics in summary
        mean_scores = {
            label_name: np.mean([
                batch_sum[label_name] for batch_sum in epoch_summaries
            ])
            for label_name in scores.keys()
        }
        mean_loss /= (n_iter + 1)
        str_list_scores = [f'{label_name}: {mean_scores[label_name]:05.3f}'
                           for label_name in label_names]
        str_scores = ' - '.join(str_list_scores)
        str_scores = str_scores + f' || {self.loss_name} loss: {mean_loss:05.3f}'
        logging.info(f'* {process} results (wrt {self.metric}): {str_scores}\n')
        for label_name in label_names:
            writer.add_scalar(
                f'{process}_evaluation/{label_name}', mean_scores[label_name])
        writer.add_scalar('loss/', mean_loss)
        label_scores_mean = np.mean([
            mean_scores[label_name] for label_name in label_names])
        return label_scores_mean
| [
"sys.path.append",
"logging.info",
"pathlib.Path",
"numpy.mean",
"end2you.base_process.BaseProcess.set_logger",
"torch.no_grad",
"torch.tensor"
] | [((184, 205), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (199, 205), False, 'import sys\n'), ((1671, 1692), 'pathlib.Path', 'Path', (['params.root_dir'], {}), '(params.root_dir)\n', (1675, 1692), False, 'from pathlib import Path\n'), ((2039, 2077), 'end2you.base_process.BaseProcess.set_logger', 'BaseProcess.set_logger', (['"""training.log"""'], {}), "('training.log')\n", (2061, 2077), False, 'from end2you.base_process import BaseProcess\n'), ((2256, 2290), 'logging.info', 'logging.info', (['"""Starting training!"""'], {}), "('Starting training!')\n", (2268, 2290), False, 'import logging\n'), ((5421, 5437), 'pathlib.Path', 'Path', (['checkpoint'], {}), '(checkpoint)\n', (5425, 5437), False, 'from pathlib import Path\n'), ((10395, 10467), 'logging.info', 'logging.info', (['f"""* {process} results (wrt {self.metric}): {str_scores}\n"""'], {}), "(f'* {process} results (wrt {self.metric}): {str_scores}\\n')\n", (10407, 10467), False, 'import logging\n'), ((10709, 10773), 'numpy.mean', 'np.mean', (['[mean_scores[label_name] for label_name in label_names]'], {}), '([mean_scores[label_name] for label_name in label_names])\n', (10716, 10773), True, 'import numpy as np\n'), ((2466, 2510), 'logging.info', 'logging.info', (['f"""Model\'s score: {best_score}"""'], {}), '(f"Model\'s score: {best_score}")\n', (2478, 2510), False, 'import logging\n'), ((9917, 9982), 'numpy.mean', 'np.mean', (['[batch_sum[label_name] for batch_sum in epoch_summaries]'], {}), '([batch_sum[label_name] for batch_sum in epoch_summaries])\n', (9924, 9982), True, 'import numpy as np\n'), ((2983, 2998), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2996, 2998), False, 'import torch\n'), ((3830, 3917), 'logging.info', 'logging.info', (['f"""- Found new best model with mean {self.metric}: {val_score:05.3f}"""'], {}), "(\n f'- Found new best model with mean {self.metric}: {val_score:05.3f}')\n", (3842, 3917), False, 'import logging\n'), ((7752, 7788), 'torch.tensor', 
'torch.tensor', (['(0.0)'], {'dtype': 'input_dtype'}), '(0.0, dtype=input_dtype)\n', (7764, 7788), False, 'import torch\n')] |
"""
Conway game of life that simulates cellular automata and consists of the following rules:
* Any live cell with fewer than two live neighbors dies
* Any live cell with two or three neighbors lives
* Any live cell with more than three neighbors dies
* Any dead cell with exactly three neighbors comes to live
"""
import pycuda.autoinit
import pycuda.driver as drv
from pycuda import gpuarray
from pycuda.compiler import SourceModule
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
ker = SourceModule(
"""
#define _X ( threadIdx.x + blockIdx.x * blockDim.x )
#define _Y ( threadIdx.y + blockIdx.y * blockDim.y )
#define _WIDTH ( blockDim.x * gridDim.x )
#define _HEIGHT ( blockDim.y * gridDim.y )
#define _XM(x) ( (x + _WIDTH) % _WIDTH )
#define _YM(y) ( (y + _HEIGHT) % _HEIGHT )
#define _INDEX(x,y) ( _XM(x) + _YM(y) * _WIDTH )
// device function that reuturns a living number of neighbors for a given cell.
__device__ int nbrs(int x, int y, int * in)
{
return ( in[ _INDEX(x-1, y+1) ] + \
in[ _INDEX(x-1, y) ] + \
in[ _INDEX(x-1, y-1) ] + \
in[ _INDEX(x, y+1) ] + \
in[ _INDEX(x, y-1) ] + \
in[ _INDEX(x+1, y+1) ] + \
in[ _INDEX(x+1, y) ] + \
in[ _INDEX(x+1, y-1) ] );
}
__global__ void conway_ker(int* lattice_out, int* lattice)
{
// x, y are the appropriate values for the cell covered by this thread
int x = _X, y = _Y;
// count the number of neighbors around the current cell
int n = nbrs(x, y, lattice);
// if the current cell is alive, then determine if it lives or dies for the next generation.
if ( lattice[_INDEX(x, y)] == 1)
switch(n)
{
// if the cell is alive: it remains alive only if it has 2 or 3 neighbors.
case 2:
case 3:
lattice_out[_INDEX(x,y)] = 1;
break;
default: lattice_out[_INDEX(x,y)] = 0;
}
else if (lattice[_INDEX(x,y)] == 0)
switch(n)
{
// a dead cell comes to life only if it has 3 neighbors taht are alive.
case 3:
lattice_out[_INDEX(x,y)] = 1;
break;
default:
lattice_out[_INDEX(x,y)] = 0;
}
}
""")
conway_ker = ker.get_function("conway_ker")
def update_gpu(frameNum, img, newLattice_gpu, lattice_gpu, N):
    """Animation callback: advance the lattice one generation on the GPU.

    :param frameNum: frame index supplied by FuncAnimation (unused).
    :param img: the matplotlib AxesImage to refresh with the new generation.
    :param newLattice_gpu: GPU array receiving the next generation.
    :param lattice_gpu: GPU array holding the current generation.
    :param N: lattice side length; assumed to be a multiple of 32 so the
        (N/32, N/32) grid of 32x32 blocks covers every cell -- TODO confirm.
    """
    # One thread per cell: 32x32 threads per block, (N/32)x(N/32) blocks.
    conway_ker(newLattice_gpu, lattice_gpu, grid=(int(N/32), int(N/32), 1), block=(32,32,1))
    img.set_data(newLattice_gpu.get())
    # Copy the new generation back so it becomes the input of the next frame.
    lattice_gpu[:] = newLattice_gpu[:]
    return img
if __name__ == '__main__':
    # set lattice size
    N = 512
    # Random initial state: each cell starts alive with probability 0.25.
    lattice = np.int32( np.random.choice([1,0], N*N, p=[0.25, 0.75]).reshape(N, N))
    lattice_gpu = gpuarray.to_gpu(lattice)
    newLattice_gpu = gpuarray.empty_like(lattice_gpu)
    fig, ax = plt.subplots()
    img = ax.imshow(lattice_gpu.get(), interpolation='nearest')
    # Keep a reference to the animation so it is not garbage collected while shown.
    ani = animation.FuncAnimation(fig,
                            update_gpu,
                            fargs=(img, newLattice_gpu, lattice_gpu, N, ),
                            interval=1,
                            frames=1000,
                            save_count=1000)
    plt.show()
| [
"numpy.random.choice",
"pycuda.compiler.SourceModule",
"matplotlib.pyplot.show",
"pycuda.gpuarray.empty_like",
"matplotlib.animation.FuncAnimation",
"pycuda.gpuarray.to_gpu",
"matplotlib.pyplot.subplots"
] | [((536, 2327), 'pycuda.compiler.SourceModule', 'SourceModule', (['"""\n#define _X ( threadIdx.x + blockIdx.x * blockDim.x ) \n#define _Y ( threadIdx.y + blockIdx.y * blockDim.y )\n\n#define _WIDTH ( blockDim.x * gridDim.x )\n#define _HEIGHT ( blockDim.y * gridDim.y )\n\n#define _XM(x) ( (x + _WIDTH) % _WIDTH )\n#define _YM(y) ( (y + _HEIGHT) % _HEIGHT )\n\n#define _INDEX(x,y) ( _XM(x) + _YM(y) * _WIDTH )\n\n\n// device function that reuturns a living number of neighbors for a given cell.\n__device__ int nbrs(int x, int y, int * in)\n{\n return ( in[ _INDEX(x-1, y+1) ] + in[ _INDEX(x-1, y) ] + in[ _INDEX(x-1, y-1) ] + in[ _INDEX(x, y+1) ] + in[ _INDEX(x, y-1) ] + in[ _INDEX(x+1, y+1) ] + in[ _INDEX(x+1, y) ] + in[ _INDEX(x+1, y-1) ] );\n}\n\n__global__ void conway_ker(int* lattice_out, int* lattice)\n{\n // x, y are the appropriate values for the cell covered by this thread\n int x = _X, y = _Y;\n\n // count the number of neighbors around the current cell\n int n = nbrs(x, y, lattice);\n\n // if the current cell is alive, then determine if it lives or dies for the next generation.\n if ( lattice[_INDEX(x, y)] == 1)\n switch(n)\n {\n // if the cell is alive: it remains alive only if it has 2 or 3 neighbors.\n case 2:\n case 3:\n lattice_out[_INDEX(x,y)] = 1;\n break;\n default: lattice_out[_INDEX(x,y)] = 0;\n }\n else if (lattice[_INDEX(x,y)] == 0)\n switch(n)\n {\n // a dead cell comes to life only if it has 3 neighbors taht are alive.\n case 3:\n lattice_out[_INDEX(x,y)] = 1;\n break;\n default:\n lattice_out[_INDEX(x,y)] = 0;\n }\n}\n"""'], {}), '(\n """\n#define _X ( threadIdx.x + blockIdx.x * blockDim.x ) \n#define _Y ( threadIdx.y + blockIdx.y * blockDim.y )\n\n#define _WIDTH ( blockDim.x * gridDim.x )\n#define _HEIGHT ( blockDim.y * gridDim.y )\n\n#define _XM(x) ( (x + _WIDTH) % _WIDTH )\n#define _YM(y) ( (y + _HEIGHT) % _HEIGHT )\n\n#define _INDEX(x,y) ( _XM(x) + _YM(y) * _WIDTH )\n\n\n// device function that reuturns a living number of neighbors for a 
given cell.\n__device__ int nbrs(int x, int y, int * in)\n{\n return ( in[ _INDEX(x-1, y+1) ] + in[ _INDEX(x-1, y) ] + in[ _INDEX(x-1, y-1) ] + in[ _INDEX(x, y+1) ] + in[ _INDEX(x, y-1) ] + in[ _INDEX(x+1, y+1) ] + in[ _INDEX(x+1, y) ] + in[ _INDEX(x+1, y-1) ] );\n}\n\n__global__ void conway_ker(int* lattice_out, int* lattice)\n{\n // x, y are the appropriate values for the cell covered by this thread\n int x = _X, y = _Y;\n\n // count the number of neighbors around the current cell\n int n = nbrs(x, y, lattice);\n\n // if the current cell is alive, then determine if it lives or dies for the next generation.\n if ( lattice[_INDEX(x, y)] == 1)\n switch(n)\n {\n // if the cell is alive: it remains alive only if it has 2 or 3 neighbors.\n case 2:\n case 3:\n lattice_out[_INDEX(x,y)] = 1;\n break;\n default: lattice_out[_INDEX(x,y)] = 0;\n }\n else if (lattice[_INDEX(x,y)] == 0)\n switch(n)\n {\n // a dead cell comes to life only if it has 3 neighbors taht are alive.\n case 3:\n lattice_out[_INDEX(x,y)] = 1;\n break;\n default:\n lattice_out[_INDEX(x,y)] = 0;\n }\n}\n"""\n )\n', (548, 2327), False, 'from pycuda.compiler import SourceModule\n'), ((2802, 2826), 'pycuda.gpuarray.to_gpu', 'gpuarray.to_gpu', (['lattice'], {}), '(lattice)\n', (2817, 2826), False, 'from pycuda import gpuarray\n'), ((2848, 2880), 'pycuda.gpuarray.empty_like', 'gpuarray.empty_like', (['lattice_gpu'], {}), '(lattice_gpu)\n', (2867, 2880), False, 'from pycuda import gpuarray\n'), ((2896, 2910), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2908, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2986, 3117), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'update_gpu'], {'fargs': '(img, newLattice_gpu, lattice_gpu, N)', 'interval': '(1)', 'frames': '(1000)', 'save_count': '(1000)'}), '(fig, update_gpu, fargs=(img, newLattice_gpu,\n lattice_gpu, N), interval=1, frames=1000, save_count=1000)\n', (3009, 3117), True, 'import matplotlib.animation as 
animation\n'), ((3290, 3300), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3298, 3300), True, 'import matplotlib.pyplot as plt\n'), ((2724, 2771), 'numpy.random.choice', 'np.random.choice', (['[1, 0]', '(N * N)'], {'p': '[0.25, 0.75]'}), '([1, 0], N * N, p=[0.25, 0.75])\n', (2740, 2771), True, 'import numpy as np\n')] |
"""
This module contains models and fitters for resonators that are operated in transmission.
Fitting resonators in this configuration is more complicated than in the other configurations because the
off-resonance data goes to 0 instead of 1, while the on-resonance data goes to a value that depends on the losses.
Because there is no fixed reference point, more information must be provided in order to successfully fit the data. The
existing models are thus less-developed than those for the other configurations. The current limitations are
- the existing fitters all use hardcoded background models;
- the existing models assume that both ports have equal coupling losses;
- the Kerr nonlinear models are not yet implemented;
- the example notebooks have not been created yet.
If you need to fit resonators in this configuration, ask!
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from . import background, base, guess, linear
class AbstractSymmetricTransmission(base.ResonatorModel):
    """
    This class models a resonator operated in transmission. It assumes that two ports have equal coupling losses (or,
    equivalently, equal coupling quality factors).
    """
    # This is the peak value of the transmission on resonance when the internal loss is zero.
    reference_point = 0.5 + 0j
    # ToDo: verify
    # NOTE(review): presumably consumed by base.ResonatorModel to relate coupling loss
    # to input-output coupling; the ToDo above indicates this value is unverified.
    io_coupling_coefficient = 1
# Linear models and fitters
class LinearSymmetricTransmission(AbstractSymmetricTransmission):
    """
    This class models a linear resonator operated in transmission where the two ports have equal coupling losses (or,
    equivalently, equal coupling quality factors).
    The model parameters are the resonance frequency, the internal loss (defined as the inverse of the internal quality
    factor), and the coupling loss (defined as the sum of the inverses of the equal coupling quality factors). The
    total / loaded / resonator quality factor is
    Q = 1 / (internal_loss + coupling_loss).
    """
    def __init__(self, *args, **kwargs):
        """
        :param args: arguments passed directly to lmfit.model.Model.__init__().
        :param kwargs: keywords passed directly to lmfit.model.Model.__init__().
        """
        def symmetric_transmission(frequency, resonance_frequency, coupling_loss, internal_loss):
            # Fractional detuning from resonance; at detuning == 0 the model value is
            # 1 / (1 + internal_loss / coupling_loss).
            detuning = frequency / resonance_frequency - 1
            return 1 / (1 + (internal_loss + 2j * detuning) / coupling_loss)
        super(LinearSymmetricTransmission, self).__init__(func=symmetric_transmission, *args, **kwargs)
    #ToDo: implement and test guess.guess_smooth
    def guess(self, data, frequency=None, coupling_loss=None):
        """
        Return a lmfit.Parameters object containing reasonable initial values generated from the given data.
        :param data: an array of complex transmission data.
        :param frequency: an array of real frequencies at which the data was measured.
        :param coupling_loss: if not None, the coupling loss is set to the given value and is not varied in the fit.
        :return: lmfit.Parameters
        """
        params = self.make_params()
        # Smooth |S21| so single-point noise spikes do not set the resonance guess.
        smoothed_magnitude = guess.smooth(np.abs(data))
        peak_index = np.argmax(smoothed_magnitude)
        resonance_frequency_guess = frequency[peak_index]  # guess that the resonance is the highest point
        params['resonance_frequency'].set(value=resonance_frequency_guess, min=frequency.min(), max=frequency.max())
        # Estimate the full width at half maximum (in power) of the peak by linearly
        # interpolating the zero crossing of |S21|^2 - max/2 on each side of the peak.
        power_minus_half_max = smoothed_magnitude ** 2 - smoothed_magnitude[peak_index] ** 2 / 2
        f1 = np.interp(0, power_minus_half_max[:peak_index], frequency[:peak_index])
        f2 = np.interp(0, -power_minus_half_max[peak_index:], frequency[peak_index:])
        linewidth = f2 - f1
        # Fractional linewidth: for this model it equals internal_loss + coupling_loss.
        internal_plus_coupling = linewidth / resonance_frequency_guess
        # From the on-resonance value |S21| = 1 / (1 + internal_loss / coupling_loss),
        # this is the ratio internal_loss / coupling_loss.
        internal_over_coupling = (1 / np.abs(data[peak_index]) - 1)
        if coupling_loss is None:
            params['coupling_loss'].set(value=internal_plus_coupling / (1 + internal_over_coupling),
                                        min=1e-12, max=1)
            params['internal_loss'].set(value=(internal_plus_coupling * internal_over_coupling /
                                               (1 + internal_over_coupling)),
                                        min=1e-12, max=1)
        else:
            params['coupling_loss'].set(value=coupling_loss, vary=False)
            params['internal_loss'].set(value=internal_plus_coupling - coupling_loss, min=1e-12, max=1)
        return params
class CCxSTFitterKnownMagnitude(linear.LinearResonatorFitter):
    """
    This class fits a composite model that is the product of the MagnitudePhase background model and the
    SymmetricTransmission model.
    It should be used when the magnitude of the background response is known and the cable delay has been calibrated so
    that the background phase is constant across the band, but it will fit for a constant phase offset.
    """
    def __init__(self, frequency, data, background_magnitude, errors=None, **fit_kwds):
        """
        Fit the given data.
        :param frequency: an array of real frequencies at which the data was measured.
        :param data: an array of complex transmission data.
        :param background_magnitude: the value of the transmission in the absence of the resonator, in the same units
          as the data meaning NOT in dB.
        :param errors: an array of complex numbers that are the standard errors of the mean of the data points; the
          errors for the real and imaginary parts may be different; if no errors are provided then all points will be
          weighted equally.
        :param fit_kwds: keyword arguments passed directly to lmfit.model.Model.fit().
        """
        self.background_magnitude = background_magnitude
        super(CCxSTFitterKnownMagnitude, self).__init__(frequency=frequency, data=data,
                                                        foreground_model=LinearSymmetricTransmission(),
                                                        background_model=background.MagnitudePhase(),
                                                        errors=errors, **fit_kwds)
    def guess(self, frequency, data):
        """Return initial parameters: fixed background magnitude, guessed phase and resonator values."""
        # Use the phase at the transmission peak (maximum |S21|) as the background phase guess.
        phase_guess = np.angle(data[np.argmax(np.abs(data))])
        params = self.background_model.make_params(magnitude=self.background_magnitude, phase=phase_guess)
        # The background magnitude is known, so hold it fixed during the fit.
        params['magnitude'].vary = False
        background_values = self.background_model.eval(params=params, frequency=frequency)
        # Guess the resonator parameters from the background-normalized data.
        params.update(self.foreground_model.guess(data=data / background_values, frequency=frequency))
        return params
class CCxSTFitterKnownCoupling(linear.LinearResonatorFitter):
    """
    This class fits a composite model that is the product of the MagnitudePhase background model and the
    SymmetricTransmission model.
    It should be used when the coupling loss (i.e. the inverse coupling quality factor) is known, presumably from
    another measurement or a simulation, and when the cable delay has been calibrated so that the background phase is
    constant across the band.
    """
    def __init__(self, frequency, data, coupling_loss, errors=None, **fit_kwds):
        """
        Fit the given data to a composite model that is the product of the MagnitudePhase background model and the
        SymmetricTransmission model.
        :param frequency: an array of real frequencies at which the data was measured.
        :param data: an array of complex transmission data (NOT in dB).
        :param coupling_loss: the fixed value of the coupling loss, or inverse coupling quality factor.
        :param errors: an array of complex numbers that are the standard errors of the mean of the data points; the
          errors for the real and imaginary parts may be different; if no errors are provided then all points will be
          weighted equally.
        :param fit_kwds: keyword arguments passed directly to lmfit.model.Model.fit().
        """
        self.known_coupling_loss = coupling_loss
        super(CCxSTFitterKnownCoupling, self).__init__(frequency=frequency, data=data,
                                                       foreground_model=LinearSymmetricTransmission(),
                                                       background_model=background.MagnitudePhase(),
                                                       errors=errors, **fit_kwds)
    def guess(self, frequency, data):
        """Return initial parameters with the coupling loss held fixed at the known value."""
        # NOTE(review): the background guess uses self.data/self.frequency while the
        # foreground guess uses the `data`/`frequency` arguments; presumably these are
        # the same arrays when guess() is invoked by the fitter -- confirm against
        # LinearResonatorFitter and the sibling CCxSTFitterKnownMagnitude.guess.
        params = self.background_model.guess(data=self.data, frequency=self.frequency)
        params.update(self.foreground_model.guess(data=(data /
                                                        self.background_model.eval(params=params, frequency=frequency)),
                                                  frequency=frequency, coupling_loss=self.known_coupling_loss))
        return params
| [
"numpy.interp",
"numpy.abs",
"numpy.argmax"
] | [((3237, 3266), 'numpy.argmax', 'np.argmax', (['smoothed_magnitude'], {}), '(smoothed_magnitude)\n', (3246, 3266), True, 'import numpy as np\n'), ((3601, 3672), 'numpy.interp', 'np.interp', (['(0)', 'power_minus_half_max[:peak_index]', 'frequency[:peak_index]'], {}), '(0, power_minus_half_max[:peak_index], frequency[:peak_index])\n', (3610, 3672), True, 'import numpy as np\n'), ((3686, 3758), 'numpy.interp', 'np.interp', (['(0)', '(-power_minus_half_max[peak_index:])', 'frequency[peak_index:]'], {}), '(0, -power_minus_half_max[peak_index:], frequency[peak_index:])\n', (3695, 3758), True, 'import numpy as np\n'), ((3202, 3214), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (3208, 3214), True, 'import numpy as np\n'), ((3896, 3920), 'numpy.abs', 'np.abs', (['data[peak_index]'], {}), '(data[peak_index])\n', (3902, 3920), True, 'import numpy as np\n'), ((6326, 6338), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (6332, 6338), True, 'import numpy as np\n')] |
"""
This module will test the basic functionalities of
topopy.MergeTree
"""
from unittest import TestCase
import nglpy as ngl
import numpy as np
import topopy
from .test_functions import gerber, generate_test_grid_2d
import sklearn
import sys
import os
class TestMT(TestCase):
    """
    Class for testing the Contour Tree and its prerequisite the Merge Tree
    """
    # NOTE(review): this method is named `setup`, not unittest's `setUp`, so
    # it is NOT run automatically -- each test below invokes it explicitly.
    def setup(self):
        """
        Setup function will create a fixed point set and parameter
        settings for testing different aspects of this library.
        """
        self.X = generate_test_grid_2d(40)
        self.Y = gerber(self.X)
        self.graph = ngl.EmptyRegionGraph(max_neighbors=10)
        # Pre-computed normalisations of X; only "none" is exercised by the
        # tests visible in this class.
        self.norm_x = {}
        scaler = sklearn.preprocessing.MinMaxScaler()
        self.norm_x["feature"] = scaler.fit_transform(np.atleast_2d(self.X))
        self.norm_x["zscore"] = sklearn.preprocessing.scale(
            self.X, axis=0, with_mean=True, with_std=True, copy=True
        )
        self.norm_x["none"] = self.X
    def test_debug(self):
        """
        Testing if we can build the Merge Tree directly
        """
        self.setup()
        # Redirect stdout to a file so the debug output can be inspected.
        test_file = "mt_test_debug.txt"
        sys.stdout = open(test_file, "w")
        mt = topopy.MergeTree(debug=True, graph=self.graph)
        mt.build(self.X, self.Y)
        sys.stdout.close()
        # Both phases of the build should have logged their timing headers.
        lines = ["Graph Preparation:", "Merge Tree Computation:"]
        with open(test_file, "r") as fp:
            debug_output = fp.read()
            for line in lines:
                self.assertIn(line, debug_output)
        os.remove(test_file)
        # Restore stdout
        sys.stdout = sys.__stdout__
    def test_merge_tree(self):
        """
        Testing if we can build the Merge Tree directly
        """
        self.setup()
        mt = topopy.MergeTree(debug=False, graph=self.graph)
        # Building on Y yields the split tree of the Gerber function.
        mt.build(self.X, self.Y)
        self.assertEqual(
            9,
            len(mt.leaves),
            "The 2D Gerber test function "
            "should have 9 leaves in its split tree",
        )
        self.assertEqual(
            8,
            len(mt.branches),
            "The 2D Gerber test function "
            "should have 8 branches in its split tree",
        )
        # Negating Y yields the join tree instead.
        mt.build(self.X, -self.Y)
        self.assertEqual(
            4,
            len(mt.leaves),
            "The 2D Gerber test function "
            "should have 4 leaves in its join tree",
        )
        self.assertEqual(
            3,
            len(mt.branches),
            "The 2D Gerber test function "
            "should have 3 branches in its join tree",
        )
| [
"os.remove",
"nglpy.EmptyRegionGraph",
"sklearn.preprocessing.scale",
"sys.stdout.close",
"topopy.MergeTree",
"sklearn.preprocessing.MinMaxScaler",
"numpy.atleast_2d"
] | [((651, 689), 'nglpy.EmptyRegionGraph', 'ngl.EmptyRegionGraph', ([], {'max_neighbors': '(10)'}), '(max_neighbors=10)\n', (671, 689), True, 'import nglpy as ngl\n'), ((733, 769), 'sklearn.preprocessing.MinMaxScaler', 'sklearn.preprocessing.MinMaxScaler', ([], {}), '()\n', (767, 769), False, 'import sklearn\n'), ((879, 968), 'sklearn.preprocessing.scale', 'sklearn.preprocessing.scale', (['self.X'], {'axis': '(0)', 'with_mean': '(True)', 'with_std': '(True)', 'copy': '(True)'}), '(self.X, axis=0, with_mean=True, with_std=True,\n copy=True)\n', (906, 968), False, 'import sklearn\n'), ((1248, 1294), 'topopy.MergeTree', 'topopy.MergeTree', ([], {'debug': '(True)', 'graph': 'self.graph'}), '(debug=True, graph=self.graph)\n', (1264, 1294), False, 'import topopy\n'), ((1337, 1355), 'sys.stdout.close', 'sys.stdout.close', ([], {}), '()\n', (1353, 1355), False, 'import sys\n'), ((1592, 1612), 'os.remove', 'os.remove', (['test_file'], {}), '(test_file)\n', (1601, 1612), False, 'import os\n'), ((1821, 1868), 'topopy.MergeTree', 'topopy.MergeTree', ([], {'debug': '(False)', 'graph': 'self.graph'}), '(debug=False, graph=self.graph)\n', (1837, 1868), False, 'import topopy\n'), ((824, 845), 'numpy.atleast_2d', 'np.atleast_2d', (['self.X'], {}), '(self.X)\n', (837, 845), True, 'import numpy as np\n')] |
# how many missing values exist or better still what is the % of missing values in the dataset?
import numpy as np
import pandas as pd
import seaborn as sns
def convert_labels(df):
    """Normalise column labels in place: spaces -> underscores, lower-case."""
    renamed = []
    for label in df.columns:
        renamed.append(label.replace(' ', '_').lower())
    df.columns = renamed
    return df
def percent_missing(df: pd.DataFrame):
    """Print the overall percentage of missing cells in *df*.

    Fix: ``np.product`` was a deprecated alias removed in NumPy 2.0;
    ``np.prod`` is the supported spelling with identical behaviour.

    :param df: dataframe to inspect.
    :return: None -- the value is only printed, matching the original API.
    """
    # Total number of cells in the dataframe.
    total_cells = np.prod(df.shape)
    # Missing values per column, then the grand total.
    total_missing = df.isnull().sum().sum()
    pct = round((total_missing / total_cells) * 100, 2)
    return print("The dataset contains", pct, "%", "missing values.")
def percent_missing_for_col(df: pd.DataFrame, col_name: str) -> float:
    """Return the percentage of missing entries in one column.

    An empty column yields 0.0 rather than dividing by zero.
    """
    column = df[col_name]
    size = len(column)
    if size <= 0:
        return 0.0
    missing = column.isnull().sum()
    return round((missing / size) * 100, 2)
| [
"numpy.product"
] | [((384, 404), 'numpy.product', 'np.product', (['df.shape'], {}), '(df.shape)\n', (394, 404), True, 'import numpy as np\n')] |
"""Module to test garage.torch.utils."""
import numpy as np
import torch
import garage.torch.utils as tu
def test_utils_set_gpu_mode():
    """set_gpu_mode should select the matching torch device and flags."""
    if not torch.cuda.is_available():
        # No GPU: force CPU and expect the CPU device to be reported.
        tu.set_gpu_mode(mode=False)
        assert tu.global_device() == torch.device('cpu')
        assert not tu._USE_GPU
    else:
        # GPU present: enabling GPU mode should report cuda:0.
        tu.set_gpu_mode(mode=True)
        assert tu.global_device() == torch.device('cuda:0')
        assert tu._USE_GPU
    assert not tu._GPU_ID
def test_torch_to_np():
    """torch_to_np should map a tuple of tensors to numpy arrays."""
    tensors = (torch.zeros(1), torch.zeros(1))
    converted = tu.torch_to_np(tensors)
    for arr in converted:
        assert isinstance(arr, np.ndarray)
def test_dict_np_to_torch():
    """dict_np_to_torch should convert every dict value to a torch.Tensor."""
    arrays = {'a': np.zeros(1), 'b': np.ones(1)}
    tu.dict_np_to_torch(arrays)
    assert all(isinstance(value, torch.Tensor) for value in arrays.values())
| [
"garage.torch.utils.global_device",
"garage.torch.utils.dict_np_to_torch",
"garage.torch.utils.set_gpu_mode",
"numpy.zeros",
"numpy.ones",
"torch.zeros",
"torch.cuda.is_available",
"torch.device",
"garage.torch.utils.torch_to_np"
] | [((202, 227), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (225, 227), False, 'import torch\n'), ((677, 696), 'garage.torch.utils.torch_to_np', 'tu.torch_to_np', (['tup'], {}), '(tup)\n', (691, 696), True, 'import garage.torch.utils as tu\n'), ((945, 969), 'garage.torch.utils.dict_np_to_torch', 'tu.dict_np_to_torch', (['dic'], {}), '(dic)\n', (964, 969), True, 'import garage.torch.utils as tu\n'), ((237, 263), 'garage.torch.utils.set_gpu_mode', 'tu.set_gpu_mode', ([], {'mode': '(True)'}), '(mode=True)\n', (252, 263), True, 'import garage.torch.utils as tu\n'), ((369, 396), 'garage.torch.utils.set_gpu_mode', 'tu.set_gpu_mode', ([], {'mode': '(False)'}), '(mode=False)\n', (384, 396), True, 'import garage.torch.utils as tu\n'), ((620, 634), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (631, 634), False, 'import torch\n'), ((636, 650), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (647, 650), False, 'import torch\n'), ((911, 922), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (919, 922), True, 'import numpy as np\n'), ((929, 939), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (936, 939), True, 'import numpy as np\n'), ((279, 297), 'garage.torch.utils.global_device', 'tu.global_device', ([], {}), '()\n', (295, 297), True, 'import garage.torch.utils as tu\n'), ((301, 323), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (313, 323), False, 'import torch\n'), ((412, 430), 'garage.torch.utils.global_device', 'tu.global_device', ([], {}), '()\n', (428, 430), True, 'import garage.torch.utils as tu\n'), ((434, 453), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (446, 453), False, 'import torch\n')] |
"""
User Defined Functions
======================
"""
import os
import numpy as np
import pandas as pd
import scipy.sparse as sparse
import matplotlib.pyplot as plt
import pyspark.sql.functions as F
from random import seed, random
from pyspark.ml.linalg import Vectors
from pyspark.sql import Window
from pyspark.sql.types import IntegerType
def matrix_updates(states1, states2):
    """Cross every pair of (state, weight) entries.

    Each output row is [float(state1), float(state2), weight1 * weight2].
    """
    update = []
    for s1 in states1:
        for s2 in states2:
            update.append([float(s1[0]), float(s2[0]), s1[1] * s2[1]])
    return update
def prepare_for_plot(data, type_, n_polygons=114):
    """Convert a Spark DataFrame into a sparse COO transition matrix.

    Generalisation: the grid size, previously hard-coded to 114+1, is now a
    parameter with the old value as default (backward compatible).

    :param data: DataFrame with integer 'x'/'y' columns and a *type_* column;
        must support ``toPandas()``.
    :param type_: name of the column holding the matrix values.
    :param n_polygons: highest polygon index; the result is
        (n_polygons + 1) x (n_polygons + 1).
    :return: scipy.sparse.coo_matrix of the values.
    """
    pd_df = data.toPandas()
    values = np.array(pd_df[type_])
    rows = np.array(pd_df['y'].astype('int'))
    cols = np.array(pd_df['x'].astype('int'))
    size = n_polygons + 1
    return sparse.coo_matrix((values, (rows, cols)), shape=(size, size))
def plot_sparse(matrix, fname, title, dirname):
    """Save a spy (sparsity-pattern) plot of *matrix* to dirname/fname.

    :param matrix: 2-D (sparse) matrix whose non-zero structure is drawn.
    :param fname: output file name.
    :param title: figure title.
    :param dirname: directory the figure is written into.
    """
    plt.figure(figsize = (20, 20))
    plt.spy(matrix, markersize = 10, alpha = 0.5)
    plt.grid()
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.xlabel("Polygon", fontsize = 27)
    plt.ylabel("Polygon", fontsize = 27)
    plt.title(title, fontsize = 30)
    plt.savefig(os.path.join(dirname, fname))
def plot_dense(matrix, fname, title, dirname):
    """Save a dense heatmap of *matrix* (with colorbar) to dirname/fname.

    :param matrix: sparse matrix; densified via ``todense()`` for imshow.
    :param fname: output file name.
    :param title: figure title.
    :param dirname: directory the figure is written into.
    """
    plt.figure(figsize = (20, 20))
    plt.imshow(matrix.todense())
    plt.colorbar()
    plt.grid()
    plt.xticks(fontsize = 20)
    plt.yticks(fontsize = 20)
    plt.xlabel("Polygon", fontsize = 27)
    plt.ylabel("Polygon", fontsize = 27)
    plt.title(title, fontsize = 30)
    plt.savefig(os.path.join(dirname, fname))
def plot_trend(vector, fname, title, dirname):
    """Plot each state's probability across iterations and save the figure.

    :param vector: pandas DataFrame of distributions, one row per iteration
        (as produced by ``stationary``).
    :param fname: output file name.
    :param title: figure title.
    :param dirname: directory the figure is written into.
    """
    dfStationaryDist = vector
    dfStationaryDist.plot()
    plt.xlabel("Iterated times", fontsize = 18)
    plt.ylabel("Probability", fontsize = 18)
    plt.title(title, fontsize = 20)
    plt.gcf().set_size_inches(16, 12)
    plt.savefig(os.path.join(dirname, fname))
def plot_result(vector, fname, title, dirname):
    """Grouped bar chart of the first three distribution rows.

    Rows 0, 1 and 2 of *vector* are plotted side by side as
    Initial / Middle / End; assumes at least three rows are present.

    :param vector: pandas DataFrame of distributions, one row per snapshot.
    :param fname: output file name.
    :param title: figure title.
    :param dirname: directory the figure is written into.
    """
    labels = list(vector.columns)
    initial = list(vector.iloc[0])
    middle = list(vector.iloc[1])
    end = list(vector.iloc[2])
    # One bar group per state, three offset bars per group.
    X = np.arange(len(vector.columns))
    width = 0.2
    plt.figure(figsize = (16, 12))
    plt.bar(X - 0.2, initial, width, color = 'deepskyblue', label = 'Initial')
    plt.bar(X, middle, width, color = 'gold', label = 'Middle')
    plt.bar(X + 0.2, end, width, color = 'grey', label = 'End')
    plt.xticks(X, labels)
    plt.xlabel("Polygon", fontsize = 18)
    plt.ylabel("Probability", fontsize = 18)
    plt.legend(['Initial', 'Middle', 'End'], fontsize = 18)
    plt.title(title, fontsize = 20)
    plt.savefig(os.path.join(dirname, fname))
def vectorize(x):
    """Row mapper: build a pyspark SparseVector plus its dense list form.

    Expects *x* to carry ``col`` (indices) and ``val`` (values) fields.
    """
    sparse_vec = Vectors.sparse(114 + 1, x.col, x.val)
    dense_list = sparse_vec.toArray().tolist()
    return (sparse_vec, dense_list)
def stationary(n, vector, matrix):
    """Power-iterate the chain n times and collect every distribution.

    :param n: number of iterations (rows in the result).
    :param vector: Spark DataFrame whose first row holds
        'ml_SparseVector' and 'np_vector' (the initial distribution).
    :param matrix: transition matrix the sparse vector is dotted with.
    :return: pandas DataFrame with one distribution per row.
    """
    sv = vector.first()['ml_SparseVector']
    history = np.array([vector.first()['np_vector']])
    for _ in range(n - 1):
        dense_next = (sv.dot(matrix)).tolist()
        history = np.append(history, np.array([dense_next]), axis=0)
        # Keep only the non-zero entries for the next sparse vector.
        nonzero = {idx: dense_next[idx] for idx in np.nonzero(dense_next)[0]}
        sv = Vectors.sparse(len(dense_next), nonzero)
    return pd.DataFrame(history)
def vectorConverge(spark, vector):
    """Collapse the final distribution row into one (col, val) Spark row.

    Drops all-zero columns, melts the remainder to long form, then
    aggregates indices and values into two parallel lists.
    """
    final_row = vector.iloc[-1:]
    nonzero_only = final_row.loc[:, (final_row != 0).any(axis = 0)]
    melted = nonzero_only.melt()
    cols = F.collect_list('variable').alias('col')
    vals = F.collect_list('value').alias('val')
    return spark.createDataFrame(melted).agg(cols, vals)
def randomize(current_row):
r = np.random.uniform(0.0, 1.0)
cum = np.cumsum(current_row)
m = (np.where(cum < r))[0]
nextState = m[len(m)-1]+1
return nextState
def simulate(vector, matrix1, m, matrix2, n):
    """Simulate a trajectory per user: m steps under matrix1, then n under matrix2.

    For each row the current state is advanced by repeated sampling from the
    (zero-masked) transition row; the full path is stored as a string in a
    new 'simulated_traj' column. Depends on the global NumPy RNG via
    ``randomize``, so the exact call order matters.

    :param vector: Spark DataFrame with a 'voronoi_id' column (start states).
    :param matrix1: transition matrix for the first m steps.
    :param m: number of steps under matrix1.
    :param matrix2: transition matrix for the following n steps.
    :param n: number of steps under matrix2.
    :return: pandas DataFrame with the added 'simulated_traj' column.
    """
    df = vector.toPandas()
    P1 = matrix1
    P2 = matrix2
    for i in df.index:
        currentState = df['voronoi_id'][i]
        simulated_traj = [currentState.item()]
        for x in range(m):
            # Mask zero-probability transitions before sampling.
            currentRow = np.ma.masked_values((P1[currentState]), 0.0)
            nextState = randomize(currentRow)
            simulated_traj = simulated_traj + [nextState.item()]
            currentState = nextState
        df.at[i, 'simulated_traj'] = str(simulated_traj)
        # Continue the same trajectory under the second matrix.
        for y in range(n):
            currentRow = np.ma.masked_values((P2[currentState]), 0.0)
            nextState = randomize(currentRow)
            simulated_traj = simulated_traj + [nextState.item()]
            currentState = nextState
        # Overwrites the intermediate value with the full m+n path.
        df.at[i, 'simulated_traj'] = str(simulated_traj)
    return df
def sim_vectorize(x):
    """Row mapper: rebuild the simulated trajectory's dense vector form.

    Returns (user_id, simulated_traj, dense list of the sparse vector, i).
    """
    sparse_vec = Vectors.sparse(114 + 1, x.col, x.val)
    dense_list = sparse_vec.toArray().tolist()
    return (x.user_id, x.simulated_traj, dense_list, x.i)
def plot_sim_result(vector, fname, title, dirname):
    """Bar chart of the last distribution row of *vector*, saved to disk.

    :param vector: pandas DataFrame of distributions, one row per snapshot;
        only the final row is plotted.
    :param fname: output file name.
    :param title: figure title.
    :param dirname: directory the figure is written into.
    """
    last_vector = vector.iloc[-1:]
    plt.figure(figsize = (16, 12))
    plt.bar(x = list(last_vector.columns), height = list(last_vector.iloc[0]))
    plt.xlabel("Polygon", fontsize = 18)
    plt.ylabel("Probability", fontsize = 18)
    plt.xticks(range(0, 114+1, 10))
    plt.title(title, fontsize = 20)
    plt.savefig(os.path.join(dirname, fname))
def rand_state(data, n):
    """Replicate *data* n times and attach a random start state per row.

    Each copy of a user id gets a row number 'i' (1..n within its window)
    and a uniformly random 'simulated_traj' state in [0, 114], seeded for
    reproducibility. 'id' is renamed to 'user_id'.

    :param data: Spark DataFrame with an 'id' column.
    :param n: number of copies of each row.
    :return: Spark DataFrame with columns user_id, i, simulated_traj.
    """
    df = data
    for i in range(n-1):
        df = df.union(data)
    # Window per original id; constant orderBy makes row_number arbitrary
    # but deterministic enough to index the copies.
    window = Window.partitionBy(['id']).orderBy(F.lit('A'))
    df = df.withColumn('i', F.row_number().over(window))\
        .withColumn('simulated_traj', F.round(F.rand(seed=0)*114, 0).cast(IntegerType()))\
        .withColumnRenamed('id', 'user_id')
    return df
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.spy",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"pyspark.sql.types.IntegerType",
"os.path.join",
"pandas.DataFrame",
"matplotlib.pyplot.yticks",
"pyspark.sql.functions.row_number",
"matplotlib.pyplot.colorbar",
"numpy.cumsum",
"scipy.spa... | [((594, 616), 'numpy.array', 'np.array', (['pd_df[type_]'], {}), '(pd_df[type_])\n', (602, 616), True, 'import numpy as np\n'), ((724, 789), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(data, (rows, cols))'], {'shape': '(114 + 1, 114 + 1)'}), '((data, (rows, cols)), shape=(114 + 1, 114 + 1))\n', (741, 789), True, 'import scipy.sparse as sparse\n'), ((856, 884), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (866, 884), True, 'import matplotlib.pyplot as plt\n'), ((891, 932), 'matplotlib.pyplot.spy', 'plt.spy', (['matrix'], {'markersize': '(10)', 'alpha': '(0.5)'}), '(matrix, markersize=10, alpha=0.5)\n', (898, 932), True, 'import matplotlib.pyplot as plt\n'), ((941, 951), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (949, 951), True, 'import matplotlib.pyplot as plt\n'), ((956, 979), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (966, 979), True, 'import matplotlib.pyplot as plt\n'), ((986, 1009), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (996, 1009), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1050), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Polygon"""'], {'fontsize': '(27)'}), "('Polygon', fontsize=27)\n", (1026, 1050), True, 'import matplotlib.pyplot as plt\n'), ((1057, 1091), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Polygon"""'], {'fontsize': '(27)'}), "('Polygon', fontsize=27)\n", (1067, 1091), True, 'import matplotlib.pyplot as plt\n'), ((1098, 1127), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(30)'}), '(title, fontsize=30)\n', (1107, 1127), True, 'import matplotlib.pyplot as plt\n'), ((1229, 1257), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (1239, 1257), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1311), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1309, 1311), 
True, 'import matplotlib.pyplot as plt\n'), ((1316, 1326), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1324, 1326), True, 'import matplotlib.pyplot as plt\n'), ((1331, 1354), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (1341, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1361, 1384), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (1371, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1391, 1425), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Polygon"""'], {'fontsize': '(27)'}), "('Polygon', fontsize=27)\n", (1401, 1425), True, 'import matplotlib.pyplot as plt\n'), ((1432, 1466), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Polygon"""'], {'fontsize': '(27)'}), "('Polygon', fontsize=27)\n", (1442, 1466), True, 'import matplotlib.pyplot as plt\n'), ((1473, 1502), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(30)'}), '(title, fontsize=30)\n', (1482, 1502), True, 'import matplotlib.pyplot as plt\n'), ((1668, 1709), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterated times"""'], {'fontsize': '(18)'}), "('Iterated times', fontsize=18)\n", (1678, 1709), True, 'import matplotlib.pyplot as plt\n'), ((1716, 1754), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {'fontsize': '(18)'}), "('Probability', fontsize=18)\n", (1726, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1761, 1790), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (1770, 1790), True, 'import matplotlib.pyplot as plt\n'), ((2124, 2152), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (2134, 2152), True, 'import matplotlib.pyplot as plt\n'), ((2159, 2229), 'matplotlib.pyplot.bar', 'plt.bar', (['(X - 0.2)', 'initial', 'width'], {'color': '"""deepskyblue"""', 'label': '"""Initial"""'}), "(X - 0.2, initial, width, color='deepskyblue', 
label='Initial')\n", (2166, 2229), True, 'import matplotlib.pyplot as plt\n'), ((2238, 2293), 'matplotlib.pyplot.bar', 'plt.bar', (['X', 'middle', 'width'], {'color': '"""gold"""', 'label': '"""Middle"""'}), "(X, middle, width, color='gold', label='Middle')\n", (2245, 2293), True, 'import matplotlib.pyplot as plt\n'), ((2302, 2357), 'matplotlib.pyplot.bar', 'plt.bar', (['(X + 0.2)', 'end', 'width'], {'color': '"""grey"""', 'label': '"""End"""'}), "(X + 0.2, end, width, color='grey', label='End')\n", (2309, 2357), True, 'import matplotlib.pyplot as plt\n'), ((2366, 2387), 'matplotlib.pyplot.xticks', 'plt.xticks', (['X', 'labels'], {}), '(X, labels)\n', (2376, 2387), True, 'import matplotlib.pyplot as plt\n'), ((2392, 2426), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Polygon"""'], {'fontsize': '(18)'}), "('Polygon', fontsize=18)\n", (2402, 2426), True, 'import matplotlib.pyplot as plt\n'), ((2433, 2471), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {'fontsize': '(18)'}), "('Probability', fontsize=18)\n", (2443, 2471), True, 'import matplotlib.pyplot as plt\n'), ((2478, 2531), 'matplotlib.pyplot.legend', 'plt.legend', (["['Initial', 'Middle', 'End']"], {'fontsize': '(18)'}), "(['Initial', 'Middle', 'End'], fontsize=18)\n", (2488, 2531), True, 'import matplotlib.pyplot as plt\n'), ((2543, 2572), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (2552, 2572), True, 'import matplotlib.pyplot as plt\n'), ((2697, 2730), 'pyspark.ml.linalg.Vectors.sparse', 'Vectors.sparse', (['(114 + 1)', 'col', 'val'], {}), '(114 + 1, col, val)\n', (2711, 2730), False, 'from pyspark.ml.linalg import Vectors\n'), ((2965, 2986), 'numpy.array', 'np.array', (['[current_v]'], {}), '([current_v])\n', (2973, 2986), True, 'import numpy as np\n'), ((3287, 3304), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (3299, 3304), True, 'import pandas as pd\n'), ((3769, 3796), 'numpy.random.uniform', 
'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3786, 3796), True, 'import numpy as np\n'), ((3807, 3829), 'numpy.cumsum', 'np.cumsum', (['current_row'], {}), '(current_row)\n', (3816, 3829), True, 'import numpy as np\n'), ((4922, 4955), 'pyspark.ml.linalg.Vectors.sparse', 'Vectors.sparse', (['(114 + 1)', 'col', 'val'], {}), '(114 + 1, col, val)\n', (4936, 4955), False, 'from pyspark.ml.linalg import Vectors\n'), ((5166, 5194), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (5176, 5194), True, 'import matplotlib.pyplot as plt\n'), ((5280, 5314), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Polygon"""'], {'fontsize': '(18)'}), "('Polygon', fontsize=18)\n", (5290, 5314), True, 'import matplotlib.pyplot as plt\n'), ((5321, 5359), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {'fontsize': '(18)'}), "('Probability', fontsize=18)\n", (5331, 5359), True, 'import matplotlib.pyplot as plt\n'), ((5402, 5431), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (5411, 5431), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1174), 'os.path.join', 'os.path.join', (['dirname', 'fname'], {}), '(dirname, fname)\n', (1158, 1174), False, 'import os\n'), ((1521, 1549), 'os.path.join', 'os.path.join', (['dirname', 'fname'], {}), '(dirname, fname)\n', (1533, 1549), False, 'import os\n'), ((1847, 1875), 'os.path.join', 'os.path.join', (['dirname', 'fname'], {}), '(dirname, fname)\n', (1859, 1875), False, 'import os\n'), ((2591, 2619), 'os.path.join', 'os.path.join', (['dirname', 'fname'], {}), '(dirname, fname)\n', (2603, 2619), False, 'import os\n'), ((3839, 3856), 'numpy.where', 'np.where', (['(cum < r)'], {}), '(cum < r)\n', (3847, 3856), True, 'import numpy as np\n'), ((5450, 5478), 'os.path.join', 'os.path.join', (['dirname', 'fname'], {}), '(dirname, fname)\n', (5462, 5478), False, 'import os\n'), ((5625, 5635), 'pyspark.sql.functions.lit', 
'F.lit', (['"""A"""'], {}), "('A')\n", (5630, 5635), True, 'import pyspark.sql.functions as F\n'), ((1797, 1806), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1804, 1806), True, 'import matplotlib.pyplot as plt\n'), ((3095, 3113), 'numpy.array', 'np.array', (['[next_v]'], {}), '([next_v])\n', (3103, 3113), True, 'import numpy as np\n'), ((4189, 4231), 'numpy.ma.masked_values', 'np.ma.masked_values', (['P1[currentState]', '(0.0)'], {}), '(P1[currentState], 0.0)\n', (4208, 4231), True, 'import numpy as np\n'), ((4501, 4543), 'numpy.ma.masked_values', 'np.ma.masked_values', (['P2[currentState]', '(0.0)'], {}), '(P2[currentState], 0.0)\n', (4520, 4543), True, 'import numpy as np\n'), ((5590, 5616), 'pyspark.sql.Window.partitionBy', 'Window.partitionBy', (["['id']"], {}), "(['id'])\n", (5608, 5616), False, 'from pyspark.sql import Window\n'), ((3596, 3622), 'pyspark.sql.functions.collect_list', 'F.collect_list', (['"""variable"""'], {}), "('variable')\n", (3610, 3622), True, 'import pyspark.sql.functions as F\n'), ((3668, 3691), 'pyspark.sql.functions.collect_list', 'F.collect_list', (['"""value"""'], {}), "('value')\n", (3682, 3691), True, 'import pyspark.sql.functions as F\n'), ((3161, 3179), 'numpy.nonzero', 'np.nonzero', (['next_v'], {}), '(next_v)\n', (3171, 3179), True, 'import numpy as np\n'), ((5773, 5786), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (5784, 5786), False, 'from pyspark.sql.types import IntegerType\n'), ((5666, 5680), 'pyspark.sql.functions.row_number', 'F.row_number', ([], {}), '()\n', (5678, 5680), True, 'import pyspark.sql.functions as F\n'), ((5745, 5759), 'pyspark.sql.functions.rand', 'F.rand', ([], {'seed': '(0)'}), '(seed=0)\n', (5751, 5759), True, 'import pyspark.sql.functions as F\n')] |
# pylint: disable=redefined-outer-name,missing-docstring,unused-import,no-self-use
import pytest
import os
import json
from numpy.testing import assert_array_equal
from .fixtures import target_dir, load_mock_nsl_data, load_mock_nsl_labels, load_mock_cic_data, load_mock_cic_labels
from .context import single_benchmark, dataset_tools, NSL, CIC
def test_nsl6_filter(monkeypatch, target_dir, load_mock_nsl_data, load_mock_nsl_labels):
    """The NSL 6-class loader should keep exactly the six expected columns."""
    def fake_dirname(somepath):
        # Point package-directory lookups at the test fixture dir.
        return target_dir
    def fake_load_df(df_name, df_path):
        if df_name.endswith('data'):
            return load_mock_nsl_data
        elif df_name.endswith('labels'):
            return load_mock_nsl_labels
    def fake_join(dirpath, filename):
        if 'json' in filename:
            return target_dir + '/kdd_label_wordindex.json'
        return target_dir
    monkeypatch.setattr(dataset_tools, 'load_df', fake_load_df)
    monkeypatch.setattr(os.path, 'dirname', fake_dirname)
    monkeypatch.setattr(os.path, 'join', fake_join)
    tr_data, _, _, _ = NSL.get_NSL_6class()
    expected = ['duration', 'protocol_type', 'src_bytes', 'dst_bytes', 'count', 'srv_count']
    assert_array_equal(tr_data.columns.values, expected)
def test_nsl16_filter(monkeypatch, target_dir, load_mock_nsl_data, load_mock_nsl_labels):
    """The NSL 16-class loader should keep exactly the sixteen expected columns."""
    def fake_dirname(somepath):
        # Point package-directory lookups at the test fixture dir.
        return target_dir
    def fake_load_df(df_name, df_path):
        if df_name.endswith('data'):
            return load_mock_nsl_data
        elif df_name.endswith('labels'):
            return load_mock_nsl_labels
    def fake_join(dirpath, filename):
        if 'json' in filename:
            return target_dir + '/kdd_label_wordindex.json'
        return target_dir
    monkeypatch.setattr(dataset_tools, 'load_df', fake_load_df)
    monkeypatch.setattr(os.path, 'dirname', fake_dirname)
    monkeypatch.setattr(os.path, 'join', fake_join)
    tr_data, _, _, _ = NSL.get_NSL_16class()
    expected = [
        'service', 'flag', 'dst_bytes', 'wrong_fragment', 'count',
        'serror_rate', 'srv_serror_rate', 'srv_rerror_rate', 'same_srv_rate',
        'dst_host_count', 'dst_host_srv_count', 'dst_host_same_srv_rate', 'dst_host_diff_srv_rate',
        'dst_host_serror_rate', 'dst_host_srv_serror_rate', 'dst_host_rerror_rate'
    ]
    assert_array_equal(tr_data.columns.values, expected)
def test_cic20(monkeypatch, target_dir, load_mock_cic_data, load_mock_cic_labels):
    """The CIC Top-20 loader should keep exactly the twenty expected columns."""
    tr_data = call_cic_filter(
        CIC.get_CIC_Top20, monkeypatch, target_dir,
        load_mock_cic_data, load_mock_cic_labels)
    expected = [
        'Flow Duration', 'Total Length of Fwd Packets', 'Total Length of Bwd Packets', 'Fwd Packet Length Max',
        'Bwd Packet Length Max', 'Bwd Packet Length Mean', 'Bwd Packet Length Std', 'Flow IAT Max',
        'Fwd IAT Total', 'Fwd IAT Max', 'Max Packet Length', 'Packet Length Mean', 'Packet Length Std',
        'Packet Length Variance', 'Average Packet Size', 'Avg Bwd Segment Size', 'Subflow Fwd Bytes',
        'Subflow Bwd Bytes', 'Init_Win_bytes_forward', 'Init_Win_bytes_backward'
    ]
    assert_array_equal(tr_data.columns.values, expected)
# Shared helper: patch the CIC IO layer, run a loader, return training data.
def call_cic_filter(filter_function, monkeypatch, target_dir, load_mock_cic_data, load_mock_cic_labels):
    """Monkeypatch file loading, invoke *filter_function*, return its tr_data."""
    def fake_dirname(somepath):
        return target_dir
    def fake_load_df(df_name, df_path):
        if df_name.endswith('data_rand') or df_name.endswith('data_stratified'):
            return load_mock_cic_data
        elif df_name.endswith('labels_rand') or df_name.endswith('labels_stratified'):
            return load_mock_cic_labels
    def fake_join(dirpath, filename):
        if 'json' in filename:
            return target_dir + '/cic_label_wordindex.json'
        return target_dir
    monkeypatch.setattr(dataset_tools, 'load_df', fake_load_df)
    monkeypatch.setattr(os.path, 'dirname', fake_dirname)
    monkeypatch.setattr(os.path, 'join', fake_join)
    tr_data, _, _, _ = filter_function()
    # For now, the training data suffices for the column assertions.
    return tr_data
| [
"numpy.testing.assert_array_equal"
] | [((1185, 1242), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['tr_data.columns.values', 'wanted_fields'], {}), '(tr_data.columns.values, wanted_fields)\n', (1203, 1242), False, 'from numpy.testing import assert_array_equal\n'), ((2343, 2400), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['tr_data.columns.values', 'wanted_fields'], {}), '(tr_data.columns.values, wanted_fields)\n', (2361, 2400), False, 'from numpy.testing import assert_array_equal\n'), ((3164, 3221), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['tr_data.columns.values', 'wanted_fields'], {}), '(tr_data.columns.values, wanted_fields)\n', (3182, 3221), False, 'from numpy.testing import assert_array_equal\n')] |
import numpy as np
# Integer data: a native Python int vs NumPy integer array elements.
i = 10
print(type(i)) # prints the type of i: <class 'int'>
a_i = np.zeros(i, dtype=int) # a NumPy array of i integer zeros
print(type(a_i))
print(type(a_i[0])) # a fixed-width NumPy integer (int64 on most 64-bit platforms -- platform dependent)
# Floating-point literal.
x = 119.0
print(type(x))
# Scientific notation also produces a float.
y = 1.19e2
print(type(y))
z = np.zeros(i, dtype=np.float64)
print(type(z))
print(type(z[0]))
"numpy.zeros"
] | [((101, 123), 'numpy.zeros', 'np.zeros', (['i'], {'dtype': 'int'}), '(i, dtype=int)\n', (109, 123), True, 'import numpy as np\n'), ((334, 363), 'numpy.zeros', 'np.zeros', (['i'], {'dtype': 'np.float64'}), '(i, dtype=np.float64)\n', (342, 363), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#Based on http://cyrille.rossant.net/shaders-opengl/
from __future__ import print_function
import numpy as np
import OpenGL.GL as gl
import OpenGL.arrays.vbo as glvbo
# Vertex shader
VS = """
#version 330
// Attribute variable that contains coordinates of the vertices.
layout(location = 0) in vec2 position;
// Main function, which needs to set `gl_Position`.
void main()
{
// The final position is transformed from a null signal to a sinewave here.
// We pass the position to gl_Position, by converting it into
// a 4D vector. The last coordinate should be 0 when rendering 2D figures.
gl_Position = vec4(position.x, .2 * sin(20 * position.x), 0., 1.);
}
"""
# Fragment shader
FS = """
#version 330
// Output variable of the fragment shader, which is a 4D vector containing the
// RGBA components of the pixel color.
out vec4 out_color;
// Main fragment shader function.
void main()
{
// We simply set the pixel color to yellow.
out_color = vec4(1., 1., 0., 1.);
}
"""
def display(program):
    """Draw the sinewave line strip using *program*.

    Fix: the original rebuilt the 10000-point array and uploaded a brand new
    VBO on every call (its own comment flagged this as too slow for the main
    loop). The vertex data never changes, so it is now created once and
    cached on the function itself; subsequent frames only bind and draw.
    """
    if not hasattr(display, "_vbo"):
        data = np.zeros((10000, 2), dtype=np.float32)
        data[:, 0] = np.linspace(-1., 1., len(data))
        display._count = len(data)
        display._vbo = glvbo.VBO(data)
    vbo = display._vbo
    # clear the buffer
    gl.glClear(gl.GL_COLOR_BUFFER_BIT)
    # bind the VBO and describe its layout to OpenGL
    vbo.bind()
    gl.glEnableVertexAttribArray(0)
    # these vertices contain 2 single precision coordinates
    gl.glVertexAttribPointer(0, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
    gl.glUseProgram(program)
    # draw "count" points from the VBO
    gl.glDrawArrays(gl.GL_LINE_STRIP, 0, display._count)
def compile_vertex_shader(source):
    """Compile *source* as a GLSL vertex shader and return its GL id."""
    shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)
    gl.glShaderSource(shader, source)
    gl.glCompileShader(shader)
    # Surface the driver's log if compilation failed.
    ok = gl.glGetShaderiv(shader, gl.GL_COMPILE_STATUS)
    if not ok:
        raise RuntimeError(gl.glGetShaderInfoLog(shader))
    return shader
def compile_fragment_shader(source):
    """Compile *source* as a GLSL fragment shader and return its GL id."""
    shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
    gl.glShaderSource(shader, source)
    gl.glCompileShader(shader)
    # Surface the driver's log if compilation failed.
    ok = gl.glGetShaderiv(shader, gl.GL_COMPILE_STATUS)
    if not ok:
        raise RuntimeError(gl.glGetShaderInfoLog(shader))
    return shader
def link_shader_program(vertex_shader, fragment_shader):
    """Attach both compiled shaders to a new program, link it and return it."""
    prog = gl.glCreateProgram()
    for shader in (vertex_shader, fragment_shader):
        gl.glAttachShader(prog, shader)
    gl.glLinkProgram(prog)
    # Surface the driver's log if linking failed.
    if not gl.glGetProgramiv(prog, gl.GL_LINK_STATUS):
        raise RuntimeError(gl.glGetProgramInfoLog(prog))
    return prog
def init_shader_program():
    """Verify the OpenGL version, set the clear colour and return a linked
    program built from the module-level VS/FS sources."""
    version_words = gl.glGetString(gl.GL_VERSION).split(b" ")
    opengl_version = version_words[0]
    nums = [int(piece) for piece in opengl_version.split(b".")]
    # The shaders declare `#version 330`, so OpenGL >= 3.3 is required.
    if (nums[0], nums[1]) < (3, 3):
        exit("Requires opengl 3.3 or better, you have {0}".format(opengl_version))
    # background color
    gl.glClearColor(0, 0, 0, 0)
    vs = compile_vertex_shader(VS)
    fs = compile_fragment_shader(FS)
    return link_shader_program(vs, fs)
| [
"OpenGL.GL.glGetShaderiv",
"OpenGL.GL.glGetShaderInfoLog",
"OpenGL.GL.glCreateProgram",
"OpenGL.arrays.vbo.VBO",
"OpenGL.GL.glUseProgram",
"OpenGL.GL.glLinkProgram",
"OpenGL.GL.glCompileShader",
"numpy.zeros",
"OpenGL.GL.glVertexAttribPointer",
"OpenGL.GL.glAttachShader",
"OpenGL.GL.glGetProgram... | [((1182, 1220), 'numpy.zeros', 'np.zeros', (['(10000, 2)'], {'dtype': 'np.float32'}), '((10000, 2), dtype=np.float32)\n', (1190, 1220), True, 'import numpy as np\n'), ((1273, 1288), 'OpenGL.arrays.vbo.VBO', 'glvbo.VBO', (['data'], {}), '(data)\n', (1282, 1288), True, 'import OpenGL.arrays.vbo as glvbo\n'), ((1311, 1345), 'OpenGL.GL.glClear', 'gl.glClear', (['gl.GL_COLOR_BUFFER_BIT'], {}), '(gl.GL_COLOR_BUFFER_BIT)\n', (1321, 1345), True, 'import OpenGL.GL as gl\n'), ((1458, 1489), 'OpenGL.GL.glEnableVertexAttribArray', 'gl.glEnableVertexAttribArray', (['(0)'], {}), '(0)\n', (1486, 1489), True, 'import OpenGL.GL as gl\n'), ((1548, 1613), 'OpenGL.GL.glVertexAttribPointer', 'gl.glVertexAttribPointer', (['(0)', '(2)', 'gl.GL_FLOAT', 'gl.GL_FALSE', '(0)', 'None'], {}), '(0, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)\n', (1572, 1613), True, 'import OpenGL.GL as gl\n'), ((1615, 1639), 'OpenGL.GL.glUseProgram', 'gl.glUseProgram', (['program'], {}), '(program)\n', (1630, 1639), True, 'import OpenGL.GL as gl\n'), ((1822, 1860), 'OpenGL.GL.glCreateShader', 'gl.glCreateShader', (['gl.GL_VERTEX_SHADER'], {}), '(gl.GL_VERTEX_SHADER)\n', (1839, 1860), True, 'import OpenGL.GL as gl\n'), ((1862, 1902), 'OpenGL.GL.glShaderSource', 'gl.glShaderSource', (['vertex_shader', 'source'], {}), '(vertex_shader, source)\n', (1879, 1902), True, 'import OpenGL.GL as gl\n'), ((1904, 1937), 'OpenGL.GL.glCompileShader', 'gl.glCompileShader', (['vertex_shader'], {}), '(vertex_shader)\n', (1922, 1937), True, 'import OpenGL.GL as gl\n'), ((1975, 2028), 'OpenGL.GL.glGetShaderiv', 'gl.glGetShaderiv', (['vertex_shader', 'gl.GL_COMPILE_STATUS'], {}), '(vertex_shader, gl.GL_COMPILE_STATUS)\n', (1991, 2028), True, 'import OpenGL.GL as gl\n'), ((2230, 2270), 'OpenGL.GL.glCreateShader', 'gl.glCreateShader', (['gl.GL_FRAGMENT_SHADER'], {}), '(gl.GL_FRAGMENT_SHADER)\n', (2247, 2270), True, 'import OpenGL.GL as gl\n'), ((2272, 2314), 'OpenGL.GL.glShaderSource', 'gl.glShaderSource', 
(['fragment_shader', 'source'], {}), '(fragment_shader, source)\n', (2289, 2314), True, 'import OpenGL.GL as gl\n'), ((2316, 2351), 'OpenGL.GL.glCompileShader', 'gl.glCompileShader', (['fragment_shader'], {}), '(fragment_shader)\n', (2334, 2351), True, 'import OpenGL.GL as gl\n'), ((2389, 2444), 'OpenGL.GL.glGetShaderiv', 'gl.glGetShaderiv', (['fragment_shader', 'gl.GL_COMPILE_STATUS'], {}), '(fragment_shader, gl.GL_COMPILE_STATUS)\n', (2405, 2444), True, 'import OpenGL.GL as gl\n'), ((2675, 2695), 'OpenGL.GL.glCreateProgram', 'gl.glCreateProgram', ([], {}), '()\n', (2693, 2695), True, 'import OpenGL.GL as gl\n'), ((2697, 2738), 'OpenGL.GL.glAttachShader', 'gl.glAttachShader', (['program', 'vertex_shader'], {}), '(program, vertex_shader)\n', (2714, 2738), True, 'import OpenGL.GL as gl\n'), ((2740, 2783), 'OpenGL.GL.glAttachShader', 'gl.glAttachShader', (['program', 'fragment_shader'], {}), '(program, fragment_shader)\n', (2757, 2783), True, 'import OpenGL.GL as gl\n'), ((2785, 2810), 'OpenGL.GL.glLinkProgram', 'gl.glLinkProgram', (['program'], {}), '(program)\n', (2801, 2810), True, 'import OpenGL.GL as gl\n'), ((2844, 2889), 'OpenGL.GL.glGetProgramiv', 'gl.glGetProgramiv', (['program', 'gl.GL_LINK_STATUS'], {}), '(program, gl.GL_LINK_STATUS)\n', (2861, 2889), True, 'import OpenGL.GL as gl\n'), ((3367, 3394), 'OpenGL.GL.glClearColor', 'gl.glClearColor', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (3382, 3394), True, 'import OpenGL.GL as gl\n'), ((2067, 2103), 'OpenGL.GL.glGetShaderInfoLog', 'gl.glGetShaderInfoLog', (['vertex_shader'], {}), '(vertex_shader)\n', (2088, 2103), True, 'import OpenGL.GL as gl\n'), ((2483, 2521), 'OpenGL.GL.glGetShaderInfoLog', 'gl.glGetShaderInfoLog', (['fragment_shader'], {}), '(fragment_shader)\n', (2504, 2521), True, 'import OpenGL.GL as gl\n'), ((2928, 2959), 'OpenGL.GL.glGetProgramInfoLog', 'gl.glGetProgramInfoLog', (['program'], {}), '(program)\n', (2950, 2959), True, 'import OpenGL.GL as gl\n'), ((3022, 3051), 
'OpenGL.GL.glGetString', 'gl.glGetString', (['gl.GL_VERSION'], {}), '(gl.GL_VERSION)\n', (3036, 3051), True, 'import OpenGL.GL as gl\n')] |
"""A regressor based on a MLP model."""
from dowel import tabular
import numpy as np
import tensorflow as tf
from garage.tf.misc import tensor_utils
from garage.tf.models import NormalizedInputMLPModel
from garage.tf.optimizers import LbfgsOptimizer
from garage.tf.regressors.regressor import Regressor
class ContinuousMLPRegressor(Regressor):
    """Fits continuously-valued data to an MLP model.
    Args:
        input_shape (tuple[int]): Input shape of the training data.
        output_dim (int): Output dimension of the model.
        name (str): Model name, also the variable scope.
        hidden_sizes (list[int]): Output dimension of dense layer(s) for
            the MLP for mean. For example, (32, 32) means the MLP consists
            of two hidden layers, each with 32 hidden units.
        hidden_nonlinearity (Callable): Activation function for intermediate
            dense layer(s). It should return a tf.Tensor. Set it to
            None to maintain a linear activation.
        hidden_w_init (Callable): Initializer function for the weight
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        hidden_b_init (Callable): Initializer function for the bias
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        output_nonlinearity (Callable): Activation function for output dense
            layer. It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
        output_w_init (Callable): Initializer function for the weight
            of output dense layer(s). The function should return a
            tf.Tensor.
        output_b_init (Callable): Initializer function for the bias
            of output dense layer(s). The function should return a
            tf.Tensor.
        optimizer (garage.tf.Optimizer): Optimizer for minimizing the negative
            log-likelihood.
        optimizer_args (dict): Arguments for the optimizer. Default is None,
            which means no arguments.
        normalize_inputs (bool): Bool for normalizing inputs or not.
    """
    def __init__(self,
                 input_shape,
                 output_dim,
                 name='ContinuousMLPRegressor',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.tanh,
                 hidden_w_init=tf.initializers.glorot_uniform(),
                 hidden_b_init=tf.zeros_initializer(),
                 output_nonlinearity=None,
                 output_w_init=tf.initializers.glorot_uniform(),
                 output_b_init=tf.zeros_initializer(),
                 optimizer=None,
                 optimizer_args=None,
                 normalize_inputs=True):
        super().__init__(input_shape, output_dim, name)
        self._normalize_inputs = normalize_inputs
        # The optimizer is constructed inside this regressor's variable
        # scope so that any variables it creates are namespaced under it.
        with tf.compat.v1.variable_scope(self._name, reuse=False) as vs:
            self._variable_scope = vs
            if optimizer_args is None:
                optimizer_args = dict()
            if optimizer is None:
                # Default to L-BFGS when no optimizer is supplied.
                optimizer = LbfgsOptimizer(**optimizer_args)
            else:
                optimizer = optimizer(**optimizer_args)
            self._optimizer = optimizer
        self.model = NormalizedInputMLPModel(
            input_shape=input_shape,
            output_dim=output_dim,
            hidden_sizes=hidden_sizes,
            hidden_nonlinearity=hidden_nonlinearity,
            hidden_w_init=hidden_w_init,
            hidden_b_init=hidden_b_init,
            output_nonlinearity=output_nonlinearity,
            output_w_init=output_w_init,
            output_b_init=output_b_init)
        self._initialize()
    def _initialize(self):
        # Build the TF graph (placeholders, model network, loss and the
        # compiled prediction function).  Called from __init__ and again
        # from __setstate__, because the compiled function is dropped on
        # pickling (see __getstate__).
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, ) +
                                             self._input_shape)
        with tf.compat.v1.variable_scope(self._name) as vs:
            self._variable_scope = vs
            self.model.build(input_var)
        ys_var = tf.compat.v1.placeholder(dtype=tf.float32,
                                          name='ys',
                                          shape=(None, self._output_dim))
        y_hat = self.model.networks['default'].y_hat
        # Mean-squared-error objective between predictions and labels.
        loss = tf.reduce_mean(tf.square(y_hat - ys_var))
        self._f_predict = tensor_utils.compile_function([input_var], y_hat)
        optimizer_args = dict(
            loss=loss,
            target=self,
            network_outputs=[ys_var],
        )
        optimizer_args['inputs'] = [input_var, ys_var]
        with tf.name_scope('update_opt'):
            self._optimizer.update_opt(**optimizer_args)
    def fit(self, xs, ys):
        """Fit with input data xs and label ys.
        Args:
            xs (numpy.ndarray): Input data.
            ys (numpy.ndarray): Output labels.
        """
        if self._normalize_inputs:
            # recompute normalizing constants for inputs
            self.model.networks['default'].x_mean.load(
                np.mean(xs, axis=0, keepdims=True))
            self.model.networks['default'].x_std.load(
                np.std(xs, axis=0, keepdims=True) + 1e-8)
        inputs = [xs, ys]
        # Log the loss before and after the optimization step, plus the
        # improvement, under this regressor's name.
        loss_before = self._optimizer.loss(inputs)
        tabular.record('{}/LossBefore'.format(self._name), loss_before)
        self._optimizer.optimize(inputs)
        loss_after = self._optimizer.loss(inputs)
        tabular.record('{}/LossAfter'.format(self._name), loss_after)
        tabular.record('{}/dLoss'.format(self._name), loss_before - loss_after)
    def predict(self, xs):
        """Predict y based on input xs.
        Args:
            xs (numpy.ndarray): Input data.
        Return:
            numpy.ndarray: The predicted ys.
        """
        return self._f_predict(xs)
    def predict_sym(self, xs, name=None):
        """Build a symbolic graph of the model prediction.
        Args:
            xs (tf.Tensor): Input tf.Tensor for the input data.
            name (str): Name of the new graph.
        Return:
            tf.Tensor: Output of the symbolic prediction graph.
        """
        # Reuse this regressor's variable scope so the new graph shares
        # the trained parameters.
        with tf.compat.v1.variable_scope(self._variable_scope):
            y_hat, _, _ = self.model.build(xs, name=name)
        return y_hat
    @property
    def recurrent(self):
        """bool: If this module has a hidden state."""
        return False
    @property
    def vectorized(self):
        """bool: If this module supports vectorization input."""
        return True
    def __getstate__(self):
        """Object.__getstate__.
        Returns:
            dict: the state to be pickled for the instance.
        """
        new_dict = super().__getstate__()
        # The compiled prediction function wraps TF graph objects and is
        # not picklable; it is rebuilt by _initialize() in __setstate__.
        del new_dict['_f_predict']
        return new_dict
    def __setstate__(self, state):
        """Object.__setstate__.
        Args:
            state (dict): unpickled state.
        """
        super().__setstate__(state)
        self._initialize()
| [
"garage.tf.misc.tensor_utils.compile_function",
"garage.tf.optimizers.LbfgsOptimizer",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.placeholder",
"numpy.std",
"garage.tf.models.NormalizedInputMLPModel",
"numpy.mean",
"tensorflow.initializers.glorot_uniform",
"tensorflow.square",
"t... | [((2436, 2468), 'tensorflow.initializers.glorot_uniform', 'tf.initializers.glorot_uniform', ([], {}), '()\n', (2466, 2468), True, 'import tensorflow as tf\n'), ((2502, 2524), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (2522, 2524), True, 'import tensorflow as tf\n'), ((2602, 2634), 'tensorflow.initializers.glorot_uniform', 'tf.initializers.glorot_uniform', ([], {}), '()\n', (2632, 2634), True, 'import tensorflow as tf\n'), ((2668, 2690), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (2688, 2690), True, 'import tensorflow as tf\n'), ((3349, 3661), 'garage.tf.models.NormalizedInputMLPModel', 'NormalizedInputMLPModel', ([], {'input_shape': 'input_shape', 'output_dim': 'output_dim', 'hidden_sizes': 'hidden_sizes', 'hidden_nonlinearity': 'hidden_nonlinearity', 'hidden_w_init': 'hidden_w_init', 'hidden_b_init': 'hidden_b_init', 'output_nonlinearity': 'output_nonlinearity', 'output_w_init': 'output_w_init', 'output_b_init': 'output_b_init'}), '(input_shape=input_shape, output_dim=output_dim,\n hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity, output_w_init=output_w_init,\n output_b_init=output_b_init)\n', (3372, 3661), False, 'from garage.tf.models import NormalizedInputMLPModel\n'), ((3845, 3916), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '((None,) + self._input_shape)'}), '(tf.float32, shape=(None,) + self._input_shape)\n', (3869, 3916), True, 'import tensorflow as tf\n'), ((2931, 2983), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._name'], {'reuse': '(False)'}), '(self._name, reuse=False)\n', (2958, 2983), True, 'import tensorflow as tf\n'), ((4026, 4065), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._name'], {}), '(self._name)\n', (4053, 4065), True, 'import tensorflow 
as tf\n'), ((4175, 4265), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32', 'name': '"""ys"""', 'shape': '(None, self._output_dim)'}), "(dtype=tf.float32, name='ys', shape=(None, self.\n _output_dim))\n", (4199, 4265), True, 'import tensorflow as tf\n'), ((4510, 4559), 'garage.tf.misc.tensor_utils.compile_function', 'tensor_utils.compile_function', (['[input_var]', 'y_hat'], {}), '([input_var], y_hat)\n', (4539, 4559), False, 'from garage.tf.misc import tensor_utils\n'), ((6404, 6453), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._variable_scope'], {}), '(self._variable_scope)\n', (6431, 6453), True, 'import tensorflow as tf\n'), ((3175, 3207), 'garage.tf.optimizers.LbfgsOptimizer', 'LbfgsOptimizer', ([], {}), '(**optimizer_args)\n', (3189, 3207), False, 'from garage.tf.optimizers import LbfgsOptimizer\n'), ((4450, 4475), 'tensorflow.square', 'tf.square', (['(y_hat - ys_var)'], {}), '(y_hat - ys_var)\n', (4459, 4475), True, 'import tensorflow as tf\n'), ((4794, 4821), 'tensorflow.name_scope', 'tf.name_scope', (['"""update_opt"""'], {}), "('update_opt')\n", (4807, 4821), True, 'import tensorflow as tf\n'), ((5257, 5291), 'numpy.mean', 'np.mean', (['xs'], {'axis': '(0)', 'keepdims': '(True)'}), '(xs, axis=0, keepdims=True)\n', (5264, 5291), True, 'import numpy as np\n'), ((5366, 5399), 'numpy.std', 'np.std', (['xs'], {'axis': '(0)', 'keepdims': '(True)'}), '(xs, axis=0, keepdims=True)\n', (5372, 5399), True, 'import numpy as np\n')] |
import os
import tarfile
import tempfile
import shutil
from collections import namedtuple, OrderedDict
import h5py
import numpy
from scipy.io import loadmat
from six import iteritems
from six.moves import range, zip
from PIL import Image
from fuel.converters.base import fill_hdf5_file, check_exists, progress_bar
from fuel.datasets import H5PYDataset
# File names for the two SVHN distributions: format 1 ships the
# full-numbers images as one tarball per split, format 2 ships the
# cropped 32x32 digits as one MATLAB file per split.
FORMAT_1_FILES = ['{}.tar.gz'.format(s) for s in ['train', 'test', 'extra']]
FORMAT_1_TRAIN_FILE, FORMAT_1_TEST_FILE, FORMAT_1_EXTRA_FILE = FORMAT_1_FILES
FORMAT_2_FILES = ['{}_32x32.mat'.format(s) for s in ['train', 'test', 'extra']]
FORMAT_2_TRAIN_FILE, FORMAT_2_TEST_FILE, FORMAT_2_EXTRA_FILE = FORMAT_2_FILES
@check_exists(required_files=FORMAT_1_FILES)
def convert_svhn_format_1(directory, output_directory,
                          output_filename='svhn_format_1.hdf5'):
    """Converts the SVHN dataset (format 1) to HDF5.
    This method assumes the existence of the files
    `{train,test,extra}.tar.gz`, which are accessible through the
    official website [SVHNSITE].
    .. [SVHNSITE] http://ufldl.stanford.edu/housenumbers/
    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'svhn_format_1.hdf5'.
    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.
    """
    try:
        output_path = os.path.join(output_directory, output_filename)
        h5file = h5py.File(output_path, mode='w')
        TMPDIR = tempfile.mkdtemp()
        # Every image has three channels (RGB) and variable height and width.
        # It features a variable number of bounding boxes that identify the
        # location and label of digits. The bounding box location is specified
        # using the x and y coordinates of its top left corner along with its
        # width and height.
        BoundingBoxes = namedtuple(
            'BoundingBoxes', ['labels', 'heights', 'widths', 'lefts', 'tops'])
        sources = ('features',) + tuple('bbox_{}'.format(field)
                                        for field in BoundingBoxes._fields)
        source_dtypes = dict([(source, 'uint8') for source in sources[:2]] +
                             [(source, 'uint16') for source in sources[2:]])
        source_axis_labels = {
            'features': ('channel', 'height', 'width'),
            'bbox_labels': ('bounding_box', 'index'),
            'bbox_heights': ('bounding_box', 'height'),
            'bbox_widths': ('bounding_box', 'width'),
            'bbox_lefts': ('bounding_box', 'x'),
            'bbox_tops': ('bounding_box', 'y')}
        # The dataset is split into three sets: the training set, the test set
        # and an extra set of examples that are somewhat less difficult but
        # can be used as extra training data. These sets are stored separately
        # as 'train.tar.gz', 'test.tar.gz' and 'extra.tar.gz'. Each file
        # contains a directory named after the split it stores. The examples
        # are stored in that directory as PNG images. The directory also
        # contains a 'digitStruct.mat' file with all the bounding box and
        # label information.
        splits = ('train', 'test', 'extra')
        file_paths = dict(zip(splits, FORMAT_1_FILES))
        for split, path in file_paths.items():
            file_paths[split] = os.path.join(directory, path)
        digit_struct_paths = dict(
            [(split, os.path.join(TMPDIR, split, 'digitStruct.mat'))
             for split in splits])
        # We first extract the data files in a temporary directory. While doing
        # that, we also count the number of examples for each split. Files are
        # extracted individually, which allows to display a progress bar. Since
        # the splits will be concatenated in the HDF5 file, we also compute the
        # start and stop intervals of each split within the concatenated array.
        def extract_tar(split):
            # Extract one split's tarball into TMPDIR member by member,
            # returning the number of PNG examples it contains.
            with tarfile.open(file_paths[split], 'r:gz') as f:
                members = f.getmembers()
                num_examples = sum(1 for m in members if '.png' in m.name)
                progress_bar_context = progress_bar(
                    name='{} file'.format(split), maxval=len(members),
                    prefix='Extracting')
                with progress_bar_context as bar:
                    for i, member in enumerate(members):
                        f.extract(member, path=TMPDIR)
                        bar.update(i)
            return num_examples
        examples_per_split = OrderedDict(
            [(split, extract_tar(split)) for split in splits])
        cumulative_num_examples = numpy.cumsum(
            [0] + list(examples_per_split.values()))
        num_examples = cumulative_num_examples[-1]
        intervals = zip(cumulative_num_examples[:-1],
                        cumulative_num_examples[1:])
        split_intervals = dict(zip(splits, intervals))
        # The start and stop indices are used to create a split dict that will
        # be parsed into the split array required by the H5PYDataset interface.
        # The split dict is organized as follows:
        #
        #     dict(split -> dict(source -> (start, stop)))
        #
        split_dict = OrderedDict([
            (split, OrderedDict([(s, split_intervals[split])
                                 for s in sources]))
            for split in splits])
        h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
        # We then prepare the HDF5 dataset. This involves creating datasets to
        # store data sources and datasets to store auxiliary information
        # (namely the shapes for variable-length axes, and labels to indicate
        # what these variable-length axes represent).
        def make_vlen_dataset(source):
            # Create a variable-length 1D dataset
            dtype = h5py.special_dtype(vlen=numpy.dtype(source_dtypes[source]))
            dataset = h5file.create_dataset(
                source, (num_examples,), dtype=dtype)
            # Create a dataset to store variable-length shapes.
            axis_labels = source_axis_labels[source]
            dataset_shapes = h5file.create_dataset(
                '{}_shapes'.format(source), (num_examples, len(axis_labels)),
                dtype='uint16')
            # Create a dataset to store labels for variable-length axes.
            dataset_vlen_axis_labels = h5file.create_dataset(
                '{}_vlen_axis_labels'.format(source), (len(axis_labels),),
                dtype='S{}'.format(
                    numpy.max([len(label) for label in axis_labels])))
            # Fill variable-length axis labels
            dataset_vlen_axis_labels[...] = [
                label.encode('utf8') for label in axis_labels]
            # Attach auxiliary datasets as dimension scales of the
            # variable-length 1D dataset. This is in accordance with the
            # H5PYDataset interface.
            # NOTE(review): `dims.create_scale` is deprecated in favor of
            # `h5py.Dataset.make_scale` and removed in h5py 3.x -- this
            # code requires h5py < 3; confirm the pinned dependency.
            dataset.dims.create_scale(dataset_shapes, 'shapes')
            dataset.dims[0].attach_scale(dataset_shapes)
            dataset.dims.create_scale(dataset_vlen_axis_labels, 'shape_labels')
            dataset.dims[0].attach_scale(dataset_vlen_axis_labels)
            # Tag fixed-length axis with its label
            dataset.dims[0].label = 'batch'
        for source in sources:
            make_vlen_dataset(source)
        # The "fun" part begins: we extract the bounding box and label
        # information contained in 'digitStruct.mat'. This is a version 7.3
        # Matlab file, which uses HDF5 under the hood, albeit with a very
        # convoluted layout.
        def get_boxes(split):
            boxes = []
            with h5py.File(digit_struct_paths[split], 'r') as f:
                bar_name = '{} digitStruct'.format(split)
                bar_maxval = examples_per_split[split]
                with progress_bar(bar_name, bar_maxval) as bar:
                    for image_number in range(examples_per_split[split]):
                        # The 'digitStruct' group is the main group of the HDF5
                        # file. It contains two datasets: 'bbox' and 'name'.
                        # The 'name' dataset isn't of interest to us, as it
                        # stores file names and there's already a one-to-one
                        # mapping between row numbers and image names (e.g.
                        # row 0 corresponds to '1.png', row 1 corresponds to
                        # '2.png', and so on).
                        main_group = f['digitStruct']
                        # The 'bbox' dataset contains the bounding box and
                        # label information we're after. It has as many rows
                        # as there are images, and one column. Elements of the
                        # 'bbox' dataset are object references that point to
                        # (yet another) group that contains the information
                        # for the corresponding image.
                        image_reference = main_group['bbox'][image_number, 0]
                        # There are five datasets contained in that group:
                        # 'label', 'height', 'width', 'left' and 'top'. Each of
                        # those datasets has as many rows as there are bounding
                        # boxes in the corresponding image, and one column.
                        def get_dataset(name):
                            return main_group[image_reference][name][:, 0]
                        names = ('label', 'height', 'width', 'left', 'top')
                        datasets = dict(
                            [(name, get_dataset(name)) for name in names])
                        # If there is only one bounding box, the information is
                        # stored directly in the datasets. If there are
                        # multiple bounding boxes, elements of those datasets
                        # are object references pointing to 1x1 datasets that
                        # store the information (fortunately, it's the last
                        # hop we need to make).
                        def get_elements(dataset):
                            if len(dataset) > 1:
                                return [int(main_group[reference][0, 0])
                                        for reference in dataset]
                            else:
                                return [int(dataset[0])]
                        # Names are pluralized in the BoundingBox named tuple.
                        kwargs = dict(
                            [(name + 's', get_elements(dataset))
                             for name, dataset in iteritems(datasets)])
                        boxes.append(BoundingBoxes(**kwargs))
                        if bar:
                            bar.update(image_number)
            return boxes
        split_boxes = dict([(split, get_boxes(split)) for split in splits])
        # The final step is to fill the HDF5 file.
        def fill_split(split, bar=None):
            # Copy one split's images and bounding-box data into the HDF5
            # datasets, at offsets given by split_intervals.
            for image_number in range(examples_per_split[split]):
                image_path = os.path.join(
                    TMPDIR, split, '{}.png'.format(image_number + 1))
                image = numpy.asarray(
                    Image.open(image_path)).transpose(2, 0, 1)
                bounding_boxes = split_boxes[split][image_number]
                num_boxes = len(bounding_boxes.labels)
                index = image_number + split_intervals[split][0]
                h5file['features'][index] = image.flatten()
                h5file['features'].dims[0]['shapes'][index] = image.shape
                for field in BoundingBoxes._fields:
                    name = 'bbox_{}'.format(field)
                    h5file[name][index] = getattr(bounding_boxes, field)
                    h5file[name].dims[0]['shapes'][index] = [num_boxes, 1]
                # Replace label '10' with '0'.
                labels = h5file['bbox_labels'][index]
                labels[labels == 10] = 0
                h5file['bbox_labels'][index] = labels
                # Flush periodically to bound the amount of buffered data.
                if image_number % 1000 == 0:
                    h5file.flush()
                if bar:
                    bar.update(index)
        with progress_bar('SVHN format 1', num_examples) as bar:
            for split in splits:
                fill_split(split, bar=bar)
    finally:
        # NOTE(review): if an early statement in the try block raised
        # (e.g. h5py.File or tempfile.mkdtemp), TMPDIR or h5file may be
        # unbound here and this cleanup itself raises NameError --
        # consider binding them before entering the try block.
        if os.path.isdir(TMPDIR):
            shutil.rmtree(TMPDIR)
        h5file.flush()
        h5file.close()
    return (output_path,)
def _load_format_2_matfile(directory, filename):
    """Load one SVHN format-2 ``{split}_32x32.mat`` file.

    The .mat file stores images as (height, width, channel, batch); they
    are transposed here to (batch, channel, height, width). SVHN encodes
    the digit '0' with label 10, which is remapped to 0 in place.

    Parameters
    ----------
    directory : str
        Directory in which the .mat file resides.
    filename : str
        Name of the .mat file to load.

    Returns
    -------
    tuple of numpy.ndarray
        ``(features, targets)`` arrays for the split.
    """
    split_set = loadmat(os.path.join(directory, filename))
    features = split_set['X'].transpose(3, 2, 0, 1)
    targets = split_set['y']
    targets[targets == 10] = 0
    return features, targets


@check_exists(required_files=FORMAT_2_FILES)
def convert_svhn_format_2(directory, output_directory,
                          output_filename='svhn_format_2.hdf5'):
    """Converts the SVHN dataset (format 2) to HDF5.
    This method assumes the existence of the files
    `{train,test,extra}_32x32.mat`, which are accessible through the
    official website [SVHNSITE].
    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'svhn_format_2.hdf5'.
    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.
    """
    output_path = os.path.join(output_directory, output_filename)
    h5file = h5py.File(output_path, mode='w')
    # The three splits are loaded identically; the shared logic lives in
    # _load_format_2_matfile.
    train_features, train_targets = _load_format_2_matfile(
        directory, FORMAT_2_TRAIN_FILE)
    test_features, test_targets = _load_format_2_matfile(
        directory, FORMAT_2_TEST_FILE)
    extra_features, extra_targets = _load_format_2_matfile(
        directory, FORMAT_2_EXTRA_FILE)
    data = (('train', 'features', train_features),
            ('test', 'features', test_features),
            ('extra', 'features', extra_features),
            ('train', 'targets', train_targets),
            ('test', 'targets', test_targets),
            ('extra', 'targets', extra_targets))
    fill_hdf5_file(h5file, data)
    # Tag the axes so downstream consumers know their meaning.
    for i, label in enumerate(('batch', 'channel', 'height', 'width')):
        h5file['features'].dims[i].label = label
    for i, label in enumerate(('batch', 'index')):
        h5file['targets'].dims[i].label = label
    h5file.flush()
    h5file.close()
    return (output_path,)
def convert_svhn(which_format, directory, output_directory,
                 output_filename=None):
    """Convert the SVHN dataset [SVHN] to an HDF5 dataset.

    Dispatches to the format-specific converter: format 1 holds the
    full-numbers images, format 2 the cropped 32x32 digits. The result is
    compatible with :class:`fuel.datasets.SVHN` and is saved as
    'svhn_format_1.hdf5' or 'svhn_format_2.hdf5' unless another file name
    is given.

    .. [SVHN] <NAME>, <NAME>, <NAME>, <NAME>,
       <NAME>, <NAME>. *Reading Digits in Natural Images with
       Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
       and Unsupervised Feature Learning, 2011.

    Parameters
    ----------
    which_format : int
        Either 1 or 2. Determines which format (format 1: full numbers
        or format 2: cropped digits) to convert.
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'svhn_format_1.hdf5' or
        'svhn_format_2.hdf5', depending on `which_format`.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.
    """
    # Validate before doing anything else.
    if which_format not in (1, 2):
        raise ValueError("SVHN format needs to be either 1 or 2.")
    filename = output_filename or 'svhn_format_{}.hdf5'.format(which_format)
    converter = (convert_svhn_format_1
                 if which_format == 1 else convert_svhn_format_2)
    return converter(directory, output_directory, filename)
def fill_subparser(subparser):
    """Register the `svhn` command's arguments on *subparser*.

    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `svhn` command.

    Returns
    -------
    callable
        The conversion entry point, :func:`convert_svhn`.
    """
    subparser.add_argument(
        "which_format", type=int, choices=(1, 2),
        help="which dataset format")
    return convert_svhn
| [
"h5py.File",
"collections.OrderedDict",
"fuel.converters.base.check_exists",
"six.moves.range",
"fuel.converters.base.fill_hdf5_file",
"os.path.isdir",
"numpy.dtype",
"PIL.Image.open",
"six.moves.zip",
"tempfile.mkdtemp",
"fuel.converters.base.progress_bar",
"collections.namedtuple",
"tarfil... | [((672, 715), 'fuel.converters.base.check_exists', 'check_exists', ([], {'required_files': 'FORMAT_1_FILES'}), '(required_files=FORMAT_1_FILES)\n', (684, 715), False, 'from fuel.converters.base import fill_hdf5_file, check_exists, progress_bar\n'), ((12767, 12810), 'fuel.converters.base.check_exists', 'check_exists', ([], {'required_files': 'FORMAT_2_FILES'}), '(required_files=FORMAT_2_FILES)\n', (12779, 12810), False, 'from fuel.converters.base import fill_hdf5_file, check_exists, progress_bar\n'), ((13585, 13632), 'os.path.join', 'os.path.join', (['output_directory', 'output_filename'], {}), '(output_directory, output_filename)\n', (13597, 13632), False, 'import os\n'), ((13646, 13678), 'h5py.File', 'h5py.File', (['output_path'], {'mode': '"""w"""'}), "(output_path, mode='w')\n", (13655, 13678), False, 'import h5py\n'), ((14593, 14621), 'fuel.converters.base.fill_hdf5_file', 'fill_hdf5_file', (['h5file', 'data'], {}), '(h5file, data)\n', (14607, 14621), False, 'from fuel.converters.base import fill_hdf5_file, check_exists, progress_bar\n'), ((1559, 1606), 'os.path.join', 'os.path.join', (['output_directory', 'output_filename'], {}), '(output_directory, output_filename)\n', (1571, 1606), False, 'import os\n'), ((1624, 1656), 'h5py.File', 'h5py.File', (['output_path'], {'mode': '"""w"""'}), "(output_path, mode='w')\n", (1633, 1656), False, 'import h5py\n'), ((1674, 1692), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1690, 1692), False, 'import tempfile\n'), ((2057, 2134), 'collections.namedtuple', 'namedtuple', (['"""BoundingBoxes"""', "['labels', 'heights', 'widths', 'lefts', 'tops']"], {}), "('BoundingBoxes', ['labels', 'heights', 'widths', 'lefts', 'tops'])\n", (2067, 2134), False, 'from collections import namedtuple, OrderedDict\n'), ((4984, 5046), 'six.moves.zip', 'zip', (['cumulative_num_examples[:-1]', 'cumulative_num_examples[1:]'], {}), '(cumulative_num_examples[:-1], cumulative_num_examples[1:])\n', (4987, 5046), False, 'from 
six.moves import range, zip\n'), ((5630, 5672), 'fuel.datasets.H5PYDataset.create_split_array', 'H5PYDataset.create_split_array', (['split_dict'], {}), '(split_dict)\n', (5660, 5672), False, 'from fuel.datasets import H5PYDataset\n'), ((12634, 12655), 'os.path.isdir', 'os.path.isdir', (['TMPDIR'], {}), '(TMPDIR)\n', (12647, 12655), False, 'import os\n'), ((13704, 13748), 'os.path.join', 'os.path.join', (['directory', 'FORMAT_2_TRAIN_FILE'], {}), '(directory, FORMAT_2_TRAIN_FILE)\n', (13716, 13748), False, 'import os\n'), ((13910, 13953), 'os.path.join', 'os.path.join', (['directory', 'FORMAT_2_TEST_FILE'], {}), '(directory, FORMAT_2_TEST_FILE)\n', (13922, 13953), False, 'import os\n'), ((14110, 14154), 'os.path.join', 'os.path.join', (['directory', 'FORMAT_2_EXTRA_FILE'], {}), '(directory, FORMAT_2_EXTRA_FILE)\n', (14122, 14154), False, 'import os\n'), ((3421, 3448), 'six.moves.zip', 'zip', (['splits', 'FORMAT_1_FILES'], {}), '(splits, FORMAT_1_FILES)\n', (3424, 3448), False, 'from six.moves import range, zip\n'), ((3529, 3558), 'os.path.join', 'os.path.join', (['directory', 'path'], {}), '(directory, path)\n', (3541, 3558), False, 'import os\n'), ((5102, 5124), 'six.moves.zip', 'zip', (['splits', 'intervals'], {}), '(splits, intervals)\n', (5105, 5124), False, 'from six.moves import range, zip\n'), ((11307, 11339), 'six.moves.range', 'range', (['examples_per_split[split]'], {}), '(examples_per_split[split])\n', (11312, 11339), False, 'from six.moves import range, zip\n'), ((12482, 12525), 'fuel.converters.base.progress_bar', 'progress_bar', (['"""SVHN format 1"""', 'num_examples'], {}), "('SVHN format 1', num_examples)\n", (12494, 12525), False, 'from fuel.converters.base import fill_hdf5_file, check_exists, progress_bar\n'), ((12669, 12690), 'shutil.rmtree', 'shutil.rmtree', (['TMPDIR'], {}), '(TMPDIR)\n', (12682, 12690), False, 'import shutil\n'), ((4147, 4186), 'tarfile.open', 'tarfile.open', (['file_paths[split]', '"""r:gz"""'], {}), "(file_paths[split], 
'r:gz')\n", (4159, 4186), False, 'import tarfile\n'), ((7909, 7950), 'h5py.File', 'h5py.File', (['digit_struct_paths[split]', '"""r"""'], {}), "(digit_struct_paths[split], 'r')\n", (7918, 7950), False, 'import h5py\n'), ((3615, 3661), 'os.path.join', 'os.path.join', (['TMPDIR', 'split', '"""digitStruct.mat"""'], {}), "(TMPDIR, split, 'digitStruct.mat')\n", (3627, 3661), False, 'import os\n'), ((5470, 5529), 'collections.OrderedDict', 'OrderedDict', (['[(s, split_intervals[split]) for s in sources]'], {}), '([(s, split_intervals[split]) for s in sources])\n', (5481, 5529), False, 'from collections import namedtuple, OrderedDict\n'), ((6091, 6125), 'numpy.dtype', 'numpy.dtype', (['source_dtypes[source]'], {}), '(source_dtypes[source])\n', (6102, 6125), False, 'import numpy\n'), ((8091, 8125), 'fuel.converters.base.progress_bar', 'progress_bar', (['bar_name', 'bar_maxval'], {}), '(bar_name, bar_maxval)\n', (8103, 8125), False, 'from fuel.converters.base import fill_hdf5_file, check_exists, progress_bar\n'), ((8174, 8206), 'six.moves.range', 'range', (['examples_per_split[split]'], {}), '(examples_per_split[split])\n', (8179, 8206), False, 'from six.moves import range, zip\n'), ((11513, 11535), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (11523, 11535), False, 'from PIL import Image\n'), ((10911, 10930), 'six.iteritems', 'iteritems', (['datasets'], {}), '(datasets)\n', (10920, 10930), False, 'from six import iteritems\n')] |
from itertools import chain
import numpy as np
import quaternion
from scipy import linalg as la
from spherical_functions import Wigner_D_matrices
class Rotator:
    """Rotate Hamiltonian/overlap matrix blocks between orientations.

    Per angular momentum ``l`` this builds the unitary change of basis
    ``U`` between complex and real spherical harmonics, and combines it
    with Wigner D-matrices to obtain the block-diagonal rotation applied
    in :meth:`transform`.

    Args:
        basis: 3-D integer array describing the orbital basis; column 3
            of each row holds the angular momentum l and column 2 flags
            active rows.  (Meaning of the other columns inferred from
            usage -- confirm against the caller.)
        phase: per-axis signs applied to the rotation vector; subclasses
            override the default for code-specific conventions.  The
            shared default array is never mutated here, so the mutable
            default is safe.
    """

    def __init__(self, basis, phase=np.array([1., 1., 1.])):
        self.basis = basis
        self.phase = phase
        # Largest angular momentum appearing anywhere in the basis.
        self.lmax = self.basis[:, :, 3].max()
        self.lgroups = []
        self.lvec = []
        for elmnt in self.basis:
            # l values of the active rows; a shell of angular momentum l
            # occupies 2l+1 consecutive rows, so step over each shell.
            ls = elmnt[elmnt[:, 2] > 0, 3]
            elmt_lvec = []
            row = 0
            while row < ls.shape[0]:
                l = ls[row]
                elmt_lvec.append(l)
                row += 2 * l + 1
            # `np.int` was removed in NumPy 1.24; the builtin `int` is the
            # documented, behaviorally identical replacement.
            self.lvec.append(np.array(elmt_lvec, dtype=int))
        # Split points between the flattened (2l+1)^2 D-matrix blocks.
        self.lsplit = np.cumsum(
            (2 * np.arange(0., self.lmax, dtype=int) + 1) ** 2)
        self._calc_U()
        self._calc_P()

    def _calc_P(self):
        # Identity permutation for every l.  Subclasses override this to
        # reorder the m components for code-specific conventions.
        self.Ps = []
        for l in range(self.lmax + 1):
            sidx = np.arange(0, 2 * l + 1, 1, dtype=int)
            self.Ps.append(sidx)

    def _calc_U(self):
        """Compute the U transformation matrix."""
        self.Us = []
        for l in range(self.lmax + 1):
            # `np.complex` was removed in NumPy 1.24 -> builtin `complex`.
            U = np.zeros((2 * l + 1, 2 * l + 1), dtype=complex)
            for m in range(-l, l + 1):
                for n in range(-l, l + 1):
                    U[m + l, n + l] = self.Umn(l, m, n)
            self.Us.append(U)

    def Umn(self, l, m, n):
        """Element (m, n) of the complex->real spherical-harmonic map."""
        if n < 0:
            term1 = 1j
        elif n == 0:
            term1 = np.sqrt(2) / 2
        else:
            term1 = 1
        if (m > 0) and (n < 0) and (n % 2 == 0):
            term2 = -1
        elif (m > 0) and (n > 0) and (n % 2 != 0):
            term2 = -1
        else:
            term2 = 1
        # Nonzero only on the (anti-)diagonal, i.e. when m == +-n.
        return term1 * term2 / np.sqrt(2) * ((m == n) + (m == -n))

    def _calc_UDUs(self, q):
        """Return the real-basis Wigner rotation blocks for quaternion q,
        one per l, with the permutation from `_calc_P` applied."""
        Ds = Wigner_D_matrices(q, 0, self.lmax)
        Ds = np.split(Ds, self.lsplit)
        UDUs = []
        for U, pidx, D in zip(self.Us, self.Ps, Ds):
            D = D.reshape(U.shape)
            # Conjugate-transpose sandwich; the result is real up to
            # numerical noise, so the real part is taken.
            udu = np.real(U.conjugate().T @ D @ U)
            udu = udu[np.ix_(pidx, pidx)]
            UDUs.append(udu)
        # The blocks have different shapes per l; NumPy >= 1.24 refuses to
        # build an array from ragged sequences implicitly, so construct an
        # object array explicitly (fancy indexing in `transform` still
        # works on it).
        blocks = np.empty(len(UDUs), dtype=object)
        for i, udu in enumerate(UDUs):
            blocks[i] = udu
        return blocks

    def transform(self, R, H, S, numbers, positions=None, forces=None):
        """Rotate H, S (and positions/forces if given) by the rotation R.

        Args:
            R: 3x3 rotation matrix.
            H, S: square matrices in the orbital basis.
            numbers: per-atom indices into `self.lvec` -- presumably
                element indices; verify against the caller.
            positions: per-atom coordinates, rotated as ``positions @ R.T``.
            forces: optional per-atom forces, rotated the same way.

        Returns:
            ``(H', S', positions')`` or ``(H', S', positions', forces')``.
        """
        q = quaternion.from_rotation_matrix(R)
        # Apply the code-specific sign convention to the rotation vector.
        vR = quaternion.as_rotation_vector(q) * self.phase
        q = quaternion.from_rotation_vector(vR)
        UDUs = self._calc_UDUs(q)
        # One rotation block per shell of every atom, in basis order.
        M = chain(*[UDUs[self.lvec[z]] for z in numbers])
        M = la.block_diag(*M)
        H = M.T @ H @ M
        S = M.T @ S @ M
        pos_rot = positions @ R.T
        if forces is not None:
            force_rot = forces @ R.T
            return H, S, pos_rot, force_rot
        return H, S, pos_rot
class OrcaRotator(Rotator):
    """Rotator specialized for ORCA's spherical-harmonic conventions.

    ORCA orders the real harmonics within an l-shell as
    (m=0, +1, -1, +2, -2, ...); `_calc_P` builds the permutation mapping
    that order onto the ascending (-l ... +l) order of the base class.
    The y component of the rotation vector also carries the opposite
    sign (see `phase`).
    """

    def __init__(self, basis):
        # Flip the sign of the y rotation-vector component to match ORCA.
        phase = np.array([1., -1., 1.])
        # Per-l sign tables, one ragged row per angular momentum.  The
        # explicit object dtype is required: NumPy >= 1.24 raises on
        # ragged nested sequences without it (older versions produced an
        # object array implicitly, so behavior is unchanged).
        self.T = np.array(
            [[1],
             [1, -1, 1],
             [1, 1, 1, 1, 1],
             [1, 1, 1, 1, -1, 1, -1],
             [1, 1, 1, 1, 1, -1, 1, -1, 1],
             [1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1],
             [1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1,
              1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
              -1, 1, -1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1,
              1, -1, 1, -1, 1]
             ], dtype=object
        )
        super(OrcaRotator, self).__init__(basis, phase)

    def _calc_P(self):
        # Permutation per l from ORCA's (0, +1, -1, +2, -2, ...) m-order
        # onto ascending m-order.  (A leftover debug print and a dead
        # post-append assignment were removed here.)
        self.Ps = []
        for l in range(self.lmax + 1):
            # `np.int` was removed in NumPy 1.24 -> builtin `int`.
            ms = np.zeros((2 * l + 1,), dtype=int)
            ms[2::2] = -np.arange(1, l + 1, 1)
            self.Ps.append(np.argsort(np.argsort(ms)))

    def _calc_U(self):
        """Compute the U transformation matrix."""
        super(OrcaRotator, self)._calc_U()
class AimsRotator(Rotator):
    """Rotator specialized for FHI-aims' spherical-harmonic conventions.

    FHI-aims keeps the ascending m-order but flips the sign of selected
    real harmonics; the per-l sign tables in `T` are folded into the
    rotation matrices by the `_calc_U` override.  The y component of the
    rotation vector also carries the opposite sign (see `phase`).
    """

    def __init__(self, basis):
        # Flip the sign of the y rotation-vector component.
        phase = np.array([1., -1., 1.])
        # Per-l sign tables, one ragged row per angular momentum.  The
        # explicit object dtype is required: NumPy >= 1.24 raises on
        # ragged nested sequences without it (older versions produced an
        # object array implicitly, so behavior is unchanged).
        self.T = np.array(
            [[1],
             [1, 1, -1],
             [1, 1, 1, -1, 1],
             [1, 1, 1, 1, -1, 1, -1],
             [1, 1, 1, 1, 1, -1, 1, -1, 1],
             [1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1],
             [1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1,
              1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
              -1, 1, -1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1,
              1, -1, 1, -1, 1]
             ], dtype=object
        )
        super(AimsRotator, self).__init__(basis, phase)

    def _calc_U(self):
        """Compute the U matrices, then fold in the per-l sign flips."""
        super(AimsRotator, self)._calc_U()
        for l, U in enumerate(self.Us):
            self.Us[l] = np.diag(self.T[l]).dot(U.T).T
def rand_rotation_matrix(deflection=1.0, randnums=None):
    """Return a uniformly random 3x3 rotation matrix.

    Based on the algorithm in Graphics Gems III
    (http://www.realtimerendering.com/resources/GraphicsGems/gemsiii/rand_rotation.c).

    deflection: magnitude of the rotation — 0 gives no rotation, 1 a fully
        random rotation; small values give small perturbations of identity.
    randnums: three numbers in [0, 1]; drawn uniformly when None.
    """
    if randnums is None:
        randnums = np.random.uniform(size=(3,))
    x0, x1, x2 = randnums
    # Pole rotation angle, direction of pole deflection, and its magnitude.
    theta = x0 * 2.0 * deflection * np.pi
    phi = x1 * 2.0 * np.pi
    z = x2 * 2.0 * deflection
    # Householder vector V (length sqrt(2)) that distributes points uniformly
    # over the sphere via the reflection I - V V^T: with x1, x2 uniform, the
    # reflected points are uniform on the sphere.
    r = np.sqrt(z)
    V = np.array([np.sin(phi) * r, np.cos(phi) * r, np.sqrt(2.0 - z)])
    ct, st = np.cos(theta), np.sin(theta)
    Rz = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))
    # Rotation = (V V^T - I) Rz.
    return (np.outer(V, V) - np.eye(3)).dot(Rz)
"numpy.argsort",
"numpy.sin",
"numpy.arange",
"numpy.diag",
"quaternion.from_rotation_matrix",
"itertools.chain",
"scipy.linalg.block_diag",
"numpy.cos",
"quaternion.from_rotation_vector",
"numpy.random.uniform",
"numpy.outer",
"numpy.ix_",
"numpy.zeros",
"numpy.split",
"numpy.array",
... | [((7344, 7354), 'numpy.sqrt', 'np.sqrt', (['z'], {}), '(z)\n', (7351, 7354), True, 'import numpy as np\n'), ((7456, 7469), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7462, 7469), True, 'import numpy as np\n'), ((7479, 7492), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7485, 7492), True, 'import numpy as np\n'), ((7502, 7550), 'numpy.array', 'np.array', (['((ct, st, 0), (-st, ct, 0), (0, 0, 1))'], {}), '(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))\n', (7510, 7550), True, 'import numpy as np\n'), ((201, 226), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (209, 226), True, 'import numpy as np\n'), ((1848, 1882), 'spherical_functions.Wigner_D_matrices', 'Wigner_D_matrices', (['q', '(0)', 'self.lmax'], {}), '(q, 0, self.lmax)\n', (1865, 1882), False, 'from spherical_functions import Wigner_D_matrices\n'), ((1896, 1921), 'numpy.split', 'np.split', (['Ds', 'self.lsplit'], {}), '(Ds, self.lsplit)\n', (1904, 1921), True, 'import numpy as np\n'), ((2867, 2881), 'numpy.array', 'np.array', (['UDUs'], {}), '(UDUs)\n', (2875, 2881), True, 'import numpy as np\n'), ((2987, 3021), 'quaternion.from_rotation_matrix', 'quaternion.from_rotation_matrix', (['R'], {}), '(R)\n', (3018, 3021), False, 'import quaternion\n'), ((3093, 3128), 'quaternion.from_rotation_vector', 'quaternion.from_rotation_vector', (['vR'], {}), '(vR)\n', (3124, 3128), False, 'import quaternion\n'), ((3177, 3222), 'itertools.chain', 'chain', (['*[UDUs[self.lvec[z]] for z in numbers]'], {}), '(*[UDUs[self.lvec[z]] for z in numbers])\n', (3182, 3222), False, 'from itertools import chain\n'), ((3235, 3252), 'scipy.linalg.block_diag', 'la.block_diag', (['*M'], {}), '(*M)\n', (3248, 3252), True, 'from scipy import linalg as la\n'), ((3556, 3582), 'numpy.array', 'np.array', (['[1.0, -1.0, 1.0]'], {}), '([1.0, -1.0, 1.0])\n', (3564, 3582), True, 'import numpy as np\n'), ((3597, 4210), 'numpy.array', 'np.array', (['[[1], [1, -1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, -1, 1, 
-1], [1, 1, 1, 1, 1,\n -1, 1, -1, 1], [1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1], [1, 1, 1, 1, 1, 1,\n 1, -1, 1, -1, 1, -1, 1], [1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, \n -1], [1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1], [1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1], [1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1], [1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1], [1, 1, 1, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1]]'], {}), '([[1], [1, -1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, -1, 1, -1], [1, 1,\n 1, 1, 1, -1, 1, -1, 1], [1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1], [1, 1, 1,\n 1, 1, 1, 1, -1, 1, -1, 1, -1, 1], [1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1,\n -1, 1, -1], [1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1], [1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1], [1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1], [1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1], [1, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1]])\n', (3605, 4210), True, 'import numpy as np\n'), ((5089, 5115), 'numpy.array', 'np.array', (['[1.0, -1.0, 1.0]'], {}), '([1.0, -1.0, 1.0])\n', (5097, 5115), True, 'import numpy as np\n'), ((5130, 5744), 'numpy.array', 'np.array', (['[[1], [1, 1, -1], [1, 1, 1, -1, 1], [1, 1, 1, 1, -1, 1, -1], [1, 1, 1, 1, 1,\n -1, 1, -1, 1], [1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1], [1, 1, 1, 1, 1, 1,\n 1, -1, 1, -1, 1, -1, 1], [1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, \n -1], [1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1], [1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1], [1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1], [1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1], [1, 1, 1, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1]]'], {}), '([[1], [1, 1, -1], [1, 1, 1, 
-1, 1], [1, 1, 1, 1, -1, 1, -1], [1, 1,\n 1, 1, 1, -1, 1, -1, 1], [1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1], [1, 1, 1,\n 1, 1, 1, 1, -1, 1, -1, 1, -1, 1], [1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1,\n -1, 1, -1], [1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1], [1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1], [1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1], [1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1], [1, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1]])\n', (5138, 5744), True, 'import numpy as np\n'), ((6716, 6744), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3,)'}), '(size=(3,))\n', (6733, 6744), True, 'import numpy as np\n'), ((7423, 7439), 'numpy.sqrt', 'np.sqrt', (['(2.0 - z)'], {}), '(2.0 - z)\n', (7430, 7439), True, 'import numpy as np\n'), ((949, 989), 'numpy.arange', 'np.arange', (['(0)', '(2 * l + 1)', '(1)'], {'dtype': 'np.int'}), '(0, 2 * l + 1, 1, dtype=np.int)\n', (958, 989), True, 'import numpy as np\n'), ((1175, 1225), 'numpy.zeros', 'np.zeros', (['(2 * l + 1, 2 * l + 1)'], {'dtype': 'np.complex'}), '((2 * l + 1, 2 * l + 1), dtype=np.complex)\n', (1183, 1225), True, 'import numpy as np\n'), ((3035, 3067), 'quaternion.as_rotation_vector', 'quaternion.as_rotation_vector', (['q'], {}), '(q)\n', (3064, 3067), False, 'import quaternion\n'), ((4574, 4610), 'numpy.zeros', 'np.zeros', (['(2 * l + 1,)'], {'dtype': 'np.int'}), '((2 * l + 1,), dtype=np.int)\n', (4582, 4610), True, 'import numpy as np\n'), ((4738, 4760), 'numpy.arange', 'np.arange', (['(1)', '(l + 1)', '(1)'], {}), '(1, l + 1, 1)\n', (4747, 4760), True, 'import numpy as np\n'), ((7373, 7384), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (7379, 7384), True, 'import numpy as np\n'), ((7398, 7409), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (7404, 7409), True, 'import numpy as np\n'), ((662, 695), 'numpy.array', 'np.array', (['elmt_lvec'], {'dtype': 'np.int'}), 
'(elmt_lvec, dtype=np.int)\n', (670, 695), True, 'import numpy as np\n'), ((1769, 1779), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1776, 1779), True, 'import numpy as np\n'), ((2770, 2788), 'numpy.ix_', 'np.ix_', (['pidx', 'pidx'], {}), '(pidx, pidx)\n', (2776, 2788), True, 'import numpy as np\n'), ((4635, 4657), 'numpy.arange', 'np.arange', (['(1)', '(l + 1)', '(1)'], {}), '(1, l + 1, 1)\n', (4644, 4657), True, 'import numpy as np\n'), ((7625, 7639), 'numpy.outer', 'np.outer', (['V', 'V'], {}), '(V, V)\n', (7633, 7639), True, 'import numpy as np\n'), ((7642, 7651), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (7648, 7651), True, 'import numpy as np\n'), ((1505, 1515), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1512, 1515), True, 'import numpy as np\n'), ((4696, 4710), 'numpy.argsort', 'np.argsort', (['ms'], {}), '(ms)\n', (4706, 4710), True, 'import numpy as np\n'), ((748, 787), 'numpy.arange', 'np.arange', (['(0.0)', 'self.lmax'], {'dtype': 'np.int'}), '(0.0, self.lmax, dtype=np.int)\n', (757, 787), True, 'import numpy as np\n'), ((6190, 6208), 'numpy.diag', 'np.diag', (['self.T[l]'], {}), '(self.T[l])\n', (6197, 6208), True, 'import numpy as np\n')] |
# write function to average across frames to give ensembled averaged cosine theta values at each N - 1 value
import MDAnalysis as mda
import statsmodels as stats
import math
import numpy as np
import pandas
import sklearn
import scipy as scipy
import warnings
# Use sklearn to do fitting
from sklearn.linear_model import LinearRegression
from scipy import stats
from scipy.optimize import leastsq
def pers_length(polymer_atoms, n_monomers):
    """Polymer-averaged cos(theta) at each arc-length separation, one frame.

    For every reference residue, the unit vector to its next residue is dotted
    with the unit vectors from that reference residue to each subsequent
    residue (using residue centers of mass); samples at a given separation are
    then averaged over all reference residues.

    Parameters:
        polymer_atoms: MDAnalysis AtomGroup (output of select_atoms) holding
            the polymer, with residues numbered 1..n_monomers.
        n_monomers: int, number of residues N in the chain.

    Returns:
        vec_poly: (3, N - 1) float array with
            row 0: polymer-averaged cos(theta) at each separation,
            row 1: the separation indices 0 .. N - 2,
            row 2: polymer-averaged angles in degrees (arccos of row 0).
    """
    vec_poly = np.zeros(shape=(3, n_monomers-1), dtype=float)
    # Separation (arc-length) indices 0 .. N - 2.
    vec_poly[1, :] = np.arange(n_monomers-1)
    # Per-reference-residue cos(theta) samples; dtype=object because each
    # reference residue yields a different number of pairs.
    sv_ply = np.zeros(shape=(n_monomers-1), dtype=object)
    # BUGFIX: the loop previously ran to n_monomers, overflowing sv_ply by one
    # (IndexError on the last, empty iteration). N - 1 reference residues
    # already produce every available pair.
    count = 0
    for i in range(n_monomers - 1):
        count += 1
        # cos(theta) samples between reference residue `count` and residues
        # count+1 .. N.
        ds_cor = np.zeros(shape=(n_monomers-count))
        for j in range(n_monomers - count):
            # 1-based residue ids of the partners of the reference residue.
            jh = np.arange(n_monomers - count)
            jh += count+1
            n6_mon1 = polymer_atoms.select_atoms("resid "+str(count))
            n6_mon2 = polymer_atoms.select_atoms("resid "+str(jh[j]))
            if j == 0:
                # Reference bond vector (residue count -> count+1); its self
                # dot product is 1 by construction, and it is reused for all
                # later j of this reference residue.
                v1 = n6_mon1.center_of_mass() - n6_mon2.center_of_mass()
                v1_norm = v1/(np.linalg.norm(v1))
                ds_cor[j] = v1_norm.dot(v1_norm)
            else:
                v2 = n6_mon1.center_of_mass() - n6_mon2.center_of_mass()
                v2_norm = v2/(np.linalg.norm(v2))
                ds_cor[j] = np.dot(v1_norm, v2_norm)
        sv_ply[i] = ds_cor
    # Average the samples collected at each separation over all reference
    # residues; shorter sample arrays simply stop contributing (IndexError).
    cor_avg = []
    for j in range(n_monomers-1):
        lss = []
        for i in sv_ply.flat:
            try:
                lss.append(i[j])
            except IndexError:
                pass
        cor_avg.append(np.mean(lss))
    nm = np.array(cor_avg)
    # Convert to angles; values at/above the 0.99 cutoff are treated as
    # parallel (0 degrees), which also keeps arccos inside its domain for
    # near-1 values.
    ang_vg = []
    for i in nm.flat:
        if i >= 0.99:
            ang_vg.append(0)
        else:
            ang_vg.append(math.degrees(math.acos(i)))
    vec_poly[0, :] = nm
    vec_poly[2, :] = np.array(ang_vg)
    return vec_poly
def mean_sq_e2e(polymer_atoms, universe, n_monomers, start, end):
    """Measure the polymer end-to-end vector over frames [start, end).

    At every frame the vector is taken between the centers of mass of the
    first residue (resid 1) and the last residue (resid n_monomers).

    Returns:
        e2e_ens: object array holding the end-to-end vector of each frame.
        dis_e2e: (2, n_frames) array; row 0 is the squared end-to-end
            distance, row 1 the end-to-end distance (Angstroms).
    """
    n_frames = end - start
    e2e_ens = np.zeros(shape=(n_frames), dtype=object)
    dis_e2e = np.zeros(shape=(2, n_frames))
    for frame_idx, ts in enumerate(universe.trajectory[start:end]):
        # Centers of mass of the chain ends at the current frame.
        com_first = polymer_atoms.select_atoms("resid 1").center_of_mass()
        com_last = polymer_atoms.select_atoms("resid " + str(n_monomers)).center_of_mass()
        e2e = com_last - com_first
        length = np.linalg.norm(e2e)
        dis_e2e[0, frame_idx] = length**2
        dis_e2e[1, frame_idx] = length
        e2e_ens[frame_idx] = e2e
    return e2e_ens, dis_e2e
def hydro_rad_poly(polymer_atoms, universe, n_monomers, start, end):
    """Per-frame hydrodynamic radius of the chain from inverse COM distances.

    For every frame in [start, end) the inverse distances between the centers
    of mass of all residue pairs are summed; the frame's Rh is the reciprocal
    of that sum scaled by 1/N**2.

    Returns:
        rh_frame: array of length end - start with one Rh value per frame.
    """
    rh_frame = np.zeros(shape=(end-start))
    # Row r-1 holds the summed inverse distances from residue r to all
    # residues with a larger resid (overwritten every frame).
    inv_sum = np.zeros(shape=(n_monomers-1))
    for frame_idx, ts in enumerate(universe.trajectory[start:end]):
        for ref in range(1, n_monomers):
            # Inverse COM-COM distances from residue `ref` to each later one
            # (same values as mda.distances.distance_array would give).
            inv_d = np.zeros(shape=(n_monomers-ref))
            for k in range(n_monomers - ref):
                a = polymer_atoms.select_atoms("resid " + str(ref))
                b = polymer_atoms.select_atoms("resid " + str(ref + 1 + k))
                sep = a.center_of_mass() - b.center_of_mass()
                inv_d[k] = 1/(np.linalg.norm(sep))
            inv_sum[ref - 1] = np.sum(inv_d)
        rh_frame[frame_idx] = 1/((1/((n_monomers)**2))*(np.sum(inv_sum)))
    return rh_frame
def rh_block_avg(no_of_blks, polymer_atoms, universe, begin, final):
    """Block-averaged hydrodynamic radius from inverse COM distances.

    Splits frames [begin, final) into `no_of_blks` consecutive blocks. Within
    each block the inverse residue-residue center-of-mass distances are first
    ensemble-averaged over the block's frames and then summed, giving one
    time-averaged Rh value per block.

    Returns:
        inv_sto: object array of length no_of_blks, one Rh per block.
    """
    # Number of frames per block.
    n_size = int((final - begin)/no_of_blks)
    # NOTE(review): ot_dab is never used anywhere in this function.
    ot_dab = {}
    # Position the trajectory reader at the first frame of the first block.
    universe.trajectory[begin]
    # Number of polymer residues in the selection.
    n_monomers = len(np.unique(polymer_atoms.resids))
    inv_sto = np.zeros(shape=(no_of_blks), dtype=object)
    for nb in range(no_of_blks):
        # Each block starts wherever the reader currently points.
        start = universe.trajectory.frame
        print(start)
        # Define end of block
        end = int(start + n_size)
        print(end)
        # Per-frame collections of inverse distances for this block.
        inv_frame = np.zeros(shape=(end-start), dtype=object)
        cnn = 0
        for ts in universe.trajectory[start:end]:
            # sum_rhstore[r-1] holds, for this frame, the inverse distances
            # from residue r to every residue with a larger resid.
            sum_rhstore = np.zeros(shape=(n_monomers-1), dtype=object)
            co_t = 0
            for i in range(n_monomers-1):
                co_t += 1
                # Inverse distances from reference residue co_t.
                r_vec = np.zeros(shape=(n_monomers-co_t))
                for j in range(n_monomers - co_t):
                    # 1-based residue ids paired with reference residue co_t.
                    jh = np.arange(n_monomers - co_t)
                    jh += co_t+1
                    # First and second residues of the pair, via MDAnalysis.
                    n6_mon1 = polymer_atoms.select_atoms("resid "+str(co_t))
                    n6_mon2 = polymer_atoms.select_atoms("resid "+str(jh[j]))
                    # COM separation; store its inverse length (matches what
                    # mda.distances.distance_array would give).
                    v1 = n6_mon1.center_of_mass() - n6_mon2.center_of_mass()
                    r_vec[j] = 1/(np.linalg.norm(v1))
                sum_rhstore[co_t-1] = r_vec
            inv_frame[cnn] = sum_rhstore
            cnn += 1
        # Ensemble-average the inverse distances over the block's frames
        # *before* summation.
        conn = 0
        s_rh = np.zeros(shape=(n_monomers-1), dtype=object)
        for ns in range(n_monomers-1):
            conn += 1
            # ln[frame, pair] collects the per-frame inverse distances for
            # reference residue ns+1.
            ln = np.zeros(shape=(n_size,n_monomers-conn))
            rvc = np.zeros(shape=(n_monomers-conn))
            for kl in range(n_size):
                ln[kl] = inv_frame[kl][ns]
            for k in range(n_monomers-conn):
                rvc[k] = np.mean(ln[:,k])
            s_rh[ns] = np.sum(rvc)
        # Time-averaged Rh for this block.
        inv_sto[nb] = 1/((1/(n_monomers**2))*np.sum(s_rh))
        # Advance the reader to the start of the next block.
        universe.trajectory[end]
    return inv_sto
def orientation_order_param(polymer_atoms, universe, n_monomers, start, end):
    """Per-frame orientational order parameter of the polymer chain.

    For every frame in [start, end) each successive bond vector between
    residue centers of mass is compared against the unit end-to-end vector:
    3/(2*(N-1)) * sum(cos^2 - 1/3) over the N - 1 bonds.

    Returns:
        oo_frame: array of length end - start, one order parameter per frame.
    """
    oo_frame = np.zeros(shape=(end-start))
    for frame_idx, ts in enumerate(universe.trajectory[start:end]):
        # Unit end-to-end vector between the first and last residues.
        head = polymer_atoms.select_atoms("resid 1").center_of_mass()
        tail = polymer_atoms.select_atoms("resid " + str(n_monomers)).center_of_mass()
        e2e = tail - head
        e2e_unit = e2e/(np.linalg.norm(e2e))
        # Residue ids 2 .. N; bond i runs from residue (id - 1) to residue id.
        resids = np.arange(n_monomers-1) + 2
        cosine_vals = np.zeros(shape=(n_monomers-1))
        for i in range(len(resids)):
            tip = polymer_atoms.select_atoms("resid " + str(resids[i])).center_of_mass()
            if i == 0:
                bond = tip - head
            else:
                base = polymer_atoms.select_atoms("resid " + str(resids[i]-1)).center_of_mass()
                bond = tip - base
            bond_unit = bond/(np.linalg.norm(bond))
            # Squared projection onto the end-to-end axis, with the isotropic
            # part (1/3) removed.
            cosine_vals[i] = ((np.dot(e2e_unit, bond_unit))**2) - (1/3)
        oo_frame[frame_idx] = (3/(2*(n_monomers-1)))*np.sum(cosine_vals)
    return oo_frame
def obs_autocorr_RA(scalar_set, t_corr, window_shift, start, end):
    """Autocorrelation of a scalar observable using overlapping windows.

    The frame series is split into windows of t_corr frames whose origins are
    window_shift frames apart; obs(t)*obs(0) is averaged over the windows and
    normalised by the equilibrium variance of the origin values.

    Args:
        scalar_set: 1-D numpy array, one scalar observable per frame.
        t_corr: number of frames per window (= number of time lags).
        window_shift: frames between successive window origins.
        start, end: frame range used to count how many windows fit.

    Returns:
        corr_sc: (2, t_corr) array; row 0 holds the ensemble-averaged
            autocorrelation values, row 1 the time-lag values.
        var_eq: equilibrium variance of the observable over the origins.
    """
    assert len(scalar_set.shape) == 1, "Dimension of set should be 1 "
    assert isinstance(scalar_set, np.ndarray), "set must be numpy array "
    sc_autocorr = np.zeros(shape=(int(t_corr)))
    t_lag = np.arange(0, int(t_corr))
    # Count how many complete windows fit given the shift.
    n_wind = 0
    edge = start + t_corr
    for frame in range(1, end + 1):
        if frame == edge:
            n_wind += 1
            edge = edge + window_shift
    print("No. of Samples: " + str(n_wind))
    # Per-window obs(t)*obs(0) products, plus the origin values needed for
    # the variance normalisation.
    tcof_auto = np.zeros(shape=n_wind, dtype=object)
    obs_t0 = np.zeros(shape=n_wind)
    obs_t0sq = np.zeros(shape=n_wind)
    for w in range(n_wind):
        # Window w covers frames [w*shift, w*shift + t_corr).
        w_start = w * window_shift
        w_end = int(t_corr) + w * window_shift
        obs_t0[w] = scalar_set[w_start]
        obs_t0sq[w] = scalar_set[w_start]**2
        tcof_auto[w] = np.array(
            [scalar_set[f] * obs_t0[w] for f in range(w_start, w_end)])
    mean_t0 = np.mean(obs_t0)
    # Equilibrium variance of the observable.
    var_eq = np.mean(obs_t0sq) - mean_t0**2
    for k in range(t_corr):
        lag_samples = [tcof_auto[w][k] for w in range(n_wind)]
        sc_autocorr[k] = (np.mean(lag_samples) - mean_t0**2) / var_eq
    corr_sc = np.array([sc_autocorr, t_lag])
    return corr_sc, var_eq
def get_rg_pers_poly(polymer_atoms, universe, start, end):
    """Per-frame radius of gyration plus time-averaged cos(theta) statistics.

    Uses pers_length at every frame in [start, end) to collect the polymer-
    averaged cosine theta values at each arc-length separation, then averages
    them over time (bootstrapped mean and standard error). The radius of
    gyration is recorded per frame via MDAnalysis.

    Outputs:
    rg_sq_ens:
        - Dim 2 rows, end - start columns: row 0 the radius of gyration per
          frame, row 1 the squared radius of gyration per frame.
    v_poly:
        - Dim 4 rows, N - 1 columns:
          row 0: time-averaged, polymer-averaged cosine theta values
                 (bootstrapped mean over 10000 resamples of size 30),
          row 1: bootstrapped standard error of those means,
          row 2: time-averaged, polymer-averaged angle values in degrees,
          row 3: the separation indices from pers_length (0 .. N - 2).
    corr_v:
        - Dim N - 1 rows, end - start columns: polymer-averaged cosine theta
          values, one column per frame.
    avg_rg:
        - Float, time-averaged radius of gyration (sqrt of the mean squared
          Rg over the frames).
    """
    # Number of polymer residues in the selection.
    n_monomers = len(np.unique(polymer_atoms.resids))
    # Per-frame radius of gyration (row 0) and its square (row 1).
    rg_sq_ens = np.zeros(shape=(2, end-start))
    # Polymer-averaged cosine theta values, one column per frame.
    corr_v = np.zeros(shape=(n_monomers-1,end-start))
    # Polymer-averaged angle values (degrees), one column per frame.
    angle_v = np.zeros(shape=(n_monomers-1,end-start))
    # Time-averaged results, filled after the frame loop.
    v_poly = np.zeros(shape=(4,n_monomers-1))
    count_rg = 0
    # Move the trajectory reader to the start frame.
    universe.trajectory[start]
    for ts in universe.trajectory[start:end]:
        # pers_length reads the current-frame coordinates through the
        # MDAnalysis selection even though the universe is not passed in.
        p_mat = pers_length(polymer_atoms, n_monomers)
        corr_v[:,count_rg] = p_mat[0]
        angle_v[:,count_rg] = p_mat[2]
        # Radius of gyration and its square at this frame.
        rg_sq_ens[0, count_rg] = polymer_atoms.radius_of_gyration()
        rg_sq_ens[1, count_rg] = (polymer_atoms.radius_of_gyration())**2
        count_rg += 1
    # Separation indices from pers_length (same for every frame).
    v_poly[3,:] = p_mat[1]
    # Time averaging with a bootstrap over the frames.
    for i in range(n_monomers-1):
        boot_means = []
        for j in range(10000):
            # NOTE(review): resample size is fixed at 30 regardless of the
            # number of frames — confirm this matches the intended bootstrap.
            bootsample = np.random.choice(corr_v[i,:],size=30, replace=True)
            boot_means.append(bootsample.mean())
        # Bootstrapped mean of cosine theta at this separation.
        v_poly[0,i] = np.mean(boot_means)
        # Bootstrapped standard error.
        v_poly[1,i] = np.std(boot_means)
        # Time-averaged angle (degrees) at this separation.
        v_poly[2,i] = np.mean(angle_v[i,:])
    # Time-averaged radius of gyration.
    avg_rg = np.sqrt(np.mean(rg_sq_ens[1]))
    return rg_sq_ens, v_poly, corr_v, avg_rg
def bavg_pers_cnt(no_of_blks, polymer_atoms, universe, len_bnd, fit_pnts, begin, final):
"""This function takes as inputs:
- no_of_blks variable that defines the number of blocks
- polymer_atoms variable, an output from the atom selection class in MD Analysis.
- the MD Analysis universe variable that contains trajectory information (Make sure you have removed PBC and made molecule whole)
- len_bnd variable that defines the length (in Angstroms) of the arc length
- fit_pnts dictates the number of points to fit, in order to calculate the persistence length. The number of points used to fit a
line to ln(cos(theta)) vs. arc length values dictated by the user will be the same for all blocks.
- Start time of the trajectory in picoseconds
- End time of the trajctory in picoseconds
Once received, this function uses the get_rg_pers_poly function to retrieve the average radius of gyration and
the time averaged cosine theta values at each arc length value for each trajectory block. The time averaged values are then
used to fit to the function that relates the persistence length and the flexural rigidity(cosine theta). A linear fit on
ln(cos(theta)) vs. arc length, along with user defined number of points for fitting(fit_pnts), will provide the persistence length (Lp)
values for each block.
As the functions is running, the start and end of the block, Lp, error in Lp from the fit and pearson coefficient are printed.
The structure of the outputs are given below:
ot_dab:
- Dictionary (2 keys, no_of_blks values per key), contains the radius of gyration and Lp at for each block.
mod_res:
- Dim is 4 rows, no_of_blks columns
row 1: Lp for each trajectory block
row 2: Error in Lp from fit [Angstroms], 95% Confidence Interval for each trajectory block
row 3: Model slope value from fit for each trajectory block
row 4: Mean squared error in Lp fit for each trajectory block
"""
# Website reference: https://www.statisticshowto.datasciencecentral.com/
# probability-and-statistics/descriptive-statistics/sample-variance/
# Block size
n_size = (final - begin)/no_of_blks
# Initialize dictionary
ot_dab = {}
# Shift the start of the trajectory to the begin variable
universe.trajectory[begin]
# Keys for the dictionary above
sf_lbl = ["Avg Radius of gyration","Avg Sq. radius of gyration", "Avg end to end distance","Avg Sq. end to end distance", "Avg persistence length", "Avg Hydrodynamic radius"]
# Array that will contain the Rg and Lp for each trajectory block
blk_nparr = np.zeros(shape=(len(sf_lbl),no_of_blks))
# Arrays that will statistics of the Lp linear fitting
mod_res = np.zeros(shape=(4,no_of_blks))
# initialize counter
count = 0
for i in range(no_of_blks):
# Shift the start of the trajectory to the start variable
start = universe.trajectory.frame
print(start)
# Define end of block
end = int(start + n_size)
print(end)
# Get the time averaged cosine theta values for this block
rg_ens, cor_tp, theta_ens, rg_avg = get_rg_pers_poly(polymer_atoms, universe, start, end)
# Using MD Analysis, I can get the number of polymer monomers in the polymer_atoms input variable.
n_monomers = len(np.unique(polymer_atoms.resids))
# End to end distance and squared e2e distance
eVec_poly, e2edis_poly = mean_sq_e2e(polymer_atoms, universe, n_monomers, start, end)
# Store average radius of gyration for the block
blk_nparr[0,count] = rg_avg
# Store averaged squared radius of gyration for the block
blk_nparr[1,count] = np.mean(rg_ens[1])
# Store average end to end distance for the block
blk_nparr[2, count] = np.sqrt(np.mean(e2edis_poly[0]))
# Store mean squared end to end distance for the block
blk_nparr[3, count] = np.mean(e2edis_poly[0])
# Arc length x values
blen = cor_tp[3]*len_bnd
warnings.filterwarnings("error")
try:
# ln(cosine theta) y values
np.log(cor_tp[0])
except RuntimeWarning:
print("Negative cosine theta values present, doing exponential fit...")
def func_exp(x,b):
return np.exp(b*x)
from scipy.optimize import curve_fit
popt, pcov = curve_fit(func_exp, blen, cor_tp[0])
e_lp = np.sqrt(np.diag(pcov))[0]
lp_npoly = -1/popt[0]
# Slope here is in angstroms
print("Lp [Angstroms], Exp. fit:", lp_npoly)
# Pers length error: error propagation from uncertainty in slope
print("Error in Lp from fit [Angstroms]:", e_lp)
# Save Lp in matrix
mod_res[0,count] = -1/popt[0]
blk_nparr[4,count] = -1/popt[0]
# Save error in Lp from fit: Error propagation from the fit to Lp
mod_res[1,count] = e_lp
# Save model slope
mod_res[2, count] = popt[0]
# Save Mean squared error of the fit
mod_res[3,count] = 0
else:
# ln(cosine theta) y values
npoly_lc = np.log(cor_tp[0])
if fit_pnts == len(blen):
# Want to fit a line with no y-intercept
model_npoly = LinearRegression(fit_intercept=False)
# fit line to data
model_npoly.fit(blen.reshape(-1,1), npoly_lc)
# Predict new ln(cos(theta)) values from arc length values
gg_np = model_npoly.predict(blen.reshape(-1,1))
# Residuals between the true y data and model y data
resid_np = npoly_lc - gg_np
# How to calculate mean squared error
mse_p = np.sum(resid_np**2)/len(resid_np)
# How to calculate Sum((Xi - avg(X))^2): X values are the arc length values
blen -= np.mean(blen)
nhui = blen**2
sum_m = np.sum(nhui)
# How to calculate 95% confidence interval for the slope
flc_np = stats.t.ppf(0.975, fit_pnts-1)*np.sqrt(mse_p/sum_m)
# Slope here is in angstroms
print("Lp [Angstroms]:", -1/model_npoly.coef_[0])
# Pers length error: error propagation from uncertainty in slope
print("Error in Lp from fit [Angstroms], 95% CL:", flc_np/((model_npoly.coef_[0])**2))
# Pearson coefficient to evaluate goodness of fit
print("R2 score:", sklearn.metrics.r2_score(npoly_lc, gg_np))
# Save Lp in matrix
mod_res[0,count] = -1/model_npoly.coef_[0]
blk_nparr[4,count] = -1/model_npoly.coef_[0]
# Save error in Lp from fit: Error propagation from the fit to Lp
mod_res[1,count] = flc_np/((model_npoly.coef_[0])**2)
# Save model slope
mod_res[2, count] = model_npoly.coef_[0]
# Save Mean squared error of the fit
mod_res[3,count] = sklearn.metrics.mean_squared_error(npoly_lc, gg_np)
elif fit_pnts != len(blen):
# Want to fit a line with no y-intercept
model_npoly = LinearRegression(fit_intercept=False)
# fit line to data
model_npoly.fit(blen[:fit_pnts].reshape(-1,1), npoly_lc[:fit_pnts])
# Predict new ln(cos(theta)) values from arc length values
gg_np = model_npoly.predict(blen[:fit_pnts].reshape(-1,1))
# Residuals between the true y data and model y data
resid_np = npoly_lc[:fit_pnts] - gg_np[:fit_pnts]
# How to calculate mean squared error
mse_p = np.sum(resid_np**2)/len(resid_np)
# How to calculate Sum((Xi - avg(X))^2): X values are the arc length values
blen -= np.mean(blen[:fit_pnts])
nhui = blen**2
sum_m = np.sum(nhui)
# How to calculate 95% confidence interval for the slope
flc_np = scipy.stats.t.ppf(0.975, fit_pnts-1)*np.sqrt(mse_p/sum_m)
# Slope here is in angstroms
print("Lp [Angstroms]:", -1/model_npoly.coef_[0])
# Pers length error: error propagation from uncertainty in slope
print("Error in Lp from fit [Angstroms], 95% CL :", flc_np/((model_npoly.coef_[0])**2))
# Pearson coefficient to evaluate goodness of fit
print("R2 score:", sklearn.metrics.r2_score(npoly_lc[:fit_pnts], gg_np[:fit_pnts]))
# Save Lp in matrix
mod_res[0,count] = -1/model_npoly.coef_[0]
blk_nparr[4,count] = -1/model_npoly.coef_[0]
# Save error in Lp from fit: Error propagation from the fit to Lp
mod_res[1,count] = flc_np/((model_npoly.coef_[0])**2)
# Save model slope
mod_res[2, count] = model_npoly.coef_[0]
# Save Mean squared error of the fit
mod_res[3,count] = sklearn.metrics.mean_squared_error(npoly_lc[:fit_pnts], gg_np[:fit_pnts])
count += 1
# Shift the start to the next trajectory block
universe.trajectory[end]
# Hydrodynamic Radius for this block
rh_poly = rh_block_avg(no_of_blks, polymer_atoms, universe, begin, final)
ot_dab[sf_lbl[5]] = rh_poly
for i in range(len(sf_lbl)-1):
ot_dab[sf_lbl[i]] = blk_nparr[i,:]
return ot_dab, mod_res
# Name of paper: Computer simulation of dilute polymer solutions with the dissipative particle dynamics method
# Authors: <NAME>, <NAME>, and <NAME>
def pos_bead_autocorr_RA(polymer_atoms, universe, n_monomers, t_corr, window_shift, start, end):
    """Positional bead autocorrelation as a function of time, computed with a
    running-average (multiple-time-origin) method.

    For each window of ``t_corr`` frames, the unit vector from the chain
    center of mass to each monomer's center of mass is recorded at the first
    frame (the time origin) and dotted against the corresponding vector at
    every later frame of the window.  Windows are shifted by ``window_shift``
    frames and the per-monomer dot products are averaged over all windows.

    Parameters
    ----------
    polymer_atoms : atom group for the polymer chain.
        NOTE(review): presumably an MDAnalysis AtomGroup -- it must provide
        ``select_atoms("resid N")`` and ``center_of_mass()``.  Confirm.
    universe : trajectory container (presumably an MDAnalysis Universe).
    n_monomers : int, number of monomers (residues) in the chain.
    t_corr : int, number of frames per correlation window.
    window_shift : int, frame displacement between successive windows.
    start, end : int, first frame of the first window and the frame-scan
        limit used to count how many windows fit.

    Returns
    -------
    corr_ot : np.ndarray with row 0 = correlation values and row 1 = time
        lags (frames).
    tcot_sum : object array, one entry per window, each holding the
        chain-averaged correlation for every frame of that window.
    """
    # Correlation vs time matrix (one value per time lag)
    pb_corr = np.zeros(shape=(int(t_corr)))
    # Time lag array
    t_lag = np.arange(0, int(t_corr))
    #Initialize matrix to store correlation values for each window
    # Default window displacement is one frame down
    #tcof_TO = np.zeros(shape=end-(t_corr-1), dtype=object)
    #tcot_sum = np.zeros(shape=end-(t_corr-1), dtype=object)
    # counter for frame selection
    #count = 0
    # Get No. of samples (windows) for a given window displacement.
    # NOTE(review): this counts how many windows of length t_corr, shifted by
    # window_shift, end at or before frame `end` -- verify against the
    # trajectory length used by callers.
    n_wind = 0
    c_n = 0
    e_n = start + t_corr
    for i in range(end):
        c_n += 1
        if c_n == e_n:
            n_wind += 1
            e_n = e_n + window_shift
    print("No. of Samples: "+str(n_wind))
    #Initialize matrix to store correlation values for each window
    # Default window displacement is one frame down
    tcof_TO = np.zeros(shape=n_wind, dtype=object)
    tcot_sum = np.zeros(shape=n_wind, dtype=object)
    for i in range(n_wind):
        if i == 0:
            # First window starts at frame 0
            start_to = i
            #print(start_to)
            # Define end point based on no. of time origins
            end_to = int(t_corr)
            print(str(start_to)+ " to "+str(end_to))
            # Initialize matrix to store correlation values for each frame within each block
            t_cofcorr = np.zeros(shape=end_to-start_to, dtype=object)
            t_cofSUM = np.zeros(shape=end_to-start_to, dtype=object)
        elif i != 0:
            #print(i)
            # Shift the window start forward by window_shift frames
            start_to = start_to + window_shift
            #print(start_to)
            # Define end point based on no. of time origins
            end_to = end_to + window_shift
            print(str(start_to)+ " to "+str(end_to))
            #Initialize matrix to store correlation values for each frame after the first block
            t_cofcorr = np.zeros(shape=end_to-start_to, dtype=object)
            t_cofSUM = np.zeros(shape=end_to-start_to, dtype=object)
        # Initialize variable for monomers COM
        com_OL = np.zeros(shape=(n_monomers), dtype=object)
        # New counter (frame index within the current window)
        c_n2 = 0
        # Initialize var for oligomer COM at the first time-origin frame
        com_chain = []
        # Frame iteration over this window of the trajectory
        for ts in universe.trajectory[start_to:end_to]:
            # First frame (the time origin): record reference unit vectors
            if c_n2 == 0:
                # Initialize array for distance storage
                dist_com = np.zeros(shape=(n_monomers), dtype=object)
                # Initialize variable for dot product values
                n_bcorr = np.zeros(shape=(n_monomers))
                # Center of mass of the oligomer chain. save in list
                com_chain.append(polymer_atoms.center_of_mass())
                for j in range(n_monomers):
                    #print(j+1)
                    a_fmon = polymer_atoms.select_atoms("resid "+str(j+1)).center_of_mass()
                    # Save COM for each monomer in an array
                    com_OL[j] = a_fmon
                    # save distances and normalize vector
                    dist_com[j] = (a_fmon - com_chain[0])/(np.linalg.norm((a_fmon - com_chain[0])))
                    # Take dot product (unit vector with itself -> 1 at lag 0)
                    n_bcorr[j] = np.dot(dist_com[j],dist_com[j])
                t_cofcorr[c_n2] = n_bcorr
                t_cofSUM[c_n2] = np.sum(n_bcorr)/n_monomers
            # Following frames: dot current unit vectors with the origin's
            elif c_n2 != 0:
                # Initialize array for distance storage
                dist_Afcom = np.zeros(shape=(n_monomers), dtype=object)
                # Initialize variable for dot product values
                n_Afcorr = np.zeros(shape=(n_monomers))
                # Center of mass of the oligomer chain
                com_olig = polymer_atoms.center_of_mass()
                for j in range(n_monomers):
                    #print(j+1)
                    # COM for each monomer
                    a_NFmon = polymer_atoms.select_atoms("resid "+str(j+1)).center_of_mass()
                    # save distances and normalize vector
                    dist_Afcom[j] = (a_NFmon - com_olig)/(np.linalg.norm((a_NFmon - com_olig)))
                    # Take dot product against the time-origin vector
                    n_Afcorr[j] = np.dot(dist_Afcom[j],dist_com[j])
                t_cofcorr[c_n2] = n_Afcorr
                t_cofSUM[c_n2] = np.sum(n_Afcorr)/n_monomers
            #print(n6_plga_ace.trajectory.frame)
            c_n2 += 1
            #print(c_n2)
        #count += 1
        #print(count)
        # Save correlation data vs time for each window
        tcof_TO[i] = t_cofcorr
        tcot_sum[i] = t_cofSUM
    #print(tcof_TO)
    #print(tcot_sum)
    # Initialize array to store time-averaged correlation values for each monomer, for each time lag point
    sv_MN = np.zeros(shape=(t_corr), dtype=object)
    # Time averaging dot product for each monomer then summing to get the correlation values of the polymer vs time
    # Iterate through each time lag, based on no. of time origins
    for j in range(t_corr):
        # Initialize array to store time-averaged correlation values for each monomer, from each block
        ct_mean = np.zeros(shape=n_monomers)
        #print("j ="+str(j))
        # Iterate through monomers
        for k in range(n_monomers):
            sv_mon = []
            #print("k ="+str(k))
            # Iterate through time origins
            for i in range(tcof_TO.shape[0]):
                #print("i ="+str(i))
                #print(i)
                # Save each correlation values across time blocks, for each monomer
                sv_mon.append(tcof_TO[i][j][k])
            #print(sv_mon)
            # Time averaging happens here
            ct_mean[k] = np.mean(sv_mon)
        # Correlation values for each time lag is calculated here
        pb_corr[j] = np.sum(ct_mean)/n_monomers
        # Save mean values, output #1
        # row 1: Correlation values at t = 0, each column is the time-averaged correlation for each monomer
        sv_MN[j] = ct_mean
    # Output #2
    corr_ot = np.array([pb_corr, t_lag])
    return corr_ot, tcot_sum
# Name of paper: Computer simulation of dilute polymer solutions with the dissipative particle dynamics method
# Authors: <NAME>, <NAME>, and <NAME>
def rouse_relax(x, tr_1, n_bonds):
    """Rouse-model bead autocorrelation evaluated at time(s) ``x``.

    The decay is a weighted sum over the ``n_bonds`` internal Rouse modes.
    ``tr_1`` is the relaxation time of the first (slowest) mode; every other
    mode's time is scaled from it by the ratio of Rouse eigenvalues.
    """
    ## Change here for fitting
    prefactor = 6 / (n_bonds ** 2 - 1)
    first_eigenvalue = None
    mode_terms = []
    for mode in range(1, n_bonds + 1):
        # Rouse eigenvalue of this mode
        eigenvalue = 4 * (np.sin(mode * np.pi / (2 * n_bonds)) ** 2)
        if first_eigenvalue is None:
            first_eigenvalue = eigenvalue
        # Relaxation time of the mode, scaled from the first mode's tr_1
        tau = (first_eigenvalue / eigenvalue) * tr_1
        mode_terms.append((1 / eigenvalue) * np.exp(-x / tau))
    return prefactor * np.sum(mode_terms)
def zimm_relax_fit(x, tr_1, h_s, n_bonds):
    """Zimm-model bead autocorrelation at time(s) ``x`` (fitting form).

    The Rouse mode eigenvalues are corrected for hydrodynamic interaction of
    strength ``h_s``; ``tr_1`` sets the relaxation time of the first mode and
    the remaining mode times are scaled by eigenvalue ratios.
    """
    ## Change here for fitting
    prefactor = 6 / (n_bonds ** 2 - 1)
    hs_pow = h_s ** 0.78
    b_factor = 1 - 1.66 * hs_pow
    sigma_exp = -1.40 * hs_pow
    first_zimm = None
    mode_terms = []
    for mode in range(1, n_bonds + 1):
        # Rouse eigenvalue of this mode
        rouse_ev = 4 * (np.sin(mode * np.pi / (2 * n_bonds)) ** 2)
        # Zimm-corrected eigenvalue
        zimm_ev = rouse_ev * b_factor * ((mode / n_bonds) ** sigma_exp)
        if first_zimm is None:
            first_zimm = zimm_ev
        # Mode relaxation time, scaled from the first mode's tr_1
        tau = (first_zimm / zimm_ev) * tr_1
        mode_terms.append((1 / rouse_ev) * np.exp(-x / tau))
    return prefactor * np.sum(mode_terms)
def zimm_relax_func(x, tr_1, h_s, n_bonds):
    """Zimm-model bead autocorrelation at time(s) ``x``, with diagnostics.

    Same model as :func:`zimm_relax_fit`, but also returns the intermediate
    per-mode quantities.

    Returns a 4-tuple:
        correlation value(s) at ``x``,
        list of per-mode relaxation times,
        list of Zimm-corrected eigenvalues,
        list of Rouse eigenvalues.
    """
    ## Change here for fitting
    prefactor = 6 / (n_bonds ** 2 - 1)
    hs_pow = h_s ** 0.78
    b_factor = 1 - 1.66 * hs_pow
    sigma_exp = -1.40 * hs_pow
    zimm_eigenvalues = []
    rouse_eigenvalues = []
    relax_times = []
    mode_terms = []
    for mode in range(1, n_bonds + 1):
        # Rouse eigenvalue of this mode
        rouse_ev = 4 * (np.sin(mode * np.pi / (2 * n_bonds)) ** 2)
        rouse_eigenvalues.append(rouse_ev)
        # Zimm-corrected eigenvalue
        zimm_ev = rouse_ev * b_factor * ((mode / n_bonds) ** sigma_exp)
        zimm_eigenvalues.append(zimm_ev)
        if mode == 1:
            tau = (zimm_ev / zimm_ev) * tr_1
        else:
            tau = (zimm_eigenvalues[0] / zimm_ev) * tr_1
        relax_times.append(tau)
        mode_terms.append((1 / rouse_ev) * np.exp(-x / tau))
    return prefactor * np.sum(mode_terms), relax_times, zimm_eigenvalues, rouse_eigenvalues
| [
"numpy.sum",
"sklearn.metrics.r2_score",
"numpy.mean",
"numpy.linalg.norm",
"numpy.arange",
"numpy.sin",
"numpy.exp",
"numpy.diag",
"numpy.unique",
"numpy.std",
"numpy.random.choice",
"scipy.stats.t.ppf",
"sklearn.metrics.mean_squared_error",
"scipy.optimize.curve_fit",
"sklearn.linear_m... | [((1520, 1568), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3, n_monomers - 1)', 'dtype': 'float'}), '(shape=(3, n_monomers - 1), dtype=float)\n', (1528, 1568), True, 'import numpy as np\n'), ((1647, 1672), 'numpy.arange', 'np.arange', (['(n_monomers - 1)'], {}), '(n_monomers - 1)\n', (1656, 1672), True, 'import numpy as np\n'), ((2074, 2118), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - 1)', 'dtype': 'object'}), '(shape=n_monomers - 1, dtype=object)\n', (2082, 2118), True, 'import numpy as np\n'), ((5130, 5147), 'numpy.array', 'np.array', (['cor_avg'], {}), '(cor_avg)\n', (5138, 5147), True, 'import numpy as np\n'), ((5771, 5787), 'numpy.array', 'np.array', (['ang_vg'], {}), '(ang_vg)\n', (5779, 5787), True, 'import numpy as np\n'), ((5987, 6028), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end - start)', 'dtype': 'object'}), '(shape=end - start, dtype=object)\n', (5995, 6028), True, 'import numpy as np\n'), ((6225, 6257), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, end - start)'}), '(shape=(2, end - start))\n', (6233, 6257), True, 'import numpy as np\n'), ((7638, 7665), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end - start)'}), '(shape=end - start)\n', (7646, 7665), True, 'import numpy as np\n'), ((7705, 7735), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - 1)'}), '(shape=n_monomers - 1)\n', (7713, 7735), True, 'import numpy as np\n'), ((10102, 10142), 'numpy.zeros', 'np.zeros', ([], {'shape': 'no_of_blks', 'dtype': 'object'}), '(shape=no_of_blks, dtype=object)\n', (10110, 10142), True, 'import numpy as np\n'), ((13888, 13915), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end - start)'}), '(shape=end - start)\n', (13896, 13915), True, 'import numpy as np\n'), ((17847, 17883), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_wind', 'dtype': 'object'}), '(shape=n_wind, dtype=object)\n', (17855, 17883), True, 'import numpy as np\n'), ((17938, 17960), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_wind'}), 
'(shape=n_wind)\n', (17946, 17960), True, 'import numpy as np\n'), ((18025, 18047), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_wind'}), '(shape=n_wind)\n', (18033, 18047), True, 'import numpy as np\n'), ((20784, 20814), 'numpy.array', 'np.array', (['[sc_autocorr, t_lag]'], {}), '([sc_autocorr, t_lag])\n', (20792, 20814), True, 'import numpy as np\n'), ((23446, 23478), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, end - start)'}), '(shape=(2, end - start))\n', (23454, 23478), True, 'import numpy as np\n'), ((23579, 23624), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - 1, end - start)'}), '(shape=(n_monomers - 1, end - start))\n', (23587, 23624), True, 'import numpy as np\n'), ((23729, 23774), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - 1, end - start)'}), '(shape=(n_monomers - 1, end - start))\n', (23737, 23774), True, 'import numpy as np\n'), ((23874, 23909), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4, n_monomers - 1)'}), '(shape=(4, n_monomers - 1))\n', (23882, 23909), True, 'import numpy as np\n'), ((28804, 28835), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4, no_of_blks)'}), '(shape=(4, no_of_blks))\n', (28812, 28835), True, 'import numpy as np\n'), ((37796, 37832), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_wind', 'dtype': 'object'}), '(shape=n_wind, dtype=object)\n', (37804, 37832), True, 'import numpy as np\n'), ((37872, 37908), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_wind', 'dtype': 'object'}), '(shape=n_wind, dtype=object)\n', (37880, 37908), True, 'import numpy as np\n'), ((42349, 42385), 'numpy.zeros', 'np.zeros', ([], {'shape': 't_corr', 'dtype': 'object'}), '(shape=t_corr, dtype=object)\n', (42357, 42385), True, 'import numpy as np\n'), ((43767, 43793), 'numpy.array', 'np.array', (['[pb_corr, t_lag]'], {}), '([pb_corr, t_lag])\n', (43775, 43793), True, 'import numpy as np\n'), ((2375, 2409), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - count)'}), '(shape=n_monomers - count)\n', (2383, 2409), True, 
'import numpy as np\n'), ((7104, 7123), 'numpy.linalg.norm', 'np.linalg.norm', (['e2e'], {}), '(e2e)\n', (7118, 7123), True, 'import numpy as np\n'), ((10050, 10081), 'numpy.unique', 'np.unique', (['polymer_atoms.resids'], {}), '(polymer_atoms.resids)\n', (10059, 10081), True, 'import numpy as np\n'), ((10489, 10530), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end - start)', 'dtype': 'object'}), '(shape=end - start, dtype=object)\n', (10497, 10530), True, 'import numpy as np\n'), ((12788, 12832), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - 1)', 'dtype': 'object'}), '(shape=n_monomers - 1, dtype=object)\n', (12796, 12832), True, 'import numpy as np\n'), ((14769, 14794), 'numpy.arange', 'np.arange', (['(n_monomers - 1)'], {}), '(n_monomers - 1)\n', (14778, 14794), True, 'import numpy as np\n'), ((14877, 14907), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - 1)'}), '(shape=n_monomers - 1)\n', (14885, 14907), True, 'import numpy as np\n'), ((20184, 20200), 'numpy.array', 'np.array', (['ac_num'], {}), '(ac_num)\n', (20192, 20200), True, 'import numpy as np\n'), ((20356, 20373), 'numpy.mean', 'np.mean', (['obs_t0sq'], {}), '(obs_t0sq)\n', (20363, 20373), True, 'import numpy as np\n'), ((23284, 23315), 'numpy.unique', 'np.unique', (['polymer_atoms.resids'], {}), '(polymer_atoms.resids)\n', (23293, 23315), True, 'import numpy as np\n'), ((25470, 25489), 'numpy.mean', 'np.mean', (['boot_means'], {}), '(boot_means)\n', (25477, 25489), True, 'import numpy as np\n'), ((25620, 25638), 'numpy.std', 'np.std', (['boot_means'], {}), '(boot_means)\n', (25626, 25638), True, 'import numpy as np\n'), ((25743, 25765), 'numpy.mean', 'np.mean', (['angle_v[i, :]'], {}), '(angle_v[i, :])\n', (25750, 25765), True, 'import numpy as np\n'), ((25831, 25852), 'numpy.mean', 'np.mean', (['rg_sq_ens[1]'], {}), '(rg_sq_ens[1])\n', (25838, 25852), True, 'import numpy as np\n'), ((29864, 29882), 'numpy.mean', 'np.mean', (['rg_ens[1]'], {}), '(rg_ens[1])\n', (29871, 29882), 
True, 'import numpy as np\n'), ((30117, 30140), 'numpy.mean', 'np.mean', (['e2edis_poly[0]'], {}), '(e2edis_poly[0])\n', (30124, 30140), True, 'import numpy as np\n'), ((30230, 30262), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (30253, 30262), False, 'import warnings\n'), ((39235, 39275), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_monomers', 'dtype': 'object'}), '(shape=n_monomers, dtype=object)\n', (39243, 39275), True, 'import numpy as np\n'), ((42728, 42754), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_monomers'}), '(shape=n_monomers)\n', (42736, 42754), True, 'import numpy as np\n'), ((44695, 44712), 'numpy.sum', 'np.sum', (['cor_times'], {}), '(cor_times)\n', (44701, 44712), True, 'import numpy as np\n'), ((45796, 45813), 'numpy.sum', 'np.sum', (['cor_times'], {}), '(cor_times)\n', (45802, 45813), True, 'import numpy as np\n'), ((2609, 2638), 'numpy.arange', 'np.arange', (['(n_monomers - count)'], {}), '(n_monomers - count)\n', (2618, 2638), True, 'import numpy as np\n'), ((5063, 5075), 'numpy.mean', 'np.mean', (['lss'], {}), '(lss)\n', (5070, 5075), True, 'import numpy as np\n'), ((6953, 6972), 'numpy.linalg.norm', 'np.linalg.norm', (['e2e'], {}), '(e2e)\n', (6967, 6972), True, 'import numpy as np\n'), ((8151, 8185), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - count)'}), '(shape=n_monomers - count)\n', (8159, 8185), True, 'import numpy as np\n'), ((9362, 9375), 'numpy.sum', 'np.sum', (['r_vec'], {}), '(r_vec)\n', (9368, 9375), True, 'import numpy as np\n'), ((10642, 10686), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - 1)', 'dtype': 'object'}), '(shape=n_monomers - 1, dtype=object)\n', (10650, 10686), True, 'import numpy as np\n'), ((12912, 12955), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_size, n_monomers - conn)'}), '(shape=(n_size, n_monomers - conn))\n', (12920, 12955), True, 'import numpy as np\n'), ((12972, 13005), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - 
conn)'}), '(shape=n_monomers - conn)\n', (12980, 13005), True, 'import numpy as np\n'), ((13431, 13442), 'numpy.sum', 'np.sum', (['rvc'], {}), '(rvc)\n', (13437, 13442), True, 'import numpy as np\n'), ((14656, 14675), 'numpy.linalg.norm', 'np.linalg.norm', (['e2e'], {}), '(e2e)\n', (14670, 14675), True, 'import numpy as np\n'), ((16202, 16221), 'numpy.sum', 'np.sum', (['cosine_vals'], {}), '(cosine_vals)\n', (16208, 16221), True, 'import numpy as np\n'), ((18426, 18453), 'numpy.arange', 'np.arange', (['start_to', 'end_to'], {}), '(start_to, end_to)\n', (18435, 18453), True, 'import numpy as np\n'), ((18607, 18640), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end_to - start_to)'}), '(shape=end_to - start_to)\n', (18615, 18640), True, 'import numpy as np\n'), ((20377, 20392), 'numpy.mean', 'np.mean', (['obs_t0'], {}), '(obs_t0)\n', (20384, 20392), True, 'import numpy as np\n'), ((25258, 25311), 'numpy.random.choice', 'np.random.choice', (['corr_v[i, :]'], {'size': '(30)', 'replace': '(True)'}), '(corr_v[i, :], size=30, replace=True)\n', (25274, 25311), True, 'import numpy as np\n'), ((29465, 29496), 'numpy.unique', 'np.unique', (['polymer_atoms.resids'], {}), '(polymer_atoms.resids)\n', (29474, 29496), True, 'import numpy as np\n'), ((29989, 30012), 'numpy.mean', 'np.mean', (['e2edis_poly[0]'], {}), '(e2edis_poly[0])\n', (29996, 30012), True, 'import numpy as np\n'), ((30329, 30346), 'numpy.log', 'np.log', (['cor_tp[0]'], {}), '(cor_tp[0])\n', (30335, 30346), True, 'import numpy as np\n'), ((31588, 31605), 'numpy.log', 'np.log', (['cor_tp[0]'], {}), '(cor_tp[0])\n', (31594, 31605), True, 'import numpy as np\n'), ((38378, 38425), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end_to - start_to)', 'dtype': 'object'}), '(shape=end_to - start_to, dtype=object)\n', (38386, 38425), True, 'import numpy as np\n'), ((38471, 38518), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end_to - start_to)', 'dtype': 'object'}), '(shape=end_to - start_to, dtype=object)\n', (38479, 38518), 
True, 'import numpy as np\n'), ((43394, 43409), 'numpy.mean', 'np.mean', (['sv_mon'], {}), '(sv_mon)\n', (43401, 43409), True, 'import numpy as np\n'), ((43503, 43518), 'numpy.sum', 'np.sum', (['ct_mean'], {}), '(ct_mean)\n', (43509, 43518), True, 'import numpy as np\n'), ((47009, 47026), 'numpy.sum', 'np.sum', (['cor_times'], {}), '(cor_times)\n', (47015, 47026), True, 'import numpy as np\n'), ((8401, 8430), 'numpy.arange', 'np.arange', (['(n_monomers - count)'], {}), '(n_monomers - count)\n', (8410, 8430), True, 'import numpy as np\n'), ((9560, 9579), 'numpy.sum', 'np.sum', (['sum_rhstore'], {}), '(sum_rhstore)\n', (9566, 9579), True, 'import numpy as np\n'), ((11063, 11096), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_monomers - co_t)'}), '(shape=n_monomers - co_t)\n', (11071, 11096), True, 'import numpy as np\n'), ((13355, 13372), 'numpy.mean', 'np.mean', (['ln[:, k]'], {}), '(ln[:, k])\n', (13362, 13372), True, 'import numpy as np\n'), ((13547, 13559), 'numpy.sum', 'np.sum', (['s_rh'], {}), '(s_rh)\n', (13553, 13559), True, 'import numpy as np\n'), ((19344, 19371), 'numpy.arange', 'np.arange', (['start_to', 'end_to'], {}), '(start_to, end_to)\n', (19353, 19371), True, 'import numpy as np\n'), ((19535, 19568), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end_to - start_to)'}), '(shape=end_to - start_to)\n', (19543, 19568), True, 'import numpy as np\n'), ((20668, 20683), 'numpy.mean', 'np.mean', (['nv_sav'], {}), '(nv_sav)\n', (20675, 20683), True, 'import numpy as np\n'), ((30641, 30677), 'scipy.optimize.curve_fit', 'curve_fit', (['func_exp', 'blen', 'cor_tp[0]'], {}), '(func_exp, blen, cor_tp[0])\n', (30650, 30677), False, 'from scipy.optimize import curve_fit\n'), ((31755, 31792), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (31771, 31792), False, 'from sklearn.linear_model import LinearRegression\n'), ((32426, 32439), 'numpy.mean', 'np.mean', (['blen'], {}), '(blen)\n', (32433, 
32439), True, 'import numpy as np\n'), ((32495, 32507), 'numpy.sum', 'np.sum', (['nhui'], {}), '(nhui)\n', (32501, 32507), True, 'import numpy as np\n'), ((33702, 33753), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', (['npoly_lc', 'gg_np'], {}), '(npoly_lc, gg_np)\n', (33736, 33753), False, 'import sklearn\n'), ((39023, 39070), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end_to - start_to)', 'dtype': 'object'}), '(shape=end_to - start_to, dtype=object)\n', (39031, 39070), True, 'import numpy as np\n'), ((39116, 39163), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end_to - start_to)', 'dtype': 'object'}), '(shape=end_to - start_to, dtype=object)\n', (39124, 39163), True, 'import numpy as np\n'), ((39685, 39725), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_monomers', 'dtype': 'object'}), '(shape=n_monomers, dtype=object)\n', (39693, 39725), True, 'import numpy as np\n'), ((39824, 39850), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_monomers'}), '(shape=n_monomers)\n', (39832, 39850), True, 'import numpy as np\n'), ((44208, 44247), 'numpy.sin', 'np.sin', (['((i + 1) * np.pi / (2 * n_bonds))'], {}), '((i + 1) * np.pi / (2 * n_bonds))\n', (44214, 44247), True, 'import numpy as np\n'), ((45052, 45091), 'numpy.sin', 'np.sin', (['((i + 1) * np.pi / (2 * n_bonds))'], {}), '((i + 1) * np.pi / (2 * n_bonds))\n', (45058, 45091), True, 'import numpy as np\n'), ((46168, 46207), 'numpy.sin', 'np.sin', (['((i + 1) * np.pi / (2 * n_bonds))'], {}), '((i + 1) * np.pi / (2 * n_bonds))\n', (46174, 46207), True, 'import numpy as np\n'), ((3627, 3645), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (3641, 3645), True, 'import numpy as np\n'), ((4435, 4459), 'numpy.dot', 'np.dot', (['v1_norm', 'v2_norm'], {}), '(v1_norm, v2_norm)\n', (4441, 4459), True, 'import numpy as np\n'), ((9247, 9265), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (9261, 9265), True, 'import numpy as np\n'), ((11327, 11355), 'numpy.arange', 'np.arange', 
(['(n_monomers - co_t)'], {}), '(n_monomers - co_t)\n', (11336, 11355), True, 'import numpy as np\n'), ((15254, 15276), 'numpy.linalg.norm', 'np.linalg.norm', (['oo_vec'], {}), '(oo_vec)\n', (15268, 15276), True, 'import numpy as np\n'), ((20687, 20702), 'numpy.mean', 'np.mean', (['obs_t0'], {}), '(obs_t0)\n', (20694, 20702), True, 'import numpy as np\n'), ((30529, 30542), 'numpy.exp', 'np.exp', (['(b * x)'], {}), '(b * x)\n', (30535, 30542), True, 'import numpy as np\n'), ((32262, 32283), 'numpy.sum', 'np.sum', (['(resid_np ** 2)'], {}), '(resid_np ** 2)\n', (32268, 32283), True, 'import numpy as np\n'), ((32620, 32652), 'scipy.stats.t.ppf', 'stats.t.ppf', (['(0.975)', '(fit_pnts - 1)'], {}), '(0.975, fit_pnts - 1)\n', (32631, 32652), False, 'from scipy import stats\n'), ((32651, 32673), 'numpy.sqrt', 'np.sqrt', (['(mse_p / sum_m)'], {}), '(mse_p / sum_m)\n', (32658, 32673), True, 'import numpy as np\n'), ((33108, 33149), 'sklearn.metrics.r2_score', 'sklearn.metrics.r2_score', (['npoly_lc', 'gg_np'], {}), '(npoly_lc, gg_np)\n', (33132, 33149), False, 'import sklearn\n'), ((33899, 33936), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (33915, 33936), False, 'from sklearn.linear_model import LinearRegression\n'), ((34624, 34648), 'numpy.mean', 'np.mean', (['blen[:fit_pnts]'], {}), '(blen[:fit_pnts])\n', (34631, 34648), True, 'import numpy as np\n'), ((34704, 34716), 'numpy.sum', 'np.sum', (['nhui'], {}), '(nhui)\n', (34710, 34716), True, 'import numpy as np\n'), ((35938, 36011), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', (['npoly_lc[:fit_pnts]', 'gg_np[:fit_pnts]'], {}), '(npoly_lc[:fit_pnts], gg_np[:fit_pnts])\n', (35972, 36011), False, 'import sklearn\n'), ((40570, 40602), 'numpy.dot', 'np.dot', (['dist_com[j]', 'dist_com[j]'], {}), '(dist_com[j], dist_com[j])\n', (40576, 40602), True, 'import numpy as np\n'), ((40706, 40721), 'numpy.sum', 'np.sum', 
(['n_bcorr'], {}), '(n_bcorr)\n', (40712, 40721), True, 'import numpy as np\n'), ((40900, 40940), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_monomers', 'dtype': 'object'}), '(shape=n_monomers, dtype=object)\n', (40908, 40940), True, 'import numpy as np\n'), ((41040, 41066), 'numpy.zeros', 'np.zeros', ([], {'shape': 'n_monomers'}), '(shape=n_monomers)\n', (41048, 41066), True, 'import numpy as np\n'), ((44483, 44504), 'numpy.exp', 'np.exp', (['(-x / trouse_i)'], {}), '(-x / trouse_i)\n', (44489, 44504), True, 'import numpy as np\n'), ((45432, 45453), 'numpy.exp', 'np.exp', (['(-x / trouse_i)'], {}), '(-x / trouse_i)\n', (45438, 45453), True, 'import numpy as np\n'), ((46596, 46617), 'numpy.exp', 'np.exp', (['(-x / trouse_i)'], {}), '(-x / trouse_i)\n', (46602, 46617), True, 'import numpy as np\n'), ((4212, 4230), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (4226, 4230), True, 'import numpy as np\n'), ((5590, 5602), 'math.acos', 'math.acos', (['i'], {}), '(i)\n', (5599, 5602), False, 'import math\n'), ((12366, 12384), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (12380, 12384), True, 'import numpy as np\n'), ((15404, 15430), 'numpy.dot', 'np.dot', (['e2e_norm', 'oov_norm'], {}), '(e2e_norm, oov_norm)\n', (15410, 15430), True, 'import numpy as np\n'), ((15877, 15899), 'numpy.linalg.norm', 'np.linalg.norm', (['oo_vec'], {}), '(oo_vec)\n', (15891, 15899), True, 'import numpy as np\n'), ((30710, 30723), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (30717, 30723), True, 'import numpy as np\n'), ((34460, 34481), 'numpy.sum', 'np.sum', (['(resid_np ** 2)'], {}), '(resid_np ** 2)\n', (34466, 34481), True, 'import numpy as np\n'), ((34829, 34867), 'scipy.stats.t.ppf', 'scipy.stats.t.ppf', (['(0.975)', '(fit_pnts - 1)'], {}), '(0.975, fit_pnts - 1)\n', (34846, 34867), True, 'import scipy as scipy\n'), ((34866, 34888), 'numpy.sqrt', 'np.sqrt', (['(mse_p / sum_m)'], {}), '(mse_p / sum_m)\n', (34873, 34888), True, 'import numpy as 
np\n'), ((35323, 35386), 'sklearn.metrics.r2_score', 'sklearn.metrics.r2_score', (['npoly_lc[:fit_pnts]', 'gg_np[:fit_pnts]'], {}), '(npoly_lc[:fit_pnts], gg_np[:fit_pnts])\n', (35347, 35386), False, 'import sklearn\n'), ((40444, 40481), 'numpy.linalg.norm', 'np.linalg.norm', (['(a_fmon - com_chain[0])'], {}), '(a_fmon - com_chain[0])\n', (40458, 40481), True, 'import numpy as np\n'), ((41695, 41729), 'numpy.dot', 'np.dot', (['dist_Afcom[j]', 'dist_com[j]'], {}), '(dist_Afcom[j], dist_com[j])\n', (41701, 41729), True, 'import numpy as np\n'), ((41831, 41847), 'numpy.sum', 'np.sum', (['n_Afcorr'], {}), '(n_Afcorr)\n', (41837, 41847), True, 'import numpy as np\n'), ((44648, 44669), 'numpy.exp', 'np.exp', (['(-x / trouse_i)'], {}), '(-x / trouse_i)\n', (44654, 44669), True, 'import numpy as np\n'), ((45675, 45696), 'numpy.exp', 'np.exp', (['(-x / trouse_i)'], {}), '(-x / trouse_i)\n', (45681, 45696), True, 'import numpy as np\n'), ((46888, 46909), 'numpy.exp', 'np.exp', (['(-x / trouse_i)'], {}), '(-x / trouse_i)\n', (46894, 46909), True, 'import numpy as np\n'), ((16044, 16070), 'numpy.dot', 'np.dot', (['e2e_norm', 'oov_norm'], {}), '(e2e_norm, oov_norm)\n', (16050, 16070), True, 'import numpy as np\n'), ((41571, 41605), 'numpy.linalg.norm', 'np.linalg.norm', (['(a_NFmon - com_olig)'], {}), '(a_NFmon - com_olig)\n', (41585, 41605), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import preprocess
import tensorflow as tf
import tensorflow.contrib as tf_contrib
import time
import numpy as np
import pickle
# Vocabulary size: number of entries in the word dictionary built by preprocess.
word_dict_size = len(preprocess.get_dict())
# def poetry_2_num(poetry):
# vector = []
# for word in poetry:
# vector.append(word_dict.get(word))
# return vector
class Config(object):
    """Hyper-parameters for the RNN poetry model."""
    BATCH_SIZE = 1
    PROB_KEEP = 0.95  # fraction of nodes kept during training (dropout keep probability)
    HIDEN_SIZE = 1  # number of hidden-layer neurons
                    # NOTE(review): network() defaults to 128 and does not read this -- confirm intent
    NN_LAYER = 2  # number of hidden layers
    MAX_GRAD_NORM = 5  # maximum gradient norm (for clipping)
    MAX_EPOCH = 50  # number of passes over the text
    LEARNING_RATE = 0.002
# class TrainSet(object):
# def __init__(self, batch_size, file_path):
# self._file_path = file_path
# self._batch_size = batch_size
# self._poems = []
# self._poem_vec = []
# batch_times = len(poem_vec) // Config.BATCH_SIZE
# Load the pre-pickled training batches: first pickle is the input batches,
# second is the (shifted) target batches.
# NOTE(review): pickle.load on a local data file -- trusted input assumed.
with open('./data/Tang.pickle', 'rb') as f:
    x_batches = pickle.load(f)
    y_batches = pickle.load(f)
# with open('./data/Song.pickle', 'rb') as f:
#     x_batches = pickle.load(f)
#     y_batches = pickle.load(f)
data_batche = zip(x_batches, y_batches)
# Graph placeholders: one batch of word ids and the corresponding targets,
# with a variable sequence length.
input_ids = tf.placeholder(tf.int32, [Config.BATCH_SIZE, None])
output_targets = tf.placeholder(tf.int32, [Config.BATCH_SIZE, None])
def network(hiden_size=128, layer=3):
cell_fun = tf.nn.rnn_cell.GRUCell
cell = cell_fun(hiden_size)
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * layer)
init_state = cell.zero_state(Config.BATCH_SIZE, tf.float32)
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [word_dict_size, hiden_size])
inputs = tf.nn.embedding_lookup(embedding, input_ids)
if Config.PROB_KEEP < 1: # 这是用来随机扔掉一些不参与训练的
inputs = tf.nn.dropout(inputs, Config.PROB_KEEP)
outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=init_state)
output = tf.reshape(outputs, [-1, hiden_size])
softmax_w = tf.get_variable("softmax_w", [hiden_size, word_dict_size]) # one-hot表示
softmax_b = tf.get_variable("softmax_b", [word_dict_size])
logits = tf.matmul(output, softmax_w) + softmax_b
probs = tf.nn.softmax(logits)
# 计算loss function
loss = tf_contrib.legacy_seq2seq.sequence_loss_by_example(
[logits], [tf.reshape(output_targets, [-1])],
[tf.ones_like(tf.reshape(output_targets, [-1]), dtype=tf.float32)], word_dict_size) # 交叉熵
cost = tf.reduce_mean(loss)
# 算梯度
learning_rate = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), Config.MAX_GRAD_NORM)
optimizer = tf.train.AdamOptimizer(learning_rate) #
train_op = optimizer.apply_gradients(zip(grads, tvars))
return cost, last_state, train_op, learning_rate, probs, init_state, cell
def creat_poem():
word_dict = preprocess.get_dict()
words = list(word_dict.keys())
print(len(words))
_, last_state, _, _, probs, init_state, cell = network()
def to_word(weights):
t = np.cumsum(weights)
s = np.sum(weights)
sample = int(np.searchsorted(t, np.random.rand(1) * s))
print(sample)
return words[sample]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
saver.restore(sess, './model/test.mod-50')
state_ = sess.run(cell.zero_state(1, tf.float32))
poem = '无'
x = np.array([list(map(word_dict.get, poem))])
[probs_, state_] = sess.run([probs, last_state], feed_dict={input_ids: x, init_state: state_})
word = to_word(probs_)
# word = words[np.argmax(probs_)]
while word != ']':
poem += word
x = np.zeros((1, 1))
x[0, 0] = word_dict[word]
[probs_, state_] = sess.run([probs, last_state], feed_dict={input_ids: x, init_state: state_})
word = to_word(probs_)
# word = words[np.argmax(probs_)]
return poem
if __name__ == '__main__':
# cost__, last_state__, train_op__, lr, _ = network()
# train_nn(cost__, last_state__, train_op__, 'test', lr)
print(creat_poem())
| [
"numpy.sum",
"tensorflow.trainable_variables",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.global_variables",
"pickle.load",
"tensorflow.Variable",
"tensorflow.get_variable",
"tensorflow.nn.softmax",
"tensorflow.placeholder",
"numpy.cumsum",
"tensorflow.gradients",
"tensorflow.nn.... | [((1094, 1145), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[Config.BATCH_SIZE, None]'], {}), '(tf.int32, [Config.BATCH_SIZE, None])\n', (1108, 1145), True, 'import tensorflow as tf\n'), ((1163, 1214), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[Config.BATCH_SIZE, None]'], {}), '(tf.int32, [Config.BATCH_SIZE, None])\n', (1177, 1214), True, 'import tensorflow as tf\n'), ((174, 195), 'preprocess.get_dict', 'preprocess.get_dict', ([], {}), '()\n', (193, 195), False, 'import preprocess\n'), ((883, 897), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (894, 897), False, 'import pickle\n'), ((914, 928), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (925, 928), False, 'import pickle\n'), ((1336, 1379), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['([cell] * layer)'], {}), '([cell] * layer)\n', (1363, 1379), True, 'import tensorflow as tf\n'), ((1757, 1814), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'inputs'], {'initial_state': 'init_state'}), '(cell, inputs, initial_state=init_state)\n', (1774, 1814), True, 'import tensorflow as tf\n'), ((1828, 1865), 'tensorflow.reshape', 'tf.reshape', (['outputs', '[-1, hiden_size]'], {}), '(outputs, [-1, hiden_size])\n', (1838, 1865), True, 'import tensorflow as tf\n'), ((1883, 1941), 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_w"""', '[hiden_size, word_dict_size]'], {}), "('softmax_w', [hiden_size, word_dict_size])\n", (1898, 1941), True, 'import tensorflow as tf\n'), ((1971, 2017), 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_b"""', '[word_dict_size]'], {}), "('softmax_b', [word_dict_size])\n", (1986, 2017), True, 'import tensorflow as tf\n'), ((2084, 2105), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2097, 2105), True, 'import tensorflow as tf\n'), ((2356, 2376), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (2370, 2376), True, 'import 
tensorflow as tf\n'), ((2408, 2441), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (2419, 2441), True, 'import tensorflow as tf\n'), ((2454, 2478), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2476, 2478), True, 'import tensorflow as tf\n'), ((2582, 2619), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (2604, 2619), True, 'import tensorflow as tf\n'), ((2798, 2819), 'preprocess.get_dict', 'preprocess.get_dict', ([], {}), '()\n', (2817, 2819), False, 'import preprocess\n'), ((1454, 1473), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (1463, 1473), True, 'import tensorflow as tf\n'), ((1495, 1553), 'tensorflow.get_variable', 'tf.get_variable', (['"""embedding"""', '[word_dict_size, hiden_size]'], {}), "('embedding', [word_dict_size, hiden_size])\n", (1510, 1553), True, 'import tensorflow as tf\n'), ((1571, 1615), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'input_ids'], {}), '(embedding, input_ids)\n', (1593, 1615), True, 'import tensorflow as tf\n'), ((2031, 2059), 'tensorflow.matmul', 'tf.matmul', (['output', 'softmax_w'], {}), '(output, softmax_w)\n', (2040, 2059), True, 'import tensorflow as tf\n'), ((2517, 2542), 'tensorflow.gradients', 'tf.gradients', (['cost', 'tvars'], {}), '(cost, tvars)\n', (2529, 2542), True, 'import tensorflow as tf\n'), ((2977, 2995), 'numpy.cumsum', 'np.cumsum', (['weights'], {}), '(weights)\n', (2986, 2995), True, 'import numpy as np\n'), ((3008, 3023), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3014, 3023), True, 'import numpy as np\n'), ((3149, 3161), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3159, 3161), True, 'import tensorflow as tf\n'), ((1690, 1729), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputs', 'Config.PROB_KEEP'], {}), '(inputs, Config.PROB_KEEP)\n', (1703, 1729), True, 'import 
tensorflow as tf\n'), ((2211, 2243), 'tensorflow.reshape', 'tf.reshape', (['output_targets', '[-1]'], {}), '(output_targets, [-1])\n', (2221, 2243), True, 'import tensorflow as tf\n'), ((3188, 3221), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3219, 3221), True, 'import tensorflow as tf\n'), ((3255, 3276), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (3274, 3276), True, 'import tensorflow as tf\n'), ((3707, 3723), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (3715, 3723), True, 'import numpy as np\n'), ((2268, 2300), 'tensorflow.reshape', 'tf.reshape', (['output_targets', '[-1]'], {}), '(output_targets, [-1])\n', (2278, 2300), True, 'import tensorflow as tf\n'), ((3064, 3081), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (3078, 3081), True, 'import numpy as np\n')] |
from __future__ import print_function,division
from builtins import range
from six import iteritems
from ..klampt import vectorops
from . import differences
from .objective import ObjectiveFunction
import numpy as np
class PathLengthObjectiveFunction(ObjectiveFunction):
"""Meant for a kinematic planning problem: measures path length.
Assumes the convention of a ControlSpaceAdaptor class, where the control u
is the next state.
Path cost is sum_{i=0}^{n-1} ||x[i+1]-x[i]||.
Incremental cost is ||x-u||.
For numerical optimization solvers, use the EnergyObjectiveFunction
which obtains similar results but is much more numerically-friendly.
"""
def __str__(self):
return "sum ||dx||"
def incremental(self,x,u):
return vectorops.distance(x,u)
def incremental_gradient(self,x,u):
#Function is ||x-u|| = sqrt(v^T v) with v=x-u
#derivative w.r.t. x is v/||v||
d = np.array(x)-u
g = d/np.linalg.norm(d)
return g,-g
def incremental_hessian(self,x,u):
#derivative w.r.t. x is v/||v|| with v=x-u
#hessian w.r.t. xx is (dv/dx ||v|| - v d||v||/dx )/ ||v||^2 =
# I/||v|| - vv^T/||v||^3
d = np.array(x)-u
dnorm = np.linalg.norm(d)
g = d/dnorm
H = np.eye(len(d))/dnorm - np.outer(d,d)/dnorm**3
return H,-H,H
class EnergyObjectiveFunction(ObjectiveFunction):
"""Meant for a kinematic planning problem: measures integral of squared
path length. Assumes the convention of a ControlSpaceAdaptor class, where
the control u is the next state.
Path cost is sum_{i=0}^{n-1} ||x[i+1]-x[i]||^2.
Incremental cost is ||x-u||^2.
For numerical optimization solvers, use the EnergyObjectiveFunction
which obtains similar results but is much more numerically-friendly.
"""
def __str__(self):
return "sum ||dx||^2"
def incremental(self,x,u):
return vectorops.distance(x,u)**2
def incremental_gradient(self,x,u):
d = np.array(x)-u
g = 2*d
return g,-g
def incremental_hessian(self,x,u):
H = 2*np.eye(len(x))
return H,-H,H
class StepCountObjectiveFunction(ObjectiveFunction):
"""Counts the number of steps until the goal is reached."""
def __str__(self):
return "Nsteps"
def incremental(self,x,u):
return 1
class TimeObjectiveFunction(ObjectiveFunction):
"""Integrates time step dt. Meant for a KinodynamicSpace class"""
def __str__(self):
return "T"
def incremental(self,x,u):
return u[0]
def incremental_gradient(self,x,u):
gu = np.zeros(len(u))
gu[0] = 1
return np.zeros(len(x)),gu
class QuadraticObjectiveFunction(ObjectiveFunction):
"""A quadratic objective function.
Incremental cost has the form:
1/2 x^T P x + p^T x + 1/2 u^T Q u + q^T u + x^T R u + s
Terminal cost has the form:
1/2 x^T A x + b^T x + c
The terms in the above equations are given by arguments provided
to __init__
- inc_xx: P
- inc_xu: R
- inc_uu: Q
- inc_x: p
- inc_u: q
- inc_0: s
- term_xx: A
- term_x: b
- term_0: c
"""
def __init__(self,inc_xx,inc_xu,inc_uu,inc_x,inc_u,inc_0,
term_xx,term_x,term_0=0):
self.inc_xx = inc_xx
self.inc_xu = inc_xu
self.inc_uu = inc_uu
self.inc_x = inc_x
self.inc_u = inc_u
self.inc_0 = inc_0
self.term_xx = term_xx
self.term_x = term_x
self.term_0 = term_0
def incremental(self,x,u=None):
return 0.5*(np.dot(x,np.dot(self.inc_xx,x))+np.dot(u,np.dot(self.inc_uu,u))) + np.dot(x,np.dot(self.inc_xu,u)) + np.dot(self.inc_x,x) + np.dot(self.inc_u,u) + self.inc_0
def terminal(self,x):
return 0.5*np.dot(x,np.dot(self.term_xx,x)) + np.dot(self.term_x,x) + self.term_0
def incremental_gradient(self,x,u):
gx = np.dot(self.inc_xx,x)+0.5*np.dot(self.inc_xu,u)+self.inc_x
gu = np.dot(self.inc_uu,u)+0.5*np.dot(self.inc_xu.T,x)+self.inc_u
return gx,gu
def incremental_hessian(self,x,u):
return self.inc_xx,self.inc_xu,self.inc_uu
def terminal_gradient(self,x):
return np.dot(self.term_xx,x) + self.term_x
def terminal_hessian(self,x):
return self.term_xx
class GoalDistanceObjectiveFunction(ObjectiveFunction):
"""Returns the distance between the terminal state and a goal state.
Can provide a weighting vector or matrix, if desired.
"""
def __init__(self,xgoal,weight=None):
self.xgoal = xgoal
self.weight = weight
if weight is not None:
self.weight = np.asarray(weight)
def __str__(self):
return "||xT-"+str(self.xgoal)+"||"
def terminal(self,x):
d = np.asarray(x)-self.xgoal
if self.weight is None:
return np.linalg.norm(d)
elif len(self.weight.shape) == 2:
return math.sqrt(np.dot(d,self.weight.dot(d)))
else:
return math.sqrt(np.dot(d,np.multiply(self.weight,d)))
def terminal_gradient(self,x):
d = np.asarray(x)-self.xgoal
if self.weight is None:
return d/np.linalg.norm(d)
elif len(self.weight.shape) == 2:
wd = self.weight.dot(d)
return wd/math.sqrt(np.dot(d,wd))
else:
wd = np.multiply(self.weight,d)
return wd/math.sqrt(np.dot(d,wd))
def terminal_hessian(self,x):
d = np.asarray(x)-self.xgoal
if self.weight is None:
dnorm = np.linalg.norm(d)
return np.eye(len(d))/dnorm - 2*np.outer(d,d)/dnorm**3
elif len(self.weight.shape) == 2:
wd = self.weight.dot(d)
dnorm = math.sqrt(np.dot(d,wd))
return np.diag(self.weight)/dnorm - 2*np.outer(d,wd)/dnorm**3
else:
wd = np.multiply(self.weight,d)
math.sqrt(np.dot(d,wd))
return self.weight/dnorm - 2*np.outer(d,wd)/dnorm**3
class SetDistanceObjectiveFunction(ObjectiveFunction):
"""Returns the distance between the terminal state and a goal set.
"""
def __init__(self,goalSet):
self.goalSet = goalSet
def __str__(self):
return "d(xT,"+str(self.goalSet)+")"
def terminal(self,x):
return max(self.goalSet.signedDistance(x),0)
def terminal_gradient(self,x):
d = self.goalSet.signedDistance(x)
if d < 0: return np.zeros(len(x))
return self.goalSet.signedDistance_gradient(x)
def terminal_hessian(self,x):
from . import difference
d = self.goalSet.signedDistance(x)
if d < 0: return np.zeros(len(x))
return difference.jacobian_forward_difference(self.goalSet.signedDistance_gradient,x,1e-4)
class TrackingObjectiveFunction(ObjectiveFunction):
"""Integrates tracking error for a timed kinodynamic space.
Assumes x[0] is the time variable, x[1:] is the state variable,
and u[0] is the time step dt.
The tracked trajectory is given by the Trajectory traj. Q
is a quadratic penalty matrix.
"""
def __init__(self,traj,Q,Qterm=None):
self.traj = traj
self.Q = Q
self.Qterm = Qterm
if Qterm is None:
self.Qterm = np.zeros(Q.shape)
def incremental(self,x,u):
t = x[0]
y = x[1:]
dt = u[0]
z = self.traj.eval(t)
d = np.asarray(y)-z
return dt*0.5*np.dot(d,np.dot(self.Q,d))
def terminal(self,x):
t = x[0]
y = x[1:]
z = self.traj.eval(t)
d = np.asarray(y)-z
return 0.5*np.dot(d,np.dot(self.Qterm,d))
def incremental_gradient(self,x,u):
t = x[0]
y = x[1:]
dt = u[0]
z = self.traj.eval(t)
d = np.asarray(y)-z
dz = self.traj.deriv(t)
gx = dt*np.hstack(([-np.dot(d,np.dot(self.Q,dz))],np.dot(self.Q,d)))
gu = np.zeros(len(u))
gu[0] = 0.5*np.dot(d,np.dot(self.Q,d))
return gx,gu
def terminal_gradient(self,x):
t = x[0]
y = x[1:]
z = self.traj.eval(t)
d = np.asarray(y)-z
dz = self.traj.deriv(t)
return np.hstack(([-np.dot(d,np.dot(self.Q,dz))],np.dot(self.Qterm,d)))
def incremental_hessian(self,x,u):
t = x[0]
y = x[1:]
dt = u[0]
z = self.traj.eval(t)
d = np.asarray(y)-z
dz = self.traj.deriv(t)
Hxx = dt*np.block([[[-np.dot(dz,np.dot(self.Q,dz))],np.dot(self.Q,dz)],[np.dot(self.Q,dz).T,self.Q]])
Hxu = np.zeros((len(x),len(u)))
Hxu[:,0] = np.hstack(([-np.dot(d,np.dot(self.Q,dz))],np.dot(self.Q,d)))
Huu = np.zeros((len(u),len(u)))
return Hxx,Hxu,Huu | [
"numpy.outer",
"numpy.multiply",
"numpy.asarray",
"numpy.zeros",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"numpy.diag"
] | [((1268, 1285), 'numpy.linalg.norm', 'np.linalg.norm', (['d'], {}), '(d)\n', (1282, 1285), True, 'import numpy as np\n'), ((964, 975), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (972, 975), True, 'import numpy as np\n'), ((992, 1009), 'numpy.linalg.norm', 'np.linalg.norm', (['d'], {}), '(d)\n', (1006, 1009), True, 'import numpy as np\n'), ((1238, 1249), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1246, 1249), True, 'import numpy as np\n'), ((2063, 2074), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2071, 2074), True, 'import numpy as np\n'), ((4342, 4365), 'numpy.dot', 'np.dot', (['self.term_xx', 'x'], {}), '(self.term_xx, x)\n', (4348, 4365), True, 'import numpy as np\n'), ((4797, 4815), 'numpy.asarray', 'np.asarray', (['weight'], {}), '(weight)\n', (4807, 4815), True, 'import numpy as np\n'), ((4921, 4934), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4931, 4934), True, 'import numpy as np\n'), ((4997, 5014), 'numpy.linalg.norm', 'np.linalg.norm', (['d'], {}), '(d)\n', (5011, 5014), True, 'import numpy as np\n'), ((5244, 5257), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (5254, 5257), True, 'import numpy as np\n'), ((5614, 5627), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (5624, 5627), True, 'import numpy as np\n'), ((5691, 5708), 'numpy.linalg.norm', 'np.linalg.norm', (['d'], {}), '(d)\n', (5705, 5708), True, 'import numpy as np\n'), ((7400, 7417), 'numpy.zeros', 'np.zeros', (['Q.shape'], {}), '(Q.shape)\n', (7408, 7417), True, 'import numpy as np\n'), ((7544, 7557), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (7554, 7557), True, 'import numpy as np\n'), ((7712, 7725), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (7722, 7725), True, 'import numpy as np\n'), ((7914, 7927), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (7924, 7927), True, 'import numpy as np\n'), ((8249, 8262), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (8259, 8262), True, 'import numpy as np\n'), ((8511, 8524), 
'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (8521, 8524), True, 'import numpy as np\n'), ((1341, 1355), 'numpy.outer', 'np.outer', (['d', 'd'], {}), '(d, d)\n', (1349, 1355), True, 'import numpy as np\n'), ((3845, 3866), 'numpy.dot', 'np.dot', (['self.inc_u', 'u'], {}), '(self.inc_u, u)\n', (3851, 3866), True, 'import numpy as np\n'), ((3959, 3981), 'numpy.dot', 'np.dot', (['self.term_x', 'x'], {}), '(self.term_x, x)\n', (3965, 3981), True, 'import numpy as np\n'), ((4048, 4070), 'numpy.dot', 'np.dot', (['self.inc_xx', 'x'], {}), '(self.inc_xx, x)\n', (4054, 4070), True, 'import numpy as np\n'), ((4120, 4142), 'numpy.dot', 'np.dot', (['self.inc_uu', 'u'], {}), '(self.inc_uu, u)\n', (4126, 4142), True, 'import numpy as np\n'), ((5322, 5339), 'numpy.linalg.norm', 'np.linalg.norm', (['d'], {}), '(d)\n', (5336, 5339), True, 'import numpy as np\n'), ((5495, 5522), 'numpy.multiply', 'np.multiply', (['self.weight', 'd'], {}), '(self.weight, d)\n', (5506, 5522), True, 'import numpy as np\n'), ((6003, 6030), 'numpy.multiply', 'np.multiply', (['self.weight', 'd'], {}), '(self.weight, d)\n', (6014, 6030), True, 'import numpy as np\n'), ((7591, 7608), 'numpy.dot', 'np.dot', (['self.Q', 'd'], {}), '(self.Q, d)\n', (7597, 7608), True, 'import numpy as np\n'), ((7756, 7777), 'numpy.dot', 'np.dot', (['self.Qterm', 'd'], {}), '(self.Qterm, d)\n', (7762, 7777), True, 'import numpy as np\n'), ((8098, 8115), 'numpy.dot', 'np.dot', (['self.Q', 'd'], {}), '(self.Q, d)\n', (8104, 8115), True, 'import numpy as np\n'), ((8354, 8375), 'numpy.dot', 'np.dot', (['self.Qterm', 'd'], {}), '(self.Qterm, d)\n', (8360, 8375), True, 'import numpy as np\n'), ((8770, 8787), 'numpy.dot', 'np.dot', (['self.Q', 'd'], {}), '(self.Q, d)\n', (8776, 8787), True, 'import numpy as np\n'), ((3822, 3843), 'numpy.dot', 'np.dot', (['self.inc_x', 'x'], {}), '(self.inc_x, x)\n', (3828, 3843), True, 'import numpy as np\n'), ((4074, 4096), 'numpy.dot', 'np.dot', (['self.inc_xu', 'u'], {}), '(self.inc_xu, 
u)\n', (4080, 4096), True, 'import numpy as np\n'), ((4146, 4170), 'numpy.dot', 'np.dot', (['self.inc_xu.T', 'x'], {}), '(self.inc_xu.T, x)\n', (4152, 4170), True, 'import numpy as np\n'), ((5884, 5897), 'numpy.dot', 'np.dot', (['d', 'wd'], {}), '(d, wd)\n', (5890, 5897), True, 'import numpy as np\n'), ((6052, 6065), 'numpy.dot', 'np.dot', (['d', 'wd'], {}), '(d, wd)\n', (6058, 6065), True, 'import numpy as np\n'), ((8020, 8037), 'numpy.dot', 'np.dot', (['self.Q', 'd'], {}), '(self.Q, d)\n', (8026, 8037), True, 'import numpy as np\n'), ((3933, 3956), 'numpy.dot', 'np.dot', (['self.term_xx', 'x'], {}), '(self.term_xx, x)\n', (3939, 3956), True, 'import numpy as np\n'), ((5168, 5195), 'numpy.multiply', 'np.multiply', (['self.weight', 'd'], {}), '(self.weight, d)\n', (5179, 5195), True, 'import numpy as np\n'), ((5450, 5463), 'numpy.dot', 'np.dot', (['d', 'wd'], {}), '(d, wd)\n', (5456, 5463), True, 'import numpy as np\n'), ((5554, 5567), 'numpy.dot', 'np.dot', (['d', 'wd'], {}), '(d, wd)\n', (5560, 5567), True, 'import numpy as np\n'), ((5753, 5767), 'numpy.outer', 'np.outer', (['d', 'd'], {}), '(d, d)\n', (5761, 5767), True, 'import numpy as np\n'), ((5917, 5937), 'numpy.diag', 'np.diag', (['self.weight'], {}), '(self.weight)\n', (5924, 5937), True, 'import numpy as np\n'), ((8619, 8637), 'numpy.dot', 'np.dot', (['self.Q', 'dz'], {}), '(self.Q, dz)\n', (8625, 8637), True, 'import numpy as np\n'), ((3797, 3819), 'numpy.dot', 'np.dot', (['self.inc_xu', 'u'], {}), '(self.inc_xu, u)\n', (3803, 3819), True, 'import numpy as np\n'), ((5948, 5963), 'numpy.outer', 'np.outer', (['d', 'wd'], {}), '(d, wd)\n', (5956, 5963), True, 'import numpy as np\n'), ((6107, 6122), 'numpy.outer', 'np.outer', (['d', 'wd'], {}), '(d, wd)\n', (6115, 6122), True, 'import numpy as np\n'), ((8334, 8352), 'numpy.dot', 'np.dot', (['self.Q', 'dz'], {}), '(self.Q, dz)\n', (8340, 8352), True, 'import numpy as np\n'), ((8639, 8657), 'numpy.dot', 'np.dot', (['self.Q', 'dz'], {}), '(self.Q, dz)\n', 
(8645, 8657), True, 'import numpy as np\n'), ((8750, 8768), 'numpy.dot', 'np.dot', (['self.Q', 'dz'], {}), '(self.Q, dz)\n', (8756, 8768), True, 'import numpy as np\n'), ((8000, 8018), 'numpy.dot', 'np.dot', (['self.Q', 'dz'], {}), '(self.Q, dz)\n', (8006, 8018), True, 'import numpy as np\n'), ((3730, 3752), 'numpy.dot', 'np.dot', (['self.inc_xx', 'x'], {}), '(self.inc_xx, x)\n', (3736, 3752), True, 'import numpy as np\n'), ((3762, 3784), 'numpy.dot', 'np.dot', (['self.inc_uu', 'u'], {}), '(self.inc_uu, u)\n', (3768, 3784), True, 'import numpy as np\n'), ((8599, 8617), 'numpy.dot', 'np.dot', (['self.Q', 'dz'], {}), '(self.Q, dz)\n', (8605, 8617), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import cv2
import numpy as np
import time
import glfw
import OpenGL.GL as gl
import imgui
from imgui.integrations.glfw import GlfwRenderer
import ceiltrack
import recordreader
# starting position for localization
# negative x because we also mirror the track about X
HOME = [ceiltrack.X_GRID*-2.5, ceiltrack.Y_GRID*0.5]
def load_texture(im):
# gl.glEnable(gl.GL_TEXTURE_2D)
texid = gl.glGenTextures(1)
gl.glBindTexture(gl.GL_TEXTURE_2D, texid)
gl.glTexParameteri(gl.GL_TEXTURE_2D,
gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D,
gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA,
im.shape[1], im.shape[0], 0,
gl.GL_BGR, gl.GL_UNSIGNED_BYTE, im)
return texid
def unload_texture(texid):
gl.glDeleteTextures([texid])
def impl_glfw_init():
width, height = 1280, 720
window_name = "cycloid replay viewer"
if not glfw.init():
print("Could not initialize OpenGL context")
exit(1)
# OS X supports only forward-compatible core profiles from 3.2
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)
# Create a windowed mode window and its OpenGL context
window = glfw.create_window(
int(width), int(height), window_name, None, None
)
glfw.make_context_current(window)
if not window:
glfw.terminate()
print("Could not initialize Window")
exit(1)
return window
class SLAMGUI:
def __init__(self, fname):
self.unloadlist = []
self.f = open(fname, "rb")
print("scanning ", fname, "...")
self.scanner = recordreader.RecordScanner(self.f)
self.frametexid = None
self.playing = False
self.ts = []
self.camdata = ceiltrack.ceillut()
self.f.seek(0, 0)
self.ceilheight = ceiltrack.CEIL_HEIGHT
# do a full tracking here on load
B = np.float32([HOME[0], HOME[1], 0])
self.track = []
match_time = 0
opt_time = 0
first = True
floordata = []
floormask = None
for frdata in recordreader.RecordIterator(self.f):
if 'yuv420' not in frdata:
continue
self.ts.append(frdata['tstamp'])
yuv420 = frdata['yuv420']
gray = yuv420[:480]
bgr = cv2.cvtColor(yuv420, cv2.COLOR_YUV2BGR_I420)
t0 = time.time()
xy = ceiltrack.match(gray, *self.camdata)
tm = time.time()
if first:
first = False
for i in range(6):
cost, dB = ceiltrack.cost(xy, *B)
B += dB
#B_straight, cost_straight = B, cost
#B = np.float32([HOME[0], HOME[1], np.pi/2])
#for i in range(6):
# cost, dB = ceiltrack.cost(xy, *B)
# B += dB
#if cost_straight < cost:
# B = B_straight
# we need an example frame to initialize the floor lookup table
# to filter out the visible body posts
self.floorlut = ceiltrack.floorlut(gray)
floormask = self.floorlut[0]
else:
for i in range(2):
c, dB = ceiltrack.cost(xy, *B)
B += dB
topt = time.time()
match_time += tm - t0
opt_time += topt - tm
self.track.append(B.copy())
floordata.append(bgr[floormask])
self.ts = np.array(self.ts)
self.track = np.array(self.track)
self.origtrack = self.track.copy()
self.track[:, 0] = -self.track[:, 0]
self.track[:, 2] = -self.track[:, 2]
# mirror the floor-pixel lookup table x coordinates also
self.floorlut[1][0] = -self.floorlut[1][0]
self.floordata = np.array(floordata)
self.loadframe(0)
print("done,", match_time, "secs match_time", opt_time, "sec opt_time")
floorimg = ceiltrack.render_floor(
self.track, self.floordata, self.floorlut[1])
if True:
xgm = ceiltrack.X_GRID * ceiltrack.CEIL_HEIGHT
ygm = ceiltrack.Y_GRID * ceiltrack.CEIL_HEIGHT
Z = 50 # pixels per meter
for x in range(0, 1+int(1000 / (xgm*Z))):
for y in range(0, 1+int(500 / (ygm*Z))):
cv2.circle(floorimg, (int(x*xgm*Z), int(y*ygm*Z)), int(0.25*Z), (255, 255, 0))
cv2.imwrite("map.png", floorimg)
self.floortex = load_texture(floorimg)
print("home location:", HOME)
def loadframe(self, i):
if self.frametexid is not None:
self.unloadlist.append(self.frametexid)
self.i = i
self.frame = self.scanner.frame(i)
if 'yuv420' not in self.frame:
return
yuv420 = self.frame['yuv420']
# optional: front view and annotated ceiling view?
im = cv2.cvtColor(yuv420, cv2.COLOR_YUV2BGR_I420)
xg = ceiltrack.X_GRID * self.ceilheight / ceiltrack.CEIL_HEIGHT
yg = ceiltrack.Y_GRID * self.ceilheight / ceiltrack.CEIL_HEIGHT
gray = yuv420[:480]
xy = ceiltrack.match(gray, *self.camdata)
B = self.origtrack[self.i]
for i in range(6):
cost, dB = ceiltrack.costxyg(xg, yg, xy, *B)
B += dB
for gp in ceiltrack.mkgrid(xg, yg, 31, *-B)[0]:
cv2.circle(im, (int(gp[0]), int(gp[1])), 3, (255, 0, 0), 1)
self.frametexid = load_texture(im)
def nextframe(self):
if self.i < self.scanner.num_frames() - 1:
self.loadframe(self.i+1)
def render_timeline(self):
imgui.begin("timeline")
tstamp = self.frame['tstamp']
if imgui.button("<"):
self.playing = False
if self.i > 0:
self.loadframe(self.i - 1)
imgui.same_line()
if self.playing:
if (self.i == len(self.ts)-1) or imgui.button("stop"):
self.playing = False
elif time.time() >= self.ts[self.i+1] - self.t0:
self.nextframe()
elif imgui.button("play"):
self.playing = True
self.t0 = tstamp - time.time()
imgui.same_line()
if imgui.button(">"):
self.playing = False
self.nextframe()
tsfrac = tstamp - int(tstamp)
tstring = time.strftime("%H:%M:%S.", time.localtime(
tstamp)) + "%02d" % (tsfrac*100)
imgui.same_line()
imgui.text(tstring)
w = imgui.get_window_width()
imgui.image(self.frametexid, w, 480*w/640)
changed, i = imgui.slider_int(
"frame", self.i, 0, self.scanner.num_frames()-1)
if changed:
self.playing = False
self.loadframe(i)
imgui.end()
def render_map(self):
imgui.begin("map")
imgui.slider_float("x (m)", self.track[self.i, 0] * ceiltrack.CEIL_HEIGHT, -80, 80)
imgui.slider_float("y (m)", self.track[self.i, 1] * ceiltrack.CEIL_HEIGHT, -80, 80)
imgui.slider_float("theta", self.track[self.i, 2] % (np.pi*2), -7, 7)
imgui.slider_float("x (grid)", self.track[self.i, 0] / ceiltrack.X_GRID, -10, 10)
imgui.slider_float("y (grid)", self.track[self.i, 1] / ceiltrack.X_GRID, -10, 10)
changed, self.ceilheight = imgui.slider_float("ceiling height (m)", self.ceilheight, 2, 4)
if changed:
self.loadframe(self.i)
dl = imgui.get_window_draw_list()
pos = imgui.get_cursor_screen_pos()
siz = imgui.get_content_region_available()
if siz[1] == 0:
siz = [400, 300]
# just use a fixed size
w = siz[0]
imgui.image_button(self.floortex, w, w/2, frame_padding=0)
# imgui.image_button(self.floortex, siz[0], siz[0])
origin = [pos[0], pos[1]]
scale = 50 * ceiltrack.CEIL_HEIGHT * w/1000
trackcolor = imgui.get_color_u32_rgba(0.3, 0.5, 0.3, 1)
for i in range(1, self.i):
dl.add_line(
origin[0] + scale * self.track[i-1, 0],
origin[1] + scale * self.track[i-1, 1],
origin[0] + scale * self.track[i, 0],
origin[1] + scale * self.track[i, 1],
trackcolor, 1.5)
carcolor = imgui.get_color_u32_rgba(0, 1, 0.6, 1)
B = self.track[self.i]
dl.add_line(
origin[0] + scale * B[0],
origin[1] + scale * B[1],
origin[0] + scale * (B[0] + np.cos(B[2])),
origin[1] + scale * (B[1] - np.sin(B[2])),
carcolor, 1.5)
imgui.end()
def render(self):
for t in self.unloadlist:
unload_texture(t)
self.unloadlist = []
self.render_timeline()
self.render_map()
def main(recfile):
imgui.create_context()
window = impl_glfw_init()
impl = GlfwRenderer(window)
slamgui = SLAMGUI(recfile)
while not glfw.window_should_close(window):
glfw.poll_events()
impl.process_inputs()
imgui.new_frame()
if imgui.begin_main_menu_bar():
if imgui.begin_menu("File", True):
clicked_quit, _ = imgui.menu_item(
"Quit", 'Cmd+Q', False, True)
if clicked_quit:
exit(0)
imgui.end_menu()
imgui.end_main_menu_bar()
slamgui.render()
gl.glClearColor(0, 0, 0, 1)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
imgui.render()
impl.render(imgui.get_draw_data())
glfw.swap_buffers(window)
impl.shutdown()
glfw.terminate()
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print("usage:", sys.argv[0], "[cycloid-x.rec]")
exit(1)
main(sys.argv[1])
| [
"recordreader.RecordIterator",
"glfw.poll_events",
"glfw.make_context_current",
"OpenGL.GL.glBindTexture",
"glfw.window_should_close",
"ceiltrack.floorlut",
"imgui.same_line",
"imgui.slider_float",
"numpy.sin",
"imgui.begin_menu",
"OpenGL.GL.glClearColor",
"OpenGL.GL.glClear",
"ceiltrack.ren... | [((457, 476), 'OpenGL.GL.glGenTextures', 'gl.glGenTextures', (['(1)'], {}), '(1)\n', (473, 476), True, 'import OpenGL.GL as gl\n'), ((481, 522), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', (['gl.GL_TEXTURE_2D', 'texid'], {}), '(gl.GL_TEXTURE_2D, texid)\n', (497, 522), True, 'import OpenGL.GL as gl\n'), ((527, 603), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['gl.GL_TEXTURE_2D', 'gl.GL_TEXTURE_MIN_FILTER', 'gl.GL_LINEAR'], {}), '(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)\n', (545, 603), True, 'import OpenGL.GL as gl\n'), ((631, 707), 'OpenGL.GL.glTexParameteri', 'gl.glTexParameteri', (['gl.GL_TEXTURE_2D', 'gl.GL_TEXTURE_MAG_FILTER', 'gl.GL_LINEAR'], {}), '(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)\n', (649, 707), True, 'import OpenGL.GL as gl\n'), ((735, 853), 'OpenGL.GL.glTexImage2D', 'gl.glTexImage2D', (['gl.GL_TEXTURE_2D', '(0)', 'gl.GL_RGBA', 'im.shape[1]', 'im.shape[0]', '(0)', 'gl.GL_BGR', 'gl.GL_UNSIGNED_BYTE', 'im'], {}), '(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA, im.shape[1], im.shape[0], \n 0, gl.GL_BGR, gl.GL_UNSIGNED_BYTE, im)\n', (750, 853), True, 'import OpenGL.GL as gl\n'), ((939, 967), 'OpenGL.GL.glDeleteTextures', 'gl.glDeleteTextures', (['[texid]'], {}), '([texid])\n', (958, 967), True, 'import OpenGL.GL as gl\n'), ((1230, 1277), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MAJOR', '(3)'], {}), '(glfw.CONTEXT_VERSION_MAJOR, 3)\n', (1246, 1277), False, 'import glfw\n'), ((1282, 1329), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MINOR', '(3)'], {}), '(glfw.CONTEXT_VERSION_MINOR, 3)\n', (1298, 1329), False, 'import glfw\n'), ((1334, 1397), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_PROFILE', 'glfw.OPENGL_CORE_PROFILE'], {}), '(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n', (1350, 1397), False, 'import glfw\n'), ((1403, 1459), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_FORWARD_COMPAT', 'gl.GL_TRUE'], {}), 
'(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)\n', (1419, 1459), False, 'import glfw\n'), ((1620, 1653), 'glfw.make_context_current', 'glfw.make_context_current', (['window'], {}), '(window)\n', (1645, 1653), False, 'import glfw\n'), ((9226, 9248), 'imgui.create_context', 'imgui.create_context', ([], {}), '()\n', (9246, 9248), False, 'import imgui\n'), ((9290, 9310), 'imgui.integrations.glfw.GlfwRenderer', 'GlfwRenderer', (['window'], {}), '(window)\n', (9302, 9310), False, 'from imgui.integrations.glfw import GlfwRenderer\n'), ((10026, 10042), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (10040, 10042), False, 'import glfw\n'), ((1076, 1087), 'glfw.init', 'glfw.init', ([], {}), '()\n', (1085, 1087), False, 'import glfw\n'), ((1682, 1698), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (1696, 1698), False, 'import glfw\n'), ((1955, 1989), 'recordreader.RecordScanner', 'recordreader.RecordScanner', (['self.f'], {}), '(self.f)\n', (1981, 1989), False, 'import recordreader\n'), ((2094, 2113), 'ceiltrack.ceillut', 'ceiltrack.ceillut', ([], {}), '()\n', (2111, 2113), False, 'import ceiltrack\n'), ((2243, 2276), 'numpy.float32', 'np.float32', (['[HOME[0], HOME[1], 0]'], {}), '([HOME[0], HOME[1], 0])\n', (2253, 2276), True, 'import numpy as np\n'), ((2436, 2471), 'recordreader.RecordIterator', 'recordreader.RecordIterator', (['self.f'], {}), '(self.f)\n', (2463, 2471), False, 'import recordreader\n'), ((3879, 3896), 'numpy.array', 'np.array', (['self.ts'], {}), '(self.ts)\n', (3887, 3896), True, 'import numpy as np\n'), ((3918, 3938), 'numpy.array', 'np.array', (['self.track'], {}), '(self.track)\n', (3926, 3938), True, 'import numpy as np\n'), ((4213, 4232), 'numpy.array', 'np.array', (['floordata'], {}), '(floordata)\n', (4221, 4232), True, 'import numpy as np\n'), ((4359, 4427), 'ceiltrack.render_floor', 'ceiltrack.render_floor', (['self.track', 'self.floordata', 'self.floorlut[1]'], {}), '(self.track, self.floordata, self.floorlut[1])\n', (4381, 4427), 
False, 'import ceiltrack\n'), ((4834, 4866), 'cv2.imwrite', 'cv2.imwrite', (['"""map.png"""', 'floorimg'], {}), "('map.png', floorimg)\n", (4845, 4866), False, 'import cv2\n'), ((5303, 5347), 'cv2.cvtColor', 'cv2.cvtColor', (['yuv420', 'cv2.COLOR_YUV2BGR_I420'], {}), '(yuv420, cv2.COLOR_YUV2BGR_I420)\n', (5315, 5347), False, 'import cv2\n'), ((5534, 5570), 'ceiltrack.match', 'ceiltrack.match', (['gray', '*self.camdata'], {}), '(gray, *self.camdata)\n', (5549, 5570), False, 'import ceiltrack\n'), ((6037, 6060), 'imgui.begin', 'imgui.begin', (['"""timeline"""'], {}), "('timeline')\n", (6048, 6060), False, 'import imgui\n'), ((6110, 6127), 'imgui.button', 'imgui.button', (['"""<"""'], {}), "('<')\n", (6122, 6127), False, 'import imgui\n'), ((6240, 6257), 'imgui.same_line', 'imgui.same_line', ([], {}), '()\n', (6255, 6257), False, 'import imgui\n'), ((6599, 6616), 'imgui.same_line', 'imgui.same_line', ([], {}), '()\n', (6614, 6616), False, 'import imgui\n'), ((6628, 6645), 'imgui.button', 'imgui.button', (['""">"""'], {}), "('>')\n", (6640, 6645), False, 'import imgui\n'), ((6861, 6878), 'imgui.same_line', 'imgui.same_line', ([], {}), '()\n', (6876, 6878), False, 'import imgui\n'), ((6887, 6906), 'imgui.text', 'imgui.text', (['tstring'], {}), '(tstring)\n', (6897, 6906), False, 'import imgui\n'), ((6920, 6944), 'imgui.get_window_width', 'imgui.get_window_width', ([], {}), '()\n', (6942, 6944), False, 'import imgui\n'), ((6953, 6999), 'imgui.image', 'imgui.image', (['self.frametexid', 'w', '(480 * w / 640)'], {}), '(self.frametexid, w, 480 * w / 640)\n', (6964, 6999), False, 'import imgui\n'), ((7188, 7199), 'imgui.end', 'imgui.end', ([], {}), '()\n', (7197, 7199), False, 'import imgui\n'), ((7235, 7253), 'imgui.begin', 'imgui.begin', (['"""map"""'], {}), "('map')\n", (7246, 7253), False, 'import imgui\n'), ((7262, 7350), 'imgui.slider_float', 'imgui.slider_float', (['"""x (m)"""', '(self.track[self.i, 0] * ceiltrack.CEIL_HEIGHT)', '(-80)', '(80)'], {}), "('x (m)', 
self.track[self.i, 0] * ceiltrack.CEIL_HEIGHT, \n -80, 80)\n", (7280, 7350), False, 'import imgui\n'), ((7354, 7442), 'imgui.slider_float', 'imgui.slider_float', (['"""y (m)"""', '(self.track[self.i, 1] * ceiltrack.CEIL_HEIGHT)', '(-80)', '(80)'], {}), "('y (m)', self.track[self.i, 1] * ceiltrack.CEIL_HEIGHT, \n -80, 80)\n", (7372, 7442), False, 'import imgui\n'), ((7446, 7517), 'imgui.slider_float', 'imgui.slider_float', (['"""theta"""', '(self.track[self.i, 2] % (np.pi * 2))', '(-7)', '(7)'], {}), "('theta', self.track[self.i, 2] % (np.pi * 2), -7, 7)\n", (7464, 7517), False, 'import imgui\n'), ((7524, 7610), 'imgui.slider_float', 'imgui.slider_float', (['"""x (grid)"""', '(self.track[self.i, 0] / ceiltrack.X_GRID)', '(-10)', '(10)'], {}), "('x (grid)', self.track[self.i, 0] / ceiltrack.X_GRID, -\n 10, 10)\n", (7542, 7610), False, 'import imgui\n'), ((7614, 7700), 'imgui.slider_float', 'imgui.slider_float', (['"""y (grid)"""', '(self.track[self.i, 1] / ceiltrack.X_GRID)', '(-10)', '(10)'], {}), "('y (grid)', self.track[self.i, 1] / ceiltrack.X_GRID, -\n 10, 10)\n", (7632, 7700), False, 'import imgui\n'), ((7732, 7795), 'imgui.slider_float', 'imgui.slider_float', (['"""ceiling height (m)"""', 'self.ceilheight', '(2)', '(4)'], {}), "('ceiling height (m)', self.ceilheight, 2, 4)\n", (7750, 7795), False, 'import imgui\n'), ((7865, 7893), 'imgui.get_window_draw_list', 'imgui.get_window_draw_list', ([], {}), '()\n', (7891, 7893), False, 'import imgui\n'), ((7908, 7937), 'imgui.get_cursor_screen_pos', 'imgui.get_cursor_screen_pos', ([], {}), '()\n', (7935, 7937), False, 'import imgui\n'), ((7952, 7988), 'imgui.get_content_region_available', 'imgui.get_content_region_available', ([], {}), '()\n', (7986, 7988), False, 'import imgui\n'), ((8101, 8161), 'imgui.image_button', 'imgui.image_button', (['self.floortex', 'w', '(w / 2)'], {'frame_padding': '(0)'}), '(self.floortex, w, w / 2, frame_padding=0)\n', (8119, 8161), False, 'import imgui\n'), ((8327, 8369), 
'imgui.get_color_u32_rgba', 'imgui.get_color_u32_rgba', (['(0.3)', '(0.5)', '(0.3)', '(1)'], {}), '(0.3, 0.5, 0.3, 1)\n', (8351, 8369), False, 'import imgui\n'), ((8703, 8741), 'imgui.get_color_u32_rgba', 'imgui.get_color_u32_rgba', (['(0)', '(1)', '(0.6)', '(1)'], {}), '(0, 1, 0.6, 1)\n', (8727, 8741), False, 'import imgui\n'), ((9016, 9027), 'imgui.end', 'imgui.end', ([], {}), '()\n', (9025, 9027), False, 'import imgui\n'), ((9357, 9389), 'glfw.window_should_close', 'glfw.window_should_close', (['window'], {}), '(window)\n', (9381, 9389), False, 'import glfw\n'), ((9399, 9417), 'glfw.poll_events', 'glfw.poll_events', ([], {}), '()\n', (9415, 9417), False, 'import glfw\n'), ((9456, 9473), 'imgui.new_frame', 'imgui.new_frame', ([], {}), '()\n', (9471, 9473), False, 'import imgui\n'), ((9486, 9513), 'imgui.begin_main_menu_bar', 'imgui.begin_main_menu_bar', ([], {}), '()\n', (9511, 9513), False, 'import imgui\n'), ((9829, 9856), 'OpenGL.GL.glClearColor', 'gl.glClearColor', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (9844, 9856), True, 'import OpenGL.GL as gl\n'), ((9865, 9899), 'OpenGL.GL.glClear', 'gl.glClear', (['gl.GL_COLOR_BUFFER_BIT'], {}), '(gl.GL_COLOR_BUFFER_BIT)\n', (9875, 9899), True, 'import OpenGL.GL as gl\n'), ((9909, 9923), 'imgui.render', 'imgui.render', ([], {}), '()\n', (9921, 9923), False, 'import imgui\n'), ((9975, 10000), 'glfw.swap_buffers', 'glfw.swap_buffers', (['window'], {}), '(window)\n', (9992, 10000), False, 'import glfw\n'), ((2670, 2714), 'cv2.cvtColor', 'cv2.cvtColor', (['yuv420', 'cv2.COLOR_YUV2BGR_I420'], {}), '(yuv420, cv2.COLOR_YUV2BGR_I420)\n', (2682, 2714), False, 'import cv2\n'), ((2732, 2743), 'time.time', 'time.time', ([], {}), '()\n', (2741, 2743), False, 'import time\n'), ((2761, 2797), 'ceiltrack.match', 'ceiltrack.match', (['gray', '*self.camdata'], {}), '(gray, *self.camdata)\n', (2776, 2797), False, 'import ceiltrack\n'), ((2815, 2826), 'time.time', 'time.time', ([], {}), '()\n', (2824, 2826), False, 'import 
time\n'), ((3696, 3707), 'time.time', 'time.time', ([], {}), '()\n', (3705, 3707), False, 'import time\n'), ((5656, 5689), 'ceiltrack.costxyg', 'ceiltrack.costxyg', (['xg', 'yg', 'xy', '*B'], {}), '(xg, yg, xy, *B)\n', (5673, 5689), False, 'import ceiltrack\n'), ((5729, 5764), 'ceiltrack.mkgrid', 'ceiltrack.mkgrid', (['xg', 'yg', '(31)', '*(-B)'], {}), '(xg, yg, 31, *(-B))\n', (5745, 5764), False, 'import ceiltrack\n'), ((6494, 6514), 'imgui.button', 'imgui.button', (['"""play"""'], {}), "('play')\n", (6506, 6514), False, 'import imgui\n'), ((9530, 9560), 'imgui.begin_menu', 'imgui.begin_menu', (['"""File"""', '(True)'], {}), "('File', True)\n", (9546, 9560), False, 'import imgui\n'), ((9769, 9794), 'imgui.end_main_menu_bar', 'imgui.end_main_menu_bar', ([], {}), '()\n', (9792, 9794), False, 'import imgui\n'), ((9944, 9965), 'imgui.get_draw_data', 'imgui.get_draw_data', ([], {}), '()\n', (9963, 9965), False, 'import imgui\n'), ((3475, 3499), 'ceiltrack.floorlut', 'ceiltrack.floorlut', (['gray'], {}), '(gray)\n', (3493, 3499), False, 'import ceiltrack\n'), ((6328, 6348), 'imgui.button', 'imgui.button', (['"""stop"""'], {}), "('stop')\n", (6340, 6348), False, 'import imgui\n'), ((6792, 6814), 'time.localtime', 'time.localtime', (['tstamp'], {}), '(tstamp)\n', (6806, 6814), False, 'import time\n'), ((9596, 9641), 'imgui.menu_item', 'imgui.menu_item', (['"""Quit"""', '"""Cmd+Q"""', '(False)', '(True)'], {}), "('Quit', 'Cmd+Q', False, True)\n", (9611, 9641), False, 'import imgui\n'), ((9740, 9756), 'imgui.end_menu', 'imgui.end_menu', ([], {}), '()\n', (9754, 9756), False, 'import imgui\n'), ((2945, 2967), 'ceiltrack.cost', 'ceiltrack.cost', (['xy', '*B'], {}), '(xy, *B)\n', (2959, 2967), False, 'import ceiltrack\n'), ((3626, 3648), 'ceiltrack.cost', 'ceiltrack.cost', (['xy', '*B'], {}), '(xy, *B)\n', (3640, 3648), False, 'import ceiltrack\n'), ((6404, 6415), 'time.time', 'time.time', ([], {}), '()\n', (6413, 6415), False, 'import time\n'), ((6579, 6590), 'time.time', 
'time.time', ([], {}), '()\n', (6588, 6590), False, 'import time\n'), ((8910, 8922), 'numpy.cos', 'np.cos', (['B[2]'], {}), '(B[2])\n', (8916, 8922), True, 'import numpy as np\n'), ((8965, 8977), 'numpy.sin', 'np.sin', (['B[2]'], {}), '(B[2])\n', (8971, 8977), True, 'import numpy as np\n')] |
import numpy as np
import tvm
import time
# The sizes of inputs and filters
batch = 256        # number of samples processed per convolution call
in_channel = 256   # input feature channels
out_channel = 512  # output feature channels (number of filters)
in_size = 14       # spatial height/width of the square input
kernel = 3         # spatial size of the square filters
pad = 1            # zero-padding on each spatial border
stride = 1         # convolution stride
# Standard conv output-size formula: (H - K + 2P) // S + 1
out_size = (in_size - kernel + 2*pad) // stride + 1
def conv2d():
    """
    .. _opt-conv-gpu:
    How to optimize convolution on GPU
    ==================================
    **Author**: `<NAME> <https://homes.cs.washington.edu/~haichen/>`_
    In this tutorial, we will demonstrate how to write a high performance
    convolution implementation in TVM. We use square size input tensors and filters
    as an example, and assume the input to convolution has a large batch. In this
    example, we use a different layout to store the data in order to achieve better
    data locality. The buffer layout is HWCN, which stands for height, width,
    channel, batch.

    Returns:
        tuple: ``(s, [A, W, B])`` -- the optimized TVM schedule and the
        input placeholders ``A`` (data), ``W`` (filters) plus the output
        tensor ``B``, ready to be passed to ``tvm.build``.
    """
    ################################################################
    # Preparation and Algorithm
    # -------------------------
    #
    # We use the fixed size for input tensors with 256 channels and 14 x 14
    # dimensions. The batch size is 256. Convolution filters contain 512 filters
    # of size 3 x 3. We use stride size 1 and padding size 1 for the
    # convolution. The following code defines the convolution algorithm in TVM.
    #
    from tvm import te
    # Algorithm: declare symbolic input placeholders in HWCN layout.
    A = te.placeholder((in_size, in_size, in_channel, batch), name='A')
    W = te.placeholder((kernel, kernel, in_channel, out_channel), name='W')
    # Pad input with zeros on the spatial borders (if_then_else keeps it lazy).
    Apad = te.compute(
        (in_size + 2*pad, in_size + 2*pad, in_channel, batch),
        lambda yy, xx, cc, nn: tvm.tir.if_then_else(
            tvm.tir.all(yy >= pad, yy - pad < in_size,
                        xx >= pad, xx - pad < in_size),
            A[yy - pad, xx - pad, cc, nn], tvm.tir.const(0., "float32")),
        name='Apad')
    # Create reduction variables (over input channels and the kernel window)
    rc = te.reduce_axis((0, in_channel), name='rc')
    ry = te.reduce_axis((0, kernel), name='ry')
    rx = te.reduce_axis((0, kernel), name='rx')
    # Compute the convolution
    B = te.compute(
        (out_size, out_size, out_channel, batch),
        lambda yy, xx, ff, nn: te.sum(
            Apad[yy * stride + ry, xx * stride + rx, rc, nn] * W[ry, rx, rc, ff],
            axis=[ry, rx, rc]),
        name='B')
    ###############################################################################
    # Memory Hierarchy
    # ----------------
    #
    # We first specify the memory hierarchy for buffers. The figure below shows the
    # GPU memory hierarchy. One important difference from CPU memory hierarchy is
    # that GPU provides a cache buffer called shared memory, which is managed by
    # programmers. Thus how to maximize the data reuse in the shared memory is
    # critical to achieve high performance in GPU kernels.
    #
    # .. image:: https://github.com/dmlc/web-data/raw/master/tvm/tutorial/gpu_memory_hierarchy.png
    #      :align: center
    #      :height: 319px
    #      :width: 271px
    #
    # In this example, we load both Apad and W into buffer AA and WW, which are
    # stored in the shared memory. These bufferes will be later shared by all
    # threads within the same thread block to compute the convolution. Each thread
    # then loads its own part from shared buffer into their local registers, AL and
    # WL. BL is a local cache of output B, which is also stored in the thread local
    # registers.
    #
    # Designate the memory hierarchy
    s = te.create_schedule(B.op)
    s[Apad].compute_inline()  # compute Apad inline
    AA = s.cache_read(Apad, 'shared', [B])
    WW = s.cache_read(W, "shared", [B])
    AL = s.cache_read(AA, "local", [B])
    WL = s.cache_read(WW, "local", [B])
    BL = s.cache_write(B, "local")
    ###############################################################################
    # Blocking
    # --------
    #
    # The following code splits the workload into thread blocks and individual
    # threads. We follow the blocking scheme in the matrix multiply. As shown in the
    # figure below, given a pixel coordinate (y, x), a thread block is responsible
    # for computing a region of block_factor x block_factor (64 x 64) for output
    # channels and batch. Due to the limit of shared memory space, we only load step
    # x block_factor (8 x 64) data from Apad and B each time to buffers in the
    # shared memory.
    #
    # .. image:: https://github.com/dmlc/web-data/raw/master/tvm/tutorial/conv_gpu_blocking.png
    #      :align: center
    #      :height: 308px
    #      :width: 317px
    #
    # tile consts
    tile = 8
    num_thread = 8
    block_factor = tile * num_thread
    step = 8
    vthread = 2
    # Get the GPU thread indices
    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    block_z = te.thread_axis("blockIdx.z")
    thread_x = te.thread_axis((0, num_thread), "threadIdx.x")
    thread_y = te.thread_axis((0, num_thread), "threadIdx.y")
    thread_xz = te.thread_axis((0, vthread), "vthread", name="vx")
    thread_yz = te.thread_axis((0, vthread), "vthread", name="vy")
    # Split the workloads
    hi, wi, fi, ni = s[B].op.axis
    bz = s[B].fuse(hi, wi)
    by, fi = s[B].split(fi, factor=block_factor)
    bx, ni = s[B].split(ni, factor=block_factor)
    # Bind the iteration variables to GPU thread indices
    s[B].bind(bz, block_z)
    s[B].bind(by, block_y)
    s[B].bind(bx, block_x)
    ###############################################################################
    # Virtual Thread Split
    # --------------------
    #
    # We further split the workload from a thread block to individual threads. To
    # avoid *memory bank conflict*, we use virtual thread to split the area into 4
    # parts, and then tile into 8x8 grids. Therefore, shown in the figure below,
    # each thread computes 4 strided grids, where size of each grid is 4 x 4.
    #
    # .. image:: https://github.com/dmlc/web-data/raw/master/tvm/tutorial/conv_gpu_vthread.png
    #      :align: center
    #      :height: 188px
    #      :width: 268px
    #
    tyz, fi = s[B].split(fi, nparts=vthread)  # virtual thread split
    txz, ni = s[B].split(ni, nparts=vthread)  # virtual thread split
    ty, fi = s[B].split(fi, nparts=num_thread)
    tx, ni = s[B].split(ni, nparts=num_thread)
    s[B].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
    s[B].bind(tyz, thread_yz)
    s[B].bind(txz, thread_xz)
    s[B].bind(ty, thread_y)
    s[B].bind(tx, thread_x)
    ###############################################################################
    # Cooperative Fetching
    # --------------------
    #
    # As mentioned before, each time step we need to transfer step x block_factor
    # data from GPU global memory to shared memory. In order to reduce the memory
    # transfer per thread, the following code lets threads in the same thread block
    # coopertively fetch dependent data from global memory.
    #
    # Schedule BL local write
    s[BL].compute_at(s[B], tx)
    yi, xi, fi, ni = s[BL].op.axis
    ry, rx, rc = s[BL].op.reduce_axis
    rco, rci = s[BL].split(rc, factor=step)
    s[BL].reorder(rco, ry, rx, rci, fi, ni)
    # Attach computation to iteration variables
    s[AA].compute_at(s[BL], rx)
    s[WW].compute_at(s[BL], rx)
    s[AL].compute_at(s[BL], rci)
    s[WL].compute_at(s[BL], rci)
    # Schedule for A's shared memory load
    yi, xi, ci, ni = s[AA].op.axis
    ty, ci = s[AA].split(ci, nparts=num_thread)
    tx, ni = s[AA].split(ni, nparts=num_thread)
    _, ni = s[AA].split(ni, factor=4)
    s[AA].reorder(ty, tx, yi, xi, ci, ni)
    s[AA].bind(ty, thread_y)
    s[AA].bind(tx, thread_x)
    s[AA].vectorize(ni)  # vectorize memory load
    # Schedule for W's shared memory load
    yi, xi, ci, fi = s[WW].op.axis
    ty, ci = s[WW].split(ci, nparts=num_thread)
    tx, fi = s[WW].split(fi, nparts=num_thread)
    _, fi = s[WW].split(fi, factor=4)
    s[WW].reorder(ty, tx, yi, xi, ci, fi)
    s[WW].bind(ty, thread_y)
    s[WW].bind(tx, thread_x)
    s[WW].vectorize(fi)  # vectorize memory load
    ###############################################################################
    # Generate CUDA Kernel
    # --------------------
    #
    # Finally we use TVM to generate and compile the CUDA kernel, and evaluate the
    # latency of convolution.
    #
    return s, [A, W, B]
def test1(device_id=1):
    """Lower, build and benchmark the conv2d schedule on a CUDA device.

    Prints the wall-clock time spent in ``tvm.lower`` and ``tvm.build``,
    then measures one convolution run on random input data.

    Args:
        device_id (int): index of the GPU to benchmark on. Defaults to 1,
            which matches the previously hard-coded device.
    """
    print("test 1 #####################3")
    s, bufs = conv2d()
    A, W, B = bufs
    # NOTE: the redundant function-local `import time` was removed; `time`
    # is already imported at module level.
    build_beg = time.time()
    mod = tvm.lower(s, [A, W, B], simple_mode=True)
    print(type(mod))
    build_end = time.time()
    print("lower time cost=", (build_end - build_beg) * 1e3, "ms")
    build_beg = time.time()
    func = tvm.build(s, [A, W, B], 'cuda')
    build_end = time.time()
    print("build time cost=", (build_end - build_beg) * 1e3, "ms")
    ctx = tvm.gpu(device_id)
    # Random inputs matching the placeholder shapes/dtypes.
    a_np = np.random.uniform(size=(in_size, in_size, in_channel, batch)).astype(A.dtype)
    w_np = np.random.uniform(size=(kernel, kernel, in_channel, out_channel)).astype(W.dtype)
    a = tvm.nd.array(a_np, ctx)
    w = tvm.nd.array(w_np, ctx)
    b = tvm.nd.array(np.zeros((out_size, out_size, out_channel, batch), dtype=B.dtype), ctx)
    func(a, w, b)  # warm-up / correctness-of-launch run
    evaluator = func.time_evaluator(func.entry_name, ctx, number=1)
    print('Convolution: %f ms' % (evaluator(a, w, b).mean * 1e3))
def test2():
    """Time how long tvm.build takes for a trivial 4x4 constant tensor."""
    print("test 2 #####################3")
    const_tensor = tvm.te.compute([4, 4], lambda i, j: 1)
    sched = tvm.te.create_schedule(const_tensor.op)
    start = time.time()
    compiled = tvm.build(sched, [const_tensor], "llvm")
    elapsed = time.time() - start
    print("build time cost=", elapsed * 1e3, "ms")
def test3(number=50):
    """Time building `number` identical conv2d schedules, twice.

    Both passes run serially in this process. The second pass was
    originally labelled "parallel", but ``list(map(build, ...))`` evaluates
    the builds one after another, so the label was wrong and has been
    corrected; the loop form also avoids building throwaway lists purely
    for side effects.

    Args:
        number (int): how many schedules to create and compile. Defaults to 50.
    """
    print("test 3 #####################3")
    print("number =", number)
    s_list = []
    buf_list = []
    f_list = []
    for i in range(number):
        s, bufs = conv2d()
        s_list.append(s)
        buf_list.append(bufs)
        f_list.append(None)

    def build(i):
        # Compile schedule i and store the resulting function in f_list.
        s = s_list[i]
        bufs = buf_list[i]
        func = tvm.build(s, bufs, 'cuda')
        f_list[i] = func

    beg = time.time()
    for i in range(number):
        build(i)
    end = time.time()
    print("serial build time cost=", (end - beg) * 1e3, "ms")
    beg = time.time()
    # Second pass: still serial — map() offers no parallelism here.
    for i in range(number):
        build(i)
    end = time.time()
    print("second serial build time cost=", (end - beg) * 1e3, "ms")
if __name__ == "__main__":
    # Run all three build benchmarks when executed as a script.
    test1()
    test2()
    test3()
"numpy.random.uniform",
"tvm.te.reduce_axis",
"tvm.te.placeholder",
"tvm.tir.const",
"tvm.nd.array",
"numpy.zeros",
"time.time",
"tvm.build",
"tvm.te.thread_axis",
"tvm.te.compute",
"tvm.te.create_schedule",
"tvm.lower",
"tvm.gpu",
"tvm.tir.all",
"tvm.te.sum"
] | [((1348, 1411), 'tvm.te.placeholder', 'te.placeholder', (['(in_size, in_size, in_channel, batch)'], {'name': '"""A"""'}), "((in_size, in_size, in_channel, batch), name='A')\n", (1362, 1411), False, 'from tvm import te\n'), ((1420, 1487), 'tvm.te.placeholder', 'te.placeholder', (['(kernel, kernel, in_channel, out_channel)'], {'name': '"""W"""'}), "((kernel, kernel, in_channel, out_channel), name='W')\n", (1434, 1487), False, 'from tvm import te\n'), ((1892, 1934), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, in_channel)'], {'name': '"""rc"""'}), "((0, in_channel), name='rc')\n", (1906, 1934), False, 'from tvm import te\n'), ((1944, 1982), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, kernel)'], {'name': '"""ry"""'}), "((0, kernel), name='ry')\n", (1958, 1982), False, 'from tvm import te\n'), ((1992, 2030), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, kernel)'], {'name': '"""rx"""'}), "((0, kernel), name='rx')\n", (2006, 2030), False, 'from tvm import te\n'), ((3491, 3515), 'tvm.te.create_schedule', 'te.create_schedule', (['B.op'], {}), '(B.op)\n', (3509, 3515), False, 'from tvm import te\n'), ((4749, 4777), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (4763, 4777), False, 'from tvm import te\n'), ((4792, 4820), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.y"""'], {}), "('blockIdx.y')\n", (4806, 4820), False, 'from tvm import te\n'), ((4835, 4863), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.z"""'], {}), "('blockIdx.z')\n", (4849, 4863), False, 'from tvm import te\n'), ((4879, 4925), 'tvm.te.thread_axis', 'te.thread_axis', (['(0, num_thread)', '"""threadIdx.x"""'], {}), "((0, num_thread), 'threadIdx.x')\n", (4893, 4925), False, 'from tvm import te\n'), ((4941, 4987), 'tvm.te.thread_axis', 'te.thread_axis', (['(0, num_thread)', '"""threadIdx.y"""'], {}), "((0, num_thread), 'threadIdx.y')\n", (4955, 4987), False, 'from tvm import te\n'), ((5004, 5054), 'tvm.te.thread_axis', 'te.thread_axis', 
(['(0, vthread)', '"""vthread"""'], {'name': '"""vx"""'}), "((0, vthread), 'vthread', name='vx')\n", (5018, 5054), False, 'from tvm import te\n'), ((5071, 5121), 'tvm.te.thread_axis', 'te.thread_axis', (['(0, vthread)', '"""vthread"""'], {'name': '"""vy"""'}), "((0, vthread), 'vthread', name='vy')\n", (5085, 5121), False, 'from tvm import te\n'), ((8511, 8522), 'time.time', 'time.time', ([], {}), '()\n', (8520, 8522), False, 'import time\n'), ((8533, 8574), 'tvm.lower', 'tvm.lower', (['s', '[A, W, B]'], {'simple_mode': '(True)'}), '(s, [A, W, B], simple_mode=True)\n', (8542, 8574), False, 'import tvm\n'), ((8612, 8623), 'time.time', 'time.time', ([], {}), '()\n', (8621, 8623), False, 'import time\n'), ((8707, 8718), 'time.time', 'time.time', ([], {}), '()\n', (8716, 8718), False, 'import time\n'), ((8730, 8761), 'tvm.build', 'tvm.build', (['s', '[A, W, B]', '"""cuda"""'], {}), "(s, [A, W, B], 'cuda')\n", (8739, 8761), False, 'import tvm\n'), ((8778, 8789), 'time.time', 'time.time', ([], {}), '()\n', (8787, 8789), False, 'import time\n'), ((8867, 8877), 'tvm.gpu', 'tvm.gpu', (['(1)'], {}), '(1)\n', (8874, 8877), False, 'import tvm\n'), ((9068, 9091), 'tvm.nd.array', 'tvm.nd.array', (['a_np', 'ctx'], {}), '(a_np, ctx)\n', (9080, 9091), False, 'import tvm\n'), ((9100, 9123), 'tvm.nd.array', 'tvm.nd.array', (['w_np', 'ctx'], {}), '(w_np, ctx)\n', (9112, 9123), False, 'import tvm\n'), ((9435, 9473), 'tvm.te.compute', 'tvm.te.compute', (['[4, 4]', '(lambda i, j: 1)'], {}), '([4, 4], lambda i, j: 1)\n', (9449, 9473), False, 'import tvm\n'), ((9482, 9510), 'tvm.te.create_schedule', 'tvm.te.create_schedule', (['A.op'], {}), '(A.op)\n', (9504, 9510), False, 'import tvm\n'), ((9526, 9537), 'time.time', 'time.time', ([], {}), '()\n', (9535, 9537), False, 'import time\n'), ((9549, 9574), 'tvm.build', 'tvm.build', (['s', '[A]', '"""llvm"""'], {}), "(s, [A], 'llvm')\n", (9558, 9574), False, 'import tvm\n'), ((9585, 9596), 'time.time', 'time.time', ([], {}), '()\n', (9594, 9596), 
False, 'import time\n'), ((10082, 10093), 'time.time', 'time.time', ([], {}), '()\n', (10091, 10093), False, 'import time\n'), ((10142, 10153), 'time.time', 'time.time', ([], {}), '()\n', (10151, 10153), False, 'import time\n'), ((10227, 10238), 'time.time', 'time.time', ([], {}), '()\n', (10236, 10238), False, 'import time\n'), ((10285, 10296), 'time.time', 'time.time', ([], {}), '()\n', (10294, 10296), False, 'import time\n'), ((9145, 9210), 'numpy.zeros', 'np.zeros', (['(out_size, out_size, out_channel, batch)'], {'dtype': 'B.dtype'}), '((out_size, out_size, out_channel, batch), dtype=B.dtype)\n', (9153, 9210), True, 'import numpy as np\n'), ((10019, 10045), 'tvm.build', 'tvm.build', (['s', 'bufs', '"""cuda"""'], {}), "(s, bufs, 'cuda')\n", (10028, 10045), False, 'import tvm\n'), ((2162, 2261), 'tvm.te.sum', 'te.sum', (['(Apad[yy * stride + ry, xx * stride + rx, rc, nn] * W[ry, rx, rc, ff])'], {'axis': '[ry, rx, rc]'}), '(Apad[yy * stride + ry, xx * stride + rx, rc, nn] * W[ry, rx, rc, ff],\n axis=[ry, rx, rc])\n', (2168, 2261), False, 'from tvm import te\n'), ((8889, 8950), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(in_size, in_size, in_channel, batch)'}), '(size=(in_size, in_size, in_channel, batch))\n', (8906, 8950), True, 'import numpy as np\n'), ((8978, 9043), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(kernel, kernel, in_channel, out_channel)'}), '(size=(kernel, kernel, in_channel, out_channel))\n', (8995, 9043), True, 'import numpy as np\n'), ((1660, 1733), 'tvm.tir.all', 'tvm.tir.all', (['(yy >= pad)', '(yy - pad < in_size)', '(xx >= pad)', '(xx - pad < in_size)'], {}), '(yy >= pad, yy - pad < in_size, xx >= pad, xx - pad < in_size)\n', (1671, 1733), False, 'import tvm\n'), ((1798, 1827), 'tvm.tir.const', 'tvm.tir.const', (['(0.0)', '"""float32"""'], {}), "(0.0, 'float32')\n", (1811, 1827), False, 'import tvm\n')] |
import hashlib
import json
import logging
import os
import pickle
import re
from pathlib import Path
from time import time
import h5py
import numpy as np
import torch
from scipy import spatial
from torch.utils.data import Sampler
import Resources.training as r
def reshape_to_indeces(_input, ix=((1, 4), (1, 4)), goal=80):
    """Subsample a flat 38x30 sensor grid down to `goal` values per sample.

    The input is viewed as (N, 1, 38, 30); rows ``ix[0][0]::ix[0][1]`` and
    columns ``ix[1][0]::ix[1][1]`` are kept and the result is flattened
    back to shape (N, 1, goal).
    """
    grid = _input.reshape((-1, 1, 38, 30)).contiguous()
    row_start, row_step = ix[0]
    col_start, col_step = ix[1]
    picked = grid[:, :, row_start::row_step, col_start::col_step].contiguous()
    return picked.reshape(-1, 1, goal).contiguous()
class RandomOverSampler(Sampler):
    """
    Sampler to put more emphasis on the last samples of runs.
    The original size of the dataset is kept. Some samples are dropped.

    Arguments:
        data_source (Dataset): dataset to sample from
        emphasize_after_max_minus (int): index from which starting, counting from the back, the samples are used more
            often
        multiply_by (int): by how much those samples are multiplied in the dataset
    """

    def __init__(self, data_source, emphasize_after_max_minus=80, multiply_by=2):
        self.data_source = data_source
        self.emph = emphasize_after_max_minus
        self.multiply_by = multiply_by

    @property
    def num_samples(self):
        # Length of the first component of the dataset tuple.
        return len(self.data_source[0])

    def double_samples_after_step_x_in_each_run(self):
        """Return a shuffled index tensor of length ``num_samples``.

        Samples within the last ``self.emph`` steps of their run appear
        ``multiply_by`` times; an equal number of randomly chosen earlier
        samples is dropped so the total count stays constant.
        """
        logger = logging.getLogger()
        logger.debug("Sampling ...")
        t0 = time()
        indices_lower = []
        indices_higher = []
        for i, aux in enumerate(self.data_source[2]):  # Aux data
            index = aux["ix"]
            _max = aux["max"]
            if index > _max - self.emph:
                indices_higher.append(i)
            else:
                indices_lower.append(i)
        # BUGFIX: durations were previously logged as t0 - time(), which is
        # always negative; the correct elapsed time is time() - t0.
        logger.debug(f"Going through data took {time() - t0}")
        t0 = time()
        count_higher = len(indices_higher)
        # Multiply the number of samples at the back of the runs.
        # dtype=long keeps torch.cat well-typed even when the list is empty.
        hi = torch.tensor(indices_higher, dtype=torch.long).repeat(self.multiply_by)
        hi = hi[torch.randperm(len(hi))]
        logger.debug(f"Random 1 took {time() - t0}")
        t0 = time()
        # Cast to tensor, randomize
        low = torch.tensor(indices_lower, dtype=torch.long)
        low = low[torch.randperm(len(low))]
        logger.debug(f"Random 2 took {time() - t0}")
        t0 = time()
        # Drop as many early samples as were added at the back.
        n_extra = count_higher * (self.multiply_by - 1)
        # BUGFIX: low[:-0] evaluates to an *empty* slice, so the original
        # dropped every lower sample when n_extra was 0 (e.g. multiply_by=1).
        if n_extra > 0:
            low = low[:-n_extra]
        indizes = torch.cat((hi, low))
        indizes = indizes[torch.randperm(len(indizes))]
        logger.debug(f"Random All took {time() - t0}")
        return indizes

    def __iter__(self):
        indizes = self.double_samples_after_step_x_in_each_run()
        return iter(indizes)

    def __len__(self):
        return self.num_samples
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as plain nested lists."""

    def default(self, obj):
        # numpy arrays are not JSON-serializable out of the box; convert
        # them to Python lists before encoding.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Defer everything else to the base class (which raises TypeError).
        return super().default(obj)
def load_mean_std(mean_std_f: Path):
    """Load a pickled (mean, std) pair from `mean_std_f`.

    Returns:
        tuple: ``(mean, std)``, each converted to a numpy array.
    """
    with open(mean_std_f, "rb") as fh:
        mean, std = pickle.load(fh)
    return np.array(mean), np.array(std)
def handle_torch_caching(processing_function, data_source_paths, sampler_func, batch_size):
    """Derive a cache directory for a data-loader configuration and record it.

    Builds a description dict from the bound data loader's attributes plus the
    given paths/batch size/sampler, hashes its string form with md5, creates a
    directory named after the hash under ``r.datasets_dryspots_torch``, and
    registers the configuration in a shared ``info.json`` next to it.

    NOTE(review): this mutates ``processing_function.__self__.__dict__`` in
    place, and the md5 hash depends on the exact ``str(dict)`` output, so
    attribute insertion order matters — confirm callers rely on neither.

    Returns:
        tuple: ``(load_and_save_path, data_loader_hash)`` — the cache
        directory (Path) and its hex digest name.
    """
    data_loader_info = processing_function.__self__.__dict__
    data_loader_info["data_processing_function"] = processing_function.__name__
    data_loader_info["data_loader_name"] = processing_function.__self__.__class__.__name__
    data_loader_info["data_source_paths"] = [str(p) for p in data_source_paths]
    data_loader_info["batch_size"] = batch_size
    if sampler_func is None:
        data_loader_info["sampler"] = ""
    else:
        # Instantiate the sampler with a dummy source just to capture its config.
        data_loader_info["sampler"] = sampler_func(None).__dict__
    data_loader_str = str(data_loader_info).encode("utf-8")
    data_loader_hash = hashlib.md5(data_loader_str).hexdigest()
    load_and_save_path = r.datasets_dryspots_torch / data_loader_hash
    # logger = logging.getLogger(__name__)
    # if load_and_save_path.exists():
    #     logger.debug("Existing caches: ")
    #     logger.debug(f"{[x for x in load_and_save_path.iterdir() if x.is_file()]}")
    # else:
    #     logger.debug("No existing caches.")
    load_and_save_path.mkdir(exist_ok=True)
    # Merge this configuration into the shared registry file (read-modify-write;
    # not safe against concurrent writers).
    if (r.datasets_dryspots_torch / "info.json").is_file():
        with open(r.datasets_dryspots_torch / "info.json", "r") as f:
            data = json.load(f)
    else:
        data = {}
    data.update({data_loader_hash: data_loader_info})
    with open(r.datasets_dryspots_torch / "info.json", "w") as f:
        json.dump(data, f, cls=NumpyEncoder)
    return load_and_save_path, data_loader_hash
def extract_sensor_coords(fn: Path, indices=((0, 1), (0, 1))):
    """
    Extract the sensor coordinates as numpy array from a *d.out file, which exists for every run.

    Each sensor appears in the file as an "x y z" triple of positive floats;
    the constant z column is discarded, so the result has shape
    (n_sensors, 2). The `indices` parameter is currently unused (a disabled
    subsampling variant in the original).
    """
    content = fn.read_text()
    triples = re.findall(r"\d+\.\d+ \d+\.\d+ \d+\.\d+", content)
    coords = np.array(
        [[float(part) for part in triple.split(' ')] for triple in triples])
    # Cut off last column (z), since it is filled with 1s anyway
    return coords[:, :-1]
def extract_coords_of_mesh_nodes(fn: Path, normalized=True):
    """
    Extract the coordinates of the mesh nodes as numpy array from a *RESULT.erfh5 file, which exists for every run.

    The constant z column is dropped; with ``normalized=True`` both
    remaining columns are min-max scaled to [0, 1] via `normalize_coords`.
    """
    dataset_path = "post/constant/entityresults/NODE/COORDINATE/ZONE1_set0/erfblock/res"
    with h5py.File(fn, 'r') as h5:
        raw = h5[dataset_path][()]
    # Cut off last column (z), since it is filled with 1s anyway
    coords = raw[:, :-1]
    return normalize_coords(coords) if normalized else coords
def get_node_propery_at_states(f: h5py.File, node_property: str, states: list):
    """Collect the per-node `node_property` result array for every state in `states`."""
    results = []
    for state in states:
        node_group = f["post"]["singlestate"][state]["entityresults"]["NODE"]
        block = node_group[node_property]["ZONE1_set1"]["erfblock"]["res"]
        results.append(block[()])
    return results
def extract_nearest_mesh_nodes_to_sensors(fn: Path):
    """For each sensor, find the index of the nearest mesh node.

    PERF: the original rebuilt a KD-tree over all mesh nodes once *per
    sensor*; the tree is now built once and all sensors are queried in a
    single vectorized call, with identical results.

    Args:
        fn (Path): run path prefix; "<fn>d.out" and "<fn>_RESULT.erfh5"
            must both exist.

    Returns:
        np.ndarray: for each sensor, the index of its closest mesh node.
    """
    sensor_coords = extract_sensor_coords(Path(str(fn) + "d.out"))
    nodes_coords = extract_coords_of_mesh_nodes(
        Path(str(fn) + "_RESULT.erfh5"), normalized=False)
    tree = spatial.KDTree(nodes_coords)
    _dists, indices = tree.query(sensor_coords)
    return np.array(indices)
def scale_coords_lautern(input_coords):
    """Shift Lautern coordinates by +23.25 and scale by 10 onto the pixel grid."""
    offset = 23.25
    factor = 10
    return (input_coords + offset) * factor
def scale_coords_leoben(input_coords):
    """Scale Leoben coordinates by 10 onto the pixel grid (no offset needed)."""
    factor = 10
    return input_coords * factor
def normalize_coords(coords):
    """Min-max normalize the x and y columns of `coords` independently to [0, 1]."""
    coords = np.array(coords)
    # Same two-step (shift, then scale) normalization as before, applied to
    # each of the two coordinate columns in turn.
    for axis in (0, 1):
        column = coords[:, axis]
        lo = np.min(column)
        hi = np.max(column)
        coords[:, axis] = (column - lo) / (hi - lo)
    return coords
def change_win_to_unix_path_if_needed(_str):
    """Translate Windows network-drive paths to their unix mount points.

    On unix-like systems, "Y:" maps to "/cfs/share" and "X:" to "/cfs/home",
    with backslashes converted to forward slashes. On Windows (or for paths
    without those prefixes) the string is returned unchanged.

    BUGFIX: ``os.name`` is never "unix" — it is "posix" on Linux/macOS — so
    both conversion branches were previously unreachable.
    """
    if os.name == "posix":
        if _str.startswith("Y:"):
            _str = _str.replace("\\", "/").replace("Y:", "/cfs/share")
        elif _str.startswith("X:"):
            _str = _str.replace("\\", "/").replace("X:", "/cfs/home")
    return _str
if __name__ == '__main__':
    # Ad-hoc smoke test against one hard-coded simulation run on the network share.
    extract_nearest_mesh_nodes_to_sensors(
        Path(r'Y:\data\RTM\Leoben\sim_output\2019-07-23_15-38-08_5000p\0\2019-07-23_15-38-08_0'))
| [
"json.dump",
"h5py.File",
"hashlib.md5",
"json.load",
"torch.cat",
"time.time",
"numpy.max",
"re.findall",
"numpy.array",
"numpy.min",
"pickle.load",
"pathlib.Path",
"scipy.spatial.KDTree",
"torch.tensor",
"json.JSONEncoder.default",
"logging.getLogger"
] | [((4903, 4961), 're.findall', 're.findall', (['"""\\\\d+\\\\.\\\\d+ \\\\d+\\\\.\\\\d+ \\\\d+\\\\.\\\\d+"""', 'content'], {}), "('\\\\d+\\\\.\\\\d+ \\\\d+\\\\.\\\\d+ \\\\d+\\\\.\\\\d+', content)\n", (4913, 4961), False, 'import re\n'), ((5039, 5062), 'numpy.array', 'np.array', (['sensor_coords'], {}), '(sensor_coords)\n', (5047, 5062), True, 'import numpy as np\n'), ((6567, 6584), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (6575, 6584), True, 'import numpy as np\n'), ((6849, 6865), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (6857, 6865), True, 'import numpy as np\n'), ((6878, 6898), 'numpy.max', 'np.max', (['coords[:, 0]'], {}), '(coords[:, 0])\n', (6884, 6898), True, 'import numpy as np\n'), ((6911, 6931), 'numpy.min', 'np.min', (['coords[:, 0]'], {}), '(coords[:, 0])\n', (6917, 6931), True, 'import numpy as np\n'), ((7034, 7054), 'numpy.max', 'np.max', (['coords[:, 1]'], {}), '(coords[:, 1])\n', (7040, 7054), True, 'import numpy as np\n'), ((7067, 7087), 'numpy.min', 'np.min', (['coords[:, 1]'], {}), '(coords[:, 1])\n', (7073, 7087), True, 'import numpy as np\n'), ((1352, 1371), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1369, 1371), False, 'import logging\n'), ((1422, 1428), 'time.time', 'time', ([], {}), '()\n', (1426, 1428), False, 'from time import time\n'), ((1826, 1832), 'time.time', 'time', ([], {}), '()\n', (1830, 1832), False, 'from time import time\n'), ((2136, 2142), 'time.time', 'time', ([], {}), '()\n', (2140, 2142), False, 'from time import time\n'), ((2323, 2329), 'time.time', 'time', ([], {}), '()\n', (2327, 2329), False, 'from time import time\n'), ((2407, 2427), 'torch.cat', 'torch.cat', (['(hi, low)'], {}), '((hi, low))\n', (2416, 2427), False, 'import torch\n'), ((2892, 2927), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (2916, 2927), False, 'import json\n'), ((3025, 3039), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3036, 3039), 
False, 'import pickle\n'), ((3055, 3069), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (3063, 3069), True, 'import numpy as np\n'), ((3084, 3097), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (3092, 3097), True, 'import numpy as np\n'), ((4545, 4581), 'json.dump', 'json.dump', (['data', 'f'], {'cls': 'NumpyEncoder'}), '(data, f, cls=NumpyEncoder)\n', (4554, 4581), False, 'import json\n'), ((5607, 5625), 'h5py.File', 'h5py.File', (['fn', '"""r"""'], {}), "(fn, 'r')\n", (5616, 5625), False, 'import h5py\n'), ((7576, 7680), 'pathlib.Path', 'Path', (['"""Y:\\\\data\\\\RTM\\\\Leoben\\\\sim_output\\\\2019-07-23_15-38-08_5000p\\\\0\\\\2019-07-23_15-38-08_0"""'], {}), "(\n 'Y:\\\\data\\\\RTM\\\\Leoben\\\\sim_output\\\\2019-07-23_15-38-08_5000p\\\\0\\\\2019-07-23_15-38-08_0'\n )\n", (7580, 7680), False, 'from pathlib import Path\n'), ((2193, 2220), 'torch.tensor', 'torch.tensor', (['indices_lower'], {}), '(indices_lower)\n', (2205, 2220), False, 'import torch\n'), ((3802, 3830), 'hashlib.md5', 'hashlib.md5', (['data_loader_str'], {}), '(data_loader_str)\n', (3813, 3830), False, 'import hashlib\n'), ((4376, 4388), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4385, 4388), False, 'import json\n'), ((1965, 1993), 'torch.tensor', 'torch.tensor', (['indices_higher'], {}), '(indices_higher)\n', (1977, 1993), False, 'import torch\n'), ((6471, 6499), 'scipy.spatial.KDTree', 'spatial.KDTree', (['nodes_coords'], {}), '(nodes_coords)\n', (6485, 6499), False, 'from scipy import spatial\n'), ((1803, 1809), 'time.time', 'time', ([], {}), '()\n', (1807, 1809), False, 'from time import time\n'), ((2113, 2119), 'time.time', 'time', ([], {}), '()\n', (2117, 2119), False, 'from time import time\n'), ((2300, 2306), 'time.time', 'time', ([], {}), '()\n', (2304, 2306), False, 'from time import time\n'), ((2529, 2535), 'time.time', 'time', ([], {}), '()\n', (2533, 2535), False, 'from time import time\n')] |
import os
import sys
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# import wmi
from keras.backend import clear_session
from scipy import stats
from LSTM_for_Stock.data_processor import DataHelper
from LSTM_for_Stock.data_processor import DataLoaderStock
from LSTM_for_Stock.data_processor import Normalize
from LSTM_for_Stock.data_processor import Wrapper_default
from LSTM_for_Stock.loss import root_mean_squared_error
from LSTM_for_Stock.model import SequentialModel
# Make the parent of the current working directory importable -- presumably
# so the LSTM_for_Stock package (imported above) resolves when this runs as
# a notebook; confirm against the project layout.
nb_dir = os.path.split(os.getcwd())[0]
if nb_dir not in sys.path:
    sys.path.append(nb_dir)
# Global matplotlib defaults: wide figures and CJK-capable sans-serif fonts.
matplotlib.rcParams["figure.figsize"] = [16, 5]
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = ['Noto Sans CJK SC', 'SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts
# def print_system_info():
# computer = wmi.WMI()
# computer_info = computer.Win32_ComputerSystem()[0]
# os_info = computer.Win32_OperatingSystem()[0]
# proc_info = computer.Win32_Processor()[0]
# gpu_info = computer.Win32_VideoController()[0]
# os_name = os_info.Name.encode('utf-8').split(b'|')[0]
# os_version = ' '.join([os_info.Version, os_info.BuildNumber])
# system_ram = float(os_info.TotalVisibleMemorySize) / 1048576 # KB to GB
# print('OS Name: {0}'.format(os_name))
# print('OS Version: {0}'.format(os_version))
# print('CPU: {0}'.format(proc_info.Name))
# print('RAM: {0} GB'.format(system_ram))
# print('Graphics Card: {0}'.format(gpu_info.Name))
def do(code='000002',
       window=3,
       days=1,
       wrapper=None,
       norm=None,
       *args,
       **kwargs):
    """Load a stock's history, then build and train an LSTM model on it.

    Args:
        code (str): stock code to load.
        window (int): number of past records used as one model input.
        days (int): number of future records to predict.
        wrapper: data wrapper; a fresh ``Wrapper_default()`` when omitted.
        norm: normalizer; a fresh ``Normalize()`` when omitted.
        **kwargs: optional settings popped below:
            ``layers`` (list[dict]): layer definitions, LSTM by default.
                The first layer's ``input_shape`` and the last layer's
                ``units`` are filled in automatically.
            ``compile``, ``batch_size``, ``epochs``, ``train_size``,
            ``appends``, ``cbs``, ``verbose``, ``shuffle``,
            ``validation_split``, ``summary``.

    Returns:
        dict: timing info, the trained model and its history, plus the
        train/test splits and raw test arrays for later evaluation.
    """
    # Avoid the mutable-default pitfall: the previous defaults
    # ``wrapper=Wrapper_default()`` / ``norm=Normalize()`` were evaluated
    # once at import time and shared by every call. Create them per call.
    if wrapper is None:
        wrapper = Wrapper_default()
    if norm is None:
        norm = Normalize()
    dl = DataLoaderStock(
        code, wrapper=wrapper, appends=kwargs.pop('appends', []))
    df = dl.load()
    train, test = DataHelper.train_test_split(
        df, batch_size=window + days,
        train_size=kwargs.pop('train_size', 0.85))
    X_train, Y_train = DataHelper.xy_split_2(train, window, days, norm=norm)
    X_test, Y_test = DataHelper.xy_split_2(test, window, days, norm=norm)
    batch_size = kwargs.pop('batch_size', 128)
    verbose = kwargs.pop('verbose', 0)
    # The splits are sequences of DataFrames; keep only the raw ndarrays.
    X_train_arr = [x.values for x in X_train]
    Y_train_arr = [y.values for y in Y_train]
    X_test_arr = [x.values for x in X_test]
    Y_test_arr = [y.values for y in Y_test]
    clear_session()
    model = SequentialModel()
    # RMSE is the default loss: compared to the similar Mean Absolute Error
    # it amplifies and severely punishes large errors, making it a good
    # general-purpose metric for numerical predictions. See
    # "Predicting Stock Prices Using LSTM"
    # (https://www.researchgate.net/publication/327967988).
    ls = kwargs.pop("layers", [])
    c = kwargs.pop('compile', {'loss': root_mean_squared_error,
                               'optimizer': 'rmsprop',
                               'metrics': ["mae", "acc"]})
    if not ls:
        ls.append({'type': 'lstm', 'units': 128})
        ls.append({'type': 'dense'})
    # First layer must match one sample's shape; the last layer emits one
    # value per predicted day.
    ls[0]['input_shape'] = X_train_arr[0].shape
    ls[-1]['units'] = days
    start = time.time()
    model.build_model(ls, c)
    model.train(np.array(X_train_arr),
                np.array(Y_train_arr), callbacks=kwargs.pop('cbs', None),
                train={'epochs': kwargs.pop('epochs', 500),
                       'shuffle': kwargs.pop('shuffle', False),
                       'verbose': verbose,
                       'batch_size': batch_size,
                       'validation_split': kwargs.pop('validation_split',
                                                      0.15)})
    if kwargs.pop('summary', True):
        model.model.summary()
    end = time.time()
    return {
        'start': start,
        'end': end,
        'X_test_arr': X_test_arr,
        'Y_test_arr': Y_test_arr,
        'model': model.model,
        'code': code,
        'window': window,
        'days': days,
        'batch_size': batch_size,
        'history': model.history,
        'data': df,
        'X_train': X_train,
        'Y_train': Y_train,
        'X_test': X_test,
        'Y_test': Y_test
    }
def show_history(h, *args, **kwargs):
    """Evaluate a trained model on its test set and visualize the results.

    Args:
        h (dict): result dict as returned by ``do()`` (model, history,
            test arrays and meta data).
        **kwargs: ``print_score`` (default True) prints evaluation metrics;
            ``show_plt`` (default True) renders the accuracy/loss curves
            and per-day prediction plots.

    Returns:
        dict: evaluation ``score``, predictions (``pred``), ground truth
        (``real``) and the linear-regression ``slope`` of predicted vs.
        real values for each predicted day.
    """
    # Unpack the result dict produced by do().
    start = h['start']
    end = h['end']
    X_test_arr = h['X_test_arr']
    Y_test_arr = h['Y_test_arr']
    model = h['model']
    code = h['code']
    window = h['window']
    days = h['days']
    batch_size = h['batch_size']
    history = h['history']
    print('Net time using : ', end - start, ' secs.')
    score = model.evaluate(np.array(X_test_arr), np.array(Y_test_arr))
    if kwargs.pop('print_score', True):
        print("Score:")
        for i in range(len(model.metrics_names)):
            print(' {0}:{1}'.format(model.metrics_names[i], score[i]))
    show_plt = kwargs.pop('show_plt', True)
    if show_plt:
        plt.figure(figsize=(15, 8))
        # Plot training & validation accuracy
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('Model accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.show()
        plt.figure(figsize=(15, 8))
        # Plot training & validation loss
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('Model loss')
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.show()
    pred = model.predict(np.array(X_test_arr))
    pred_slope = []
    # One figure and one regression slope per predicted day.
    for day in range(days):
        df_result = pd.DataFrame({
            'pred': pred[:, day],
            'real': np.array(Y_test_arr)[:, day]
        })
        if show_plt:
            plt.figure(figsize=(15, 8))
            plt.title(
                '预测。code={0},window={1},day={2}/{3},batch_size={4}'.format(
                    code, window, day + 1, days, batch_size))
            plt.plot(df_result['pred'])
            plt.plot(df_result['real'])
            plt.show()
            sns.regplot(x=pred[:, day], y=np.array(Y_test_arr)[:, day])
            plt.show()
        # Slope of the regression of real on predicted values; a slope
        # near 1 means the predictions track the real series closely.
        slope = stats.linregress(pred[:, day],
                                 np.array(Y_test_arr)[:, day]).slope
        # print('Slope Day{0}:{1}'.format(day + 1, slope))
        pred_slope.append(slope)
    plt.close('all')
    return {
        'score': score,
        'pred': pred,
        'real': np.array(Y_test_arr),
        'slope': pred_slope
    }
| [
"sys.path.append",
"LSTM_for_Stock.data_processor.Wrapper_default",
"LSTM_for_Stock.data_processor.DataHelper.xy_split_2",
"LSTM_for_Stock.model.SequentialModel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"os.getcwd",
"matplotlib.pyplot.close",
"matplotlib.pypl... | [((620, 643), 'sys.path.append', 'sys.path.append', (['nb_dir'], {}), '(nb_dir)\n', (635, 643), False, 'import sys\n'), ((1668, 1685), 'LSTM_for_Stock.data_processor.Wrapper_default', 'Wrapper_default', ([], {}), '()\n', (1683, 1685), False, 'from LSTM_for_Stock.data_processor import Wrapper_default\n'), ((1699, 1710), 'LSTM_for_Stock.data_processor.Normalize', 'Normalize', ([], {}), '()\n', (1708, 1710), False, 'from LSTM_for_Stock.data_processor import Normalize\n'), ((2149, 2202), 'LSTM_for_Stock.data_processor.DataHelper.xy_split_2', 'DataHelper.xy_split_2', (['train', 'window', 'days'], {'norm': 'norm'}), '(train, window, days, norm=norm)\n', (2170, 2202), False, 'from LSTM_for_Stock.data_processor import DataHelper\n'), ((2224, 2276), 'LSTM_for_Stock.data_processor.DataHelper.xy_split_2', 'DataHelper.xy_split_2', (['test', 'window', 'days'], {'norm': 'norm'}), '(test, window, days, norm=norm)\n', (2245, 2276), False, 'from LSTM_for_Stock.data_processor import DataHelper\n'), ((2777, 2792), 'keras.backend.clear_session', 'clear_session', ([], {}), '()\n', (2790, 2792), False, 'from keras.backend import clear_session\n'), ((2805, 2822), 'LSTM_for_Stock.model.SequentialModel', 'SequentialModel', ([], {}), '()\n', (2820, 2822), False, 'from LSTM_for_Stock.model import SequentialModel\n'), ((3884, 3895), 'time.time', 'time.time', ([], {}), '()\n', (3893, 3895), False, 'import time\n'), ((4803, 4814), 'time.time', 'time.time', ([], {}), '()\n', (4812, 4814), False, 'import time\n'), ((7403, 7419), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7412, 7419), True, 'import matplotlib.pyplot as plt\n'), ((573, 584), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (582, 584), False, 'import os\n'), ((3942, 3963), 'numpy.array', 'np.array', (['X_train_arr'], {}), '(X_train_arr)\n', (3950, 3963), True, 'import numpy as np\n'), ((3981, 4002), 'numpy.array', 'np.array', (['Y_train_arr'], {}), '(Y_train_arr)\n', (3989, 4002), 
True, 'import numpy as np\n'), ((5622, 5642), 'numpy.array', 'np.array', (['X_test_arr'], {}), '(X_test_arr)\n', (5630, 5642), True, 'import numpy as np\n'), ((5644, 5664), 'numpy.array', 'np.array', (['Y_test_arr'], {}), '(Y_test_arr)\n', (5652, 5664), True, 'import numpy as np\n'), ((5923, 5950), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (5933, 5950), True, 'import matplotlib.pyplot as plt\n'), ((5985, 6017), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']"], {}), "(history.history['acc'])\n", (5993, 6017), True, 'import matplotlib.pyplot as plt\n'), ((6026, 6062), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_acc']"], {}), "(history.history['val_acc'])\n", (6034, 6062), True, 'import matplotlib.pyplot as plt\n'), ((6071, 6098), 'matplotlib.pyplot.title', 'plt.title', (['"""Model accuracy"""'], {}), "('Model accuracy')\n", (6080, 6098), True, 'import matplotlib.pyplot as plt\n'), ((6107, 6129), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (6117, 6129), True, 'import matplotlib.pyplot as plt\n'), ((6138, 6157), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (6148, 6157), True, 'import matplotlib.pyplot as plt\n'), ((6166, 6213), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Test']"], {'loc': '"""upper left"""'}), "(['Train', 'Test'], loc='upper left')\n", (6176, 6213), True, 'import matplotlib.pyplot as plt\n'), ((6222, 6232), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6230, 6232), True, 'import matplotlib.pyplot as plt\n'), ((6242, 6269), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (6252, 6269), True, 'import matplotlib.pyplot as plt\n'), ((6302, 6335), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (6310, 6335), True, 'import matplotlib.pyplot as plt\n'), ((6344, 6381), 
'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (6352, 6381), True, 'import matplotlib.pyplot as plt\n'), ((6390, 6413), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (6399, 6413), True, 'import matplotlib.pyplot as plt\n'), ((6422, 6440), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (6432, 6440), True, 'import matplotlib.pyplot as plt\n'), ((6449, 6468), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (6459, 6468), True, 'import matplotlib.pyplot as plt\n'), ((6477, 6524), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Test']"], {'loc': '"""upper left"""'}), "(['Train', 'Test'], loc='upper left')\n", (6487, 6524), True, 'import matplotlib.pyplot as plt\n'), ((6533, 6543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6541, 6543), True, 'import matplotlib.pyplot as plt\n'), ((6570, 6590), 'numpy.array', 'np.array', (['X_test_arr'], {}), '(X_test_arr)\n', (6578, 6590), True, 'import numpy as np\n'), ((7496, 7516), 'numpy.array', 'np.array', (['Y_test_arr'], {}), '(Y_test_arr)\n', (7504, 7516), True, 'import numpy as np\n'), ((6803, 6830), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (6813, 6830), True, 'import matplotlib.pyplot as plt\n'), ((7004, 7031), 'matplotlib.pyplot.plot', 'plt.plot', (["df_result['pred']"], {}), "(df_result['pred'])\n", (7012, 7031), True, 'import matplotlib.pyplot as plt\n'), ((7044, 7071), 'matplotlib.pyplot.plot', 'plt.plot', (["df_result['real']"], {}), "(df_result['real'])\n", (7052, 7071), True, 'import matplotlib.pyplot as plt\n'), ((7084, 7094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7092, 7094), True, 'import matplotlib.pyplot as plt\n'), ((7180, 7190), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7188, 7190), True, 'import matplotlib.pyplot as plt\n'), ((6729, 
6749), 'numpy.array', 'np.array', (['Y_test_arr'], {}), '(Y_test_arr)\n', (6737, 6749), True, 'import numpy as np\n'), ((7271, 7291), 'numpy.array', 'np.array', (['Y_test_arr'], {}), '(Y_test_arr)\n', (7279, 7291), True, 'import numpy as np\n'), ((7138, 7158), 'numpy.array', 'np.array', (['Y_test_arr'], {}), '(Y_test_arr)\n', (7146, 7158), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
from scipy.stats import norm, uniform, lognorm
from corai_util.calculus import diff_eq
from corai_util.tools import function_iterable
class Test_diff_eq(TestCase):
    """Tests for the ODE solvers in ``corai_util.calculus.diff_eq``."""
    def test_fractional_adams(self):
        # Placeholder: the fractional Adams solver is not tested yet.
        pass
    def test_system_ode_solver(self):
        # example taken from the paper <NAME> 2015
        # Two uniform densities mu and nu; eta/gamma are the positive parts
        # of their difference (mu - nu) resp. (nu - mu).
        UNIFORM_SUPP = [[-1., 1.],
                        [-2.,
                         2.]]  # UNIFORM_SUPP[0][0] <= UNIFORM_SUPP[1][0] <= UNIFORM_SUPP[1][1] <= UNIFORM_SUPP[1][0]
        density_1 = lambda tt: uniform.pdf(tt,
                                           loc=UNIFORM_SUPP[0][0],
                                           scale=UNIFORM_SUPP[0][1] - UNIFORM_SUPP[0][0])
        density_2 = lambda tt: uniform.pdf(tt,
                                           loc=UNIFORM_SUPP[1][0],
                                           scale=UNIFORM_SUPP[1][1] - UNIFORM_SUPP[1][0])
        def density_mu(tt):
            return density_1(tt)
        def density_nu(tt):
            return density_2(tt)
        def density_eta(tt):
            return np.maximum(density_mu(tt) - density_nu(tt), 0)
        def density_gamma(tt):
            return np.maximum(density_nu(tt) - density_mu(tt), 0)
        # Right-hand sides of the coupled ODE system solved below.
        def p_dash_open_formula(tt, xx, yy):
            return (tt - yy) / (yy - xx) * density_eta(tt) / density_gamma(xx)
        def q_dash_open_formula(tt, xx, yy):
            return (xx - tt) / (yy - xx) * density_eta(tt) / density_gamma(yy)
        tt = np.linspace(-1 * 0.999, 0.5, 1000)
        starting_points = [[1.99, -1.01], [1.01, -1.99]]
        # forward equation
        empirical = diff_eq.system_ODE_solver(tt, starting_points[0],
                                              [p_dash_open_formula, q_dash_open_formula],
                                              left_or_right="left")
        q, p = zip(*empirical)
        p = function_iterable.replace_nans_numpy(np.array(p))
        q = function_iterable.replace_nans_numpy(np.array(q))
        # Closed-form solutions the numerical curves are compared against.
        true_p = lambda tt: -1 / 2 * (np.sqrt(12. - 3. * tt * tt) + tt)
        true_q = lambda tt: 1 / 2 * (np.sqrt(12. - 3. * tt * tt) - tt)
        error = np.mean(np.abs(function_iterable.replace_nans_numpy(p) - true_p(tt)))
        error += np.mean(np.abs(function_iterable.replace_nans_numpy(q) - true_q(tt)))
        # backward equation
        tt = np.linspace(-0.5, 1 * 0.999, 2000)
        empirical = diff_eq.system_ODE_solver(tt, starting_points[1],
                                              [p_dash_open_formula, q_dash_open_formula],
                                              left_or_right="left")
        q, p = zip(*empirical)
        p = function_iterable.replace_nans_numpy(np.array(p))
        q = function_iterable.replace_nans_numpy(np.array(q))
        # NOTE(review): unlike the forward pass, these terms are not wrapped
        # in np.abs -- signed errors may cancel. Confirm this is intended.
        error += np.mean(function_iterable.replace_nans_numpy(p) - true_p(tt))
        error += np.mean(function_iterable.replace_nans_numpy(q) - true_q(tt))
        # Accumulated mean deviation over both passes must stay small.
        assert error < 0.1
| [
"scipy.stats.uniform.pdf",
"corai_util.calculus.diff_eq.system_ODE_solver",
"corai_util.tools.function_iterable.replace_nans_numpy",
"numpy.array",
"numpy.linspace",
"numpy.sqrt"
] | [((1532, 1566), 'numpy.linspace', 'np.linspace', (['(-1 * 0.999)', '(0.5)', '(1000)'], {}), '(-1 * 0.999, 0.5, 1000)\n', (1543, 1566), True, 'import numpy as np\n'), ((1671, 1790), 'corai_util.calculus.diff_eq.system_ODE_solver', 'diff_eq.system_ODE_solver', (['tt', 'starting_points[0]', '[p_dash_open_formula, q_dash_open_formula]'], {'left_or_right': '"""left"""'}), "(tt, starting_points[0], [p_dash_open_formula,\n q_dash_open_formula], left_or_right='left')\n", (1696, 1790), False, 'from corai_util.calculus import diff_eq\n'), ((2393, 2427), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(1 * 0.999)', '(2000)'], {}), '(-0.5, 1 * 0.999, 2000)\n', (2404, 2427), True, 'import numpy as np\n'), ((2475, 2594), 'corai_util.calculus.diff_eq.system_ODE_solver', 'diff_eq.system_ODE_solver', (['tt', 'starting_points[1]', '[p_dash_open_formula, q_dash_open_formula]'], {'left_or_right': '"""left"""'}), "(tt, starting_points[1], [p_dash_open_formula,\n q_dash_open_formula], left_or_right='left')\n", (2500, 2594), False, 'from corai_util.calculus import diff_eq\n'), ((573, 663), 'scipy.stats.uniform.pdf', 'uniform.pdf', (['tt'], {'loc': 'UNIFORM_SUPP[0][0]', 'scale': '(UNIFORM_SUPP[0][1] - UNIFORM_SUPP[0][0])'}), '(tt, loc=UNIFORM_SUPP[0][0], scale=UNIFORM_SUPP[0][1] -\n UNIFORM_SUPP[0][0])\n', (584, 663), False, 'from scipy.stats import norm, uniform, lognorm\n'), ((777, 867), 'scipy.stats.uniform.pdf', 'uniform.pdf', (['tt'], {'loc': 'UNIFORM_SUPP[1][0]', 'scale': '(UNIFORM_SUPP[1][1] - UNIFORM_SUPP[1][0])'}), '(tt, loc=UNIFORM_SUPP[1][0], scale=UNIFORM_SUPP[1][1] -\n UNIFORM_SUPP[1][0])\n', (788, 867), False, 'from scipy.stats import norm, uniform, lognorm\n'), ((1959, 1970), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (1967, 1970), True, 'import numpy as np\n'), ((2021, 2032), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (2029, 2032), True, 'import numpy as np\n'), ((2763, 2774), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (2771, 2774), True, 'import numpy 
as np\n'), ((2825, 2836), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (2833, 2836), True, 'import numpy as np\n'), ((2863, 2902), 'corai_util.tools.function_iterable.replace_nans_numpy', 'function_iterable.replace_nans_numpy', (['p'], {}), '(p)\n', (2899, 2902), False, 'from corai_util.tools import function_iterable\n'), ((2942, 2981), 'corai_util.tools.function_iterable.replace_nans_numpy', 'function_iterable.replace_nans_numpy', (['q'], {}), '(q)\n', (2978, 2981), False, 'from corai_util.tools import function_iterable\n'), ((2073, 2102), 'numpy.sqrt', 'np.sqrt', (['(12.0 - 3.0 * tt * tt)'], {}), '(12.0 - 3.0 * tt * tt)\n', (2080, 2102), True, 'import numpy as np\n'), ((2144, 2173), 'numpy.sqrt', 'np.sqrt', (['(12.0 - 3.0 * tt * tt)'], {}), '(12.0 - 3.0 * tt * tt)\n', (2151, 2173), True, 'import numpy as np\n'), ((2209, 2248), 'corai_util.tools.function_iterable.replace_nans_numpy', 'function_iterable.replace_nans_numpy', (['p'], {}), '(p)\n', (2245, 2248), False, 'from corai_util.tools import function_iterable\n'), ((2296, 2335), 'corai_util.tools.function_iterable.replace_nans_numpy', 'function_iterable.replace_nans_numpy', (['q'], {}), '(q)\n', (2332, 2335), False, 'from corai_util.tools import function_iterable\n')] |
import copy
from typing import Dict, List, Tuple
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
plt.style.use("./Styles/Scientific.mplstyle")
import matplotlib.patches as patches
import msgpack
import numpy as np
import quaternion as quat
import optimization
from configuration import Configuration
from data_structures import Map, MapUnpacker, Trajectory
from plotting import plot_3D_line, plot_3D_scatter
from utilities import closest_point, clamp_signal, quat_from_axis_angle, \
quat_array_to_vec3_array, vec4_array_to_quat_array
def get_extremas_array_2D(arrays: List):
    """Column-wise global extrema over several 2-D arrays.

    Args:
        arrays: non-empty list of 2-D numpy arrays that share the same
            number of columns (row counts may differ).

    Returns:
        np.ndarray: shape ``(n_cols, 2)`` where row ``i`` holds
        ``[global_min, global_max]`` of column ``i`` taken over all arrays.
    """
    # Per-array [min, max] along axis 0 -> shape (n_arrays, 2, n_cols).
    # (The original also unpacked arrays[0].shape into unused locals.)
    extremas = np.array(
        [[np.min(a, axis=0), np.max(a, axis=0)] for a in arrays])
    # Reduce across arrays, then across the min/max pair.
    mins = np.min(np.min(extremas, axis=0), axis=0)
    maxs = np.max(np.max(extremas, axis=0), axis=0)
    return np.stack([mins, maxs], axis=1)
def visualize_alignment_results(config: Configuration, trajectories: Dict, \
    key_est, key_gt, label_est: str="Keyframes", label_gt: str="Ground Truth"):
    """Plot estimated vs. ground-truth positions and Euler attitudes.

    The ground-truth trajectory is truncated (in place) to the estimate's
    time span, attitudes are converted to Euler angles in degrees and
    clamped to [0, 360), and two 3-row figures are produced: positions
    (Northing/Easting/Depth) and attitudes (Euler X/Y/Z). Figures are saved
    and/or shown according to ``config.save_figures`` / ``config.show_figures``.

    Args:
        config: output options (name, output_dir, save/show flags).
        trajectories: dict of trajectory objects.
        key_est: key of the estimated trajectory in ``trajectories``.
        key_gt: key of the ground-truth trajectory in ``trajectories``.
        label_est: legend label for the estimate.
        label_gt: legend label for the ground truth.
    """
    # Plot parameters.
    margins_pos = np.array([ -2, 2 ])
    margins_ang = np.array([ -10, 10 ])
    pad = 2.0
    w_pad = 2.0
    h_pad = 2.0
    # NOTE(review): patch_color / patch_alpha are never used below --
    # possibly left over from a removed patch overlay.
    patch_color = "y"
    patch_alpha = 0.5
    # Assign to variables.
    ground_truth = trajectories[key_gt]
    estimate = trajectories[key_est]
    # Truncate ground truth to the estimate's time span (mutates the
    # trajectory stored in the input dict).
    start, _ = closest_point(estimate.timestamps[0], ground_truth.timestamps)
    end, _ = closest_point(estimate.timestamps[-1], ground_truth.timestamps)
    ground_truth.timestamps = ground_truth.timestamps[start:end+1]
    ground_truth.positions= ground_truth.positions[start:end+1]
    ground_truth.attitudes= ground_truth.attitudes[start:end+1]
    # Get ground truth attitude as quaternions, then Euler angles in degrees.
    q_ground_truth = vec4_array_to_quat_array(ground_truth.attitudes)
    q_estimate = vec4_array_to_quat_array(estimate.attitudes)
    angles_ground_truth = quat.as_euler_angles(q_ground_truth) * 180 / np.pi
    angles_estimate = quat.as_euler_angles(q_estimate) * 180 / np.pi
    angles_ground_truth = clamp_signal(angles_ground_truth, 0, 360)
    angles_estimate = clamp_signal(angles_estimate, 0, 360)
    # Calculate axis limits from both series, padded by the margins above.
    lims_time = [ estimate.timestamps[0], estimate.timestamps[-1] ]
    lims_pos = get_extremas_array_2D( \
        [ estimate.positions, ground_truth.positions ])
    lims_ang = get_extremas_array_2D( \
        [ angles_estimate, angles_ground_truth ])
    lims_pos += margins_pos
    lims_ang += margins_ang
    # Visualize trajectory - Figure.
    fig1, ax1 = plt.subplots(nrows=3, ncols=1, figsize=(7, 4.5))
    fig1.tight_layout(pad=pad, w_pad=w_pad, h_pad=h_pad)
    # Position figure - Northing.
    # NOTE(review): `estimate[:, i]` presumably indexes the trajectory's
    # position columns -- confirm against the Trajectory class.
    ax1[0].plot(estimate.timestamps, estimate[:, 0])
    ax1[0].plot(ground_truth.timestamps, ground_truth[:, 0])
    ax1[0].set_xlim(lims_time)
    ax1[0].set_ylim(lims_pos[0])
    ax1[0].set_xlabel(r"Time, $t$ $[s]$")
    ax1[0].set_ylabel(r"Northing, $N$ $[m]$")
    # Position figure - Easting.
    ax1[1].plot(estimate.timestamps, estimate[:, 1])
    ax1[1].plot(ground_truth.timestamps, ground_truth[:, 1])
    ax1[1].set_xlim(lims_time)
    ax1[1].set_ylim(lims_pos[1])
    ax1[1].set_xlabel(r"Time, $t$ $[s]$")
    ax1[1].set_ylabel(r"Easting, $E$ $[m]$")
    # Position figure - Depth.
    ax1[2].plot(estimate.timestamps, estimate[:, 2], label=label_est)
    ax1[2].plot(ground_truth.timestamps, ground_truth[:, 2], \
        label=label_gt)
    ax1[2].set_xlim(lims_time)
    ax1[2].set_ylim(lims_pos[2])
    ax1[2].set_xlabel(r"Time, $t$ $[s]$")
    ax1[2].set_ylabel(r"Depth, $D$ $[m]$")
    # Position figure - legend.
    lg1 = fig1.legend(bbox_to_anchor=(1, 1), loc="upper right", frameon=True, \
        fancybox=False)
    fr1 = lg1.get_frame()
    fr1.set_facecolor("white")
    fr1.set_edgecolor("black")
    # Visualize attitudes - Figure.
    fig2, ax2 = plt.subplots(nrows=3, ncols=1, figsize=(7, 4.5))
    fig2.tight_layout(pad=2.0, w_pad=2.0, h_pad=2.0)
    # Rotation 1.
    ax2[0].plot(estimate.timestamps, angles_estimate[:, 0])
    ax2[0].plot(ground_truth.timestamps, angles_ground_truth[:, 0])
    ax2[0].set_xlim(lims_time)
    # Hard-coded Euler-X range; the computed-limits version is kept below.
    ax2[0].set_ylim([ 80, 220 ])
    #ax2[0].set_ylim(lims_ang[0])
    ax2[0].set_xlabel(r"Time, $t$ $[s]$")
    ax2[0].set_ylabel(r"Euler X, $r_{x}$ $[\text{deg}]$")
    # Rotation 2.
    ax2[1].plot(estimate.timestamps, angles_estimate[:, 1])
    ax2[1].plot(ground_truth.timestamps, angles_ground_truth[:, 1])
    ax2[1].set_xlim(lims_time)
    ax2[1].set_ylim(lims_ang[1])
    ax2[1].set_xlabel(r"Time, $t$ $[s]$")
    ax2[1].set_ylabel(r"Euler Y, $r_{y}$ $[\text{deg}]$")
    # Rotation 3.
    ax2[2].plot(estimate.timestamps, angles_estimate[:, 2], label=label_est)
    ax2[2].plot(ground_truth.timestamps, angles_ground_truth[:, 2], \
        label=label_gt)
    ax2[2].set_xlim(lims_time)
    ax2[2].set_ylim(lims_ang[2])
    ax2[2].set_xlabel(r"Time, $t$ $[s]$")
    ax2[2].set_ylabel(r"Euler Z, $r_{z}$ $[\text{deg}]$")
    lg2 = fig2.legend(bbox_to_anchor=(1, 1), loc="upper right", frameon=True, \
        fancybox=False)
    fr2 = lg2.get_frame()
    fr2.set_facecolor("white")
    fr2.set_edgecolor("black")
    if config.save_figures:
        fig1.savefig(config.output_dir + config.name + "-" + "Positions.pdf", \
            dpi=300)
        fig2.savefig(config.output_dir + config.name + "-" + "Attitudes.pdf", \
            dpi=300)
    if config.show_figures:
        plt.show()
def georeference(config: Configuration, trajectories: Dict, map: Map):
    """Georeference SLAM output against a ground-truth trajectory.

    Aligns the keyframe trajectory to the ground truth via
    ``optimization.optimize``, then applies the configured time bias
    (``config.optim.bias``) and the recovered SE(3) transform to the
    keyframes, frames and map landmarks. The alignment is visualized and,
    if ``config.save_output`` is set, all trajectories and landmarks are
    written to CSV files.

    Args:
        config: pipeline configuration (optimization settings, output options).
        trajectories: dict holding "Ground-Truth", "Keyframes" and "Frames";
            matched trajectories are added to it in place.
        map: SLAM map whose landmarks are transformed in place.
            NOTE(review): parameter name shadows the builtin ``map``.
    """
    # Get trajectories.
    ground_truth = trajectories["Ground-Truth"]
    keyframes = trajectories["Keyframes"]
    frames = trajectories["Frames"]
    # Get map landmarks.
    landmarks = map.get_landmarks()
    # Perform temporal and spatial optimization.
    results = optimization.optimize(config.optim, keyframes, ground_truth)
    rotation = results.rotation
    translation = results.translation
    matched_keyframes = results.matched_frames
    matched_ground_truth = results.matched_ground_truth
    # Add matched trajectories.
    trajectories["Matched-Keyframes"] = matched_keyframes
    trajectories["Matched-Ground-Truth"] = matched_ground_truth
    # Add bias and apply rotation and translation.
    keyframes.add_time_bias(config.optim.bias)
    frames.add_time_bias(config.optim.bias)
    keyframes.apply_SE3_transform(rotation, translation)
    frames.apply_SE3_transform(rotation, translation)
    landmarks.apply_SE3_transform(rotation, translation)
    trajectories["Keyframes"] = keyframes
    trajectories["Frames"] = frames
    visualize_alignment_results(config, trajectories, "Keyframes", \
        "Ground-Truth")
    if config.save_output:
        keyframes.save_as_csv(config.output_dir + config.name + "-" \
            + "Keyframes.csv")
        frames.save_as_csv(config.output_dir + config.name + "-" \
            + "Frames.csv")
        landmarks.save_as_csv(config.output_dir + config.name + "-" \
            + "Landmarks.csv")
        matched_keyframes.save_as_csv(config.output_dir + config.name + "-" \
            + "Matched-Keyframes.csv")
        matched_ground_truth.save_as_csv(config.output_dir + config.name + "-" \
            + "Matched-Ground-Truth.csv")
| [
"numpy.stack",
"matplotlib.pyplot.show",
"utilities.clamp_signal",
"optimization.optimize",
"utilities.closest_point",
"matplotlib.pyplot.style.use",
"matplotlib.use",
"numpy.array",
"utilities.vec4_array_to_quat_array",
"numpy.min",
"numpy.max",
"quaternion.as_euler_angles",
"matplotlib.pyp... | [((69, 92), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (83, 92), False, 'import matplotlib\n'), ((125, 170), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""./Styles/Scientific.mplstyle"""'], {}), "('./Styles/Scientific.mplstyle')\n", (138, 170), True, 'import matplotlib.pyplot as plt\n'), ((771, 789), 'numpy.array', 'np.array', (['extremas'], {}), '(extremas)\n', (779, 789), True, 'import numpy as np\n'), ((906, 936), 'numpy.stack', 'np.stack', (['[mins, maxs]'], {'axis': '(1)'}), '([mins, maxs], axis=1)\n', (914, 936), True, 'import numpy as np\n'), ((1136, 1153), 'numpy.array', 'np.array', (['[-2, 2]'], {}), '([-2, 2])\n', (1144, 1153), True, 'import numpy as np\n'), ((1174, 1193), 'numpy.array', 'np.array', (['[-10, 10]'], {}), '([-10, 10])\n', (1182, 1193), True, 'import numpy as np\n'), ((1436, 1498), 'utilities.closest_point', 'closest_point', (['estimate.timestamps[0]', 'ground_truth.timestamps'], {}), '(estimate.timestamps[0], ground_truth.timestamps)\n', (1449, 1498), False, 'from utilities import closest_point, clamp_signal, quat_from_axis_angle, quat_array_to_vec3_array, vec4_array_to_quat_array\n'), ((1512, 1575), 'utilities.closest_point', 'closest_point', (['estimate.timestamps[-1]', 'ground_truth.timestamps'], {}), '(estimate.timestamps[-1], ground_truth.timestamps)\n', (1525, 1575), False, 'from utilities import closest_point, clamp_signal, quat_from_axis_angle, quat_array_to_vec3_array, vec4_array_to_quat_array\n'), ((1826, 1874), 'utilities.vec4_array_to_quat_array', 'vec4_array_to_quat_array', (['ground_truth.attitudes'], {}), '(ground_truth.attitudes)\n', (1850, 1874), False, 'from utilities import closest_point, clamp_signal, quat_from_axis_angle, quat_array_to_vec3_array, vec4_array_to_quat_array\n'), ((1892, 1936), 'utilities.vec4_array_to_quat_array', 'vec4_array_to_quat_array', (['estimate.attitudes'], {}), '(estimate.attitudes)\n', (1916, 1936), False, 'from utilities import 
closest_point, clamp_signal, quat_from_axis_angle, quat_array_to_vec3_array, vec4_array_to_quat_array\n'), ((2109, 2150), 'utilities.clamp_signal', 'clamp_signal', (['angles_ground_truth', '(0)', '(360)'], {}), '(angles_ground_truth, 0, 360)\n', (2121, 2150), False, 'from utilities import closest_point, clamp_signal, quat_from_axis_angle, quat_array_to_vec3_array, vec4_array_to_quat_array\n'), ((2173, 2210), 'utilities.clamp_signal', 'clamp_signal', (['angles_estimate', '(0)', '(360)'], {}), '(angles_estimate, 0, 360)\n', (2185, 2210), False, 'from utilities import closest_point, clamp_signal, quat_from_axis_angle, quat_array_to_vec3_array, vec4_array_to_quat_array\n'), ((2600, 2648), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(1)', 'figsize': '(7, 4.5)'}), '(nrows=3, ncols=1, figsize=(7, 4.5))\n', (2612, 2648), True, 'import matplotlib.pyplot as plt\n'), ((3925, 3973), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(1)', 'figsize': '(7, 4.5)'}), '(nrows=3, ncols=1, figsize=(7, 4.5))\n', (3937, 3973), True, 'import matplotlib.pyplot as plt\n'), ((5874, 5934), 'optimization.optimize', 'optimization.optimize', (['config.optim', 'keyframes', 'ground_truth'], {}), '(config.optim, keyframes, ground_truth)\n', (5895, 5934), False, 'import optimization\n'), ((809, 833), 'numpy.min', 'np.min', (['extremas'], {'axis': '(0)'}), '(extremas, axis=0)\n', (815, 833), True, 'import numpy as np\n'), ((860, 884), 'numpy.max', 'np.max', (['extremas'], {'axis': '(0)'}), '(extremas, axis=0)\n', (866, 884), True, 'import numpy as np\n'), ((5499, 5509), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5507, 5509), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1999), 'quaternion.as_euler_angles', 'quat.as_euler_angles', (['q_ground_truth'], {}), '(q_ground_truth)\n', (1983, 1999), True, 'import quaternion as quat\n'), ((2036, 2068), 'quaternion.as_euler_angles', 'quat.as_euler_angles', (['q_estimate'], {}), 
'(q_estimate)\n', (2056, 2068), True, 'import quaternion as quat\n'), ((707, 728), 'numpy.min', 'np.min', (['array'], {'axis': '(0)'}), '(array, axis=0)\n', (713, 728), True, 'import numpy as np\n'), ((730, 751), 'numpy.max', 'np.max', (['array'], {'axis': '(0)'}), '(array, axis=0)\n', (736, 751), True, 'import numpy as np\n')] |
import imageio
import cv2
import numpy as np
import os, sys
import argparse
output_order = ['m00','m10','m01','m20','m11','m02','m30','m21','m12','m03','mu20','mu11','mu02','mu30','mu21','mu12','mu03','nu20','nu11','nu02','nu30','nu21','nu12','nu03']
def writeCSVHeader(filename):
    """Create *filename*, write the moments CSV header line, and return a
    file object opened in append mode for writing the data rows.

    Args:
        filename (str): path of the CSV file to (over)write.

    Returns:
        file object opened in append mode; the caller must close it.
    """
    # Use a context manager so the write handle is closed (and flushed)
    # before the file is reopened for appending.
    with open(filename, 'w') as header_writer:
        header_writer.write("m00,m10,m01,m20,m11,m02,m30,m21,m12,m03,mu20,mu11,mu02,mu30,mu21,mu12,mu03,nu20,nu11,nu02,nu30,nu21,nu12,nu03,perimeter\n")
    return open(filename, 'a')
def process_video(args):
    """Extract per-frame image moments and largest-blob perimeter from a video.

    For every frame: keep the first channel, resize to an
    ``args.frame_size`` square, threshold (pixel value > 128) into a binary
    mask, compute the cv2 image moments of that mask and the perimeter of
    its largest contour, and append one CSV row per frame.

    Args:
        args: parsed CLI namespace with ``input_file`` and ``frame_size``.
    """
    vid_reader = imageio.get_reader(args.input_file)
    # Output CSV sits next to the input, tagged with mask type and size.
    # NOTE(review): the mask selects *bright* (>128) pixels although the
    # file tag says 'DarkMask' -- confirm the intent.
    full_writer = writeCSVHeader(os.path.splitext(args.input_file)[0] + '_DarkMask_' + str(args.frame_size) + '.csv')
    for frame in vid_reader:
        frame = frame[:,:,0]  # first channel only
        frame = cv2.resize(frame, (args.frame_size, args.frame_size))
        masked_full_frame = np.zeros_like(frame)
        masked_full_frame[frame > 128] = 1  # binary mask
        moments = cv2.moments(masked_full_frame)
        # Two-value unpack assumes the OpenCV 4 findContours signature.
        contours, hierarchy = cv2.findContours(np.uint8(masked_full_frame), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        if len(contours) < 1:
            # Default values
            moments = {'m00': 0, 'm10': 0, 'm01': 0, 'm20': 0, 'm11': 0, 'm02': 0, 'm30': 0, 'm21': 0, 'm12': 0, 'm03': 0, 'mu20': 0, 'mu11': 0, 'mu02': 0, 'mu30': 0, 'mu21': 0, 'mu12': 0, 'mu03': 0, 'nu20': 0, 'nu11': 0, 'nu02': 0, 'nu30': 0, 'nu21': 0, 'nu12': 0, 'nu03': 0}
            perimeter = 0
        else:
            # Closed perimeter of the largest-area contour.
            max_contour = None
            max_size = -1
            for k in contours:
                blob_size = cv2.contourArea(k)
                if blob_size > max_size:
                    max_contour = k
                    max_size = blob_size
            perimeter = cv2.arcLength(max_contour, True)
        # One CSV row: moments in output_order, then the perimeter.
        np.savetxt(full_writer, [list([moments[x] for x in output_order]) + [perimeter]], delimiter=',')
    vid_reader.close()
    full_writer.close()
full_writer.close()
# cv2.imwrite('masked.png',masked_full_frame*254)
# cv2.imwrite('frame.png',frame)
def main(argv):
    """Parse command-line arguments and run the moment-extraction pipeline.

    Args:
        argv: argument list, e.g. ``sys.argv[1:]``.
    """
    parser = argparse.ArgumentParser(description='Exports ')
    parser.add_argument('--input_file', help='Input dataset to process', required=True)
    parser.add_argument('--frame_size', help='Scaled frame size to use', default=1080, type=int)
    # BUG FIX: the explicit ``argv`` parameter was previously ignored --
    # ``parse_args()`` silently fell back to ``sys.argv[1:]``.
    args = parser.parse_args(argv)
    process_video(args)
if __name__ == '__main__':
main(sys.argv[1:])
| [
"cv2.contourArea",
"numpy.zeros_like",
"numpy.uint8",
"argparse.ArgumentParser",
"cv2.arcLength",
"cv2.moments",
"os.path.splitext",
"imageio.get_reader",
"cv2.resize"
] | [((567, 602), 'imageio.get_reader', 'imageio.get_reader', (['args.input_file'], {}), '(args.input_file)\n', (585, 602), False, 'import imageio\n'), ((1872, 1919), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Exports """'}), "(description='Exports ')\n", (1895, 1919), False, 'import argparse\n'), ((777, 830), 'cv2.resize', 'cv2.resize', (['frame', '(args.frame_size, args.frame_size)'], {}), '(frame, (args.frame_size, args.frame_size))\n', (787, 830), False, 'import cv2\n'), ((853, 873), 'numpy.zeros_like', 'np.zeros_like', (['frame'], {}), '(frame)\n', (866, 873), True, 'import numpy as np\n'), ((923, 953), 'cv2.moments', 'cv2.moments', (['masked_full_frame'], {}), '(masked_full_frame)\n', (934, 953), False, 'import cv2\n'), ((995, 1022), 'numpy.uint8', 'np.uint8', (['masked_full_frame'], {}), '(masked_full_frame)\n', (1003, 1022), True, 'import numpy as np\n'), ((1586, 1618), 'cv2.arcLength', 'cv2.arcLength', (['max_contour', '(True)'], {}), '(max_contour, True)\n', (1599, 1618), False, 'import cv2\n'), ((1476, 1494), 'cv2.contourArea', 'cv2.contourArea', (['k'], {}), '(k)\n', (1491, 1494), False, 'import cv2\n'), ((633, 666), 'os.path.splitext', 'os.path.splitext', (['args.input_file'], {}), '(args.input_file)\n', (649, 666), False, 'import os, sys\n')] |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import OpenEXR, Imath
import numpy as np
import os, sys
from collections import defaultdict
#import set
# exr.py: Tools/helpers for various exr I/O operations
# Pixel-type handles used to declare EXR channel precision.
FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)
HALF = Imath.PixelType(Imath.PixelType.HALF)
UINT = Imath.PixelType(Imath.PixelType.UINT)
# Compression schemes understood by the OpenEXR writer.
NO_COMPRESSION = Imath.Compression(Imath.Compression.NO_COMPRESSION)
RLE_COMPRESSION = Imath.Compression(Imath.Compression.RLE_COMPRESSION)
ZIPS_COMPRESSION = Imath.Compression(Imath.Compression.ZIPS_COMPRESSION)
ZIP_COMPRESSION = Imath.Compression(Imath.Compression.ZIP_COMPRESSION)
PIZ_COMPRESSION = Imath.Compression(Imath.Compression.PIZ_COMPRESSION)
PXR24_COMPRESSION = Imath.Compression(Imath.Compression.PXR24_COMPRESSION)
# Map str(Imath.PixelType) -> matching NumPy dtype for raw pixel buffers.
# Bug fix: OpenEXR "UINT" pixels are 32-bit unsigned integers, so they must be
# encoded/decoded as np.uint32 -- np.uint8 mis-sized every UINT channel buffer
# (4x too few bytes on write, 4x too many elements on read).
NP_PRECISION = {
    "FLOAT": np.float32,
    "HALF": np.float16,
    "UINT": np.uint32
}
def open(filename):
    """Validate that ``filename`` is an EXR file and wrap it in an InputFile.

    Note: intentionally shadows the builtin ``open`` as this module's API.

    Raises:
        Exception: If the file is not recognized as an OpenEXR file.
    """
    if not OpenEXR.isOpenExrFile(filename):
        raise Exception("File '%s' is not an EXR file." % filename)
    exr_file = OpenEXR.InputFile(filename)
    return InputFile(exr_file, filename)
def read(filename, channels = "default", precision = FLOAT):
    """Read one channel group (or several) from an EXR file.

    A list/tuple/array of group names yields a dict of matrices; a single
    group name yields one matrix.
    """
    exr = open(filename)
    if not _is_list(channels):
        return exr.get(channels, precision)
    # Several groups requested: return {group: matrix}.
    return exr.get_dict(channels, precision=precision)
def read_all(filename, precision = FLOAT):
    """Read every channel group of an EXR file into a dict of matrices."""
    return open(filename).get_all(precision=precision)
def write(filename, data, channel_names = None, precision = FLOAT, compression = PIZ_COMPRESSION):
    """Write image data to an EXR file.

    Args:
        filename: Destination path.
        data: Either a single (h, w[, c]) NumPy array, or a dict mapping group
            names to such arrays (group "default" is written unprefixed).
        channel_names: Per-channel names (a list, or a dict keyed by group);
            when omitted, defaults based on depth are used (Z / XY / RGB / RGBA).
        precision: Imath pixel type (or a dict of them keyed by group).
        compression: EXR compression scheme.

    Raises:
        Exception: On invalid dimensionality, mismatched channel-name counts,
            or an unsupported ``data`` type.
    """
    # Helper function add a third dimension to 2-dimensional matrices (single channel)
    def make_ndims_3(matrix):
        if matrix.ndim > 3 or matrix.ndim < 2:
            raise Exception("Invalid number of dimensions for the `matrix` argument.")
        elif matrix.ndim == 2:
            matrix = np.expand_dims(matrix, -1)
        return matrix
    # Helper function to read channel names from default
    def get_channel_names(channel_names, depth):
        if channel_names:
            # Bug fix: compare with `!=` -- `is not` tests object identity and
            # only worked by accident for small interned ints.
            if depth != len(channel_names):
                raise Exception("The provided channel names have the wrong length (%d vs %d)." % (len(channel_names), depth))
            return channel_names
        elif depth in _default_channel_names:
            return _default_channel_names[depth]
        else:
            raise Exception("There are no suitable default channel names for data of depth %d" % depth)
    #
    # Case 1, the `data` argument is a dictionary
    #
    if isinstance(data, dict):
        # Make sure everything has ndims 3
        for group, matrix in data.items():
            data[group] = make_ndims_3(matrix)
        # Prepare precisions
        if not isinstance(precision, dict):
            precisions = {group: precision for group in data.keys()}
        else:
            precisions = {group: precision.get(group, FLOAT) for group in data.keys()}
        # Prepare channel names
        if channel_names is None:
            channel_names = {}
        channel_names = {group: get_channel_names(channel_names.get(group), matrix.shape[2]) for group, matrix in data.items()}
        # Collect channels
        channels = {}
        channel_data = {}
        width = None
        height = None
        for group, matrix in data.items():
            # Read the depth of the current group
            # and set height and width variables if not set yet
            if width is None:
                height, width, depth = matrix.shape
            else:
                depth = matrix.shape[2]
            names = channel_names[group]
            # Check the number of channel names
            if len(names) != depth:
                raise Exception("Depth does not match the number of channel names for channel '%s'" % group)
            for i, c in enumerate(names):
                if group == "default":
                    channel_name = c
                else:
                    channel_name = "%s.%s" % (group, c)
                channels[channel_name] = Imath.Channel(precisions[group])
                # `tobytes` replaces the long-deprecated (and since removed)
                # ndarray.tostring alias.
                channel_data[channel_name] = matrix[:,:,i].astype(NP_PRECISION[str(precisions[group])]).tobytes()
        # Save
        header = OpenEXR.Header(width, height)
        header['compression'] = compression
        header['channels'] = channels
        out = OpenEXR.OutputFile(filename, header)
        out.writePixels(channel_data)
    #
    # Case 2, the `data` argument is one matrix
    #
    elif isinstance(data, np.ndarray):
        data = make_ndims_3(data)
        height, width, depth = data.shape
        channel_names = get_channel_names(channel_names, depth)
        header = OpenEXR.Header(width, height)
        header['compression'] = compression
        header['channels'] = {c: Imath.Channel(precision) for c in channel_names}
        out = OpenEXR.OutputFile(filename, header)
        out.writePixels({c: data[:,:,i].astype(NP_PRECISION[str(precision)]).tobytes() for i, c in enumerate(channel_names)})
    else:
        raise Exception("Invalid precision for the `data` argument. Supported are NumPy arrays and dictionaries.")
def tonemap(matrix, gamma=2.2):
    """Apply inverse-gamma correction and clamp the result into [0, 1]."""
    corrected = matrix ** (1.0 / gamma)
    return np.clip(corrected, 0, 1)
class InputFile(object):
    """Wrapper around an :class:`OpenEXR.InputFile` with channel-group helpers.

    Channels are grouped by their dot-separated prefix (e.g. ``diff.R`` belongs
    to group ``diff``); bare channels such as ``R`` belong to group ``default``.
    """
    def __init__(self, input_file, filename=None):
        """Read header metadata and build the per-group channel index."""
        self.input_file = input_file
        if not input_file.isComplete():
            raise Exception("EXR file '%s' is not ready." % filename)
        header = input_file.header()
        dw = header['dataWindow']
        self.width = dw.max.x - dw.min.x + 1
        self.height = dw.max.y - dw.min.y + 1
        self.channels = sorted(header['channels'].keys(),key=_channel_sort_key)
        self.depth = len(self.channels)
        self.precisions = [c.type for c in header['channels'].values()]
        self.channel_precision = {c: v.type for c, v in header['channels'].items()}
        self.channel_map = defaultdict(list)
        self.root_channels = set()
        self._init_channel_map()
    def _init_channel_map(self):
        # Make a dictionary of subchannels per channel
        for c in self.channels:
            self.channel_map['all'].append(c)
            parts = c.split('.')
            if len(parts) == 1:
                self.root_channels.add('default')
                self.channel_map['default'].append(c)
            else:
                self.root_channels.add(parts[0])
                # Register the channel under every prefix, e.g. `a.b.c` is
                # listed under `a`, `a.b`, and `a.b.c`.
                for i in range(1, len(parts)+1):
                    key = ".".join(parts[0:i])
                    self.channel_map[key].append(c)
    def describe_channels(self):
        """Print a human-readable summary of the available channel groups."""
        if 'default' in self.root_channels:
            for c in self.channel_map['default']:
                print (c)
        for group in sorted(list(self.root_channels)):
            if group != 'default':
                channels = self.channel_map[group]
                print("%-20s%s" % (group, ",".join([c[len(group)+1:] for c in channels])))
    def get(self, group = 'default', precision=FLOAT):
        """Read one channel group into an (height, width, channels) matrix."""
        channels = self.channel_map[group]
        if len(channels) == 0:
            # Typo fix in the user-facing message ("did't" -> "didn't").
            print("I didn't find any channels in group '%s'." % group)
            print("You could try:")
            self.describe_channels()
            sys.exit()
        strings = self.input_file.channels(channels)
        matrix = np.zeros((self.height, self.width, len(channels)), dtype=NP_PRECISION[str(precision)])
        for i, string in enumerate(strings):
            # Decode each channel at its stored precision; np.frombuffer
            # replaces the removed np.fromstring for binary data.
            precision = NP_PRECISION[str(self.channel_precision[channels[i]])]
            matrix[:,:,i] = np.frombuffer(string, dtype = precision) \
                .reshape(self.height, self.width)
        return matrix
    def get_all(self, precision = {}):
        """Read every root channel group (see :meth:`get_dict`)."""
        return self.get_dict(self.root_channels, precision)
    def get_dict(self, groups = [], precision = {}):
        """Read several channel groups at once into a dict of matrices.

        (The mutable defaults are never mutated here, so they are safe.)
        """
        if not isinstance(precision, dict):
            precision = {group: precision for group in groups}
        return_dict = {}
        todo = []
        for group in groups:
            group_chans = self.channel_map[group]
            if len(group_chans) == 0:
                print("I didn't find any channels for the requested group '%s'." % group)
                print("You could try:")
                self.describe_channels()
                sys.exit()
            if group in precision:
                p = precision[group]
            else:
                p = FLOAT
            matrix = np.zeros((self.height, self.width, len(group_chans)), dtype=NP_PRECISION[str(p)])
            return_dict[group] = matrix
            for i, c in enumerate(group_chans):
                todo.append({'group': group, 'id': i, 'channel': c})
        if len(todo) == 0:
            print("Please ask for some channels, I cannot process empty queries.")
            print("You could try:")
            self.describe_channels()
            sys.exit()
        # Fetch all channels in a single call, then scatter into the matrices.
        strings = self.input_file.channels([c['channel'] for c in todo])
        for i, item in enumerate(todo):
            precision = NP_PRECISION[str(self.channel_precision[todo[i]['channel']])]
            return_dict[item['group']][:,:,item['id']] = \
                np.frombuffer(strings[i], dtype = precision) \
                .reshape(self.height, self.width)
        return return_dict
def _sort_dictionary(key):
if key == 'R' or key == 'r':
return 10
elif key == 'G' or key == 'g':
return 20
elif key == 'B' or key == 'b':
return 30
elif key == 'A' or key == 'a':
return 40
elif key == 'X' or key == 'x':
return 110
elif key == 'Y' or key == 'y':
return 120
elif key == 'Z' or key == 'z':
return 130
else:
return key
def _channel_sort_key(i):
    """Sort key for a full channel name: weight every dot-separated part."""
    return list(map(_sort_dictionary, i.split(".")))
# Default channel names keyed by channel count: depth (Z), 2-D vectors (XY),
# RGB color, and RGBA color.
_default_channel_names = {
    1: ['Z'],
    2: ['X','Y'],
    3: ['R','G','B'],
    4: ['R','G','B','A']
}
def _is_list(x):
return isinstance(x, (list, tuple, np.ndarray))
| [
"Imath.Channel",
"OpenEXR.isOpenExrFile",
"OpenEXR.InputFile",
"numpy.expand_dims",
"numpy.clip",
"OpenEXR.Header",
"collections.defaultdict",
"numpy.fromstring",
"OpenEXR.OutputFile",
"Imath.Compression",
"sys.exit",
"Imath.PixelType"
] | [((301, 339), 'Imath.PixelType', 'Imath.PixelType', (['Imath.PixelType.FLOAT'], {}), '(Imath.PixelType.FLOAT)\n', (316, 339), False, 'import OpenEXR, Imath\n'), ((348, 385), 'Imath.PixelType', 'Imath.PixelType', (['Imath.PixelType.HALF'], {}), '(Imath.PixelType.HALF)\n', (363, 385), False, 'import OpenEXR, Imath\n'), ((394, 431), 'Imath.PixelType', 'Imath.PixelType', (['Imath.PixelType.UINT'], {}), '(Imath.PixelType.UINT)\n', (409, 431), False, 'import OpenEXR, Imath\n'), ((453, 504), 'Imath.Compression', 'Imath.Compression', (['Imath.Compression.NO_COMPRESSION'], {}), '(Imath.Compression.NO_COMPRESSION)\n', (470, 504), False, 'import OpenEXR, Imath\n'), ((525, 577), 'Imath.Compression', 'Imath.Compression', (['Imath.Compression.RLE_COMPRESSION'], {}), '(Imath.Compression.RLE_COMPRESSION)\n', (542, 577), False, 'import OpenEXR, Imath\n'), ((598, 651), 'Imath.Compression', 'Imath.Compression', (['Imath.Compression.ZIPS_COMPRESSION'], {}), '(Imath.Compression.ZIPS_COMPRESSION)\n', (615, 651), False, 'import OpenEXR, Imath\n'), ((672, 724), 'Imath.Compression', 'Imath.Compression', (['Imath.Compression.ZIP_COMPRESSION'], {}), '(Imath.Compression.ZIP_COMPRESSION)\n', (689, 724), False, 'import OpenEXR, Imath\n'), ((745, 797), 'Imath.Compression', 'Imath.Compression', (['Imath.Compression.PIZ_COMPRESSION'], {}), '(Imath.Compression.PIZ_COMPRESSION)\n', (762, 797), False, 'import OpenEXR, Imath\n'), ((818, 872), 'Imath.Compression', 'Imath.Compression', (['Imath.Compression.PXR24_COMPRESSION'], {}), '(Imath.Compression.PXR24_COMPRESSION)\n', (835, 872), False, 'import OpenEXR, Imath\n'), ((4935, 4973), 'numpy.clip', 'np.clip', (['(matrix ** (1.0 / gamma))', '(0)', '(1)'], {}), '(matrix ** (1.0 / gamma), 0, 1)\n', (4942, 4973), True, 'import numpy as np\n'), ((1027, 1058), 'OpenEXR.isOpenExrFile', 'OpenEXR.isOpenExrFile', (['filename'], {}), '(filename)\n', (1048, 1058), False, 'import OpenEXR, Imath\n'), ((1169, 1196), 'OpenEXR.InputFile', 'OpenEXR.InputFile', 
(['filename'], {}), '(filename)\n', (1186, 1196), False, 'import OpenEXR, Imath\n'), ((4036, 4065), 'OpenEXR.Header', 'OpenEXR.Header', (['width', 'height'], {}), '(width, height)\n', (4050, 4065), False, 'import OpenEXR, Imath\n'), ((4150, 4186), 'OpenEXR.OutputFile', 'OpenEXR.OutputFile', (['filename', 'header'], {}), '(filename, header)\n', (4168, 4186), False, 'import OpenEXR, Imath\n'), ((5675, 5692), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5686, 5692), False, 'from collections import defaultdict\n'), ((4454, 4483), 'OpenEXR.Header', 'OpenEXR.Header', (['width', 'height'], {}), '(width, height)\n', (4468, 4483), False, 'import OpenEXR, Imath\n'), ((4612, 4648), 'OpenEXR.OutputFile', 'OpenEXR.OutputFile', (['filename', 'header'], {}), '(filename, header)\n', (4630, 4648), False, 'import OpenEXR, Imath\n'), ((6808, 6818), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6816, 6818), False, 'import os, sys\n'), ((8262, 8272), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8270, 8272), False, 'import os, sys\n'), ((1941, 1967), 'numpy.expand_dims', 'np.expand_dims', (['matrix', '(-1)'], {}), '(matrix, -1)\n', (1955, 1967), True, 'import numpy as np\n'), ((3871, 3903), 'Imath.Channel', 'Imath.Channel', (['precisions[group]'], {}), '(precisions[group])\n', (3884, 3903), False, 'import OpenEXR, Imath\n'), ((4553, 4577), 'Imath.Channel', 'Imath.Channel', (['precision'], {}), '(precision)\n', (4566, 4577), False, 'import OpenEXR, Imath\n'), ((7761, 7771), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7769, 7771), False, 'import os, sys\n'), ((7106, 7144), 'numpy.fromstring', 'np.fromstring', (['string'], {'dtype': 'precision'}), '(string, dtype=precision)\n', (7119, 7144), True, 'import numpy as np\n'), ((8523, 8565), 'numpy.fromstring', 'np.fromstring', (['strings[i]'], {'dtype': 'precision'}), '(strings[i], dtype=precision)\n', (8536, 8565), True, 'import numpy as np\n')] |
import numpy as np
from protosc.model.utils import compute_accuracy
from protosc.model.base import BaseFoldModel
from protosc.model.filter import compute_filter_fold
class RandomModel(BaseFoldModel):
    """Chance baseline that scores a randomly chosen set of feature clusters.

    Picks whole clusters at random until at least as many features as the
    filter model's selection are gathered, then measures fold accuracy --
    giving a baseline to compare real feature-selection methods against.
    """
    def _execute_fold(self, fold):
        """Compute the filter-model data for a fold, then score a random pick."""
        filter_data = compute_filter_fold(fold)
        return self.execute_with_clusters(**filter_data)
    @staticmethod
    def execute_with_clusters(cur_fold, clusters, selected_features):
        """Score a random cluster selection of (at least) the reference size.

        Args:
            cur_fold: Fold data passed straight to ``compute_accuracy``.
            clusters: Sequence of feature clusters to sample from (not mutated).
            selected_features: Reference selection whose length sets the target
                number of features to pick.

        Returns:
            Dict with the randomly selected ``features`` and their ``accuracy``.
        """
        # Bug fix: shuffle a copy so the caller's `clusters` list is not
        # reordered in place as a side effect.
        shuffled_clusters = list(clusters)
        np.random.shuffle(shuffled_clusters)
        random_selection = []
        for cluster in shuffled_clusters:
            if len(random_selection) >= len(selected_features):
                break
            random_selection.extend(cluster)
        accuracy = compute_accuracy(cur_fold, random_selection)
        return {'features': random_selection, 'accuracy': accuracy}
| [
"protosc.model.utils.compute_accuracy",
"numpy.random.shuffle",
"protosc.model.filter.compute_filter_fold"
] | [((260, 285), 'protosc.model.filter.compute_filter_fold', 'compute_filter_fold', (['fold'], {}), '(fold)\n', (279, 285), False, 'from protosc.model.filter import compute_filter_fold\n'), ((457, 484), 'numpy.random.shuffle', 'np.random.shuffle', (['clusters'], {}), '(clusters)\n', (474, 484), True, 'import numpy as np\n'), ((699, 743), 'protosc.model.utils.compute_accuracy', 'compute_accuracy', (['cur_fold', 'random_selection'], {}), '(cur_fold, random_selection)\n', (715, 743), False, 'from protosc.model.utils import compute_accuracy\n')] |
from collections import defaultdict
from .utils import fix_dataclass_init_docs
from .sim import SimGrid
from .typing import Optional, Callable, Union, List, Tuple, Dict
import jax
import jax.numpy as jnp
from jax.config import config
from jax.experimental.optimizers import adam
import numpy as np
import dataclasses
import xarray
try:
    # Visualization is optional: fall back gracefully when holoviews/panel
    # are not installed (only the visualization entry points need them).
    HOLOVIEWS_IMPORTED = True
    import holoviews as hv
    from holoviews.streams import Pipe
    import panel as pn
except ImportError:
    HOLOVIEWS_IMPORTED = False
from .viz import scalar_metrics_viz
# Let absl consume any JAX configuration flags passed on the command line.
config.parse_flags_with_absl()
@fix_dataclass_init_docs
@dataclasses.dataclass
class OptProblem:
    """A single inverse-design optimization problem.

    A problem is defined by parameters :code:`rho`, a JAX-transformable
    transform :code:`T` producing the permittivity :code:`eps = T(rho)`
    (e.g. smoothing and/or symmetry constraints), and a scalar cost
    :code:`C`. When both a simulation grid and a source are supplied, an
    FDFD solve :code:`S` is inserted so the overall objective becomes
    :code:`C(S(T(rho)))`; otherwise the cost is evaluated directly on the
    transformed parameters, :code:`C(T(rho))`.

    Args:
        transform_fn: JAX-transformable transform yielding epsilon from the
            raw parameters (identity-like if no shaping is needed).
        cost_fn: JAX-transformable cost function (mapping to a scalar)
            applied to the solver output, or, without a solver, directly to
            the transformed parameters.
        sim: Simulation grid used to build the FDFD solver (the FDFD step is
            skipped when this is :code:`None`).
        source: Source fed to the FDFD solver (the FDFD step is skipped when
            this is :code:`None`).
        metrics_fn: Optional callback returning a metrics dictionary from
            fields and the simulation grid, sampled at intervals configured
            by the optimization loop (ignored if :code:`None`).
    """
    transform_fn: Callable
    cost_fn: Callable
    sim: SimGrid
    source: str
    metrics_fn: Optional[Callable[[np.ndarray, SimGrid], Dict]] = None
    def __post_init__(self):
        # Build the rho -> solver-output function once up front: with a
        # source we run a full FDFD solve, otherwise only the transform.
        if self.source is None:
            self.fn = self.transform_fn
        else:
            self.fn = self.sim.get_sim_sparams_fn(self.source, self.transform_fn)
@fix_dataclass_init_docs
@dataclasses.dataclass
class OptViz:
    """An optimization visualization object

    An optimization visualization object consists of a plot for monitoring the
    history and current state of an optimization in real time.

    Args:
        cost_dmap: Cost dynamic map for streaming cost fn over time
        simulations_panels: Simulations panels for visualizing simulation results from last iteration
        costs_pipe: Costs pipe for streaming cost fn over time
        simulations_pipes: Simulations pipes of form :code:`eps, field, power`
            for visualizing simulation results from last iteration
        metrics_panels: Metrics panels for streaming metrics over time for each simulation (e.g. powers/power ratios)
        metrics_pipes: Metrics pipes for streaming metrics over time for each simulation
        metric_config: Metric config (a dictionary that describes how to plot/group the real-time metrics)
    """
    cost_dmap: "hv.DynamicMap"
    simulations_panels: Dict[str, "pn.layout.Panel"]
    costs_pipe: "Pipe"
    # Keyed by simulation name; each value is ordered (eps, field, power).
    simulations_pipes: Dict[str, Tuple["Pipe", "Pipe", "Pipe"]]
    metric_config: Optional[Dict[str, List[str]]] = None
    metrics_panels: Optional[Dict[str, "hv.DynamicMap"]] = None
    metrics_pipes: Optional[Dict[str, Dict[str, "Pipe"]]] = None
@fix_dataclass_init_docs
@dataclasses.dataclass
class OptRecord:
    r"""A record of an optimization run's history.

    Attributes:
        costs: Cost-function value at every iteration (we avoid the term
            "loss" since it could be confused with physical/optical loss).
        params: Final parameters (:math:`\rho`) that get transformed into the design.
        metrics: An xarray for metrics with dimensions :code:`name`, :code:`metric`, :code:`iteration`
        eps: An xarray for relative permittivity with dimensions :code:`name`, :code:`x`, :code:`y`
        fields: An xarray for a selected field component with dimensions :code:`name`, :code:`x`, :code:`y`
    """
    costs: np.ndarray
    params: jnp.ndarray
    metrics: xarray.DataArray
    eps: xarray.DataArray
    fields: xarray.DataArray
def opt_run(opt_problem: Union[OptProblem, List[OptProblem]], init_params: np.ndarray, num_iters: int,
            pbar: Optional[Callable] = None, step_size: float = 1, viz_interval: int = 0, metric_interval: int = 0,
            viz: Optional[OptViz] = None, backend: str = 'cpu',
            eps_interval: int = 0, field_interval: int = 0) -> OptRecord:
    """Run the optimization.
    The optimization can be done over multiple simulations as long as those simulations
    share the same set of params provided by :code:`init_params`.
    Args:
        opt_problem: An :code:`OptProblem` or list of :code:`OptProblem`'s. If a list is provided,
            the optimization optimizes the sum of all objective functions.
            If the user wants to weight the objective functions, weights must be included in the objective function
            definition itself, but we may provide support for this feature at a later time if needed.
        init_params: Initial parameters for the optimizer (:code:`eps` if :code:`None`)
        num_iters: Number of iterations to run
        pbar: Progress bar to keep track of optimization progress with ideally a simple tqdm interface
        step_size: For the Adam update, specify the step size needed.
        viz_interval: The optimization intermediate results are recorded every :code:`record_interval` steps
            (default of 0 means do not visualize anything)
        metric_interval: The interval over which a recorded object (e.g. metric, param)
            are recorded in a given :code:`OptProblem` (default of 0 means do not record anything).
        viz: The :code:`OptViz` object required for visualizing the optimization in real time.
        backend: Recommended backend for :code:`ndim == 2` is :code:`'cpu'` and :code:`ndim == 3` is :code:`'gpu'`
        eps_interval: Whether to record the eps at the specified :code:`eps_interval`.
            Beware, this can use up a lot of memory during the opt so use judiciously.
        field_interval: Whether to record the field at the specified :code:`field_interval`.
            Beware, this can use up a lot of memory during the opt so use judiciously.
    Returns:
        An :code:`OptRecord` with the cost history, final params, and any recorded metrics/eps/fields.
    """
    opt_init, opt_update, get_params = adam(step_size=step_size)
    opt_state = opt_init(init_params)
    # define opt_problems
    opt_problems = [opt_problem] if isinstance(opt_problem, OptProblem) else opt_problem
    n_problems = len(opt_problems)
    # opt problems that include both an FDFD sim and a source sim
    sim_opt_problems = [op for op in opt_problems if op.sim is not None and op.source is not None]
    if viz is not None:
        if not len(viz.simulations_pipes) == len(sim_opt_problems):
            raise ValueError("Number of viz_pipes must match number of opt problems")
    # Define the simulation and objective function acting on parameters rho
    solve_fn = [None if (op.source is None or op.sim is None) else op.fn for op in opt_problems]
    def overall_cost_fn(rho: jnp.ndarray):
        # Average the per-problem objectives; keep each problem's aux output.
        evals = [op.cost_fn(s(rho)) if s is not None else op.cost_fn(rho) for op, s in zip(opt_problems, solve_fn)]
        return jnp.array([obj for obj, _ in evals]).sum() / n_problems, [aux for _, aux in evals]
    # Define a compiled update step
    def step_(current_step, state):
        vaux, g = jax.value_and_grad(overall_cost_fn, has_aux=True)(get_params(state))
        v, aux = vaux
        return v, opt_update(current_step, g, state), aux
    def _update_eps(state):
        # Push the current (gradient-detached) design into each sim grid.
        rho = get_params(state)
        for op in opt_problems:
            op.sim.eps = np.asarray(jax.lax.stop_gradient(op.transform_fn(rho)))
    step = jax.jit(step_, backend=backend)
    iterator = pbar(range(num_iters)) if pbar is not None else range(num_iters)
    costs = []
    history = defaultdict(list)
    for i in iterator:
        v, opt_state, data = step(i, opt_state)
        _update_eps(opt_state)
        for sop, sparams_fields in zip(sim_opt_problems, data):
            sim = sop.sim
            sparams, e, h = sim.decorate(*sparams_fields)
            hz = np.asarray(h[2]).squeeze().T
            if viz_interval > 0 and i % viz_interval == 0 and viz is not None:
                # Stream normalized eps, field, and power frames to the panels.
                eps_pipe, field_pipe, power_pipe = viz.simulations_pipes[sim.name]
                eps_pipe.send((sim.eps.T - np.min(sim.eps)) / (np.max(sim.eps) - np.min(sim.eps)))
                field_pipe.send(hz.real / np.max(hz.real))
                power = np.abs(hz) ** 2
                power_pipe.send(power / np.max(power))
            if metric_interval > 0 and i % metric_interval == 0 and viz is not None:
                metrics = sop.metrics_fn(sparams)
                for metric_name, metric_value in metrics.items():
                    history[f'{metric_name}/{sop.sim.name}'].append(metric_value)
                for title in viz.metrics_pipes[sop.sim.name]:
                    viz.metrics_pipes[sop.sim.name][title].send(
                        xarray.DataArray(
                            data=np.asarray([history[f'{metric_name}/{sop.sim.name}']
                                        for metric_name in viz.metric_config[title]]),
                            coords={
                                'metric': viz.metric_config[title],
                                'iteration': np.arange(i + 1)
                            },
                            dims=['metric', 'iteration'],
                            name=title
                        )
                    )
            if eps_interval > 0 and i % eps_interval == 0:
                history[f'eps/{sop.sim.name}'].append((i, sop.sim.eps))
            if field_interval > 0 and i % field_interval == 0:
                history[f'field/{sop.sim.name}'].append((i, hz.T))
        # Bug fix: only a tqdm-like iterator has set_description -- a plain
        # range (pbar is None) would raise AttributeError here.
        if pbar is not None:
            iterator.set_description(f"𝓛: {v:.5f}")
        costs.append(jax.lax.stop_gradient(v))
        if viz is not None:
            viz.costs_pipe.send(np.asarray(costs))
    _update_eps(opt_state)
    # Bug fix: guard the viz dereference -- viz-less runs previously crashed
    # here with AttributeError on viz.metric_config.
    all_metric_names = sum([metric_names for _, metric_names in viz.metric_config.items()], []) if viz is not None else []
    # NOTE(review): metric history grows once per metric_interval while the
    # 'iteration' coord spans num_iters; lengths only match when the
    # interval is 1 -- confirm intended usage.
    metrics = xarray.DataArray(
        data=np.array([[history[f'{metric_name}/{sop.sim.name}']
                   for metric_name in all_metric_names] for sop in sim_opt_problems]),
        coords={
            'name': [sop.sim.name for sop in sim_opt_problems],
            'metric': all_metric_names,
            'iteration': np.arange(num_iters)
        },
        dims=['name', 'metric', 'iteration'],
        name='metrics'
    ) if sim_opt_problems and metric_interval != 0 else []
    eps = xarray.DataArray(
        data=np.array([[eps for _, eps in history[f'eps/{sop.sim.name}']] if eps_interval > 0 else []
                  for sop in sim_opt_problems]),
        coords={
            'name': [sop.sim.name for sop in sim_opt_problems],
            'iteration': [it for it, _ in history[f'eps/{sim_opt_problems[0].sim.name}']],
            'x': np.arange(sim_opt_problems[0].sim.shape[0]),
            'y': np.arange(sim_opt_problems[0].sim.shape[1]),
        },
        dims=['name', 'iteration', 'x', 'y'],
        name='eps'
    ) if sim_opt_problems and eps_interval != 0 else []
    fields = xarray.DataArray(
        data=np.asarray([[field for _, field in history[f'field/{sop.sim.name}']] if field_interval > 0 else []
                    for sop in sim_opt_problems]),
        coords={
            'name': [sop.sim.name for sop in sim_opt_problems],
            'iteration': [it for it, _ in history[f'field/{sim_opt_problems[0].sim.name}']],
            'x': np.arange(sim_opt_problems[0].sim.shape[0]),
            'y': np.arange(sim_opt_problems[0].sim.shape[1]),
        },
        dims=['name', 'iteration', 'x', 'y'],
        name='fields'
    ) if sim_opt_problems and field_interval != 0 else []
    return OptRecord(costs=np.asarray(costs), params=get_params(opt_state), metrics=metrics, eps=eps, fields=fields)
def opt_viz(opt_problem: Union[OptProblem, List[OptProblem]], metric_config: Dict[str, List[str]]) -> OptViz:
    """Build the real-time visualization panels and pipes for an optimization.

    Args:
        opt_problem: An :code:`OptProblem` or list of :code:`OptProblem`'s.
        metric_config: Titles mapped to lists of metric names to overlay per plot.

    Returns:
        An :code:`OptViz` bundling the cost curve, per-simulation panels, and
        the streaming pipes that :code:`opt_run` feeds.
    """
    if isinstance(opt_problem, OptProblem):
        problems = [opt_problem]
    else:
        problems = opt_problem
    # Only problems with both a sim and a source get simulation panels.
    panel_pipes = {op.sim.name: op.sim.viz_panel()
                   for op in problems if op.sim is not None and op.source is not None}
    costs_pipe = Pipe(data=[])
    metric_panel_pipes = {op.sim.name: scalar_metrics_viz(metric_config=metric_config)
                          for op in problems if op.sim is not None and op.source is not None}
    cost_curve = hv.DynamicMap(hv.Curve, streams=[costs_pipe]).opts(title='Cost Fn (𝓛)')
    return OptViz(
        cost_dmap=cost_curve,
        simulations_panels={name: pair[0] for name, pair in panel_pipes.items()},
        costs_pipe=costs_pipe,
        simulations_pipes={name: pair[1] for name, pair in panel_pipes.items()},
        metrics_panels={name: pair[0] for name, pair in metric_panel_pipes.items()},
        metrics_pipes={name: pair[1] for name, pair in metric_panel_pipes.items()},
        metric_config=metric_config
    )
| [
"jax.numpy.array",
"holoviews.DynamicMap",
"numpy.abs",
"jax.jit",
"jax.lax.stop_gradient",
"numpy.asarray",
"collections.defaultdict",
"numpy.max",
"numpy.min",
"numpy.array",
"jax.value_and_grad",
"numpy.arange",
"jax.experimental.optimizers.adam",
"holoviews.streams.Pipe",
"jax.config... | [((547, 577), 'jax.config.config.parse_flags_with_absl', 'config.parse_flags_with_absl', ([], {}), '()\n', (575, 577), False, 'from jax.config import config\n'), ((7264, 7289), 'jax.experimental.optimizers.adam', 'adam', ([], {'step_size': 'step_size'}), '(step_size=step_size)\n', (7268, 7289), False, 'from jax.experimental.optimizers import adam\n'), ((8682, 8713), 'jax.jit', 'jax.jit', (['step_'], {'backend': 'backend'}), '(step_, backend=backend)\n', (8689, 8713), False, 'import jax\n'), ((8825, 8842), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8836, 8842), False, 'from collections import defaultdict\n'), ((13656, 13669), 'holoviews.streams.Pipe', 'Pipe', ([], {'data': '[]'}), '(data=[])\n', (13660, 13669), False, 'from holoviews.streams import Pipe\n'), ((8347, 8396), 'jax.value_and_grad', 'jax.value_and_grad', (['overall_cost_fn'], {'has_aux': '(True)'}), '(overall_cost_fn, has_aux=True)\n', (8365, 8396), False, 'import jax\n'), ((10858, 10882), 'jax.lax.stop_gradient', 'jax.lax.stop_gradient', (['v'], {}), '(v)\n', (10879, 10882), False, 'import jax\n'), ((12859, 12876), 'numpy.asarray', 'np.asarray', (['costs'], {}), '(costs)\n', (12869, 12876), True, 'import numpy as np\n'), ((10944, 10961), 'numpy.asarray', 'np.asarray', (['costs'], {}), '(costs)\n', (10954, 10961), True, 'import numpy as np\n'), ((11132, 11254), 'numpy.array', 'np.array', (["[[history[f'{metric_name}/{sop.sim.name}'] for metric_name in\n all_metric_names] for sop in sim_opt_problems]"], {}), "([[history[f'{metric_name}/{sop.sim.name}'] for metric_name in\n all_metric_names] for sop in sim_opt_problems])\n", (11140, 11254), True, 'import numpy as np\n'), ((11625, 11750), 'numpy.array', 'np.array', (["[([eps for _, eps in history[f'eps/{sop.sim.name}']] if eps_interval > 0 else\n []) for sop in sim_opt_problems]"], {}), "([([eps for _, eps in history[f'eps/{sop.sim.name}']] if \n eps_interval > 0 else []) for sop in sim_opt_problems])\n", (11633, 
11750), True, 'import numpy as np\n'), ((12242, 12377), 'numpy.asarray', 'np.asarray', (["[([field for _, field in history[f'field/{sop.sim.name}']] if \n field_interval > 0 else []) for sop in sim_opt_problems]"], {}), "([([field for _, field in history[f'field/{sop.sim.name}']] if \n field_interval > 0 else []) for sop in sim_opt_problems])\n", (12252, 12377), True, 'import numpy as np\n'), ((9484, 9494), 'numpy.abs', 'np.abs', (['hz'], {}), '(hz)\n', (9490, 9494), True, 'import numpy as np\n'), ((11424, 11444), 'numpy.arange', 'np.arange', (['num_iters'], {}), '(num_iters)\n', (11433, 11444), True, 'import numpy as np\n'), ((11959, 12002), 'numpy.arange', 'np.arange', (['sim_opt_problems[0].sim.shape[0]'], {}), '(sim_opt_problems[0].sim.shape[0])\n', (11968, 12002), True, 'import numpy as np\n'), ((12021, 12064), 'numpy.arange', 'np.arange', (['sim_opt_problems[0].sim.shape[1]'], {}), '(sim_opt_problems[0].sim.shape[1])\n', (12030, 12064), True, 'import numpy as np\n'), ((12588, 12631), 'numpy.arange', 'np.arange', (['sim_opt_problems[0].sim.shape[0]'], {}), '(sim_opt_problems[0].sim.shape[0])\n', (12597, 12631), True, 'import numpy as np\n'), ((12650, 12693), 'numpy.arange', 'np.arange', (['sim_opt_problems[0].sim.shape[1]'], {}), '(sim_opt_problems[0].sim.shape[1])\n', (12659, 12693), True, 'import numpy as np\n'), ((13896, 13941), 'holoviews.DynamicMap', 'hv.DynamicMap', (['hv.Curve'], {'streams': '[costs_pipe]'}), '(hv.Curve, streams=[costs_pipe])\n', (13909, 13941), True, 'import holoviews as hv\n'), ((8173, 8209), 'jax.numpy.array', 'jnp.array', (['[obj for obj, _ in evals]'], {}), '([obj for obj, _ in evals])\n', (8182, 8209), True, 'import jax.numpy as jnp\n'), ((9111, 9127), 'numpy.asarray', 'np.asarray', (['h[2]'], {}), '(h[2])\n', (9121, 9127), True, 'import numpy as np\n'), ((9443, 9458), 'numpy.max', 'np.max', (['hz.real'], {}), '(hz.real)\n', (9449, 9458), True, 'import numpy as np\n'), ((9540, 9553), 'numpy.max', 'np.max', (['power'], {}), 
'(power)\n', (9546, 9553), True, 'import numpy as np\n'), ((9345, 9360), 'numpy.min', 'np.min', (['sim.eps'], {}), '(sim.eps)\n', (9351, 9360), True, 'import numpy as np\n'), ((9365, 9380), 'numpy.max', 'np.max', (['sim.eps'], {}), '(sim.eps)\n', (9371, 9380), True, 'import numpy as np\n'), ((9383, 9398), 'numpy.min', 'np.min', (['sim.eps'], {}), '(sim.eps)\n', (9389, 9398), True, 'import numpy as np\n'), ((10040, 10143), 'numpy.asarray', 'np.asarray', (["[history[f'{metric_name}/{sop.sim.name}'] for metric_name in viz.\n metric_config[title]]"], {}), "([history[f'{metric_name}/{sop.sim.name}'] for metric_name in viz\n .metric_config[title]])\n", (10050, 10143), True, 'import numpy as np\n'), ((10335, 10351), 'numpy.arange', 'np.arange', (['(i + 1)'], {}), '(i + 1)\n', (10344, 10351), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import os
import sys
from keras.models import load_model
from data_loaders.SpectrogramGenerator import SpectrogramGenerator
# Language codes in the order the network's softmax outputs are indexed.
# NOTE(review): 6 labels here vs. "num_classes": 4 in predict()'s config --
# only the first entries can ever be reported; confirm the intended count.
class_labels = ["EN", "DE", "FR", "ES", "CN", "RU"]
def predict(cli_args):
    """Run the language-ID model over the input file's spectrogram frames.

    Prints the per-frame class indices, the label of the averaged
    prediction, and the averaged probabilities; returns the raw
    per-frame probability matrix.

    NOTE(review): config says num_classes=4 but class_labels lists 6
    entries — confirm which one matches the trained model.
    """
    config = {"pixel_per_second": 50, "input_shape": [129, 500, 1], "num_classes": 4}
    generator = SpectrogramGenerator(cli_args.input_file, config, shuffle=False, run_only_once=True).get_generator()
    # Scale pixel values from [0, 255] into [0, 1] and batch the frames.
    frames = np.stack([np.divide(frame, 255.0) for frame in generator])
    model = load_model(cli_args.model_dir)
    probabilities = model.predict(frames)
    classes = np.argmax(probabilities, axis=1)
    average_prob = np.mean(probabilities, axis=0)
    average_class = np.argmax(average_prob)
    print(classes, class_labels[average_class], average_prob)
    return probabilities
if __name__ == "__main__":
    # Command-line entry point: --model points at a saved Keras model,
    # --input at the audio file to classify.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', dest='model_dir', required=True)
    parser.add_argument('--input', dest='input_file', required=True)
    cli_args = parser.parse_args()
    # Refuse anything that is not an existing regular file before loading the model.
    if not os.path.isfile(cli_args.input_file):
        sys.exit("Input is not a file.")
    predict(cli_args)
| [
"numpy.stack",
"keras.models.load_model",
"numpy.divide",
"argparse.ArgumentParser",
"numpy.argmax",
"os.path.isfile",
"numpy.mean",
"data_loaders.SpectrogramGenerator.SpectrogramGenerator",
"sys.exit"
] | [((522, 536), 'numpy.stack', 'np.stack', (['data'], {}), '(data)\n', (530, 536), True, 'import numpy as np\n'), ((573, 603), 'keras.models.load_model', 'load_model', (['cli_args.model_dir'], {}), '(cli_args.model_dir)\n', (583, 603), False, 'from keras.models import load_model\n'), ((660, 692), 'numpy.argmax', 'np.argmax', (['probabilities'], {'axis': '(1)'}), '(probabilities, axis=1)\n', (669, 692), True, 'import numpy as np\n'), ((712, 742), 'numpy.mean', 'np.mean', (['probabilities'], {'axis': '(0)'}), '(probabilities, axis=0)\n', (719, 742), True, 'import numpy as np\n'), ((763, 786), 'numpy.argmax', 'np.argmax', (['average_prob'], {}), '(average_prob)\n', (772, 786), True, 'import numpy as np\n'), ((917, 942), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (940, 942), False, 'import argparse\n'), ((458, 481), 'numpy.divide', 'np.divide', (['image', '(255.0)'], {}), '(image, 255.0)\n', (467, 481), True, 'import numpy as np\n'), ((1127, 1162), 'os.path.isfile', 'os.path.isfile', (['cli_args.input_file'], {}), '(cli_args.input_file)\n', (1141, 1162), False, 'import os\n'), ((1172, 1204), 'sys.exit', 'sys.exit', (['"""Input is not a file."""'], {}), "('Input is not a file.')\n", (1180, 1204), False, 'import sys\n'), ((345, 433), 'data_loaders.SpectrogramGenerator.SpectrogramGenerator', 'SpectrogramGenerator', (['cli_args.input_file', 'config'], {'shuffle': '(False)', 'run_only_once': '(True)'}), '(cli_args.input_file, config, shuffle=False,\n run_only_once=True)\n', (365, 433), False, 'from data_loaders.SpectrogramGenerator import SpectrogramGenerator\n')] |
#!/usr/bin/python
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import numpy as np
import cv2
import os
import sys
import datetime
import config
def main():
    """Live object-detection demo: capture webcam frames, run a Haar
    cascade classifier, draw markers and a zoom bar, and display the
    result until ESC is pressed or the capture fails.
    """
    # Display splash screen
    print(config.splash_screen)
    # Set up the video capture (device 0 = first attached camera)
    video_capture = cv2.VideoCapture(0)
    if not video_capture.isOpened():
        print("ERROR: Could not open video capture device")
        sys.exit(1)
    # Set up the cascade classifier
    if not os.path.isfile(config.CLASSIFIER_PATH):
        print("ERROR: Cannot open classifier file")
        sys.exit(2)
    classifier = cv2.CascadeClassifier(config.CLASSIFIER_PATH)
    # Create a resizable window.
    # NOTE(review): cv2.cv.CV_WINDOW_NORMAL is the legacy OpenCV 1.x API
    # (removed in OpenCV 3+); modern builds use cv2.WINDOW_NORMAL — confirm
    # the target OpenCV version before changing it.
    cv2.namedWindow(config.MAIN_WINDOW, cv2.cv.CV_WINDOW_NORMAL)
    # Capture loop
    while video_capture.isOpened():
        success, image = video_capture.read()
        if not success:
            print("ERROR: Could not read from video capture device")
            break
        # Rescale to IMAGE_WIDTH, preserving the aspect ratio
        aspect_ratio = image.shape[0] / float(image.shape[1])
        image_size = (config.IMAGE_WIDTH, int(aspect_ratio * config.IMAGE_WIDTH))
        image = cv2.resize(image, image_size)
        # Detect on the grayscale frame
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        objects = classifier.detectMultiScale(gray, **config.DETECT_ARGS)
        time_string = str(datetime.datetime.now())
        # Extract zoomed crops of the first NUM_ZOOMS detections
        zoom_images = []
        for x, y, w, h in objects[:config.NUM_ZOOMS]:
            zoom_image = image[y : y + h, x : x + w, :].copy()
            zoom_image = cv2.resize(zoom_image, (config.ZOOM_SIZE, config.ZOOM_SIZE))
            zoom_images.append(zoom_image)
        # Draw markers (1-based numbering for the on-screen labels)
        num_objects = len(objects)
        for num, box in enumerate(objects, start=1):
            x, y, w, h = box
            # Draw circle centered on the detection box
            center = (int(x + w/2), int(y + h/2))
            scale = config.MARKER_SCALE
            radius = int(scale * min(w, h))
            cv2.circle(image, center, radius, config.MARKER_COLOR, config.MARKER_THICK)
            # Write the detection number just outside the circle
            text_pos = (int(center[0] + scale * w), int(center[1] + scale * h))
            text_msg = "{}".format(num)
            cv2.putText(image, text_msg, text_pos, cv2.FONT_HERSHEY_SIMPLEX, 1, config.MARKER_COLOR, 3)
            # Status message on stdout
            format_args = (time_string, num, num_objects, center[0], center[1])
            print("{}: Detected object {}/{} at (x, y) = ({}, {})".format(*format_args))
        if num_objects > 0:
            print()
        # Display zoom bar: overlay the crops along the top-right edge
        if len(zoom_images) > 0:
            zoom_bar = np.hstack(zoom_images)
            zoom_h, zoom_w = zoom_bar.shape[:2]
            image[:zoom_h, -zoom_w:] = zoom_bar
        # Display the resulting image
        cv2.imshow(config.MAIN_WINDOW, image)
        # Waiting for escape key
        if cv2.waitKey(1) == config.ESC_KEY:
            break
    # Clean up
    video_capture.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| [
"cv2.resize",
"cv2.circle",
"cv2.putText",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"numpy.hstack",
"cv2.VideoCapture",
"cv2.namedWindow",
"os.path.isfile",
"cv2.CascadeClassifier",
"cv2.destroyAllWindows",
"datetime.datetime.now",
"sys.exit"
] | [((1378, 1397), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1394, 1397), False, 'import cv2\n'), ((1693, 1738), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['config.CLASSIFIER_PATH'], {}), '(config.CLASSIFIER_PATH)\n', (1714, 1738), False, 'import cv2\n'), ((1780, 1840), 'cv2.namedWindow', 'cv2.namedWindow', (['config.MAIN_WINDOW', 'cv2.cv.CV_WINDOW_NORMAL'], {}), '(config.MAIN_WINDOW, cv2.cv.CV_WINDOW_NORMAL)\n', (1795, 1840), False, 'import cv2\n'), ((4123, 4146), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4144, 4146), False, 'import cv2\n'), ((1503, 1514), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1511, 1514), False, 'import sys\n'), ((1563, 1601), 'os.path.isfile', 'os.path.isfile', (['config.CLASSIFIER_PATH'], {}), '(config.CLASSIFIER_PATH)\n', (1577, 1601), False, 'import os\n'), ((1663, 1674), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1671, 1674), False, 'import sys\n'), ((2248, 2277), 'cv2.resize', 'cv2.resize', (['image', 'image_size'], {}), '(image, image_size)\n', (2258, 2277), False, 'import cv2\n'), ((2311, 2350), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2323, 2350), False, 'import cv2\n'), ((3940, 3977), 'cv2.imshow', 'cv2.imshow', (['config.MAIN_WINDOW', 'image'], {}), '(config.MAIN_WINDOW, image)\n', (3950, 3977), False, 'import cv2\n'), ((2451, 2474), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2472, 2474), False, 'import datetime\n'), ((2676, 2736), 'cv2.resize', 'cv2.resize', (['zoom_image', '(config.ZOOM_SIZE, config.ZOOM_SIZE)'], {}), '(zoom_image, (config.ZOOM_SIZE, config.ZOOM_SIZE))\n', (2686, 2736), False, 'import cv2\n'), ((3115, 3190), 'cv2.circle', 'cv2.circle', (['image', 'center', 'radius', 'config.MARKER_COLOR', 'config.MARKER_THICK'], {}), '(image, center, radius, config.MARKER_COLOR, config.MARKER_THICK)\n', (3125, 3190), False, 'import cv2\n'), ((3350, 3446), 'cv2.putText', 
'cv2.putText', (['image', 'text_msg', 'text_pos', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', 'config.MARKER_COLOR', '(3)'], {}), '(image, text_msg, text_pos, cv2.FONT_HERSHEY_SIMPLEX, 1, config.\n MARKER_COLOR, 3)\n', (3361, 3446), False, 'import cv2\n'), ((3774, 3796), 'numpy.hstack', 'np.hstack', (['zoom_images'], {}), '(zoom_images)\n', (3783, 3796), True, 'import numpy as np\n'), ((4023, 4037), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4034, 4037), False, 'import cv2\n')] |
import cv2
import numpy as np
from lantz.core import Action, Driver, Feat
class USBCam(Driver):
    """Lantz driver wrapping an OpenCV VideoCapture USB camera.

    Frames are returned as grayscale numpy arrays; optional vertical /
    horizontal flips and 90-degree-step rotation are applied on read.
    """
    def __init__(self, device_id):
        # device_id: index handed to cv2.VideoCapture (0 = first camera).
        self.device_id = device_id
        self._flipud = False
        self._fliplr = False
        self._rotation = 0
        return
    def initialize(self):
        """Open the capture device."""
        self.capture = cv2.VideoCapture(self.device_id)
        return
    def finalize(self):
        """Release the capture device."""
        self.capture.release()
        return
    @Feat(values={0, 90, 180, 270})
    def rotation(self):
        """Counter-clockwise rotation applied to each frame, in degrees."""
        return self._rotation
    @rotation.setter
    def rotation(self, value):
        self._rotation = value
        return
    @Feat(values={True, False})
    def flipud(self):
        """Whether frames are flipped vertically before rotation."""
        return self._flipud
    @flipud.setter
    def flipud(self, value):
        self._flipud = value
        return
    @Feat(values={True, False})
    def fliplr(self):
        """Whether frames are flipped horizontally before rotation."""
        return self._fliplr
    @fliplr.setter
    def fliplr(self, value):
        self._fliplr = value
        return
    @Action()
    def get_frame(self):
        """Grab one frame, convert to grayscale, then apply flips and rotation."""
        img = self.capture.read()[1]
        array = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if self._flipud:
            array = np.flipud(array)
        if self._fliplr:
            array = np.fliplr(array)
        # rot90 rotates in 90-degree increments; _rotation is limited to
        # {0, 90, 180, 270} by the Feat declaration above.
        array = np.rot90(array, k=int(self._rotation / 90))
        return array
| [
"lantz.core.Feat",
"cv2.cvtColor",
"numpy.flipud",
"cv2.VideoCapture",
"numpy.fliplr",
"lantz.core.Action"
] | [((444, 474), 'lantz.core.Feat', 'Feat', ([], {'values': '{0, 90, 180, 270}'}), '(values={0, 90, 180, 270})\n', (448, 474), False, 'from lantz.core import Action, Driver, Feat\n'), ((634, 660), 'lantz.core.Feat', 'Feat', ([], {'values': '{True, False}'}), '(values={True, False})\n', (638, 660), False, 'from lantz.core import Action, Driver, Feat\n'), ((810, 836), 'lantz.core.Feat', 'Feat', ([], {'values': '{True, False}'}), '(values={True, False})\n', (814, 836), False, 'from lantz.core import Action, Driver, Feat\n'), ((986, 994), 'lantz.core.Action', 'Action', ([], {}), '()\n', (992, 994), False, 'from lantz.core import Action, Driver, Feat\n'), ((319, 351), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.device_id'], {}), '(self.device_id)\n', (335, 351), False, 'import cv2\n'), ((1073, 1110), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1085, 1110), False, 'import cv2\n'), ((1156, 1172), 'numpy.flipud', 'np.flipud', (['array'], {}), '(array)\n', (1165, 1172), True, 'import numpy as np\n'), ((1218, 1234), 'numpy.fliplr', 'np.fliplr', (['array'], {}), '(array)\n', (1227, 1234), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: <NAME>(<EMAIL>)
# Segmentation running score.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class RunningScore(object):
    """Accumulates a confusion matrix over batches of segmentation
    predictions and derives standard evaluation scores from it."""

    def __init__(self, configer):
        self.configer = configer
        self.n_classes = self.configer.get('data', 'num_classes')
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))

    def _fast_hist(self, label_true, label_pred, n_class):
        """Confusion matrix (rows = ground truth, cols = prediction) for one pair."""
        valid = (label_true >= 0) & (label_true < n_class)
        flat_index = n_class * label_true[valid].astype(int) + label_pred[valid]
        counts = np.bincount(flat_index, minlength=n_class ** 2)
        return counts.reshape(n_class, n_class)

    def update(self, label_preds, label_trues):
        """Fold a batch of (prediction, ground-truth) label maps into the running matrix."""
        for truth, pred in zip(label_trues, label_preds):
            self.confusion_matrix += self._fast_hist(
                truth.flatten(), pred.flatten(), self.n_classes)

    def _get_scores(self):
        """Returns accuracy score evaluation result.

        - overall accuracy
        - mean accuracy
        - mean IU
        - fwavacc
        """
        cm = self.confusion_matrix
        diagonal = np.diag(cm)
        gt_per_class = cm.sum(axis=1)
        pred_per_class = cm.sum(axis=0)
        total = cm.sum()
        acc = diagonal.sum() / total
        acc_cls = np.nanmean(diagonal / gt_per_class)
        iu = diagonal / (gt_per_class + pred_per_class - diagonal)
        mean_iu = np.nanmean(iu)
        freq = gt_per_class / total
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))
        return acc, acc_cls, fwavacc, mean_iu, cls_iu

    def get_mean_iou(self):
        """Mean intersection-over-union across classes."""
        return self._get_scores()[3]

    def get_pixel_acc(self):
        """Overall pixel accuracy."""
        return self._get_scores()[0]

    def reset(self):
        """Clear the accumulated confusion matrix."""
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
| [
"numpy.diag",
"numpy.zeros",
"numpy.nanmean"
] | [((428, 470), 'numpy.zeros', 'np.zeros', (['(self.n_classes, self.n_classes)'], {}), '((self.n_classes, self.n_classes))\n', (436, 470), True, 'import numpy as np\n'), ((1317, 1336), 'numpy.nanmean', 'np.nanmean', (['acc_cls'], {}), '(acc_cls)\n', (1327, 1336), True, 'import numpy as np\n'), ((1438, 1452), 'numpy.nanmean', 'np.nanmean', (['iu'], {}), '(iu)\n', (1448, 1452), True, 'import numpy as np\n'), ((1850, 1892), 'numpy.zeros', 'np.zeros', (['(self.n_classes, self.n_classes)'], {}), '((self.n_classes, self.n_classes))\n', (1858, 1892), True, 'import numpy as np\n'), ((1266, 1279), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (1273, 1279), True, 'import numpy as np\n'), ((1350, 1363), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (1357, 1363), True, 'import numpy as np\n'), ((1405, 1418), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (1412, 1418), True, 'import numpy as np\n'), ((1215, 1228), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (1222, 1228), True, 'import numpy as np\n')] |
import os
import io
import shutil
import math
import random
import numpy as np
import requests
import urllib.request
from PIL import Image, ImageDraw
from bs4 import BeautifulSoup
import imghdr
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from keras.models import Model
from keras.preprocessing.image import load_img
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import VGG16
from yellowbrick.cluster import KElbowVisualizer
import matplotlib.pyplot as plt
ALLOWED_EXTENSIONS = set(['.png', '.jpg', '.jpeg'])
def initiat_exchange(folder):
    """Ensure the exchange directory *folder* exists (idempotent).

    The (misspelled) public name is kept so existing callers keep working.
    """
    # exist_ok avoids the check-then-create race of the original
    # isdir() + mkdir() pair, and also creates missing parent dirs.
    os.makedirs(folder, exist_ok=True)
def del_prev_session(folder):
    """Reset *folder* to an empty directory, discarding any previous session data."""
    previous_exists = os.path.isdir(folder)
    if previous_exists:
        shutil.rmtree(folder)
    os.mkdir(folder)
def get_image_from_url(url_img, search_dir):
    """Download the image at *url_img* into *search_dir* as '<index>.<ext>'.

    The file is fetched once into a temporary name, its actual image type
    is sniffed from the downloaded bytes, and it is then renamed into
    place. (The original implementation downloaded the same URL twice:
    once to sniff the type and once to store the file.)
    """
    temp_path = os.path.join(search_dir, "temp.jpeg")
    urllib.request.urlretrieve(url_img, temp_path)
    # NOTE(review): imghdr.what returns None for unrecognized content,
    # which makes the '.' + extension concatenation below raise — the same
    # failure mode as the original; callers swallow these errors.
    extension = imghdr.what(temp_path)
    # The temp file is still present, so len(listdir) already equals the
    # next running index (the original counted after deleting the temp file).
    target = os.path.join(search_dir, str(len(os.listdir(search_dir))) + "." + extension)
    os.replace(temp_path, target)
def get_images_from_web(searchTerm, search_dir):
    """Scrape image results for *searchTerm* from Yahoo, Google and Bing.

    Every <img> tag with an http(s) source and every direct link to a
    .jpg/.jpeg/.png file found on the result pages is downloaded into
    *search_dir*. Individual download failures are ignored so one bad
    URL does not abort the crawl.
    """
    url_list = "https://images.search.yahoo.com/search/images;?p={}", "https://www.google.com/search?q={}&site=webhp&tbm=isch", "https://www.bing.com/images/search?q={}&scope=images"
    # BUG FIX: this was a tuple, so done.append(...) raised AttributeError
    # (silently swallowed by the bare except) and de-duplication never worked.
    done = []
    for url in url_list:
        searchUrl = url.format(searchTerm)
        d = requests.get(searchUrl).text
        soup = BeautifulSoup(d, 'html.parser')
        for img in soup.find_all('img'):
            try:
                attrs = img.attrs
                url_img = None
                if 'src' in attrs and img['src'].startswith("http"):
                    url_img = img['src']
                elif 'src2' in attrs and img['src2'].startswith("http"):
                    url_img = img['src2']
                # Only fetch real, not-yet-seen URLs (the original also tried
                # to fetch the placeholder value when no source was found).
                if url_img is not None and url_img not in done:
                    get_image_from_url(url_img, search_dir)
                    done.append(url_img)
            except Exception:
                pass
        for linkTag in soup.findAll('a'):
            try:
                linkUrl = linkTag['href']
                # BUG FIX: the original mixed 'or' and 'and' so the
                # duplicate check only applied to .png links.
                if linkUrl.startswith("http") and linkUrl not in done:
                    if linkUrl.endswith((".jpg", ".jpeg", ".png")):
                        get_image_from_url(linkUrl, search_dir)
                        done.append(linkUrl)
            except Exception:
                pass
def create_image_from_input(directory):
    """Tile every allowed image in *directory* into one square contact
    sheet of 100x100 thumbnails and return it as a PNG BytesIO buffer."""
    leftover = os.path.join(directory, "temp.jpeg")
    if os.path.exists(leftover):
        os.remove(leftover)
    files = [entry.path
             for entry in os.scandir(directory)
             if os.path.splitext(entry.name)[1] in ALLOWED_EXTENSIONS]
    # Smallest square grid (in 100px cells) that holds every thumbnail.
    grid_size = math.ceil(math.sqrt(len(files))) * 100
    with Image.new('RGBA', (grid_size, grid_size), color="white") as sheet:
        index = 0
        exhausted = False
        for col in range(0, grid_size, 100):
            for row in range(0, grid_size, 100):
                with Image.open(files[index]) as thumb:
                    thumb.thumbnail((100, 100))
                    sheet.paste(thumb, (col, row))
                    index += 1
                if index >= len(files):
                    exhausted = True
                    break
            if exhausted:
                break
        buf = io.BytesIO()
        sheet.save(buf, format='PNG')
        return buf
def resize_img_static(image, size):
    """Open *image* and return it resized to *size* pixels wide,
    preserving the aspect ratio.
    """
    pil_image = Image.open(image)
    width, height = pil_image.size
    # Image.ANTIALIAS was deprecated and removed in Pillow 10; LANCZOS is
    # the same filter under its proper name (an alias since Pillow 2.7).
    pil_image = pil_image.resize((size, int(height * (size / width))), Image.LANCZOS)
    return pil_image
def remove_corrupt_images(input, output):
    """Re-encode every allowed image from *input* into *output* as PNG;
    files Pillow cannot open are reported and skipped."""
    for entry in os.scandir(input):
        ext = os.path.splitext(entry.name)[1]
        if ext not in ALLOWED_EXTENSIONS:
            continue
        png_name = entry.name.replace(ext, '.png')
        try:
            with Image.open(entry.path) as img:
                img.save(os.path.join(output, png_name), 'PNG')
        except:
            print('file ' + entry.name + ' skipped')
def remove_duplicate_images(img_dir):
    """Delete duplicate images from *img_dir*.

    Each image is normalised to 500px wide and fingerprinted; of every
    group of files with identical fingerprints only one is kept.
    """
    import hashlib  # local import keeps the fix self-contained

    keep = {}
    for image in os.scandir(img_dir):
        pil_image = resize_img_static(image.path, 500)
        # md5 of the raw pixel bytes: unlike the builtin hash(), the digest
        # is not salted per process and collisions are practically
        # impossible, so distinct images can no longer be deleted by accident.
        fingerprint = hashlib.md5(pil_image.tobytes()).hexdigest()
        keep[fingerprint] = image.path
        pil_image.close()
    survivors = set(keep.values())  # set lookup: O(1) instead of O(n) per file
    for image in os.scandir(img_dir):
        if image.path not in survivors:
            os.remove(image.path)
def extract_features(file, model):
    """Load *file* as a 224x224 RGB image and return the model's
    feature vector for it."""
    pixels = np.array(load_img(file, target_size=(224, 224)))
    batch = preprocess_input(pixels.reshape(1, 224, 224, 3))
    return model.predict(batch, use_multiprocessing=True)
def create_image_from_clusters(directory):
    """Build an overview image of the cluster sub-directories of *directory*.

    One 100px-wide column per sub-directory: the directory name drawn at
    the top, followed by up to 10 thumbnails from that cluster.
    Returns the overview as a PNG BytesIO buffer.
    """
    dirs = [os.path.join(directory, d) for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))]
    cols = len(dirs)
    cols = cols * 100
    # 1 header row + 10 thumbnail rows, 100px each.
    rows = 11 * 100
    with Image.new('RGBA', (cols, rows), color="white") as new_im:
        for i in range(0, cols, 100):
            cluster_path = dirs[int(i/100)]
            files = os.listdir(cluster_path)
            # Cluster name as a black label at the top of the column.
            ImageDraw.Draw(new_im).text((i, 0), os.path.basename(cluster_path), (0, 0, 0))
            l = 0
            for j in range(100, rows, 100):
                if l < len(files):
                    with Image.open(os.path.join(cluster_path, files[l])) as im:
                        im.thumbnail((100, 100))
                        new_im.paste(im, (i, j))
                        l += 1
        buf = io.BytesIO()
        new_im.save(buf, format='PNG')
        return buf
def clustering(path, amount_of_clusters, meth):
    """Cluster the .png images in *path* by visual similarity.

    Features come from the penultimate layer of a pretrained VGG16, are
    reduced with PCA, then grouped with KMeans; each image is moved into
    a 'cluster_<n>' sub-directory of *path*.

    Args:
        path: directory containing the .png images to cluster.
        amount_of_clusters: number of clusters, or None to pick it
            automatically via the elbow method.
        meth: if 'elbow', only the elbow diagnostic plot is produced and
            returned as a PNG buffer; otherwise the cluster overview
            image is returned.
    """
    # VGG16 as a feature extractor: drop the final classification layer.
    model = VGG16()
    model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
    # NOTE(review): os.chdir is a process-wide side effect; callers must
    # not rely on the working directory afterwards.
    os.chdir(path)
    with os.scandir(path) as files:
        images = [file.name for file in files if file.name.endswith('.png')]
    data = {}
    for image in images:
        feat = extract_features(image, model)
        data[image] = feat
    filenames = np.array(list(data.keys()))
    file_count = len(filenames)
    # Flatten the feature maps to (n_images, 4096) vectors.
    feat = np.array(list(data.values()))
    feat = feat.reshape(-1,4096)
    # PCA can use at most min(n_samples, n_features) components; cap at 100.
    if len(feat) > 100 and file_count > 100:
        components = 100
    else:
        components = min(len(feat), file_count)
    pca = PCA(n_components=components, random_state=22)
    pca.fit(feat)
    x = pca.transform(feat)
    if amount_of_clusters is None or meth == 'elbow':
        # Search k in [2, rounds) with the elbow heuristic.
        if file_count > 50:
            rounds = 50
        else:
            rounds = file_count
        model = KMeans()
        visualizer = KElbowVisualizer(model, k=(2, rounds), timings=False)
        visualizer.fit(x)
        if (meth == 'elbow'):
            # Only the elbow diagnostic plot was requested.
            buf = io.BytesIO()
            visualizer.show(outpath=buf, format='PNG')
            plt.gcf().clear()
            return buf
        else:
            amount_of_clusters = visualizer.elbow_value_
            plt.gcf().clear()
    kmeans = KMeans(n_clusters=amount_of_clusters, random_state=22)
    kmeans.fit(x)
    groups = {}
    # Move every image into its cluster directory, creating it on first use.
    for file, cluster in zip(filenames, kmeans.labels_):
        if cluster not in groups.keys():
            groups[cluster] = []
            groups[cluster].append(file)
            os.makedirs(os.path.join(path, 'cluster_' + str(cluster)))
        else:
            groups[cluster].append(file)
        shutil.move(os.path.join(path, file), os.path.join(path, 'cluster_' + str(cluster), file))
    return create_image_from_clusters(path)
def get_sample_of_cluster(output, samplesize):
    """Copy a random sample of images from each cluster into a 'sample' dir.

    Args:
        output: directory containing one sub-directory per cluster.
        samplesize: fraction (0..1] of each cluster's images to copy;
            at least one image is taken from every non-empty cluster.
    """
    cluster_dirs = [entry.path for entry in os.scandir(output) if entry.is_dir()]
    sample_dir = os.path.join(output, "sample")
    os.makedirs(sample_dir)
    for cluster_dir in cluster_dirs:
        images = [entry.path for entry in os.scandir(cluster_dir) if entry.is_file()]
        if not images:
            # BUG FIX: an empty cluster directory made the original crash
            # in random.sample (sample size 1 from an empty population).
            continue
        # Take at least one image per cluster, even for tiny clusters.
        count = max(1, math.floor(len(images) * samplesize))
        for path in random.sample(images, count):
            shutil.copyfile(path, os.path.join(sample_dir, os.path.basename(path)))
| [
"os.mkdir",
"os.remove",
"PIL.Image.new",
"random.sample",
"keras.models.Model",
"keras.applications.vgg16.VGG16",
"shutil.rmtree",
"os.path.join",
"os.chdir",
"sklearn.cluster.KMeans",
"keras.preprocessing.image.load_img",
"requests.get",
"PIL.ImageDraw.Draw",
"keras.applications.vgg16.pr... | [((731, 752), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (744, 752), False, 'import os\n'), ((790, 806), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (798, 806), False, 'import os\n'), ((871, 908), 'os.path.join', 'os.path.join', (['search_dir', '"""temp.jpeg"""'], {}), "(search_dir, 'temp.jpeg')\n", (883, 908), False, 'import os\n'), ((977, 998), 'imghdr.what', 'imghdr.what', (['tempfile'], {}), '(tempfile)\n', (988, 998), False, 'import imghdr\n'), ((1004, 1023), 'os.remove', 'os.remove', (['tempfile'], {}), '(tempfile)\n', (1013, 1023), False, 'import os\n'), ((3477, 3494), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (3487, 3494), False, 'from PIL import Image, ImageDraw\n'), ((3708, 3725), 'os.scandir', 'os.scandir', (['input'], {}), '(input)\n', (3718, 3725), False, 'import os\n'), ((4168, 4187), 'os.scandir', 'os.scandir', (['img_dir'], {}), '(img_dir)\n', (4178, 4187), False, 'import os\n'), ((4433, 4452), 'os.scandir', 'os.scandir', (['img_dir'], {}), '(img_dir)\n', (4443, 4452), False, 'import os\n'), ((4598, 4636), 'keras.preprocessing.image.load_img', 'load_img', (['file'], {'target_size': '(224, 224)'}), '(file, target_size=(224, 224))\n', (4606, 4636), False, 'from keras.preprocessing.image import load_img\n'), ((4647, 4660), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4655, 4660), True, 'import numpy as np\n'), ((4718, 4748), 'keras.applications.vgg16.preprocess_input', 'preprocess_input', (['reshaped_img'], {}), '(reshaped_img)\n', (4734, 4748), False, 'from keras.applications.vgg16 import preprocess_input\n'), ((5817, 5824), 'keras.applications.vgg16.VGG16', 'VGG16', ([], {}), '()\n', (5822, 5824), False, 'from keras.applications.vgg16 import VGG16\n'), ((5838, 5897), 'keras.models.Model', 'Model', ([], {'inputs': 'model.inputs', 'outputs': 'model.layers[-2].output'}), '(inputs=model.inputs, outputs=model.layers[-2].output)\n', (5843, 5897), False, 'from 
keras.models import Model\n'), ((5903, 5917), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (5911, 5917), False, 'import os\n'), ((6446, 6491), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'components', 'random_state': '(22)'}), '(n_components=components, random_state=22)\n', (6449, 6491), False, 'from sklearn.decomposition import PCA\n'), ((7118, 7172), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'amount_of_clusters', 'random_state': '(22)'}), '(n_clusters=amount_of_clusters, random_state=22)\n', (7124, 7172), False, 'from sklearn.cluster import KMeans\n'), ((7813, 7843), 'os.path.join', 'os.path.join', (['output', '"""sample"""'], {}), "(output, 'sample')\n", (7825, 7843), False, 'import os\n'), ((7849, 7868), 'os.makedirs', 'os.makedirs', (['sample'], {}), '(sample)\n', (7860, 7868), False, 'import os\n'), ((641, 662), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (654, 662), False, 'import os\n'), ((673, 689), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (681, 689), False, 'import os\n'), ((763, 784), 'shutil.rmtree', 'shutil.rmtree', (['folder'], {}), '(folder)\n', (776, 784), False, 'import shutil\n'), ((1526, 1557), 'bs4.BeautifulSoup', 'BeautifulSoup', (['d', '"""html.parser"""'], {}), "(d, 'html.parser')\n", (1539, 1557), False, 'from bs4 import BeautifulSoup\n'), ((2583, 2619), 'os.path.join', 'os.path.join', (['directory', '"""temp.jpeg"""'], {}), "(directory, 'temp.jpeg')\n", (2595, 2619), False, 'import os\n'), ((2872, 2928), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(grid_size, grid_size)'], {'color': '"""white"""'}), "('RGBA', (grid_size, grid_size), color='white')\n", (2881, 2928), False, 'from PIL import Image, ImageDraw\n'), ((3352, 3364), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3362, 3364), False, 'import io\n'), ((4891, 4917), 'os.path.join', 'os.path.join', (['directory', 'd'], {}), '(directory, d)\n', (4903, 4917), False, 'import os\n'), ((5071, 5117), 'PIL.Image.new', 
'Image.new', (['"""RGBA"""', '(cols, rows)'], {'color': '"""white"""'}), "('RGBA', (cols, rows), color='white')\n", (5080, 5117), False, 'from PIL import Image, ImageDraw\n'), ((5680, 5692), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5690, 5692), False, 'import io\n'), ((5928, 5944), 'os.scandir', 'os.scandir', (['path'], {}), '(path)\n', (5938, 5944), False, 'import os\n'), ((6714, 6722), 'sklearn.cluster.KMeans', 'KMeans', ([], {}), '()\n', (6720, 6722), False, 'from sklearn.cluster import KMeans\n'), ((6745, 6798), 'yellowbrick.cluster.KElbowVisualizer', 'KElbowVisualizer', (['model'], {'k': '(2, rounds)', 'timings': '(False)'}), '(model, k=(2, rounds), timings=False)\n', (6761, 6798), False, 'from yellowbrick.cluster import KElbowVisualizer\n'), ((8168, 8213), 'random.sample', 'random.sample', (['list_images', 'images_of_cluster'], {}), '(list_images, images_of_cluster)\n', (8181, 8213), False, 'import random\n'), ((1481, 1504), 'requests.get', 'requests.get', (['searchUrl'], {}), '(searchUrl)\n', (1493, 1504), False, 'import requests\n'), ((2641, 2677), 'os.path.join', 'os.path.join', (['directory', '"""temp.jpeg"""'], {}), "(directory, 'temp.jpeg')\n", (2653, 2677), False, 'import os\n'), ((2723, 2744), 'os.scandir', 'os.scandir', (['directory'], {}), '(directory)\n', (2733, 2744), False, 'import os\n'), ((3748, 3779), 'os.path.splitext', 'os.path.splitext', (['filename.name'], {}), '(filename.name)\n', (3764, 3779), False, 'import os\n'), ((4527, 4548), 'os.remove', 'os.remove', (['image.path'], {}), '(image.path)\n', (4536, 4548), False, 'import os\n'), ((4927, 4948), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (4937, 4948), False, 'import os\n'), ((5234, 5258), 'os.listdir', 'os.listdir', (['cluster_path'], {}), '(cluster_path)\n', (5244, 5258), False, 'import os\n'), ((6876, 6888), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6886, 6888), False, 'import io\n'), ((7535, 7559), 'os.path.join', 'os.path.join', (['path', 'file'], 
{}), '(path, file)\n', (7547, 7559), False, 'import os\n'), ((4966, 4992), 'os.path.join', 'os.path.join', (['directory', 'd'], {}), '(directory, d)\n', (4978, 4992), False, 'import os\n'), ((5308, 5338), 'os.path.basename', 'os.path.basename', (['cluster_path'], {}), '(cluster_path)\n', (5324, 5338), False, 'import os\n'), ((7758, 7782), 'os.path.join', 'os.path.join', (['output', '""""""'], {}), "(output, '')\n", (7770, 7782), False, 'import os\n'), ((2748, 2779), 'os.path.splitext', 'os.path.splitext', (['filename.name'], {}), '(filename.name)\n', (2764, 2779), False, 'import os\n'), ((3069, 3089), 'PIL.Image.open', 'Image.open', (['files[k]'], {}), '(files[k])\n', (3079, 3089), False, 'from PIL import Image, ImageDraw\n'), ((3868, 3893), 'PIL.Image.open', 'Image.open', (['filename.path'], {}), '(filename.path)\n', (3878, 3893), False, 'from PIL import Image, ImageDraw\n'), ((5272, 5294), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['new_im'], {}), '(new_im)\n', (5286, 5294), False, 'from PIL import Image, ImageDraw\n'), ((6958, 6967), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6965, 6967), True, 'import matplotlib.pyplot as plt\n'), ((7086, 7095), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7093, 7095), True, 'import matplotlib.pyplot as plt\n'), ((7955, 7980), 'os.path.join', 'os.path.join', (['cluster', '""""""'], {}), "(cluster, '')\n", (7967, 7980), False, 'import os\n'), ((8308, 8330), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (8324, 8330), False, 'import os\n'), ((5488, 5524), 'os.path.join', 'os.path.join', (['cluster_path', 'files[l]'], {}), '(cluster_path, files[l])\n', (5500, 5524), False, 'import os\n'), ((1098, 1120), 'os.listdir', 'os.listdir', (['search_dir'], {}), '(search_dir)\n', (1108, 1120), False, 'import os\n')] |
"""
IN YOLOV3, uses 3 layers, respectively downsample 32, downsample 16 and downsample 8
IN DEEOLABV3+, the nwetwork output layers if stride 16, so need to add more layer to generate downsample 32!
"""
import numpy as np
detection_feature_layers = [
# downsample 8
'xception_65/entry_flow/block2/unit_1/xception_module/add:0',
# downsample 16
'xception_65/middle_flow/block1/unit_16/xception_module/add:0',
# downsample 32
'xception_65/detection_branch/exit_flow/block3/unit_1/xception_module/separable_conv3/pointwise_conv/Relu:0'
]
detection_feature_strides = np.asarray([
8,
16,
32
])
detection_anchors = np.asarray([
[
[0.02403846, 0.03125],
[0.03846154, 0.07211538],
[0.07932692, 0.05528846]
],
[
[0.07211538, 0.14663462],
[0.14903846, 0.10817308],
[0.14182692, 0.28605769]
],
[
[0.27884615, 0.21634615],
[0.375, 0.47596154],
[0.89663462, 0.78365385]
]
])
| [
"numpy.asarray"
] | [((588, 611), 'numpy.asarray', 'np.asarray', (['[8, 16, 32]'], {}), '([8, 16, 32])\n', (598, 611), True, 'import numpy as np\n'), ((647, 906), 'numpy.asarray', 'np.asarray', (['[[[0.02403846, 0.03125], [0.03846154, 0.07211538], [0.07932692, 0.05528846]\n ], [[0.07211538, 0.14663462], [0.14903846, 0.10817308], [0.14182692, \n 0.28605769]], [[0.27884615, 0.21634615], [0.375, 0.47596154], [\n 0.89663462, 0.78365385]]]'], {}), '([[[0.02403846, 0.03125], [0.03846154, 0.07211538], [0.07932692, \n 0.05528846]], [[0.07211538, 0.14663462], [0.14903846, 0.10817308], [\n 0.14182692, 0.28605769]], [[0.27884615, 0.21634615], [0.375, 0.47596154\n ], [0.89663462, 0.78365385]]])\n', (657, 906), True, 'import numpy as np\n')] |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
def compute_fp_tp_probs(
    probs: Union[np.ndarray, torch.Tensor],
    y_coord: Union[np.ndarray, torch.Tensor],
    x_coord: Union[np.ndarray, torch.Tensor],
    evaluation_mask: Union[np.ndarray, torch.Tensor],
    labels_to_exclude: Optional[List] = None,
    resolution_level: int = 0,
):
    """
    Split detection probabilities into false-positive and true-positive groups.

    Adapted from the official evaluation code of the
    `CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_.
    A detection is a true positive when its (down-scaled) coordinates land on a
    labelled region of ``evaluation_mask``; for each ground-truth region only
    the highest-scoring hit is kept.

    Args:
        probs: shape (n,) array of detection probabilities.
        y_coord: shape (n,) array of detection Y-coordinates.
        x_coord: shape (n,) array of detection X-coordinates.
        evaluation_mask: labelled ground-truth mask used for evaluation.
        labels_to_exclude: region labels ignored for metric calculation.
        resolution_level: pyramid level at which ``evaluation_mask`` was made;
            coordinates are divided by ``2 ** resolution_level``.

    Returns:
        fp_probs: probabilities of the false positive detections.
        tp_probs: probabilities of the true positive detections (one slot per
            ground-truth label).
        num_targets: total number of targets, excluding ``labels_to_exclude``.
    """
    if not (probs.shape == y_coord.shape == x_coord.shape):
        raise AssertionError("the shapes for coordinates and probabilities should be the same.")

    def _as_numpy(arr):
        # Accept either numpy arrays or (possibly GPU) torch tensors.
        return arr.detach().cpu().numpy() if isinstance(arr, torch.Tensor) else arr

    probs = _as_numpy(probs)
    y_coord = _as_numpy(y_coord)
    x_coord = _as_numpy(x_coord)
    evaluation_mask = _as_numpy(evaluation_mask)

    excluded = [] if labels_to_exclude is None else labels_to_exclude

    # Bring detection coordinates down to the mask's resolution level.
    scale = pow(2, resolution_level)
    rows = (y_coord / scale).astype(int)
    cols = (x_coord / scale).astype(int)
    hit_labels = evaluation_mask[rows, cols]

    max_label = np.max(evaluation_mask)
    fp_probs = probs[hit_labels == 0]
    tp_probs = np.zeros((max_label,), dtype=np.float32)
    for label in range(1, max_label + 1):
        if label not in excluded and label in hit_labels:
            # Keep only the strongest detection inside each labelled region.
            tp_probs[label - 1] = probs[hit_labels == label].max()

    num_targets = max_label - len(excluded)
    return fp_probs, tp_probs, num_targets
def compute_froc_curve_data(
    fp_probs: Union[np.ndarray, torch.Tensor],
    tp_probs: Union[np.ndarray, torch.Tensor],
    num_targets: int,
    num_images: int,
):
    """
    Compute the data needed to plot a Free Response Operating Characteristic
    (FROC) curve.

    Adapted from the official evaluation code of the
    `CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_.

    Args:
        fp_probs: probabilities of the false positive detections over all
            evaluated images.
        tp_probs: probabilities of the true positive detections over all
            evaluated images.
        num_targets: total number of targets, excluding ``labels_to_exclude``.
        num_images: number of images under evaluation.

    Returns:
        (fps_per_image, total_sensitivity): false positives per image and the
        matching sensitivities, one pair per probability threshold.
    """
    if not isinstance(fp_probs, type(tp_probs)):
        raise AssertionError("fp and tp probs should have same type.")
    if isinstance(fp_probs, torch.Tensor):
        fp_probs = fp_probs.detach().cpu().numpy()
    if isinstance(tp_probs, torch.Tensor):
        tp_probs = tp_probs.detach().cpu().numpy()

    # Sweep every distinct probability (except the smallest) as a threshold,
    # then append the trivial "nothing detected" point at the end.
    thresholds = sorted(set(list(fp_probs) + list(tp_probs)))
    fp_counts = [(fp_probs >= t).sum() for t in thresholds[1:]] + [0]
    tp_counts = [(tp_probs >= t).sum() for t in thresholds[1:]] + [0]

    fps_per_image = np.asarray(fp_counts) / float(num_images)
    total_sensitivity = np.asarray(tp_counts) / float(num_targets)
    return fps_per_image, total_sensitivity
def compute_froc_score(
    fps_per_image: np.ndarray, total_sensitivity: np.ndarray, eval_thresholds: Tuple = (0.25, 0.5, 1, 2, 4, 8)
):
    """
    Compute the CAMELYON 16 FROC score: the average sensitivity at a set of
    predefined false-positive rates per whole slide image.

    Adapted from the official evaluation code of the
    `CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_.

    Args:
        fps_per_image: average false positives per image at each threshold.
        total_sensitivity: true positive rates at each threshold.
        eval_thresholds: false-positive rates at which sensitivity is sampled;
            defaults to (0.25, 0.5, 1, 2, 4, 8), as in the challenge.
    """
    # The curves are ordered by descending threshold; reverse them so that
    # np.interp sees a monotonically increasing x-axis.
    sens_at_thresholds = np.interp(eval_thresholds, fps_per_image[::-1], total_sensitivity[::-1])
    return sens_at_thresholds.mean()
| [
"numpy.asarray",
"numpy.zeros",
"numpy.max",
"numpy.mean",
"numpy.where",
"numpy.interp"
] | [((2813, 2836), 'numpy.max', 'np.max', (['evaluation_mask'], {}), '(evaluation_mask)\n', (2819, 2836), True, 'import numpy as np\n'), ((2852, 2892), 'numpy.zeros', 'np.zeros', (['(max_label,)'], {'dtype': 'np.float32'}), '((max_label,), dtype=np.float32)\n', (2860, 2892), True, 'import numpy as np\n'), ((5921, 5993), 'numpy.interp', 'np.interp', (['eval_thresholds', 'fps_per_image[::-1]', 'total_sensitivity[::-1]'], {}), '(eval_thresholds, fps_per_image[::-1], total_sensitivity[::-1])\n', (5930, 5993), True, 'import numpy as np\n'), ((6005, 6025), 'numpy.mean', 'np.mean', (['interp_sens'], {}), '(interp_sens)\n', (6012, 6025), True, 'import numpy as np\n'), ((3094, 3120), 'numpy.where', 'np.where', (['(hittedlabel == 0)'], {}), '(hittedlabel == 0)\n', (3102, 3120), True, 'import numpy as np\n'), ((4894, 4915), 'numpy.asarray', 'np.asarray', (['total_fps'], {}), '(total_fps)\n', (4904, 4915), True, 'import numpy as np\n'), ((4960, 4981), 'numpy.asarray', 'np.asarray', (['total_tps'], {}), '(total_tps)\n', (4970, 4981), True, 'import numpy as np\n'), ((3256, 3282), 'numpy.where', 'np.where', (['(hittedlabel == i)'], {}), '(hittedlabel == i)\n', (3264, 3282), True, 'import numpy as np\n')] |
### Overview of artifact detection ###
# Demonstrates inspecting MEG data for common artifacts (power-line noise,
# heartbeats, eye movements) using MNE's sample dataset. Plot-only: the raw
# data is never modified beyond cropping and projector removal.
import os
import numpy as np
import mne
# Download (on first run) and locate MNE's bundled sample MEG dataset.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(0, 60).load_data()  # just use a fraction of data for speed here
######## Artifact detection ########
# Stash the SSP projectors, then remove them so artifacts stay visible in the
# plots below (ssp_projectors is kept only for reference).
ssp_projectors = raw.info['projs']
raw.del_proj()
# Browse only the magnetometer channels, keeping slow drifts (remove_dc=False).
mag_channels = mne.pick_types(raw.info, meg='mag')
raw.plot(duration=60, order=mag_channels, n_channels=len(mag_channels), remove_dc=False)
######## Power line noise ########
fig = raw.plot_psd(tmax=np.inf, fmax=250, average=True)
# add some arrows at 60 Hz and its harmonics:
for ax in fig.axes[1:]:
    freqs = ax.lines[-1].get_xdata()
    psds = ax.lines[-1].get_ydata()
    for freq in (60, 120, 180, 240):
        # Arrow tip points down at the PSD value closest to each harmonic.
        idx = np.searchsorted(freqs, freq)
        ax.arrow(x=freqs[idx], y=psds[idx] + 18, dx=0, dy=-12, color='red',
                 width=0.1, head_width=3, length_includes_head=True)
######## Heartbeat artifacts (ECG) ########
# Epoch the data around detected heartbeats and visualise the mean artifact.
ecg_epochs = mne.preprocessing.create_ecg_epochs(raw)
ecg_epochs.plot_image(combine='mean')
avg_ecg_epochs = ecg_epochs.average().apply_baseline((-0.5, -0.2))
avg_ecg_epochs.plot_topomap(times=np.linspace(-0.05, 0.05, 11))
avg_ecg_epochs.plot_joint(times=[-0.25, -0.025, 0, 0.025, 0.25])
######## Ocular artifacts (EOG) ########
# Same idea for eye blinks/movements, with an explicit baseline window.
eog_epochs = mne.preprocessing.create_eog_epochs(raw, baseline=(-0.5, -0.2))
eog_epochs.plot_image(combine='mean')
eog_epochs.average().plot_joint()
| [
"mne.preprocessing.create_ecg_epochs",
"mne.io.read_raw_fif",
"mne.pick_types",
"mne.preprocessing.create_eog_epochs",
"numpy.searchsorted",
"numpy.linspace",
"mne.datasets.sample.data_path",
"os.path.join"
] | [((102, 133), 'mne.datasets.sample.data_path', 'mne.datasets.sample.data_path', ([], {}), '()\n', (131, 133), False, 'import mne\n'), ((157, 231), 'os.path.join', 'os.path.join', (['sample_data_folder', '"""MEG"""', '"""sample"""', '"""sample_audvis_raw.fif"""'], {}), "(sample_data_folder, 'MEG', 'sample', 'sample_audvis_raw.fif')\n", (169, 231), False, 'import os\n'), ((274, 315), 'mne.io.read_raw_fif', 'mne.io.read_raw_fif', (['sample_data_raw_file'], {}), '(sample_data_raw_file)\n', (293, 315), False, 'import mne\n'), ((495, 530), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '"""mag"""'}), "(raw.info, meg='mag')\n", (509, 530), False, 'import mne\n'), ((1140, 1180), 'mne.preprocessing.create_ecg_epochs', 'mne.preprocessing.create_ecg_epochs', (['raw'], {}), '(raw)\n', (1175, 1180), False, 'import mne\n'), ((1472, 1535), 'mne.preprocessing.create_eog_epochs', 'mne.preprocessing.create_eog_epochs', (['raw'], {'baseline': '(-0.5, -0.2)'}), '(raw, baseline=(-0.5, -0.2))\n', (1507, 1535), False, 'import mne\n'), ((907, 935), 'numpy.searchsorted', 'np.searchsorted', (['freqs', 'freq'], {}), '(freqs, freq)\n', (922, 935), True, 'import numpy as np\n'), ((1321, 1349), 'numpy.linspace', 'np.linspace', (['(-0.05)', '(0.05)', '(11)'], {}), '(-0.05, 0.05, 11)\n', (1332, 1349), True, 'import numpy as np\n')] |
import networkx as nx
import numpy.random as npr
def draw_graph(G, edge_weight=None, layout: str = "kamada_kawai"):
    """
    Draw ``G`` with node labels and, optionally, edge labels.

    :param G: A NetworkX graph.
    :param edge_weight: Name of an edge attribute to display as edge labels.
    :param layout: Name of a NetworkX layout function without the ``_layout``
        suffix, e.g. ``"kamada_kawai"`` or ``"spring"``.
    """
    # Bug fix: honour the ``layout`` argument (it was previously ignored and
    # kamada_kawai was always used). Default behaviour is unchanged.
    pos = getattr(nx, f"{layout}_layout")(G)
    if edge_weight:
        edge_labels = {
            (u, v): d[edge_weight] for u, v, d in G.edges(data=True)
        }  # noqa: E501
        nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)
    nx.draw_networkx_edges(G, pos)
    # ``draw_networkx_nodes`` has no ``with_labels`` option; node labels are
    # drawn by ``draw_networkx_labels``. Also dropped the trailing
    # ``draw_kamada_kawai`` call, which redundantly re-drew the whole graph.
    nx.draw_networkx_nodes(G, pos)
    nx.draw_networkx_labels(G, pos)
def noise(size):
    """Return i.i.d. standard-normal samples with the given shape."""
    return npr.normal(0.0, 1.0, size)
def rule1(n, S, G, path):
    """
    Check blocking rule 1 for node ``n`` on the given causal path.

    Rule 1 blocks a *chain* through ``n``::

        -> n ->

    (topologically equivalent to ``<- n <-``), provided ``n`` is a member
    of the conditioning set ``S``.

    :param n: A node in graph G.
    :param S: The conditioning node set.
    :param G: A NetworkX graph.
    :param path: The causal path of interest.
    """
    chain = path_nodes(G, path)
    is_chain_node = (
        len(list(chain.in_edges(n))) == 1
        and len(list(chain.out_edges(n))) == 1
    )
    return n in S and is_chain_node
def rule2(n, S, G, path):
    """
    Check blocking rule 2 for node ``n`` on the given causal path.

    Rule 2 blocks a *fork* at ``n``::

        <- n ->

    provided ``n`` is a member of the conditioning set ``S``.

    :param n: A node in graph G.
    :param S: The conditioning node set.
    :param G: A NetworkX graph.
    :param path: The causal path of interest.
    """
    fork = path_nodes(G, path)
    is_fork_node = len(list(fork.out_edges(n))) == 2
    return n in S and is_fork_node
def rule3(n, S, G, path):
    """
    Check blocking rule 3 (the collider rule) for node ``n`` on the given
    causal path.

    If ``n`` is a collider on the path::

        -> n <-

    then it is a blocker, *unless* ``n`` is a member of ``S`` or ``n`` has a
    descendant that is a member of ``S``, in which case it is not a blocker.

    :param n: A node in graph G.
    :param S: The conditioning node set.
    :param G: A NetworkX graph.
    :param path: The causal path of interest.
    """
    G_sub = path_nodes(G, path)
    is_collider = len(list(G_sub.in_edges(n))) == 2
    # NOTE(review): only *direct* successors are checked here, not all
    # descendants as the docstring suggests -- confirm against d-separation
    # semantics before relying on deep graphs.
    descendant_in_S = bool(set(G.successors(n)).intersection(S))
    # Conditioning on a collider (or a descendant of it) opens the path.
    # (Removed the unused ``in_conditioning_set`` local; ``n in S`` is
    # checked directly below.)
    if n in S or descendant_in_S:
        return False
    return is_collider
def path_nodes(G: nx.DiGraph, path: list):
    """
    Build a directed subgraph containing only the edges along ``path``.

    Unlike ``G.subgraph``, edges of ``G`` between path nodes that are not
    consecutive on the path are excluded.

    :param G: A NetworkX directed graph.
    :param path: A list of nodes denoting an undirected path.
    """
    assert isinstance(G, nx.DiGraph), "G must be a directed graph"
    directed_path = nx.DiGraph()
    # Walk consecutive node pairs and keep whichever edge orientation exists.
    for src, dst in zip(path, path[1:]):
        if G.has_edge(src, dst):
            directed_path.add_edge(src, dst)
        elif G.has_edge(dst, src):
            directed_path.add_edge(dst, src)
    return directed_path
| [
"networkx.draw_networkx_edges",
"networkx.kamada_kawai_layout",
"networkx.draw_kamada_kawai",
"networkx.draw_networkx_nodes",
"numpy.random.normal",
"networkx.draw_networkx_edge_labels",
"networkx.DiGraph"
] | [((128, 153), 'networkx.kamada_kawai_layout', 'nx.kamada_kawai_layout', (['G'], {}), '(G)\n', (150, 153), True, 'import networkx as nx\n'), ((371, 401), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {}), '(G, pos)\n', (393, 401), True, 'import networkx as nx\n'), ((406, 454), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'with_labels': '(True)'}), '(G, pos, with_labels=True)\n', (428, 454), True, 'import networkx as nx\n'), ((459, 500), 'networkx.draw_kamada_kawai', 'nx.draw_kamada_kawai', (['G'], {'with_labels': '(True)'}), '(G, with_labels=True)\n', (479, 500), True, 'import networkx as nx\n'), ((531, 568), 'numpy.random.normal', 'npr.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': 'size'}), '(loc=0, scale=1, size=size)\n', (541, 568), True, 'import numpy.random as npr\n'), ((3000, 3012), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (3010, 3012), True, 'import networkx as nx\n'), ((300, 365), 'networkx.draw_networkx_edge_labels', 'nx.draw_networkx_edge_labels', (['G'], {'pos': 'pos', 'edge_labels': 'edge_labels'}), '(G, pos=pos, edge_labels=edge_labels)\n', (328, 365), True, 'import networkx as nx\n')] |
# -*- coding: utf-8 -*-
# Experiment config for RetinaNet-H on DOTA (1x schedule). Base values for
# schedule/dataset/model come from the star-imported ``_base_`` configs; the
# assignments below override them for this run.
from __future__ import division, print_function, absolute_import
import numpy as np
from libs.configs._base_.models.retinanet_r50_fpn import *
from libs.configs._base_.datasets.dota_detection import *
from libs.configs._base_.schedules.schedule_1x import *
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo

# schedule
BATCH_SIZE = 1
GPU_GROUP = "0"  # comma-separated GPU ids
NUM_GPU = len(GPU_GROUP.strip().split(','))
# Checkpoint interval in iterations; also serves as the iterations-per-"epoch"
# unit when converting the epoch-based schedule below.
SAVE_WEIGHTS_INTE = 27000
DECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE
MAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH
# NOTE(review): "SETP" looks like a typo for "STEP"; kept for compatibility
# with code that reads this name.
WARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)

# dataset

# model

# backbone
# Resolve the path of the pretrained backbone weights for NET_NAME.
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')

# bbox head
ANGLE_RANGE = 180  # angle regression range in degrees

# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0 / 5.0  # down-weight box regression relative to classification
REG_LOSS_MODE = None

VERSION = 'RetinaNet_DOTA_1x_20210725'

"""
RetinaNet-H + theta=atan(sin(theta)/cos(theta)) + 180, sin^2(theta) + cos^2(theta) = 1
[-90, 90] sin in [-1, 1] cos in [0, 1]
FLOPs: 485784881; Trainable params: 33051321
This is your result for task 1:
mAP: 0.6482820239385153
ap of each class: plane:0.8863486082518542, baseball-diamond:0.7510916490271552, bridge:0.4136498976633022, ground-track-field:0.6934357734426206, small-vehicle:0.5915433817529869, large-vehicle:0.4156886089040786, ship:0.6512479280213479, tennis-court:0.8965927064782218, basketball-court:0.778541563411186, storage-tank:0.7716242837257139, soccer-ball-field:0.5261143148330104, roundabout:0.6328490142731126, harbor:0.5072934651888339, swimming-pool:0.6566747539350666, helicopter:0.55153441016924
The submitted information is :
Description: RetinaNet_DOTA_1x_20210725_35.1w_v1
Username: SJTU-Det
Institute: SJTU
Emailadress: <EMAIL>
TeamMembers: yangxue
"""
| [
"dataloader.pretrained_weights.pretrain_zoo.PretrainModelZoo",
"numpy.array"
] | [((673, 691), 'dataloader.pretrained_weights.pretrain_zoo.PretrainModelZoo', 'PretrainModelZoo', ([], {}), '()\n', (689, 691), False, 'from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n'), ((481, 512), 'numpy.array', 'np.array', (['DECAY_EPOCH', 'np.int32'], {}), '(DECAY_EPOCH, np.int32)\n', (489, 512), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.