code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import vaex
def test_mutual_information():
    """Exercise DataFrame.mutual_information with pairs, column lists,
    single/multiple targets, explicit custom pairs and higher dimensions."""
    df = vaex.example()
    mi = df.mutual_information
    # A single pair; mutual information is symmetric in its arguments.
    xy = yx = mi('x', 'y')
    np.testing.assert_array_almost_equal(xy, np.array(0.068934))
    np.testing.assert_array_almost_equal(mi('y', 'x'), mi('x', 'y'))
    xx = mi('x', 'x')
    yy = mi('y', 'y')
    zz = mi('z', 'z')
    zx = xz = mi('x', 'z')
    zy = yz = mi('y', 'z')
    # A list of columns yields the full symmetric matrix.
    result = mi(x=['x', 'y', 'z'])
    np.testing.assert_array_almost_equal(
        result, np.array(([xx, xy, xz],
                           [yx, yy, yz],
                           [zx, zy, zz])))
    # A list of columns against a single target column.
    result = mi(x=['x', 'y', 'z'], y='z')
    np.testing.assert_array_almost_equal(result, np.array([xz, yz, zz]))
    # A list of columns against a list of targets.
    result = mi(x=['x', 'y', 'z'], y=['y', 'z'])
    assert result.shape == (3, 2)
    np.testing.assert_array_almost_equal(
        result, np.array(([xy, xz],
                           [yy, yz],
                           [zy, zz])))
    # Explicit custom pairs give a 1-D result, one value per pair.
    result = mi(x=[['x', 'y'], ['x', 'z'], ['y', 'z']])
    assert result.shape == (3,)
    np.testing.assert_array_almost_equal(result, np.array([xy, xz, yz]))
    # Higher-dimensional joint estimate with a custom histogram shape.
    result = mi(x=['x', 'y'], dimension=3, mi_shape=4)
    assert result.shape == (2, 2, 2)
| [
"numpy.array",
"numpy.testing.assert_array_almost_equal",
"vaex.example"
] | [((74, 88), 'vaex.example', 'vaex.example', ([], {}), '()\n', (86, 88), False, 'import vaex\n'), ((171, 189), 'numpy.array', 'np.array', (['(0.068934)'], {}), '(0.068934)\n', (179, 189), True, 'import numpy as np\n'), ((194, 244), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['xy', 'expected'], {}), '(xy, expected)\n', (230, 244), True, 'import numpy as np\n'), ((664, 716), 'numpy.array', 'np.array', (['([xx, xy, xz], [yx, yy, yz], [zx, zy, zz])'], {}), '(([xx, xy, xz], [yx, yy, yz], [zx, zy, zz]))\n', (672, 716), True, 'import numpy as np\n'), ((771, 825), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (807, 825), True, 'import numpy as np\n'), ((947, 969), 'numpy.array', 'np.array', (['[xz, yz, zz]'], {}), '([xz, yz, zz])\n', (955, 969), True, 'import numpy as np\n'), ((974, 1028), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (1010, 1028), True, 'import numpy as np\n'), ((1183, 1223), 'numpy.array', 'np.array', (['([xy, xz], [yy, yz], [zy, zz])'], {}), '(([xy, xz], [yy, yz], [zy, zz]))\n', (1191, 1223), True, 'import numpy as np\n'), ((1304, 1358), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (1340, 1358), True, 'import numpy as np\n'), ((1511, 1533), 'numpy.array', 'np.array', (['[xy, xz, yz]'], {}), '([xy, xz, yz])\n', (1519, 1533), True, 'import numpy as np\n'), ((1538, 1592), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (1574, 1592), True, 'import numpy as np\n')] |
import numpy as np
from joblib import Parallel, delayed, cpu_count
import editsim
def su(a, i):
    """Return the sum of row ``i`` of the 2-D array ``a``.

    Kept as a module-level function so joblib can pickle it for the
    parallel workers in ``execute``.
    """
    # Idiom fix: dropped the C-style trailing semicolon.
    return a[i].sum()
def execute():
    """Fill a shared NumPy array with random values and sum each row in
    parallel with joblib.

    Relies on ``editsim.createSharedNumpyArray`` to allocate memory that
    the worker processes can all see.
    """
    print("test")
    dims = (100000, 4)
    x = editsim.createSharedNumpyArray(dims)
    x[:] = np.random.rand(dims[0], dims[1])
    # One delayed task per row; cpu_count() workers share the array.
    # (Idiom fix: removed trailing semicolons and normalised spacing.)
    res = Parallel(n_jobs=cpu_count())(delayed(su)(x, i) for i in range(dims[0]))
    print(res)
execute()
| [
"joblib.delayed",
"joblib.cpu_count",
"numpy.random.rand",
"editsim.createSharedNumpyArray"
] | [((178, 214), 'editsim.createSharedNumpyArray', 'editsim.createSharedNumpyArray', (['dims'], {}), '(dims)\n', (208, 214), False, 'import editsim\n'), ((225, 257), 'numpy.random.rand', 'np.random.rand', (['dims[0]', 'dims[1]'], {}), '(dims[0], dims[1])\n', (239, 257), True, 'import numpy as np\n'), ((285, 296), 'joblib.cpu_count', 'cpu_count', ([], {}), '()\n', (294, 296), False, 'from joblib import Parallel, delayed, cpu_count\n'), ((298, 309), 'joblib.delayed', 'delayed', (['su'], {}), '(su)\n', (305, 309), False, 'from joblib import Parallel, delayed, cpu_count\n')] |
import numpy as np
import pandas as pd
from metrics import scores
from sklearn.model_selection import train_test_split
#load datasets
def load(filepath):
    """Read a whitespace-separated numeric data file and split it into
    a feature matrix and a label column.

    Columns 22 and 23 are discarded; of the remainder, columns 0-20 are
    the features and column 21 holds the label.

    Returns (features, labels) as NumPy arrays of shape (n, 21) and (n, 1).
    """
    frame = pd.read_csv(filepath, sep=" ", dtype=float, header=None)
    frame = frame.drop(columns=[22, 23])
    values = frame.to_numpy()
    return values[:, 0:21], values[:, 21:22]
# Load the "ann" thyroid train/test splits from the working directory.
train_features, train_labels = load("ann-train.data")
test_features, test_labels = load("ann-test.data")
# np.bincount in the tree code requires integer labels.
train_labels = np.array(train_labels,dtype="int64")
#entropy calculation
def entropy(y):
    """Shannon entropy (natural log) of a 1-D array of non-negative
    integer class labels."""
    counts = np.bincount(y)
    probs = counts / len(y)
    # Restrict to occupied classes so log(0) never appears.
    occupied = probs[probs > 0]
    return -np.sum(occupied * np.log(occupied))
class Node:
    """A single decision-tree node.

    Internal nodes carry a (feature, threshold) split with left/right
    children; leaves carry only a class ``mark``.
    """

    def __init__(self, feature=None, threshold=None, left_node=None, right_node=None, mark=None):
        self.feature, self.threshold = feature, threshold
        self.left_node, self.right_node = left_node, right_node
        self.mark = mark

    def is_leaf_node(self):
        """A node is a leaf exactly when it holds a class mark."""
        return self.mark is not None
class DecisionTreeClassifier:
    """ID3-style decision tree built on entropy / information gain with a
    random feature subsample considered at each split.

    NOTE(review): depends on the module-level entropy() helper and on
    integer labels (np.bincount); split choice depends on the order
    np.random.choice returns features, so trees vary between runs unless
    the global NumPy seed is fixed.
    """
    def __init__(self,min_samples_split=2, max_depth=0, number_of_features=None):
        # min_samples_split: stop splitting nodes smaller than this.
        # max_depth: maximum recursion depth (0 makes the root a leaf).
        # number_of_features: size of the random feature subsample per split.
        self.min_samples_split = min_samples_split
        self.max_depth = max_depth
        self.number_of_features = number_of_features
        self.root = None
    def fit(self, X, y):
        """Grow the tree from features X (n_samples, n_features) and labels y.

        Returns None; the fitted tree is stored on self.root.
        """
        # Clamp the feature-subsample size to the actual feature count.
        self.number_of_features = X.shape[1] if not self.number_of_features else min(self.number_of_features, X.shape[1])
        self.root = self.build_node(X, y)
    def predict(self, X):
        """Return the predicted class label for every row of X."""
        array = []
        for x in X:
            array.append(self.bottom_up(x, self.root))
        return np.array(array)
    def bottom_up(self, x, node):
        # Walk from the given node down to a leaf via threshold comparisons.
        if node.is_leaf_node():
            return node.mark
        if x[node.feature] <= node.threshold:
            return self.bottom_up(x, node.left_node)
        else:
            return self.bottom_up(x, node.right_node)
    def build_node(self, X, y, depth=0):
        """Recursively build and return the subtree rooted at this sample set."""
        n_samples, n_features = X.shape
        n_labels = len(np.unique(y))
        # Stopping criteria: depth limit reached, node pure, or too few samples.
        if (depth >= self.max_depth or n_labels == 1 or n_samples < self.min_samples_split):
            leaf_mark = self.most_class(y)
            return Node(mark=leaf_mark)
        feature_idxs = np.random.choice(n_features, self.number_of_features, replace=False)
        best_feature, best_thresh = self.best_split(X, y, feature_idxs)
        # NOTE(review): a split at the column maximum leaves one side empty;
        # most_class on an empty partition would fail in np.argmax — verify
        # the gain criterion always avoids this on real data.
        left_idxs, right_idxs = self.split(X[:, best_feature], best_thresh)
        left = self.build_node(X[left_idxs, :], y[left_idxs], depth+1)
        right = self.build_node(X[right_idxs, :], y[right_idxs], depth+1)
        return Node(best_feature, best_thresh,left, right)
    def best_split(self, X, y, feature_idxs):
        """Exhaustively scan the sampled features/thresholds for the best gain."""
        best_information_gain = -1
        split_idx, split_thresh = None, None
        for idx in feature_idxs:
            X_column=X[:, idx]
            thresholds = np.unique(X_column)
            for threshold in thresholds:
                gain = self.information_gain(y, X_column, threshold)
                if gain > best_information_gain:
                    best_information_gain = gain
                    split_idx = idx
                    split_thresh = threshold
        return split_idx, split_thresh
    def information_gain(self, y, X_column, split_thresh):
        """Entropy reduction achieved by splitting X_column at split_thresh."""
        parent_entropy = entropy(y)
        left_idxs, right_idxs = self.split(X_column, split_thresh)
        # A split that leaves one side empty carries no information.
        if len(left_idxs) == 0 or len(right_idxs) == 0:
            return 0
        n = len(y)
        n_l, n_r = len(left_idxs), len(right_idxs)
        entropy_l, entropy_r = entropy(y[left_idxs]), entropy(y[right_idxs])
        # Weighted average of child entropies.
        child_entropy = (n_l/n)*entropy_l + (n_r/n)*entropy_r
        info_gain = parent_entropy - child_entropy
        return info_gain
    def split(self, X_column, split_thresh):
        # Indices of samples going left (<= threshold) and right (> threshold).
        left_idxs = np.argwhere(X_column <= split_thresh).flatten()
        right_idxs = np.argwhere(X_column > split_thresh).flatten()
        return left_idxs, right_idxs
    def most_class(self, y):
        # Majority class label of y (requires non-negative integer labels).
        most_class = np.argmax(np.bincount(y))
        return most_class
    def print_tree(self, node=None, depth=0):
        """Pretty-print the tree, indenting one tab per depth level."""
        if not node:
            node = self.root
        if node.is_leaf_node():
            print('\t' * depth, "Leaf:", node.mark)
            return
        print('\t' * depth, "Split: X{} <= {} ".format(node.feature, node.threshold))
        self.print_tree(node.left_node, depth + 1)
        self.print_tree(node.right_node, depth + 1)
# Hyper-parameter grid searched with a hold-out cross-validation split.
max_depth = [10, 15, 25, 30]
min_samples_split = [5, 10, 15]
hyper_parameters = {}
X_train, X_cv , y_train, y_cv = train_test_split(train_features, train_labels, test_size=0.2, random_state=35)
for max_depth_ in max_depth:
    for min_samples_split_ in min_samples_split:
        hyper_parameters["max_depth=" + str(max_depth_) + ",min_samples_split=" + str(min_samples_split_)] = []
        my_clf = DecisionTreeClassifier(max_depth=max_depth_, min_samples_split=min_samples_split_)
        # NOTE(review): fit() returns None, so `model` is always None — looks
        # like a leftover; the fitted tree lives on my_clf.
        model = my_clf.fit(X_train, y_train[:,0])
        cv_y_pred = my_clf.predict(X_cv)
        cm = scores(y_cv,cv_y_pred)
        # Micro-averaged accuracy: per-class recall weighted by class frequency
        # (classes are assumed labelled 1..3 here — matches the thyroid data).
        micro_acc = 0
        for i in range(3):
            micro_acc += (cm[i,i]/np.sum(cm[i,:]) * np.sum(y_cv == i+1))
        micro_acc = micro_acc / y_cv.shape[0]
        hyper_parameters["max_depth=" + str(max_depth_) + ",min_samples_split=" + str(min_samples_split_)].append(micro_acc)
# Each dict value is a one-element list; max compares them lexicographically,
# which reduces to comparing the single accuracy values.
best_parameters = max(hyper_parameters, key=hyper_parameters.get)
print(f"Best hyperparameters chosen on cross validation set is {best_parameters}")
# Refit with the winning parameters parsed back out of the dict key.
my_clf = DecisionTreeClassifier(max_depth=int(best_parameters.split(",")[0].split("=")[1]),min_samples_split = int(best_parameters.split(",")[1].split("=")[1]))
model = my_clf.fit(X_train, y_train[:,0])
print("\n")
print("PERFORMANCE ON TRAINING SET")
train_y_pred = my_clf.predict(train_features)
cm = scores(train_labels,train_y_pred)
print(cm)
print("Class based accuracies: ")
for i in range(3):
    print("Class % d: % f" %(i, cm[i,i]/np.sum(cm[i,:])))
print("\n")
print("PERFORMANCE ON TEST SET")
test_y_pred = my_clf.predict(test_features)
cm = scores(test_labels,test_y_pred)
print(cm)
print("Class based accuracies: ")
for i in range(3):
    print("Class % d: % f" %(i, cm[i,i]/np.sum(cm[i,:])))
print("\n")
my_clf.print_tree()
| [
"numpy.unique",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.random.choice",
"numpy.log",
"numpy.array",
"metrics.scores",
"numpy.argwhere",
"numpy.sum",
"numpy.bincount"
] | [((543, 580), 'numpy.array', 'np.array', (['train_labels'], {'dtype': '"""int64"""'}), "(train_labels, dtype='int64')\n", (551, 580), True, 'import numpy as np\n'), ((4935, 5013), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_features', 'train_labels'], {'test_size': '(0.2)', 'random_state': '(35)'}), '(train_features, train_labels, test_size=0.2, random_state=35)\n', (4951, 5013), False, 'from sklearn.model_selection import train_test_split\n'), ((6177, 6211), 'metrics.scores', 'scores', (['train_labels', 'train_y_pred'], {}), '(train_labels, train_y_pred)\n', (6183, 6211), False, 'from metrics import scores\n'), ((6427, 6459), 'metrics.scores', 'scores', (['test_labels', 'test_y_pred'], {}), '(test_labels, test_y_pred)\n', (6433, 6459), False, 'from metrics import scores\n'), ((167, 223), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'sep': '""" """', 'dtype': 'float', 'header': 'None'}), "(filepath, sep=' ', dtype=float, header=None)\n", (178, 223), True, 'import pandas as pd\n'), ((630, 644), 'numpy.bincount', 'np.bincount', (['y'], {}), '(y)\n', (641, 644), True, 'import numpy as np\n'), ((1755, 1770), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (1763, 1770), True, 'import numpy as np\n'), ((2385, 2453), 'numpy.random.choice', 'np.random.choice', (['n_features', 'self.number_of_features'], {'replace': '(False)'}), '(n_features, self.number_of_features, replace=False)\n', (2401, 2453), True, 'import numpy as np\n'), ((5408, 5431), 'metrics.scores', 'scores', (['y_cv', 'cv_y_pred'], {}), '(y_cv, cv_y_pred)\n', (5414, 5431), False, 'from metrics import scores\n'), ((2163, 2175), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2172, 2175), True, 'import numpy as np\n'), ((3083, 3102), 'numpy.unique', 'np.unique', (['X_column'], {}), '(X_column)\n', (3092, 3102), True, 'import numpy as np\n'), ((4374, 4388), 'numpy.bincount', 'np.bincount', (['y'], {}), '(y)\n', (4385, 4388), True, 'import numpy as np\n'), ((4147, 
4184), 'numpy.argwhere', 'np.argwhere', (['(X_column <= split_thresh)'], {}), '(X_column <= split_thresh)\n', (4158, 4184), True, 'import numpy as np\n'), ((4216, 4252), 'numpy.argwhere', 'np.argwhere', (['(X_column > split_thresh)'], {}), '(X_column > split_thresh)\n', (4227, 4252), True, 'import numpy as np\n'), ((5532, 5553), 'numpy.sum', 'np.sum', (['(y_cv == i + 1)'], {}), '(y_cv == i + 1)\n', (5538, 5553), True, 'import numpy as np\n'), ((693, 702), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (699, 702), True, 'import numpy as np\n'), ((5514, 5530), 'numpy.sum', 'np.sum', (['cm[i, :]'], {}), '(cm[i, :])\n', (5520, 5530), True, 'import numpy as np\n'), ((6314, 6330), 'numpy.sum', 'np.sum', (['cm[i, :]'], {}), '(cm[i, :])\n', (6320, 6330), True, 'import numpy as np\n'), ((6562, 6578), 'numpy.sum', 'np.sum', (['cm[i, :]'], {}), '(cm[i, :])\n', (6568, 6578), True, 'import numpy as np\n')] |
from dataclasses import dataclass, field
from enum import Enum, auto
from fractions import Fraction
from typing import List, Sequence, Optional, Any, Iterator, cast
import re
import numpy as np
from scipy.interpolate import interp1d
import constants as Const
from utils import gaunt_bf
from atomic_table import AtomicTable, get_global_atomic_table
@dataclass
class AtomicModel:
    """A complete model atom: energy levels plus the radiative (lines,
    continua) and collisional processes coupling them, with an AtomicTable
    supplying element data such as atomic weight."""
    name: str
    levels: Sequence['AtomicLevel']
    lines: Sequence['AtomicLine']
    continua: Sequence['AtomicContinuum']
    collisions: Sequence['CollisionalRates']
    atomicTable: AtomicTable = field(default_factory=get_global_atomic_table)
    def __post_init__(self):
        # Normalise the element name and link every component back to this
        # atom via its setup() hook.
        self.name = self.name.upper()
        for l in self.levels:
            l.setup(self)
        for l in self.lines:
            l.setup(self)
        # This is separate because all of the lines in
        # an atom need to be initialised first
        for l in self.lines:
            l.setup_wavelength()
        for c in self.continua:
            c.setup(self)
        for c in self.collisions:
            c.setup(self)
    def __repr__(self):
        # Reconstructable repr listing every component; also feeds __hash__.
        s = 'AtomicModel(name="%s",\n\tlevels=[\n' % self.name
        for l in self.levels:
            s += '\t\t' + repr(l) + ',\n'
        s += '\t],\n\tlines=[\n'
        for l in self.lines:
            s += '\t\t' + repr(l) + ',\n'
        s += '\t],\n\tcontinua=[\n'
        for c in self.continua:
            s += '\t\t' + repr(c) + ',\n'
        s += '\t],\n\tcollisions=[\n'
        for c in self.collisions:
            s += '\t\t' + repr(c) + ',\n'
        s += '])\n'
        return s
    def __hash__(self):
        return hash(repr(self))
    def replace_atomic_table(self, table: AtomicTable):
        """Swap in a new AtomicTable and re-run component initialisation."""
        # NOTE(review): this print looks like leftover debug output — verify
        # before removing.
        print('Called %s' % self.name)
        self.atomicTable = table
        self.__post_init__()
    def v_broad(self, atmos):
        """Broadening velocity per depth point: thermal plus microturbulent."""
        vTherm = 2.0 * Const.KBoltzmann / (Const.Amu * self.atomicTable[self.name].weight)
        vBroad = np.sqrt(vTherm * atmos.temperature + atmos.vturb**2)
        return vBroad
def reconfigure_atom(atom: 'AtomicModel'):
    """Re-run the atom's component initialisation after external edits."""
    atom.__post_init__()
def avoid_recursion_eq(a, b) -> bool:
    """Compare two attribute values without recursing into full model equality.

    ndarrays compare element-wise; AtomicModel instances compare only by
    name and component counts (breaks the atom <-> component reference
    cycle); everything else falls back to plain equality.
    """
    if isinstance(a, np.ndarray):
        if not np.all(a == b):
            return False
        return True
    if isinstance(a, AtomicModel):
        if a.name != b.name:
            return False
        for attr in ('levels', 'lines', 'continua', 'collisions'):
            if len(getattr(a, attr)) != len(getattr(b, attr)):
                return False
        return True
    if a != b:
        return False
    return True
def model_component_eq(a, b) -> bool:
    """Structural equality for model components, ignoring cached fields.

    Identical objects are equal; objects of unrelated types raise
    NotImplementedError; AtomicTransitions of different concrete types are
    unequal; otherwise fields are compared via avoid_recursion_eq, skipping
    the keys in ``ignoreKeys``.
    """
    if a is b:
        return True
    if type(a) is not type(b):
        if not (isinstance(a, AtomicTransition) and isinstance(b, AtomicTransition)):
            # BUG FIX: `raise NotImplemented` raised a TypeError at runtime
            # (NotImplemented is a constant, not an exception class).
            raise NotImplementedError
        else:
            return False
    ignoreKeys = ['interpolator']
    da = a.__dict__
    db = b.__dict__
    return all([avoid_recursion_eq(da[k], db[k]) for k in da.keys() if k not in ignoreKeys])
@dataclass
class AtomicLevel:
    """One energy level of a model atom."""
    # E: level energy in cm^-1 (converted to SI by E_SI)
    E: float
    # g: statistical weight (degeneracy)
    g: float
    label: str
    # stage: ionisation stage the level belongs to
    stage: int
    # lsCoupling: set True in setup() when J, L, S form a valid LS term
    lsCoupling: bool = False
    atom: AtomicModel = field(init=False)
    J: Optional[Fraction] = None
    L: Optional[int] = None
    S: Optional[Fraction] = None
    def setup(self, atom):
        """Link back to the owning atom and validate LS-coupling data."""
        self.atom = atom
        if not any([x is None for x in [self.J, self.L, self.S]]):
            # Triangle condition |L-S| <= J <= L+S (upper bound checked here).
            if self.J <= self.L + self.S:
                self.lsCoupling = True
    def __eq__(self, other: object) -> bool:
        return model_component_eq(self, other)
    @property
    def E_SI(self):
        # Energy in Joules from the cm^-1 value.
        return self.E * Const.HC / Const.CM_TO_M
    def __repr__(self):
        s = 'AtomicLevel(E=%f, g=%f, label="%s", stage=%d, J=%s, L=%s, S=%s)' % (self.E, self.g, self.label, self.stage, repr(self.J), repr(self.L), repr(self.S))
        return s
class LineType(Enum):
    """Redistribution treatment for a spectral line: complete (CRD) or
    partial (PRD) frequency redistribution."""
    CRD = 0
    PRD = auto()

    def __repr__(self):
        if self is LineType.CRD:
            return 'LineType.CRD'
        if self is LineType.PRD:
            return 'LineType.PRD'
        # Unreachable with the two members above; kept as a safety net.
        raise ValueError('Unknown LineType in LineType.__repr__')
@dataclass
class VdwApprox:
    """Base class for van der Waals broadening approximations; ``vals``
    holds the model-specific coefficients."""
    vals: Sequence[float]

    def setup(self, line: 'AtomicLine', table: 'AtomicTable'):
        """Hook for subclasses to precompute broadening data; no-op here."""
        pass

    def __eq__(self, other: object) -> bool:
        # Equal only for the same concrete subclass with equal coefficients.
        if not isinstance(other, VdwApprox):
            return False
        return type(self) is type(other) and (self.vals == other.vals)
@dataclass(eq=False)
class VdwUnsold(VdwApprox):
    """Unsöld approximation for van der Waals broadening.

    vals holds two coefficients: [0] scales the hydrogen contribution and
    [1] the helium contribution.
    """
    def setup(self, line: 'AtomicLine', table: AtomicTable):
        self.line = line
        if len(self.vals) != 2:
            raise ValueError('VdwUnsold expects 2 coefficients (%s)' % repr(line))
        # Find the first level of the next ionisation stage above the line's
        # upper level (the overlying continuum level).
        Z = line.jLevel.stage + 1
        j = line.j
        ic = j + 1
        while line.atom.levels[ic].stage < Z:
            ic += 1
        cont = line.atom.levels[ic]
        # Difference of mean-square radii between upper and lower level
        # (hydrogenic estimate via the Rydberg energy).
        deltaR = (Const.ERydberg / (cont.E_SI - line.jLevel.E_SI))**2 \
            - (Const.ERydberg / (cont.E_SI - line.iLevel.E_SI))**2
        fourPiEps0 = 4.0 * np.pi * Const.Epsilon0
        # C6^(2/5)-style interaction constant of the Unsöld recipe.
        C625 = (2.5 * Const.QElectron**2 / fourPiEps0 * Const.ABarH / fourPiEps0 \
                * 2 * np.pi * (Z * Const.RBohr)**2 / Const.HPlanck * deltaR)**0.4
        name = line.atom.name
        # Relative-velocity factors (T-independent parts) for He and H perturbers.
        vRel35He = (8.0 * Const.KBoltzmann / (np.pi*Const.Amu * table[name].weight)\
                    * (1.0 + table[name].weight / table['He'].weight))**0.3
        vRel35H = (8.0 * Const.KBoltzmann / (np.pi*Const.Amu * table[name].weight)\
                   * (1.0 + table[name].weight / table['H'].weight))**0.3
        heAbund = table['He'].abundance
        # Temperature- and density-independent prefactor; broaden() applies
        # T^0.3 and the ground-state hydrogen density.
        self.cross = 8.08 * (self.vals[0] * vRel35H \
                             + self.vals[1] * heAbund * vRel35He) * C625
    def broaden(self, temperature, nHGround, broad):
        """Fill ``broad`` in place with the Unsöld broadening rate."""
        broad[:] = self.cross * temperature**0.3 * nHGround
@dataclass
class AtomicTransition:
    """Common base type for radiative transitions (lines and continua)."""

    def __eq__(self, other: object) -> bool:
        # Delegate to the shared structural comparison helper.
        return model_component_eq(self, other)
@dataclass(eq=False)
class AtomicLine(AtomicTransition):
    """Bound-bound transition between lower level ``i`` and upper level ``j``.

    This is an abstract base: the wavelength grid and line physics are
    supplied by concrete subclasses (e.g. VoigtLine); the abstract
    properties below raise NotImplementedError here.
    """
    j: int
    i: int
    # f: oscillator strength
    f: float
    type: LineType
    # NlambdaGen: number of wavelength points to generate for the grid
    NlambdaGen: int
    # qCore/qWing: core and wing extents of the grid in Doppler units
    qCore: float
    qWing: float
    vdw: VdwApprox
    # gRad: radiative damping rate; stark: Stark broadening coefficient
    gRad: float
    stark: float
    gLandeEff: Optional[float] = None
    atom: AtomicModel = field(init=False)
    jLevel: AtomicLevel = field(init=False)
    iLevel: AtomicLevel = field(init=False)
    wavelength: np.ndarray = field(init=False)
    preserveWavelength: bool = False

    def __repr__(self):
        s = 'AtomicLine(j=%d, i=%d, f=%e, type=%s, NlambdaGen=%d, qCore=%f, qWing=%f, vdw=%s, gRad=%e, stark=%f' % (
            self.j, self.i, self.f, repr(self.type), self.NlambdaGen, self.qCore, self.qWing, repr(self.vdw),
            self.gRad, self.stark)
        if self.gLandeEff is not None:
            s += ', gLandeEff=%f' % self.gLandeEff
        s += ')'
        return s

    def __hash__(self):
        return hash(repr(self))

    @property
    def Nlambda(self):
        """Number of points in the line's wavelength grid."""
        return self.wavelength.shape[0]

    # BUG FIX (all abstract members below): `raise NotImplemented` raised a
    # TypeError at runtime because NotImplemented is not an exception class;
    # NotImplementedError is the correct abstract-member placeholder.
    @property
    def lambda0(self) -> float:
        raise NotImplementedError

    @property
    def lambda0_m(self) -> float:
        raise NotImplementedError

    @property
    def Aji(self) -> float:
        raise NotImplementedError

    @property
    def Bji(self) -> float:
        raise NotImplementedError

    @property
    def Bij(self) -> float:
        raise NotImplementedError

    @property
    def polarisable(self) -> bool:
        raise NotImplementedError

    def damping(self, atmos, vBroad, hGround):
        raise NotImplementedError
def fraction_range(start: Fraction, stop: Fraction, step: Fraction=Fraction(1,1)) -> Iterator[Fraction]:
    """Yield Fractions from ``start`` (inclusive) up to ``stop`` (exclusive)
    in increments of ``step`` — an exact-arithmetic analogue of range()."""
    current = start
    while current < stop:
        yield current
        current = current + step
@dataclass
class ZeemanComponents:
    """Parallel arrays describing a line's Zeeman splitting pattern."""
    # alpha: component kind per entry (-1 sigma_b, 0 pi, +1 sigma_r)
    alpha: np.ndarray
    # strength: relative strength of each component (normalised per alpha)
    strength: np.ndarray
    # shift: Lande-factor-weighted magnetic shift of each component
    shift: np.ndarray
@dataclass(eq=False)
class VoigtLine(AtomicLine):
    """Concrete bound-bound line: builds the wavelength grid, evaluates
    Stark/van der Waals broadening, and handles Zeeman splitting."""
    def setup(self, atom):
        # Ensure i is the lower and j the upper level, then link levels and
        # initialise the van der Waals broadening coefficients.
        if self.j < self.i:
            self.i, self.j = self.j, self.i
        self.atom: AtomicModel = atom
        self.jLevel: AtomicLevel = self.atom.levels[self.j]
        self.iLevel: AtomicLevel = self.atom.levels[self.i]
        self.vdw.setup(self, atom.atomicTable)
    def __repr__(self):
        s = 'VoigtLine(j=%d, i=%d, f=%e, type=%s, NlambdaGen=%d, qCore=%f, qWing=%f, vdw=%s, gRad=%e, stark=%f' % (
            self.j, self.i, self.f, repr(self.type), self.NlambdaGen, self.qCore, self.qWing, repr(self.vdw),
            self.gRad, self.stark)
        if self.gLandeEff is not None:
            s += ', gLandeEff=%f' % self.gLandeEff
        s += ')'
        return s
    def linear_stark_broaden(self, atmos):
        """Linear Stark broadening rate (applied only to hydrogen)."""
        # We don't need to read n from the label, we can use the fact that for H -- which is the only atom we have here, g=2n^2
        nUpper = int(np.round(np.sqrt(0.5*self.jLevel.g)))
        nLower = int(np.round(np.sqrt(0.5*self.iLevel.g)))
        a1 = 0.642 if nUpper - nLower == 1 else 1.0
        C = a1 * 0.6 * (nUpper**2 - nLower**2) * Const.CM_TO_M**2
        GStark = C * atmos.ne**(2.0/3.0)
        return GStark
    def stark_broaden(self, atmos):
        """Quadratic/tabulated Stark broadening per depth point.

        stark > 0 scales the classical C4 treatment; stark < 0 is a direct
        coefficient multiplying n_e; stark == 0 disables it. Hydrogen
        additionally gets the linear Stark term.
        """
        if self.stark > 0.0:
            weight = self.atom.atomicTable[self.atom.name].weight
            C = 8.0 * Const.KBoltzmann / (np.pi * Const.Amu * weight)
            Cm = (1.0 + weight / (Const.MElectron / Const.Amu))**(1.0/6.0)
            # NOTE(cmo): 28.0 is average atomic weight
            Cm += (1.0 + weight / (28.0))**(1.0/6.0)
            # Find the overlying continuum level of the next ionisation stage.
            Z = self.iLevel.stage + 1
            ic = self.i + 1
            # NOTE(review): levels[ic] is indexed before the ic bound check —
            # if no overlying stage exists this raises IndexError; verify the
            # level lists always contain a continuum level.
            while self.atom.levels[ic].stage < Z and ic < len(self.atom.levels):
                ic += 1
            if self.atom.levels[ic].stage == self.iLevel.stage:
                raise ValueError('Cant find overlying cont: %s' % repr(self))
            # Effective principal quantum numbers of lower and upper levels.
            E_Ryd = Const.ERydberg / (1.0 + Const.MElectron / (weight * Const.Amu))
            neff_l = Z * np.sqrt(E_Ryd / (self.atom.levels[ic].E_SI - self.iLevel.E_SI))
            neff_u = Z * np.sqrt(E_Ryd / (self.atom.levels[ic].E_SI - self.jLevel.E_SI))
            C4 = Const.QElectron**2 / (4.0 * np.pi * Const.Epsilon0) \
                * Const.RBohr \
                * (2.0 * np.pi * Const.RBohr**2 / Const.HPlanck) / (18.0 * Z**4) \
                * ((neff_u * (5.0 * neff_u**2 + 1.0))**2 \
                   - (neff_l * (5.0 * neff_l**2 + 1.0))**2)
            cStark23 = 11.37 * (self.stark * C4)**(2.0/3.0)
            vRel = (C * atmos.temperature)**(1.0/6.0) * Cm
            stark = cStark23 * vRel * atmos.ne
        elif self.stark < 0.0:
            stark = np.abs(self.stark) * atmos.ne
        else:
            stark = np.zeros(atmos.Nspace)
        if self.atom.name.upper().strip() == 'H':
            stark += self.linear_stark_broaden(atmos)
        return stark
    def setup_wavelength(self):
        """Build the line's wavelength grid (dense core, stretched wings)."""
        if self.preserveWavelength:
            print('preserveWavelength set on %s, ignoring NlambdaGen' % (repr(self)))
            return
        # Compute default lambda grid
        Nlambda = self.NlambdaGen // 2 if self.NlambdaGen % 2 == 1 else (self.NlambdaGen - 1) // 2
        Nlambda += 1
        if self.qWing <= 2.0 * self.qCore:
            # Use linear scale to qWing
            print("Ratio of qWing / (2*qCore) <= 1\n Using linear spacing for transition %d->%d" % (self.j, self.i))
            beta = 1.0
        else:
            beta = self.qWing / (2.0 * self.qCore)
        # Parameters of the linear+exponential spacing q(n) = a*(n + e^{bn}-1).
        y = beta + np.sqrt(beta**2 + (beta - 1.0) * Nlambda + 2.0 - 3.0 * beta)
        b = 2.0 * np.log(y) / (Nlambda - 1)
        a = self.qWing / (Nlambda - 2.0 + y**2)
        self.a = a
        self.b = b
        self.y = y
        nl = np.arange(Nlambda)
        self.q: np.ndarray = a * (nl + (np.exp(b * nl) - 1.0))
        # Convert Doppler units to wavelength and mirror about line centre.
        qToLambda = self.lambda0 * (Const.VMICRO_CHAR / Const.CLight)
        NlambdaFull = 2 * Nlambda - 1
        line = np.zeros(NlambdaFull)
        Nmid = Nlambda - 1
        line[Nmid] = self.lambda0
        line[:Nmid][::-1] = self.lambda0 - qToLambda * self.q[1:]
        line[Nmid+1:] = self.lambda0 + qToLambda * self.q[1:]
        self.wavelength = line
    def zeeman_components(self) -> Optional[ZeemanComponents]:
        """Zeeman pattern of the line, or None when it cannot be computed."""
        # Just do basic anomalous Zeeman splitting
        if self.gLandeEff is not None:
            alpha = np.array([-1, 0, 1], dtype=np.int32)
            strength = np.ones(3)
            shift = alpha * self.gLandeEff
            return ZeemanComponents(alpha, strength, shift)
        # Do LS coupling
        if self.iLevel.lsCoupling and self.jLevel.lsCoupling:
            # Mypy... you're a pain sometimes... (even if you are technically correct)
            Jl = cast(Fraction, self.iLevel.J)
            Ll = cast(int, self.iLevel.L)
            Sl = cast(Fraction, self.iLevel.S)
            Ju = cast(Fraction, self.jLevel.J)
            Lu = cast(int, self.jLevel.L)
            Su = cast(Fraction, self.jLevel.S)
            gLl = lande_factor(Jl, Ll, Sl)
            gLu = lande_factor(Ju, Lu, Su)
            alpha = []
            strength = []
            shift = []
            norm = np.zeros(3)
            # Enumerate all |delta m| <= 1 sublevel pairs and accumulate the
            # per-alpha normalisation.
            for ml in fraction_range(-Jl, Jl+1):
                for mu in fraction_range(-Ju, Ju+1):
                    if abs(ml - mu) <= 1.0:
                        alpha.append(int(ml - mu))
                        shift.append(gLl*ml - gLu*mu)
                        strength.append(zeeman_strength(Ju, mu, Jl, ml))
                        norm[alpha[-1]+1] += strength[-1]
            alpha = np.array(alpha, dtype=np.int32)
            strength = np.array(strength)
            shift = np.array(shift)
            strength /= norm[alpha + 1]
            return ZeemanComponents(alpha, strength, shift)
        return None
    def polarised_wavelength(self, bChar: Optional[float]=None) -> Sequence[float]:
        """Wavelength grid extended for Zeeman-split polarised components."""
        ## NOTE(cmo): bChar in TESLA
        if any([not self.iLevel.lsCoupling, not self.jLevel.lsCoupling]) or \
           (self.gLandeEff is None):
            print("Can't treat line %d->%d with polarization" % (self.j, self.i))
            return self.wavelength
        if bChar is None:
            bChar = Const.B_CHAR
        # /* --- When magnetic field is present account for denser
        #        wavelength spacing in the unshifted \pi component, and the
        #        red and blue-shifted circularly polarized components.
        #        First, get characteristic Zeeman splitting -- ------------ */
        gLandeEff = effective_lande(self)
        qBChar = gLandeEff * (Const.QElectron / (4.0 * np.pi * Const.MElectron)) * \
            (self.lambda0_m) * bChar / Const.VMICRO_CHAR
        if 0.5 * qBChar >= self.qCore:
            print("Characteristic Zeeman splitting qBChar (=%f) >= 2*qCore for transition %d->%d" % (qBChar, self.j, self.i))
        # Insert extra points around the shifted sigma components by reusing
        # the a/b spacing parameters from setup_wavelength.
        Nlambda = self.q.shape[0]
        NB = np.searchsorted(self.q, 0.5 * qBChar)
        qBShift = 2 * self.q[NB]
        qB = np.zeros(self.q.shape[0] + 2 * NB)
        qB[:Nlambda] = self.q
        nl = np.arange(NB+1, 2*NB+1)
        qB[NB+1:2*NB+1] = qBShift - self.a * (2*NB - nl + \
                                              (np.exp(self.b * (2 * NB - nl)) -1.0))
        nl = np.arange(2*NB+1, Nlambda+2*NB)
        qB[2*NB+1:] = qBShift + self.a * (nl - 2*NB + \
                                          (np.exp(self.b * (nl - 2 * NB)) - 1.0))
        # Mirror about line centre as in setup_wavelength.
        line = np.zeros(2 * qB.shape[0] - 1)
        Nmid = qB.shape[0] - 1
        qToLambda = self.lambda0 * (Const.VMICRO_CHAR / Const.CLight)
        line[Nmid] = self.lambda0
        line[:Nmid][::-1] = self.lambda0 - qToLambda * qB[1:]
        line[Nmid+1:] = self.lambda0 + qToLambda * qB[1:]
        return line
    @property
    def lambda0(self) -> float:
        # Rest wavelength in nm.
        return self.lambda0_m / Const.NM_TO_M
    @property
    def lambda0_m(self) -> float:
        # Rest wavelength in metres, from the level energy difference.
        deltaE = self.jLevel.E_SI - self.iLevel.E_SI
        return Const.HC / deltaE
    @property
    def Aji(self) -> float:
        """Einstein A (spontaneous emission) from the oscillator strength."""
        gRatio = self.iLevel.g / self.jLevel.g
        C: float = 2 * np.pi * (Const.QElectron / Const.Epsilon0) \
            * (Const.QElectron / Const.MElectron) / Const.CLight
        return C / self.lambda0_m**2 * gRatio * self.f
    @property
    def Bji(self) -> float:
        """Einstein B for stimulated emission."""
        return self.lambda0_m**3 / (2.0 * Const.HC) * self.Aji
    @property
    def Bij(self) -> float:
        """Einstein B for absorption (detailed balance with Bji)."""
        return self.jLevel.g / self.iLevel.g * self.Bji
    @property
    def polarisable(self) -> bool:
        # Polarisable if LS coupling data exists or gLandeEff is given.
        return (self.iLevel.lsCoupling and self.jLevel.lsCoupling) or (self.gLandeEff is not None)
    def damping(self, atmos, vBroad, hGround):
        """Voigt damping parameter and total elastic collision rate."""
        aDamp = np.zeros(atmos.Nspace)
        Qelast = np.zeros(atmos.Nspace)
        self.vdw.broaden(atmos.temperature, hGround, aDamp)
        Qelast += aDamp
        Qelast += self.stark_broaden(atmos)
        cDop = self.lambda0_m / (4.0 * np.pi)
        aDamp = (self.gRad + Qelast) * cDop / vBroad
        return aDamp, Qelast
def zeeman_strength(Ju: Fraction, Mu: Fraction, Jl: Fraction, Ml: Fraction) -> float:
    """Relative strength of the Zeeman component (Ju, Mu) -> (Jl, Ml).

    alpha = Ml - Mu selects pi (0), sigma_b (-1) or sigma_r (+1); dJ must be
    -1, 0 or +1. Values are 2x those in del Toro Iniesta (p. 137); callers
    normalise afterwards, so the scale is irrelevant.

    Raises ValueError for invalid dJ or alpha (the original left ``s``
    unbound for |alpha| > 1, producing an UnboundLocalError).
    """
    alpha = int(Ml - Mu)
    dJ = int(Ju - Jl)
    if dJ == 0:  # jMin = ju = jl
        if alpha == 0:  # pi transitions
            s = 2.0 * Mu**2
        elif alpha == -1:  # sigma_b transitions
            s = (Ju + Mu) * (Ju - Mu + 1.0)
        elif alpha == 1:  # sigma_r transitions
            s = (Ju - Mu) * (Ju + Mu + 1.0)
        else:
            raise ValueError('Invalid alpha: %d' % alpha)
    elif dJ == 1:  # jMin = jl, Mi = Ml
        if alpha == 0:  # pi transitions
            s = 2.0 * ((Jl + 1)**2 - Ml**2)
        elif alpha == -1:  # sigma_b transitions
            s = (Jl + Ml + 1) * (Jl + Ml + 2.0)
        elif alpha == 1:  # sigma_r transitions
            s = (Jl - Ml + 1.0) * (Jl - Ml + 2.0)
        else:
            raise ValueError('Invalid alpha: %d' % alpha)
    elif dJ == -1:  # jMin = ju, Mi = Mu
        if alpha == 0:  # pi transitions
            s = 2.0 * ((Ju + 1)**2 - Mu**2)
        elif alpha == -1:  # sigma_b transitions
            s = (Ju - Mu + 1) * (Ju - Mu + 2.0)
        elif alpha == 1:  # sigma_r transitions
            s = (Ju + Mu + 1.0) * (Ju + Mu + 2.0)
        else:
            raise ValueError('Invalid alpha: %d' % alpha)
    else:
        raise ValueError('Invalid dJ: %d' % dJ)
    return float(s)
def lande_factor(J: Fraction, L: int, S: Fraction) -> float:
    """Landé g-factor of an LS-coupled level; defined as 0 for J = 0."""
    if J == 0.0:
        return 0.0
    spin_orbit = S * (S + 1.0) - L * (L + 1)
    return float(1.5 + spin_orbit / (2.0 * J * (J + 1.0)))
def effective_lande(line: 'AtomicLine'):
    """Effective Landé factor of a line: the explicit gLandeEff when set,
    otherwise computed from the LS-coupling data of both levels."""
    if line.gLandeEff is not None:
        return line.gLandeEff
    lower = line.iLevel
    upper = line.jLevel
    if any(x is None for x in [lower.J, lower.L, lower.S, upper.J, upper.L, upper.S]):
        raise ValueError('Cannot compute gLandeEff as gLandeEff not set and some of J, L and S None for line %s'%repr(line))
    gLower = lande_factor(lower.J, lower.L, lower.S)  # type: ignore
    gUpper = lande_factor(upper.J, upper.L, upper.S)  # type: ignore
    term = upper.J * (upper.J + 1.0) - lower.J * (lower.J + 1.0)  # type: ignore
    return 0.5 * (gUpper + gLower) + 0.25 * (gUpper - gLower) * term
@dataclass(eq=False)
class AtomicContinuum(AtomicTransition):
    """Base class for a bound-free transition between levels i and j.

    Subclasses fill wavelength and alpha (cross-section) in setup().
    """
    j: int
    i: int
    atom: AtomicModel = field(init=False)
    jLevel: AtomicLevel = field(init=False)
    iLevel: AtomicLevel = field(init=False)
    wavelength: np.ndarray = field(init=False)
    alpha: np.ndarray = field(init=False)
    def __repr__(self):
        s = 'AtomicContinuum(j=%d, i=%d)' % (self.j, self.i)
        return s
    def compute_alpha(self, wavelength) -> np.ndarray:
        """Cross-section at the given wavelengths; implemented by subclasses."""
        pass
    def __hash__(self):
        return hash(repr(self))
    @property
    def lambda0(self) -> float:
        # Edge wavelength in nm.
        return self.lambda0_m / Const.NM_TO_M
    @property
    def lambdaEdge(self) -> float:
        # The ionisation edge coincides with lambda0 for a continuum.
        return self.lambda0
    @property
    def lambda0_m(self) -> float:
        # Edge wavelength in metres, from the level energy difference.
        deltaE = self.jLevel.E_SI - self.iLevel.E_SI
        return Const.HC / deltaE
@dataclass(eq=False)
class ExplicitContinuum(AtomicContinuum):
    """Bound-free transition with a tabulated cross-section.

    alphaGrid rows are (wavelength, alpha) pairs, stored in descending
    wavelength order in the input and reversed to ascending on setup.
    """
    alphaGrid: Sequence[Sequence[float]]
    def setup(self, atom):
        # Ensure i is the lower level and unpack/validate the table.
        if self.j < self.i:
            self.i, self.j = self.j, self.i
        self.atom = atom
        lambdaAlpha = np.array(self.alphaGrid).T
        self.wavelength = np.copy(lambdaAlpha[0, ::-1])
        if not np.all(np.diff(self.wavelength) > 0.0):
            raise ValueError('Wavelength array not monotonically increasing in continuum %s' % repr(self))
        self.alpha = np.copy(lambdaAlpha[1, ::-1])
        self.jLevel: AtomicLevel = atom.levels[self.j]
        self.iLevel: AtomicLevel = atom.levels[self.i]
        # Extend the table flat up to the ionisation edge if it stops short.
        if self.lambdaEdge > self.wavelength[-1]:
            wav = np.concatenate((self.wavelength, np.array([self.lambdaEdge])))
            self.wavelength = wav
            self.alpha = np.concatenate((self.alpha, np.array([self.alpha[-1]])))
    def __repr__(self):
        s = 'ExplicitContinuum(j=%d, i=%d, alphaGrid=%s)' % (self.j, self.i, repr(self.alphaGrid))
        return s
    def compute_alpha(self, wavelength) -> np.ndarray:
        """Interpolate the tabulated cross-section onto ``wavelength``.

        Uses cubic interpolation, zero outside [minLambda, lambdaEdge];
        falls back to linear if the cubic fit goes negative anywhere.
        """
        alpha = interp1d(self.wavelength, self.alpha, kind=3, bounds_error=False, fill_value=0.0)(wavelength)
        alpha[wavelength < self.minLambda] = 0.0
        alpha[wavelength > self.lambdaEdge] = 0.0
        if np.any(alpha < 0.0):
            alpha = interp1d(self.wavelength, self.alpha, bounds_error=False, fill_value=0.0)(wavelength)
        return alpha
    @property
    def Nlambda(self) -> int:
        # Number of tabulated wavelength points.
        return self.wavelength.shape[0]
    @property
    def lambda0(self) -> float:
        return self.lambda0_m / Const.NM_TO_M
    @property
    def lambdaEdge(self) -> float:
        return self.lambda0
    @property
    def minLambda(self) -> float:
        # Shortest tabulated wavelength; alpha is zero below this.
        return self.wavelength[0]
    @property
    def lambda0_m(self) -> float:
        deltaE = self.jLevel.E_SI - self.iLevel.E_SI
        return Const.HC / deltaE
@dataclass(eq=False)
class HydrogenicContinuum(AtomicContinuum):
    """Bound-free transition with a hydrogenic (Kramers + Gaunt factor)
    cross-section scaled to alpha0 at the ionisation edge."""
    # alpha0: cross-section at the edge wavelength
    alpha0: float
    # minLambda: shortest wavelength of the generated grid
    minLambda: float
    # NlambdaGen: number of points in the generated wavelength grid
    NlambdaGen: int
    def __repr__(self):
        s = 'HydrogenicContinuum(j=%d, i=%d, alpha0=%e, minLambda=%f, NlambdaGen=%d)' % (self.j, self.i, self.alpha0, self.minLambda, self.NlambdaGen)
        return s
    def setup(self, atom):
        # Ensure i is the lower level, link levels, then build the grid.
        if self.j < self.i:
            self.i, self.j = self.j, self.i
        self.atom = atom
        self.jLevel: AtomicLevel = atom.levels[self.j]
        self.iLevel: AtomicLevel = atom.levels[self.i]
        if self.minLambda >= self.lambda0:
            raise ValueError('Minimum wavelength is larger than continuum edge at %f [nm] in continuum %s' % (self.lambda0, repr(self)))
        self.wavelength = np.linspace(self.minLambda, self.lambdaEdge, self.NlambdaGen)
        self.alpha = self.compute_alpha(self.wavelength)
    def compute_alpha(self, wavelength) -> np.ndarray:
        """Hydrogenic cross-section at the given wavelengths (zero outside
        [minLambda, lambdaEdge]), normalised to alpha0 at the edge."""
        # if self.atom.name.strip() != 'H':
        # NOTE(cmo): As it should be, the general case is equivalent for H
        Z = self.jLevel.stage
        # Effective principal quantum number from the level energy gap.
        nEff = Z * np.sqrt(Const.ERydberg / (self.jLevel.E_SI - self.iLevel.E_SI))
        gbf0 = gaunt_bf(self.lambda0, nEff, Z)
        gbf = gaunt_bf(wavelength, nEff, Z)
        # Kramers lambda^3 law corrected by the ratio of Gaunt factors.
        alpha = self.alpha0 * gbf / gbf0 * (wavelength / self.lambda0)**3
        alpha[wavelength < self.minLambda] = 0.0
        alpha[wavelength > self.lambdaEdge] = 0.0
        return alpha
        # else:
        #     sigma0 = 32.0 / (3.0 * np.sqrt(3.0)) * Const.Q_ELECTRON**2 / (4.0 * np.pi * Const.EPSILON_0) / (Const.M_ELECTRON * Const.CLIGHT) * Const.HPLANCK / (2.0 * Const.E_RYDBERG)
        #     nEff = np.sqrt(Const.E_RYDBERG / (self.jLevel.E_SI - self.iLevel.E_SI))
        #     gbf = gaunt_bf(wavelength, nEff, self.iLevel.stage+1)
        #     sigma = sigma0 * nEff * gbf * (wavelength / self.lambdaEdge)**3
        #     sigma[wavelength < self.minLambda] = 0.0
        #     sigma[wavelength > self.lambdaEdge] = 0.0
        #     return sigma
    @property
    def lambda0(self) -> float:
        return self.lambda0_m / Const.NM_TO_M
    @property
    def lambdaEdge(self) -> float:
        return self.lambda0
    @property
    def lambda0_m(self) -> float:
        deltaE = self.jLevel.E_SI - self.iLevel.E_SI
        return Const.HC / deltaE
    @property
    def Nlambda(self) -> int:
        return self.wavelength.shape[0]
@dataclass
class CollisionalRates:
j: int
i: int
atom: AtomicModel = field(init=False)
def __repr__(self):
s = 'CollisionalRates(j=%d, i=%d, temperature=%s, rates=%s)' % (self.j, self.i, repr(self.temperature), repr(self.rates))
return s
def setup(self, atom):
pass
def compute_rates(self, atmos, nstar, Cmat):
pass
def __eq__(self, other: object) -> bool:
return model_component_eq(self, other)
def __getstate__(self):
state = self.__dict__.copy()
try:
del state['interpolator']
except KeyError:
pass
return state | [
"enum.auto",
"numpy.sqrt",
"numpy.log",
"dataclasses.dataclass",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.arange",
"numpy.searchsorted",
"fractions.Fraction",
"numpy.diff",
"numpy.exp",
"numpy.linspace",
"dataclasses.field",
"numpy.abs",
"utils.gaunt_bf",
"numpy.ones",
"nu... | [((4598, 4617), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (4607, 4617), False, 'from dataclasses import dataclass, field\n'), ((6163, 6182), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (6172, 6182), False, 'from dataclasses import dataclass, field\n'), ((7991, 8010), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (8000, 8010), False, 'from dataclasses import dataclass, field\n'), ((19054, 19073), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (19063, 19073), False, 'from dataclasses import dataclass, field\n'), ((19893, 19912), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (19902, 19912), False, 'from dataclasses import dataclass, field\n'), ((21837, 21856), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (21846, 21856), False, 'from dataclasses import dataclass, field\n'), ((583, 629), 'dataclasses.field', 'field', ([], {'default_factory': 'get_global_atomic_table'}), '(default_factory=get_global_atomic_table)\n', (588, 629), False, 'from dataclasses import dataclass, field\n'), ((3271, 3288), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (3276, 3288), False, 'from dataclasses import dataclass, field\n'), ((4023, 4029), 'enum.auto', 'auto', ([], {}), '()\n', (4027, 4029), False, 'from enum import Enum, auto\n'), ((6441, 6458), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (6446, 6458), False, 'from dataclasses import dataclass, field\n'), ((6485, 6502), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (6490, 6502), False, 'from dataclasses import dataclass, field\n'), ((6529, 6546), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (6534, 6546), False, 'from dataclasses import dataclass, field\n'), ((6576, 6593), 'dataclasses.field', 'field', ([], 
{'init': '(False)'}), '(init=False)\n', (6581, 6593), False, 'from dataclasses import dataclass, field\n'), ((7780, 7794), 'fractions.Fraction', 'Fraction', (['(1)', '(1)'], {}), '(1, 1)\n', (7788, 7794), False, 'from fractions import Fraction\n'), ((19161, 19178), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (19166, 19178), False, 'from dataclasses import dataclass, field\n'), ((19205, 19222), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (19210, 19222), False, 'from dataclasses import dataclass, field\n'), ((19249, 19266), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (19254, 19266), False, 'from dataclasses import dataclass, field\n'), ((19296, 19313), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (19301, 19313), False, 'from dataclasses import dataclass, field\n'), ((19338, 19355), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (19343, 19355), False, 'from dataclasses import dataclass, field\n'), ((24332, 24349), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (24337, 24349), False, 'from dataclasses import dataclass, field\n'), ((1983, 2037), 'numpy.sqrt', 'np.sqrt', (['(vTherm * atmos.temperature + atmos.vturb ** 2)'], {}), '(vTherm * atmos.temperature + atmos.vturb ** 2)\n', (1990, 2037), True, 'import numpy as np\n'), ((11802, 11820), 'numpy.arange', 'np.arange', (['Nlambda'], {}), '(Nlambda)\n', (11811, 11820), True, 'import numpy as np\n'), ((12008, 12029), 'numpy.zeros', 'np.zeros', (['NlambdaFull'], {}), '(NlambdaFull)\n', (12016, 12029), True, 'import numpy as np\n'), ((15002, 15039), 'numpy.searchsorted', 'np.searchsorted', (['self.q', '(0.5 * qBChar)'], {}), '(self.q, 0.5 * qBChar)\n', (15017, 15039), True, 'import numpy as np\n'), ((15086, 15120), 'numpy.zeros', 'np.zeros', (['(self.q.shape[0] + 2 * NB)'], {}), '(self.q.shape[0] + 2 * NB)\n', (15094, 15120), True, 'import numpy as 
np\n'), ((15165, 15194), 'numpy.arange', 'np.arange', (['(NB + 1)', '(2 * NB + 1)'], {}), '(NB + 1, 2 * NB + 1)\n', (15174, 15194), True, 'import numpy as np\n'), ((15341, 15380), 'numpy.arange', 'np.arange', (['(2 * NB + 1)', '(Nlambda + 2 * NB)'], {}), '(2 * NB + 1, Nlambda + 2 * NB)\n', (15350, 15380), True, 'import numpy as np\n'), ((15522, 15551), 'numpy.zeros', 'np.zeros', (['(2 * qB.shape[0] - 1)'], {}), '(2 * qB.shape[0] - 1)\n', (15530, 15551), True, 'import numpy as np\n'), ((16750, 16772), 'numpy.zeros', 'np.zeros', (['atmos.Nspace'], {}), '(atmos.Nspace)\n', (16758, 16772), True, 'import numpy as np\n'), ((16790, 16812), 'numpy.zeros', 'np.zeros', (['atmos.Nspace'], {}), '(atmos.Nspace)\n', (16798, 16812), True, 'import numpy as np\n'), ((20196, 20225), 'numpy.copy', 'np.copy', (['lambdaAlpha[0, ::-1]'], {}), '(lambdaAlpha[0, ::-1])\n', (20203, 20225), True, 'import numpy as np\n'), ((20409, 20438), 'numpy.copy', 'np.copy', (['lambdaAlpha[1, ::-1]'], {}), '(lambdaAlpha[1, ::-1])\n', (20416, 20438), True, 'import numpy as np\n'), ((21213, 21232), 'numpy.any', 'np.any', (['(alpha < 0.0)'], {}), '(alpha < 0.0)\n', (21219, 21232), True, 'import numpy as np\n'), ((22596, 22657), 'numpy.linspace', 'np.linspace', (['self.minLambda', 'self.lambdaEdge', 'self.NlambdaGen'], {}), '(self.minLambda, self.lambdaEdge, self.NlambdaGen)\n', (22607, 22657), True, 'import numpy as np\n'), ((23018, 23049), 'utils.gaunt_bf', 'gaunt_bf', (['self.lambda0', 'nEff', 'Z'], {}), '(self.lambda0, nEff, Z)\n', (23026, 23049), False, 'from utils import gaunt_bf\n'), ((23064, 23093), 'utils.gaunt_bf', 'gaunt_bf', (['wavelength', 'nEff', 'Z'], {}), '(wavelength, nEff, Z)\n', (23072, 23093), False, 'from utils import gaunt_bf\n'), ((2213, 2227), 'numpy.all', 'np.all', (['(a == b)'], {}), '(a == b)\n', (2219, 2227), True, 'import numpy as np\n'), ((11579, 11641), 'numpy.sqrt', 'np.sqrt', (['(beta ** 2 + (beta - 1.0) * Nlambda + 2.0 - 3.0 * beta)'], {}), '(beta ** 2 + (beta - 1.0) * 
Nlambda + 2.0 - 3.0 * beta)\n', (11586, 11641), True, 'import numpy as np\n'), ((12425, 12461), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {'dtype': 'np.int32'}), '([-1, 0, 1], dtype=np.int32)\n', (12433, 12461), True, 'import numpy as np\n'), ((12485, 12495), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (12492, 12495), True, 'import numpy as np\n'), ((12799, 12828), 'typing.cast', 'cast', (['Fraction', 'self.iLevel.J'], {}), '(Fraction, self.iLevel.J)\n', (12803, 12828), False, 'from typing import List, Sequence, Optional, Any, Iterator, cast\n'), ((12846, 12870), 'typing.cast', 'cast', (['int', 'self.iLevel.L'], {}), '(int, self.iLevel.L)\n', (12850, 12870), False, 'from typing import List, Sequence, Optional, Any, Iterator, cast\n'), ((12888, 12917), 'typing.cast', 'cast', (['Fraction', 'self.iLevel.S'], {}), '(Fraction, self.iLevel.S)\n', (12892, 12917), False, 'from typing import List, Sequence, Optional, Any, Iterator, cast\n'), ((12935, 12964), 'typing.cast', 'cast', (['Fraction', 'self.jLevel.J'], {}), '(Fraction, self.jLevel.J)\n', (12939, 12964), False, 'from typing import List, Sequence, Optional, Any, Iterator, cast\n'), ((12982, 13006), 'typing.cast', 'cast', (['int', 'self.jLevel.L'], {}), '(int, self.jLevel.L)\n', (12986, 13006), False, 'from typing import List, Sequence, Optional, Any, Iterator, cast\n'), ((13024, 13053), 'typing.cast', 'cast', (['Fraction', 'self.jLevel.S'], {}), '(Fraction, self.jLevel.S)\n', (13028, 13053), False, 'from typing import List, Sequence, Optional, Any, Iterator, cast\n'), ((13232, 13243), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (13240, 13243), True, 'import numpy as np\n'), ((13659, 13690), 'numpy.array', 'np.array', (['alpha'], {'dtype': 'np.int32'}), '(alpha, dtype=np.int32)\n', (13667, 13690), True, 'import numpy as np\n'), ((13714, 13732), 'numpy.array', 'np.array', (['strength'], {}), '(strength)\n', (13722, 13732), True, 'import numpy as np\n'), ((13753, 13768), 'numpy.array', 'np.array', 
(['shift'], {}), '(shift)\n', (13761, 13768), True, 'import numpy as np\n'), ((20143, 20167), 'numpy.array', 'np.array', (['self.alphaGrid'], {}), '(self.alphaGrid)\n', (20151, 20167), True, 'import numpy as np\n'), ((21009, 21094), 'scipy.interpolate.interp1d', 'interp1d', (['self.wavelength', 'self.alpha'], {'kind': '(3)', 'bounds_error': '(False)', 'fill_value': '(0.0)'}), '(self.wavelength, self.alpha, kind=3, bounds_error=False,\n fill_value=0.0)\n', (21017, 21094), False, 'from scipy.interpolate import interp1d\n'), ((22939, 23002), 'numpy.sqrt', 'np.sqrt', (['(Const.ERydberg / (self.jLevel.E_SI - self.iLevel.E_SI))'], {}), '(Const.ERydberg / (self.jLevel.E_SI - self.iLevel.E_SI))\n', (22946, 23002), True, 'import numpy as np\n'), ((8967, 8995), 'numpy.sqrt', 'np.sqrt', (['(0.5 * self.jLevel.g)'], {}), '(0.5 * self.jLevel.g)\n', (8974, 8995), True, 'import numpy as np\n'), ((9026, 9054), 'numpy.sqrt', 'np.sqrt', (['(0.5 * self.iLevel.g)'], {}), '(0.5 * self.iLevel.g)\n', (9033, 9054), True, 'import numpy as np\n'), ((10046, 10109), 'numpy.sqrt', 'np.sqrt', (['(E_Ryd / (self.atom.levels[ic].E_SI - self.iLevel.E_SI))'], {}), '(E_Ryd / (self.atom.levels[ic].E_SI - self.iLevel.E_SI))\n', (10053, 10109), True, 'import numpy as np\n'), ((10135, 10198), 'numpy.sqrt', 'np.sqrt', (['(E_Ryd / (self.atom.levels[ic].E_SI - self.jLevel.E_SI))'], {}), '(E_Ryd / (self.atom.levels[ic].E_SI - self.jLevel.E_SI))\n', (10142, 10198), True, 'import numpy as np\n'), ((10788, 10810), 'numpy.zeros', 'np.zeros', (['atmos.Nspace'], {}), '(atmos.Nspace)\n', (10796, 10810), True, 'import numpy as np\n'), ((11658, 11667), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (11664, 11667), True, 'import numpy as np\n'), ((21254, 21327), 'scipy.interpolate.interp1d', 'interp1d', (['self.wavelength', 'self.alpha'], {'bounds_error': '(False)', 'fill_value': '(0.0)'}), '(self.wavelength, self.alpha, bounds_error=False, fill_value=0.0)\n', (21262, 21327), False, 'from scipy.interpolate import 
interp1d\n'), ((10724, 10742), 'numpy.abs', 'np.abs', (['self.stark'], {}), '(self.stark)\n', (10730, 10742), True, 'import numpy as np\n'), ((11861, 11875), 'numpy.exp', 'np.exp', (['(b * nl)'], {}), '(b * nl)\n', (11867, 11875), True, 'import numpy as np\n'), ((20248, 20272), 'numpy.diff', 'np.diff', (['self.wavelength'], {}), '(self.wavelength)\n', (20255, 20272), True, 'import numpy as np\n'), ((20650, 20677), 'numpy.array', 'np.array', (['[self.lambdaEdge]'], {}), '([self.lambdaEdge])\n', (20658, 20677), True, 'import numpy as np\n'), ((20767, 20793), 'numpy.array', 'np.array', (['[self.alpha[-1]]'], {}), '([self.alpha[-1]])\n', (20775, 20793), True, 'import numpy as np\n'), ((15290, 15320), 'numpy.exp', 'np.exp', (['(self.b * (2 * NB - nl))'], {}), '(self.b * (2 * NB - nl))\n', (15296, 15320), True, 'import numpy as np\n'), ((15468, 15498), 'numpy.exp', 'np.exp', (['(self.b * (nl - 2 * NB))'], {}), '(self.b * (nl - 2 * NB))\n', (15474, 15498), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from math import ceil, sqrt
import numpy as np
from functools import partial
from pyKrig.utilities import pairwise_distance, mmcriterion
def latin_hypercube(nsample, ndv):
"""
create sample points for latin hypercube sampling
:param nsample: number of samples
:param ndv: number of design variables
"""
min_level = (1 / nsample) * 0.5
max_level = 1 - min_level
levels = np.linspace(min_level, max_level, nsample)
lhs = np.empty((nsample, ndv))
index_levels = np.arange(nsample)
for j in range(ndv):
order = np.random.permutation(index_levels)
lhs[:, j] = levels[order]
return lhs
def perturbate(X):
"""
randomly choose a pair of sampling points, and interchange the values of a randomly chosen design variable
"""
ns, ndv = X.shape
is1 = np.random.randint(ns)
is2 = np.random.randint(ns)
idv = np.random.randint(ndv)
X[is1, idv], X[is2, idv] = X[is2, idv], X[is1, idv]
def optimize_lhs(X, criterion_func):
"""
optimize a latin hypercube via simulated annealing
:param X: a latin hypercube
:param criterion_func: the function used to evaluate the latin hypercube
"""
# initialize
phi = criterion_func(X)
phi_best = phi
Xbest = np.array(X, copy=True)
Xtry = np.empty_like(X)
# calculate initial temperature by sampling the average change of criterion
n_test = 30
avg_delta_phi = 0
cnt = 0
for _ in range(n_test):
Xtry[:] = X
perturbate(Xtry)
phi_try = criterion_func(Xtry)
delta_phi = phi_try - phi
if delta_phi > 0:
cnt += 1
avg_delta_phi += delta_phi
if cnt == 0:
temp_init = 1.
else:
avg_delta_phi /= cnt
temp_init = - avg_delta_phi / np.log(0.99)
temp_fin = temp_init * 1e-6
cool_rate = 0.95
max_perturbation = ceil(sqrt(X.shape[1]))
temp = temp_init
# optimize lhs via simmulated annealing
while temp > temp_fin:
Xtry[:] = X
for _ in range(max_perturbation):
perturbate(Xtry)
phi_try = criterion_func(Xtry)
if phi_try < phi_best:
Xbest[:] = Xtry
phi_best = phi_try
delta_phi = phi_try - phi
if delta_phi < 0:
X[:] = Xtry
phi = phi_try
break
elif np.exp(- delta_phi / temp) > np.random.rand():
X[:] = Xtry
phi = phi_try
temp *= cool_rate
X[:] = Xbest
def optimal_latin_hypercube(nsample, ndv, metric="euclidean"):
"""
create an optimal lhs using Morris & Mitchel's method (1995)
:param nsample: number of sample points
:param ndv: number of design variables
:param metric: metric used to calculate the MM criterion; use "manhattan" or "euclidean"
"""
X = latin_hypercube(nsample, ndv)
Xbest = np.array(X, copy=True)
distbest = pairwise_distance(Xbest, metric)
Xtry = np.empty_like(X)
disttry = np.empty_like(distbest)
for p in (1, 2, 5, 10, 20, 50, 100):
cfunc = partial(mmcriterion, p=p, metric=metric)
Xtry[:] = X
optimize_lhs(Xtry, cfunc)
disttry[:] = pairwise_distance(Xtry, metric)
for dtry, dbest in zip(disttry, distbest):
if dtry > dbest:
Xbest[:] = Xtry
distbest[:] = disttry
break
elif dtry < dbest:
break
else:
continue
return Xbest
| [
"numpy.random.rand",
"numpy.log",
"math.sqrt",
"numpy.exp",
"numpy.array",
"numpy.random.randint",
"numpy.linspace",
"numpy.empty",
"numpy.empty_like",
"functools.partial",
"pyKrig.utilities.pairwise_distance",
"numpy.arange",
"numpy.random.permutation"
] | [((441, 483), 'numpy.linspace', 'np.linspace', (['min_level', 'max_level', 'nsample'], {}), '(min_level, max_level, nsample)\n', (452, 483), True, 'import numpy as np\n'), ((495, 519), 'numpy.empty', 'np.empty', (['(nsample, ndv)'], {}), '((nsample, ndv))\n', (503, 519), True, 'import numpy as np\n'), ((540, 558), 'numpy.arange', 'np.arange', (['nsample'], {}), '(nsample)\n', (549, 558), True, 'import numpy as np\n'), ((877, 898), 'numpy.random.randint', 'np.random.randint', (['ns'], {}), '(ns)\n', (894, 898), True, 'import numpy as np\n'), ((910, 931), 'numpy.random.randint', 'np.random.randint', (['ns'], {}), '(ns)\n', (927, 931), True, 'import numpy as np\n'), ((943, 965), 'numpy.random.randint', 'np.random.randint', (['ndv'], {}), '(ndv)\n', (960, 965), True, 'import numpy as np\n'), ((1330, 1352), 'numpy.array', 'np.array', (['X'], {'copy': '(True)'}), '(X, copy=True)\n', (1338, 1352), True, 'import numpy as np\n'), ((1365, 1381), 'numpy.empty_like', 'np.empty_like', (['X'], {}), '(X)\n', (1378, 1381), True, 'import numpy as np\n'), ((3045, 3067), 'numpy.array', 'np.array', (['X'], {'copy': '(True)'}), '(X, copy=True)\n', (3053, 3067), True, 'import numpy as np\n'), ((3084, 3116), 'pyKrig.utilities.pairwise_distance', 'pairwise_distance', (['Xbest', 'metric'], {}), '(Xbest, metric)\n', (3101, 3116), False, 'from pyKrig.utilities import pairwise_distance, mmcriterion\n'), ((3129, 3145), 'numpy.empty_like', 'np.empty_like', (['X'], {}), '(X)\n', (3142, 3145), True, 'import numpy as np\n'), ((3161, 3184), 'numpy.empty_like', 'np.empty_like', (['distbest'], {}), '(distbest)\n', (3174, 3184), True, 'import numpy as np\n'), ((602, 637), 'numpy.random.permutation', 'np.random.permutation', (['index_levels'], {}), '(index_levels)\n', (623, 637), True, 'import numpy as np\n'), ((1977, 1993), 'math.sqrt', 'sqrt', (['X.shape[1]'], {}), '(X.shape[1])\n', (1981, 1993), False, 'from math import ceil, sqrt\n'), ((3244, 3284), 'functools.partial', 'partial', 
(['mmcriterion'], {'p': 'p', 'metric': 'metric'}), '(mmcriterion, p=p, metric=metric)\n', (3251, 3284), False, 'from functools import partial\n'), ((3363, 3394), 'pyKrig.utilities.pairwise_distance', 'pairwise_distance', (['Xtry', 'metric'], {}), '(Xtry, metric)\n', (3380, 3394), False, 'from pyKrig.utilities import pairwise_distance, mmcriterion\n'), ((1880, 1892), 'numpy.log', 'np.log', (['(0.99)'], {}), '(0.99)\n', (1886, 1892), True, 'import numpy as np\n'), ((2506, 2531), 'numpy.exp', 'np.exp', (['(-delta_phi / temp)'], {}), '(-delta_phi / temp)\n', (2512, 2531), True, 'import numpy as np\n'), ((2535, 2551), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2549, 2551), True, 'import numpy as np\n')] |
from matplotlib import pyplot as plt
import numpy as np
results = np.load("feedforwardtimings.npy")
#Raw Timings Plot Feedforward
plt.figure()
plt.suptitle("Feedforward", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("Timing With Ten by Ten Sized Matrices")
plt.plot(results[:-1,0])
plt.scatter([5],results[-1:,0])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,2)
plt.title("Timing With a Hundred by Hundred Sized Matrices")
plt.plot(results[:-1,1])
plt.scatter([5],results[-1:,1])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,3)
plt.title("Timing With a Thousand by a Thousand Sized Matrices")
plt.plot(results[:-1,2])
plt.scatter([5],results[-1:,2])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,4)
plt.title("All Timings In Log Scale")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.yscale("log")
plt.tight_layout()
plt.savefig("feedforward.pgf")
plt.show()
#Relative Timings Feedforward
thread_counts = np.array([1,2,4,8,16,32*256])
results = results[0,None]/results
plt.figure()
plt.suptitle("Feedforward", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("All Speed Ups")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Speed up ratio on one calculation (log scale)")
plt.yscale("log")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,2)
plt.title("CPU Only Speed Ups")
plt.plot(results[:-1]-1)
plt.xticks(range(-1,6),["","1", "2", "4", "8", "16", ""])
plt.xlabel("Number of threads")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,3)
plt.title("Speed Up Per Thread")
plt.plot((results[1:-1]-1)/thread_counts[1:-1,None])
plt.scatter([4],(results[-1:,0]-1)/thread_counts[-1],color="blue")
plt.scatter([4],(results[-1:,1]-1)/thread_counts[-1], color="green")
plt.scatter([4],(results[-1:,2]-1)/thread_counts[-1], color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=1)
plt.subplot(2,2,4)
def amdahlPortion(speedup,threads):
return threads*(speedup-1)/((threads-1)*speedup)
plt.title("Amdahl's Law Calculated Parallelizable Portion")
plt.plot(amdahlPortion(results[1:-1],thread_counts[1:-1,None]))
plt.scatter([4],amdahlPortion(results[-1:,0],thread_counts[-1]),color="blue")
plt.scatter([4],amdahlPortion(results[-1:,1],thread_counts[-1]), color="green")
plt.scatter([4],amdahlPortion(results[-1:,2],thread_counts[-1]), color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Ratio of parallelizable code to total code")
plt.legend(["10x10","100x100","1000x1000"],loc=10)
plt.tight_layout()
plt.savefig("feedforward2.pgf")
plt.show()
#Backprop time
results = np.load("backproptimings.npy")
#Raw Timings Plot Backpropagation
plt.figure()
plt.suptitle("Backpropagation", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("Timing With Ten by Ten Sized Matrices")
plt.plot(results[:-1,0])
plt.scatter([5],results[-1:,0])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,2)
plt.title("Timing With a Hundred by Hundred Sized Matrices")
plt.plot(results[:-1,1])
plt.scatter([5],results[-1:,1])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,3)
plt.title("Timing With a Thousand by a Thousand Sized Matrices")
plt.plot(results[:-1,2])
plt.scatter([5],results[-1:,2])
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.subplot(2,2,4)
plt.title("All Timings In Log Scale")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Seconds to compute one calculation")
plt.yscale("log")
plt.tight_layout()
plt.savefig("backprop.pgf")
plt.show()
#Relative Timings Backpropagation
results = results[0,None]/results
plt.figure()
plt.suptitle("Feedforward", fontsize=24, y=1.05)
plt.subplot(2,2,1)
plt.title("All Speed Ups")
plt.plot(results[:-1])
plt.scatter([5],results[-1:,0],color="blue")
plt.scatter([5],results[-1:,1], color="green")
plt.scatter([5],results[-1:,2], color="red")
plt.xticks(range(-1,7),["","1", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Speed up ratio on one calculation (log scale)")
plt.yscale("log")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,2)
plt.title("CPU Only Speed Ups")
plt.plot(results[:-1]-1)
plt.xticks(range(-1,6),["","1", "2", "4", "8", "16", ""])
plt.xlabel("Number of threads")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=2)
plt.subplot(2,2,3)
plt.title("Speed Up Per Thread")
plt.plot((results[1:-1]-1)/thread_counts[1:-1,None])
plt.scatter([4],(results[-1:,0]-1)/thread_counts[-1],color="blue")
plt.scatter([4],(results[-1:,1]-1)/thread_counts[-1], color="green")
plt.scatter([4],(results[-1:,2]-1)/thread_counts[-1], color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Relative speed difference on one calculation")
plt.legend(["10x10","100x100","1000x1000"],loc=1)
plt.subplot(2,2,4)
plt.title("Amdahl's Law Calculated Parallelizable Portion")
plt.plot(amdahlPortion(results[1:-1],thread_counts[1:-1,None]))
plt.scatter([4],amdahlPortion(results[-1:,0],thread_counts[-1]),color="blue")
plt.scatter([4],amdahlPortion(results[-1:,1],thread_counts[-1]), color="green")
plt.scatter([4],amdahlPortion(results[-1:,2],thread_counts[-1]), color="red")
plt.xticks(range(-1,6),["", "2", "4", "8", "16", "GPU", ""])
plt.xlabel("Number of threads (or GPU)")
plt.ylabel("Ratio of parallelizable code to total code")
plt.legend(["10x10","100x100","1000x1000"],loc=10)
plt.tight_layout()
plt.savefig("feedforward2.pgf")
plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"numpy.load",
"matplot... | [((67, 100), 'numpy.load', 'np.load', (['"""feedforwardtimings.npy"""'], {}), "('feedforwardtimings.npy')\n", (74, 100), True, 'import numpy as np\n'), ((132, 144), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (142, 144), True, 'from matplotlib import pyplot as plt\n'), ((145, 193), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Feedforward"""'], {'fontsize': '(24)', 'y': '(1.05)'}), "('Feedforward', fontsize=24, y=1.05)\n", (157, 193), True, 'from matplotlib import pyplot as plt\n'), ((194, 214), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (205, 214), True, 'from matplotlib import pyplot as plt\n'), ((213, 263), 'matplotlib.pyplot.title', 'plt.title', (['"""Timing With Ten by Ten Sized Matrices"""'], {}), "('Timing With Ten by Ten Sized Matrices')\n", (222, 263), True, 'from matplotlib import pyplot as plt\n'), ((264, 289), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:-1, 0]'], {}), '(results[:-1, 0])\n', (272, 289), True, 'from matplotlib import pyplot as plt\n'), ((289, 322), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 0]'], {}), '([5], results[-1:, 0])\n', (300, 322), True, 'from matplotlib import pyplot as plt\n'), ((386, 426), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (396, 426), True, 'from matplotlib import pyplot as plt\n'), ((427, 475), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds to compute one calculation"""'], {}), "('Seconds to compute one calculation')\n", (437, 475), True, 'from matplotlib import pyplot as plt\n'), ((477, 497), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (488, 497), True, 'from matplotlib import pyplot as plt\n'), ((496, 556), 'matplotlib.pyplot.title', 'plt.title', (['"""Timing With a Hundred by Hundred Sized Matrices"""'], {}), "('Timing With a Hundred by Hundred Sized Matrices')\n", (505, 
556), True, 'from matplotlib import pyplot as plt\n'), ((557, 582), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:-1, 1]'], {}), '(results[:-1, 1])\n', (565, 582), True, 'from matplotlib import pyplot as plt\n'), ((582, 615), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 1]'], {}), '([5], results[-1:, 1])\n', (593, 615), True, 'from matplotlib import pyplot as plt\n'), ((679, 719), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (689, 719), True, 'from matplotlib import pyplot as plt\n'), ((720, 768), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds to compute one calculation"""'], {}), "('Seconds to compute one calculation')\n", (730, 768), True, 'from matplotlib import pyplot as plt\n'), ((770, 790), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (781, 790), True, 'from matplotlib import pyplot as plt\n'), ((789, 853), 'matplotlib.pyplot.title', 'plt.title', (['"""Timing With a Thousand by a Thousand Sized Matrices"""'], {}), "('Timing With a Thousand by a Thousand Sized Matrices')\n", (798, 853), True, 'from matplotlib import pyplot as plt\n'), ((854, 879), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:-1, 2]'], {}), '(results[:-1, 2])\n', (862, 879), True, 'from matplotlib import pyplot as plt\n'), ((879, 912), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 2]'], {}), '([5], results[-1:, 2])\n', (890, 912), True, 'from matplotlib import pyplot as plt\n'), ((976, 1016), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (986, 1016), True, 'from matplotlib import pyplot as plt\n'), ((1017, 1065), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds to compute one calculation"""'], {}), "('Seconds to compute one calculation')\n", (1027, 1065), True, 'from matplotlib import pyplot as plt\n'), ((1067, 1087), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (1078, 1087), True, 'from matplotlib import pyplot as plt\n'), ((1086, 1123), 'matplotlib.pyplot.title', 'plt.title', (['"""All Timings In Log Scale"""'], {}), "('All Timings In Log Scale')\n", (1095, 1123), True, 'from matplotlib import pyplot as plt\n'), ((1124, 1146), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:-1]'], {}), '(results[:-1])\n', (1132, 1146), True, 'from matplotlib import pyplot as plt\n'), ((1147, 1194), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 0]'], {'color': '"""blue"""'}), "([5], results[-1:, 0], color='blue')\n", (1158, 1194), True, 'from matplotlib import pyplot as plt\n'), ((1192, 1240), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 1]'], {'color': '"""green"""'}), "([5], results[-1:, 1], color='green')\n", (1203, 1240), True, 'from matplotlib import pyplot as plt\n'), ((1239, 1285), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 2]'], {'color': '"""red"""'}), "([5], results[-1:, 2], color='red')\n", (1250, 1285), True, 'from matplotlib import pyplot as plt\n'), ((1349, 1389), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (1359, 1389), True, 'from matplotlib import pyplot as plt\n'), ((1390, 1438), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds to compute one calculation"""'], {}), "('Seconds to compute one calculation')\n", (1400, 1438), True, 'from matplotlib import pyplot as plt\n'), ((1440, 1457), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1450, 1457), True, 'from matplotlib import pyplot as plt\n'), ((1458, 1476), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1474, 1476), True, 'from matplotlib import pyplot as plt\n'), ((1478, 1508), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""feedforward.pgf"""'], {}), "('feedforward.pgf')\n", 
(1489, 1508), True, 'from matplotlib import pyplot as plt\n'), ((1509, 1519), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1517, 1519), True, 'from matplotlib import pyplot as plt\n'), ((1567, 1603), 'numpy.array', 'np.array', (['[1, 2, 4, 8, 16, 32 * 256]'], {}), '([1, 2, 4, 8, 16, 32 * 256])\n', (1575, 1603), True, 'import numpy as np\n'), ((1631, 1643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1641, 1643), True, 'from matplotlib import pyplot as plt\n'), ((1644, 1692), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Feedforward"""'], {'fontsize': '(24)', 'y': '(1.05)'}), "('Feedforward', fontsize=24, y=1.05)\n", (1656, 1692), True, 'from matplotlib import pyplot as plt\n'), ((1693, 1713), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (1704, 1713), True, 'from matplotlib import pyplot as plt\n'), ((1712, 1738), 'matplotlib.pyplot.title', 'plt.title', (['"""All Speed Ups"""'], {}), "('All Speed Ups')\n", (1721, 1738), True, 'from matplotlib import pyplot as plt\n'), ((1739, 1761), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:-1]'], {}), '(results[:-1])\n', (1747, 1761), True, 'from matplotlib import pyplot as plt\n'), ((1762, 1809), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 0]'], {'color': '"""blue"""'}), "([5], results[-1:, 0], color='blue')\n", (1773, 1809), True, 'from matplotlib import pyplot as plt\n'), ((1807, 1855), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 1]'], {'color': '"""green"""'}), "([5], results[-1:, 1], color='green')\n", (1818, 1855), True, 'from matplotlib import pyplot as plt\n'), ((1854, 1900), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 2]'], {'color': '"""red"""'}), "([5], results[-1:, 2], color='red')\n", (1865, 1900), True, 'from matplotlib import pyplot as plt\n'), ((1964, 2004), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads 
(or GPU)')\n", (1974, 2004), True, 'from matplotlib import pyplot as plt\n'), ((2005, 2064), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Speed up ratio on one calculation (log scale)"""'], {}), "('Speed up ratio on one calculation (log scale)')\n", (2015, 2064), True, 'from matplotlib import pyplot as plt\n'), ((2065, 2082), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2075, 2082), True, 'from matplotlib import pyplot as plt\n'), ((2083, 2135), 'matplotlib.pyplot.legend', 'plt.legend', (["['10x10', '100x100', '1000x1000']"], {'loc': '(2)'}), "(['10x10', '100x100', '1000x1000'], loc=2)\n", (2093, 2135), True, 'from matplotlib import pyplot as plt\n'), ((2134, 2154), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2145, 2154), True, 'from matplotlib import pyplot as plt\n'), ((2153, 2184), 'matplotlib.pyplot.title', 'plt.title', (['"""CPU Only Speed Ups"""'], {}), "('CPU Only Speed Ups')\n", (2162, 2184), True, 'from matplotlib import pyplot as plt\n'), ((2185, 2211), 'matplotlib.pyplot.plot', 'plt.plot', (['(results[:-1] - 1)'], {}), '(results[:-1] - 1)\n', (2193, 2211), True, 'from matplotlib import pyplot as plt\n'), ((2268, 2299), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads"""'], {}), "('Number of threads')\n", (2278, 2299), True, 'from matplotlib import pyplot as plt\n'), ((2300, 2358), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative speed difference on one calculation"""'], {}), "('Relative speed difference on one calculation')\n", (2310, 2358), True, 'from matplotlib import pyplot as plt\n'), ((2359, 2411), 'matplotlib.pyplot.legend', 'plt.legend', (["['10x10', '100x100', '1000x1000']"], {'loc': '(2)'}), "(['10x10', '100x100', '1000x1000'], loc=2)\n", (2369, 2411), True, 'from matplotlib import pyplot as plt\n'), ((2410, 2430), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (2421, 2430), True, 'from matplotlib 
import pyplot as plt\n'), ((2429, 2461), 'matplotlib.pyplot.title', 'plt.title', (['"""Speed Up Per Thread"""'], {}), "('Speed Up Per Thread')\n", (2438, 2461), True, 'from matplotlib import pyplot as plt\n'), ((2462, 2519), 'matplotlib.pyplot.plot', 'plt.plot', (['((results[1:-1] - 1) / thread_counts[1:-1, None])'], {}), '((results[1:-1] - 1) / thread_counts[1:-1, None])\n', (2470, 2519), True, 'from matplotlib import pyplot as plt\n'), ((2515, 2588), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[4]', '((results[-1:, 0] - 1) / thread_counts[-1])'], {'color': '"""blue"""'}), "([4], (results[-1:, 0] - 1) / thread_counts[-1], color='blue')\n", (2526, 2588), True, 'from matplotlib import pyplot as plt\n'), ((2582, 2656), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[4]', '((results[-1:, 1] - 1) / thread_counts[-1])'], {'color': '"""green"""'}), "([4], (results[-1:, 1] - 1) / thread_counts[-1], color='green')\n", (2593, 2656), True, 'from matplotlib import pyplot as plt\n'), ((2651, 2723), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[4]', '((results[-1:, 2] - 1) / thread_counts[-1])'], {'color': '"""red"""'}), "([4], (results[-1:, 2] - 1) / thread_counts[-1], color='red')\n", (2662, 2723), True, 'from matplotlib import pyplot as plt\n'), ((2779, 2819), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (2789, 2819), True, 'from matplotlib import pyplot as plt\n'), ((2820, 2878), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative speed difference on one calculation"""'], {}), "('Relative speed difference on one calculation')\n", (2830, 2878), True, 'from matplotlib import pyplot as plt\n'), ((2879, 2931), 'matplotlib.pyplot.legend', 'plt.legend', (["['10x10', '100x100', '1000x1000']"], {'loc': '(1)'}), "(['10x10', '100x100', '1000x1000'], loc=1)\n", (2889, 2931), True, 'from matplotlib import pyplot as plt\n'), ((2930, 2950), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', 
'(4)'], {}), '(2, 2, 4)\n', (2941, 2950), True, 'from matplotlib import pyplot as plt\n'), ((3038, 3097), 'matplotlib.pyplot.title', 'plt.title', (['"""Amdahl\'s Law Calculated Parallelizable Portion"""'], {}), '("Amdahl\'s Law Calculated Parallelizable Portion")\n', (3047, 3097), True, 'from matplotlib import pyplot as plt\n'), ((3459, 3499), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (3469, 3499), True, 'from matplotlib import pyplot as plt\n'), ((3500, 3556), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ratio of parallelizable code to total code"""'], {}), "('Ratio of parallelizable code to total code')\n", (3510, 3556), True, 'from matplotlib import pyplot as plt\n'), ((3557, 3610), 'matplotlib.pyplot.legend', 'plt.legend', (["['10x10', '100x100', '1000x1000']"], {'loc': '(10)'}), "(['10x10', '100x100', '1000x1000'], loc=10)\n", (3567, 3610), True, 'from matplotlib import pyplot as plt\n'), ((3609, 3627), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3625, 3627), True, 'from matplotlib import pyplot as plt\n'), ((3628, 3659), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""feedforward2.pgf"""'], {}), "('feedforward2.pgf')\n", (3639, 3659), True, 'from matplotlib import pyplot as plt\n'), ((3660, 3670), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3668, 3670), True, 'from matplotlib import pyplot as plt\n'), ((3697, 3727), 'numpy.load', 'np.load', (['"""backproptimings.npy"""'], {}), "('backproptimings.npy')\n", (3704, 3727), True, 'import numpy as np\n'), ((3763, 3775), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3773, 3775), True, 'from matplotlib import pyplot as plt\n'), ((3776, 3828), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Backpropagation"""'], {'fontsize': '(24)', 'y': '(1.05)'}), "('Backpropagation', fontsize=24, y=1.05)\n", (3788, 3828), True, 'from matplotlib import pyplot as plt\n'), ((3829, 3849), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (3840, 3849), True, 'from matplotlib import pyplot as plt\n'), ((3848, 3898), 'matplotlib.pyplot.title', 'plt.title', (['"""Timing With Ten by Ten Sized Matrices"""'], {}), "('Timing With Ten by Ten Sized Matrices')\n", (3857, 3898), True, 'from matplotlib import pyplot as plt\n'), ((3899, 3924), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:-1, 0]'], {}), '(results[:-1, 0])\n', (3907, 3924), True, 'from matplotlib import pyplot as plt\n'), ((3924, 3957), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 0]'], {}), '([5], results[-1:, 0])\n', (3935, 3957), True, 'from matplotlib import pyplot as plt\n'), ((4021, 4061), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (4031, 4061), True, 'from matplotlib import pyplot as plt\n'), ((4062, 4110), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds to compute one calculation"""'], {}), "('Seconds to compute one calculation')\n", (4072, 4110), True, 'from matplotlib import pyplot as plt\n'), ((4112, 4132), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (4123, 4132), True, 'from matplotlib import pyplot as plt\n'), ((4131, 4191), 'matplotlib.pyplot.title', 'plt.title', (['"""Timing With a Hundred by Hundred Sized Matrices"""'], {}), "('Timing With a Hundred by Hundred Sized Matrices')\n", (4140, 4191), True, 'from matplotlib import pyplot as plt\n'), ((4192, 4217), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:-1, 1]'], {}), '(results[:-1, 1])\n', (4200, 4217), True, 'from matplotlib import pyplot as plt\n'), ((4217, 4250), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 1]'], {}), '([5], results[-1:, 1])\n', (4228, 4250), True, 'from matplotlib import pyplot as plt\n'), ((4314, 4354), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), 
"('Number of threads (or GPU)')\n", (4324, 4354), True, 'from matplotlib import pyplot as plt\n'), ((4355, 4403), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds to compute one calculation"""'], {}), "('Seconds to compute one calculation')\n", (4365, 4403), True, 'from matplotlib import pyplot as plt\n'), ((4405, 4425), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (4416, 4425), True, 'from matplotlib import pyplot as plt\n'), ((4424, 4488), 'matplotlib.pyplot.title', 'plt.title', (['"""Timing With a Thousand by a Thousand Sized Matrices"""'], {}), "('Timing With a Thousand by a Thousand Sized Matrices')\n", (4433, 4488), True, 'from matplotlib import pyplot as plt\n'), ((4489, 4514), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:-1, 2]'], {}), '(results[:-1, 2])\n', (4497, 4514), True, 'from matplotlib import pyplot as plt\n'), ((4514, 4547), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 2]'], {}), '([5], results[-1:, 2])\n', (4525, 4547), True, 'from matplotlib import pyplot as plt\n'), ((4611, 4651), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (4621, 4651), True, 'from matplotlib import pyplot as plt\n'), ((4652, 4700), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds to compute one calculation"""'], {}), "('Seconds to compute one calculation')\n", (4662, 4700), True, 'from matplotlib import pyplot as plt\n'), ((4702, 4722), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (4713, 4722), True, 'from matplotlib import pyplot as plt\n'), ((4721, 4758), 'matplotlib.pyplot.title', 'plt.title', (['"""All Timings In Log Scale"""'], {}), "('All Timings In Log Scale')\n", (4730, 4758), True, 'from matplotlib import pyplot as plt\n'), ((4759, 4781), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:-1]'], {}), '(results[:-1])\n', (4767, 4781), True, 'from matplotlib import 
pyplot as plt\n'), ((4782, 4829), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 0]'], {'color': '"""blue"""'}), "([5], results[-1:, 0], color='blue')\n", (4793, 4829), True, 'from matplotlib import pyplot as plt\n'), ((4827, 4875), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 1]'], {'color': '"""green"""'}), "([5], results[-1:, 1], color='green')\n", (4838, 4875), True, 'from matplotlib import pyplot as plt\n'), ((4874, 4920), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 2]'], {'color': '"""red"""'}), "([5], results[-1:, 2], color='red')\n", (4885, 4920), True, 'from matplotlib import pyplot as plt\n'), ((4984, 5024), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (4994, 5024), True, 'from matplotlib import pyplot as plt\n'), ((5025, 5073), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds to compute one calculation"""'], {}), "('Seconds to compute one calculation')\n", (5035, 5073), True, 'from matplotlib import pyplot as plt\n'), ((5074, 5091), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5084, 5091), True, 'from matplotlib import pyplot as plt\n'), ((5093, 5111), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5109, 5111), True, 'from matplotlib import pyplot as plt\n'), ((5113, 5140), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""backprop.pgf"""'], {}), "('backprop.pgf')\n", (5124, 5140), True, 'from matplotlib import pyplot as plt\n'), ((5141, 5151), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5149, 5151), True, 'from matplotlib import pyplot as plt\n'), ((5221, 5233), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5231, 5233), True, 'from matplotlib import pyplot as plt\n'), ((5234, 5282), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Feedforward"""'], {'fontsize': '(24)', 'y': '(1.05)'}), "('Feedforward', fontsize=24, 
y=1.05)\n", (5246, 5282), True, 'from matplotlib import pyplot as plt\n'), ((5283, 5303), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (5294, 5303), True, 'from matplotlib import pyplot as plt\n'), ((5302, 5328), 'matplotlib.pyplot.title', 'plt.title', (['"""All Speed Ups"""'], {}), "('All Speed Ups')\n", (5311, 5328), True, 'from matplotlib import pyplot as plt\n'), ((5329, 5351), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:-1]'], {}), '(results[:-1])\n', (5337, 5351), True, 'from matplotlib import pyplot as plt\n'), ((5352, 5399), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 0]'], {'color': '"""blue"""'}), "([5], results[-1:, 0], color='blue')\n", (5363, 5399), True, 'from matplotlib import pyplot as plt\n'), ((5397, 5445), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 1]'], {'color': '"""green"""'}), "([5], results[-1:, 1], color='green')\n", (5408, 5445), True, 'from matplotlib import pyplot as plt\n'), ((5444, 5490), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[5]', 'results[-1:, 2]'], {'color': '"""red"""'}), "([5], results[-1:, 2], color='red')\n", (5455, 5490), True, 'from matplotlib import pyplot as plt\n'), ((5554, 5594), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (5564, 5594), True, 'from matplotlib import pyplot as plt\n'), ((5595, 5654), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Speed up ratio on one calculation (log scale)"""'], {}), "('Speed up ratio on one calculation (log scale)')\n", (5605, 5654), True, 'from matplotlib import pyplot as plt\n'), ((5655, 5672), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5665, 5672), True, 'from matplotlib import pyplot as plt\n'), ((5673, 5725), 'matplotlib.pyplot.legend', 'plt.legend', (["['10x10', '100x100', '1000x1000']"], {'loc': '(2)'}), "(['10x10', '100x100', '1000x1000'], loc=2)\n", (5683, 5725), 
True, 'from matplotlib import pyplot as plt\n'), ((5724, 5744), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (5735, 5744), True, 'from matplotlib import pyplot as plt\n'), ((5743, 5774), 'matplotlib.pyplot.title', 'plt.title', (['"""CPU Only Speed Ups"""'], {}), "('CPU Only Speed Ups')\n", (5752, 5774), True, 'from matplotlib import pyplot as plt\n'), ((5775, 5801), 'matplotlib.pyplot.plot', 'plt.plot', (['(results[:-1] - 1)'], {}), '(results[:-1] - 1)\n', (5783, 5801), True, 'from matplotlib import pyplot as plt\n'), ((5858, 5889), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads"""'], {}), "('Number of threads')\n", (5868, 5889), True, 'from matplotlib import pyplot as plt\n'), ((5890, 5948), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative speed difference on one calculation"""'], {}), "('Relative speed difference on one calculation')\n", (5900, 5948), True, 'from matplotlib import pyplot as plt\n'), ((5949, 6001), 'matplotlib.pyplot.legend', 'plt.legend', (["['10x10', '100x100', '1000x1000']"], {'loc': '(2)'}), "(['10x10', '100x100', '1000x1000'], loc=2)\n", (5959, 6001), True, 'from matplotlib import pyplot as plt\n'), ((6000, 6020), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (6011, 6020), True, 'from matplotlib import pyplot as plt\n'), ((6019, 6051), 'matplotlib.pyplot.title', 'plt.title', (['"""Speed Up Per Thread"""'], {}), "('Speed Up Per Thread')\n", (6028, 6051), True, 'from matplotlib import pyplot as plt\n'), ((6052, 6109), 'matplotlib.pyplot.plot', 'plt.plot', (['((results[1:-1] - 1) / thread_counts[1:-1, None])'], {}), '((results[1:-1] - 1) / thread_counts[1:-1, None])\n', (6060, 6109), True, 'from matplotlib import pyplot as plt\n'), ((6105, 6178), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[4]', '((results[-1:, 0] - 1) / thread_counts[-1])'], {'color': '"""blue"""'}), "([4], (results[-1:, 0] - 1) / thread_counts[-1], 
color='blue')\n", (6116, 6178), True, 'from matplotlib import pyplot as plt\n'), ((6172, 6246), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[4]', '((results[-1:, 1] - 1) / thread_counts[-1])'], {'color': '"""green"""'}), "([4], (results[-1:, 1] - 1) / thread_counts[-1], color='green')\n", (6183, 6246), True, 'from matplotlib import pyplot as plt\n'), ((6241, 6313), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[4]', '((results[-1:, 2] - 1) / thread_counts[-1])'], {'color': '"""red"""'}), "([4], (results[-1:, 2] - 1) / thread_counts[-1], color='red')\n", (6252, 6313), True, 'from matplotlib import pyplot as plt\n'), ((6369, 6409), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (6379, 6409), True, 'from matplotlib import pyplot as plt\n'), ((6410, 6468), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative speed difference on one calculation"""'], {}), "('Relative speed difference on one calculation')\n", (6420, 6468), True, 'from matplotlib import pyplot as plt\n'), ((6469, 6521), 'matplotlib.pyplot.legend', 'plt.legend', (["['10x10', '100x100', '1000x1000']"], {'loc': '(1)'}), "(['10x10', '100x100', '1000x1000'], loc=1)\n", (6479, 6521), True, 'from matplotlib import pyplot as plt\n'), ((6520, 6540), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (6531, 6540), True, 'from matplotlib import pyplot as plt\n'), ((6539, 6598), 'matplotlib.pyplot.title', 'plt.title', (['"""Amdahl\'s Law Calculated Parallelizable Portion"""'], {}), '("Amdahl\'s Law Calculated Parallelizable Portion")\n', (6548, 6598), True, 'from matplotlib import pyplot as plt\n'), ((6960, 7000), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of threads (or GPU)"""'], {}), "('Number of threads (or GPU)')\n", (6970, 7000), True, 'from matplotlib import pyplot as plt\n'), ((7001, 7057), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ratio of parallelizable code to total 
code"""'], {}), "('Ratio of parallelizable code to total code')\n", (7011, 7057), True, 'from matplotlib import pyplot as plt\n'), ((7058, 7111), 'matplotlib.pyplot.legend', 'plt.legend', (["['10x10', '100x100', '1000x1000']"], {'loc': '(10)'}), "(['10x10', '100x100', '1000x1000'], loc=10)\n", (7068, 7111), True, 'from matplotlib import pyplot as plt\n'), ((7110, 7128), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7126, 7128), True, 'from matplotlib import pyplot as plt\n'), ((7129, 7160), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""feedforward2.pgf"""'], {}), "('feedforward2.pgf')\n", (7140, 7160), True, 'from matplotlib import pyplot as plt\n'), ((7161, 7171), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7169, 7171), True, 'from matplotlib import pyplot as plt\n')] |
import torch
import numpy as np
from marl_coop.component.replay_buffer import convert_to_agent_tensor
from marl_coop.component import BufferCreator
from marl_coop.test.mock import Mock_buffer_config
def test_group_by_agent_convert_interleaved_agent_experience_into_batch_by_agent():
arg = np.array([[
[111, 112, 113],
[211, 212, 213],
[311, 312, 313]
],[
[121, 122, 123],
[221, 222, 223],
[321, 322, 323]
],[
[131, 132, 133],
[231, 232, 233],
[331, 332, 333]]])
expected = [
torch.tensor([
[111., 112., 113.],
[121., 122., 123.],
[131., 132., 133.]
]),
torch.tensor([
[211., 212., 213.],
[221., 222., 223.],
[231., 232., 233.]
]),
torch.tensor([
[311., 312., 313.],
[321., 322., 323.],
[331., 332., 333.]])]
actual = convert_to_agent_tensor(arg)
assert np.all([torch.all(exp == act).numpy() for exp, act in zip(expected, actual)]) == True
def test_group_by_agent_convert_interleaved_agent_experience_into_batch_by_agent_even_not_square():
arg = np.array([[
[111, 112],
[211, 212]
],[
[121, 122],
[221, 222]
],[
[131, 132],
[231, 232]
],[
[141, 142],
[241, 242]
]])
expected = [
torch.tensor([
[111., 112.],
[121., 122.],
[131., 132.],
[141., 142.],
]),
torch.tensor([
[211., 212.],
[221., 222.],
[231., 232.],
[241., 242.]])]
actual = convert_to_agent_tensor(arg)
assert np.all([torch.all(exp == act).numpy() for exp, act in zip(expected, actual)]) == True
def test_buffer_sampling():
buffer = BufferCreator().create(Mock_buffer_config(size=3, type='uniform'))
observations_s = np.array([[[111,112,113],
[121,122,123]
],[
[211,212,213],
[221,222,223]
],[
[311,312,313],
[321,322,323]]])
actions_s = observations_s.copy()
reward_s = np.array([[0,1],[0,1],[0,1]])
next_observations_s = observations_s.copy()
done_s = np.array([[False,False],[False,False],[True,True]])
for observation, action, reward, next_observation, done in zip(
observations_s, actions_s, reward_s, next_observations_s, done_s):
buffer.add(observation, action, reward, next_observation, done)
observations_batch, _, _, _, _ = buffer.sample(3)
observations_batch, _ = torch.sort(torch.stack(observations_batch), dim=1)
expected_observations = torch.from_numpy(np.array([
[[111,112,113],[211,212,213],[311,312,313]],
[[121,122,123],[221,222,223],[321,322,323]]
])).float()
assert torch.all(observations_batch == expected_observations) == True
| [
"torch.stack",
"marl_coop.test.mock.Mock_buffer_config",
"marl_coop.component.replay_buffer.convert_to_agent_tensor",
"numpy.array",
"torch.tensor",
"marl_coop.component.BufferCreator",
"torch.all"
] | [((301, 480), 'numpy.array', 'np.array', (['[[[111, 112, 113], [211, 212, 213], [311, 312, 313]], [[121, 122, 123], [\n 221, 222, 223], [321, 322, 323]], [[131, 132, 133], [231, 232, 233], [\n 331, 332, 333]]]'], {}), '([[[111, 112, 113], [211, 212, 213], [311, 312, 313]], [[121, 122, \n 123], [221, 222, 223], [321, 322, 323]], [[131, 132, 133], [231, 232, \n 233], [331, 332, 333]]])\n', (309, 480), True, 'import numpy as np\n'), ((1011, 1039), 'marl_coop.component.replay_buffer.convert_to_agent_tensor', 'convert_to_agent_tensor', (['arg'], {}), '(arg)\n', (1034, 1039), False, 'from marl_coop.component.replay_buffer import convert_to_agent_tensor\n'), ((1254, 1372), 'numpy.array', 'np.array', (['[[[111, 112], [211, 212]], [[121, 122], [221, 222]], [[131, 132], [231, 232\n ]], [[141, 142], [241, 242]]]'], {}), '([[[111, 112], [211, 212]], [[121, 122], [221, 222]], [[131, 132],\n [231, 232]], [[141, 142], [241, 242]]])\n', (1262, 1372), True, 'import numpy as np\n'), ((1806, 1834), 'marl_coop.component.replay_buffer.convert_to_agent_tensor', 'convert_to_agent_tensor', (['arg'], {}), '(arg)\n', (1829, 1834), False, 'from marl_coop.component.replay_buffer import convert_to_agent_tensor\n'), ((2065, 2188), 'numpy.array', 'np.array', (['[[[111, 112, 113], [121, 122, 123]], [[211, 212, 213], [221, 222, 223]], [[\n 311, 312, 313], [321, 322, 323]]]'], {}), '([[[111, 112, 113], [121, 122, 123]], [[211, 212, 213], [221, 222, \n 223]], [[311, 312, 313], [321, 322, 323]]])\n', (2073, 2188), True, 'import numpy as np\n'), ((2451, 2485), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [0, 1]]'], {}), '([[0, 1], [0, 1], [0, 1]])\n', (2459, 2485), True, 'import numpy as np\n'), ((2542, 2598), 'numpy.array', 'np.array', (['[[False, False], [False, False], [True, True]]'], {}), '([[False, False], [False, False], [True, True]])\n', (2550, 2598), True, 'import numpy as np\n'), ((624, 712), 'torch.tensor', 'torch.tensor', (['[[111.0, 112.0, 113.0], [121.0, 122.0, 123.0], [131.0, 
132.0, 133.0]]'], {}), '([[111.0, 112.0, 113.0], [121.0, 122.0, 123.0], [131.0, 132.0, \n 133.0]])\n', (636, 712), False, 'import torch\n'), ((754, 842), 'torch.tensor', 'torch.tensor', (['[[211.0, 212.0, 213.0], [221.0, 222.0, 223.0], [231.0, 232.0, 233.0]]'], {}), '([[211.0, 212.0, 213.0], [221.0, 222.0, 223.0], [231.0, 232.0, \n 233.0]])\n', (766, 842), False, 'import torch\n'), ((884, 972), 'torch.tensor', 'torch.tensor', (['[[311.0, 312.0, 313.0], [321.0, 322.0, 323.0], [331.0, 332.0, 333.0]]'], {}), '([[311.0, 312.0, 313.0], [321.0, 322.0, 323.0], [331.0, 332.0, \n 333.0]])\n', (896, 972), False, 'import torch\n'), ((1532, 1610), 'torch.tensor', 'torch.tensor', (['[[111.0, 112.0], [121.0, 122.0], [131.0, 132.0], [141.0, 142.0]]'], {}), '([[111.0, 112.0], [121.0, 122.0], [131.0, 132.0], [141.0, 142.0]])\n', (1544, 1610), False, 'import torch\n'), ((1671, 1749), 'torch.tensor', 'torch.tensor', (['[[211.0, 212.0], [221.0, 222.0], [231.0, 232.0], [241.0, 242.0]]'], {}), '([[211.0, 212.0], [221.0, 222.0], [231.0, 232.0], [241.0, 242.0]])\n', (1683, 1749), False, 'import torch\n'), ((1999, 2041), 'marl_coop.test.mock.Mock_buffer_config', 'Mock_buffer_config', ([], {'size': '(3)', 'type': '"""uniform"""'}), "(size=3, type='uniform')\n", (2017, 2041), False, 'from marl_coop.test.mock import Mock_buffer_config\n'), ((2904, 2935), 'torch.stack', 'torch.stack', (['observations_batch'], {}), '(observations_batch)\n', (2915, 2935), False, 'import torch\n'), ((3134, 3188), 'torch.all', 'torch.all', (['(observations_batch == expected_observations)'], {}), '(observations_batch == expected_observations)\n', (3143, 3188), False, 'import torch\n'), ((1976, 1991), 'marl_coop.component.BufferCreator', 'BufferCreator', ([], {}), '()\n', (1989, 1991), False, 'from marl_coop.component import BufferCreator\n'), ((2990, 3111), 'numpy.array', 'np.array', (['[[[111, 112, 113], [211, 212, 213], [311, 312, 313]], [[121, 122, 123], [\n 221, 222, 223], [321, 322, 323]]]'], {}), '([[[111, 
112, 113], [211, 212, 213], [311, 312, 313]], [[121, 122, \n 123], [221, 222, 223], [321, 322, 323]]])\n', (2998, 3111), True, 'import numpy as np\n'), ((1060, 1081), 'torch.all', 'torch.all', (['(exp == act)'], {}), '(exp == act)\n', (1069, 1081), False, 'import torch\n'), ((1855, 1876), 'torch.all', 'torch.all', (['(exp == act)'], {}), '(exp == act)\n', (1864, 1876), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 17 16:41:11 2020
@author: ssli
Post process of covariance calculation
re-arrange the results to desired forms
"""
import numpy as np
import pandas as pd
import os
# cut matrix into three different parts:
# bb, rr, rb (=br)
# +++++++++++++++++++++++++++++++++++++++ general setting
# covariance type
# mar11 for old one
# apr8 for updated one
# cov_tag = 'mar11'
cov_tag = 'apr8'
# parent directory for input & output covariance
ParDir = "/disks/shear15/ssli/CosmicShear/covariance/"
# path to the theory vector
P_xi_theo_r = "/disks/shear15/ssli/CosmicShear/theory_vector/xi_theory_full_less3_KV450_best.dat"
P_xi_theo_b = "/disks/shear15/ssli/CosmicShear/theory_vector/xi_theory_full_greater3_KV450_best.dat"
# path to the cut file
cutvalues_file_path = '/disks/shear15/ssli/CosmicShear/SUPPLEMENTARY_FILES/CUT_VALUES/cut_values_5zbins.txt'
# +++++++++++++++++++++++++++++++++++++++ to list form
inpath = ParDir + cov_tag + "/original/thps_cov_{:}_list.dat".format(cov_tag)
tmp_raw = np.loadtxt(inpath)
# discard undesired columns
tmp_raw = np.delete(tmp_raw, [2, 3, 8, 9], axis=1)
# build dataframe
df = pd.DataFrame(tmp_raw, \
columns=['s1_bin1','s1_bin2','s1_xip0_xim1', 's1_theta', \
's2_bin1','s2_bin2','s2_xip0_xim1', 's2_theta', \
'Gaussian', 'non_Gaussian'])
df = df.astype(dtype= {"s1_bin1":"int32",\
"s1_bin2":"int32",\
"s1_xip0_xim1":"int32",\
"s1_theta":"float64",\
"s2_bin1":"int32",\
"s2_bin2":"int32",\
"s2_xip0_xim1":"int32",\
"s2_theta":"float64",\
"Gaussian":"float64",\
"non_Gaussian":"float64",\
})
# get blue-blue part
mask_bb = (df.s1_bin1<=5) & (df.s1_bin2<=5) & (df.s2_bin1<=5) & (df.s2_bin2<=5)
df_bb = df[mask_bb]
# get red-red part
mask_rr = (df.s1_bin1>5) & (df.s1_bin2>5) & (df.s2_bin1>5) & (df.s2_bin2>5)
df_rr = df[mask_rr]
# get blue-red part
mask_br = (df.s1_bin1<=5) & (df.s1_bin2<=5) & (df.s2_bin1>5) & (df.s2_bin2>5)
df_br = df[mask_br]
# output
outpath1 = ParDir + cov_tag + '/thps_cov_{:}_bb_list.dat'.format(cov_tag)
df_bb.to_csv(outpath1, sep=' ', index=False, header=False)
#
outpath2 = ParDir + cov_tag + '/thps_cov_{:}_rr_list.dat'.format(cov_tag)
df_rr.to_csv(outpath2, sep=' ', index=False, header=False)
#
outpath3 = ParDir + cov_tag + '/thps_cov_{:}_br_list.dat'.format(cov_tag)
df_br.to_csv(outpath3, sep=' ', index=False, header=False)
print("list form of covariance saved to: \n", outpath1, '\n', outpath2, '\n', outpath3, '\n')
# +++++++++++++++++++++++++++++++++++++++ to matrix form
print('Now we construct the covariance matrix in a format usable for cosmological calculation.')
def List2UsableFunc(tmp_raw, xi_theo1=None, xi_theo2=None, CROSS=False):
ntheta = 9
nzcorrs = int(5 * (5 + 1) / 2)
indices = np.column_stack((tmp_raw[:, :3], tmp_raw[:, 4:7]))
# we need to add both components for full covariance
values = tmp_raw[:, 8] + tmp_raw[:, 9]
dim = 2 * ntheta * nzcorrs
matrix = np.zeros((dim, dim))
# make sure the list covariance is in right order:
# theta -> PorM -> iz2 -> iz1
index_lin = 0
# this creates the correctly ordered (i.e. like self.xi_obs) full
# 270 x 270 covariance matrix:
if CROSS:
# for cross covariance
for index1 in range(dim):
for index2 in range(dim):
matrix[index1, index2] = values[index_lin]
index_lin += 1
else:
# for auto covariance
for index1 in range(dim):
for index2 in range(index1, dim):
matrix[index1, index2] = values[index_lin]
matrix[index2, index1] = matrix[index1, index2]
index_lin += 1
# apply propagation of m-correction uncertainty following
# equation 12 from Hildebrandt et al. 2017 (arXiv:1606.05338):
err_multiplicative_bias = 0.02
matrix_m_corr = np.matrix(xi_theo1).T * np.matrix(xi_theo2) * 4. * err_multiplicative_bias**2
matrix = matrix + np.asarray(matrix_m_corr)
return matrix
# load xi_theo
xi_theo_r = np.loadtxt(P_xi_theo_r)[:,2]
xi_theo_b = np.loadtxt(P_xi_theo_b)[:,2]
# list form covariance
df_bb = df_bb.to_numpy()
df_rr = df_rr.to_numpy()
df_br = df_br.to_numpy()
# transfer to matrix form
df_bb = List2UsableFunc(df_bb, xi_theo_b, xi_theo_b, False)
outpath1 = ParDir + cov_tag + '/thps_cov_{:}_bb_inc_m_usable.dat'.format(cov_tag)
np.savetxt(outpath1, df_bb)
df_rr = List2UsableFunc(df_rr, xi_theo_r, xi_theo_r, False)
outpath2 = ParDir + cov_tag + '/thps_cov_{:}_rr_inc_m_usable.dat'.format(cov_tag)
np.savetxt(outpath2, df_rr)
df_br = List2UsableFunc(df_br, xi_theo_b, xi_theo_r, True)
outpath3 = ParDir + cov_tag + '/thps_cov_{:}_br_inc_m_usable.dat'.format(cov_tag)
np.savetxt(outpath3, df_br)
print('Saved covariance matrix (incl. shear calibration uncertainty) in format usable with this likelihood to: \n', outpath1, '\n', outpath2, '\n', outpath3, '\n')
# ++++++++++++++++++++++++++++++++++++++++++++++ add mask (for plot)
print('Now we construct the masked covariance matrix.')
def ReadCutValueFunc(theta_bins, cutvalues_file_path):
"""
Read cut values and convert into mask
"""
ntheta = 9
nzbins = 5
nzcorrs = int(nzbins * (nzbins + 1) / 2)
if os.path.exists(cutvalues_file_path):
cut_values = np.loadtxt(cutvalues_file_path)
else:
raise Exception('File not found:\n {:} \n \
Check that requested file exists in the following folder: \
\n {:}'.format(cutvalues_file_path))
# create the mask
mask = np.zeros(2 * nzcorrs * ntheta)
iz = 0
for izl in range(nzbins):
for izh in range(izl, nzbins):
# this counts the bin combinations
# iz=1 =>(1,1), iz=2 =>(1,2) etc
iz = iz + 1
for i in range(ntheta):
j = (iz-1)*2*ntheta
xi_plus_cut_low = max(cut_values[izl, 0], cut_values[izh, 0])
xi_plus_cut_high = max(cut_values[izl, 1], cut_values[izh, 1])
xi_minus_cut_low = max(cut_values[izl, 2], cut_values[izh, 2])
xi_minus_cut_high = max(cut_values[izl, 3], cut_values[izh, 3])
if ((theta_bins[i] < xi_plus_cut_high) and (theta_bins[i]>xi_plus_cut_low)):
mask[j+i] = 1
if ((theta_bins[i] < xi_minus_cut_high) and (theta_bins[i]>xi_minus_cut_low)):
mask[ntheta + j+i] = 1
mask_indices = np.where(mask == 1)[0]
return mask, mask_indices
theta_bins = np.loadtxt(P_xi_theo_r)[:,1]
mask, mask_indices = ReadCutValueFunc(theta_bins, cutvalues_file_path)
mask_indices_cov = np.ix_(mask_indices, mask_indices)
# mask cov
df_bb = df_bb[mask_indices_cov]
outpath1 = ParDir + cov_tag + '/thps_cov_{:}_bb_inc_m_usable_mask.dat'.format(cov_tag)
np.savetxt(outpath1, df_bb)
df_rr = df_rr[mask_indices_cov]
outpath2 = ParDir + cov_tag + '/thps_cov_{:}_rr_inc_m_usable_mask.dat'.format(cov_tag)
np.savetxt(outpath2, df_rr)
df_br = df_br[mask_indices_cov]
outpath3 = ParDir + cov_tag + '/thps_cov_{:}_br_inc_m_usable_mask.dat'.format(cov_tag)
np.savetxt(outpath3, df_br)
print('Saved masked covariance matrix to: \n', outpath1, '\n', outpath2, '\n', outpath3, '\n')
| [
"os.path.exists",
"numpy.where",
"numpy.delete",
"numpy.asarray",
"numpy.column_stack",
"numpy.ix_",
"numpy.zeros",
"numpy.savetxt",
"pandas.DataFrame",
"numpy.loadtxt",
"numpy.matrix"
] | [((1079, 1097), 'numpy.loadtxt', 'np.loadtxt', (['inpath'], {}), '(inpath)\n', (1089, 1097), True, 'import numpy as np\n'), ((1137, 1177), 'numpy.delete', 'np.delete', (['tmp_raw', '[2, 3, 8, 9]'], {'axis': '(1)'}), '(tmp_raw, [2, 3, 8, 9], axis=1)\n', (1146, 1177), True, 'import numpy as np\n'), ((1202, 1369), 'pandas.DataFrame', 'pd.DataFrame', (['tmp_raw'], {'columns': "['s1_bin1', 's1_bin2', 's1_xip0_xim1', 's1_theta', 's2_bin1', 's2_bin2',\n 's2_xip0_xim1', 's2_theta', 'Gaussian', 'non_Gaussian']"}), "(tmp_raw, columns=['s1_bin1', 's1_bin2', 's1_xip0_xim1',\n 's1_theta', 's2_bin1', 's2_bin2', 's2_xip0_xim1', 's2_theta',\n 'Gaussian', 'non_Gaussian'])\n", (1214, 1369), True, 'import pandas as pd\n'), ((4778, 4805), 'numpy.savetxt', 'np.savetxt', (['outpath1', 'df_bb'], {}), '(outpath1, df_bb)\n', (4788, 4805), True, 'import numpy as np\n'), ((4949, 4976), 'numpy.savetxt', 'np.savetxt', (['outpath2', 'df_rr'], {}), '(outpath2, df_rr)\n', (4959, 4976), True, 'import numpy as np\n'), ((5119, 5146), 'numpy.savetxt', 'np.savetxt', (['outpath3', 'df_br'], {}), '(outpath3, df_br)\n', (5129, 5146), True, 'import numpy as np\n'), ((7034, 7068), 'numpy.ix_', 'np.ix_', (['mask_indices', 'mask_indices'], {}), '(mask_indices, mask_indices)\n', (7040, 7068), True, 'import numpy as np\n'), ((7200, 7227), 'numpy.savetxt', 'np.savetxt', (['outpath1', 'df_bb'], {}), '(outpath1, df_bb)\n', (7210, 7227), True, 'import numpy as np\n'), ((7348, 7375), 'numpy.savetxt', 'np.savetxt', (['outpath2', 'df_rr'], {}), '(outpath2, df_rr)\n', (7358, 7375), True, 'import numpy as np\n'), ((7496, 7523), 'numpy.savetxt', 'np.savetxt', (['outpath3', 'df_br'], {}), '(outpath3, df_br)\n', (7506, 7523), True, 'import numpy as np\n'), ((3057, 3107), 'numpy.column_stack', 'np.column_stack', (['(tmp_raw[:, :3], tmp_raw[:, 4:7])'], {}), '((tmp_raw[:, :3], tmp_raw[:, 4:7]))\n', (3072, 3107), True, 'import numpy as np\n'), ((3270, 3290), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, 
dim))\n', (3278, 3290), True, 'import numpy as np\n'), ((4440, 4463), 'numpy.loadtxt', 'np.loadtxt', (['P_xi_theo_r'], {}), '(P_xi_theo_r)\n', (4450, 4463), True, 'import numpy as np\n'), ((4481, 4504), 'numpy.loadtxt', 'np.loadtxt', (['P_xi_theo_b'], {}), '(P_xi_theo_b)\n', (4491, 4504), True, 'import numpy as np\n'), ((5638, 5673), 'os.path.exists', 'os.path.exists', (['cutvalues_file_path'], {}), '(cutvalues_file_path)\n', (5652, 5673), False, 'import os\n'), ((5945, 5975), 'numpy.zeros', 'np.zeros', (['(2 * nzcorrs * ntheta)'], {}), '(2 * nzcorrs * ntheta)\n', (5953, 5975), True, 'import numpy as np\n'), ((6914, 6937), 'numpy.loadtxt', 'np.loadtxt', (['P_xi_theo_r'], {}), '(P_xi_theo_r)\n', (6924, 6937), True, 'import numpy as np\n'), ((4363, 4388), 'numpy.asarray', 'np.asarray', (['matrix_m_corr'], {}), '(matrix_m_corr)\n', (4373, 4388), True, 'import numpy as np\n'), ((5696, 5727), 'numpy.loadtxt', 'np.loadtxt', (['cutvalues_file_path'], {}), '(cutvalues_file_path)\n', (5706, 5727), True, 'import numpy as np\n'), ((6845, 6864), 'numpy.where', 'np.where', (['(mask == 1)'], {}), '(mask == 1)\n', (6853, 6864), True, 'import numpy as np\n'), ((4283, 4302), 'numpy.matrix', 'np.matrix', (['xi_theo2'], {}), '(xi_theo2)\n', (4292, 4302), True, 'import numpy as np\n'), ((4259, 4278), 'numpy.matrix', 'np.matrix', (['xi_theo1'], {}), '(xi_theo1)\n', (4268, 4278), True, 'import numpy as np\n')] |
"""
Test beamshapes predictions given model data.
Author: <NAME>, Acoustic and Functional Ecology,
Max Planck Institute for Ornithology, Seewiesen
License : This code is released under an MIT License.
Copyright 2020, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import pandas as pd
def calculate_model_and_data_error(real_data, predictions, **kwargs):
    """ Quantify the mismatch between a beamshape model's predictions and
    observed data.

    Parameters
    ----------
    real_data : pd.DataFrame with 2 columns
        theta: float. Angle in radians.
        obs_relative_pressure: float>0. Observed relative pressure in comparison to on-axis.
    predictions : pd.DataFrame with 2 columns.
        theta: float. Angle in radians.
        pred_relative_pressure: float>0. Predicted relative pressure in comparison to on-axis.

    Keyword Arguments
    -----------------
    error_function : callable scoring the data/prediction mismatch.
        Defaults to the sum of absolute errors.

    Returns
    --------
    prediction_error : output format depends on the exact error function used.
    """
    error_function = kwargs.get('error_function', sum_absolute_error)
    return error_function(real_data, predictions)
def sum_absolute_error(real_data, predictions):
    """
    Sum of absolute differences between predicted and observed relative
    pressure: Sum(|prediction - observation|).

    Raises
    ------
    ValueError : when data and predictions cover different theta values.
    """
    if not check_if_angles_are_same(real_data, predictions):
        raise ValueError('The theta values are not the same between data and predictions - please check.')
    # Element-wise residuals, folded into a single absolute-error total.
    residuals = predictions['pred_relative_pressure'] - real_data['obs_relative_pressure']
    return np.sum(np.abs(residuals))
def dbeam_by_dtheta_error(real_data, predictions):
    '''
    Calculates the first order derivative of the beamshape with reference to
    the angle of emission.

    If the overall error in dbeam/dtheta is low it means that the real data and
    predictions match well in their shape, but not so much in their exact values.

    Parameters
    ----------
    real_data
    predictions

    Returns
    -------
    error_dbeam_by_dtheta

    Raises
    ------
    NotImplementedError : the metric has not been implemented yet.
    '''
    # The original stub documented a return value but silently returned None.
    # Fail loudly so callers cannot mistake None for a computed error value.
    raise NotImplementedError('dbeam_by_dtheta_error is not implemented yet.')
def check_if_angles_are_same(real_data, predictions):
    '''Verify that observed data and predictions share the same emission angles.

    Parameters
    ---------
    real_data : pd.DataFrame with a 'theta' column.
    predictions : pd.DataFrame with a 'theta' column.

    Returns
    -------
    angles_same : bool
        True when the 'theta' columns match exactly, False otherwise.
    '''
    return real_data['theta'].equals(predictions['theta'])
| [
"numpy.abs"
] | [((2913, 2926), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (2919, 2926), True, 'import numpy as np\n')] |
__author__ = "<NAME>"
__version__ = "0.1"
import os, sys
from dataclasses import dataclass
from typing import List
import functools
import operator
import numpy as np
from astropy import units as u
from astropy import constants as const
from astropy.table import QTable, Column
from colorama import Fore
from colorama import init
init(autoreset=True)
def average_(x, n):
    """
    Down-sample an array by averaging every ``n`` consecutive cells.

    Input:
        x: array-like (e.g. astropy column); length must divide evenly by n
        n: number of adjacent cells collapsed into each output cell
    Return:
        numpy array of per-group means, length len(x) // n
    """
    grouped = x.reshape((-1, n))
    return np.average(grouped, axis=1)
def average_err(x, n):
    """
    Propagate uncertainties when binning ``n`` cells together:
    sqrt(e1**2 + e2**2 + ... + en**2) / n per output cell.

    Input:
        x: array-like of errors (e.g. astropy column)
        n: number of cells averaged together
    Return:
        propagated error per binned cell
    """
    squared = (x ** 2).reshape((-1, n))
    return np.sqrt(np.average(squared, axis=1) / n)
def sigma_to_fwhm(sigma):
    """
    Convert a Gaussian standard deviation to its full width at half maximum.
    """
    # FWHM = 2 * sqrt(2 ln 2) * sigma
    return sigma * 2.0 * np.sqrt(2.0 * np.log(2.0))
def fwhm_to_sigma(fwhm):
    """
    Convert the FWHM of a 1D Gaussian to its standard deviation.
    """
    # sigma = FWHM / (2 * sqrt(2 ln 2))
    conversion = 2.0 * np.sqrt(2.0 * np.log(2.0))
    return fwhm / conversion
def height_to_amplitude(height, sigma):
    """
    Convert the height of a 1D Gaussian to its integrated amplitude.
    """
    # amplitude = height * sigma * sqrt(2 pi)
    normalisation = np.sqrt(2 * np.pi)
    return height * sigma * normalisation
def amplitude_to_height(amplitude, sigma):
    """
    Convert the integrated amplitude of a 1D Gaussian to its height.
    """
    # height = amplitude / (sigma * sqrt(2 pi))
    normalisation = sigma * np.sqrt(2 * np.pi)
    return amplitude / normalisation
def bin_spectrum(spectrum, n=2):
    """
    Bin a spectrum by averaging every ``n`` adjacent cells.

    Input:
        spectrum: astropy table with 'wl', 'flux' and 'stdev' columns
        n: number of adjacent cells averaged together
    Output:
        astropy QTable holding the binned 'wl', 'flux' and 'stdev'
    """
    # Trim the tail so the length divides evenly by n.
    usable_length = len(spectrum) // n * n
    trimmed = spectrum[:usable_length]
    binned = QTable()
    binned["wl"] = Column(
        average_(trimmed["wl"], n), unit=trimmed["wl"].unit, dtype="f"
    )  # Ang
    binned["flux"] = Column(
        average_(trimmed["flux"], n), unit=trimmed["flux"].unit, dtype="f"
    )  # Ang
    # NOTE(review): stdev deliberately reuses the flux unit, as in the original.
    binned["stdev"] = Column(
        average_err(trimmed["stdev"], n), unit=trimmed["flux"].unit, dtype="f"
    )
    return binned
def reject_outliers(data, m=2):
    """
    Drop values further than ``m`` standard deviations from the mean.

    Input:
        data: array of input data with possible outliers
        m: number of sigma (stdev) away at which the data should be cut
    Output:
        data: filtered data
        mask: boolean mask of the values that were kept
    """
    deviation = np.abs(data - np.mean(data))
    mask = deviation <= m * np.std(data)
    return data[mask], mask
def dispersion(wl):
    """
    Return the dispersion (wavelength width per pixel) of a 1D spectrum.

    Input:
        wl: 1D wavelength array (preferably with astropy units attached)
    Output:
        minimum per-pixel wavelength step found in the spectrum
        ! Prints a warning when the sampling is noticeably non-uniform
    """
    steps = wl[1:] - wl[0:-1]
    # Ignore outlier steps (e.g. chip gaps) when judging uniformity.
    steps, _ = reject_outliers(steps, 3)
    mean_step = np.mean(steps)
    step_scatter = np.std(steps)
    smallest_step = np.min(steps)
    if step_scatter / mean_step > 10 ** -3:
        print(Fore.YELLOW + "Warning: non-constant dispersion")
    return smallest_step
def mask_line(wl, wl_ref, mask_width):
    """
    Build a mask hiding the spectrum within ``mask_width`` around a line.

    Input:
        wl: wavelength array to be masked; preferably has a unit
        wl_ref: reference wavelength of the line to hide; preferably has a unit
        mask_width: full width of the masked window around the line
    Output:
        boolean mask that is False inside the line window, True elsewhere
    """
    half_width = mask_width / 2.0
    below_window = wl < (wl_ref - half_width)
    above_window = wl > (wl_ref + half_width)
    return below_window | above_window
def spectrum_rms(y):
    """
    Root-mean-square of a spectrum after subtracting its mean value.
    """
    centered = y - np.mean(y)
    return np.sqrt(np.mean(centered ** 2))
def mask_atmosphere(wl, z, sky):
    """
    Mask prominent optical atmospheric absorption bands.
    !!! Assumes the spectrum has been restframed !!!

    Input:
        wl: rest-frame wavelength array
        z: redshift of the source; used to restframe the sky bands
        sky: sky absorption bands in QTable format (or None to disable)
    Output:
        boolean mask that is False inside any restframed sky band
    """
    if sky is None:
        # Masking disabled: keep every pixel.
        return np.ones_like(wl.value).astype(bool)
    # OR together the per-band "inside absorption" masks.
    inside_any_band = None
    for band in sky:
        band_mask = ((wl > restframe_wl(band["wavelength_min"], z))
                     & (wl < restframe_wl(band["wavelength_max"], z)))
        inside_any_band = band_mask if inside_any_band is None else (inside_any_band | band_mask)
    return ~inside_any_band
def restframe_wl(x, z):
    """
    Shift an observed spectrum x into the rest frame for redshift z.

    Input:
        x: observed wavelengths, e.g. in Angstrom or nm
        z: redshift
    Return:
        restframe wavelengths
    """
    scale = 1.0 + z
    return x / scale
def add_restframe(spectrum, z):
    """
    Append a restframe-wavelength column onto a 1D spectrum table.

    Input:
        spectrum: astropy table containing the 1d spectrum of a source
        z: redshift, determined externally (e.g. specpro)
    Output:
        the same spectrum with an added 'wl_rest' column
    """
    restframed = restframe_wl(spectrum["wl"], z)
    spectrum.add_column(restframed, name="wl_rest")
    return spectrum
def select_singleline(wl_rest, line, cont_width):
    """
    Select the region around an emission line.
    !!! Assumes the spectrum is restframed

    Input:
        wl_rest: restframe wavelengths, preferably with a unit
        line: wavelength of the line of interest, preferably with a unit
        cont_width: half-window of spectrum kept on each side of the line
    Output:
        boolean mask that is True inside the window around the line
    """
    lower_edge = line - cont_width
    upper_edge = line + cont_width
    return (wl_rest > lower_edge) & (wl_rest < upper_edge)
def select_lines(
    selected_lines, other_lines, spectrum, target_info, sky, cont_width, mask_width,
):
    """
    Build a mask keeping the lines of interest plus surrounding continuum,
    while hiding neighbouring lines and atmospheric absorption.

    Input:
        selected_lines: table of all the lines to be fit
        other_lines: the other lines in the table that will be masked
        spectrum: 1d spectrum table with a 'wl_rest' restframe column
        target_info: ancillary source information (must contain 'Redshift')
        sky: sky absorption bands (or None to skip atmospheric masking)
        cont_width: wavelength coverage kept on each side of a selected line
        mask_width: width used to hide each neighbouring line
    Output:
        boolean mask selecting the line(s) of interest and their continuum,
        with all other lines and sky bands masked away
    """
    z_ref = target_info["Redshift"]
    wl_rest = spectrum["wl_rest"]
    # Called purely for its side effect: warns if the restframe sampling is
    # non-uniform. The returned dispersion value is not needed here.
    dispersion(wl_rest)
    # Mask every neighbouring line we do not want contaminating the continuum.
    masked_otherlines = np.full(np.shape(wl_rest), True)
    for line in map(QTable, other_lines):
        masked_otherlines = masked_otherlines & mask_line(wl_rest, line["wavelength"], mask_width)
    # Select the windows around the lines of interest.
    selected_windows = np.full(np.shape(wl_rest), False)
    for line in map(QTable, selected_lines):
        selected_windows = selected_windows | select_singleline(wl_rest, line["wavelength"], cont_width)
    # Mask the atmospheric bands, if sky masking is enabled.
    masked_atm = mask_atmosphere(wl_rest, z_ref, sky)
    return masked_atm & masked_otherlines & selected_windows
def group_lines(line_list, tolerance):
    """
    Group together lines lying within a wavelength tolerance of each other.
    Grouped lines are later fit jointly as a sum of Gaussians rather than
    independently.

    Input:
        line_list: astropy table containing lines and their properties
        tolerance: half-width used to decide whether two lines form a group
    Output:
        groups of lines (Component instances from connected_components)
    """
    intervals = [(wl - tolerance, wl + tolerance) for wl in line_list["wavelength"]]
    return connected_components(intervals, left, right)
@dataclass
class Component:
    # One group of overlapping segments plus the outer edges of their union.
    segments: List
    beginning: float
    ending: float


# offline algorithm
def connected_components(segments, left, right):
    """
    Group overlapping segments (here: wavelength intervals) together.

    Input:
        segments: list of segment edge pairs (beginning, end)
        left: accessor returning a segment's left-most edge
        right: accessor returning a segment's right-most edge
    Output:
        list of Component objects, each bundling the segments that overlap
        along with the group's overall left and right edges
    """
    ordered = sorted(segments, key=left)
    if not ordered:
        return []
    head, *remaining = ordered
    current = Component(segments=[head], beginning=left(head), ending=right(head))
    groups = [current]
    for segment in remaining:
        opening = left(segment)
        closing = right(segment)
        if current.ending > opening:
            # Overlaps the running group: absorb it and maybe extend the edge.
            current.segments.append(segment)
            if closing > current.ending:
                current.ending = closing
        else:
            # Gap found: start a fresh group.
            current = Component(
                segments=[segment], beginning=opening, ending=closing
            )
            groups.append(current)
    return groups


def left(x):
    """Left edge of a (begin, end) segment pair."""
    return x[0]


def right(x):
    """Right edge of a (begin, end) segment pair."""
    return x[1]
| [
"numpy.mean",
"numpy.ones_like",
"numpy.sqrt",
"numpy.std",
"numpy.log",
"numpy.min",
"astropy.table.QTable",
"numpy.shape",
"colorama.init"
] | [((334, 354), 'colorama.init', 'init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (338, 354), False, 'from colorama import init\n'), ((1956, 1964), 'astropy.table.QTable', 'QTable', ([], {}), '()\n', (1962, 1964), False, 'from astropy.table import QTable, Column\n'), ((3217, 3230), 'numpy.mean', 'np.mean', (['diff'], {}), '(diff)\n', (3224, 3230), True, 'import numpy as np\n'), ((3243, 3255), 'numpy.std', 'np.std', (['diff'], {}), '(diff)\n', (3249, 3255), True, 'import numpy as np\n'), ((3270, 3282), 'numpy.min', 'np.min', (['diff'], {}), '(diff)\n', (3276, 3282), True, 'import numpy as np\n'), ((1383, 1401), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1390, 1401), True, 'import numpy as np\n'), ((7901, 7918), 'numpy.shape', 'np.shape', (['wl_rest'], {}), '(wl_rest)\n', (7909, 7918), True, 'import numpy as np\n'), ((8150, 8167), 'numpy.shape', 'np.shape', (['wl_rest'], {}), '(wl_rest)\n', (8158, 8167), True, 'import numpy as np\n'), ((1548, 1566), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1555, 1566), True, 'import numpy as np\n'), ((2705, 2717), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (2711, 2717), True, 'import numpy as np\n'), ((1094, 1105), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (1100, 1105), True, 'import numpy as np\n'), ((2683, 2696), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2690, 2696), True, 'import numpy as np\n'), ((4805, 4827), 'numpy.ones_like', 'np.ones_like', (['wl.value'], {}), '(wl.value)\n', (4817, 4827), True, 'import numpy as np\n'), ((1230, 1241), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (1236, 1241), True, 'import numpy as np\n'), ((4149, 4159), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (4156, 4159), True, 'import numpy as np\n')] |
__author__ = 'renienj'
import numpy as np
def compute_jaccard_similarity_score(x, y):
    """
    Jaccard Similarity J (A,B) = | Intersection (A,B) | / | Union (A,B) |
    """
    set_x, set_y = set(x), set(y)
    shared = len(set_x.intersection(set_y))
    combined = len(set_x.union(set_y))
    return shared / float(combined)
if __name__ == "__main__":
    # Demo run: score two small integer arrays against each other.
    sample_a = np.array([0, 1, 2, 5, 6])
    sample_b = np.array([0, 2, 3, 5, 7, 9])
    score = compute_jaccard_similarity_score(sample_a, sample_b)
    print("Jaccard Similarity Score : %s" % score)
"numpy.array"
] | [((434, 459), 'numpy.array', 'np.array', (['[0, 1, 2, 5, 6]'], {}), '([0, 1, 2, 5, 6])\n', (442, 459), True, 'import numpy as np\n'), ((461, 489), 'numpy.array', 'np.array', (['[0, 2, 3, 5, 7, 9]'], {}), '([0, 2, 3, 5, 7, 9])\n', (469, 489), True, 'import numpy as np\n')] |
import numpy as np
from astropy.table import Table
from btk.metrics import get_detection_match
def test_true_detected_catalog():
    """Test if correct matches are computed from the true and detected tables"""
    peak_columns = ["x_peak", "y_peak"]
    true_table = Table([[0.0, 1.0], [0.0, 0.0]], names=peak_columns)
    detected_table = Table([[0.1], [0.1]], names=peak_columns)
    matches = get_detection_match(true_table, detected_table)
    # The detection at (0.1, 0.1) should match the first true source only.
    np.testing.assert_array_equal(
        matches["match_detected_id"],
        np.array([0, -1]),
        err_msg="Incorrect match",
    )
    np.testing.assert_array_equal(
        matches["dist"],
        [0.14142135623730953, 0.0],
        err_msg="Incorrect distance between true centers",
    )
def test_no_detection():
    """When no detection, make sure no match is returned"""
    peak_columns = ["x_peak", "y_peak"]
    true_table = Table([[0.0], [0.0]], names=peak_columns)
    # An empty detection table must yield an unmatched (-1) true source.
    detected_table = Table([[], []], names=peak_columns)
    matches = get_detection_match(true_table, detected_table)
    np.testing.assert_array_equal(
        matches["match_detected_id"], [-1], err_msg="A match was returned when it should not have."
    )
| [
"btk.metrics.get_detection_match",
"numpy.array",
"numpy.testing.assert_array_equal",
"astropy.table.Table"
] | [((299, 323), 'astropy.table.Table', 'Table', (['cols'], {'names': 'names'}), '(cols, names=names)\n', (304, 323), False, 'from astropy.table import Table\n'), ((345, 394), 'astropy.table.Table', 'Table', (['[[0.1], [0.1]]'], {'names': "['x_peak', 'y_peak']"}), "([[0.1], [0.1]], names=['x_peak', 'y_peak'])\n", (350, 394), False, 'from astropy.table import Table\n'), ((409, 456), 'btk.metrics.get_detection_match', 'get_detection_match', (['true_table', 'detected_table'], {}), '(true_table, detected_table)\n', (428, 456), False, 'from btk.metrics import get_detection_match\n'), ((485, 502), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (493, 502), True, 'import numpy as np\n'), ((507, 620), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["matches['match_detected_id']", 'target_num_detections'], {'err_msg': '"""Incorrect match"""'}), "(matches['match_detected_id'],\n target_num_detections, err_msg='Incorrect match')\n", (536, 620), True, 'import numpy as np\n'), ((652, 781), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["matches['dist']", '[0.14142135623730953, 0.0]'], {'err_msg': '"""Incorrect distance between true centers"""'}), "(matches['dist'], [0.14142135623730953, 0.0],\n err_msg='Incorrect distance between true centers')\n", (681, 781), True, 'import numpy as np\n'), ((972, 996), 'astropy.table.Table', 'Table', (['cols'], {'names': 'names'}), '(cols, names=names)\n', (977, 996), False, 'from astropy.table import Table\n'), ((1018, 1061), 'astropy.table.Table', 'Table', (['[[], []]'], {'names': "['x_peak', 'y_peak']"}), "([[], []], names=['x_peak', 'y_peak'])\n", (1023, 1061), False, 'from astropy.table import Table\n'), ((1076, 1123), 'btk.metrics.get_detection_match', 'get_detection_match', (['true_table', 'detected_table'], {}), '(true_table, detected_table)\n', (1095, 1123), False, 'from btk.metrics import get_detection_match\n'), ((1128, 1255), 'numpy.testing.assert_array_equal', 
'np.testing.assert_array_equal', (["matches['match_detected_id']", '[-1]'], {'err_msg': '"""A match was returned when it should not have."""'}), "(matches['match_detected_id'], [-1], err_msg=\n 'A match was returned when it should not have.')\n", (1157, 1255), True, 'import numpy as np\n')] |
import copy
import csv
import numpy as np
import traceback
import requests
import json
import time
import boto
import alog
import logging
from boto.s3.key import Key
from StringIO import StringIO
from datetime import datetime
from django.db.utils import IntegrityError
from firecares.utils.arcgis2geojson import arcgis2geojson
from firecares.utils import to_multipolygon
from celery import chain
from firecares.celery import app
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry, MultiPolygon, fromstr
from django.db import connections
from django.db.utils import ConnectionDoesNotExist
from scipy.stats import lognorm
from firecares.firestation.models import (
FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view)
from firecares.firestation.models import NFIRSStatistic as nfirs
from fire_risk.models import DIST, DISTMediumHazard, DISTHighHazard, NotEnoughRecords
from fire_risk.models.DIST.providers.ahs import ahs_building_areas
from fire_risk.models.DIST.providers.iaff import response_time_distributions
from fire_risk.utils import LogNormalDraw
from firecares.utils import dictfetchall, lenient_summation
def p(msg):
    # Thin logging shim: route progress messages through alog at INFO level.
    alog.info(msg)
# Drive-time isochrone break points (in minutes) requested per station.
ISOCHRONE_BREAKS = ['4', '6', '8']
# There are two different total response times for effective response force:
# 8 minutes for low and medium risk parcels
# 10 minutes for high risk parcels
# The mapbox API gives us drive time isochrones, however this does not factor in
# alarm processing time and turnout time. According to Lori at IPSDI, the drive
# time plus alarm processing plus turnout time should be less than or equal to
# the times above. She said shaving three minutes off of the allowable drive time
# would be the correct calculation for adjusting the drive times.
# Allowable drive time (minutes) per parcel risk level, per the note above.
RISK_LEVEL_ERF_DRIVE_TIMES = {
    'unknown': 5,
    'low': 5,
    'medium': 5,
    'high': 7,
}
# Field name holding the effective-response-force area, per risk level.
RISK_LEVEL_ERF_AREAS = {
    'low': 'erf_area_low',
    'medium': 'erf_area_medium',
    'high': 'erf_area_high',
    'unknown': 'erf_area_unknown',
}
# Minimum ERF personnel per risk level — the two tables differ only at the
# 'high' level (42 vs 38); presumably EMS-capability dependent — TODO confirm.
RISK_LEVEL_MINIMUM_STAFFING_NO_EMS = {
    'low': 15,
    'medium': 27,
    'high': 42,
    'unknown': 15,
}
RISK_LEVEL_MINIMUM_STAFFING = {
    'low': 15,
    'medium': 27,
    'high': 38,
    'unknown': 15,
}
# HTTP 429: status returned when an external API rate-limits our requests.
HTTP_TOO_MANY_REQUESTS = 429
def update_scores():
    """Queue a performance-score recalculation for every non-archived department."""
    for department in FireDepartment.objects.filter(archived=False):
        update_performance_score.delay(department.id)
def dist_model_for_hazard_level(hazard_level):
    """
    Return the DIST model class matching a hazard level name
    (case-insensitive); anything other than 'high'/'medium' gets base DIST.
    """
    level = hazard_level.lower()
    model_by_level = {'high': DISTHighHazard, 'medium': DISTMediumHazard}
    return model_by_level.get(level, DIST)
@app.task(queue='update')
def update_performance_score(id, dry_run=False):
    """
    Updates department performance scores.

    Pulls residential fire-spread counts per risk category from NFIRS,
    Gibbs-samples a DIST score per hazard level, and stores the result on the
    department's risk-model records (including an aggregate "All" level).

    :param id: FireDepartment primary key.
    :param dry_run: when True, compute scores but skip saving any records.
    """
    p("updating performance score for {}".format(id))
    try:
        cursor = connections['nfirs'].cursor()
        fd = FireDepartment.objects.get(id=id)
    except (ConnectionDoesNotExist, FireDepartment.DoesNotExist):
        return
    # Hack to get around inline SQL string execution and argument escaping in a tuple
    fds = ["''{}''".format(x) for x in fd.fdids]
    # Crosstab producing one row per risk category with counts per fire-spread
    # extent; prop_use codes restrict the query to residential properties.
    RESIDENTIAL_FIRES_BY_FDID_STATE = """
    SELECT *
    FROM crosstab(
      'select COALESCE(y.risk_category, ''N/A'') as risk_category, fire_sprd, count(*)
        FROM joint_buildingfires a left join (
          SELECT state,
            fdid,
            inc_date,
            inc_no,
            exp_no,
            geom,
            x.parcel_id,
            x.risk_category
          FROM (select * from joint_incidentaddress a
             left join parcel_risk_category_local b using (parcel_id)
             ) AS x
        ) AS y using (state, inc_date, exp_no, fdid, inc_no)
        where a.state='%(state)s' and a.fdid in ({fds}) and prop_use in (''419'',''429'',''439'',''449'',''459'',''460'',''462'',''464'',''400'')
        and fire_sprd is not null and fire_sprd != ''''
        group by risk_category, fire_sprd
        order by risk_category, fire_sprd ASC')
    AS ct(risk_category text, "object_of_origin" bigint, "room_of_origin" bigint, "floor_of_origin" bigint, "building_of_origin" bigint, "beyond" bigint);
    """.format(fds=','.join(fds))
    cursor.execute(RESIDENTIAL_FIRES_BY_FDID_STATE, {'state': fd.state})
    results = dictfetchall(cursor)
    # Running totals across every risk category, used for the "All" level below.
    all_counts = dict(object_of_origin=0,
                      room_of_origin=0,
                      floor_of_origin=0,
                      building_of_origin=0,
                      beyond=0)
    # Risk-category label -> HazardLevels enum value.
    risk_mapping = {'Low': 1, 'Medium': 2, 'High': 4, 'N/A': 5}
    ahs_building_size = ahs_building_areas(fd.fdid, fd.state)
    for result in results:
        if result.get('risk_category') not in risk_mapping:
            continue
        dist_model = dist_model_for_hazard_level(result.get('risk_category'))
        # Use floor draws based on the LogNormal of the structure type distribution for med/high risk categories
        # TODO: Detect support for number_of_floors_draw on risk model vs being explicit on hazard levels used :/
        if result.get('risk_category') in ['Medium', 'High']:
            rm, _ = fd.firedepartmentriskmodels_set.get_or_create(level=risk_mapping[result['risk_category']])
            if rm.floor_count_coefficients:
                pass
                # TODO
                # dist_model.number_of_floors_draw = LogNormalDraw(*rm.floor_count_coefficients)
        # Crosstab cells can be NULL when a spread category had no fires.
        counts = dict(object_of_origin=result['object_of_origin'] or 0,
                      room_of_origin=result['room_of_origin'] or 0,
                      floor_of_origin=result['floor_of_origin'] or 0,
                      building_of_origin=result['building_of_origin'] or 0,
                      beyond=result['beyond'] or 0)
        # add current risk category to the all risk category
        for key, value in counts.items():
            all_counts[key] += value
        if ahs_building_size is not None:
            counts['building_area_draw'] = ahs_building_size
        # Optional IAFF arrival-time distribution for this department.
        response_times = response_time_distributions.get('{0}-{1}'.format(fd.fdid, fd.state))
        if response_times:
            counts['arrival_time_draw'] = LogNormalDraw(*response_times, multiplier=60)
        record, _ = fd.firedepartmentriskmodels_set.get_or_create(level=risk_mapping[result['risk_category']])
        old_score = record.dist_model_score
        try:
            dist = dist_model(floor_extent=False, **counts)
            record.dist_model_score = dist.gibbs_sample()
            record.dist_model_score_fire_count = dist.total_fires
            p('updating fdid: {2} - {3} performance score from: {0} to {1}.'.format(old_score, record.dist_model_score, fd.id, HazardLevels(record.level).name))
        except (NotEnoughRecords, ZeroDivisionError):
            p('Error updating DIST score: {}.'.format(traceback.format_exc()))
            record.dist_model_score = None
        if not dry_run:
            record.save()
    # Clear out scores for missing hazard levels
    if not dry_run:
        missing_categories = set(risk_mapping.keys()) - set(map(lambda x: x.get('risk_category'), results))
        for r in missing_categories:
            p('clearing {0} level from {1} due to missing categories in aggregation'.format(r, fd.id))
            record, _ = fd.firedepartmentriskmodels_set.get_or_create(level=risk_mapping[r])
            record.dist_model_score = None
            record.save()
    # Finally, score the aggregate "All" hazard level from the summed counts.
    record, _ = fd.firedepartmentriskmodels_set.get_or_create(level=HazardLevels.All.value)
    old_score = record.dist_model_score
    dist_model = dist_model_for_hazard_level('All')
    try:
        if ahs_building_size is not None:
            all_counts['building_area_draw'] = ahs_building_size
        response_times = response_time_distributions.get('{0}-{1}'.format(fd.fdid, fd.state))
        if response_times:
            all_counts['arrival_time_draw'] = LogNormalDraw(*response_times, multiplier=60)
        dist = dist_model(floor_extent=False, **all_counts)
        record.dist_model_score = dist.gibbs_sample()
        p('updating fdid: {2} - {3} performance score from: {0} to {1}.'.format(old_score, record.dist_model_score, fd.id, HazardLevels(record.level).name))
    except (NotEnoughRecords, ZeroDivisionError):
        p('Error updating DIST score: {}.'.format(traceback.format_exc()))
        record.dist_model_score = None
    if not dry_run:
        record.save()
    p("...updated performance score for {}".format(id))
# Drops the per-department scratch table created by update_nfirs_counts.
DROP_TMP_PARCEL_RISK_TABLE = """
DROP TABLE IF EXISTS {}
"""
# Materializes incident addresses joined to their parcel risk category for one
# department/state/year set; SELECT ... INTO creates the scratch table named
# by the format placeholder.
TMP_PARCEL_RISK_TABLE = """
SELECT a.state, a.fdid, a.inc_date, a.inc_no, a.exp_no, a.geom, a.parcel_id, p.risk_category
INTO {}
FROM joint_incidentaddress a
LEFT JOIN parcel_risk_category_local p
USING (parcel_id)
WHERE a.state = %(state)s AND a.fdid in %(fdid)s AND extract(year FROM a.inc_date) IN %(years)s
"""
# Counts incidents from {stat_table} per year and risk level, using the
# scratch table above to attach each incident's parcel risk category.
NFIRS_STATS_QUERY = """
SELECT count(1) as count, extract(year from a.inc_date) as year, COALESCE(b.risk_category, 'N/A') as risk_level
FROM {stat_table} a
LEFT JOIN {parcel_risk_table} b
USING (state, fdid, inc_date, inc_no, exp_no)
WHERE a.state = %(state)s AND a.fdid in %(fdid)s AND extract(year FROM a.inc_date) IN %(years)s
GROUP BY b.risk_category, extract(year from a.inc_date)
ORDER BY extract(year from a.inc_date) DESC
"""
@app.task(queue='update')
def update_fires_heatmap(id):
    """
    Export a department's building-fire incidents as CSV heatmap data.

    Queries NFIRS for each incident's time, type, casualties, coordinates and
    parcel risk category, then uploads the CSV (public-read) to the configured
    heatmap S3 bucket as '{id}-building-fires.csv'.
    """
    try:
        fd = FireDepartment.objects.get(id=id)
        cursor = connections['nfirs'].cursor()
    except (FireDepartment.DoesNotExist, ConnectionDoesNotExist):
        return
    q = """
    SELECT alarm, a.inc_type, alarms,ff_death, oth_death, ST_X(geom) AS x, st_y(geom) AS y, COALESCE(y.risk_category, 'Unknown') AS risk_category
    FROM buildingfires a
    LEFT JOIN (
        SELECT state, fdid, inc_date, inc_no, exp_no, x.geom, x.parcel_id, x.risk_category
        FROM (
            SELECT *
            FROM incidentaddress a
            LEFT JOIN parcel_risk_category_local
            using (parcel_id)
        ) AS x
    ) AS y
    USING (state, fdid, inc_date, inc_no, exp_no)
    WHERE a.state = %(state)s and a.fdid in %(fdid)s
    """
    cursor.execute(q, params=dict(state=fd.state, fdid=tuple(fd.fdids)))
    res = cursor.fetchall()
    # Build the CSV in memory before shipping it to S3.
    out = StringIO()
    writer = csv.writer(out)
    writer.writerow('alarm,inc_type,alarms,ff_death,oth_death,x,y,risk_category'.split(','))
    for r in res:
        writer.writerow(r)
    s3 = boto.connect_s3()
    k = Key(s3.get_bucket(settings.HEATMAP_BUCKET))
    k.key = '{}-building-fires.csv'.format(id)
    k.set_contents_from_string(out.getvalue())
    k.set_acl('public-read')
@app.task(queue='update')
def update_ems_heatmap(id):
    """
    Export a department's EMS incidents as CSV heatmap data.

    Joins EMS incidents to their basic-incident record and parcel risk
    category, then uploads the CSV (public-read) to the configured heatmap
    S3 bucket as '{id}-ems-incidents.csv'.
    """
    try:
        fd = FireDepartment.objects.get(id=id)
        cursor = connections['nfirs'].cursor()
    except (FireDepartment.DoesNotExist, ConnectionDoesNotExist):
        return
    q = """
    SELECT bi.alarm, ST_X(geom) AS x, st_y(geom) AS y, COALESCE(y.risk_category, 'Unknown') AS risk_category
    FROM ems.ems a
    INNER JOIN ems.basicincident bi
    ON a.state = bi.state and a.fdid = bi.fdid and a.inc_no = bi.inc_no and a.exp_no = bi.exp_no and to_date(a.inc_date, 'MMDDYYYY') = bi.inc_date
    LEFT JOIN (
        SELECT state, fdid, inc_date, inc_no, exp_no, x.geom, x.parcel_id, x.risk_category
        FROM (
            SELECT *
            FROM ems.incidentaddress a
            LEFT JOIN parcel_risk_category_local
            using (parcel_id)
        ) AS x
    ) AS y
    ON a.state = y.state and a.fdid = y.fdid and to_date(a.inc_date, 'MMDDYYYY') = y.inc_date and a.inc_no = y.inc_no and a.exp_no = y.exp_no
    WHERE a.state = %(state)s and a.fdid in %(fdid)s
    """
    cursor.execute(q, params=dict(state=fd.state, fdid=tuple(fd.fdids)))
    res = cursor.fetchall()
    # Build the CSV in memory before shipping it to S3.
    out = StringIO()
    writer = csv.writer(out)
    writer.writerow('alarm,x,y,risk_category'.split(','))
    for r in res:
        writer.writerow(r)
    s3 = boto.connect_s3()
    k = Key(s3.get_bucket(settings.HEATMAP_BUCKET))
    k.key = '{}-ems-incidents.csv'.format(id)
    k.set_contents_from_string(out.getvalue())
    k.set_acl('public-read')
@app.task(queue='update')
def update_department(id):
    """Refresh one department: NFIRS counts, performance score, census geometry."""
    p("updating department {}".format(id))
    workflow = chain(update_nfirs_counts.si(id),
                     update_performance_score.si(id),
                     calculate_department_census_geom.si(id))
    workflow.delay()
@app.task(queue='update')
def refresh_department_views():
    """Rebuild the quartile and national-calculation views, in that order."""
    p("updating department Views")
    workflow = chain(refresh_quartile_view_task.si(),
                     refresh_national_calculations_view_task.si())
    workflow.delay()
@app.task(queue='update')
def update_all_nfirs_counts(years=None):
    """Fan out an NFIRS-stat refresh to every non-archived department."""
    p('updating all department nfirs stats')
    department_ids = FireDepartment.objects.filter(archived=False).values_list('id', flat=True)
    for department_id in department_ids:
        update_nfirs_counts.delay(department_id, years)
@app.task(queue='update')
def update_nfirs_counts(id, year=None, stat=None):
    """
    Queries the NFIRS database for statistics.

    Builds a per-department scratch table joining incidents to parcel risk
    categories, then stores yearly counts of fires, calls and casualties per
    risk level on NFIRSStatistic records (level 0 holds the total).

    :param id: FireDepartment primary key.
    :param year: optional iterable of years; defaults to every year in NFIRS.
    :param stat: optional single statistic name to refresh (e.g. 'fire_calls').
    """
    if not id:
        return
    try:
        fd = FireDepartment.objects.get(id=id)
    except (FireDepartment.DoesNotExist, ConnectionDoesNotExist):
        return
    p("updating NFIRS counts for {}: {}".format(fd.id, fd.name))
    # Risk-category label -> hazard-level value used on the stat records.
    mapping = {'Low': 1, 'Medium': 2, 'High': 4, 'N/A': 5}
    years = {}
    if not year:
        # get a list of years populated in the NFIRS database
        years_query = "select distinct(extract(year from inc_date)) as year from joint_buildingfires;"
        with connections['nfirs'].cursor() as cursor:
            cursor.execute(years_query)
            # default years to None
            years = {x: {1: None, 2: None, 4: None, 5: None} for x in [int(n[0]) for n in cursor.fetchall()]}
    else:
        years = {y: {1: None, 2: None, 4: None, 5: None} for y in year}
    params = dict(fdid=tuple(fd.fdids), state=fd.state, years=tuple(years.keys()))
    p('building temp table to optimize queries')
    tmp_table_name = 'tmp_{state}_{fdids}_{years}'.format(
        state=params['state'],
        # some departments have a single ' ' character for the fdid
        fdids='_'.join(str(fdid).replace(' ', '_') for fdid in params['fdid']),
        years='_'.join(str(year) for year in params['years'])
    )
    tmp_table_query = TMP_PARCEL_RISK_TABLE.format(tmp_table_name)
    with connections['nfirs'].cursor() as cursor:
        cursor.execute(DROP_TMP_PARCEL_RISK_TABLE.format(tmp_table_name))
        cursor.execute(tmp_table_query, params)
    p('temp table created')
    # (statistic name, SQL against the matching NFIRS table, query params)
    queries = (
        ('civilian_casualties', NFIRS_STATS_QUERY.format(stat_table='joint_civiliancasualty', parcel_risk_table=tmp_table_name), params),
        ('residential_structure_fires', NFIRS_STATS_QUERY.format(stat_table='joint_buildingfires', parcel_risk_table=tmp_table_name), params),
        ('firefighter_casualties', NFIRS_STATS_QUERY.format(stat_table='joint_ffcasualty', parcel_risk_table=tmp_table_name), params),
        ('fire_calls', NFIRS_STATS_QUERY.format(stat_table='joint_fireincident', parcel_risk_table=tmp_table_name), params),
    )
    if stat:
        queries = filter(lambda x: x[0] == stat, queries)
    for statistic, query, params in queries:
        with connections['nfirs'].cursor() as cursor:
            p('querying NFIRS counts: {}'.format(json.dumps(
                {
                    'department_id': fd.id,
                    'department_name': fd.name,
                    'years': list(years.keys()),
                    'statistic': statistic,
                    'timestamp': str(datetime.now()),
                })
            ))
            # Start from the all-None year/level grid and fill in real counts.
            counts = copy.deepcopy(years)
            start_time = time.time()
            cursor.execute(query, params)
            end_time = time.time()
            p('query took {:.4f} seconds'.format(end_time - start_time))
            for count, year, level in cursor.fetchall():
                counts[int(year)][mapping[level]] = int(count) if count is not None else count
            p('updating NFIRS counts: {}'.format(json.dumps(
                {
                    'department_id': fd.id,
                    'department_name': fd.name,
                    'years': list(years.keys()),
                    'statistic': statistic,
                    'timestamp': str(datetime.now()),
                })
            ))
            for year, levels in counts.items():
                for level, count in levels.items():
                    nfirs.objects.update_or_create(year=year, defaults={'count': count}, fire_department=fd, metric=statistic, level=level)
                # Level 0 holds the lenient (None-tolerant) sum across levels.
                total = lenient_summation(*map(lambda x: x[1], levels.items()))
                nfirs.objects.update_or_create(year=year, defaults={'count': total}, fire_department=fd, metric=statistic, level=0)
    with connections['nfirs'].cursor() as cursor:
        cursor.execute('DROP TABLE {}'.format(tmp_table_name))
    p("updated NFIRS counts for {}: {}".format(id, fd.name))
@app.task(queue='update')
def refresh_nfirs_views(view_name=None):
    """
    Refresh the NFIRS materialized views.

    :param view_name: optional name of a single known view to refresh; when
        omitted (or not one of the known names) every view is refreshed.
    """
    view_names = {
        'joint_incidentaddress',
        'joint_buildingfires',
        'joint_ffcasualty',
        'joint_civiliancasualty',
        'joint_fireincident',
    }
    # Only narrow to a single view when the caller names a known one;
    # anything else falls through to a full refresh.
    if view_name in view_names:
        view_names = [view_name]
    for name in view_names:
        with connections['nfirs'].cursor() as cursor:
            # BUG FIX: log messages previously misspelled "materialized"
            # ("materiazlied").
            p('refreshing materialized view {}'.format(name))
            cursor.execute('REFRESH MATERIALIZED VIEW {}'.format(name))
            p('refreshing materialized view complete {}'.format(name))
@app.task(queue='update')
def calculate_department_census_geom(fd_id):
    """
    Calculate and cache the owned census geometry for a specific department.

    Unions the census block groups tied to the department's tracts and stores
    the result on ``FireDepartment.owned_tracts_geom``.
    """
    try:
        fd = FireDepartment.objects.get(id=fd_id)
        # Accessing an unknown alias raises ConnectionDoesNotExist.
        connection = connections['nfirs']
    except (FireDepartment.DoesNotExist, ConnectionDoesNotExist):
        return
    UNION_CENSUS_TRACTS_FOR_DEPARTMENT = """SELECT ST_Multi(ST_Union(bg.geom))
        FROM nist.tract_years ty
        INNER JOIN census_block_groups_2010 bg
        ON ty.tr10_fid = ('14000US'::text || "substring"((bg.geoid10)::text, 0, 12))
        WHERE ty.fc_dept_id = %(id)s
        GROUP BY ty.fc_dept_id
    """
    # FIX: use the cursor as a context manager (as done elsewhere in this
    # module) so it is always closed instead of leaking.
    with connection.cursor() as cursor:
        cursor.execute(UNION_CENSUS_TRACTS_FOR_DEPARTMENT, {'id': fd.id})
        geom = cursor.fetchone()
    if geom:
        fd.owned_tracts_geom = GEOSGeometry(geom[0])
        fd.save()
    else:
        p('No census geom - {} ({})'.format(fd.name, fd.id))
@app.task(queue='update', rate_limit='5/h')
def refresh_quartile_view_task(*args, **kwargs):
    """
    Updates the Quartile Materialized Views.

    Thin Celery wrapper around ``refresh_quartile_view``; ``*args``/``**kwargs``
    are accepted but unused, so the task can be triggered with any payload.
    Rate-limited to 5 runs per hour.
    """
    p("updating quartile view")
    refresh_quartile_view()
@app.task(queue='update', rate_limit='5/h')
def refresh_national_calculations_view_task(*args, **kwargs):
    """
    Updates the National Calculation View.

    Thin Celery wrapper around ``refresh_national_calculations_view``;
    ``*args``/``**kwargs`` are accepted but unused. Rate-limited to 5 runs
    per hour.
    """
    p("updating national calculations view")
    refresh_national_calculations_view()
@app.task(queue='update')
def calculate_structure_counts(fd_id):
    """
    Count parcels per hazard category inside the department's owned census
    tracts and store the counts on its risk models.

    Skips departments that have no cached owned-tracts geometry or that
    already have structure counts for all 5 hazard levels.
    """
    try:
        fd = FireDepartment.objects.get(id=fd_id)
        connection = connections['nfirs']
    except (FireDepartment.DoesNotExist, ConnectionDoesNotExist):
        return
    # Skip over existing calculations or missing dept owned tracts
    if not fd.owned_tracts_geom or fd.firedepartmentriskmodels_set.filter(structure_count__isnull=False).count() == 5:
        return
    STRUCTURE_COUNTS = """SELECT sum(case when l.risk_category = 'Low' THEN 1 ELSE 0 END) as low,
        sum(CASE WHEN l.risk_category = 'Medium' THEN 1 ELSE 0 END) as medium,
        sum(CASE WHEN l.risk_category = 'High' THEN 1 ELSE 0 END) high,
        sum(CASE WHEN l.risk_category is null THEN 1 ELSE 0 END) as na
    FROM parcel_risk_category_local l
    JOIN (SELECT ST_SetSRID(%(owned_geom)s::geometry, 4326) as owned_geom) x
    ON owned_geom && l.wkb_geometry
    WHERE ST_Intersects(owned_geom, l.wkb_geometry)
    """
    # FIX: close the cursor via context manager (was leaked before).
    with connection.cursor() as cursor:
        cursor.execute(STRUCTURE_COUNTS, {'owned_geom': fd.owned_tracts_geom.wkb})
        counts = dictfetchall(cursor)[0]
    # Maps HazardLevels enum values to the query's result columns.
    mapping = {1: 'low', 2: 'medium', 4: 'high', 5: 'na'}
    total = 0
    # FIX: renamed ambiguous loop variable `l` (PEP 8 E741) to `level`.
    for level in HazardLevels.values_sans_all():
        rm, _ = fd.firedepartmentriskmodels_set.get_or_create(level=level)
        count = counts[mapping[level]]
        rm.structure_count = count
        rm.save()
        total += count
    # Level 0 ("All") stores the sum across the individual hazard levels.
    rm, _ = fd.firedepartmentriskmodels_set.get_or_create(level=HazardLevels.All.value)
    rm.structure_count = total
    rm.save()
@app.task(queue='update')
def calculate_story_distribution(fd_id):
    """
    Using the department in combination with similar departments, calculate the story distribution of structures in
    owned census tracts. Only medium and high risk structures are included in the calculations.

    Fits a log-normal distribution to sampled story counts and stores the
    (shape, loc, scale) coefficients on the department's risk models.
    """
    # Story counts above this are treated as data errors and discarded.
    MAX_STORIES = 108
    try:
        fd = FireDepartment.objects.get(id=fd_id)
        cursor = connections['nfirs'].cursor()
    except (FireDepartment.DoesNotExist, ConnectionDoesNotExist):
        return
    geoms = list(fd.similar_departments.filter(owned_tracts_geom__isnull=False).values_list('owned_tracts_geom', flat=True))
    geoms.append(fd.owned_tracts_geom)
    FIND_STORY_COUNTS = """SELECT count(1), p.story_nbr
    FROM parcel_stories p
    JOIN "LUSE_swg" lu ON lu."Code" = p.land_use,
    (SELECT g.owned_tracts_geom FROM (VALUES {values}) AS g (owned_tracts_geom)) owned_tracts
    WHERE lu.include_in_floor_dist AND lu.risk_category = %(level)s
    AND ST_Intersects(owned_tracts.owned_tracts_geom, p.wkb_geometry)
    GROUP BY p.story_nbr
    ORDER BY count DESC, p.story_nbr;"""
    values = ','.join(['(ST_SetSRID(\'{}\'::geometry, 4326))'.format(geom.hex) for geom in geoms])
    # Maps HazardLevels enum values to the risk_category names used in the DB.
    mapping = {2: 'Medium', 4: 'High'}

    def expand(values, weights):
        # Repeat each value `weight` times, e.g. expand([1, 3], [2, 1]) -> [1, 1, 3].
        expanded = []
        for value, weight in zip(values, weights):
            expanded.extend([value] * weight)
        return expanded

    for nlevel, level in mapping.items():
        cursor.execute(FIND_STORY_COUNTS.format(values=values), {'level': level})
        res = cursor.fetchall()
        # Filter out `None` story counts and obnoxious values.
        # FIX: use list comprehensions instead of chained filter/map over a
        # shared result — on Python 3 filter/map return single-use lazy
        # iterators, so zipping `vals` with `weights` would interleave
        # consumption and silently corrupt the expansion. On Python 2 this
        # change is behaviorally identical.
        usable = [(count, stories) for count, stories in res
                  if stories is not None and stories <= MAX_STORIES]
        weights = [count for count, _ in usable]
        vals = [stories for _, stories in usable]
        expanded = expand(vals, weights)
        samples = np.random.choice(expanded, size=1000)
        # Fit curve to story counts; lognorm.fit returns (shape, loc, scale).
        samp = lognorm.fit(samples)
        rm = fd.firedepartmentriskmodels_set.get(level=nlevel)
        rm.floor_count_coefficients = {'shape': samp[0], 'loc': samp[1], 'scale': samp[2]}
        rm.save()
def get_mapbox_isochrone_geometry(x, y, params):
    """
    Fetch a drive-time isochrone from the Mapbox API for the given point.

    Retries forever with exponential backoff on any request error. Returns
    the first feature's geometry as a JSON string, or None when the response
    contains no features.
    """
    url = '{base_url}/isochrone/v1/mapbox/driving/{x},{y}'.format(
        x=x,
        y=y,
        base_url=settings.MAPBOX_BASE_URL,
    )
    delay = 1
    while True:
        try:
            response = requests.get(url, params=params)
            response.raise_for_status()
            break
        except Exception as e:
            print('Mapbox API error: {}'.format(e))
            print('Reattempting in {} seconds'.format(delay))
            time.sleep(delay)
            # exponential backoff
            delay *= 2
    payload = response.json()
    if 'features' not in payload:
        return None
    return json.dumps(payload['features'][0]['geometry'])
@app.task(queue='dataanalysis')
def update_station_service_areas():
    """Recompute drive-time service areas for active stations missing any band."""
    for station in FireStation.objects.filter(archived=False):
        has_all_bands = (station.service_area_0_4 and
                         station.service_area_4_6 and
                         station.service_area_6_8)
        if has_all_bands:
            p('Fire station {id}: {dept} # {station_number} already has drive times'.format(
                id=station.id,
                dept=station.department,
                station_number=station.station_number,
            ))
        else:
            update_station_service_area(station)
def update_station_service_area(firestation):
    """
    Fetch and persist the 0-4, 4-6 and 6-8 minute drive-time service areas
    for a fire station.

    Returns the saved station, or None if any isochrone lookup fails
    (nothing is saved in that case).
    """
    service_areas = []
    for minute in ISOCHRONE_BREAKS:
        params = {
            'contours_minutes': minute,
            'polygons': 'true',
            'access_token': settings.MAPBOX_ACCESS_TOKEN
        }
        raw_geometry = get_mapbox_isochrone_geometry(firestation.geom.x, firestation.geom.y, params)
        if not raw_geometry:
            p('Service area not found for {id}: {dept} # {station_number}'.format(
                id=firestation.id,
                dept=firestation.department,
                station_number=firestation.station_number,
            ))
            return
        # buffer(0) repairs bad/self-intersecting geometries
        service_areas.append(GEOSGeometry(raw_geometry).buffer(0))
    # Subtract each smaller isochrone from the next larger one so the bands
    # become disjoint rings (largest first so earlier bands stay untouched).
    for idx in range(len(service_areas) - 1, 0, -1):
        service_areas[idx] = service_areas[idx].difference(service_areas[idx - 1])
    firestation.service_area_0_4 = to_multipolygon(service_areas[0])
    firestation.service_area_4_6 = to_multipolygon(service_areas[1])
    firestation.service_area_6_8 = to_multipolygon(service_areas[2])
    firestation.save(update_fields=['service_area_0_4', 'service_area_4_6', 'service_area_6_8'])
    p('Fire station {id}: {dept} #{station_number} drive times updated'.format(
        id=firestation.id,
        dept=firestation.department,
        station_number=firestation.station_number,
    ))
    return firestation
@app.task(queue='dataanalysis')
def update_station_erf_areas():
    """Recompute ERF (effective response force) areas for active stations missing any."""
    for firestation in FireStation.objects.filter(archived=False):
        # BUG FIX: this previously read `firestation_erf_area_unknown` — an
        # undefined name that raised NameError at runtime; the intended
        # attribute is `firestation.erf_area_unknown`.
        if not all([firestation.erf_area_low, firestation.erf_area_medium, firestation.erf_area_high, firestation.erf_area_unknown]):
            update_station_erf_area(firestation)
            firestation.save(update_fields=['erf_area_low', 'erf_area_medium', 'erf_area_high', 'erf_area_unknown'])
        else:
            p('Fire station {id}: {dept} # {station_number} already has erf areas'.format(
                id=firestation.id,
                dept=firestation.department,
                station_number=firestation.station_number,
            )
            )
def update_station_erf_area(firestation):
    """
    Populate the ERF (effective response force) drive-time areas on a station
    for every risk level.

    Does NOT save — the caller persists the fields. Returns the station, or
    None if any isochrone lookup fails.
    """
    lon = firestation.geom.x
    lat = firestation.geom.y
    for risk_level, minutes in RISK_LEVEL_ERF_DRIVE_TIMES.items():
        raw_geometry = get_mapbox_isochrone_geometry(lon, lat, {
            'contours_minutes': minutes,
            'polygons': 'true',
            'access_token': settings.MAPBOX_ACCESS_TOKEN
        })
        if not raw_geometry:
            p('Service area not found for {id}: {dept} # {station_number}'.format(
                id=firestation.id,
                dept=firestation.department,
                station_number=firestation.station_number,
            ))
            return
        # buffer(0) repairs bad/self-intersecting geometries
        erf_geometry = to_multipolygon(GEOSGeometry(raw_geometry).buffer(0))
        setattr(firestation, 'erf_area_{}'.format(risk_level), erf_geometry)
    p('Fire station {id}: {dept} #{station_number} ERF areas updated'.format(
        id=firestation.id,
        dept=firestation.department,
        station_number=firestation.station_number,
    ))
    return firestation
@app.task(queue='dataanalysis')
def create_parcel_department_hazard_level_rollup_all():
    """
    Roll up parcel hazard categories with department drive-time data
    (servicearea table) for every department.
    """
    for department_id in FireDepartment.objects.values_list('id', flat=True):
        get_parcel_department_hazard_level_rollup(department_id)
@app.task(queue='dataanalysis')
def get_parcel_department_hazard_level_rollup(fd_id):
    """
    Update the drive-time hazard level rollup for one department.

    Builds union isochrones (per ISOCHRONE_BREAKS band) across all active
    stations (or the HQ when there are none), makes the bands disjoint, and
    hands them to update_parcel_department_hazard_level.
    """
    stationlist = FireStation.objects.filter(department_id=fd_id, archived=False)
    dept = FireDepartment.objects.filter(id=fd_id)
    if not dept:
        print('Department {} not found.'.format(fd_id))
        return
    dept = dept[0]
    p("Calculating Drive times for: " + dept.name)
    # use headquarters if no stations
    if stationlist:
        station_geometries = [{
            'y': round(station.geom.y, 5),
            'x': round(station.geom.x, 5),
        } for station in stationlist]
    else:
        station_geometries = [{
            'y': round(dept.headquarters_geom.y, 5),
            'x': round(dept.headquarters_geom.x, 5),
        }]
    isochrone_geometries = []
    for minute in ISOCHRONE_BREAKS:
        isochrone_geom = None
        params = {
            'contours_minutes': minute,
            'polygons': 'true',
            'access_token': settings.MAPBOX_ACCESS_TOKEN
        }
        for station_geometry in station_geometries:
            raw_geometry = get_mapbox_isochrone_geometry(station_geometry['x'], station_geometry['y'], params)
            if not raw_geometry:
                # BUG FIX: previously logged `firestation.*` — a name that is
                # undefined (NameError) when the department has no stations,
                # and otherwise a stale leaked comprehension variable pointing
                # at the last station rather than the failing location.
                p('Service area not found for department {dept} at ({x}, {y})'.format(
                    dept=dept.id,
                    x=station_geometry['x'],
                    y=station_geometry['y'],
                ))
                continue
            # prevents bad/self-intersecting geometries
            buffered_geometry = GEOSGeometry(raw_geometry).buffer(0)
            # union all of the equivalent isochrone polygons for each station
            isochrone_geom = isochrone_geom.union(buffered_geometry) if isochrone_geom else buffered_geometry
        if isochrone_geom is None:
            # Every lookup for this break failed; bail out rather than append
            # None and crash in the .difference() calls below.
            p('No isochrones could be computed for department {}'.format(dept.id))
            return
        isochrone_geometries.append(isochrone_geom)
    # difference out the lesser isochrones from the greater ones
    isochrone_geometries[2] = isochrone_geometries[2].difference(isochrone_geometries[1]).difference(isochrone_geometries[0])
    isochrone_geometries[1] = isochrone_geometries[1].difference(isochrone_geometries[0])
    # convert to MultiPolygon geometries
    isochrone_geometries = [to_multipolygon(geom) for geom in isochrone_geometries]
    update_parcel_department_hazard_level(isochrone_geometries, dept)
def update_parcel_department_hazard_level(isochrone_geometries, department):
    """
    Intersect with Parcel layer and update parcel_department_hazard_level table
    0-4 minutes
    4-6 minutes
    6-8 minutes

    `isochrone_geometries` must be the three disjoint drive-time bands in
    that order; parcel counts per risk category are computed for each band.
    """
    QUERY_INTERSECT_FOR_PARCEL_DRIVETIME = """SELECT sum(case when l.risk_category = 'Low' THEN 1 ELSE 0 END) as low,
        sum(CASE WHEN l.risk_category = 'Medium' THEN 1 ELSE 0 END) as medium,
        sum(CASE WHEN l.risk_category = 'High' THEN 1 ELSE 0 END) high,
        sum(CASE WHEN l.risk_category is null THEN 1 ELSE 0 END) as unknown
    FROM parcel_risk_category_local l
    JOIN (SELECT ST_SetSRID(ST_GeomFromGeoJSON(%(drive_geom)s), 4326) as drive_geom) x
    ON drive_geom && l.wkb_geometry
    WHERE ST_WITHIN(l.wkb_geometry, drive_geom)
    """
    p('Querying Database for parcels')
    results = []
    # FIX: cursor is now closed via context manager (was leaked before).
    with connections['nfirs'].cursor() as cursor:
        for geom in isochrone_geometries:
            cursor.execute(QUERY_INTERSECT_FOR_PARCEL_DRIVETIME, {'drive_geom': geom.geojson})
            results.append(dictfetchall(cursor))
    # FIX: collapse ~30 duplicated per-field assignments into one field map
    # shared by the update and create branches.
    bands = ('0_4', '4_6', '6_8')
    field_values = {}
    for band, band_result in zip(bands, results):
        for level in ('low', 'medium', 'high', 'unknown'):
            field_values['parcelcount_{}_{}'.format(level, band)] = band_result[0][level]
    for band, geom in zip(bands, isochrone_geometries):
        field_values['drivetimegeom_{}'.format(band)] = geom
    # Overwrite/Update service area is already registered
    existing = ParcelDepartmentHazardLevel.objects.filter(department_id=department.id)
    if existing:
        addhazardlevelfordepartment = existing[0]
        for field, value in field_values.items():
            setattr(addhazardlevelfordepartment, field, value)
        p(department.name + " Service Area Updated")
    else:
        addhazardlevelfordepartment = ParcelDepartmentHazardLevel.objects.create(
            department=department, **field_values)
        p(department.name + " Service Area Created")
    addhazardlevelfordepartment.save()
@app.task(queue='dataanalysis')
def create_effective_firefighting_rollup_all():
    """
    Recompute the effective fire fighting force rollup
    (EffectiveFireFightingForceLevel table) for every department.
    """
    for department_id in FireDepartment.objects.values_list('id', flat=True):
        update_parcel_department_effectivefirefighting_rollup(department_id)
@app.task(queue='dataanalysis')
def update_parcel_department_effectivefirefighting_rollup(fd_id):
    """
    Update the effective fire fighting force rollup for one department.

    Refreshes each station's ERF drive-time areas when missing, then computes
    the ERF coverage geometry and parcel counts per risk level.
    """
    stations = FireStation.objects.filter(department_id=fd_id)
    dept = FireDepartment.objects.get(id=fd_id)
    # NOTE: removed unused local `staffingtotal = "1"`; minimum staffing is
    # taken from RISK_LEVEL_MINIMUM_STAFFING* below.
    if dept.owned_tracts_geom is None:
        p("No geometry for the department " + dept.name)
        return
    p('Calculating response times and staffing for: {dept_id}: {dept_name} at {t}'.format(
        t=time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()),
        dept_name=dept.name,
        dept_id=dept.id
    )
    )
    # No stations are present, only hq
    if not stations:
        return
    for station in stations:
        if not all(getattr(station, erf_area_attr) for erf_area_attr in RISK_LEVEL_ERF_AREAS.values()):
            # BUG FIX: update_station_erf_area returns None when an isochrone
            # lookup fails; keep the original station in that case instead of
            # raising AttributeError on `None.pk` below.
            refreshed = update_station_erf_area(station)
            if refreshed is not None:
                station = refreshed
        # Do not save in the case that this is a temp station created from hq geom above
        if station.pk:
            station.save(update_fields=['erf_area_low', 'erf_area_medium', 'erf_area_high', 'erf_area_unknown'])
    # There are different staffing requirements for the high
    # risk category depending on if EMS transport is available
    risk_level_minimum_staffing = RISK_LEVEL_MINIMUM_STAFFING if dept.ems_transport else RISK_LEVEL_MINIMUM_STAFFING_NO_EMS
    for risk_level, erf_area in RISK_LEVEL_ERF_AREAS.items():
        p('Calculating effective response force extent for department: {department}, risk level: {risk_level}'.format(
            department=dept.id,
            risk_level=risk_level,
        ))
        geom = get_efff_geometry([station.id for station in stations], erf_area, risk_level_minimum_staffing[risk_level])
        p('Updating effective response force parcel data for department: {department}, risk level: {risk_level}'.format(
            department=dept.id,
            risk_level=risk_level,
        ))
        update_parcel_effectivefirefighting_table(geom, risk_level, dept)
def get_efff_geometry(station_ids, erf_area, minimum_staffing):
    """
    Return (as GeoJSON text) the union of the polygon regions where the
    combined personnel of stations whose `erf_area` covers the region meets
    `minimum_staffing`.

    :param station_ids: iterable of station primary keys (ints).
    :param erf_area: SQL fragment naming the ERF-area geometry column to use.
    :param minimum_staffing: integer personnel threshold.
    """
    EFF_QUERY = """
        DROP SEQUENCE IF EXISTS polyseq;
        CREATE TEMP SEQUENCE polyseq;
        WITH stations AS (
            SELECT usgsstructuredata_ptr_id AS id, ST_SetSRID({erf_area}, 4326) AS geom FROM firestation_firestation
            WHERE usgsstructuredata_ptr_id IN ({station_ids})
        ), boundaries AS (
            SELECT ST_Union(ST_ExteriorRing(areas.geom)) AS geom
            FROM (
                SELECT (ST_Dump(s.geom)).geom AS geom
                FROM stations s
            ) as areas
        ), polys AS (
            SELECT nextval('polyseq') AS id, (ST_Dump(ST_Polygonize(b.geom))).geom AS geom FROM boundaries b
        ), staffing AS (
            SELECT p.personnel, p.id, s.geom
            FROM stations AS s
            JOIN (
                SELECT SUM(fs.personnel) as personnel, fs.firestation_id AS id
                FROM firestation_staffing AS fs
                JOIN stations AS st
                ON st.id = fs.firestation_id
                GROUP BY fs.firestation_id
            ) AS p
            ON s.id = p.id
        ), erf_totals AS (
            SELECT SUM(s.personnel)::int AS personnel, p.id AS id
            FROM polys p
            JOIN staffing s
            ON ST_Contains(s.geom, ST_PointOnSurface(p.geom))
            GROUP BY p.id
        )
        SELECT ST_AsGeoJSON(ST_Union(p.geom)) AS geom
        FROM erf_totals e
        JOIN polys p
        ON e.id = p.id
        WHERE e.personnel >= {minimum_staffing};
    """.format(
        erf_area=erf_area,
        station_ids=', '.join(str(_) for _ in station_ids),
        minimum_staffing=minimum_staffing,
    )
    # this could take a long time
    # FIX: use the cursor as a context manager (consistent with the rest of
    # this module) so it is closed instead of leaking.
    with connections['default'].cursor() as cursor:
        cursor.execute(EFF_QUERY)
        geom = dictfetchall(cursor)[0]['geom']
    return geom
def update_parcel_effectivefirefighting_table(erf_geom, risk_level, department):
    """
    Intersect with Parcel layer and update parcel_department_hazard_level table

    Stores, for the given risk level, the number of covered parcels, the
    percentage of the department's structures covered, and the ERF geometry
    on the department's EffectiveFireFightingForceLevel record.
    """
    # 'unknown' risk is stored as NULL in the parcel table.
    risk_category_comparison = 'l.risk_category IS NULL' if risk_level == 'unknown' else "l.risk_category = '{}'".format(risk_level.capitalize())
    QUERY_INTERSECT_FOR_PARCEL_DRIVETIME = """
        SELECT SUM(CASE WHEN {risk_category_comparison} THEN 1 ELSE 0 END) AS {risk_level}
        FROM parcel_risk_category_local l
        JOIN (SELECT ST_SetSRID(ST_GeomFromGeoJSON(%(erf_geom)s), 4326) as drive_geom) x
        ON drive_geom && l.wkb_geometry
        WHERE ST_Within(l.wkb_geometry, drive_geom)
    """.format(
        risk_level=risk_level,
        risk_category_comparison=risk_category_comparison,
    )
    # FIX: use the cursor as a context manager (was leaked before).
    with connections['nfirs'].cursor() as cursor:
        cursor.execute(QUERY_INTERSECT_FOR_PARCEL_DRIVETIME, {
            'erf_geom': erf_geom,
        })
        result = dictfetchall(cursor)[0]
    # SUM over zero rows yields NULL/None; normalize to 0.
    result[risk_level] = result[risk_level] or 0
    try:
        efffl = EffectiveFireFightingForceLevel.objects.get(department=department.id)
    except EffectiveFireFightingForceLevel.DoesNotExist:
        efffl = EffectiveFireFightingForceLevel(department=department)
    setattr(efffl, 'parcel_count_{}'.format(risk_level), result[risk_level])
    structure_counts = getattr(department.metrics.structure_counts_by_risk_category, risk_level)
    if structure_counts is not None:
        # Guard against division by zero when the department has no structures.
        percent_covered = 100 if structure_counts == 0 else (
            round(100 * float(result[risk_level]) / float(structure_counts), 2)
        )
    else:
        percent_covered = None
    setattr(efffl, 'percent_covered_{}'.format(risk_level), percent_covered)
    if erf_geom:
        setattr(efffl, 'erf_area_{}'.format(risk_level), to_multipolygon(fromstr(erf_geom)))
    efffl.save()
    p('ERF area updated for department {}, risk level {}'.format(department.id, risk_level))
| [
"time.sleep",
"firecares.firestation.models.FireDepartment.objects.filter",
"firecares.firestation.models.FireDepartment.objects.values_list",
"copy.deepcopy",
"StringIO.StringIO",
"boto.connect_s3",
"firecares.firestation.models.EffectiveFireFightingForceLevel.objects.get",
"firecares.firestation.mod... | [((2834, 2858), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (2842, 2858), False, 'from firecares.celery import app\n'), ((9566, 9590), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (9574, 9590), False, 'from firecares.celery import app\n'), ((10908, 10932), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (10916, 10932), False, 'from firecares.celery import app\n'), ((12448, 12472), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (12456, 12472), False, 'from firecares.celery import app\n'), ((12675, 12699), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (12683, 12699), False, 'from firecares.celery import app\n'), ((12866, 12890), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (12874, 12890), False, 'from firecares.celery import app\n'), ((13118, 13142), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (13126, 13142), False, 'from firecares.celery import app\n'), ((17246, 17270), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (17254, 17270), False, 'from firecares.celery import app\n'), ((17854, 17878), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (17862, 17878), False, 'from firecares.celery import app\n'), ((18765, 18807), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""', 'rate_limit': '"""5/h"""'}), "(queue='update', rate_limit='5/h')\n", (18773, 18807), False, 'from firecares.celery import app\n'), ((18982, 19024), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""', 'rate_limit': '"""5/h"""'}), "(queue='update', rate_limit='5/h')\n", (18990, 19024), 
False, 'from firecares.celery import app\n'), ((19236, 19260), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (19244, 19260), False, 'from firecares.celery import app\n'), ((20776, 20800), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""update"""'}), "(queue='update')\n", (20784, 20800), False, 'from firecares.celery import app\n'), ((23641, 23671), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""dataanalysis"""'}), "(queue='dataanalysis')\n", (23649, 23671), False, 'from firecares.celery import app\n'), ((25793, 25823), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""dataanalysis"""'}), "(queue='dataanalysis')\n", (25801, 25823), False, 'from firecares.celery import app\n'), ((27602, 27632), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""dataanalysis"""'}), "(queue='dataanalysis')\n", (27610, 27632), False, 'from firecares.celery import app\n'), ((27942, 27972), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""dataanalysis"""'}), "(queue='dataanalysis')\n", (27950, 27972), False, 'from firecares.celery import app\n'), ((34407, 34437), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""dataanalysis"""'}), "(queue='dataanalysis')\n", (34415, 34437), False, 'from firecares.celery import app\n'), ((34731, 34761), 'firecares.celery.app.task', 'app.task', ([], {'queue': '"""dataanalysis"""'}), "(queue='dataanalysis')\n", (34739, 34761), False, 'from firecares.celery import app\n'), ((1286, 1300), 'alog.info', 'alog.info', (['msg'], {}), '(msg)\n', (1295, 1300), False, 'import alog\n'), ((2422, 2467), 'firecares.firestation.models.FireDepartment.objects.filter', 'FireDepartment.objects.filter', ([], {'archived': '(False)'}), '(archived=False)\n', (2451, 2467), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, 
refresh_national_calculations_view\n'), ((4490, 4510), 'firecares.utils.dictfetchall', 'dictfetchall', (['cursor'], {}), '(cursor)\n', (4502, 4510), False, 'from firecares.utils import dictfetchall, lenient_summation\n'), ((4801, 4838), 'fire_risk.models.DIST.providers.ahs.ahs_building_areas', 'ahs_building_areas', (['fd.fdid', 'fd.state'], {}), '(fd.fdid, fd.state)\n', (4819, 4838), False, 'from fire_risk.models.DIST.providers.ahs import ahs_building_areas\n'), ((10524, 10534), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (10532, 10534), False, 'from StringIO import StringIO\n'), ((10548, 10563), 'csv.writer', 'csv.writer', (['out'], {}), '(out)\n', (10558, 10563), False, 'import csv\n'), ((10712, 10729), 'boto.connect_s3', 'boto.connect_s3', ([], {}), '()\n', (10727, 10729), False, 'import boto\n'), ((12100, 12110), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (12108, 12110), False, 'from StringIO import StringIO\n'), ((12124, 12139), 'csv.writer', 'csv.writer', (['out'], {}), '(out)\n', (12134, 12139), False, 'import csv\n'), ((12253, 12270), 'boto.connect_s3', 'boto.connect_s3', ([], {}), '()\n', (12268, 12270), False, 'import boto\n'), ((18955, 18978), 'firecares.firestation.models.refresh_quartile_view', 'refresh_quartile_view', ([], {}), '()\n', (18976, 18978), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((19196, 19232), 'firecares.firestation.models.refresh_national_calculations_view', 'refresh_national_calculations_view', ([], {}), '()\n', (19230, 19232), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((20424, 20454), 'firecares.firestation.models.HazardLevels.values_sans_all', 
'HazardLevels.values_sans_all', ([], {}), '()\n', (20452, 20454), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((23731, 23773), 'firecares.firestation.models.FireStation.objects.filter', 'FireStation.objects.filter', ([], {'archived': '(False)'}), '(archived=False)\n', (23757, 23773), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((25264, 25304), 'firecares.utils.to_multipolygon', 'to_multipolygon', (['isochrone_geometries[0]'], {}), '(isochrone_geometries[0])\n', (25279, 25304), False, 'from firecares.utils import to_multipolygon\n'), ((25340, 25380), 'firecares.utils.to_multipolygon', 'to_multipolygon', (['isochrone_geometries[1]'], {}), '(isochrone_geometries[1])\n', (25355, 25380), False, 'from firecares.utils import to_multipolygon\n'), ((25416, 25456), 'firecares.utils.to_multipolygon', 'to_multipolygon', (['isochrone_geometries[2]'], {}), '(isochrone_geometries[2])\n', (25431, 25456), False, 'from firecares.utils import to_multipolygon\n'), ((25879, 25921), 'firecares.firestation.models.FireStation.objects.filter', 'FireStation.objects.filter', ([], {'archived': '(False)'}), '(archived=False)\n', (25905, 25921), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((27832, 27883), 'firecares.firestation.models.FireDepartment.objects.values_list', 'FireDepartment.objects.values_list', (['"""id"""'], {'flat': '(True)'}), "('id', flat=True)\n", (27866, 27883), False, 'from firecares.firestation.models import FireStation, 
FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((28123, 28186), 'firecares.firestation.models.FireStation.objects.filter', 'FireStation.objects.filter', ([], {'department_id': 'fd_id', 'archived': '(False)'}), '(department_id=fd_id, archived=False)\n', (28149, 28186), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((28198, 28237), 'firecares.firestation.models.FireDepartment.objects.filter', 'FireDepartment.objects.filter', ([], {'id': 'fd_id'}), '(id=fd_id)\n', (28227, 28237), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((31586, 31657), 'firecares.firestation.models.ParcelDepartmentHazardLevel.objects.filter', 'ParcelDepartmentHazardLevel.objects.filter', ([], {'department_id': 'department.id'}), '(department_id=department.id)\n', (31628, 31657), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((34610, 34661), 'firecares.firestation.models.FireDepartment.objects.values_list', 'FireDepartment.objects.values_list', (['"""id"""'], {'flat': '(True)'}), "('id', flat=True)\n", (34644, 34661), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((34927, 34974), 'firecares.firestation.models.FireStation.objects.filter', 'FireStation.objects.filter', 
([], {'department_id': 'fd_id'}), '(department_id=fd_id)\n', (34953, 34974), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((34986, 35022), 'firecares.firestation.models.FireDepartment.objects.get', 'FireDepartment.objects.get', ([], {'id': 'fd_id'}), '(id=fd_id)\n', (35012, 35022), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((3092, 3125), 'firecares.firestation.models.FireDepartment.objects.get', 'FireDepartment.objects.get', ([], {'id': 'id'}), '(id=id)\n', (3118, 3125), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((9643, 9676), 'firecares.firestation.models.FireDepartment.objects.get', 'FireDepartment.objects.get', ([], {'id': 'id'}), '(id=id)\n', (9669, 9676), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((10983, 11016), 'firecares.firestation.models.FireDepartment.objects.get', 'FireDepartment.objects.get', ([], {'id': 'id'}), '(id=id)\n', (11009, 11016), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((13310, 13343), 'firecares.firestation.models.FireDepartment.objects.get', 'FireDepartment.objects.get', ([], {'id': 'id'}), '(id=id)\n', (13336, 13343), False, 'from 
firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((18039, 18075), 'firecares.firestation.models.FireDepartment.objects.get', 'FireDepartment.objects.get', ([], {'id': 'fd_id'}), '(id=fd_id)\n', (18065, 18075), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((18651, 18672), 'django.contrib.gis.geos.GEOSGeometry', 'GEOSGeometry', (['geom[0]'], {}), '(geom[0])\n', (18663, 18672), False, 'from django.contrib.gis.geos import GEOSGeometry, MultiPolygon, fromstr\n'), ((19322, 19358), 'firecares.firestation.models.FireDepartment.objects.get', 'FireDepartment.objects.get', ([], {'id': 'fd_id'}), '(id=fd_id)\n', (19348, 19358), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((20386, 20406), 'firecares.utils.dictfetchall', 'dictfetchall', (['cursor'], {}), '(cursor)\n', (20398, 20406), False, 'from firecares.utils import dictfetchall, lenient_summation\n'), ((21117, 21153), 'firecares.firestation.models.FireDepartment.objects.get', 'FireDepartment.objects.get', ([], {'id': 'fd_id'}), '(id=fd_id)\n', (21143, 21153), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((22599, 22636), 'numpy.random.choice', 'np.random.choice', (['expanded'], {'size': '(1000)'}), '(expanded, size=1000)\n', (22615, 22636), True, 'import numpy as np\n'), ((22652, 22672), 'scipy.stats.lognorm.fit', 
'lognorm.fit', (['samples'], {}), '(samples)\n', (22663, 22672), False, 'from scipy.stats import lognorm\n'), ((30185, 30206), 'firecares.utils.to_multipolygon', 'to_multipolygon', (['geom'], {}), '(geom)\n', (30200, 30206), False, 'from firecares.utils import to_multipolygon\n'), ((31684, 31755), 'firecares.firestation.models.ParcelDepartmentHazardLevel.objects.filter', 'ParcelDepartmentHazardLevel.objects.filter', ([], {'department_id': 'department.id'}), '(department_id=department.id)\n', (31726, 31755), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((34250, 34311), 'firecares.firestation.models.ParcelDepartmentHazardLevel.objects.create', 'ParcelDepartmentHazardLevel.objects.create', ([], {}), '(**deptservicearea)\n', (34292, 34311), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((39695, 39715), 'firecares.utils.dictfetchall', 'dictfetchall', (['cursor'], {}), '(cursor)\n', (39707, 39715), False, 'from firecares.utils import dictfetchall, lenient_summation\n'), ((39794, 39863), 'firecares.firestation.models.EffectiveFireFightingForceLevel.objects.get', 'EffectiveFireFightingForceLevel.objects.get', ([], {'department': 'department.id'}), '(department=department.id)\n', (39837, 39863), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((6363, 6408), 'fire_risk.utils.LogNormalDraw', 'LogNormalDraw', (['*response_times'], {'multiplier': '(60)'}), '(*response_times, multiplier=60)\n', (6376, 6408), False, 'from fire_risk.utils import 
LogNormalDraw\n'), ((8103, 8148), 'fire_risk.utils.LogNormalDraw', 'LogNormalDraw', (['*response_times'], {'multiplier': '(60)'}), '(*response_times, multiplier=60)\n', (8116, 8148), False, 'from fire_risk.utils import LogNormalDraw\n'), ((12993, 13038), 'firecares.firestation.models.FireDepartment.objects.filter', 'FireDepartment.objects.filter', ([], {'archived': '(False)'}), '(archived=False)\n', (13022, 13038), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((15900, 15920), 'copy.deepcopy', 'copy.deepcopy', (['years'], {}), '(years)\n', (15913, 15920), False, 'import copy\n'), ((15946, 15957), 'time.time', 'time.time', ([], {}), '()\n', (15955, 15957), False, 'import time\n'), ((16023, 16034), 'time.time', 'time.time', ([], {}), '()\n', (16032, 16034), False, 'import time\n'), ((23172, 23204), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (23184, 23204), False, 'import requests\n'), ((31371, 31391), 'firecares.utils.dictfetchall', 'dictfetchall', (['cursor'], {}), '(cursor)\n', (31383, 31391), False, 'from firecares.utils import dictfetchall, lenient_summation\n'), ((38690, 38710), 'firecares.utils.dictfetchall', 'dictfetchall', (['cursor'], {}), '(cursor)\n', (38702, 38710), False, 'from firecares.utils import dictfetchall, lenient_summation\n'), ((39937, 39991), 'firecares.firestation.models.EffectiveFireFightingForceLevel', 'EffectiveFireFightingForceLevel', ([], {'department': 'department'}), '(department=department)\n', (39968, 39991), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((16952, 17071), 
'firecares.firestation.models.NFIRSStatistic.objects.update_or_create', 'nfirs.objects.update_or_create', ([], {'year': 'year', 'defaults': "{'count': total}", 'fire_department': 'fd', 'metric': 'statistic', 'level': '(0)'}), "(year=year, defaults={'count': total},\n fire_department=fd, metric=statistic, level=0)\n", (16982, 17071), True, 'from firecares.firestation.models import NFIRSStatistic as nfirs\n'), ((23434, 23451), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (23444, 23451), False, 'import time\n'), ((40568, 40585), 'django.contrib.gis.geos.fromstr', 'fromstr', (['erf_geom'], {}), '(erf_geom)\n', (40575, 40585), False, 'from django.contrib.gis.geos import GEOSGeometry, MultiPolygon, fromstr\n'), ((8387, 8413), 'firecares.firestation.models.HazardLevels', 'HazardLevels', (['record.level'], {}), '(record.level)\n', (8399, 8413), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((8522, 8544), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8542, 8544), False, 'import traceback\n'), ((16735, 16858), 'firecares.firestation.models.NFIRSStatistic.objects.update_or_create', 'nfirs.objects.update_or_create', ([], {'year': 'year', 'defaults': "{'count': count}", 'fire_department': 'fd', 'metric': 'statistic', 'level': 'level'}), "(year=year, defaults={'count': count},\n fire_department=fd, metric=statistic, level=level)\n", (16765, 16858), True, 'from firecares.firestation.models import NFIRSStatistic as nfirs\n'), ((24972, 24998), 'django.contrib.gis.geos.GEOSGeometry', 'GEOSGeometry', (['raw_geometry'], {}), '(raw_geometry)\n', (24984, 24998), False, 'from django.contrib.gis.geos import GEOSGeometry, MultiPolygon, fromstr\n'), ((27250, 27276), 'django.contrib.gis.geos.GEOSGeometry', 'GEOSGeometry', (['raw_geometry'], {}), '(raw_geometry)\n', (27262, 27276), 
False, 'from django.contrib.gis.geos import GEOSGeometry, MultiPolygon, fromstr\n'), ((29556, 29582), 'django.contrib.gis.geos.GEOSGeometry', 'GEOSGeometry', (['raw_geometry'], {}), '(raw_geometry)\n', (29568, 29582), False, 'from django.contrib.gis.geos import GEOSGeometry, MultiPolygon, fromstr\n'), ((35339, 35352), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (35350, 35352), False, 'import time\n'), ((6890, 6916), 'firecares.firestation.models.HazardLevels', 'HazardLevels', (['record.level'], {}), '(record.level)\n', (6902, 6916), False, 'from firecares.firestation.models import FireStation, FireDepartment, ParcelDepartmentHazardLevel, EffectiveFireFightingForceLevel, Staffing, refresh_quartile_view, HazardLevels, refresh_national_calculations_view\n'), ((7033, 7055), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7053, 7055), False, 'import traceback\n'), ((15827, 15841), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15839, 15841), False, 'from datetime import datetime\n'), ((16563, 16577), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16575, 16577), False, 'from datetime import datetime\n')] |
from manim import *
import numpy as np
import random
class Tute1(Scene):
    """Slide a small rectangle around a coordinate grid while a dot tracks its centre."""

    def construct(self):
        grid = NumberPlane(x_range=[-7, 7, 1], y_range=[-4, 4, 1]).add_coordinates()
        rect = Rectangle(stroke_color=GREEN_C, stroke_opacity=0.7,
                         fill_color=RED_B, fill_opacity=0.5, height=1, width=1)
        # Re-drawn every frame so the dot always sits on the rectangle's centre.
        tracker = always_redraw(lambda: Dot().move_to(rect.get_center()))

        self.play(FadeIn(grid), run_time=6)
        self.add(rect, tracker)
        # Walk the rectangle through the same fixed sequence of displacements.
        for displacement in (RIGHT * 2, UP * 3, DOWN * 5 + LEFT * 5, UP * 1.5 + RIGHT * 1):
            self.play(rect.animate.shift(displacement), run_time=4)
            self.wait()
class Tute2(Scene):
    """Overlay axes on a number plane, then morph a circle into a triangle."""

    def construct(self):
        grid = NumberPlane(x_range=[-7, 7, 1], y_range=[-4, 4, 1]).add_coordinates()
        coord_axes = Axes(x_range=[-3, 3, 1], y_range=[-3, 3, 1], x_length=6, y_length=6)
        coord_axes.to_edge(LEFT, buff=0.5)
        disc = Circle(stroke_width=6, stroke_color=YELLOW, fill_color=RED_C, fill_opacity=0.8)
        disc.set_width(2).to_edge(DR, buff=0)
        tri = Triangle(stroke_color=ORANGE, stroke_width=10,
                       fill_color=GREY).set_height(2).shift(DOWN * 3 + RIGHT * 3)

        self.play(FadeIn(grid), run_time=6)
        self.wait()
        self.play(Write(coord_axes))
        self.wait()
        # Dim the background grid so the foreground shapes stand out.
        self.play(grid.animate.set_opacity(0.4))
        self.wait()
        self.play(DrawBorderThenFill(disc))
        self.wait()
        self.play(disc.animate.set_width(1))
        self.wait()
        self.play(Transform(disc, tri), run_time=3)
        self.wait()
class Tute3(Scene):
    """Show a code panel, then a rounded rectangle whose label follows it via an updater."""

    def construct(self):
        panel = RoundedRectangle(stroke_width=8, stroke_color=WHITE,
                                 fill_color=BLUE_B, width=4.5, height=2).shift(UP * 3 + LEFT * 4)
        label = MathTex("\\frac{3}{4} = 0.75").set_color_by_gradient(GREEN, PINK).set_height(1.5)
        label.move_to(panel.get_center())
        # Keep the label glued to the rectangle while the rectangle moves.
        label.add_updater(lambda m: m.move_to(panel.get_center()))
        code = Code("Tute1Code3.py", style=Code.styles_list[12], background="window",
                    language="python", insert_line_no=True, tab_width=2,
                    line_spacing=0.3, scale_factor=0.5, font="Monospace").set_width(8).to_edge(UR, buff=0)

        self.play(Write(code), run_time=6)
        self.wait()
        self.play(FadeIn(panel))
        self.wait()
        self.play(Write(label), run_time=2)
        self.wait()
        self.play(panel.animate.shift(RIGHT * 1.5 + DOWN * 5), run_time=6)
        self.wait()
        # Detach the updater so the final move leaves the label behind.
        label.clear_updaters()
        self.play(panel.animate.shift(LEFT * 2 + UP * 1), run_time=6)
        self.wait()
class Tute4(Scene):
    """Unroll a circle's circumference into a straight line while its radius grows."""

    def construct(self):
        radius = ValueTracker(0.5)  # drives every redrawn mobject below
        disc = always_redraw(
            lambda: Circle(radius=radius.get_value(), stroke_color=YELLOW, stroke_width=5))
        radius_line = always_redraw(
            lambda: Line(start=disc.get_center(), end=disc.get_bottom(),
                         stroke_color=RED_B, stroke_width=10))
        # A straight segment whose length equals the current circumference (2*pi*r).
        circumference_line = always_redraw(
            lambda: Line(stroke_color=YELLOW, stroke_width=5)
            .set_length(2 * radius.get_value() * PI)
            .next_to(disc, DOWN, buff=0.2))
        inscribed = always_redraw(
            lambda: Polygon(disc.get_top(), disc.get_left(), disc.get_right(),
                            fill_color=GREEN_C))

        self.play(LaggedStart(
            Create(disc), DrawBorderThenFill(radius_line), DrawBorderThenFill(inscribed),
            run_time=4, lag_ratio=0.75))
        self.play(ReplacementTransform(disc.copy(), circumference_line), run_time=2)
        self.play(radius.animate.set_value(2), run_time=5)
class testing(Scene):
    """Animate a simple pendulum (small-angle solution) next to a column of
    play-button icons, then shrink the whole pendulum into one of the icons.

    NOTE(review): ``HOME`` and ``HOME2`` are not defined anywhere in this file —
    presumably module-level path constants from the original project; confirm
    before running.
    """

    def construct(self):
        play_icon = VGroup(*[SVGMobject(f"{HOME2}\\youtube_icon.svg") for k in range(8)]
        ).set_height(0.75).arrange(DOWN, buff=0.2).to_edge(UL, buff=0.1)
        # `time` is a ValueTracker used as the animation clock (shadows nothing:
        # the stdlib `time` module is not imported here).
        time = ValueTracker(0)
        # Pendulum parameters: length l, gravity g, angular frequency w, period T.
        l = 3
        g = 10
        w = np.sqrt(g/l)
        T = 2*PI / w
        theta_max = 20/180*PI  # 20 degrees amplitude, in radians
        # Pivot position of the pendulum.
        p_x = -2
        p_y = 3
        shift_req = p_x*RIGHT+p_y*UP
        vertical_line = DashedLine(start = shift_req, end = shift_req+3*DOWN)
        # theta is parked off-screen (RIGHT*10); it only carries the current
        # angle value theta(t) = theta_max * sin(w t) for the updaters below.
        theta = DecimalNumber().move_to(RIGHT*10)
        theta.add_updater(lambda m : m.set_value((theta_max)*np.sin(w*time.get_value())))
        def get_ball(x,y):
            # Bob of the pendulum at coordinates (x, y).
            dot = Dot(fill_color = BLUE, fill_opacity = 1).move_to(x*RIGHT+y*UP).scale(3)
            return dot
        # NOTE(review): shift_req is a 3-vector, so x and y here are vectors plus
        # a scalar offset — this matches the original, but verify the intended
        # geometry against the manim coordinate conventions.
        ball = always_redraw(lambda :
        get_ball(shift_req+l*np.sin(theta.get_value()),
        shift_req - l*np.cos(theta.get_value()))
        )
        def get_string():
            # Rod/string from the pivot to the current bob position.
            line = Line(color = GREY, start = shift_req, end = ball.get_center())
            return line
        string = always_redraw(lambda : get_string())
        def get_angle(theta):
            # Show the pivot angle arc only while theta > 0; otherwise an
            # invisible placeholder so always_redraw still has a mobject.
            if theta != 0:
                if theta > 0:
                    angle = Angle(line1 = string, line2 = vertical_line, other_angle = True, radius = 0.5, color = YELLOW)
                else:
                    angle = VectorizedPoint()
            else:
                angle = VectorizedPoint()
            return angle
        angle = always_redraw(lambda : get_angle(theta.get_value()))
        guest_name = Tex("<NAME>").next_to(vertical_line.get_start(), RIGHT, buff=0.5)
        guest_logo = ImageMobject(f"{HOME}\\guest_logo.png").set_width(2).next_to(guest_name, DOWN, buff=0.1)
        pendulum = Group(string, ball, vertical_line, guest_name, guest_logo)
        self.play(DrawBorderThenFill(play_icon), run_time = 3)
        self.add(vertical_line, theta, ball, string, angle)
        self.wait()
        self.play(FadeIn(guest_name), FadeIn(guest_logo))
        # Advance the clock through two full periods at constant rate.
        self.play(time.animate.set_value(2*T), rate_func = linear, run_time = 2*T)
        # Shrink the whole pendulum into the last play icon.
        self.play(pendulum.animate.set_height(0.6).move_to(play_icon[7].get_center()), run_time = 2)
        self.remove(theta, angle, ball, string)
        self.wait()
class parametric(ThreeDScene):
    """Grow a 3D helix (radius 4, pitch 0.5 per radian) with an arrow tracking its tip."""

    def construct(self):
        axes = ThreeDAxes().add_coordinates()
        # `end` drives the upper bound of the parameter range, growing the curve.
        end = ValueTracker(-4.9)
        # NOTE(review): both `t_min` and `t_range` are passed; in current manim
        # ParametricFunction only `t_range` exists — confirm the target version.
        graph = always_redraw(lambda :
        ParametricFunction(lambda u : np.array([4*np.cos(u), 4*np.sin(u), 0.5*u]),
        color = BLUE, t_min = -3*TAU, t_range = [-5, end.get_value()])
        )
        # Arrow from the origin to the current end point of the helix.
        line = always_redraw(lambda :
        Line(start = ORIGIN, end = graph.get_end(), color = BLUE).add_tip()
        )
        self.set_camera_orientation(phi = 70*DEGREES, theta = -30*DEGREES)
        self.add(axes, graph, line)
        self.play(end.animate.set_value(5), run_time = 3)
        self.wait()
class Test(Scene):
    """Draw an equilateral triangle with an interior point P joined to all three
    vertices, then rotate the sub-triangle ABP.

    Fix: the LaTeX label used "\\cdot" in a non-raw string, where "\\c" is an
    invalid escape sequence (SyntaxWarning since Python 3.12); a raw string
    preserves the exact same runtime value without the warning.
    """

    def construct(self):
        self.camera.background_color = "#FFDE59"
        # NOTE(review): these two labels are built but never added to the scene —
        # possibly leftovers; kept to preserve behaviour (Tex rendering side
        # effects included).
        text = Tex(r"$3x \cdot 5x = 135$", color=BLACK).scale(1.4)
        text2 = MathTex("15x^2=135", color=BLACK).scale(1.4)

        # Vertices of an equilateral triangle with side 4, plus an interior point.
        a = [-2, 0, 0]
        b = [2, 0, 0]
        c = [0, 2 * np.sqrt(3), 0]
        p = [0.37, 1.4, 0]

        dota = Dot(a, radius=0.06, color=BLACK)
        dotb = Dot(b, radius=0.06, color=BLACK)
        dotc = Dot(c, radius=0.06, color=BLACK)
        dotp = Dot(p, radius=0.06, color=BLACK)
        # Cevians from each vertex to the interior point P.
        lineap = Line(dota.get_center(), dotp.get_center()).set_color(BLACK)
        linebp = Line(dotb.get_center(), dotp.get_center()).set_color(BLACK)
        linecp = Line(dotc.get_center(), dotp.get_center()).set_color(BLACK)
        equilateral = Polygon(a, b, c)
        triangle = Polygon(a, b, p)

        self.play(Write(equilateral))
        self.wait()
        self.play(Write(VGroup(lineap, linebp, linecp, triangle)))
        self.wait()
        self.play(triangle.animate.rotate(0.4))
        self.wait()
| [
"numpy.sin",
"numpy.sqrt",
"numpy.cos"
] | [((4928, 4942), 'numpy.sqrt', 'np.sqrt', (['(g / l)'], {}), '(g / l)\n', (4935, 4942), True, 'import numpy as np\n'), ((7990, 8000), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (7997, 8000), True, 'import numpy as np\n'), ((7244, 7253), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (7250, 7253), True, 'import numpy as np\n'), ((7257, 7266), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (7263, 7266), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as im
from moviepy.editor import VideoFileClip
# Rolling history of the left/right lane lines detected in previous frames;
# Lanelines_detection() appends to these and averages over the last N frames
# to smooth the overlay across a video.
prev_leftlines = []
prev_rightlines = []
def laneColors_mask(img, thresh=((0, 255), (0, 255), (0, 255))):
    """Threshold an RGB image in HLS colour space.

    Parameters
    ----------
    img : RGB image array.
    thresh : three (low, high) inclusive ranges, one each for the H, L and S
        channels.  The default is now an immutable tuple — the original used a
        mutable list default, a classic Python pitfall.

    Returns
    -------
    Grayscale binary mask (same height/width as ``img``) that is 1 where all
    three channels fall inside their ranges, 0 elsewhere.
    """
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    h, l, s = hls[:, :, 0], hls[:, :, 1], hls[:, :, 2]
    (h_lo, h_hi), (l_lo, l_hi), (s_lo, s_hi) = thresh
    binary_mask = np.zeros_like(s)
    keep = ((h >= h_lo) & (h <= h_hi) &
            (l >= l_lo) & (l <= l_hi) &
            (s >= s_lo) & (s <= s_hi))
    binary_mask[keep] = 1
    return binary_mask
def canny_edge(grayImg, thresh=(0, 255), kernel_size=3):
    """Gaussian-smooth a grayscale image, then run Canny edge detection.

    Parameters
    ----------
    grayImg : single-channel image.
    thresh : (low, high) hysteresis thresholds for Canny.  Changed from a
        mutable list default to an immutable tuple (same values).
    kernel_size : side of the square Gaussian kernel (must be odd).

    Returns
    -------
    Binary edge image from ``cv2.Canny``.
    """
    blur = cv2.GaussianBlur(grayImg, (kernel_size, kernel_size), 0)
    return cv2.Canny(blur, thresh[0], thresh[1])
def regionMask(img, pts):
    """Return a mask shaped like *img*: 255 inside the polygon *pts*, 0 elsewhere."""
    mask = np.zeros_like(img)
    polygon = np.array([[pts]])
    cv2.fillPoly(mask, polygon, 255)
    return mask
def drawLines(img, lines, color=255, thickness=10):
    """Draw every segment of *lines* onto *img* (in place) and return *img*.

    Parameters
    ----------
    img : image array to draw on.
    lines : iterable of ``[[x1, y1, x2, y2]]`` segments, or ``None`` (nothing
        is drawn and *img* is returned unchanged).
    color, thickness : passed through to ``cv2.line``.

    The original duplicated ``return img`` in both branches; a guard clause
    expresses the same behaviour more directly.
    """
    if lines is None:  # nothing to draw
        return img
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
    return img
def filter_horizontal(lines, slope_thresh=0):
    """Discard near-horizontal segments, keeping only those steeper than *slope_thresh*.

    Lane edges are steep in image space, so Hough segments with small |slope|
    are noise.  Fixes over the original:

    * ``lines`` may be ``None`` (``cv2.HoughLinesP`` returns ``None`` when no
      lines are found) — an empty list is returned instead of crashing.
    * Vertical segments (x1 == x2, infinite slope) are kept instead of raising
      ``ZeroDivisionError``.
    * Removed the unused ``slopes`` / ``intercepts`` locals.

    Returns a list of ``[[x1, y1, x2, y2]]`` segments.
    """
    if lines is None:
        return []
    selected_lines = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        # Vertical lines have infinite slope, which trivially exceeds any threshold.
        if x1 == x2 or abs((y2 - y1) / (x2 - x1)) > slope_thresh:
            selected_lines.append([[x1, y1, x2, y2]])
    return selected_lines
def sortLines(lines):
    """Split segments into left (negative slope) and right (positive slope) groups.

    In image coordinates (y grows downward) the left lane line has negative
    slope and the right lane line positive slope.  Horizontal segments
    (m == 0) belong to neither group.  Vertical segments (x1 == x2) are now
    skipped — the original raised ``ZeroDivisionError`` on them.

    Returns ``[left_lanelines, right_lanelines]`` where each entry is a flat
    ``[x1, y1, x2, y2]`` list.
    """
    left_lanelines = []
    right_lanelines = []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x1 == x2:
                continue  # vertical: slope undefined, cannot be classified by sign
            m = (y2 - y1) / (x2 - x1)
            if m < 0:
                left_lanelines.append([x1, y1, x2, y2])
            elif m > 0:
                right_lanelines.append([x1, y1, x2, y2])
    return [left_lanelines, right_lanelines]
def Lanelines_filtering(lanelines, start_y, end_y):
    """Collapse each group of segments into one averaged line.

    For every group in *lanelines*, average the slopes and intercepts of its
    segments (skipping vertical ones, where the slope is undefined) and return
    the averaged line clipped between the rows *start_y* and *end_y* as
    ``[[start_x, start_y, end_x, end_y]]``.  Groups with no usable segment
    yield the degenerate ``[[0, 0, 0, 0]]`` entry.
    """
    start_y, end_y = int(start_y), int(end_y)
    averaged = []
    for group in lanelines:
        slopes_acc = []
        intercepts_acc = []
        for seg in group:
            # Unpack via numpy, matching the accepted input element types.
            x1, y1, x2, y2 = np.array(seg).reshape(4, 1)
            if x1 != x2:  # avoid division by zero on vertical segments
                slope = (y2 - y1) / (x2 - x1)
                slopes_acc.append(slope)
                intercepts_acc.append(y1 - slope * x1)
        if slopes_acc:
            mean_m = sum(slopes_acc) / len(slopes_acc)
            mean_b = sum(intercepts_acc) / len(intercepts_acc)
            # Invert y = m*x + b to get the x coordinate at each boundary row.
            averaged.append([[int((start_y - mean_b) / mean_m), start_y,
                              int((end_y - mean_b) / mean_m), end_y]])
        else:
            averaged.append([[0, 0, 0, 0]])
    return averaged
# The full pipeline: colour mask -> edges -> region mask -> Hough -> filter -> draw
def Lanelines_detection(image):
    """Detect the two lane lines in one RGB road frame and overlay them.

    Pipeline: HLS colour thresholding, Canny edges combined with the colour
    mask, trapezoidal region-of-interest masking, probabilistic Hough lines,
    slope filtering/sorting into left+right lanes, temporal smoothing over the
    last N frames (module globals ``prev_leftlines`` / ``prev_rightlines``),
    and finally alpha-blending the drawn lanes onto the input frame.

    Returns the input image with the averaged lane lines drawn in blue
    (BGR-style [0, 0, 255] colour triple).
    """
    # #Visualize Original Image if needed
    # plt.figure(1)
    # plt.imshow(image)
    # plt.title('Original Image')
    ####################
    ###1.Color masking##
    ####################
    colorMask = laneColors_mask(image,thresh=[(0,255),(40,100),(120,200)])
    # #Visualize and save results if needed
    # plt.figure(2)
    # plt.imshow(colorMask,cmap='gray')
    # plt.title('Colors Thresholded Image')
    #
    # #Save Image
    # plt.imsave('output_images/colorMask_img.jpg', colorMask,cmap='gray')
    ##############################
    ### 2. Canny Edge Detection###
    ##############################
    gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
    #Edge Detection (on the grayscale image)
    edge = canny_edge(gray,thresh=[50,150],kernel_size = 7)
    #Combine edges with the color mask (pixel-wise OR)
    binary_edge = np.zeros_like(edge)
    binary_edge =cv2.bitwise_or(edge,colorMask)
    # #Visualize and save results if needed
    # plt.figure(3)
    # plt.imshow(binary_edge,cmap='gray')
    # plt.title('Color Thresholded Edge Detection')
    #
    # #Save Image
    # plt.imsave('output_images/colorThresh_edge.jpg', binary_edge ,cmap='gray')
    ############################
    #### 3. Region Masking #####
    ############################
    #Trapezoid shape offsets, as fractions of the image width/height
    width1 = 0.45
    width2 = 0.05
    region_height = 0.6
    #Define the region's four points (bottom-left, top-left, top-right, bottom-right)
    pt1 = (int(image.shape[1]/2 - width1*image.shape[1]),int(image.shape[0]))
    pt4 = (int(image.shape[1]/2 + width1*image.shape[1]),int(image.shape[0]))
    pt2 = (int(image.shape[1]/2 - width2*image.shape[1]) ,int(region_height*image.shape[0]))
    pt3 = (int(image.shape[1]/2 + width2*image.shape[1]) ,int(region_height*image.shape[0]))
    region_mask = regionMask (binary_edge,[pt1,pt2,pt3,pt4])
    #Apply the mask to the edge detected image
    regionMasked_img = np.copy(binary_edge)
    regionMasked_img [region_mask == 0] = 0
    # #Visualize and save results if needed
    # plt.figure(4)
    # plt.imshow(regionMasked_img,cmap='gray')
    # plt.title('Region Masked Image')
    #
    # #Save Image
    # plt.imsave('output_images/regionMasked_img.jpg', regionMasked_img,cmap='gray')
    ############################
    #### 4. Lines Detection ####
    ############################
    #Hough Lines parameters
    rho = 2
    theta = np.pi/180
    threshold = 70
    min_len = 2
    max_gap = 50
    #Hough lines (may be None if nothing is detected)
    houghLines_img = np.zeros_like(regionMasked_img)
    lines = cv2.HoughLinesP (regionMasked_img, rho, theta, threshold, minLineLength = min_len, maxLineGap = max_gap)
    #####################################
    ### 4.1 Filter Horizontal lines #####
    #####################################
    lines = filter_horizontal (lines , slope_thresh =0.6)
    #Draw Hough Lines
    houghLines_img = drawLines(houghLines_img,lines,color = 255, thickness = 10)
    # # Visualize and save results if needed
    # plt.figure(5)
    # plt.imshow(houghLines_img,cmap='gray')
    # plt.title('Hough Lines')
    #
    # #Save Image
    # plt.imsave('output_images/HoughLines.jpg', houghLines_img,cmap='gray')
    ########################
    #### 4.2 Sort Lines ####
    ########################
    #Sort lines into the two lanes by slope sign
    sortedLines = sortLines(lines)
    ###################################################
    #### 4.3 Filter Lines into two lanelines only  ####
    ###################################################
    global prev_rightlines
    global prev_leftlines
    #Lanelines filtering: average each group into a single line
    start_y = image.shape[0]
    end_y = region_height*(1.1*image.shape[0])
    filteredLines = Lanelines_filtering(sortedLines,start_y,end_y)
    # Save the left and right lines for temporal smoothing
    prev_leftlines.append(filteredLines[0])
    prev_rightlines.append(filteredLines[1])
    ###########################################################
    #### 5. Smooth lanelines (in case of video processing) ####
    ##########################################################
    #Remove the oldest readings if the number of frames exceeded N
    N = 8
    if len(prev_leftlines ) > N :
        prev_leftlines.pop(0)
        prev_rightlines.pop(0)
    # Average the last detected lanelines together if previous frames are found
    avg_lanelines = Lanelines_filtering([prev_leftlines,prev_rightlines],start_y,end_y)
    #Draw filtered lines on an empty (all-zero) image
    filteredLines_img = image*0
    filteredLines_img = drawLines(filteredLines_img,avg_lanelines,color = [0,0,255] , thickness = 20)
    # #Visualize and save results if needed
    # plt.figure(6)
    # plt.imshow(filteredLines_img)
    # plt.title('Filtered Lanes')
    #
    # #Save Image
    # plt.imsave('output_images/filteredLines_img.jpg', filteredLines_img)
    ##############################################
    ### 8. Draw lanelines on the original image ###
    ##############################################
    #Combine lanelines with the original Image (alpha blend)
    final_img = np.copy(image)
    final_img = cv2.addWeighted(image,0.8,filteredLines_img,1,0)
    # #Visualize and save results if needed
    # plt.figure(7)
    # plt.imshow(final_img)
    # plt.title('Final Result')
    #
    # #Save Image
    # plt.imsave('output_images/final_res.jpg', final_img)
    #
    # plt.show()
    return final_img
#################################
### Image lanelines detection ###
#################################
# Single-image mode (disabled): uncomment to run the pipeline on one frame.
# Load image
#test_img = im.imread("test_images/challenge4.jpg")
# plt.figure(1)
# plt.imshow(test_img)
# plt.title('Original Image')
# Perform detection
#lanelines_img = Lanelines_detection(test_img)
# plt.figure(2)
# plt.imshow(lanelines_img)
# plt.title('Lanelines Detection')
# plt.show()
#################################
### Video lanelines detection ###
#################################
# NOTE: this runs at import time — the whole video is processed whenever the
# module is imported, not only when executed as a script.
#Load video
in_clip = VideoFileClip('test_vids/challenge.mp4')
#Process every frame through the lane-detection pipeline
out_clip = in_clip.fl_image(Lanelines_detection)
#Save the result video
out_clip.write_videofile( 'output_vids/new1.mp4', audio=False)
| [
"numpy.copy",
"cv2.HoughLinesP",
"cv2.Canny",
"cv2.line",
"cv2.addWeighted",
"numpy.array",
"cv2.bitwise_or",
"cv2.cvtColor",
"cv2.GaussianBlur",
"numpy.zeros_like",
"moviepy.editor.VideoFileClip"
] | [((9943, 9983), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['"""test_vids/challenge.mp4"""'], {}), "('test_vids/challenge.mp4')\n", (9956, 9983), False, 'from moviepy.editor import VideoFileClip\n'), ((368, 404), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (380, 404), False, 'import cv2\n'), ((562, 578), 'numpy.zeros_like', 'np.zeros_like', (['s'], {}), '(s)\n', (575, 578), True, 'import numpy as np\n'), ((929, 985), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['grayImg', '(kernel_size, kernel_size)', '(0)'], {}), '(grayImg, (kernel_size, kernel_size), 0)\n', (945, 985), False, 'import cv2\n'), ((1021, 1058), 'cv2.Canny', 'cv2.Canny', (['blur', 'thresh[0]', 'thresh[1]'], {}), '(blur, thresh[0], thresh[1])\n', (1030, 1058), False, 'import cv2\n'), ((1154, 1172), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (1167, 1172), True, 'import numpy as np\n'), ((4756, 4795), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (4768, 4795), False, 'import cv2\n'), ((4948, 4967), 'numpy.zeros_like', 'np.zeros_like', (['edge'], {}), '(edge)\n', (4961, 4967), True, 'import numpy as np\n'), ((4985, 5016), 'cv2.bitwise_or', 'cv2.bitwise_or', (['edge', 'colorMask'], {}), '(edge, colorMask)\n', (4999, 5016), False, 'import cv2\n'), ((5975, 5995), 'numpy.copy', 'np.copy', (['binary_edge'], {}), '(binary_edge)\n', (5982, 5995), True, 'import numpy as np\n'), ((6556, 6587), 'numpy.zeros_like', 'np.zeros_like', (['regionMasked_img'], {}), '(regionMasked_img)\n', (6569, 6587), True, 'import numpy as np\n'), ((6600, 6704), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['regionMasked_img', 'rho', 'theta', 'threshold'], {'minLineLength': 'min_len', 'maxLineGap': 'max_gap'}), '(regionMasked_img, rho, theta, threshold, minLineLength=\n min_len, maxLineGap=max_gap)\n', (6615, 6704), False, 'import cv2\n'), ((9079, 9093), 'numpy.copy', 'np.copy', (['image'], {}), 
'(image)\n', (9086, 9093), True, 'import numpy as np\n'), ((9110, 9162), 'cv2.addWeighted', 'cv2.addWeighted', (['image', '(0.8)', 'filteredLines_img', '(1)', '(0)'], {}), '(image, 0.8, filteredLines_img, 1, 0)\n', (9125, 9162), False, 'import cv2\n'), ((1253, 1270), 'numpy.array', 'np.array', (['[[pts]]'], {}), '([[pts]])\n', (1261, 1270), True, 'import numpy as np\n'), ((1526, 1577), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', 'color', 'thickness'], {}), '(img, (x1, y1), (x2, y2), color, thickness)\n', (1534, 1577), False, 'import cv2\n'), ((3298, 3312), 'numpy.array', 'np.array', (['line'], {}), '(line)\n', (3306, 3312), True, 'import numpy as np\n')] |
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
class patcher(patcher):
    # NOTE: intentionally shadows the imported base class of the same name —
    # this is the project's convention for character-specific patchers.

    def __init__(self, body='./body/body_noy.png', **options):
        """Patcher for the character 'ノイ' (Noy): registers the body image,
        the pantie anchor position, and loads the character-specific mask."""
        super().__init__('ノイ', body=body, pantie_position=[147, 133], **options)
        self.mask = io.imread('./mask/mask_noy.png')

    def convert(self, image):
        """Warp a pantie texture to fit this character's body.

        Steps: cut a patch from the hip region and mirror it to the front,
        warp the texture with a hand-tuned per-column shift, mask it, scale it
        to the body, and mirror left/right halves together.  The pixel offsets
        (546, 147, 53, -65, 3.65, 3.13, ...) are hand-tuned for this body —
        presumably derived from the body image geometry; treat as magic
        constants.
        """
        pantie = np.array(image)
        # prepare for moving from hip to front
        patch = np.copy(pantie[-140:-5, 546:, :])
        # Flip the patch both ways and shrink it to the front-panel size.
        patch = skt.resize(patch[::-1, ::-1, :], (240, 60), anti_aliasing=True, mode='reflect')
        [pr, pc, d] = patch.shape
        # Affine transform matrix
        pantie = np.pad(pantie, [(0, 0), (100, 0), (0, 0)], mode='constant')
        # Per-column vertical shift profile for the warp (arrx stays zero).
        arrx = np.zeros(100)
        arry = np.zeros(100)
        arry[:30] += np.linspace(70,0,30)
        arry[30:] += np.linspace(0,200,70)
        arry[65:] -= np.linspace(0,170,35)
        pantie = affine_transform_by_arr(pantie, arrx, arry)[:,53:-65]
        # Paste the mirrored hip patch at the pantie anchor row.
        pantie[147:147 + pr, :pc, :] = patch
        # skt.resize returned floats in [0, 1]; rescale to uint8 before masking.
        pantie = np.bitwise_and(np.uint8(pantie*255), self.mask)
        pantie = skt.resize(pantie, (int(pantie.shape[0]*3.65), int(pantie.shape[1]*3.13)), anti_aliasing=True, mode='reflect')[:,8:]
        # Mirror to produce the symmetric left+right halves.
        pantie = np.concatenate((pantie[:,::-1],pantie),axis=1)
        return Image.fromarray(np.uint8(pantie*255))
| [
"numpy.uint8",
"numpy.copy",
"numpy.array",
"skimage.io.imread",
"numpy.zeros",
"numpy.linspace",
"numpy.concatenate",
"numpy.pad",
"skimage.transform.resize"
] | [((364, 396), 'skimage.io.imread', 'io.imread', (['"""./mask/mask_noy.png"""'], {}), "('./mask/mask_noy.png')\n", (373, 396), True, 'import skimage.io as io\n'), ((445, 460), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (453, 460), True, 'import numpy as np\n'), ((525, 558), 'numpy.copy', 'np.copy', (['pantie[-140:-5, 546:, :]'], {}), '(pantie[-140:-5, 546:, :])\n', (532, 558), True, 'import numpy as np\n'), ((575, 654), 'skimage.transform.resize', 'skt.resize', (['patch[::-1, ::-1, :]', '(240, 60)'], {'anti_aliasing': '(True)', 'mode': '"""reflect"""'}), "(patch[::-1, ::-1, :], (240, 60), anti_aliasing=True, mode='reflect')\n", (585, 654), True, 'import skimage.transform as skt\n'), ((741, 800), 'numpy.pad', 'np.pad', (['pantie', '[(0, 0), (100, 0), (0, 0)]'], {'mode': '"""constant"""'}), "(pantie, [(0, 0), (100, 0), (0, 0)], mode='constant')\n", (747, 800), True, 'import numpy as np\n'), ((816, 829), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (824, 829), True, 'import numpy as np\n'), ((845, 858), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (853, 858), True, 'import numpy as np\n'), ((880, 902), 'numpy.linspace', 'np.linspace', (['(70)', '(0)', '(30)'], {}), '(70, 0, 30)\n', (891, 902), True, 'import numpy as np\n'), ((922, 945), 'numpy.linspace', 'np.linspace', (['(0)', '(200)', '(70)'], {}), '(0, 200, 70)\n', (933, 945), True, 'import numpy as np\n'), ((965, 988), 'numpy.linspace', 'np.linspace', (['(0)', '(170)', '(35)'], {}), '(0, 170, 35)\n', (976, 988), True, 'import numpy as np\n'), ((1319, 1368), 'numpy.concatenate', 'np.concatenate', (['(pantie[:, ::-1], pantie)'], {'axis': '(1)'}), '((pantie[:, ::-1], pantie), axis=1)\n', (1333, 1368), True, 'import numpy as np\n'), ((1135, 1157), 'numpy.uint8', 'np.uint8', (['(pantie * 255)'], {}), '(pantie * 255)\n', (1143, 1157), True, 'import numpy as np\n'), ((1398, 1420), 'numpy.uint8', 'np.uint8', (['(pantie * 255)'], {}), '(pantie * 255)\n', (1406, 1420), True, 'import 
numpy as np\n')] |
#!/usr/bin/env python3
import os, logging
import pandas as pd
from pathlib import Path
import numpy as np
from scipy.interpolate import interp1d
from bins_and_cuts import obs_cent_list, obs_range_list
# fully specify numeric data types, including endianness and size, to
# ensure consistency across all machines ('<' = little-endian; f8 / i8 / c16
# are 8-byte float, 8-byte int, 16-byte complex)
float_t = '<f8'
int_t = '<i8'
complex_t = '<c16'
# fix the random seed for cross validation, so that sets are deleted consistently
np.random.seed(1)
# Work, Design, and Exp directories (WORKDIR env var, defaults to cwd)
workdir = Path(os.getenv('WORKDIR', '.'))
design_dir = str(workdir/'production_designs/500pts')
dir_obs_exp = "HIC_experimental_data"
####################################
### USEFUL LABELS / DICTIONARIES ###
####################################
# only using data from these experimental collaborations
expt_for_system = { 'Au-Au-200' : 'STAR',
                    'Pb-Pb-2760' : 'ALICE',
                    'Pb-Pb-5020' : 'ALICE',
                    'Xe-Xe-5440' : 'ALICE',
                    }
# for STAR we have measurements of pi+ dN/dy, k+ dN/dy etc... so we need to
# scale them by 2 after reading in
STAR_id_yields = {
                'dN_dy_pion' : 'dN_dy_pion_+',
                'dN_dy_kaon' : 'dN_dy_kaon_+',
                'dN_dy_proton' : 'dN_dy_proton_+',
}
# human-readable names for the four viscous-correction (delta-f) models
# NOTE(review): entry 1 appears anonymized ('<NAME>'); the matching short
# label below is 'C.E.', presumably Chapman-Enskog -- confirm before use.
idf_label = {
            0 : 'Grad',
            1 : '<NAME>',
            2 : 'Pratt-McNelis',
            3 : 'Pratt-Bernhard'
            }
# abbreviated delta-f labels, used e.g. to build MAP_obs_file paths below
idf_label_short = {
            0 : 'Grad',
            1 : 'C.E.',
            2 : 'P.M.',
            3 : 'P.B.'
            }
####################################
### SWITCHES AND OPTIONS !!!!!!! ###
####################################
#how many versions of the model are run, for instance
# 4 versions of delta-f with SMASH and a fifth model with UrQMD totals 5
number_of_models_per_run = 4
# the choice of viscous correction. 0 : 14 Moment, 1 : C.E. RTA, 2 : McNelis, 3 : Bernhard
idf = 0
print("Using idf = " + str(idf) + " : " + idf_label[idf])
#the Collision systems: (projectile, target, sqrt(s) [GeV])
systems = [
        ('Pb', 'Pb', 2760),
        ('Au', 'Au', 200),
        #('Pb', 'Pb', 5020),
        #('Xe', 'Xe', 5440)
          ]
# string keys of the form 'Pb-Pb-2760'
system_strs = ['{:s}-{:s}-{:d}'.format(*s) for s in systems]
num_systems = len(system_strs)
# these are problematic (NaN-producing) design points for the Pb-Pb-2760 run
# with 500 design points, keyed by delta-f model index
nan_sets_by_deltaf = {
                    0 : set([334, 341, 377, 429, 447, 483]),
                    1 : set([285, 334, 341, 447, 483, 495]),
                    2 : set([209, 280, 322, 334, 341, 412, 421, 424, 429, 432, 446, 447, 453, 468, 483, 495]),
                    3 : set([60, 232, 280, 285, 322, 324, 341, 377, 432, 447, 464, 468, 482, 483, 485, 495])
                    }
nan_design_pts_set = nan_sets_by_deltaf[idf]
#nan_design_pts_set = set([60, 285, 322, 324, 341, 377, 432, 447, 464, 468, 482, 483, 495])
# design points whose event generation did not finish, and points whose
# observables show suspicious features; all are removed from the training set
unfinished_events_design_pts_set = set([289, 324, 326, 459, 462, 242, 406, 440, 123])
strange_features_design_pts_set = set([289, 324, 440, 459, 462])
delete_design_pts_set = nan_design_pts_set.union(
                        unfinished_events_design_pts_set.union(
                        strange_features_design_pts_set
                        )
                        )
delete_design_pts_validation_set = [10, 68, 93] # idf 0
class systems_setting(dict):
    """Per-collision-system settings dictionary.

    Stores the projectile/target/energy identifiers and the paths of the
    design-point / design-range / label files derived from ``design_dir``.
    Assigning the special key ``'run_id'`` (see ``__setitem__``) derives the
    event- and observable-file paths for that run.
    """
    def __init__(self, A, B, sqrts):
        # A, B : projectile / target species symbols (e.g. 'Pb'); sqrts : int
        # collision energy.  Use super().__setitem__ everywhere to bypass the
        # 'run_id' hook defined in our own __setitem__ below.
        super().__setitem__("proj", A)
        super().__setitem__("targ", B)
        super().__setitem__("sqrts", sqrts)
        sysdir = "/design_pts_{:s}_{:s}_{:d}_production".format(A, B, sqrts)
        super().__setitem__("main_design_file",
            design_dir+sysdir+'/design_points_main_{:s}{:s}-{:d}.dat'.format(A, B, sqrts)
        )
        super().__setitem__("main_range_file",
            design_dir+sysdir+'/design_ranges_main_{:s}{:s}-{:d}.dat'.format(A, B, sqrts)
        )
        super().__setitem__("validation_design_file",
            design_dir+sysdir+'/design_points_validation_{:s}{:s}-{:d}.dat'.format(A, B, sqrts)
        )
        # NOTE(review): double '/' below (harmless on POSIX filesystems)
        super().__setitem__("validation_range_file",
            design_dir+sysdir+'//design_ranges_validation_{:s}{:s}-{:d}.dat'.format(A, B, sqrts)
        )
        # read the parameter label strings (one per line, newline stripped)
        with open(design_dir+sysdir+'/design_labels_{:s}{:s}-{:d}.dat'.format(A, B, sqrts), 'r') as f:
            labels = [r""+line[:-1] for line in f]
        super().__setitem__("labels", labels)
    def __setitem__(self, key, value):
        # Setting 'run_id' derives the event/observable directories and files
        # for that run instead of storing the value directly.
        # NOTE(review): the 'run_id' value itself is never stored, so
        # self['run_id'] would raise KeyError -- confirm this is intended.
        if key == 'run_id':
            super().__setitem__("main_events_dir",
                str(workdir/'model_calculations/{:s}/Events/main/'.format(value))
            )
            super().__setitem__("validation_events_dir",
                str(workdir/'model_calculations/{:s}/Events/validation/'.format(value))
            )
            super().__setitem__("main_obs_file",
                str(workdir/'model_calculations/{:s}/Obs/main.dat'.format(value))
            )
            super().__setitem__("validation_obs_file",
                str(workdir/'model_calculations/{:s}/Obs/validation.dat'.format(value))
            )
        else:
            super().__setitem__(key, value)
# SystemsInfo maps 'Proj-Targ-sqrts' -> systems_setting for each active system
SystemsInfo = {"{:s}-{:s}-{:d}".format(*s): systems_setting(*s) \
                for s in systems
                }
if 'Pb-Pb-2760' in system_strs:
    SystemsInfo["Pb-Pb-2760"]["run_id"] = "production_500pts_Pb_Pb_2760"
    SystemsInfo["Pb-Pb-2760"]["n_design"] = 500
    SystemsInfo["Pb-Pb-2760"]["n_validation"] = 100
    SystemsInfo["Pb-Pb-2760"]["design_remove_idx"]=list(delete_design_pts_set)
    # npc = number of principal components retained by the emulator
    SystemsInfo["Pb-Pb-2760"]["npc"]=10
    SystemsInfo["Pb-Pb-2760"]["MAP_obs_file"]=str(workdir/'model_calculations/MAP') + '/' + idf_label_short[idf] + '/Obs/obs_Pb-Pb-2760.dat'
if 'Au-Au-200' in system_strs:
    SystemsInfo["Au-Au-200"]["run_id"] = "production_500pts_Au_Au_200"
    SystemsInfo["Au-Au-200"]["n_design"] = 500
    SystemsInfo["Au-Au-200"]["n_validation"] = 100
    SystemsInfo["Au-Au-200"]["design_remove_idx"]=list(delete_design_pts_set)
    SystemsInfo["Au-Au-200"]["npc"] = 6
    SystemsInfo["Au-Au-200"]["MAP_obs_file"]=str(workdir/'model_calculations/MAP') + '/' + idf_label_short[idf] + '/Obs/obs_Au-Au-200.dat'
###############################################################################
############### BAYES #########################################################
# if True : perform emulator validation
# if False : use experimental data for parameter estimation
validation = False
#if true, we will validate emulator against points in the training set
pseudovalidation = False
#if true, we will omit 20% of the training design when training emulator
crossvalidation = False
fixed_validation_pt=0
if validation:
    print("Performing emulator validation type ...")
    if pseudovalidation:
        print("... pseudo-validation")
        pass
    elif crossvalidation:
        print("... cross-validation")
        # NOTE(review): n_design_pts_main is not defined anywhere in this
        # module -- this branch raises NameError if crossvalidation is True.
        cross_validation_pts = np.random.choice(n_design_pts_main,
                                                n_design_pts_main // 5,
                                                replace = False)
        delete_design_pts_set = cross_validation_pts #omit these points from training
    else:
        validation_pt = fixed_validation_pt
        print("... independent-validation, using validation_pt = " + str(validation_pt))
#if this switch is True, all experimental errors will be set to zero
set_exp_error_to_zero = False
# if this switch is True, then when performing MCMC each experimental error
# will be multiplied by the corresponding factor
change_exp_error = True
change_exp_error_vals = {
                        'Au-Au-200': {},
                        'Pb-Pb-2760' : {
                                        'dN_dy_proton' : 1.e-1,
                                        'mean_pT_proton' : 1.e-1
                                        }
                        }
#if this switch is turned on, some parameters will be fixed
#to certain values in the bayesian analysis. see bayes_mcmc.py
hold_parameters = False
# hold are pairs of parameter (index, value)
# count the index correctly when have multiple systems!!!
# e.g [(1, 10.5), (5, 0.3)] will hold parameter[1] at 10.5, and parameter[5] at 0.3
#hold_parameters_set = [(7, 0.0), (8, 0.154), (9, 0.0), (15, 0.0), (16, 5.0)] #these should hold the parameters to Jonah's prior for LHC+RHIC
#hold_parameters_set = [(6, 0.0), (7, 0.154), (8, 0.0), (14, 0.0), (15, 5.0)] #these should hold the parameters to Jonah's prior for LHC only
#hold_parameters_set = [(16, 8.0)] #this will fix the shear relaxation time factor for LHC+RHIC
hold_parameters_set = [(17, 0.155)] #this will fix the T_sw for LHC+RHIC
if hold_parameters:
    print("Warning : holding parameters to fixed values : ")
    print(hold_parameters_set)
#if this switch is turned on, the emulator will be trained on the values of
# eta/s (T_i) and zeta/s (T_i), where T_i are a grid of temperatures, rather
# than the parameters such as slope, width, etc...
do_transform_design = True
#if this switch is turned on, the emulator will be trained on log(1 + dY_dx)
#where dY_dx includes dET_deta, dNch_deta, dN_dy_pion, etc...
transform_multiplicities = False
#this switches on/off parameterized experimental covariance btw. centrality bins and groups
assume_corr_exp_error = False
cent_corr_length = 0.5 #this is the correlation length between centrality bins
# structured dtype: for each system, (mean, err) arrays per centrality bin,
# per observable, with one record per delta-f model
bayes_dtype = [    (s,
                  [(obs, [("mean",float_t,len(cent_list)),
                          ("err",float_t,len(cent_list))]) \
                    for obs, cent_list in obs_cent_list[s].items() ],
                  number_of_models_per_run
                 ) \
                 for s in system_strs
            ]
# The active ones used in Bayes analysis (MCMC)
active_obs_list = {
    sys: list(obs_cent_list[sys].keys()) for sys in system_strs
}
#try excluding PHENIX dN/dy proton from fit
for s in system_strs:
    if s == 'Au-Au-200':
        active_obs_list[s].remove('dN_dy_proton')
        active_obs_list[s].remove('mean_pT_proton')
    if s == 'Pb-Pb-2760':
        active_obs_list[s].remove('dN_dy_Lambda')
        active_obs_list[s].remove('dN_dy_Omega')
        active_obs_list[s].remove('dN_dy_Xi')
print("The active observable list for calibration: " + str(active_obs_list))
def zeta_over_s(T, zmax, T0, width, asym):
    """Specific bulk viscosity zeta/s(T): a skewed Cauchy-like peak of height
    zmax at temperature T0, with different effective widths above/below T0
    controlled by the asymmetry parameter asym."""
    dT = T - T0
    # width is stretched by (1 + asym) above the peak, (1 - asym) at/below it
    effective_width = width * (1. + asym) if dT > 0 else width * (1. - asym)
    ratio = dT / effective_width
    return zmax / (1. + ratio ** 2)
zeta_over_s = np.vectorize(zeta_over_s)
def eta_over_s(T, T_k, alow, ahigh, etas_k):
    """Specific shear viscosity eta/s(T): piecewise-linear in T with value
    etas_k at the kink temperature T_k, slope alow below and ahigh above the
    kink, clipped so it never goes negative."""
    slope = alow if T < T_k else ahigh
    y = etas_k + slope*(T-T_k)
    return y if y > 0 else 0.
eta_over_s = np.vectorize(eta_over_s)
def taupi(T, T_k, alow, ahigh, etas_k, bpi):
    """Shear relaxation time: bpi * (eta/s)(T) / T, with eta/s evaluated by
    the piecewise-linear parameterization above."""
    specific_shear = eta_over_s(T, T_k, alow, ahigh, etas_k)
    return bpi*specific_shear/T
taupi = np.vectorize(taupi)
def tau_fs(e, tau_R, alpha):
    """Free-streaming time: tau_R scaled by a power of the dimensionless
    energy ratio e = e_initial / e_R."""
    energy_factor = e**alpha
    return tau_R * energy_factor
# load design for other module
def load_design(system_str, pset='main'):
    """Load the parameter design and its ranges for one collision system.

    Parameters
    ----------
    system_str : str
        Key into SystemsInfo, e.g. 'Pb-Pb-2760'.
    pset : str
        'main' selects the training design; any other value selects the
        validation design.

    Returns
    -------
    (design, design_min, design_max, labels) : the design DataFrame (with the
        'idx' column dropped), the per-parameter min/max arrays, and the
        parameter label strings.
    """
    info = SystemsInfo[system_str]
    if pset == 'main':
        design_file = info["main_design_file"]
        range_file = info["main_range_file"]
    else:
        design_file = info["validation_design_file"]
        range_file = info["validation_range_file"]
    print("Loading {:s} points from {:s}".format(pset, design_file))
    print("Loading {:s} ranges from {:s}".format(pset, range_file))
    labels = info["labels"]
    # design matrix; drop the design-point index column
    design = pd.read_csv(design_file)
    design = design.drop("idx", axis=1)
    print("Summary of design : ")
    design.describe()  # NOTE: result is discarded (kept from original code)
    # per-parameter prior ranges
    design_range = pd.read_csv(range_file)
    design_max = design_range['max'].values
    design_min = design_range['min'].values
    return design, design_min, design_max, labels
# A specially transformed design for the emulators
# 0 1 2 3 4
# norm trento_p sigma_k nucleon_width dmin3
#
# 5 6 7
# tau_R alpha eta_over_s_T_kink_in_GeV
#
# 8 9 10
# eta_over_s_low_T_slope_in_GeV eta_over_s_high_T_slope_in_GeV eta_over_s_at_kink,
#
# 11 12 13
# zeta_over_s_max zeta_over_s_T_peak_in_GeV zeta_over_s_width_in_GeV
#
# 14 15 16
# zeta_over_s_lambda_asymm shear_relax_time_factor Tswitch
#right now this depends on the ordering of parameters
#we should write a version instead that uses labels in case ordering changes
def transform_design(X):
    """Transform the raw design matrix for emulator training.

    The viscosity parameters (columns 7-14: eta/s kink parameters and zeta/s
    peak parameters) are replaced by the values of eta/s(T_i) and zeta/s(T_i)
    evaluated on a fixed grid of temperatures; the remaining parameters are
    passed through unchanged.

    Parameters
    ----------
    X : ndarray, shape (n_points, n_params)
        Raw design matrix (column ordering documented above this function).

    Returns
    -------
    ndarray, shape (n_points, 9 + 2 * num_T)
        Pass-through columns followed by eta/s and zeta/s grid values.
    """
    # keep the non-viscous parameters as-is
    indices = [0, 1, 2, 3, 4, 5, 6, 15, 16]
    new_design_X = X[:, indices]

    # evaluate the (vectorized) viscosity parameterizations on a fixed
    # temperature grid; each row of the transposed arrays is one design point
    num_T = 10
    Temperature_grid = np.linspace(0.135, 0.4, num_T)
    eta_vals = np.array(
        [eta_over_s(T, X[:, 7], X[:, 8], X[:, 9], X[:, 10])
         for T in Temperature_grid]
    ).T
    zeta_vals = np.array(
        [zeta_over_s(T, X[:, 11], X[:, 12], X[:, 13], X[:, 14])
         for T in Temperature_grid]
    ).T

    # single concatenate replaces the original pair of sequential calls
    return np.concatenate((new_design_X, eta_vals, zeta_vals), axis=1)
def prepare_emu_design(system_str):
    """Load and (optionally) transform the design for emulator training.

    Returns
    -------
    (design, design_max, design_min, labels) : the (possibly transformed)
        design array, its per-column max/min (recomputed from the design
        itself), and the parameter labels.
    """
    # BUG FIX: load_design returns (design, design_min, design_max, labels);
    # the original code unpacked min/max in swapped order.  That was harmless
    # only because both values are recomputed below, so discard them
    # explicitly here.
    design, _, _, labels = load_design(system_str=system_str, pset='main')
    # transformation of design for viscosities
    if do_transform_design:
        print("Note : Transforming design of viscosities")
        # replace this with a function that transforms based on labels, not indices
        design = transform_design(design.values)
    else:
        design = design.values
    design_max = np.max(design, axis=0)
    design_min = np.min(design, axis=0)
    return design, design_max, design_min, labels
| [
"os.getenv",
"pandas.read_csv",
"numpy.random.choice",
"numpy.max",
"numpy.array",
"numpy.linspace",
"numpy.random.seed",
"numpy.concatenate",
"numpy.min",
"numpy.vectorize"
] | [((441, 458), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (455, 458), True, 'import numpy as np\n'), ((10445, 10470), 'numpy.vectorize', 'np.vectorize', (['zeta_over_s'], {}), '(zeta_over_s)\n', (10457, 10470), True, 'import numpy as np\n'), ((10684, 10708), 'numpy.vectorize', 'np.vectorize', (['eta_over_s'], {}), '(eta_over_s)\n', (10696, 10708), True, 'import numpy as np\n'), ((10820, 10839), 'numpy.vectorize', 'np.vectorize', (['taupi'], {}), '(taupi)\n', (10832, 10839), True, 'import numpy as np\n'), ((510, 535), 'os.getenv', 'os.getenv', (['"""WORKDIR"""', '"""."""'], {}), "('WORKDIR', '.')\n", (519, 535), False, 'import os, logging\n'), ((11558, 11582), 'pandas.read_csv', 'pd.read_csv', (['design_file'], {}), '(design_file)\n', (11569, 11582), True, 'import pandas as pd\n'), ((11698, 11721), 'pandas.read_csv', 'pd.read_csv', (['range_file'], {}), '(range_file)\n', (11709, 11721), True, 'import pandas as pd\n'), ((12818, 12848), 'numpy.linspace', 'np.linspace', (['(0.135)', '(0.4)', 'num_T'], {}), '(0.135, 0.4, num_T)\n', (12829, 12848), True, 'import numpy as np\n'), ((13234, 13282), 'numpy.concatenate', 'np.concatenate', (['(new_design_X, eta_vals)'], {'axis': '(1)'}), '((new_design_X, eta_vals), axis=1)\n', (13248, 13282), True, 'import numpy as np\n'), ((13303, 13352), 'numpy.concatenate', 'np.concatenate', (['(new_design_X, zeta_vals)'], {'axis': '(1)'}), '((new_design_X, zeta_vals), axis=1)\n', (13317, 13352), True, 'import numpy as np\n'), ((13854, 13876), 'numpy.max', 'np.max', (['design'], {'axis': '(0)'}), '(design, axis=0)\n', (13860, 13876), True, 'import numpy as np\n'), ((13894, 13916), 'numpy.min', 'np.min', (['design'], {'axis': '(0)'}), '(design, axis=0)\n', (13900, 13916), True, 'import numpy as np\n'), ((13155, 13173), 'numpy.array', 'np.array', (['eta_vals'], {}), '(eta_vals)\n', (13163, 13173), True, 'import numpy as np\n'), ((13192, 13211), 'numpy.array', 'np.array', (['zeta_vals'], {}), '(zeta_vals)\n', (13200, 
13211), True, 'import numpy as np\n'), ((6921, 6995), 'numpy.random.choice', 'np.random.choice', (['n_design_pts_main', '(n_design_pts_main // 5)'], {'replace': '(False)'}), '(n_design_pts_main, n_design_pts_main // 5, replace=False)\n', (6937, 6995), True, 'import numpy as np\n')] |
from qtpy.QtCore import Qt, Signal
from qtpy.QtWidgets import QWidget, QGridLayout, QSizePolicy, QScrollBar
import numpy as np
from ..components.dims import Dims
from ..components.dims_constants import DimsMode
class QtDims(QWidget):
    """Qt View for Dims model.

    Parameters
    ----------
    dims : Dims
        Dims object to be passed to Qt object
    parent : QWidget, optional
        QWidget that will be the parent of this widget

    Attributes
    ----------
    dims : Dims
        Dims object
    sliders : list
        List of slider widgets
    """

    # Qt Signals for sending events to Qt thread
    update_ndim = Signal()
    update_axis = Signal(int)
    update_range = Signal(int)
    update_display = Signal()

    def __init__(self, dims: Dims, parent=None):
        super().__init__(parent=parent)

        self.SLIDERHEIGHT = 22

        # We keep a reference to the view:
        self.dims = dims

        # list of sliders
        self.sliders = []

        # True / False if slider is or is not displayed
        self._displayed_sliders = []

        self._last_used = None

        # Initialises the layout:
        layout = QGridLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(layout)
        self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)

        # Update the number of sliders now that the dims have been added
        self._update_nsliders()

        # The next lines connect events coming from the model to the Qt event
        # system: We need to go through Qt signals so that these events are run
        # in the Qt event loop thread. This is all about changing thread
        # context for thread-safety purposes

        # ndim change listener
        def update_ndim_listener(event):
            self.update_ndim.emit()

        self.dims.events.ndim.connect(update_ndim_listener)
        self.update_ndim.connect(self._update_nsliders)

        # axis change listener
        def update_axis_listener(event):
            self.update_axis.emit(event.axis)

        self.dims.events.axis.connect(update_axis_listener)
        self.update_axis.connect(self._update_slider)

        # range change listener
        def update_range_listener(event):
            self.update_range.emit(event.axis)

        self.dims.events.range.connect(update_range_listener)
        self.update_range.connect(self._update_range)

        # display change listener (both ndisplay and order changes trigger it)
        def update_display_listener(event):
            self.update_display.emit()

        self.dims.events.ndisplay.connect(update_display_listener)
        self.dims.events.order.connect(update_display_listener)
        self.update_display.connect(self._update_display)

    @property
    def nsliders(self):
        """Returns the number of sliders displayed

        Returns
        -------
        nsliders: int
            Number of sliders displayed
        """
        return len(self.sliders)

    @property
    def last_used(self):
        """int: Index of slider last used.
        """
        return self._last_used

    @last_used.setter
    def last_used(self, last_used):
        if last_used == self.last_used:
            return

        formerly_used = self.last_used
        if formerly_used is not None:
            sld = self.sliders[formerly_used]
            sld.setProperty('last_used', False)
            # re-polish so the stylesheet picks up the property change
            sld.style().unpolish(sld)
            sld.style().polish(sld)

        self._last_used = last_used
        if last_used is not None:
            sld = self.sliders[last_used]
            sld.setProperty('last_used', True)
            sld.style().unpolish(sld)
            sld.style().polish(sld)

    def _update_slider(self, axis: int):
        """
        Updates position for a given slider.

        Parameters
        ----------
        axis : int
            Axis index.
        """
        if axis >= len(self.sliders):
            return

        slider = self.sliders[axis]

        mode = self.dims.mode[axis]
        if mode == DimsMode.POINT:
            slider.setValue(self.dims.point[axis])
        # mark this axis as the most recently touched slider
        self.last_used = axis

    def _update_range(self, axis: int):
        """
        Updates range for a given slider.

        Parameters
        ----------
        axis : int
            Axis index.
        """
        if axis >= len(self.sliders):
            return

        slider = self.sliders[axis]

        # BUG FIX (style): local was named `range`, shadowing the builtin.
        # Clamp the maximum one step below the layer range because the
        # endpoint is included.
        _range = self.dims.range[axis]
        _range = (_range[0], _range[1] - _range[2], _range[2])
        if _range not in (None, (None, None, None)):
            if _range[1] == 0:
                # a zero-length range cannot be displayed
                self._displayed_sliders[axis] = False
                self.last_used = None
                slider.hide()
            else:
                if (
                    not self._displayed_sliders[axis]
                    and axis not in self.dims.displayed
                ):
                    self._displayed_sliders[axis] = True
                    self.last_used = axis
                    slider.show()
                slider.setMinimum(_range[0])
                slider.setMaximum(_range[1])
                slider.setSingleStep(_range[2])
                slider.setPageStep(_range[2])
        else:
            self._displayed_sliders[axis] = False
            slider.hide()
        nsliders = np.sum(self._displayed_sliders)
        self.setMinimumHeight(nsliders * self.SLIDERHEIGHT)

    def _update_display(self):
        """Updates display for all sliders."""
        for axis, slider in reversed(list(enumerate(self.sliders))):
            if axis in self.dims.displayed:
                # Displayed dimensions correspond to non displayed sliders
                self._displayed_sliders[axis] = False
                self.last_used = None
                slider.hide()
            else:
                # Non displayed dimensions correspond to displayed sliders
                self._displayed_sliders[axis] = True
                self.last_used = axis
                slider.show()
        nsliders = np.sum(self._displayed_sliders)
        self.setMinimumHeight(nsliders * self.SLIDERHEIGHT)

    def _update_nsliders(self):
        """
        Updates the number of sliders based on the number of dimensions
        """
        self._trim_sliders(0)
        self._create_sliders(self.dims.ndim)
        self._update_display()
        for i in range(self.dims.ndim):
            self._update_range(i)
            if self._displayed_sliders[i]:
                self._update_slider(i)

    def _create_sliders(self, number_of_sliders):
        """
        Creates sliders to match new number of dimensions

        Parameters
        ----------
        number_of_sliders : int
            New number of sliders.
        """
        # add extra sliders so that number_of_sliders are present
        # add to the beginning of the list
        for slider_num in range(self.nsliders, number_of_sliders):
            dim_axis = number_of_sliders - slider_num - 1
            slider = self._create_range_slider_widget(dim_axis)
            self.layout().addWidget(slider)
            self.sliders.insert(0, slider)
            self._displayed_sliders.insert(0, True)
        nsliders = np.sum(self._displayed_sliders)
        self.setMinimumHeight(nsliders * self.SLIDERHEIGHT)

    def _trim_sliders(self, number_of_sliders):
        """
        Trims number of dimensions to a lower number

        Parameters
        ----------
        number_of_sliders : int
            New number of sliders.
        """
        # remove extra sliders so that only number_of_sliders are left
        # remove from the beginning of the list
        for slider_num in range(number_of_sliders, self.nsliders):
            self._remove_slider(0)

    def _remove_slider(self, index):
        """
        Remove slider at index

        Parameters
        ----------
        index : int
            Index of slider to remove.
        """
        # remove particular slider
        slider = self.sliders.pop(index)
        self._displayed_sliders.pop(index)
        self.layout().removeWidget(slider)
        slider.deleteLater()
        nsliders = np.sum(self._displayed_sliders)
        self.setMinimumHeight(nsliders * self.SLIDERHEIGHT)
        self.last_used = None

    def _create_range_slider_widget(self, axis):
        """
        Creates a range slider widget for a given axis

        Parameters
        ----------
        axis : int
            Axis index.

        Returns
        -------
        slider : range slider
        """
        # BUG FIX (style): local was named `range`, shadowing the builtin.
        slider_range = self.dims.range[axis]
        # Set the maximum values of the range slider to be one step less than
        # the range of the layer as otherwise the slider can move beyond the
        # shape of the layer as the endpoint is included
        slider_range = (
            slider_range[0],
            slider_range[1] - slider_range[2],
            slider_range[2],
        )
        point = self.dims.point[axis]

        slider = QScrollBar(Qt.Horizontal)
        slider.setFocusPolicy(Qt.NoFocus)
        slider.setMinimum(slider_range[0])
        slider.setMaximum(slider_range[1])
        slider.setSingleStep(slider_range[2])
        slider.setPageStep(slider_range[2])
        slider.setValue(point)

        # Listener to be used for sending events back to model:
        def slider_change_listener(value):
            self.dims.set_point(axis, value)

        # linking the listener to the slider:
        slider.valueChanged.connect(slider_change_listener)

        def slider_focused_listener():
            self.last_used = self.sliders.index(slider)

        # linking focus listener to the last used:
        slider.sliderPressed.connect(slider_focused_listener)

        return slider

    def focus_up(self):
        """Shift focused dimension slider to be the next slider above."""
        displayed = list(np.nonzero(self._displayed_sliders)[0])
        if len(displayed) == 0:
            return

        if self.last_used is None:
            self.last_used = displayed[-1]
        else:
            index = (displayed.index(self.last_used) + 1) % len(displayed)
            self.last_used = displayed[index]

    def focus_down(self):
        """Shift focused dimension slider to be the next slider below."""
        displayed = list(np.nonzero(self._displayed_sliders)[0])
        if len(displayed) == 0:
            return

        if self.last_used is None:
            self.last_used = displayed[-1]
        else:
            index = (displayed.index(self.last_used) - 1) % len(displayed)
            self.last_used = displayed[index]
| [
"qtpy.QtCore.Signal",
"qtpy.QtWidgets.QGridLayout",
"numpy.sum",
"numpy.nonzero",
"qtpy.QtWidgets.QScrollBar"
] | [((640, 648), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (646, 648), False, 'from qtpy.QtCore import Qt, Signal\n'), ((667, 678), 'qtpy.QtCore.Signal', 'Signal', (['int'], {}), '(int)\n', (673, 678), False, 'from qtpy.QtCore import Qt, Signal\n'), ((698, 709), 'qtpy.QtCore.Signal', 'Signal', (['int'], {}), '(int)\n', (704, 709), False, 'from qtpy.QtCore import Qt, Signal\n'), ((731, 739), 'qtpy.QtCore.Signal', 'Signal', ([], {}), '()\n', (737, 739), False, 'from qtpy.QtCore import Qt, Signal\n'), ((1162, 1175), 'qtpy.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (1173, 1175), False, 'from qtpy.QtWidgets import QWidget, QGridLayout, QSizePolicy, QScrollBar\n'), ((5280, 5311), 'numpy.sum', 'np.sum', (['self._displayed_sliders'], {}), '(self._displayed_sliders)\n', (5286, 5311), True, 'import numpy as np\n'), ((5994, 6025), 'numpy.sum', 'np.sum', (['self._displayed_sliders'], {}), '(self._displayed_sliders)\n', (6000, 6025), True, 'import numpy as np\n'), ((8090, 8121), 'numpy.sum', 'np.sum', (['self._displayed_sliders'], {}), '(self._displayed_sliders)\n', (8096, 8121), True, 'import numpy as np\n'), ((8833, 8858), 'qtpy.QtWidgets.QScrollBar', 'QScrollBar', (['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', (8843, 8858), False, 'from qtpy.QtWidgets import QWidget, QGridLayout, QSizePolicy, QScrollBar\n'), ((7159, 7190), 'numpy.sum', 'np.sum', (['self._displayed_sliders'], {}), '(self._displayed_sliders)\n', (7165, 7190), True, 'import numpy as np\n'), ((9697, 9732), 'numpy.nonzero', 'np.nonzero', (['self._displayed_sliders'], {}), '(self._displayed_sliders)\n', (9707, 9732), True, 'import numpy as np\n'), ((10129, 10164), 'numpy.nonzero', 'np.nonzero', (['self._displayed_sliders'], {}), '(self._displayed_sliders)\n', (10139, 10164), True, 'import numpy as np\n')] |
from copy import deepcopy
import os
os.environ['LAMMPS_COMMAND'] = '/n/home08/xiey/lammps-16Mar18/src/lmp_mpi'
import sys
sys.path.append('../flare')
import datetime
import time
import multiprocessing as mp
from typing import List
import numpy as np
from flare.mff.mff import MappedForceField
from flare.otf import OTF
from flare.struc import Structure
import flare.gp as gp
from flare.gp import GaussianProcess
from flare.env import AtomicEnvironment
from flare.qe_util import run_espresso, parse_qe_input, \
qe_input_to_structure, parse_qe_forces
from flare import output
from ase import Atoms
from ase.calculators.eam import EAM
from ase.calculators.lammpsrun import LAMMPS
from ase.lattice.hexagonal import Graphene
from ase.build import make_supercell
class MFFOTF(OTF):
    """On-the-fly trainer (OTF) variant that predicts forces with a mapped
    force field (MFF) built from the Gaussian process.

    The MFF is (re)built from the current GP by ``train_mff``; force
    prediction then goes through ``self.mff.predict`` on each atomic
    environment instead of full GP regression.
    """

    def __init__(self, qe_input: str, dt: float, number_of_steps: int,
                 gp_model: gp.GaussianProcess, pw_loc: str,
                 std_tolerance_factor: float = 1,
                 prev_pos_init: np.ndarray = None, par: bool = False,
                 skip: int = 0, init_atoms: List[int] = None,
                 calculate_energy=False, output_name='otf_run.out',
                 backup_name='otf_run_backup.out',
                 max_atoms_added=None, freeze_hyps=False,
                 rescale_steps=None, rescale_temps=None, add_all=False,
                 no_cpus=1, use_mapping=True, non_mapping_steps=None,
                 l_bound=None, two_d=False,
                 grid_params: dict = None, struc_params: dict = None):
        # BUG FIX: the original signature used mutable defaults ([] / {}),
        # which are shared across instances -- and grid_params IS mutated in
        # train_mff.  Accept None and create fresh containers instead
        # (backward compatible: explicit arguments behave exactly as before).
        rescale_steps = [] if rescale_steps is None else rescale_steps
        rescale_temps = [] if rescale_temps is None else rescale_temps
        non_mapping_steps = [] if non_mapping_steps is None else non_mapping_steps
        grid_params = {} if grid_params is None else grid_params
        struc_params = {} if struc_params is None else struc_params

        super().__init__(qe_input, dt, number_of_steps,
                         gp_model, pw_loc,
                         std_tolerance_factor,
                         prev_pos_init, par,
                         skip, init_atoms,
                         calculate_energy, output_name,
                         backup_name,
                         max_atoms_added, freeze_hyps,
                         rescale_steps, rescale_temps, add_all,
                         no_cpus, use_mapping, non_mapping_steps,
                         l_bound, two_d)

        self.grid_params = grid_params
        self.struc_params = struc_params
        if par:
            self.pool = mp.Pool(processes=no_cpus)

    def predict_on_structure_par_mff(self):
        """Predict forces/stds for all atoms in self.atom_list in parallel
        using the mapped force field."""
        args_list = [(atom, self.structure, self.gp.cutoffs, self.mff)
                     for atom in self.atom_list]
        results = self.pool.starmap(predict_on_atom_mff, args_list)
        # BUG FIX: starmap results are ordered like args_list; the original
        # indexed results[atom], which is only correct when atom_list happens
        # to equal range(nat).
        for i, atom in enumerate(self.atom_list):
            force, stds, local_en = results[i]
            self.structure.forces[atom] = force
            self.structure.stds[atom] = stds
            self.local_energies[atom] = local_en
        self.structure.dft_forces = False

    def predict_on_structure_mff(self):  # changed
        """Assign forces/stds to self.structure using the mapped force field."""
        output.write_to_output('\npredict with mapping:\n', self.output_name)
        for n in range(self.structure.nat):
            chemenv = AtomicEnvironment(self.structure, n, self.gp.cutoffs)
            force, var = self.mff.predict(chemenv)
            self.structure.forces[n][:] = force
            self.structure.stds[n][:] = np.sqrt(np.absolute(var))
        self.structure.dft_forces = False

    def train_mff(self, skip=True):
        """(Re)build the mapped force field from the current GP model.

        Parameters
        ----------
        skip : bool
            If True, do not rebuild on steps listed in self.non_mapping_steps.
        """
        t0 = time.time()
        # extend the 2-/3-body grid lower bounds if the current minimal
        # interatomic distance has dropped below them
        if self.l_bound < self.grid_params['bounds_2'][0, 0]:
            self.grid_params['bounds_2'][0, 0] = self.l_bound - 0.01
            self.grid_params['bounds_3'][0, :2] = np.ones(2)*self.l_bound - 0.01
        if skip and (self.curr_step in self.non_mapping_steps):
            return 1

        # set svd rank based on the training set, grid number and threshold 1000
        train_size = len(self.gp.training_data)
        rank_2 = np.min([1000, self.grid_params['grid_num_2'], train_size*3])
        rank_3 = np.min([1000, self.grid_params['grid_num_3'][0]**3, train_size*3])
        self.grid_params['svd_rank_2'] = rank_2
        self.grid_params['svd_rank_3'] = rank_3

        output.write_to_output('\ntraining set size: {}\n'.format(train_size),
                               self.output_name)
        # BUG FIX: the next two calls previously omitted self.output_name and
        # therefore wrote to the default output file, unlike every other call
        # in this class.
        output.write_to_output('lower bound: {}\n'.format(self.l_bound),
                               self.output_name)
        output.write_to_output('mff l_bound: {}\n'.format(
                               self.grid_params['bounds_2'][0, 0]),
                               self.output_name)
        output.write_to_output('Constructing mapped force field...\n',
                               self.output_name)
        self.mff = MappedForceField(self.gp, self.grid_params, self.struc_params)
        output.write_to_output('building mapping time: {}'.format(time.time()-t0),
                               self.output_name)
        self.is_mff_built = True
# def run_dft(self):
# output.write_to_output('\nCalling Lammps...\n',
# self.output_name)
#
# # build ASE unitcell
# species = self.structure.species[0]
# positions = self.structure.positions
# nat = len(positions)
# cell = self.structure.cell
# symbols = species + str(nat)
# a = 2.46
# c = cell[2][2]
# unit_cell = Graphene(species, latticeconstant={'a':a,'c':c})
# multiplier = np.array([[6,0,0],[0,6,0],[0,0,1]])
# super_cell = make_supercell(unit_cell, multiplier)
# super_cell.positions = positions
# super_cell.cell = cell
#
# # calculate Lammps forces
# pot_path = '/n/home08/xiey/lammps-16Mar18/potentials/'
# parameters = {'pair_style': 'airebo 5.0',
# 'pair_coeff': ['* * '+pot_path+'CH.airebo C'],
# 'mass': ['* 12.0107']}
# files = [pot_path+'CH.airebo']
#
## calc = LAMMPS(keep_tmp_files=True, tmp_dir='lmp_tmp/', parameters=parameters, files=files)
# calc = LAMMPS(parameters=parameters, files=files)
# super_cell.set_calculator(calc)
#
# # calculate LAMMPS forces
# forces = super_cell.get_forces()
# self.structure.forces = forces
#
# # write wall time of DFT calculation
# self.dft_count += 1
# output.write_to_output('QE run complete.\n', self.output_name)
# time_curr = time.time() - self.start_time
# output.write_to_output('number of DFT calls: %i \n' % self.dft_count,
# self.output_name)
# output.write_to_output('wall time from start: %.2f s \n' % time_curr,
# self.output_name)
#
def predict_on_atom_mff(atom, structure, cutoffs, mff):
    """Predict force components and uncertainties for a single atom using a
    mapped force field.

    Returns
    -------
    (force_components, stds, local_energy) : local-energy prediction is not
        implemented for the MFF, so zero is reported.
    """
    env = AtomicEnvironment(structure, atom, cutoffs)
    # force components and their variances from the mapped force field
    force, var = mff.predict(env)
    stds = np.sqrt(np.absolute(var))
    local_energy = 0
    return force, stds, local_energy
| [
"numpy.ones",
"flare.env.AtomicEnvironment",
"numpy.absolute",
"flare.mff.mff.MappedForceField",
"flare.output.write_to_output",
"multiprocessing.Pool",
"numpy.min",
"sys.path.append",
"time.time"
] | [((122, 149), 'sys.path.append', 'sys.path.append', (['"""../flare"""'], {}), "('../flare')\n", (137, 149), False, 'import sys\n'), ((6503, 6546), 'flare.env.AtomicEnvironment', 'AtomicEnvironment', (['structure', 'atom', 'cutoffs'], {}), '(structure, atom, cutoffs)\n', (6520, 6546), False, 'from flare.env import AtomicEnvironment\n'), ((2852, 2923), 'flare.output.write_to_output', 'output.write_to_output', (['"""\npredict with mapping:\n"""', 'self.output_name'], {}), '("""\npredict with mapping:\n""", self.output_name)\n', (2874, 2923), False, 'from flare import output\n'), ((3299, 3310), 'time.time', 'time.time', ([], {}), '()\n', (3308, 3310), False, 'import time\n'), ((3754, 3816), 'numpy.min', 'np.min', (["[1000, self.grid_params['grid_num_2'], train_size * 3]"], {}), "([1000, self.grid_params['grid_num_2'], train_size * 3])\n", (3760, 3816), True, 'import numpy as np\n'), ((3832, 3902), 'numpy.min', 'np.min', (["[1000, self.grid_params['grid_num_3'][0] ** 3, train_size * 3]"], {}), "([1000, self.grid_params['grid_num_3'][0] ** 3, train_size * 3])\n", (3838, 3902), True, 'import numpy as np\n'), ((4307, 4392), 'flare.output.write_to_output', 'output.write_to_output', (['"""Constructing mapped force field...\n"""', 'self.output_name'], {}), "('Constructing mapped force field...\\n', self.output_name\n )\n", (4329, 4392), False, 'from flare import output\n'), ((4440, 4502), 'flare.mff.mff.MappedForceField', 'MappedForceField', (['self.gp', 'self.grid_params', 'self.struc_params'], {}), '(self.gp, self.grid_params, self.struc_params)\n', (4456, 4502), False, 'from flare.mff.mff import MappedForceField\n'), ((6677, 6693), 'numpy.absolute', 'np.absolute', (['var'], {}), '(var)\n', (6688, 6693), True, 'import numpy as np\n'), ((2218, 2244), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'no_cpus'}), '(processes=no_cpus)\n', (2225, 2244), True, 'import multiprocessing as mp\n'), ((2988, 3041), 'flare.env.AtomicEnvironment', 'AtomicEnvironment', 
(['self.structure', 'n', 'self.gp.cutoffs'], {}), '(self.structure, n, self.gp.cutoffs)\n', (3005, 3041), False, 'from flare.env import AtomicEnvironment\n'), ((3189, 3205), 'numpy.absolute', 'np.absolute', (['var'], {}), '(var)\n', (3200, 3205), True, 'import numpy as np\n'), ((3490, 3500), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (3497, 3500), True, 'import numpy as np\n'), ((4569, 4580), 'time.time', 'time.time', ([], {}), '()\n', (4578, 4580), False, 'import time\n')] |
"""Breast cancer whole slide image dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import csv
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
_URL = "http://spiechallenges.cloudapp.net/competitions/14#participate"
# BibTeX citation
_CITATION = """\
@article{peikari2017automatic,
title={Automatic cellularity assessment from \
post-treated breast surgical specimens},
author={<NAME> and <NAME> and \
<NAME> and <NAME>},
journal={Cytometry Part A},
volume={91},
number={11},
pages={1078--1087},
year={2017},
publisher={Wiley Online Library}
}
"""
_DESCRIPTION = """\
The dataset's training/validation set consists of \
2578 patches extracted from 96 breast cancer \
whole slide images (WSI). Each patch is labelled \
by a tumor cellularity score. The testing set \
contains 1121 patches from 25 WSIs. Labels for \
testing data are not provided by far. \
The dataset can be used to develop an automated method \
for evaluating cancer cellularity from \
histology patches extracted from WSIs. The method \
is aimed to increase reproducibility of cancer \
cellularity scores and enhance tumor burden assessment.
"""
_IMAGE_SHAPE = (512, 512, 3)
def _load_tif(path):
  """Read the TIF image at *path* and return it as an RGB numpy array."""
  with tf.io.gfile.GFile(path, "rb") as fp:
    # PIL is loaded lazily through tfds to keep it an optional dependency.
    pil_image = tfds.core.lazy_imports.PIL_Image.open(fp)
    rgb_image = pil_image.convert("RGB")
  return np.array(rgb_image)
class Breastpathq(tfds.core.GeneratorBasedBuilder):
  """Breast cancer whole slide image dataset.

  Requires a manual download: ``breastpathq.zip`` and ``breastpathq-test.zip``
  must be placed in ``dl_manager.manual_dir``.
  """

  # Set up version.
  VERSION = tfds.core.Version('0.1.0')

  def _info(self):
    """Returns the dataset metadata (features, supervised keys, citation)."""
    return tfds.core.DatasetInfo(
        builder=self,
        # This is the description that will appear on the datasets page.
        description=_DESCRIPTION,
        # Each example is a 512x512 patch image plus a cellularity score.
        features=tfds.features.FeaturesDict({
            "image": tfds.features.Image(),
            "label": tf.float32
        }),
        # Used when as_supervised=True in builder.as_dataset.
        supervised_keys=("image", "label"),
        # Homepage of the dataset for documentation.
        urls=[_URL],
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Returns SplitGenerators.

    Locates the manually downloaded archives, extracts them, and wires the
    per-split image directories and label CSVs into _generate_examples.

    Raises:
      AssertionError: if either archive is missing from the manual dir.
    """
    download_path = dl_manager.manual_dir
    archive_names = tf.io.gfile.listdir(download_path)
    train_file_list = [f for f in archive_names if 'breastpathq.zip' in f]
    test_file_list = [f for f in archive_names if 'breastpathq-test.zip' in f]
    if not train_file_list or not test_file_list:
      msg = ("You must download the dataset files manually and place them in: "
             + dl_manager.manual_dir
             + " as .zip files. See testing/test_data/fake_examples/breastpathq ")
      raise AssertionError(msg)
    train_dir = dl_manager.extract(os.path.join(download_path, train_file_list[0]))
    test_dir = dl_manager.extract(os.path.join(download_path, test_file_list[0]))
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            # These kwargs will be passed to _generate_examples.
            gen_kwargs={
                "images_dir_path": os.path.join(
                    train_dir, "breastpathq/datasets/train"),
                "labels": os.path.join(
                    train_dir, "breastpathq/datasets/train_labels.csv"),
            },
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.VALIDATION,
            # NOTE: validation labels ship inside the *test* archive.
            gen_kwargs={
                "images_dir_path": os.path.join(
                    train_dir, "breastpathq/datasets/validation"),
                "labels": os.path.join(
                    test_dir, "breastpathq-test/val_labels.csv"),
            }
        ),
    ]

  def _generate_examples(self, images_dir_path, labels):
    """Yields (key, example) tuples driven by the labels CSV.

    The key is "<slide>_<rid>", which also names the .tif patch on disk.
    """
    with tf.io.gfile.GFile(labels, "r") as f:
      for row in csv.DictReader(f):
        image_id = row['slide'] + '_' + row['rid']
        yield image_id, {
            "image": _load_tif(os.path.join(images_dir_path, image_id + '.tif')),
            'label': row['y'],
        }
| [
"csv.DictReader",
"tensorflow.io.gfile.GFile",
"os.path.join",
"tensorflow_datasets.core.Version",
"tensorflow.io.gfile.listdir",
"numpy.array",
"tensorflow_datasets.core.lazy_imports.PIL_Image.open",
"tensorflow_datasets.features.Image"
] | [((1446, 1463), 'numpy.array', 'np.array', (['rgb_img'], {}), '(rgb_img)\n', (1454, 1463), True, 'import numpy as np\n'), ((1600, 1626), 'tensorflow_datasets.core.Version', 'tfds.core.Version', (['"""0.1.0"""'], {}), "('0.1.0')\n", (1617, 1626), True, 'import tensorflow_datasets as tfds\n'), ((1311, 1340), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (1328, 1340), True, 'import tensorflow as tf\n'), ((1360, 1401), 'tensorflow_datasets.core.lazy_imports.PIL_Image.open', 'tfds.core.lazy_imports.PIL_Image.open', (['fp'], {}), '(fp)\n', (1397, 1401), True, 'import tensorflow_datasets as tfds\n'), ((3293, 3340), 'os.path.join', 'os.path.join', (['download_path', 'train_file_list[0]'], {}), '(download_path, train_file_list[0])\n', (3305, 3340), False, 'import os\n'), ((3376, 3422), 'os.path.join', 'os.path.join', (['download_path', 'test_file_list[0]'], {}), '(download_path, test_file_list[0])\n', (3388, 3422), False, 'import os\n'), ((4310, 4340), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['labels', '"""r"""'], {}), "(labels, 'r')\n", (4327, 4340), True, 'import tensorflow as tf\n'), ((4363, 4380), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (4377, 4380), False, 'import csv\n'), ((2826, 2860), 'tensorflow.io.gfile.listdir', 'tf.io.gfile.listdir', (['download_path'], {}), '(download_path)\n', (2845, 2860), True, 'import tensorflow as tf\n'), ((2935, 2969), 'tensorflow.io.gfile.listdir', 'tf.io.gfile.listdir', (['download_path'], {}), '(download_path)\n', (2954, 2969), True, 'import tensorflow as tf\n'), ((2045, 2066), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {}), '()\n', (2064, 2066), True, 'import tensorflow_datasets as tfds\n'), ((3629, 3682), 'os.path.join', 'os.path.join', (['train_dir', '"""breastpathq/datasets/train"""'], {}), "(train_dir, 'breastpathq/datasets/train')\n", (3641, 3682), False, 'import os\n'), ((3726, 3790), 'os.path.join', 'os.path.join', 
(['train_dir', '"""breastpathq/datasets/train_labels.csv"""'], {}), "(train_dir, 'breastpathq/datasets/train_labels.csv')\n", (3738, 3790), False, 'import os\n'), ((3962, 4020), 'os.path.join', 'os.path.join', (['train_dir', '"""breastpathq/datasets/validation"""'], {}), "(train_dir, 'breastpathq/datasets/validation')\n", (3974, 4020), False, 'import os\n'), ((4060, 4117), 'os.path.join', 'os.path.join', (['test_dir', '"""breastpathq-test/val_labels.csv"""'], {}), "(test_dir, 'breastpathq-test/val_labels.csv')\n", (4072, 4117), False, 'import os\n'), ((4511, 4559), 'os.path.join', 'os.path.join', (['images_dir_path', "(image_id + '.tif')"], {}), "(images_dir_path, image_id + '.tif')\n", (4523, 4559), False, 'import os\n')] |
import numpy as np
import os
import sys
from astropy.io import ascii
# list of targets
# Taurus sample (26 targets) with field-centre coordinates (deg).
# NOTE(review): names/ra_c/dec_c are reassigned three more times below;
# this block is effectively dead code -- only the last assignment
# (the Upper Sco sample) reaches the processing loop.
names = ['CZTau', 'DPTau', 'FQTau', 'FSTau', 'FVTau', 'FXTau', 'GGTau',
         'GHTau', 'GNTau', 'HLTau', 'Haro6-28', 'IRAS04260', 'IRAS04263',
         'IRAS04301', 'ITG40', 'J04202144', 'J04210795', 'J04231822',
         'J04333905', 'KPNO12', 'LR1', 'MHO3', 'RYTau', 'V410Xray2',
         'XEST26-062', 'XZTau']
ra_c = [64.63162786625362, 70.65708182828782, 64.80352616268286,
        65.5091354239521, 66.72308959934668, 67.62332089261605,
        68.12651341096947, 68.27590876082053, 69.83725498319842,
        67.9101541666667, 68.98693874461458, 67.2707940812198,
        67.3402554267156, 68.30987746224699, 70.3526833333333,
        65.0893458333333, 65.28321530344869, 65.82605101647816,
        68.41282112599453, 64.75533699347011, 64.6722166666667,
        63.6273185749781, 65.48924643785053, 64.6435291666667,
        73.98360897639743, 67.91702873667592]
dec_c = [28.2828227209915, 25.26027575848813, 28.492450465350792,
         26.958425295413747, 26.1149841650471, 24.44581144643576,
         17.52783912652535, 24.159341225291524, 25.750444749858417,
         18.23268055555554, 22.909951788325596, 26.818582514814786,
         27.023757986588464, 26.23977712228343, 25.73139722222222,
         28.23032499999997, 27.038915302599605, 26.687583575565935,
         22.455681672508604, 28.046707217304707, 28.45694722222222,
         28.087284950653235, 28.44309318336528, 28.508397222222,
         30.605679874626354, 18.23240067751559]
# Lupus sample (19 targets). NOTE(review): also overwritten below -- dead code.
names = ['J15430131', 'J15430227', 'J15450634', 'HNLup', 'J16011549',
         'J16070384', 'J16075475', 'J16080175', 'Sz102', 'J160831.1',
         'J16085828', 'J16085834', 'J16091644', 'J16092032', 'J16092317',
         'J160934.2', 'J16093928', 'J16102741', 'J16120445']
ra_c = [235.7554833333, 235.759478253698, 236.27642916666667,
        237.02173123203156, 240.31454999999997, 241.76596229447992,
        241.97812916666666, 242.0073291666667, 242.12383426467534,
        242.12958333333333, 242.2428625, 242.2431125,
        242.31852916666665, 242.3347, 242.34654583333332,
        242.39241666666666, 242.41370833333332, 242.6142375,
        243.01857925178248]
dec_c = [-34.15425, -34.73504725470962, -34.293841666666665,
         -35.264804211901755, -41.876441666666665, -39.18657754126502,
         -39.2623972222222, -39.20879444444445, -39.05315929850272,
         -38.93333333333333, -39.12654166666667, -39.130325,
         -39.078836111111116, -39.06709166666667, -39.06874166666667,
         -39.25352777777778, -39.07546944444445, -39.04166111111,
         -38.166361316856616]
# Chamaeleon sample (11 targets). NOTE(review): overwritten below -- dead code.
names = ['Sz4', 'J11062942', 'ISO91', 'VVCha', 'HKCha', 'VWCha', 'ESOHa-562',
         'J11082570', 'GlassQ', 'ESOHa-569', 'ESOHa-574']
ra_c = [164.42542005360383, 166.62260833333332, 166.78855000000001,
        166.8671197243853, 166.92643370348574, 167.00582256899153,
        167.01200784694313, 167.10710416666666, 167.6579205241828,
        167.7951375, 169.01151558854068]
dec_c = [-76.99323334434312, -77.4162888888889, -77.31309444444445,
         -76.87001173392946, -77.56649498687622, -77.70793869370792,
         -77.64515529165121, -77.27767222222222, -77.54440933293864,
         -76.69928611111112, -76.41478934894849]
# Upper Scorpius sample (14 targets) -- this is the set actually processed
# by the loop below, since it is the last assignment of names/ra_c/dec_c.
names = ['J15354856', 'UScoCTIO13', 'J16014086', 'J16020287', 'J16052661',
         'J16060061', 'RXJ1606', 'J16072747', 'J16082751', 'J16102857',
         'J16133650', 'J16135434', 'J16141107', 'J16181618']
ra_c = [233.95234425609885, 239.374425, 240.42022126212947,
        240.5117721539541, 241.36081974095615, 241.5024935452512,
        241.59141317598332, 241.86437572515374, 242.11466666666666,
        242.6190188009051, 243.40211207414208, 243.47633755845683,
        243.54609727086296, 244.56735784740243]
dec_c = [-29.982116847433613, -22.97884722222222, -22.969565461700306,
         -22.60405672979044, -19.951506865768344, -19.953213438432797,
         -19.479112117501167, -20.995691111395644, -19.817977777777777,
         -19.07980065741846, -25.06325855372467, -23.342944977147738,
         -23.093493657884192, -26.31901530276253]
# For each target: read its Gaia sources and compute a doubly weighted mean
# parallax (weights = inverse squared angular distance from the field centre
# times inverse parallax variance) plus its weighted RMS scatter.
for name, ra0, dec0 in zip(names, ra_c, dec_c):
    data = ascii.read('data/' + name + '_gaia.csv', format='csv',
                      fast_reader=True)
    # Angular offsets from the field centre; RA scaled by cos(dec).
    delta_ra = (ra0 - data['ra']) * np.cos(np.radians(dec0))
    delta_dec = dec0 - data['dec']
    weight_dist = 1. / (delta_ra ** 2 + delta_dec ** 2)
    weight_plx = 1. / (data['parallax_error'] ** 2)
    weights = weight_dist * weight_plx
    plx = np.average(data['parallax'], weights=weights)
    eplx = np.sqrt(np.average((data['parallax'] - plx) ** 2, weights=weights))
    print('%15s %7.4f %7.4f' % (name, plx, eplx))
| [
"numpy.radians",
"astropy.io.ascii.read",
"numpy.average"
] | [((4202, 4278), 'astropy.io.ascii.read', 'ascii.read', (["('data/' + names[i] + '_gaia.csv')"], {'format': '"""csv"""', 'fast_reader': '(True)'}), "('data/' + names[i] + '_gaia.csv', format='csv', fast_reader=True)\n", (4212, 4278), False, 'from astropy.io import ascii\n'), ((4476, 4525), 'numpy.average', 'np.average', (["data['parallax']"], {'weights': '(wgtd * wgtp)'}), "(data['parallax'], weights=wgtd * wgtp)\n", (4486, 4525), True, 'import numpy as np\n'), ((4543, 4605), 'numpy.average', 'np.average', (["((data['parallax'] - plx) ** 2)"], {'weights': '(wgtd * wgtp)'}), "((data['parallax'] - plx) ** 2, weights=wgtd * wgtp)\n", (4553, 4605), True, 'import numpy as np\n'), ((4336, 4356), 'numpy.radians', 'np.radians', (['dec_c[i]'], {}), '(dec_c[i])\n', (4346, 4356), True, 'import numpy as np\n')] |
from matplotlib import pyplot as pp
import numpy as np
import DNA_manipulations as DNAm
#Places labels on top of bars within bar graph
def autolabel(bins, plotType, integer = True):
    """Write each bar's height as a text label just above the bar.

    Parameters
    ----------
    bins : sequence of bar patches (objects exposing ``get_height``,
        ``get_x`` and ``get_width``), e.g. the container returned by
        ``Axes.bar``.
    plotType : object with a matplotlib-style ``text(x, y, s, ...)``
        method (typically an Axes) on which labels are drawn.
    integer : bool, if True label with the truncated integer height,
        otherwise with the height to two decimals.
    """
    if not bins:
        # Nothing to label; also avoids max() raising on an empty sequence.
        return
    # Offset each label by 2.5% of the tallest bar so it clears the bar top.
    offSet = max(b.get_height() for b in bins)
    for bar in bins:  # original code shadowed the 'bins' parameter here
        height = bar.get_height()
        label = '%d' % int(height) if integer else '%.2f' % height
        plotType.text(bar.get_x() + bar.get_width()/2., height + offSet*0.025,
                      label, ha='center', va='bottom')
# Importing training data: each CSV yields (sequences, recognition sites,
# insertion frequencies) for one gene. The recSite* values are unused here.
DNA1, recSite1, freq1 = DNAm.array("sixtyninemers_frequencies_GsAK_og.csv")
DNA2, recSite2, freq2 = DNAm.array("sixtyninemers_frequencies_TnAK_og.csv")
DNA3, recSite3, freq3 = DNAm.array("sixtyninemers_frequencies_BgAK_og.csv")
DNA4, recSite4, freq4 = DNAm.array("sixtyninemers_frequencies_BsAK_og.csv")
# Per-sequence positional GC content; GC_content returns a tuple and the
# positional profile is its second element (index 1).
GCContent1 = [DNAm.GC_content(seq, overhang = 12)[1] for seq in DNA1]
GCContent2 = [DNAm.GC_content(seq, overhang = 12)[1] for seq in DNA2]
GCContent3 = [DNAm.GC_content(seq, overhang = 12)[1] for seq in DNA3]
GCContent4 = [DNAm.GC_content(seq, overhang = 12)[1] for seq in DNA4]
# Pool all genes together, keeping freq[i] aligned with GCContent[i].
freq = freq1 + freq2 + freq3 + freq4
GCContent = GCContent1 + GCContent2 + GCContent3 + GCContent4
GCAverageAll = np.mean(GCContent, axis = 0)
GCGeneAverages = [np.mean(GCContent1), np.mean(GCContent2), np.mean(GCContent3), np.mean(GCContent4)]
# Sort GC profiles by insertion frequency (ascending) to compare extremes.
orderedGCContent = sorted(zip(freq, GCContent), key = lambda x: int(x[0]))
orderedGCContent = [x[1] for x in orderedGCContent]
l = len(orderedGCContent)
# Mean positional GC of the top and bottom 10% by insertion frequency.
averageGCTopTen = np.mean(orderedGCContent[l - int(l*0.1):l], axis = 0)
averageGCBottomTen = np.mean(orderedGCContent[0:int(l*0.1)], axis = 0)
# X-axis labels: 12 upstream positions, the 5 random positions N1-N5, then
# the 12 downstream positions in reverse.
# NOTE(review): this is Python 2 code -- range() must return a list for
# .reverse() and the list concatenations below to work; under Python 3 the
# .reverse() call raises AttributeError.
bpRange = range(-12,0)
reversebpRange = range(-12,0)
reversebpRange.reverse()
xlabels = bpRange + ['N1', 'N2', 'N3', 'N4', 'N5'] + reversebpRange
# NOTE(review): 'xrange' shadows the Python 2 builtin of the same name.
xrange = range(len(xlabels))
fig = pp.figure(figsize = (12, 7))
# Top panel: positional GC averaged over all sequences vs the top/bottom 10%.
ax = pp.subplot2grid((2,4), (0,0), colspan = 4)
ax.plot(GCAverageAll, label = 'Average GC', linewidth = 2, color = 'gray')
ax.plot(averageGCTopTen,label = 'Top 10% insert freq.', marker = '*')
ax.plot(averageGCBottomTen, label = 'Bottom 10% insert freq.', marker = '*')
ax.set_ylim([0,100])
ax.set_xticks(xrange)
ax.set_xticklabels(xlabels)
ax.margins(0.01)
ax.set_title("Average positional GC content")
ax.set_xlabel("Nucleotide position")
ax.set_ylabel("Average GC content (%)")
handles,labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, frameon = False)
# Bottom-left panel: mean GC content per gene as a labelled bar chart.
ax = pp.subplot2grid((2,4), (1,0))
n_bar = len(GCGeneAverages)
ind = np.arange(n_bar)
p1 = ax.bar(ind,GCGeneAverages, width = 0.8)
ax.set_ylim([0,100])
ax.set_xticks(range(len(GCGeneAverages)))
ax.set_xticklabels(["GsAK", "TnAK", "BgAK", "BsAK"])
ax.margins(0.05)
ax.set_title("Average gene GC content")
ax.set_xlabel("Gene")
ax.set_ylabel("Average GC content (%)")
autolabel(p1, ax, integer = False)
# Bottom-right panel: two hand-picked GsAK sequences (indices 92 and 0) as
# representative high- and low-frequency GC profiles.
ax = pp.subplot2grid((2,4), (1,1), colspan = 3)
ax.plot(GCContent1[92], label = 'High insert freq.', marker = '*')
ax.plot(GCContent1[0], label = 'Low insert freq.', marker = '*')
ax.set_ylim([0,100])
ax.set_xticks(range(len(xlabels)))
ax.set_xticklabels(xlabels)
ax.margins(0.01)
ax.set_title("Representative low and high freq GC content")
ax.set_xlabel("Nucleotide position")
handles,labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, frameon = False)
pp.tight_layout()
fig.savefig('GC Analysis.pdf')
| [
"numpy.mean",
"DNA_manipulations.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"DNA_manipulations.GC_content",
"matplotlib.pyplot.subplot2grid",
"numpy.arange"
] | [((697, 748), 'DNA_manipulations.array', 'DNAm.array', (['"""sixtyninemers_frequencies_GsAK_og.csv"""'], {}), "('sixtyninemers_frequencies_GsAK_og.csv')\n", (707, 748), True, 'import DNA_manipulations as DNAm\n'), ((773, 824), 'DNA_manipulations.array', 'DNAm.array', (['"""sixtyninemers_frequencies_TnAK_og.csv"""'], {}), "('sixtyninemers_frequencies_TnAK_og.csv')\n", (783, 824), True, 'import DNA_manipulations as DNAm\n'), ((849, 900), 'DNA_manipulations.array', 'DNAm.array', (['"""sixtyninemers_frequencies_BgAK_og.csv"""'], {}), "('sixtyninemers_frequencies_BgAK_og.csv')\n", (859, 900), True, 'import DNA_manipulations as DNAm\n'), ((925, 976), 'DNA_manipulations.array', 'DNAm.array', (['"""sixtyninemers_frequencies_BsAK_og.csv"""'], {}), "('sixtyninemers_frequencies_BsAK_og.csv')\n", (935, 976), True, 'import DNA_manipulations as DNAm\n'), ((1373, 1399), 'numpy.mean', 'np.mean', (['GCContent'], {'axis': '(0)'}), '(GCContent, axis=0)\n', (1380, 1399), True, 'import numpy as np\n'), ((1985, 2011), 'matplotlib.pyplot.figure', 'pp.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (1994, 2011), True, 'from matplotlib import pyplot as pp\n'), ((2019, 2061), 'matplotlib.pyplot.subplot2grid', 'pp.subplot2grid', (['(2, 4)', '(0, 0)'], {'colspan': '(4)'}), '((2, 4), (0, 0), colspan=4)\n', (2034, 2061), True, 'from matplotlib import pyplot as pp\n'), ((2593, 2624), 'matplotlib.pyplot.subplot2grid', 'pp.subplot2grid', (['(2, 4)', '(1, 0)'], {}), '((2, 4), (1, 0))\n', (2608, 2624), True, 'from matplotlib import pyplot as pp\n'), ((2657, 2673), 'numpy.arange', 'np.arange', (['n_bar'], {}), '(n_bar)\n', (2666, 2673), True, 'import numpy as np\n'), ((2998, 3040), 'matplotlib.pyplot.subplot2grid', 'pp.subplot2grid', (['(2, 4)', '(1, 1)'], {'colspan': '(3)'}), '((2, 4), (1, 1), colspan=3)\n', (3013, 3040), True, 'from matplotlib import pyplot as pp\n'), ((3466, 3483), 'matplotlib.pyplot.tight_layout', 'pp.tight_layout', ([], {}), '()\n', (3481, 3483), True, 'from 
matplotlib import pyplot as pp\n'), ((1420, 1439), 'numpy.mean', 'np.mean', (['GCContent1'], {}), '(GCContent1)\n', (1427, 1439), True, 'import numpy as np\n'), ((1441, 1460), 'numpy.mean', 'np.mean', (['GCContent2'], {}), '(GCContent2)\n', (1448, 1460), True, 'import numpy as np\n'), ((1462, 1481), 'numpy.mean', 'np.mean', (['GCContent3'], {}), '(GCContent3)\n', (1469, 1481), True, 'import numpy as np\n'), ((1483, 1502), 'numpy.mean', 'np.mean', (['GCContent4'], {}), '(GCContent4)\n', (1490, 1502), True, 'import numpy as np\n'), ((992, 1025), 'DNA_manipulations.GC_content', 'DNAm.GC_content', (['seq'], {'overhang': '(12)'}), '(seq, overhang=12)\n', (1007, 1025), True, 'import DNA_manipulations as DNAm\n'), ((1062, 1095), 'DNA_manipulations.GC_content', 'DNAm.GC_content', (['seq'], {'overhang': '(12)'}), '(seq, overhang=12)\n', (1077, 1095), True, 'import DNA_manipulations as DNAm\n'), ((1132, 1165), 'DNA_manipulations.GC_content', 'DNAm.GC_content', (['seq'], {'overhang': '(12)'}), '(seq, overhang=12)\n', (1147, 1165), True, 'import DNA_manipulations as DNAm\n'), ((1202, 1235), 'DNA_manipulations.GC_content', 'DNAm.GC_content', (['seq'], {'overhang': '(12)'}), '(seq, overhang=12)\n', (1217, 1235), True, 'import DNA_manipulations as DNAm\n')] |
import numpy as np
import pandas as pd
import sys
sys.path.append('../dsbase/src/main')
from sklearn.model_selection import train_test_split
from ModelDSBase import ModelDSBaseWrapper
from AdaBoostClassificationDSBase import AdaBoostClassificationDSBaseModelParamsToMap
from AdaBoostClassificationDSBase import AdaBoostClassificationDSBaseModel
from utils.utils import getVector
def train_test(fold_id, df):
    """Smoke-test stand-in for train(): logs progress, writes a placeholder
    array under models/fold<fold_id>/ and returns a fixed dummy result."""
    print(f'Initiating training of fold {fold_id} ...')
    print(' size: ' + str(df.shape))
    target_file = 'models/fold' + str(fold_id) + "/test.sav.npy"
    placeholder = np.array(['pepo', 'te', 'migo', 'micci'])
    np.save(target_file, placeholder)
    print(f'Training of fold {fold_id} finalized!')
    return (0.6455, np.zeros((2, 3)))
def train(fold_id, df):
    """Train an AdaBoost classifier on one fold and persist the model.

    Note: ``df`` is modified in place (the 'HasDetections' column is
    dropped). Returns a (score, learning_curves) tuple.
    """
    print(f'Initiating training of fold {fold_id} ...')
    model_dir = 'models/fold' + str(fold_id)
    # Separate the target column from the features (in-place drop).
    target = df['HasDetections']
    df.drop(labels=['HasDetections'], axis=1, inplace=True)
    # Build and fit the wrapped AdaBoost model.
    model_params = AdaBoostClassificationDSBaseModelParamsToMap(100, 1.0)
    wrapper = ModelDSBaseWrapper('AB', df.values, target.values, [30, 65, 100],
                                0.3, AdaBoostClassificationDSBaseModel,
                                model_params, splitter=train_test_split)
    wrapper.train()
    # Gather diagnostics, then persist the trained model.
    learning_curves = wrapper.getLearningCurves()
    score = wrapper.getScore()
    wrapper.save(model_dir)
    print(f'Training of fold {fold_id} finalized!')
    return (score, learning_curves)
def saveColumnsCategorical(fold_id, df, columns_categorical):
    """Persist the unique values of each categorical column for this fold,
    one .npy file per column under models/fold<fold_id>/."""
    folder = 'models/fold' + str(fold_id)
    for column in columns_categorical:
        unique_values = df[column].unique()
        np.save(folder + '/' + str(column) + '.sav.npy', unique_values)
def loadColumnsCategorical(fold_id, df, columns_categorical):
    """Re-encode the categorical columns of ``df`` with the per-fold vectors
    saved by saveColumnsCategorical, and return a flattened DataFrame.

    Fix: the original computed ``in_path`` but never used it, rebuilding the
    same path inline in np.load; the path is now built once.
    """
    in_path = 'models/fold' + str(fold_id)
    # Wrap every cell in a one-element list so categorical cells can later be
    # replaced by (possibly multi-element) vectors before concatenation.
    df_aux = pd.DataFrame([list(map(lambda x: [x], row)) for row in df.values], columns=df.columns)
    for c in columns_categorical:
        print(' column "' + c + '" transformation ...')
        # Load the unique values saved for this fold and expand each cell
        # into its vector encoding (getVector presumably one-hot encodes
        # against vec -- TODO confirm against utils.utils.getVector).
        vec = np.load(in_path + "/" + c + ".sav.npy")
        df_aux[c] = df_aux[c].apply(lambda x: getVector(x[0], vec))
    # Flatten each row's per-cell lists/vectors into one flat numeric row.
    return pd.DataFrame([np.concatenate(row) for row in df_aux.values])
| [
"ModelDSBase.ModelDSBaseWrapper",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"utils.utils.getVector",
"AdaBoostClassificationDSBase.AdaBoostClassificationDSBaseModelParamsToMap",
"sys.path.append"
] | [((50, 87), 'sys.path.append', 'sys.path.append', (['"""../dsbase/src/main"""'], {}), "('../dsbase/src/main')\n", (65, 87), False, 'import sys\n'), ((997, 1051), 'AdaBoostClassificationDSBase.AdaBoostClassificationDSBaseModelParamsToMap', 'AdaBoostClassificationDSBaseModelParamsToMap', (['(100)', '(1.0)'], {}), '(100, 1.0)\n', (1041, 1051), False, 'from AdaBoostClassificationDSBase import AdaBoostClassificationDSBaseModelParamsToMap\n'), ((1058, 1200), 'ModelDSBase.ModelDSBaseWrapper', 'ModelDSBaseWrapper', (['"""AB"""', 'df.values', 'df_y.values', '[30, 65, 100]', '(0.3)', 'AdaBoostClassificationDSBaseModel', 'params'], {'splitter': 'train_test_split'}), "('AB', df.values, df_y.values, [30, 65, 100], 0.3,\n AdaBoostClassificationDSBaseModel, params, splitter=train_test_split)\n", (1076, 1200), False, 'from ModelDSBase import ModelDSBaseWrapper\n'), ((585, 626), 'numpy.array', 'np.array', (["['pepo', 'te', 'migo', 'micci']"], {}), "(['pepo', 'te', 'migo', 'micci'])\n", (593, 626), True, 'import numpy as np\n'), ((701, 717), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (709, 717), True, 'import numpy as np\n'), ((2119, 2138), 'numpy.concatenate', 'np.concatenate', (['row'], {}), '(row)\n', (2133, 2138), True, 'import numpy as np\n'), ((2073, 2093), 'utils.utils.getVector', 'getVector', (['x[0]', 'vec'], {}), '(x[0], vec)\n', (2082, 2093), False, 'from utils.utils import getVector\n')] |
__author__ = 'Chronis'
from pySLM.definitions import SLM
import numpy as np
from tkinter import _setit
import PIL
from astropy.io import fits
import pygame, os, time, pickle
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import threading
import matplotlib.image as mplimg
from matplotlib.colors import Normalize
from matplotlib import cm
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
def cart2pol(x, y):
    """
    Convert cartesian (2D) coordinates (x, y) into polar form.

    Returns the tuple (rho, phi) where rho is the radial distance and
    phi is the angle in radians measured from the positive x-axis.
    """
    radius = np.sqrt(x ** 2 + y ** 2)
    angle = np.arctan2(y, x)
    return (radius, angle)
class dummyClass:
    """Stand-in for the real SLM device, used when no hardware is connected.

    Mirrors the attribute interface of pySLM's SLM object with fixed values
    for a 1024x768 panel so the GUI can run without hardware.
    """

    def __init__(self):
        print('Dummy class')
        width, height = 1024, 768
        self.maps = {'zero': np.zeros((width, height, 3))}
        self.SLM_type = 'None'
        self.pixelSize = 8
        self.dimensions = (width, height, 3)
        self.width = width
        self.height = height
        self.size = (width, height)
class StdoutRedirector(object):
    """
    File-like object that forwards everything written to it into a text
    widget, so print() output can be embedded in the GUI.
    """

    def __init__(self, text_widget):
        self.text_space = text_widget

    def write(self, string):
        # Append at the end of the widget and scroll the newest text into view.
        self.text_space.insert('end', string)
        self.text_space.see('end')

    def flush(self):
        # Present only to satisfy the file protocol; nothing is buffered.
        pass
def array2PIL(arr, size):
    """Convert a numpy image array into an RGBA PIL image.

    Parameters
    ----------
    arr : ndarray of shape (w, h, channels), uint8; if only 3 channels
        (RGB) are present a fully opaque alpha channel is appended.
    size : (width, height) tuple forwarded to PIL.

    Returns
    -------
    PIL.Image in 'RGBA' mode sharing the array's pixel data.
    """
    mode = 'RGBA'
    arr = arr.reshape(arr.shape[0]*arr.shape[1], arr.shape[2])
    if len(arr[0]) == 3:
        # Append an opaque (255) alpha channel to RGB data.
        arr = np.c_[arr, 255*np.ones((len(arr), 1), np.uint8)]
    # tobytes() replaces ndarray.tostring(), which was deprecated and then
    # removed in NumPy 2.0.
    return PIL.Image.frombuffer(mode, size, arr.tobytes(), 'raw', mode, 0, 1)
class DropMenu:
    """
    DropMenu is a widget that will contain various functionalities of a menu
    (File/Settings/Tools/Help cascades) attached to the main SLM window.
    """
    def __init__(self, master, window):
        # master: the Tk root; window: the SLMViewer instance driving the GUI.
        # Create dropdown menu
        self.path = os.getcwd()
        self.window = window
        self.master = master
        self.menu = Menu(self.master)
        self.master.config(menu=self.menu)
        # File Option************************************************
        self.FileMenu = Menu(self.menu)
        self.menu.add_cascade(label='File', menu=self.FileMenu)
        # NOTE(review): 'Open phase map' and 'Save weighting function' have
        # no command bound yet -- selecting them does nothing.
        self.FileMenu.add_command(label='Open phase map')
        self.FileMenu.add_command(label='Save as FITS', command=lambda: self.save_fits())
        self.FileMenu.add_command(label='Save weighting function')
        self.FileMenu.add_separator()
        self.FileMenu.add_command(label='Exit', command=self._quit)
        # Settings option***********************************************
        self.SettingsMenu = Menu(self.menu)
        self.menu.add_cascade(label='Settings', menu=self.SettingsMenu)
        self.SettingsMenu.add_command(label='Calibration curve', command=self.calibration_callback)
        self.SettingsMenu.add_command(label='Star info', command=self.star_info_callback)
        # Tools option**************************************************
        self.ToolMenu = Menu(self.menu)
        self.menu.add_cascade(label='Tools', menu=self.ToolMenu)
        self.ToolMenu.add_command(label='Count')
        self.ToolMenu.add_command(label='Histogram')
        # Help option ********************************************
        self.HelpMenu = Menu(self.menu)
        self.menu.add_cascade(label='Help', menu=self.HelpMenu)
        self.HelpMenu.add_command(label='Documentation')
        self.HelpMenu.add_command(label='App Help')
        # Variables **********************************************
        # Load the cached phase calibration curve; on ANY failure fall back
        # to asking the user for a .npy file and cache it.
        # NOTE(review): bare except hides unrelated errors, and the files
        # opened by pickle.load/pickle.dump are never explicitly closed.
        try:
            self.menu_data = pickle.load(open("SLM_data.p", 'rb'))
            self.phase_curve = self.menu_data['phase curve']
        except:
            file = filedialog.askopenfilename(title="Select phase curve(.npy)")
            phase = np.load(file)
            self.menu_data = {'phase curve': phase}
            self.phase_curve = phase
            pickle.dump(self.menu_data, open("SLM_data.p", 'wb'))
        # take data point from phase curve and fit a polynomial such that each phase shift value in radians
        # corresponds to a gray value. The inverse case gray->rad will just takes these data points
        p = np.polyfit(self.phase_curve, np.arange(0, 256), deg=3)
        self.rad_2_gray = np.poly1d(p)
        # size of SLM pixel in microns (um)
        self.slm_pxl = StringVar()
        # variables for SLM characteristics and system setup used in Multiple stars
        self.slm_pxl.set('36')
        self.intensity = StringVar()
        self.wavelength = StringVar()
        self.Fnum = StringVar()
        self.lD = StringVar()
        self.lD.set('4')

    def star_info_callback(self):
        """
        Opens a toplevel window with entries for the optical bench and SLM
        parameters (wavelength, F#, intensity, pixel size, pixels per l/D).
        :return:
        """
        toplevel_r = Toplevel()
        toplevel_r.title('Star info')
        toplevel_r.geometry("400x150+300+300")
        toplevel = ttk.Frame(toplevel_r)
        toplevel.grid(column=0, row=0, sticky=(N, W, E, S))
        # Defaults are reset every time the window opens.
        self.wavelength.set('633')
        wavelength_entry = Entry(toplevel, textvariable=self.wavelength,justify='center')
        wavelength_lab = Label(toplevel, text='Wavelength (nm):')
        self.Fnum.set('230')
        Fnum_entry = Entry(toplevel, textvariable=self.Fnum, justify='center')
        Fnum_lab = Label(toplevel, text='F # :')
        self.intensity.set('1')
        intensity_entry = Entry(toplevel, textvariable=self.intensity, justify='center')
        intensity_lab = Label(toplevel, text='Intensity :')
        """As discussed, here are the correct parameters for the coordinates conversion in the SLM plane :
        F# = 230
        Pixel_size = 36 um
        The spot size in the SLM plane right now is lambda*F# ~ 145 um ~ 4 pixels.
        """
        slm_pxl_lab = Label(toplevel, text='SLM pixel size (um):', justify='center')
        slm_pxl_entry = Entry(toplevel, textvariable=self.slm_pxl)
        lD_lab = Label(toplevel, text='#pixels per l/D:')
        lD_entry = Entry(toplevel, textvariable=self.lD)
        separator = ttk.Separator(toplevel, orient=VERTICAL)
        set_button = ttk.Button(toplevel, text='Set', command=self.apply_star_info)
        # Grid layout: optics parameters on the left, SLM geometry on the right.
        wavelength_lab.grid(column=0, row=0)
        wavelength_entry.grid(column=1, row=0)
        Fnum_lab.grid(column=0, row=1)
        Fnum_entry.grid(column=1, row=1)
        intensity_lab.grid(column=0, row=2)
        intensity_entry.grid(column=1, row=2)
        separator.grid(column=2, row=0, rowspan=3, sticky=(N, S))
        slm_pxl_lab.grid(column=3, row=0)
        slm_pxl_entry.grid(column=3, row=1)
        lD_lab.grid(column=3, row=2)
        lD_entry.grid(column=3, row=3)
        set_button.grid(column=0, row=4)

    def apply_star_info(self):
        # NOTE(review): not implemented yet -- the 'Set' button is a no-op.
        pass

    def calibration_callback(self):
        """
        Plots the current phase response curve and allows to select a new one
        :return:
        """
        toplevel_r = Toplevel()
        toplevel_r.title('Grayvalues calibration')
        toplevel_r.geometry("300x300+300+300")
        toplevel = ttk.Frame(toplevel_r)
        toplevel.grid(column=0, row=0, sticky=(N, W, E, S))
        # Phase shift (in pi radians) versus the 256 gray levels.
        self.curve_plot, self.ax = plt.subplots(figsize=(3,3))
        self.line = self.ax.plot(np.arange(256), self.phase_curve, 'o')
        self.ax.set_xlim([-1, 260])
        self.ax.set_xlabel("gray values")
        self.ax.set_ylabel("Phase shift[$\pi$]")
        data_plot = FigureCanvasTkAgg(self.curve_plot, master=toplevel)
        data_plot.show()
        import_curve_button = ttk.Button(toplevel, text='Import curve', command=self.import_curve_callback)
        import_curve_button.grid(column=0, row=2)
        data_plot.get_tk_widget().grid(column=1, row=2, columnspan=4, rowspan=4)
        return

    def import_curve_callback(self):
        """
        Used for insertion of new phase curve calibration curve. Expects an numpy array of length 256 corresponding to
        each grayvalue
        :return:
        """
        file = filedialog.askopenfilename(title="Select phase curve(.npy)")
        phase = np.load(file)
        self.menu_data = {'phase curve': phase}
        self.phase_curve = phase
        # Refresh the open calibration plot with the new curve.
        self.line[0].set_data(np.arange(256), phase)
        plt.draw()
        # Persist the new curve so it is reloaded on the next start.
        pickle.dump(self.menu_data, open("SLM_data.p", 'wb'))
        return

    def save_fits(self, name=None):
        """
        Save current open phase mask as a FITS file with the center information
        contained in the header
        """
        file = filedialog.asksaveasfilename(master=self.master, title='Save as..', initialdir=self.path)
        # NOTE(review): Tk's asksaveasfilename typically returns '' (not
        # None) on cancel, so this guard may not catch a cancelled dialog.
        if file is None:
            return
        # Remember the chosen directory as the default for next time.
        self.path = os.path.dirname(file)
        file += '.fits'
        # current = 0
        if name is None:
            current = self.window.maps_var.get()
        else:
            current = name
        if current == '':
            return
        mask = self.window.SLM.maps[current]['data']
        # If a map is currently active, save the live image instead.
        if self.window.active:
            mask = self.window.image
        hdu = fits.PrimaryHDU()
        # Only the first channel is stored; the three channels are presumably
        # identical grayscale planes -- TODO confirm.
        hdu.data = mask[:, :, 0]
        hdu.header['center'] = str(self.window.center_position)
        if len(self.window.center_position) > 1:
            hdu.header['stars'] = str(self.window.multiple_star_position) + "\ ([l/D, azimuth]"
        """
        if mask['star info']:
            for k, val in mask['star info']:
                hdu.header[k] = val
        """
        hdu.header['DATE'] = time.strftime("%d/%m/%Y")
        hdu.writeto(file)
        return

    def _quit(self):
        # Shut down the SLM display first, then tear down the Tk app.
        self.window.SLM.quit()
        self.master.quit()  # stops mainloop
        self.master.destroy()
        return
class SLMViewer:
"""
Basic GUI that enables communication with SLM , on/off switch and
import/manipulation of phase maps
"""
    def __init__(self, root):
        """
        Build the full controller UI: SLM connection, phase-map canvas,
        map import list, multi-star controls, notebook tabs (FQ/EO, Vortex,
        Multiple, Zernike, Phase shift), gray-value controls and a text
        console that captures stdout.
        :param root: Tk root window
        """
        self.master = Frame(root)
        self.master.grid(column=0, row=0, sticky=(N, W, E, S))
        root.title('SLM Controller')
        # fall back to a dummy object when no hardware is connected
        try:
            self.SLM = SLM()
            print("SLM type is %s"%self.SLM.SLM_type)
        except UserWarning:
            self.SLM = dummyClass()
            #raise UserWarning('No SLM connected.')
        self.menu = DropMenu(root, self) # add drop-down menu
        #self.SLM.pixelSize = int(self.menu.slm_pxl.get())
        # =====================================================================================
        # make canvas
        self.off_image = np.zeros(self.SLM.dimensions)
        self.image = self.off_image
        self.fig, self.ax = plt.subplots()
        self.norm = Normalize(vmin=0, vmax=255)
        self.cmap = cm.gray
        self.im = plt.imshow(self.image[:, :, 0].T, cmap=self.cmap, norm=self.norm)
        self.ax.get_xaxis().set_visible(False)
        self.ax.get_yaxis().set_visible(False)
        # get image plot onto canvas and app
        self.data_plot = FigureCanvasTkAgg(self.fig, master=self.master)
        self.data_plot.get_tk_widget().configure(borderwidth=0)
        self.fig.suptitle('SLM type : %s'%self.SLM.SLM_type, fontsize=12, fontweight='bold')
        self.data_plot.show()
        self.fig.canvas.mpl_connect('button_press_event', self.click_callback)
        # ====================================================================================
        # import phase maps frame
        self.import_maps_frame = ttk.LabelFrame(self.master, text='Phase maps')
        self.import_map_button = ttk.Button(self.import_maps_frame,
                                            text='Import map', command=self.import_map_callback)
        self.clear_list_button = ttk.Button(self.import_maps_frame, text='Clear', command=self.clear_maps)
        self.maps_var = StringVar()
        self.maps_var.set('')
        if len(self.SLM.maps) > 0:
            self.maps = [m for m in self.SLM.maps]
        else:
            self.maps = ['Zeros']
        self.maps_options = OptionMenu(self.import_maps_frame, self.maps_var, *self.maps)
        self.maps_options.grid(column=0, row=0)
        self.import_map_button.grid(column=1, row=0)
        self.clear_list_button.grid(column=1, row=1)
        # ============================================================================================
        # Set up center(s) position
        # =============================================================================================
        # default mouse position for center is center of SLM
        self.mouse_coordinates = (int(self.SLM.width/2), int(self.SLM.height/2))
        self.center_position = [[int(self.SLM.width/2), int(self.SLM.height/2)]]
        self.plot_update()
        self.center_step = 1
        # =============================================================================================
        # Phase mask activation/de-activation
        # =============================================================================================
        self.active_frame = LabelFrame(self.master, text='Activate')
        self.active_var = StringVar()
        self.active_var.set('OFF')
        self.activation_button = Button(self.active_frame, textvariable=self.active_var,
                                        command=self.activate, bg='firebrick2')
        self.activation_button.grid(column=0, row=0)
        self.active = False
        # ==========================================================================================
        # OPTIONS FRAME
        # ==========================================================================================
        self.notebook = ttk.Notebook(self.master)
        self.fqpm_frame = Frame(self.notebook)
        self.vortex_frame = Frame(self.notebook)
        self.multiple_frame = Frame(self.notebook)
        self.zernike_frame = Frame(self.notebook)
        self.rotate_frame = Frame(self.notebook)
        self.notebook.add(self.fqpm_frame, text='FQ/EO')
        self.notebook.add(self.vortex_frame, text='Vortex')
        self.notebook.add(self.multiple_frame, text='Multiple')
        self.notebook.add(self.zernike_frame, text='Zernike')
        self.notebook.add(self.rotate_frame, text='Phase shift')
        self.notebook.grid()
        # ===========================================================================================
        # Star info in multiple star frame
        # ===========================================================================================
        self.stars_frame = ttk.LabelFrame(self.multiple_frame, text='Stars')
        self.star_1 = Label(self.stars_frame, text='Star 1')
        self.star_2 = Label(self.stars_frame, text='Star 2', state=DISABLED)
        self.star_3 = Label(self.stars_frame, text='Star 3', state=DISABLED)
        self.star_1.grid(column=0, row=1)
        self.star_2.grid(column=0, row=2)
        self.star_3.grid(column=0, row=3)
        I_lab = ttk.Label(self.stars_frame, text='Intensity', width=10)
        magn_lab = ttk.Label(self.stars_frame, text='Magnitude', width=10)
        l_lab = ttk.Label(self.stars_frame, text='Wavelength(nm)', width=10)
        F_lab = ttk.Label(self.stars_frame, text='F #', width=10)
        lD_lab = ttk.Label(self.stars_frame, text='l/D', width=10)
        phi_lab= ttk.Label(self.stars_frame, text='phi(pi)', width=10)
        C_lab = ttk.Label(self.stars_frame, text='Center(x,y)', width=10)
        magn_lab.grid(column=1, row=0)
        I_lab.grid(column=2, row=0)
        l_lab.grid(column=3, row=0)
        F_lab.grid(column=4, row=0)
        lD_lab.grid(column=5, row=0)
        phi_lab.grid(column=6, row=0)
        C_lab.grid(column=7, row=0)
        # 1st star -- always visible
        self.M1 = StringVar()
        self.M1.set('0')
        M1_entry = ttk.Entry(self.stars_frame, textvariable=self.M1, width=10)
        M1_entry.grid(column=1, row=1)
        self.I1_num = StringVar()
        self.I1_num.set('1')
        self.I1_entry = ttk.Entry(self.stars_frame, textvariable=self.I1_num, width=10)
        self.I1_entry.grid(column=2, row=1)
        self.l1_num = StringVar()
        self.l1_num.set('633')
        self.l1_entry = ttk.Entry(self.stars_frame, textvariable=self.l1_num, width=10)
        self.l1_entry.grid(column=3, row=1)
        self.F1_num = StringVar()
        self.F1_num.set('230')
        self.F1_entry = ttk.Entry(self.stars_frame, textvariable=self.F1_num, width=10)
        self.F1_entry.grid(column=4, row=1)
        self.starc1 = StringVar()
        self.starc1.set('%i,%i' % (int(self.SLM.width/2), int(self.SLM.height/2)))
        self.center1_lab = Entry(self.stars_frame, textvariable=self.starc1, width=10)
        self.center1_lab.grid(column=7, row=1)
        # star 2
        self.M2 = StringVar()
        self.M2.set('0')
        self.M2_entry = ttk.Entry(self.stars_frame, textvariable=self.M2,
                                  width=10, state=DISABLED)
        self.M2_entry.grid(column=1, row=2)
        self.M2_entry.bind("<Return>", self.magnitude_to_intensity)
        self.I2_num = StringVar()
        self.I2_num.set('1')
        self.I2_entry = ttk.Entry(self.stars_frame, textvariable=self.I2_num,
                                  width=10, state=DISABLED)
        self.I2_entry.bind("<Return>", self.magnitude_to_intensity)
        self.I2_entry.grid(column=2, row=2)
        self.l2_num = StringVar()
        self.l2_num.set('633')
        self.l2_entry = ttk.Entry(self.stars_frame, textvariable=self.l2_num,
                                  width=10, state=DISABLED)
        self.l2_entry.grid(column=3, row=2)
        self.F2_num = StringVar()
        self.F2_num.set('230')
        self.F2_entry = ttk.Entry(self.stars_frame, textvariable=self.F2_num,
                                  width=10, state=DISABLED)
        self.F2_entry.grid(column=4, row=2)
        self.starc2 = StringVar()
        self.starc2.set('0,0')
        self.lD_star2 = StringVar()
        self.lD_star2.set('1')
        self.lD_star2_entry = Entry(self.stars_frame, textvariable=self.lD_star2,
                                    width=10, state=DISABLED)
        self.lD_star2_entry.grid(column=5, row=2)
        self.phi_star2 = StringVar()
        self.phi_star2.set('0')
        self.phi_star2_entry = Entry(self.stars_frame, textvariable=self.phi_star2,
                                     width=10, state=DISABLED)
        self.phi_star2_entry.grid(column=6, row=2)
        self.center2_lab = Entry(self.stars_frame, textvariable=self.starc2,
                                 width=10, state=DISABLED)
        self.center2_lab.grid(column=7, row=2)
        self.center2_lab.bind("<Return>", self.l_over_D_callback)
        # star 3
        self.M3 = StringVar()
        self.M3.set('0')
        self.M3_entry = ttk.Entry(self.stars_frame, textvariable=self.M3,
                                  width=10, state=DISABLED)
        self.M3_entry.grid(column=1, row=3)
        self.M3_entry.bind("<Return>", self.magnitude_to_intensity)
        self.I3_num = StringVar()
        self.I3_num.set('1')
        self.I3_entry = ttk.Entry(self.stars_frame, textvariable=self.I3_num,
                                  width=10, state=DISABLED)
        self.I3_entry.grid(column=2, row=3)
        self.I3_entry.bind("<Return>", self.magnitude_to_intensity)
        self.l3_num = StringVar()
        self.l3_num.set('633')
        self.l3_entry = ttk.Entry(self.stars_frame, textvariable=self.l3_num,
                                  width=10, state=DISABLED)
        self.l3_entry.grid(column=3, row=3)
        self.F3_num = StringVar()
        self.F3_num.set('230')
        self.F3_entry = ttk.Entry(self.stars_frame, textvariable=self.F3_num,
                                  width=10, state=DISABLED)
        self.F3_entry.grid(column=4, row=3)
        self.starc3 = StringVar()
        self.starc3.set('0,0')
        self.lD_star3 = StringVar()
        self.lD_star3.set('1')
        self.lD_star3_entry = Entry(self.stars_frame, textvariable=self.lD_star3,
                                    width=10, state=DISABLED)
        self.lD_star3_entry.grid(column=5, row=3)
        self.phi_star3 = StringVar()
        self.phi_star3.set('0')
        self.phi_star3_entry = Entry(self.stars_frame, textvariable=self.phi_star3,
                                     width=10, state=DISABLED)
        self.phi_star3_entry.grid(column=6, row=3)
        self.center3_lab = Entry(self.stars_frame, textvariable=self.starc3,
                                 width=10, state=DISABLED)
        self.center3_lab.grid(column=7, row=3)
        self.center3_lab.bind("<Return>", self.l_over_D_callback)
        # ============================================================================================
        # FQPM and EOPM frame
        # ============================================================================================
        self.center1_lab_fqpm = Entry(self.fqpm_frame, textvariable=self.starc1)
        self.center1_lab_fqpm.grid(column=4, row=0)
        self.single_button = ttk.Button(self.fqpm_frame, text='Make map',
                                        command=lambda: self.make_map('single'))
        self.single_button.grid(column=0, row=0)
        map_types = ['FQPM', 'EOPM', 'FLAT']
        self.map_type_var = StringVar()
        self.map_type_var.set('FQPM')
        self.map_type_menu = OptionMenu(self.fqpm_frame, self.map_type_var, *map_types)
        self.map_type_menu.grid(row=0, column=2)
        # =========================================================================================================
        # CONTROL FRAME
        # =========================================================================================================
        self.control_frame = ttk.LabelFrame(self.master, text='Center Controls')
        self.cstep_var = StringVar()
        self.cstep_var.set('1')
        self.center_step_entry = Entry(self.control_frame, textvariable=self.cstep_var, justify='center')
        self.center_step_entry.bind("<Return>", self.set_center_step)
        # arrow pad moving the mask center one step per click
        self.center_control_up = ttk.Button(self.control_frame, text='^', command=lambda: self.center_move('up', 0))
        self.center_control_down = ttk.Button(self.control_frame, text='v', command=lambda: self.center_move('down',0))
        self.center_control_left = ttk.Button(self.control_frame, text='<', command=lambda: self.center_move('left',0))
        self.center_control_right = ttk.Button(self.control_frame, text='>', command=lambda: self.center_move('right',0))
        self.center_control_up.grid(column=1, row=0)
        self.center_control_down.grid(column=1, row=2)
        self.center_control_left.grid(column=0, row=1)
        self.center_control_right.grid(column=2, row=1)
        self.center_step_entry.grid(column=1, row=1)
        self.center_num = ['1']
        self.center_var = StringVar()
        self.center_var.set('1')
        # Set gray values
        self.val_1 = 0
        self.val_2 = 1
        self.grayval_frame = ttk.LabelFrame(self.fqpm_frame, text='Gray values')
        self.gray_1_val = StringVar()
        self.gray_1_val.set('0')
        self.gray_1_entry = Entry(self.grayval_frame, textvariable=self.gray_1_val, justify='center')
        self.gray_1_entry.bind("<Return>", self.arrow_return)
        self.gray_1_entry.bind("<Up>", self.arrow_return)
        self.gray_1_entry.bind("<Down>", self.arrow_return)
        self.gray_1_entry.bind("<Left>", self.arrow_return)
        self.gray_1_entry.bind("<Right>", self.arrow_return)
        self.gray_2_val = StringVar()
        self.gray_2_val.set('0')
        self.gray_2_entry = Entry(self.grayval_frame, textvariable=self.gray_2_val, justify='center')
        self.gray_2_entry.bind("<Return>", self.arrow_return)
        self.gray_2_entry.bind("<Up>", self.arrow_return)
        self.gray_2_entry.bind("<Down>", self.arrow_return)
        self.gray_2_entry.bind("<Left>", self.arrow_return)
        self.gray_2_entry.bind("<Right>", self.arrow_return)
        self.gray_1_lab = ttk.Label(self.grayval_frame, text='Gray-value 1')
        self.gray_2_lab = ttk.Label(self.grayval_frame, text='Gray-value 2')
        self.phase_1_val = StringVar()
        self.phase_1_val.set('Phase: %.3f rad'%self.menu.phase_curve[int(self.gray_1_val.get())])
        self.phase_2_val = StringVar()
        self.phase_2_val.set('Phase: %.3f rad'%self.menu.phase_curve[int(self.gray_2_val.get())])
        self.phase_1_lab = ttk.Label(self.grayval_frame, textvariable=self.phase_1_val)
        self.phase_2_lab = ttk.Label(self.grayval_frame, textvariable=self.phase_2_val)
        self.gray_1_lab.grid(column=0, row=0)
        self.gray_2_lab.grid(column=0, row=1)
        self.gray_1_entry.grid(column=1, row=0)
        self.gray_2_entry.grid(column=1, row=1)
        self.phase_1_lab.grid(column=2, row=0)
        self.phase_2_lab.grid(column=2, row=1)
        # ============================================================================================
        # ZERNIKE TAB
        # ============================================================================================
        # implement various zernike terms which can be used to correct aberrations due to the SLM back-plate
        #DEFOCUS
        defocus_coeff_lab = ttk.Label(self.zernike_frame, text='Defocus:')
        defocus_coeff_lab.grid(column=0, row=0)
        self.defocus_coeff = DoubleVar()
        self.defocus_coeff.set(0)
        defocus_coeff_entry = Entry(self.zernike_frame, textvariable=self.defocus_coeff)
        defocus_coeff_entry.grid(column=1, row=0)
        #OBLIQUE ASTIGMATISM
        astigm_coeff_lab = ttk.Label(self.zernike_frame, text='Obliq. Astigmatism:')
        astigm_coeff_lab.grid(column=2, row=1)
        self.astigm_coeff = DoubleVar()
        self.astigm_coeff.set(0)
        astigm_coeff_entry = Entry(self.zernike_frame, textvariable=self.astigm_coeff)
        astigm_coeff_entry.grid(column=3, row=1)
        # VERTICAL ASTIGMATISM
        secastigm_coeff_lab = ttk.Label(self.zernike_frame, text='Vert. Astigmatism:')
        secastigm_coeff_lab.grid(column=0, row=1)
        self.secastigm_coeff = DoubleVar()
        self.secastigm_coeff.set(0)
        secastigm_coeff_entry = Entry(self.zernike_frame, textvariable=self.secastigm_coeff)
        secastigm_coeff_entry.grid(column=1, row=1)
        #TILT
        tilt_coeff_lab = ttk.Label(self.zernike_frame, text='Tilt:')
        tilt_coeff_lab.grid(column=2, row=2)
        self.tilt_coeff = DoubleVar()
        self.tilt_coeff.set(0)
        tilt_coeff_entry = Entry(self.zernike_frame, textvariable=self.tilt_coeff)
        tilt_coeff_entry.grid(column=3, row=2)
        #TIP
        tip_coeff_lab = ttk.Label(self.zernike_frame, text='Tip:')
        tip_coeff_lab.grid(column=0, row=2)
        self.tip_coeff = DoubleVar()
        self.tip_coeff.set(0)
        tip_coeff_entry = Entry(self.zernike_frame, textvariable=self.tip_coeff)
        tip_coeff_entry.grid(column=1, row=2)
        # X AND Y GRADIENTS
        xgrad_coeff_lab = ttk.Label(self.zernike_frame, text='X gradient:')
        xgrad_coeff_lab.grid(column=2, row=3)
        self.xgrad_coeff = DoubleVar()
        self.xgrad_coeff.set(0)
        xgrad_coeff_entry = Entry(self.zernike_frame, textvariable=self.xgrad_coeff)
        xgrad_coeff_entry.grid(column=3, row=3)
        ygrad_coeff_lab = ttk.Label(self.zernike_frame, text='Y gradient:')
        ygrad_coeff_lab.grid(column=0, row=3)
        self.ygrad_coeff = DoubleVar()
        self.ygrad_coeff.set(0)
        ygrad_coeff_entry = Entry(self.zernike_frame, textvariable=self.ygrad_coeff)
        ygrad_coeff_entry.grid(column=1, row=3)
        # Phase shift of the zernike correction
        zernike_range_lab = Label(self.zernike_frame, text='Phase shift of zernike')
        zernike_range_lab.grid(column=0, row=4)
        self.zernike_min = DoubleVar()
        self.zernike_min.set(0)
        zernike_min_entry = Entry(self.zernike_frame, textvariable=self.zernike_min)
        zernike_min_entry.grid(column=1, row=4)
        self.zernike_max = DoubleVar()
        self.zernike_max.set(1)
        zernike_max_entry = Entry(self.zernike_frame, textvariable=self.zernike_max)
        zernike_max_entry.grid(column=2, row=4)
        # Apply zernike corrections to the phase mask currently active or selected
        apply_zernike = ttk.Button(self.zernike_frame, text='Apply', command=self.apply_zernike)
        apply_zernike.grid(column=4, row=0)
        # functions implementing the various zernike polynomials
        self.Defocus = lambda r: np.sqrt(3)*(2*r**2)
        self.Astigm = lambda r, theta: np.sqrt(6)*(r**2)*np.sin(2*theta)
        self.VertAstigm = lambda r, theta: np.sqrt(6) * (r ** 2) * np.cos(2 * theta)
        self.SecAstigm = lambda r, theta: np.sqrt(10)*(4*r**4-3*r**3)*np.sin(2*theta)
        self.XGrad = lambda x: abs(x)
        self.YGrad = lambda y: abs(y)
        self.Tip = lambda r, theta: 2*r*np.cos(theta)
        self.Tilt = lambda r, theta: 2*r*np.sin(theta)
        # mesh grid used to create the 2d zernike polynomials in cartesian and polar coordinates
        self.xx, self.yy = np.meshgrid(np.arange(-self.SLM.width/2, self.SLM.width/2),
                                         np.arange(-self.SLM.height/2, self.SLM.height/2))
        self.R, self.Theta = cart2pol(self.xx, self.yy)
        # zernike_gray1_lab = Label(self.zernike_frame, text='Gray1')
        # zernike_gray1_lab.grid(column=0, row=3)
        # self.zernike_gray1 = IntVar()
        # self.zernike_gray1.set(85)
        # zernike_gray1_entry = Entry(self.zernike_frame, textvariable=self.zernike_gray1)
        # zernike_gray1_entry.grid(column=1, row=3)
        #
        # zernike_gray2_lab = Label(self.zernike_frame, text='Gray2')
        # zernike_gray2_lab.grid(column=0, row=4)
        # self.zernike_gray2 = IntVar()
        # self.zernike_gray2.set(255)
        # zernike_gray2_entry = Entry(self.zernike_frame, textvariable=self.zernike_gray2)
        # zernike_gray2_entry.grid(column=1, row=4)
        self.zernike_gray1_old = 85
        self.zernike_gray2_old = 255
        # ======================================================================================
        self.grayval_frame.grid(column=0, row=1, columnspan=5)
        self.control_frame.grid(column=0, row=2, columnspan=5)
        # ======================================================================================
        # Multiple sources
        # ======================================================================================
        # Pack star center vars together for easy access
        self.center_labels = [self.starc1, self.starc2, self.starc3]
        # make frame where a binary star map or triple star map can be created
        # binary phase map using airy pattern distribution for each star
        self.binary_frame = ttk.Frame(self.multiple_frame)
        self.binary_button = ttk.Button(self.binary_frame, text='Binary',
                                        command=lambda: self.make_map('binary'), state=DISABLED)
        self.binary_button.grid(column=1, row=1)
        self.checkbox_val = IntVar()
        binary_checkbox = Checkbutton(self.binary_frame, text='Save map', variable=self.checkbox_val)
        binary_checkbox.grid(column=3, row=1)
        self.tertiary_button = ttk.Button(self.binary_frame, text='Tertiary star',
                                          command=lambda: self.make_map('triple'), state=DISABLED)
        self.tertiary_button.grid(column=2, row=1)
        self.new_map_name = StringVar()
        self.new_map_name.set('Map name')
        self.new_map_name_entry = Entry(self.binary_frame, textvariable=self.new_map_name)
        self.new_map_name_entry_single = Entry(self.fqpm_frame, textvariable=self.new_map_name)
        self.new_map_name_entry_single.grid(column=3, row=0)
        self.new_map_name_entry.grid(column=0, row=1)
        self.save_filetypes = [('Windows Bitmap', '*.bmp'), ('Text File', '*.txt'), ('Fits File', '*.fits')]
        add_center_button = ttk.Button(self.binary_frame, text='Add', command=self.add_center)
        add_center_button.grid(column=0, row=0)
        self.centers_options = OptionMenu(self.binary_frame, self.center_var, *self.center_num)
        self.centers_options.grid(column=1, row=0)
        self.stars_frame.grid(column=0, row=0)
        self.binary_frame.grid(column=0, row=2)
        # =====================================================================================================
        # Vortex tab
        # =====================================================================================================
        self.make_vortex = ttk.Button(self.vortex_frame, text='Make vortex',
                                      command=lambda: self.make_map('vortex'))
        self.make_vortex.grid(column=0, row=0)
        # charge of the vortex
        charge_lab = ttk.Label(self.vortex_frame, text='charge')
        charge_lab.grid(column=2, row=1)
        self.charge = IntVar()
        self.charge.set(2)
        self.charge_entry = Entry(self.vortex_frame, textvariable=self.charge, width=10)
        self.charge_entry.bind("<Return>", self.charge_callback)
        self.charge_entry.grid(column=3, row=1)
        # coordinates entry
        coordinates_lab = ttk.Label(self.vortex_frame, text='Coordinates')
        coordinates_lab.grid(column=0, row=1)
        self.vortex_coordinates = StringVar()
        self.vortex_coordinates.set('%i, %i' % (int(self.SLM.width/2), int(self.SLM.height/2)))
        self.vortex_coordinates_entry = Entry(self.vortex_frame, textvariable=self.vortex_coordinates, width=10)
        self.vortex_coordinates_entry.grid(column=1, row=1)
        # label indicating gray values
        gray_lab = ttk.Label(self.vortex_frame, text='Gray values')
        gray_lab.grid(column=1, row=3, columnspan=2)
        # gray value for the 0 pi phase
        gray0_lab = ttk.Label(self.vortex_frame, text='0:', width=10)
        gray0_lab.grid(column=0, row=4)
        self.gray0 = IntVar()
        self.gray0.set(0)
        self.gray0_entry = Entry(self.vortex_frame, textvariable=self.gray0, width=10)
        self.gray0_entry.grid(column=1, row=4)
        # gray value for 2pi phase
        gray2pi_lab = ttk.Label(self.vortex_frame, text='2pi:', width=10)
        gray2pi_lab.grid(column=2, row=4)
        self.gray2pi = IntVar()
        self.gray2pi.set(0)
        self.gray2pi_entry = Entry(self.vortex_frame, textvariable=self.gray2pi, width=10)
        self.gray2pi_entry.grid(column=3, row=4)
        # button to change gray values of vortex on the fly
        self.gray_vortex_button = ttk.Button(self.vortex_frame, text='Change', command=self.vortex_change_grayvalues)
        self.gray_vortex_button.grid(column=4, row=4)
        # ============================================================================================================
        # ZERNIKE WAVEFRONT SENSING
        # ============================================================================================================
        create_rotating_button = ttk.Button(self.rotate_frame, text='Create',
                                            command=lambda: self.make_map('rotate'))
        self.rotate_button = ttk.Button(self.rotate_frame, text='Rotate', command=self.rotate_callback, state=DISABLED)
        self.rotating_var = StringVar()
        self.rotating_var.set('0-pi')
        self.rotating_label = ttk.Label(self.rotate_frame, textvariable=self.rotating_var, state=DISABLED)
        self.rotating_list = ['0', 'pi/2', 'pi', '3pi/2']
        self.whichZernike = 0
        self.rotateZernike_dict = {}
        lD_label = Label(self.rotate_frame, text="l/D", width=10)
        self.lD_var = IntVar()
        self.lD_var.set(10)
        l_over_D_entry = ttk.Entry(self.rotate_frame, textvariable=self.lD_var, width=10)
        create_rotating_button.grid(column=0, row=0)
        self.rotate_button.grid(column=1, row=0)
        self.rotating_label.grid(column=2, row=0)
        lD_label.grid(column=1, row=1)
        l_over_D_entry.grid(column=2, row=1)
        # ======================================================================================================
        self.multiple_star_position = []
        # =============================================================================================================
        # Text frame
        # ========================================================================================================
        self.text_frame = ttk.Frame(self.master)
        scrollbar = Scrollbar(self.text_frame)
        scrollbar.grid(column=4, row=0)
        self.text = Text(self.text_frame, height=5, width=40, wrap='word', yscrollcommand=scrollbar.set)
        self.text.insert(INSERT, "Initializing SLM..\n")
        self.text.grid(column=0, row=0, columnspan=4)
        sys.stdout = StdoutRedirector(self.text) # assign stdout to custom class
def rotating_mask(self):
"""
Create map with 1s in the center circle and 0 else
:return:
"""
if self.active:
self.image = np.zeros(self.SLM.dimensions, dtype=np.uint8)
self.active = False
self.activation_button.config(bg='firebrick2')
self.active_var.set('OFF')
m = np.zeros(self.SLM.size)
if self.lD_var.get() < 0:
return
m[np.where(self.R.T <= self.lD_var.get())] = 1
v0 = int(self.menu.rad_2_gray(0))
v1 = int(self.menu.rad_2_gray(0.5))
v2 = int(self.menu.rad_2_gray(1))
v3 = int(self.menu.rad_2_gray(1.5))
print(v0, v1, v2, v3)
# 0 - pi
p0 = np.zeros(self.SLM.size)
phase_map0 = np.zeros(self.SLM.dimensions, dtype=np.uint8)
phase_map0[:, :, 0] = p0
phase_map0[:, :, 1] = p0
phase_map0[:, :, 2] = p0
self.rotateZernike_dict[self.rotating_list[0]] = phase_map0
# pi/2 - 3pi/2 FQ phase mask
p1 = np.zeros(self.SLM.size, dtype=np.uint8)
p1[np.where(m==1)] = v1
phase_map1 = np.zeros(self.SLM.dimensions, dtype=np.uint8)
phase_map1[:, :, 0] = p1
phase_map1[:, :, 1] = p1
phase_map1[:, :, 2] = p1
self.rotateZernike_dict[self.rotating_list[1]] = phase_map1
# pi-0 FQ phase mask
p2 = np.zeros(self.SLM.size, dtype=np.uint8)
p2[np.where(m == 1)] = v2
phase_map2 = np.zeros(self.SLM.dimensions, dtype=np.uint8)
phase_map2[:, :, 0] = p2
phase_map2[:, :, 1] = p2
phase_map2[:, :, 2] = p2
self.rotateZernike_dict[self.rotating_list[2]] = phase_map2
# 3pi/2 - pi/2 FQ phase mask
p3 = np.zeros(self.SLM.size, dtype=np.uint8)
p3[np.where(m == 1)] = v3
phase_map3 = np.zeros(self.SLM.dimensions, dtype=np.uint8)
phase_map3[:, :, 0] = p3
phase_map3[:, :, 1] = p3
phase_map3[:, :, 2] = p3
self.rotateZernike_dict[self.rotating_list[3]] = phase_map3
self.rotate_button.config(state=NORMAL)
self.rotating_label.config(state=NORMAL)
return
def rotate_callback(self):
"""
Rotate through masks in rotateFQ_dict which will result in rotating FQ mask with
different pi values
:return:
"""
# make activate button green to indicate that mask is alive
self.active = True
self.activation_button.config(bg='PaleGreen2')
self.active_var.set('ON')
# get mask and apply it
which = self.rotating_list[self.whichZernike]
m = self.rotateZernike_dict[which]
self.image = m
self.SLM.draw(m)
self.plot_update()
self.rotating_var.set(which)
# go to next mask in line
self.whichZernike = (self.whichZernike + 1) % 4
def calculate_charge_gray(self, p):
"""
Calculates a vortex phase mask from a map that gives the geometry. Also used every time one changes charge
:param p: complex matrix in which implements the vortex
:return:
"""
if not(0 <= self.gray0.get() <= 255 and 0 <= self.gray2pi.get() <= 255):
print('invalid values')
return
if self.charge.get() % 2 != 0:
print("Odd charge -> change to closest even number")
self.charge.set(self.charge.get() + 1)
#if 'vortex' not in self.SLM.maps.keys():
# return
# z = p^n, apply charge
z = p**self.charge.get()
# transform to radians
z = (np.angle(z) + np.pi)/(np.pi)
# map radians to gray values
z = self.map_to_interval(abs(z))
self.gray0.set(str(np.min(z)))
self.gray2pi.set(str(np.max(z)))
#z = z*abs(self.gray2pi.get() - self.gray0.get()) + self.gray0.get()
# create phase mask for SLM
phase_map = np.zeros(self.SLM.dimensions, dtype=np.uint8)
phase_map[:, :, 0] = z
phase_map[:, :, 1] = z
phase_map[:, :, 2] = z
return phase_map
def map_to_interval(self, p):
"""
Takes vortex with values 0-2.0 and maps them to an interval in radians
determined by the higher value of gray2pi"""
# val2pi = self.gray_to_rad(self.gray2pi.get())
# val0 = val2pi - 2.0
# if val0 < 0 :
# val0 = 0
# val2pi = 2.0
# p += val0
return self.rad_to_gray(p)
def charge_callback(self, event):
"""
Callback when charge of vortex phase mask is changed
:return:
"""
p = self.SLM.maps[self.maps_var.get()]['map']
phase_map = self.calculate_charge_gray(p)
self.image = phase_map
print('Changed charge to %i' % self.charge.get())
if self.active:
self.SLM.draw(self.image)
self.plot_update()
return
def make_vortex_callback(self):
"""
Create single vortex mask at center denoted by star center
:return:
"""
try:
c = self.vortex_coordinates.get().split(sep=',')
xc = int(c[0])
yc = int(c[1])
except ValueError:
print('Error with coordinates')
return
print('Calculating vortex with charge %i, gray %i-%i, coord %i,%i' %
(self.charge.get(), self.gray0.get(), self.gray2pi.get(), xc, yc))
p = self.SLM.Vortex_coronagraph(xc, yc)
phase_map = self.calculate_charge_gray(p)
name = "Vortex_coord:%i,%i" % (xc, yc)
print('Finished, map-name %s' % name)
# in 'data' the phase mask ready to apply to the SLM is stored
self.SLM.maps[name] = {'data': phase_map}
# in 'map' the complex map that creates the vortex is stored
self.SLM.maps[name]['map'] = p
# vortex map in gray values but with 1 depth dim
self.SLM.maps[name]['vortex_map'] = phase_map[:,:,0]
self.SLM.maps[name]['center'] = [[xc, yc]]
self.SLM.maps[name]['type'] = 'vortex'
self.maps.append(name)
self.refresh_optionmenu(self.maps_options, self.maps_var, self.maps)
self.image = phase_map
self.plot_update()
# save map to bitmap if option is checked
if self.checkbox_val.get():
filename = filedialog.asksaveasfilename(parent=self.master, filetypes=self.save_filetypes,
title='Save map as..')
filename += '.bmp'
surf = pygame.surfarray.make_surface(phase_map)
pygame.image.save(surf, filename)
return
def vortex_change_grayvalues(self):
"""
Changes the values of the vortex by scaling them with new_range
:return:
"""
p = self.SLM.maps[self.maps_var.get()]['map']
phase_map = self.calculate_charge_gray(p)
self.image = phase_map
print('Changed gray value range to %i-%i' % (self.gray0.get(), self.gray2pi.get()))
if self.active:
self.SLM.draw(self.image)
self.plot_update()
return
def l_over_D_callback(self, event):
"""
Transforms l/D and azimuthial information to pixel coordinates with respect to the first star
x = n*l/D*cos(phi*pi)
y = n*l/D*sin(phi*pi)
:param which: which star
:return:
"""
if event.widget == self.center2_lab:
x = int(float(self.lD_star2.get())*int(self.menu.lD.get())*np.cos(float(self.phi_star2.get())*np.pi-np.pi))
y = int(float(self.lD_star2.get())*int(self.menu.lD.get())*np.sin(float(self.phi_star2.get())*np.pi-np.pi))
self.multiple_star_position[0] = [float(self.lD_star2.get()), float(self.phi_star2.get())]
x += self.center_position[0][0]
y += self.center_position[0][1]
self.center_position[1] = [x, y]
self.multiple_star_position[0] = [x, y]
self.starc2.set('%i,%i' % (x, y))
elif event.widget == self.center3_lab:
x = int(float(self.lD_star3.get())*int(self.menu.lD.get())*np.cos(float(self.phi_star3.get())*np.pi-np.pi))
y = int(float(self.lD_star3.get())*int(self.menu.lD.get())*np.sin(float(self.phi_star3.get())*np.pi-np.pi))
self.multiple_star_position[1] = [float(self.lD_star3.get()), float(self.phi_star3.get())]
x += self.center_position[0][0]
y += self.center_position[0][1]
self.center_position[2] = [x, y]
self.multiple_star_position[1] = [x, y]
self.starc3.set('%i,%i' % (x, y))
else:
pass
return
def magnitude_to_intensity(self, event):
"""
Transform magnitude difference between star and primary star into intensity difference
:param which: which star
:return:
"""
if event.widget == self.M2_entry:
m = float(self.M2.get())
self.I2_num.set(str(10**(-m/2.5)))
elif event.widget == self.M3_entry:
m = float(self.M3.get())
self.I2_num.set(str(10**(-m/2.5)))
elif event.widget == self.I2_entry:
I = float(self.I2_num.get())
self.M2.set(-2.5*np.log10(I))
elif event.widget == self.I3_entry:
I = float(self.I3_num.get())
self.M3.set(-2.5*np.log10(I))
else:
pass
return
    def clear_maps(self):
        """
        Empty the list of loaded phase maps and refresh the drop-down menu.
        :return:
        """
        self.maps = []
        self.refresh_optionmenu(self.maps_options, self.maps_var, self.maps)
        return
def make_map(self, which):
"""
Make thread that starts to calculate phase map
:param which:
:return:
"""
if which == 'single':
self.map_thread = threading.Thread(target=self.single_mask, daemon=True)
self.map_thread.start()
elif which == 'binary':
self.map_thread = threading.Thread(target=self.binary_mask, daemon=True)
self.map_thread.start()
elif which == 'triple':
self.map_thread = threading.Thread(target=self.triple_mask, daemon=True)
self.map_thread.start()
elif which == 'vortex':
self.map_thread = threading.Thread(target=self.make_vortex_callback, daemon=True)
self.map_thread.start()
elif which == 'rotate':
self.map_thread = threading.Thread(target=self.rotating_mask, daemon=True)
self.map_thread.start()
else:
pass
print('Thread started')
return
def refresh_optionmenu(self, menu, var, options):
"""
Refreshes option menu
:param menu: handle to optionmenu widget
:param var: handle to variable of menu
:param options: options to insert
:return:
"""
var.set('')
menu['menu'].delete(0, 'end')
if len(options) == 0:
menu['menu'].add_command(label='', command=_setit(var, ''))
return
for option in options:
menu['menu'].add_command(label=option, command=_setit(var, option))
var.set(options[-1])
return
    def add_center(self):
        """
        Add a new center representing an additional star (or other object)
        on the phase map, up to a maximum of three. Enables the entry
        widgets for the newly added star and refreshes the center menu.
        Center coordinates come from right-click mouse position on the figure.
        :return:
        """
        if len(self.center_num) > 2:  # up to a total of 3 objects can be defined
            return
        num = int(self.center_num[-1]) + 1
        self.center_num.append(str(num))
        self.center_position.append([0, 0])
        self.multiple_star_position.append([1, 0])  # [l/D, phi]
        self.refresh_optionmenu(self.centers_options, self.center_var, self.center_num)
        # NOTE(review): multiple_star_position receives a second append in
        # each branch below, so two entries are added per star -- confirm
        # this double-append is intended.
        if num == 2:
            # enable every widget belonging to star 2
            self.M2_entry.config(state=NORMAL)
            self.I2_entry.config(state=NORMAL)
            self.l2_entry.config(state=NORMAL)
            self.F2_entry.config(state=NORMAL)
            self.lD_star2_entry.config(state=NORMAL)
            self.phi_star2_entry.config(state=NORMAL)
            self.star_2.config(state=NORMAL)
            self.binary_button.config(state=NORMAL)
            self.center2_lab.config(state=NORMAL)
            self.multiple_star_position.append([0, 0])
        else:
            # enable every widget belonging to star 3
            self.M3_entry.config(state=NORMAL)
            self.I3_entry.config(state=NORMAL)
            self.l3_entry.config(state=NORMAL)
            self.F3_entry.config(state=NORMAL)
            self.lD_star3_entry.config(state=NORMAL)
            self.phi_star3_entry.config(state=NORMAL)
            self.tertiary_button.config(state=NORMAL)
            self.star_3.config(state=NORMAL)
            self.center3_lab.config(state=NORMAL)
            self.multiple_star_position.append([0, 0])
        return
def arrow_return(self, event):
"""
Changes grayvalue with arrow key hits
:param event:
:return:
"""
which = event.widget
what = event.keycode
if what == 37 or what == 40:
what = -1
elif what == 38 or what == 39:
what = 1
elif what == 13:
what = 0
else:
return
if which == self.gray_1_entry:
try:
val_old = self.val_1
val = int(self.gray_1_val.get())
val += what
if val > 255:
val = 255
if val < 0:
val = 0
self.gray_1_val.set(str(val))
self.phase_1_val.set('Phase: %.3f rad'%self.menu.phase_curve[val])
self.val_1 = val
self.set_value(val_old, val)
except ValueError:
return
elif which == self.gray_2_entry:
try:
val_old = self.val_2
val = int(self.gray_2_val.get())
val += what
if val > 255:
val = 255
if val < 0:
val = 0
self.gray_2_val.set(str(val))
self.phase_2_val.set('Phase: %.3f rad'%self.menu.phase_curve[val])
self.val_2 = val
self.set_value(val_old, val)
except ValueError:
return
else:
return
def set_value(self, val_old, val):
"""
Find all pixels with value val and replace them with the new one
:param val_old: old value to replace
:param val: value to replace with
:return:
"""
self.image[self.image == val_old] = val
if self.active:
self.SLM.draw(self.image)
self.plot_update()
return
def activate(self):
"""
Activate and deactivate SLM
:return:
"""
if self.active:
self.active = False
self.activation_button.config(bg='firebrick2')
self.active_var.set('OFF')
self.send_map('OFF')
else:
self.active = True
self.activation_button.config(bg='PaleGreen2')
self.active_var.set('ON')
self.send_map('ON')
return
def get_grayvals(self, image):
"""
Get the values from the phase mask
:param image: applied mask
:return:
"""
vals = np.unique(image)
self.val_1 = vals.min()
self.val_2 = vals.max()
self.gray_1_val.set(str(self.val_1))
self.gray_2_val.set(str(self.val_2))
return
def send_map(self, status):
"""
Send map to SLM
:param status: Phase map for ON and zero map for OFF
:return:
"""
if status == 'ON':
map_array = self.SLM.maps[self.maps_var.get()]['data'] # should get the matrix of the chosen map
self.get_grayvals(map_array)
self.image = map_array
self.SLM.draw(map_array)
self.plot_update()
elif status == 'OFF':
try:
self.SLM.maps[self.maps_var.get()]['data'] = self.image # save current state of map to dictionary
except KeyError:
pass
self.image = self.off_image
self.SLM.draw(self.off_image)
self.plot_update()
def plot_update(self):
self.im.set_data(self.image[:, :, 0].T)
self.fig.canvas.draw()
return
    def import_map_callback(self):
        """
        Import a new phase map from a file chosen via a file dialog.
        Accepted extensions are bmp, txt, fits (handled by SLM.import_phase_map).
        When the import flag is set the radian-valued map is converted to
        gray levels and replicated into the three RGB channels before being
        registered under its basename in the maps dropdown.
        :return:
        """
        mfile = filedialog.askopenfilename()
        try:
            mname, flag = self.SLM.import_phase_map(mfile)
            # use the basename as the key in the maps dictionary / dropdown
            mname = os.path.basename(mname)
            if flag:
                # NOTE(review): flag presumably marks a radian-valued map that
                # still needs gray-level conversion -- confirm against
                # SLM.import_phase_map
                p = self.SLM.maps[mname]['map']
                p = self.rad_to_gray(p)
                # replicate the gray map into all three channels
                m = np.zeros(self.SLM.dimensions, dtype=np.uint8)
                m[:, :, 0] = p
                m[:, :, 1] = p
                m[:, :, 2] = p
                self.SLM.maps[mname]['data'] = m
                #self.SLM.maps[mname] = {'data': m}
                self.SLM.maps[mname]['type'] = 'custom'
                self.maps.append(mname)
                self.refresh_optionmenu(self.maps_options, self.maps_var, self.maps)
        except Exception as e:
            # best-effort import: report the failure and leave state unchanged
            print(e)
            return
        return
def click_callback(self, event):
_x = event.x
_y = event.y
inv = self.ax.transData.inverted()
data_pos = inv.transform((_x, _y))
data_pos = tuple([int(e) for e in data_pos])
if event.button == 1:
self.center_move('mouse', data_pos)
elif event.button == 3:
self.mouse_coordinates = data_pos
else:
pass
return
def center_move(self, d, pos):
"""
Move center of phase map
:param d: direction to move
:return:
"""
which = int(self.center_var.get())-1 # which center is currently active
if d == 'up':
self.center_position[which][1] -= self.center_step
self.image = np.roll(self.image, shift=-self.center_step, axis=1)
elif d == 'down':
self.center_position[which][1] += self.center_step
self.image = np.roll(self.image, shift=self.center_step, axis=1)
elif d == 'left':
self.center_position[which][0] -= self.center_step
self.image = np.roll(self.image, shift=-self.center_step, axis=0)
elif d == 'right':
self.center_position[which][0] += self.center_step
self.image = np.roll(self.image, shift=self.center_step, axis=0)
else:
self.center_position[which] = list(pos)
# update plot and SLM
if self.active:
self.SLM.draw(self.image)
self.plot_update()
# update label showing center
s = '%i,%i'%(self.center_position[which][0], self.center_position[which][1])
self.center_labels[which].set(s)
return
def set_center_step(self, event):
"""
Callback for setting the center move step size
"""
try:
val = int(self.cstep_var.get())
if val > 384:
raise ValueError
self.center_step = val
except ValueError:
self.cstep_var.set(str(self.center_step))
return
def binary_mask(self):
"""
Create binary mask
:return:
"""
c1 = self.starc1.get().split(sep=',')
c1 = (int(c1[0]), int(c1[1]))
c2 = self.starc2.get().split(sep=',')
c2 = (int(c2[0]), int(c2[1]))
try:
I1, l1, F1 = float(self.I1_num.get()), float(self.l1_num.get()), float(self.F1_num.get())
I2, l2, F2 = float(self.I2_num.get()), float(self.l2_num.get()), float(self.F2_num.get())
val1 = self.menu.phase_curve[int(self.gray_1_val.get())]
val2 = self.menu.phase_curve[int(self.gray_2_val.get())]
print('Binary map with values :%f, %f'%(val1, val2))
except ValueError:
print('ValueError')
return
self.f = lambda x, y: self.SLM.pixel_value(x, y, c1, c2, I1, I2, val1, val2, F1, F2, l1, l2,
mask=self.map_type_var.get())
print('Calculating binary %s' % self.map_type_var.get())
p = np.zeros(np.SLM.size)
print('Running binary weight-values calculation..')
for (x, y), val in np.ndenumerate(p):
p[x, y] = self.f(x, y)
try:
print("Running rad to gray conversion..")
p = self.rad_to_gray(p) # convert rad values to the corresponding gray values
except Exception as e:
print(e)
return
phase_map = np.zeros(self.SLM.dimensions, dtype=np.uint8) # dimensions are (width,height, 3)
phase_map[:, :, 0] = p
phase_map[:, :, 1] = p
phase_map[:, :, 2] = p
name = self.new_map_name.get()
self.SLM.maps[name] = {'data': phase_map}
self.SLM.maps[name]['center'] = [[c1[0], c1[1]], [c2[0], c2[1]]]
self.SLM.maps[name]['star info'] = [[I1, l1, F1], [I2, l2, F2]]
self.maps.append(name)
self.refresh_optionmenu(self.maps_options, self.maps_var, self.maps)
self.image = phase_map
self.plot_update()
# save map to bitmap if option is checked
if self.checkbox_val.get():
self.menu.save_fits(name=name)
print('File saved')
return
    def triple_mask(self):
        """
        Create a phase mask for a triple system from the three star centers
        and their GUI parameters, then register it in the maps dropdown.
        NOTE(review): ``self.f`` is never assigned in this method -- the
        intended lambda (``SLM.pixel_value_triple``) is commented out below,
        so the loop reuses whatever ``self.f`` was last set by
        ``binary_mask``.  This looks like a bug; confirm the intended
        per-pixel function before relying on this method.
        :return:
        """
        c1 = self.starc1.get().split(sep=',')
        c1 = (int(c1[0]), int(c1[1]))
        c2 = self.starc2.get().split(sep=',')
        c2 = (int(c2[0]), int(c2[1]))
        c3 = self.starc3.get().split(sep=',')
        c3 = (int(c3[0]), int(c3[1]))
        try:
            I1, l1, F1 = int(self.I1_num.get()), int(self.l1_num.get()), int(self.F1_num.get())
            I2, l2, F2 = int(self.I2_num.get()), int(self.l2_num.get()), int(self.F2_num.get())
            I3, l3, F3 = int(self.I3_num.get()), int(self.l3_num.get()), int(self.F3_num.get())
            val1 = int(self.gray_1_val.get())
            val2 = int(self.gray_2_val.get())
        except ValueError:
            print('Error')
            return
        #self.f = lambda x, y: self.SLM.pixel_value_triple(x, y, c1, c2, I1, I2, val1, val2, F1, F2, l1, l2)
        # evaluate the per-pixel function over the whole SLM area
        p = np.zeros(self.SLM.size, dtype=np.uint8)
        for (x, y), val in np.ndenumerate(p):
            p[x, y] = self.f(x, y)
        # replicate the gray map into all three channels
        phase_map = np.zeros(self.SLM.dimensions, dtype=np.uint8)
        phase_map[:, :, 0] = p
        phase_map[:, :, 1] = p
        phase_map[:, :, 2] = p
        name = self.new_map_name.get()
        self.SLM.maps[name] = {'data': phase_map}
        self.SLM.maps[name]['center'] = [[c1[0], c1[1]], [c2[0], c2[1]], [c3[0], c3[1]]]
        self.SLM.maps[name]['star info'] = [[I1, l1, F1], [I2, l2, F2], [I3, l3, F3]]
        self.maps.append(name)
        self.refresh_optionmenu(self.maps_options, self.maps_var, self.maps)
        self.image = phase_map
        self.plot_update()
        # save map to bitmap if option is checked
        if self.checkbox_val.get():
            self.menu.save_fits(name=name)
            print('File saved')
        return
    def single_mask(self):
        """
        Create a single-star mask centered at the active center's position.
        Supported mask types (from the type dropdown): 'FQPM' (four-quadrant
        phase mask), 'FLAT' (all zeros) and 'EOPM' (eight-octant mask via
        SLM.eight_octants); any other type aborts silently.  The finished
        map is registered in the maps dropdown and shown in the preview.
        :return:
        """
        try:
            which = int(self.center_var.get())-1
            c = self.center_labels[which].get().split(sep=',')
            I1, l1, F1 = int(self.I1_num.get()), int(self.l1_num.get()), int(self.F1_num.get())
            xc = int(c[0])
            yc = int(c[1])
            val1 = int(self.gray_1_val.get())
            val2 = int(self.gray_2_val.get())
        except ValueError:
            print('Error')
            return
        # p holds the gray map; p_raw is currently unused except in the
        # commented-out radian bookkeeping below (kept for reference)
        p = np.zeros(self.SLM.size)
        p_raw = np.zeros(self.SLM.size)
        print('Calculating %s with gray values %i, %i at coord %i,%i' %
              (self.map_type_var.get(), val1, val2, xc, yc))
        # remember the gray values used so apply_zernike can detect changes
        self.zernike_gray1_old = val1
        self.zernike_gray2_old = val2
        # self.zernike_gray1.set(val1)
        # self.zernike_gray2.set(val2)
        if self.map_type_var.get() == 'FQPM':
            # opposite quadrants share the same gray value
            p[xc:, yc:] = val2
            p[:xc, :yc] = val2
            p[:xc, yc:] = val1
            p[xc:, :yc] = val1
            # p_raw[xc:, yc:] = float(val2/255)*2*np.pi
            # p_raw[:xc, :yc] = float(val2/255)*2*np.pi
            # p_raw[:xc, yc:] = float(val1/255)*2*np.pi
            # p_raw[xc:, :yc] = float(val1/255)*2*np.pi
        elif self.map_type_var.get() == 'FLAT':
            pass
        elif self.map_type_var.get() == 'EOPM':
            for (x, y), v in np.ndenumerate(p):
                p[x, y] = self.SLM.eight_octants(x, y, (xc, yc), val1, val2)
            p_raw = p
        else:
            # would be nice to have some feedback in the GUI at some point
            return
        # replicate the gray map into all three channels
        phase_map = np.zeros(self.SLM.dimensions, dtype=np.uint8)
        phase_map[:, :, 0] = p
        phase_map[:, :, 1] = p
        phase_map[:, :, 2] = p
        name = self.new_map_name.get()
        self.SLM.maps[name] = {'data': phase_map}
        self.SLM.maps[name]['center'] = [[xc, yc]]
        self.SLM.maps[name]['star info'] = [I1, l1, F1]
        self.SLM.maps[name]['map'] = p
        self.SLM.maps[name]['type'] = self.map_type_var.get()
        self.maps.append(name)
        self.refresh_optionmenu(self.maps_options, self.maps_var, self.maps)
        print('Finished, mask-name: %s' % name)
        self.image = phase_map
        self.plot_update()
        # save map to bitmap if option is checked
        if self.checkbox_val.get():
            filename = filedialog.asksaveasfilename(parent=self.master, filetypes=self.save_filetypes,
                                                    title='Save map as..')
            filename += '.bmp'
            surf = pygame.surfarray.make_surface(phase_map)
            pygame.image.save(surf, filename)
            print('File saved')
        return
def zernike_send(self):
"""
Changes the values of the vortex by scaling them with new_range
:return:
"""
p = self.SLM.maps[self.maps_var.get()]['data']
self.image = p
if self.active:
self.SLM.draw(self.image)
self.plot_update()
return
    def apply_zernike(self):
        """
        Apply Zernike corrections (defocus, astigmatisms, tip/tilt, x/y
        gradients) to the currently selected map.  The radial coordinate is
        divided by 1080 (y gradient by 540) -- presumably normalising to the
        SLM dimensions, TODO confirm.  'vortex' and 'custom' maps are
        corrected in the complex domain; other map types get a gray-level
        offset computed from the combined aberration.
        :return:
        """
        # weighted sum of the Zernike basis terms selected in the GUI
        zernike = self.defocus_coeff.get()*self.Defocus(self.R/1080) + \
                  self.astigm_coeff.get()*self.Astigm(self.R/1080, self.Theta) + \
                  self.secastigm_coeff.get()*self.VertAstigm(self.R/1080, self.Theta)+ \
                  self.xgrad_coeff.get()*self.XGrad(self.xx/1080) + \
                  self.ygrad_coeff.get()*self.YGrad(self.yy/540) + \
                  self.tip_coeff.get()*self.Tip(self.R/1080, self.Theta)+ \
                  self.tilt_coeff.get()*self.Tilt(self.R/1080, self.Theta)
        magnitude = (self.zernike_max.get()-self.zernike_min.get())
        p = self.SLM.maps[self.maps_var.get()]['map']
        if self.SLM.maps[self.maps_var.get()]['type'] == 'vortex':
            # multiply the complex vortex field by the aberration phasor
            p = self.SLM.maps[self.maps_var.get()]['map']
            p = p*np.exp(1j*zernike.T*magnitude)
            phase_map = self.calculate_charge_gray(p)
            self.SLM.maps[self.maps_var.get()]['data'] = phase_map
            self.zernike_send()
            return
        elif self.SLM.maps[self.maps_var.get()]['type'] == 'custom':
            # lift the stored map into phase, apply the aberration phasor
            p = (self.SLM.maps[self.maps_var.get()]['map'] - 1)*np.pi
            p = np.exp(1j*p) * np.exp(1j * zernike.T * magnitude)
            z = (np.angle(p) + np.pi) / (np.pi)
            # map radians to gray values
            z = self.map_to_interval(abs(z))
            # z = z*abs(self.gray2pi.get() - self.gray0.get()) + self.gray0.get()
            # create phase mask for SLM
            phase_map = np.zeros(self.SLM.dimensions, dtype=np.uint8)
            phase_map[:, :, 0] = z
            phase_map[:, :, 1] = z
            phase_map[:, :, 2] = z
            self.SLM.maps[self.maps_var.get()]['data'] = phase_map
            self.zernike_send()
            return
        # val1 = self.zernike_gray1.get()
        # val2 = self.zernike_gray2.get()
        #
        # if (val1 != self.zernike_gray1_old) or (val2 != self.zernike_gray2_old):
        #     p[p==self.zernike_gray1_old] = val1
        #     p[p==self.zernike_gray2_old] = val2
        #     self.zernike_gray1_old = val1
        #     self.zernike_gray2_old = val2
        #p = np.angle(np.exp(1j*p)) +np.pi
        # other map types: convert the aberration to a gray-level offset and
        # subtract it from the stored map (wraps via uint8 arithmetic)
        calib = np.angle(np.exp(1j*zernike.T*magnitude))
        calib = calib*127/np.pi
        #m = (np.angle(np.exp(1j*p)/np.exp(1j*calib)) + np.pi)/(2*np.pi)
        m = np.array(p - calib, dtype=np.uint8)
        phase_map = np.zeros(self.SLM.dimensions, dtype=np.uint8)
        phase_map[:, :, 0] = m
        phase_map[:, :, 1] = m
        phase_map[:, :, 2] = m
        self.SLM.maps[self.maps_var.get()]['map'] = p
        self.SLM.maps[self.maps_var.get()]['data'] = phase_map
        self.zernike_send()
        return
def rad_to_gray(self, p):
"""
Transforms the map p which contains values in radians to the nearest fitting gray values
:param p: map in radians
:return: transformed map
"""
# first find the unique values of p
p = np.round(self.menu.rad_2_gray(p))
p[p < 0] = 0 # correct possible negative values of fit
p[p > 255] = 255 # if value gets over 255
return p.astype(np.uint8)
def gray_to_rad(self, p):
"""
Takes gray values from 0-255 and returns the corresponding value in radians
"""
if not(0<=p<=255):
return
return self.menu.phase_curve[int(p)]
if __name__ == '__main__':
    def on_closing(master):
        """Stop the Tk main loop and destroy the root window on close."""
        master.quit()
        master.destroy()
        return
    root = Tk()
    window = SLMViewer(root)
    # lay out the main widgets on the root grid
    window.data_plot.get_tk_widget().grid(column=0, row=0, columnspan=4, rowspan=5)
    window.import_maps_frame.grid(column=4, row=5)
    window.text_frame.grid(column=4, row=3)
    window.control_frame.grid(column=4, row=2)
    #window.grayval_frame.grid(column=4, row=2)
    window.active_frame.grid(column=4, row=0)
    #window.stars_frame.grid(column=4, row=3)
    #window.binary_frame.grid(column=4, row=4)
    #window.rotate_frame.grid(column=0, row=5)
    window.notebook.grid(column=4, row=1)
    root.protocol("WM_DELETE_WINDOW", lambda: on_closing(root))  # make sure window close properly
    root.mainloop()
| [
"numpy.log10",
"numpy.sqrt",
"tkinter.ttk.Button",
"numpy.array",
"numpy.arctan2",
"tkinter.ttk.LabelFrame",
"numpy.poly1d",
"tkinter.ttk.Separator",
"numpy.sin",
"pygame.surfarray.make_surface",
"tkinter._setit",
"numpy.arange",
"matplotlib.pyplot.imshow",
"tkinter.ttk.Entry",
"numpy.wh... | [((280, 303), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (294, 303), False, 'import matplotlib\n'), ((648, 672), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (655, 672), True, 'import numpy as np\n'), ((680, 696), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (690, 696), True, 'import numpy as np\n'), ((1909, 1920), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1918, 1920), False, 'import pygame, os, time, pickle\n'), ((4343, 4355), 'numpy.poly1d', 'np.poly1d', (['p'], {}), '(p)\n', (4352, 4355), True, 'import numpy as np\n'), ((4999, 5020), 'tkinter.ttk.Frame', 'ttk.Frame', (['toplevel_r'], {}), '(toplevel_r)\n', (5008, 5020), False, 'from tkinter import ttk\n'), ((6163, 6203), 'tkinter.ttk.Separator', 'ttk.Separator', (['toplevel'], {'orient': 'VERTICAL'}), '(toplevel, orient=VERTICAL)\n', (6176, 6203), False, 'from tkinter import ttk\n'), ((6226, 6288), 'tkinter.ttk.Button', 'ttk.Button', (['toplevel'], {'text': '"""Set"""', 'command': 'self.apply_star_info'}), "(toplevel, text='Set', command=self.apply_star_info)\n", (6236, 6288), False, 'from tkinter import ttk\n'), ((7201, 7222), 'tkinter.ttk.Frame', 'ttk.Frame', (['toplevel_r'], {}), '(toplevel_r)\n', (7210, 7222), False, 'from tkinter import ttk\n'), ((7324, 7352), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (7336, 7352), True, 'import matplotlib.pyplot as plt\n'), ((7576, 7627), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['self.curve_plot'], {'master': 'toplevel'}), '(self.curve_plot, master=toplevel)\n', (7593, 7627), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((7685, 7762), 'tkinter.ttk.Button', 'ttk.Button', (['toplevel'], {'text': '"""Import curve"""', 'command': 'self.import_curve_callback'}), "(toplevel, text='Import curve', command=self.import_curve_callback)\n", (7695, 7762), False, 'from 
tkinter import ttk\n'), ((8160, 8220), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'title': '"""Select phase curve(.npy)"""'}), "(title='Select phase curve(.npy)')\n", (8186, 8220), False, 'from tkinter import filedialog\n'), ((8238, 8251), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (8245, 8251), True, 'import numpy as np\n'), ((8398, 8408), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (8406, 8408), True, 'import matplotlib.pyplot as plt\n'), ((8685, 8778), 'tkinter.filedialog.asksaveasfilename', 'filedialog.asksaveasfilename', ([], {'master': 'self.master', 'title': '"""Save as.."""', 'initialdir': 'self.path'}), "(master=self.master, title='Save as..',\n initialdir=self.path)\n", (8713, 8778), False, 'from tkinter import filedialog\n'), ((8842, 8863), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (8857, 8863), False, 'import pygame, os, time, pickle\n'), ((9221, 9238), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (9236, 9238), False, 'from astropy.io import fits\n'), ((9657, 9682), 'time.strftime', 'time.strftime', (['"""%d/%m/%Y"""'], {}), "('%d/%m/%Y')\n", (9670, 9682), False, 'import pygame, os, time, pickle\n'), ((10688, 10717), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {}), '(self.SLM.dimensions)\n', (10696, 10717), True, 'import numpy as np\n'), ((10784, 10798), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10796, 10798), True, 'import matplotlib.pyplot as plt\n'), ((10820, 10847), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0)', 'vmax': '(255)'}), '(vmin=0, vmax=255)\n', (10829, 10847), False, 'from matplotlib.colors import Normalize\n'), ((10896, 10961), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.image[:, :, 0].T'], {'cmap': 'self.cmap', 'norm': 'self.norm'}), '(self.image[:, :, 0].T, cmap=self.cmap, norm=self.norm)\n', (10906, 10961), True, 'import matplotlib.pyplot as plt\n'), ((11130, 11177), 
'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['self.fig'], {'master': 'self.master'}), '(self.fig, master=self.master)\n', (11147, 11177), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((11615, 11661), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self.master'], {'text': '"""Phase maps"""'}), "(self.master, text='Phase maps')\n", (11629, 11661), False, 'from tkinter import ttk\n'), ((11696, 11788), 'tkinter.ttk.Button', 'ttk.Button', (['self.import_maps_frame'], {'text': '"""Import map"""', 'command': 'self.import_map_callback'}), "(self.import_maps_frame, text='Import map', command=self.\n import_map_callback)\n", (11706, 11788), False, 'from tkinter import ttk\n'), ((11863, 11936), 'tkinter.ttk.Button', 'ttk.Button', (['self.import_maps_frame'], {'text': '"""Clear"""', 'command': 'self.clear_maps'}), "(self.import_maps_frame, text='Clear', command=self.clear_maps)\n", (11873, 11936), False, 'from tkinter import ttk\n'), ((13844, 13869), 'tkinter.ttk.Notebook', 'ttk.Notebook', (['self.master'], {}), '(self.master)\n', (13856, 13869), False, 'from tkinter import ttk\n'), ((14748, 14797), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self.multiple_frame'], {'text': '"""Stars"""'}), "(self.multiple_frame, text='Stars')\n", (14762, 14797), False, 'from tkinter import ttk\n'), ((15164, 15219), 'tkinter.ttk.Label', 'ttk.Label', (['self.stars_frame'], {'text': '"""Intensity"""', 'width': '(10)'}), "(self.stars_frame, text='Intensity', width=10)\n", (15173, 15219), False, 'from tkinter import ttk\n'), ((15240, 15295), 'tkinter.ttk.Label', 'ttk.Label', (['self.stars_frame'], {'text': '"""Magnitude"""', 'width': '(10)'}), "(self.stars_frame, text='Magnitude', width=10)\n", (15249, 15295), False, 'from tkinter import ttk\n'), ((15313, 15373), 'tkinter.ttk.Label', 'ttk.Label', (['self.stars_frame'], {'text': '"""Wavelength(nm)"""', 'width': '(10)'}), "(self.stars_frame, text='Wavelength(nm)', width=10)\n", 
(15322, 15373), False, 'from tkinter import ttk\n'), ((15391, 15440), 'tkinter.ttk.Label', 'ttk.Label', (['self.stars_frame'], {'text': '"""F #"""', 'width': '(10)'}), "(self.stars_frame, text='F #', width=10)\n", (15400, 15440), False, 'from tkinter import ttk\n'), ((15459, 15508), 'tkinter.ttk.Label', 'ttk.Label', (['self.stars_frame'], {'text': '"""l/D"""', 'width': '(10)'}), "(self.stars_frame, text='l/D', width=10)\n", (15468, 15508), False, 'from tkinter import ttk\n'), ((15527, 15580), 'tkinter.ttk.Label', 'ttk.Label', (['self.stars_frame'], {'text': '"""phi(pi)"""', 'width': '(10)'}), "(self.stars_frame, text='phi(pi)', width=10)\n", (15536, 15580), False, 'from tkinter import ttk\n'), ((15598, 15655), 'tkinter.ttk.Label', 'ttk.Label', (['self.stars_frame'], {'text': '"""Center(x,y)"""', 'width': '(10)'}), "(self.stars_frame, text='Center(x,y)', width=10)\n", (15607, 15655), False, 'from tkinter import ttk\n'), ((16040, 16099), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.M1', 'width': '(10)'}), '(self.stars_frame, textvariable=self.M1, width=10)\n', (16049, 16099), False, 'from tkinter import ttk\n'), ((16230, 16293), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.I1_num', 'width': '(10)'}), '(self.stars_frame, textvariable=self.I1_num, width=10)\n', (16239, 16293), False, 'from tkinter import ttk\n'), ((16431, 16494), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.l1_num', 'width': '(10)'}), '(self.stars_frame, textvariable=self.l1_num, width=10)\n', (16440, 16494), False, 'from tkinter import ttk\n'), ((16632, 16695), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.F1_num', 'width': '(10)'}), '(self.stars_frame, textvariable=self.F1_num, width=10)\n', (16641, 16695), False, 'from tkinter import ttk\n'), ((17096, 17171), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.M2', 'width': '(10)', 
'state': 'DISABLED'}), '(self.stars_frame, textvariable=self.M2, width=10, state=DISABLED)\n', (17105, 17171), False, 'from tkinter import ttk\n'), ((17402, 17481), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.I2_num', 'width': '(10)', 'state': 'DISABLED'}), '(self.stars_frame, textvariable=self.I2_num, width=10, state=DISABLED)\n', (17411, 17481), False, 'from tkinter import ttk\n'), ((17723, 17802), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.l2_num', 'width': '(10)', 'state': 'DISABLED'}), '(self.stars_frame, textvariable=self.l2_num, width=10, state=DISABLED)\n', (17732, 17802), False, 'from tkinter import ttk\n'), ((17975, 18054), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.F2_num', 'width': '(10)', 'state': 'DISABLED'}), '(self.stars_frame, textvariable=self.F2_num, width=10, state=DISABLED)\n', (17984, 18054), False, 'from tkinter import ttk\n'), ((19093, 19168), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.M3', 'width': '(10)', 'state': 'DISABLED'}), '(self.stars_frame, textvariable=self.M3, width=10, state=DISABLED)\n', (19102, 19168), False, 'from tkinter import ttk\n'), ((19399, 19478), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.I3_num', 'width': '(10)', 'state': 'DISABLED'}), '(self.stars_frame, textvariable=self.I3_num, width=10, state=DISABLED)\n', (19408, 19478), False, 'from tkinter import ttk\n'), ((19720, 19799), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.l3_num', 'width': '(10)', 'state': 'DISABLED'}), '(self.stars_frame, textvariable=self.l3_num, width=10, state=DISABLED)\n', (19729, 19799), False, 'from tkinter import ttk\n'), ((19972, 20051), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.stars_frame'], {'textvariable': 'self.F3_num', 'width': '(10)', 'state': 'DISABLED'}), '(self.stars_frame, textvariable=self.F3_num, width=10, 
state=DISABLED)\n', (19981, 20051), False, 'from tkinter import ttk\n'), ((22133, 22184), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self.master'], {'text': '"""Center Controls"""'}), "(self.master, text='Center Controls')\n", (22147, 22184), False, 'from tkinter import ttk\n'), ((23407, 23458), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self.fqpm_frame'], {'text': '"""Gray values"""'}), "(self.fqpm_frame, text='Gray values')\n", (23421, 23458), False, 'from tkinter import ttk\n'), ((24452, 24502), 'tkinter.ttk.Label', 'ttk.Label', (['self.grayval_frame'], {'text': '"""Gray-value 1"""'}), "(self.grayval_frame, text='Gray-value 1')\n", (24461, 24502), False, 'from tkinter import ttk\n'), ((24530, 24580), 'tkinter.ttk.Label', 'ttk.Label', (['self.grayval_frame'], {'text': '"""Gray-value 2"""'}), "(self.grayval_frame, text='Gray-value 2')\n", (24539, 24580), False, 'from tkinter import ttk\n'), ((24887, 24947), 'tkinter.ttk.Label', 'ttk.Label', (['self.grayval_frame'], {'textvariable': 'self.phase_1_val'}), '(self.grayval_frame, textvariable=self.phase_1_val)\n', (24896, 24947), False, 'from tkinter import ttk\n'), ((24976, 25036), 'tkinter.ttk.Label', 'ttk.Label', (['self.grayval_frame'], {'textvariable': 'self.phase_2_val'}), '(self.grayval_frame, textvariable=self.phase_2_val)\n', (24985, 25036), False, 'from tkinter import ttk\n'), ((25717, 25763), 'tkinter.ttk.Label', 'ttk.Label', (['self.zernike_frame'], {'text': '"""Defocus:"""'}), "(self.zernike_frame, text='Defocus:')\n", (25726, 25763), False, 'from tkinter import ttk\n'), ((26091, 26148), 'tkinter.ttk.Label', 'ttk.Label', (['self.zernike_frame'], {'text': '"""Obliq. Astigmatism:"""'}), "(self.zernike_frame, text='Obliq. Astigmatism:')\n", (26100, 26148), False, 'from tkinter import ttk\n'), ((26475, 26531), 'tkinter.ttk.Label', 'ttk.Label', (['self.zernike_frame'], {'text': '"""Vert. Astigmatism:"""'}), "(self.zernike_frame, text='Vert. 
Astigmatism:')\n", (26484, 26531), False, 'from tkinter import ttk\n'), ((26854, 26897), 'tkinter.ttk.Label', 'ttk.Label', (['self.zernike_frame'], {'text': '"""Tilt:"""'}), "(self.zernike_frame, text='Tilt:')\n", (26863, 26897), False, 'from tkinter import ttk\n'), ((27188, 27230), 'tkinter.ttk.Label', 'ttk.Label', (['self.zernike_frame'], {'text': '"""Tip:"""'}), "(self.zernike_frame, text='Tip:')\n", (27197, 27230), False, 'from tkinter import ttk\n'), ((27532, 27581), 'tkinter.ttk.Label', 'ttk.Label', (['self.zernike_frame'], {'text': '"""X gradient:"""'}), "(self.zernike_frame, text='X gradient:')\n", (27541, 27581), False, 'from tkinter import ttk\n'), ((27866, 27915), 'tkinter.ttk.Label', 'ttk.Label', (['self.zernike_frame'], {'text': '"""Y gradient:"""'}), "(self.zernike_frame, text='Y gradient:')\n", (27875, 27915), False, 'from tkinter import ttk\n'), ((28884, 28956), 'tkinter.ttk.Button', 'ttk.Button', (['self.zernike_frame'], {'text': '"""Apply"""', 'command': 'self.apply_zernike'}), "(self.zernike_frame, text='Apply', command=self.apply_zernike)\n", (28894, 28956), False, 'from tkinter import ttk\n'), ((31437, 31467), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.multiple_frame'], {}), '(self.multiple_frame)\n', (31446, 31467), False, 'from tkinter import ttk\n'), ((32646, 32712), 'tkinter.ttk.Button', 'ttk.Button', (['self.binary_frame'], {'text': '"""Add"""', 'command': 'self.add_center'}), "(self.binary_frame, text='Add', command=self.add_center)\n", (32656, 32712), False, 'from tkinter import ttk\n'), ((33518, 33561), 'tkinter.ttk.Label', 'ttk.Label', (['self.vortex_frame'], {'text': '"""charge"""'}), "(self.vortex_frame, text='charge')\n", (33527, 33561), False, 'from tkinter import ttk\n'), ((33925, 33973), 'tkinter.ttk.Label', 'ttk.Label', (['self.vortex_frame'], {'text': '"""Coordinates"""'}), "(self.vortex_frame, text='Coordinates')\n", (33934, 33973), False, 'from tkinter import ttk\n'), ((34402, 34450), 'tkinter.ttk.Label', 'ttk.Label', 
(['self.vortex_frame'], {'text': '"""Gray values"""'}), "(self.vortex_frame, text='Gray values')\n", (34411, 34450), False, 'from tkinter import ttk\n'), ((34567, 34616), 'tkinter.ttk.Label', 'ttk.Label', (['self.vortex_frame'], {'text': '"""0:"""', 'width': '(10)'}), "(self.vortex_frame, text='0:', width=10)\n", (34576, 34616), False, 'from tkinter import ttk\n'), ((34911, 34962), 'tkinter.ttk.Label', 'ttk.Label', (['self.vortex_frame'], {'text': '"""2pi:"""', 'width': '(10)'}), "(self.vortex_frame, text='2pi:', width=10)\n", (34920, 34962), False, 'from tkinter import ttk\n'), ((35306, 35394), 'tkinter.ttk.Button', 'ttk.Button', (['self.vortex_frame'], {'text': '"""Change"""', 'command': 'self.vortex_change_grayvalues'}), "(self.vortex_frame, text='Change', command=self.\n vortex_change_grayvalues)\n", (35316, 35394), False, 'from tkinter import ttk\n'), ((35924, 36018), 'tkinter.ttk.Button', 'ttk.Button', (['self.rotate_frame'], {'text': '"""Rotate"""', 'command': 'self.rotate_callback', 'state': 'DISABLED'}), "(self.rotate_frame, text='Rotate', command=self.rotate_callback,\n state=DISABLED)\n", (35934, 36018), False, 'from tkinter import ttk\n'), ((36126, 36202), 'tkinter.ttk.Label', 'ttk.Label', (['self.rotate_frame'], {'textvariable': 'self.rotating_var', 'state': 'DISABLED'}), '(self.rotate_frame, textvariable=self.rotating_var, state=DISABLED)\n', (36135, 36202), False, 'from tkinter import ttk\n'), ((36487, 36551), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.rotate_frame'], {'textvariable': 'self.lD_var', 'width': '(10)'}), '(self.rotate_frame, textvariable=self.lD_var, width=10)\n', (36496, 36551), False, 'from tkinter import ttk\n'), ((37237, 37259), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.master'], {}), '(self.master)\n', (37246, 37259), False, 'from tkinter import ttk\n'), ((38032, 38055), 'numpy.zeros', 'np.zeros', (['self.SLM.size'], {}), '(self.SLM.size)\n', (38040, 38055), True, 'import numpy as np\n'), ((38408, 38431), 'numpy.zeros', 'np.zeros', 
(['self.SLM.size'], {}), '(self.SLM.size)\n', (38416, 38431), True, 'import numpy as np\n'), ((38454, 38499), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (38462, 38499), True, 'import numpy as np\n'), ((38725, 38764), 'numpy.zeros', 'np.zeros', (['self.SLM.size'], {'dtype': 'np.uint8'}), '(self.SLM.size, dtype=np.uint8)\n', (38733, 38764), True, 'import numpy as np\n'), ((38820, 38865), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (38828, 38865), True, 'import numpy as np\n'), ((39083, 39122), 'numpy.zeros', 'np.zeros', (['self.SLM.size'], {'dtype': 'np.uint8'}), '(self.SLM.size, dtype=np.uint8)\n', (39091, 39122), True, 'import numpy as np\n'), ((39180, 39225), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (39188, 39225), True, 'import numpy as np\n'), ((39451, 39490), 'numpy.zeros', 'np.zeros', (['self.SLM.size'], {'dtype': 'np.uint8'}), '(self.SLM.size, dtype=np.uint8)\n', (39459, 39490), True, 'import numpy as np\n'), ((39548, 39593), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (39556, 39593), True, 'import numpy as np\n'), ((41690, 41735), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (41698, 41735), True, 'import numpy as np\n'), ((53603, 53619), 'numpy.unique', 'np.unique', (['image'], {}), '(image)\n', (53612, 53619), True, 'import numpy as np\n'), ((54880, 54908), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (54906, 54908), False, 'from tkinter import filedialog\n'), ((58819, 58840), 'numpy.zeros', 'np.zeros', (['np.SLM.size'], {}), '(np.SLM.size)\n', (58827, 58840), True, 'import numpy as np\n'), ((58930, 58947), 'numpy.ndenumerate', 
'np.ndenumerate', (['p'], {}), '(p)\n', (58944, 58947), True, 'import numpy as np\n'), ((59245, 59290), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (59253, 59290), True, 'import numpy as np\n'), ((60974, 61013), 'numpy.zeros', 'np.zeros', (['self.SLM.size'], {'dtype': 'np.uint8'}), '(self.SLM.size, dtype=np.uint8)\n', (60982, 61013), True, 'import numpy as np\n'), ((61044, 61061), 'numpy.ndenumerate', 'np.ndenumerate', (['p'], {}), '(p)\n', (61058, 61061), True, 'import numpy as np\n'), ((61122, 61167), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (61130, 61167), True, 'import numpy as np\n'), ((62489, 62512), 'numpy.zeros', 'np.zeros', (['self.SLM.size'], {}), '(self.SLM.size)\n', (62497, 62512), True, 'import numpy as np\n'), ((62530, 62553), 'numpy.zeros', 'np.zeros', (['self.SLM.size'], {}), '(self.SLM.size)\n', (62538, 62553), True, 'import numpy as np\n'), ((63660, 63705), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (63668, 63705), True, 'import numpy as np\n'), ((67683, 67718), 'numpy.array', 'np.array', (['(p - calib)'], {'dtype': 'np.uint8'}), '(p - calib, dtype=np.uint8)\n', (67691, 67718), True, 'import numpy as np\n'), ((67742, 67787), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (67750, 67787), True, 'import numpy as np\n'), ((828, 852), 'numpy.zeros', 'np.zeros', (['(1024, 768, 3)'], {}), '((1024, 768, 3))\n', (836, 852), True, 'import numpy as np\n'), ((4290, 4307), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (4299, 4307), True, 'import numpy as np\n'), ((7386, 7400), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (7395, 7400), True, 'import numpy as np\n'), ((8366, 8380), 'numpy.arange', 'np.arange', (['(256)'], {}), 
'(256)\n', (8375, 8380), True, 'import numpy as np\n'), ((10234, 10239), 'pySLM.definitions.SLM', 'SLM', ([], {}), '()\n', (10237, 10239), False, 'from pySLM.definitions import SLM\n'), ((29700, 29750), 'numpy.arange', 'np.arange', (['(-self.SLM.width / 2)', '(self.SLM.width / 2)'], {}), '(-self.SLM.width / 2, self.SLM.width / 2)\n', (29709, 29750), True, 'import numpy as np\n'), ((29778, 29830), 'numpy.arange', 'np.arange', (['(-self.SLM.height / 2)', '(self.SLM.height / 2)'], {}), '(-self.SLM.height / 2, self.SLM.height / 2)\n', (29787, 29830), True, 'import numpy as np\n'), ((37838, 37883), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (37846, 37883), True, 'import numpy as np\n'), ((38777, 38793), 'numpy.where', 'np.where', (['(m == 1)'], {}), '(m == 1)\n', (38785, 38793), True, 'import numpy as np\n'), ((39135, 39151), 'numpy.where', 'np.where', (['(m == 1)'], {}), '(m == 1)\n', (39143, 39151), True, 'import numpy as np\n'), ((39503, 39519), 'numpy.where', 'np.where', (['(m == 1)'], {}), '(m == 1)\n', (39511, 39519), True, 'import numpy as np\n'), ((44186, 44293), 'tkinter.filedialog.asksaveasfilename', 'filedialog.asksaveasfilename', ([], {'parent': 'self.master', 'filetypes': 'self.save_filetypes', 'title': '"""Save map as.."""'}), "(parent=self.master, filetypes=self.\n save_filetypes, title='Save map as..')\n", (44214, 44293), False, 'from tkinter import filedialog\n'), ((44394, 44434), 'pygame.surfarray.make_surface', 'pygame.surfarray.make_surface', (['phase_map'], {}), '(phase_map)\n', (44423, 44434), False, 'import pygame, os, time, pickle\n'), ((44448, 44481), 'pygame.image.save', 'pygame.image.save', (['surf', 'filename'], {}), '(surf, filename)\n', (44465, 44481), False, 'import pygame, os, time, pickle\n'), ((47822, 47876), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.single_mask', 'daemon': '(True)'}), '(target=self.single_mask, daemon=True)\n', (47838, 
47876), False, 'import threading\n'), ((55006, 55029), 'os.path.basename', 'os.path.basename', (['mname'], {}), '(mname)\n', (55022, 55029), False, 'import pygame, os, time, pickle\n'), ((56459, 56511), 'numpy.roll', 'np.roll', (['self.image'], {'shift': '(-self.center_step)', 'axis': '(1)'}), '(self.image, shift=-self.center_step, axis=1)\n', (56466, 56511), True, 'import numpy as np\n'), ((64440, 64547), 'tkinter.filedialog.asksaveasfilename', 'filedialog.asksaveasfilename', ([], {'parent': 'self.master', 'filetypes': 'self.save_filetypes', 'title': '"""Save map as.."""'}), "(parent=self.master, filetypes=self.\n save_filetypes, title='Save map as..')\n", (64468, 64547), False, 'from tkinter import filedialog\n'), ((64648, 64688), 'pygame.surfarray.make_surface', 'pygame.surfarray.make_surface', (['phase_map'], {}), '(phase_map)\n', (64677, 64688), False, 'import pygame, os, time, pickle\n'), ((64702, 64735), 'pygame.image.save', 'pygame.image.save', (['surf', 'filename'], {}), '(surf, filename)\n', (64719, 64735), False, 'import pygame, os, time, pickle\n'), ((67529, 67565), 'numpy.exp', 'np.exp', (['(1.0j * zernike.T * magnitude)'], {}), '(1.0j * zernike.T * magnitude)\n', (67535, 67565), True, 'import numpy as np\n'), ((3780, 3840), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'title': '"""Select phase curve(.npy)"""'}), "(title='Select phase curve(.npy)')\n", (3806, 3840), False, 'from tkinter import filedialog\n'), ((3862, 3875), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (3869, 3875), True, 'import numpy as np\n'), ((29104, 29114), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (29111, 29114), True, 'import numpy as np\n'), ((29182, 29199), 'numpy.sin', 'np.sin', (['(2 * theta)'], {}), '(2 * theta)\n', (29188, 29199), True, 'import numpy as np\n'), ((29266, 29283), 'numpy.cos', 'np.cos', (['(2 * theta)'], {}), '(2 * theta)\n', (29272, 29283), True, 'import numpy as np\n'), ((29355, 29372), 'numpy.sin', 'np.sin', 
(['(2 * theta)'], {}), '(2 * theta)\n', (29361, 29372), True, 'import numpy as np\n'), ((29490, 29503), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (29496, 29503), True, 'import numpy as np\n'), ((29546, 29559), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (29552, 29559), True, 'import numpy as np\n'), ((41363, 41374), 'numpy.angle', 'np.angle', (['z'], {}), '(z)\n', (41371, 41374), True, 'import numpy as np\n'), ((41500, 41509), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (41506, 41509), True, 'import numpy as np\n'), ((41542, 41551), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (41548, 41551), True, 'import numpy as np\n'), ((47978, 48032), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.binary_mask', 'daemon': '(True)'}), '(target=self.binary_mask, daemon=True)\n', (47994, 48032), False, 'import threading\n'), ((55163, 55208), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (55171, 55208), True, 'import numpy as np\n'), ((56629, 56680), 'numpy.roll', 'np.roll', (['self.image'], {'shift': 'self.center_step', 'axis': '(1)'}), '(self.image, shift=self.center_step, axis=1)\n', (56636, 56680), True, 'import numpy as np\n'), ((66108, 66144), 'numpy.exp', 'np.exp', (['(1.0j * zernike.T * magnitude)'], {}), '(1.0j * zernike.T * magnitude)\n', (66114, 66144), True, 'import numpy as np\n'), ((66809, 66854), 'numpy.zeros', 'np.zeros', (['self.SLM.dimensions'], {'dtype': 'np.uint8'}), '(self.SLM.dimensions, dtype=np.uint8)\n', (66817, 66854), True, 'import numpy as np\n'), ((29164, 29174), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (29171, 29174), True, 'import numpy as np\n'), ((29242, 29252), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (29249, 29252), True, 'import numpy as np\n'), ((29327, 29338), 'numpy.sqrt', 'np.sqrt', (['(10)'], {}), '(10)\n', (29334, 29338), True, 'import numpy as np\n'), ((48134, 48188), 'threading.Thread', 'threading.Thread', ([], 
{'target': 'self.triple_mask', 'daemon': '(True)'}), '(target=self.triple_mask, daemon=True)\n', (48150, 48188), False, 'import threading\n'), ((49053, 49068), 'tkinter._setit', '_setit', (['var', '""""""'], {}), "(var, '')\n", (49059, 49068), False, 'from tkinter import _setit\n'), ((49182, 49201), 'tkinter._setit', '_setit', (['var', 'option'], {}), '(var, option)\n', (49188, 49201), False, 'from tkinter import _setit\n'), ((56798, 56850), 'numpy.roll', 'np.roll', (['self.image'], {'shift': '(-self.center_step)', 'axis': '(0)'}), '(self.image, shift=-self.center_step, axis=0)\n', (56805, 56850), True, 'import numpy as np\n'), ((63402, 63419), 'numpy.ndenumerate', 'np.ndenumerate', (['p'], {}), '(p)\n', (63416, 63419), True, 'import numpy as np\n'), ((66473, 66489), 'numpy.exp', 'np.exp', (['(1.0j * p)'], {}), '(1.0j * p)\n', (66479, 66489), True, 'import numpy as np\n'), ((66488, 66524), 'numpy.exp', 'np.exp', (['(1.0j * zernike.T * magnitude)'], {}), '(1.0j * zernike.T * magnitude)\n', (66494, 66524), True, 'import numpy as np\n'), ((48290, 48353), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.make_vortex_callback', 'daemon': '(True)'}), '(target=self.make_vortex_callback, daemon=True)\n', (48306, 48353), False, 'import threading\n'), ((56969, 57020), 'numpy.roll', 'np.roll', (['self.image'], {'shift': 'self.center_step', 'axis': '(0)'}), '(self.image, shift=self.center_step, axis=0)\n', (56976, 57020), True, 'import numpy as np\n'), ((66541, 66552), 'numpy.angle', 'np.angle', (['p'], {}), '(p)\n', (66549, 66552), True, 'import numpy as np\n'), ((47189, 47200), 'numpy.log10', 'np.log10', (['I'], {}), '(I)\n', (47197, 47200), True, 'import numpy as np\n'), ((48455, 48511), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.rotating_mask', 'daemon': '(True)'}), '(target=self.rotating_mask, daemon=True)\n', (48471, 48511), False, 'import threading\n'), ((47319, 47330), 'numpy.log10', 'np.log10', (['I'], {}), '(I)\n', (47327, 47330), True, 
'import numpy as np\n')] |
import os
import numpy as np
import torch
from .chexpert.data_loader import load_partition_data_chexpert
import logging
def load(args):
return load_synthetic_data(args)
def combine_batches(batches):
full_x = torch.from_numpy(np.asarray([])).float()
full_y = torch.from_numpy(np.asarray([])).long()
for (batched_x, batched_y) in batches:
full_x = torch.cat((full_x, batched_x), 0)
full_y = torch.cat((full_y, batched_y), 0)
return [(full_x, full_y)]
def load_synthetic_data(args):
dataset_name = str(args.dataset).lower()
# check if the centralized training is enabled
centralized = True if (args.client_num_in_total == 1 and args.training_type != "cross_silo") else False
# check if the full-batch training is enabled
args_batch_size = args.batch_size
if args.batch_size <= 0:
full_batch = True
args.batch_size = 128 # temporary batch size
else:
full_batch = False
if dataset_name == "chexpert":
# load chexpert dataset
logging.info("load_data. dataset_name = %s" % dataset_name)
(
train_data_num,
test_data_num,
train_data_global,
test_data_global,
train_data_local_num_dict,
train_data_local_dict,
test_data_local_dict,
class_num,
) = load_partition_data_chexpert(
data_dir=args.data_cache_dir,
partition_method="random",
partition_alpha=None,
client_number=args.client_num_in_total,
batch_size=args.batch_size,
)
if centralized:
train_data_local_num_dict = {
0: sum(user_train_data_num for user_train_data_num in train_data_local_num_dict.values())
}
train_data_local_dict = {
0: [batch for cid in sorted(train_data_local_dict.keys()) for batch in train_data_local_dict[cid]]
}
test_data_local_dict = {
0: [batch for cid in sorted(test_data_local_dict.keys()) for batch in test_data_local_dict[cid]]
}
args.client_num_in_total = 1
if full_batch:
train_data_global = combine_batches(train_data_global)
test_data_global = combine_batches(test_data_global)
train_data_local_dict = {
cid: combine_batches(train_data_local_dict[cid]) for cid in train_data_local_dict.keys()
}
test_data_local_dict = {cid: combine_batches(test_data_local_dict[cid]) for cid in test_data_local_dict.keys()}
args.batch_size = args_batch_size
dataset = [
train_data_num,
test_data_num,
train_data_global,
test_data_global,
train_data_local_num_dict,
train_data_local_dict,
test_data_local_dict,
class_num,
]
return dataset, class_num
| [
"numpy.asarray",
"logging.info",
"torch.cat"
] | [((376, 409), 'torch.cat', 'torch.cat', (['(full_x, batched_x)', '(0)'], {}), '((full_x, batched_x), 0)\n', (385, 409), False, 'import torch\n'), ((427, 460), 'torch.cat', 'torch.cat', (['(full_y, batched_y)', '(0)'], {}), '((full_y, batched_y), 0)\n', (436, 460), False, 'import torch\n'), ((1039, 1098), 'logging.info', 'logging.info', (["('load_data. dataset_name = %s' % dataset_name)"], {}), "('load_data. dataset_name = %s' % dataset_name)\n", (1051, 1098), False, 'import logging\n'), ((239, 253), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (249, 253), True, 'import numpy as np\n'), ((293, 307), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (303, 307), True, 'import numpy as np\n')] |
import numpy as np
# divide matrix by row-sums
mat = np.mat([[4,2],[2,3]])
print(mat/mat.sum(axis=1))
# divide matrix by col-sums
mat = np.mat([[1,2],[3,4]])
print(mat/mat.sum(axis=0)) | [
"numpy.mat"
] | [((55, 79), 'numpy.mat', 'np.mat', (['[[4, 2], [2, 3]]'], {}), '([[4, 2], [2, 3]])\n', (61, 79), True, 'import numpy as np\n'), ((139, 163), 'numpy.mat', 'np.mat', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (145, 163), True, 'import numpy as np\n')] |
import math
import srwlib
import numpy as np
from srwlib import *
def createGsnSrcSRW(sigrW,propLen,pulseE,poltype,phE=10e3,sampFact=15,mx=0,my=0):
"""
#sigrW: beam size at waist [m]
#propLen: propagation length [m] required by SRW to create numerical Gaussian
#pulseE: energy per pulse [J]
#poltype: polarization type (0=linear horizontal, 1=linear vertical, 2=linear 45 deg, 3=linear 135 deg, 4=circular right, 5=circular left, 6=total)
#phE: photon energy [eV]
#sampFact: sampling factor to increase mesh density
"""
constConvRad = 1.23984186e-06/(4*3.1415926536) ##conversion from energy to 1/wavelength
rmsAngDiv = constConvRad/(phE*sigrW) ##RMS angular divergence [rad]
sigrL=math.sqrt(sigrW**2+(propLen*rmsAngDiv)**2) ##required RMS size to produce requested RMS beam size after propagation by propLen
#***********Gaussian Beam Source
GsnBm = SRWLGsnBm() #Gaussian Beam structure (just parameters)
GsnBm.x = 0 #Transverse Positions of Gaussian Beam Center at Waist [m]
GsnBm.y = 0
GsnBm.z = propLen #Longitudinal Position of Waist [m]
GsnBm.xp = 0 #Average Angles of Gaussian Beam at Waist [rad]
GsnBm.yp = 0
GsnBm.avgPhotEn = phE #Photon Energy [eV]
GsnBm.pulseEn = pulseE #Energy per Pulse [J] - to be corrected
GsnBm.repRate = 1 #Rep. Rate [Hz] - to be corrected
GsnBm.polar = poltype #1- linear horizontal?
GsnBm.sigX = sigrW #Horiz. RMS size at Waist [m]
GsnBm.sigY = GsnBm.sigX #Vert. RMS size at Waist [m]
GsnBm.sigT = 10e-15 #Pulse duration [s] (not used?)
GsnBm.mx = mx #Transverse Gauss-Hermite Mode Orders
GsnBm.my = my
#***********Initial Wavefront
wfr = SRWLWfr() #Initial Electric Field Wavefront
wfr.allocate(1, 1000, 1000) #Numbers of points vs Photon Energy (1), Horizontal and Vertical Positions (dummy)
wfr.mesh.zStart = 0.0 #Longitudinal Position [m] at which initial Electric Field has to be calculated, i.e. the position of the first optical element
wfr.mesh.eStart = GsnBm.avgPhotEn #Initial Photon Energy [eV]
wfr.mesh.eFin = GsnBm.avgPhotEn #Final Photon Energy [eV]
wfr.unitElFld = 1 #Electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)
distSrc = wfr.mesh.zStart - GsnBm.z
#Horizontal and Vertical Position Range for the Initial Wavefront calculation
#can be used to simulate the First Aperture (of M1)
#firstHorAp = 8.*rmsAngDiv*distSrc #[m]
xAp = 8.*sigrL
yAp = xAp #[m]
wfr.mesh.xStart = -0.5*xAp #Initial Horizontal Position [m]
wfr.mesh.xFin = 0.5*xAp #Final Horizontal Position [m]
wfr.mesh.yStart = -0.5*yAp #Initial Vertical Position [m]
wfr.mesh.yFin = 0.5*yAp #Final Vertical Position [m]
sampFactNxNyForProp = sampFact #sampling factor for adjusting nx, ny (effective if > 0)
arPrecPar = [sampFactNxNyForProp]
srwl.CalcElecFieldGaussian(wfr, GsnBm, arPrecPar)
##Beamline to propagate to waist
optDriftW=SRWLOptD(propLen)
propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
optBLW = SRWLOptC([optDriftW],[propagParDrift])
#wfrW=deepcopy(wfr)
srwl.PropagElecField(wfr, optBLW)
return wfr
def createDriftLensBL2(Length,f):
"""
#Create beamline for propagation from end of crystal to end of cavity and through lens (representing a mirror)
#First propagate by Length, then through lens with focal length f
#Length: drift length [m]
#f: focal length
"""
#f=Lc/4 + df
optDrift=SRWLOptD(Length)
optLens = SRWLOptL(f, f)
propagParLens = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
propagParDrift = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
#propagParLens = [0, 0, 1., 0, 0, 1.4, 2., 1.4, 2., 0, 0, 0]
#propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
DriftLensBL = SRWLOptC([optDrift,optLens],[propagParDrift,propagParLens])
return DriftLensBL
def createDriftLensBL(Lc,df):
"""
#Create beamline for propagation from center of cell to end and through lens (representing a mirror)
#First propagate Lc/2, then through lens with focal length Lc/2 + df
#Lc: cavity length [m]
#df: focusing error
"""
f=Lc/4 + df
optDrift=SRWLOptD(Lc/2)
optLens = SRWLOptL(f, f)
propagParLens = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
propagParDrift = [0, 0, 1., 1, 0, 1., 1., 1., 1., 0, 0, 0]
#propagParLens = [0, 0, 1., 0, 0, 1.4, 2., 1.4, 2., 0, 0, 0]
#propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
DriftLensBL = SRWLOptC([optDrift,optLens],[propagParDrift,propagParLens])
return DriftLensBL
def createDriftBL(Lc):
"""
#Create drift beamline container that propagates the wavefront through half the cavity
#Lc is the length of the cavity
"""
optDrift=SRWLOptD(Lc/2)
propagParDrift = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
#propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
DriftBL = SRWLOptC([optDrift],[propagParDrift])
return DriftBL
def createBL1to1(L,dfof=0):
"""
##Define beamline geometric variables.
#L: drift length before and after lens
#dfof: focal length variation factor (=0 for no variation; can be positive or negative)
"""
##Drift lengths between elements beginning with source to 1st crystal and ending with last crystal to start of undulator.
##focal length in meters
f=(L/2)*(1+dfof)
#Lens
optLens = SRWLOptL(f, f)
#Drift spaces
optDrift1=SRWLOptD(L)
optDrift2=SRWLOptD(L)
#***********Wavefront Propagation Parameters:
#[0]: Auto-Resize (1) or not (0) Before propagation
#[1]: Auto-Resize (1) or not (0) After propagation
#[2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[3] Type of the propagator:
#0 - Standard - Fresnel (it uses two FFTs);
#1 - Quadratic Term - with semi-analytical treatment of the quadratic (leading) phase terms (it uses two FFTs);
#2 - Quadratic Term - Special - special case;
#3 - From Waist - good for propagation from "waist" over a large distance (it uses one FFT);
#4 - To Waist - good for propagation to a "waist" (e.g. some 2D focus of an optical system) over some distance (it uses one FFT).
#[4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[6]: Horizontal Resolution modification factor at Resizing
#[7]: Vertical Range modification factor at Resizing
#[8]: Vertical Resolution modification factor at Resizing
#[9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#propagParLens = [0, 0, 1., 0, 0, 1., 1.5, 1., 1.5, 0, 0, 0]
#propagParDrift = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
propagParLens = [0, 0, 1., 0, 0, 1.4, 2., 1.4, 2., 0, 0, 0]
propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
##Beamline consruction
optBL1to1 = SRWLOptC([optDrift1,optLens,optDrift2],[propagParDrift,propagParLens,propagParDrift])
return optBL1to1
def createReflectionOffFocusingMirrorBL(L,f,strDataFolderName,strMirSurfHeightErrInFileName):
"""
#Create an SRW beamline container that will propagate a length L
#then reflect off a flat mirror followed by a lens. Finally, propagate by L again.
#L: length of propagation [m]
#f: focal length of mirror [m]
#strDataFolderName: Folder name where mirror data file is
#strMirSurfHeightErrInFileName: File name for mirror slope error file
#Assuming waist to waist propagation, we want f~L/2 (Note that this isn't a perfect identity
#map in phase space due to the Rayleigh length of the mode)
"""
#Drift
optDrift1=SRWLOptD(L)
#Transmission element to simulate mirror slope error
#angM1 = np.pi #Incident Angle of M1 [rad] ( 1.8e-3 in Ex. 9 )
#angM1 = 3.14 #Incident Angle of M1 [rad]
angM1 = 1.e-2
heightProfData = srwl_uti_read_data_cols(os.path.join(os.getcwd(), strDataFolderName, strMirSurfHeightErrInFileName), _str_sep='\t', _i_col_start=0, _i_col_end=1)
opTrErM1 = srwl_opt_setup_surf_height_1d(heightProfData, _dim='y', _ang=angM1, _amp_coef=1) #_amp_coef=1e4
#print(' Saving optical path difference data to file (for viewing/debugging) ... ', end='')
#opPathDifErM1 = opTrErM1.get_data(3, 3)
#srwl_uti_save_intens_ascii(opPathDifErM1, opTrErM1.mesh, os.path.join(os.getcwd(), strDataFolderName, strMirOptPathDifOutFileName01), 0,
# ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Diff.'], _arUnits=['', 'm', 'm', 'm'])
#Lens
optLens = SRWLOptL(f, f)
#Propagation parameters
propagParLens = [0, 0, 1., 0, 0, 1.4, 2., 1.4, 2., 0, 0, 0]
propagParDrift = [0, 0, 1., 0, 0, 1.1, 1.2, 1.1, 1.2, 0, 0, 0]
#propagParDrift = [0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
#propagParLens = [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
prPar0 = [0, 0, 1., 1, 0, 1., 1., 1., 1., 0, 0, 0]
#Construct beamline
optBL = SRWLOptC([optDrift1,opTrErM1,optLens,optDrift1],[propagParDrift,prPar0,propagParLens,propagParDrift])
#optBL = SRWLOptC([optDrift1,optLens,optDrift1],[propagParDrift,propagParLens,propagParDrift])
return optBL
def createABCDbeamline(A,B,C,D):
"""
#Use decomposition of ABCD matrix into kick-drift-kick Pei-Huang 2017 (https://arxiv.org/abs/1709.06222)
#Construct corresponding SRW beamline container object
#A,B,C,D are 2x2 matrix components.
"""
f1= B/(1-A)
L = B
f2 = B/(1-D)
optLens1 = SRWLOptL(f1, f1)
optDrift=SRWLOptD(L)
optLens2 = SRWLOptL(f2, f2)
propagParLens1 = [0, 0, 1., 0, 0, 1, 1, 1, 1, 0, 0, 0]
propagParDrift = [0, 0, 1., 0, 0, 1, 1, 1, 1, 0, 0, 0]
propagParLens2 = [0, 0, 1., 0, 0, 1, 1, 1, 1, 0, 0, 0]
optBL = SRWLOptC([optLens1,optDrift,optLens2],[propagParLens1,propagParDrift,propagParLens2])
return optBL
def createCrystal(n0,n2,L_cryst):
"""
#Create a set of optical elements representing a crystal.
#Treat as an optical duct
#ABCD matrix found here: https://www.rp-photonics.com/abcd_matrix.html
#n(r) = n0 - 0.5 n2 r^2
#n0: Index of refraction along the optical axis
#n2: radial variation of index of refraction
"""
if n2==0:
optBL=createDriftBL(2*L_cryst) #Note that this drift function divides length by 2
#print("L_cryst/n0=",L_cryst/n0)
else:
gamma = np.sqrt(n2/n0)
A = np.cos(gamma*L_cryst)
B = (1/(gamma))*np.sin(gamma*L_cryst)
C = -gamma*np.sin(gamma*L_cryst)
D = np.cos(gamma*L_cryst)
optBL=createABCDbeamline(A,B,C,D)
return optBL
def rmsWavefrontIntensity(wfr):
"""
#Compute rms values from a wavefront object
"""
IntensityArray2D = array('f', [0]*wfr.mesh.nx*wfr.mesh.ny) #"flat" array to take 2D intensity data
srwlib.srwl.CalcIntFromElecField(IntensityArray2D, wfr, 6, 0, 3, wfr.mesh.eStart, 0, 0) #extracts intensity
##Reshaping electric field data from flat to 2D array
IntensityArray2D = np.array(IntensityArray2D).reshape((wfr.mesh.nx, wfr.mesh.ny), order='C')
xvals=np.linspace(wfr.mesh.xStart,wfr.mesh.xFin,wfr.mesh.nx)
yvals=np.linspace(wfr.mesh.yStart,wfr.mesh.yFin,wfr.mesh.ny)
return (IntensityArray2D, *rmsIntensity(IntensityArray2D,xvals,yvals))
def rmsIntensity(IntArray,xvals,yvals):
"""
Compute rms values in x and y from array
#IntArray is a 2D array representation of a function
#xvals represents the horizontal coordinates
#yvals represents the vertical coordinates
"""
datax=np.sum(IntArray,axis=1)
datay=np.sum(IntArray,axis=0)
sxsq=sum(datax*xvals*xvals)/sum(datax)
xavg=sum(datax*xvals)/sum(datax)
sx=math.sqrt(sxsq-xavg*xavg)
sysq=sum(datay*yvals*yvals)/sum(datay)
yavg=sum(datay*yvals)/sum(datay)
sy=math.sqrt(sysq-yavg*yavg)
return sx, sy, xavg, yavg
def maxWavefrontIntensity(wfr):
"""
Compute maximum value of wavefront intensity
"""
IntensityArray2D = array('f', [0]*wfr.mesh.nx*wfr.mesh.ny) #"flat" array to take 2D intensity data
srwlib.srwl.CalcIntFromElecField(IntensityArray2D, wfr, 6, 0, 3, wfr.mesh.eStart, 0, 0) #extracts intensity
return(np.max(IntensityArray2D))
| [
"numpy.sqrt",
"math.sqrt",
"numpy.max",
"numpy.sum",
"numpy.linspace",
"numpy.array",
"numpy.cos",
"numpy.sin",
"srwlib.srwl.CalcIntFromElecField"
] | [((742, 792), 'math.sqrt', 'math.sqrt', (['(sigrW ** 2 + (propLen * rmsAngDiv) ** 2)'], {}), '(sigrW ** 2 + (propLen * rmsAngDiv) ** 2)\n', (751, 792), False, 'import math\n'), ((11228, 11320), 'srwlib.srwl.CalcIntFromElecField', 'srwlib.srwl.CalcIntFromElecField', (['IntensityArray2D', 'wfr', '(6)', '(0)', '(3)', 'wfr.mesh.eStart', '(0)', '(0)'], {}), '(IntensityArray2D, wfr, 6, 0, 3, wfr.mesh.\n eStart, 0, 0)\n', (11260, 11320), False, 'import srwlib\n'), ((11501, 11557), 'numpy.linspace', 'np.linspace', (['wfr.mesh.xStart', 'wfr.mesh.xFin', 'wfr.mesh.nx'], {}), '(wfr.mesh.xStart, wfr.mesh.xFin, wfr.mesh.nx)\n', (11512, 11557), True, 'import numpy as np\n'), ((11566, 11622), 'numpy.linspace', 'np.linspace', (['wfr.mesh.yStart', 'wfr.mesh.yFin', 'wfr.mesh.ny'], {}), '(wfr.mesh.yStart, wfr.mesh.yFin, wfr.mesh.ny)\n', (11577, 11622), True, 'import numpy as np\n'), ((11961, 11985), 'numpy.sum', 'np.sum', (['IntArray'], {'axis': '(1)'}), '(IntArray, axis=1)\n', (11967, 11985), True, 'import numpy as np\n'), ((11995, 12019), 'numpy.sum', 'np.sum', (['IntArray'], {'axis': '(0)'}), '(IntArray, axis=0)\n', (12001, 12019), True, 'import numpy as np\n'), ((12106, 12135), 'math.sqrt', 'math.sqrt', (['(sxsq - xavg * xavg)'], {}), '(sxsq - xavg * xavg)\n', (12115, 12135), False, 'import math\n'), ((12220, 12249), 'math.sqrt', 'math.sqrt', (['(sysq - yavg * yavg)'], {}), '(sysq - yavg * yavg)\n', (12229, 12249), False, 'import math\n'), ((12481, 12573), 'srwlib.srwl.CalcIntFromElecField', 'srwlib.srwl.CalcIntFromElecField', (['IntensityArray2D', 'wfr', '(6)', '(0)', '(3)', 'wfr.mesh.eStart', '(0)', '(0)'], {}), '(IntensityArray2D, wfr, 6, 0, 3, wfr.mesh.\n eStart, 0, 0)\n', (12513, 12573), False, 'import srwlib\n'), ((12600, 12624), 'numpy.max', 'np.max', (['IntensityArray2D'], {}), '(IntensityArray2D)\n', (12606, 12624), True, 'import numpy as np\n'), ((10794, 10810), 'numpy.sqrt', 'np.sqrt', (['(n2 / n0)'], {}), '(n2 / n0)\n', (10801, 10810), True, 'import numpy as 
np\n'), ((10821, 10844), 'numpy.cos', 'np.cos', (['(gamma * L_cryst)'], {}), '(gamma * L_cryst)\n', (10827, 10844), True, 'import numpy as np\n'), ((10942, 10965), 'numpy.cos', 'np.cos', (['(gamma * L_cryst)'], {}), '(gamma * L_cryst)\n', (10948, 10965), True, 'import numpy as np\n'), ((10867, 10890), 'numpy.sin', 'np.sin', (['(gamma * L_cryst)'], {}), '(gamma * L_cryst)\n', (10873, 10890), True, 'import numpy as np\n'), ((10908, 10931), 'numpy.sin', 'np.sin', (['(gamma * L_cryst)'], {}), '(gamma * L_cryst)\n', (10914, 10931), True, 'import numpy as np\n'), ((11417, 11443), 'numpy.array', 'np.array', (['IntensityArray2D'], {}), '(IntensityArray2D)\n', (11425, 11443), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
#coiefficient calculation
def regress(x, x_s, t_s, M, N,lamda = 0):
order_list = np.arange((M + 1))
order_list = order_list[ :, np.newaxis]
exponent = np.tile(order_list,[1,N])
h = np.power(x_s,exponent)
a = np.matmul(h, np.transpose(h)) + lamda*np.eye(M+1)
b = np.matmul(h, t_s)
w = np.linalg.solve(a, b) #calculate the coefficent
exponent2 = np.tile(order_list,[1,200])
h2 = np.power(x,exponent2)
p = np.matmul(w, h2)
return p
##task 1
x = np.linspace(0, 1, 200)
t = np.sin(np.pi*x*2)
N = 10
sigma = 0.2;
x_10 = np.linspace(0, 1, N)
t_10 = np.sin(np.pi*x_10*2) + np.random.normal(0,sigma,N)
plt.figure(1)
plt.plot(x, t, 'g', x_10, t_10, 'bo',linewidth = 2)
plt.ylabel('t',rotation='horizontal')
plt.xlabel('x')
plt.savefig('1.png', dpi=300)
##task 2
M = 3
p3_10 = regress(x, x_10, t_10, M, N)
M = 9
p9_10 = regress(x, x_10, t_10, M, N)
plt.figure(2)
plt.plot(x, t, 'g', x_10, t_10, 'bo', x, p3_10, 'r', linewidth = 2)
plt.ylabel('t',rotation='horizontal')
plt.xlabel('x')
plt.text(0.7, 0.7,'M=3', fontsize=16)
plt.savefig('2.png', dpi=300)
plt.figure(3)
plt.plot(x, t, 'g', x_10, t_10, 'bo', x, p9_10, 'r', linewidth = 2)
plt.ylabel('t',rotation='horizontal')
plt.xlabel('x')
plt.text(0.7, 0.7,'M=9', fontsize=16)
plt.savefig('3.png', dpi=300)
##task3
N = 15
x_15 = np.linspace(0, 1, N)
t_15 = np.sin(np.pi*x_15*2) + np.random.normal(0,sigma, N)
M = 9
p9_15 = regress(x, x_15, t_15, M, N)
N = 100
x_100 = np.linspace(0, 1, N)
t_100 = np.sin(np.pi*x_100*2) + np.random.normal(0,sigma, N)
M = 9
p9_100 = regress(x, x_100, t_100, M, N)
plt.figure(4)
plt.plot(x, t, 'g', x_15, t_15, 'bo', x, p9_15, 'r', linewidth = 2)
plt.ylabel('t',rotation='horizontal')
plt.xlabel('x')
plt.text(0.7, 0.7,'N=15', fontsize=16)
plt.savefig('4.png', dpi=300)
plt.figure(5)
plt.plot(x, t, 'g', x_100, t_100, 'bo', x, p9_100, 'r', linewidth = 2)
plt.ylabel('t',rotation='horizontal')
plt.xlabel('x')
plt.text(0.7, 0.7,'N=100', fontsize=16)
plt.savefig('5.png', dpi=300)
##task4
N = 10
M = 9
p9_10 = regress(x, x_10, t_10, M, N, np.exp(-18))
plt.figure(6)
plt.plot(x, t, 'g', x_10, t_10, 'bo', x, p9_10, 'r', linewidth = 2)
plt.ylabel('t',rotation='horizontal')
plt.xlabel('x')
plt.text(0.7, 0.7,'ln$\lambda$ = -18', fontsize=16)
plt.savefig('6.png', dpi=300)
plt.show()
| [
"numpy.random.normal",
"matplotlib.pyplot.text",
"numpy.tile",
"numpy.linalg.solve",
"matplotlib.pyplot.savefig",
"numpy.eye",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.... | [((549, 571), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(200)'], {}), '(0, 1, 200)\n', (560, 571), True, 'import numpy as np\n'), ((578, 599), 'numpy.sin', 'np.sin', (['(np.pi * x * 2)'], {}), '(np.pi * x * 2)\n', (584, 599), True, 'import numpy as np\n'), ((624, 644), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (635, 644), True, 'import numpy as np\n'), ((705, 718), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (715, 718), True, 'import matplotlib.pyplot as plt\n'), ((719, 769), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 't', '"""g"""', 'x_10', 't_10', '"""bo"""'], {'linewidth': '(2)'}), "(x, t, 'g', x_10, t_10, 'bo', linewidth=2)\n", (727, 769), True, 'import matplotlib.pyplot as plt\n'), ((773, 811), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {'rotation': '"""horizontal"""'}), "('t', rotation='horizontal')\n", (783, 811), True, 'import matplotlib.pyplot as plt\n'), ((811, 826), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (821, 826), True, 'import matplotlib.pyplot as plt\n'), ((827, 856), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""1.png"""'], {'dpi': '(300)'}), "('1.png', dpi=300)\n", (838, 856), True, 'import matplotlib.pyplot as plt\n'), ((957, 970), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (967, 970), True, 'import matplotlib.pyplot as plt\n'), ((971, 1036), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 't', '"""g"""', 'x_10', 't_10', '"""bo"""', 'x', 'p3_10', '"""r"""'], {'linewidth': '(2)'}), "(x, t, 'g', x_10, t_10, 'bo', x, p3_10, 'r', linewidth=2)\n", (979, 1036), True, 'import matplotlib.pyplot as plt\n'), ((1041, 1079), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {'rotation': '"""horizontal"""'}), "('t', rotation='horizontal')\n", (1051, 1079), True, 'import matplotlib.pyplot as plt\n'), ((1079, 1094), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1089, 1094), True, 
'import matplotlib.pyplot as plt\n'), ((1095, 1133), 'matplotlib.pyplot.text', 'plt.text', (['(0.7)', '(0.7)', '"""M=3"""'], {'fontsize': '(16)'}), "(0.7, 0.7, 'M=3', fontsize=16)\n", (1103, 1133), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1162), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""2.png"""'], {'dpi': '(300)'}), "('2.png', dpi=300)\n", (1144, 1162), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1177), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (1174, 1177), True, 'import matplotlib.pyplot as plt\n'), ((1178, 1243), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 't', '"""g"""', 'x_10', 't_10', '"""bo"""', 'x', 'p9_10', '"""r"""'], {'linewidth': '(2)'}), "(x, t, 'g', x_10, t_10, 'bo', x, p9_10, 'r', linewidth=2)\n", (1186, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1248, 1286), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {'rotation': '"""horizontal"""'}), "('t', rotation='horizontal')\n", (1258, 1286), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1301), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1296, 1301), True, 'import matplotlib.pyplot as plt\n'), ((1302, 1340), 'matplotlib.pyplot.text', 'plt.text', (['(0.7)', '(0.7)', '"""M=9"""'], {'fontsize': '(16)'}), "(0.7, 0.7, 'M=9', fontsize=16)\n", (1310, 1340), True, 'import matplotlib.pyplot as plt\n'), ((1340, 1369), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""3.png"""'], {'dpi': '(300)'}), "('3.png', dpi=300)\n", (1351, 1369), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1414), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1405, 1414), True, 'import numpy as np\n'), ((1539, 1559), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1550, 1559), True, 'import numpy as np\n'), ((1672, 1685), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (1682, 1685), True, 'import matplotlib.pyplot as plt\n'), ((1686, 1751), 
'matplotlib.pyplot.plot', 'plt.plot', (['x', 't', '"""g"""', 'x_15', 't_15', '"""bo"""', 'x', 'p9_15', '"""r"""'], {'linewidth': '(2)'}), "(x, t, 'g', x_15, t_15, 'bo', x, p9_15, 'r', linewidth=2)\n", (1694, 1751), True, 'import matplotlib.pyplot as plt\n'), ((1756, 1794), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {'rotation': '"""horizontal"""'}), "('t', rotation='horizontal')\n", (1766, 1794), True, 'import matplotlib.pyplot as plt\n'), ((1794, 1809), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1804, 1809), True, 'import matplotlib.pyplot as plt\n'), ((1810, 1849), 'matplotlib.pyplot.text', 'plt.text', (['(0.7)', '(0.7)', '"""N=15"""'], {'fontsize': '(16)'}), "(0.7, 0.7, 'N=15', fontsize=16)\n", (1818, 1849), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1878), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""4.png"""'], {'dpi': '(300)'}), "('4.png', dpi=300)\n", (1860, 1878), True, 'import matplotlib.pyplot as plt\n'), ((1880, 1893), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (1890, 1893), True, 'import matplotlib.pyplot as plt\n'), ((1894, 1962), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 't', '"""g"""', 'x_100', 't_100', '"""bo"""', 'x', 'p9_100', '"""r"""'], {'linewidth': '(2)'}), "(x, t, 'g', x_100, t_100, 'bo', x, p9_100, 'r', linewidth=2)\n", (1902, 1962), True, 'import matplotlib.pyplot as plt\n'), ((1967, 2005), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {'rotation': '"""horizontal"""'}), "('t', rotation='horizontal')\n", (1977, 2005), True, 'import matplotlib.pyplot as plt\n'), ((2005, 2020), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2015, 2020), True, 'import matplotlib.pyplot as plt\n'), ((2021, 2061), 'matplotlib.pyplot.text', 'plt.text', (['(0.7)', '(0.7)', '"""N=100"""'], {'fontsize': '(16)'}), "(0.7, 0.7, 'N=100', fontsize=16)\n", (2029, 2061), True, 'import matplotlib.pyplot as plt\n'), ((2061, 2090), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['"""5.png"""'], {'dpi': '(300)'}), "('5.png', dpi=300)\n", (2072, 2090), True, 'import matplotlib.pyplot as plt\n'), ((2165, 2178), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {}), '(6)\n', (2175, 2178), True, 'import matplotlib.pyplot as plt\n'), ((2179, 2244), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 't', '"""g"""', 'x_10', 't_10', '"""bo"""', 'x', 'p9_10', '"""r"""'], {'linewidth': '(2)'}), "(x, t, 'g', x_10, t_10, 'bo', x, p9_10, 'r', linewidth=2)\n", (2187, 2244), True, 'import matplotlib.pyplot as plt\n'), ((2249, 2287), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {'rotation': '"""horizontal"""'}), "('t', rotation='horizontal')\n", (2259, 2287), True, 'import matplotlib.pyplot as plt\n'), ((2287, 2302), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2297, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2303, 2356), 'matplotlib.pyplot.text', 'plt.text', (['(0.7)', '(0.7)', '"""ln$\\\\lambda$ = -18"""'], {'fontsize': '(16)'}), "(0.7, 0.7, 'ln$\\\\lambda$ = -18', fontsize=16)\n", (2311, 2356), True, 'import matplotlib.pyplot as plt\n'), ((2355, 2384), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""6.png"""'], {'dpi': '(300)'}), "('6.png', dpi=300)\n", (2366, 2384), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2395), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2393, 2395), True, 'import matplotlib.pyplot as plt\n'), ((138, 154), 'numpy.arange', 'np.arange', (['(M + 1)'], {}), '(M + 1)\n', (147, 154), True, 'import numpy as np\n'), ((217, 244), 'numpy.tile', 'np.tile', (['order_list', '[1, N]'], {}), '(order_list, [1, N])\n', (224, 244), True, 'import numpy as np\n'), ((251, 274), 'numpy.power', 'np.power', (['x_s', 'exponent'], {}), '(x_s, exponent)\n', (259, 274), True, 'import numpy as np\n'), ((342, 359), 'numpy.matmul', 'np.matmul', (['h', 't_s'], {}), '(h, t_s)\n', (351, 359), True, 'import numpy as np\n'), ((368, 389), 'numpy.linalg.solve', 'np.linalg.solve', 
(['a', 'b'], {}), '(a, b)\n', (383, 389), True, 'import numpy as np\n'), ((437, 466), 'numpy.tile', 'np.tile', (['order_list', '[1, 200]'], {}), '(order_list, [1, 200])\n', (444, 466), True, 'import numpy as np\n'), ((474, 496), 'numpy.power', 'np.power', (['x', 'exponent2'], {}), '(x, exponent2)\n', (482, 496), True, 'import numpy as np\n'), ((504, 520), 'numpy.matmul', 'np.matmul', (['w', 'h2'], {}), '(w, h2)\n', (513, 520), True, 'import numpy as np\n'), ((653, 677), 'numpy.sin', 'np.sin', (['(np.pi * x_10 * 2)'], {}), '(np.pi * x_10 * 2)\n', (659, 677), True, 'import numpy as np\n'), ((676, 705), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', 'N'], {}), '(0, sigma, N)\n', (692, 705), True, 'import numpy as np\n'), ((1424, 1448), 'numpy.sin', 'np.sin', (['(np.pi * x_15 * 2)'], {}), '(np.pi * x_15 * 2)\n', (1430, 1448), True, 'import numpy as np\n'), ((1447, 1476), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', 'N'], {}), '(0, sigma, N)\n', (1463, 1476), True, 'import numpy as np\n'), ((1570, 1595), 'numpy.sin', 'np.sin', (['(np.pi * x_100 * 2)'], {}), '(np.pi * x_100 * 2)\n', (1576, 1595), True, 'import numpy as np\n'), ((1594, 1623), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', 'N'], {}), '(0, sigma, N)\n', (1610, 1623), True, 'import numpy as np\n'), ((2151, 2162), 'numpy.exp', 'np.exp', (['(-18)'], {}), '(-18)\n', (2157, 2162), True, 'import numpy as np\n'), ((296, 311), 'numpy.transpose', 'np.transpose', (['h'], {}), '(h)\n', (308, 311), True, 'import numpy as np\n'), ((321, 334), 'numpy.eye', 'np.eye', (['(M + 1)'], {}), '(M + 1)\n', (327, 334), True, 'import numpy as np\n')] |
import mmcv
import pickle
import os
import numpy as np
from shutil import copyfile
# Merge the Waymo (KITTI-format) reduced point clouds of the training and
# testing splits into a single "traintest" split.  Testing frames are renamed
# so their indices continue after the last training frame, keeping the merged
# split collision-free.
root = '/mnt/share_data/waymo_dataset/kitti_format/'

# The train info pickle is opened only as a sanity check that the dataset is
# in place; its contents are not needed here, so close the handle immediately
# instead of leaking it (the original left it open for the whole run).
with open(os.path.join(root, 'waymo_infos_train.pkl'), 'rb'):
    pass

sources = ['testing/velodyne_reduced', 'training/velodyne_reduced']
target = os.path.join(root, 'traintest/velodyne_reduced')
os.makedirs(target, exist_ok=True)

copied = []

# 1) Copy the training frames with their filenames unchanged.
train_dir = os.path.join(root, sources[1])
base = len(os.listdir(train_dir))  # index offset applied to testing frames
for fname in os.listdir(train_dir):
    dst = os.path.join(target, fname)
    copied.append(dst)
    copyfile(os.path.join(train_dir, fname), dst)

# 2) Copy the testing frames, shifting each numeric filename by `base`.
test_dir = os.path.join(root, sources[0])
for fname in os.listdir(test_dir):
    idx = int(fname.split('.')[0]) + base
    # The f-string already substitutes idx; the former trailing
    # `.format(idx=idx)` call was a no-op and has been dropped.
    dst = os.path.join(target, f'{idx:06d}.bin')
    copied.append(dst)
    copyfile(os.path.join(test_dir, fname), dst)

print(base)
# Every destination path must be unique, so this should equal len(copied).
print(len(np.unique(copied)))
# 198068
# 227715 | [
"os.path.exists",
"os.listdir",
"numpy.unique",
"os.makedirs",
"os.path.join",
"shutil.copyfile"
] | [((367, 393), 'os.path.join', 'os.path.join', (['root', 'target'], {}), '(root, target)\n', (379, 393), False, 'import os\n'), ((490, 516), 'os.path.join', 'os.path.join', (['root', 'source'], {}), '(root, source)\n', (502, 516), False, 'import os\n'), ((558, 576), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (568, 576), False, 'import os\n'), ((774, 800), 'os.path.join', 'os.path.join', (['root', 'source'], {}), '(root, source)\n', (786, 800), False, 'import os\n'), ((811, 829), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (821, 829), False, 'import os\n'), ((148, 191), 'os.path.join', 'os.path.join', (['root', '"""waymo_infos_train.pkl"""'], {}), "(root, 'waymo_infos_train.pkl')\n", (160, 191), False, 'import os\n'), ((402, 424), 'os.path.exists', 'os.path.exists', (['target'], {}), '(target)\n', (416, 424), False, 'import os\n'), ((430, 449), 'os.makedirs', 'os.makedirs', (['target'], {}), '(target)\n', (441, 449), False, 'import os\n'), ((529, 547), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (539, 547), False, 'import os\n'), ((596, 619), 'os.path.join', 'os.path.join', (['source', 'f'], {}), '(source, f)\n', (608, 619), False, 'import os\n'), ((643, 666), 'os.path.join', 'os.path.join', (['target', 'f'], {}), '(target, f)\n', (655, 666), False, 'import os\n'), ((704, 743), 'shutil.copyfile', 'copyfile', (['source_file', 'destination_file'], {}), '(source_file, destination_file)\n', (712, 743), False, 'from shutil import copyfile\n'), ((849, 872), 'os.path.join', 'os.path.join', (['source', 'f'], {}), '(source, f)\n', (861, 872), False, 'import os\n'), ((999, 1037), 'os.path.join', 'os.path.join', (['target', 'destination_file'], {}), '(target, destination_file)\n', (1011, 1037), False, 'import os\n'), ((1108, 1147), 'shutil.copyfile', 'copyfile', (['source_file', 'destination_file'], {}), '(source_file, destination_file)\n', (1116, 1147), False, 'from shutil import copyfile\n'), ((1171, 1185), 'numpy.unique', 
'np.unique', (['arr'], {}), '(arr)\n', (1180, 1185), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 08 11:31:14 2014
@author: pierre_b
"""
from unittest import TestCase
from ddt import ddt, data
from pyleecan.Classes.Arc1 import Arc1
from pyleecan.Methods.Geometry.Arc1.check import PointArc1Error, RadiusArc1Error
from pyleecan.Methods.Geometry.Arc1.discretize import NbPointArc1DError
from numpy import pi, exp, sqrt, array
# Relative tolerance shared by every AlmostEqual assertion in the test class
DELTA = 1e-6

# discretize() test cases: each dict holds the arc definition ("begin", "end",
# "Radius"), the number of requested intermediate points ("nb_point") and the
# expected discretized point list ("result").  The eight cases cover the four
# quadrants for both inner (radius toward the origin) and extern arcs.
discretize_test = list()
# inner left top arc
discretize_test.append(
    {"nb_point": 9, "begin": 1 * exp(1j * pi), "end": 1 * exp(1j * pi / 2), "Radius": 1}
)
discretize_test[0]["result"] = array(
    [
        -1,
        -0.84356553 + 1.23116594e-02j,
        -0.69098301 + 4.89434837e-02j,
        -0.54600950 + 1.08993476e-01j,
        -0.41221475 + 1.90983006e-01j,
        -0.29289322 + 2.92893219e-01j,
        -0.19098301 + 4.12214748e-01j,
        -0.10899348 + 5.46009500e-01j,
        -0.04894348 + 6.90983006e-01j,
        -0.01231166 + 8.43565535e-01j,
        1j,
    ]
)
# extern left top arc
discretize_test.append(
    {
        "nb_point": 9,
        "begin": 1 * exp(1j * pi),
        "end": 1 * exp(1j * pi / 2),
        "Radius": -1,
    }
)
discretize_test[1]["result"] = array(
    [
        -1,
        -9.87688341e-01 + 1.56434465e-01j,
        -9.51056516e-01 + 3.09016994e-01j,
        -8.91006524e-01 + 4.53990500e-01j,
        -8.09016994e-01 + 5.87785252e-01j,
        -7.07106781e-01 + 7.07106781e-01j,
        -5.87785252e-01 + 8.09016994e-01j,
        -4.53990500e-01 + 8.91006524e-01j,
        -3.09016994e-01 + 9.51056516e-01j,
        -1.56434465e-01 + 9.87688341e-01j,
        1j,
    ]
)
# extern right top arc
discretize_test.append(
    {"nb_point": 9, "begin": 1, "end": 1 * exp(1j * pi / 2), "Radius": 1}
)
discretize_test[2]["result"] = array(
    [
        1,
        9.87688341e-01 + 0.15643447j,
        9.51056516e-01 + 0.30901699j,
        8.91006524e-01 + 0.4539905j,
        8.09016994e-01 + 0.58778525j,
        7.07106781e-01 + 0.70710678j,
        5.87785252e-01 + 0.80901699j,
        4.53990500e-01 + 0.89100652j,
        3.09016994e-01 + 0.95105652j,
        1.56434465e-01 + 0.98768834j,
        1j,
    ]
)
# inner right top arc
discretize_test.append(
    {"nb_point": 9, "begin": 1, "end": 1 * exp(1j * pi / 2), "Radius": -1}
)
discretize_test[3]["result"] = array(
    [
        1,
        8.43565535e-01 + 0.01231166j,
        6.90983006e-01 + 0.04894348j,
        5.46009500e-01 + 0.10899348j,
        4.12214748e-01 + 0.19098301j,
        2.92893219e-01 + 0.29289322j,
        1.90983006e-01 + 0.41221475j,
        1.08993476e-01 + 0.5460095j,
        4.89434837e-02 + 0.69098301j,
        1.23116594e-02 + 0.84356553j,
        1j,
    ]
)
# extern left bottom arc
discretize_test.append(
    {
        "nb_point": 9,
        "begin": 1 * exp(1j * pi),
        "end": 1 * exp(1j * 3 * pi / 2),
        "Radius": 1,
    }
)
discretize_test[4]["result"] = array(
    [
        -1,
        -9.87688341e-01 - 1.56434465e-01j,
        -9.51056516e-01 - 3.09016994e-01j,
        -8.91006524e-01 - 4.53990500e-01j,
        -8.09016994e-01 - 5.87785252e-01j,
        -7.07106781e-01 - 7.07106781e-01j,
        -5.87785252e-01 - 8.09016994e-01j,
        -4.53990500e-01 - 8.91006524e-01j,
        -3.09016994e-01 - 9.51056516e-01j,
        -1.56434465e-01 - 9.87688341e-01j,
        -1j,
    ]
)
# inner left bottom arc
discretize_test.append(
    {
        "nb_point": 9,
        "begin": 1 * exp(1j * pi),
        "end": 1 * exp(1j * 3 * pi / 2),
        "Radius": -1,
    }
)
discretize_test[5]["result"] = array(
    [
        -1,
        -0.84356553 - 1.23116594e-02j,
        -0.69098301 - 4.89434837e-02j,
        -0.54600950 - 1.08993476e-01j,
        -0.41221475 - 1.90983006e-01j,
        -0.29289322 - 2.92893219e-01j,
        -0.19098301 - 4.12214748e-01j,
        -0.10899348 - 5.46009500e-01j,
        -0.04894348 - 6.90983006e-01j,
        -0.01231166 - 8.43565535e-01j,
        -1j,
    ]
)
# inner right bottom arc
discretize_test.append(
    {"nb_point": 9, "begin": 1, "end": 1 * exp(1j * 3 * pi / 2), "Radius": 1}
)
discretize_test[6]["result"] = array(
    [
        1,
        8.43565535e-01 - 0.01231166j,
        6.90983006e-01 - 0.04894348j,
        5.46009500e-01 - 0.10899348j,
        4.12214748e-01 - 0.19098301j,
        2.92893219e-01 - 0.29289322j,
        1.90983006e-01 - 0.41221475j,
        1.08993476e-01 - 0.5460095j,
        4.89434837e-02 - 0.69098301j,
        1.23116594e-02 - 0.84356553j,
        -1j,
    ]
)
# extern right bottom arc
discretize_test.append(
    {"nb_point": 9, "begin": 1, "end": 1 * exp(1j * 3 * pi / 2), "Radius": -1}
)
discretize_test[7]["result"] = array(
    [
        1,
        9.87688341e-01 - 0.15643447j,
        9.51056516e-01 - 0.30901699j,
        8.91006524e-01 - 0.4539905j,
        8.09016994e-01 - 0.58778525j,
        7.07106781e-01 - 0.70710678j,
        5.87785252e-01 - 0.80901699j,
        4.53990500e-01 - 0.89100652j,
        3.09016994e-01 - 0.95105652j,
        1.56434465e-01 - 0.98768834j,
        -1j,
    ]
)

# comp_length() test cases: arc definition plus the expected curvilinear
# length ("length")
comp_length_test = list()
comp_length_test.append({"begin": 0, "end": 1, "Radius": 2, "length": 1.010721020568})
comp_length_test.append(
    {"begin": 1, "end": 1 * exp(1j * pi / 2), "Radius": 1, "length": pi / 2}
)
comp_length_test.append(
    {"begin": 1, "end": 1 * exp(1j * 3 * pi / 2), "Radius": -1, "length": pi / 2}
)
comp_length_test.append(
    {"begin": 1, "end": 1 * exp(1j * pi), "Radius": 1, "length": pi}
)

# Dictionary to test get_middle: "expect" is the point halfway along the arc
comp_mid_test = list()
comp_mid_test.append(
    {
        "begin": 1,
        "end": 1 * exp(1j * pi / 2),
        "radius": 1,
        "expect": sqrt(2) / 2 * (1 + 1j),
    }
)
comp_mid_test.append(
    {
        "begin": 2 * exp(1j * 3 * pi / 4),
        "end": 2 * exp(1j * pi / 4),
        "radius": -2,
        "expect": 2j,
    }
)
comp_mid_test.append({"begin": 2, "end": 3, "radius": -4, "expect": 2.5 + 0.031373j})

# Dictionary to test rotation: rotating by "angle" must map begin/end onto
# "exp_begin"/"exp_end" while leaving the radius untouched
comp_rotate_test = list()
comp_rotate_test.append(
    {
        "begin": 1,
        "end": 1j,
        "radius": 1,
        "angle": pi / 2,
        "exp_begin": 1j,
        "exp_end": -1,
    }
)
comp_rotate_test.append(
    {
        "begin": 1 + 1j,
        "end": 2j,
        "radius": 1,
        "angle": -pi / 2,
        "exp_begin": 1 - 1j,
        "exp_end": 2,
    }
)
comp_rotate_test.append(
    {
        "begin": -1 + 2j,
        "end": -2 + 1j,
        "radius": 1,
        "angle": pi / 4,
        "exp_begin": -2.12132034 + 0.70710678j,
        "exp_end": -2.1213203 - 0.7071067j,
    }
)

# Dictionary to test translation: shifting by "delta" must map begin/end onto
# "exp_begin"/"exp_end" while leaving the radius untouched
comp_translate_test = list()
comp_translate_test.append(
    {
        "begin": 1,
        "end": 1j,
        "radius": 1,
        "delta": 2 + 2j,
        "exp_begin": 3 + 2j,
        "exp_end": 2 + 3j,
    }
)
comp_translate_test.append(
    {
        "begin": 1 + 1j,
        "end": 2j,
        "radius": 1,
        "delta": -3,
        "exp_begin": -2 + 1j,
        "exp_end": -3 + 2j,
    }
)
comp_translate_test.append(
    {
        "begin": -1 + 2j,
        "end": -2 + 1j,
        "radius": 1,
        "delta": 2j,
        "exp_begin": -1 + 4j,
        "exp_end": -2 + 3j,
    }
)
@ddt
class test_Arc1_meth(TestCase):
    """Unit tests for the geometric methods of the Arc1 class."""

    def test_check_Point(self):
        """check() must reject a degenerate arc whose begin and end coincide."""
        degenerate = Arc1(0, 0, 1)
        with self.assertRaises(PointArc1Error):
            degenerate.check()

    def test_check_Radius(self):
        """check() must reject an arc with a null radius."""
        flat = Arc1(0, 1, 0)
        with self.assertRaises(RadiusArc1Error):
            flat.check()

    @data(*discretize_test)
    def test_dicretize(self, test_dict):
        """discretize() must return the expected list of points."""
        arc = Arc1(test_dict["begin"], test_dict["end"], test_dict["Radius"])
        computed = arc.discretize(test_dict["nb_point"])
        expected = test_dict["result"]
        self.assertEqual(computed.size, expected.size)
        for point, ref in zip(computed, expected):
            self.assertAlmostEqual((point - ref) / point, 0, delta=DELTA)

    def test_discretize_Point_error(self):
        """discretize() must detect a single-point arc."""
        degenerate = Arc1(0, 0, 2)
        with self.assertRaises(PointArc1Error):
            degenerate.discretize(5)

    def test_discretize_Radius_error(self):
        """discretize() must detect a null radius."""
        flat = Arc1(0, 1, 0)
        with self.assertRaises(RadiusArc1Error):
            flat.discretize(5)

    def test_discretize_Nb_error(self):
        """discretize() must reject a negative point count."""
        arc = Arc1(0, 1, 1)
        with self.assertRaises(NbPointArc1DError):
            arc.discretize(-1)

    def test_discretize_Nb_Type_error(self):
        """discretize() must reject a non-integer point count."""
        arc = Arc1(0, 1, 1)
        with self.assertRaises(NbPointArc1DError):
            arc.discretize("test")

    @data(*comp_length_test)
    def test_comp_length(self, test_dict):
        """comp_length() must return the expected curvilinear length."""
        arc = Arc1(test_dict["begin"], test_dict["end"], test_dict["Radius"])
        computed = float(arc.comp_length())
        expected = float(test_dict["length"])
        self.assertAlmostEqual((computed - expected) / computed, 0, delta=DELTA)

    def test_comp_length_Point_error(self):
        """comp_length() must detect a single-point arc."""
        degenerate = Arc1(0, 0, 2)
        with self.assertRaises(PointArc1Error):
            degenerate.comp_length()

    def test_comp_length_Radius_error(self):
        """comp_length() must detect a null radius."""
        flat = Arc1(0, 1, 0)
        with self.assertRaises(RadiusArc1Error):
            flat.comp_length()

    def test_get_center(self):
        """get_center() must locate the center of the arc's circle."""
        # Quarter circle centered on the origin
        arc = Arc1(begin=1, end=1 * exp(1j * pi / 2), radius=1)
        self.assertAlmostEqual(abs(arc.get_center() - 0), 0)

        # Negative radius, still centered on the origin
        arc = Arc1(begin=2 * exp(1j * 3 * pi / 4), end=2 * exp(1j * pi / 4), radius=-2)
        self.assertAlmostEqual(abs(arc.get_center() - 0), 0)

        # Small negative-radius arc centered on 2.5
        arc = Arc1(begin=2, end=3, radius=-0.5)
        self.assertAlmostEqual(abs(arc.get_center() - 2.5), 0, delta=1e-3)

    @data(*comp_mid_test)
    def test_get_middle(self, test_dict):
        """get_middle() must return the point halfway along the arc."""
        arc = Arc1(
            begin=test_dict["begin"], end=test_dict["end"], radius=test_dict["radius"]
        )
        self.assertAlmostEqual(abs(arc.get_middle() - test_dict["expect"]), 0, delta=1e-6)

    @data(*comp_rotate_test)
    def test_rotate(self, test_dict):
        """rotate() must move begin/end while preserving the radius."""
        arc = Arc1(
            begin=test_dict["begin"], end=test_dict["end"], radius=test_dict["radius"]
        )
        radius_before = arc.radius
        arc.rotate(test_dict["angle"])
        self.assertAlmostEqual(abs(arc.begin - test_dict["exp_begin"]), 0, delta=1e-6)
        self.assertAlmostEqual(abs(arc.end - test_dict["exp_end"]), 0, delta=1e-6)
        self.assertAlmostEqual(abs(arc.radius - radius_before), 0)

    @data(*comp_translate_test)
    def test_translate(self, test_dict):
        """translate() must shift begin/end while preserving the radius."""
        arc = Arc1(
            begin=test_dict["begin"], end=test_dict["end"], radius=test_dict["radius"]
        )
        radius_before = arc.radius
        arc.translate(test_dict["delta"])
        self.assertAlmostEqual(abs(arc.begin - test_dict["exp_begin"]), 0, delta=1e-6)
        self.assertAlmostEqual(abs(arc.end - test_dict["exp_end"]), 0, delta=1e-6)
        self.assertAlmostEqual(abs(arc.radius - radius_before), 0)
| [
"numpy.sqrt",
"pyleecan.Classes.Arc1.Arc1",
"numpy.exp",
"numpy.array",
"ddt.data"
] | [((604, 887), 'numpy.array', 'array', (['[-1, -0.84356553 + 0.0123116594j, -0.69098301 + 0.0489434837j, -0.5460095 +\n 0.108993476j, -0.41221475 + 0.190983006j, -0.29289322 + 0.292893219j, -\n 0.19098301 + 0.412214748j, -0.10899348 + 0.5460095j, -0.04894348 + \n 0.690983006j, -0.01231166 + 0.843565535j, 1.0j]'], {}), '([-1, -0.84356553 + 0.0123116594j, -0.69098301 + 0.0489434837j, -\n 0.5460095 + 0.108993476j, -0.41221475 + 0.190983006j, -0.29289322 + \n 0.292893219j, -0.19098301 + 0.412214748j, -0.10899348 + 0.5460095j, -\n 0.04894348 + 0.690983006j, -0.01231166 + 0.843565535j, 1.0j])\n', (609, 887), False, 'from numpy import pi, exp, sqrt, array\n'), ((1209, 1498), 'numpy.array', 'array', (['[-1, -0.987688341 + 0.156434465j, -0.951056516 + 0.309016994j, -0.891006524 +\n 0.4539905j, -0.809016994 + 0.587785252j, -0.707106781 + 0.707106781j, -\n 0.587785252 + 0.809016994j, -0.4539905 + 0.891006524j, -0.309016994 + \n 0.951056516j, -0.156434465 + 0.987688341j, 1.0j]'], {}), '([-1, -0.987688341 + 0.156434465j, -0.951056516 + 0.309016994j, -\n 0.891006524 + 0.4539905j, -0.809016994 + 0.587785252j, -0.707106781 + \n 0.707106781j, -0.587785252 + 0.809016994j, -0.4539905 + 0.891006524j, -\n 0.309016994 + 0.951056516j, -0.156434465 + 0.987688341j, 1.0j])\n', (1214, 1498), False, 'from numpy import pi, exp, sqrt, array\n'), ((1796, 2066), 'numpy.array', 'array', (['[1, 0.987688341 + 0.15643447j, 0.951056516 + 0.30901699j, 0.891006524 + \n 0.4539905j, 0.809016994 + 0.58778525j, 0.707106781 + 0.70710678j, \n 0.587785252 + 0.80901699j, 0.4539905 + 0.89100652j, 0.309016994 + \n 0.95105652j, 0.156434465 + 0.98768834j, 1.0j]'], {}), '([1, 0.987688341 + 0.15643447j, 0.951056516 + 0.30901699j, 0.891006524 +\n 0.4539905j, 0.809016994 + 0.58778525j, 0.707106781 + 0.70710678j, \n 0.587785252 + 0.80901699j, 0.4539905 + 0.89100652j, 0.309016994 + \n 0.95105652j, 0.156434465 + 0.98768834j, 1.0j])\n', (1801, 2066), False, 'from numpy import pi, exp, sqrt, array\n'), ((2336, 2608), 
'numpy.array', 'array', (['[1, 0.843565535 + 0.01231166j, 0.690983006 + 0.04894348j, 0.5460095 + \n 0.10899348j, 0.412214748 + 0.19098301j, 0.292893219 + 0.29289322j, \n 0.190983006 + 0.41221475j, 0.108993476 + 0.5460095j, 0.0489434837 + \n 0.69098301j, 0.0123116594 + 0.84356553j, 1.0j]'], {}), '([1, 0.843565535 + 0.01231166j, 0.690983006 + 0.04894348j, 0.5460095 +\n 0.10899348j, 0.412214748 + 0.19098301j, 0.292893219 + 0.29289322j, \n 0.190983006 + 0.41221475j, 0.108993476 + 0.5460095j, 0.0489434837 + \n 0.69098301j, 0.0123116594 + 0.84356553j, 1.0j])\n', (2341, 2608), False, 'from numpy import pi, exp, sqrt, array\n'), ((2936, 3226), 'numpy.array', 'array', (['[-1, -0.987688341 - 0.156434465j, -0.951056516 - 0.309016994j, -0.891006524 -\n 0.4539905j, -0.809016994 - 0.587785252j, -0.707106781 - 0.707106781j, -\n 0.587785252 - 0.809016994j, -0.4539905 - 0.891006524j, -0.309016994 - \n 0.951056516j, -0.156434465 - 0.987688341j, -1.0j]'], {}), '([-1, -0.987688341 - 0.156434465j, -0.951056516 - 0.309016994j, -\n 0.891006524 - 0.4539905j, -0.809016994 - 0.587785252j, -0.707106781 - \n 0.707106781j, -0.587785252 - 0.809016994j, -0.4539905 - 0.891006524j, -\n 0.309016994 - 0.951056516j, -0.156434465 - 0.987688341j, -1.0j])\n', (2941, 3226), False, 'from numpy import pi, exp, sqrt, array\n'), ((3584, 3868), 'numpy.array', 'array', (['[-1, -0.84356553 - 0.0123116594j, -0.69098301 - 0.0489434837j, -0.5460095 -\n 0.108993476j, -0.41221475 - 0.190983006j, -0.29289322 - 0.292893219j, -\n 0.19098301 - 0.412214748j, -0.10899348 - 0.5460095j, -0.04894348 - \n 0.690983006j, -0.01231166 - 0.843565535j, -1.0j]'], {}), '([-1, -0.84356553 - 0.0123116594j, -0.69098301 - 0.0489434837j, -\n 0.5460095 - 0.108993476j, -0.41221475 - 0.190983006j, -0.29289322 - \n 0.292893219j, -0.19098301 - 0.412214748j, -0.10899348 - 0.5460095j, -\n 0.04894348 - 0.690983006j, -0.01231166 - 0.843565535j, -1.0j])\n', (3589, 3868), False, 'from numpy import pi, exp, sqrt, array\n'), ((4142, 4415), 
'numpy.array', 'array', (['[1, 0.843565535 - 0.01231166j, 0.690983006 - 0.04894348j, 0.5460095 - \n 0.10899348j, 0.412214748 - 0.19098301j, 0.292893219 - 0.29289322j, \n 0.190983006 - 0.41221475j, 0.108993476 - 0.5460095j, 0.0489434837 - \n 0.69098301j, 0.0123116594 - 0.84356553j, -1.0j]'], {}), '([1, 0.843565535 - 0.01231166j, 0.690983006 - 0.04894348j, 0.5460095 -\n 0.10899348j, 0.412214748 - 0.19098301j, 0.292893219 - 0.29289322j, \n 0.190983006 - 0.41221475j, 0.108993476 - 0.5460095j, 0.0489434837 - \n 0.69098301j, 0.0123116594 - 0.84356553j, -1.0j])\n', (4147, 4415), False, 'from numpy import pi, exp, sqrt, array\n'), ((4691, 4962), 'numpy.array', 'array', (['[1, 0.987688341 - 0.15643447j, 0.951056516 - 0.30901699j, 0.891006524 - \n 0.4539905j, 0.809016994 - 0.58778525j, 0.707106781 - 0.70710678j, \n 0.587785252 - 0.80901699j, 0.4539905 - 0.89100652j, 0.309016994 - \n 0.95105652j, 0.156434465 - 0.98768834j, -1.0j]'], {}), '([1, 0.987688341 - 0.15643447j, 0.951056516 - 0.30901699j, 0.891006524 -\n 0.4539905j, 0.809016994 - 0.58778525j, 0.707106781 - 0.70710678j, \n 0.587785252 - 0.80901699j, 0.4539905 - 0.89100652j, 0.309016994 - \n 0.95105652j, 0.156434465 - 0.98768834j, -1.0j])\n', (4696, 4962), False, 'from numpy import pi, exp, sqrt, array\n'), ((7691, 7713), 'ddt.data', 'data', (['*discretize_test'], {}), '(*discretize_test)\n', (7695, 7713), False, 'from ddt import ddt, data\n'), ((9074, 9097), 'ddt.data', 'data', (['*comp_length_test'], {}), '(*comp_length_test)\n', (9078, 9097), False, 'from ddt import ddt, data\n'), ((10536, 10556), 'ddt.data', 'data', (['*comp_mid_test'], {}), '(*comp_mid_test)\n', (10540, 10556), False, 'from ddt import ddt, data\n'), ((10902, 10925), 'ddt.data', 'data', (['*comp_rotate_test'], {}), '(*comp_rotate_test)\n', (10906, 10925), False, 'from ddt import ddt, data\n'), ((11457, 11483), 'ddt.data', 'data', (['*comp_translate_test'], {}), '(*comp_translate_test)\n', (11461, 11483), False, 'from ddt import ddt, data\n'), 
((7403, 7416), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (7407, 7416), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((7598, 7611), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (7602, 7611), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((7830, 7893), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', (["test_dict['begin']", "test_dict['end']", "test_dict['Radius']"], {}), "(test_dict['begin'], test_dict['end'], test_dict['Radius'])\n", (7834, 7893), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((8311, 8324), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', (['(0)', '(0)', '(2)'], {}), '(0, 0, 2)\n', (8315, 8324), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((8528, 8541), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (8532, 8541), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((8744, 8757), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (8748, 8757), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((8968, 8981), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (8972, 8981), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((9237, 9300), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', (["test_dict['begin']", "test_dict['end']", "test_dict['Radius']"], {}), "(test_dict['begin'], test_dict['end'], test_dict['Radius'])\n", (9241, 9300), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((9566, 9579), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', (['(0)', '(0)', '(2)'], {}), '(0, 0, 2)\n', (9570, 9579), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((9789, 9802), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (9793, 9802), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((10373, 10406), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', ([], {'begin': '(2)', 'end': '(3)', 'radius': '(-0.5)'}), '(begin=2, end=3, radius=-0.5)\n', (10377, 10406), False, 'from 
pyleecan.Classes.Arc1 import Arc1\n'), ((10678, 10763), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', ([], {'begin': "test_dict['begin']", 'end': "test_dict['end']", 'radius': "test_dict['radius']"}), "(begin=test_dict['begin'], end=test_dict['end'], radius=test_dict['radius']\n )\n", (10682, 10763), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((11036, 11121), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', ([], {'begin': "test_dict['begin']", 'end': "test_dict['end']", 'radius': "test_dict['radius']"}), "(begin=test_dict['begin'], end=test_dict['end'], radius=test_dict['radius']\n )\n", (11040, 11121), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((11600, 11685), 'pyleecan.Classes.Arc1.Arc1', 'Arc1', ([], {'begin': "test_dict['begin']", 'end': "test_dict['end']", 'radius': "test_dict['radius']"}), "(begin=test_dict['begin'], end=test_dict['end'], radius=test_dict['radius']\n )\n", (11604, 11685), False, 'from pyleecan.Classes.Arc1 import Arc1\n'), ((515, 529), 'numpy.exp', 'exp', (['(1.0j * pi)'], {}), '(1.0j * pi)\n', (518, 529), False, 'from numpy import pi, exp, sqrt, array\n'), ((540, 558), 'numpy.exp', 'exp', (['(1.0j * pi / 2)'], {}), '(1.0j * pi / 2)\n', (543, 558), False, 'from numpy import pi, exp, sqrt, array\n'), ((1097, 1111), 'numpy.exp', 'exp', (['(1.0j * pi)'], {}), '(1.0j * pi)\n', (1100, 1111), False, 'from numpy import pi, exp, sqrt, array\n'), ((1130, 1148), 'numpy.exp', 'exp', (['(1.0j * pi / 2)'], {}), '(1.0j * pi / 2)\n', (1133, 1148), False, 'from numpy import pi, exp, sqrt, array\n'), ((1732, 1750), 'numpy.exp', 'exp', (['(1.0j * pi / 2)'], {}), '(1.0j * pi / 2)\n', (1735, 1750), False, 'from numpy import pi, exp, sqrt, array\n'), ((2271, 2289), 'numpy.exp', 'exp', (['(1.0j * pi / 2)'], {}), '(1.0j * pi / 2)\n', (2274, 2289), False, 'from numpy import pi, exp, sqrt, array\n'), ((2821, 2835), 'numpy.exp', 'exp', (['(1.0j * pi)'], {}), '(1.0j * pi)\n', (2824, 2835), False, 'from numpy import pi, exp, sqrt, array\n'), ((2854, 2876), 'numpy.exp', 
'exp', (['(1.0j * 3 * pi / 2)'], {}), '(1.0j * 3 * pi / 2)\n', (2857, 2876), False, 'from numpy import pi, exp, sqrt, array\n'), ((3468, 3482), 'numpy.exp', 'exp', (['(1.0j * pi)'], {}), '(1.0j * pi)\n', (3471, 3482), False, 'from numpy import pi, exp, sqrt, array\n'), ((3501, 3523), 'numpy.exp', 'exp', (['(1.0j * 3 * pi / 2)'], {}), '(1.0j * 3 * pi / 2)\n', (3504, 3523), False, 'from numpy import pi, exp, sqrt, array\n'), ((4074, 4096), 'numpy.exp', 'exp', (['(1.0j * 3 * pi / 2)'], {}), '(1.0j * 3 * pi / 2)\n', (4077, 4096), False, 'from numpy import pi, exp, sqrt, array\n'), ((4622, 4644), 'numpy.exp', 'exp', (['(1.0j * 3 * pi / 2)'], {}), '(1.0j * 3 * pi / 2)\n', (4625, 4644), False, 'from numpy import pi, exp, sqrt, array\n'), ((5244, 5262), 'numpy.exp', 'exp', (['(1.0j * pi / 2)'], {}), '(1.0j * pi / 2)\n', (5247, 5262), False, 'from numpy import pi, exp, sqrt, array\n'), ((5348, 5370), 'numpy.exp', 'exp', (['(1.0j * 3 * pi / 2)'], {}), '(1.0j * 3 * pi / 2)\n', (5351, 5370), False, 'from numpy import pi, exp, sqrt, array\n'), ((5457, 5471), 'numpy.exp', 'exp', (['(1.0j * pi)'], {}), '(1.0j * pi)\n', (5460, 5471), False, 'from numpy import pi, exp, sqrt, array\n'), ((5623, 5641), 'numpy.exp', 'exp', (['(1.0j * pi / 2)'], {}), '(1.0j * pi / 2)\n', (5626, 5641), False, 'from numpy import pi, exp, sqrt, array\n'), ((5761, 5783), 'numpy.exp', 'exp', (['(1.0j * 3 * pi / 4)'], {}), '(1.0j * 3 * pi / 4)\n', (5764, 5783), False, 'from numpy import pi, exp, sqrt, array\n'), ((5802, 5820), 'numpy.exp', 'exp', (['(1.0j * pi / 4)'], {}), '(1.0j * pi / 4)\n', (5805, 5820), False, 'from numpy import pi, exp, sqrt, array\n'), ((5680, 5687), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (5684, 5687), False, 'from numpy import pi, exp, sqrt, array\n'), ((10023, 10041), 'numpy.exp', 'exp', (['(1.0j * pi / 2)'], {}), '(1.0j * pi / 2)\n', (10026, 10041), False, 'from numpy import pi, exp, sqrt, array\n'), ((10190, 10212), 'numpy.exp', 'exp', (['(1.0j * 3 * pi / 4)'], {}), '(1.0j * 
3 * pi / 4)\n', (10193, 10212), False, 'from numpy import pi, exp, sqrt, array\n'), ((10220, 10238), 'numpy.exp', 'exp', (['(1.0j * pi / 4)'], {}), '(1.0j * pi / 4)\n', (10223, 10238), False, 'from numpy import pi, exp, sqrt, array\n')] |
# -*- coding: utf-8 -*-
"""
@author:HuangJie
@time:18-9-17 下午2:48
"""
import os
import h5py
import numpy as np
from BackEnd.CNN_retrieval.extract_cnn_vgg16_keras import VGGNet
'''
ap = argparse.ArgumentParser()
ap.add_argument("-database", required=True, help="Path to database which contains images to be indexed")
ap.add_argument("-index", required=True, help="Name of index file")
args = vars(ap.parse_args())
'''
'''
Returns a list of filenames for all jpg images in a directory.
'''
'''
def get_imlist(path):
img_paths = list()
labels = list()
class_dirs = sorted(os.listdir(path))
dict_classid = dict()
for i in range(len(class_dirs)):
label = i
class_dir = class_dirs[i]
dict_classid[class_dir] = label
#class_path = os.path.join(path, class_dir)
class_path = path
file_names = sorted(os.listdir(class_path))
for file_name in file_names:
file_path = os.path.join(class_path, file_name)
img_paths.append(file_path)
labels.append(label)
img_paths = np.asarray(img_paths)
labels = np.asarray(labels)
return img_paths, labels
'''
'''
Extract features and index the images
'''
'''
Returns a list of filenames for all jpg images in a directory.
'''
def get_imlist(path):
return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg') or f.endswith(".jpeg")]
if __name__ == "__main__":
# db = args["database"]
# db = img_paths = './database'
db = img_paths = '../static/'
img_list = get_imlist(db)
print("--------------------------------------------------")
print(" feature extraction starts")
print("--------------------------------------------------")
feats = []
names = []
model = VGGNet()
for i, img_path in enumerate(img_list):
norm_feat = model.extract_feat(img_path)
img_name = os.path.split(img_path)[1]
feats.append(norm_feat)
names.append(img_name)
print("extracting feature from image No. %d , %d images in total" % ((i + 1), len(img_list)))
feats = np.array(feats)
names = np.array(names, dtype="S")
output = 'featureCNN.h5'
print("--------------------------------------------------")
print(" writing feature extraction results ...")
print("--------------------------------------------------")
h5f = h5py.File(output, 'w')
h5f.create_dataset('dataset_feat', data=feats)
h5f.create_dataset('dataset_name', data=names)
h5f.close()
| [
"os.listdir",
"os.path.join",
"h5py.File",
"os.path.split",
"numpy.array",
"BackEnd.CNN_retrieval.extract_cnn_vgg16_keras.VGGNet"
] | [((1803, 1811), 'BackEnd.CNN_retrieval.extract_cnn_vgg16_keras.VGGNet', 'VGGNet', ([], {}), '()\n', (1809, 1811), False, 'from BackEnd.CNN_retrieval.extract_cnn_vgg16_keras import VGGNet\n'), ((2129, 2144), 'numpy.array', 'np.array', (['feats'], {}), '(feats)\n', (2137, 2144), True, 'import numpy as np\n'), ((2157, 2183), 'numpy.array', 'np.array', (['names'], {'dtype': '"""S"""'}), "(names, dtype='S')\n", (2165, 2183), True, 'import numpy as np\n'), ((2410, 2432), 'h5py.File', 'h5py.File', (['output', '"""w"""'], {}), "(output, 'w')\n", (2419, 2432), False, 'import h5py\n'), ((1330, 1351), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (1342, 1351), False, 'import os\n'), ((1361, 1377), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1371, 1377), False, 'import os\n'), ((1924, 1947), 'os.path.split', 'os.path.split', (['img_path'], {}), '(img_path)\n', (1937, 1947), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 07 14:42:32 2021
@author: silviapagliarini
"""
import os
import numpy as np
import pandas as pd
import csv
from pydub import AudioSegment
import scipy.io.wavfile as wav
def opensmile_executable(data, baby_id, classes, args):
"""
Generate a text file executable on shell to compute multiple times opensmile features.
If option labels_creation == True, it also generates a csv file containing number of the sound and label.
INPUT
- path to directory
- type of dataset (can be a single directory, or a dataset keywords): see args.baby_id
OUTPUT
A text file for each directory with the command lines to compute MFCC for each extracted sound in the directory.
"""
f = open(args.data_dir + '/' + 'executable_opensmile_' + baby_id + '.txt', 'w+')
i = 0
while i < len(data):
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/InitialDatasets/singleVoc/single_vocalizations/'
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/completeDataset/'
name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/subsetSilence/'
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/HumanLabels/exp1'
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/BabbleNN/interspeech_Wave'
if baby_id == 'AnneModel':
f.write(name + '/' + os.path.basename(data[i]) + ' -csvoutput ' + os.path.basename(data[i])[0:-3] + 'mfcc.csv')
f.write('\n')
else:
#output_dir = '/Users/silviapagliarini/Documents/opensmile/HumanData_analysis/humanVSlena/human'
#output_dir = '/Users/silviapagliarini/Documents/opensmile/HumanData_analysis/completeDataset'
output_dir = '/Users/silviapagliarini/Documents/opensmile/HumanData_analysis/subsetSilence'
os.makedirs(output_dir + '/' + baby_id, exist_ok=True)
for c in range(0,len(classes)):
os.makedirs(output_dir + '/' + baby_id + '/' + classes[c], exist_ok=True)
f.write(name + baby_id[0:4] + '/' + baby_id + '_segments/' + os.path.basename(data[i]) + ' -csvoutput ' + output_dir + '/' + baby_id + '/' + os.path.basename(data[i])[0:-3] + 'mfcc.csv')
#f.write(name + '/' + baby_id + '_segments/' + os.path.basename(data[i]) + ' -csvoutput ' + output_dir + '/' + baby_id + '/' + os.path.basename(data[i])[0:-3] + 'mfcc.csv')
f.write('\n')
i = i + 1
f.close()
if args.labels_creation == True:
# writing the data rows
labels = []
i = 0
while i < len(data):
j = 0
while j < len(classes):
if os.path.basename(data[i]).find(classes[j]) != -1:
labels.append(classes[j])
j = j + 1
i = i + 1
with open(args.data_dir + '/' + 'LENAlabels_' + baby_id + '.csv', 'w') as csvfile:
# creating a csv writer object
csvwriter = csv.writer(csvfile)
# writing the fields
csvwriter.writerow(['ID', 'Label'])
i = 0
while i < len(data):
csvwriter.writerow([str(i), labels[i]])
i = i + 1
print('Done')
def list(args):
"""
Create a list of all the babies in the dataset in order to simplify the following steps of the analysis.
INPUT
- path to directory (subdirectories should be the single family directories).
OUTPUT
- .csv file with name of the baby and age of the baby in days.
"""
listDir = glob2.glob(args.data_dir + '/0*')
with open(args.data_dir + '/baby_list_basic.csv', 'w') as csvfile:
# creating a csv writer object
csvwriter = csv.writer(csvfile)
# writing the fields
csvwriter.writerow(['ID', 'AGE'])
i = 0
while i<len(listDir):
name = os.path.basename(listDir[i])
age = int(name[6])*365 + int(name[8]) * 30 + int(name[10])
csvwriter.writerow([name, age])
i = i + 1
print('Done')
def merge_labels(babies, args):
"""
Create a LENA-like .csv with the human corrections included. When a label has been identified as wrong, it is substitute with the
noise lable NOF.
INPUT
- path to directory
- list of babies
OUTPUT
.csv file containing cleaned labels.
"""
for i in range(0,len(babies)):
print(babies[i])
lena = pd.read_csv(args.data_dir + '/' + babies[i] + '_segments.csv')
human = pd.read_csv(args.data_dir + '/' + babies[i] + '_scrubbed_CHNrelabel_lplf_1.csv')
time_stamp_lena_start = lena["startsec"]
time_stamp_lena_end = lena["endsec"]
prominence = human["targetChildProminence"]
lena_labels = lena["segtype"]
CHNSP_pos = np.where(lena_labels == 'CHNSP')[0]
CHNNSP_pos = np.where(lena_labels == 'CHNNSP')[0]
pos = np.append(CHNSP_pos, CHNNSP_pos)
pos = sorted(pos)
for j in range(0, len(pos)):
if i < 2:
if prominence[j] > 2:
lena_labels[pos[j]] = 'NOF'
else:
if prominence[j] == False:
lena_labels[pos[j]] = 'NOF'
with open(args.data_dir + '/new_' + babies[i] + '_segments.csv', 'w') as csvfile:
# creating a csv writer object
csvwriter = csv.writer(csvfile)
# writing the fields
csvwriter.writerow(['segtype', 'startsec', 'endsec'])
i = 0
while i < len(time_stamp_lena_start):
csvwriter.writerow([lena_labels[i], time_stamp_lena_start[i], time_stamp_lena_end[i]])
i = i + 1
print('Done')
if __name__ == '__main__':
import argparse
import glob2
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--option', type=str, choices=['merge', 'list', 'executeOS'])
parser.add_argument('--data_dir', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--baby_id', type = str)
parser.add_argument('--labels_creation', type = bool, default=False)
uniform_duration_args = parser.add_argument_group('Uniform')
uniform_duration_args.add_argument('--sd', type=int,
help='Expected sound duration in milliseconds', default = 1000)
uniform_duration_args.add_argument('--sr', type=int, help='Expected sampling rate',
default=16000)
args = parser.parse_args()
if args.output_dir != None:
if not os.path.isdir(args.data_dir + '/' + args.output_dir):
os.makedirs(args.data_dir + '/' + args.output_dir)
if args.option == 'executeOS':
# Labels (change only if needed)
# classes = ['B', 'S', 'N', 'MS', 'ME', 'M', 'OAS', 'SLEEP']
classes = ['MAN', 'FAN', 'CHNSP', 'CHNNSP']
#classes = ['CHNNSP']
if args.baby_id == 'initial':
# List of babies
summary = pd.read_csv(args.data_dir + '/' + 'baby_list.csv')
summary = pd.DataFrame.to_numpy(summary)
babies = summary[:,0]
# Load data
if len(babies) == 0:
baby_id = args.baby_id
dataset = sorted(glob2.glob(args.data_dir + '/' + baby_id + '/' + '*.wav'))
# Labels (change only if needed)
#classes = ['B', 'S', 'N', 'MS', 'ME', 'M', 'OAS', 'SLEEP']
#classes = ['FAN', 'CHNSP']
opensmile_executable(dataset, baby_id, classes, args)
else:
for i in range(0, len(babies)):
dataset = sorted(glob2.glob(args.data_dir + '/' + babies[i] + '_segments' + '/' + '*.wav'))
print(dataset)
input()
opensmile_executable(dataset, babies[i], classes, args)
elif args.baby_id == 'complete':
# List of babies
babies_dir = glob2.glob(args.data_dir + '/0*')
for i in range(0, len(babies_dir)):
babies_wav = glob2.glob(babies_dir[i] + '/*.wav')
babies = []
for j in range(0, len(babies_wav)):
babies = os.path.basename(babies_wav[j][0:-4])
dataset = sorted(glob2.glob(babies_dir[i] + '/' + babies + '_segments' + '/' + '*.wav'))
opensmile_executable(dataset, babies, classes, args)
elif args.baby_id == 'subset':
# List of babies
summary = pd.read_csv(args.data_dir + '/' + 'baby_list.csv')
summary = pd.DataFrame.to_numpy(summary)
babies = summary[:, 0]
for j in range(0, len(babies)):
dataset = sorted(glob2.glob(args.data_dir + '/' + babies[j][0:4] + '/' + babies[j] + '_segments' + '/' + '*.wav'))
opensmile_executable(dataset, babies[j], classes, args)
else:
dataset = sorted(glob2.glob(args.data_dir + '/' + args.baby_id + '_segments/' + '*.wav'))
opensmile_executable(dataset, args.baby_id, classes, args)
if args.option == 'list':
list(args)
if args.option == 'merge':
babies_csv = pd.read_csv(args.data_dir + '/baby_list.csv')
babies = babies_csv["name"]
merge_labels(babies, args)
### Example: python3 BabyExperience.py --data_dir /Users/labadmin/Documents/Silvia/HumanData --option list | [
"glob2.glob",
"pandas.DataFrame.to_numpy",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.makedirs",
"numpy.where",
"csv.writer",
"numpy.append",
"os.path.isdir",
"os.path.basename"
] | [((3944, 3977), 'glob2.glob', 'glob2.glob', (["(args.data_dir + '/0*')"], {}), "(args.data_dir + '/0*')\n", (3954, 3977), False, 'import glob2\n'), ((6213, 6238), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6236, 6238), False, 'import argparse\n'), ((4108, 4127), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (4118, 4127), False, 'import csv\n'), ((4837, 4899), 'pandas.read_csv', 'pd.read_csv', (["(args.data_dir + '/' + babies[i] + '_segments.csv')"], {}), "(args.data_dir + '/' + babies[i] + '_segments.csv')\n", (4848, 4899), True, 'import pandas as pd\n'), ((4916, 5001), 'pandas.read_csv', 'pd.read_csv', (["(args.data_dir + '/' + babies[i] + '_scrubbed_CHNrelabel_lplf_1.csv')"], {}), "(args.data_dir + '/' + babies[i] + '_scrubbed_CHNrelabel_lplf_1.csv'\n )\n", (4927, 5001), True, 'import pandas as pd\n'), ((5310, 5342), 'numpy.append', 'np.append', (['CHNSP_pos', 'CHNNSP_pos'], {}), '(CHNSP_pos, CHNNSP_pos)\n', (5319, 5342), True, 'import numpy as np\n'), ((9656, 9701), 'pandas.read_csv', 'pd.read_csv', (["(args.data_dir + '/baby_list.csv')"], {}), "(args.data_dir + '/baby_list.csv')\n", (9667, 9701), True, 'import pandas as pd\n'), ((2221, 2275), 'os.makedirs', 'os.makedirs', (["(output_dir + '/' + baby_id)"], {'exist_ok': '(True)'}), "(output_dir + '/' + baby_id, exist_ok=True)\n", (2232, 2275), False, 'import os\n'), ((3361, 3380), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3371, 3380), False, 'import csv\n'), ((4264, 4292), 'os.path.basename', 'os.path.basename', (['listDir[i]'], {}), '(listDir[i])\n', (4280, 4292), False, 'import os\n'), ((5202, 5234), 'numpy.where', 'np.where', (["(lena_labels == 'CHNSP')"], {}), "(lena_labels == 'CHNSP')\n", (5210, 5234), True, 'import numpy as np\n'), ((5259, 5292), 'numpy.where', 'np.where', (["(lena_labels == 'CHNNSP')"], {}), "(lena_labels == 'CHNNSP')\n", (5267, 5292), True, 'import numpy as np\n'), ((5782, 5801), 'csv.writer', 'csv.writer', 
(['csvfile'], {}), '(csvfile)\n', (5792, 5801), False, 'import csv\n'), ((6993, 7045), 'os.path.isdir', 'os.path.isdir', (["(args.data_dir + '/' + args.output_dir)"], {}), "(args.data_dir + '/' + args.output_dir)\n", (7006, 7045), False, 'import os\n'), ((7059, 7109), 'os.makedirs', 'os.makedirs', (["(args.data_dir + '/' + args.output_dir)"], {}), "(args.data_dir + '/' + args.output_dir)\n", (7070, 7109), False, 'import os\n'), ((7428, 7478), 'pandas.read_csv', 'pd.read_csv', (["(args.data_dir + '/' + 'baby_list.csv')"], {}), "(args.data_dir + '/' + 'baby_list.csv')\n", (7439, 7478), True, 'import pandas as pd\n'), ((7501, 7531), 'pandas.DataFrame.to_numpy', 'pd.DataFrame.to_numpy', (['summary'], {}), '(summary)\n', (7522, 7531), True, 'import pandas as pd\n'), ((2336, 2409), 'os.makedirs', 'os.makedirs', (["(output_dir + '/' + baby_id + '/' + classes[c])"], {'exist_ok': '(True)'}), "(output_dir + '/' + baby_id + '/' + classes[c], exist_ok=True)\n", (2347, 2409), False, 'import os\n'), ((8410, 8443), 'glob2.glob', 'glob2.glob', (["(args.data_dir + '/0*')"], {}), "(args.data_dir + '/0*')\n", (8420, 8443), False, 'import glob2\n'), ((7695, 7752), 'glob2.glob', 'glob2.glob', (["(args.data_dir + '/' + baby_id + '/' + '*.wav')"], {}), "(args.data_dir + '/' + baby_id + '/' + '*.wav')\n", (7705, 7752), False, 'import glob2\n'), ((8521, 8557), 'glob2.glob', 'glob2.glob', (["(babies_dir[i] + '/*.wav')"], {}), "(babies_dir[i] + '/*.wav')\n", (8531, 8557), False, 'import glob2\n'), ((8979, 9029), 'pandas.read_csv', 'pd.read_csv', (["(args.data_dir + '/' + 'baby_list.csv')"], {}), "(args.data_dir + '/' + 'baby_list.csv')\n", (8990, 9029), True, 'import pandas as pd\n'), ((9052, 9082), 'pandas.DataFrame.to_numpy', 'pd.DataFrame.to_numpy', (['summary'], {}), '(summary)\n', (9073, 9082), True, 'import pandas as pd\n'), ((8099, 8172), 'glob2.glob', 'glob2.glob', (["(args.data_dir + '/' + babies[i] + '_segments' + '/' + '*.wav')"], {}), "(args.data_dir + '/' + babies[i] + 
'_segments' + '/' + '*.wav')\n", (8109, 8172), False, 'import glob2\n'), ((8667, 8704), 'os.path.basename', 'os.path.basename', (['babies_wav[j][0:-4]'], {}), '(babies_wav[j][0:-4])\n', (8683, 8704), False, 'import os\n'), ((9409, 9480), 'glob2.glob', 'glob2.glob', (["(args.data_dir + '/' + args.baby_id + '_segments/' + '*.wav')"], {}), "(args.data_dir + '/' + args.baby_id + '_segments/' + '*.wav')\n", (9419, 9480), False, 'import glob2\n'), ((1802, 1827), 'os.path.basename', 'os.path.basename', (['data[i]'], {}), '(data[i])\n', (1818, 1827), False, 'import os\n'), ((2563, 2588), 'os.path.basename', 'os.path.basename', (['data[i]'], {}), '(data[i])\n', (2579, 2588), False, 'import os\n'), ((3058, 3083), 'os.path.basename', 'os.path.basename', (['data[i]'], {}), '(data[i])\n', (3074, 3083), False, 'import os\n'), ((8742, 8812), 'glob2.glob', 'glob2.glob', (["(babies_dir[i] + '/' + babies + '_segments' + '/' + '*.wav')"], {}), "(babies_dir[i] + '/' + babies + '_segments' + '/' + '*.wav')\n", (8752, 8812), False, 'import glob2\n'), ((9195, 9295), 'glob2.glob', 'glob2.glob', (["(args.data_dir + '/' + babies[j][0:4] + '/' + babies[j] + '_segments' + '/' +\n '*.wav')"], {}), "(args.data_dir + '/' + babies[j][0:4] + '/' + babies[j] +\n '_segments' + '/' + '*.wav')\n", (9205, 9295), False, 'import glob2\n'), ((1757, 1782), 'os.path.basename', 'os.path.basename', (['data[i]'], {}), '(data[i])\n', (1773, 1782), False, 'import os\n'), ((2483, 2508), 'os.path.basename', 'os.path.basename', (['data[i]'], {}), '(data[i])\n', (2499, 2508), False, 'import os\n')] |
"""
@author: <NAME>
"""
import yfinance as yf
import datetime as dt
import pandas as pd
import numpy as np
from pandas.plotting import table
import matplotlib.pyplot as plt
from scipy.stats import levene
# Download historical data for S&P 500
ticker = "^GSPC"
SnP = yf.download(ticker, start="1991-02-01", end="2018-06-01")
SnP = SnP.drop(['Open','High','Low','Close','Volume'], axis=1)
SnP['Return'] = SnP['Adj Close'].pct_change()
def Ret_everyndays(DF,n):
""" This function takes in the SnP data, calculates the returns every n days, returns a list"""
df = DF.copy().drop('Return', axis = 1).iloc[::n, :]
ret = df['Adj Close'].pct_change().to_list()
return ret
def MV_Breach(mvg_avg_days,DF):
"""this function takes in the MA days, df of close prices & outputs events when MA was breached
. In order for the moving average to be breached, the previous day’s closing price
has to be ABOVE the moving average and today's close must be BELOW the moving average"""
df = DF.copy().drop("Return", axis=1)
df["Moving Average Price"] = df["Adj Close"].rolling(mvg_avg_days).mean()
last_close_price = df["Adj Close"].iloc[mvg_avg_days-2]
df = df.iloc[mvg_avg_days-2:, ]
df_BreakingDMA = df[(df["Adj Close"].shift(1) > df["Moving Average Price"].shift(1)) & (df["Adj Close"] < df["Moving Average Price"])]
df_BreakingDMA = df_BreakingDMA.reset_index().rename(columns={'Date': f'Date {mvg_avg_days}d MA is breached','Adj Close': 'Closing Price on Day 0'})
df_BreakingDMA = df_BreakingDMA[[f'Date {mvg_avg_days}d MA is breached', 'Moving Average Price', 'Closing Price on Day 0']]
return df_BreakingDMA
def strategyretdata(Price,breachdata,n,N):
""" Extract the close prices 1d,2d,..,nd from the breach date. Then calculate the returns for each of such intervals
taking the price as on breach date as the base price"""
price = Price.copy()
price = price.reset_index()
dict = {}
for i in breachdata[f'Date {N}d MA is breached']:
x = price[price['Date'] == i]['Adj Close'].index.values
S = pd.Series(SnP['Adj Close'][x[0]:x[0] + n])
first_element = S[0]
dict[i] = list(S.apply(lambda y: ((y / first_element) - 1)))
return dict
# Create a DataFrame with SnP returns from every 1d,2d,...,40d
SnP_Returns = pd.DataFrame()
df = pd.DataFrame()
for k in range(1, 41):
Column_Name = str(f'Date - EVERY {k} DAYS')
df[Column_Name] = np.array(Ret_everyndays(SnP, k))
SnP_Returns = pd.concat([SnP_Returns, df], axis=1)
df = pd.DataFrame()
continue
SnP_Returns.drop(index=0, inplace=True)
# Create DataFrame with the Close Prices on the Breach Date
Breach_data_50DMA = MV_Breach(50,SnP) # 50 DMA
Breach_data_100DMA = MV_Breach(100,SnP) # 100 DMA
Breach_data_200DMA = MV_Breach(200,SnP) # 200 DMA
# Create a DataFrame with the Strategy Returns every 1d,2d,....,40d from the Breach Date
Breach_ret_50DMA = pd.DataFrame(dict((k, v) for k, v in strategyretdata(SnP, Breach_data_50DMA, 41, 50).items() if len(v)==41)).transpose().drop(columns=0)
Breach_ret_100DMA = pd.DataFrame(dict((k, v) for k, v in strategyretdata(SnP, Breach_data_100DMA, 41, 100).items() if len(v)==41)).transpose().drop(columns=0)
Breach_ret_200DMA = pd.DataFrame(dict((k, v) for k, v in strategyretdata(SnP, Breach_data_200DMA, 41, 200).items() if len(v)==41)).transpose().drop(columns=0)
# Performing Levene's Test on 50d,100d,200d against S&P_Ret for very 1d,2d,3d,...,40d holding period
P_Values = pd.DataFrame(index=range(1,41), columns=["Levene's test p-value MA 200 ","Levene's test p-value MA 100","Levene's test p-value MA 50"])
for i in range(0, 40):
stat1, p1 = levene(list(Breach_ret_50DMA.iloc[:, i].dropna()), list(SnP_Returns.iloc[:, i].dropna()))
stat2, p2 = levene(list(Breach_ret_100DMA.iloc[:, i].dropna()), list(SnP_Returns.iloc[:, i].dropna()))
stat3, p3 = levene(list(Breach_ret_200DMA.iloc[:, i].dropna()), list(SnP_Returns.iloc[:, i].dropna()))
P_Values.iloc[i, 2] = p1
P_Values.iloc[i, 1] = p2
P_Values.iloc[i, 0] = p3
# Analyzing the p-values for 50d,100d and 200d MA
mu1 = round(np.mean(P_Values.iloc[:, 0]), 2)
sigma1 = round(np.std(P_Values.iloc[:, 0]), 2)
plt.subplot(311)
plt.hist(P_Values.iloc[:,0], 20, density=True)
plt.title("Histogram of 'p-value - MA 200': '$\mu={}$, $\sigma={}$'".format(mu1, sigma1))
plt.xticks([]) # Disables xticks
plt.axvline(x=0.05, color='r', label='p-value of 0.05', linestyle='--', linewidth=1)
plt.legend()
mu2 = round(np.mean(P_Values.iloc[:, 1]), 2)
sigma2 = round(np.std(P_Values.iloc[:, 1]), 2)
plt.subplot(312)
plt.hist(P_Values.iloc[:, 1])
plt.title("Histogram of 'p-value - MA 100': '$\mu={}$, $\sigma={}$'".format(mu2, sigma2))
plt.xticks([])
plt.axvline(x=0.05, color='r', label='p-value of 0.05', linestyle='--', linewidth=1)
plt.legend()
mu3 = round(np.mean(P_Values.iloc[:, 2]), 2)
sigma3 = round(np.std(P_Values.iloc[:, 2]), 2)
plt.subplot(313)
plt.hist(P_Values.iloc[:, 2])
plt.title("Histogram of 'p-value - MA 50': '$\mu={}$, $\sigma={}$'".format(mu3, sigma3))
plt.axvline(x=0.05, color='r', label='p-value of 0.05', linestyle='--', linewidth=1)
plt.legend()
plt.show()
# Time Series Plot
plt.plot(P_Values)
plt.axhline(y=0.05, color='r', label='p-value of 0.05', linestyle='--', linewidth=1)
plt.title("Time Series of P-Values for Trades at 1-40 Days from Breach of MA")
plt.legend(('MA 200','MA 100', 'MA 50','p-value of 0.05'))
plt.show()
| [
"pandas.Series",
"numpy.mean",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"yfinance.download",
"pandas.concat",
"numpy.std",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axvlin... | [((270, 327), 'yfinance.download', 'yf.download', (['ticker'], {'start': '"""1991-02-01"""', 'end': '"""2018-06-01"""'}), "(ticker, start='1991-02-01', end='2018-06-01')\n", (281, 327), True, 'import yfinance as yf\n'), ((2321, 2335), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2333, 2335), True, 'import pandas as pd\n'), ((2341, 2355), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2353, 2355), True, 'import pandas as pd\n'), ((4212, 4228), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (4223, 4228), True, 'import matplotlib.pyplot as plt\n'), ((4229, 4276), 'matplotlib.pyplot.hist', 'plt.hist', (['P_Values.iloc[:, 0]', '(20)'], {'density': '(True)'}), '(P_Values.iloc[:, 0], 20, density=True)\n', (4237, 4276), True, 'import matplotlib.pyplot as plt\n'), ((4366, 4380), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4376, 4380), True, 'import matplotlib.pyplot as plt\n'), ((4400, 4488), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(0.05)', 'color': '"""r"""', 'label': '"""p-value of 0.05"""', 'linestyle': '"""--"""', 'linewidth': '(1)'}), "(x=0.05, color='r', label='p-value of 0.05', linestyle='--',\n linewidth=1)\n", (4411, 4488), True, 'import matplotlib.pyplot as plt\n'), ((4485, 4497), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4495, 4497), True, 'import matplotlib.pyplot as plt\n'), ((4591, 4607), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (4602, 4607), True, 'import matplotlib.pyplot as plt\n'), ((4608, 4637), 'matplotlib.pyplot.hist', 'plt.hist', (['P_Values.iloc[:, 1]'], {}), '(P_Values.iloc[:, 1])\n', (4616, 4637), True, 'import matplotlib.pyplot as plt\n'), ((4728, 4742), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4738, 4742), True, 'import matplotlib.pyplot as plt\n'), ((4743, 4831), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(0.05)', 'color': '"""r"""', 'label': 
'"""p-value of 0.05"""', 'linestyle': '"""--"""', 'linewidth': '(1)'}), "(x=0.05, color='r', label='p-value of 0.05', linestyle='--',\n linewidth=1)\n", (4754, 4831), True, 'import matplotlib.pyplot as plt\n'), ((4828, 4840), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4838, 4840), True, 'import matplotlib.pyplot as plt\n'), ((4934, 4950), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (4945, 4950), True, 'import matplotlib.pyplot as plt\n'), ((4951, 4980), 'matplotlib.pyplot.hist', 'plt.hist', (['P_Values.iloc[:, 2]'], {}), '(P_Values.iloc[:, 2])\n', (4959, 4980), True, 'import matplotlib.pyplot as plt\n'), ((5070, 5158), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(0.05)', 'color': '"""r"""', 'label': '"""p-value of 0.05"""', 'linestyle': '"""--"""', 'linewidth': '(1)'}), "(x=0.05, color='r', label='p-value of 0.05', linestyle='--',\n linewidth=1)\n", (5081, 5158), True, 'import matplotlib.pyplot as plt\n'), ((5155, 5167), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5165, 5167), True, 'import matplotlib.pyplot as plt\n'), ((5169, 5179), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5177, 5179), True, 'import matplotlib.pyplot as plt\n'), ((5200, 5218), 'matplotlib.pyplot.plot', 'plt.plot', (['P_Values'], {}), '(P_Values)\n', (5208, 5218), True, 'import matplotlib.pyplot as plt\n'), ((5219, 5307), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0.05)', 'color': '"""r"""', 'label': '"""p-value of 0.05"""', 'linestyle': '"""--"""', 'linewidth': '(1)'}), "(y=0.05, color='r', label='p-value of 0.05', linestyle='--',\n linewidth=1)\n", (5230, 5307), True, 'import matplotlib.pyplot as plt\n'), ((5304, 5382), 'matplotlib.pyplot.title', 'plt.title', (['"""Time Series of P-Values for Trades at 1-40 Days from Breach of MA"""'], {}), "('Time Series of P-Values for Trades at 1-40 Days from Breach of MA')\n", (5313, 5382), True, 'import matplotlib.pyplot as plt\n'), ((5383, 5443), 
'matplotlib.pyplot.legend', 'plt.legend', (["('MA 200', 'MA 100', 'MA 50', 'p-value of 0.05')"], {}), "(('MA 200', 'MA 100', 'MA 50', 'p-value of 0.05'))\n", (5393, 5443), True, 'import matplotlib.pyplot as plt\n'), ((5443, 5453), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5451, 5453), True, 'import matplotlib.pyplot as plt\n'), ((2500, 2536), 'pandas.concat', 'pd.concat', (['[SnP_Returns, df]'], {'axis': '(1)'}), '([SnP_Returns, df], axis=1)\n', (2509, 2536), True, 'import pandas as pd\n'), ((2546, 2560), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2558, 2560), True, 'import pandas as pd\n'), ((4132, 4160), 'numpy.mean', 'np.mean', (['P_Values.iloc[:, 0]'], {}), '(P_Values.iloc[:, 0])\n', (4139, 4160), True, 'import numpy as np\n'), ((4180, 4207), 'numpy.std', 'np.std', (['P_Values.iloc[:, 0]'], {}), '(P_Values.iloc[:, 0])\n', (4186, 4207), True, 'import numpy as np\n'), ((4511, 4539), 'numpy.mean', 'np.mean', (['P_Values.iloc[:, 1]'], {}), '(P_Values.iloc[:, 1])\n', (4518, 4539), True, 'import numpy as np\n'), ((4559, 4586), 'numpy.std', 'np.std', (['P_Values.iloc[:, 1]'], {}), '(P_Values.iloc[:, 1])\n', (4565, 4586), True, 'import numpy as np\n'), ((4854, 4882), 'numpy.mean', 'np.mean', (['P_Values.iloc[:, 2]'], {}), '(P_Values.iloc[:, 2])\n', (4861, 4882), True, 'import numpy as np\n'), ((4902, 4929), 'numpy.std', 'np.std', (['P_Values.iloc[:, 2]'], {}), '(P_Values.iloc[:, 2])\n', (4908, 4929), True, 'import numpy as np\n'), ((2086, 2128), 'pandas.Series', 'pd.Series', (["SnP['Adj Close'][x[0]:x[0] + n]"], {}), "(SnP['Adj Close'][x[0]:x[0] + n])\n", (2095, 2128), True, 'import pandas as pd\n')] |
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE_MATPOWER file.
# Copyright 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
# The file has been modified from Pypower.
# The function mu() has been added to the solver in order to provide an optimal iteration control
#
# Copyright (c) 2018 <NAME>
#
# This file retains the BSD-Style license
from numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, \
empty, float64, int32, arange
from scipy.sparse import csr_matrix as sparse, hstack, vstack
from scipy.sparse.linalg import spsolve, splu
import numpy as np
import pandas as pd
import numba as nb
import time
from warnings import warn
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse import hstack as hs, vstack as vs
from scipy.sparse.linalg import factorized, spsolve
from matplotlib import pyplot as plt
import scipy
scipy.ALLOW_THREADS = True
import time
import numpy as np
np.set_printoptions(precision=8, suppress=True, linewidth=320)
def dSbus_dV(Ybus, V, I):
"""
Computes partial derivatives of power injection w.r.t. voltage.
:param Ybus: Admittance matrix
:param V: Bus voltages array
:param I: Bus current injections array
:return:
"""
'''
Computes partial derivatives of power injection w.r.t. voltage.
Returns two matrices containing partial derivatives of the complex bus
power injections w.r.t voltage magnitude and voltage angle respectively
(for all buses). If C{Ybus} is a sparse matrix, the return values will be
also. The following explains the expressions used to form the matrices::
Ibus = Ybus * V - I
S = diag(V) * conj(Ibus) = diag(conj(Ibus)) * V
Partials of V & Ibus w.r.t. voltage magnitudes::
dV/dVm = diag(V / abs(V))
dI/dVm = Ybus * dV/dVm = Ybus * diag(V / abs(V))
Partials of V & Ibus w.r.t. voltage angles::
dV/dVa = j * diag(V)
dI/dVa = Ybus * dV/dVa = Ybus * j * diag(V)
Partials of S w.r.t. voltage magnitudes::
dS/dVm = diag(V) * conj(dI/dVm) + diag(conj(Ibus)) * dV/dVm
= diag(V) * conj(Ybus * diag(V / abs(V)))
+ conj(diag(Ibus)) * diag(V / abs(V))
Partials of S w.r.t. voltage angles::
dS/dVa = diag(V) * conj(dI/dVa) + diag(conj(Ibus)) * dV/dVa
= diag(V) * conj(Ybus * j * diag(V))
+ conj(diag(Ibus)) * j * diag(V)
= -j * diag(V) * conj(Ybus * diag(V))
+ conj(diag(Ibus)) * j * diag(V)
= j * diag(V) * conj(diag(Ibus) - Ybus * diag(V))
For more details on the derivations behind the derivative code used
in PYPOWER information, see:
[TN2] <NAME>, "AC Power Flows, Generalized OPF Costs and
their Derivatives using Complex Matrix Notation", MATPOWER
Technical Note 2, February 2010.
U{http://www.pserc.cornell.edu/matpower/TN2-OPF-Derivatives.pdf}
@author: <NAME> (PSERC Cornell)
'''
ib = range(len(V))
Ibus = Ybus * V - I
diagV = sparse((V, (ib, ib)))
diagIbus = sparse((Ibus, (ib, ib)))
diagVnorm = sparse((V / np.abs(V), (ib, ib)))
dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm
dS_dVa = 1.0j * diagV * conj(diagIbus - Ybus * diagV)
return dS_dVm, dS_dVa
def mu(Ybus, Ibus, J, incS, dV, dx, pvpq, pq):
"""
Calculate the Iwamoto acceleration parameter as described in:
"A Load Flow Calculation Method for Ill-Conditioned Power Systems" by <NAME>. and <NAME>."
Args:
Ybus: Admittance matrix
J: Jacobian matrix
incS: mismatch vector
dV: voltage increment (in complex form)
dx: solution vector as calculated dx = solve(J, incS)
pvpq: array of the pq and pv indices
pq: array of the pq indices
Returns:
the Iwamoto's optimal multiplier for ill conditioned systems
"""
# evaluate the Jacobian of the voltage derivative
# theoretically this is the second derivative matrix
# since the Jacobian (J2) has been calculated with dV instead of V
J2 = Jacobian(Ybus, dV, Ibus, pq, pvpq)
a = incS
b = J * dx
c = 0.5 * dx * J2 * dx
g0 = -a.dot(b)
g1 = b.dot(b) + 2 * a.dot(c)
g2 = -3.0 * b.dot(c)
g3 = 2.0 * c.dot(c)
roots = np.roots([g3, g2, g1, g0])
# three solutions are provided, the first two are complex, only the real solution is valid
return roots[2].real
def Jacobian(Ybus, V, Ibus, pq, pvpq):
    """Assemble the power-flow system Jacobian in CSR format.

    Args:
        Ybus: Admittance matrix
        V: Array of nodal voltages
        Ibus: Array of nodal current injections
        pq: Array with the indices of the PQ buses
        pvpq: Array with the indices of the PV and PQ buses

    Returns:
        The sparse system Jacobian matrix (CSR).
    """
    # Partial derivatives of the power injections w.r.t. |V| and angle.
    dS_dVm, dS_dVa = dSbus_dV(Ybus, V, Ibus)

    # Column-vector index arrays used for 2-D fancy indexing.
    rows_pvpq = array([pvpq]).T
    rows_pq = array([pq]).T

    # Top row: dP blocks; bottom row: dQ blocks.
    top = hstack([dS_dVa[rows_pvpq, pvpq].real, dS_dVm[rows_pvpq, pq].real])
    bottom = hstack([dS_dVa[rows_pq, pvpq].imag, dS_dVm[rows_pq, pq].imag])

    return vstack([top, bottom], format="csr")
def NR_SVC(Ybus, Sbus, V0, Ibus, pv, pq, pvb, Bmin, Bmax, tol, max_it=15, mu0=0.05, error_registry=None):
    """
    Solves the power flow using a full Newton's method
    Args:
        Ybus: Admittance matrix
        Sbus: Array of nodal power injections
        V0: Array of nodal voltages (initial solution)
        Ibus: Array of nodal current injections
        pv: Array with the indices of the PV buses
        pq: Array with the indices of the PQ buses
        pvb: Array with SVC bus indices (currently unused in this routine)
        Bmin: lower susceptance limits (currently unused in this routine)
        Bmax: upper susceptance limits (currently unused in this routine)
        tol: Tolerance
        max_it: Maximum number of iterations
        mu0: parameter used to correct the "bad" iterations, should be between 1e-3 ~ 0.5
        error_registry: list to store the error for plotting
    Returns:
        Voltage solution, converged?, error, calculated power injections, elapsed time
    """
    start = time.time()

    # initialize
    back_track_counter = 0
    back_track_iterations = 0
    converged = 0
    iter_ = 0
    V = V0
    Va = np.angle(V)
    Vm = np.abs(V)
    dVa = np.zeros_like(Va)
    dVm = np.zeros_like(Vm)

    # set up indexing for updating V
    pvpq = r_[pv, pq]
    npv = len(pv)
    npq = len(pq)

    # j1:j2 - V angle of pv buses
    j1 = 0
    j2 = npv
    # j3:j4 - V angle of pq buses
    j3 = j2
    j4 = j2 + npq
    # j5:j6 - V mag of pq buses
    j5 = j4
    j6 = j4 + npq

    # evaluate F(x0)
    Scalc = V * conj(Ybus * V - Ibus)
    dS = Scalc - Sbus  # compute the mismatch
    f = r_[dS[pv].real, dS[pq].real, dS[pq].imag]

    # check tolerance
    norm_f = 0.5 * f.dot(f)
    if error_registry is not None:
        error_registry.append(norm_f)
    if norm_f < tol:
        converged = 1

    # do Newton iterations
    while not converged and iter_ < max_it:
        # update iteration counter
        iter_ += 1

        # evaluate Jacobian at the current solution
        J = Jacobian(Ybus, V, Ibus, pq, pvpq)

        # compute update step
        dx = spsolve(J, f)

        # reassign the solution vector
        if npv:
            dVa[pv] = dx[j1:j2]
        if npq:
            dVa[pq] = dx[j3:j4]
            dVm[pq] = dx[j5:j6]

        # update voltage using the fixed acceleration factor mu0
        mu_ = mu0
        Vm -= mu_ * dVm
        Va -= mu_ * dVa
        V = Vm * exp(1j * Va)

        Vm = np.abs(V)  # update Vm and Va again in case
        Va = np.angle(V)  # we wrapped around with a negative Vm

        # compute the mismatch function f(x_new)
        Scalc = V * conj(Ybus * V - Ibus)
        # BUG FIX: the original referenced the undefined name `Scal` here,
        # raising a NameError on the first iteration.
        dS = Scalc - Sbus  # complex power mismatch
        f_new = r_[dS[pv].real, dS[pq].real, dS[pq].imag]
        # BUG FIX: carry the fresh mismatch into the next Newton step; the
        # original never updated `f`, so every step solved against the
        # initial mismatch vector.
        f = f_new

        norm_f = 0.5 * f_new.dot(f_new)
        if error_registry is not None:
            error_registry.append(norm_f)
        if norm_f < tol:
            converged = 1

    end = time.time()
    elapsed = end - start

    print('iter_', iter_, ' - back_track_counter', back_track_counter,
          ' - back_track_iterations', back_track_iterations)

    return V, converged, norm_f, Scalc, elapsed
########################################################################################################################
# MAIN
########################################################################################################################
if __name__ == "__main__":
    # Heavy dependencies are imported only when running as a script so that
    # importing this module for its solver functions stays lightweight.
    from GridCal.Engine import FileOpen, compile_snapshot_circuit
    from matplotlib import pyplot as plt
    import pandas as pd
    import os
    import time
    np.set_printoptions(linewidth=10000)
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)
    # fname = os.path.join('..', '..', '..', 'Grids_and_profiles', 'grids', 'IEEE 30 Bus with storage.xlsx')
    # fname = os.path.join('..', '..', '..', 'Grids_and_profiles', 'grids', 'Illinois200Bus.xlsx')
    # fname = os.path.join('..', '..', '..', 'Grids_and_profiles', 'grids', 'Pegase 2869.xlsx')
    # fname = os.path.join('..', '..', '..', 'Grids_and_profiles', 'grids', '1354 Pegase.xlsx')
    # fname = os.path.join('..', '..', '..', 'Grids_and_profiles', 'grids', 'IEEE 14.xlsx')
    fname = os.path.join('..', '..', '..', 'Grids_and_profiles', 'grids', 'Lynn 5 bus (SVC).gridcal')
    # fname = '/home/santi/Documentos/Private_Grids/2026_INVIERNO_para Plexos_FINAL_9.raw'
    # fname = '/home/santi/Documentos/Private_Grids/201902271115 caso TReal Israel.raw'
    # Load the grid, compile the snapshot numerical circuit, and solve the
    # first island with the Newton-Raphson-SVC routine defined above.
    grid = FileOpen(file_name=fname).open()
    nc = compile_snapshot_circuit(grid)
    islands = nc.split_into_islands(ignore_single_node_islands=True)
    circuit = islands[0]
    print('Newton-Raphson-SVC')
    start_time = time.time()
    # error_data1 collects the per-iteration mismatch norms (error_registry).
    error_data1 = list()
    V1, converged_, err, S, el = NR_SVC(Ybus=circuit.Ybus,
                                        Sbus=circuit.Sbus,
                                        V0=circuit.Vbus,
                                        Ibus=circuit.Ibus,
                                        pv=circuit.pv,
                                        pq=circuit.pq,
                                        pvb=circuit.pvb,
                                        Bmin=circuit.Bmin_bus[:, 0],
                                        Bmax=circuit.Bmax_bus[:, 0],
                                        tol=1e-5,
                                        max_it=50,
                                        error_registry=error_data1)
    print("--- %s seconds ---" % (time.time() - start_time))
    print('error: \t', err)
| [
"numpy.abs",
"scipy.sparse.linalg.spsolve",
"numpy.conj",
"numpy.zeros_like",
"os.path.join",
"numpy.roots",
"numpy.angle",
"pandas.set_option",
"GridCal.Engine.compile_snapshot_circuit",
"numpy.exp",
"GridCal.Engine.FileOpen",
"scipy.sparse.hstack",
"numpy.array",
"scipy.sparse.csr_matrix... | [((1409, 1471), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(8)', 'suppress': '(True)', 'linewidth': '(320)'}), '(precision=8, suppress=True, linewidth=320)\n', (1428, 1471), True, 'import numpy as np\n'), ((3575, 3596), 'scipy.sparse.csr_matrix', 'sparse', (['(V, (ib, ib))'], {}), '((V, (ib, ib)))\n', (3581, 3596), True, 'from scipy.sparse import csr_matrix as sparse, hstack, vstack\n'), ((3612, 3636), 'scipy.sparse.csr_matrix', 'sparse', (['(Ibus, (ib, ib))'], {}), '((Ibus, (ib, ib)))\n', (3618, 3636), True, 'from scipy.sparse import csr_matrix as sparse, hstack, vstack\n'), ((4842, 4868), 'numpy.roots', 'np.roots', (['[g3, g2, g1, g0]'], {}), '([g3, g2, g1, g0])\n', (4850, 4868), True, 'import numpy as np\n'), ((6531, 6542), 'time.time', 'time.time', ([], {}), '()\n', (6540, 6542), False, 'import time\n'), ((6670, 6681), 'numpy.angle', 'np.angle', (['V'], {}), '(V)\n', (6678, 6681), True, 'import numpy as np\n'), ((6691, 6700), 'numpy.abs', 'np.abs', (['V'], {}), '(V)\n', (6697, 6700), True, 'import numpy as np\n'), ((6711, 6728), 'numpy.zeros_like', 'np.zeros_like', (['Va'], {}), '(Va)\n', (6724, 6728), True, 'import numpy as np\n'), ((6739, 6756), 'numpy.zeros_like', 'np.zeros_like', (['Vm'], {}), '(Vm)\n', (6752, 6756), True, 'import numpy as np\n'), ((8488, 8499), 'time.time', 'time.time', ([], {}), '()\n', (8497, 8499), False, 'import time\n'), ((9156, 9192), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(10000)'}), '(linewidth=10000)\n', (9175, 9192), True, 'import numpy as np\n'), ((9197, 9235), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(500)'], {}), "('display.max_rows', 500)\n", (9210, 9235), True, 'import pandas as pd\n'), ((9240, 9281), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(500)'], {}), "('display.max_columns', 500)\n", (9253, 9281), True, 'import pandas as pd\n'), ((9286, 9322), 'pandas.set_option', 'pd.set_option', 
(['"""display.width"""', '(1000)'], {}), "('display.width', 1000)\n", (9299, 9322), True, 'import pandas as pd\n'), ((9828, 9921), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '""".."""', '"""Grids_and_profiles"""', '"""grids"""', '"""Lynn 5 bus (SVC).gridcal"""'], {}), "('..', '..', '..', 'Grids_and_profiles', 'grids',\n 'Lynn 5 bus (SVC).gridcal')\n", (9840, 9921), False, 'import os\n'), ((10151, 10181), 'GridCal.Engine.compile_snapshot_circuit', 'compile_snapshot_circuit', (['grid'], {}), '(grid)\n', (10175, 10181), False, 'from GridCal.Engine import FileOpen, compile_snapshot_circuit\n'), ((10326, 10337), 'time.time', 'time.time', ([], {}), '()\n', (10335, 10337), False, 'import time\n'), ((3789, 3818), 'numpy.conj', 'conj', (['(diagIbus - Ybus * diagV)'], {}), '(diagIbus - Ybus * diagV)\n', (3793, 3818), False, 'from numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, empty, float64, int32, arange\n'), ((7076, 7097), 'numpy.conj', 'conj', (['(Ybus * V - Ibus)'], {}), '(Ybus * V - Ibus)\n', (7080, 7097), False, 'from numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, empty, float64, int32, arange\n'), ((7608, 7621), 'scipy.sparse.linalg.spsolve', 'spsolve', (['J', 'f'], {}), '(J, f)\n', (7615, 7621), False, 'from scipy.sparse.linalg import factorized, spsolve\n'), ((7948, 7957), 'numpy.abs', 'np.abs', (['V'], {}), '(V)\n', (7954, 7957), True, 'import numpy as np\n'), ((8005, 8016), 'numpy.angle', 'np.angle', (['V'], {}), '(V)\n', (8013, 8016), True, 'import numpy as np\n'), ((3709, 3731), 'numpy.conj', 'conj', (['(Ybus * diagVnorm)'], {}), '(Ybus * diagVnorm)\n', (3713, 3731), False, 'from numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, empty, float64, int32, arange\n'), ((3734, 3748), 'numpy.conj', 'conj', (['diagIbus'], {}), '(diagIbus)\n', (3738, 3748), False, 'from 
numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, empty, float64, int32, arange\n'), ((5633, 5651), 'scipy.sparse.hstack', 'hstack', (['[J11, J12]'], {}), '([J11, J12])\n', (5639, 5651), False, 'from scipy.sparse import csr_matrix as sparse, hstack, vstack\n'), ((5669, 5687), 'scipy.sparse.hstack', 'hstack', (['[J21, J22]'], {}), '([J21, J22])\n', (5675, 5687), False, 'from scipy.sparse import csr_matrix as sparse, hstack, vstack\n'), ((7921, 7935), 'numpy.exp', 'exp', (['(1.0j * Va)'], {}), '(1.0j * Va)\n', (7924, 7935), False, 'from numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, empty, float64, int32, arange\n'), ((8127, 8148), 'numpy.conj', 'conj', (['(Ybus * V - Ibus)'], {}), '(Ybus * V - Ibus)\n', (8131, 8148), False, 'from numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, empty, float64, int32, arange\n'), ((10109, 10134), 'GridCal.Engine.FileOpen', 'FileOpen', ([], {'file_name': 'fname'}), '(file_name=fname)\n', (10117, 10134), False, 'from GridCal.Engine import FileOpen, compile_snapshot_circuit\n'), ((3665, 3674), 'numpy.abs', 'np.abs', (['V'], {}), '(V)\n', (3671, 3674), True, 'import numpy as np\n'), ((11106, 11117), 'time.time', 'time.time', ([], {}), '()\n', (11115, 11117), False, 'import time\n'), ((5461, 5474), 'numpy.array', 'array', (['[pvpq]'], {}), '([pvpq])\n', (5466, 5474), False, 'from numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, empty, float64, int32, arange\n'), ((5506, 5519), 'numpy.array', 'array', (['[pvpq]'], {}), '([pvpq])\n', (5511, 5519), False, 'from numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, empty, float64, int32, arange\n'), ((5549, 5560), 'numpy.array', 'array', (['[pq]'], {}), '([pq])\n', (5554, 5560), False, 'from 
numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, empty, float64, int32, arange\n'), ((5592, 5603), 'numpy.array', 'array', (['[pq]'], {}), '([pq])\n', (5597, 5603), False, 'from numpy import array, angle, exp, linalg, r_, Inf, conj, diag, asmatrix, asarray, zeros_like, zeros, complex128, empty, float64, int32, arange\n')] |
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
# Load the trained model once at import time. A context manager closes the
# pickle file; the original `pickle.load(open(...))` leaked the file handle.
with open('model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
@app.route("/")
def home():
    """Serve the landing page containing the medical data entry form."""
    return render_template("medical-form.html")
@app.route('/predict', methods=['POST'])
def predict():
    """Parse the submitted form values, run the model, render the verdict."""
    form_values = [int(value) for value in request.form.values()]
    model_input = [np.array(form_values)]
    output = model.predict(model_input)
    return render_template('result.html', prediction="You are Corona {}".format(output))
if __name__ == "__main__":
app.run(debug=True) | [
"flask.render_template",
"numpy.array",
"flask.request.form.values",
"flask.Flask"
] | [((98, 113), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (103, 113), False, 'from flask import Flask, request, jsonify, render_template\n'), ((198, 234), 'flask.render_template', 'render_template', (['"""medical-form.html"""'], {}), "('medical-form.html')\n", (213, 234), False, 'from flask import Flask, request, jsonify, render_template\n'), ((370, 388), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (378, 388), True, 'import numpy as np\n'), ((325, 346), 'flask.request.form.values', 'request.form.values', ([], {}), '()\n', (344, 346), False, 'from flask import Flask, request, jsonify, render_template\n')] |
import unicodedata
import json
import string
import tensorflow as tf
import tflearn
from keras.utils import to_categorical
import glob
import os
from encode import *
import numpy as np
import json
# Character vocabulary recognised by the model; letterToIndex() maps into it
# 1-based, reserving index 0 for the empty/padding character.
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
n_class = 9  # number of intent categories (see mapIntentToNumber)
def findFiles(path):
    """Return all file paths matching the glob pattern *path*."""
    return glob.glob(path)
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip accents from *s* and keep only characters in all_letters."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed
            if unicodedata.category(ch) != 'Mn' and ch in all_letters]
    return ''.join(kept)
# Read a file and split into lines
def readLines(filename):
    """Read *filename* and return its lines normalised to plain ASCII.

    Returns:
        list of str: one ASCII-converted entry per line of the file.
    """
    # A context manager closes the handle deterministically; the original
    # open(...).read() chain left it to garbage collection.
    with open(filename, encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    return [unicodeToAscii(line) for line in lines]
def letterToIndex(letter):
    """Return the 1-based vocabulary index of *letter*.

    Index 0 is reserved for the empty/padding character (used by
    pad_sequence later); characters absent from all_letters also map to 0
    because str.find yields -1, which shifts to 0.
    """
    return 1 + all_letters.find(letter)
def sentenceToIndex(sentence):
    """Encode a sentence as the list of per-character vocabulary indices."""
    return list(map(letterToIndex, sentence))
def sentenceToOneHotVectors(sentence):
    """Turn a list of character indices into one-hot rows.

    Returns:
        Array of one-hot vectors, each of width n_letters + 1.
    """
    # A session is needed so .eval() has a default session to run in (TF1).
    with tf.Session() as sess:
        return tf.one_hot(sentence, n_letters + 1).eval()
def mapIntentToNumber(intent):
    """Map an intent label to its class index (None for unknown labels)."""
    table = {'greetings': 0, 'thanks': 1, 'bye': 2, 'news': 3,
             'weather': 4, 'worldCup': 5, 'pkmGo': 6, 'help': 7,
             'compliment': 8}
    return table.get(intent)
def mapNumberToIntent(n):
    """Inverse of mapIntentToNumber: map a class index back to its label.

    Returns None for indices outside the known range.
    """
    table = {0: 'greetings', 1: 'thanks', 2: 'bye', 3: 'news',
             4: 'weather', 5: 'worldCup',
             # BUG FIX: was 'pkmGO', which did not round-trip with the
             # 'pkmGo' key used by mapIntentToNumber.
             6: 'pkmGo',
             7: 'help', 8: 'compliment'}
    return table.get(n)
def embed(X):
    """Encode sentence X as a (100, n_letters + 1) one-hot matrix.

    The sentence is lower-cased, converted to character indices, one-hot
    encoded, and zero-padded up to 100 rows.
    """
    indices = sentenceToIndex(X.lower())
    one_hot = sentenceToOneHotVectors(indices)
    padding = np.zeros((100 - len(one_hot), n_letters + 1))
    padded = np.empty((100, n_letters + 1))
    np.concatenate([one_hot, padding], out=padded)
    return padded
def getData():
    """Build the training set from every cases/*.txt file.

    Returns:
        (X, Y): X is a list of (100, n_letters + 1) one-hot matrices; Y is
        the matching one-hot encoded intent labels.
    """
    X, Y = [], []
    num_classes = 0
    for filename in findFiles('cases/*.txt'):
        num_classes += 1
        # The file name (without extension) is the intent category.
        category = os.path.splitext(os.path.basename(filename))[0]
        for line in readLines(filename):
            X.append(embed(line))
            Y.append(mapIntentToNumber(category))
    return X, to_categorical(Y)
| [
"tensorflow.one_hot",
"tensorflow.Session",
"keras.utils.to_categorical",
"unicodedata.category",
"numpy.empty",
"os.path.basename",
"numpy.concatenate",
"unicodedata.normalize",
"glob.glob"
] | [((312, 327), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (321, 327), False, 'import glob\n'), ((2229, 2259), 'numpy.empty', 'np.empty', (['(100, n_letters + 1)'], {}), '((100, n_letters + 1))\n', (2237, 2259), True, 'import numpy as np\n'), ((2261, 2300), 'numpy.concatenate', 'np.concatenate', (['[x, to_concat]'], {'out': 'res'}), '([x, to_concat], out=res)\n', (2275, 2300), True, 'import numpy as np\n'), ((2607, 2624), 'keras.utils.to_categorical', 'to_categorical', (['Y'], {}), '(Y)\n', (2621, 2624), False, 'from keras.utils import to_categorical\n'), ((1318, 1330), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1328, 1330), True, 'import tensorflow as tf\n'), ((474, 505), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 's'], {}), "('NFD', s)\n", (495, 505), False, 'import unicodedata\n'), ((1348, 1383), 'tensorflow.one_hot', 'tf.one_hot', (['sentence', '(n_letters + 1)'], {}), '(sentence, n_letters + 1)\n', (1358, 1383), True, 'import tensorflow as tf\n'), ((2454, 2480), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2470, 2480), False, 'import os\n'), ((511, 534), 'unicodedata.category', 'unicodedata.category', (['c'], {}), '(c)\n', (531, 534), False, 'import unicodedata\n')] |
"""
The features extractor decomposes each URL in the set of selected features and
saves the records in a CSV file. Selected features are: URL size, protocol used
(HTTP/HTTPS), domain and subdomain digit count, symbols count ('@', '~'),
presence
"""
import csv
import re
import subprocess
import urllib.parse as urllib
import editdistance as levenshtein
import numpy as np
import tldextract as tld
def hamming_distance(s1, s2):
    """Count the positions (up to the shorter length) where s1 and s2 differ."""
    return sum(1 for a, b in zip(s1, s2) if a != b)
def min_distances(url, N):
    """
    Calculates the minimum value for the levenshtein distance between the top N
    domains and the URL's domain and the hamming distance between the top N
    domains and URL's subdomain. To ensure precision, the subdomain is split on
    "." and "-" and then fed into the hamming distance calculation.

    Returns:
        (min_subdomain_distance, min_domain_distance): two 0/1 flags that are
        set when the subdomain/domain is "close" to a top benign domain.
    """
    subdomain, domain = tld.extract(url)[:2]
    min_dd, min_sd = 100, 100
    index = 1
    # BUG FIX: the original never closed this file handle; the context
    # manager guarantees it is released.
    with open("data/processed_sets/benign_1M.csv") as f_benign_1M:
        for row in csv.reader(f_benign_1M):
            b_domain = tld.extract(row[0]).domain
            levdd = levenshtein.eval(domain, b_domain)
            if levdd < min_dd:
                min_dd = levdd
            # Splitting the subdomain to cover cases similar to
            # https://target-brand.customer-service.signin.example.com/
            for component in subdomain.split("."):
                if "-" in component:
                    for subcomp in component.split("-"):
                        hamsd = hamming_distance(subcomp, b_domain)
                        if hamsd < min_sd:
                            min_sd = hamsd
                else:
                    hamsd = hamming_distance(component, b_domain)
                    if hamsd < min_sd:
                        min_sd = hamsd
            index += 1
            if index == N:
                break
    # Two strings with distance greater than 5 are regarded as different.
    # Otherwise the target URL's domain and subdomain are considered similar
    # to one of the top N domains. (The original comment said 10; the code
    # has always used 5.)
    min_subdomain_distance = (
        1
        if min_sd <= 5
        and "www" not in subdomain
        and len(subdomain.split(".")) == 1
        else 0
    )
    min_domain_distance = 1 if min_dd <= 5 and min_dd != 0 else 0
    return min_subdomain_distance, min_domain_distance
def has_domain(url, N):
    """Return True if one of the top-N benign domains appears in the URL's
    path or query string.

    *url* must be a parsed URL object exposing .path and .query.
    """
    # BUG FIX: the original returned True from inside the loop without ever
    # reaching f.close(), leaking the handle; 'with' closes it on every path.
    with open("data/processed_sets/benign_1M.csv") as f_benign_list:
        index = 1
        for row in csv.reader(f_benign_list):
            # Testing first if the length of the domain is greater or equal
            # to 5 to reduce false positives produced by domains like t.com
            if len(row[0]) >= 5 and (row[0] in url.path or row[0] in url.query):
                return True
            index += 1
            if index == N:
                break
    return False
def contains(words, target, check_domains=False):
    """Return 1 if any entry of *words* occurs in *target*, else 0.

    With check_domains=True, a miss falls back to has_domain(target, 2500)
    before returning 0.
    """
    hit = any(word in target for word in words)
    if hit:
        return 1
    if check_domains:
        return 1 if has_domain(target, 2500) else 0
    return 0
def extract(raw_url, label=-1):
    """
    Decomposes passed URL into the selected list of features

    Returns a 1-D numpy array of 12 feature values, prefixed with `label`
    when a non-negative label is supplied.
    """
    parsed_url = urllib.urlparse(raw_url)
    netloc = tld.extract(raw_url)
    path_dot_count = parsed_url.path.count(".") + parsed_url.query.count(".")
    ### Feature 1: URL size
    # Reasoning: The literature shows a clear discrepancy between the average
    # benign URL size and average phishing URL size.
    url_size = len(raw_url)
    ### Feature 2: Protocol used (HTTP/HTTPS)
    # Reasoning: Although the number of phishing websites has risen
    # significantly in recent years, serving a website through HTTP is still an
    # indicator of maliciousness in most cases.
    # NOTE(review): the value is 1 for plain http (i.e. *no* TLS) despite the
    # variable name.
    tls_usage = 1 if raw_url.split(":")[0] == "http" else 0
    ### Feature 3: Number of numerical characters
    # Reasoning: It is highly uncommon for benign domains and subdomains to
    # contain any numerical characters.
    digit_count = sum(c.isdigit() for c in parsed_url.netloc)
    ### Feature 4: Number of '@' and '~' characters
    # Reasoning: Like numerical characters, it is uncommon for an URL to contain
    # any '~' characters. The '@' character produces unexpected behaviour in the
    # browser. It could either redirect to an email address or get the browser
    # to ignore everything after it.
    symbols_count = raw_url.count("@") + raw_url.count("~")
    ### Feature 5: Domain presence in the url path segment
    # Reasoning: This feature determines whether the path contains a domain.
    # This practice is often found in redirects and domains hosted on places
    # like firebase or storage.googleapis.com.
    domain_in_path = 1 if path_dot_count >= 2 else 0
    ### Feature 6: Number of hyphens
    # Reasoning: It is uncommon for benign domains and subdomains to use hyphens
    # to separate words while it is common for phishing URLs to use them. If a
    # domain is detected in path, the number of hyphens is counted over the
    # entire URL
    dash_count = (
        parsed_url.netloc.count("-")
        if path_dot_count < 2
        else raw_url.count("-")
    )
    ### Feature 7: Size of the subdomain
    # Reasoning: The literature shows that there is a strong correlation between
    # a logn subdomain and phishing webpages. In this feature the length of the
    # subdomain is based on its components rather than character count as past
    # experiments proved it more effective
    components = netloc.subdomain.count(".") + netloc.subdomain.count("-") + 1
    # NOTE(review): computed but currently unused — the corresponding entry is
    # commented out in the feature vectors below.
    _long_subdomain = 1 if components >= 3 else 0
    ### Features 8,9 and 10: Presence of sensitive vocabulary and of benign
    ### domains in URL's subdomain
    # Reasoning: The sensitive words shown below are a curated selection from
    # the output of a word frequency algorithm and are an indicator of
    # suspiciousness. Because it is a common practice for phishing URLs to
    # inlcude the target brand in their subdomain, the sensitive subdomain words
    # check the presence of top N benign domains as well
    sensitive_subdomains = [
        "paypal",
        "sites",
        "secure",
        "login",
        "runescape",
        "account",
        "service",
        "sign",
        "bank",
        "transfer",
        "user",
        "security",
    ]
    sensitive_domains = [
        "000webhost",
        "sharepoint",
        "customer",
        "service",
        "secure",
        "support",
    ]
    sensitive_paths = [
        "admin",
        "login",
        "account",
        "sign",
        "secure",
        "verification",
        "transfer",
        "validation",
        "bank",
        "verify",
    ]
    subdomain_sw = contains(sensitive_subdomains, netloc.subdomain)
    domain_sw = contains(sensitive_domains, netloc.domain)
    path_sw = contains(sensitive_paths, parsed_url, check_domains=True)
    ### Feature 11: Presence of IP address in URL
    # Reasoning: Usage of IP address instead of a domain name is tighyly related
    # to phishing/malicious intent.
    ip_presence = (
        1 if len(re.findall(r"[0-9]+(?:\.[0-9]+){3}", raw_url)) != 0 else 0
    )
    ### Feature 12 and 13: Minimum distance between URL's domain and subdomain
    ### and the top N benign domains
    # Reasoning: It is a common practice for phishing URLs to use a variation of
    # the targeted domain either in their domain or subdomain to create the
    # illusion of the legit website.
    # NOTE(review): min_distances() returns (min_subdomain_distance,
    # min_domain_distance); the unpacking order below is reversed relative to
    # these names — confirm which ordering the model was trained with before
    # changing it.
    suspicious_domain, suspicious_subdomain = min_distances(raw_url, 25_000)
    if label >= 0:
        return np.array(
            [
                label,
                url_size,
                tls_usage,
                digit_count,
                symbols_count,
                domain_in_path,
                dash_count,
                # long_subdomain,
                subdomain_sw,
                domain_sw,
                path_sw,
                ip_presence,
                suspicious_subdomain,
                suspicious_domain,
            ]
        )
    return np.array(
        [
            url_size,
            tls_usage,
            digit_count,
            symbols_count,
            domain_in_path,
            dash_count,
            # long_subdomain,
            subdomain_sw,
            domain_sw,
            path_sw,
            ip_presence,
            suspicious_subdomain,
            suspicious_domain,
        ]
    )
| [
"urllib.parse.urlparse",
"tldextract.extract",
"numpy.array",
"re.findall",
"csv.reader",
"editdistance.eval"
] | [((1020, 1043), 'csv.reader', 'csv.reader', (['f_benign_1M'], {}), '(f_benign_1M)\n', (1030, 1043), False, 'import csv\n'), ((2446, 2471), 'csv.reader', 'csv.reader', (['f_benign_list'], {}), '(f_benign_list)\n', (2456, 2471), False, 'import csv\n'), ((3269, 3293), 'urllib.parse.urlparse', 'urllib.urlparse', (['raw_url'], {}), '(raw_url)\n', (3284, 3293), True, 'import urllib.parse as urllib\n'), ((3307, 3327), 'tldextract.extract', 'tld.extract', (['raw_url'], {}), '(raw_url)\n', (3318, 3327), True, 'import tldextract as tld\n'), ((8161, 8344), 'numpy.array', 'np.array', (['[url_size, tls_usage, digit_count, symbols_count, domain_in_path,\n dash_count, subdomain_sw, domain_sw, path_sw, ip_presence,\n suspicious_subdomain, suspicious_domain]'], {}), '([url_size, tls_usage, digit_count, symbols_count, domain_in_path,\n dash_count, subdomain_sw, domain_sw, path_sw, ip_presence,\n suspicious_subdomain, suspicious_domain])\n', (8169, 8344), True, 'import numpy as np\n'), ((879, 895), 'tldextract.extract', 'tld.extract', (['url'], {}), '(url)\n', (890, 895), True, 'import tldextract as tld\n'), ((1107, 1141), 'editdistance.eval', 'levenshtein.eval', (['domain', 'b_domain'], {}), '(domain, b_domain)\n', (1123, 1141), True, 'import editdistance as levenshtein\n'), ((7688, 7878), 'numpy.array', 'np.array', (['[label, url_size, tls_usage, digit_count, symbols_count, domain_in_path,\n dash_count, subdomain_sw, domain_sw, path_sw, ip_presence,\n suspicious_subdomain, suspicious_domain]'], {}), '([label, url_size, tls_usage, digit_count, symbols_count,\n domain_in_path, dash_count, subdomain_sw, domain_sw, path_sw,\n ip_presence, suspicious_subdomain, suspicious_domain])\n', (7696, 7878), True, 'import numpy as np\n'), ((1064, 1083), 'tldextract.extract', 'tld.extract', (['row[0]'], {}), '(row[0])\n', (1075, 1083), True, 'import tldextract as tld\n'), ((7200, 7245), 're.findall', 're.findall', (['"""[0-9]+(?:\\\\.[0-9]+){3}"""', 'raw_url'], {}), 
"('[0-9]+(?:\\\\.[0-9]+){3}', raw_url)\n", (7210, 7245), False, 'import re\n')] |
import numpy as np
def roundLikeNCI(np_float64):
    """Round a value to 3 decimal places with ties rounding up (NCI style).

    The input is scaled by 1000, truncated toward zero, bumped by one when
    the dropped fraction is >= 0.5, and scaled back down.

    Returns:
        float: the rounded value.
    """
    # BUG FIX: np.float and np.int were deprecated aliases removed in
    # NumPy 1.24 — use the built-in float/int (np.float64 is a real dtype
    # and remains valid).
    scaled = np.float64(np_float64) * np.float64(1000.0)
    truncated = int(scaled)
    if scaled - truncated >= 0.5:
        truncated += 1
    return float(truncated) / float(1000)
| [
"numpy.float64",
"numpy.float"
] | [((64, 86), 'numpy.float64', 'np.float64', (['np_float64'], {}), '(np_float64)\n', (74, 86), True, 'import numpy as np\n'), ((89, 107), 'numpy.float64', 'np.float64', (['(1000.0)'], {}), '(1000.0)\n', (99, 107), True, 'import numpy as np\n'), ((149, 162), 'numpy.float', 'np.float', (['(0.5)'], {}), '(0.5)\n', (157, 162), True, 'import numpy as np\n'), ((267, 283), 'numpy.float', 'np.float', (['outval'], {}), '(outval)\n', (275, 283), True, 'import numpy as np\n'), ((286, 300), 'numpy.float', 'np.float', (['(1000)'], {}), '(1000)\n', (294, 300), True, 'import numpy as np\n')] |
from PIL import Image, ImageDraw, ImageFont # pip install pillow
import numpy
import cv2
import sys, os
FONT_MARGIN = 2  # padding (px) between the caption text and the title band edge
# Fill colour (RGB) for each cell symbol of the decoded matrix.
COLOR_MAP = {
    '1': (255, 35, 11),
    '0': (16, 194, 20),
    'C': (136, 0, 21),
    'D': (7, 92, 10)}
OUTPATH = 'output'  # root directory for all generated images/GIFs/videos
def get_attr(info, default=None, isBoolean=False):
    """Prompt the user for a setting; fall back to *default* on empty input.

    With isBoolean=True the answer is reduced to whether its first
    character is 'y' or 'Y'.
    """
    suffix = '' if default is None else '(default: {})'.format(default)
    value = input(info + suffix + ': ').strip()
    if not value and default is not None:
        value = default
    if isBoolean:
        value = value[0] in 'yY'
    return value
def get_cd_matrix(datafile, layoutfile, W, H):
    """Decode the raw game data into a list of per-round matrices.

    The layout file marks player positions with '_'; every other character
    is copied through verbatim. The data file holds the players' C/D
    choices, which are mapped to '1'/'0'.

    Returns:
        list of transposed (W, H)-reshaped numpy character arrays, one per
        round.
    """
    with open(layoutfile) as f:
        layout_str = f.read().replace('\n', '').replace('\t', '')
    with open(datafile) as f:
        player_str = f.read().replace('C', '1').replace('D', '0').split('\n')
    data_str = ''
    n_players = layout_str.count('_')
    cnt = 0
    # The data must contain a whole number of rounds for this layout.
    if len(player_str) % n_players != 0:
        print('It seems that you have choosen a wrong layout file, please check it.')
        exit(1)
    # Rebuild one full grid string per round by substituting the players'
    # choices into the '_' slots of the layout.
    # NOTE(review): player_str entries are whole lines of the data file —
    # this assumes one symbol per line; confirm the input format.
    for i in range(int(len(player_str)/layout_str.count('_'))):
        for p in layout_str:
            if p == '_':
                data_str += player_str[cnt]
                cnt += 1
            else:
                data_str += p
    matrix = list(numpy.array(list(data_str)).reshape((-1, W, H)))
    for i in range(len(matrix)):
        matrix[i] = matrix[i].T
    return matrix
def get_image_list(matrix_data, width, height, psize, title_height):
    """Render one PIL image per round matrix.

    Each grid cell is drawn pixel-by-pixel using get_color(); when
    title_height > 0 a "Round: N" caption band is drawn at the bottom.
    """
    length = len(matrix_data)
    print('Generate rounds images (0/{})...'.format(length), end='')
    if title_height:
        # Grow the font size until the caption fills the title band.
        fontsize = 1
        font = ImageFont.truetype("arial.ttf", fontsize)
        while font.getsize('Round: 999')[1] < title_height - FONT_MARGIN * 2:
            fontsize += 1
            font = ImageFont.truetype("arial.ttf", fontsize)
    iml = []
    for i, m in enumerate(matrix_data):
        image = Image.new('RGB', (width, height), (255, 255, 255))
        draw = ImageDraw.Draw(image)
        for x in range(width):
            for y in range(height - title_height):
                draw.point((x, y), fill=get_color(m, psize, x, y))
        if title_height > 0:
            # NOTE(review): the progress indicator is only refreshed when a
            # title band is drawn — it likely belongs outside this branch.
            print('\rGenerate rounds images ({}/{})...'\
                .format(i+1, length), end='')
            draw.text((10, height-title_height + FONT_MARGIN), 'Round: ' + str(i+1),
                    font=font, fill='#000000')
        iml.append(image)
    print('Done')
    return iml
def get_color(m, psize, x, y):
    """Return the COLOR_MAP colour of the grid cell under pixel (x, y)."""
    return COLOR_MAP[m[x // psize][y // psize]]
def save_iml(name, iml):
    """Write each frame as a numbered JPEG under OUTPATH/name/."""
    out_dir = OUTPATH + '/' + name
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    total = len(iml)
    print('Saving Images (0/{})...'.format(total), end='')
    for idx, frame in enumerate(iml):
        print('\rSaving Images ({}/{})...'.format(idx + 1, total), end='')
        frame.save('{}/{}/{:02d}.jpg'.format(OUTPATH, name, idx + 1), 'jpeg')
    print('Done')
def save_gif(name, iml, duration):
    """Save the frame list as an animated GIF under OUTPATH."""
    print('Saving GIF...', end='')
    target = OUTPATH + '/' + name + '.gif'
    iml[0].save(target, save_all=True, append_images=iml, duration=duration)
    print('Done')
def save_video(name, iml, width, height, fps=24, vtype='mp4'):
    """Encode the frame list as a video file under OUTPATH.

    Each frame is repeated int(fps) times, so every image is shown for
    roughly one second. Supported container types: 'mp4' and 'avi'.
    """
    if vtype.lower() not in ['mp4', 'avi']:
        print('Error video type.')
        return None
    print('Saving {} (0/{})...'.format(vtype, len(iml)), end='')
    codecs = {
        'mp4': cv2.VideoWriter_fourcc(*'MP4V'),
        'avi': cv2.VideoWriter_fourcc(*'XVID'),
    }
    # BUG FIX: the frames below are written as 3-channel BGR images, so the
    # writer must be opened in colour mode; isColor=False produced broken
    # output on most backends.
    video = cv2.VideoWriter(OUTPATH + '/' + name + '.' + vtype,
                            codecs[vtype], float(fps),
                            (width, height), isColor=True)
    for i, im in enumerate(iml):
        print('\rSaving {} ({}/{})...'.format(vtype, i + 1, len(iml)), end='')
        image = cv2.cvtColor(numpy.array(im), cv2.COLOR_RGB2BGR)
        # BUG FIX: distinct loop variable — the original reused `i`,
        # clobbering the progress counter.
        for _ in range(int(fps)):
            video.write(image)
    cv2.destroyAllWindows()
    video.release()
    print('Done')
def process():
    """Interactively ask for all settings and run a single conversion."""
    name = get_attr('Data file (without ".txt")')
    layout = get_attr('Layout file (without ".txt")')
    W = int(get_attr('Grid width'))
    H = int(get_attr('Grid height'))
    psize = int(get_attr('Pixels per person', 40))
    title_height = int(get_attr('Height of title for round (0 for no title)', 0))
    isSaveImage = get_attr('Save Images (y/n)', 'n', True)
    isSaveGif = get_attr('Save GIF (y/n)', 'n', True)
    isSaveAvi = get_attr('Save Avi (y/n)', 'n', True)
    isSaveMp4 = get_attr('Save Mp4 (y/n)', 'y', True)
    # Nothing to do if no output format was selected.
    if not any((isSaveImage, isSaveGif, isSaveAvi, isSaveMp4)):
        return None
    width = W * psize
    height = H * psize + title_height
    print('[Processing:', name + '.txt]')
    matrix_data = get_cd_matrix(name + '.txt', layout + '.txt', W, H)
    iml = get_image_list(matrix_data, width, height, psize, title_height)
    if isSaveImage:
        save_iml(name, iml)
    if isSaveGif:
        save_gif(name, iml, 1000)
    if isSaveAvi:
        save_video(name, iml, width, height, vtype='avi')
    if isSaveMp4:
        save_video(name, iml, width, height, vtype='mp4')
def process_sequence(fname):
    """Run conversions in batch mode.

    *fname* is a CSV file whose first line is a header; each following line
    holds: name, layout, W, H, psize, title_height and four y/n flags
    (images, gif, avi, mp4) — the same settings process() asks for
    interactively.
    """
    with open(fname) as f:
        f.readline()  # skip the header line
        lines = f.read().strip().split('\n')
    for line in lines:
        data = line.split(',')
        name = data[0]
        layout = data[1]
        W = int(data[2])
        H = int(data[3])
        psize = int(data[4])
        title_height = int(data[5])
        isSaveImage = True if data[6][0] in 'yY' else False
        isSaveGif = True if data[7][0] in 'yY' else False
        isSaveAvi = True if data[8][0] in 'yY' else False
        isSaveMp4 = True if data[9][0] in 'yY' else False
        # Skip rows that request no output at all.
        if not isSaveImage and not isSaveGif and \
                not isSaveAvi and not isSaveMp4: continue
        width = W * psize
        height = H * psize + title_height
        print('[Processing:', name+'.txt]')
        matrix_data = get_cd_matrix(name + '.txt', layout+'.txt', W, H)
        iml = get_image_list(matrix_data, width, height, psize, title_height)
        if isSaveImage: save_iml(name, iml)
        if isSaveGif: save_gif(name, iml, 1000)
        if isSaveAvi: save_video(name, iml, width, height, vtype='avi')
        if isSaveMp4: save_video(name, iml, width, height, vtype='mp4')
if __name__ == '__main__':
    # With a CLI argument: batch mode driven by a sequence file; otherwise
    # fall back to the interactive prompts.
    cli_args = sys.argv[1:]
    if cli_args:
        process_sequence(cli_args[0])
    else:
        process()
| [
"os.path.exists",
"os.makedirs",
"PIL.Image.new",
"PIL.ImageFont.truetype",
"numpy.array",
"PIL.ImageDraw.Draw",
"cv2.destroyAllWindows",
"cv2.VideoWriter_fourcc"
] | [((3916, 3939), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3937, 3939), False, 'import cv2\n'), ((1643, 1684), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', 'fontsize'], {}), "('arial.ttf', fontsize)\n", (1661, 1684), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1919, 1969), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)', '(255, 255, 255)'], {}), "('RGB', (width, height), (255, 255, 255))\n", (1928, 1969), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1985, 2006), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (1999, 2006), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2600, 2636), 'os.path.exists', 'os.path.exists', (["(OUTPATH + '/' + name)"], {}), "(OUTPATH + '/' + name)\n", (2614, 2636), False, 'import sys, os\n'), ((2642, 2675), 'os.makedirs', 'os.makedirs', (["(OUTPATH + '/' + name)"], {}), "(OUTPATH + '/' + name)\n", (2653, 2675), False, 'import sys, os\n'), ((3405, 3436), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MP4V'"], {}), "(*'MP4V')\n", (3427, 3436), False, 'import cv2\n'), ((3453, 3484), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (3475, 3484), False, 'import cv2\n'), ((1808, 1849), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', 'fontsize'], {}), "('arial.ttf', fontsize)\n", (1826, 1849), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3811, 3826), 'numpy.array', 'numpy.array', (['im'], {}), '(im)\n', (3822, 3826), False, 'import numpy\n')] |
from sklearn.svm import LinearSVC
from sklearn.model_selection import KFold
import numpy as np
from joblib import load
import time
import matplotlib.pyplot as plt
import sys
from pathlib import Path
sys.path[0] = str(Path(sys.path[0]).parent)
from metrics import metrics, meanMetrics, printMetrics, stdMetrics
train_images = np.load('../saved_images/images_array_normal.npy')
x = train_images[:,:-1]
y = train_images[:,-1]
feature_model = load('feature_extraction')
x = feature_model.transform(x)
#C = np.logspace(1,2,2)
C = [0.01]
#Gamma = np.logspace(-3,2,6)
num_splits = 10
kf = KFold(n_splits=num_splits)
kf.get_n_splits(x)
error_by_parameter = np.zeros((6,5))
i = 0
start = time.time()
for c in C:
clf = LinearSVC(C=c, max_iter = 100000)
exactitud = np.zeros((num_splits, 5))
iteration = 0
for train_index, test_index in kf.split(x):
X_train, X_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
exactitud[iteration, :] = metrics(y_test, y_pred)
iteration += 1
error_standard = stdMetrics(error_promedio)
error_promedio = meanMetrics(error_promedio)
print('Error para C=', c)
printMetrics(error_promedio)
print('Desviación estandar')
print('###################################')
printMetrics(error_standard)
error_by_parameter[i,:]=error_promedio
i += 1
elapsed_time = time.time()-start
print('Elapsed time for one neuron Classification: ',elapsed_time)
plt.plot(C, error_by_parameter[:,0], 'b--')
| [
"metrics.meanMetrics",
"pathlib.Path",
"matplotlib.pyplot.plot",
"sklearn.svm.LinearSVC",
"metrics.stdMetrics",
"numpy.zeros",
"metrics.printMetrics",
"joblib.load",
"metrics.metrics",
"sklearn.model_selection.KFold",
"numpy.load",
"time.time"
] | [((332, 382), 'numpy.load', 'np.load', (['"""../saved_images/images_array_normal.npy"""'], {}), "('../saved_images/images_array_normal.npy')\n", (339, 382), True, 'import numpy as np\n'), ((447, 473), 'joblib.load', 'load', (['"""feature_extraction"""'], {}), "('feature_extraction')\n", (451, 473), False, 'from joblib import load\n'), ((593, 619), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'num_splits'}), '(n_splits=num_splits)\n', (598, 619), False, 'from sklearn.model_selection import KFold\n'), ((661, 677), 'numpy.zeros', 'np.zeros', (['(6, 5)'], {}), '((6, 5))\n', (669, 677), True, 'import numpy as np\n'), ((692, 703), 'time.time', 'time.time', ([], {}), '()\n', (701, 703), False, 'import time\n'), ((1572, 1616), 'matplotlib.pyplot.plot', 'plt.plot', (['C', 'error_by_parameter[:, 0]', '"""b--"""'], {}), "(C, error_by_parameter[:, 0], 'b--')\n", (1580, 1616), True, 'import matplotlib.pyplot as plt\n'), ((727, 758), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': 'c', 'max_iter': '(100000)'}), '(C=c, max_iter=100000)\n', (736, 758), False, 'from sklearn.svm import LinearSVC\n'), ((778, 803), 'numpy.zeros', 'np.zeros', (['(num_splits, 5)'], {}), '((num_splits, 5))\n', (786, 803), True, 'import numpy as np\n'), ((1159, 1185), 'metrics.stdMetrics', 'stdMetrics', (['error_promedio'], {}), '(error_promedio)\n', (1169, 1185), False, 'from metrics import metrics, meanMetrics, printMetrics, stdMetrics\n'), ((1207, 1234), 'metrics.meanMetrics', 'meanMetrics', (['error_promedio'], {}), '(error_promedio)\n', (1218, 1234), False, 'from metrics import metrics, meanMetrics, printMetrics, stdMetrics\n'), ((1270, 1298), 'metrics.printMetrics', 'printMetrics', (['error_promedio'], {}), '(error_promedio)\n', (1282, 1298), False, 'from metrics import metrics, meanMetrics, printMetrics, stdMetrics\n'), ((1385, 1413), 'metrics.printMetrics', 'printMetrics', (['error_standard'], {}), '(error_standard)\n', (1397, 1413), False, 'from metrics import metrics, 
meanMetrics, printMetrics, stdMetrics\n'), ((1486, 1497), 'time.time', 'time.time', ([], {}), '()\n', (1495, 1497), False, 'import time\n'), ((221, 238), 'pathlib.Path', 'Path', (['sys.path[0]'], {}), '(sys.path[0])\n', (225, 238), False, 'from pathlib import Path\n'), ((1090, 1113), 'metrics.metrics', 'metrics', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1097, 1113), False, 'from metrics import metrics, meanMetrics, printMetrics, stdMetrics\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" acoustic_fixture.py: Code to receive data from the acoustic fixture
The mirror of the laser is 24 mm from the front of the device
Test grid is Y328.8 mm
Test fixture is 342.9 mm + 80.7 mm + 24 mm = X0 Y447.6 Z56
"""
__version__ = "1.0"
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, <NAME>"
__license__ = "Apache 2.0"
import pyaudio, serial, time
from scipy.interpolate import interp1d
from numpy import cos, pi, zeros, frombuffer, float32, roll, average
from acoustic_trilateration import get_time_shift, trilateration, butter_bandpass_filter
from trilateration_linear_regression_model import predict
from serial_comms import waitFor, sendCommand
# Configuration
COM_PORT = "COM3" # Laser com port
OFFLINE_MODE = False
USE_MACHINE_LEARNING = 0
RATE = 44100
BUFFER = 882 # RATE must be evenly divisible by BUFFER
LPF = 400
HPF = 480
AMPLITUDE_MS = 500
AMPLITUDE_SIZE = int(AMPLITUDE_MS/RATE*BUFFER)
# Microphone calibrations in mm
CAL_DISTANCE = [0, 25, 50, 100, 150, 200]
M1_CAL = interp1d([1.3250, 0.0990, 0.0300, 0.0098, 0.0048, 0.0030], CAL_DISTANCE, kind='linear', fill_value="extrapolate")
M2_CAL = interp1d([1.3200, 0.0810, 0.0260, 0.0094, 0.0058, 0.0045], CAL_DISTANCE, kind='linear', fill_value="extrapolate")
M3_CAL = interp1d([1.3020, 0.0700, 0.0230, 0.0084, 0.0054, 0.0045], CAL_DISTANCE, kind='linear', fill_value="extrapolate")
MIC_CAL = [M1_CAL, M2_CAL, M3_CAL]
#RADIAL_CAL = interp1d(CAL_DISTANCE, [1.0, 0.892857143, 0.714285714, 0.571428571, 0.5])
# Acoustic fixture properties, variables are part of the trilateration equation
FIXT_MIC_RADIUS = 80
FIXT_D = FIXT_MIC_RADIUS * cos(30 * pi / 180) * 2
FIXT_E = FIXT_D/2
FIXT_F = FIXT_MIC_RADIUS + FIXT_MIC_RADIUS/2
#print([FIXT_D, FIXT_E, FIXT_F])
ser = None
class AcousticFixture:
mic_dict = {"Mosquito 1":[-1, ""], "Mosquito 2":[-1, ""], "Mosquito 3":[-1, ""]}
# Pre-allocate buffers
buf = [zeros(BUFFER) for y in range(len(mic_dict))]
buf_copy = buf.copy()
buf_filtered = buf.copy()
voltage_data = buf.copy()
amplitude_buffer = [zeros(AMPLITUDE_SIZE) for i in range(len(mic_dict))]
amplitude_avg = zeros(len(mic_dict) + 1)
delay_buffer = [zeros(AMPLITUDE_SIZE) for i in range(len(mic_dict))]
delay_avg = zeros(len(mic_dict) + 1)
calibration_mode = False
streams = []
x = 0
y = 0
z = 0
# Custom callback which inserts the index of the microphone into the local scope
def portaudio_callback(this, idx):
def callback(in_data, frame_count, time_info, status):
this.buf[idx] = frombuffer(in_data,dtype=float32)
return (this.buf[idx], pyaudio.paComplete)
return callback
def active(this):
for s in this.streams:
if s.is_active():
return True
return False
def __init__(this, cal_mode = False):
global ser
this.calibration_mode = cal_mode
# Print config
print("Sample Rate: %d Hz\nBuffer Size: %d frames\nSample Length: %d ms\n" % (RATE, BUFFER, 1/RATE*BUFFER*1000))
p = pyaudio.PyAudio()
# Search for our microphones
print("Searching for microphones by name")
info = p.get_host_api_info_by_index(0)
numdevices = info.get('deviceCount')
for i in range(numdevices):
if (p.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:
dev_name = p.get_device_info_by_host_api_device_index(0, i).get('name')
#print("Input Device id %d - %s" % (i, dev_name))
# Check if this is the correct microphone and set the index
for key in this.mic_dict:
if key in dev_name:
this.mic_dict[key][0] = i
this.mic_dict[key][1] = dev_name
# Verify that all microphones are attached
for key in this.mic_dict:
if this.mic_dict[key][0] == -1:
print("%s not found. Please make sure the device is plugged in." % (key))
exit()
# Print all microphone id
for key in this.mic_dict:
print("Input Device id %d - %s" % (this.mic_dict[key][0], this.mic_dict[key][1]))
# Open microphone streams
for key in this.mic_dict:
this.streams.append(p.open(
format = pyaudio.paFloat32,
channels = 1,
rate = RATE,
input = True,
output = False,
frames_per_buffer = BUFFER,
input_device_index = this.mic_dict[key][0],
stream_callback = this.portaudio_callback(list(this.mic_dict.keys()).index(key))
))
# Aquire the first samples
for s in this.streams:
s.start_stream()
print("Connecting to Laser...")
if not OFFLINE_MODE:
ser = serial.Serial(COM_PORT, 115200)
waitFor(ser, "\rsh$ ") # Wait for the system to initialize
sendCommand(ser, "M3", "\rsh$ ")
print("Offline mode. Laser module disconnected.")
def update(this, corr_lines=None):
global ser
# Wait until all streams stop
while this.active():
pass
# Copy the data to a new buffer
this.buf_copy = this.buf.copy()
# Start the data aquisition for the next cycle before running the routine
for s in this.streams:
s.stop_stream()
s.start_stream()
for i in range(len(this.streams)):
# Apply the bandpass filter and write to global var
this.buf_filtered[i] = butter_bandpass_filter(this.buf_copy[i], LPF, HPF, RATE, 3)
# Get the delay relative to the first microphone
this.delay_buffer[i][0] = get_time_shift(this.buf_filtered[0], this.buf_filtered[i], BUFFER, RATE, corr_lines[i][0] if corr_lines != None else None)
this.delay_buffer[i] = roll(this.delay_buffer[i], 1)
this.delay_avg[i] = average(this.delay_buffer[i])
# Write voltage chart data
this.voltage_data[i] = (this.buf_filtered[i] * 2.25) + 2.25
# DEBUG print the buffer for use in offline mode
#print("signal[%d] = %s" % (i, repr(buf_filtered[i]).replace("array(", "").replace(")", "")))
# Extrapolate rolling average distance to be fed into trilateration
if this.calibration_mode or USE_MACHINE_LEARNING:
this.amplitude_buffer[i][0] = max(this.buf_filtered[i]) # Also enable the mic cal line below
else:
this.amplitude_buffer[i][0] = MIC_CAL[i](max(this.buf_filtered[i]))
this.amplitude_buffer[i] = roll(this.amplitude_buffer[i], 1)
this.amplitude_avg[i] = average(this.amplitude_buffer[i])
# print average amplitudes
#amplitude_avg[-1] = average(amplitude_avg[0:-1]) # Calculate overall average
#print("ampl_avg: %.4f" % (amplitude_avg[-1]))
# Print raw microphone calibration line
if this.calibration_mode:
pass
#print("M1: %.4f, M2: %.4f, M3: %.4f" % (this.amplitude_avg[0], this.amplitude_avg[1], this.amplitude_avg[2]))
# Print info line
else:
if USE_MACHINE_LEARNING:
# Predict using machine learning
this.x, this.y, this.z = predict([this.amplitude_avg[0], this.amplitude_avg[1], this.amplitude_avg[2]])[0]
else:
# Calcuate using trilateration
(this.x, this.y, this.z) = trilateration(this.amplitude_avg[0], this.amplitude_avg[2], this.amplitude_avg[1], FIXT_D, FIXT_E, FIXT_F)
# Move the origin to the center of the fixture
this.x -= FIXT_E
this.y -= FIXT_MIC_RADIUS/2
if not OFFLINE_MODE:
# Test fixture is 342.9 mm + 80.7 mm + 24 mm = X0 Y447.6 Z56
sendCommand(ser, "G1 X%.2f Y%.2f Z%.2f" % (this.x, this.y + 447.6, this.z + 56), "\rsh$ ")
print("x: %4.0f, y: %4.0f, z: %4.0f, (r1: %3.0f, r2: %3.0f, r3: %3.0f), d1: %5.2f, d2: %5.2f, d3: %5.2f" % (this.x, this.y, this.z, this.amplitude_avg[0], this.amplitude_avg[1], this.amplitude_avg[2], this.delay_avg[0], this.delay_avg[1], this.delay_avg[2]))
| [
"trilateration_linear_regression_model.predict",
"numpy.roll",
"serial_comms.sendCommand",
"acoustic_trilateration.trilateration",
"numpy.average",
"acoustic_trilateration.butter_bandpass_filter",
"scipy.interpolate.interp1d",
"acoustic_trilateration.get_time_shift",
"numpy.zeros",
"serial.Serial"... | [((1068, 1181), 'scipy.interpolate.interp1d', 'interp1d', (['[1.325, 0.099, 0.03, 0.0098, 0.0048, 0.003]', 'CAL_DISTANCE'], {'kind': '"""linear"""', 'fill_value': '"""extrapolate"""'}), "([1.325, 0.099, 0.03, 0.0098, 0.0048, 0.003], CAL_DISTANCE, kind=\n 'linear', fill_value='extrapolate')\n", (1076, 1181), False, 'from scipy.interpolate import interp1d\n'), ((1191, 1305), 'scipy.interpolate.interp1d', 'interp1d', (['[1.32, 0.081, 0.026, 0.0094, 0.0058, 0.0045]', 'CAL_DISTANCE'], {'kind': '"""linear"""', 'fill_value': '"""extrapolate"""'}), "([1.32, 0.081, 0.026, 0.0094, 0.0058, 0.0045], CAL_DISTANCE, kind=\n 'linear', fill_value='extrapolate')\n", (1199, 1305), False, 'from scipy.interpolate import interp1d\n'), ((1314, 1428), 'scipy.interpolate.interp1d', 'interp1d', (['[1.302, 0.07, 0.023, 0.0084, 0.0054, 0.0045]', 'CAL_DISTANCE'], {'kind': '"""linear"""', 'fill_value': '"""extrapolate"""'}), "([1.302, 0.07, 0.023, 0.0084, 0.0054, 0.0045], CAL_DISTANCE, kind=\n 'linear', fill_value='extrapolate')\n", (1322, 1428), False, 'from scipy.interpolate import interp1d\n'), ((1680, 1698), 'numpy.cos', 'cos', (['(30 * pi / 180)'], {}), '(30 * pi / 180)\n', (1683, 1698), False, 'from numpy import cos, pi, zeros, frombuffer, float32, roll, average\n'), ((1959, 1972), 'numpy.zeros', 'zeros', (['BUFFER'], {}), '(BUFFER)\n', (1964, 1972), False, 'from numpy import cos, pi, zeros, frombuffer, float32, roll, average\n'), ((2114, 2135), 'numpy.zeros', 'zeros', (['AMPLITUDE_SIZE'], {}), '(AMPLITUDE_SIZE)\n', (2119, 2135), False, 'from numpy import cos, pi, zeros, frombuffer, float32, roll, average\n'), ((2232, 2253), 'numpy.zeros', 'zeros', (['AMPLITUDE_SIZE'], {}), '(AMPLITUDE_SIZE)\n', (2237, 2253), False, 'from numpy import cos, pi, zeros, frombuffer, float32, roll, average\n'), ((3126, 3143), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (3141, 3143), False, 'import pyaudio, serial, time\n'), ((2619, 2653), 'numpy.frombuffer', 'frombuffer', 
(['in_data'], {'dtype': 'float32'}), '(in_data, dtype=float32)\n', (2629, 2653), False, 'from numpy import cos, pi, zeros, frombuffer, float32, roll, average\n'), ((4988, 5019), 'serial.Serial', 'serial.Serial', (['COM_PORT', '(115200)'], {}), '(COM_PORT, 115200)\n', (5001, 5019), False, 'import pyaudio, serial, time\n'), ((5032, 5054), 'serial_comms.waitFor', 'waitFor', (['ser', "'\\rsh$ '"], {}), "(ser, '\\rsh$ ')\n", (5039, 5054), False, 'from serial_comms import waitFor, sendCommand\n'), ((5106, 5138), 'serial_comms.sendCommand', 'sendCommand', (['ser', '"""M3"""', "'\\rsh$ '"], {}), "(ser, 'M3', '\\rsh$ ')\n", (5117, 5138), False, 'from serial_comms import waitFor, sendCommand\n'), ((5743, 5802), 'acoustic_trilateration.butter_bandpass_filter', 'butter_bandpass_filter', (['this.buf_copy[i]', 'LPF', 'HPF', 'RATE', '(3)'], {}), '(this.buf_copy[i], LPF, HPF, RATE, 3)\n', (5765, 5802), False, 'from acoustic_trilateration import get_time_shift, trilateration, butter_bandpass_filter\n'), ((5903, 6030), 'acoustic_trilateration.get_time_shift', 'get_time_shift', (['this.buf_filtered[0]', 'this.buf_filtered[i]', 'BUFFER', 'RATE', '(corr_lines[i][0] if corr_lines != None else None)'], {}), '(this.buf_filtered[0], this.buf_filtered[i], BUFFER, RATE, \n corr_lines[i][0] if corr_lines != None else None)\n', (5917, 6030), False, 'from acoustic_trilateration import get_time_shift, trilateration, butter_bandpass_filter\n'), ((6061, 6090), 'numpy.roll', 'roll', (['this.delay_buffer[i]', '(1)'], {}), '(this.delay_buffer[i], 1)\n', (6065, 6090), False, 'from numpy import cos, pi, zeros, frombuffer, float32, roll, average\n'), ((6123, 6152), 'numpy.average', 'average', (['this.delay_buffer[i]'], {}), '(this.delay_buffer[i])\n', (6130, 6152), False, 'from numpy import cos, pi, zeros, frombuffer, float32, roll, average\n'), ((6839, 6872), 'numpy.roll', 'roll', (['this.amplitude_buffer[i]', '(1)'], {}), '(this.amplitude_buffer[i], 1)\n', (6843, 6872), False, 'from numpy import cos, 
pi, zeros, frombuffer, float32, roll, average\n'), ((6909, 6942), 'numpy.average', 'average', (['this.amplitude_buffer[i]'], {}), '(this.amplitude_buffer[i])\n', (6916, 6942), False, 'from numpy import cos, pi, zeros, frombuffer, float32, roll, average\n'), ((7704, 7815), 'acoustic_trilateration.trilateration', 'trilateration', (['this.amplitude_avg[0]', 'this.amplitude_avg[2]', 'this.amplitude_avg[1]', 'FIXT_D', 'FIXT_E', 'FIXT_F'], {}), '(this.amplitude_avg[0], this.amplitude_avg[2], this.\n amplitude_avg[1], FIXT_D, FIXT_E, FIXT_F)\n', (7717, 7815), False, 'from acoustic_trilateration import get_time_shift, trilateration, butter_bandpass_filter\n'), ((8080, 8174), 'serial_comms.sendCommand', 'sendCommand', (['ser', "('G1 X%.2f Y%.2f Z%.2f' % (this.x, this.y + 447.6, this.z + 56))", "'\\rsh$ '"], {}), "(ser, 'G1 X%.2f Y%.2f Z%.2f' % (this.x, this.y + 447.6, this.z +\n 56), '\\rsh$ ')\n", (8091, 8174), False, 'from serial_comms import waitFor, sendCommand\n'), ((7514, 7592), 'trilateration_linear_regression_model.predict', 'predict', (['[this.amplitude_avg[0], this.amplitude_avg[1], this.amplitude_avg[2]]'], {}), '([this.amplitude_avg[0], this.amplitude_avg[1], this.amplitude_avg[2]])\n', (7521, 7592), False, 'from trilateration_linear_regression_model import predict\n')] |
# -*- coding:utf-8 -*-
import random
import time
import xlsxwriter
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
class Similarity:
    """One pairwise drug-drug similarity record (a row of ``result.txt``).

    Holds the two CHEMBL molregno identifiers plus the MACCS, ECFP4, FCFP4
    and topological Tanimoto similarities and a precomputed weighted value.
    """

    def __init__(self, med_molregno1=0, med_molregno2=0, maccs=0, fcfp4=0, ecfp4=0, topo=0, weighted_sim=0):
        # NOTE: fcfp4 precedes ecfp4 in this signature; prefer keyword
        # arguments so positional callers do not swap the two.
        self.med_molregno1 = med_molregno1
        self.med_molregno2 = med_molregno2
        self.maccs = maccs
        self.ecfp4 = ecfp4
        self.fcfp4 = fcfp4
        self.topo = topo
        self.weighted_sim = weighted_sim

    def get_simtable(self):
        """Return the record as a flat list in result.txt column order."""
        return [self.med_molregno1, self.med_molregno2, self.maccs, self.ecfp4, self.fcfp4, self.topo, self.weighted_sim]

    def from_simtable(self, table):
        """Populate this record from a split result.txt row (list of strings)."""
        self.med_molregno1 = table[0]
        self.med_molregno2 = table[1]
        self.maccs = float(table[2])
        self.ecfp4 = float(table[3])
        self.fcfp4 = float(table[4])
        self.topo = float(table[5])
        self.weighted_sim = float(table[6])

    @staticmethod
    def read_similarities():
        """Read every similarity record from ``result.txt``."""
        similarities = []
        with open('result.txt') as sim_file:
            for line in sim_file:
                s = Similarity()
                s.from_simtable(line.split())
                similarities.append(s)
        return similarities

    @staticmethod
    def read_sims_to_dict():
        """Read result.txt into four {"mol1 mol2": similarity} lookup dicts.

        Returns [maccs_dict, ecfp4_dict, fcfp4_dict, topo_dict], keyed by the
        space-joined molregno pair exactly as written in the file.
        """
        maccs_dict = {}
        ecfp4_dict = {}
        fcfp4_dict = {}
        topo_dict = {}
        with open('result.txt') as sim_file:
            for line in sim_file:
                table = line.split()
                key = table[0] + ' ' + table[1]
                maccs_dict[key] = float(table[2])
                ecfp4_dict[key] = float(table[3])
                fcfp4_dict[key] = float(table[4])
                topo_dict[key] = float(table[5])
        return [maccs_dict, ecfp4_dict, fcfp4_dict, topo_dict]

    @staticmethod
    def read_pairs():
        """Read most-similar-pair rows from ``pairs.txt``.

        Each item is [molregno1, most similar drug's molregno, similarity].
        Fixes the original file-handle leak by using a context manager.
        """
        pairs = []
        with open('pairs.txt') as pairs_file:
            for line in pairs_file:
                pairs.append(line.split())
        return pairs
class ChnMed:  # Chinese medicine class
    """One Chinese-medicine record, serialized as a row of ``CMedc.txt``."""

    def __init__(self, lst):
        """Build a record from a split CMedc.txt row (6 fields)."""
        self.id = lst[0]
        self.chn_name = lst[1]
        self.chn_word_id = lst[2]
        self.component = lst[3]
        self.description = lst[4]
        self.chn_description = lst[5]

    @staticmethod
    def read_chn_med():
        """Read every record from ``CMedc.txt``.

        Uses a context manager so the file handle is always released.
        """
        chn_med = []
        with open('CMedc.txt') as chn_med_file:
            for line in chn_med_file:
                chn_med.append(ChnMed(line.split()))
        return chn_med

    def chn_str(self):
        """Serialize the record back to the space-separated file format."""
        return str(self.id) + ' ' + str(self.chn_name) + ' ' + str(self.chn_word_id) + ' ' +\
               str(self.component) + ' ' + str(self.description) + ' ' + str(self.chn_description)

    @staticmethod
    def write_chn_med(chn_med):
        """Write records to ``CMedc1.txt``, one serialized record per line."""
        with open('CMedc1.txt', 'w') as file:
            for item in chn_med:
                file.write(item.chn_str() + '\n')
class WstMed:  # Western medicine class
    """One western-medicine record, serialized as a row of ``WMedc.txt``."""

    def __init__(self, lst):
        self.id = lst[0]  # drugs.com id
        self.name = lst[1]
        self.component = lst[2]
        self.molregno = lst[3]  # CHEMBL id
        self.smiles = lst[4]  # SMILES notation string, not a mol object

    def wst_str(self):
        """Serialize the record back to the space-separated file format."""
        return str(self.id) + ' ' + str(self.name) + ' ' + str(self.component) + ' ' +\
               str(self.molregno) + ' ' + str(self.smiles)

    @staticmethod
    def read_wstmed_to_dict():
        """Read WMedc.txt into two lookup dicts over WstMed objects.

        Returns [by_molregno, by_id]; uses a context manager so the file
        handle is always released.
        """
        wstmed_molregno_dict = {}
        wstmed_id_dict = {}
        with open('WMedc.txt') as wst_med_file:
            for line in wst_med_file:
                med = WstMed(line.split())
                wstmed_molregno_dict[med.molregno] = med
                wstmed_id_dict[med.id] = med
        return [wstmed_molregno_dict, wstmed_id_dict]

    @staticmethod
    def read_wstmed():
        """Read WMedc.txt into a list of WstMed objects."""
        wst_med = []
        with open('WMedc.txt') as wst_med_file:
            for line in wst_med_file:
                wst_med.append(WstMed(line.split()))
        return wst_med
class Interaction:  # interaction between western medicines
    """One drug-drug interaction record from ``interactions.txt``."""

    def __init__(self, lst):
        self.id = lst[0]
        self.medicine_id1 = lst[1]
        self.medicine_name1 = lst[2]
        self.medicine_id2 = lst[3]
        self.medicine_name2 = lst[4]
        self.interaction_level = lst[5]

    @staticmethod
    def read_interactions():
        """Read every interaction record from ``interactions.txt``."""
        interactions = []
        with open('interactions.txt') as interaction_file:
            for line in interaction_file:
                interactions.append(Interaction(line.split()))
        return interactions

    # Fix: this function takes no self, but was declared without
    # @staticmethod — calling it on an instance would break.
    @staticmethod
    def read_interactions_to_dict(wstmed_id):
        """Read interactions restricted to the drugs present in *wstmed_id*.

        Only pairs where both drugs.com ids appear in *wstmed_id* are kept
        (i.e. interactions between the 1366 known drugs). Returns
        {"id1 id2": level} keyed by drugs.com id strings.
        """
        interactions_dict = {}
        with open('interactions.txt') as interaction_file:
            for line in interaction_file:
                row = line.split()
                if row[1] in wstmed_id.keys() and row[3] in wstmed_id.keys():
                    interactions_dict[row[1] + ' ' + row[3]] = row[5]
        return interactions_dict

    @staticmethod
    def write_interactions(interactions):
        """Write interaction records to ``interactions.txt``, one per line."""
        with open('interactions.txt', 'w') as interaction_file:
            for item in interactions:
                interaction_file.write(item.interaction_str() + '\n')

    def interaction_str(self):
        """Serialize the record back to the space-separated file format."""
        return self.id + ' ' + self.medicine_id1 + ' ' + self.medicine_name1 + ' ' + self.medicine_id2 + ' ' +\
               self.medicine_name2 + ' ' + self.interaction_level
class Validation:
def __init__(self, wst_med, similarities, interaction):
self.wst_med = wst_med
self.sim = similarities # key: molregno
self.interaction = interaction # key: drugs.com id
self.train_set = [] # 90%
self.validation_set = [] # 10%
self.train_inters = {} # molregno1 + molregno2: interaction level, all interactions between drugs in train set
self.maccs_pair_mol = {} # drug molregno: similar drug's molregno
self.ecfp4_pair_mol = {} # drug molregno: similar drug's molregno
self.fcfp4_pair_mol = {} # drug molregno: similar drug's molregno
self.topo_pair_mol = {} # drug molregno: similar drug's molregno
self.maccs_pair_id = {} # drug molregno: similar drug's id
self.topo_pair_id = {} # drug molregno: similar drug's id
self.ecfp4_pair_id = {} # drug molregno: similar drug's id
self.fcfp4_pair_id = {} # drug molregno: similar drug's id
self.maccs = {}
self.ecfp4 = {}
self.fcfp4 = {}
self.topo = {}
self.mol_by_id = {} # find molregno by drugs' id
self.id_by_mol = {} # find id by molregno
self.index_array = np.zeros(1366)
self.train_interactions = {}
self.validation_interactions = {}
self.inter_matrix = np.zeros(shape=(1366,1366))
def input_sims(self, maccs_dict, ecfp4_dict, fcfp4_dict, topo_dict):
self.maccs = maccs_dict
self.ecfp4 = ecfp4_dict
self.fcfp4 = fcfp4_dict
self.topo = topo_dict
def sim_by_mol(self, mol1, mol2, sim_type=0): # sim_type: 0-maccs, 1-ecfp4, 2-fcfp4, 3-topo
key = mol1 + ' ' + mol2
key2 = mol2 + ' ' + mol1
if sim_type == 0:
if key in self.maccs.keys():
return self.maccs[key]
elif key2 in self.maccs.keys():
return self.maccs[key2]
else:
print("maccs_sim_by_mol error: no key ", key)
elif sim_type == 1:
if key in self.ecfp4.keys():
return self.ecfp4[key]
elif key2 in self.ecfp4.keys():
return self.ecfp4[key2]
else:
print("ecfp4_sim_by_mol error: no key ", key)
elif sim_type == 2:
if key in self.fcfp4.keys():
return self.fcfp4[key]
elif key2 in self.fcfp4.keys():
return self.fcfp4[key2]
else:
print("fcfp4_sim_by_mol error: no key ", key)
elif sim_type == 3:
if key in self.topo.keys():
return self.topo[key]
elif key2 in self.topo.keys():
return self.topo[key2]
else:
print("topo_sim_by_mol error: no key ", key)
else:
print("similarity type error!!!!!")
def interaction_by_id(self, id1, id2):
key1 = id1 + ' ' + id2
key2 = id2 + ' ' + id1
if key1 in self.interaction.keys():
# return int(self.interaction[key1])
return 1
elif key2 in self.interaction.keys():
return 1
# return int(self.interaction[key2])
else:
return 0
def divide_data(self):
self.train_set = []
self.validation_set = []
index = random.sample(range(0, 1366), 136) # randomly select 1/10 data as test_set
flag = 0
for i in self.wst_med:
if flag in index:
self.validation_set.append(self.wst_med[i])
else:
self.train_set.append(self.wst_med[i])
flag += 1
def create_pairs_for_data_set(self): # for training process
for train1_index in self.wst_med:
maxmaccs = 0
maxecfp = 0
maxfcfp = 0
maxtopo = 0
train1 = self.wst_med[train1_index]
for train2 in self.train_set:
if train1 != train2:
maccs = self.sim_by_mol(train1.molregno, train2.molregno, 0)
ecfp = self.sim_by_mol(train1.molregno, train2.molregno, 1)
fcfp = self.sim_by_mol(train1.molregno, train2.molregno, 2)
topo = self.sim_by_mol(train1.molregno, train2.molregno, 3)
if maccs >= maxmaccs:
maxmaccs = maccs
self.maccs_pair_mol[train1.molregno] = train2.molregno
self.maccs_pair_id[train1.molregno] = train2.id
if ecfp >= maxecfp:
maxecfp = ecfp
self.ecfp4_pair_mol[train1.molregno] = train2.molregno
self.ecfp4_pair_id[train1.molregno] = train2.id
if fcfp >= maxfcfp:
maxfcfp = fcfp
self.fcfp4_pair_mol[train1.molregno] = train2.molregno
self.fcfp4_pair_id[train1.molregno] = train2.id
if topo >= maxtopo:
maxtopo = topo
self.topo_pair_mol[train1.molregno] = train2.molregno
self.topo_pair_id[train1.molregno] = train2.id
def create_interactions_train_set(self): # find all interactions between train set
for d1 in self.train_set:
for d2 in self.train_set:
if d1 != d2:
key = d1.id + ' ' + d2.id
if key in self.interaction.keys():
self.train_inters[key] = self.interaction[key]
def create_id_mol_dict(self):
for key in self.wst_med:
self.mol_by_id[self.wst_med[key].id] = self.wst_med[key].molregno
def create_mol_id_dict(self):
for key in self.wst_med:
self.id_by_mol[self.wst_med[key].molregno] = self.wst_med[key].id
def link_sim(self, d1, d2): # create training array of drug d1 and d2
# find interaction lvl between d1, d2
inter = self.interaction_by_id(d1.id, d2.id)
if 1:
# calculate sim feature using (sim(s1,d1) + sim(s2,d2))/2 * interaction lvl(s1,s2)
s1_mol = self.maccs_pair_mol[d1.molregno]
s2_mol = self.maccs_pair_mol[d2.molregno]
s1_id = self.maccs_pair_id[d1.molregno]
s2_id = self.maccs_pair_id[d2.molregno]
maccs1 = self.sim_by_mol(s1_mol, d1.molregno, sim_type=0)
maccs2 = self.sim_by_mol(s2_mol, d2.molregno, sim_type=0)
feature1 = (float(maccs1) + float(maccs2)) * float(self.interaction_by_id(s1_id, s2_id)) / 2
# feature1 = (float(maccs1) + float(maccs2)) / 2
s1_mol = self.ecfp4_pair_mol[d1.molregno]
s2_mol = self.ecfp4_pair_mol[d2.molregno]
s1_id = self.ecfp4_pair_id[d1.molregno]
s2_id = self.ecfp4_pair_id[d2.molregno]
ecfp41 = self.sim_by_mol(s1_mol, d1.molregno, sim_type=1)
ecfp42 = self.sim_by_mol(s2_mol, d2.molregno, sim_type=1)
feature2 = (float(ecfp41) + float(ecfp42)) * float(self.interaction_by_id(s1_id, s2_id)) / 2
# feature2 = (float(ecfp41) + float(ecfp42)) / 2
s1_mol = self.fcfp4_pair_mol[d1.molregno]
s2_mol = self.fcfp4_pair_mol[d2.molregno]
s1_id = self.fcfp4_pair_id[d1.molregno]
s2_id = self.fcfp4_pair_id[d2.molregno]
fcfp41 = self.sim_by_mol(s1_mol, d1.molregno, sim_type=2)
fcfp42 = self.sim_by_mol(s2_mol, d2.molregno, sim_type=2)
feature3 = (float(fcfp41) + float(fcfp42)) * float(self.interaction_by_id(s1_id, s2_id)) / 2
# feature3 = (float(fcfp41) + float(fcfp42)) / 2
s1_mol = self.topo_pair_mol[d1.molregno]
s2_mol = self.topo_pair_mol[d2.molregno]
s1_id = self.topo_pair_id[d1.molregno]
s2_id = self.topo_pair_id[d2.molregno]
topo1 = self.sim_by_mol(s1_mol, d1.molregno, sim_type=3)
topo2 = self.sim_by_mol(s2_mol, d2.molregno, sim_type=3)
feature4 = (float(topo1) + float(topo2)) * float(self.interaction_by_id(s1_id, s2_id)) / 2
# feature4 = (float(topo1) + float(topo2)) / 2
return [feature1, feature2, feature3, feature4, inter]
else:
return [0, 0, 0, 0, 0]
def create_train_array(self, portion):
ar1 = []
ar2 = []
ar3 = []
ar4 = []
inters = []
for d1 in v.train_set:
for d2 in v.train_set:
if d1 != d2:
f1, f2, f3, f4, inter = v.link_sim(d1, d2)
# if f1!=0 and f2!=0 and f3!=0 and f4!=0:
if inter != 0:
ar1.append(f1)
ar2.append(f2)
ar3.append(f3)
ar4.append(f4)
inters.append(inter)
else:
index = random.sample(range(0, 100), 1) # randomly select 1/10 data as test_set
if index[0] > portion: # 25% zeros in training set
ar1.append(f1)
ar2.append(f2)
ar3.append(f3)
ar4.append(f4)
inters.append(inter)
tr = [ar1, ar2, ar3, ar4]
tr = [list(x) for x in zip(*tr)] # transpose
return [tr, inters]
def create_val_array(self):
ar1 = []
ar2 = []
ar3 = []
ar4 = []
inters = []
for d1 in v.validation_set:
# for d2 in v.validation_set:
for d2 in v.train_set:
if d1 != d2:
f1, f2, f3, f4, inter = v.link_sim(d1, d2)
if 1:
ar1.append(f1)
ar2.append(f2)
ar3.append(f3)
ar4.append(f4)
inters.append(inter)
val = [ar1, ar2, ar3, ar4]
val = [list(x) for x in zip(*val)] # transpose
return [val, inters]
    def logistic_regression(self, portion):
        """Train and evaluate a logistic-regression interaction classifier.

        portion: 0-100 threshold forwarded to create_train_array, which
        controls how many non-interacting train pairs are subsampled in.
        Prints AUROC, confusion counts, precision, recall, F-score and
        accuracy; always returns 0.
        """
        # find interactions in train set
        # self.create_interactions_train_set()
        # for pairs in validation set, find most similar pairs
        # self.create_pairs_for_train_set()
        # self.create_pairs_for_validation_set()
        self.create_pairs_for_data_set()
        # create training array (also stored on self.tr for inspection)
        tr, inters = self.create_train_array(portion)
        self.tr = tr
        # train logistic regression model
        lr = LogisticRegression(solver='sag')
        lr.fit(tr, inters)
        # svm = LinearSVC()
        # print('start fitting')
        # svm.fit(tr, inters)
        # print('fit completed')
        # create validation array (rebinds `inters` to the validation labels)
        val, inters = self.create_val_array()
        self.val = val
        self.result = lr.predict(val)
        prob_re = lr.predict_proba(val)
        # transpose so prob_re[1] is the positive-class probability column
        prob_re= prob_re.transpose()
        auroc = roc_auc_score(inters, prob_re[1])
        print('roc score:', auroc)
        # self.result = svm.predict(val)
        # print(prob_re.__len__(), inters.__len__())
        # fpr_grd_lm, tpr_grd_lm, _ = roc_curve(inters, prob_re[0])
        # plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
        # validation: overall accuracy bookkeeping
        same = 0
        unsame = 0
        num = 0
        for i in range(0, inters.__len__()):
            num += 1
            if int(self.result[i]) == inters[i]:
                same += 1
            else:
                unsame += 1
        TP = 0  # predict 1, actual 1
        FP = 0  # predict 1, actual 0
        TN = 0  # predict 0, actual 0
        FN = 0  # predict 0, actual 1
        # NOTE(review): the TN branch sits inside the outer "either nonzero"
        # guard, so it can never fire and TN stays 0 — looks like a latent
        # bug; left unchanged here.
        for i in range(0, inters.__len__()):
            if int(self.result[i]) != 0 or inters[i] != 0:
                # print(self.result[i], inters[i])
                if int(self.result[i]) == int(inters[i]):
                    TP += 1
                elif int(self.result[i]) != 0 and inters[i] == 0:
                    FP += 1
                elif inters[i] != 0 and int(self.result[i])==0:
                    FN += 1
                elif int(self.result[i]) == 0 and inters[i] == 0:
                    TN += 1
        print('TP:', TP)
        print('FP:', FP)
        print('TN:', TN)
        print('FN:', FN)
        # NOTE(review): raises ZeroDivisionError if no positives are
        # predicted (TP+FP == 0) or none exist (TP+FN == 0).
        precision = TP/(TP+FP)
        recall = TP/(TP+FN)
        print('precision:', precision)
        print('recall:', recall)
        print('f-score: ', 2*precision*recall/(precision + recall))
        print(same, unsame, num)
        print(same / num)
        return 0
def find_most_similar_link(self, d1, d2):
max_link_maccs = 0
max_link_ecfp4 = 0
max_link_fcfp4 = 0
max_link_topo = 0
summaccs = 0
sumecfp = 0
sumfcfp = 0
sumtopo = 0
maccs = []
ecfp = []
fcfp = []
topo = []
i = 0
for link_key in self.train_inters:
id1, id2 = link_key.split()
if id1 != d1.id and id1 != d2.id:
if id2 != d1.id and id2 != d2.id:
i = i + 1
link_maccs = (self.sim_by_mol(self.mol_by_id[id1], d1.molregno, 0) +
self.sim_by_mol(self.mol_by_id[id2], d2.molregno, 0)) / 2.0
link_ecfp = (self.sim_by_mol(self.mol_by_id[id1], d1.molregno, 1) +
self.sim_by_mol(self.mol_by_id[id2], d2.molregno, 1)) / 2.0
link_fcfp = (self.sim_by_mol(self.mol_by_id[id1], d1.molregno, 2) +
self.sim_by_mol(self.mol_by_id[id2], d2.molregno, 2)) / 2.0
link_topo = (self.sim_by_mol(self.mol_by_id[id1], d1.molregno, 3) +
self.sim_by_mol(self.mol_by_id[id2], d2.molregno, 3)) / 2.0
maccs.append(link_maccs)
ecfp.append(link_ecfp)
fcfp.append(link_fcfp)
topo.append(link_topo)
# if link_maccs >= max_link_maccs:
# max_link_maccs = link_maccs
# # result_id1 = id1
# # result_id2 = id2
# if link_ecfp >= max_link_ecfp4:
# max_link_ecfp4 = link_ecfp
# if link_fcfp >= max_link_fcfp4:
# max_link_fcfp4 = link_fcfp
# if link_topo >= max_link_topo:
# max_link_topo = link_topo
maccsar = np.array(maccs)
ecfpar = np.array(ecfp)
fcfpar = np.array(fcfp)
topoar = np.array(topo)
max_link_maccs = maccsar.max()
max_link_ecfp4 = ecfpar.max()
max_link_fcfp4 = fcfpar.max()
max_link_topo = topoar.max()
max_link_maccs = (max_link_maccs - maccsar.mean())/maccsar.std()
max_link_ecfp4 = (max_link_ecfp4 - ecfpar.mean())/ecfpar.std()
max_link_fcfp4 = (max_link_fcfp4 - fcfpar.mean())/fcfpar.std()
max_link_topo = (max_link_topo - topoar.mean())/topoar.std()
# print(result_id1, result_id2, max_link_sim)
return [max_link_maccs, max_link_ecfp4, max_link_fcfp4, max_link_topo]
def fail_attempt(self):
train_set = v.train_set[0:50]
i = 0
inters = []
tr = []
numof1 = 0
for d1 in train_set:
for d2 in train_set:
i += 1
print(i, '-th process....')
inter = v.interaction_by_id(d1.id, d2.id)
if inter != 0:
numof1 += 1
inters.append(inter)
start = time.time()
feature = v.find_most_similar_link(d1, d2)
print(feature, inter)
end = time.time()
print('time: ', end - start)
tr.append(feature)
print('1 in 100 interactions:', numof1)
# tr = [list(x) for x in zip(*tr)] # transpose
lr = LogisticRegression(solver='sag', max_iter=10000)
lr.fit(tr, inters)
i = 0
test_set = v.validation_set[20:40]
val = []
numof1 = 0
val_inters = []
for d1 in test_set:
for d2 in test_set:
i += 1
print(i, '-th process....')
inter = v.interaction_by_id(d1.id, d2.id)
if inter != 0:
numof1 += 1
val_inters.append(inter)
featureval = v.find_most_similar_link(d1, d2)
# print('val:', featureval)
val.append(featureval)
# val = [list(x) for x in zip(*val)] # transpose
for i in val:
if np.isnan(i[1]):
val.remove(i)
v.result = lr.predict(val)
print('val_inters', val_inters)
print('predicted result', v.result)
def create_index_array(self):
# create index_array
self.index_array = np.zeros(1366)
i = 0
for key in v.wst_med:
self.index_array[i] = key
i += 1
def divide_interactions(self):
self.train_interactions = {}
self.validation_interactions = {}
num = self.interaction.__len__()//10
index = random.sample(range(0, self.interaction.__len__()), num)
flag = 0
for key in self.interaction:
if flag in index:
self.validation_interactions[key] = float(self.interaction[key])
else:
self.train_interactions[key] = float(self.interaction[key])
flag += 1
def get_inter_matrix(self): # train interactions matrix
# create index_array
# self.create_index_array()
self.inter_matrix = np.zeros(shape=(1366, 1366))
for key in self.train_interactions:
id1, id2 = key.split()
row = np.where(self.index_array == float(id1))[0][0]
col = np.where(self.index_array == float(id2))[0][0]
self.inter_matrix[row][col] = float(self.train_interactions[key])
for i in range(0, 1366):
self.inter_matrix[i][i] = 0
def sim_matrix(self):
# maccs, ecfp, fcfp, topo matrix
self.maccs_matrix = np.zeros(shape=(1366, 1366))
self.ecfp_matrix = np.zeros(shape=(1366, 1366))
self.fcfp_matrix = np.zeros(shape=(1366, 1366))
self.topo_matrix = np.zeros(shape=(1366, 1366))
self.create_mol_id_dict()
# self.create_index_array()
for key in self.maccs:
mol1, mol2 = key.split()
id1 = self.id_by_mol[mol1]
id2 = self.id_by_mol[mol2]
row = np.where(self.index_array == float(id1))[0][0]
col = np.where(self.index_array == float(id2))[0][0]
self.maccs_matrix[row][col] = float(self.maccs[key])
self.ecfp_matrix[row][col] = float(self.ecfp4[key])
self.fcfp_matrix[row][col] = float(self.fcfp4[key])
self.topo_matrix[row][col] = float(self.topo[key])
for index in range(0, 1366):
self.maccs_matrix[index][index] = 0
self.ecfp_matrix[index][index] = 0
self.fcfp_matrix[index][index] = 0
self.topo_matrix[index][index] = 0
def create_predict_matrix(self, inter_matrix, sim_matrix):
# m12 = inter_matrix.dot(sim_matrix) # * is element-wise multiply
m12 = np.zeros(shape=(1366, 1366))
st = sim_matrix.transpose()
for row in range(0,1366):
for col in range(0,1366):
m12[row][col] = (inter_matrix[row]*st[col]).max()
m12t = m12.transpose()
pos_bigger = m12t > m12
return m12 - np.multiply(m12, pos_bigger) + np.multiply(m12t, pos_bigger)
    def matrix_approach(self):
        """
        Matrix-based link-prediction experiment: binarize all interactions,
        split train/validation, build the train interaction matrix and the
        MACCS similarity matrix, score candidate links with
        ``create_predict_matrix``, and print precision/recall/f-score at a
        fixed 0.8 score threshold.

        NOTE(review): this method operates on the module-level instance ``v``
        instead of ``self`` -- presumably intentional in this script; confirm
        before reusing elsewhere.
        """
        # Treat every known interaction as a positive label (1).
        for key in v.interaction:
            v.interaction[key] = 1
        v.create_index_array()
        v.divide_interactions()
        v.get_inter_matrix()
        v.sim_matrix()
        re = v.create_predict_matrix(v.inter_matrix, v.maccs_matrix)
        # transform re matrix to re_interactions dict
        re_list = []
        re_interactions = {}
        for row in range(0, 1366):
            for col in range(0, 1366):
                id1 = str(int(v.index_array[row]))
                id2 = str(int(v.index_array[col]))
                key = id1 + ' ' + id2
                re_list.append([id1,id2,re[row][col]])
                re_interactions[key] = re[row][col]
        # Sort predictions by score (third column), descending.
        re_list = np.array(re_list)
        re_list = re_list[re_list.transpose()[2].argsort(), :]
        re_list = re_list[::-1]
        # count TP TN FN FP and precision, recall
        TP = 0 # predict 1, actual 1
        TN = 0 # predict 0, actual 0
        FN = 0 # predict 0, actual 1
        FP = 0 # predict 1, actual 0
        for item in re_list:
            key = item[0] + ' ' + item[1]
            if key in v.validation_interactions.keys():
                # print(v.validation_interactions[key], item[2])
                if float(item[2]) > 0.8:
                    TP += 1
                else:
                    FN += 1
            elif key not in v.train_interactions.keys():
                # Pair unknown to both splits but scored high counts as FP.
                if float(item[2]) > 0.8:
                    FP += 1
        # for key in v.validation_interactions:
        #     # id1, id2 = key.split()
        #     # row = np.where(v.index_array == float(id1))[0][0]
        #     # col = np.where(v.index_array == float(id2))[0][0]
        #     # print(row, col)
        #     # if key not in re_interactions.keys():
        #     #     id1, id2 = key.split()
        #     #     key = id2 + ' ' + id1
        #     # print(v.validation_interactions[key], re_interactions[key])
        #     if v.validation_interactions[key] != 0 and re_interactions[key] >0.8:
        #         TP += 1
        #     elif v.validation_interactions[key] != 0 and re_interactions[key] <0.8:
        #         FN += 1
        # for key in re_interactions:
        #     if re_interactions[key] != 0:
        #         if key not in v.validation_interactions.keys():
        #             if key not in v.train_interactions.keys():
        #                 # if v.validation_interactions[key] == 0:
        #                 FP += 1
        #
        print('TP:', TP)
        print('FP:', FP)
        print('TN:', TN)
        print('FN:', FN)
        # NOTE(review): TP+FP or TP+FN can be zero here, which would raise
        # ZeroDivisionError -- confirm whether that case can occur in practice.
        precision = TP/(TP+FP)
        recall = TP/(TP+FN)
        print('precision:', precision)
        print('recall:', recall)
        print('f-score: ', 2*precision*recall/(precision + recall))
    def hehe_approach(self):
        """
        Nearest-neighbour link prediction: for every validation drug, take its
        most MACCS-similar paired drug and transfer that pair's known
        interactions, weighted by MACCS similarity; then print AUROC,
        confusion counts, precision/recall/f-score and accuracy.

        :return: dict mapping 'target_id item_id' -> predicted score.
        """
        # Binarize all interactions.
        for key in self.interaction:
            self.interaction[key] = 1
        self.create_pairs_for_data_set() # create the most similar drug's mol/id according four similarities
        allre = {}
        result = {}
        for item in self.validation_set:
            # item = self.validation_set[key]
            pair_id = self.maccs_pair_id[item.molregno]
            pair_mol = self.maccs_pair_mol[item.molregno]
            # print(pair_mol)
            for key in self.wst_med:
                target = self.wst_med[key]
                if target.id != item.id and pair_id != target.id:
                    # Score = known interaction of the proxy pair, weighted
                    # by the proxy's MACCS similarity to the target.
                    key = target.id + ' ' + item.id
                    pair_target_key = pair_id + ' ' + target.id
                    pair_target_key2 = target.id + ' ' + pair_id
                    if pair_target_key in self.interaction.keys():
                        result[key] = self.interaction[pair_target_key] * self.sim_by_mol(pair_mol, target.molregno, 0)
                    elif pair_target_key2 in self.interaction.keys(): # try another key
                        result[key] = self.interaction[pair_target_key2] * self.sim_by_mol(pair_mol, target.molregno, 0)
                    else:
                        result[key] = 0
                else:
                    key = target.id + ' ' + item.id
                    result[key] = 0
        allre = result
        # print('get result')
        # NOTE(review): 'y_ture' is a typo for 'y_true' (kept as-is here).
        y_ture = []
        y_score = []
        TP = 0 # predict 1, actual 1
        TN = 0 # predict 0, actual 0
        FN = 0 # predict 0, actual 1
        FP = 0 # predict 1, actual 0
        # Evaluation pass: a pair present in self.interaction (either key
        # order) is a positive; any positive score counts as a prediction.
        for item in self.validation_set:
            for key in self.wst_med:
                target = self.wst_med[key]
                if target.id != item.id:
                    key = target.id + ' ' + item.id
                    key2 = item.id + ' ' + target.id
                    if key in self.interaction.keys():
                        if allre[key] >0:#== 1: # key in self.interaction.keys() -> interaction[key] must be 1
                            TP += 1
                            y_score.append(allre[key])
                            y_ture.append(1)
                        else:
                            FN += 1
                            y_score.append(allre[key])
                            y_ture.append(1)
                    elif key2 in self.interaction.keys():
                        if allre[key] >0:#== 1:
                            TP += 1
                            y_score.append(allre[key])
                            y_ture.append(1)
                        else:
                            FN += 1
                            y_score.append(allre[key])
                            y_ture.append(1)
                    elif allre[key] >0:#== 1:
                        FP += 1
                        y_score.append(allre[key])
                        y_ture.append(0)
                    elif allre[key] <=0:#== 0:
                        TN += 1
                        y_score.append(allre[key])
                        y_ture.append(0)
        auroc = roc_auc_score(y_ture, y_score)
        print('auroc: ', auroc)
        print('TP:', TP)
        print('FP:', FP)
        print('TN:', TN)
        print('FN:', FN)
        precision = TP / (TP + FP)
        recall = TP / (TP + FN)
        print('precision:', precision)
        print('recall:', recall)
        print('f-score: ', 2 * precision * recall / (precision + recall))
        print((TP+TN)/(TP+TN+FP+FN))
        return allre
# Script entry point: load similarity/drug/interaction data, build the
# Validation instance and run the similarity-based ('hehe') approach,
# timing the whole pipeline.
start = time.time()
maccs_dict, ecfp4_dict, fcfp4_dict, topo_dict = Similarity.read_sims_to_dict() # 1864590
wstmed_molregno, wstmed_id = WstMed.read_wstmed_to_dict() # search western medicine via molregno
interaction_dict = Interaction.read_interactions_to_dict(wstmed_id) # 128535 inters from totally 853272 of 4696 drugs
v = Validation(wstmed_id, maccs_dict, interaction_dict)
v.divide_data()
v.input_sims(maccs_dict, ecfp4_dict, fcfp4_dict, topo_dict)
# Count how many interactions each drug id participates in (diagnostic only;
# the result is not used below).
count = {}
for interkey in v.interaction:
    id1, id2 = interkey.split()
    if id1 not in count.keys():
        count[id1] = 1
    else:
        count[id1] = count[id1] + 1
    if id2 not in count.keys():
        count[id2] = 1
    else:
        count[id2] = count[id2] + 1
# a = v.hehe_approach()
# v.sim = maccs_dict
# v.create_pairs_for_validation_set()
# v.create_pairs_for_train_set()
# v.create_interactions_train_set()
# portions = [40, 50, 60, 70, 80]
# portions = [30, 80]
# for item in portions:
#     v.logistic_regression(item)
# v.logistic_regression(75)
# v.create_interactions_train_set()
# v.create_id_mol_dict()
# start = time.time()
v.hehe_approach()
# re = 0
# train_interactions = 0
# validation_interactions = 0
#
# re = np.array(list(count.values()))
# re.tofile('a.csv', ',')
end = time.time()
print('time: ', end - start)
| [
"numpy.multiply",
"sklearn.metrics.roc_auc_score",
"sklearn.linear_model.LogisticRegression",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"time.time"
] | [((32993, 33004), 'time.time', 'time.time', ([], {}), '()\n', (33002, 33004), False, 'import time\n'), ((34257, 34268), 'time.time', 'time.time', ([], {}), '()\n', (34266, 34268), False, 'import time\n'), ((7950, 7964), 'numpy.zeros', 'np.zeros', (['(1366)'], {}), '(1366)\n', (7958, 7964), True, 'import numpy as np\n'), ((8072, 8100), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1366, 1366)'}), '(shape=(1366, 1366))\n', (8080, 8100), True, 'import numpy as np\n'), ((17386, 17418), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""sag"""'}), "(solver='sag')\n", (17404, 17418), False, 'from sklearn.linear_model import LogisticRegression\n'), ((17805, 17838), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['inters', 'prob_re[1]'], {}), '(inters, prob_re[1])\n', (17818, 17838), False, 'from sklearn.metrics import roc_auc_score\n'), ((21367, 21382), 'numpy.array', 'np.array', (['maccs'], {}), '(maccs)\n', (21375, 21382), True, 'import numpy as np\n'), ((21400, 21414), 'numpy.array', 'np.array', (['ecfp'], {}), '(ecfp)\n', (21408, 21414), True, 'import numpy as np\n'), ((21432, 21446), 'numpy.array', 'np.array', (['fcfp'], {}), '(fcfp)\n', (21440, 21446), True, 'import numpy as np\n'), ((21464, 21478), 'numpy.array', 'np.array', (['topo'], {}), '(topo)\n', (21472, 21478), True, 'import numpy as np\n'), ((22836, 22884), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""sag"""', 'max_iter': '(10000)'}), "(solver='sag', max_iter=10000)\n", (22854, 22884), False, 'from sklearn.linear_model import LogisticRegression\n'), ((23816, 23830), 'numpy.zeros', 'np.zeros', (['(1366)'], {}), '(1366)\n', (23824, 23830), True, 'import numpy as np\n'), ((24601, 24629), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1366, 1366)'}), '(shape=(1366, 1366))\n', (24609, 24629), True, 'import numpy as np\n'), ((25086, 25114), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1366, 1366)'}), '(shape=(1366, 1366))\n', (25094, 
25114), True, 'import numpy as np\n'), ((25142, 25170), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1366, 1366)'}), '(shape=(1366, 1366))\n', (25150, 25170), True, 'import numpy as np\n'), ((25198, 25226), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1366, 1366)'}), '(shape=(1366, 1366))\n', (25206, 25226), True, 'import numpy as np\n'), ((25254, 25282), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1366, 1366)'}), '(shape=(1366, 1366))\n', (25262, 25282), True, 'import numpy as np\n'), ((26264, 26292), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1366, 1366)'}), '(shape=(1366, 1366))\n', (26272, 26292), True, 'import numpy as np\n'), ((27341, 27358), 'numpy.array', 'np.array', (['re_list'], {}), '(re_list)\n', (27349, 27358), True, 'import numpy as np\n'), ((32548, 32578), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_ture', 'y_score'], {}), '(y_ture, y_score)\n', (32561, 32578), False, 'from sklearn.metrics import roc_auc_score\n'), ((23559, 23573), 'numpy.isnan', 'np.isnan', (['i[1]'], {}), '(i[1])\n', (23567, 23573), True, 'import numpy as np\n'), ((26582, 26611), 'numpy.multiply', 'np.multiply', (['m12t', 'pos_bigger'], {}), '(m12t, pos_bigger)\n', (26593, 26611), True, 'import numpy as np\n'), ((22495, 22506), 'time.time', 'time.time', ([], {}), '()\n', (22504, 22506), False, 'import time\n'), ((22626, 22637), 'time.time', 'time.time', ([], {}), '()\n', (22635, 22637), False, 'import time\n'), ((26551, 26579), 'numpy.multiply', 'np.multiply', (['m12', 'pos_bigger'], {}), '(m12, pos_bigger)\n', (26562, 26579), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
def reshape_array_and_save_to_path(arr_data, arr_label, path, timesteps, target_hour, data_type="Train"):
    """
    Flatten a 3D data array (and its labels) to 2D, save both as compressed
    .npz files under ``path``, then reload them and verify the round trip.

    Side effects: writes ``{timesteps}_{target_hour}_{data_type}_data.npz``,
    ``..._label.npz`` and ``features.npy`` (the trailing feature dimension,
    needed later by ``load_reshaped_array``); prints verification results.

    :param arr_data: numpy array of shape (samples, steps, features).
    :param arr_label: numpy array with one label row per sample.
    :param path: destination folder (must already exist).
    :param timesteps: tag used only in the output file names.
    :param target_hour: tag used only in the output file names.
    :param data_type: tag used only in the output file names.
    :return: None
    """
    # reshaping the array from 3D matrice to 2D matrice.
    arr_data_reshaped = arr_data.reshape(arr_data.shape[0], -1)
    arr_label_reshaped = arr_label.reshape(arr_label.shape[0], -1)
    # np.savez_compressed / np.save return None, so the original's
    # ``saved_data``/``saved_label``/``features_save`` bindings were dead.
    np.savez_compressed(path + "/{}_{}_{}_data.npz".format(timesteps, target_hour, data_type), arr_data_reshaped)
    np.savez_compressed(path + "/{}_{}_{}_label.npz".format(timesteps, target_hour, data_type), arr_label_reshaped)
    np.save(path + "/features.npy", arr_data.shape[2])
    # retrieving data from file (context managers close the npz handles).
    with np.load(path + "/{}_{}_{}_data.npz".format(timesteps, target_hour, data_type), allow_pickle=True) as data_file:
        loaded_arr_data = data_file['arr_0']
    with np.load(path + "/{}_{}_{}_label.npz".format(timesteps, target_hour, data_type), allow_pickle=True) as label_file:
        loaded_arr_label = label_file['arr_0'].ravel()
    # Restore the original 3D layout using the known feature count.
    loaded_arr_data = loaded_arr_data.reshape(
        loaded_arr_data.shape[0], loaded_arr_data.shape[1] // arr_data.shape[2], arr_data.shape[2])
    # check the shapes:
    print("Data array:")
    print("shape of arr: ", arr_data.shape)
    print("shape of loaded_array: ", loaded_arr_data.shape)
    # check if both arrays are same or not:
    if (arr_data == loaded_arr_data).all():
        print("Yes, both the arrays are same")
    else:
        print("No, both the arrays are not same")
    # check the shapes:
    print("Label array:")
    print("shape of arr: ", arr_label.shape)
    print("shape of loaded_array: ", loaded_arr_label.shape)
    # check if both arrays are same or not:
    if (arr_label == loaded_arr_label).all():
        print("Yes, both the arrays are same")
    else:
        print("No, both the arrays are not same")
    return None
def load_reshaped_array(timesteps, target_hour, folder_path, data_type="train"):
    """
    Reload the arrays written by ``reshape_array_and_save_to_path`` and
    restore the original 3D layout.

    :param timesteps: tag used in the stored file names.
    :param target_hour: tag used in the stored file names.
    :param folder_path: folder containing the .npz files and features.npy.
    :param data_type: tag used in the stored file names.
    :return: (data, label) -- data as a float array of shape
        (samples, columns // features, features), label as a flat float array.
    """
    # The trailing feature dimension was stashed in features.npy at save time.
    n_features = np.load(folder_path + "/features.npy", allow_pickle=True).ravel()[0]
    data_name = folder_path + "/{}_{}_{}_data.npz".format(timesteps, target_hour, data_type)
    label_name = folder_path + "/{}_{}_{}_label.npz".format(timesteps, target_hour, data_type)
    data_file = np.load(data_name, allow_pickle=True)
    flat_data = data_file['arr_0']
    n_samples, n_columns = flat_data.shape
    data = flat_data.reshape(
        n_samples, n_columns // n_features, n_features).astype(float)
    data_file.close()
    label_file = np.load(label_name, allow_pickle=True)
    labels = label_file['arr_0'].ravel().astype(float)
    label_file.close()
    return data, labels
def create_tensorflow_dataset(arr_data, arr_label, batch_size):
    """
    Wrap (arr_data, arr_label) into a repeating, batched tf.data.Dataset.

    If the sample count is not a multiple of ``batch_size``, the first
    ``len(arr_data) % batch_size`` samples are dropped so that every batch is
    full; if there are fewer samples than one batch, the whole set becomes a
    single batch instead.

    :param arr_data: array-like of samples.
    :param arr_label: array-like of labels, aligned with ``arr_data``.
    :param batch_size: requested batch size.
    :return: (dataset, steps_per_epoch)
    """
    remainder = len(arr_data) % batch_size
    if remainder != 0:
        if len(arr_data) // batch_size != 0:
            # Drop the leading remainder so batching divides evenly.
            arr_data = arr_data[remainder:]
            arr_label = arr_label[remainder:]
        else:
            # Fewer samples than one batch: shrink the batch to fit.
            batch_size = len(arr_data)
    tf_dataset = tf.data.Dataset.from_tensor_slices((arr_data, arr_label))
    tf_dataset = tf_dataset.repeat().batch(batch_size, drop_remainder=True)
    steps_per_epochs = len(arr_data) // batch_size
    # Removed a leftover debug print of the entire data array.
    return tf_dataset, steps_per_epochs
"numpy.load",
"numpy.save",
"tensorflow.data.Dataset.from_tensor_slices"
] | [((1529, 1579), 'numpy.save', 'np.save', (["(path + '/features.npy')", 'arr_data.shape[2]'], {}), "(path + '/features.npy', arr_data.shape[2])\n", (1536, 1579), True, 'import numpy as np\n'), ((3426, 3483), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(arr_data, arr_label)'], {}), '((arr_data, arr_label))\n', (3460, 3483), True, 'import tensorflow as tf\n'), ((2407, 2464), 'numpy.load', 'np.load', (["(folder_path + '/features.npy')"], {'allow_pickle': '(True)'}), "(folder_path + '/features.npy', allow_pickle=True)\n", (2414, 2464), True, 'import numpy as np\n')] |
import numpy as np
def switch_exams(enc, index_pair):
    """
    Generate feasible neighbour solutions by swapping pairs of diagonal
    entries of ``enc``.

    The current solution is ``np.diag(enc)``. A swap of positions (a, b) is
    feasible only when the two values differ and neither swapped value already
    appears elsewhere in the other position's row of ``enc``.

    :param enc: square numpy array; diagonal holds the current assignment.
    :param index_pair: iterable of (a, b) index pairs to try swapping.
    :return: list of feasible neighbour solutions (1D numpy arrays).
    """
    solution = np.diag(enc)
    feasible_solutions = []
    for first, second in index_pair:
        # Swapping identical values changes nothing -- skip.
        if solution[first] == solution[second]:
            continue
        # ``any(p) == False`` replaced with the idiomatic ``not ...any()``.
        conflict_first = (np.delete(enc[first, :], second) == solution[second]).any()
        conflict_second = (np.delete(enc[second, :], first) == solution[first]).any()
        if not conflict_first and not conflict_second:
            neighbour = solution.copy()
            neighbour[[first, second]] = neighbour[[second, first]]
            feasible_solutions.append(neighbour)
    return feasible_solutions
| [
"numpy.delete",
"numpy.diag"
] | [((71, 83), 'numpy.diag', 'np.diag', (['enc'], {}), '(enc)\n', (78, 83), True, 'import numpy as np\n'), ((263, 298), 'numpy.delete', 'np.delete', (['enc[pair[0], :]', 'pair[1]'], {}), '(enc[pair[0], :], pair[1])\n', (272, 298), True, 'import numpy as np\n'), ((338, 373), 'numpy.delete', 'np.delete', (['enc[pair[1], :]', 'pair[0]'], {}), '(enc[pair[1], :], pair[0])\n', (347, 373), True, 'import numpy as np\n')] |
import os.path as osp
import PIL
from PIL import Image
import torchvision.transforms.functional as TF
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np
# Dataset locations, resolved relative to this file: the repository root is
# assumed to be two directories up, with mini-ImageNet images and split csv
# files under data/miniImagenet/.
THIS_PATH = osp.dirname(__file__)
ROOT_PATH = osp.abspath(osp.join(THIS_PATH, '..', '..'))
IMAGE_PATH = osp.join(ROOT_PATH, 'data/miniImagenet/images')
SPLIT_PATH = osp.join(ROOT_PATH, 'data/miniImagenet/split')
class MiniImageNet(Dataset):
    """Mini-ImageNet dataset that returns each image in four rotations
    (0/90/180/270 degrees) stacked along a new leading dimension."""

    def __init__(self, setname, args):
        """
        :param setname: split name; selects ``SPLIT_PATH/<setname>.csv``.
        :param args: namespace with at least ``model_type`` ('ConvNet' or a
            ResNet variant), used only to pick normalization statistics.
        """
        csv_path = osp.join(SPLIT_PATH, setname + '.csv')
        # Close the csv file deterministically (the original leaked the
        # handle returned by open()).
        with open(csv_path, 'r') as csv_file:
            lines = [x.strip() for x in csv_file.readlines()][1:]
        data = []
        label = []
        lb = -1
        self.wnids = []
        for line in lines:
            name, wnid = line.split(',')
            path = osp.join(IMAGE_PATH, name)
            if wnid not in self.wnids:
                # New class id: assign the next integer label.
                self.wnids.append(wnid)
                lb += 1
            data.append(path)
            label.append(lb)
        self.data = data
        self.label = label
        self.num_class = len(set(label))
        self.args = args
        # Both model types share the same geometry (resize 92, crop 84);
        # only the normalization statistics differ, so the duplicated
        # branches of the original are collapsed to a stats choice.
        if args.model_type == 'ConvNet':
            # for ConvNet512 and Convnet64
            mean = np.array([0.485, 0.456, 0.406])
            std = np.array([0.229, 0.224, 0.225])
        else:
            # for Resnet12
            mean = np.array([x / 255.0 for x in
                             [120.39586422, 115.59361427, 104.54012653]])
            std = np.array([x / 255.0 for x in
                            [70.68188272, 68.27635443, 72.54505529]])
        image_size = 84
        self.to_tensor = transforms.Compose([transforms.ToTensor(),
                                             transforms.Normalize(mean, std)])
        self.transform = transforms.Compose([
            transforms.Resize(92),
            transforms.CenterCrop(image_size)
        ])

    def __len__(self):
        """Number of images in the split."""
        return len(self.data)

    def __getitem__(self, i):
        """
        Return (images, label) where ``images`` stacks the four rotations of
        sample ``i`` as a <4, 3, size, size> tensor.
        """
        path, label = self.data[i], self.label[i]
        image = self.transform(Image.open(path).convert('RGB'))
        image_0 = self.to_tensor(image)
        image_90 = self.to_tensor(TF.rotate(image, 90))
        image_180 = self.to_tensor(TF.rotate(image, 180))
        image_270 = self.to_tensor(TF.rotate(image, 270))
        all_images = torch.stack([image_0, image_90, image_180, image_270], 0)  # <4, 3, size, size>
        return all_images, label
| [
"torchvision.transforms.CenterCrop",
"PIL.Image.open",
"torch.stack",
"os.path.join",
"os.path.dirname",
"torchvision.transforms.functional.rotate",
"numpy.array",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"
] | [((221, 242), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (232, 242), True, 'import os.path as osp\n'), ((313, 360), 'os.path.join', 'osp.join', (['ROOT_PATH', '"""data/miniImagenet/images"""'], {}), "(ROOT_PATH, 'data/miniImagenet/images')\n", (321, 360), True, 'import os.path as osp\n'), ((374, 420), 'os.path.join', 'osp.join', (['ROOT_PATH', '"""data/miniImagenet/split"""'], {}), "(ROOT_PATH, 'data/miniImagenet/split')\n", (382, 420), True, 'import os.path as osp\n'), ((267, 298), 'os.path.join', 'osp.join', (['THIS_PATH', '""".."""', '""".."""'], {}), "(THIS_PATH, '..', '..')\n", (275, 298), True, 'import os.path as osp\n'), ((510, 548), 'os.path.join', 'osp.join', (['SPLIT_PATH', "(setname + '.csv')"], {}), "(SPLIT_PATH, setname + '.csv')\n", (518, 548), True, 'import os.path as osp\n'), ((2743, 2800), 'torch.stack', 'torch.stack', (['[image_0, image_90, image_180, image_270]', '(0)'], {}), '([image_0, image_90, image_180, image_270], 0)\n', (2754, 2800), False, 'import torch\n'), ((783, 809), 'os.path.join', 'osp.join', (['IMAGE_PATH', 'name'], {}), '(IMAGE_PATH, name)\n', (791, 809), True, 'import os.path as osp\n'), ((2583, 2603), 'torchvision.transforms.functional.rotate', 'TF.rotate', (['image', '(90)'], {}), '(image, 90)\n', (2592, 2603), True, 'import torchvision.transforms.functional as TF\n'), ((2640, 2661), 'torchvision.transforms.functional.rotate', 'TF.rotate', (['image', '(180)'], {}), '(image, 180)\n', (2649, 2661), True, 'import torchvision.transforms.functional as TF\n'), ((2698, 2719), 'torchvision.transforms.functional.rotate', 'TF.rotate', (['image', '(270)'], {}), '(image, 270)\n', (2707, 2719), True, 'import torchvision.transforms.functional as TF\n'), ((1261, 1282), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1280, 1282), False, 'from torchvision import transforms\n'), ((1617, 1638), 'torchvision.transforms.Resize', 'transforms.Resize', (['(92)'], {}), '(92)\n', (1634, 1638), 
False, 'from torchvision import transforms\n'), ((1656, 1689), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_size'], {}), '(image_size)\n', (1677, 1689), False, 'from torchvision import transforms\n'), ((1857, 1878), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1876, 1878), False, 'from torchvision import transforms\n'), ((2201, 2222), 'torchvision.transforms.Resize', 'transforms.Resize', (['(92)'], {}), '(92)\n', (2218, 2222), False, 'from torchvision import transforms\n'), ((2240, 2273), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_size'], {}), '(image_size)\n', (2261, 2273), False, 'from torchvision import transforms\n'), ((2459, 2475), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (2469, 2475), False, 'from PIL import Image\n'), ((1353, 1384), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1361, 1384), True, 'import numpy as np\n'), ((1454, 1485), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1462, 1485), True, 'import numpy as np\n'), ((1917, 1992), 'numpy.array', 'np.array', (['[(x / 255.0) for x in [120.39586422, 115.59361427, 104.54012653]]'], {}), '([(x / 255.0) for x in [120.39586422, 115.59361427, 104.54012653]])\n', (1925, 1992), True, 'import numpy as np\n'), ((2009, 2081), 'numpy.array', 'np.array', (['[(x / 255.0) for x in [70.68188272, 68.27635443, 72.54505529]]'], {}), '([(x / 255.0) for x in [70.68188272, 68.27635443, 72.54505529]])\n', (2017, 2081), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import csv
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
# Script: classify test articles from their abstracts using bag-of-words
# features and logistic regression, writing per-class probabilities to a
# Kaggle-style submission csv.
# Load data about each article in a dataframe
df = pd.read_csv("Data/node_information.csv")
print(df.head())
# Read training data
train_ids = list()
y_train = list()
with open('Data/train.csv', 'r') as f:
    next(f)
    for line in f:
        t = line.split(',')
        train_ids.append(t[0])
        # Strip the trailing newline from the class label.
        y_train.append(t[1][:-1])
n_train = len(train_ids)
unique = np.unique(y_train)
print("\nNumber of classes: ", unique.size)
# Extract the abstract of each training article from the dataframe
train_abstracts = list()
for i in train_ids:
    train_abstracts.append(df.loc[df['id'] == int(i)]['abstract'].iloc[0])
# Create the training matrix. Each row corresponds to an article
# and each column to a word present in at least 2 webpages and at
# most 50 articles. The value of each entry in a row is equal to
# the frequency of that word in the corresponding article
vec = CountVectorizer(decode_error='ignore', min_df=2, max_df=50, stop_words='english')
X_train = vec.fit_transform(train_abstracts)
# Read test data
test_ids = list()
with open('Data/test.csv', 'r') as f:
    next(f)
    for line in f:
        # Strip the trailing '\n' (and preceding character) from each id.
        test_ids.append(line[:-2])
# Extract the abstract of each test article from the dataframe
n_test = len(test_ids)
test_abstracts = list()
for i in test_ids:
    test_abstracts.append(df.loc[df['id'] == int(i)]['abstract'].iloc[0])
# Create the test matrix following the same approach as in the case of the training matrix
X_test = vec.transform(test_abstracts)
print("\nTrain matrix dimensionality: ", X_train.shape)
print("Test matrix dimensionality: ", X_test.shape)
# Use logistic regression to classify the articles of the test set
clf = LogisticRegression()
clf.fit(X_train, y_train)
y_pred = clf.predict_proba(X_test)
# Write predictions to a file
with open('Result/text_submission.csv', 'w') as csvfile:
    writer = csv.writer(csvfile, delimiter=',')
    # Header row: article id column followed by one column per class.
    lst = clf.classes_.tolist()
    lst.insert(0, "Article")
    writer.writerow(lst)
    for i,test_id in enumerate(test_ids):
        lst = y_pred[i,:].tolist()
        lst.insert(0, test_id)
        writer.writerow(lst)
| [
"numpy.unique",
"pandas.read_csv",
"sklearn.feature_extraction.text.CountVectorizer",
"csv.writer",
"sklearn.linear_model.LogisticRegression"
] | [((214, 254), 'pandas.read_csv', 'pd.read_csv', (['"""Data/node_information.csv"""'], {}), "('Data/node_information.csv')\n", (225, 254), True, 'import pandas as pd\n'), ((528, 546), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (537, 546), True, 'import numpy as np\n'), ((1043, 1129), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'decode_error': '"""ignore"""', 'min_df': '(2)', 'max_df': '(50)', 'stop_words': '"""english"""'}), "(decode_error='ignore', min_df=2, max_df=50, stop_words=\n 'english')\n", (1058, 1129), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1828, 1848), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1846, 1848), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2011, 2045), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (2021, 2045), False, 'import csv\n')] |
"""
Implementation of ODE Risk minimization
<NAME>, ETH Zurich
based on code from
<NAME>, Machine Learning Research Group, University of Oxford
February 2019
"""
# Libraries
from odin.utils.trainable_models import TrainableModel
from odin.utils.gaussian_processes import GaussianProcess
from odin.utils.tensorflow_optimizer import ExtendedScipyOptimizerInterface
import numpy as np
import tensorflow as tf
from typing import Union, Tuple
import time
class ODERiskMinimization(object):
"""
Class that implements ODIN risk minimization
"""
    def __init__(self, trainable: TrainableModel,
                 system_data: np.array, t_data: np.array,
                 gp_kernel: str = 'RBF',
                 optimizer: str = 'L-BFGS-B',
                 initial_gamma: float = 1e-6,
                 train_gamma: bool = True,
                 gamma_bounds: Union[np.array, list, Tuple] = (1e-6, 10.0),
                 state_bounds: np.array = None,
                 basinhopping: bool = True,
                 basinhopping_options: dict = None,
                 single_gp: bool = False,
                 state_normalization: bool = True,
                 time_normalization: bool = False,
                 tensorboard_summary_dir: str = None,
                 runtime_prof_dir: str = None):
        """
        Constructor.
        :param trainable: Trainable model class, as explained and implemented in
        utils.trainable_models;
        :param system_data: numpy array containing the noisy observations of
        the state values of the system, size is [n_states, n_points];
        :param t_data: numpy array containing the time stamps corresponding to
        the observations passed as system_data;
        :param gp_kernel: string indicating which kernel to use in the GP.
        Valid options are 'RBF', 'Matern52', 'Matern32', 'RationalQuadratic',
        'Sigmoid';
        :param optimizer: string indicating which scipy optimizer to use. The
        valid ones are the same that can be passed to scipy.optimize.minimize.
        Notice that some of them will ignore bounds;
        :param initial_gamma: initial value for the gamma parameter.
        :param train_gamma: boolean, indicates whether to train of not the
        variable gamma;
        :param gamma_bounds: bounds for gamma (a lower bound of at least 1e-6
        is always applied to overcome numerical instabilities);
        :param state_bounds: bounds for the state optimization;
        :param basinhopping: boolean, indicates whether to turn on the scipy
        basinhopping;
        :param basinhopping_options: dictionary containing options for the
        basinhooping algorithm (syntax is the same as scipy's one);
        :param single_gp: boolean, indicates whether to use a single set of GP
        hyperparameters for each state;
        :param state_normalization: boolean, indicates whether to normalize the
        states values before the optimization (notice the parameter values
        theta won't change);
        :param time_normalization: boolean, indicates whether to normalize the
        time stamps before the optimization (notice the parameter values
        theta won't change);
        :param tensorboard_summary_dir: optional directory for TensorBoard
        summaries; when set, a FileWriter is created;
        :param runtime_prof_dir: optional directory for runtime profiling
        output.
        """
        # Save arguments
        self.trainable = trainable
        self.system_data = np.copy(system_data)
        self.t_data = np.copy(t_data).reshape(-1, 1)
        self.dim, self.n_p = system_data.shape
        self.gp_kernel = gp_kernel
        self.optimizer = optimizer
        self.initial_gamma = initial_gamma
        self.train_gamma = train_gamma
        # NOTE(review): this value is overwritten a few lines below by
        # _compute_gamma_bounds(gamma_bounds) -- confirm the assignment here
        # is actually needed.
        self.gamma_bounds = np.log(np.array(gamma_bounds))
        self.basinhopping = basinhopping
        self.basinhopping_options = {'n_iter': 10,
                                     'temperature': 1.0,
                                     'stepsize': 0.05}
        self.state_normalization = state_normalization
        if basinhopping_options:
            self.basinhopping_options.update(basinhopping_options)
        self.single_gp = single_gp
        # Build bounds for the states and gamma
        self._compute_state_bounds(state_bounds)
        self._compute_gamma_bounds(gamma_bounds)
        # Initialize utils
        self._compute_standardization_data(state_normalization,
                                           time_normalization)
        # Build the necessary TensorFlow tensors
        self._build_tf_data()
        # Initialize the Gaussian Process for the derivative model
        self.gaussian_process = GaussianProcess(self.dim, self.n_p,
                                                self.gp_kernel, self.single_gp)
        #initialize logging variables
        if tensorboard_summary_dir:
            self.writer = tf.summary.FileWriter(tensorboard_summary_dir)
            # NOTE(review): the summary op handle is discarded -- presumably
            # registering it on the default graph suffices here; confirm.
            theta_sum=tf.summary.histogram('Theta_summary',self.trainable.theta)
        else:
            self.writer = None
        self.runtime_prof_dir= runtime_prof_dir
        # Initialization of TF operations
        self.init = None
        return
def _compute_gamma_bounds(self, bounds: Union[np.array, list, Tuple])\
-> None:
"""
Builds the numpy array that defines the bounds for gamma.
:param bounds: of the form (lower_bound, upper_bound).
"""
self.gamma_bounds = np.array([1.0, 1.0])
if bounds is None:
self.gamma_bounds[0] = np.log(1e-6)
self.gamma_bounds[1] = np.inf
else:
self.gamma_bounds[0] = np.log(np.array(bounds[0]))
self.gamma_bounds[1] = np.log(np.array(bounds[1]))
return
def _compute_state_bounds(self, bounds: np.array) -> None:
"""
Builds the numpy array that defines the bounds for the states.
:param bounds: numpy array, sized [n_dim, 2], in which for each
dimensions we can find respectively lower and upper bounds.
"""
if bounds is None:
self.state_bounds = np.inf * np.ones([self.dim, 2])
self.state_bounds[:, 0] = - self.state_bounds[:, 0]
else:
self.state_bounds = np.array(bounds)
return
def _compute_standardization_data(self, state_normalization: bool,
time_normalization: bool) -> None:
"""
Compute the means and the standard deviations for data standardization,
used in the GP regression.
"""
# Compute mean and std dev of the state and time values
if state_normalization:
self.system_data_means = np.mean(self.system_data,
axis=1).reshape(self.dim, 1)
self.system_data_std_dev = np.std(self.system_data,
axis=1).reshape(self.dim, 1)
else:
self.system_data_means = np.zeros([self.dim, 1])
self.system_data_std_dev = np.ones([self.dim, 1])
if time_normalization:
self.t_data_mean = np.mean(self.t_data)
self.t_data_std_dev = np.std(self.t_data)
else:
self.t_data_mean = 0.0
self.t_data_std_dev = 1.0
if self.gp_kernel == 'Sigmoid':
self.t_data_mean = 0.0
# Normalize states and time
self.normalized_states = (self.system_data - self.system_data_means) / \
self.system_data_std_dev
self.normalized_t_data = (self.t_data - self.t_data_mean) / \
self.t_data_std_dev
return
def _build_tf_data(self) -> None:
    """
    Create the TensorFlow constants used throughout the pipeline: the
    normalized observations, their standardization statistics and the
    problem sizes.
    """
    # Normalized observations and time points.
    self.system = tf.constant(self.normalized_states, dtype=tf.float64)
    self.t = tf.constant(self.normalized_t_data, dtype=tf.float64)
    # Standardization statistics, kept as [dim, 1] column constants.
    self.system_means = tf.constant(self.system_data_means, dtype=tf.float64,
                                    shape=[self.dim, 1])
    self.system_std_dev = tf.constant(self.system_data_std_dev,
                                      dtype=tf.float64,
                                      shape=[self.dim, 1])
    self.t_mean = tf.constant(self.t_data_mean, dtype=tf.float64)
    self.t_std_dev = tf.constant(self.t_data_std_dev, dtype=tf.float64)
    # Problem sizes as integer constants.
    self.n_points = tf.constant(self.n_p, dtype=tf.int32)
    self.dimensionality = tf.constant(self.dim, dtype=tf.int32)
    return
def _build_states_bounds(self) -> None:
"""
Builds the tensors for the normalized states that will containing the
bounds for the constrained optimization.
"""
# Tile the bounds to get the right dimensions
state_lower_bounds = self.state_bounds[:, 0].reshape(self.dim, 1)
state_lower_bounds = np.tile(state_lower_bounds, [1, self.n_p])
state_lower_bounds = (state_lower_bounds - self.system_data_means)\
/ self.system_data_std_dev
state_lower_bounds = state_lower_bounds.reshape([self.dim,
self.n_p])
state_upper_bounds = self.state_bounds[:, 1].reshape(self.dim, 1)
state_upper_bounds = np.tile(state_upper_bounds, [1, self.n_p])
state_upper_bounds = (state_upper_bounds - self.system_data_means)\
/ self.system_data_std_dev
state_upper_bounds = state_upper_bounds.reshape([self.dim,
self.n_p])
self.state_lower_bounds = state_lower_bounds
self.state_upper_bounds = state_upper_bounds
return
def _build_variables(self) -> None:
    """
    Build the TensorFlow variables optimized during risk minimization:
    the (normalized) state values and log(gamma).

    All variables live in the 'risk_main' scope so they can later be
    selected by name when building the optimizer.
    """
    with tf.variable_scope('risk_main'):
        # States to optimize, initialized with the normalized observations.
        self.x = tf.Variable(self.system,
                            dtype=tf.float64, trainable=True,
                            name='states')
        if self.single_gp:
            # Single shared gamma, broadcast to every state dimension.
            self.log_gamma = tf.Variable(np.log(self.initial_gamma),
                                         dtype=tf.float64,
                                         trainable=self.train_gamma,
                                         name='log_gamma')
            self.gamma = tf.exp(self.log_gamma)\
                * tf.ones([self.dimensionality, 1, 1], dtype=tf.float64)
        else:
            # One gamma per state dimension, shape [dim, 1, 1].
            self.log_gamma =\
                tf.Variable(np.log(self.initial_gamma)
                            * tf.ones([self.dimensionality, 1, 1],
                                      dtype=tf.float64),
                            trainable=self.train_gamma,
                            dtype=tf.float64,
                            name='log_gamma')
            self.gamma = tf.exp(self.log_gamma)
    return
def _build_regularization_risk_term(self) -> tf.Tensor:
    """
    Build the regularization term of the risk,
    0.5 * x^T C_phi^{-1} x, summed over all state dimensions.
    :return: the TensorFlow Tensor that contains the term.
    """
    states_column = tf.expand_dims(self.x, -1)
    solved = tf.linalg.solve(
        self.gaussian_process.c_phi_matrices_noiseless,
        states_column, name='reg_risk_inv_kernel')
    per_dimension = 0.5 * tf.reduce_sum(self.x * tf.squeeze(solved))
    return tf.reduce_sum(per_dimension)
def _build_states_risk_term(self) -> tf.Tensor:
    """
    Build the data-fit term of the risk: the squared distance between the
    observed and the current states, scaled by the GP likelihood
    variances.
    :return: the TensorFlow Tensor that contains the term.
    """
    residuals = self.system - self.x
    per_dimension = tf.reduce_sum(residuals * residuals, 1)
    per_dimension = 0.5 * per_dimension / tf.squeeze(
        self.gaussian_process.likelihood_variances)
    return tf.reduce_sum(per_dimension)
def _build_derivatives_risk_term(self) -> tf.Tensor:
    """
    Build the risk term that compares model-based and GP-based state
    derivatives.

    Side effect: stores the GP posterior derivative variance in
    self.posterior_derivative_variance, which is reused by
    _build_gamma_risk_term.
    :return: the TensorFlow Tensor that contains the term.
    """
    # De-normalize the states before feeding the parametric model, then
    # rescale the resulting gradients back to the normalized GP scale.
    unnormalized_states = self.x * self.system_std_dev + self.system_means
    model_derivatives = tf.expand_dims(self.trainable.compute_gradients(
        unnormalized_states) / self.system_std_dev * self.t_std_dev, -1)
    data_derivatives =\
        self.gaussian_process.compute_posterior_derivative_mean(self.x)
    derivatives_difference = model_derivatives - data_derivatives
    # Posterior covariance of the derivatives, regularized by gamma * I.
    self.posterior_derivative_variance = self.gaussian_process.compute_posterior_derivative_variance()
    post_variance =\
        self.posterior_derivative_variance +\
        self.gamma * tf.expand_dims(tf.eye(self.n_points,
                                           dtype=tf.float64), 0)
    # Quadratic form: 0.5 * diff^T (Sigma + gamma I)^{-1} diff.
    a_vector = tf.linalg.solve(post_variance, derivatives_difference,name='deriv_risk_inv_A')
    risk_term = 0.5 * tf.reduce_sum(a_vector * derivatives_difference)
    return risk_term
def _build_gamma_risk_term(self) -> tf.Tensor:
    """
    Build the log-determinant term associated with gamma. Relies on
    self.posterior_derivative_variance, which is set by
    _build_derivatives_risk_term.
    :return: the TensorFlow Tensor that contains the term.
    """
    regularized = \
        self.posterior_derivative_variance + \
        self.gamma * tf.expand_dims(
            tf.eye(self.n_points, dtype=tf.float64), 0)
    log_det = 0.5 * tf.linalg.logdet(regularized)
    return tf.reduce_sum(log_det)
def _build_risk(self) -> None:
    """
    Assemble the total risk from its individual terms; the gamma term is
    added only when gamma is trainable.
    """
    self.risk_term1 = self._build_regularization_risk_term()
    self.risk_term2 = self._build_states_risk_term()
    self.risk_term3 = self._build_derivatives_risk_term()
    self.risk_term4 = self._build_gamma_risk_term()
    self.risk = self.risk_term1 + self.risk_term2 + self.risk_term3
    if self.train_gamma:
        self.risk = self.risk + self.risk_term4
    if self.writer:
        # Register the loss scalar so merge_all picks it up for TensorBoard.
        tf.summary.scalar(name='loss_sum', tensor=self.risk)
    return
def _build_optimizer(self) -> None:
    """
    Build the TensorFlow optimizer, a wrapper around the scipy
    optimization algorithms, with box constraints on every optimized
    variable (model parameters, states and optionally log-gamma).
    """
    # Extract the TF variables that get optimized in the risk minimization
    t_vars = tf.trainable_variables()
    risk_vars = [var for var in t_vars if 'risk_main' in var.name]
    # Dictionary containing the bounds on the TensorFlow Variables.
    # NOTE(review): this assumes risk_vars comes back in creation order
    # [theta, states, log_gamma] -- verify against _build_variables and
    # the trainable model.
    var_to_bounds = {risk_vars[0]: (self.trainable.parameter_lower_bounds,
                                    self.trainable.parameter_upper_bounds),
                     risk_vars[1]: (self.state_lower_bounds,
                                    self.state_upper_bounds)}
    if self.train_gamma:
        var_to_bounds[risk_vars[2]] = (self.gamma_bounds[0],
                                       self.gamma_bounds[1])
    self.risk_optimizer = ExtendedScipyOptimizerInterface(
        loss=self.risk, method=self.optimizer, var_list=risk_vars,
        var_to_bounds=var_to_bounds,file_writer=self.writer,dir_prof_name=self.runtime_prof_dir)
    return
def build_model(self) -> None:
    """
    Build the common parts of the computational graph for the
    optimization, in dependency order: GP covariance matrices, state
    bounds, variables, risk and finally the optimizer.
    """
    # Gaussian Process interpolation over the observed time points.
    self.gaussian_process.build_supporting_covariance_matrices(
        self.t, self.t)
    self._build_states_bounds()
    self._build_variables()
    self._build_risk()
    if self.writer:
        # Merge every summary registered so far for TensorBoard logging.
        self.merged_sum = tf.summary.merge_all()
    self._build_optimizer()
    return
def _initialize_variables(self) -> None:
    """
    Create the op that initializes all variables in the graph; it is run
    by the session at the start of train().
    """
    self.init = tf.global_variables_initializer()
    return
def _initialize_states_with_mean_gp(self, session: tf.Session, compute_dict:dict) -> None:
    """
    Before optimizing the risk, initialize x with the GP posterior mean
    of the observations, for an easier optimization task later.
    :param session: TensorFlow session, used in the fit function.
    :param compute_dict: feed dict binding the GP hyperparameter values.
    """
    mean_prediction = self.gaussian_process.compute_posterior_mean(
        self.system)
    assign_states_mean = tf.assign(self.x, tf.squeeze(mean_prediction))
    session.run(assign_states_mean,feed_dict=compute_dict)
    # Keep a handle to the underlying Variable before rebinding self.x.
    self.X=self.x
    # NOTE(review): this rebinds self.x from a Variable to a clipped
    # *tensor*, so later reads of self.x (e.g. in train()) see the
    # clipped values -- confirm this rebinding is intentional.
    self.x = tf.clip_by_value(
        self.x, clip_value_min=tf.constant(self.state_lower_bounds),
        clip_value_max=tf.constant(self.state_upper_bounds))
    return
def train(self,gp_parameters) -> Tuple[np.array, float]:
    """
    Trains the model by minimizing the risk and returns the final thetas
    together with the elapsed optimization time in seconds.
    :param gp_parameters: values of hyperparameters of GP; their order
        depends on the kernel (Sigmoid: a, b, variances, likelihood
        variances; otherwise: lengthscales, variances, likelihood
        variances).
    """
    # Build the feed dict that binds the GP hyperparameter placeholders.
    if self.gp_kernel == 'Sigmoid':
        compute_dict={self.gaussian_process.kernel.a:gp_parameters[0],self.gaussian_process.kernel.b:gp_parameters[1],self.gaussian_process.kernel.variances:gp_parameters[2],self.gaussian_process.likelihood_variances:gp_parameters[3]}
    else:
        compute_dict={self.gaussian_process.kernel.lengthscales: gp_parameters[0], self.gaussian_process.kernel.variances:gp_parameters[1], self.gaussian_process.likelihood_variances:gp_parameters[2]}
    self._initialize_variables()
    session = tf.Session()
    with session:
        # Start the session
        session.run(self.init)
        # Initialize x as the mean of the GP
        self._initialize_states_with_mean_gp(session,compute_dict=compute_dict)
        # Freeze the posterior derivative variance at its current value by
        # feeding it back into the graph for the rest of the run.
        post_der_var = session.run(self.posterior_derivative_variance,feed_dict=compute_dict)
        compute_dict.update({self.posterior_derivative_variance:post_der_var})
        # Print initial theta
        theta = session.run(self.trainable.theta,feed_dict=compute_dict)
        print("Initialized Theta", theta)
        # Print initial gamma
        gamma = session.run(self.gamma,feed_dict=compute_dict)
        print("Initialized Gamma", gamma)
        # Print the terms of the Risk before the optimization
        print("Risk 1: ", session.run(self.risk_term1,feed_dict=compute_dict))
        print("Risk 2: ", session.run(self.risk_term2,feed_dict=compute_dict))
        print("Risk 3: ", session.run(self.risk_term3,feed_dict=compute_dict))
        print("Risk: ", session.run(self.risk,feed_dict=compute_dict))
        if self.writer:
            self.writer.add_graph(session.graph)
        # Callback that logs one summary per optimizer iteration; the step
        # counter is kept as a function attribute.
        def summary_funct(merged_sum):
            summary_funct.step+=1
            self.writer.add_summary(merged_sum, summary_funct.step)
        summary_funct.step=-1
        result=[]
        # Optimize, timing the call.
        # NOTE(review): `result` is collected but never returned.
        if self.basinhopping:
            secs=time.time()
            result=self.risk_optimizer.basinhopping(session,feed_dict=compute_dict,
                                                    **self.basinhopping_options)
            secs=time.time() -secs
        else:
            if self.writer:
                secs=time.time()
                result=self.risk_optimizer.minimize(session,feed_dict=compute_dict,loss_callback=summary_funct,fetches=[self.merged_sum])
                secs=time.time() -secs
            else:
                secs=time.time()
                result=self.risk_optimizer.minimize(session,feed_dict=compute_dict)
                secs=time.time() -secs
        print("Elapsed time is ",secs)
        # Print the terms of the Risk after the optimization
        print("risk 1: ", session.run(self.risk_term1,feed_dict=compute_dict))
        print("risk 2: ", session.run(self.risk_term2,feed_dict=compute_dict))
        print("risk 3: ", session.run(self.risk_term3,feed_dict=compute_dict))
        if self.train_gamma:
            print("risk 4: ", session.run(self.risk_term4,feed_dict=compute_dict))
        found_risk=session.run(self.risk,feed_dict=compute_dict)
        print("risk: ", found_risk)
        # De-normalize the optimized states back to the original scale.
        unnormalized_states = tf.squeeze(self.x) * self.system_std_dev + \
            self.system_means
        states_after = session.run(unnormalized_states,feed_dict=compute_dict)
        # Print final theta
        theta = session.run(self.trainable.theta,feed_dict=compute_dict)
        print("Final Theta", theta)
        # Print final gamma
        gamma = session.run(self.gamma,feed_dict=compute_dict)
        print("Final Gamma", gamma)
    # Clear the graph so that consecutive train() calls start fresh.
    tf.reset_default_graph()
    return theta, secs
| [
"tensorflow.reduce_sum",
"numpy.log",
"numpy.array",
"odin.utils.tensorflow_optimizer.ExtendedScipyOptimizerInterface",
"tensorflow.eye",
"numpy.mean",
"odin.utils.gaussian_processes.GaussianProcess",
"tensorflow.Session",
"tensorflow.trainable_variables",
"tensorflow.summary.scalar",
"numpy.til... | [((3460, 3480), 'numpy.copy', 'np.copy', (['system_data'], {}), '(system_data)\n', (3467, 3480), True, 'import numpy as np\n'), ((4664, 4731), 'odin.utils.gaussian_processes.GaussianProcess', 'GaussianProcess', (['self.dim', 'self.n_p', 'self.gp_kernel', 'self.single_gp'], {}), '(self.dim, self.n_p, self.gp_kernel, self.single_gp)\n', (4679, 4731), False, 'from odin.utils.gaussian_processes import GaussianProcess\n'), ((5461, 5481), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (5469, 5481), True, 'import numpy as np\n'), ((7807, 7860), 'tensorflow.constant', 'tf.constant', (['self.normalized_states'], {'dtype': 'tf.float64'}), '(self.normalized_states, dtype=tf.float64)\n', (7818, 7860), True, 'import tensorflow as tf\n'), ((7878, 7931), 'tensorflow.constant', 'tf.constant', (['self.normalized_t_data'], {'dtype': 'tf.float64'}), '(self.normalized_t_data, dtype=tf.float64)\n', (7889, 7931), True, 'import tensorflow as tf\n'), ((7960, 8034), 'tensorflow.constant', 'tf.constant', (['self.system_data_means'], {'dtype': 'tf.float64', 'shape': '[self.dim, 1]'}), '(self.system_data_means, dtype=tf.float64, shape=[self.dim, 1])\n', (7971, 8034), True, 'import tensorflow as tf\n'), ((8145, 8221), 'tensorflow.constant', 'tf.constant', (['self.system_data_std_dev'], {'dtype': 'tf.float64', 'shape': '[self.dim, 1]'}), '(self.system_data_std_dev, dtype=tf.float64, shape=[self.dim, 1])\n', (8156, 8221), True, 'import tensorflow as tf\n'), ((8328, 8375), 'tensorflow.constant', 'tf.constant', (['self.t_data_mean'], {'dtype': 'tf.float64'}), '(self.t_data_mean, dtype=tf.float64)\n', (8339, 8375), True, 'import tensorflow as tf\n'), ((8401, 8451), 'tensorflow.constant', 'tf.constant', (['self.t_data_std_dev'], {'dtype': 'tf.float64'}), '(self.t_data_std_dev, dtype=tf.float64)\n', (8412, 8451), True, 'import tensorflow as tf\n'), ((8476, 8513), 'tensorflow.constant', 'tf.constant', (['self.n_p'], {'dtype': 'tf.int32'}), '(self.n_p, 
dtype=tf.int32)\n', (8487, 8513), True, 'import tensorflow as tf\n'), ((8544, 8581), 'tensorflow.constant', 'tf.constant', (['self.dim'], {'dtype': 'tf.int32'}), '(self.dim, dtype=tf.int32)\n', (8555, 8581), True, 'import tensorflow as tf\n'), ((8950, 8992), 'numpy.tile', 'np.tile', (['state_lower_bounds', '[1, self.n_p]'], {}), '(state_lower_bounds, [1, self.n_p])\n', (8957, 8992), True, 'import numpy as np\n'), ((9346, 9388), 'numpy.tile', 'np.tile', (['state_upper_bounds', '[1, self.n_p]'], {}), '(state_upper_bounds, [1, self.n_p])\n', (9353, 9388), True, 'import numpy as np\n'), ((11506, 11530), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['risk_term'], {}), '(risk_term)\n', (11519, 11530), True, 'import tensorflow as tf\n'), ((11831, 11886), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(states_difference * states_difference)', '(1)'], {}), '(states_difference * states_difference, 1)\n', (11844, 11886), True, 'import tensorflow as tf\n'), ((12008, 12032), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['risk_term'], {}), '(risk_term)\n', (12021, 12032), True, 'import tensorflow as tf\n'), ((13141, 13220), 'tensorflow.linalg.solve', 'tf.linalg.solve', (['post_variance', 'derivatives_difference'], {'name': '"""deriv_risk_inv_A"""'}), "(post_variance, derivatives_difference, name='deriv_risk_inv_A')\n", (13156, 13220), True, 'import tensorflow as tf\n'), ((13834, 13858), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['risk_term'], {}), '(risk_term)\n', (13847, 13858), True, 'import tensorflow as tf\n'), ((14731, 14755), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (14753, 14755), True, 'import tensorflow as tf\n'), ((15378, 15568), 'odin.utils.tensorflow_optimizer.ExtendedScipyOptimizerInterface', 'ExtendedScipyOptimizerInterface', ([], {'loss': 'self.risk', 'method': 'self.optimizer', 'var_list': 'risk_vars', 'var_to_bounds': 'var_to_bounds', 'file_writer': 'self.writer', 'dir_prof_name': 'self.runtime_prof_dir'}), '(loss=self.risk, 
method=self.optimizer,\n var_list=risk_vars, var_to_bounds=var_to_bounds, file_writer=self.\n writer, dir_prof_name=self.runtime_prof_dir)\n', (15409, 15568), False, 'from odin.utils.tensorflow_optimizer import ExtendedScipyOptimizerInterface\n'), ((16253, 16286), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (16284, 16286), True, 'import tensorflow as tf\n'), ((17809, 17821), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (17819, 17821), True, 'import tensorflow as tf\n'), ((21073, 21097), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (21095, 21097), True, 'import tensorflow as tf\n'), ((3768, 3790), 'numpy.array', 'np.array', (['gamma_bounds'], {}), '(gamma_bounds)\n', (3776, 3790), True, 'import numpy as np\n'), ((4880, 4926), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['tensorboard_summary_dir'], {}), '(tensorboard_summary_dir)\n', (4901, 4926), True, 'import tensorflow as tf\n'), ((4949, 5008), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""Theta_summary"""', 'self.trainable.theta'], {}), "('Theta_summary', self.trainable.theta)\n", (4969, 5008), True, 'import tensorflow as tf\n'), ((5544, 5557), 'numpy.log', 'np.log', (['(1e-06)'], {}), '(1e-06)\n', (5550, 5557), True, 'import numpy as np\n'), ((6255, 6271), 'numpy.array', 'np.array', (['bounds'], {}), '(bounds)\n', (6263, 6271), True, 'import numpy as np\n'), ((6994, 7017), 'numpy.zeros', 'np.zeros', (['[self.dim, 1]'], {}), '([self.dim, 1])\n', (7002, 7017), True, 'import numpy as np\n'), ((7057, 7079), 'numpy.ones', 'np.ones', (['[self.dim, 1]'], {}), '([self.dim, 1])\n', (7064, 7079), True, 'import numpy as np\n'), ((7142, 7162), 'numpy.mean', 'np.mean', (['self.t_data'], {}), '(self.t_data)\n', (7149, 7162), True, 'import numpy as np\n'), ((7197, 7216), 'numpy.std', 'np.std', (['self.t_data'], {}), '(self.t_data)\n', (7203, 7216), True, 'import numpy as np\n'), ((9953, 9983), 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""risk_main"""'], {}), "('risk_main')\n", (9970, 9983), True, 'import tensorflow as tf\n'), ((10006, 10079), 'tensorflow.Variable', 'tf.Variable', (['self.system'], {'dtype': 'tf.float64', 'trainable': '(True)', 'name': '"""states"""'}), "(self.system, dtype=tf.float64, trainable=True, name='states')\n", (10017, 10079), True, 'import tensorflow as tf\n'), ((11365, 11391), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.x', '(-1)'], {}), '(self.x, -1)\n', (11379, 11391), True, 'import tensorflow as tf\n'), ((11925, 11979), 'tensorflow.squeeze', 'tf.squeeze', (['self.gaussian_process.likelihood_variances'], {}), '(self.gaussian_process.likelihood_variances)\n', (11935, 11979), True, 'import tensorflow as tf\n'), ((13246, 13294), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(a_vector * derivatives_difference)'], {}), '(a_vector * derivatives_difference)\n', (13259, 13294), True, 'import tensorflow as tf\n'), ((13787, 13818), 'tensorflow.linalg.logdet', 'tf.linalg.logdet', (['post_variance'], {}), '(post_variance)\n', (13803, 13818), True, 'import tensorflow as tf\n'), ((14408, 14460), 'tensorflow.summary.scalar', 'tf.summary.scalar', ([], {'name': '"""loss_sum"""', 'tensor': 'self.risk'}), "(name='loss_sum', tensor=self.risk)\n", (14425, 14460), True, 'import tensorflow as tf\n'), ((16025, 16047), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (16045, 16047), True, 'import tensorflow as tf\n'), ((16775, 16802), 'tensorflow.squeeze', 'tf.squeeze', (['mean_prediction'], {}), '(mean_prediction)\n', (16785, 16802), True, 'import tensorflow as tf\n'), ((3503, 3518), 'numpy.copy', 'np.copy', (['t_data'], {}), '(t_data)\n', (3510, 3518), True, 'import numpy as np\n'), ((5655, 5674), 'numpy.array', 'np.array', (['bounds[0]'], {}), '(bounds[0])\n', (5663, 5674), True, 'import numpy as np\n'), ((5718, 5737), 'numpy.array', 'np.array', (['bounds[1]'], {}), '(bounds[1])\n', (5726, 5737), True, 
'import numpy as np\n'), ((6122, 6144), 'numpy.ones', 'np.ones', (['[self.dim, 2]'], {}), '([self.dim, 2])\n', (6129, 6144), True, 'import numpy as np\n'), ((11011, 11033), 'tensorflow.exp', 'tf.exp', (['self.log_gamma'], {}), '(self.log_gamma)\n', (11017, 11033), True, 'import tensorflow as tf\n'), ((16959, 16995), 'tensorflow.constant', 'tf.constant', (['self.state_lower_bounds'], {}), '(self.state_lower_bounds)\n', (16970, 16995), True, 'import tensorflow as tf\n'), ((17024, 17060), 'tensorflow.constant', 'tf.constant', (['self.state_upper_bounds'], {}), '(self.state_upper_bounds)\n', (17035, 17060), True, 'import tensorflow as tf\n'), ((19312, 19323), 'time.time', 'time.time', ([], {}), '()\n', (19321, 19323), False, 'import time\n'), ((6704, 6737), 'numpy.mean', 'np.mean', (['self.system_data'], {'axis': '(1)'}), '(self.system_data, axis=1)\n', (6711, 6737), True, 'import numpy as np\n'), ((6843, 6875), 'numpy.std', 'np.std', (['self.system_data'], {'axis': '(1)'}), '(self.system_data, axis=1)\n', (6849, 6875), True, 'import numpy as np\n'), ((10222, 10248), 'numpy.log', 'np.log', (['self.initial_gamma'], {}), '(self.initial_gamma)\n', (10228, 10248), True, 'import numpy as np\n'), ((10478, 10500), 'tensorflow.exp', 'tf.exp', (['self.log_gamma'], {}), '(self.log_gamma)\n', (10484, 10500), True, 'import tensorflow as tf\n'), ((10524, 10578), 'tensorflow.ones', 'tf.ones', (['[self.dimensionality, 1, 1]'], {'dtype': 'tf.float64'}), '([self.dimensionality, 1, 1], dtype=tf.float64)\n', (10531, 10578), True, 'import tensorflow as tf\n'), ((11469, 11489), 'tensorflow.squeeze', 'tf.squeeze', (['a_vector'], {}), '(a_vector)\n', (11479, 11489), True, 'import tensorflow as tf\n'), ((13003, 13042), 'tensorflow.eye', 'tf.eye', (['self.n_points'], {'dtype': 'tf.float64'}), '(self.n_points, dtype=tf.float64)\n', (13009, 13042), True, 'import tensorflow as tf\n'), ((13670, 13709), 'tensorflow.eye', 'tf.eye', (['self.n_points'], {'dtype': 'tf.float64'}), '(self.n_points, 
dtype=tf.float64)\n', (13676, 13709), True, 'import tensorflow as tf\n'), ((19511, 19522), 'time.time', 'time.time', ([], {}), '()\n', (19520, 19522), False, 'import time\n'), ((19604, 19615), 'time.time', 'time.time', ([], {}), '()\n', (19613, 19615), False, 'import time\n'), ((19848, 19859), 'time.time', 'time.time', ([], {}), '()\n', (19857, 19859), False, 'import time\n'), ((20612, 20630), 'tensorflow.squeeze', 'tf.squeeze', (['self.x'], {}), '(self.x)\n', (20622, 20630), True, 'import tensorflow as tf\n'), ((10663, 10689), 'numpy.log', 'np.log', (['self.initial_gamma'], {}), '(self.initial_gamma)\n', (10669, 10689), True, 'import numpy as np\n'), ((10724, 10778), 'tensorflow.ones', 'tf.ones', (['[self.dimensionality, 1, 1]'], {'dtype': 'tf.float64'}), '([self.dimensionality, 1, 1], dtype=tf.float64)\n', (10731, 10778), True, 'import tensorflow as tf\n'), ((19783, 19794), 'time.time', 'time.time', ([], {}), '()\n', (19792, 19794), False, 'import time\n'), ((19973, 19984), 'time.time', 'time.time', ([], {}), '()\n', (19982, 19984), False, 'import time\n')] |
import sys
sys.path.append('../utils')
from ops import dense_sum_list, get_shape
from np_op import convert2list
import tensorflow as tf
import numpy as np
import os
def test1(id_=0):
    ''' dense_sum_list test
    Results :
        =================Round1===================
        const : [4, 2]
        [[  1.   4.]
         [  3.   4.]
         [  5.   6.]
         [  1.  10.]]
        split :
        [array([[ 1.,  4.],
               [ 3.,  4.]], dtype=float32), array([[  5.,   6.],
               [  1.,  10.]], dtype=float32)]
        dense_sum : (2, 4)
        [[  6.   7.   9.  10.]
         [  4.  13.   5.  14.]]
        =================Round2===================
        const : [3, 2]
        [[    1.    10.]
         [    2.   100.]
         [    3.  1000.]]
        split :
        [array([[  1.,  10.]], dtype=float32), array([[   2.,  100.]], dtype=float32), array([[    3.,  1000.]], dtype=float32)]
        dense_sum : (1, 8)
        [[    6.  1003.   104.  1101.    15.  1012.   113.  1110.]]
    '''
    # Pin the visible GPU for this demo.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(id_)
    sess = tf.Session()
    print("=================Round1===================")
    const = np.array([[1,4],[3,4],[5,6],[1,10]])
    const = tf.constant(const, dtype=tf.float32)
    print("const : {} \n{}".format(get_shape(const), sess.run(const)))
    # Split into 2 chunks of 2 rows each, then densely sum them.
    split = tf.split(const, num_or_size_splits=2, axis=0)
    print("split :\n{}".format(sess.run(split)))
    ds = dense_sum_list(split)
    ds_run = sess.run(ds)
    print("dense_sum : {}\n{}".format(ds_run.shape, ds_run))
    print("=================Round2===================")
    const = np.array([[1,10],[2,100],[3,1000]])
    const = tf.constant(const, dtype=tf.float32)
    print("const : {}\n{}".format(get_shape(const), sess.run(const)))
    # Split into 3 chunks of a single row each.
    split = tf.split(const, num_or_size_splits=3, axis=0)
    print("split :\n{}".format(sess.run(split)))
    ds = dense_sum_list(split)
    ds_run = sess.run(ds)
    print("dense_sum : {}\n{}".format(ds_run.shape, ds_run))
def test2(id_=0):
    ''' test for
            cifar_exp/exp_9/deepmetric.py
            utils/eval_op/HashTree
        Results -
    '''
    # Pin the visible GPU for this demo.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(id_)
    nbatch = 2
    k = 3
    d = 4
    sparsity = 3
    sess = tf.Session()
    const = np.random.random([nbatch*k, d])
    const = tf.constant(const, dtype=tf.float32)
    print("const : {} \n{}".format(get_shape(const), sess.run(const)))
    split = tf.split(const, num_or_size_splits=k, axis=0) # k*[batch_size, d]
    print("split :\n{}".format(sess.run(split)))
    # Per chunk: column indices sorted by descending activation.
    tree_idx_set = [tf.nn.top_k(v, k=d)[1] for v in split]# k*[batch_size, d]
    print("tree_idx_set : \n{}".format(sess.run(tree_idx_set)))
    tree_idx = tf.transpose(tf.stack(tree_idx_set, axis=0), [1, 0, 2]) # [batch_size, k, d]
    print("tree_idx : \n{}".format(sess.run(tree_idx)))
    # Mixed-radix (base d) conversion factors [d**(k-1), ..., d, 1].
    idx_convert_np = list()
    tmp = 1
    for i in range(k):
        idx_convert_np.append(tmp)
        tmp*=d
    idx_convert_np = np.array(idx_convert_np)[::-1] # [d**(k-1),...,1]
    idx_convert = tf.constant(idx_convert_np, dtype=tf.int32) # tensor [k]
    print("idx_convert :{} \n{}".format(get_shape(idx_convert), sess.run(idx_convert)))
    max_idx = tf.reduce_sum(tf.multiply(tree_idx[:,:,0], idx_convert), axis=-1) # [batch_size]
    print("max_idx :\n{}".format(sess.run(max_idx)))
    max_k_idx = tf.add(tf.reduce_sum(tf.multiply(tree_idx[:,:-1,0], idx_convert[:-1]), axis=-1, keep_dims=True), tree_idx[:,-1,:sparsity]) # [batch_size]
    print("max_k_idx :\n{}".format(sess.run(max_k_idx)))
    # Cross-check every composite index against a numpy reconstruction.
    tree_idx = sess.run(tree_idx)
    for b_idx in range(nbatch):
        for idx in range(d**k):
            idx_list = convert2list(n=idx, base=d, fill=k) # [k]
            st_idx= np.sum(np.multiply(np.array([tree_idx[b_idx][v][idx_list[v]] for v in range(k)]), idx_convert_np))
            print("b_idx, idx, st_idx : {}, {}, {}".format(b_idx, idx, st_idx))
def test3():
    """Sanity check: get_shape of a constant 4x3 tensor should be [4, 3]."""
    values = np.array([[1,2,3],[30,20,10],[100,300,200],[3000,1000,2000]])
    tensor = tf.constant(values, dtype=tf.float32)
    print(get_shape(tensor)==[4,3])
def test4(id_=0):
    ''' test for
            icml_imgnet/expf5npair/deepmetric.py
    '''
    # Pin the visible GPU for this demo.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(id_)
    nbatch = 2
    sk = 3
    nhash = 3
    d = 5
    sess = tf.Session()
    anc_embed_k_hash1 = tf.constant(np.random.random([nbatch, nhash]), dtype=tf.float32)
    print("anc_embed_k_hash1 : {} \n{}".format(get_shape(anc_embed_k_hash1), sess.run(anc_embed_k_hash1)))
    anc_embed_k_hash2 = tf.constant(np.random.random([nbatch, d]), dtype=tf.float32)
    print("anc_embed_k_hash2 : {} \n{}".format(get_shape(anc_embed_k_hash2), sess.run(anc_embed_k_hash2)))
    # Combine the two rankings into flat composite indices:
    # outer index scaled by d plus inner index, broadcast over both axes.
    idx_array = tf.reshape(
            tf.add(
                tf.expand_dims(tf.multiply(tf.nn.top_k(anc_embed_k_hash1, k=nhash)[1], d), axis=2),
                tf.expand_dims(tf.nn.top_k(anc_embed_k_hash2, k=d)[1], axis=1)),
            [-1, nhash*d])
    # [nbatch, nhash, 1], [nbatch, 1, d] => [nbatch//2, nhash, d] => [nbatch//2, nhash*d]
    max_k_idx = idx_array[:, :sk] # [batch_size, sk]
    print("max_k_idx :\n{}".format(sess.run(max_k_idx)))
    print("idx_array :\n{}".format(sess.run(idx_array)))
# Run the index-array demo on GPU 0 when executed as a script.
if __name__=='__main__':
    test4(0)
| [
"np_op.convert2list",
"numpy.random.random",
"tensorflow.Session",
"tensorflow.split",
"ops.dense_sum_list",
"tensorflow.multiply",
"ops.get_shape",
"tensorflow.nn.top_k",
"numpy.array",
"tensorflow.constant",
"sys.path.append",
"tensorflow.stack"
] | [((12, 39), 'sys.path.append', 'sys.path.append', (['"""../utils"""'], {}), "('../utils')\n", (27, 39), False, 'import sys\n'), ((1152, 1164), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1162, 1164), True, 'import tensorflow as tf\n'), ((1234, 1277), 'numpy.array', 'np.array', (['[[1, 4], [3, 4], [5, 6], [1, 10]]'], {}), '([[1, 4], [3, 4], [5, 6], [1, 10]])\n', (1242, 1277), True, 'import numpy as np\n'), ((1283, 1319), 'tensorflow.constant', 'tf.constant', (['const'], {'dtype': 'tf.float32'}), '(const, dtype=tf.float32)\n', (1294, 1319), True, 'import tensorflow as tf\n'), ((1403, 1448), 'tensorflow.split', 'tf.split', (['const'], {'num_or_size_splits': '(2)', 'axis': '(0)'}), '(const, num_or_size_splits=2, axis=0)\n', (1411, 1448), True, 'import tensorflow as tf\n'), ((1507, 1528), 'ops.dense_sum_list', 'dense_sum_list', (['split'], {}), '(split)\n', (1521, 1528), False, 'from ops import dense_sum_list, get_shape\n'), ((1684, 1724), 'numpy.array', 'np.array', (['[[1, 10], [2, 100], [3, 1000]]'], {}), '([[1, 10], [2, 100], [3, 1000]])\n', (1692, 1724), True, 'import numpy as np\n'), ((1732, 1768), 'tensorflow.constant', 'tf.constant', (['const'], {'dtype': 'tf.float32'}), '(const, dtype=tf.float32)\n', (1743, 1768), True, 'import tensorflow as tf\n'), ((1851, 1896), 'tensorflow.split', 'tf.split', (['const'], {'num_or_size_splits': '(3)', 'axis': '(0)'}), '(const, num_or_size_splits=3, axis=0)\n', (1859, 1896), True, 'import tensorflow as tf\n'), ((1955, 1976), 'ops.dense_sum_list', 'dense_sum_list', (['split'], {}), '(split)\n', (1969, 1976), False, 'from ops import dense_sum_list, get_shape\n'), ((2377, 2389), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2387, 2389), True, 'import tensorflow as tf\n'), ((2402, 2435), 'numpy.random.random', 'np.random.random', (['[nbatch * k, d]'], {}), '([nbatch * k, d])\n', (2418, 2435), True, 'import numpy as np\n'), ((2446, 2482), 'tensorflow.constant', 'tf.constant', (['const'], {'dtype': 
'tf.float32'}), '(const, dtype=tf.float32)\n', (2457, 2482), True, 'import tensorflow as tf\n'), ((2566, 2611), 'tensorflow.split', 'tf.split', (['const'], {'num_or_size_splits': 'k', 'axis': '(0)'}), '(const, num_or_size_splits=k, axis=0)\n', (2574, 2611), True, 'import tensorflow as tf\n'), ((3179, 3222), 'tensorflow.constant', 'tf.constant', (['idx_convert_np'], {'dtype': 'tf.int32'}), '(idx_convert_np, dtype=tf.int32)\n', (3190, 3222), True, 'import tensorflow as tf\n'), ((4074, 4146), 'numpy.array', 'np.array', (['[[1, 2, 3], [30, 20, 10], [100, 300, 200], [3000, 1000, 2000]]'], {}), '([[1, 2, 3], [30, 20, 10], [100, 300, 200], [3000, 1000, 2000]])\n', (4082, 4146), True, 'import numpy as np\n'), ((4148, 4184), 'tensorflow.constant', 'tf.constant', (['const'], {'dtype': 'tf.float32'}), '(const, dtype=tf.float32)\n', (4159, 4184), True, 'import tensorflow as tf\n'), ((4486, 4498), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4496, 4498), True, 'import tensorflow as tf\n'), ((2851, 2881), 'tensorflow.stack', 'tf.stack', (['tree_idx_set'], {'axis': '(0)'}), '(tree_idx_set, axis=0)\n', (2859, 2881), True, 'import tensorflow as tf\n'), ((3111, 3135), 'numpy.array', 'np.array', (['idx_convert_np'], {}), '(idx_convert_np)\n', (3119, 3135), True, 'import numpy as np\n'), ((3352, 3395), 'tensorflow.multiply', 'tf.multiply', (['tree_idx[:, :, 0]', 'idx_convert'], {}), '(tree_idx[:, :, 0], idx_convert)\n', (3363, 3395), True, 'import tensorflow as tf\n'), ((4535, 4568), 'numpy.random.random', 'np.random.random', (['[nbatch, nhash]'], {}), '([nbatch, nhash])\n', (4551, 4568), True, 'import numpy as np\n'), ((4731, 4760), 'numpy.random.random', 'np.random.random', (['[nbatch, d]'], {}), '([nbatch, d])\n', (4747, 4760), True, 'import numpy as np\n'), ((1355, 1371), 'ops.get_shape', 'get_shape', (['const'], {}), '(const)\n', (1364, 1371), False, 'from ops import dense_sum_list, get_shape\n'), ((1803, 1819), 'ops.get_shape', 'get_shape', (['const'], {}), 
'(const)\n', (1812, 1819), False, 'from ops import dense_sum_list, get_shape\n'), ((2518, 2534), 'ops.get_shape', 'get_shape', (['const'], {}), '(const)\n', (2527, 2534), False, 'from ops import dense_sum_list, get_shape\n'), ((2701, 2720), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['v'], {'k': 'd'}), '(v, k=d)\n', (2712, 2720), True, 'import tensorflow as tf\n'), ((3276, 3298), 'ops.get_shape', 'get_shape', (['idx_convert'], {}), '(idx_convert)\n', (3285, 3298), False, 'from ops import dense_sum_list, get_shape\n'), ((3510, 3560), 'tensorflow.multiply', 'tf.multiply', (['tree_idx[:, :-1, 0]', 'idx_convert[:-1]'], {}), '(tree_idx[:, :-1, 0], idx_convert[:-1])\n', (3521, 3560), True, 'import tensorflow as tf\n'), ((3807, 3842), 'np_op.convert2list', 'convert2list', ([], {'n': 'idx', 'base': 'd', 'fill': 'k'}), '(n=idx, base=d, fill=k)\n', (3819, 3842), False, 'from np_op import convert2list\n'), ((4195, 4211), 'ops.get_shape', 'get_shape', (['const'], {}), '(const)\n', (4204, 4211), False, 'from ops import dense_sum_list, get_shape\n'), ((4635, 4663), 'ops.get_shape', 'get_shape', (['anc_embed_k_hash1'], {}), '(anc_embed_k_hash1)\n', (4644, 4663), False, 'from ops import dense_sum_list, get_shape\n'), ((4827, 4855), 'ops.get_shape', 'get_shape', (['anc_embed_k_hash2'], {}), '(anc_embed_k_hash2)\n', (4836, 4855), False, 'from ops import dense_sum_list, get_shape\n'), ((5103, 5138), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['anc_embed_k_hash2'], {'k': 'd'}), '(anc_embed_k_hash2, k=d)\n', (5114, 5138), True, 'import tensorflow as tf\n'), ((5003, 5042), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['anc_embed_k_hash1'], {'k': 'nhash'}), '(anc_embed_k_hash1, k=nhash)\n', (5014, 5042), True, 'import tensorflow as tf\n')] |
import tilemapbase as tmb
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import folium
from .maputil import copyright_osm
import numpy as np
import re
# Initialise tilemapbase at import time (create=True builds the on-disk
# tile-cache database if it does not exist yet).
tmb.init(create=True)
def _extend(m, M, p):
w = M - m
return m - p*w, M + p*w
def _expand(ex, p):
    """Return a copy of extent *ex* padded by fraction *p* in both axes."""
    x_lo, x_hi = _extend(ex.xmin, ex.xmax, p)
    y_lo, y_hi = _extend(ex.ymin, ex.ymax, p)
    return tmb.Extent(x_lo, x_hi, y_lo, y_hi)
def _widen(ex, p):
    """Return *ex* with only its x-range extended by fraction *p*."""
    x_lo, x_hi = _extend(ex.xmin, ex.xmax, p)
    return tmb.Extent(x_lo, x_hi, ex.ymin, ex.ymax)
def _heighten(ex, p):
    """Return *ex* with only its y-range extended by fraction *p*."""
    y_lo, y_hi = _extend(ex.ymin, ex.ymax, p)
    return tmb.Extent(ex.xmin, ex.xmax, y_lo, y_hi)
def _adjust(ex, w, h):
    """Stretch extent *ex* so its aspect ratio matches a w x h figure.

    The longer side of the figure dictates which axis of the extent is
    grown; the extent is never shrunk.  When the figure is square, the
    shorter extent axis is grown to match the longer one.
    """
    # Extent.xrange / yrange return their bounds in a skewed way:
    # yrange is (max, min), hence the swapped unpacking below.
    xmin, xmax = ex.xrange
    ymax, ymin = ex.yrange
    m = ymax - ymin
    n = xmax - xmin
    if h < w:
        return _widen(ex, (w / h) * (m / n))
    if w < h:
        return _heighten(ex, (h / w) * (n / m))
    if m < n:
        return _heighten(ex, n / m)
    if n < m:
        return _widen(ex, m / n)
    return ex
    # BUG FIX: the original had an unreachable
    # `return tmb.Extent(xmin, xmax, ymin, ymax)` after the exhaustive
    # if/elif/else chain above; dead code removed.
def _get_column(df, candidates):
clns = [x.lower() for x in df.columns]
candidates = [c.lower() for c in candidates if c is not None]
pos = -1
for c in candidates:
if c in clns:
pos = clns.index(c)
break
if pos == -1:
raise RuntimeError(f"{candidates} is not found in {clns}")
return df.iloc[:, pos]
def _iterable(x):
try:
iter(x)
except TypeError:
return False
return True
def _normalize_sizes(df, size, count, name):
    """Normalise a size spec into a per-element sequence of *count* sizes.

    Accepts a dataframe column name (str), a scalar (int/float, repeated
    *count* times), a list (used as-is), or any iterable (materialised).
    Raises ValueError for anything else; *name* is used in the message.
    """
    if type(size) is str:
        return _get_column(df, [size])
    if type(size) is float or type(size) is int:
        return [size] * count
    if type(size) is list:
        return size
    if _iterable(size):
        return list(size)
    raise ValueError(f"{size} is given for {name}")


def _normalize_colors(df, color, count, name):
    """Normalise a color spec into a per-element sequence of *count* colors.

    A "#rrggbb" hex string is repeated *count* times; any other string is
    treated as a dataframe column name; lists are used as-is and other
    iterables are materialised.  Raises ValueError otherwise.
    """
    if type(color) is str and re.match(r"^#[0-9a-fA-F]{6}$", color):
        return [color] * count
    if type(color) is str:
        return _get_column(df, [color])
    if type(color) is list:
        return color
    if _iterable(color):
        return list(color)
    raise ValueError(f"{color} is given for {name}")


def draw(df,
         latitude=None, longitude=None,
         p_size=1, p_color="#000000", p_popup=None,
         l_size=1, l_color="#000000", l_popup=None,
         output_format="png",
         **kwargs):
    """Draw a lat/lon track from *df* as a static PNG or interactive HTML map.

    Parameters
    ----------
    df : dataframe holding latitude/longitude columns
    latitude, longitude : optional explicit column names (defaults are
        guessed among "latitude"/"lat" and "longitude"/"lon"/"lng")
    p_size, p_color, p_popup : per-point marker size/color/popup specs
        (one value per row; scalar, column name, list or iterable)
    l_size, l_color, l_popup : per-segment line width/color/popup specs
        (one value per consecutive pair of rows)
    output_format : "png" (matplotlib figure) or "html" (folium map)
    **kwargs : forwarded to the concrete renderer

    Returns the renderer's result; raises ValueError for an unknown
    output_format or an unusable size/color spec.
    """
    lats = _get_column(df, [latitude, "latitude", "lat"]).values
    lngs = _get_column(df, [longitude, "longitude", "lon", "lng"]).values
    n = len(df)
    # points need one entry per row, line segments one per consecutive pair
    ps = _normalize_sizes(df, p_size, n, "p_size")
    pc = _normalize_colors(df, p_color, n, "p_color")
    ls = _normalize_sizes(df, l_size, n - 1, "l_size")
    lc = _normalize_colors(df, l_color, n - 1, "l_color")
    if output_format == "png":
        return _draw_png(
            df,
            lats, lngs,
            ps, pc,
            ls, lc,
            **kwargs
        )
    if output_format == "html":
        return _draw_html(
            df,
            lats, lngs,
            ps, pc, p_popup,
            ls, lc, l_popup,
            **kwargs
        )
    raise ValueError(
        f"'{output_format}' is not valid for output_format. It should be 'png' or 'html'")
def _draw_png(df,
              lats, lngs,
              p_size, p_color,
              l_size, l_color,
              figsize=(8, 8), dpi=100,
              axis_visible=False,
              padding=0.03,
              adjust=True):
    """Render the track as a static matplotlib figure over OSM tiles.

    Parameters
    ----------
    df : source dataframe (only its length is used here)
    lats, lngs : per-row latitudes and longitudes
    p_size, p_color : per-point marker sizes and colors (indexed 0..len(df)-1)
    l_size, l_color : per-segment line widths and colors (indexed 0..len(df)-2)
    figsize, dpi : matplotlib figure size (inches) and resolution
    axis_visible : show axis ticks/labels when True
    padding : fractional margin added around the data extent
    adjust : stretch the extent to the figure's aspect ratio when True

    Returns
    -------
    (fig, ax) : the matplotlib figure and axes.
    """
    # bounding box of all points, padded by `padding` on every side
    ex1 = tmb.Extent.from_lonlat(
        min(lngs), max(lngs),
        min(lats), max(lats)
    )
    ex2 = _expand(ex1, padding)
    extent = ex2.to_aspect(figsize[0]/figsize[1],
                           shrink=False) if adjust else ex2
    fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
    ax.xaxis.set_visible(axis_visible)
    ax.yaxis.set_visible(axis_visible)
    t = tmb.tiles.build_OSM()
    plotter = tmb.Plotter(extent, t,
                          width=figsize[0] * 100,
                          height=figsize[1] * 100)
    plotter.plot(ax, t)
    # project lon/lat into tilemapbase's plotting coordinates
    ps = [tmb.project(x, y) for x, y in zip(lngs, lats)]
    xs = [p[0] for p in ps]
    ys = [p[1] for p in ps]
    n = len(df)
    # draw the connecting segments first so the markers end up on top
    for i in range(n-1):
        l2 = lines.Line2D(xs[i:(i+2)], ys[i:(i+2)],
                          linewidth=l_size[i], color=l_color[i])
        ax.add_line(l2)
    for i in range(n):
        x, y = ps[i]
        ax.plot(x, y, marker=".", markersize=p_size[i], color=p_color[i])
    # attribution watermark (user-visible string; do not change)
    fig.text(0.8875, 0.125, '© DATAWISE', va='bottom', ha='right')
    return fig, ax
def _draw_html(df,
               lats, lngs,
               p_size, p_color, p_popup,
               l_size, l_color, l_popup,
               zoom_start=15, width=800, height=800,
               control_scale=True):
    """Render the track as an interactive folium (Leaflet) map.

    Parameters
    ----------
    df : source dataframe (only its length is used here)
    lats, lngs : per-row latitudes and longitudes
    p_size, p_color, p_popup : per-point circle radius/color/popup
    l_size, l_color, l_popup : per-segment line weight/color/popup
    zoom_start, width, height, control_scale : folium.Map options

    Returns
    -------
    folium.Map with one Circle per point and one PolyLine per segment.
    """
    n = len(df)
    # center the map on the mean coordinate of the track
    mlat = np.mean(lats)
    mlng = np.mean(lngs)
    fmap = folium.Map(
        location=[mlat, mlng],
        attr=copyright_osm,
        width=width, height=height,
        zoom_start=zoom_start,
        control_scale=control_scale
    )
    # extra tile layer carrying the DATAWISE attribution link
    copyright = ' <a href="https://www.datawise.co.jp/"> | © DATAWISE </a>,'
    folium.raster_layers.TileLayer(
        tiles='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
        name='OpenStreetMap2',
        attr=copyright,
        overlay=True
    ).add_to(fmap)
    # one circle per point; weight=0 hides the circle outline
    for i in range(n):
        x, y = lngs[i], lats[i]
        folium.Circle(
            (y, x),
            color=p_color[i],
            fill=True,
            popup=p_popup[i] if p_popup is not None else None,
            radius=p_size[i],
            weight=0
        ).add_to(fmap)
    # one polyline per consecutive pair of points
    for i in range(n-1):
        x, y = lngs[i], lats[i]
        nx, ny = lngs[i+1], lats[i+1]
        col = l_color[i]
        folium.PolyLine(
            locations=[(y, x), (ny, nx)],
            color=col,
            weight=l_size[i],
            popup=l_popup[i] if l_popup is not None else None
        ).add_to(fmap)
    return fmap
| [
"numpy.mean",
"tilemapbase.Plotter",
"folium.Circle",
"re.match",
"matplotlib.lines.Line2D",
"folium.Map",
"tilemapbase.tiles.build_OSM",
"tilemapbase.project",
"folium.PolyLine",
"tilemapbase.init",
"tilemapbase.Extent",
"matplotlib.pyplot.subplots",
"folium.raster_layers.TileLayer"
] | [((170, 191), 'tilemapbase.init', 'tmb.init', ([], {'create': '(True)'}), '(create=True)\n', (178, 191), True, 'import tilemapbase as tmb\n'), ((383, 417), 'tilemapbase.Extent', 'tmb.Extent', (['xmin', 'xmax', 'ymin', 'ymax'], {}), '(xmin, xmax, ymin, ymax)\n', (393, 417), True, 'import tilemapbase as tmb\n'), ((1138, 1172), 'tilemapbase.Extent', 'tmb.Extent', (['xmin', 'xmax', 'ymin', 'ymax'], {}), '(xmin, xmax, ymin, ymax)\n', (1148, 1172), True, 'import tilemapbase as tmb\n'), ((4357, 4395), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (4369, 4395), True, 'import matplotlib.pyplot as plt\n'), ((4483, 4504), 'tilemapbase.tiles.build_OSM', 'tmb.tiles.build_OSM', ([], {}), '()\n', (4502, 4504), True, 'import tilemapbase as tmb\n'), ((4519, 4590), 'tilemapbase.Plotter', 'tmb.Plotter', (['extent', 't'], {'width': '(figsize[0] * 100)', 'height': '(figsize[1] * 100)'}), '(extent, t, width=figsize[0] * 100, height=figsize[1] * 100)\n', (4530, 4590), True, 'import tilemapbase as tmb\n'), ((5418, 5431), 'numpy.mean', 'np.mean', (['lats'], {}), '(lats)\n', (5425, 5431), True, 'import numpy as np\n'), ((5443, 5456), 'numpy.mean', 'np.mean', (['lngs'], {}), '(lngs)\n', (5450, 5456), True, 'import numpy as np\n'), ((5468, 5606), 'folium.Map', 'folium.Map', ([], {'location': '[mlat, mlng]', 'attr': 'copyright_osm', 'width': 'width', 'height': 'height', 'zoom_start': 'zoom_start', 'control_scale': 'control_scale'}), '(location=[mlat, mlng], attr=copyright_osm, width=width, height=\n height, zoom_start=zoom_start, control_scale=control_scale)\n', (5478, 5606), False, 'import folium\n'), ((2352, 2390), 're.match', 're.match', (['"""^#[0-9a-fA-F]{6}$"""', 'p_color'], {}), "('^#[0-9a-fA-F]{6}$', p_color)\n", (2360, 2390), False, 'import re\n'), ((3043, 3081), 're.match', 're.match', (['"""^#[0-9a-fA-F]{6}$"""', 'l_color'], {}), "('^#[0-9a-fA-F]{6}$', l_color)\n", (3051, 3081), False, 'import re\n'), 
((4678, 4695), 'tilemapbase.project', 'tmb.project', (['x', 'y'], {}), '(x, y)\n', (4689, 4695), True, 'import tilemapbase as tmb\n'), ((4835, 4912), 'matplotlib.lines.Line2D', 'lines.Line2D', (['xs[i:i + 2]', 'ys[i:i + 2]'], {'linewidth': 'l_size[i]', 'color': 'l_color[i]'}), '(xs[i:i + 2], ys[i:i + 2], linewidth=l_size[i], color=l_color[i])\n', (4847, 4912), True, 'import matplotlib.lines as lines\n'), ((5733, 5886), 'folium.raster_layers.TileLayer', 'folium.raster_layers.TileLayer', ([], {'tiles': '"""https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png"""', 'name': '"""OpenStreetMap2"""', 'attr': 'copyright', 'overlay': '(True)'}), "(tiles=\n 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', name=\n 'OpenStreetMap2', attr=copyright, overlay=True)\n", (5763, 5886), False, 'import folium\n'), ((5984, 6118), 'folium.Circle', 'folium.Circle', (['(y, x)'], {'color': 'p_color[i]', 'fill': '(True)', 'popup': '(p_popup[i] if p_popup is not None else None)', 'radius': 'p_size[i]', 'weight': '(0)'}), '((y, x), color=p_color[i], fill=True, popup=p_popup[i] if \n p_popup is not None else None, radius=p_size[i], weight=0)\n', (5997, 6118), False, 'import folium\n'), ((6337, 6466), 'folium.PolyLine', 'folium.PolyLine', ([], {'locations': '[(y, x), (ny, nx)]', 'color': 'col', 'weight': 'l_size[i]', 'popup': '(l_popup[i] if l_popup is not None else None)'}), '(locations=[(y, x), (ny, nx)], color=col, weight=l_size[i],\n popup=l_popup[i] if l_popup is not None else None)\n', (6352, 6466), False, 'import folium\n')] |
import os
from PIL import Image
import numpy as np
from scipy.interpolate import griddata
import cv2
import argparse
def getSymXYcoordinates(iuv, resolution=256, dp_uv_lookup_256_np=None):
    """Compute UV-space XY coordinate maps for *iuv*, filling holes in the
    direct mapping with the left/right-mirrored body parts.

    Returns (combined_texture, combined_mask, mirror_only_mask), where the
    last item marks pixels covered only by the mirrored mapping.
    """
    if dp_uv_lookup_256_np is None:
        dp_uv_lookup_256_np = np.load('util/dp_uv_lookup_256.npy')
    direct_xy, direct_mask = getXYcoor(
        iuv, resolution=resolution, dp_uv_lookup_256_np=dp_uv_lookup_256_np)
    mirror_xy, mirror_mask = getXYcoor(
        flip_iuv(np.copy(iuv)), resolution=resolution,
        dp_uv_lookup_256_np=dp_uv_lookup_256_np)
    # keep mirrored pixels only where the direct mapping left a hole
    mirror_only = np.clip(mirror_mask - direct_mask, a_min=0, a_max=1)
    combined_texture = (direct_xy * np.expand_dims(direct_mask, 2)
                        + mirror_xy * np.expand_dims(mirror_only, 2))
    combined_mask = np.clip(direct_mask + mirror_only, a_min=0, a_max=1)
    return combined_texture, combined_mask, mirror_only
def flip_iuv(iuv):
    """Mirror a DensePose IUV map left/right *in place* and return it stacked.

    Each part index is swapped with its symmetric counterpart; U is
    mirrored for head/hand parts and V for the torso parts.
    """
    # symmetric part id for every DensePose part (index 0 = background)
    part_symmetry = [0, 1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13,
                     16, 15, 18, 17, 20, 19, 22, 21, 24, 23]
    i_chan = iuv[:, :, 0]
    u_chan = iuv[:, :, 1]
    v_chan = iuv[:, :, 2]
    original_i = np.copy(i_chan)
    for part in range(24):
        part_id = part + 1
        if part_id not in original_i:
            continue
        selected = original_i == part_id
        if part_symmetry[part_id] != part_id:
            i_chan[selected] = part_symmetry[part_id]
        if part in (22, 23, 2, 3):  # head and hands: mirror U
            u_chan[selected] = 255 - u_chan[selected]
        if part in (0, 1):  # torso: mirror V
            v_chan[selected] = 255 - v_chan[selected]
    return np.stack([i_chan, u_chan, v_chan], 2)
def getXYcoor(iuv, resolution=256, dp_uv_lookup_256_np=None):
    """Build a resolution x resolution map from UV-texture pixels back to
    image (x, y) coordinates by interpolating the sparse mapper() output.

    Returns (uv_xy, uv_mask_d): uv_xy is a (res, res, 2) array of image
    coordinates, uv_mask_d the dilated mask of valid texture pixels.
    """
    x, y, u, v = mapper(iuv, resolution, dp_uv_lookup_256_np=dp_uv_lookup_256_np)
    nx, ny = resolution, resolution
    # get mask
    # mark the four integer neighbours of every (u, v) sample as covered
    uv_mask = np.zeros((ny,nx))
    uv_mask[np.ceil(v).astype(int),np.ceil(u).astype(int)]=1
    uv_mask[np.floor(v).astype(int),np.floor(u).astype(int)]=1
    uv_mask[np.ceil(v).astype(int),np.floor(u).astype(int)]=1
    uv_mask[np.floor(v).astype(int),np.ceil(u).astype(int)]=1
    # dilate so small gaps between samples are filled by interpolation too
    kernel = np.ones((3,3),np.uint8)
    uv_mask_d = cv2.dilate(uv_mask, kernel, iterations=1)
    # A meshgrid of pixel coordinates
    X, Y = np.meshgrid(np.arange(0, nx, 1), np.arange(0, ny, 1))
    YX = np.stack([Y, X], -1)
    ## get x,y coordinates
    xy = np.stack([x, y], -1)
    uv_xy = np.zeros((ny, nx, 2))
    uv_mask_b = uv_mask_d.astype(bool)
    # linear interpolation of image coordinates over the masked region
    uv_xy[uv_mask_b] = griddata((v, u), xy, YX[uv_mask_b], method='linear')
    # linear interpolation leaves NaNs outside the convex hull of the
    # samples; fall back to nearest-neighbour there
    nan_mask = np.isnan(uv_xy) & uv_mask_b[:, :, None]
    uv_xy[nan_mask] = griddata((v, u), xy, YX[nan_mask], method='nearest').reshape(-1)
    return uv_xy, uv_mask_d
def mapper(iuv, resolution=256, dp_uv_lookup_256_np=None):
    """Map every annotated pixel of *iuv* to its image position and its
    position inside a resolution x resolution UV texture.

    Returns (x_img, y_img, u_f, v_f) as 1-D arrays over annotated pixels.
    """
    H, W, _ = iuv.shape
    annotated = iuv[:, :, 0] > 0
    raw = iuv[annotated].astype(int)
    cols = np.linspace(0, W - 1, W, dtype=int)
    rows = np.linspace(0, H - 1, H, dtype=int)
    grid_x, grid_y = np.meshgrid(cols, rows)
    x_img = grid_x[annotated]
    y_img = grid_y[annotated]
    # part index shifted to 0..23 to index the lookup table
    part = raw[:, 0] - 1
    u_raw = raw[:, 1]
    v_raw = raw[:, 2]
    uv_smpl = dp_uv_lookup_256_np[part, v_raw, u_raw]
    # scale normalised UV into texture pixel coordinates (V axis flipped)
    u_f = uv_smpl[:, 0] * (resolution - 1)
    v_f = (1 - uv_smpl[:, 1]) * (resolution - 1)
    return x_img, y_img, u_f, v_f
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--image_file', type=str, help="path to image file to process. ex: ./train.lst")
parser.add_argument("--save_path", type=str, help="path to save the uv data")
parser.add_argument("--dp_path", type=str, help="path to densepose data")
args = parser.parse_args()
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
images = []
f = open(args.image_file, 'r')
for lines in f:
lines = lines.strip()
images.append(lines)
for i in range(len(images)):
im_name = images[i]
print ('%d/%d'%(i+1, len(images)))
dp = os.path.join(args.dp_path, im_name.split('.')[0]+'_iuv.png')
iuv = np.array(Image.open(dp))
h, w, _ = iuv.shape
if np.sum(iuv[:,:,0]==0)==(h*w):
print ('no human: invalid image %d: %s'%(i, im_name))
else:
uv_coor, uv_mask, uv_symm_mask = getSymXYcoordinates(iuv, resolution = 512)
np.save(os.path.join(args.save_path, '%s_uv_coor.npy'%(im_name.split('.')[0])), uv_coor)
mask_im = Image.fromarray((uv_mask*255).astype(np.uint8))
mask_im.save(os.path.join(args.save_path, im_name.split('.')[0]+'_uv_mask.png'))
mask_im = Image.fromarray((uv_symm_mask*255).astype(np.uint8))
mask_im.save(os.path.join(args.save_path, im_name.split('.')[0]+'_uv_symm_mask.png'))
| [
"numpy.clip",
"numpy.arange",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.stack",
"numpy.linspace",
"numpy.meshgrid",
"numpy.ceil",
"numpy.ones",
"numpy.floor",
"numpy.isnan",
"numpy.copy",
"PIL.Image.open",
"os.makedirs",
"scipy.interpolate.griddata",
"numpy.sum",
"numpy.zer... | [((523, 567), 'numpy.clip', 'np.clip', (['(f_xyMask - xyMask)'], {'a_min': '(0)', 'a_max': '(1)'}), '(f_xyMask - xyMask, a_min=0, a_max=1)\n', (530, 567), True, 'import numpy as np\n'), ((704, 748), 'numpy.clip', 'np.clip', (['(xyMask + f_xyMask)'], {'a_min': '(0)', 'a_max': '(1)'}), '(xyMask + f_xyMask, a_min=0, a_max=1)\n', (711, 748), True, 'import numpy as np\n'), ((1010, 1020), 'numpy.copy', 'np.copy', (['i'], {}), '(i)\n', (1017, 1020), True, 'import numpy as np\n'), ((1535, 1557), 'numpy.stack', 'np.stack', (['[i, u, v]', '(2)'], {}), '([i, u, v], 2)\n', (1543, 1557), True, 'import numpy as np\n'), ((1766, 1784), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (1774, 1784), True, 'import numpy as np\n'), ((2045, 2070), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (2052, 2070), True, 'import numpy as np\n'), ((2085, 2126), 'cv2.dilate', 'cv2.dilate', (['uv_mask', 'kernel'], {'iterations': '(1)'}), '(uv_mask, kernel, iterations=1)\n', (2095, 2126), False, 'import cv2\n'), ((2240, 2260), 'numpy.stack', 'np.stack', (['[Y, X]', '(-1)'], {}), '([Y, X], -1)\n', (2248, 2260), True, 'import numpy as np\n'), ((2298, 2318), 'numpy.stack', 'np.stack', (['[x, y]', '(-1)'], {}), '([x, y], -1)\n', (2306, 2318), True, 'import numpy as np\n'), ((2331, 2352), 'numpy.zeros', 'np.zeros', (['(ny, nx, 2)'], {}), '((ny, nx, 2))\n', (2339, 2352), True, 'import numpy as np\n'), ((2415, 2467), 'scipy.interpolate.griddata', 'griddata', (['(v, u)', 'xy', 'YX[uv_mask_b]'], {'method': '"""linear"""'}), "((v, u), xy, YX[uv_mask_b], method='linear')\n", (2423, 2467), False, 'from scipy.interpolate import griddata\n'), ((2802, 2837), 'numpy.linspace', 'np.linspace', (['(0)', '(W - 1)', 'W'], {'dtype': 'int'}), '(0, W - 1, W, dtype=int)\n', (2813, 2837), True, 'import numpy as np\n'), ((2844, 2879), 'numpy.linspace', 'np.linspace', (['(0)', '(H - 1)', 'H'], {'dtype': 'int'}), '(0, H - 1, H, dtype=int)\n', (2855, 2879), True, 
'import numpy as np\n'), ((2891, 2908), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2902, 2908), True, 'import numpy as np\n'), ((3283, 3308), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3306, 3308), False, 'import argparse\n'), ((256, 292), 'numpy.load', 'np.load', (['"""util/dp_uv_lookup_256.npy"""'], {}), "('util/dp_uv_lookup_256.npy')\n", (263, 292), True, 'import numpy as np\n'), ((2189, 2208), 'numpy.arange', 'np.arange', (['(0)', 'nx', '(1)'], {}), '(0, nx, 1)\n', (2198, 2208), True, 'import numpy as np\n'), ((2210, 2229), 'numpy.arange', 'np.arange', (['(0)', 'ny', '(1)'], {}), '(0, ny, 1)\n', (2219, 2229), True, 'import numpy as np\n'), ((2483, 2498), 'numpy.isnan', 'np.isnan', (['uv_xy'], {}), '(uv_xy)\n', (2491, 2498), True, 'import numpy as np\n'), ((3617, 3647), 'os.path.exists', 'os.path.exists', (['args.save_path'], {}), '(args.save_path)\n', (3631, 3647), False, 'import os\n'), ((3657, 3684), 'os.makedirs', 'os.makedirs', (['args.save_path'], {}), '(args.save_path)\n', (3668, 3684), False, 'import os\n'), ((429, 441), 'numpy.copy', 'np.copy', (['iuv'], {}), '(iuv)\n', (436, 441), True, 'import numpy as np\n'), ((625, 650), 'numpy.expand_dims', 'np.expand_dims', (['xyMask', '(2)'], {}), '(xyMask, 2)\n', (639, 650), True, 'import numpy as np\n'), ((657, 684), 'numpy.expand_dims', 'np.expand_dims', (['f_xyMask', '(2)'], {}), '(f_xyMask, 2)\n', (671, 684), True, 'import numpy as np\n'), ((2545, 2597), 'scipy.interpolate.griddata', 'griddata', (['(v, u)', 'xy', 'YX[nan_mask]'], {'method': '"""nearest"""'}), "((v, u), xy, YX[nan_mask], method='nearest')\n", (2553, 2597), False, 'from scipy.interpolate import griddata\n'), ((4020, 4034), 'PIL.Image.open', 'Image.open', (['dp'], {}), '(dp)\n', (4030, 4034), False, 'from PIL import Image\n'), ((4075, 4100), 'numpy.sum', 'np.sum', (['(iuv[:, :, 0] == 0)'], {}), '(iuv[:, :, 0] == 0)\n', (4081, 4100), True, 'import numpy as np\n'), ((1796, 1806), 'numpy.ceil', 
'np.ceil', (['v'], {}), '(v)\n', (1803, 1806), True, 'import numpy as np\n'), ((1819, 1829), 'numpy.ceil', 'np.ceil', (['u'], {}), '(u)\n', (1826, 1829), True, 'import numpy as np\n'), ((1857, 1868), 'numpy.floor', 'np.floor', (['v'], {}), '(v)\n', (1865, 1868), True, 'import numpy as np\n'), ((1881, 1892), 'numpy.floor', 'np.floor', (['u'], {}), '(u)\n', (1889, 1892), True, 'import numpy as np\n'), ((1920, 1930), 'numpy.ceil', 'np.ceil', (['v'], {}), '(v)\n', (1927, 1930), True, 'import numpy as np\n'), ((1943, 1954), 'numpy.floor', 'np.floor', (['u'], {}), '(u)\n', (1951, 1954), True, 'import numpy as np\n'), ((1982, 1993), 'numpy.floor', 'np.floor', (['v'], {}), '(v)\n', (1990, 1993), True, 'import numpy as np\n'), ((2006, 2016), 'numpy.ceil', 'np.ceil', (['u'], {}), '(u)\n', (2013, 2016), True, 'import numpy as np\n')] |
import warnings
import numpy as np
import scipy.sparse as sp
def flip_adj(adj, flips, undirected=True):
    """Toggle the entries of adjacency matrix *adj* at positions *flips*.

    With undirected=True every flip is applied symmetrically.  Dense
    (numpy) input is delegated to flip_x; sparse input must be a scipy
    sparse matrix and is returned as a new CSR matrix.
    """
    if isinstance(adj, (np.ndarray, np.matrix)):
        if undirected:
            flips = np.vstack([flips, flips[:, [1, 0]]])
        return flip_x(adj, flips)
    if not sp.isspmatrix(adj):
        raise ValueError(f"adj must be a Scipy sparse matrix, but got {type(adj)}.")
    if flips is None or len(flips) == 0:
        warnings.warn(
            "There are NO structure flips, the adjacency matrix remain unchanged.",
            RuntimeWarning,
        )
        return adj.tocsr(copy=True)
    rows, cols = np.transpose(flips)
    if undirected:
        sym_rows = np.hstack([rows, cols])
        sym_cols = np.hstack([cols, rows])
        rows, cols = sym_rows, sym_cols
    # binarise the current values before inverting them
    current = adj[(rows, cols)].A
    current[current > 0.] = 1.
    current[current < 0.] = 0.
    flipped = adj.tolil(copy=True)  # LIL supports efficient item assignment
    flipped[(rows, cols)] = 1. - current
    flipped = flipped.tocsr(copy=False)
    flipped.eliminate_zeros()
    return flipped
def flip_x(matrix, flips):
    """Return a copy of dense *matrix* with entries at *flips* toggled
    (x -> 1 - x), clamped into [0, 1]; the input is left untouched."""
    if flips is None or len(flips) == 0:
        warnings.warn(
            "There are NO flips, the matrix remain unchanged.",
            RuntimeWarning,
        )
        return matrix.copy()
    result = matrix.copy()
    # transpose (n, 2) pairs into a (rows, cols) index tuple
    idx = tuple(np.transpose(flips))
    result[idx] = 1. - result[idx]
    result[result < 0] = 0
    result[result > 1] = 1
    return result
def add_edges(adj, edges, undirected=True):
    """Return a copy of adjacency matrix *adj* with *edges* added.

    Works for dense (numpy) and sparse (scipy) matrices; with
    undirected=True each edge is added in both directions.  Adding an
    already-present edge leaves it present (weights are clipped to 1).
    """
    if isinstance(adj, (np.ndarray, np.matrix)):
        if edges is None or len(edges) == 0:
            warnings.warn(
                "There are NO structure edges, the adjacency matrix remain unchanged.",
                RuntimeWarning,
            )
            return adj.copy()
        # BUG FIX: the previous dense branch called flip_x, which *toggled*
        # the entries and therefore removed edges that already existed.
        # Adding an edge must set it to 1 regardless of its current value,
        # matching the sparse branch below.
        edges = np.asarray(edges)
        if undirected:
            edges = np.vstack([edges, edges[:, [1, 0]]])
        result = adj.copy()
        result[tuple(np.transpose(edges))] = 1
        return result
    elif not sp.isspmatrix(adj):
        raise ValueError(f"adj must be a Scipy sparse matrix, but got {type(adj)}.")
    if edges is None or len(edges) == 0:
        warnings.warn(
            "There are NO structure edges, the adjacency matrix remain unchanged.",
            RuntimeWarning,
        )
        return adj.tocsr(copy=True)
    rows, cols = np.transpose(edges)
    if undirected:
        rows, cols = np.hstack([rows, cols]), np.hstack([cols, rows])
    datas = np.ones(rows.size, dtype=adj.dtype)
    adj = adj.tocoo(copy=True)
    rows, cols = np.hstack([adj.row, rows]), np.hstack([adj.col, cols])
    datas = np.hstack([adj.data, datas])
    adj = sp.csr_matrix((datas, (rows, cols)), shape=adj.shape)
    # duplicate entries were summed by the COO->CSR conversion; clip to 1
    adj[adj > 1] = 1.
    adj.eliminate_zeros()
    return adj
def remove_edges(adj, edges, undirected=True):
    """Return a copy of adjacency matrix *adj* with *edges* removed.

    Works for dense (numpy) and sparse (scipy) matrices; with
    undirected=True each edge is removed in both directions.  Removing an
    absent edge is a no-op.
    """
    if isinstance(adj, (np.ndarray, np.matrix)):
        if edges is None or len(edges) == 0:
            warnings.warn(
                "There are NO structure edges, the adjacency matrix remain unchanged.",
                RuntimeWarning,
            )
            return adj.copy()
        # BUG FIX: the previous dense branch called flip_x, which *toggled*
        # the entries and therefore created edges where none existed.
        # Removing an edge must set it to 0 regardless of its current value,
        # matching the sparse branch below.
        edges = np.asarray(edges)
        if undirected:
            edges = np.vstack([edges, edges[:, [1, 0]]])
        result = adj.copy()
        result[tuple(np.transpose(edges))] = 0
        return result
    elif not sp.isspmatrix(adj):
        raise ValueError(f"adj must be a Scipy sparse matrix, but got {type(adj)}.")
    if edges is None or len(edges) == 0:
        warnings.warn(
            "There are NO structure edges, the adjacency matrix remain unchanged.",
            RuntimeWarning,
        )
        return adj.tocsr(copy=True)
    rows, cols = np.transpose(edges)
    if undirected:
        rows, cols = np.hstack([rows, cols]), np.hstack([cols, rows])
    # append -1 entries; the COO->CSR conversion sums them with existing 1s
    datas = -np.ones(rows.size, dtype=adj.dtype)
    adj = adj.tocoo(copy=True)
    rows, cols = np.hstack([adj.row, rows]), np.hstack([adj.col, cols])
    datas = np.hstack([adj.data, datas])
    adj = sp.csr_matrix((datas, (rows, cols)), shape=adj.shape)
    # removing an absent edge yields -1; clip back to 0
    adj[adj < 0] = 0.
    adj.eliminate_zeros()
    return adj
| [
"scipy.sparse.isspmatrix",
"numpy.ones",
"numpy.hstack",
"numpy.vstack",
"warnings.warn",
"scipy.sparse.csr_matrix",
"numpy.transpose"
] | [((635, 654), 'numpy.transpose', 'np.transpose', (['flips'], {}), '(flips)\n', (647, 654), True, 'import numpy as np\n'), ((1957, 1976), 'numpy.transpose', 'np.transpose', (['edges'], {}), '(edges)\n', (1969, 1976), True, 'import numpy as np\n'), ((2078, 2113), 'numpy.ones', 'np.ones', (['rows.size'], {'dtype': 'adj.dtype'}), '(rows.size, dtype=adj.dtype)\n', (2085, 2113), True, 'import numpy as np\n'), ((2234, 2262), 'numpy.hstack', 'np.hstack', (['[adj.data, datas]'], {}), '([adj.data, datas])\n', (2243, 2262), True, 'import numpy as np\n'), ((2273, 2326), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(datas, (rows, cols))'], {'shape': 'adj.shape'}), '((datas, (rows, cols)), shape=adj.shape)\n', (2286, 2326), True, 'import scipy.sparse as sp\n'), ((2969, 2988), 'numpy.transpose', 'np.transpose', (['edges'], {}), '(edges)\n', (2981, 2988), True, 'import numpy as np\n'), ((3247, 3275), 'numpy.hstack', 'np.hstack', (['[adj.data, datas]'], {}), '([adj.data, datas])\n', (3256, 3275), True, 'import numpy as np\n'), ((3286, 3339), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(datas, (rows, cols))'], {'shape': 'adj.shape'}), '((datas, (rows, cols)), shape=adj.shape)\n', (3299, 3339), True, 'import scipy.sparse as sp\n'), ((444, 554), 'warnings.warn', 'warnings.warn', (['"""There are NO structure flips, the adjacency matrix remain unchanged."""', 'RuntimeWarning'], {}), "(\n 'There are NO structure flips, the adjacency matrix remain unchanged.',\n RuntimeWarning)\n", (457, 554), False, 'import warnings\n'), ((1046, 1131), 'warnings.warn', 'warnings.warn', (['"""There are NO flips, the matrix remain unchanged."""', 'RuntimeWarning'], {}), "('There are NO flips, the matrix remain unchanged.',\n RuntimeWarning)\n", (1059, 1131), False, 'import warnings\n'), ((1242, 1261), 'numpy.transpose', 'np.transpose', (['flips'], {}), '(flips)\n', (1254, 1261), True, 'import numpy as np\n'), ((1766, 1876), 'warnings.warn', 'warnings.warn', (['"""There are NO structure edges, the 
adjacency matrix remain unchanged."""', 'RuntimeWarning'], {}), "(\n 'There are NO structure edges, the adjacency matrix remain unchanged.',\n RuntimeWarning)\n", (1779, 1876), False, 'import warnings\n'), ((2167, 2193), 'numpy.hstack', 'np.hstack', (['[adj.row, rows]'], {}), '([adj.row, rows])\n', (2176, 2193), True, 'import numpy as np\n'), ((2195, 2221), 'numpy.hstack', 'np.hstack', (['[adj.col, cols]'], {}), '([adj.col, cols])\n', (2204, 2221), True, 'import numpy as np\n'), ((2778, 2888), 'warnings.warn', 'warnings.warn', (['"""There are NO structure edges, the adjacency matrix remain unchanged."""', 'RuntimeWarning'], {}), "(\n 'There are NO structure edges, the adjacency matrix remain unchanged.',\n RuntimeWarning)\n", (2791, 2888), False, 'import warnings\n'), ((3091, 3126), 'numpy.ones', 'np.ones', (['rows.size'], {'dtype': 'adj.dtype'}), '(rows.size, dtype=adj.dtype)\n', (3098, 3126), True, 'import numpy as np\n'), ((3180, 3206), 'numpy.hstack', 'np.hstack', (['[adj.row, rows]'], {}), '([adj.row, rows])\n', (3189, 3206), True, 'import numpy as np\n'), ((3208, 3234), 'numpy.hstack', 'np.hstack', (['[adj.col, cols]'], {}), '([adj.col, cols])\n', (3217, 3234), True, 'import numpy as np\n'), ((198, 234), 'numpy.vstack', 'np.vstack', (['[flips, flips[:, [1, 0]]]'], {}), '([flips, flips[:, [1, 0]]])\n', (207, 234), True, 'import numpy as np\n'), ((281, 299), 'scipy.sparse.isspmatrix', 'sp.isspmatrix', (['adj'], {}), '(adj)\n', (294, 299), True, 'import scipy.sparse as sp\n'), ((695, 718), 'numpy.hstack', 'np.hstack', (['[rows, cols]'], {}), '([rows, cols])\n', (704, 718), True, 'import numpy as np\n'), ((720, 743), 'numpy.hstack', 'np.hstack', (['[cols, rows]'], {}), '([cols, rows])\n', (729, 743), True, 'import numpy as np\n'), ((1520, 1556), 'numpy.vstack', 'np.vstack', (['[edges, edges[:, [1, 0]]]'], {}), '([edges, edges[:, [1, 0]]])\n', (1529, 1556), True, 'import numpy as np\n'), ((1603, 1621), 'scipy.sparse.isspmatrix', 'sp.isspmatrix', (['adj'], {}), 
'(adj)\n', (1616, 1621), True, 'import scipy.sparse as sp\n'), ((2017, 2040), 'numpy.hstack', 'np.hstack', (['[rows, cols]'], {}), '([rows, cols])\n', (2026, 2040), True, 'import numpy as np\n'), ((2042, 2065), 'numpy.hstack', 'np.hstack', (['[cols, rows]'], {}), '([cols, rows])\n', (2051, 2065), True, 'import numpy as np\n'), ((2532, 2568), 'numpy.vstack', 'np.vstack', (['[edges, edges[:, [1, 0]]]'], {}), '([edges, edges[:, [1, 0]]])\n', (2541, 2568), True, 'import numpy as np\n'), ((2615, 2633), 'scipy.sparse.isspmatrix', 'sp.isspmatrix', (['adj'], {}), '(adj)\n', (2628, 2633), True, 'import scipy.sparse as sp\n'), ((3029, 3052), 'numpy.hstack', 'np.hstack', (['[rows, cols]'], {}), '([rows, cols])\n', (3038, 3052), True, 'import numpy as np\n'), ((3054, 3077), 'numpy.hstack', 'np.hstack', (['[cols, rows]'], {}), '([cols, rows])\n', (3063, 3077), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torchvision import transforms
import os
import torch.nn.functional as F
import cv2
from ._utils import load_models
from classifier.fracture_detector.data import get_wr_tta
# for debug
import matplotlib.pyplot as plt
class FractureDetector(object):
    """Ensemble fracture classifier that also produces a Grad-CAM heatmap
    localising the evidence for the positive (fracture) class."""
    # NOTE(review): device is hard-coded; assumes a CUDA GPU is available.
    device = 'cuda:0'
    def __init__(self, config, snapshots, side):
        """Build the TTA transform and load the model ensemble for *side*."""
        self.inference_transform = get_wr_tta(config, side=side)
        # Loading the models
        self.models = load_models(config, snapshots, self.device, side=side)
        self.config = config
    def detect(self, img):
        """Run the ensemble on a single-channel image.

        Returns (mean_fracture_probability, heatmap) where heatmap is an
        h x w array normalised into [0, 1].
        """
        h, w = img.shape
        # TTA transform produces a batch of n variants of the input image
        img = self.inference_transform(img).to(self.device)
        n, c, _, _ = img.shape
        preds = list()
        gcams_list = list()
        T = 1.0  # softmax temperature
        for model in self.models:
            model.eval()
            with torch.no_grad():
                # conv activations of all-but-last encoder stage, then features
                acts = model.encoder[:-1](img)
                features = model.encoder[-1](acts).view(n, -1)
            with torch.enable_grad():
                # hook captures d(softmax)/d(features) for Grad-CAM
                grads = []
                features.requires_grad = True
                features.register_hook(lambda g: grads.append(g))
                logits = model.classifier(features)
                sm = torch.softmax(logits / T, dim=1)
                grad_y = torch.zeros_like(sm)
                grad_y[:, 1] = 1  # back-propagate only the positive class
                sm.backward(grad_y)
            pred = sm[:, 1].detach().cpu().numpy()
            preds.append(pred.mean())
            # Grad-CAM: weight the conv activations by the captured gradients
            grads = grads[0]
            grads = grads.unsqueeze(-1)
            grads = grads.unsqueeze(-1)
            grads = grads * acts
            grads = grads.sum(1)
            grads = F.relu(grads)
            grads = grads.unsqueeze(1)
            # gcams = F.relu((grads[0].unsqueeze(-1).unsqueeze(-1) * acts).sum(1)).unsqueeze(1)
            gcams = F.interpolate(grads, size=(img.shape[-2], img.shape[-1]), mode='bilinear', align_corners=True).to('cpu').squeeze().numpy()
            gcams_list.append(gcams)
        # average the per-model CAMs, then fade a 15px border to zero
        # to suppress edge artefacts
        gcams = np.mean(gcams_list, axis=0)
        gcams_h, gcams_w = gcams[0].shape
        border_z = 15
        mask_gcam = np.zeros((gcams_h, gcams_w))
        mask_gcam[border_z:-border_z, border_z:-border_z] = np.ones((gcams_h - 2 * border_z, gcams_w - 2 * border_z))
        mask_gcam = cv2.GaussianBlur(mask_gcam, (5, 5), 25)
        gcams = [gcam * mask_gcam for gcam in gcams]
        heatmap = np.zeros((h, w))
        size = self.config.dataset.crop_size
        x1 = w // 2 - size // 2
        y1 = h // 2 - size // 2
        # gradcam + TTA: flips and 5-crop
        # indices 0-4 appear to be the 5 crops and 5-9 their horizontal
        # flips — TODO confirm the ordering matches get_wr_tta
        # upper-left crop
        heatmap[0:size, 0:size] += gcams[1]
        heatmap[0:size, 0:size] += cv2.flip(gcams[6], 1)
        # upper-right crop
        heatmap[0:size, w - size:w] += gcams[2]
        heatmap[0:size, w - size:w] += cv2.flip(gcams[7], 1)
        # bottom-left crop
        heatmap[h - size:h, 0:size] += gcams[3]
        heatmap[h - size:h, 0:size] += cv2.flip(gcams[8], 1)
        # bottom-right crop
        heatmap[h - size:h, w - size:w] += gcams[4]
        heatmap[h - size:h, w - size:w] += cv2.flip(gcams[9], 1)
        # Center crop
        heatmap[y1:y1 + size, x1:x1 + size] += gcams[0]
        heatmap[y1:y1 + size, x1:x1 + size] += cv2.flip(gcams[5], 1)
        # rescale the accumulated heatmap into [0, 1]
        heatmap -= heatmap.min()
        if heatmap.max() != 0:
            heatmap /= heatmap.max()
        final_pred = np.mean(preds)
        return final_pred, heatmap
return final_pred, heatmap | [
"numpy.mean",
"numpy.ones",
"cv2.flip",
"torch.enable_grad",
"torch.softmax",
"numpy.zeros",
"torch.nn.functional.relu",
"torch.nn.functional.interpolate",
"torch.no_grad",
"torch.zeros_like",
"cv2.GaussianBlur",
"classifier.fracture_detector.data.get_wr_tta"
] | [((396, 425), 'classifier.fracture_detector.data.get_wr_tta', 'get_wr_tta', (['config'], {'side': 'side'}), '(config, side=side)\n', (406, 425), False, 'from classifier.fracture_detector.data import get_wr_tta\n'), ((2011, 2038), 'numpy.mean', 'np.mean', (['gcams_list'], {'axis': '(0)'}), '(gcams_list, axis=0)\n', (2018, 2038), True, 'import numpy as np\n'), ((2123, 2151), 'numpy.zeros', 'np.zeros', (['(gcams_h, gcams_w)'], {}), '((gcams_h, gcams_w))\n', (2131, 2151), True, 'import numpy as np\n'), ((2212, 2269), 'numpy.ones', 'np.ones', (['(gcams_h - 2 * border_z, gcams_w - 2 * border_z)'], {}), '((gcams_h - 2 * border_z, gcams_w - 2 * border_z))\n', (2219, 2269), True, 'import numpy as np\n'), ((2290, 2329), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['mask_gcam', '(5, 5)', '(25)'], {}), '(mask_gcam, (5, 5), 25)\n', (2306, 2329), False, 'import cv2\n'), ((2403, 2419), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (2411, 2419), True, 'import numpy as np\n'), ((2678, 2699), 'cv2.flip', 'cv2.flip', (['gcams[6]', '(1)'], {}), '(gcams[6], 1)\n', (2686, 2699), False, 'import cv2\n'), ((2815, 2836), 'cv2.flip', 'cv2.flip', (['gcams[7]', '(1)'], {}), '(gcams[7], 1)\n', (2823, 2836), False, 'import cv2\n'), ((2952, 2973), 'cv2.flip', 'cv2.flip', (['gcams[8]', '(1)'], {}), '(gcams[8], 1)\n', (2960, 2973), False, 'import cv2\n'), ((3070, 3091), 'cv2.flip', 'cv2.flip', (['gcams[9]', '(1)'], {}), '(gcams[9], 1)\n', (3078, 3091), False, 'import cv2\n'), ((3217, 3238), 'cv2.flip', 'cv2.flip', (['gcams[5]', '(1)'], {}), '(gcams[5], 1)\n', (3225, 3238), False, 'import cv2\n'), ((3362, 3376), 'numpy.mean', 'np.mean', (['preds'], {}), '(preds)\n', (3369, 3376), True, 'import numpy as np\n'), ((1666, 1679), 'torch.nn.functional.relu', 'F.relu', (['grads'], {}), '(grads)\n', (1672, 1679), True, 'import torch.nn.functional as F\n'), ((848, 863), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (861, 863), False, 'import torch\n'), ((992, 1011), 'torch.enable_grad', 
'torch.enable_grad', ([], {}), '()\n', (1009, 1011), False, 'import torch\n'), ((1226, 1258), 'torch.softmax', 'torch.softmax', (['(logits / T)'], {'dim': '(1)'}), '(logits / T, dim=1)\n', (1239, 1258), False, 'import torch\n'), ((1284, 1304), 'torch.zeros_like', 'torch.zeros_like', (['sm'], {}), '(sm)\n', (1300, 1304), False, 'import torch\n'), ((1835, 1933), 'torch.nn.functional.interpolate', 'F.interpolate', (['grads'], {'size': '(img.shape[-2], img.shape[-1])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(grads, size=(img.shape[-2], img.shape[-1]), mode='bilinear',\n align_corners=True)\n", (1848, 1933), True, 'import torch.nn.functional as F\n')] |
import numpy as np
from utils.text_analyzer import TextStats
from utils.text_analyzer import compute_score
from utils.dictionary import Dictionary
from itertools import permutations as permutations
def crack_transposition(stats: TextStats, dictionary: Dictionary, verbose: bool=False):
    """
    Method to crack table transpositions. It assumes that ciphertext was obtained by writing plaintext to rows of given
    size and then read by columns. If there are empty spaces in the table, they are filled by fake letters. This cracker
    identifies the correct plaintext by counting identical letters at it's end, and by comparing it to the dictionary.
    All possible table sizes are evaluated, as it doesn't take much time and size detection function that was used to
    get one table size can be confused if the filler uses random characters instead of all Xs.
    :param stats: TextStats object of the text to be cracked
    :param dictionary: Dictionary constructed from the language of the plaintext
    :param verbose: True to print some outputs, eg other candidates for plaintext
    :return: plaintext, cipher name ("transposition_table"), parameters (table height)
    """
    solutions = []
    # this doesn't work all that well, and it is found later on anyway
    # likely_size = guess_table_size(stats)
    # if likely_size:
    #     solution = read_from_table(stats, likely_size)
    #     language_score = compute_score(solution, dictionary)
    #     solutions.append((language_score, solution, "guessed"+str(likely_size)))
    # try all possible table dimensions
    for size in range(2, stats.N):
        # table size must be divisible by the dimensions
        if not stats.N % size:
            solution = read_from_table(stats, size)
            # how many characters repeat at the end of the plaintext
            tail_score = count_tailing_letters(solution)
            language_score = compute_score(solution, dictionary)
            solutions.append((tail_score, language_score, solution, "transposition_table", [size]))
    # sort by language, but use tail_score for primary sorting
    # (Python's sort is stable: sorting by language score first and tail score second
    #  orders candidates by tail score, with language score as the tie-breaker)
    solutions.sort(key=lambda s: s[1], reverse=True)
    solutions.sort(key=lambda s: s[0], reverse=True)
    if verbose:
        for i in range(0, len(solutions)):
            print(solutions[i][0], solutions[i][3], solutions[i][2])
    # NOTE(review): if stats.N is prime, no size divides it, solutions stays empty and
    # this raises IndexError -- confirm callers never pass prime-length ciphertexts
    return solutions[0][2], solutions[0][3], solutions[0][4]
def read_from_table(stats: TextStats, table_width: int):
    """Write the ciphertext into a table row by row, then read it back column by column."""
    rows = stats.N // table_width
    letters = np.array(list(stats.text))
    columns_first = np.reshape(letters, (rows, table_width)).T
    flattened = columns_first.reshape(1, stats.N).tolist()
    return "".join(flattened[0])
def crack_transposition_with_column_scrambling(stats: TextStats, dictionary: Dictionary, verbose: bool=False):
    """
    Method to crack table transpositions with column shuffling. The cipher is the same as table transposition above, but
    the columns are shuffled before message is read. The method will try all permutations of columns for tables with 7
    or less columns. Larger tables are skipped, because it would take ages to compute.
    :param stats: TextStats object of the text to be cracked
    :param dictionary: Dictionary constructed from the language of the plaintext
    :param verbose: True to print some outputs, eg other candidates for plaintext
    :return: plaintext, cipher name ("transposition_table_shuffled"), parameters: [table height, permutation]
    """
    solutions = []
    for size in range(2, stats.N):
        if not stats.N % size:
            n_cols = int(stats.N / size)
            # 8! = 40320 permutations and up is considered too expensive
            if n_cols > 7:
                if verbose:
                    print("Can't guess with table size", size, "- too many column permutations, skipping")
                continue
            # factorial is only needed for the verbose progress message below
            from math import factorial
            if verbose:
                print(n_cols, factorial(n_cols))
            # prepare a table to be shuffled
            cipher_table = generate_table(stats, size)
            # create permutations of [0, 1, ..., n_cols]
            for permutation in permutations(list(range(0, n_cols))):
                # apply permutation and read message
                shuffled_table = cipher_table[:, permutation]
                table = np.reshape(shuffled_table, (1, stats.N)).tolist()
                solution = "".join(table[0])
                language_score = compute_score(solution, dictionary)
                solutions.append((language_score, solution, size, permutation))
    # best language score first
    solutions.sort(key=lambda s: s[0], reverse=True)
    if verbose:
        for i in range(0, min(len(solutions), 50)):
            print(solutions[i][0], solutions[i][1], solutions[i][2], solutions[i][3])
    # NOTE(review): if every candidate table had more than 7 columns, solutions is
    # empty and this raises IndexError -- confirm this cannot happen for valid inputs
    return solutions[0][1], "transposition_table_shuffled", [solutions[0][2], solutions[0][3]]
def generate_table(stats: TextStats, table_width: int):
    """Lay the ciphertext out as a numpy character grid whose columns can then be permuted."""
    height = stats.N // table_width
    letters = np.array(list(stats.text))
    return np.reshape(letters, (height, table_width)).T
def guess_table_size(text: TextStats, filler: str="X"):
    """
    Finds how often filler characters appear. This can be used to detect table size, but turned out to be useless.
    Bug fix: when no filler character occurred at all, the original fell through to
    ``text.N % -1`` (which is 0 in Python) and returned -1 -- a truthy value that callers
    would misuse as a table size. It now reports failure explicitly.
    :param text: ciphertext
    :param filler: character to work with
    :return: likely table size, or False if it cannot be determined
    """
    last_filler_pos = 0
    distances = {}
    best_size = -1
    best_size_value = -1
    for i in range(0, text.N):
        c = text.text[i]
        if c == filler:
            # distance to the previous filler (or to the text start for the first one)
            distance = i - last_filler_pos
            distances[distance] = distances.get(distance, 0) + 1
            last_filler_pos = i
            if distances[distance] > best_size_value:
                best_size = distance
                best_size_value = distances[distance]
    if best_size < 0:
        # no filler character found: size cannot be guessed
        return False
    if text.N % best_size:
        return False
    return best_size
def count_tailing_letters(text: str) -> int:
    """
    Count how many copies of the final character appear consecutively at the end of the text.
    Bug fix: the original loop over ``range(1, len(text))`` could never report more than
    ``len(text) - 1``, so a string made of one repeated character (e.g. "XXXX") was
    under-counted by at least one; a single-character string was reported as 0.
    :param text: text to inspect
    :return: number of identical trailing characters (0 for an empty string)
    """
    if not text:
        return 0
    last_char = text[-1]
    count = 0
    for ch in reversed(text):
        if ch != last_char:
            break
        count += 1
    return count
| [
"math.factorial",
"numpy.reshape",
"utils.text_analyzer.compute_score"
] | [((2684, 2715), 'numpy.reshape', 'np.reshape', (['f', '(m, table_width)'], {}), '(f, (m, table_width))\n', (2694, 2715), True, 'import numpy as np\n'), ((5093, 5124), 'numpy.reshape', 'np.reshape', (['f', '(m, table_width)'], {}), '(f, (m, table_width))\n', (5103, 5124), True, 'import numpy as np\n'), ((1919, 1954), 'utils.text_analyzer.compute_score', 'compute_score', (['solution', 'dictionary'], {}), '(solution, dictionary)\n', (1932, 1954), False, 'from utils.text_analyzer import compute_score\n'), ((2729, 2767), 'numpy.reshape', 'np.reshape', (['cipher_table', '(1, stats.N)'], {}), '(cipher_table, (1, stats.N))\n', (2739, 2767), True, 'import numpy as np\n'), ((4515, 4550), 'utils.text_analyzer.compute_score', 'compute_score', (['solution', 'dictionary'], {}), '(solution, dictionary)\n', (4528, 4550), False, 'from utils.text_analyzer import compute_score\n'), ((4002, 4019), 'math.factorial', 'factorial', (['n_cols'], {}), '(n_cols)\n', (4011, 4019), False, 'from math import factorial\n'), ((4387, 4427), 'numpy.reshape', 'np.reshape', (['shuffled_table', '(1, stats.N)'], {}), '(shuffled_table, (1, stats.N))\n', (4397, 4427), True, 'import numpy as np\n')] |
from kokoropy import request, draw_matplotlib_figure, Autoroute_Controller, \
load_view
class My_Controller(Autoroute_Controller):
    '''
    Plotting example
    '''
    def action_plot(self):
        """Render sine and cosine curves over [0, max_range) as a matplotlib figure.

        The upper bound is 6.28 (~2*pi) by default and can be overridden with the
        ``range`` query parameter.
        """
        max_range = 6.28
        if 'range' in request.GET:
            max_range = float(request.GET['range'])
        # import things
        import numpy as np
        import matplotlib.pyplot as plt
        # determine x, sin(x) and cos(x)
        x  = np.arange(0, max_range, 0.1)
        y1 = np.sin(x)
        y2 = np.cos(x)
        # make figure with two stacked subplots
        fig = plt.figure()
        fig.subplots_adjust(hspace = 0.5, wspace = 0.5)
        fig.suptitle('The legendary sine and cosine curves')
        # first subplot: the curve as a line plus red markers at the sample points
        ax = fig.add_subplot(2, 1, 1)
        ax.plot(x, y1, 'b')
        ax.plot(x, y1, 'ro')
        ax.set_title ('y = sin(x)')
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        # second subplot
        ax = fig.add_subplot(2, 1, 2)
        ax.plot(x, y2, 'b')
        ax.plot(x, y2, 'ro')
        ax.set_title ('y = cos(x)')
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        # make canvas (kokoropy serializes the figure into an HTTP response)
        return draw_matplotlib_figure(fig)
def action_index(self):
return load_view('example','plotting') | [
"kokoropy.load_view",
"kokoropy.draw_matplotlib_figure",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.sin",
"numpy.arange"
] | [((457, 485), 'numpy.arange', 'np.arange', (['(0)', 'max_range', '(0.1)'], {}), '(0, max_range, 0.1)\n', (466, 485), True, 'import numpy as np\n'), ((499, 508), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (505, 508), True, 'import numpy as np\n'), ((522, 531), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (528, 531), True, 'import numpy as np\n'), ((568, 580), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (578, 580), True, 'import matplotlib.pyplot as plt\n'), ((1154, 1181), 'kokoropy.draw_matplotlib_figure', 'draw_matplotlib_figure', (['fig'], {}), '(fig)\n', (1176, 1181), False, 'from kokoropy import request, draw_matplotlib_figure, Autoroute_Controller, load_view\n'), ((1226, 1258), 'kokoropy.load_view', 'load_view', (['"""example"""', '"""plotting"""'], {}), "('example', 'plotting')\n", (1235, 1258), False, 'from kokoropy import request, draw_matplotlib_figure, Autoroute_Controller, load_view\n')] |
import numpy as np
def find_unimodal_range(x0, h, f):
    """Expand an interval around x0 (doubling the step h) until it brackets a minimum of f."""
    left = x0 - h
    right = x0 + h
    mid = x0
    step = 1
    f_mid = f(x0)
    f_left = f(left)
    f_right = f(right)
    if f_left > f_mid and f_mid < f_right:
        # the middle point is already below both neighbours: interval is unimodal
        return left, right
    if f_mid > f_right:
        # the function descends to the right: expand rightwards
        while True:
            left, mid, f_mid = mid, right, f_right
            step *= 2
            right = x0 + h * step
            f_right = f(right)
            if f_mid <= f_right:
                break
    else:
        # otherwise expand leftwards
        while True:
            right, mid, f_mid = mid, left, f_left
            step *= 2
            left = x0 - h * step
            f_left = f(left)
            if f_mid <= f_left:
                break
    return left, right
def golden_ratio_search(f, a, b=None, e=1e-6, verbose=True):
    """Golden-section search for the minimum of a unimodal f; returns the final bracket (a, b)."""
    if b is None:
        # no explicit interval given: bracket the minimum around the starting point first
        a, b = find_unimodal_range(a, 1, f)
    ratio = 0.5 * (np.sqrt(5) - 1)
    c = b - ratio * (b - a)
    d = a + ratio * (b - a)
    f_c, f_d = f(c), f(d)
    while b - a > e:
        # debug trace of the current bracket
        if verbose is True:
            print(f'a={a} f(a)={f(a)} b={b} f(b)={f(b)} c={c} f(c)={f(c)} d={d} f(d)={f(d)}')
        if f_c < f_d:
            b, d = d, c
            c = b - ratio * (b - a)
            f_c, f_d = f(c), f_c
        else:
            a, c = c, d
            d = a + ratio * (b - a)
            f_c, f_d = f_d, f(d)
    return a, b
def e_i(n, i):
    """Return the i-th standard basis (unit) vector of length n as a float array."""
    return (np.arange(n) == i).astype(float)
def vectorize(x0):
    """
    Wrap a scalar starting point into a 1-D numpy array; arrays pass through unchanged.
    Bug fix: the original only recognised ``float``, so an ``int`` starting point slipped
    through unwrapped and later crashed on ``len()`` in the search routines; plain ints
    are now wrapped as well (backward compatible for floats and arrays).
    """
    if isinstance(x0, (int, float)):
        return np.array([x0])
    return x0
def coord_axes_search(x0, f, e=1e-9, verbose=True):
    """
    Coordinate-descent minimisation of f: a golden-ratio line search is performed along
    each coordinate axis in turn until the point moves by less than e (Euclidean norm).
    Bug fix: the original did ``x = x0`` followed by ``x += ...``, which mutated the
    caller's array in place (and failed on integer arrays); we now work on a float copy.
    :param x0: starting point (scalar or 1-D array)
    :param f: objective function of a vector argument
    :param e: convergence tolerance on the step length
    :param verbose: forwarded to the inner golden-ratio search (prints its brackets)
    :return: approximate minimiser as a numpy array
    """
    x0 = vectorize(x0)
    x = np.array(x0, dtype=float)  # float copy: never mutate the caller's array
    n = len(x0)
    while True:
        x_prev = np.array(x)
        for i in range(n):
            ei = e_i(n, i)
            f_lamda = lambda lamda: f(x + lamda * ei)
            a, b = golden_ratio_search(f_lamda, 1, verbose=verbose)
            # step to the midpoint of the final bracket along axis i
            x += (a + b) / 2.0 * ei
        if np.linalg.norm(x - x_prev, ord=2) <= e:
            break
    return x
def calculate_simplex(x0, step):
    """Build the n+1 starting points of a simplex: x0 plus x0 shifted by `step` along each axis."""
    n = len(x0)
    return [x0] + [x0 + e_i(n, axis) * step for axis in range(n)]
def find_min_index(arr, f=lambda x: x):
    """Return the index of the element of arr that minimises f (the first one on ties; 0 for empty arr)."""
    return min(range(len(arr)), key=lambda idx: f(arr[idx]), default=0)
def simplex_nelder_mead(f, x0, step=1, alpha=1, beta=0.5, gamma=2, sigma=0.5, e=1e-6, verbose=True):
    """Nelder-Mead simplex minimisation of f starting from x0.

    alpha/beta/gamma/sigma are the reflection/contraction/expansion/shrink coefficients;
    iteration stops when the RMS spread of f over the simplex drops below e.
    Returns the centroid of the final simplex (excluding the worst point).
    """
    x0 = vectorize(x0)
    X = calculate_simplex(x0, step)
    assert len(X) == len(x0) + 1 # TODO remove this
    n = len(x0)
    # maximising -f finds the worst (highest-f) vertex
    f_negative = lambda x: -f(x)
    while True:
        h = find_min_index(X, f=f_negative)  # index of the worst vertex
        l = find_min_index(X, f=f)           # index of the best vertex
        # centroid of all vertices except the worst one
        xc = 1/n * sum([X[i] for i in range(n+1) if i != h])
        # debug printout
        if verbose is True:
            print(f'xc={xc} f(xc)={f(xc)}')
        # reflection of the worst vertex through the centroid
        xr = (1+alpha)*xc - alpha*X[h]
        if f(xr) < f(X[l]):
            # NOTE(review): the textbook Nelder-Mead expansion is (1-gamma)*xc + gamma*xr;
            # the minus sign here looks suspicious -- confirm against the reference material
            xe = (1-gamma)*xc - gamma*xr # expansion(x)
            if f(xe) < f(X[l]):
                X[h] = xe
            else:
                X[h] = xr
        else:
            if all(f(xr)>f(X[j]) for j in range(n+1) if j!=h):
                if f(xr) < f(X[h]):
                    X[h] = xr
                # contraction towards the (possibly updated) worst vertex
                xk = (1-beta)*xc + beta*X[h]
                if f(xk) < f(X[h]):
                    X[h] = xk
                else:
                    # just remember a copy of the best vertex before shrinking
                    X_l = 1.0 * X[l]
                    # shrink the whole simplex towards the best vertex
                    X = [sigma*(X[i]+X_l) for i in range(n+1)]
            else:
                X[h] = xr
        # RMS deviation of f over the simplex relative to the centroid value
        div = 1/n * sum([(f(X[i])-f(xc))**2 for i in range(n+1)])
        if np.sqrt(div) <= e:
            break
    return xc
def __search(f, xp, dx, n):
    """One exploratory Hooke-Jeeves move: probe each coordinate of xp by +dx[i], then
    -dx[i], keeping whichever probe does not increase f; returns the probed copy."""
    x = xp * 1.0  # numpy: multiplying by 1.0 yields a fresh float copy, xp is untouched
    for i in range(n):
        P = f(x)
        x[i] += dx[i]
        N = f(x)
        if N > P:
            # the + step made things worse: try the - step instead
            x[i] -= 2*dx[i]
            N = f(x)
            if N > P:
                # neither direction helped: restore the original coordinate
                x[i] += dx[i]
    return x
def hook_jeeves_search(f, x0, dx=0.5, e=1e-6, verbose=True):
    """Hooke-Jeeves pattern search: alternate exploratory moves with pattern steps,
    halving the step vector dx whenever exploration fails, until dx <= e element-wise.

    :param f: objective function of a vector argument
    :param x0: starting point (scalar or 1-D array)
    :param dx: initial step, scalar or per-coordinate vector
    :param e: stopping tolerance, scalar or per-coordinate vector
    :param verbose: print the base/pattern/exploratory points each iteration
    :return: the best point found (numpy array)
    """
    x0 = vectorize(x0)
    xp = x0
    xb = x0
    n = len(x0)
    # if dx and e were left as scalar defaults, expand them into per-coordinate vectors
    if isinstance(dx, float):
        dx = dx * np.ones(n)
    if isinstance(e, float):
        e = e * np.ones(n)
    while True:
        xn = __search(f, xp, dx, n)
        # debug printout
        if verbose is True:
            print(f'xb={xb} f(xb)={f(xb)} xp={xp} f(xp)={f(xp)} xn={xn} f(xn)={f(xn)}')
        if f(xn) < f(xb):
            # success: take a pattern step past the new best point
            xp = 2*xn - xb
            xb = xn
        else:
            # failure: halve the step and restart exploration from the best point
            dx /= 2
            xp = xb
        if all([dx[i] <= e[i] for i in range(n)]):
            break
    return xb
| [
"numpy.sqrt",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.linalg.norm"
] | [((1338, 1349), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1346, 1349), True, 'import numpy as np\n'), ((1446, 1460), 'numpy.array', 'np.array', (['[x0]'], {}), '([x0])\n', (1454, 1460), True, 'import numpy as np\n'), ((1622, 1633), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1630, 1633), True, 'import numpy as np\n'), ((1857, 1886), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - xs)'], {'ord': '(2)'}), '(x - xs, ord=2)\n', (1871, 1886), True, 'import numpy as np\n'), ((786, 796), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (793, 796), True, 'import numpy as np\n'), ((3560, 3572), 'numpy.sqrt', 'np.sqrt', (['div'], {}), '(div)\n', (3567, 3572), True, 'import numpy as np\n'), ((4103, 4113), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (4110, 4113), True, 'import numpy as np\n'), ((4159, 4169), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (4166, 4169), True, 'import numpy as np\n')] |
import datetime
import cv2
import numpy as np
from numpy.linalg import norm
import os
SZ = 20 # 训练图片长宽
def deskew(img):
    """Straighten a slanted character image using its image moments; output is SZ x SZ."""
    moments = cv2.moments(img)
    if abs(moments['mu02']) < 1e-2:
        # essentially no vertical variance: nothing to correct
        return img.copy()
    skew = moments['mu11'] / moments['mu02']
    shear = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
    return cv2.warpAffine(img, shear, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
class StatModel(object):
    """Thin persistence mixin that delegates load/save to the wrapped model object."""

    def load(self, fn):
        """Reload the wrapped model from file `fn` (rebinds ``self.model``)."""
        self.model = self.model.load(fn)

    def save(self, fn):
        """Write the wrapped model to file `fn`."""
        self.model.save(fn)
# 利用OpenCV中的SVM进行机器学习
# Machine learning with OpenCV's SVM implementation
class SVM(StatModel):
    """C-SVC support-vector classifier with an RBF kernel, backed by cv2.ml."""
    def __init__(self, C=1, gamma=0.5):
        self.model = cv2.ml.SVM_create()  # create the SVM model
        # configure its properties
        self.model.setGamma(gamma)
        self.model.setC(C)
        self.model.setKernel(cv2.ml.SVM_RBF)  # radial basis function kernel, a good default choice; gamma > 0
        self.model.setType(cv2.ml.SVM_C_SVC)

    # train the SVM
    def train(self, samples, responses):  # SVM training function: one sample per row
        self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)

    # character recognition
    def predict(self, samples):
        """Return the predicted label for each sample row as a flat array."""
        r = self.model.predict(samples)
        return r[1].ravel()
# 来自opencv的sample,用于svm训练
# from an OpenCV sample; used to build SVM training features
def hog(digits):
    """Compute a 64-value HOG (histogram of oriented gradients) feature vector for each
    20x20 character image and return them stacked as a float32 sample matrix.

    Pipeline (also described in the original note below): Sobel gradients -> polar
    magnitude/angle -> 16-bin angle quantisation -> per-quadrant magnitude-weighted
    histograms -> Hellinger-kernel normalisation.
    """
    samples = []
    '''
    step1.先计算图像 X 方向和 Y 方向的 Sobel 导数。
    step2.然后计算得到每个像素的梯度角度angle和梯度大小magnitude。
    step3.把这个梯度的角度转换成 0至16 之间的整数。
    step4.将图像分为 4 个小的方块,对每一个小方块计算它们梯度角度的直方图(16 个 bin),使用梯度的大小做权重。
    这样每一个小方块都会得到一个含有 16 个值的向量。
    4 个小方块的 4 个向量就组成了这个图像的特征向量(包含 64 个值)。
    这就是我们要训练数据的特征向量。
    '''
    for img in digits:
        # plt.subplot(221)
        # plt.imshow(img,'gray')
        # step 1: compute Sobel derivatives of the image in X and Y
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)  # step 2: Cartesian gradients -> polar magnitude, angle
        bin_n = 16
        bin = np.int32(bin_n * ang / (2 * np.pi))  # step 3: quantize angles into 16 bins (2*pi is 360 degrees); NB: shadows builtin bin()
        # step 4: divide into 4 sub-squares
        bin_cells = bin[:10, :10], bin[10:, :10], bin[:10, 10:], bin[10:, 10:]
        mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]
        # zip pairs each angle sub-square with its magnitude sub-square, e.g.
        # a = [1,2,3]; b = [4,5,6]; zip(a,b) -> [(1, 4), (2, 5), (3, 6)]
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)  # hist is a 64 bit vector
        # plt.subplot(223)
        # plt.plot(hist)
        # transform to Hellinger kernel
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps
        # plt.subplot(224)
        # plt.plot(hist)
        # plt.show()
        samples.append(hist)
    return np.float32(samples)
def train_svm():
    """Create the Chinese-character SVM and load trained weights from disk if present.

    NOTE(review): the training branch appears truncated -- it only initialises the
    training sample/label lists and never trains or saves the model; confirm against
    the complete source.
    """
    chinesemodel = SVM(C=1, gamma=0.5)
    if os.path.exists("svmchi.dat"):
        chinesemodel.load("svmchi.dat")
    else:
        chars_train = []
        chars_label = []
| [
"os.path.exists",
"cv2.warpAffine",
"numpy.sqrt",
"numpy.hstack",
"cv2.cartToPolar",
"cv2.ml.SVM_create",
"numpy.int32",
"cv2.moments",
"numpy.linalg.norm",
"numpy.float32",
"cv2.Sobel"
] | [((132, 148), 'cv2.moments', 'cv2.moments', (['img'], {}), '(img)\n', (143, 148), False, 'import cv2\n'), ((246, 298), 'numpy.float32', 'np.float32', (['[[1, skew, -0.5 * SZ * skew], [0, 1, 0]]'], {}), '([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])\n', (256, 298), True, 'import numpy as np\n'), ((309, 388), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(SZ, SZ)'], {'flags': '(cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)'}), '(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)\n', (323, 388), False, 'import cv2\n'), ((2775, 2794), 'numpy.float32', 'np.float32', (['samples'], {}), '(samples)\n', (2785, 2794), True, 'import numpy as np\n'), ((2860, 2888), 'os.path.exists', 'os.path.exists', (['"""svmchi.dat"""'], {}), "('svmchi.dat')\n", (2874, 2888), False, 'import os\n'), ((655, 674), 'cv2.ml.SVM_create', 'cv2.ml.SVM_create', ([], {}), '()\n', (672, 674), False, 'import cv2\n'), ((1655, 1687), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_32F', '(1)', '(0)'], {}), '(img, cv2.CV_32F, 1, 0)\n', (1664, 1687), False, 'import cv2\n'), ((1701, 1733), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_32F', '(0)', '(1)'], {}), '(img, cv2.CV_32F, 0, 1)\n', (1710, 1733), False, 'import cv2\n'), ((1753, 1776), 'cv2.cartToPolar', 'cv2.cartToPolar', (['gx', 'gy'], {}), '(gx, gy)\n', (1768, 1776), False, 'import cv2\n'), ((1861, 1896), 'numpy.int32', 'np.int32', (['(bin_n * ang / (2 * np.pi))'], {}), '(bin_n * ang / (2 * np.pi))\n', (1869, 1896), True, 'import numpy as np\n'), ((2409, 2425), 'numpy.hstack', 'np.hstack', (['hists'], {}), '(hists)\n', (2418, 2425), True, 'import numpy as np\n'), ((2613, 2626), 'numpy.sqrt', 'np.sqrt', (['hist'], {}), '(hist)\n', (2620, 2626), True, 'import numpy as np\n'), ((2643, 2653), 'numpy.linalg.norm', 'norm', (['hist'], {}), '(hist)\n', (2647, 2653), False, 'from numpy.linalg import norm\n')] |
# Closed-loop simulation of defined problem
from UKF_SNMPC import *
import numpy as np
from casadi import *
# Instantiate the SNMPC problem object (NB: this rebinds the imported name
# UKF_SNMPC from the class/factory to the instance) and grab its NLP solver.
UKF_SNMPC = UKF_SNMPC()
solver = UKF_SNMPC.solver
U_pasts, Xd_pasts, Xa_pasts, Conp_pasts, Cont_pasts, xu_pasts, MeanSEx_pasts,\
CovSEx_pasts, t_pasts = UKF_SNMPC.initialization()
# Unpack problem dimensions, noise covariances and helper functions once up front.
number_of_repeats = UKF_SNMPC.number_of_repeats
simulation_time, nun, nd = UKF_SNMPC.simulation_time, UKF_SNMPC.nun, UKF_SNMPC.nd
Sigma_v, nm, Sigma_w = UKF_SNMPC.Sigma_v, UKF_SNMPC.nm, UKF_SNMPC.Sigma_w
MeanP, CovP, hfcn = UKF_SNMPC.MeanP, UKF_SNMPC.CovP, UKF_SNMPC.hfcn
update_inputs, deltat = UKF_SNMPC.update_inputs, UKF_SNMPC.deltat
cfcn, xhatfcn, Sigmafcn = UKF_SNMPC.cfcn, UKF_SNMPC.x_hatfcn, UKF_SNMPC.Sigmafcn
# Monte-Carlo loop: repeat the closed-loop simulation with freshly sampled
# initial state and plant parameters each time.
for un in range(number_of_repeats):
    ws = np.zeros((UKF_SNMPC.nk,nun+nd))
    arg, u_past, tk, t0i, tfi, u_nmpc, Meanx0, Covx0, MeanSEx, CovSEx, t_past \
    = UKF_SNMPC.initialization_loop()
    # Sample the true initial state and the true (unknown) plant parameters.
    xd_current = np.expand_dims(np.random.multivariate_normal(\
                    np.array(Meanx0),Covx0),0).T
    xu_current = np.expand_dims(np.random.multivariate_normal(\
                    np.array(MeanP),CovP),0).T
    Xd_pasts[0,un,:] = np.array(DM(xd_current)).flatten()
    xu_pasts[un,:] = np.array(DM(xu_current)).flatten()
    MeanSEx_pasts[0,un,:] = np.array(DM(MeanSEx)).flatten()
    CovSEx_pasts[0,un,:,:] = np.array(DM(CovSEx))
    while True:
        # Break when simulation time is reached
        if tk >= UKF_SNMPC.nk-1:
            break
        # Simulation and measurement of plant
        xd_current, xa_current = UKF_SNMPC.simulator(xd_current,u_nmpc,\
                                t0i,tfi,xu_current)
        # Additive process noise on the state, measurement noise on the output.
        w = np.random.multivariate_normal(np.zeros(nd),Sigma_w)
        xd_current = (xd_current.flatten() + w)
        yd = DM(hfcn(xd_current,xu_current)) + \
        np.random.multivariate_normal(np.zeros(nm),Sigma_v)
        tfi += deltat
        # Parameter to set initial condition of NMPC algorithm and update discrete time tk
        p, tk = update_inputs(yd,tk,u_nmpc,MeanSEx,CovSEx)
        arg["p"] = p
        # Solve SNMPC problem and extract results
        res = solver(**arg)
        u_nmpc = cfcn(np.array(res["x"])[:,0]) # control input
        MeanSEx = xhatfcn(np.array(res["x"])[:,0]) # mean of state estimate
        CovSEx = Sigmafcn(np.array(res["x"])[:,0]) # covariance of state estimate
        # Collect data
        MeanSEx_pasts[tk+1,un,:] = np.array(DM(MeanSEx)).flatten()
        CovSEx_pasts[tk+1,un,:,:] = np.array(DM(CovSEx))
        t0i += deltat
        t_past, u_past = UKF_SNMPC.collect_data(t_past,u_past,t0i,u_nmpc)
    # Generate data for plots and save files
    Xd_pasts, Xa_pasts, Conp_pasts, Cont_pasts, U_pasts, t_pasts = \
    UKF_SNMPC.generate_data(Xd_pasts,Xa_pasts,Conp_pasts,Cont_pasts,U_pasts,un,\
                            u_past,xu_pasts,deltat,t_pasts,ws)
    # Plot results
    UKF_SNMPC.plot_graphs(t_past,t_pasts,Xd_pasts,Xa_pasts,U_pasts,Conp_pasts,Cont_pasts)
# Save results
UKF_SNMPC.save_results(Xd_pasts,Xa_pasts,U_pasts,Conp_pasts,Cont_pasts,t_pasts,\
xu_pasts,MeanSEx_pasts,CovSEx_pasts) | [
"numpy.array",
"numpy.zeros"
] | [((812, 846), 'numpy.zeros', 'np.zeros', (['(UKF_SNMPC.nk, nun + nd)'], {}), '((UKF_SNMPC.nk, nun + nd))\n', (820, 846), True, 'import numpy as np\n'), ((1867, 1879), 'numpy.zeros', 'np.zeros', (['nd'], {}), '(nd)\n', (1875, 1879), True, 'import numpy as np\n'), ((1051, 1067), 'numpy.array', 'np.array', (['Meanx0'], {}), '(Meanx0)\n', (1059, 1067), True, 'import numpy as np\n'), ((1158, 1173), 'numpy.array', 'np.array', (['MeanP'], {}), '(MeanP)\n', (1166, 1173), True, 'import numpy as np\n'), ((2033, 2045), 'numpy.zeros', 'np.zeros', (['nm'], {}), '(nm)\n', (2041, 2045), True, 'import numpy as np\n'), ((2388, 2406), 'numpy.array', 'np.array', (["res['x']"], {}), "(res['x'])\n", (2396, 2406), True, 'import numpy as np\n'), ((2459, 2477), 'numpy.array', 'np.array', (["res['x']"], {}), "(res['x'])\n", (2467, 2477), True, 'import numpy as np\n'), ((2537, 2555), 'numpy.array', 'np.array', (["res['x']"], {}), "(res['x'])\n", (2545, 2555), True, 'import numpy as np\n')] |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from tkinter import *
from datetime import datetime
import numpy, re, pyotp, sys, time, pytesseract, tkinter.font
import cv2 as cv
from pytesseract import image_to_string
# --- Tkinter main window (title: "Interpark ticketing program") ---
dp = Tk()
main_frame = Frame(dp)
dp.geometry('500x500')
dp.title("인터파크 티켓팅 프로그램")
main_frame.pack()
# --- Selenium driver: open the Interpark login page ---
# NOTE(review): the chromedriver path is machine-specific -- adjust per installation
driver = webdriver.Chrome("/home/clyde/Documents/Coding/Python Projects/InterPark/es/chromedriver")
wait = WebDriverWait(driver, 20)
url = "https://ticket.interpark.com/Gate/TPLogin.asp"
driver.get(url)
# --- Input fields (Korean labels: ID / password / show code / date / round / tickets) ---
id_label = Label(main_frame, text="아이디")
id_label.grid(row=1, column=0)
id_entry = Entry(main_frame)
id_entry.grid(row=1, column=1)
pw_label = Label(main_frame, text="비밀번호")
pw_label.grid(row=2, column=0)
pw_entry = Entry(main_frame, show='*')
pw_entry.grid(row=2, column=1)
showcode_label = Label(main_frame, text="공연번호")
showcode_label.grid(row=4, column=0)
showcode_entry = Entry(main_frame)
showcode_entry.grid(row=4, column=1)
date_label = Label(main_frame, text="날짜")
date_label.grid(row=5, column=0)
date_entry = Entry(main_frame)
date_entry.grid(row=5, column=1)
round_label = Label(main_frame, text="회차")
round_label.grid(row=6, column=0)
round_entry = Entry(main_frame)
round_entry.grid(row=6, column=1)
ticket_label = Label(main_frame, text="<NAME>")
ticket_label.grid(row=7, column=0)
ticket_entry = Entry(main_frame)
ticket_entry.grid(row=7, column=1)
# Entry used to display how long the booking flow took (see go()).
code_time = Entry(main_frame)
code_time.grid(row=14, column=1)
def login_go():
    """Fill in the Interpark login form (inside its iframe) with the entered credentials and submit."""
    driver.switch_to.frame(driver.find_element_by_tag_name('iframe'))
    driver.find_element_by_name('userId').send_keys(id_entry.get())
    driver.find_element_by_id('userPwd').send_keys(pw_entry.get())
    driver.find_element_by_id('btn_login').click()
def link_go():
    """Navigate straight to the booking session page for the entered show code."""
    driver.get('http://poticket.interpark.com/Book/BookSession.asp?GroupCode=' + showcode_entry.get())
def seat_macro():
    """
    Click up to `ticket_entry` seats inside the nested seat-map iframes, preferring the
    higher seat grades (VIP > R > S > A). Stops once enough seats were clicked or when no
    grade has a clickable seat left at the current index.
    Refactor: the original used a four-level nested try/except pyramid with a bare
    ``except``; flattened into a loop over the grade lists with identical fallback order,
    and the ticket-count entry is read once instead of on every iteration.
    """
    driver.switch_to.default_content()
    seat1_frame = driver.find_element_by_name("ifrmSeat")
    driver.switch_to.frame(seat1_frame)
    seat2_frame = driver.find_element_by_name("ifrmSeatDetail")
    driver.switch_to.frame(seat2_frame)
    len_seatn = len(driver.find_elements_by_class_name('stySeat'))
    print(len_seatn)
    shot = 0
    VIP = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/1_90.gif"]')
    R = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/2_90.gif"]')
    S = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/3_90.gif"]')
    A = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/4_90.gif"]')
    wanted = int(ticket_entry.get())
    for x in range(0, len_seatn):
        for grade in (VIP, R, S, A):
            try:
                # IndexError (grade list exhausted) or a click failure falls
                # through to the next, lower grade
                grade[x].click()
                shot += 1
                break
            except Exception:
                continue
        else:
            # no grade had a clickable seat at this index: give up, as the original did
            break
        if shot == wanted:
            break
def captcha():
    """Screenshot the booking captcha, clean it up with OpenCV, OCR it with Tesseract
    and type the result into the captcha field; retries while the captcha box stays
    visible (i.e. the answer was rejected)."""
    driver.switch_to.default_content()
    seat1_frame = driver.find_element_by_id("ifrmSeat")
    driver.switch_to.frame(seat1_frame)
    image = driver.find_element_by_id('imgCaptcha')
    image = image.screenshot_as_png
    # save the element screenshot so OpenCV can read it back from disk
    with open("captcha.png", "wb") as file:
        file.write(image)
    image = cv.imread("captcha.png")
    # Set a threshold value for the image, and save
    image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    image = cv.adaptiveThreshold(image, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 1)
    # morphological opening to remove thin noise strokes
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    image = cv.morphologyEx(image, cv.MORPH_OPEN, kernel, iterations=1)
    # findContours returns 2 or 3 values depending on the OpenCV version
    cnts = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    # erase small specks (area < 50) that would confuse the OCR
    for c in cnts:
        area = cv.contourArea(c)
        if area < 50:
            cv.drawContours(image, [c], -1, (0, 0, 0), -1)
    # sharpen, then invert so the text is dark on light for Tesseract
    kernel2 = numpy.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
    image = cv.filter2D(image, -1, kernel2)
    result = 255 - image
    captcha_text = image_to_string(result)
    print(captcha_text)
    driver.switch_to.default_content()
    driver.switch_to.frame(seat1_frame)
    driver.find_element_by_class_name('validationTxt').click()
    driver.find_element_by_id('txtCaptcha').send_keys(captcha_text)
    # NOTE(review): this retries via recursion inside the loop, so each failed attempt
    # nests another call; depth grows with consecutive failures -- confirm intended
    while 1:
        if driver.find_element_by_class_name('capchaInner').is_displayed():
            driver.find_element_by_class_name('refreshBtn').click()
            captcha()
        else:
            break
def date_select():
    """
    Pick the performance date and round (1-based indices from the GUI entries) on the
    booking page, then press Next and move on to seat selection.
    Bug fix: the original called ``driver.switch_to.alert()`` -- ``alert`` is a property
    returning an Alert object, so the call raised TypeError and the confirmation alert
    was never accepted (every run fell into the except branch).
    """
    first_frame = driver.find_element_by_id('ifrmBookStep')
    driver.switch_to.frame(first_frame)
    # date
    driver.find_element_by_xpath('(//*[@id="CellPlayDate"])' + "[" + date_entry.get() + "]").click()
    # round
    wait.until(EC.element_to_be_clickable(
        (By.XPATH, '/html/body/div/div[3]/div[1]/div/span/ul/li[' + round_entry.get() + ']/a'))).click()
    driver.switch_to.default_content()
    wait.until(EC.element_to_be_clickable((By.ID, 'LargeNextBtnImage'))).click()
    # next
    try:
        # accept a confirmation alert if one popped up; if none is present,
        # selenium raises and we fall through to the except branch
        driver.switch_to.alert.accept()
        driver.switch_to.default_content()
        wait.until(EC.presence_of_all_elements_located((By.ID, 'ifrmSeat')))
    except:
        driver.switch_to.default_content()
        wait.until(EC.presence_of_all_elements_located((By.ID, 'ifrmSeat')))
def go2():
    """
    Run the seat macro and advance to the next booking step, accepting any confirmation
    alerts along the way.
    Bug fix: ``driver.switch_to.alert`` is a property; the original called it like a
    method, which always raised TypeError and silently skipped the alert handling.
    """
    seat1_frame = driver.find_element_by_id("ifrmSeat")
    seat_macro()
    try:
        # alert path: accept, then continue to the next step and accept its alert too
        driver.switch_to.alert.accept()
        driver.switch_to.default_content()
        driver.switch_to.frame(seat1_frame)
        driver.find_element_by_id('NextStepImage').click()
        driver.switch_to.alert.accept()
    except:
        driver.switch_to.default_content()
        driver.switch_to.frame(seat1_frame)
        driver.find_element_by_id('NextStepImage').click()
def go():
    """Run the full booking flow (close popup -> pick date -> captcha -> seats) and
    report how long it took in the code_time entry."""
    code_time.delete(0, END)
    start_time = time.time()
    wait.until(EC.visibility_of_element_located((By.XPATH, '/html/body/div[1]/div[2]/div[3]/div[2]/p[3]/a[2]/img')))
    try:
        # a promo/notice popup may cover the page; close it if present
        driver.find_element_by_class_name('closeBtn').click()
        date_select()
        try:
            captcha()
            go2()
            pass
        except:
            # captcha handling failed -- try to proceed to seat selection anyway
            go2()
            pass
    except:
        # no popup to close: run the same flow without it
        date_select()
        try:
            captcha()
            go2()
            pass
        except:
            go2()
            pass
    finally:
        # "초" is Korean for "seconds"
        code_time.insert(0, "%s 초" % round((time.time() - start_time), 3))
def all_go():
    """
    Open the booking page and run the full direct-seat flow; on failure, accept any
    blocking alert and reload the booking page.
    Bug fixes: ``driver.switch_to.alert`` is a property (the original called it like a
    method), and the accept call was misspelled ``accpet`` -- so the recovery branch
    always raised AttributeError/TypeError and only the inner except ever ran.
    """
    driver.get('http://poticket.interpark.com/Book/BookSession.asp?GroupCode=' + showcode_entry.get())
    try:
        go()
    except:
        try:
            driver.switch_to.alert.accept()
            driver.get('http://poticket.interpark.com/Book/BookSession.asp?GroupCode=' + showcode_entry.get())
        except:
            driver.get('http://poticket.interpark.com/Book/BookSession.asp?GroupCode=' + showcode_entry.get())
def credit():
    """Walk the payment pages: enter the birthdate, pick bank-transfer payment and a
    bank, tick all agreement checkboxes and submit the order."""
    driver.switch_to.default_content()
    driver.find_element_by_xpath('//*[@id="SmallNextBtnImage"]').click()
    driver.switch_to.frame(driver.find_element_by_xpath('//*[@id="ifrmBookStep"]'))
    # NOTE(review): birthdate is hard-coded (YYMMDD) -- should come from a GUI field
    wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="YYMMDD"]'))).send_keys("990813")
    driver.switch_to.default_content()
    driver.find_element_by_xpath('//*[@id="SmallNextBtnImage"]').click()
    driver.switch_to.frame(driver.find_element_by_xpath('//*[@id="ifrmBookStep"]'))
    # choose the payment method and the 7th bank in the dropdown
    wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="Payment_22004"]/td/input'))).click()
    wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="BankCode"]/option[7]'))).click()
    driver.switch_to.default_content()
    driver.find_element_by_xpath('//*[@id="SmallNextBtnImage"]').click()
    driver.switch_to.frame(driver.find_element_by_xpath('//*[@id="ifrmBookStep"]'))
    # agree to all terms at once
    wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="checkAll"]'))).click()
    driver.switch_to.default_content()
    driver.find_element_by_xpath('//*[@id="LargeNextBtnImage"]').click()
def clock_time():
    """Refresh the clock label with the current time (H:M:S:microseconds) and reschedule itself."""
    now_text = datetime.now().strftime('%H:%M:%S:%f')
    time_label.config(text=now_text)
    # keep the label ticking: run again after 1 ms
    time_label.after(1, clock_time)
# --- Action buttons (Korean captions: login / direct link / all / seats /
# direct+seats / payment / captcha), wired to the handlers above ---
login_button = Button(main_frame, text="로그인", command=login_go, height=2)
login_button.grid(row=3, column=1)
link_button = Button(main_frame, text="직링", command=link_go, height=2)
link_button.grid(row=9, column=0)
all_button = Button(main_frame, text='전체', command=all_go, height=2)
all_button.grid(row=9, column=1, sticky=W + E)
chair_button = Button(main_frame, text="좌석", command=go2, height=2)
chair_button.grid(row=9, column=2)
start_button = Button(main_frame, text="직좌", command=go, height=2)
start_button.grid(row=10, column=0)
credit_button = Button(main_frame, text="결제", command=credit, height=2)
credit_button.grid(row=10, column=1, sticky=W + E)
captcha_button = Button(main_frame, text='캡챠', command=captcha, height=2)
captcha_button.grid(row=10, column=2)
# live clock label, updated by clock_time()
time_label = Label(main_frame, height=2)
time_label.grid(row=13, column=1)
clock_time()
dp.mainloop()
| [
"selenium.webdriver.support.ui.WebDriverWait",
"cv2.drawContours",
"selenium.webdriver.support.expected_conditions.presence_of_all_elements_located",
"selenium.webdriver.Chrome",
"time.time",
"cv2.filter2D",
"cv2.contourArea",
"cv2.morphologyEx",
"cv2.adaptiveThreshold",
"cv2.getStructuringElement... | [((632, 727), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['"""/home/clyde/Documents/Coding/Python Projects/InterPark/es/chromedriver"""'], {}), "(\n '/home/clyde/Documents/Coding/Python Projects/InterPark/es/chromedriver')\n", (648, 727), False, 'from selenium import webdriver\n'), ((730, 755), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(20)'], {}), '(driver, 20)\n', (743, 755), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((3857, 3881), 'cv2.imread', 'cv.imread', (['"""captcha.png"""'], {}), "('captcha.png')\n", (3866, 3881), True, 'import cv2 as cv\n'), ((3946, 3983), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (3957, 3983), True, 'import cv2 as cv\n'), ((3996, 4089), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['image', '(255)', 'cv.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv.THRESH_BINARY', '(71)', '(1)'], {}), '(image, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.\n THRESH_BINARY, 71, 1)\n', (4016, 4089), True, 'import cv2 as cv\n'), ((4098, 4145), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(3, 3)'], {}), '(cv.MORPH_RECT, (3, 3))\n', (4122, 4145), True, 'import cv2 as cv\n'), ((4158, 4217), 'cv2.morphologyEx', 'cv.morphologyEx', (['image', 'cv.MORPH_OPEN', 'kernel'], {'iterations': '(1)'}), '(image, cv.MORPH_OPEN, kernel, iterations=1)\n', (4173, 4217), True, 'import cv2 as cv\n'), ((4230, 4294), 'cv2.findContours', 'cv.findContours', (['image', 'cv.RETR_EXTERNAL', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n', (4245, 4294), True, 'import cv2 as cv\n'), ((4492, 4546), 'numpy.array', 'numpy.array', (['[[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n', (4503, 4546), False, 'import numpy, re, pyotp, sys, time, pytesseract, tkinter.font\n'), ((4559, 4590), 'cv2.filter2D', 'cv.filter2D', (['image', 
'(-1)', 'kernel2'], {}), '(image, -1, kernel2)\n', (4570, 4590), True, 'import cv2 as cv\n'), ((4635, 4658), 'pytesseract.image_to_string', 'image_to_string', (['result'], {}), '(result)\n', (4650, 4658), False, 'from pytesseract import image_to_string\n'), ((6464, 6475), 'time.time', 'time.time', ([], {}), '()\n', (6473, 6475), False, 'import numpy, re, pyotp, sys, time, pytesseract, tkinter.font\n'), ((4379, 4396), 'cv2.contourArea', 'cv.contourArea', (['c'], {}), '(c)\n', (4393, 4396), True, 'import cv2 as cv\n'), ((6491, 6595), 'selenium.webdriver.support.expected_conditions.visibility_of_element_located', 'EC.visibility_of_element_located', (["(By.XPATH, '/html/body/div[1]/div[2]/div[3]/div[2]/p[3]/a[2]/img')"], {}), "((By.XPATH,\n '/html/body/div[1]/div[2]/div[3]/div[2]/p[3]/a[2]/img'))\n", (6523, 6595), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((4431, 4477), 'cv2.drawContours', 'cv.drawContours', (['image', '[c]', '(-1)', '(0, 0, 0)', '(-1)'], {}), '(image, [c], -1, (0, 0, 0), -1)\n', (4446, 4477), True, 'import cv2 as cv\n'), ((5734, 5790), 'selenium.webdriver.support.expected_conditions.presence_of_all_elements_located', 'EC.presence_of_all_elements_located', (["(By.ID, 'ifrmSeat')"], {}), "((By.ID, 'ifrmSeat'))\n", (5769, 5790), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((8625, 8639), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8637, 8639), False, 'from datetime import datetime\n'), ((5546, 5602), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (["(By.ID, 'LargeNextBtnImage')"], {}), "((By.ID, 'LargeNextBtnImage'))\n", (5572, 5602), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((5866, 5922), 'selenium.webdriver.support.expected_conditions.presence_of_all_elements_located', 'EC.presence_of_all_elements_located', (["(By.ID, 'ifrmSeat')"], {}), "((By.ID, 'ifrmSeat'))\n", (5901, 5922), 
True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((7726, 7785), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, \'//*[@id="YYMMDD"]\')'], {}), '((By.XPATH, \'//*[@id="YYMMDD"]\'))\n', (7752, 7785), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((8018, 8093), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, \'//*[@id="Payment_22004"]/td/input\')'], {}), '((By.XPATH, \'//*[@id="Payment_22004"]/td/input\'))\n', (8044, 8093), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((8118, 8189), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, \'//*[@id="BankCode"]/option[7]\')'], {}), '((By.XPATH, \'//*[@id="BankCode"]/option[7]\'))\n', (8144, 8189), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((8410, 8471), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, \'//*[@id="checkAll"]\')'], {}), '((By.XPATH, \'//*[@id="checkAll"]\'))\n', (8436, 8471), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((7021, 7032), 'time.time', 'time.time', ([], {}), '()\n', (7030, 7032), False, 'import numpy, re, pyotp, sys, time, pytesseract, tkinter.font\n')] |
#!/usr/bin/env python3
from pprint import pprint
from typing import Dict, Generator
import helper
import numpy as np
def sequence(multiplier: int) -> Generator[int, None, None]:
    """Yield the FFT base pattern (0, 1, 0, -1), each value repeated
    `multiplier` times, cycling forever starting from the first element.
    """
    assert multiplier >= 1
    pattern = [value for value in (0, 1, 0, -1) for _ in range(multiplier)]
    period = len(pattern)
    idx = 0
    while True:
        yield pattern[idx]
        idx = (idx + 1) % period
class Cache:
    """Precomputed FFT pattern rows.

    For an input of length n, stores the pattern row for every output
    position 1..n, with the leading pattern element dropped as the FFT
    definition requires.  Note this is O(n^2) memory — fine for part-1
    sized inputs.
    """

    def __init__(self, line: str) -> None:
        self.line = line
        print("Begin: build cache...")
        self.d = self.build_cache(self.line)
        print("End: build cache")

    def build_cache(self, line: str) -> Dict[int, np.ndarray]:
        """Return {position: pattern row} for positions 1..len(line).

        Fix: the annotation previously said `np.array`, which is a factory
        function, not a type; the array type is `np.ndarray`.
        """
        d: Dict[int, np.ndarray] = {}
        length = len(line)
        for i in range(1, length + 1):
            seq = sequence(i)
            # Take length+1 values and drop the first (the FFT skips it).
            d[i] = np.array([next(seq) for _ in range(length + 1)][1:])
        return d

    def __getitem__(self, key: int) -> np.ndarray:
        return self.d[key]

    def debug(self) -> None:
        pprint(self.d)
def transform(line: str, cache: Cache) -> str:
    """Apply one FFT phase to `line` and return the new digit string."""
    digits = np.array([int(ch) for ch in line])
    out = []
    for pos in range(1, len(line) + 1):
        total = np.sum(digits * cache[pos])
        out.append(str(np.abs(total) % 10))
    return "".join(out)
def process(curr: str, cache: Cache, repeat=1) -> str:
    """Apply `repeat` FFT phases to `curr` and return the final string."""
    for _ in range(repeat):
        curr = transform(curr, cache)
    return curr
def test_1() -> None:
    """Sanity check: four phases on the 8-digit example."""
    digits = "12345678"
    process(digits, Cache(digits), repeat=4)
def test_2() -> None:
    """Sanity check: one hundred phases on a 32-digit example."""
    digits = "80871224585914546619083218645595"
    process(digits, Cache(digits), repeat=100)
def real() -> None:
    """Run 100 FFT phases on the puzzle input and print the result."""
    line = helper.read_lines("input.txt")[0]
    result = process(line, Cache(line), repeat=100)
    print(result)
    print()
    print("First eight digits:", result[:8])
def main() -> None:
    """Entry point: solve the real input (sample runs left commented out)."""
    # test_1()
    # test_2()
    real()


##############################################################################
if __name__ == "__main__":
    main()
| [
"numpy.abs",
"helper.read_lines",
"numpy.array",
"numpy.sum",
"pprint.pprint"
] | [((1150, 1164), 'pprint.pprint', 'pprint', (['self.d'], {}), '(self.d)\n', (1156, 1164), False, 'from pprint import pprint\n'), ((1384, 1395), 'numpy.sum', 'np.sum', (['mul'], {}), '(mul)\n', (1390, 1395), True, 'import numpy as np\n'), ((1982, 2012), 'helper.read_lines', 'helper.read_lines', (['"""input.txt"""'], {}), "('input.txt')\n", (1999, 2012), False, 'import helper\n'), ((995, 1007), 'numpy.array', 'np.array', (['li'], {}), '(li)\n', (1003, 1007), True, 'import numpy as np\n'), ((1410, 1421), 'numpy.abs', 'np.abs', (['num'], {}), '(num)\n', (1416, 1421), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from os import path as op
from uuid import uuid4
import numpy as np
import nibabel as nib
from matplotlib import pyplot as plt
from svgutils.transform import fromstring
from seaborn import color_palette
from nipype.interfaces.base import (
traits,
File,
isdefined,
)
from niworkflows.interfaces.report_base import ReportingInterface, _SVGReportCapableInputSpec
from niworkflows.viz.utils import compose_view, extract_svg, cuts_from_bbox
from nilearn.plotting import plot_epi, plot_anat
from ...utils import nvol
from ...resource import get as getresource
class PlotInputSpec(_SVGReportCapableInputSpec):
    # Volume to render; must exist on disk.
    in_file = File(exists=True, mandatory=True, desc="volume")
    # Binary mask; used below for cut placement and contour overlays.
    mask_file = File(exists=True, mandatory=True, desc="mask")
    # Optional title shown on the first panel of the report.
    label = traits.Str()
class PlotEpi(ReportingInterface):
    """Render an EPI volume as an SVG report with mask contours.

    Produces z/y/x mosaic views of ``in_file`` (windowed to the intensity
    range inside ``mask_file``) with the mask outline drawn in red, and
    composes the three views into a single SVG written to ``out_report``.
    """

    input_spec = PlotInputSpec

    def _generate_report(self):
        in_img = nib.load(self.inputs.in_file)
        assert nvol(in_img) == 1
        mask_img = nib.load(self.inputs.mask_file)
        assert nvol(mask_img) == 1
        label = None
        if isdefined(self.inputs.label):
            label = self.inputs.label
        compress = self.inputs.compress_report
        n_cuts = 7
        cuts = cuts_from_bbox(mask_img, cuts=n_cuts)
        # Window the display to the intensity range inside the mask.
        # Fix: `np.bool` was removed in NumPy 1.24; use the builtin `bool`.
        img_vals = in_img.get_fdata()[np.asanyarray(mask_img.dataobj).astype(bool)]
        vmin = img_vals.min()
        vmax = img_vals.max()
        outfiles = []
        for dimension in ["z", "y", "x"]:
            display = plot_epi(
                in_img,
                draw_cross=False,
                display_mode=dimension,
                cut_coords=cuts[dimension],
                title=label,
                vmin=vmin,
                vmax=vmax,
                colorbar=(dimension == "z"),  # one colorbar is enough
                cmap=plt.cm.gray,
            )
            display.add_contours(mask_img, levels=[0.5], colors="r")
            label = None  # only on first
            svg = extract_svg(display, compress=compress)
            # Make the figure id unique so the per-axis SVGs can be merged.
            svg = svg.replace("figure_1", str(uuid4()), 1)
            outfiles.append(fromstring(svg))
        self._out_report = op.abspath(self.inputs.out_report)
        compose_view(bg_svgs=outfiles, fg_svgs=None, out_file=self._out_report)
class PlotRegistrationInputSpec(PlotInputSpec):
    # Template space name; used to look up the matching
    # "RegistrationCheckOverlay" parcellation resource.
    template = traits.Str(mandatory=True)
class PlotRegistration(ReportingInterface):
    """Registration-check report: anatomical mosaics with a template
    parcellation outline (colored) and the mask outline (red) overlaid,
    composed into a single SVG written to ``out_report``."""

    input_spec = PlotRegistrationInputSpec

    def _generate_report(self):
        in_img = nib.load(self.inputs.in_file)
        assert nvol(in_img) == 1
        mask_img = nib.load(self.inputs.mask_file)
        assert nvol(mask_img) == 1
        template = self.inputs.template
        # Package resource with the overlay parcellation for this template.
        parc_file = getresource(f"tpl-{template}_RegistrationCheckOverlay.nii.gz")
        assert parc_file is not None
        parc_img = nib.load(parc_file)
        # One contour level per positive label; 0.5 below each integer label
        # so the contour falls on the label boundary.
        levels = np.unique(np.asanyarray(parc_img.dataobj).astype(np.int32))
        levels = (levels[levels > 0] - 0.5).tolist()
        colors = color_palette("husl", len(levels))
        label = None
        if isdefined(self.inputs.label):
            label = self.inputs.label
        compress = self.inputs.compress_report
        n_cuts = 7
        cuts = cuts_from_bbox(mask_img, cuts=n_cuts)
        outfiles = []
        for dimension in ["z", "y", "x"]:
            display = plot_anat(
                in_img,
                draw_cross=False,
                display_mode=dimension,
                cut_coords=cuts[dimension],
                title=label,
            )
            display.add_contours(parc_img, levels=levels, colors=colors, linewidths=0.25)
            display.add_contours(mask_img, levels=[0.5], colors="r", linewidths=0.5)
            label = None  # only on first
            svg = extract_svg(display, compress=compress)
            # Unique figure id so the per-axis SVGs can be merged.
            svg = svg.replace("figure_1", str(uuid4()), 1)
            outfiles.append(fromstring(svg))
        self._out_report = op.abspath(self.inputs.out_report)
        compose_view(bg_svgs=outfiles, fg_svgs=None, out_file=self._out_report)
| [
"niworkflows.viz.utils.compose_view",
"nipype.interfaces.base.isdefined",
"niworkflows.viz.utils.extract_svg",
"nipype.interfaces.base.traits.Str",
"nibabel.load",
"nilearn.plotting.plot_epi",
"numpy.asanyarray",
"svgutils.transform.fromstring",
"uuid.uuid4",
"nipype.interfaces.base.File",
"nile... | [((776, 824), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""volume"""'}), "(exists=True, mandatory=True, desc='volume')\n", (780, 824), False, 'from nipype.interfaces.base import traits, File, isdefined\n'), ((841, 887), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""mask"""'}), "(exists=True, mandatory=True, desc='mask')\n", (845, 887), False, 'from nipype.interfaces.base import traits, File, isdefined\n'), ((900, 912), 'nipype.interfaces.base.traits.Str', 'traits.Str', ([], {}), '()\n', (910, 912), False, 'from nipype.interfaces.base import traits, File, isdefined\n'), ((2447, 2473), 'nipype.interfaces.base.traits.Str', 'traits.Str', ([], {'mandatory': '(True)'}), '(mandatory=True)\n', (2457, 2473), False, 'from nipype.interfaces.base import traits, File, isdefined\n'), ((1031, 1060), 'nibabel.load', 'nib.load', (['self.inputs.in_file'], {}), '(self.inputs.in_file)\n', (1039, 1060), True, 'import nibabel as nib\n'), ((1114, 1145), 'nibabel.load', 'nib.load', (['self.inputs.mask_file'], {}), '(self.inputs.mask_file)\n', (1122, 1145), True, 'import nibabel as nib\n'), ((1214, 1242), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.label'], {}), '(self.inputs.label)\n', (1223, 1242), False, 'from nipype.interfaces.base import traits, File, isdefined\n'), ((1365, 1402), 'niworkflows.viz.utils.cuts_from_bbox', 'cuts_from_bbox', (['mask_img'], {'cuts': 'n_cuts'}), '(mask_img, cuts=n_cuts)\n', (1379, 1402), False, 'from niworkflows.viz.utils import compose_view, extract_svg, cuts_from_bbox\n'), ((2267, 2301), 'os.path.abspath', 'op.abspath', (['self.inputs.out_report'], {}), '(self.inputs.out_report)\n', (2277, 2301), True, 'from os import path as op\n'), ((2310, 2381), 'niworkflows.viz.utils.compose_view', 'compose_view', ([], {'bg_svgs': 'outfiles', 'fg_svgs': 'None', 'out_file': 'self._out_report'}), '(bg_svgs=outfiles, fg_svgs=None, 
out_file=self._out_report)\n', (2322, 2381), False, 'from niworkflows.viz.utils import compose_view, extract_svg, cuts_from_bbox\n'), ((2613, 2642), 'nibabel.load', 'nib.load', (['self.inputs.in_file'], {}), '(self.inputs.in_file)\n', (2621, 2642), True, 'import nibabel as nib\n'), ((2696, 2727), 'nibabel.load', 'nib.load', (['self.inputs.mask_file'], {}), '(self.inputs.mask_file)\n', (2704, 2727), True, 'import nibabel as nib\n'), ((2945, 2964), 'nibabel.load', 'nib.load', (['parc_file'], {}), '(parc_file)\n', (2953, 2964), True, 'import nibabel as nib\n'), ((3181, 3209), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.label'], {}), '(self.inputs.label)\n', (3190, 3209), False, 'from nipype.interfaces.base import traits, File, isdefined\n'), ((3332, 3369), 'niworkflows.viz.utils.cuts_from_bbox', 'cuts_from_bbox', (['mask_img'], {'cuts': 'n_cuts'}), '(mask_img, cuts=n_cuts)\n', (3346, 3369), False, 'from niworkflows.viz.utils import compose_view, extract_svg, cuts_from_bbox\n'), ((4060, 4094), 'os.path.abspath', 'op.abspath', (['self.inputs.out_report'], {}), '(self.inputs.out_report)\n', (4070, 4094), True, 'from os import path as op\n'), ((4103, 4174), 'niworkflows.viz.utils.compose_view', 'compose_view', ([], {'bg_svgs': 'outfiles', 'fg_svgs': 'None', 'out_file': 'self._out_report'}), '(bg_svgs=outfiles, fg_svgs=None, out_file=self._out_report)\n', (4115, 4174), False, 'from niworkflows.viz.utils import compose_view, extract_svg, cuts_from_bbox\n'), ((1638, 1813), 'nilearn.plotting.plot_epi', 'plot_epi', (['in_img'], {'draw_cross': '(False)', 'display_mode': 'dimension', 'cut_coords': 'cuts[dimension]', 'title': 'label', 'vmin': 'vmin', 'vmax': 'vmax', 'colorbar': "(dimension == 'z')", 'cmap': 'plt.cm.gray'}), "(in_img, draw_cross=False, display_mode=dimension, cut_coords=cuts[\n dimension], title=label, vmin=vmin, vmax=vmax, colorbar=dimension ==\n 'z', cmap=plt.cm.gray)\n", (1646, 1813), False, 'from nilearn.plotting import plot_epi, 
plot_anat\n'), ((2095, 2134), 'niworkflows.viz.utils.extract_svg', 'extract_svg', (['display'], {'compress': 'compress'}), '(display, compress=compress)\n', (2106, 2134), False, 'from niworkflows.viz.utils import compose_view, extract_svg, cuts_from_bbox\n'), ((3457, 3562), 'nilearn.plotting.plot_anat', 'plot_anat', (['in_img'], {'draw_cross': '(False)', 'display_mode': 'dimension', 'cut_coords': 'cuts[dimension]', 'title': 'label'}), '(in_img, draw_cross=False, display_mode=dimension, cut_coords=cuts\n [dimension], title=label)\n', (3466, 3562), False, 'from nilearn.plotting import plot_epi, plot_anat\n'), ((3888, 3927), 'niworkflows.viz.utils.extract_svg', 'extract_svg', (['display'], {'compress': 'compress'}), '(display, compress=compress)\n', (3899, 3927), False, 'from niworkflows.viz.utils import compose_view, extract_svg, cuts_from_bbox\n'), ((2222, 2237), 'svgutils.transform.fromstring', 'fromstring', (['svg'], {}), '(svg)\n', (2232, 2237), False, 'from svgutils.transform import fromstring\n'), ((4015, 4030), 'svgutils.transform.fromstring', 'fromstring', (['svg'], {}), '(svg)\n', (4025, 4030), False, 'from svgutils.transform import fromstring\n'), ((1442, 1473), 'numpy.asanyarray', 'np.asanyarray', (['mask_img.dataobj'], {}), '(mask_img.dataobj)\n', (1455, 1473), True, 'import numpy as np\n'), ((2181, 2188), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2186, 2188), False, 'from uuid import uuid4\n'), ((2993, 3024), 'numpy.asanyarray', 'np.asanyarray', (['parc_img.dataobj'], {}), '(parc_img.dataobj)\n', (3006, 3024), True, 'import numpy as np\n'), ((3974, 3981), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3979, 3981), False, 'from uuid import uuid4\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 13:55:00 2018
@author: Payam
"""
#%% import libraries
import itk
import numpy as np
from read_image import get_itk_image_type
import main_functions
import os
# from read_image import get_itk_image_type
# Input CT volume (UNC network path) and legacy output names (the actual
# output name is rebuilt per pose in the loop below).
input_filename = r'\\dingo\scratch\pteng\dataset\rawlung\image\108061.nii.gz'
output_filename = ['knee_test_cam1.nii', # output file name 1
                   'knee_test_cam2.nii'] # output file name 2
verbose = False # verbose details of all steps.
#% -------------------- Reader -------------------------
InputImageType = get_itk_image_type(input_filename)
print(InputImageType)
OutputImageType= InputImageType
inputImage = itk.imread(input_filename, itk.SS)  # itk.SS = signed short pixel type
#%% Set input information
sizeOutput = [1024,1400,1] # The size of output image
threshold = 0.  # intensity threshold forwarded to main_functions.drr
rot = [0., 0., 0.] # rotation in degrees in x, y, and z direction.
t = [0. ,0. ,0.] # translation in x, y, and z directions.
cor = [0. ,0. ,0.] # offset of the rotation from the center of image (3D)
spaceOutput = [0.167,0.167,1]  # output pixel spacing (presumably mm -- ITK convention)
delta = sizeOutput[0]*spaceOutput[0]/2  # half the output width, used to center the origin
focalPoint = [0.0,0.0,1000.0]  # virtual X-ray source position
originOutput = [delta,delta,-200.0]  # output image origin
directionOutput = np.matrix([[ 1., 0., 0.],
                           [ 0., 1., 0.],
                           [ 0., 0., 1.]])  # identity orientation for the output image
#%%
# Sweep the requested rotation angles and render one DRR per combination.
# NOTE: with these ranges each loop yields a single value (x=90, y=0, z=0),
# so exactly one image is produced; widen the ranges to sweep more poses.
output_directory = r"M:\apps\personal\bgray\drr_out"  # output directory
os.makedirs(output_directory, exist_ok=True)  # race-free replacement for exists()+mkdir()
filetype = '.dcm'  # output format suffix (dicom here; nifti also supported per the comment above)
for counter_x in range(90, 100, 90):  # rotation about x, degrees
    for counter_y in range(0, 5, 5):  # rotation about y, degrees
        for counter_z in range(0, 5, 5):  # rotation about z, degrees
            rot = [float(counter_x), float(counter_y), float(counter_z)]
            print(rot)
            # e.g. 'rx_90ry_0rz_09.dcm' -- trailing '9' kept from the original scheme.
            filename = 'rx_' + str(int(rot[0])) + 'ry_' + str(int(rot[1])) + 'rz_' + str(int(rot[2])) + '9' + filetype
            output_filename = os.path.join(output_directory, filename)
            main_functions.drr(inputImage, output_filename, rot, t, focalPoint,
                               originOutput, sizeOutput, cor, spaceOutput,
                               directionOutput, threshold, InputImageType,
                               OutputImageType, verbose)  # render the DRR
#%% For later.
#import itk_helpers as Functions
##%%
## Transferring the 3D image so that the center of rotation of the image is located at global origin.
#
#Functions.rigid_body_transform3D(input_filename='/Volumes/Storage/Projects/Registration/QuickData/OA-BEADS-CT.nii',\
# output_filename='/Volumes/Storage/Projects/Registration/QuickData/transformed_ct.nii',\
# t =[-1.14648438, 132.85351562, 502.09999385],rot = [-90.,0.,90.])
| [
"os.path.exists",
"itk.imread",
"read_image.get_itk_image_type",
"os.path.join",
"os.mkdir",
"numpy.matrix",
"main_functions.drr"
] | [((616, 650), 'read_image.get_itk_image_type', 'get_itk_image_type', (['input_filename'], {}), '(input_filename)\n', (634, 650), False, 'from read_image import get_itk_image_type\n'), ((720, 754), 'itk.imread', 'itk.imread', (['input_filename', 'itk.SS'], {}), '(input_filename, itk.SS)\n', (730, 754), False, 'import itk\n'), ((1228, 1290), 'numpy.matrix', 'np.matrix', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (1237, 1290), True, 'import numpy as np\n'), ((2163, 2203), 'os.path.join', 'os.path.join', (['output_directory', 'filename'], {}), '(output_directory, filename)\n', (2175, 2203), False, 'import os\n'), ((2280, 2469), 'main_functions.drr', 'main_functions.drr', (['inputImage', 'output_filename', 'rot', 't', 'focalPoint', 'originOutput', 'sizeOutput', 'cor', 'spaceOutput', 'directionOutput', 'threshold', 'InputImageType', 'OutputImageType', 'verbose'], {}), '(inputImage, output_filename, rot, t, focalPoint,\n originOutput, sizeOutput, cor, spaceOutput, directionOutput, threshold,\n InputImageType, OutputImageType, verbose)\n', (2298, 2469), False, 'import main_functions\n'), ((1768, 1800), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (1782, 1800), False, 'import os\n'), ((1868, 1894), 'os.mkdir', 'os.mkdir', (['output_directory'], {}), '(output_directory)\n', (1876, 1894), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 2020
Written by EJ_Chang
"""
import os, random
import numpy as np
from datetime import date
from psychopy import visual, event, core, monitors
from psychopy.hardware import joystick
from ResponseTrigger import *
from Solarized import * # Import solarized color palette
from BTS_Material import *
# Subject profile: collected interactively at startup.
today = date.today()
print('Today is %s:' % today)
usernum = int(input('Please enter subject number:'))
username = input("Please enter your name:").upper()
print('Hi %s, welcome to our experiment!' % username)
# Counterbalance the hardware-block order by subject-number parity:
# odd subjects start with the wheel, even subjects with the d-pad.
if usernum % 2 == 1:
    hw_required = ['Wheel','dPad']
elif usernum % 2 == 0:
    hw_required = ['dPad', 'Wheel']
print(hw_required)
# Make screen profile ----
widthPix = 2560 # screen width in px
heightPix = 1440 # screen height in px
monitorwidth = 60 # monitor width in cm
viewdist = 60 # viewing distance in cm
monitorname = 'ProArt27'
scrn = 0 # 0 to use main screen, 1 to use external screen
mon = monitors.Monitor(monitorname, width = monitorwidth, distance = viewdist)
mon.setSizePix((widthPix, heightPix))
mon.save()
# Preparing Window ----
# my_win = visual.Window(size = (880, 440), pos = (880,1040),
#                        color = SOLARIZED['base03'], colorSpace = 'rgb255',
#                        monitor = mon, units = 'pix', screen = 1)
my_win = visual.Window(size = (2560, 1440), pos = (0,0),
                      color = SOLARIZED['base03'], colorSpace = 'rgb255',
                      monitor = mon, units = 'pix',
                      screen = 0, fullscr = 1)
# Preparing Joystick & Mouse
# - Joysticks setting
joystick.backend = 'pyglet'
nJoys = joystick.getNumJoysticks() # Check if I have any joysticks
# Fix: renamed from `id`, which shadowed the builtin of the same name.
joy_id = 0 # use the first joystick as input; must be <= nJoys - 1
joy = joystick.Joystick(joy_id)
# - Mouse setting
mouse = event.Mouse(visible = True, win = my_win)
mouse.clickReset() # Reset its click timers to their initials
# Defaults / trial bookkeeping
pre_key = []   # last key seen, used below to ignore a held-down key
response = []  # accumulated per-trial records
# Three request-button conditions, five repetitions each, shuffled
# (the original comment counts 3*5*2 = 30 trials across both blocks).
x = np.array([0, 1, 2])
requestList = np.repeat(x, 5)
random.shuffle(requestList)
# Static image assets (paths relative to the working directory).
IMG_START = 'OSD_ImgFolder/start.png'
IMG_REST = 'OSD_ImgFolder/rest.png'
IMG_THX = 'OSD_ImgFolder/thanks.png'
# Instruction screen shown for each input device's block.
block_ins = {
    'Wheel': 'OSD_ImgFolder/block_w.png',
    'dPad': 'OSD_ImgFolder/block_d.png',
}
# Start the experiment ----
# The LUTs (backgroundLUT/strLUT/requestLUT/indicatorLUT) and the helpers
# (getAnything/interpret_key/reponse_checker_BTS/determine_behavior_BTS)
# come in via the star imports above -- presumably BTS_Material and
# ResponseTrigger; confirm against those modules.
for block in range(2):
    # Show this block's instruction screen for its input device, wait 1 s.
    req_hw = hw_required[block]
    img = visual.ImageStim(my_win, image = block_ins[req_hw], pos = (0,0))
    img.draw()
    my_win.flip()
    core.wait(1)
    for iTrial in range(len(requestList)):
        # Per-trial state: position starts at column 1, row 0; the requested
        # target cell (reqCol, reqRow) is randomized each trial.
        trialStatus = 1
        iCol = 1
        iRow = 0
        reqButton = requestList[iTrial]
        reqCol = random.randrange(1,3)
        reqRow = random.randrange(0,4)
        BUTTON_STATUS = 'off'
        final_answer = 0
        stimuli_time = core.getTime()  # stimulus onset, for response times
        while trialStatus == 1:
            # Background (all blank)
            for image in range(5):
                img = visual.ImageStim(
                    my_win,
                    image = backgroundLUT[image]['path'],
                    pos = backgroundLUT[image]['position'])
                img.draw()
            # String (icon) layers up to the requested column
            for lay in range(reqCol+1):
                img = visual.ImageStim(
                    my_win,
                    image = strLUT[lay]['path'],
                    pos = strLUT[lay]['position'])
                img.draw()
            # UI buttons : On/Off
            for req in range(2):
                request = visual.ImageStim(
                    my_win,
                    image = requestLUT[reqButton][BUTTON_STATUS],
                    pos = indicatorLUT[reqCol]['position'][reqRow])
                request.draw()
            # Extra hint icon when the current cell matches the target.
            if requestLUT[reqButton]['hint'] == 1:
                if (iRow == reqRow and iCol == reqCol):
                    hint = visual.ImageStim(
                        my_win,
                        image = strLUT[reqCol+1]['hint'],
                        pos = strLUT[reqCol+1]['position'])
                    hint.draw()
            # Indicator: translucent rectangle marking the current (iCol, iRow) cell.
            indicator = visual.Rect(
                my_win,
                width = indicatorLUT[iCol]['width'],
                height = indicatorLUT[iCol]['height'],
                fillColor = SOLARIZED['grey01'], fillColorSpace='rgb255',
                lineColor = SOLARIZED['grey01'], lineColorSpace ='rgb255',
                pos= indicatorLUT[iCol]['position'][iRow], opacity = 0.5)
            if BUTTON_STATUS == 'on':
                # Target activated: show the confirmation icon, then the
                # between-trial screen, and end the trial.
                hint = visual.ImageStim(
                    my_win,
                    image = strLUT[reqCol+1]['path'],
                    pos = strLUT[reqCol+1]['position'])
                hint.draw()
                my_win.flip()
                core.wait(1)
                interval = visual.ImageStim(
                    my_win,
                    image = 'OSD_ImgFolder/BetweenTrial.png',
                    pos = (0,0), opacity = 0.9)
                for image in range(5):
                    img = visual.ImageStim(
                        my_win,
                        image = backgroundLUT[image]['path'],
                        pos = backgroundLUT[image]['position'])
                    img.draw()
                interval.draw()
                my_win.flip()
                core.wait(1)
                trialStatus = 0
            elif BUTTON_STATUS == 'off':
                indicator.draw()
                my_win.flip()
            # Get response (ignore a key still held from the previous frame)
            response_hw, response_key, response_status = getAnything(
                mouse, joy)
            if response_status == 1 and response_key != pre_key:
                key_meaning = interpret_key(response_hw, response_key)
                final_answer, BUTTON_STATUS = reponse_checker_BTS(
                    BUTTON_STATUS, req_hw, response_hw,
                    key_meaning, iRow, iCol, reqRow, reqCol)
                # Save responses once the target has been activated
                if BUTTON_STATUS == 'on':
                    current_time = core.getTime()
                    response.append([
                        req_hw, response_hw,
                        requestLUT[reqButton]['name'],
                        reqRow, reqCol,
                        key_meaning,
                        final_answer,
                        current_time - stimuli_time,
                        current_time
                    ])
                iRow, iCol, trialStatus = determine_behavior_BTS(
                    key_meaning, iRow, iCol)
            pre_key = response_key
# Close the window
my_win.close()
# Experiment record file: one line per recorded response, space-separated.
os.chdir('/Users/YJC/Dropbox/ExpRecord_BTS')
filename = '%s_%s.txt' % (today, username)
suffix = 0
while os.path.isfile(filename):  # never clobber an earlier run's file
    suffix += 1
    filename = '%s_%s_%d.txt' % (today, username, suffix)
with open(filename, 'w') as outfile:
    for record in response:
        for field in record:
            outfile.write("%s " % field)
        outfile.write("\n")
| [
"psychopy.hardware.joystick.getNumJoysticks",
"psychopy.visual.Rect",
"numpy.repeat",
"random.shuffle",
"psychopy.monitors.Monitor",
"psychopy.hardware.joystick.Joystick",
"psychopy.core.wait",
"psychopy.event.Mouse",
"random.randrange",
"os.chdir",
"numpy.array",
"os.path.isfile",
"psychopy... | [((376, 388), 'datetime.date.today', 'date.today', ([], {}), '()\n', (386, 388), False, 'from datetime import date\n'), ((989, 1057), 'psychopy.monitors.Monitor', 'monitors.Monitor', (['monitorname'], {'width': 'monitorwidth', 'distance': 'viewdist'}), '(monitorname, width=monitorwidth, distance=viewdist)\n', (1005, 1057), False, 'from psychopy import visual, event, core, monitors\n'), ((1355, 1498), 'psychopy.visual.Window', 'visual.Window', ([], {'size': '(2560, 1440)', 'pos': '(0, 0)', 'color': "SOLARIZED['base03']", 'colorSpace': '"""rgb255"""', 'monitor': 'mon', 'units': '"""pix"""', 'screen': '(0)', 'fullscr': '(1)'}), "(size=(2560, 1440), pos=(0, 0), color=SOLARIZED['base03'],\n colorSpace='rgb255', monitor=mon, units='pix', screen=0, fullscr=1)\n", (1368, 1498), False, 'from psychopy import visual, event, core, monitors\n'), ((1670, 1696), 'psychopy.hardware.joystick.getNumJoysticks', 'joystick.getNumJoysticks', ([], {}), '()\n', (1694, 1696), False, 'from psychopy.hardware import joystick\n'), ((1776, 1797), 'psychopy.hardware.joystick.Joystick', 'joystick.Joystick', (['id'], {}), '(id)\n', (1793, 1797), False, 'from psychopy.hardware import joystick\n'), ((1849, 1886), 'psychopy.event.Mouse', 'event.Mouse', ([], {'visible': '(True)', 'win': 'my_win'}), '(visible=True, win=my_win)\n', (1860, 1886), False, 'from psychopy import visual, event, core, monitors\n'), ((1982, 2001), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1990, 2001), True, 'import numpy as np\n'), ((2016, 2047), 'numpy.repeat', 'np.repeat', (['x', '[5, 5, 5]'], {'axis': '(0)'}), '(x, [5, 5, 5], axis=0)\n', (2025, 2047), True, 'import numpy as np\n'), ((2071, 2098), 'random.shuffle', 'random.shuffle', (['requestList'], {}), '(requestList)\n', (2085, 2098), False, 'import os, random\n'), ((6896, 6940), 'os.chdir', 'os.chdir', (['"""/Users/YJC/Dropbox/ExpRecord_BTS"""'], {}), "('/Users/YJC/Dropbox/ExpRecord_BTS')\n", (6904, 6940), False, 'import os, 
random\n'), ((7007, 7031), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (7021, 7031), False, 'import os, random\n'), ((2472, 2533), 'psychopy.visual.ImageStim', 'visual.ImageStim', (['my_win'], {'image': 'block_ins[req_hw]', 'pos': '(0, 0)'}), '(my_win, image=block_ins[req_hw], pos=(0, 0))\n', (2488, 2533), False, 'from psychopy import visual, event, core, monitors\n'), ((2574, 2586), 'psychopy.core.wait', 'core.wait', (['(1)'], {}), '(1)\n', (2583, 2586), False, 'from psychopy import visual, event, core, monitors\n'), ((2746, 2768), 'random.randrange', 'random.randrange', (['(1)', '(3)'], {}), '(1, 3)\n', (2762, 2768), False, 'import os, random\n'), ((2785, 2807), 'random.randrange', 'random.randrange', (['(0)', '(4)'], {}), '(0, 4)\n', (2801, 2807), False, 'import os, random\n'), ((2887, 2901), 'psychopy.core.getTime', 'core.getTime', ([], {}), '()\n', (2899, 2901), False, 'from psychopy import visual, event, core, monitors\n'), ((4179, 4451), 'psychopy.visual.Rect', 'visual.Rect', (['my_win'], {'width': "indicatorLUT[iCol]['width']", 'height': "indicatorLUT[iCol]['height']", 'fillColor': "SOLARIZED['grey01']", 'fillColorSpace': '"""rgb255"""', 'lineColor': "SOLARIZED['grey01']", 'lineColorSpace': '"""rgb255"""', 'pos': "indicatorLUT[iCol]['position'][iRow]", 'opacity': '(0.5)'}), "(my_win, width=indicatorLUT[iCol]['width'], height=indicatorLUT[\n iCol]['height'], fillColor=SOLARIZED['grey01'], fillColorSpace='rgb255',\n lineColor=SOLARIZED['grey01'], lineColorSpace='rgb255', pos=\n indicatorLUT[iCol]['position'][iRow], opacity=0.5)\n", (4190, 4451), False, 'from psychopy import visual, event, core, monitors\n'), ((3031, 3134), 'psychopy.visual.ImageStim', 'visual.ImageStim', (['my_win'], {'image': "backgroundLUT[image]['path']", 'pos': "backgroundLUT[image]['position']"}), "(my_win, image=backgroundLUT[image]['path'], pos=\n backgroundLUT[image]['position'])\n", (3047, 3134), False, 'from psychopy import visual, event, core, 
monitors\n'), ((3313, 3398), 'psychopy.visual.ImageStim', 'visual.ImageStim', (['my_win'], {'image': "strLUT[lay]['path']", 'pos': "strLUT[lay]['position']"}), "(my_win, image=strLUT[lay]['path'], pos=strLUT[lay]['position']\n )\n", (3329, 3398), False, 'from psychopy import visual, event, core, monitors\n'), ((3581, 3700), 'psychopy.visual.ImageStim', 'visual.ImageStim', (['my_win'], {'image': 'requestLUT[reqButton][BUTTON_STATUS]', 'pos': "indicatorLUT[reqCol]['position'][reqRow]"}), "(my_win, image=requestLUT[reqButton][BUTTON_STATUS], pos=\n indicatorLUT[reqCol]['position'][reqRow])\n", (3597, 3700), False, 'from psychopy import visual, event, core, monitors\n'), ((4615, 4714), 'psychopy.visual.ImageStim', 'visual.ImageStim', (['my_win'], {'image': "strLUT[reqCol + 1]['path']", 'pos': "strLUT[reqCol + 1]['position']"}), "(my_win, image=strLUT[reqCol + 1]['path'], pos=strLUT[\n reqCol + 1]['position'])\n", (4631, 4714), False, 'from psychopy import visual, event, core, monitors\n'), ((4857, 4869), 'psychopy.core.wait', 'core.wait', (['(1)'], {}), '(1)\n', (4866, 4869), False, 'from psychopy import visual, event, core, monitors\n'), ((4897, 4990), 'psychopy.visual.ImageStim', 'visual.ImageStim', (['my_win'], {'image': '"""OSD_ImgFolder/BetweenTrial.png"""', 'pos': '(0, 0)', 'opacity': '(0.9)'}), "(my_win, image='OSD_ImgFolder/BetweenTrial.png', pos=(0, 0),\n opacity=0.9)\n", (4913, 4990), False, 'from psychopy import visual, event, core, monitors\n'), ((5405, 5417), 'psychopy.core.wait', 'core.wait', (['(1)'], {}), '(1)\n', (5414, 5417), False, 'from psychopy import visual, event, core, monitors\n'), ((3929, 4028), 'psychopy.visual.ImageStim', 'visual.ImageStim', (['my_win'], {'image': "strLUT[reqCol + 1]['hint']", 'pos': "strLUT[reqCol + 1]['position']"}), "(my_win, image=strLUT[reqCol + 1]['hint'], pos=strLUT[\n reqCol + 1]['position'])\n", (3945, 4028), False, 'from psychopy import visual, event, core, monitors\n'), ((5119, 5222), 'psychopy.visual.ImageStim', 
'visual.ImageStim', (['my_win'], {'image': "backgroundLUT[image]['path']", 'pos': "backgroundLUT[image]['position']"}), "(my_win, image=backgroundLUT[image]['path'], pos=\n backgroundLUT[image]['position'])\n", (5135, 5222), False, 'from psychopy import visual, event, core, monitors\n'), ((6158, 6172), 'psychopy.core.getTime', 'core.getTime', ([], {}), '()\n', (6170, 6172), False, 'from psychopy import visual, event, core, monitors\n')] |
# Credit: https://github.com/danyaal/mandelbrot
import numpy as np
import matplotlib.pyplot as plt
class Mandelbrot():
    """
    Generates and displays Mandelbrot sets.

    Attributes:
        density (int): How finely the complex plane is sampled. A larger
            value gives a clearer picture and a longer run time.
    """

    def __init__(self, density):
        self.density = density

    def __itrUntilDivergent(self, complexP):
        """
        Count iterations of f(z) = z^2 + c before the orbit escapes.

        Parameters:
            complexP (complex): The candidate point c.

        Returns:
            int: Iterations until |z| exceeds 4, capped at 99.
        """
        z = complex(0, 0)
        i = 0
        while True:
            z = z * z + complexP
            if abs(z) > 4 or i == 99:
                return i
            i += 1

    def generate(self):
        """Compute the escape-time atlas and display it with matplotlib."""
        # Sample a rectangle of the complex plane.
        realAxis = np.linspace(-2.25, 0.75, self.density)
        imaginaryAxis = np.linspace(-1.5, 1.5, self.density)

        atlas = np.empty((len(realAxis), len(imaginaryAxis)))

        # Color each sample by its iteration count.
        for row, re_part in enumerate(realAxis):
            for col, im_part in enumerate(imaginaryAxis):
                atlas[row, col] = self.__itrUntilDivergent(complex(re_part, im_part))

        plt.imshow(atlas.T, cmap='Blues', interpolation="nearest")
        plt.axis("off")
        plt.show()
def main():
    """Render a 1000x1000 Mandelbrot set."""
    # Fix: the local was named `set`, shadowing the builtin of that name.
    fractal = Mandelbrot(1000)
    fractal.generate()


if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.imshow",
"numpy.linspace",
"numpy.empty",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((1151, 1189), 'numpy.linspace', 'np.linspace', (['(-2.25)', '(0.75)', 'self.density'], {}), '(-2.25, 0.75, self.density)\n', (1162, 1189), True, 'import numpy as np\n'), ((1214, 1250), 'numpy.linspace', 'np.linspace', (['(-1.5)', '(1.5)', 'self.density'], {}), '(-1.5, 1.5, self.density)\n', (1225, 1250), True, 'import numpy as np\n'), ((1399, 1440), 'numpy.empty', 'np.empty', (['(realAxisLen, imaginaryAxisLen)'], {}), '((realAxisLen, imaginaryAxisLen))\n', (1407, 1440), True, 'import numpy as np\n'), ((1808, 1866), 'matplotlib.pyplot.imshow', 'plt.imshow', (['atlas.T'], {'cmap': '"""Blues"""', 'interpolation': '"""nearest"""'}), "(atlas.T, cmap='Blues', interpolation='nearest')\n", (1818, 1866), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1890), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1883, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1899, 1909), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1907, 1909), True, 'import matplotlib.pyplot as plt\n')] |
from collections import OrderedDict
import re
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
from .utils import *
# Matches names of the form "base (123)", capturing the base name.
PC_RE = re.compile('(.+) \(\d+\)')
class VarEditor(object):
    def __init__(self, var=None):
        """Editor state for a variable.

        var: existing variable to edit, or None to create a new one.
        self.type encodes the variable kind as an index:
        0 = FCVariable, 1 = PCVariable, 2 = CCVariable.

        NOTE(review): if var is not None but not an instance of any of the
        three classes, self.type is left unset -- confirm callers never
        pass such an object.
        """
        if var is None:
            self.variable = None
            self.type = 0
        else:
            self.variable = var
            if isinstance(var, FCVariable):
                self.type = 0
            elif isinstance(var, PCVariable):
                self.type = 1
            elif isinstance(var, CCVariable):
                self.type = 2
def setupUi(self, Dialog):
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.dialog = Dialog
Dialog.setObjectName("Dialog")
Dialog.resize(600, 480)
Dialog.setMinimumSize(QtCore.QSize(600, 480))
Dialog.setMaximumSize(QtCore.QSize(600, 480))
Dialog.setSizeIncrement(QtCore.QSize(0, 0))
Dialog.setWindowTitle("New variable...")
self.centralwidget = QtWidgets.QWidget(Dialog)
self.centralwidget.setObjectName("centralwidget")
self.nameLabel = QtWidgets.QLabel(self.centralwidget)
self.nameLabel.setGeometry(QtCore.QRect(10, 15, 125, 20))
self.nameLabel.setFont(font)
self.nameLabel.setObjectName("nameLabel")
self.nameLabel.setText("Variable Name:")
self.nameEditor = QtWidgets.QLineEdit(self.centralwidget)
self.nameEditor.setGeometry(QtCore.QRect(140, 10, 450, 30))
self.nameEditor.setFont(font)
self.nameEditor.setObjectName("nameEditor")
self.doneBtn = QtWidgets.QPushButton(self.centralwidget)
self.doneBtn.setGeometry(QtCore.QRect(470, 450, 120, 30))
self.doneBtn.setFont(font)
self.doneBtn.setObjectName("doneBtn")
self.doneBtn.setText("Done!")
self.doneBtn.clicked.connect(lambda: self.submit())
self.cancelBtn = QtWidgets.QPushButton(self.centralwidget)
self.cancelBtn.setGeometry(QtCore.QRect(360, 450, 120, 30))
self.cancelBtn.setFont(font)
self.cancelBtn.setObjectName("cancelBtn")
self.cancelBtn.setText("Cancel")
self.cancelBtn.clicked.connect(lambda: self.cancel())
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 50, 600, 400))
self.tabWidget.setObjectName("tabWidget")
self.fc_tab = QtWidgets.QWidget()
self.fc_tab.setObjectName("fc_tab")
self.fc_list = QtWidgets.QListWidget(self.fc_tab)
self.fc_list.setGeometry(QtCore.QRect(25, 10, 550, 300))
self.fc_list.setFont(font)
self.fc_list.setDragEnabled(True)
self.fc_list.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.fc_list.setMovement(QtWidgets.QListView.Snap)
self.fc_list.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked | QtWidgets.QAbstractItemView.AnyKeyPressed)
self.fc_list.setObjectName("fc_list")
self.fc_new = QtWidgets.QPushButton(self.fc_tab)
self.fc_new.setGeometry(QtCore.QRect(20, 310, 150, 35))
self.fc_new.setFont(font)
self.fc_new.setObjectName("fc_new")
self.fc_new.setText("New value")
self.fc_new.clicked.connect(lambda: self.fcNewValue())
self.fc_del = QtWidgets.QPushButton(self.fc_tab)
self.fc_del.setGeometry(QtCore.QRect(20, 335, 150, 35))
self.fc_del.setFont(font)
self.fc_del.setObjectName("fc_del")
self.fc_del.setText("Remove value")
self.fc_del.setEnabled(False)
self.fc_del.clicked.connect(lambda: self.fcDelValue())
self.fc_list.currentItemChanged.connect(lambda curr, _: self.fcSelection(curr))
self.tabWidget.addTab(self.fc_tab, "")
self.pc_tab = QtWidgets.QWidget()
self.pc_tab.setObjectName("pc_tab")
self.pc_tree = QtWidgets.QTreeWidget(self.pc_tab)
self.pc_tree.setGeometry(QtCore.QRect(25, 10, 550, 300))
self.pc_tree.setFont(font)
self.pc_tree.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked)
self.pc_tree.setUniformRowHeights(True)
self.pc_tree.setAnimated(True)
self.pc_tree.setHeaderHidden(True)
self.pc_tree.setObjectName("pc_tree")
self.pc_tree.headerItem().setText(0, "Name")
self.pc_tree.currentItemChanged.connect(lambda curr, prev: self.pcRepaint(curr, prev))
self.pc_newgrp = QtWidgets.QPushButton(self.pc_tab)
self.pc_newgrp.setGeometry(QtCore.QRect(20, 310, 150, 35))
self.pc_newgrp.setFont(font)
self.pc_newgrp.setObjectName("pc_newgrp")
self.pc_newgrp.setText("New group")
self.pc_newgrp.clicked.connect(lambda: self.pcNewGroup())
self.pc_delgrp = QtWidgets.QPushButton(self.pc_tab)
self.pc_delgrp.setGeometry(QtCore.QRect(20, 335, 150, 35))
self.pc_delgrp.setFont(font)
self.pc_delgrp.setObjectName("pc_delgrp")
self.pc_delgrp.setText("Remove group")
self.pc_delgrp.setEnabled(False)
self.pc_delgrp.clicked.connect(lambda: self.pcDelGroup())
self.pc_newval = QtWidgets.QPushButton(self.pc_tab)
self.pc_newval.setGeometry(QtCore.QRect(175, 310, 150, 35))
self.pc_newval.setFont(font)
self.pc_newval.setObjectName("pc_newval")
self.pc_newval.setText("New value")
self.pc_newval.setEnabled(False)
self.pc_newval.clicked.connect(lambda: self.pcNewValue())
self.pc_delval = QtWidgets.QPushButton(self.pc_tab)
self.pc_delval.setGeometry(QtCore.QRect(175, 335, 150, 35))
self.pc_delval.setFont(font)
self.pc_delval.setObjectName("pc_delval")
self.pc_delval.setText("Remove value")
self.pc_delval.setEnabled(False)
self.pc_delval.clicked.connect(lambda: self.pcDelValue())
self.tabWidget.addTab(self.pc_tab, "")
self.cc_tab = QtWidgets.QWidget()
self.cc_tab.setObjectName("cc_tab")
self.cc_adjLabel = QtWidgets.QLabel(self.cc_tab)
self.cc_adjLabel.setGeometry(QtCore.QRect(10, 10, 200, 30))
self.cc_adjLabel.setFont(font)
self.cc_adjLabel.setObjectName("cc_adjLabel")
self.cc_adjLabel.setText("Adjacency matrix path:")
self.cc_browse = QtWidgets.QPushButton(self.cc_tab)
self.cc_browse.setGeometry(QtCore.QRect(200, 10, 100, 35))
self.cc_browse.setFont(font)
self.cc_browse.setObjectName("cc_browse")
self.cc_browse.setText("Browse...")
self.cc_browse.clicked.connect(lambda: self.ccBrowse())
self.cc_fileLabel = QtWidgets.QLabel(self.cc_tab)
self.cc_fileLabel.setGeometry(QtCore.QRect(300, 10, 300, 30))
self.cc_fileLabel.setFont(font)
self.cc_fileLabel.setObjectName("cc_fileLabel")
self.cc_namesLabel = QtWidgets.QLabel(self.cc_tab)
self.cc_namesLabel.setGeometry(QtCore.QRect(10, 40, 200, 30))
self.cc_namesLabel.setFont(font)
self.cc_namesLabel.setObjectName("cc_namesLabel")
self.cc_namesLabel.setText("Names:")
self.cc_nameList = QtWidgets.QListWidget(self.cc_tab)
self.cc_nameList.setGeometry(QtCore.QRect(25, 70, 550, 290))
self.cc_nameList.setFont(font)
self.cc_nameList.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked)
self.cc_nameList.setDragEnabled(True)
self.cc_nameList.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.cc_nameList.setMovement(QtWidgets.QListView.Snap)
self.cc_nameList.setObjectName("cc_nameList")
self.adj_matrix = None
self.path = None
self.tabWidget.addTab(self.cc_tab, "")
self.tabWidget.setTabText(self.tabWidget.indexOf(self.fc_tab), "Fully Connected")
self.tabWidget.setTabText(self.tabWidget.indexOf(self.pc_tab), "Partially Connected")
self.tabWidget.setTabText(self.tabWidget.indexOf(self.cc_tab), "Custom Connected")
if self.variable is not None:
self.loadVariable(self.variable)
self.tabWidget.setCurrentIndex(self.type)
QtCore.QMetaObject.connectSlotsByName(Dialog)
self.variable = None
def fcSelection(self, curr):
if curr is None:
self.fc_del.setEnabled(False)
else:
self.fc_del.setEnabled(True)
self.fc_new.setEnabled(True)
self.fc_new.repaint()
self.fc_del.repaint()
def fcNewValue(self):
item = QtWidgets.QListWidgetItem("new value (double-click to edit)")
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.fc_list.addItem(item)
self.fc_list.clearSelection()
item.setSelected(True)
if self.fc_list.state() == self.fc_list.State.EditingState:
self.fc_list.setState(self.fc_list.State.NoState)
self.fc_list.editItem(item)
self.fc_list.repaint()
def fcDelValue(self):
if len(self.fc_list.selectedIndexes()) == 0:
return
idx = self.fc_list.selectedIndexes()[0]
self.fc_list.takeItem(idx.row())
def pcRepaint(self, curr=None, prev=None):
# Repaint the tree itself
self.pc_tree.clearSelection()
if curr is not None:
curr.setSelected(True)
self.pc_tree.repaint()
# Chec prev to see if user clicked away from editing a group name.
if prev is not None and prev.parent() is None:
match = PC_RE.findall(prev.text(0))
if len(match) == 0:
prev.setText(0, f"{prev.text(0)} ({prev.childCount()})")
# Change buttons based on curr
if self.pc_tree.state() == self.pc_tree.State.EditingState:
self.pc_newgrp.setEnabled(False)
self.pc_delgrp.setEnabled(False)
self.pc_newval.setEnabled(False)
self.pc_delval.setEnabled(False)
elif curr is None: # No current selection
self.pc_newgrp.setEnabled(True)
self.pc_delgrp.setEnabled(False)
self.pc_newval.setEnabled(False)
self.pc_delval.setEnabled(False)
elif curr.parent() is None: # Current selection is a group
match = PC_RE.findall(curr.text(0))
if len(match) == 0:
curr.setText(0, f"{curr.text(0)} ({curr.childCount()})")
self.pc_newgrp.setEnabled(True)
self.pc_delgrp.setEnabled(True)
self.pc_newval.setEnabled(True)
self.pc_delval.setEnabled(False)
else: # Current selection is a value
self.pc_newgrp.setEnabled(True)
self.pc_delgrp.setEnabled(False)
self.pc_newval.setEnabled(True)
self.pc_delval.setEnabled(curr.parent().childCount() > 1)
self.pc_newgrp.repaint()
self.pc_delgrp.repaint()
self.pc_newval.repaint()
self.pc_delval.repaint()
def pcNewGroup(self):
group = QtWidgets.QTreeWidgetItem(self.pc_tree)
group.setText(0, "new group (double-click to edit) (1)")
group.setFlags(group.flags() | QtCore.Qt.ItemIsEditable)
group.setExpanded(True)
if self.pc_tree.state() == self.pc_tree.State.EditingState:
self.pc_tree.setState(self.pc_tree.State.NoState)
self.pc_tree.editItem(group)
item = QtWidgets.QTreeWidgetItem(group)
item.setText(0, "new value (double-click to edit)")
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.pcRepaint(group)
def pcDelGroup(self):
idx = self.pc_tree.selectedIndexes()[0]
group = self.pc_tree.itemFromIndex(idx)
if group.parent() is not None:
return
self.pc_tree.takeTopLevelItem(idx.row())
self.pcRepaint()
def pcNewValue(self):
group = self.pc_tree.selectedItems()[0]
if group.parent() is not None:
group = group.parent()
group_name = group.text(0)
match = PC_RE.findall(group_name)
if match:
group_name = match[0]
item = QtWidgets.QTreeWidgetItem(group)
group.setText(0, f"{group_name} ({group.childCount()})")
item.setText(0, "new value (double-click to edit)")
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.pcRepaint(item, None)
if self.pc_tree.state() == self.pc_tree.State.EditingState:
self.pc_tree.setState(self.pc_tree.State.NoState)
self.pc_tree.editItem(item)
def pcDelValue(self):
item = self.pc_tree.selectedItems()[0]
group = item.parent()
if group is None:
return
idx = group.indexOfChild(item)
group.takeChild(idx)
group_name = PC_RE.findall(group.text(0))[0]
group.setText(0, f"{group_name} ({group.childCount()})")
self.pcRepaint()
def ccBrowse(self):
fname = QtWidgets.QFileDialog.getOpenFileName(None, 'Open file', '~', "Numpy array files (*.npy)")[0]
if not fname:
return
try:
arr = np.load(fname, allow_pickle=True)
except ValueError or OSError:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage(f'Could not read {fname}. Please check that it is a Numpy array saved in the npy file format.')
return
if len(arr.shape) != 2 or arr.shape[0] != arr.shape[1] or (arr != arr.T).any():
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage(f'{fname} must contain a 2-D symmetric array.')
return
self.path = fname
if len(fname.split('/')) > 1:
self.cc_fileLabel.setText(fname.split('/')[-1])
else:
self.cc_fileLabel.setText(fname.split('\\')[-1])
for i in range(arr.shape[0]):
item = QtWidgets.QListWidgetItem(f"Item {i} (double-click to edit)")
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.cc_nameList.addItem(item)
self.adj_matrix = arr
def loadVariable(self, var):
self.nameEditor.setText(var.name)
if isinstance(var, FCVariable):
for name in var.values:
item = QtWidgets.QListWidgetItem(name)
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.fc_list.addItem(item)
elif isinstance(var, PCVariable):
for group, names in var.groups.items():
group_item = QtWidgets.QTreeWidgetItem(self.pc_tree)
group_item.setText(0, f"{group} ({len(names)})")
group_item.setFlags(group_item.flags() | QtCore.Qt.ItemIsEditable)
group_item.setExpanded(True)
for name in names:
item = QtWidgets.QTreeWidgetItem(group_item)
item.setText(0, name)
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
elif isinstance(var, CCVariable):
if var.path is not None:
self.cc_fileLabel.setText(var.path)
for name in var.labels:
item = QtWidgets.QListWidgetItem(name)
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.cc_nameList.addItem(item)
self.adj_matrix = var.adj_matrix
def cancel(self):
box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, "Cancel",
"Are you sure you want to cancel? You will lose all of your changes!",
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
reply = box.exec()
if reply == QtWidgets.QMessageBox.Yes:
self.dialog.reject()
else:
return
def submit(self):
name = self.nameEditor.text()
if not name:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage('The variable must have a name.')
self.nameEditor.setFocus()
return
tab_idx = self.tabWidget.currentIndex()
if tab_idx == self.tabWidget.indexOf(self.fc_tab):
count = self.fc_list.count()
if count == 0:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage('The fully connected variable must contain values.')
return
values = [self.fc_list.item(i).text() for i in range(count)]
self.variable = FCVariable(name, values)
self.dialog.accept()
elif tab_idx == self.tabWidget.indexOf(self.pc_tab):
group_count = self.pc_tree.topLevelItemCount()
if group_count == 0:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage('The partially connected variable must contain groups.')
return
groups = OrderedDict()
for group_idx in range(group_count):
group = self.pc_tree.topLevelItem(group_idx)
value_count = group.childCount()
if value_count == 0:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage('Every group must contain at least one item.')
return
group_name = group.text(0)
match = PC_RE.findall(group_name)
if match:
group_name = match[0]
groups[group_name] = []
for i in range(value_count):
groups[group_name].append(group.child(i).text(0))
self.variable = PCVariable(name, groups)
self.dialog.accept()
elif tab_idx == self.tabWidget.indexOf(self.cc_tab):
if self.adj_matrix is None:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage('The custom connected variable must include an adjacency matrix.')
return
labels = [self.cc_nameList.item(i).text() for i in range(self.cc_nameList.count())]
path = self.cc_fileLabel.text()
path = path if path else None
self.variable = CCVariable(name, labels, self.adj_matrix, path)
self.dialog.accept()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = VarEditor()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| [
"re.compile",
"PyQt5.QtWidgets.QMessageBox",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"PyQt5.QtWidgets.QListWidgetItem",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QWidget",
"collections.OrderedDict",... | [((140, 169), 're.compile', 're.compile', (['"""(.+) \\\\(\\\\d+\\\\)"""'], {}), "('(.+) \\\\(\\\\d+\\\\)')\n", (150, 169), False, 'import re\n'), ((18297, 18329), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (18319, 18329), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18343, 18362), 'PyQt5.QtWidgets.QDialog', 'QtWidgets.QDialog', ([], {}), '()\n', (18360, 18362), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((629, 642), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (640, 642), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1048, 1073), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['Dialog'], {}), '(Dialog)\n', (1065, 1073), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1158, 1194), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1174, 1194), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1423, 1462), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1442, 1462), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1645, 1686), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1666, 1686), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1957, 1998), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1978, 1998), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2283, 2323), 'PyQt5.QtWidgets.QTabWidget', 'QtWidgets.QTabWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2303, 2323), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2462, 2481), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (2479, 2481), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2550, 2584), 'PyQt5.QtWidgets.QListWidget', 
'QtWidgets.QListWidget', (['self.fc_tab'], {}), '(self.fc_tab)\n', (2571, 2584), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3058, 3092), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.fc_tab'], {}), '(self.fc_tab)\n', (3079, 3092), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3362, 3396), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.fc_tab'], {}), '(self.fc_tab)\n', (3383, 3396), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3844, 3863), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (3861, 3863), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3932, 3966), 'PyQt5.QtWidgets.QTreeWidget', 'QtWidgets.QTreeWidget', (['self.pc_tab'], {}), '(self.pc_tab)\n', (3953, 3966), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4497, 4531), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.pc_tab'], {}), '(self.pc_tab)\n', (4518, 4531), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4822, 4856), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.pc_tab'], {}), '(self.pc_tab)\n', (4843, 4856), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5191, 5225), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.pc_tab'], {}), '(self.pc_tab)\n', (5212, 5225), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5558, 5592), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.pc_tab'], {}), '(self.pc_tab)\n', (5579, 5592), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5973, 5992), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (5990, 5992), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6064, 6093), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.cc_tab'], {}), '(self.cc_tab)\n', (6080, 6093), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6340, 6374), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.cc_tab'], {}), 
'(self.cc_tab)\n', (6361, 6374), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6666, 6695), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.cc_tab'], {}), '(self.cc_tab)\n', (6682, 6695), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6892, 6921), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.cc_tab'], {}), '(self.cc_tab)\n', (6908, 6921), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7163, 7197), 'PyQt5.QtWidgets.QListWidget', 'QtWidgets.QListWidget', (['self.cc_tab'], {}), '(self.cc_tab)\n', (7184, 7197), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8158, 8203), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Dialog'], {}), '(Dialog)\n', (8195, 8203), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8528, 8589), 'PyQt5.QtWidgets.QListWidgetItem', 'QtWidgets.QListWidgetItem', (['"""new value (double-click to edit)"""'], {}), "('new value (double-click to edit)')\n", (8553, 8589), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10983, 11022), 'PyQt5.QtWidgets.QTreeWidgetItem', 'QtWidgets.QTreeWidgetItem', (['self.pc_tree'], {}), '(self.pc_tree)\n', (11008, 11022), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11367, 11399), 'PyQt5.QtWidgets.QTreeWidgetItem', 'QtWidgets.QTreeWidgetItem', (['group'], {}), '(group)\n', (11392, 11399), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12101, 12133), 'PyQt5.QtWidgets.QTreeWidgetItem', 'QtWidgets.QTreeWidgetItem', (['group'], {}), '(group)\n', (12126, 12133), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15404, 15600), 'PyQt5.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', (['QtWidgets.QMessageBox.Warning', '"""Cancel"""', '"""Are you sure you want to cancel? You will lose all of your changes!"""', '(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)'], {}), "(QtWidgets.QMessageBox.Warning, 'Cancel',\n 'Are you sure you want to cancel? 
You will lose all of your changes!', \n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n", (15425, 15600), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((839, 861), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(600)', '(480)'], {}), '(600, 480)\n', (851, 861), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((893, 915), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(600)', '(480)'], {}), '(600, 480)\n', (905, 915), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((949, 967), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(0)'], {}), '(0, 0)\n', (961, 967), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1230, 1259), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(15)', '(125)', '(20)'], {}), '(10, 15, 125, 20)\n', (1242, 1259), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1499, 1529), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(140)', '(10)', '(450)', '(30)'], {}), '(140, 10, 450, 30)\n', (1511, 1529), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1720, 1751), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(470)', '(450)', '(120)', '(30)'], {}), '(470, 450, 120, 30)\n', (1732, 1751), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2034, 2065), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(360)', '(450)', '(120)', '(30)'], {}), '(360, 450, 120, 30)\n', (2046, 2065), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2359, 2388), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(50)', '(600)', '(400)'], {}), '(0, 50, 600, 400)\n', (2371, 2388), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2618, 2648), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(25)', '(10)', '(550)', '(300)'], {}), '(25, 10, 550, 300)\n', (2630, 2648), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3125, 3155), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(310)', '(150)', '(35)'], {}), '(20, 310, 150, 35)\n', (3137, 3155), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3429, 3459), 
'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(335)', '(150)', '(35)'], {}), '(20, 335, 150, 35)\n', (3441, 3459), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4000, 4030), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(25)', '(10)', '(550)', '(300)'], {}), '(25, 10, 550, 300)\n', (4012, 4030), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4567, 4597), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(310)', '(150)', '(35)'], {}), '(20, 310, 150, 35)\n', (4579, 4597), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4892, 4922), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(335)', '(150)', '(35)'], {}), '(20, 335, 150, 35)\n', (4904, 4922), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5261, 5292), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(175)', '(310)', '(150)', '(35)'], {}), '(175, 310, 150, 35)\n', (5273, 5292), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5628, 5659), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(175)', '(335)', '(150)', '(35)'], {}), '(175, 335, 150, 35)\n', (5640, 5659), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6131, 6160), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(200)', '(30)'], {}), '(10, 10, 200, 30)\n', (6143, 6160), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6410, 6440), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(10)', '(100)', '(35)'], {}), '(200, 10, 100, 35)\n', (6422, 6440), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6734, 6764), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(300)', '(10)', '(300)', '(30)'], {}), '(300, 10, 300, 30)\n', (6746, 6764), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6961, 6990), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(40)', '(200)', '(30)'], {}), '(10, 40, 200, 30)\n', (6973, 6990), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7235, 7265), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(25)', '(70)', '(550)', '(290)'], {}), '(25, 70, 550, 290)\n', 
(7247, 7265), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12932, 13026), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['None', '"""Open file"""', '"""~"""', '"""Numpy array files (*.npy)"""'], {}), "(None, 'Open file', '~',\n 'Numpy array files (*.npy)')\n", (12969, 13026), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13099, 13132), 'numpy.load', 'np.load', (['fname'], {'allow_pickle': '(True)'}), '(fname, allow_pickle=True)\n', (13106, 13132), True, 'import numpy as np\n'), ((13475, 13511), 'PyQt5.QtWidgets.QErrorMessage', 'QtWidgets.QErrorMessage', (['self.dialog'], {}), '(self.dialog)\n', (13498, 13511), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13873, 13934), 'PyQt5.QtWidgets.QListWidgetItem', 'QtWidgets.QListWidgetItem', (['f"""Item {i} (double-click to edit)"""'], {}), "(f'Item {i} (double-click to edit)')\n", (13898, 13934), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15865, 15901), 'PyQt5.QtWidgets.QErrorMessage', 'QtWidgets.QErrorMessage', (['self.dialog'], {}), '(self.dialog)\n', (15888, 15901), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13189, 13225), 'PyQt5.QtWidgets.QErrorMessage', 'QtWidgets.QErrorMessage', (['self.dialog'], {}), '(self.dialog)\n', (13212, 13225), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((14263, 14294), 'PyQt5.QtWidgets.QListWidgetItem', 'QtWidgets.QListWidgetItem', (['name'], {}), '(name)\n', (14288, 14294), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16219, 16255), 'PyQt5.QtWidgets.QErrorMessage', 'QtWidgets.QErrorMessage', (['self.dialog'], {}), '(self.dialog)\n', (16242, 16255), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16868, 16881), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16879, 16881), False, 'from collections import OrderedDict\n'), ((14532, 14571), 'PyQt5.QtWidgets.QTreeWidgetItem', 'QtWidgets.QTreeWidgetItem', (['self.pc_tree'], {}), 
'(self.pc_tree)\n', (14557, 14571), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((16698, 16734), 'PyQt5.QtWidgets.QErrorMessage', 'QtWidgets.QErrorMessage', (['self.dialog'], {}), '(self.dialog)\n', (16721, 16734), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((14827, 14864), 'PyQt5.QtWidgets.QTreeWidgetItem', 'QtWidgets.QTreeWidgetItem', (['group_item'], {}), '(group_item)\n', (14852, 14864), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((15172, 15203), 'PyQt5.QtWidgets.QListWidgetItem', 'QtWidgets.QListWidgetItem', (['name'], {}), '(name)\n', (15197, 15203), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17105, 17141), 'PyQt5.QtWidgets.QErrorMessage', 'QtWidgets.QErrorMessage', (['self.dialog'], {}), '(self.dialog)\n', (17128, 17141), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17794, 17830), 'PyQt5.QtWidgets.QErrorMessage', 'QtWidgets.QErrorMessage', (['self.dialog'], {}), '(self.dialog)\n', (17817, 17830), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
import context
import io
import json
import requests
import numpy as np
import pandas as pd
import xarray as xr
from glob import glob
from pathlib import Path
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from wrf import ll_to_xy, xy_to_ll
from datetime import datetime, date, timedelta
from context import data_dir, root_dir, fwf_dir
def daily_merge_ds(date_to_merge, domain, wrf_model):
    """Merge noon-local means of hourly FWF fields into the daily dataset.

    For each full forecast day, DSR/F/R/S are averaged over the three
    hours centred on local noon (offsets taken from the static
    time-zone grid) and merged into the existing daily file.

    Parameters
    ----------
    date_to_merge : str
        Forecast date used in the fwf file names.
    domain : str
        Model domain identifier used in the file names.
    wrf_model : str
        WRF model name used to locate the static time-zone file.

    Returns
    -------
    xarray.Dataset or None
        The merged daily dataset, or None when the hourly file is missing.
    """
    hourly_file_dir = str(fwf_dir) + str(f"/fwf-hourly-{domain}-{date_to_merge}.nc")
    daily_file_dir = str(fwf_dir) + str(f"/fwf-daily-{domain}-{date_to_merge}.nc")

    ### Open datasets
    my_dir = Path(hourly_file_dir)
    if my_dir.is_file():
        hourly_ds = xr.open_dataset(hourly_file_dir)
        daily_ds = xr.open_dataset(daily_file_dir)

        ### Call on variables
        static_ds = xr.open_dataset(
            str(data_dir) + f"/static/static-vars-{wrf_model}-{domain}.nc"
        )
        tzone = static_ds.ZoneDT.values
        shape = tzone.shape
        ## create I, J for quick indexing
        I, J = np.ogrid[: shape[0], : shape[1]]

        time_array = hourly_ds.Time.values
        try:
            int_time = int(pd.Timestamp(time_array[0]).hour)
        except (IndexError, TypeError):
            # time_array is a 0-d scalar when only one time step exists
            int_time = int(pd.Timestamp(time_array).hour)
        length = len(time_array) + 1
        num_days = [i - 12 for i in range(1, length) if i % 24 == 0]
        # Index of UTC noon shifted so each entry lands on a valid hour.
        index = [
            i - int_time if 12 - int_time >= 0 else i + 24 - int_time for i in num_days
        ]

        ## loop every 24 hours
        files_ds = []
        for i in index:
            ## average the hour before, of, and after local noon per variable
            mean_da = []
            for var in ["DSR", "F", "R", "S"]:
                # NOTE: a dead `if var == "SNOWC"` branch was removed here;
                # "SNOWC" can never appear in the list above.
                var_array = hourly_ds[var].values
                noon_minus = var_array[(i + tzone - 1), I, J]
                noon = var_array[(i + tzone), I, J]
                noon_pluse = var_array[(i + tzone + 1), I, J]
                noon_mean = (noon_minus + noon + noon_pluse) / 3
                day = np.array(hourly_ds.Time[i + 1], dtype="datetime64[D]")
                var_da = xr.DataArray(
                    noon_mean,
                    name=var,
                    dims=("south_north", "west_east"),
                    coords=hourly_ds.isel(time=i).coords,
                )
                var_da["Time"] = day
                var_da.attrs = hourly_ds[var].attrs
                mean_da.append(var_da)

            mean_ds = xr.merge(mean_da)
            files_ds.append(mean_ds)

        hourly_daily_ds = xr.combine_nested(files_ds, "time")
        final_ds = xr.merge([daily_ds, hourly_daily_ds], compat="override")
        final_ds.attrs = hourly_ds.attrs
    else:
        final_ds = None

    return final_ds
def daily_merge_ds_rerun(date_to_merge, domain, wrf_model):
    """Rerun variant of ``daily_merge_ds`` that reads from the archive path.

    Identical logic to ``daily_merge_ds`` but the hourly/daily files are
    read from /bluesky/archive/fireweather/data instead of ``fwf_dir``.

    Parameters
    ----------
    date_to_merge : str
        Forecast date used in the fwf file names.
    domain : str
        Model domain identifier used in the file names.
    wrf_model : str
        WRF model name used to locate the static time-zone file.

    Returns
    -------
    xarray.Dataset or None
        The merged daily dataset, or None when the hourly file is missing.
    """
    hourly_file_dir = str(
        f"/bluesky/archive/fireweather/data/fwf-hourly-{domain}-{date_to_merge}.nc"
    )
    daily_file_dir = str(
        f"/bluesky/archive/fireweather/data/fwf-daily-{domain}-{date_to_merge}.nc"
    )

    ### Open datasets
    my_dir = Path(hourly_file_dir)
    if my_dir.is_file():
        hourly_ds = xr.open_dataset(hourly_file_dir)
        daily_ds = xr.open_dataset(daily_file_dir)

        ### Call on variables
        static_ds = xr.open_dataset(
            str(data_dir) + f"/static/static-vars-{wrf_model}-{domain}.nc"
        )
        tzone = static_ds.ZoneDT.values
        shape = tzone.shape
        ## create I, J for quick indexing
        I, J = np.ogrid[: shape[0], : shape[1]]

        time_array = hourly_ds.Time.values
        try:
            int_time = int(pd.Timestamp(time_array[0]).hour)
        except (IndexError, TypeError):
            # time_array is a 0-d scalar when only one time step exists
            int_time = int(pd.Timestamp(time_array).hour)
        length = len(time_array) + 1
        num_days = [i - 12 for i in range(1, length) if i % 24 == 0]
        # Index of UTC noon shifted so each entry lands on a valid hour.
        index = [
            i - int_time if 12 - int_time >= 0 else i + 24 - int_time for i in num_days
        ]

        ## loop every 24 hours
        files_ds = []
        for i in index:
            ## average the hour before, of, and after local noon per variable
            mean_da = []
            for var in ["DSR", "F", "R", "S"]:
                # NOTE: a dead `if var == "SNOWC"` branch was removed here;
                # "SNOWC" can never appear in the list above.
                var_array = hourly_ds[var].values
                noon_minus = var_array[(i + tzone - 1), I, J]
                noon = var_array[(i + tzone), I, J]
                noon_pluse = var_array[(i + tzone + 1), I, J]
                noon_mean = (noon_minus + noon + noon_pluse) / 3
                day = np.array(hourly_ds.Time[i + 1], dtype="datetime64[D]")
                var_da = xr.DataArray(
                    noon_mean,
                    name=var,
                    dims=("south_north", "west_east"),
                    coords=hourly_ds.isel(time=i).coords,
                )
                var_da["Time"] = day
                var_da.attrs = hourly_ds[var].attrs
                mean_da.append(var_da)

            mean_ds = xr.merge(mean_da)
            files_ds.append(mean_ds)

        hourly_daily_ds = xr.combine_nested(files_ds, "time")
        final_ds = xr.merge([daily_ds, hourly_daily_ds], compat="override")
        final_ds.attrs = hourly_ds.attrs
    else:
        final_ds = None

    return final_ds
| [
"xarray.merge",
"pathlib.Path",
"numpy.array",
"pandas.Timestamp",
"xarray.open_dataset",
"xarray.combine_nested"
] | [((611, 632), 'pathlib.Path', 'Path', (['hourly_file_dir'], {}), '(hourly_file_dir)\n', (615, 632), False, 'from pathlib import Path\n'), ((3895, 3916), 'pathlib.Path', 'Path', (['hourly_file_dir'], {}), '(hourly_file_dir)\n', (3899, 3916), False, 'from pathlib import Path\n'), ((678, 710), 'xarray.open_dataset', 'xr.open_dataset', (['hourly_file_dir'], {}), '(hourly_file_dir)\n', (693, 710), True, 'import xarray as xr\n'), ((731, 762), 'xarray.open_dataset', 'xr.open_dataset', (['daily_file_dir'], {}), '(daily_file_dir)\n', (746, 762), True, 'import xarray as xr\n'), ((3356, 3391), 'xarray.combine_nested', 'xr.combine_nested', (['files_ds', '"""time"""'], {}), "(files_ds, 'time')\n", (3373, 3391), True, 'import xarray as xr\n'), ((3411, 3467), 'xarray.merge', 'xr.merge', (['[daily_ds, hourly_daily_ds]'], {'compat': '"""override"""'}), "([daily_ds, hourly_daily_ds], compat='override')\n", (3419, 3467), True, 'import xarray as xr\n'), ((3962, 3994), 'xarray.open_dataset', 'xr.open_dataset', (['hourly_file_dir'], {}), '(hourly_file_dir)\n', (3977, 3994), True, 'import xarray as xr\n'), ((4015, 4046), 'xarray.open_dataset', 'xr.open_dataset', (['daily_file_dir'], {}), '(daily_file_dir)\n', (4030, 4046), True, 'import xarray as xr\n'), ((6640, 6675), 'xarray.combine_nested', 'xr.combine_nested', (['files_ds', '"""time"""'], {}), "(files_ds, 'time')\n", (6657, 6675), True, 'import xarray as xr\n'), ((6695, 6751), 'xarray.merge', 'xr.merge', (['[daily_ds, hourly_daily_ds]'], {'compat': '"""override"""'}), "([daily_ds, hourly_daily_ds], compat='override')\n", (6703, 6751), True, 'import xarray as xr\n'), ((3274, 3291), 'xarray.merge', 'xr.merge', (['mean_da'], {}), '(mean_da)\n', (3282, 3291), True, 'import xarray as xr\n'), ((6558, 6575), 'xarray.merge', 'xr.merge', (['mean_da'], {}), '(mean_da)\n', (6566, 6575), True, 'import xarray as xr\n'), ((1156, 1183), 'pandas.Timestamp', 'pd.Timestamp', (['time_array[0]'], {}), '(time_array[0])\n', (1168, 1183), True, 'import 
pandas as pd\n'), ((1939, 1993), 'numpy.array', 'np.array', (['hourly_ds.Time[i + 1]'], {'dtype': '"""datetime64[D]"""'}), "(hourly_ds.Time[i + 1], dtype='datetime64[D]')\n", (1947, 1993), True, 'import numpy as np\n'), ((2801, 2855), 'numpy.array', 'np.array', (['hourly_ds.Time[i + 1]'], {'dtype': '"""datetime64[D]"""'}), "(hourly_ds.Time[i + 1], dtype='datetime64[D]')\n", (2809, 2855), True, 'import numpy as np\n'), ((4440, 4467), 'pandas.Timestamp', 'pd.Timestamp', (['time_array[0]'], {}), '(time_array[0])\n', (4452, 4467), True, 'import pandas as pd\n'), ((5223, 5277), 'numpy.array', 'np.array', (['hourly_ds.Time[i + 1]'], {'dtype': '"""datetime64[D]"""'}), "(hourly_ds.Time[i + 1], dtype='datetime64[D]')\n", (5231, 5277), True, 'import numpy as np\n'), ((6085, 6139), 'numpy.array', 'np.array', (['hourly_ds.Time[i + 1]'], {'dtype': '"""datetime64[D]"""'}), "(hourly_ds.Time[i + 1], dtype='datetime64[D]')\n", (6093, 6139), True, 'import numpy as np\n'), ((1233, 1257), 'pandas.Timestamp', 'pd.Timestamp', (['time_array'], {}), '(time_array)\n', (1245, 1257), True, 'import pandas as pd\n'), ((4517, 4541), 'pandas.Timestamp', 'pd.Timestamp', (['time_array'], {}), '(time_array)\n', (4529, 4541), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 18 10:40:44 2017
@author: <EMAIL>
"""
"""
Exercise
Given the logistic regression presented above and its validation given a 5 folds CV.
Compute the p-value associated with the prediction accuracy using a permutation test.
Compute the p-value associated with the prediction accuracy using a parametric test.
"""
# NOTE(review): only the permutation-test half of the exercise is implemented
# below; the parametric test mentioned above is missing. TODO confirm intent.
import numpy as np
from sklearn import datasets
import sklearn.linear_model as lm
import sklearn.metrics as metrics
from sklearn.model_selection import StratifiedKFold
# Synthetic binary classification problem: 100 samples, 100 features,
# 10 informative; fixed seed so the observed scores are reproducible
# (permutations themselves are NOT seeded, so p-values vary run to run).
X, y = datasets.make_classification(n_samples=100, n_features=100,
                             n_informative=10, random_state=42)
model = lm.LogisticRegression(C=1)
# Permutation scheme: iteration 0 uses the true labels, so scores_perm[0]
# holds the observed scores; iterations 1..nperm-1 use permuted labels and
# form the empirical null distribution.
nperm = 100
scores_perm= np.zeros((nperm, 3)) # 3 scores acc, recall0, recall1
for perm in range(0, nperm):
    # perm = 0; y == yp
    # first run on non-permuted samples
    yp = y if perm == 0 else np.random.permutation(y)
    # CV loop: collect out-of-fold predictions for every sample
    y_test_pred = np.zeros(len(yp))
    cv = StratifiedKFold(5)
    # NOTE(review): folds are stratified on the original labels `y`, not the
    # permuted `yp` — presumably intentional (keeps the fold structure fixed
    # across permutations); confirm. `y_test` below is assigned but unused:
    # the scores are computed on the full `yp` vector after the CV loop.
    for train, test in cv.split(X, y):
        X_train, X_test, y_train, y_test = X[train, :], X[test, :], yp[train], yp[test]
        model.fit(X_train, y_train)
        y_test_pred[test] = model.predict(X_test)
    # Accuracy plus per-class recall (recall of class 0 = specificity,
    # recall of class 1 = sensitivity).
    scores_perm[perm, 0] = metrics.accuracy_score(yp, y_test_pred)
    scores_perm[perm, [1, 2]] = metrics.recall_score(yp, y_test_pred, average=None)
# Empirical permutation based p-values: fraction of scores (permuted plus
# the observed one, which counts itself) reaching the observed score, so
# the smallest attainable p-value is 1 / nperm.
pval = np.sum(scores_perm >= scores_perm[0, :], axis=0) / nperm
print("ACC:%.2f(P=%.3f); SPC:%.2f(P=%.3f); SEN:%.2f(P=%.3f)" %\
      (scores_perm[0, 0], pval[0],
       scores_perm[0, 1], pval[1],
       scores_perm[0, 2], pval[2]))
| [
"numpy.random.permutation",
"sklearn.linear_model.LogisticRegression",
"sklearn.model_selection.StratifiedKFold",
"sklearn.metrics.recall_score",
"numpy.zeros",
"numpy.sum",
"sklearn.metrics.accuracy_score",
"sklearn.datasets.make_classification"
] | [((571, 670), 'sklearn.datasets.make_classification', 'datasets.make_classification', ([], {'n_samples': '(100)', 'n_features': '(100)', 'n_informative': '(10)', 'random_state': '(42)'}), '(n_samples=100, n_features=100, n_informative=\n 10, random_state=42)\n', (599, 670), False, 'from sklearn import datasets\n'), ((700, 726), 'sklearn.linear_model.LogisticRegression', 'lm.LogisticRegression', ([], {'C': '(1)'}), '(C=1)\n', (721, 726), True, 'import sklearn.linear_model as lm\n'), ((752, 772), 'numpy.zeros', 'np.zeros', (['(nperm, 3)'], {}), '((nperm, 3))\n', (760, 772), True, 'import numpy as np\n'), ((1014, 1032), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(5)'], {}), '(5)\n', (1029, 1032), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1273, 1312), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['yp', 'y_test_pred'], {}), '(yp, y_test_pred)\n', (1295, 1312), True, 'import sklearn.metrics as metrics\n'), ((1345, 1396), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['yp', 'y_test_pred'], {'average': 'None'}), '(yp, y_test_pred, average=None)\n', (1365, 1396), True, 'import sklearn.metrics as metrics\n'), ((1444, 1492), 'numpy.sum', 'np.sum', (['(scores_perm >= scores_perm[0, :])'], {'axis': '(0)'}), '(scores_perm >= scores_perm[0, :], axis=0)\n', (1450, 1492), True, 'import numpy as np\n'), ((930, 954), 'numpy.random.permutation', 'np.random.permutation', (['y'], {}), '(y)\n', (951, 954), True, 'import numpy as np\n')] |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Detection output visualization module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from PIL import Image
import cv2
import numpy as np
import os
import pycocotools.mask as mask_util
from utils.colormap import colormap
# import utils.keypoints as keypoint_utils
# Matplotlib requires certain adjustments in some environments
# Must happen before importing matplotlib
import matplotlib
matplotlib.use('Agg') # Use a non-interactive backend
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import ipdb
plt.rcParams['pdf.fonttype'] = 42 # For editing in Adobe Illustrator
# Drawing colors as 8-bit triples, consumed by the cv2/matplotlib drawing
# calls below (channel order as expected by cv2 — confirm BGR vs RGB
# against the images actually passed in).
_GRAY = (218, 227, 218)
_GREEN = (18, 127, 15)
_WHITE = (255, 255, 255)
def get_class_string(class_index, score, dataset):
    """Return a display label for *class_index*.

    Looks the label up in *dataset* when one is given, otherwise falls back
    to a generic ``id<N>`` string. *score* is accepted for call-site
    compatibility but is not included in the label (the score suffix is
    intentionally disabled).
    """
    if dataset is None:
        return 'id{:d}'.format(class_index)
    return dataset[class_index]
def vis_bbox(img, bbox, thick=1):
    """Draw one bounding box, given as ``(x0, y0, w, h)``, onto ``img``.

    The box is drawn in place with the module's ``_GREEN`` color and the
    (possibly modified) image is returned.
    """
    x0, y0, w, h = bbox
    top_left = (int(x0), int(y0))
    bottom_right = (int(x0 + w), int(y0 + h))
    cv2.rectangle(img, top_left, bottom_right, _GREEN, thickness=thick)
    return img
def vis_one_image(
    im, im_name, output_dir, classes, boxes, masks=None, keypoints=None, thresh=0.9,
    kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
    ext='pdf', show=False, W=224, H=224):
    """Visual debugging of detections.

    Renders ``im`` with detection boxes (and optional class labels and
    segmentation masks) on top, then saves the figure to
    ``output_dir/<basename(im_name)>.<ext>``.

    NOTE: ``boxes`` is rescaled *in place* by W/H below — the caller's array
    is mutated. ``keypoints`` and ``kp_thresh`` are currently unused here.
    Boxes are assumed to be rows of (x0, y0, x1, y1, score) in normalized
    coordinates — TODO confirm against callers.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    color_list = colormap(rgb=True) / 255
    # Figure sized so one image pixel maps to one figure pixel; no axes.
    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)
    # Scale normalized box coordinates up to pixel coordinates (in place).
    boxes[:,0] *= W
    boxes[:, 2] *= W
    boxes[:,1] *= H
    boxes[:, 3] *= H
    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)
    # torch to numpy
    # ipdb.set_trace()
    if masks is not None:
        # uint8
        masks = masks.astype('uint8')
        # rescale
        w_masks, h_masks, _ = masks.shape
    mask_color_id = 0
    # ipdb.set_trace()
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        # Skip low-confidence detections.
        if score < thresh:
            continue
        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=1, alpha=box_alpha))
        if show_class:
            # (x, y) = (bbox[0], bbox[3] + 2) if classes[i] == 1 else (bbox[3], bbox[1] - 2) # below for person or above for the rest
            x, y = (bbox[0], bbox[1] - 2)
            # classes[i] holds per-class scores; argmax picks the label.
            classes_i = np.argmax(classes[i])
            # print(get_class_string(classes_i, score, dataset), classes_i, score)
            ax.text(
                x, y,
                get_class_string(classes_i, score, dataset),
                fontsize=4,
                family='serif',
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')
        # show mask
        if masks is not None:
            # ipdb.set_trace()
            # Build a solid-color image for this instance; blend the mask
            # color toward white by w_ratio for visibility.
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1
            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e_down = masks[i, :, :]
            # Rescale mask to the display resolution via PIL.
            # NOTE(review): Image.ANTIALIAS is deprecated in modern Pillow
            # (use Image.LANCZOS) — confirm the pinned Pillow version.
            e_pil = Image.fromarray(e_down)
            e_pil_up = e_pil.resize((H, W),Image.ANTIALIAS)
            e = np.array(e_pil_up)
            # NOTE(review): 3-tuple return matches OpenCV 3.x; OpenCV 4.x
            # returns (contours, hierarchy) — confirm the cv2 version.
            _, contour, hier = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
            # Draw each mask contour as a translucent filled polygon.
            for c in contour:
                polygon = Polygon(
                    c.reshape((-1, 2)),
                    fill=True, facecolor=color_mask,
                    edgecolor='w', linewidth=1.2,
                    alpha=0.5)
                ax.add_patch(polygon)
    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    print('result saved to {}'.format(os.path.join(output_dir, '{}'.format(output_name))))
    if show:
        plt.show()
    plt.close('all')
| [
"cv2.rectangle",
"os.path.exists",
"PIL.Image.fromarray",
"numpy.ones",
"os.makedirs",
"matplotlib.use",
"matplotlib.pyplot.Axes",
"numpy.argmax",
"numpy.argsort",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"utils.colormap.colormap",
"numpy.array",
"os.path.basename",
"matplo... | [((1171, 1192), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1185, 1192), False, 'import matplotlib\n'), ((1844, 1907), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x0, y0)', '(x1, y1)', '_GREEN'], {'thickness': 'thick'}), '(img, (x0, y0), (x1, y1), _GREEN, thickness=thick)\n', (1857, 1907), False, 'import cv2\n'), ((2322, 2347), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'frameon': '(False)'}), '(frameon=False)\n', (2332, 2347), True, 'import matplotlib.pyplot as plt\n'), ((2419, 2454), 'matplotlib.pyplot.Axes', 'plt.Axes', (['fig', '[0.0, 0.0, 1.0, 1.0]'], {}), '(fig, [0.0, 0.0, 1.0, 1.0])\n', (2427, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2743, 2761), 'numpy.argsort', 'np.argsort', (['(-areas)'], {}), '(-areas)\n', (2753, 2761), True, 'import numpy as np\n'), ((5338, 5354), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5347, 5354), True, 'import matplotlib.pyplot as plt\n'), ((2208, 2234), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (2222, 2234), False, 'import os\n'), ((2244, 2267), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (2255, 2267), False, 'import os\n'), ((2286, 2304), 'utils.colormap.colormap', 'colormap', ([], {'rgb': '(True)'}), '(rgb=True)\n', (2294, 2304), False, 'from utils.colormap import colormap\n'), ((5323, 5333), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5331, 5333), True, 'import matplotlib.pyplot as plt\n'), ((3195, 3327), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(bbox[0], bbox[1])', '(bbox[2] - bbox[0])', '(bbox[3] - bbox[1])'], {'fill': '(False)', 'edgecolor': '"""g"""', 'linewidth': '(1)', 'alpha': 'box_alpha'}), "((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1],\n fill=False, edgecolor='g', linewidth=1, alpha=box_alpha)\n", (3208, 3327), True, 'import matplotlib.pyplot as plt\n'), ((3654, 3675), 'numpy.argmax', 'np.argmax', (['classes[i]'], {}), 
'(classes[i])\n', (3663, 3675), True, 'import numpy as np\n'), ((4153, 4170), 'numpy.ones', 'np.ones', (['im.shape'], {}), '(im.shape)\n', (4160, 4170), True, 'import numpy as np\n'), ((4565, 4588), 'PIL.Image.fromarray', 'Image.fromarray', (['e_down'], {}), '(e_down)\n', (4580, 4588), False, 'from PIL import Image\n'), ((4665, 4683), 'numpy.array', 'np.array', (['e_pil_up'], {}), '(e_pil_up)\n', (4673, 4683), True, 'import numpy as np\n'), ((5096, 5121), 'os.path.basename', 'os.path.basename', (['im_name'], {}), '(im_name)\n', (5112, 5121), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from singledispatch import singledispatch
from functools import partial
import numpy as np # type: ignore
import scipy.sparse as sp # type: ignore
from sklearn.base import BaseEstimator # type: ignore
from sklearn.ensemble import ( # type: ignore
ExtraTreesClassifier,
ExtraTreesRegressor,
GradientBoostingClassifier,
GradientBoostingRegressor,
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.linear_model import ( # type: ignore
ElasticNet, # includes Lasso, MultiTaskElasticNet, etc.
ElasticNetCV,
HuberRegressor,
Lars,
LassoCV,
LinearRegression,
LogisticRegression,
LogisticRegressionCV,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
PassiveAggressiveClassifier,
PassiveAggressiveRegressor,
Perceptron,
Ridge,
RidgeCV,
RidgeClassifier,
RidgeClassifierCV,
SGDClassifier,
SGDRegressor,
TheilSenRegressor,
)
from sklearn.svm import LinearSVC, LinearSVR # type: ignore
from sklearn.multiclass import OneVsRestClassifier # type: ignore
from sklearn.tree import ( # type: ignore
DecisionTreeClassifier,
DecisionTreeRegressor
)
from eli5.base import Explanation, TargetExplanation
from eli5.utils import get_target_display_names, mask
from eli5.sklearn.utils import (
add_intercept,
get_coef,
get_default_target_names,
get_X,
is_multiclass_classifier,
is_multitarget_regressor,
predict_proba,
has_intercept,
handle_vec,
)
from eli5.sklearn.text import add_weighted_spans
from eli5.explain import explain_prediction
from eli5._decision_path import DECISION_PATHS_CAVEATS
from eli5._feature_weights import get_top_features
@explain_prediction.register(BaseEstimator)
@singledispatch
def explain_prediction_sklearn(estimator, doc,
                               vec=None,
                               top=None,
                               top_targets=None,
                               target_names=None,
                               targets=None,
                               feature_names=None,
                               feature_re=None,
                               feature_filter=None,
                               vectorized=False):
    """ Return an explanation of a scikit-learn estimator """
    # Base of the singledispatch chain: concrete estimator classes register
    # their handlers on this function below. Reaching this body means no
    # registered handler matched the estimator's class, so an error
    # Explanation is returned instead of raising.
    return Explanation(
        estimator=repr(estimator),
        error="estimator %r is not supported" % estimator,
    )
@explain_prediction.register(OneVsRestClassifier)
def explain_prediction_ovr(clf, doc, **kwargs):
    """Explain a OneVsRestClassifier prediction by dispatching on the class
    of the wrapped base estimator."""
    base_cls = clf.estimator.__class__
    handler = explain_prediction.dispatch(base_cls)
    return handler(clf, doc, **kwargs)
@explain_prediction_sklearn.register(OneVsRestClassifier)
def explain_prediction_ovr_sklearn(clf, doc, **kwargs):
    """Dispatch OvR to the eli5.sklearn handler of the wrapped base
    estimator; used when explain_prediction_sklearn is called explicitly."""
    base_cls = clf.estimator.__class__
    handler = explain_prediction_sklearn.dispatch(base_cls)
    return handler(clf, doc, **kwargs)
@explain_prediction_sklearn.register(LogisticRegression)
@explain_prediction_sklearn.register(LogisticRegressionCV)
@explain_prediction_sklearn.register(SGDClassifier)
@explain_prediction_sklearn.register(PassiveAggressiveClassifier)
@explain_prediction_sklearn.register(Perceptron)
@explain_prediction_sklearn.register(LinearSVC)
@explain_prediction_sklearn.register(RidgeClassifier)
@explain_prediction_sklearn.register(RidgeClassifierCV)
def explain_prediction_linear_classifier(clf, doc,
                                         vec=None,
                                         top=None,
                                         top_targets=None,
                                         target_names=None,
                                         targets=None,
                                         feature_names=None,
                                         feature_re=None,
                                         feature_filter=None,
                                         vectorized=False,
                                         ):
    """
    Explain prediction of a linear classifier.
    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the classifier ``clf``
    (e.g. a fitted CountVectorizer instance); you can pass it
    instead of ``feature_names``.
    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    classifier. Set it to True if you're passing ``vec``, but ``doc``
    is already vectorized.
    """
    # Vectorize doc (if needed) and resolve feature names from vec.
    vec, feature_names = handle_vec(clf, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized, to_dense=True)
    proba = predict_proba(clf, X)
    # X holds a single sample; decision_function returns its score(s).
    score, = clf.decision_function(X)
    # Append a bias column so the intercept shows up as a feature.
    if has_intercept(clf):
        X = add_intercept(X)
    x, = X
    # Optionally restrict to features matching feature_filter/feature_re.
    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)
    res = Explanation(
        estimator=repr(clf),
        method='linear model',
        targets=[],
    )
    # Closure mapping a class index to its top feature weights for x.
    _weights = _linear_weights(clf, x, top, feature_names, flt_indices)
    display_names = get_target_display_names(clf.classes_, target_names,
                                             targets, top_targets, score)
    if is_multiclass_classifier(clf):
        # One explanation per (selected) class.
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
                feature_weights=_weights(label_id),
                score=score[label_id],
                proba=proba[label_id] if proba is not None else None,
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        # Binary case: a single decision function (coef row 0), reported
        # under the positive class label (display_names[1]).
        target_expl = TargetExplanation(
            target=display_names[1][1],
            feature_weights=_weights(0),
            score=score,
            proba=proba[1] if proba is not None else None,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)
    return res
@explain_prediction_sklearn.register(ElasticNet)
@explain_prediction_sklearn.register(ElasticNetCV)
@explain_prediction_sklearn.register(HuberRegressor)
@explain_prediction_sklearn.register(Lars)
@explain_prediction_sklearn.register(LassoCV)
@explain_prediction_sklearn.register(LinearRegression)
@explain_prediction_sklearn.register(LinearSVR)
@explain_prediction_sklearn.register(OrthogonalMatchingPursuit)
@explain_prediction_sklearn.register(OrthogonalMatchingPursuitCV)
@explain_prediction_sklearn.register(PassiveAggressiveRegressor)
@explain_prediction_sklearn.register(Ridge)
@explain_prediction_sklearn.register(RidgeCV)
@explain_prediction_sklearn.register(SGDRegressor)
@explain_prediction_sklearn.register(TheilSenRegressor)
def explain_prediction_linear_regressor(reg, doc,
                                        vec=None,
                                        top=None,
                                        top_targets=None,
                                        target_names=None,
                                        targets=None,
                                        feature_names=None,
                                        feature_re=None,
                                        feature_filter=None,
                                        vectorized=False):
    """
    Explain prediction of a linear regressor.
    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the classifier ``clf``;
    you can pass it instead of ``feature_names``.
    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    regressor ``reg``. Set it to True if you're passing ``vec``,
    but ``doc`` is already vectorized.
    """
    # Vectorize doc (if needed) and resolve feature names from vec.
    vec, feature_names = handle_vec(reg, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized, to_dense=True)
    # X holds a single sample; predict returns its value(s).
    score, = reg.predict(X)
    # Append a bias column so the intercept shows up as a feature.
    if has_intercept(reg):
        X = add_intercept(X)
    x, = X
    # Optionally restrict to features matching feature_filter/feature_re.
    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)
    res = Explanation(
        estimator=repr(reg),
        method='linear model',
        targets=[],
        is_regression=True,
    )
    # Closure mapping a target index to its top feature weights for x.
    _weights = _linear_weights(reg, x, top, feature_names, flt_indices)
    names = get_default_target_names(reg)
    display_names = get_target_display_names(names, target_names, targets,
                                             top_targets, score)
    if is_multitarget_regressor(reg):
        # One explanation per (selected) output target.
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
                feature_weights=_weights(label_id),
                score=score[label_id],
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        # Single-output regressor: one explanation for the lone target.
        target_expl = TargetExplanation(
            target=display_names[0][1],
            feature_weights=_weights(0),
            score=score,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)
    return res
# NOTE: this deliberately shadows the DECISION_PATHS_CAVEATS imported from
# eli5._decision_path, prepending tree-specific wording to the shared text.
DECISION_PATHS_CAVEATS = """
Feature weights are calculated by following decision paths in trees
of an ensemble (or a single tree for DecisionTreeClassifier).
Each node of the tree has an output score, and contribution of a feature
on the decision path is how much the score changes from parent to child.
Weights of all features sum to the output score or proba of the estimator.
""" + DECISION_PATHS_CAVEATS
# Per-method description strings attached to tree-based Explanations below.
DESCRIPTION_TREE_CLF_BINARY = """
Features with largest coefficients.
""" + DECISION_PATHS_CAVEATS
DESCRIPTION_TREE_CLF_MULTICLASS = """
Features with largest coefficients per class.
""" + DECISION_PATHS_CAVEATS
DESCRIPTION_TREE_REG = """
Features with largest coefficients.
""" + DECISION_PATHS_CAVEATS
DESCRIPTION_TREE_REG_MULTITARGET = """
Features with largest coefficients per target.
""" + DECISION_PATHS_CAVEATS
@explain_prediction_sklearn.register(DecisionTreeClassifier)
@explain_prediction_sklearn.register(ExtraTreesClassifier)
@explain_prediction_sklearn.register(GradientBoostingClassifier)
@explain_prediction_sklearn.register(RandomForestClassifier)
def explain_prediction_tree_classifier(
        clf, doc,
        vec=None,
        top=None,
        top_targets=None,
        target_names=None,
        targets=None,
        feature_names=None,
        feature_re=None,
        feature_filter=None,
        vectorized=False):
    """ Explain prediction of a tree classifier.
    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the classifier ``clf``
    (e.g. a fitted CountVectorizer instance); you can pass it
    instead of ``feature_names``.
    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    classifier. Set it to True if you're passing ``vec``,
    but ``doc`` is already vectorized.
    Method for determining feature importances follows an idea from
    http://blog.datadive.net/interpreting-random-forests/.
    Feature weights are calculated by following decision paths in trees
    of an ensemble (or a single tree for DecisionTreeClassifier).
    Each node of the tree has an output score, and contribution of a feature
    on the decision path is how much the score changes from parent to child.
    Weights of all features sum to the output score or proba of the estimator.
    """
    # Vectorize doc (if needed) and resolve feature names from vec.
    vec, feature_names = handle_vec(clf, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized)
    if feature_names.bias_name is None:
        # Tree estimators do not have an intercept, but here we interpret
        # them as having an intercept
        feature_names.bias_name = '<BIAS>'
    proba = predict_proba(clf, X)
    # Not every tree ensemble exposes decision_function (e.g. forests don't).
    if hasattr(clf, 'decision_function'):
        score, = clf.decision_function(X)
    else:
        score = None
    is_multiclass = clf.n_classes_ > 2
    # Per-(feature, class) contributions from the decision-path method.
    feature_weights = _trees_feature_weights(
        clf, X, feature_names, clf.n_classes_)
    x, = add_intercept(X)
    # Optionally restrict to features matching feature_filter/feature_re.
    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)
    def _weights(label_id):
        # Top feature weights for one class, after applying the filter.
        scores = feature_weights[:, label_id]
        _x = x
        if flt_indices is not None:
            scores = scores[flt_indices]
            _x = mask(_x, flt_indices)
        return get_top_features(feature_names, scores, top, _x)
    res = Explanation(
        estimator=repr(clf),
        method='decision path',
        targets=[],
        description=(DESCRIPTION_TREE_CLF_MULTICLASS if is_multiclass
                     else DESCRIPTION_TREE_CLF_BINARY),
    )
    display_names = get_target_display_names(
        clf.classes_, target_names, targets, top_targets,
        score=score if score is not None else proba)
    if is_multiclass:
        # One explanation per (selected) class.
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
                feature_weights=_weights(label_id),
                score=score[label_id] if score is not None else None,
                proba=proba[label_id] if proba is not None else None,
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        # Binary case: report weights of class 1 under the positive label.
        target_expl = TargetExplanation(
            target=display_names[1][1],
            feature_weights=_weights(1),
            score=score if score is not None else None,
            proba=proba[1] if proba is not None else None,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)
    return res
@explain_prediction_sklearn.register(DecisionTreeRegressor)
@explain_prediction_sklearn.register(ExtraTreesRegressor)
@explain_prediction_sklearn.register(GradientBoostingRegressor)
@explain_prediction_sklearn.register(RandomForestRegressor)
def explain_prediction_tree_regressor(
        reg, doc,
        vec=None,
        top=None,
        top_targets=None,
        target_names=None,
        targets=None,
        feature_names=None,
        feature_re=None,
        feature_filter=None,
        vectorized=False):
    """ Explain prediction of a tree regressor.
    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the regressor ``reg``
    (e.g. a fitted CountVectorizer instance); you can pass it
    instead of ``feature_names``.
    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    regressor. Set it to True if you're passing ``vec``,
    but ``doc`` is already vectorized.
    Method for determining feature importances follows an idea from
    http://blog.datadive.net/interpreting-random-forests/.
    Feature weights are calculated by following decision paths in trees
    of an ensemble (or a single tree for DecisionTreeRegressor).
    Each node of the tree has an output score, and contribution of a feature
    on the decision path is how much the score changes from parent to child.
    Weights of all features sum to the output score of the estimator.
    """
    # Vectorize doc (if needed) and resolve feature names from vec.
    vec, feature_names = handle_vec(reg, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized)
    if feature_names.bias_name is None:
        # Tree estimators do not have an intercept, but here we interpret
        # them as having an intercept
        feature_names.bias_name = '<BIAS>'
    # X holds a single sample; predict returns its value(s).
    score, = reg.predict(X)
    num_targets = getattr(reg, 'n_outputs_', 1)
    is_multitarget = num_targets > 1
    # Per-(feature, target) contributions from the decision-path method.
    feature_weights = _trees_feature_weights(reg, X, feature_names, num_targets)
    x, = add_intercept(X)
    # Optionally restrict to features matching feature_filter/feature_re.
    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)
    def _weights(label_id):
        # Top feature weights for one target, after applying the filter.
        scores = feature_weights[:, label_id]
        _x = x
        if flt_indices is not None:
            scores = scores[flt_indices]
            _x = mask(_x, flt_indices)
        return get_top_features(feature_names, scores, top, _x)
    res = Explanation(
        estimator=repr(reg),
        method='decision path',
        description=(DESCRIPTION_TREE_REG_MULTITARGET if is_multitarget
                     else DESCRIPTION_TREE_REG),
        targets=[],
        is_regression=True,
    )
    names = get_default_target_names(reg, num_targets=num_targets)
    display_names = get_target_display_names(names, target_names, targets,
                                             top_targets, score)
    if is_multitarget:
        # One explanation per (selected) output target.
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
                feature_weights=_weights(label_id),
                score=score[label_id],
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        # Single-output regressor: one explanation for the lone target.
        target_expl = TargetExplanation(
            target=display_names[0][1],
            feature_weights=_weights(0),
            score=score,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)
    return res
def _trees_feature_weights(clf, X, feature_names, num_targets):
    """ Return feature weights for a tree or a tree ensemble.

    Returns an array of shape (n_features, num_targets) with decision-path
    contributions accumulated over all trees of ``clf`` for the single
    sample in ``X``.
    """
    feature_weights = np.zeros([len(feature_names), num_targets])
    if hasattr(clf, 'tree_'):
        # Single decision tree: accumulate its path contributions directly.
        _update_tree_feature_weights(X, feature_names, clf, feature_weights)
    else:
        # Ensemble: sum contributions over member trees, then scale.
        # Gradient boosting stages are scaled by the learning rate; bagging
        # ensembles (forests) are averaged over the number of estimators.
        if isinstance(clf, (
                GradientBoostingClassifier, GradientBoostingRegressor)):
            weight = clf.learning_rate
        else:
            weight = 1. / len(clf.estimators_)
        for _clfs in clf.estimators_:
            _update = partial(_update_tree_feature_weights, X, feature_names)
            if isinstance(_clfs, np.ndarray):
                # Gradient boosting stores each stage as an array of trees,
                # one per output column in the multi-output case.
                if len(_clfs) == 1:
                    _update(_clfs[0], feature_weights)
                else:
                    for idx, _clf in enumerate(_clfs):
                        _update(_clf, feature_weights[:, idx])
            else:
                _update(_clfs, feature_weights)
        feature_weights *= weight
    if hasattr(clf, 'init_'):
        # Gradient boosting: fold the init estimator's prediction into the
        # bias feature so the weights still sum to the model output.
        feature_weights[feature_names.bias_idx] += clf.init_.predict(X)[0]
    return feature_weights
def _update_tree_feature_weights(X, feature_names, clf, feature_weights):
    """ Update tree feature weights using decision path method.

    Mutates ``feature_weights`` in place: the root node's value goes to the
    bias feature, and each parent->child step along the decision path for
    the sample in ``X`` credits the parent's split feature with the change
    in node value.
    """
    tree_value = clf.tree_.value
    # tree_.value has shape (n_nodes, n_outputs, n_classes); squeeze out
    # whichever of the two trailing axes is singleton.
    if tree_value.shape[1] == 1:
        squeeze_axis = 1
    else:
        assert tree_value.shape[2] == 1
        squeeze_axis = 2
    tree_value = np.squeeze(tree_value, axis=squeeze_axis)
    tree_feature = clf.tree_.feature
    # Node indices visited by the sample, root first.
    _, indices = clf.decision_path(X).nonzero()
    # Classifier nodes store class counts; normalize them to probabilities.
    if isinstance(clf, DecisionTreeClassifier):
        norm = lambda x: x / x.sum()
    else:
        norm = lambda x: x
    feature_weights[feature_names.bias_idx] += norm(tree_value[0])
    for parent_idx, child_idx in zip(indices, indices[1:]):
        # Inner nodes on the path must have a real split feature (>= 0).
        assert tree_feature[parent_idx] >= 0
        feature_weights[tree_feature[parent_idx]] += (
            norm(tree_value[child_idx]) - norm(tree_value[parent_idx]))
def _multiply(X, coef):
""" Multiple X by coef element-wise, preserving sparsity. """
if sp.issparse(X):
return X.multiply(sp.csr_matrix(coef))
else:
return np.multiply(X, coef)
def _linear_weights(clf, x, top, feature_names, flt_indices):
    """Build a closure mapping a label_id to the top feature weights of the
    linear model ``clf`` for the (already intercept-augmented) sample ``x``.
    """
    def _weights(label_id):
        # Per-feature contribution: value * coefficient for this label.
        contributions = _multiply(x, get_coef(clf, label_id))
        row = x
        if flt_indices is not None:
            # Apply the feature filter chosen by the caller.
            contributions = contributions[flt_indices]
            row = mask(row, flt_indices)
        return get_top_features(feature_names, contributions, top, row)
    return _weights
| [
"eli5.utils.get_target_display_names",
"eli5._feature_weights.get_top_features",
"eli5.sklearn.utils.is_multitarget_regressor",
"eli5.sklearn.utils.get_X",
"eli5.sklearn.utils.is_multiclass_classifier",
"eli5.sklearn.utils.predict_proba",
"eli5.explain.explain_prediction.dispatch",
"numpy.multiply",
... | [((1728, 1770), 'eli5.explain.explain_prediction.register', 'explain_prediction.register', (['BaseEstimator'], {}), '(BaseEstimator)\n', (1755, 1770), False, 'from eli5.explain import explain_prediction\n'), ((2450, 2498), 'eli5.explain.explain_prediction.register', 'explain_prediction.register', (['OneVsRestClassifier'], {}), '(OneVsRestClassifier)\n', (2477, 2498), False, 'from eli5.explain import explain_prediction\n'), ((2588, 2636), 'eli5.explain.explain_prediction.dispatch', 'explain_prediction.dispatch', (['estimator.__class__'], {}), '(estimator.__class__)\n', (2615, 2636), False, 'from eli5.explain import explain_prediction\n'), ((4867, 4919), 'eli5.sklearn.utils.handle_vec', 'handle_vec', (['clf', 'doc', 'vec', 'vectorized', 'feature_names'], {}), '(clf, doc, vec, vectorized, feature_names)\n', (4877, 4919), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((4928, 4985), 'eli5.sklearn.utils.get_X', 'get_X', (['doc'], {'vec': 'vec', 'vectorized': 'vectorized', 'to_dense': '(True)'}), '(doc, vec=vec, vectorized=vectorized, to_dense=True)\n', (4933, 4985), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((4999, 5020), 'eli5.sklearn.utils.predict_proba', 'predict_proba', (['clf', 'X'], {}), '(clf, X)\n', (5012, 5020), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((5067, 5085), 'eli5.sklearn.utils.has_intercept', 'has_intercept', (['clf'], {}), '(clf)\n', (5080, 5085), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, 
predict_proba, has_intercept, handle_vec\n'), ((5432, 5517), 'eli5.utils.get_target_display_names', 'get_target_display_names', (['clf.classes_', 'target_names', 'targets', 'top_targets', 'score'], {}), '(clf.classes_, target_names, targets, top_targets,\n score)\n', (5456, 5517), False, 'from eli5.utils import get_target_display_names, mask\n'), ((5567, 5596), 'eli5.sklearn.utils.is_multiclass_classifier', 'is_multiclass_classifier', (['clf'], {}), '(clf)\n', (5591, 5596), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((8404, 8456), 'eli5.sklearn.utils.handle_vec', 'handle_vec', (['reg', 'doc', 'vec', 'vectorized', 'feature_names'], {}), '(reg, doc, vec, vectorized, feature_names)\n', (8414, 8456), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((8465, 8522), 'eli5.sklearn.utils.get_X', 'get_X', (['doc'], {'vec': 'vec', 'vectorized': 'vectorized', 'to_dense': '(True)'}), '(doc, vec=vec, vectorized=vectorized, to_dense=True)\n', (8470, 8522), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((8560, 8578), 'eli5.sklearn.utils.has_intercept', 'has_intercept', (['reg'], {}), '(reg)\n', (8573, 8578), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((8945, 8974), 'eli5.sklearn.utils.get_default_target_names', 'get_default_target_names', (['reg'], {}), '(reg)\n', (8969, 8974), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, 
is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((8995, 9069), 'eli5.utils.get_target_display_names', 'get_target_display_names', (['names', 'target_names', 'targets', 'top_targets', 'score'], {}), '(names, target_names, targets, top_targets, score)\n', (9019, 9069), False, 'from eli5.utils import get_target_display_names, mask\n'), ((9123, 9152), 'eli5.sklearn.utils.is_multitarget_regressor', 'is_multitarget_regressor', (['reg'], {}), '(reg)\n', (9147, 9152), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((12426, 12478), 'eli5.sklearn.utils.handle_vec', 'handle_vec', (['clf', 'doc', 'vec', 'vectorized', 'feature_names'], {}), '(clf, doc, vec, vectorized, feature_names)\n', (12436, 12478), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((12487, 12529), 'eli5.sklearn.utils.get_X', 'get_X', (['doc'], {'vec': 'vec', 'vectorized': 'vectorized'}), '(doc, vec=vec, vectorized=vectorized)\n', (12492, 12529), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((12738, 12759), 'eli5.sklearn.utils.predict_proba', 'predict_proba', (['clf', 'X'], {}), '(clf, X)\n', (12751, 12759), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((13017, 13033), 'eli5.sklearn.utils.add_intercept', 'add_intercept', (['X'], {}), '(X)\n', (13030, 13033), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, 
is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((13663, 13786), 'eli5.utils.get_target_display_names', 'get_target_display_names', (['clf.classes_', 'target_names', 'targets', 'top_targets'], {'score': '(score if score is not None else proba)'}), '(clf.classes_, target_names, targets, top_targets,\n score=score if score is not None else proba)\n', (13687, 13786), False, 'from eli5.utils import get_target_display_names, mask\n'), ((16434, 16486), 'eli5.sklearn.utils.handle_vec', 'handle_vec', (['reg', 'doc', 'vec', 'vectorized', 'feature_names'], {}), '(reg, doc, vec, vectorized, feature_names)\n', (16444, 16486), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((16495, 16537), 'eli5.sklearn.utils.get_X', 'get_X', (['doc'], {'vec': 'vec', 'vectorized': 'vectorized'}), '(doc, vec=vec, vectorized=vectorized)\n', (16500, 16537), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((16937, 16953), 'eli5.sklearn.utils.add_intercept', 'add_intercept', (['X'], {}), '(X)\n', (16950, 16953), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((17598, 17652), 'eli5.sklearn.utils.get_default_target_names', 'get_default_target_names', (['reg'], {'num_targets': 'num_targets'}), '(reg, num_targets=num_targets)\n', (17622, 17652), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((17673, 17747), 'eli5.utils.get_target_display_names', 'get_target_display_names', 
(['names', 'target_names', 'targets', 'top_targets', 'score'], {}), '(names, target_names, targets, top_targets, score)\n', (17697, 17747), False, 'from eli5.utils import get_target_display_names, mask\n'), ((19923, 19964), 'numpy.squeeze', 'np.squeeze', (['tree_value'], {'axis': 'squeeze_axis'}), '(tree_value, axis=squeeze_axis)\n', (19933, 19964), True, 'import numpy as np\n'), ((20570, 20584), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (20581, 20584), True, 'import scipy.sparse as sp\n'), ((5099, 5115), 'eli5.sklearn.utils.add_intercept', 'add_intercept', (['X'], {}), '(X)\n', (5112, 5115), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((6238, 6291), 'eli5.sklearn.text.add_weighted_spans', 'add_weighted_spans', (['doc', 'vec', 'vectorized', 'target_expl'], {}), '(doc, vec, vectorized, target_expl)\n', (6256, 6291), False, 'from eli5.sklearn.text import add_weighted_spans\n'), ((8592, 8608), 'eli5.sklearn.utils.add_intercept', 'add_intercept', (['X'], {}), '(X)\n', (8605, 8608), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((9665, 9718), 'eli5.sklearn.text.add_weighted_spans', 'add_weighted_spans', (['doc', 'vec', 'vectorized', 'target_expl'], {}), '(doc, vec, vectorized, target_expl)\n', (9683, 9718), False, 'from eli5.sklearn.text import add_weighted_spans\n'), ((13356, 13404), 'eli5._feature_weights.get_top_features', 'get_top_features', (['feature_names', 'scores', 'top', '_x'], {}), '(feature_names, scores, top, _x)\n', (13372, 13404), False, 'from eli5._feature_weights import get_top_features\n'), ((14525, 14578), 'eli5.sklearn.text.add_weighted_spans', 'add_weighted_spans', (['doc', 'vec', 'vectorized', 'target_expl'], {}), '(doc, vec, 
vectorized, target_expl)\n', (14543, 14578), False, 'from eli5.sklearn.text import add_weighted_spans\n'), ((17276, 17324), 'eli5._feature_weights.get_top_features', 'get_top_features', (['feature_names', 'scores', 'top', '_x'], {}), '(feature_names, scores, top, _x)\n', (17292, 17324), False, 'from eli5._feature_weights import get_top_features\n'), ((18328, 18381), 'eli5.sklearn.text.add_weighted_spans', 'add_weighted_spans', (['doc', 'vec', 'vectorized', 'target_expl'], {}), '(doc, vec, vectorized, target_expl)\n', (18346, 18381), False, 'from eli5.sklearn.text import add_weighted_spans\n'), ((20658, 20678), 'numpy.multiply', 'np.multiply', (['X', 'coef'], {}), '(X, coef)\n', (20669, 20678), True, 'import numpy as np\n'), ((20842, 20865), 'eli5.sklearn.utils.get_coef', 'get_coef', (['clf', 'label_id'], {}), '(clf, label_id)\n', (20850, 20865), False, 'from eli5.sklearn.utils import add_intercept, get_coef, get_default_target_names, get_X, is_multiclass_classifier, is_multitarget_regressor, predict_proba, has_intercept, handle_vec\n'), ((21049, 21097), 'eli5._feature_weights.get_top_features', 'get_top_features', (['feature_names', 'scores', 'top', '_x'], {}), '(feature_names, scores, top, _x)\n', (21065, 21097), False, 'from eli5._feature_weights import get_top_features\n'), ((5906, 5959), 'eli5.sklearn.text.add_weighted_spans', 'add_weighted_spans', (['doc', 'vec', 'vectorized', 'target_expl'], {}), '(doc, vec, vectorized, target_expl)\n', (5924, 5959), False, 'from eli5.sklearn.text import add_weighted_spans\n'), ((9392, 9445), 'eli5.sklearn.text.add_weighted_spans', 'add_weighted_spans', (['doc', 'vec', 'vectorized', 'target_expl'], {}), '(doc, vec, vectorized, target_expl)\n', (9410, 9445), False, 'from eli5.sklearn.text import add_weighted_spans\n'), ((13319, 13340), 'eli5.utils.mask', 'mask', (['_x', 'flt_indices'], {}), '(_x, flt_indices)\n', (13323, 13340), False, 'from eli5.utils import get_target_display_names, mask\n'), ((14162, 14215), 
'eli5.sklearn.text.add_weighted_spans', 'add_weighted_spans', (['doc', 'vec', 'vectorized', 'target_expl'], {}), '(doc, vec, vectorized, target_expl)\n', (14180, 14215), False, 'from eli5.sklearn.text import add_weighted_spans\n'), ((17239, 17260), 'eli5.utils.mask', 'mask', (['_x', 'flt_indices'], {}), '(_x, flt_indices)\n', (17243, 17260), False, 'from eli5.utils import get_target_display_names, mask\n'), ((18055, 18108), 'eli5.sklearn.text.add_weighted_spans', 'add_weighted_spans', (['doc', 'vec', 'vectorized', 'target_expl'], {}), '(doc, vec, vectorized, target_expl)\n', (18073, 18108), False, 'from eli5.sklearn.text import add_weighted_spans\n'), ((19019, 19074), 'functools.partial', 'partial', (['_update_tree_feature_weights', 'X', 'feature_names'], {}), '(_update_tree_feature_weights, X, feature_names)\n', (19026, 19074), False, 'from functools import partial\n'), ((20612, 20631), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['coef'], {}), '(coef)\n', (20625, 20631), True, 'import scipy.sparse as sp\n'), ((21012, 21033), 'eli5.utils.mask', 'mask', (['_x', 'flt_indices'], {}), '(_x, flt_indices)\n', (21016, 21033), False, 'from eli5.utils import get_target_display_names, mask\n')] |
import numpy as np
import pandas as pd
from EnhancedCOXEN_Functions import coxen_multi_drug_gene_selection
# Drug response measurements for data set 1; one row per (tumor, drug)
# observation with an 'Efficacy' response column (see keyword args below).
res = pd.read_csv('../Data/Drug_Response_Data_Of_Set_1.txt', sep='\t', engine='c', na_values=['na', '-', ''],
             header=0, index_col=None)
# Gene expression matrices for the source (set 1) and target (set 2) data.
# Columns are genes; rows are presumably cell lines / tumors — confirm
# against the data files.
data1 = pd.read_csv('../Data/Gene_Expression_Data_Of_Set_1.txt', sep='\t', engine='c', na_values=['na', '-', ''],
             header=0, index_col=0)
data2 = pd.read_csv('../Data/Gene_Expression_Data_Of_Set_2.txt', sep='\t', engine='c', na_values=['na', '-', ''],
             header=0, index_col=0)
# Both expression sets must list the same genes in the same column order,
# since the selected indices below are applied to data1's columns.
assert np.sum(data1.columns != data2.columns) == 0
# Use enhanced COXEN method to select genes. First, select 200 predictive genes to form the candidate pool, and then
# select 100 generalizable genes from the candidate pool. The absolute value of Pearson correlation coefficient is
# used as the measure of gene's prediction power.
# NOTE(review): `id` shadows the builtin of the same name; consider renaming.
# Returns column indices of the selected genes in data1/data2.
id = coxen_multi_drug_gene_selection(source_data=data1, target_data=data2, drug_response_data=res,
                                drug_response_col='Efficacy', tumor_col='Cell_Line', drug_col='Drug',
                                prediction_power_measure='pearson', num_predictive_gene=200,
                                num_generalizable_gene=100)
print('Selected genes are:')
print(data1.columns[id]) | [
"numpy.sum",
"EnhancedCOXEN_Functions.coxen_multi_drug_gene_selection",
"pandas.read_csv"
] | [((116, 249), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/Drug_Response_Data_Of_Set_1.txt"""'], {'sep': '"""\t"""', 'engine': '"""c"""', 'na_values': "['na', '-', '']", 'header': '(0)', 'index_col': 'None'}), "('../Data/Drug_Response_Data_Of_Set_1.txt', sep='\\t', engine='c',\n na_values=['na', '-', ''], header=0, index_col=None)\n", (127, 249), True, 'import pandas as pd\n'), ((273, 406), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/Gene_Expression_Data_Of_Set_1.txt"""'], {'sep': '"""\t"""', 'engine': '"""c"""', 'na_values': "['na', '-', '']", 'header': '(0)', 'index_col': '(0)'}), "('../Data/Gene_Expression_Data_Of_Set_1.txt', sep='\\t', engine=\n 'c', na_values=['na', '-', ''], header=0, index_col=0)\n", (284, 406), True, 'import pandas as pd\n'), ((431, 564), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/Gene_Expression_Data_Of_Set_2.txt"""'], {'sep': '"""\t"""', 'engine': '"""c"""', 'na_values': "['na', '-', '']", 'header': '(0)', 'index_col': '(0)'}), "('../Data/Gene_Expression_Data_Of_Set_2.txt', sep='\\t', engine=\n 'c', na_values=['na', '-', ''], header=0, index_col=0)\n", (442, 564), True, 'import pandas as pd\n'), ((921, 1186), 'EnhancedCOXEN_Functions.coxen_multi_drug_gene_selection', 'coxen_multi_drug_gene_selection', ([], {'source_data': 'data1', 'target_data': 'data2', 'drug_response_data': 'res', 'drug_response_col': '"""Efficacy"""', 'tumor_col': '"""Cell_Line"""', 'drug_col': '"""Drug"""', 'prediction_power_measure': '"""pearson"""', 'num_predictive_gene': '(200)', 'num_generalizable_gene': '(100)'}), "(source_data=data1, target_data=data2,\n drug_response_data=res, drug_response_col='Efficacy', tumor_col=\n 'Cell_Line', drug_col='Drug', prediction_power_measure='pearson',\n num_predictive_gene=200, num_generalizable_gene=100)\n", (952, 1186), False, 'from EnhancedCOXEN_Functions import coxen_multi_drug_gene_selection\n'), ((588, 626), 'numpy.sum', 'np.sum', (['(data1.columns != data2.columns)'], {}), '(data1.columns != 
data2.columns)\n', (594, 626), True, 'import numpy as np\n')] |
# Reference: https://github.com/starry-sky6688/StarCraft/blob/master/network/commnet.py
import numpy as np
import gym
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.annotations import override
from ray.rllib.utils.types import ModelConfigDict, TensorType, List, Dict
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
# import tensorflow as tf1
tf1, tf, tfv = try_import_tf()
class CommNet(TFModelV2):
    """RLlib TF model with CommNet-style inter-agent communication.

    Each agent encodes its slice of the flattened joint observation with a
    shared dense layer + GRU cell, then runs ``communicate_level`` rounds in
    which every agent's hidden state is updated from an aggregate of the
    other agents' hidden states, and finally decodes per-agent logits.
    """

    def __init__(
        self,
        obs_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        num_outputs: int,
        model_config: ModelConfigDict,
        name: str,
    ):
        """Build the shared encoder, GRU cells, and logits decoder.

        Expects ``model_config["custom_model_config"]`` to provide
        ``communicate_level`` (number of communication rounds) and
        ``rnn_hidden_dim`` (hidden size of both GRU cells).
        """
        super(CommNet, self).__init__(
            obs_space, action_space, num_outputs, model_config, name
        )
        # assert isinstance(obs_space, gym.spaces.Box) and isinstance(
        #     action_space, gym.spaces.Tuple
        # ), (obs_space, action_space)
        custom_model_config = model_config["custom_model_config"]
        # TODO(ming): transfer something to here
        # One sub-space per agent in the original (pre-flattening) obs space.
        self.agent_num = len(obs_space.original_space.spaces)
        self.communicate_level = custom_model_config["communicate_level"]
        self.rnn_hidden_dim = custom_model_config["rnn_hidden_dim"]
        # Observation encoder; each agent's share of the flat observation is
        # obs_space.shape[0] // agent_num features (assumes equal-sized
        # per-agent observations — TODO confirm).
        self.encoding = tf1.keras.layers.Dense(
            self.rnn_hidden_dim,
            input_shape=(None, obs_space.shape[0] // self.agent_num),
        )
        self.encoding.build((self.obs_space.shape[0] // self.agent_num,))
        # GRU cell that folds the encoded observation into the hidden state.
        self.f_obs = tf1.keras.layers.GRUCell(self.rnn_hidden_dim)
        self.f_obs.build((self.rnn_hidden_dim,))
        # GRU cell used during the communication rounds in forward().
        self.f_comm = tf1.keras.layers.GRUCell(self.rnn_hidden_dim)
        self.f_comm.build((self.rnn_hidden_dim,))
        # Decodes a hidden state into one agent's share of the output logits.
        self.decoding = tf1.keras.layers.Dense(
            num_outputs // self.agent_num,
            input_shape=(None, self.rnn_hidden_dim),
            name="logits_out",
            activation=None,
            kernel_initializer=normc_initializer(0.01),
        )
        self.decoding.build((self.rnn_hidden_dim,))
        # Register all trainable variables with RLlib (TF1-style ModelV2).
        self.register_variables(self.encoding.variables)
        self.register_variables(self.f_obs.variables)
        self.register_variables(self.f_comm.variables)
        self.register_variables(self.decoding.variables)

    @override(ModelV2)
    def get_initial_state(self) -> List[np.ndarray]:
        """Return the zero-initialized GRU hidden state."""
        return [np.zeros((1, self.rnn_hidden_dim), np.float32)]

    @override(ModelV2)
    def forward(
        self,
        input_dict: Dict[str, TensorType],
        state: List[TensorType],
        seq_lens: TensorType,
    ) -> (TensorType, List[TensorType]):
        """Encode observations, run communication rounds, decode logits.

        ``state[0]`` is the GRU hidden state carried over from the previous
        step; the new hidden state ``h_out`` is returned alongside the
        per-agent logits reshaped to ``(-1, num_outputs)``.
        """
        obs_flat = input_dict["obs_flat"]
        # Split the flat joint observation into per-agent rows.
        obs_flat = tf.reshape(obs_flat, (-1, self.obs_space.shape[0] // self.agent_num))
        obs_encoding = tf.nn.sigmoid(self.encoding(obs_flat))
        obs_encoding = tf.expand_dims(obs_encoding, axis=1)
        h_out, _ = self.f_obs(obs_encoding, state[0])
        for k in range(self.communicate_level):
            if k == 0:
                # First round: no messages yet, communicate zeros.
                h = h_out
                c = tf.zeros_like(h)
            else:
                # Build each agent's message c as the mean of the hidden
                # states of all agents, with the agent's own contribution
                # zeroed out by the (1 - eye) mask before averaging.
                h = tf.reshape(h, (-1, self.agent_num, self.rnn_hidden_dim))
                c = tf.reshape(h, (-1, 1, self.agent_num * self.rnn_hidden_dim))
                c = tf.tile(c, [1, self.agent_num, 1])
                mask = 1.0 - tf.eye(self.agent_num)
                mask = tf.reshape(mask, (-1, 1))
                mask = tf.tile(mask, [1, self.rnn_hidden_dim])
                mask = tf.reshape(mask, (self.agent_num, -1))
                c = c * tf.expand_dims(mask, 0)
                c = tf.reshape(
                    c, (-1, self.agent_num, self.agent_num, self.rnn_hidden_dim)
                )
                c = tf.reduce_mean(
                    c, axis=-2
                )  # (episode_num * max_episode_len, n_agents, rnn_hidden_dim)
                h = tf.reshape(h, (-1, 1, self.rnn_hidden_dim))
                c = tf.reshape(c, (-1, 1, self.rnn_hidden_dim))
            # Fold the aggregated message into each agent's hidden state.
            h, _ = self.f_comm(c, h)
        h = tf.squeeze(h, axis=1)
        weights = self.decoding(h)
        # Reshape per-agent logits back to one row per joint observation.
        weights = tf.reshape(weights, (-1, self.num_outputs))
        return weights, [h_out]

    @override(ModelV2)
    def value_function(self) -> TensorType:
        """Not implemented — this model provides logits only."""
        raise NotImplementedError


ModelCatalog.register_custom_model("CommNet", CommNet)
| [
"ray.rllib.utils.annotations.override",
"ray.rllib.models.tf.misc.normc_initializer",
"ray.rllib.utils.framework.try_import_tf",
"numpy.zeros",
"ray.rllib.models.ModelCatalog.register_custom_model"
] | [((533, 548), 'ray.rllib.utils.framework.try_import_tf', 'try_import_tf', ([], {}), '()\n', (546, 548), False, 'from ray.rllib.utils.framework import try_import_tf\n'), ((4422, 4476), 'ray.rllib.models.ModelCatalog.register_custom_model', 'ModelCatalog.register_custom_model', (['"""CommNet"""', 'CommNet'], {}), "('CommNet', CommNet)\n", (4456, 4476), False, 'from ray.rllib.models import ModelCatalog\n'), ((2393, 2410), 'ray.rllib.utils.annotations.override', 'override', (['ModelV2'], {}), '(ModelV2)\n', (2401, 2410), False, 'from ray.rllib.utils.annotations import override\n'), ((2534, 2551), 'ray.rllib.utils.annotations.override', 'override', (['ModelV2'], {}), '(ModelV2)\n', (2542, 2551), False, 'from ray.rllib.utils.annotations import override\n'), ((4324, 4341), 'ray.rllib.utils.annotations.override', 'override', (['ModelV2'], {}), '(ModelV2)\n', (4332, 4341), False, 'from ray.rllib.utils.annotations import override\n'), ((2480, 2526), 'numpy.zeros', 'np.zeros', (['(1, self.rnn_hidden_dim)', 'np.float32'], {}), '((1, self.rnn_hidden_dim), np.float32)\n', (2488, 2526), True, 'import numpy as np\n'), ((2076, 2099), 'ray.rllib.models.tf.misc.normc_initializer', 'normc_initializer', (['(0.01)'], {}), '(0.01)\n', (2093, 2099), False, 'from ray.rllib.models.tf.misc import normc_initializer\n')] |
#!/usr/bin/env python
"""
Attempt dead reckoning (estimating position) from accelerometer data.
The accelerometer data are too noisy!
Authors:
- <NAME>, 2015 (<EMAIL>) http://binarybottle.com
Copyright 2015, Sage Bionetworks (http://sagebase.org), Apache v2.0 License
"""
def velocity_from_acceleration(ax, ay, az, t):
    """Integrate accelerometer readings into velocity estimates.

    Simple rectangle-rule integration: each axis starts from zero velocity
    and, for every sample after the first, accumulates the current
    acceleration multiplied by the elapsed time step.

    Parameters
    ----------
    ax, ay, az : list or numpy array of floats
        accelerometer readings along the x-, y-, and z-axes
    t : list or numpy array of floats
        time stamps of the accelerometer samples

    Returns
    -------
    vx, vy, vz : lists of floats
        estimated velocities along the x-, y-, and z-axes
    """
    vx, vy, vz = [0], [0], [0]
    for i in range(1, len(ax)):
        dt = t[i] - t[i - 1]
        vx.append(vx[-1] + ax[i] * dt)
        vy.append(vy[-1] + ay[i] * dt)
        vz.append(vz[-1] + az[i] * dt)
    return vx, vy, vz
def position_from_velocity(vx, vy, vz, t):
    """Integrate velocity estimates into position estimates.

    Rectangle-rule integration starting from the origin, plus the magnitude
    of the net displacement between the final and initial positions.

    Parameters
    ----------
    vx, vy, vz : list or numpy array of floats
        estimated velocities along the x-, y-, and z-axes
    t : list or numpy array of floats
        time stamps of the accelerometer samples

    Returns
    -------
    x, y, z : lists of floats
        estimated positions along the x-, y-, and z-axes
    distance : float
        magnitude of the net displacement from the starting position
    """
    import math

    x = [0]
    y = [0]
    z = [0]
    for i in range(1, len(vx)):
        dt = t[i] - t[i - 1]
        x.append(x[i - 1] + vx[i] * dt)
        y.append(y[i - 1] + vy[i] * dt)
        z.append(z[i - 1] + vz[i] * dt)

    # Net displacement is final minus initial position per axis.  (The
    # previous implementation summed *all* cumulative positions along the
    # path — np.sum(x) etc. — which is neither the displacement nor the
    # path length and contradicted the documented "change in position".)
    dx = x[-1] - x[0]
    dy = y[-1] - y[0]
    dz = z[-1] - z[0]
    distance = math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)

    return x, y, z, distance
def dead_reckon(ax, ay, az, t):
    """Attempt dead reckoning (estimating position) from accelerometer data.

    The accelerometer data are too noisy!

    De-means each axis, integrates acceleration to velocity, then velocity
    to position, and prints the resulting displacement.

    Parameters
    ----------
    ax, ay, az : list or numpy array of floats
        accelerometer readings along the x-, y-, and z-axes
    t : list or numpy array of floats
        accelerometer time points

    Returns
    -------
    x, y, z : lists of floats
        estimated positions along the x-, y-, and z-axes
    distance : float
        estimated change in position

    Examples
    --------
    >>> from mhealthx.extractors.dead_reckon import dead_reckon
    >>> from mhealthx.xio import read_accel_json
    >>> input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/deviceMotion_walking_outbound.json.items-5981e0a8-6481-41c8-b589-fa207bfd2ab38771455825726024828.tmp'
    >>> t, axyz, gxyz, uxyz, rxyz, sample_rate, duration = read_accel_json(input_file, 150, True)
    >>> ax, ay, az = axyz
    >>> x, y, z, distance = dead_reckon(ax, ay, az, t)
    """
    import numpy as np
    from mhealthx.extractors.dead_reckon import velocity_from_acceleration,\
        position_from_velocity

    #-------------------------------------------------------------------------
    # De-mean accelerometer readings on float copies.  The previous in-place
    # `ax -= np.mean(ax)` mutated the caller's numpy arrays and raised
    # TypeError for plain lists, which the docstring explicitly allows.
    #-------------------------------------------------------------------------
    ax = np.asarray(ax, dtype=float) - np.mean(ax)
    ay = np.asarray(ay, dtype=float) - np.mean(ay)
    az = np.asarray(az, dtype=float) - np.mean(az)
    #-------------------------------------------------------------------------
    # Estimate velocity:
    #-------------------------------------------------------------------------
    vx, vy, vz = velocity_from_acceleration(ax, ay, az, t)
    #-------------------------------------------------------------------------
    # Estimate position (dead reckoning):
    #-------------------------------------------------------------------------
    x, y, z, distance = position_from_velocity(vx, vy, vz, t)
    print('distance = {0}'.format(distance))

    return x, y, z, distance
# ============================================================================
if __name__ == '__main__':
    import numpy as np

    from mhealthx.xio import read_accel_json
    from mhealthx.extractors.dead_reckon import velocity_from_acceleration,\
        position_from_velocity
    from mhealthx.utilities import plotxyz

    # Load accelerometer x, y, z values, clipping the first `start` samples:
    input_file = '/Users/arno/DriveWork/mhealthx/mpower_sample_data/deviceMotion_walking_outbound.json.items-5981e0a8-6481-41c8-b589-fa207bfd2ab38771455825726024828.tmp'
    device_motion = True
    start = 150
    t, axyz, gxyz, uxyz, rxyz, sample_rate, duration = read_accel_json(input_file, start, device_motion)
    ax, ay, az = axyz

    # Remove each axis's mean before integrating, then plot:
    ax = ax - np.mean(ax)
    ay = ay - np.mean(ay)
    az = az - np.mean(az)
    plotxyz(ax, ay, az, t, 'Acceleration')

    # Integrate acceleration to velocity and plot:
    vx, vy, vz = velocity_from_acceleration(ax, ay, az, t)
    plotxyz(vx, vy, vz, t, 'Velocity')

    # Integrate velocity to position (dead reckoning), report, and plot:
    x, y, z, distance = position_from_velocity(vx, vy, vz, t)
    print('distance = {0}'.format(distance))
    plotxyz(x, y, z, t, 'Position')
| [
"mhealthx.extractors.dead_reckon.velocity_from_acceleration",
"numpy.mean",
"numpy.sqrt",
"mhealthx.extractors.dead_reckon.position_from_velocity",
"mhealthx.xio.read_accel_json",
"numpy.sum",
"mhealthx.utilities.plotxyz"
] | [((4151, 4160), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (4157, 4160), True, 'import numpy as np\n'), ((4170, 4179), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (4176, 4179), True, 'import numpy as np\n'), ((4189, 4198), 'numpy.sum', 'np.sum', (['z'], {}), '(z)\n', (4195, 4198), True, 'import numpy as np\n'), ((4214, 4250), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2 + dz ** 2)'], {}), '(dx ** 2 + dy ** 2 + dz ** 2)\n', (4221, 4250), True, 'import numpy as np\n'), ((6142, 6153), 'numpy.mean', 'np.mean', (['ax'], {}), '(ax)\n', (6149, 6153), True, 'import numpy as np\n'), ((6164, 6175), 'numpy.mean', 'np.mean', (['ay'], {}), '(ay)\n', (6171, 6175), True, 'import numpy as np\n'), ((6186, 6197), 'numpy.mean', 'np.mean', (['az'], {}), '(az)\n', (6193, 6197), True, 'import numpy as np\n'), ((6399, 6440), 'mhealthx.extractors.dead_reckon.velocity_from_acceleration', 'velocity_from_acceleration', (['ax', 'ay', 'az', 't'], {}), '(ax, ay, az, t)\n', (6425, 6440), False, 'from mhealthx.extractors.dead_reckon import velocity_from_acceleration, position_from_velocity\n'), ((6666, 6703), 'mhealthx.extractors.dead_reckon.position_from_velocity', 'position_from_velocity', (['vx', 'vy', 'vz', 't'], {}), '(vx, vy, vz, t)\n', (6688, 6703), False, 'from mhealthx.extractors.dead_reckon import velocity_from_acceleration, position_from_velocity\n'), ((7796, 7845), 'mhealthx.xio.read_accel_json', 'read_accel_json', (['input_file', 'start', 'device_motion'], {}), '(input_file, start, device_motion)\n', (7811, 7845), False, 'from mhealthx.xio import read_accel_json\n'), ((8075, 8086), 'numpy.mean', 'np.mean', (['ax'], {}), '(ax)\n', (8082, 8086), True, 'import numpy as np\n'), ((8097, 8108), 'numpy.mean', 'np.mean', (['ay'], {}), '(ay)\n', (8104, 8108), True, 'import numpy as np\n'), ((8119, 8130), 'numpy.mean', 'np.mean', (['az'], {}), '(az)\n', (8126, 8130), True, 'import numpy as np\n'), ((8140, 8178), 'mhealthx.utilities.plotxyz', 'plotxyz', (['ax', 'ay', 'az', 't', 
'"""Acceleration"""'], {}), "(ax, ay, az, t, 'Acceleration')\n", (8147, 8178), False, 'from mhealthx.utilities import plotxyz\n'), ((8384, 8425), 'mhealthx.extractors.dead_reckon.velocity_from_acceleration', 'velocity_from_acceleration', (['ax', 'ay', 'az', 't'], {}), '(ax, ay, az, t)\n', (8410, 8425), False, 'from mhealthx.extractors.dead_reckon import velocity_from_acceleration, position_from_velocity\n'), ((8431, 8465), 'mhealthx.utilities.plotxyz', 'plotxyz', (['vx', 'vy', 'vz', 't', '"""Velocity"""'], {}), "(vx, vy, vz, t, 'Velocity')\n", (8438, 8465), False, 'from mhealthx.utilities import plotxyz\n'), ((8695, 8732), 'mhealthx.extractors.dead_reckon.position_from_velocity', 'position_from_velocity', (['vx', 'vy', 'vz', 't'], {}), '(vx, vy, vz, t)\n', (8717, 8732), False, 'from mhealthx.extractors.dead_reckon import velocity_from_acceleration, position_from_velocity\n'), ((8784, 8815), 'mhealthx.utilities.plotxyz', 'plotxyz', (['x', 'y', 'z', 't', '"""Position"""'], {}), "(x, y, z, t, 'Position')\n", (8791, 8815), False, 'from mhealthx.utilities import plotxyz\n')] |
import Init
import Modeling
import System
import Time
class Subsystem:
"""This class represents the the agents that are assigned to the subsystems.
The agents can predict the behavior of their subsystems and store the
current costs, coupling variables and states.
Parameters
----------
sys_id : int
The unique identifier of a subsystem as specified in the Init
Attributes
----------
sys_id : int
The unique identifier of a subsystem as specified in the Init
name : string
model_type : string
The type of the model that this subsystem uses
model : Model
The model object created according to the model type
ups_neigh : int or None
The ID of the upstream neighbor if it exists
downs_neigh : int
The ID of the downstream neighbor if it exists
coup_vars_send : list of floats
A list of the current values of coupling variables that should be sent
coup_vars_rec : list of floats
A list of the current values of coupling variables that are received
cost_send : list of floats
A list of the current values of cost variables that should be sent
cost_rec : list of floats
A list of the current values of cost variables that are received
command_send : list of floats
A list of the current values of command variables that should be sent
command_rec : list of floats
A list of the current values of cost variablesare received
cost_fac : list of floats
The cost factors are essential to select which types of costs should
be considered and how they should be weighted. The first element in the
list is the cost related to the control effort. The second is the cost
that is received from the dowmstream neighbor and the third is the cost
due to deviations form the set point.
last_opt : float
Time when the last optimization was conducted
last_read : float
Time when the last measurement was taken
last_write : float
Time when the last command was sent
commands : list of floats
The possible commands of a subsystem
inputs : list of floats
The considered inputs of a subsystem
fin_command : float
The final command that results from the optimization
traj_var : list of strings
The variable in the subsystem that represents a trajectory that other
subsystems should follow
traj_points : list of floats
Possible values of the trajectory variable considered in advance to
calculate the cost of deviating from the ideal value
"""
def __init__(self, sys_id):
self.sys_id = sys_id
print(sys_id)
self.name = Init.name[sys_id]
self.model_type = Init.model_type[sys_id]
self.model = self.prepare_model()
self.ups_neigh = Init.ups_neigh[sys_id]
self.downs_neigh = Init.downs_neigh[sys_id]
self.par_neigh = Init.par_neigh[sys_id]
self.coup_vars_send = []
self.coup_vars_rec = []
self.cost_send = []
self.cost_rec = []
self.command_send = []
self.command_rec = []
self.cost_fac = Init.cost_fac[sys_id]
self.last_opt = 0
self.last_read = 0
self.last_write = 0
self.commands = Init.commands[sys_id]
self.inputs = Init.inputs[sys_id]
self.fin_command = 0
self.traj_var = Init.traj_var[sys_id]
self.traj_points = Init.traj_points[sys_id]
def prepare_model(self):
"""Prepares the model of a subsystem according to the subsystem's model
type.
Parameters
----------
Returns
----------
model : Model
The created model object
"""
if self.model_type == "Modelica":
model = Modeling.ModelicaMod(self.sys_id)
model.translate()
elif self.model_type == "Scikit":
model = Modeling.SciMod(self.sys_id)
model.load_mod()
elif self.model_type == "Linear":
model = Modeling.LinMod(self.sys_id)
elif self.model_type == "Fuzzy":
model = Modeling.FuzMod(self.sys_id)
return model
def predict(self, inputs, commands):
state_vars = []
if self.model.states.state_var_names != []:
for i,nam in enumerate(self.model.states.state_var_names):
state_vars.append(System.Bexmoc.read_cont_sys(nam))
if inputs != "external":
if type(inputs) is not list:
inputs = [inputs]
self.model.states.inputs = inputs
self.model.states.commands = commands
self.model.predict()
return self.model.states.outputs
def optimize(self, interp):
cur_time = Time.Time.get_time()
if (cur_time - self.last_opt) >= self.model.times.opt_time or (
cur_time == 0):
self.last_opt = cur_time
self.interp_minimize(interp)
def interp_minimize(self, interp):
from scipy import interpolate as it
import time
opt_costs = []
opt_outputs = []
opt_command = []
states = self.get_state_vars()
if states != []:
self.model.states.state_vars = states[0]
if self.model.states.input_variables[0] != "external":
if self.inputs == []:
self.get_inputs()
inputs = self.model.states.inputs[0]
else:
inputs = self.inputs
else:
if self.inputs == []:
inputs = [-1.0]
else:
inputs = self.inputs
for inp in inputs:
outputs = []
costs = []
for com in self.commands:
results = self.predict(inp, com)
outputs.append(results)
costs.append(self.calc_cost(com, results[-1][-1]))
min_ind = costs.index(min(costs))
opt_costs.append(costs[min_ind])
temp = outputs[min_ind]
opt_outputs.append(temp[0][-1])
opt_command.append(self.commands[min_ind])
if self.traj_var != []:
traj_costs = []
traj = self.model.get_results(self.traj_var[0])
set_point = traj[10]
for pts in self.traj_points:
traj_costs.append((pts - set_point)**2)
self.traj_points.insert(self.traj_points[0] - 1.)
traj_costs.insert(traj_costs[0] * 5)
self.traj_points.append(self.traj_points[-1] + 1)
traj_costs.append(traj_costs[-1] * 5)
self.cost_send = it.interp1d(self.traj_points, traj_costs,
fill_value = (100,100), bounds_error = False)
else:
if len(inputs) >= 2:
if interp:
self.cost_send = it.interp1d(inputs, opt_costs,
fill_value = (100,100), bounds_error = False)
else:
self.cost_send = opt_costs
else:
self.cost_send = opt_costs[0]
if len(inputs) >= 2:
if interp:
interp_com = []
self.coup_vars_send = opt_outputs
for com in opt_command:
interp_com.append(com[0])
self.command_send = it.interp1d(inputs, interp_com,
fill_value = (0,0), bounds_error = False)
else:
self.coup_vars_send = opt_outputs
self.command_send = opt_command
else:
self.coup_vars_send = opt_outputs[0]
self.command_send = opt_command[0]
def calc_cost(self, command, outputs):
import scipy.interpolate
import numpy as np
cost = self.cost_fac[0] * sum(command)
if self.cost_rec != [] and self.cost_rec != [[]]:
for c in self.cost_rec:
if type(c) is scipy.interpolate.interpolate.interp1d:
cost += self.cost_fac[1] * c(outputs)
elif type(c) is list:
idx = self.find_nearest(np.asarray(self.inputs), outputs)
cost += self.cost_fac[1] * c[idx]
else:
cost += self.cost_fac[1] * c
if self.model.states.set_points != []:
cost += (self.cost_fac[2] * (outputs -
self.model.states.set_points[0])**2)
return cost
def find_nearest(self, a, a0):
import numpy as np
"Element in nd array `a` closest to the scalar value `a0`"
idx = np.abs(a - a0).argmin()
return idx
def interp(self, iter_real):
import scipy.interpolate
import numpy as np
if iter_real == "iter" and self.coup_vars_rec != []:
inp = self.coup_vars_rec
else:
inp = self.model.states.inputs
idx = self.find_nearest(np.asarray(self.inputs), inp[-1])
if self.command_send != []:
if (type(self.command_send) is scipy.interpolate.interpolate.interp1d):
self.fin_command = self.command_send(inp[-1])
else:
self.fin_command = self.command_send[idx]
if self.coup_vars_send != []:
if type(self.coup_vars_send) is scipy.interpolate.interpolate.interp1d:
self.fin_coup_vars = self.coup_vars_send(inp[0])
else:
self.fin_coup_vars = self.coup_vars_send[idx]
def get_inputs(self):
inputs = []
if self.model.states.input_variables is not None:
for nam in self.model.states.input_names:
inputs.append(System.Bexmoc.read_cont_sys(nam))
self.model.states.inputs = inputs
def get_state_vars(self):
states = []
if self.model.states.state_var_names is not None:
for nam in self.model.states.state_var_names:
states.append(System.Bexmoc.read_cont_sys(nam))
return states
def send_commands(self):
cur_time = Time.Time.get_time()
if (cur_time - self.last_write) > self.model.times.samp_time:
self.last_write = cur_time
if type(self.fin_command) is list:
com = self.fin_command[0]
else:
com = self.fin_command
if self.model.states.command_names is not None:
for nam in self.model.states.command_names:
System.Bexmoc.write_cont_sys(nam, com)
| [
"numpy.abs",
"System.Bexmoc.write_cont_sys",
"numpy.asarray",
"Time.Time.get_time",
"System.Bexmoc.read_cont_sys",
"scipy.interpolate.interp1d",
"Modeling.SciMod",
"Modeling.LinMod",
"Modeling.ModelicaMod",
"Modeling.FuzMod"
] | [((4840, 4860), 'Time.Time.get_time', 'Time.Time.get_time', ([], {}), '()\n', (4858, 4860), False, 'import Time\n'), ((10250, 10270), 'Time.Time.get_time', 'Time.Time.get_time', ([], {}), '()\n', (10268, 10270), False, 'import Time\n'), ((3873, 3906), 'Modeling.ModelicaMod', 'Modeling.ModelicaMod', (['self.sys_id'], {}), '(self.sys_id)\n', (3893, 3906), False, 'import Modeling\n'), ((6725, 6813), 'scipy.interpolate.interp1d', 'it.interp1d', (['self.traj_points', 'traj_costs'], {'fill_value': '(100, 100)', 'bounds_error': '(False)'}), '(self.traj_points, traj_costs, fill_value=(100, 100),\n bounds_error=False)\n', (6736, 6813), True, 'from scipy import interpolate as it\n'), ((9115, 9138), 'numpy.asarray', 'np.asarray', (['self.inputs'], {}), '(self.inputs)\n', (9125, 9138), True, 'import numpy as np\n'), ((3999, 4027), 'Modeling.SciMod', 'Modeling.SciMod', (['self.sys_id'], {}), '(self.sys_id)\n', (4014, 4027), False, 'import Modeling\n'), ((7479, 7549), 'scipy.interpolate.interp1d', 'it.interp1d', (['inputs', 'interp_com'], {'fill_value': '(0, 0)', 'bounds_error': '(False)'}), '(inputs, interp_com, fill_value=(0, 0), bounds_error=False)\n', (7490, 7549), True, 'from scipy import interpolate as it\n'), ((8780, 8794), 'numpy.abs', 'np.abs', (['(a - a0)'], {}), '(a - a0)\n', (8786, 8794), True, 'import numpy as np\n'), ((4119, 4147), 'Modeling.LinMod', 'Modeling.LinMod', (['self.sys_id'], {}), '(self.sys_id)\n', (4134, 4147), False, 'import Modeling\n'), ((4484, 4516), 'System.Bexmoc.read_cont_sys', 'System.Bexmoc.read_cont_sys', (['nam'], {}), '(nam)\n', (4511, 4516), False, 'import System\n'), ((6964, 7037), 'scipy.interpolate.interp1d', 'it.interp1d', (['inputs', 'opt_costs'], {'fill_value': '(100, 100)', 'bounds_error': '(False)'}), '(inputs, opt_costs, fill_value=(100, 100), bounds_error=False)\n', (6975, 7037), True, 'from scipy import interpolate as it\n'), ((9867, 9899), 'System.Bexmoc.read_cont_sys', 'System.Bexmoc.read_cont_sys', (['nam'], {}), 
'(nam)\n', (9894, 9899), False, 'import System\n'), ((10143, 10175), 'System.Bexmoc.read_cont_sys', 'System.Bexmoc.read_cont_sys', (['nam'], {}), '(nam)\n', (10170, 10175), False, 'import System\n'), ((10680, 10718), 'System.Bexmoc.write_cont_sys', 'System.Bexmoc.write_cont_sys', (['nam', 'com'], {}), '(nam, com)\n', (10708, 10718), False, 'import System\n'), ((4209, 4237), 'Modeling.FuzMod', 'Modeling.FuzMod', (['self.sys_id'], {}), '(self.sys_id)\n', (4224, 4237), False, 'import Modeling\n'), ((8283, 8306), 'numpy.asarray', 'np.asarray', (['self.inputs'], {}), '(self.inputs)\n', (8293, 8306), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define model topology."""
from collections import namedtuple
import numpy as np
GroupInfo = namedtuple("GroupInfo", ["size", "rank", "world"])
class Topology(object):
def __init__(self,
device_rank,
world_size,
dp_degree=1,
pp_degree=1,
sharding_degree=1,
mp_degree=1):
assert dp_degree * pp_degree * sharding_degree * mp_degree == world_size
arr = np.arange(0, world_size).reshape([dp_degree, pp_degree, sharding_degree, mp_degree])
dp_idx, pp_idx, sharding_idx, mp_idx = np.where(arr == device_rank)
dp_idx, pp_idx, sharding_idx, mp_idx = dp_idx[0], pp_idx[0], sharding_idx[0], mp_idx[0]
self.world = GroupInfo(size=world_size, rank=device_rank, world=list(range(0, world_size)))
# parallelism groups
mp_world = arr[dp_idx, pp_idx, sharding_idx, :].tolist()
self.mp_info = GroupInfo(size=len(mp_world), rank=mp_idx, world=mp_world)
sharding_world = arr[dp_idx, pp_idx, :, mp_idx].tolist()
self.sharding_info = GroupInfo(size=len(sharding_world), rank=sharding_idx, world=sharding_world)
pp_world = arr[dp_idx, :, sharding_idx, mp_idx].tolist()
self.pp_info = GroupInfo(size=len(pp_world), rank=pp_idx, world=pp_world)
dp_world = arr[:, pp_idx, sharding_idx, mp_idx].tolist()
self.dp_info = GroupInfo(size=len(dp_world), rank=dp_idx, world=dp_world)
# the last rank of a pipeline group
self.is_last = self.pp_info.rank == pp_degree - 1
# dataset partition
data_arr = np.arange(0, dp_degree * sharding_degree).reshape([dp_degree, sharding_degree])
data_arr = np.expand_dims(data_arr, axis=1).repeat(pp_degree, axis=1)
data_arr = np.expand_dims(data_arr, axis=3).repeat(mp_degree, axis=3)
data_world = data_arr.reshape(-1).tolist()
self.data_info = GroupInfo(
size=dp_degree * sharding_degree,
rank=self.dp_info.rank * sharding_degree + self.sharding_info.rank,
world=data_world)
self.data_inner_times = world_size // self.data_info.size
self.num_model_partitions = world_size // dp_degree
def __repr__(self):
return f"dp: {self.dp}, pp: {self.pp}, sharding: {self.sharding}, mp: {self.mp}"
| [
"numpy.where",
"numpy.expand_dims",
"collections.namedtuple",
"numpy.arange"
] | [((711, 761), 'collections.namedtuple', 'namedtuple', (['"""GroupInfo"""', "['size', 'rank', 'world']"], {}), "('GroupInfo', ['size', 'rank', 'world'])\n", (721, 761), False, 'from collections import namedtuple\n'), ((1226, 1254), 'numpy.where', 'np.where', (['(arr == device_rank)'], {}), '(arr == device_rank)\n', (1234, 1254), True, 'import numpy as np\n'), ((1094, 1118), 'numpy.arange', 'np.arange', (['(0)', 'world_size'], {}), '(0, world_size)\n', (1103, 1118), True, 'import numpy as np\n'), ((2245, 2286), 'numpy.arange', 'np.arange', (['(0)', '(dp_degree * sharding_degree)'], {}), '(0, dp_degree * sharding_degree)\n', (2254, 2286), True, 'import numpy as np\n'), ((2344, 2376), 'numpy.expand_dims', 'np.expand_dims', (['data_arr'], {'axis': '(1)'}), '(data_arr, axis=1)\n', (2358, 2376), True, 'import numpy as np\n'), ((2422, 2454), 'numpy.expand_dims', 'np.expand_dims', (['data_arr'], {'axis': '(3)'}), '(data_arr, axis=3)\n', (2436, 2454), True, 'import numpy as np\n')] |
import numpy
import rlr_bv_funcs
def learning_curve(cost_function, x_train, y_train, x_valid, y_valid, reg_lambda=0):
num_train = y_train.shape[0]
# num_valid = y_valid.shape[0]
error_train = numpy.zeros(num_train)
error_valid = numpy.zeros(num_train)
convergence = numpy.zeros(num_train)
for idx in range(1, num_train + 1):
theta_t, conv_t = rlr_bv_funcs.train_lin_reg(
cost_function, x_train[:idx], y_train[:idx], reg_lambda=reg_lambda, maxiter=1000)
convergence[idx - 1] = conv_t
error_train[idx - 1], _ = rlr_bv_funcs.lin_reg_cost_function(
theta_t, x_train[:idx], y_train[:idx], reg_lambda=reg_lambda)
error_valid[idx - 1], _ = rlr_bv_funcs.lin_reg_cost_function(theta_t, x_valid, y_valid, reg_lambda=reg_lambda)
return error_train, error_valid, convergence
| [
"numpy.zeros",
"rlr_bv_funcs.lin_reg_cost_function",
"rlr_bv_funcs.train_lin_reg"
] | [((209, 231), 'numpy.zeros', 'numpy.zeros', (['num_train'], {}), '(num_train)\n', (220, 231), False, 'import numpy\n'), ((250, 272), 'numpy.zeros', 'numpy.zeros', (['num_train'], {}), '(num_train)\n', (261, 272), False, 'import numpy\n'), ((292, 314), 'numpy.zeros', 'numpy.zeros', (['num_train'], {}), '(num_train)\n', (303, 314), False, 'import numpy\n'), ((382, 494), 'rlr_bv_funcs.train_lin_reg', 'rlr_bv_funcs.train_lin_reg', (['cost_function', 'x_train[:idx]', 'y_train[:idx]'], {'reg_lambda': 'reg_lambda', 'maxiter': '(1000)'}), '(cost_function, x_train[:idx], y_train[:idx],\n reg_lambda=reg_lambda, maxiter=1000)\n', (408, 494), False, 'import rlr_bv_funcs\n'), ((576, 676), 'rlr_bv_funcs.lin_reg_cost_function', 'rlr_bv_funcs.lin_reg_cost_function', (['theta_t', 'x_train[:idx]', 'y_train[:idx]'], {'reg_lambda': 'reg_lambda'}), '(theta_t, x_train[:idx], y_train[:idx],\n reg_lambda=reg_lambda)\n', (610, 676), False, 'import rlr_bv_funcs\n'), ((720, 809), 'rlr_bv_funcs.lin_reg_cost_function', 'rlr_bv_funcs.lin_reg_cost_function', (['theta_t', 'x_valid', 'y_valid'], {'reg_lambda': 'reg_lambda'}), '(theta_t, x_valid, y_valid, reg_lambda=\n reg_lambda)\n', (754, 809), False, 'import rlr_bv_funcs\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import csv
import numpy as np
import h5py
import statsmodels.api as sm
#from scipy.stats import wilcoxon
from keras import backend as K
from mnist_mutate import mutate_M2, mutate_M4, mutate_M5
from patsy import dmatrices
import pandas as pd
def cohen_d(orig_accuracy_list, accuracy_list):
nx = len(orig_accuracy_list)
ny = len(accuracy_list)
dof = nx + ny - 2
return (np.mean(orig_accuracy_list) - np.mean(accuracy_list)) / np.sqrt(((nx-1)*np.std(orig_accuracy_list, ddof=1) ** 2 + (ny-1)*np.std(accuracy_list, ddof=1) ** 2) / dof)
def get_dataset(i):
dataset_file = "/home/ubuntu/crossval_set_" + str(i) + ".h5"
hf = h5py.File(dataset_file, 'r')
xn_train = np.asarray(hf.get('xn_train'))
xn_test = np.asarray(hf.get('xn_test'))
yn_train = np.asarray(hf.get('yn_train'))
yn_test = np.asarray(hf.get('yn_test'))
return xn_train, yn_train, xn_test, yn_test
def train_and_get_accuracies(param, mutation):
accuracy_list = range(0, 25)
index = 0
csv_file = "mnist_binary_search_" + str(mutation) + ".csv"
with open(csv_file, 'a') as f1:
writer=csv.writer(f1, delimiter=',',lineterminator='\n',)
for i in range(0, 5):
x_train, y_train, x_test, y_test = get_dataset(i)
for j in range(0, 5):
print("Training " + str(index) + ", for param " + str(param))
if (mutation == '2'):
accuracy, loss = mutate_M2(0, param, x_train, y_train, x_test, y_test, i, j)
elif (mutation == '4r'):
accuracy, loss = mutate_M4(0, param, x_train, y_train, x_test, y_test, i, j, 1)
elif (mutation == '4p'):
accuracy, loss = mutate_M4(0, param, x_train, y_train, x_test, y_test, i, j, 0)
elif (mutation == '5'):
accuracy, loss = mutate_M5(param, x_train, y_train, x_test, y_test, i, j)
writer.writerow([str(i), str(j), str(param), str(accuracy), str(loss)])
print("Loss " + str(loss) + ", Accuracy " + str(accuracy))
accuracy_list[index] = accuracy
index += 1
K.clear_session()
accuracy_dict[param] = accuracy_list
return accuracy_list
def is_diff_sts(orig_accuracy_list, accuracy_list, threshold = 0.05):
#w, p_value = wilcoxon(orig_accuracy_list, accuracy_list)
list_length = len(orig_accuracy_list)
zeros_list = [0] * list_length
ones_list = [1] * list_length
mod_lists = zeros_list + ones_list
acc_lists = orig_accuracy_list + accuracy_list
data = {'Acc': acc_lists, 'Mod': mod_lists}
df = pd.DataFrame(data)
response, predictors = dmatrices("Acc ~ Mod", df, return_type='dataframe')
glm = sm.GLM(response, predictors)
glm_results = glm.fit()
glm_sum = glm_results.summary()
pv = str(glm_sum.tables[1][2][4])
p_value = float(pv)
effect_size = cohen_d(orig_accuracy_list, accuracy_list)
print("effect size:" + str(effect_size))
is_sts = ((p_value < threshold) and effect_size > 0.2)
print("p_value:" + str(p_value) + ", is_sts:" + str(is_sts))
return is_sts
def get_accuracies(param, mutation):
if (param in accuracy_dict):
return accuracy_dict[param]
else:
return train_and_get_accuracies(param, mutation)
def get_orig_accuracies_from_file():
accuracy_list = range(0, 25)
with open(orig_result_file, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = -1
for row in csv_reader:
if line_count == -1:
line_count += 1
accuracy_list[line_count] = float(row['Accuracy'])
line_count += 1
return accuracy_list
def get_accuracies_from_file(num):
percentages = (5, 10, 25, 50, 75, 80, 85, 90, 95, 99, 100)
accuracyDict = {}
line_count = -1
perc_index = 0
with open(result_file, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = -1
accuracy_list = range(0, num)
for row in csv_reader:
if line_count == -1:
line_count += 1
index = line_count % num
if (index == 0):
accuracy_list = range(0, num)
accuracy_list[index] = float(row['Accuracy'])
if (index == num - 1):
accuracyDict[percentages[perc_index]] = accuracy_list
perc_index += 1
line_count += 1
return accuracyDict
def search_for_perfect(lower_bound, upper_bound, lower_accuracy_list, upper_accuracy_list, mutation):
middle_bound = (upper_bound + lower_bound) / 2
print(str(lower_bound) + " " + str(middle_bound) + " " + str(upper_bound))
middle_accuracy_list = get_accuracies(middle_bound, mutation)
is_sts = is_diff_sts(orig_accuracy_list, middle_accuracy_list)
if (is_sts):
upper_bound = middle_bound
upper_accuracy_list = middle_accuracy_list
else:
lower_bound = middle_bound
lower_accuracy_list = middle_accuracy_list
if (upper_bound - lower_bound <= level_of_precision):
if (is_sts):
print("middle_bound:" + str(middle_bound))
perfect = middle_bound
else:
print("middle_bound:" + str(upper_bound))
perfect = upper_bound
return perfect
else:
print("Changing interval to: [" + str(lower_bound) + ", " + str(upper_bound) + "]")
search_for_perfect(lower_bound, upper_bound, lower_accuracy_list, upper_accuracy_list, mutation)
print("pumpurum:" + str(lower_bound) + " " + str(upper_bound))
lower_bound = 0
upper_bound = 100
level_of_precision = 5
num = 25
#mutations = ('4r', '4p', 'm5')
mutations = ('5')
orig_result_file_name = "mnist_orig_25.csv"
for mutation in mutations:
print("Mutation:" + str(mutation))
result_files_dir = "/home/ubuntu/"
result_file_name = "mnist_mutation" + str(mutation) + ".csv"
dataset_dir = "/home/ubuntu/"
result_file = result_files_dir + result_file_name
orig_result_file = result_files_dir + orig_result_file_name
accuracy_dict = get_accuracies_from_file(num)
orig_accuracy_list = get_orig_accuracies_from_file()
lower_accuracy_list = orig_accuracy_list
#lower_accuracy_list = get_accuracies(lower_bound)
if (mutation == '5'):
upper_accuracy_list = get_accuracies(99, mutation)
else:
upper_accuracy_list = get_accuracies(upper_bound, mutation)
#lower_killed = False
lower_killed = is_diff_sts(lower_accuracy_list, orig_accuracy_list)
upper_killed = is_diff_sts(orig_accuracy_list, upper_accuracy_list)
if (lower_killed):
print("The mutation is already killed at the lowest value:" + str(lower_bound))
elif ((not lower_killed) and (not upper_killed)):
print("The mutation is not killed in this given range of parameters: [" + str(lower_bound) + ", " + str(upper_bound) + "]")
elif ((not lower_killed) and (upper_killed)):
perfect = search_for_perfect(lower_bound, upper_bound, lower_accuracy_list, upper_accuracy_list, mutation)
print("perfect is:" + str(perfect)) | [
"numpy.mean",
"csv.DictReader",
"patsy.dmatrices",
"mnist_mutate.mutate_M2",
"csv.writer",
"h5py.File",
"mnist_mutate.mutate_M5",
"keras.backend.clear_session",
"numpy.std",
"pandas.DataFrame",
"statsmodels.api.GLM",
"mnist_mutate.mutate_M4"
] | [((693, 721), 'h5py.File', 'h5py.File', (['dataset_file', '"""r"""'], {}), "(dataset_file, 'r')\n", (702, 721), False, 'import h5py\n'), ((2787, 2805), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2799, 2805), True, 'import pandas as pd\n'), ((2839, 2890), 'patsy.dmatrices', 'dmatrices', (['"""Acc ~ Mod"""', 'df'], {'return_type': '"""dataframe"""'}), "('Acc ~ Mod', df, return_type='dataframe')\n", (2848, 2890), False, 'from patsy import dmatrices\n'), ((2901, 2929), 'statsmodels.api.GLM', 'sm.GLM', (['response', 'predictors'], {}), '(response, predictors)\n', (2907, 2929), True, 'import statsmodels.api as sm\n'), ((1165, 1215), 'csv.writer', 'csv.writer', (['f1'], {'delimiter': '""","""', 'lineterminator': '"""\n"""'}), "(f1, delimiter=',', lineterminator='\\n')\n", (1175, 1215), False, 'import csv\n'), ((3643, 3667), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (3657, 3667), False, 'import csv\n'), ((4154, 4178), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (4168, 4178), False, 'import csv\n'), ((434, 461), 'numpy.mean', 'np.mean', (['orig_accuracy_list'], {}), '(orig_accuracy_list)\n', (441, 461), True, 'import numpy as np\n'), ((464, 486), 'numpy.mean', 'np.mean', (['accuracy_list'], {}), '(accuracy_list)\n', (471, 486), True, 'import numpy as np\n'), ((2234, 2251), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (2249, 2251), True, 'from keras import backend as K\n'), ((1493, 1552), 'mnist_mutate.mutate_M2', 'mutate_M2', (['(0)', 'param', 'x_train', 'y_train', 'x_test', 'y_test', 'i', 'j'], {}), '(0, param, x_train, y_train, x_test, y_test, i, j)\n', (1502, 1552), False, 'from mnist_mutate import mutate_M2, mutate_M4, mutate_M5\n'), ((1629, 1691), 'mnist_mutate.mutate_M4', 'mutate_M4', (['(0)', 'param', 'x_train', 'y_train', 'x_test', 'y_test', 'i', 'j', '(1)'], {}), '(0, param, x_train, y_train, x_test, y_test, i, j, 1)\n', (1638, 1691), False, 'from mnist_mutate 
import mutate_M2, mutate_M4, mutate_M5\n'), ((506, 540), 'numpy.std', 'np.std', (['orig_accuracy_list'], {'ddof': '(1)'}), '(orig_accuracy_list, ddof=1)\n', (512, 540), True, 'import numpy as np\n'), ((555, 584), 'numpy.std', 'np.std', (['accuracy_list'], {'ddof': '(1)'}), '(accuracy_list, ddof=1)\n', (561, 584), True, 'import numpy as np\n'), ((1769, 1831), 'mnist_mutate.mutate_M4', 'mutate_M4', (['(0)', 'param', 'x_train', 'y_train', 'x_test', 'y_test', 'i', 'j', '(0)'], {}), '(0, param, x_train, y_train, x_test, y_test, i, j, 0)\n', (1778, 1831), False, 'from mnist_mutate import mutate_M2, mutate_M4, mutate_M5\n'), ((1908, 1964), 'mnist_mutate.mutate_M5', 'mutate_M5', (['param', 'x_train', 'y_train', 'x_test', 'y_test', 'i', 'j'], {}), '(param, x_train, y_train, x_test, y_test, i, j)\n', (1917, 1964), False, 'from mnist_mutate import mutate_M2, mutate_M4, mutate_M5\n')] |
import argparse
import torch
import pathlib
import scipy
import scipy.stats
import numpy as np
from collections import defaultdict
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return round(m, 1), round(h, 1)
all_means = defaultdict(list)
all_std = defaultdict(list)
# AP = []
# AP50 = []
# AP75 = []
# APs = []
# APm = []
# APl = []
def main():
# ap = []
# ap50 = []
# ap75 = []
# aps = []
# apm = []
# apl = []
# print(log_folder)
for i in range(1,5):
all_result = defaultdict(list)
log_folder = "outputs/coco_{0}_mil12_aff005_1shot".format(i)
print(log_folder)
log_folder = pathlib.Path(log_folder)
for path in log_folder.glob("*.pth"):
log = torch.load(path)
# import pdb; pdb.set_trace()
# box_results.append(log.results["bbox"]["AP"])
if "segm" in log.results.keys():
# import pdb; pdb.set_trace()
for key in log.results["segm"].keys():
all_result[key].append(log.results["segm"][key]*100)
# all_result["AP"].append(log.results["segm"]["AP"]*100)
# all_result["AP50"].append(log.results["segm"]["AP50"]*100)
# all_result["AP75"].append(log.results["segm"]["AP75"]*100)
# all_result["APs"].append(log.results["segm"]["APs"]*100)
# all_result["APm"].append(log.results["segm"]["APm"]*100)
# all_result["APl"].append(log.results["segm"]["APl"]*100)
# all_means["AP"].append(mean_confidence_interval(ap))
for key in all_result:
m, h = mean_confidence_interval(all_result[key])
all_means[key].append(m)
all_std[key].append(h)
# print(key, all_result[key])
for key in all_means.keys():
print(key, np.array(all_means[key]), np.array(all_std[key]))
print(key, round(np.mean(all_means[key]), 1), round(np.mean(all_std[key]), 1))
# print(key, np.mean(all_std[key]))
# print(all_means)
# print(all_std)
return
# AP50.append(mean_confidence_interval(ap50))
# AP75.append(mean_confidence_interval(ap75))
# APs.append(mean_confidence_interval(aps))
# APm.append(mean_confidence_interval(apm))
# APl.append(mean_confidence_interval(apl))
# print("AP", mean_confidence_interval(ap))
# print("AP50", mean_confidence_interval(ap50))
# print("AP75", mean_confidence_interval(ap75))
# print("APs", mean_confidence_interval(aps))
# print("APm", mean_confidence_interval(apm))
# print("APl", mean_confidence_interval(apl))
# if box_results:
# print(mean_confidence_interval(box_results))
# if ap:
# print("AP", mean_confidence_interval(ap))
# if ap50:
# print(ap50)
# print("AP50", mean_confidence_interval(ap50))
# if ap75:
# print("AP75", mean_confidence_interval(ap75))
# if apc:
# print(mean_confidence_interval(apc))
# if apr:
# print(mean_confidence_interval(apr))
# AP = np.array(AP)
print(all_means)
if __name__ == "__main__":
main()
| [
"numpy.mean",
"pathlib.Path",
"torch.load",
"numpy.array",
"collections.defaultdict",
"scipy.stats.sem",
"scipy.stats.t.ppf"
] | [((379, 396), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (390, 396), False, 'from collections import defaultdict\n'), ((407, 424), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (418, 424), False, 'from collections import defaultdict\n'), ((198, 212), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (206, 212), True, 'import numpy as np\n'), ((240, 250), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (247, 250), True, 'import numpy as np\n'), ((252, 270), 'scipy.stats.sem', 'scipy.stats.sem', (['a'], {}), '(a)\n', (267, 270), False, 'import scipy\n'), ((284, 332), 'scipy.stats.t.ppf', 'scipy.stats.t.ppf', (['((1 + confidence) / 2.0)', '(n - 1)'], {}), '((1 + confidence) / 2.0, n - 1)\n', (301, 332), False, 'import scipy\n'), ((671, 688), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (682, 688), False, 'from collections import defaultdict\n'), ((805, 829), 'pathlib.Path', 'pathlib.Path', (['log_folder'], {}), '(log_folder)\n', (817, 829), False, 'import pathlib\n'), ((899, 915), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (909, 915), False, 'import torch\n'), ((2010, 2034), 'numpy.array', 'np.array', (['all_means[key]'], {}), '(all_means[key])\n', (2018, 2034), True, 'import numpy as np\n'), ((2036, 2058), 'numpy.array', 'np.array', (['all_std[key]'], {}), '(all_std[key])\n', (2044, 2058), True, 'import numpy as np\n'), ((2085, 2108), 'numpy.mean', 'np.mean', (['all_means[key]'], {}), '(all_means[key])\n', (2092, 2108), True, 'import numpy as np\n'), ((2120, 2141), 'numpy.mean', 'np.mean', (['all_std[key]'], {}), '(all_std[key])\n', (2127, 2141), True, 'import numpy as np\n')] |
'''Trains a convolutional neural network on sample images from the environment
using neuroevolution to maximize the ability to discriminate between input
images.
Reference:
Koutnik, Jan, <NAME>, and <NAME>. "Evolving deep
unsupervised convolutional networks for vision-based reinforcement
learning." Proceedings of the 2014 conference on Genetic and
evolutionary computation. ACM, 2014.
'''
import random
import numpy as np
from cnn import create_cnn, calculate_cnn_output, calculate_fitness
from nn_utilities import update_model_weights
from datasets import load_images_torcs_4
from visualization import plot_feature_vectors
from deap import algorithms, base, creator, tools
from operator import attrgetter
from matplotlib import pyplot as plt
# Set the following parameters:
OUTPUT_DIR = 'experiments/train_cnn_ga_11/'
images = load_images_torcs_4()
# Create the ConvNet and load the training set
model = create_cnn()
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
INDIVIDUAL_SIZE = 993
toolbox = base.Toolbox()
toolbox.register("attr_float", random.uniform, -1.5, 1.5)
toolbox.register("individual",
tools.initRepeat,
creator.Individual,
toolbox.attr_float,
n=INDIVIDUAL_SIZE)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def ga_fitness(individual):
# Update the ConvNet parameters
update_model_weights(model, np.asarray(individual))
# Calculate the output feature vectors
feature_vectors = calculate_cnn_output(model, images)
# Check their fitness
fitness = calculate_fitness(feature_vectors)
return fitness,
toolbox.register("evaluate", ga_fitness)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1.5, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=10) # Optimize this hyperparameter
# This is a modified version of the eaSimple algorithm included with DEAP here:
# https://github.com/DEAP/deap/blob/master/deap/algorithms.py#L84
def eaSimpleModified(population, toolbox, cxpb, mutpb, ngen, stats=None,
halloffame=None, verbose=__debug__):
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
best = []
best_ind = max(population, key=attrgetter("fitness"))
best.append(best_ind)
# Begin the generational process
for gen in range(1, ngen + 1):
# Select the next generation individuals
offspring = toolbox.select(population, len(population))
# Vary the pool of individuals
offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Save the best individual from the generation
best_ind = max(offspring, key=attrgetter("fitness"))
best.append(best_ind)
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = stats.compile(population) if stats else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
return population, logbook, best
def run(num_gen=10,
n=100,
mutpb=0.8,
cxpb=0.5):
np.random.seed(0)
history = tools.History()
# Decorate the variation operators
toolbox.decorate("mate", history.decorator)
toolbox.decorate("mutate", history.decorator)
pop = toolbox.population(n=n)
history.update(pop)
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
pop, log, best = eaSimpleModified(pop,
toolbox,
cxpb=cxpb,
mutpb=mutpb,
ngen=num_gen,
stats=stats,
halloffame=hof,
verbose=True)
return pop, log, hof, history, best
def plot_results(filename,
gen,
fitness_maxs,
fitness_avgs):
fig, ax1 = plt.subplots()
line1 = ax1.plot(gen, fitness_maxs, "r-", label="Maximum Fitness")
line2 = ax1.plot(gen, fitness_avgs, "b-", label="Average Fitness")
lines = line1 + line2
labs = [line.get_label() for line in lines]
ax1.legend(lines, labs, loc="lower right")
ax1.set_xlabel('Generation')
ax1.set_ylabel('Fitness')
plt.savefig('{}'.format(filename))
def run_experiments(output_dir):
POPULATION_SIZE = 100
NUM_GENERATIONS = 100
CROSSOVER_PROB = 0.5
MUTATION_PROBS = [0.05, 0.10, 0.20, 0.30, 0.40, 0.50]
for mutation_prob in MUTATION_PROBS:
pop, log, hof, history, best_per_gen = run(num_gen=NUM_GENERATIONS,
n=POPULATION_SIZE,
cxpb=CROSSOVER_PROB,
mutpb=mutation_prob)
best = np.asarray(hof)
gen = log.select("gen")
fitness_maxs = log.select("max")
fitness_avgs = log.select("avg")
plot_results(filename='{}train_cnn_ga_mutpb_{}.png'.
format(output_dir,
str(mutation_prob).replace('.', '_')),
gen=gen,
fitness_maxs=fitness_maxs,
fitness_avgs=fitness_avgs)
np.savetxt('{}train_cnn_ga_mutpb_{}.out'.
format(output_dir,
str(mutation_prob).replace('.', '_')), best)
# Plot the feature vectors produced by the best individual from each
# generation
for gen in range(len(best_per_gen)):
update_model_weights(model, np.asarray(best_per_gen[gen]))
feature_vectors = calculate_cnn_output(model, images)
plot_feature_vectors(feature_vectors, filename='{}feature_vectors_{}__{}.png'.\
format(output_dir, str(mutation_prob).replace('.', '_'), gen))
if __name__ == "__main__":
np.random.seed(0)
random.seed(0)
run_experiments(output_dir=OUTPUT_DIR)
| [
"operator.attrgetter",
"datasets.load_images_torcs_4",
"deap.creator.create",
"cnn.calculate_fitness",
"numpy.asarray",
"deap.tools.Logbook",
"random.seed",
"cnn.calculate_cnn_output",
"deap.algorithms.varAnd",
"cnn.create_cnn",
"numpy.random.seed",
"deap.tools.History",
"deap.tools.HallOfFa... | [((833, 854), 'datasets.load_images_torcs_4', 'load_images_torcs_4', ([], {}), '()\n', (852, 854), False, 'from datasets import load_images_torcs_4\n'), ((911, 923), 'cnn.create_cnn', 'create_cnn', ([], {}), '()\n', (921, 923), False, 'from cnn import create_cnn, calculate_cnn_output, calculate_fitness\n'), ((925, 983), 'deap.creator.create', 'creator.create', (['"""FitnessMax"""', 'base.Fitness'], {'weights': '(1.0,)'}), "('FitnessMax', base.Fitness, weights=(1.0,))\n", (939, 983), False, 'from deap import algorithms, base, creator, tools\n'), ((984, 1046), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'list'], {'fitness': 'creator.FitnessMax'}), "('Individual', list, fitness=creator.FitnessMax)\n", (998, 1046), False, 'from deap import algorithms, base, creator, tools\n'), ((1081, 1095), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (1093, 1095), False, 'from deap import algorithms, base, creator, tools\n'), ((1593, 1628), 'cnn.calculate_cnn_output', 'calculate_cnn_output', (['model', 'images'], {}), '(model, images)\n', (1613, 1628), False, 'from cnn import create_cnn, calculate_cnn_output, calculate_fitness\n'), ((1670, 1704), 'cnn.calculate_fitness', 'calculate_fitness', (['feature_vectors'], {}), '(feature_vectors)\n', (1687, 1704), False, 'from cnn import create_cnn, calculate_cnn_output, calculate_fitness\n'), ((2265, 2280), 'deap.tools.Logbook', 'tools.Logbook', ([], {}), '()\n', (2278, 2280), False, 'from deap import algorithms, base, creator, tools\n'), ((4286, 4303), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4300, 4303), True, 'import numpy as np\n'), ((4318, 4333), 'deap.tools.History', 'tools.History', ([], {}), '()\n', (4331, 4333), False, 'from deap import algorithms, base, creator, tools\n'), ((4540, 4559), 'deap.tools.HallOfFame', 'tools.HallOfFame', (['(1)'], {}), '(1)\n', (4556, 4559), False, 'from deap import algorithms, base, creator, tools\n'), ((4572, 4620), 
'deap.tools.Statistics', 'tools.Statistics', (['(lambda ind: ind.fitness.values)'], {}), '(lambda ind: ind.fitness.values)\n', (4588, 4620), False, 'from deap import algorithms, base, creator, tools\n'), ((5307, 5321), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5319, 5321), True, 'from matplotlib import pyplot as plt\n'), ((7262, 7279), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (7276, 7279), True, 'import numpy as np\n'), ((7284, 7298), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (7295, 7298), False, 'import random\n'), ((1503, 1525), 'numpy.asarray', 'np.asarray', (['individual'], {}), '(individual)\n', (1513, 1525), True, 'import numpy as np\n'), ((3202, 3252), 'deap.algorithms.varAnd', 'algorithms.varAnd', (['offspring', 'toolbox', 'cxpb', 'mutpb'], {}), '(offspring, toolbox, cxpb, mutpb)\n', (3219, 3252), False, 'from deap import algorithms, base, creator, tools\n'), ((6193, 6208), 'numpy.asarray', 'np.asarray', (['hof'], {}), '(hof)\n', (6203, 6208), True, 'import numpy as np\n'), ((2907, 2928), 'operator.attrgetter', 'attrgetter', (['"""fitness"""'], {}), "('fitness')\n", (2917, 2928), False, 'from operator import attrgetter\n'), ((7022, 7057), 'cnn.calculate_cnn_output', 'calculate_cnn_output', (['model', 'images'], {}), '(model, images)\n', (7042, 7057), False, 'from cnn import create_cnn, calculate_cnn_output, calculate_fitness\n'), ((3634, 3655), 'operator.attrgetter', 'attrgetter', (['"""fitness"""'], {}), "('fitness')\n", (3644, 3655), False, 'from operator import attrgetter\n'), ((6961, 6990), 'numpy.asarray', 'np.asarray', (['best_per_gen[gen]'], {}), '(best_per_gen[gen])\n', (6971, 6990), True, 'import numpy as np\n')] |
"""
==========================================
Earth Coordinate Conversion with a network
==========================================
In this tutorial, we will show how to place actors on specific locations
on the surface of the Earth using a new function.
"""
from fury import window, actor, utils, io
import matplotlib.cm as cm
from fury.data import read_viz_textures, fetch_viz_textures
import math
import numpy as np
import itertools
import igraph as ig
import xnetwork as xn
from math import radians, atan2, asin
import math
from splines.quaternion import UnitQuaternion
from geographiclib.geodesic import Geodesic
def angles2quat(azimuth, elevation, roll):
return (
UnitQuaternion.from_axis_angle((0, 0, 1), radians(azimuth)) *
UnitQuaternion.from_axis_angle((1, 0, 0), radians(elevation)) *
UnitQuaternion.from_axis_angle((0, 1, 0), radians(roll))
)
# See https://AudioSceneDescriptionFormat.readthedocs.io/quaternions.html
def quat2angles(quat):
a, b, c, d = quat.wxyz
sin_elevation = 2 * (a * b + c * d)
if 0.999999 < sin_elevation:
# elevation ~= 90°
return (
math.degrees(atan2(2 * (a * c + b * d), 2 * (a * b - c * d))),
90,
0)
elif sin_elevation < -0.999999:
# elevation ~= -90°
return (
math.degrees(atan2(-2 * (a * c + b * d), 2 * (c * d - a * b))),
-90,
0)
return (
math.degrees(atan2(2 * (a * d - b * c), 1 - 2 * (b**2 + d**2))),
math.degrees(asin(sin_elevation)),
math.degrees(atan2(2 * (a * c - b * d), 1 - 2 * (b**2 + c**2))))
def slerp(one, two, t):
return (two * one.inverse())**t * one
###############################################################################
# Create a new scene, and load in the image of the Earth using
# ``fetch_viz_textures`` and ``read_viz_textures``. We will use a 16k
# resolution texture for maximum detail.
scene = window.Scene()
g = xn.xnet2igraph("../../Networks/Airports.xnet")
# fetch_viz_textures()
# earth_file = read_viz_textures("1_earth_16k.jpg")
# earth_image = io.load_image(earth_file)
# earth_image = io.load_image("5_night_16k.jpg")
earth_image = io.load_image("1_earth_16k_gray.jpg")
earth_actor = actor.texture_on_sphere(earth_image)
###############################################################################
# Define the function to convert geographical coordinates of a location in
# latitude and longitude degrees to coordinates on the ``earth_actor`` surface.
# In this function, convert to radians, then to spherical coordinates, and
# lastly, to cartesian coordinates.
def latlong_coordinates(lat, lon,radius=1.0):
# Convert latitude and longitude to spherical coordinates
degrees_to_radians = math.pi/180.0
# phi = 90 - latitude
phi = (90-lat)*degrees_to_radians
# theta = longitude
theta = lon*degrees_to_radians*-1
# now convert to cartesian
x = radius*np.sin(phi)*np.cos(theta)
y = radius*np.sin(phi)*np.sin(theta)
z = radius*np.cos(phi)
# flipping z to y for FURY coordinates
return (x, z, y)
###############################################################################
# Use this new function to place some sphere actors on several big cities
# around the Earth.
# locationone = latlong_coordinates(40.730610, -73.935242) # new york city, us
# locationtwo = latlong_coordinates(39.916668, 116.383331) # beijing, china
# locationthree = latlong_coordinates(48.864716, 2.349014) # paris, france
###############################################################################
# Set the centers, radii, and colors of these spheres, and create a new
# ``sphere_actor`` for each location to add to the scene.
# centers = np.array([[*locationone], [*locationtwo], [*locationthree]])
# colors = np.random.rand(3, 3)
# radii = np.array([0.005, 0.005, 0.005])
centers = []
nodeColors = []
scales = []
positions = g.vs["Position"]
degrees = g.strength(weights="weight")
maxDegree = np.max(degrees)
cmap = cm.get_cmap('hot')
for vertexIndex in range(g.vcount()):
position = positions[vertexIndex]
degree = degrees[vertexIndex]
nodeColors.append(cmap((degree/maxDegree)**0.5))
scales.append(np.sqrt(degree/maxDegree)*0.04)
centers.append(latlong_coordinates(position[1], position[0], radius=1.01))
# constructing geometry for nodes as a marker actor
nodes = actor.markers(
centers = np.array(centers),
colors=nodeColors,
marker_opacity=1.0,
scales=np.array(scales),
# radii=np.array(scales),
marker="o"
)
# geometry for edges
lines = []
colors = []
geod = Geodesic.WGS84
weightsArray = g.es["weight"]
maxWeight = max(weightsArray)
edges = np.array(list(zip(g.get_edgelist(),g.es["weight"])),dtype=object)
edgesIndices = np.random.choice(len(edges), 20000,
p=weightsArray/np.sum(weightsArray),
replace=False)
for (fromIndex,toIndex),weight in edges[edgesIndices]:
normWeight = (weight/maxWeight)**0.25
opacity = normWeight*0.25
lineSegment = []
colorSegment = []
startPosition = positions[fromIndex]
endPosition = positions[toIndex]
startColor = np.array([0,0,0,opacity])
endColor = np.array([0,0,0,opacity])
startColor[0:3] = np.array(nodeColors[fromIndex])[0:3]
endColor[0:3] = np.array(nodeColors[toIndex])[0:3]
# midPosition = ((startPosition[0] + endPosition[0]) / 2, (startPosition[1] + endPosition[1]) / 2)
lineSegment.append(latlong_coordinates(startPosition[1], startPosition[0], radius=1.01))
colorSegment.append(startColor)
g = geod.Inverse(startPosition[1], startPosition[0], endPosition[1], endPosition[0])
totalDistance = g['s12']
if totalDistance < 0.00001:
continue
l = geod.InverseLine(startPosition[1], startPosition[0], endPosition[1], endPosition[0])
ds = 100e3;
n = int(math.ceil(l.s13 / ds))
for i in range(n + 1):
# if i == 0:
# print("distance latitude longitude azimuth")
s = min(ds * i, l.s13)
g = l.Position(s, Geodesic.STANDARD | Geodesic.LONG_UNROLL)
distance = g['s12']
latitude = g['lat2']
longitude = g['lon2']
coeff = distance/totalDistance
updowncoeff=math.sin(math.pi*coeff)
lineSegment.append(latlong_coordinates(latitude, longitude , radius=1.00+totalDistance/5e7*updowncoeff ))
color = (endColor - startColor) * coeff + startColor
whiteColor=np.array([1,1,1,opacity])
color = (whiteColor - color) * (totalDistance/2e7*updowncoeff) + color
colorSegment.append(color)
lineSegment.append(latlong_coordinates(endPosition[1], endPosition[0], radius=1.00))
colorSegment.append(endColor)
lines.append(lineSegment)
colors.append(colorSegment)
lineActors = actor.line(lines, colors,linewidth =3)
scene.add(earth_actor)
scene.add(lineActors)
scene.add(nodes)
###############################################################################
# Rotate the Earth to make sure the texture is correctly oriented. Change it's
# scale using ``actor.SetScale()``.
utils.rotate(earth_actor, (-90, 1, 0, 0))
utils.rotate(earth_actor, (180, 0, 1, 0))
earth_actor.SetScale(2, 2, 2)
##############################################################################
# Create a ShowManager object, which acts as the interface between the scene,
# the window and the interactor.
showm = window.ShowManager(scene,
size=(900, 768), reset_camera=False,
order_transparent=True)
###############################################################################
# Let's create a ``timer_callback`` function to add some animation to the
# Earth. Change the camera position and angle to fly over and zoom in on each
# new location.
counter = itertools.count()
def timer_callback(_obj, _event):
cnt = next(counter)
showm.render()
if cnt == 0:
scene.set_camera(position=(1.5, 3.5, 7.0))
###############################################################################
# Initialize the ShowManager object, add the timer_callback, and watch the
# new animation take place!
showm.initialize()
showm.add_timer_callback(True, 25, timer_callback)
showm.start()
| [
"fury.io.load_image",
"fury.window.ShowManager",
"numpy.sqrt",
"xnetwork.xnet2igraph",
"numpy.array",
"fury.actor.line",
"numpy.sin",
"fury.actor.texture_on_sphere",
"numpy.max",
"matplotlib.cm.get_cmap",
"math.radians",
"numpy.cos",
"math.atan2",
"fury.utils.rotate",
"math.ceil",
"mat... | [((1966, 1980), 'fury.window.Scene', 'window.Scene', ([], {}), '()\n', (1978, 1980), False, 'from fury import window, actor, utils, io\n'), ((1985, 2031), 'xnetwork.xnet2igraph', 'xn.xnet2igraph', (['"""../../Networks/Airports.xnet"""'], {}), "('../../Networks/Airports.xnet')\n", (1999, 2031), True, 'import xnetwork as xn\n'), ((2213, 2250), 'fury.io.load_image', 'io.load_image', (['"""1_earth_16k_gray.jpg"""'], {}), "('1_earth_16k_gray.jpg')\n", (2226, 2250), False, 'from fury import window, actor, utils, io\n'), ((2265, 2301), 'fury.actor.texture_on_sphere', 'actor.texture_on_sphere', (['earth_image'], {}), '(earth_image)\n', (2288, 2301), False, 'from fury import window, actor, utils, io\n'), ((4020, 4035), 'numpy.max', 'np.max', (['degrees'], {}), '(degrees)\n', (4026, 4035), True, 'import numpy as np\n'), ((4043, 4061), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""hot"""'], {}), "('hot')\n", (4054, 4061), True, 'import matplotlib.cm as cm\n'), ((6827, 6865), 'fury.actor.line', 'actor.line', (['lines', 'colors'], {'linewidth': '(3)'}), '(lines, colors, linewidth=3)\n', (6837, 6865), False, 'from fury import window, actor, utils, io\n'), ((7125, 7166), 'fury.utils.rotate', 'utils.rotate', (['earth_actor', '(-90, 1, 0, 0)'], {}), '(earth_actor, (-90, 1, 0, 0))\n', (7137, 7166), False, 'from fury import window, actor, utils, io\n'), ((7167, 7208), 'fury.utils.rotate', 'utils.rotate', (['earth_actor', '(180, 0, 1, 0)'], {}), '(earth_actor, (180, 0, 1, 0))\n', (7179, 7208), False, 'from fury import window, actor, utils, io\n'), ((7441, 7531), 'fury.window.ShowManager', 'window.ShowManager', (['scene'], {'size': '(900, 768)', 'reset_camera': '(False)', 'order_transparent': '(True)'}), '(scene, size=(900, 768), reset_camera=False,\n order_transparent=True)\n', (7459, 7531), False, 'from fury import window, actor, utils, io\n'), ((7842, 7859), 'itertools.count', 'itertools.count', ([], {}), '()\n', (7857, 7859), False, 'import itertools\n'), ((5195, 5223), 
'numpy.array', 'np.array', (['[0, 0, 0, opacity]'], {}), '([0, 0, 0, opacity])\n', (5203, 5223), True, 'import numpy as np\n'), ((5236, 5264), 'numpy.array', 'np.array', (['[0, 0, 0, opacity]'], {}), '([0, 0, 0, opacity])\n', (5244, 5264), True, 'import numpy as np\n'), ((2983, 2996), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2989, 2996), True, 'import numpy as np\n'), ((3024, 3037), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3030, 3037), True, 'import numpy as np\n'), ((3053, 3064), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (3059, 3064), True, 'import numpy as np\n'), ((4446, 4463), 'numpy.array', 'np.array', (['centers'], {}), '(centers)\n', (4454, 4463), True, 'import numpy as np\n'), ((4523, 4539), 'numpy.array', 'np.array', (['scales'], {}), '(scales)\n', (4531, 4539), True, 'import numpy as np\n'), ((5284, 5315), 'numpy.array', 'np.array', (['nodeColors[fromIndex]'], {}), '(nodeColors[fromIndex])\n', (5292, 5315), True, 'import numpy as np\n'), ((5341, 5370), 'numpy.array', 'np.array', (['nodeColors[toIndex]'], {}), '(nodeColors[toIndex])\n', (5349, 5370), True, 'import numpy as np\n'), ((5897, 5918), 'math.ceil', 'math.ceil', (['(l.s13 / ds)'], {}), '(l.s13 / ds)\n', (5906, 5918), False, 'import math\n'), ((6270, 6295), 'math.sin', 'math.sin', (['(math.pi * coeff)'], {}), '(math.pi * coeff)\n', (6278, 6295), False, 'import math\n'), ((6488, 6516), 'numpy.array', 'np.array', (['[1, 1, 1, opacity]'], {}), '([1, 1, 1, opacity])\n', (6496, 6516), True, 'import numpy as np\n'), ((872, 885), 'math.radians', 'radians', (['roll'], {}), '(roll)\n', (879, 885), False, 'from math import radians, atan2, asin\n'), ((1467, 1520), 'math.atan2', 'atan2', (['(2 * (a * d - b * c))', '(1 - 2 * (b ** 2 + d ** 2))'], {}), '(2 * (a * d - b * c), 1 - 2 * (b ** 2 + d ** 2))\n', (1472, 1520), False, 'from math import radians, atan2, asin\n'), ((1540, 1559), 'math.asin', 'asin', (['sin_elevation'], {}), '(sin_elevation)\n', (1544, 1559), False, 'from 
math import radians, atan2, asin\n'), ((1583, 1636), 'math.atan2', 'atan2', (['(2 * (a * c - b * d))', '(1 - 2 * (b ** 2 + c ** 2))'], {}), '(2 * (a * c - b * d), 1 - 2 * (b ** 2 + c ** 2))\n', (1588, 1636), False, 'from math import radians, atan2, asin\n'), ((2971, 2982), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2977, 2982), True, 'import numpy as np\n'), ((3012, 3023), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3018, 3023), True, 'import numpy as np\n'), ((4243, 4270), 'numpy.sqrt', 'np.sqrt', (['(degree / maxDegree)'], {}), '(degree / maxDegree)\n', (4250, 4270), True, 'import numpy as np\n'), ((4876, 4896), 'numpy.sum', 'np.sum', (['weightsArray'], {}), '(weightsArray)\n', (4882, 4896), True, 'import numpy as np\n'), ((730, 746), 'math.radians', 'radians', (['azimuth'], {}), '(azimuth)\n', (737, 746), False, 'from math import radians, atan2, asin\n'), ((800, 818), 'math.radians', 'radians', (['elevation'], {}), '(elevation)\n', (807, 818), False, 'from math import radians, atan2, asin\n'), ((1162, 1209), 'math.atan2', 'atan2', (['(2 * (a * c + b * d))', '(2 * (a * b - c * d))'], {}), '(2 * (a * c + b * d), 2 * (a * b - c * d))\n', (1167, 1209), False, 'from math import radians, atan2, asin\n'), ((1350, 1398), 'math.atan2', 'atan2', (['(-2 * (a * c + b * d))', '(2 * (c * d - a * b))'], {}), '(-2 * (a * c + b * d), 2 * (c * d - a * b))\n', (1355, 1398), False, 'from math import radians, atan2, asin\n')] |
#!/usr/bin/env python
#
# LICENSE
#
#
'''
Module :mod:`tests.seismicity.test_selector` tests algorithms for
geographical selection of seismicity with respect to various source geometries
'''
import unittest
import numpy as np
import datetime
from openquake.hmtk.seismicity.catalogue import Catalogue
from openquake.hmtk.seismicity.selector import (_check_depth_limits,
_get_decimal_from_datetime,
CatalogueSelector)
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.polygon import Polygon
from openquake.hazardlib.geo.line import Line
from openquake.hazardlib.geo.surface.simple_fault import SimpleFaultSurface
class TestSelector(unittest.TestCase):
'''
Tests the openquake.hmtk.seismicity.selector.Selector class
'''
def setUp(self):
self.catalogue = Catalogue()
self.polygon = None
def test_check_on_depth_limits(self):
# Tests the checks on depth limits
test_dict = {'upper_depth': None, 'lower_depth': None}
self.assertTupleEqual((0.0, np.inf), _check_depth_limits(test_dict))
test_dict = {'upper_depth': 2.0, 'lower_depth': None}
self.assertTupleEqual((2.0, np.inf), _check_depth_limits(test_dict))
test_dict = {'upper_depth': None, 'lower_depth': 10.0}
self.assertTupleEqual((0.0, 10.0), _check_depth_limits(test_dict))
test_dict = {'upper_depth': -4.2, 'lower_depth': None}
self.assertRaises(ValueError, _check_depth_limits, test_dict)
test_dict = {'upper_depth': 5.0, 'lower_depth': 1.0}
self.assertRaises(ValueError, _check_depth_limits, test_dict)
def test_convert_datetime_to_decimal(self):
# Tests the function to convert a time from a datetime object to a
# decimal - simple test to check conversion
# NB Still will not work for BCE dates
simple_time = datetime.datetime(1900, 6, 6, 1, 1, 1, 0)
stime = float(_get_decimal_from_datetime(simple_time))
self.assertAlmostEqual(stime, 1900.42751335)
def test_catalogue_selection(self):
# Tests the tools for selecting events within the catalogue
self.catalogue.data['longitude'] = np.arange(1., 6., 1.)
self.catalogue.data['latitude'] = np.arange(6., 11., 1.)
self.catalogue.data['depth'] = np.ones(5, dtype=bool)
# No events selected
flag_none = np.zeros(5, dtype=bool)
selector0 = CatalogueSelector(self.catalogue)
test_cat1 = selector0.select_catalogue(flag_none)
self.assertEqual(len(test_cat1.data['longitude']), 0)
self.assertEqual(len(test_cat1.data['latitude']), 0)
self.assertEqual(len(test_cat1.data['depth']), 0)
# All events selected
flag_all = np.ones(5, dtype=bool)
test_cat1 = selector0.select_catalogue(flag_all)
self.assertTrue(np.allclose(test_cat1.data['longitude'],
self.catalogue.data['longitude']))
self.assertTrue(np.allclose(test_cat1.data['latitude'],
self.catalogue.data['latitude']))
self.assertTrue(np.allclose(test_cat1.data['depth'],
self.catalogue.data['depth']))
# Some events selected
flag_1 = np.array([True, False, True, False, True])
test_cat1 = selector0.select_catalogue(flag_1)
self.assertTrue(np.allclose(test_cat1.data['longitude'],
np.array([1., 3., 5.])))
self.assertTrue(np.allclose(test_cat1.data['latitude'],
np.array([6., 8., 10])))
self.assertTrue(np.allclose(test_cat1.data['depth'],
np.array([1., 1., 1.])))
def test_select_within_polygon(self):
# Tests the selection of points within polygon
# Setup polygon
nodes = np.array([[5.0, 6.0], [6.0, 6.0], [6.0, 5.0], [5.0, 5.0]])
polygon0 = Polygon([Point(nodes[iloc, 0], nodes[iloc, 1])
for iloc in range(0, 4)])
self.catalogue.data['longitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['latitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['depth'] = np.ones(7, dtype=float)
# Simple case with nodes inside, outside and on the border of polygon
selector0 = CatalogueSelector(self.catalogue)
test_cat1 = selector0.within_polygon(polygon0)
self.assertTrue(np.allclose(test_cat1.data['longitude'],
np.array([5.0, 5.5, 6.0])))
self.assertTrue(np.allclose(test_cat1.data['latitude'],
np.array([5.0, 5.5, 6.0])))
self.assertTrue(np.allclose(test_cat1.data['depth'],
np.array([1.0, 1.0, 1.0])))
# CASE 2: As case 1 with one of the inside nodes outside of the depths
self.catalogue.data['depth'] = \
np.array([1.0, 1.0, 1.0, 50.0, 1.0, 1.0, 1.0], dtype=float)
selector0 = CatalogueSelector(self.catalogue)
test_cat1 = selector0.within_polygon(polygon0, upper_depth=0.0,
lower_depth=10.0)
self.assertTrue(np.allclose(test_cat1.data['longitude'],
np.array([5.0, 6.0])))
self.assertTrue(np.allclose(test_cat1.data['latitude'],
np.array([5.0, 6.0])))
self.assertTrue(np.allclose(test_cat1.data['depth'],
np.array([1.0])))
def test_point_in_circular_distance(self):
# Tests point in circular distance
self.catalogue.data['longitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['latitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['depth'] = np.ones(7, dtype=float)
test_point = Point(5.5, 5.5)
test_mesh = self.catalogue.hypocentres_as_mesh()
selector0 = CatalogueSelector(self.catalogue)
# Within 10 km
test_cat_10 = selector0.circular_distance_from_point(
test_point, 10., distance_type='epicentral')
np.testing.assert_array_equal(test_cat_10.data['longitude'],
np.array([5.5]))
np.testing.assert_array_equal(test_cat_10.data['latitude'],
np.array([5.5]))
np.testing.assert_array_equal(test_cat_10.data['depth'],
np.array([1.0]))
# Within 100 km
test_cat_100 = selector0.circular_distance_from_point(
test_point, 100., distance_type='epicentral')
np.testing.assert_array_equal(test_cat_100.data['longitude'],
np.array([5.0, 5.5, 6.0]))
np.testing.assert_array_equal(test_cat_100.data['latitude'],
np.array([5.0, 5.5, 6.0]))
np.testing.assert_array_equal(test_cat_100.data['depth'],
np.array([1.0, 1.0, 1.0]))
# Within 1000 km
test_cat_1000 = selector0.circular_distance_from_point(
test_point, 1000., distance_type='epicentral')
np.testing.assert_array_equal(test_cat_1000.data['longitude'],
self.catalogue.data['longitude'])
np.testing.assert_array_equal(test_cat_1000.data['latitude'],
self.catalogue.data['latitude'])
np.testing.assert_array_equal(test_cat_1000.data['depth'],
self.catalogue.data['depth'])
def test_cartesian_square_on_point(self):
# Tests the cartesian square centres on point
self.catalogue.data['longitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['latitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['depth'] = np.ones(7, dtype=float)
test_point = Point(5.5, 5.5)
test_mesh = self.catalogue.hypocentres_as_mesh()
selector0 = CatalogueSelector(self.catalogue)
# Within 10 km
test_cat_10 = selector0.cartesian_square_centred_on_point(
test_point, 10., distance_type='epicentral')
np.testing.assert_array_equal(test_cat_10.data['longitude'],
np.array([5.5]))
np.testing.assert_array_equal(test_cat_10.data['latitude'],
np.array([5.5]))
np.testing.assert_array_equal(test_cat_10.data['depth'],
np.array([1.0]))
# Within 100 km
test_cat_100 = selector0.cartesian_square_centred_on_point(
test_point, 100., distance_type='epicentral')
np.testing.assert_array_almost_equal(
test_cat_100.data['longitude'], np.array([5.0, 5.5, 6.0]))
np.testing.assert_array_almost_equal(
test_cat_100.data['latitude'], np.array([5.0, 5.5, 6.0]))
np.testing.assert_array_almost_equal(
test_cat_100.data['depth'], np.array([1.0, 1.0, 1.0]))
# Within 1000 km
test_cat_1000 = selector0.cartesian_square_centred_on_point(
test_point, 1000., distance_type='epicentral')
np.testing.assert_array_almost_equal(
test_cat_1000.data['longitude'], self.catalogue.data['longitude'])
np.testing.assert_array_almost_equal(
test_cat_1000.data['latitude'], self.catalogue.data['latitude'])
np.testing.assert_array_almost_equal(
test_cat_1000.data['depth'], self.catalogue.data['depth'])
def test_within_joyner_boore_distance(self):
# Tests the function to select within Joyner-Boore distance
self.catalogue.data['longitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['latitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['depth'] = np.ones(7, dtype=float)
selector0 = CatalogueSelector(self.catalogue)
# Construct Fault
trace0 = np.array([[5.5, 6.0], [5.5, 5.0]])
fault_trace = Line([Point(trace0[i, 0], trace0[i, 1])
for i in range(0, 2)])
# Simple fault with vertical dip
fault0 = SimpleFaultSurface.from_fault_data(fault_trace, 0., 20., 90.,
1.)
# Within 100 km
test_cat_100 = selector0.within_joyner_boore_distance(fault0, 100.)
np.testing.assert_array_almost_equal(
test_cat_100.data['longitude'], np.array([5.0, 5.5, 6.0]))
np.testing.assert_array_almost_equal(
test_cat_100.data['latitude'], np.array([5.0, 5.5, 6.0]))
np.testing.assert_array_almost_equal(
test_cat_100.data['depth'], np.array([1.0, 1.0, 1.0]))
# Simple fault with 30 degree dip
fault0 = SimpleFaultSurface.from_fault_data(
fault_trace, 0., 20., 30., 1.)
# Within 100 km
test_cat_100 = selector0.within_joyner_boore_distance(fault0, 100.)
np.testing.assert_array_almost_equal(
test_cat_100.data['longitude'], np.array([4.5, 5.0, 5.5, 6.0]))
np.testing.assert_array_almost_equal(
test_cat_100.data['latitude'], np.array([4.5, 5.0, 5.5, 6.0]))
np.testing.assert_array_almost_equal(
test_cat_100.data['depth'], np.array([1.0, 1.0, 1.0, 1.0]))
def test_within_rupture_distance(self):
# Tests the function to select within Joyner-Boore distance
self.catalogue.data['longitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['latitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['depth'] = np.ones(7, dtype=float)
selector0 = CatalogueSelector(self.catalogue)
# Construct Fault
trace0 = np.array([[5.5, 6.0], [5.5, 5.0]])
fault_trace = Line([Point(trace0[i, 0], trace0[i, 1])
for i in range(0, 2)])
# Simple fault with vertical dip
fault0 = SimpleFaultSurface.from_fault_data(fault_trace, 0., 20., 90.,
1.)
# Within 100 km
test_cat_100 = selector0.within_rupture_distance(fault0, 100.)
np.testing.assert_array_almost_equal(
test_cat_100.data['longitude'], np.array([5.0, 5.5, 6.0]))
np.testing.assert_array_almost_equal(
test_cat_100.data['latitude'], np.array([5.0, 5.5, 6.0]))
np.testing.assert_array_almost_equal(
test_cat_100.data['depth'], np.array([1.0, 1.0, 1.0]))
# Simple fault with 30 degree dip
fault0 = SimpleFaultSurface.from_fault_data(
fault_trace, 0., 20., 30., 1.)
# Within 100 km
test_cat_100 = selector0.within_rupture_distance(fault0, 100.)
np.testing.assert_array_almost_equal(
test_cat_100.data['longitude'], np.array([4.5, 5.0, 5.5, 6.0]))
np.testing.assert_array_almost_equal(
test_cat_100.data['latitude'], np.array([4.5, 5.0, 5.5, 6.0]))
np.testing.assert_array_almost_equal(
test_cat_100.data['depth'], np.array([1.0, 1.0, 1.0, 1.0]))
def test_select_within_time(self):
# Tests the function to select within a time period
self.catalogue.data['year'] = np.arange(1900, 2010, 20)
self.catalogue.data['month'] = np.arange(1, 12, 2)
self.catalogue.data['day'] = np.ones(6, dtype=int)
self.catalogue.data['hour'] = np.ones(6, dtype=int)
self.catalogue.data['minute'] = np.zeros(6, dtype=int)
self.catalogue.data['second'] = np.ones(6, dtype=float)
selector0 = CatalogueSelector(self.catalogue)
# Start time and End time not defined
test_cat_1 = selector0.within_time_period()
self._compare_time_data_dictionaries(test_cat_1.data,
self.catalogue.data)
# Start time defined - end time not defined
begin_time = datetime.datetime(1975, 1, 1, 0, 0, 0, 0)
expected_data = {'year': np.array([1980, 2000]),
'month': np.array([9, 11]),
'day': np.array([1, 1]),
'hour': np.array([1, 1]),
'minute': np.array([0, 0]),
'second': np.array([1., 1.])}
test_cat_1 = selector0.within_time_period(start_time=begin_time)
self._compare_time_data_dictionaries(expected_data, test_cat_1.data)
# Test 3 - Start time not defined, end-time defined
finish_time = datetime.datetime(1965, 1, 1, 0, 0, 0, 0)
expected_data = {'year': np.array([1900, 1920, 1940, 1960]),
'month': np.array([1, 3, 5, 7]),
'day': np.array([1, 1, 1, 1]),
'hour': np.array([1, 1, 1, 1]),
'minute': np.array([0, 0, 0, 0]),
'second': np.array([1., 1., 1., 1.])}
test_cat_1 = selector0.within_time_period(end_time=finish_time)
self._compare_time_data_dictionaries(expected_data, test_cat_1.data)
# Test 4 - both start time and end-time defined
begin_time = datetime.datetime(1935, 1, 1, 0, 0, 0, 0)
finish_time = datetime.datetime(1995, 1, 1, 0, 0, 0, 0)
expected_data = {'year': np.array([1940, 1960, 1980]),
'month': np.array([5, 7, 9]),
'day': np.array([1, 1, 1]),
'hour': np.array([1, 1, 1]),
'minute': np.array([0, 0, 0]),
'second': np.array([1., 1., 1.])}
test_cat_1 = selector0.within_time_period(begin_time, finish_time)
self._compare_time_data_dictionaries(expected_data, test_cat_1.data)
def _compare_time_data_dictionaries(self, expected, modelled):
'''
Compares the relevant time and date information in the catalogue
data dictionaries
'''
time_keys = ['year', 'month', 'day', 'hour', 'minute', 'second']
for key in time_keys:
# The second value is a float - all others are integers
if 'second' in key:
np.testing.assert_array_almost_equal(expected[key],
modelled[key])
else:
np.testing.assert_array_equal(expected[key], modelled[key])
def test_select_within_depth_range(self):
# Tests the function to select within the depth range
# Setup function
self.catalogue = Catalogue()
self.catalogue.data['depth'] = np.array([5., 15., 25., 35., 45.])
selector0 = CatalogueSelector(self.catalogue)
# Test case 1: No limits specified - all catalogue valid
test_cat_1 = selector0.within_depth_range()
np.testing.assert_array_almost_equal(test_cat_1.data['depth'],
self.catalogue.data['depth'])
# Test case 2: Lower depth limit specfied only
test_cat_1 = selector0.within_depth_range(lower_depth=30.)
np.testing.assert_array_almost_equal(test_cat_1.data['depth'],
np.array([5., 15., 25.]))
# Test case 3: Upper depth limit specified only
test_cat_1 = selector0.within_depth_range(upper_depth=20.)
np.testing.assert_array_almost_equal(test_cat_1.data['depth'],
np.array([25., 35., 45.]))
# Test case 4: Both depth limits specified
test_cat_1 = selector0.within_depth_range(upper_depth=20.,
lower_depth=40.)
np.testing.assert_array_almost_equal(test_cat_1.data['depth'],
np.array([25., 35.]))
def test_select_within_magnitude_range(self):
'''
Tests the function to select within the magnitude range
'''
# Setup function
self.catalogue = Catalogue()
self.catalogue.data['magnitude'] = np.array([4., 5., 6., 7., 8.])
selector0 = CatalogueSelector(self.catalogue)
# Test case 1: No limits specified - all catalogue valid
test_cat_1 = selector0.within_magnitude_range()
np.testing.assert_array_almost_equal(test_cat_1.data['magnitude'],
self.catalogue.data['magnitude'])
# Test case 2: Lower depth limit specfied only
test_cat_1 = selector0.within_magnitude_range(lower_mag=5.5)
np.testing.assert_array_almost_equal(test_cat_1.data['magnitude'],
np.array([6., 7., 8.]))
# Test case 3: Upper depth limit specified only
test_cat_1 = selector0.within_magnitude_range(upper_mag=5.5)
np.testing.assert_array_almost_equal(test_cat_1.data['magnitude'],
np.array([4., 5.]))
# Test case 4: Both depth limits specified
test_cat_1 = selector0.within_magnitude_range(upper_mag=7.5,
lower_mag=5.5)
np.testing.assert_array_almost_equal(test_cat_1.data['magnitude'],
np.array([6., 7.]))
def test_create_cluster_set(self):
"""
"""
# Setup function
self.catalogue = Catalogue()
self.catalogue.data["EventID"] = np.array([1, 2, 3, 4, 5, 6])
self.catalogue.data["magnitude"] = np.array([7.0, 5.0, 5.0,
5.0, 4.0, 4.0])
selector0 = CatalogueSelector(self.catalogue)
vcl = np.array([0, 1, 1, 1, 2, 2])
cluster_set = selector0.create_cluster_set(vcl)
np.testing.assert_array_equal(cluster_set[0].data["EventID"],
np.array([1]))
np.testing.assert_array_almost_equal(cluster_set[0].data["magnitude"],
np.array([7.0]))
np.testing.assert_array_equal(cluster_set[1].data["EventID"],
np.array([2, 3, 4]))
np.testing.assert_array_almost_equal(cluster_set[1].data["magnitude"],
np.array([5.0, 5.0, 5.0]))
np.testing.assert_array_equal(cluster_set[2].data["EventID"],
np.array([5, 6]))
np.testing.assert_array_almost_equal(cluster_set[2].data["magnitude"],
np.array([4.0, 4.0]))
| [
"datetime.datetime",
"numpy.testing.assert_array_almost_equal",
"numpy.allclose",
"numpy.ones",
"openquake.hmtk.seismicity.selector._get_decimal_from_datetime",
"openquake.hmtk.seismicity.catalogue.Catalogue",
"numpy.testing.assert_array_equal",
"openquake.hazardlib.geo.point.Point",
"openquake.hmtk... | [((903, 914), 'openquake.hmtk.seismicity.catalogue.Catalogue', 'Catalogue', ([], {}), '()\n', (912, 914), False, 'from openquake.hmtk.seismicity.catalogue import Catalogue\n'), ((1959, 2000), 'datetime.datetime', 'datetime.datetime', (['(1900)', '(6)', '(6)', '(1)', '(1)', '(1)', '(0)'], {}), '(1900, 6, 6, 1, 1, 1, 0)\n', (1976, 2000), False, 'import datetime\n'), ((2269, 2293), 'numpy.arange', 'np.arange', (['(1.0)', '(6.0)', '(1.0)'], {}), '(1.0, 6.0, 1.0)\n', (2278, 2293), True, 'import numpy as np\n'), ((2333, 2358), 'numpy.arange', 'np.arange', (['(6.0)', '(11.0)', '(1.0)'], {}), '(6.0, 11.0, 1.0)\n', (2342, 2358), True, 'import numpy as np\n'), ((2395, 2417), 'numpy.ones', 'np.ones', (['(5)'], {'dtype': 'bool'}), '(5, dtype=bool)\n', (2402, 2417), True, 'import numpy as np\n'), ((2468, 2491), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'bool'}), '(5, dtype=bool)\n', (2476, 2491), True, 'import numpy as np\n'), ((2512, 2545), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (2529, 2545), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((2835, 2857), 'numpy.ones', 'np.ones', (['(5)'], {'dtype': 'bool'}), '(5, dtype=bool)\n', (2842, 2857), True, 'import numpy as np\n'), ((3362, 3404), 'numpy.array', 'np.array', (['[True, False, True, False, True]'], {}), '([True, False, True, False, True])\n', (3370, 3404), True, 'import numpy as np\n'), ((3972, 4030), 'numpy.array', 'np.array', (['[[5.0, 6.0], [6.0, 6.0], [6.0, 5.0], [5.0, 5.0]]'], {}), '([[5.0, 6.0], [6.0, 6.0], [6.0, 5.0], [5.0, 5.0]])\n', (3980, 4030), True, 'import numpy as np\n'), ((4194, 4218), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (4203, 4218), True, 'import numpy as np\n'), ((4261, 4285), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (4270, 
4285), True, 'import numpy as np\n'), ((4325, 4348), 'numpy.ones', 'np.ones', (['(7)'], {'dtype': 'float'}), '(7, dtype=float)\n', (4332, 4348), True, 'import numpy as np\n'), ((4447, 4480), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (4464, 4480), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((5050, 5109), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 50.0, 1.0, 1.0, 1.0]'], {'dtype': 'float'}), '([1.0, 1.0, 1.0, 50.0, 1.0, 1.0, 1.0], dtype=float)\n', (5058, 5109), True, 'import numpy as np\n'), ((5130, 5163), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (5147, 5163), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((5796, 5820), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (5805, 5820), True, 'import numpy as np\n'), ((5863, 5887), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (5872, 5887), True, 'import numpy as np\n'), ((5927, 5950), 'numpy.ones', 'np.ones', (['(7)'], {'dtype': 'float'}), '(7, dtype=float)\n', (5934, 5950), True, 'import numpy as np\n'), ((5972, 5987), 'openquake.hazardlib.geo.point.Point', 'Point', (['(5.5)', '(5.5)'], {}), '(5.5, 5.5)\n', (5977, 5987), False, 'from openquake.hazardlib.geo.point import Point\n'), ((6065, 6098), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (6082, 6098), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((7311, 7412), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["test_cat_1000.data['longitude']", "self.catalogue.data['longitude']"], {}), 
"(test_cat_1000.data['longitude'], self.\n catalogue.data['longitude'])\n", (7340, 7412), True, 'import numpy as np\n'), ((7454, 7553), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["test_cat_1000.data['latitude']", "self.catalogue.data['latitude']"], {}), "(test_cat_1000.data['latitude'], self.\n catalogue.data['latitude'])\n", (7483, 7553), True, 'import numpy as np\n'), ((7595, 7688), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["test_cat_1000.data['depth']", "self.catalogue.data['depth']"], {}), "(test_cat_1000.data['depth'], self.catalogue.\n data['depth'])\n", (7624, 7688), True, 'import numpy as np\n'), ((7867, 7891), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (7876, 7891), True, 'import numpy as np\n'), ((7934, 7958), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (7943, 7958), True, 'import numpy as np\n'), ((7998, 8021), 'numpy.ones', 'np.ones', (['(7)'], {'dtype': 'float'}), '(7, dtype=float)\n', (8005, 8021), True, 'import numpy as np\n'), ((8043, 8058), 'openquake.hazardlib.geo.point.Point', 'Point', (['(5.5)', '(5.5)'], {}), '(5.5, 5.5)\n', (8048, 8058), False, 'from openquake.hazardlib.geo.point import Point\n'), ((8136, 8169), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (8153, 8169), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((9343, 9451), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["test_cat_1000.data['longitude']", "self.catalogue.data['longitude']"], {}), "(test_cat_1000.data['longitude'], self.\n catalogue.data['longitude'])\n", (9379, 9451), True, 'import numpy as np\n'), ((9468, 9574), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["test_cat_1000.data['latitude']", 
"self.catalogue.data['latitude']"], {}), "(test_cat_1000.data['latitude'], self.\n catalogue.data['latitude'])\n", (9504, 9574), True, 'import numpy as np\n'), ((9591, 9691), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["test_cat_1000.data['depth']", "self.catalogue.data['depth']"], {}), "(test_cat_1000.data['depth'], self.\n catalogue.data['depth'])\n", (9627, 9691), True, 'import numpy as np\n'), ((9862, 9886), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (9871, 9886), True, 'import numpy as np\n'), ((9929, 9953), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (9938, 9953), True, 'import numpy as np\n'), ((9993, 10016), 'numpy.ones', 'np.ones', (['(7)'], {'dtype': 'float'}), '(7, dtype=float)\n', (10000, 10016), True, 'import numpy as np\n'), ((10037, 10070), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (10054, 10070), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((10114, 10148), 'numpy.array', 'np.array', (['[[5.5, 6.0], [5.5, 5.0]]'], {}), '([[5.5, 6.0], [5.5, 5.0]])\n', (10122, 10148), True, 'import numpy as np\n'), ((10321, 10390), 'openquake.hazardlib.geo.surface.simple_fault.SimpleFaultSurface.from_fault_data', 'SimpleFaultSurface.from_fault_data', (['fault_trace', '(0.0)', '(20.0)', '(90.0)', '(1.0)'], {}), '(fault_trace, 0.0, 20.0, 90.0, 1.0)\n', (10355, 10390), False, 'from openquake.hazardlib.geo.surface.simple_fault import SimpleFaultSurface\n'), ((10946, 11015), 'openquake.hazardlib.geo.surface.simple_fault.SimpleFaultSurface.from_fault_data', 'SimpleFaultSurface.from_fault_data', (['fault_trace', '(0.0)', '(20.0)', '(30.0)', '(1.0)'], {}), '(fault_trace, 0.0, 20.0, 30.0, 1.0)\n', (10980, 11015), False, 'from openquake.hazardlib.geo.surface.simple_fault import 
SimpleFaultSurface\n'), ((11644, 11668), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (11653, 11668), True, 'import numpy as np\n'), ((11711, 11735), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (11720, 11735), True, 'import numpy as np\n'), ((11775, 11798), 'numpy.ones', 'np.ones', (['(7)'], {'dtype': 'float'}), '(7, dtype=float)\n', (11782, 11798), True, 'import numpy as np\n'), ((11819, 11852), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (11836, 11852), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((11896, 11930), 'numpy.array', 'np.array', (['[[5.5, 6.0], [5.5, 5.0]]'], {}), '([[5.5, 6.0], [5.5, 5.0]])\n', (11904, 11930), True, 'import numpy as np\n'), ((12103, 12172), 'openquake.hazardlib.geo.surface.simple_fault.SimpleFaultSurface.from_fault_data', 'SimpleFaultSurface.from_fault_data', (['fault_trace', '(0.0)', '(20.0)', '(90.0)', '(1.0)'], {}), '(fault_trace, 0.0, 20.0, 90.0, 1.0)\n', (12137, 12172), False, 'from openquake.hazardlib.geo.surface.simple_fault import SimpleFaultSurface\n'), ((12723, 12792), 'openquake.hazardlib.geo.surface.simple_fault.SimpleFaultSurface.from_fault_data', 'SimpleFaultSurface.from_fault_data', (['fault_trace', '(0.0)', '(20.0)', '(30.0)', '(1.0)'], {}), '(fault_trace, 0.0, 20.0, 30.0, 1.0)\n', (12757, 12792), False, 'from openquake.hazardlib.geo.surface.simple_fault import SimpleFaultSurface\n'), ((13398, 13423), 'numpy.arange', 'np.arange', (['(1900)', '(2010)', '(20)'], {}), '(1900, 2010, 20)\n', (13407, 13423), True, 'import numpy as np\n'), ((13463, 13482), 'numpy.arange', 'np.arange', (['(1)', '(12)', '(2)'], {}), '(1, 12, 2)\n', (13472, 13482), True, 'import numpy as np\n'), ((13520, 13541), 'numpy.ones', 'np.ones', (['(6)'], {'dtype': 'int'}), '(6, dtype=int)\n', (13527, 13541), 
True, 'import numpy as np\n'), ((13580, 13601), 'numpy.ones', 'np.ones', (['(6)'], {'dtype': 'int'}), '(6, dtype=int)\n', (13587, 13601), True, 'import numpy as np\n'), ((13642, 13664), 'numpy.zeros', 'np.zeros', (['(6)'], {'dtype': 'int'}), '(6, dtype=int)\n', (13650, 13664), True, 'import numpy as np\n'), ((13705, 13728), 'numpy.ones', 'np.ones', (['(6)'], {'dtype': 'float'}), '(6, dtype=float)\n', (13712, 13728), True, 'import numpy as np\n'), ((13750, 13783), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (13767, 13783), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((14085, 14126), 'datetime.datetime', 'datetime.datetime', (['(1975)', '(1)', '(1)', '(0)', '(0)', '(0)', '(0)'], {}), '(1975, 1, 1, 0, 0, 0, 0)\n', (14102, 14126), False, 'import datetime\n'), ((14680, 14721), 'datetime.datetime', 'datetime.datetime', (['(1965)', '(1)', '(1)', '(0)', '(0)', '(0)', '(0)'], {}), '(1965, 1, 1, 0, 0, 0, 0)\n', (14697, 14721), False, 'import datetime\n'), ((15312, 15353), 'datetime.datetime', 'datetime.datetime', (['(1935)', '(1)', '(1)', '(0)', '(0)', '(0)', '(0)'], {}), '(1935, 1, 1, 0, 0, 0, 0)\n', (15329, 15353), False, 'import datetime\n'), ((15376, 15417), 'datetime.datetime', 'datetime.datetime', (['(1995)', '(1)', '(1)', '(0)', '(0)', '(0)', '(0)'], {}), '(1995, 1, 1, 0, 0, 0, 0)\n', (15393, 15417), False, 'import datetime\n'), ((16696, 16707), 'openquake.hmtk.seismicity.catalogue.Catalogue', 'Catalogue', ([], {}), '()\n', (16705, 16707), False, 'from openquake.hmtk.seismicity.catalogue import Catalogue\n'), ((16747, 16786), 'numpy.array', 'np.array', (['[5.0, 15.0, 25.0, 35.0, 45.0]'], {}), '([5.0, 15.0, 25.0, 35.0, 45.0])\n', (16755, 16786), True, 'import numpy as np\n'), ((16803, 16836), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), 
'(self.catalogue)\n', (16820, 16836), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((16962, 17059), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["test_cat_1.data['depth']", "self.catalogue.data['depth']"], {}), "(test_cat_1.data['depth'], self.\n catalogue.data['depth'])\n", (16998, 17059), True, 'import numpy as np\n'), ((18144, 18155), 'openquake.hmtk.seismicity.catalogue.Catalogue', 'Catalogue', ([], {}), '()\n', (18153, 18155), False, 'from openquake.hmtk.seismicity.catalogue import Catalogue\n'), ((18199, 18234), 'numpy.array', 'np.array', (['[4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([4.0, 5.0, 6.0, 7.0, 8.0])\n', (18207, 18234), True, 'import numpy as np\n'), ((18251, 18284), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (18268, 18284), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((18414, 18519), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["test_cat_1.data['magnitude']", "self.catalogue.data['magnitude']"], {}), "(test_cat_1.data['magnitude'], self.\n catalogue.data['magnitude'])\n", (18450, 18519), True, 'import numpy as np\n'), ((19539, 19550), 'openquake.hmtk.seismicity.catalogue.Catalogue', 'Catalogue', ([], {}), '()\n', (19548, 19550), False, 'from openquake.hmtk.seismicity.catalogue import Catalogue\n'), ((19592, 19620), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (19600, 19620), True, 'import numpy as np\n'), ((19664, 19704), 'numpy.array', 'np.array', (['[7.0, 5.0, 5.0, 5.0, 4.0, 4.0]'], {}), '([7.0, 5.0, 5.0, 5.0, 4.0, 4.0])\n', (19672, 19704), True, 'import numpy as np\n'), ((19778, 19811), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), 
'(self.catalogue)\n', (19795, 19811), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((19826, 19854), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 2, 2]'], {}), '([0, 1, 1, 1, 2, 2])\n', (19834, 19854), True, 'import numpy as np\n'), ((1137, 1167), 'openquake.hmtk.seismicity.selector._check_depth_limits', '_check_depth_limits', (['test_dict'], {}), '(test_dict)\n', (1156, 1167), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((1277, 1307), 'openquake.hmtk.seismicity.selector._check_depth_limits', '_check_depth_limits', (['test_dict'], {}), '(test_dict)\n', (1296, 1307), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((1416, 1446), 'openquake.hmtk.seismicity.selector._check_depth_limits', '_check_depth_limits', (['test_dict'], {}), '(test_dict)\n', (1435, 1446), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((2023, 2062), 'openquake.hmtk.seismicity.selector._get_decimal_from_datetime', '_get_decimal_from_datetime', (['simple_time'], {}), '(simple_time)\n', (2049, 2062), False, 'from openquake.hmtk.seismicity.selector import _check_depth_limits, _get_decimal_from_datetime, CatalogueSelector\n'), ((2939, 3013), 'numpy.allclose', 'np.allclose', (["test_cat1.data['longitude']", "self.catalogue.data['longitude']"], {}), "(test_cat1.data['longitude'], self.catalogue.data['longitude'])\n", (2950, 3013), True, 'import numpy as np\n'), ((3075, 3147), 'numpy.allclose', 'np.allclose', (["test_cat1.data['latitude']", "self.catalogue.data['latitude']"], {}), "(test_cat1.data['latitude'], self.catalogue.data['latitude'])\n", (3086, 3147), True, 'import numpy as np\n'), ((3209, 3275), 'numpy.allclose', 'np.allclose', (["test_cat1.data['depth']", 
"self.catalogue.data['depth']"], {}), "(test_cat1.data['depth'], self.catalogue.data['depth'])\n", (3220, 3275), True, 'import numpy as np\n'), ((6348, 6363), 'numpy.array', 'np.array', (['[5.5]'], {}), '([5.5])\n', (6356, 6363), True, 'import numpy as np\n'), ((6471, 6486), 'numpy.array', 'np.array', (['[5.5]'], {}), '([5.5])\n', (6479, 6486), True, 'import numpy as np\n'), ((6591, 6606), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (6599, 6606), True, 'import numpy as np\n'), ((6862, 6887), 'numpy.array', 'np.array', (['[5.0, 5.5, 6.0]'], {}), '([5.0, 5.5, 6.0])\n', (6870, 6887), True, 'import numpy as np\n'), ((6996, 7021), 'numpy.array', 'np.array', (['[5.0, 5.5, 6.0]'], {}), '([5.0, 5.5, 6.0])\n', (7004, 7021), True, 'import numpy as np\n'), ((7127, 7152), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (7135, 7152), True, 'import numpy as np\n'), ((8424, 8439), 'numpy.array', 'np.array', (['[5.5]'], {}), '([5.5])\n', (8432, 8439), True, 'import numpy as np\n'), ((8547, 8562), 'numpy.array', 'np.array', (['[5.5]'], {}), '([5.5])\n', (8555, 8562), True, 'import numpy as np\n'), ((8667, 8682), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (8675, 8682), True, 'import numpy as np\n'), ((8925, 8950), 'numpy.array', 'np.array', (['[5.0, 5.5, 6.0]'], {}), '([5.0, 5.5, 6.0])\n', (8933, 8950), True, 'import numpy as np\n'), ((9041, 9066), 'numpy.array', 'np.array', (['[5.0, 5.5, 6.0]'], {}), '([5.0, 5.5, 6.0])\n', (9049, 9066), True, 'import numpy as np\n'), ((9154, 9179), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (9162, 9179), True, 'import numpy as np\n'), ((10630, 10655), 'numpy.array', 'np.array', (['[5.0, 5.5, 6.0]'], {}), '([5.0, 5.5, 6.0])\n', (10638, 10655), True, 'import numpy as np\n'), ((10746, 10771), 'numpy.array', 'np.array', (['[5.0, 5.5, 6.0]'], {}), '([5.0, 5.5, 6.0])\n', (10754, 10771), True, 'import numpy as np\n'), ((10859, 10884), 'numpy.array', 'np.array', 
(['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (10867, 10884), True, 'import numpy as np\n'), ((11216, 11246), 'numpy.array', 'np.array', (['[4.5, 5.0, 5.5, 6.0]'], {}), '([4.5, 5.0, 5.5, 6.0])\n', (11224, 11246), True, 'import numpy as np\n'), ((11337, 11367), 'numpy.array', 'np.array', (['[4.5, 5.0, 5.5, 6.0]'], {}), '([4.5, 5.0, 5.5, 6.0])\n', (11345, 11367), True, 'import numpy as np\n'), ((11455, 11485), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (11463, 11485), True, 'import numpy as np\n'), ((12407, 12432), 'numpy.array', 'np.array', (['[5.0, 5.5, 6.0]'], {}), '([5.0, 5.5, 6.0])\n', (12415, 12432), True, 'import numpy as np\n'), ((12523, 12548), 'numpy.array', 'np.array', (['[5.0, 5.5, 6.0]'], {}), '([5.0, 5.5, 6.0])\n', (12531, 12548), True, 'import numpy as np\n'), ((12636, 12661), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (12644, 12661), True, 'import numpy as np\n'), ((12988, 13018), 'numpy.array', 'np.array', (['[4.5, 5.0, 5.5, 6.0]'], {}), '([4.5, 5.0, 5.5, 6.0])\n', (12996, 13018), True, 'import numpy as np\n'), ((13109, 13139), 'numpy.array', 'np.array', (['[4.5, 5.0, 5.5, 6.0]'], {}), '([4.5, 5.0, 5.5, 6.0])\n', (13117, 13139), True, 'import numpy as np\n'), ((13227, 13257), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (13235, 13257), True, 'import numpy as np\n'), ((14160, 14182), 'numpy.array', 'np.array', (['[1980, 2000]'], {}), '([1980, 2000])\n', (14168, 14182), True, 'import numpy as np\n'), ((14218, 14235), 'numpy.array', 'np.array', (['[9, 11]'], {}), '([9, 11])\n', (14226, 14235), True, 'import numpy as np\n'), ((14269, 14285), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (14277, 14285), True, 'import numpy as np\n'), ((14320, 14336), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (14328, 14336), True, 'import numpy as np\n'), ((14373, 14389), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 
0])\n', (14381, 14389), True, 'import numpy as np\n'), ((14426, 14446), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (14434, 14446), True, 'import numpy as np\n'), ((14755, 14789), 'numpy.array', 'np.array', (['[1900, 1920, 1940, 1960]'], {}), '([1900, 1920, 1940, 1960])\n', (14763, 14789), True, 'import numpy as np\n'), ((14825, 14847), 'numpy.array', 'np.array', (['[1, 3, 5, 7]'], {}), '([1, 3, 5, 7])\n', (14833, 14847), True, 'import numpy as np\n'), ((14881, 14903), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (14889, 14903), True, 'import numpy as np\n'), ((14938, 14960), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (14946, 14960), True, 'import numpy as np\n'), ((14997, 15019), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (15005, 15019), True, 'import numpy as np\n'), ((15056, 15086), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (15064, 15086), True, 'import numpy as np\n'), ((15451, 15479), 'numpy.array', 'np.array', (['[1940, 1960, 1980]'], {}), '([1940, 1960, 1980])\n', (15459, 15479), True, 'import numpy as np\n'), ((15515, 15534), 'numpy.array', 'np.array', (['[5, 7, 9]'], {}), '([5, 7, 9])\n', (15523, 15534), True, 'import numpy as np\n'), ((15568, 15587), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (15576, 15587), True, 'import numpy as np\n'), ((15622, 15641), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (15630, 15641), True, 'import numpy as np\n'), ((15678, 15697), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (15686, 15697), True, 'import numpy as np\n'), ((15734, 15759), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (15742, 15759), True, 'import numpy as np\n'), ((17339, 17366), 'numpy.array', 'np.array', (['[5.0, 15.0, 25.0]'], {}), '([5.0, 15.0, 25.0])\n', (17347, 17366), True, 'import numpy as np\n'), ((17604, 
17632), 'numpy.array', 'np.array', (['[25.0, 35.0, 45.0]'], {}), '([25.0, 35.0, 45.0])\n', (17612, 17632), True, 'import numpy as np\n'), ((17933, 17955), 'numpy.array', 'np.array', (['[25.0, 35.0]'], {}), '([25.0, 35.0])\n', (17941, 17955), True, 'import numpy as np\n'), ((18805, 18830), 'numpy.array', 'np.array', (['[6.0, 7.0, 8.0]'], {}), '([6.0, 7.0, 8.0])\n', (18813, 18830), True, 'import numpy as np\n'), ((19074, 19094), 'numpy.array', 'np.array', (['[4.0, 5.0]'], {}), '([4.0, 5.0])\n', (19082, 19094), True, 'import numpy as np\n'), ((19404, 19424), 'numpy.array', 'np.array', (['[6.0, 7.0]'], {}), '([6.0, 7.0])\n', (19412, 19424), True, 'import numpy as np\n'), ((20019, 20032), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (20027, 20032), True, 'import numpy as np\n'), ((20158, 20173), 'numpy.array', 'np.array', (['[7.0]'], {}), '([7.0])\n', (20166, 20173), True, 'import numpy as np\n'), ((20283, 20302), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (20291, 20302), True, 'import numpy as np\n'), ((20428, 20453), 'numpy.array', 'np.array', (['[5.0, 5.0, 5.0]'], {}), '([5.0, 5.0, 5.0])\n', (20436, 20453), True, 'import numpy as np\n'), ((20563, 20579), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (20571, 20579), True, 'import numpy as np\n'), ((20705, 20725), 'numpy.array', 'np.array', (['[4.0, 4.0]'], {}), '([4.0, 4.0])\n', (20713, 20725), True, 'import numpy as np\n'), ((3561, 3586), 'numpy.array', 'np.array', (['[1.0, 3.0, 5.0]'], {}), '([1.0, 3.0, 5.0])\n', (3569, 3586), True, 'import numpy as np\n'), ((3686, 3710), 'numpy.array', 'np.array', (['[6.0, 8.0, 10]'], {}), '([6.0, 8.0, 10])\n', (3694, 3710), True, 'import numpy as np\n'), ((3808, 3833), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (3816, 3833), True, 'import numpy as np\n'), ((4059, 4096), 'openquake.hazardlib.geo.point.Point', 'Point', (['nodes[iloc, 0]', 'nodes[iloc, 1]'], {}), '(nodes[iloc, 0], nodes[iloc, 1])\n', (4064, 
4096), False, 'from openquake.hazardlib.geo.point import Point\n'), ((4637, 4662), 'numpy.array', 'np.array', (['[5.0, 5.5, 6.0]'], {}), '([5.0, 5.5, 6.0])\n', (4645, 4662), True, 'import numpy as np\n'), ((4765, 4790), 'numpy.array', 'np.array', (['[5.0, 5.5, 6.0]'], {}), '([5.0, 5.5, 6.0])\n', (4773, 4790), True, 'import numpy as np\n'), ((4890, 4915), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4898, 4915), True, 'import numpy as np\n'), ((5400, 5420), 'numpy.array', 'np.array', (['[5.0, 6.0]'], {}), '([5.0, 6.0])\n', (5408, 5420), True, 'import numpy as np\n'), ((5523, 5543), 'numpy.array', 'np.array', (['[5.0, 6.0]'], {}), '([5.0, 6.0])\n', (5531, 5543), True, 'import numpy as np\n'), ((5643, 5658), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (5651, 5658), True, 'import numpy as np\n'), ((10177, 10210), 'openquake.hazardlib.geo.point.Point', 'Point', (['trace0[i, 0]', 'trace0[i, 1]'], {}), '(trace0[i, 0], trace0[i, 1])\n', (10182, 10210), False, 'from openquake.hazardlib.geo.point import Point\n'), ((11959, 11992), 'openquake.hazardlib.geo.point.Point', 'Point', (['trace0[i, 0]', 'trace0[i, 1]'], {}), '(trace0[i, 0], trace0[i, 1])\n', (11964, 11992), False, 'from openquake.hazardlib.geo.point import Point\n'), ((16322, 16388), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expected[key]', 'modelled[key]'], {}), '(expected[key], modelled[key])\n', (16358, 16388), True, 'import numpy as np\n'), ((16476, 16535), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected[key]', 'modelled[key]'], {}), '(expected[key], modelled[key])\n', (16505, 16535), True, 'import numpy as np\n')] |
import numpy as np
def compute_gravitational_force(cphi, sphi, ctheta, stheta, mass, g):
    """Gravitational force rotated into the body frame.

    cphi/sphi and ctheta/stheta are the cosines and sines of the Euler
    angles phi and theta; ``mass`` is the vehicle mass and ``g`` the
    gravitational acceleration.  Returns the body-frame force
    components (fx, fy, fz).
    """
    weight = mass * g
    return (-weight * stheta,
            weight * ctheta * sphi,
            weight * ctheta * cphi)
def compute_gamma(attrs):
    """Pre-compute the inertia coefficients gamma_0 .. gamma_8.

    ``attrs`` must expose the moments of inertia Jx, Jy, Jz and the
    product of inertia Jxz.  Returns a length-9 numpy array
    [gamma_0, gamma_1, ..., gamma_8].
    """
    Jx, Jy, Jz, Jxz = attrs.Jx, attrs.Jy, attrs.Jz, attrs.Jxz
    # gamma_0 is the common denominator of most of the other terms
    g0 = Jx * Jz - Jxz ** 2
    return np.array([
        g0,
        (Jxz * (Jx - Jy + Jz)) / g0,
        (Jz * (Jz - Jy) + Jxz ** 2) / g0,
        Jz / g0,
        Jxz / g0,
        (Jz - Jx) / Jy,
        Jxz / Jy,
        ((Jx - Jy) * Jx + Jxz ** 2) / g0,
        Jx / g0,
    ])
def simple_propeller_forces(params, Va, delta_t):
    """Propeller thrust from the simple quadratic motor model.

    ``params`` must expose rho, S_prop, C_prop and k_motor; ``Va`` is
    the airspeed and ``delta_t`` the throttle setting.  Returns the
    body-frame force components (fx, fy, fz); the thrust acts purely
    along the body x axis.
    """
    dynamic_term = params.k_motor ** 2 * delta_t ** 2 - Va ** 2
    fx = 0.5 * params.rho * params.S_prop * params.C_prop * dynamic_term
    return fx, 0., 0.
def simple_propeller_torques(params, delta_t):
    """Propeller reaction torque from the simple motor model.

    ``params`` must expose kT_p and kOmega; ``delta_t`` is the throttle
    setting.  Returns the body-frame moments (l, m, n); only the roll
    moment is non-zero in this model.
    """
    roll_moment = -params.kT_p * params.kOmega ** 2 * delta_t ** 2
    return roll_moment, 0., 0.
def propeller_thrust_torque(dt, Va, mav_p):
    """Propeller thrust and torque from the quadratic motor/propeller model.

    Parameters
    ----------
    dt : throttle setting
    Va : airspeed
    mav_p : parameter object exposing rho, D_prop, V_max, C_Q0..C_Q2,
        C_T0..C_T2, KQ, R_motor and i0

    Returns
    -------
    A pair of length-3 numpy arrays (force, torque); both act along the
    body x axis only.
    """
    # NOTE(review): the original return line was fused with extraction
    # junk ("| ["), which made it a syntax error; repaired here.
    rho = mav_p.rho
    D = mav_p.D_prop
    # Input voltage applied to the motor for this throttle setting
    V_in = mav_p.V_max * dt
    # Coefficients of the quadratic in the propeller speed Omega:
    # a * Omega**2 + b * Omega + c = 0
    a = rho * D ** 5 * mav_p.C_Q0 / (2 * np.pi) ** 2
    b = rho * D ** 4 * mav_p.C_Q1 * Va / (2 * np.pi) + mav_p.KQ ** 2 / mav_p.R_motor
    c = rho * D ** 3 * mav_p.C_Q2 * Va ** 2 - \
        (mav_p.KQ * V_in) / mav_p.R_motor + mav_p.KQ * mav_p.i0
    # Clamp a (numerically) negative discriminant to zero before sqrt
    radicand = b ** 2 - 4 * a * c
    if radicand < 0:
        radicand = 0
    Omega_op = (-b + np.sqrt(radicand)) / (2 * a)
    # Advance ratio at the operating point, and the resulting
    # thrust/torque coefficients (quadratic fits in J)
    J_op = 2 * np.pi * Va / (Omega_op * D)
    C_T = mav_p.C_T2 * J_op ** 2 + mav_p.C_T1 * J_op + mav_p.C_T0
    C_Q = mav_p.C_Q2 * J_op ** 2 + mav_p.C_Q1 * J_op + mav_p.C_Q0
    n = Omega_op / (2 * np.pi)  # revolutions per second
    fp_x = rho * n ** 2 * D ** 4 * C_T
    Mp_x = rho * n ** 2 * D ** 5 * C_Q
    return np.array([fp_x, 0, 0]), np.array([Mp_x, 0, 0])
"numpy.array",
"numpy.sqrt"
] | [((703, 798), 'numpy.array', 'np.array', (['[gamma_0, gamma_1, gamma_2, gamma_3, gamma_4, gamma_5, gamma_6, gamma_7,\n gamma_8]'], {}), '([gamma_0, gamma_1, gamma_2, gamma_3, gamma_4, gamma_5, gamma_6,\n gamma_7, gamma_8])\n', (711, 798), True, 'import numpy as np\n'), ((2133, 2155), 'numpy.array', 'np.array', (['[fp_x, 0, 0]'], {}), '([fp_x, 0, 0])\n', (2141, 2155), True, 'import numpy as np\n'), ((2157, 2179), 'numpy.array', 'np.array', (['[Mp_x, 0, 0]'], {}), '([Mp_x, 0, 0])\n', (2165, 2179), True, 'import numpy as np\n'), ((1807, 1824), 'numpy.sqrt', 'np.sqrt', (['radicand'], {}), '(radicand)\n', (1814, 1824), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class NeuralNetwork:
    """Fully-connected feed-forward neural network trained by backpropagation.

    All parameters live in one dict, ``self.iw``: integer keys hold the
    per-layer weight matrices and ``'bias<i>'`` keys hold the bias column
    vectors.  ``output_type`` selects a sigmoid output head ('c',
    classification) or a linear head ('r', regression).
    """
    def __init__(self, inputnotes,hiddennodes,layers,outputnodes,learningrate,number_of_epochs=1000,output_type = 'c',train_test_split = 0.8,test = True, split = True):
        """
        inputnodes
        hiddennodes
        layers
        outputnodes
        learningrate
        number_of_epochs=1000
        output_type = 'c' (choice between c for classification and r for regression)
        train_test_split = 0.8 (percentage you would like to train)
        test = True (turn if off if you don't want to test)
        split = True (split the data)
        """
        self.inodes = inputnotes
        self.hnodes = hiddennodes
        self.layers = layers
        self.onodes= outputnodes
        self.lr = learningrate
        self.epochs = number_of_epochs
        self.individual_epoch_errors = []  # per-sample error norms for the current epoch
        self.mean_errors = []  # one aggregated error value per completed epoch
        self.original_epoch=0  # tracks epoch roll-over inside train()
        self.output_type = output_type
        self.tts = train_test_split
        self.test = test
        self.split = split
        """Initialize the weights and the biases"""
        self.iw = {} #The initial Weights dictionary
        # Weight shapes: first layer (hidden x input), last layer
        # (output x hidden), all intermediate layers (hidden x hidden).
        # Weights are uniform in [-0.5, 0.5); biases uniform in [0, 1),
        # stored as column vectors.
        for layer in range(self.layers):
            if layer == 0:
                self.iw[layer] = (np.random.rand(self.hnodes,self.inodes)-.5)
                self.iw['bias' +str(layer)] = np.random.rand(self.hnodes)
                self.iw['bias' + str(layer)] = np.array(self.iw['bias' +str(layer)],ndmin = 2).T
            elif layer == self.layers-1:
                self.iw[layer] = (np.random.rand(self.onodes,self.hnodes)-.5)
                self.iw['bias' +str(layer)] = np.random.rand(self.onodes)
                self.iw['bias' + str(layer)] = np.array(self.iw['bias' +str(layer)],ndmin = 2).T
            else:
                self.iw[layer] = (np.random.rand(self.hnodes, self.hnodes)-.5)
                self.iw['bias'+str(layer)] = np.random.rand(self.hnodes)
                self.iw['bias' + str(layer)] = np.array(self.iw['bias'+str(layer)],ndmin = 2).T
    def sigmoid(self,x):
        # Logistic activation.
        return (1/(1+np.e**(-x)))
    def dsigmoid(self,x):
        # Derivative of the logistic activation, expressed via sigmoid itself.
        return (self.sigmoid(x)*(1-self.sigmoid(x)))
    def relu(self,x):
        # Leaky ReLU with slope 0.0001 on the negative side (not wired into
        # the forward/backward passes below).
        return np.maximum(x,x*.0001)
    def drelu(self,x):
        # Derivative of the leaky ReLU above (not wired into the passes).
        positive = 1.*(x>0)
        negative = .0001*(x<0)
        return positive+negative
    # def sigmoid(self,x):
    #     overall_array = []
    #
    #     for row in x:
    #         new_array = []
    #         for value in row:
    #             if value >0:
    #                 new_array.append(value)
    #             else:
    #                 new_array.append(0)
    #         overall_array.append(new_array)
    #     return np.array(overall_array,ndmin=2)
    # def dsigmoid(self,x):
    #     overall_array = []
    #     for row in x:
    #         new_array = []
    #         for value in row:
    #             if value >0:
    #                 new_array.append(1)
    #             else:
    #                 new_array.append(0)
    #         overall_array.append(new_array)
    #     return np.array(overall_array,ndmin=1)
    def feed_forward(self,input_array):
        # Forward pass.  Caches pre-activations 'z<i>' and activations
        # 'a<i>' in self.ff so train() can reuse them for backpropagation.
        input_array = np.array(input_array,ndmin=2).T
        self.input_array = input_array
        """Run feedword algorithm"""
        self.ff = {} #Dictionary to hold the feedforward weights
        for layer in range(self.layers):
            if layer ==0:
                self.ff['z'+str(layer)] = self.iw[layer]@input_array
                self.ff['z'+str(layer)]+= self.iw['bias'+str(layer)]
                self.ff['a'+str(layer)] = self.sigmoid(self.ff['z'+str(layer)])
            elif layer == self.layers -1:
                # Output layer: sigmoid head for classification,
                # identity head for regression.
                self.ff['z'+str(layer)] = self.iw[layer]@self.ff['a'+str(layer-1)]
                self.ff['z'+str(layer)]+= self.iw['bias'+str(layer)]
                if self.output_type.lower().startswith('c'):
                    self.ff['a'+str(layer)] = self.sigmoid(self.ff['z'+str(layer)])
                elif self.output_type.lower().startswith('r'):
                    self.ff['a'+str(layer)] = self.ff['z'+str(layer)]
            else:
                self.ff['z'+str(layer)] = self.iw[layer]@self.ff['a'+str(layer-1)]
                self.ff['z'+str(layer)]+= self.iw['bias'+str(layer)]
                self.ff['a'+str(layer)] = self.sigmoid(self.ff['z'+str(layer)])
        outputs = self.ff['a'+str(self.layers-1)]
        return outputs
    def clean_data(func):
        # Decorator applied to train(): given a pandas DataFrame (last
        # column = target), it shuffles/splits the data, runs the wrapped
        # per-sample training function for every epoch, plots the epoch
        # error curve, and (optionally) plots predictions on the test split.
        # NOTE(review): defined without self/@staticmethod -- it works only
        # because it is applied at class-definition time.
        def wrapper(*args, **kwargs):
            test_outcomes = []
            test_answers = []
            self = args[0]
            data = args[1]
            # sample(frac=1) shuffles the training rows.
            train_data = data.iloc[:int(len(data)*(self.tts))].sample(frac=1)
            print(train_data)
            test_data = data.iloc[int(len(data)*self.tts):]
            if not self.split:
                # No split requested: test on the same (training) portion.
                test_data = data.iloc[:int(len(data)*self.tts)]
            train_inputs = train_data[data.columns[:-1]].values
            train_outputs = train_data[data.columns[-1]].values
            test_inputs = test_data[data.columns[:-1]].values
            test_outputs = test_data[data.columns[-1]].values
            for epoch in range(self.epochs):
                for i in range(len(train_inputs)):
                    func(args[0],train_inputs[i],train_outputs[i],epoch)
            plt.plot(self.mean_errors)
            plt.show()
            if not self.test:
                return
            else:
                for index,point in enumerate(test_inputs):
                    print(point)
                    print(self.feed_forward(point))
                    test_outcomes.append(self.feed_forward(point)[0])
                    test_answers.append(test_outputs[index])
                plt.plot(test_outcomes,'*', label = 'test')
                plt.plot(test_answers,'*',label = 'answer')
                plt.legend()
                plt.show()
                return
        return wrapper
    @clean_data
    def train(self,inputs,targets="None",epoch = 0):
        # One stochastic-gradient-descent step for a single sample.
        # NOTE(review): the default targets="None" is the *string* "None",
        # not None -- confirm intent; clean_data always supplies targets.
        outputs = self.feed_forward(inputs)
        targets = np.array(targets,ndmin=2).T
        output_errors = (targets-outputs)
        self.individual_epoch_errors.append(np.linalg.norm(output_errors))
        if self.original_epoch != epoch:
            # Epoch rolled over: aggregate the finished epoch's errors.
            # NOTE(review): "1 - mean(error)" reads like an accuracy proxy;
            # confirm the intended metric.
            self.mean_errors.append(1-np.mean(self.individual_epoch_errors*1))
            self.individual_epoch_errors = []
            self.original_epoch +=1
        # Backpropagate from the output layer down to layer 0.
        for layer in range(self.layers-1,-1,-1):
            if layer == self.layers -1:
                #δL=(aL−y)⊙σ′(zL).
                """Check to see what the output type is"""
                if self.output_type.lower().startswith('c'):
                    """Classification output, using sigmoid run it through the dsigmoid"""
                    gradient = self.dsigmoid(self.ff['z'+str(self.layers-1)])
                elif self.output_type.lower().startswith('r'):
                    """regression. The function is linear, therefore has derivative of 1 everywhere"""
                    gradient = np.ones(np.shape(self.ff['z'+str(self.layers-1)]))
                gradient = np.multiply(output_errors,gradient)
                # if np.linalg.norm(gradient)>10:
                #     gradient = 10*(gradient/np.linalg.norm(gradient))
                first_errors = gradient
                gradient = self.lr*gradient
                hidden_t = self.ff['a'+str(layer-1)].T
                deltas = gradient@hidden_t
                self.iw[layer] += deltas
                self.iw['bias' + str(layer)]+= gradient
            elif layer ==0:
                #δl=((wl+1)Tδl+1)⊙σ′(zl),
                first_errors = np.transpose(self.iw[layer+1])@first_errors
                gradient = self.dsigmoid(self.ff['z'+str(layer)])
                gradient = np.multiply(first_errors,gradient)
                # if np.linalg.norm(gradient)>10:
                #     gradient = 10*(gradient/np.linalg.norm(gradient))
                first_errors = gradient
                gradient = self.lr*gradient
                inputs_t = self.input_array.T
                deltas = gradient@inputs_t
                self.iw[layer]+=deltas
                self.iw['bias'+str(layer)]+= gradient
            else:
                #δl=((wl+1)Tδl+1)⊙σ′(zl),
                first_errors = np.transpose(self.iw[layer+1])@first_errors
                gradient = self.dsigmoid(self.ff['z'+str(layer)])
                gradient = np.multiply(first_errors,gradient)
                # if np.linalg.norm(gradient)>10:
                #     gradient = 10*(gradient/np.linalg.norm(gradient))
                first_errors = gradient
                gradient = self.lr*gradient
                prev_hidden_t = self.ff['a'+str(layer-1)].T
                deltas = gradient@prev_hidden_t
                self.iw[layer]+= deltas
                self.iw['bias'+str(layer)] += gradient
| [
"numpy.mean",
"numpy.multiply",
"numpy.random.rand",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linalg.norm",
"numpy.maximum",
"numpy.transpose",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((2254, 2279), 'numpy.maximum', 'np.maximum', (['x', '(x * 0.0001)'], {}), '(x, x * 0.0001)\n', (2264, 2279), True, 'import numpy as np\n'), ((3222, 3252), 'numpy.array', 'np.array', (['input_array'], {'ndmin': '(2)'}), '(input_array, ndmin=2)\n', (3230, 3252), True, 'import numpy as np\n'), ((5371, 5397), 'matplotlib.pyplot.plot', 'plt.plot', (['self.mean_errors'], {}), '(self.mean_errors)\n', (5379, 5397), True, 'import matplotlib.pyplot as plt\n'), ((5410, 5420), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5418, 5420), True, 'import matplotlib.pyplot as plt\n'), ((6123, 6149), 'numpy.array', 'np.array', (['targets'], {'ndmin': '(2)'}), '(targets, ndmin=2)\n', (6131, 6149), True, 'import numpy as np\n'), ((6237, 6266), 'numpy.linalg.norm', 'np.linalg.norm', (['output_errors'], {}), '(output_errors)\n', (6251, 6266), True, 'import numpy as np\n'), ((1395, 1422), 'numpy.random.rand', 'np.random.rand', (['self.hnodes'], {}), '(self.hnodes)\n', (1409, 1422), True, 'import numpy as np\n'), ((5783, 5825), 'matplotlib.pyplot.plot', 'plt.plot', (['test_outcomes', '"""*"""'], {'label': '"""test"""'}), "(test_outcomes, '*', label='test')\n", (5791, 5825), True, 'import matplotlib.pyplot as plt\n'), ((5843, 5886), 'matplotlib.pyplot.plot', 'plt.plot', (['test_answers', '"""*"""'], {'label': '"""answer"""'}), "(test_answers, '*', label='answer')\n", (5851, 5886), True, 'import matplotlib.pyplot as plt\n'), ((5903, 5915), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5913, 5915), True, 'import matplotlib.pyplot as plt\n'), ((5932, 5942), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5940, 5942), True, 'import matplotlib.pyplot as plt\n'), ((7166, 7202), 'numpy.multiply', 'np.multiply', (['output_errors', 'gradient'], {}), '(output_errors, gradient)\n', (7177, 7202), True, 'import numpy as np\n'), ((1305, 1345), 'numpy.random.rand', 'np.random.rand', (['self.hnodes', 'self.inodes'], {}), '(self.hnodes, self.inodes)\n', (1319, 1345), 
True, 'import numpy as np\n'), ((1685, 1712), 'numpy.random.rand', 'np.random.rand', (['self.onodes'], {}), '(self.onodes)\n', (1699, 1712), True, 'import numpy as np\n'), ((1952, 1979), 'numpy.random.rand', 'np.random.rand', (['self.hnodes'], {}), '(self.hnodes)\n', (1966, 1979), True, 'import numpy as np\n'), ((6349, 6390), 'numpy.mean', 'np.mean', (['(self.individual_epoch_errors * 1)'], {}), '(self.individual_epoch_errors * 1)\n', (6356, 6390), True, 'import numpy as np\n'), ((7846, 7881), 'numpy.multiply', 'np.multiply', (['first_errors', 'gradient'], {}), '(first_errors, gradient)\n', (7857, 7881), True, 'import numpy as np\n'), ((8504, 8539), 'numpy.multiply', 'np.multiply', (['first_errors', 'gradient'], {}), '(first_errors, gradient)\n', (8515, 8539), True, 'import numpy as np\n'), ((1595, 1635), 'numpy.random.rand', 'np.random.rand', (['self.onodes', 'self.hnodes'], {}), '(self.onodes, self.hnodes)\n', (1609, 1635), True, 'import numpy as np\n'), ((1862, 1902), 'numpy.random.rand', 'np.random.rand', (['self.hnodes', 'self.hnodes'], {}), '(self.hnodes, self.hnodes)\n', (1876, 1902), True, 'import numpy as np\n'), ((7709, 7741), 'numpy.transpose', 'np.transpose', (['self.iw[layer + 1]'], {}), '(self.iw[layer + 1])\n', (7721, 7741), True, 'import numpy as np\n'), ((8365, 8397), 'numpy.transpose', 'np.transpose', (['self.iw[layer + 1]'], {}), '(self.iw[layer + 1])\n', (8377, 8397), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import roslib; roslib.load_manifest('jaco2_driver')
import rospy
import sys
import math
import actionlib
import kinova_msgs.msg
import argparse
from abc import abstractmethod
import numpy as np
# Geometric offsets between the origin frame and the device frames.
# Presumably in metres -- TODO confirm against the robot's URDF/spec.
delta_Jx = 0.15675
delta_Jy = 0.13335
delta_Kx = 0.125
delta_Kz = 0.3

# 4x4 homogeneous transforms (rotation + translation) mapping a vector
# expressed in a device frame into the origin frame.
# Origin -> Kinect camera frame.
T_0K = np.array([
    [0, 0, 1, delta_Kx],
    [1, 0, 0, 0],
    [0, -1, 0, delta_Kz],
    [0, 0, 0, 1]
])
# Origin -> left arm base frame.
T_0L = np.array([
    [0, 0, 1, -delta_Jx],
    [-1, 0, 0, delta_Jy],
    [0, -1, 0, 0],
    [0, 0, 0, 1]
])
# Origin -> right arm base frame.
T_0R = np.array([
    [0, 0, 1, -delta_Jx],
    [-1, 0, 0, -delta_Jy],
    [0, -1, 0, 0],
    [0, 0, 0, 1]
])
def origin_to_kinect(v0):
    """Express a homogeneous vector given in the origin frame in the Kinect frame."""
    return np.linalg.inv(T_0K) @ v0
def origin_to_left(v0):
    """Express a homogeneous vector given in the origin frame in the left-arm frame."""
    return np.linalg.inv(T_0L) @ v0
def origin_to_right(v0):
    """Express a homogeneous vector given in the origin frame in the right-arm frame."""
    return np.linalg.inv(T_0R) @ v0
def kinect_to_origin(vK):
    """Express a homogeneous vector given in the Kinect frame in the origin frame."""
    return T_0K @ vK
def left_to_origin(vL):
    """Express a homogeneous vector given in the left-arm frame in the origin frame."""
    return T_0L @ vL
def right_to_origin(vR):
    """Express a homogeneous vector given in the right-arm frame in the origin frame."""
    return T_0R @ vR
| [
"numpy.array",
"numpy.linalg.inv",
"numpy.matmul",
"roslib.load_manifest"
] | [((38, 74), 'roslib.load_manifest', 'roslib.load_manifest', (['"""jaco2_driver"""'], {}), "('jaco2_driver')\n", (58, 74), False, 'import roslib\n'), ((303, 388), 'numpy.array', 'np.array', (['[[0, 0, 1, delta_Kx], [1, 0, 0, 0], [0, -1, 0, delta_Kz], [0, 0, 0, 1]]'], {}), '([[0, 0, 1, delta_Kx], [1, 0, 0, 0], [0, -1, 0, delta_Kz], [0, 0, 0,\n 1]])\n', (311, 388), True, 'import numpy as np\n'), ((411, 498), 'numpy.array', 'np.array', (['[[0, 0, 1, -delta_Jx], [-1, 0, 0, delta_Jy], [0, -1, 0, 0], [0, 0, 0, 1]]'], {}), '([[0, 0, 1, -delta_Jx], [-1, 0, 0, delta_Jy], [0, -1, 0, 0], [0, 0,\n 0, 1]])\n', (419, 498), True, 'import numpy as np\n'), ((522, 610), 'numpy.array', 'np.array', (['[[0, 0, 1, -delta_Jx], [-1, 0, 0, -delta_Jy], [0, -1, 0, 0], [0, 0, 0, 1]]'], {}), '([[0, 0, 1, -delta_Jx], [-1, 0, 0, -delta_Jy], [0, -1, 0, 0], [0, 0,\n 0, 1]])\n', (530, 610), True, 'import numpy as np\n'), ((910, 929), 'numpy.matmul', 'np.matmul', (['T_0K', 'vK'], {}), '(T_0K, vK)\n', (919, 929), True, 'import numpy as np\n'), ((978, 997), 'numpy.matmul', 'np.matmul', (['T_0L', 'vL'], {}), '(T_0L, vL)\n', (987, 997), True, 'import numpy as np\n'), ((1047, 1066), 'numpy.matmul', 'np.matmul', (['T_0R', 'vR'], {}), '(T_0R, vR)\n', (1056, 1066), True, 'import numpy as np\n'), ((668, 687), 'numpy.linalg.inv', 'np.linalg.inv', (['T_0K'], {}), '(T_0K)\n', (681, 687), True, 'import numpy as np\n'), ((751, 770), 'numpy.linalg.inv', 'np.linalg.inv', (['T_0L'], {}), '(T_0L)\n', (764, 770), True, 'import numpy as np\n'), ((835, 854), 'numpy.linalg.inv', 'np.linalg.inv', (['T_0R'], {}), '(T_0R)\n', (848, 854), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
from copy import deepcopy
from random import sample, random, randint
import argparse
import math
import json
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root)
sys.path.append(root + "/Driver")
sys.path.append(root + "/Util")
from Driver.SDriver import Driver
from Util.XYRouting import XYRouting
class SA:
    '''Simulated annealing for the NoC task-mapping problem.

    Searches for an assignment of communication-graph nodes to PE tiles
    that minimises the peak router output-port load (see __consumption).
    The communication graph is a list of (src, dst, vol) entries; a src or
    dst of -1 denotes memory traffic, which __label2TaskGraph maps
    round-robin onto memory banks numbered after the n PE tiles.
    '''
    T = 1e5               # initial temperature
    T_min = 1             # FIXME: lowered so experiments finish quickly
    alpha = 0.98          # geometric cooling factor
    global_epc_limit = 1e6
    local_epc_limit = 100  # neighbour trials per temperature step

    def __init__(self):
        print("log: Employing SA for searching mapping strategy")
        super().__init__()
        self.estimate_driver = Driver()

    def execute(self, task_graph_path, comm_graph_path, arch_config_path):
        """Run the annealing loop and write the resulting mapped task graph.

        Args:
            task_graph_path: output path (relative to repo root).
            comm_graph_path: input communication graph path.
            arch_config_path: JSON architecture configuration path.

        Returns:
            The mapped task graph as a list of (src, dst, vol) tuples.
        """
        self.task_graph_path = task_graph_path
        self.comm_graph_path = comm_graph_path
        self.arc_config_path = arch_config_path
        self.__readData(comm_graph_path, arch_config_path)
        temperature = self.T
        overall_counter = 0
        self.__initLabels()
        min_consp = self.__consumption(self.labels)
        print("\n -------------- Task Mapping ---------------\n")
        while temperature > self.T_min and overall_counter < self.global_epc_limit:
            for i in range(self.local_epc_limit):
                try:
                    new_labels, new_asgn_labels, _, _ = self.__disturbance(deepcopy(self.labels), deepcopy(self.asgn_labels))
                    new_consp = self.__consumption(new_labels)
                    # Relative cost change in percent; epsilon avoids 0/0.
                    delta_E = (new_consp - min_consp) / (min_consp + 1e-10) * 100
                    if self.__judge(delta_E, temperature):
                        min_consp = new_consp
                        self.labels, self.asgn_labels = new_labels, new_asgn_labels
                        if delta_E < 0:  # have found a better solution
                            break
                except Exception:
                    # Best-effort: a failed neighbour evaluation is skipped.
                    pass
            temperature = temperature * self.alpha
            overall_counter += 1
            if overall_counter % 100 == 0:
                print("episode: {}, present consumption: {}, temperature: {}".format(overall_counter, min_consp, temperature))
        print("labels: ", self.labels)
        print("score: ", min_consp)
        task_graph = self.__label2TaskGraph(self.labels)
        self.__writeTaskGraph(task_graph_path, task_graph)
        return task_graph

    def __readData(self, comm_graph_path, arch_config_path):
        """Load the architecture config and split the communication graph
        into PE-to-PE traffic and memory traffic (src/dst == -1)."""
        arch_arg = self.__readArchConfig(arch_config_path)
        whole_comm_graph = self.__readCommGraph(comm_graph_path)
        comm_graph_between_pe = [req for req in whole_comm_graph if req[0] != -1 and req[1] != -1]
        srcs, dsts, vols = zip(*comm_graph_between_pe)
        self.comm_graph_between_pe = {sd: vol for sd, vol in zip(zip(srcs, dsts), vols)}
        comm_graph_with_mem = [req for req in whole_comm_graph if req[0] == -1 or req[1] == -1]
        msrcs, mdsts, mvols = zip(*comm_graph_with_mem)
        self.comm_graph_with_mem = {sd: vol for sd, vol in zip(zip(msrcs, mdsts), mvols)}
        n = arch_arg["n"]
        assert False not in [i in set(srcs + dsts) for i in range(n)]  # ensure all n nodes appear in the graph
        self.nodes = list(set(srcs + dsts))
        self.labels = {node: -1 for node in range(n)}
        self.asgn_labels = {i: -1 for i in range(n)}

    def __initLabels(self):
        """Assign each node a distinct random tile label (rejection sampling)."""
        n = self.arch_arg["n"]
        for node in self.nodes:
            label = randint(0, n - 1)
            while self.asgn_labels[label] != -1:
                label = randint(0, n - 1)
            self.labels[node] = label
            self.asgn_labels[label] = node

    def __disturbance(self, labels, asgn_labels):
        """Create a neighbour state by swapping two tile labels.

        At least one of the two labels must be occupied; swapping two empty
        slots would be a no-op.  Operates on (deep) copies supplied by the
        caller and returns the mutated copies plus the swapped labels.
        """
        # random.sample requires a sequence, not a dict keys view.
        l1, l2 = sample(list(asgn_labels.keys()), 2)
        while asgn_labels[l1] == -1 and asgn_labels[l2] == -1:  # avoid swapping two empty labels
            # BUG FIX: originally "l2, l2 = ..." which never resampled l1.
            l1, l2 = sample(list(asgn_labels.keys()), 2)
        try:
            labels[asgn_labels[l1]], labels[asgn_labels[l2]] = l2, l1
        except KeyError:
            # One side may be an empty slot (-1), which is not a node key.
            pass
        asgn_labels[l1], asgn_labels[l2] = asgn_labels[l2], asgn_labels[l1]  # swap assigned labels
        return labels, asgn_labels, l1, l2

    def __consumption(self, labels):
        """Cost function: peak per-port packet count under XY routing."""
        d = self.arch_arg["d"]
        task_graph = self.__label2TaskGraph(labels)
        router_dropin = np.zeros(((d + 1) * (d + 1), 4), dtype=np.int32)
        rter = XYRouting(self.arch_arg)
        path = rter.path(task_graph)
        for req in path:
            router_path, _, oport_path = zip(*req[:-1])
            oport_path = [i - 2 for i in oport_path]  # shift port ids to 0-based column index
            router_dropin[router_path, oport_path] += 1
        consp = np.max(router_dropin)
        return consp

    def __judge(self, delta_E, tempreature):
        """Metropolis acceptance: always accept improvements, otherwise
        accept with probability exp(-delta_E / T)."""
        if delta_E < 0:
            return True
        elif math.exp(-delta_E / tempreature) > random():
            return True
        else:
            return False

    def __label2TaskGraph(self, labels):
        '''Translate transmission requests between PEs according to labels
        Assign access to memory with multi banks in a round-roubin style, represented by the last row of PE array
        '''
        L = labels
        comm_graph_with_mem, comm_graph_between_pe = self.comm_graph_with_mem, self.comm_graph_between_pe
        task_graph_between_pe = [
            (L[src], L[dst], comm_graph_between_pe[(src, dst)])
            for src, dst in comm_graph_between_pe
        ]
        d = self.arch_arg["d"]  # TODO: number of memory banks is assumed equal to the mesh side length
        bias = self.arch_arg["n"]  # bank ids start right after the n PE tiles
        comm_graph_with_src_mem = [req for req in comm_graph_with_mem if req[0] == -1]
        comm_graph_with_dst_mem = [req for req in comm_graph_with_mem if req[1] == -1]
        mapped_bank_src = [i % d + bias for i in range(len(comm_graph_with_src_mem))]
        mapped_bank_dst = [i % d + bias for i in range(len(comm_graph_with_dst_mem))]
        task_graph_with_src_mem = [
            (mb, L[dst], comm_graph_with_mem[(src, dst)])
            for mb, (src, dst) in zip(mapped_bank_src, comm_graph_with_src_mem)
        ]
        task_graph_with_dst_mem = [
            (L[src], mb, comm_graph_with_mem[(src, dst)])
            for mb, (src, dst) in zip(mapped_bank_dst, comm_graph_with_dst_mem)
        ]
        task_graph = task_graph_between_pe + task_graph_with_src_mem + task_graph_with_dst_mem
        return task_graph

    def __readCommGraph(self, comm_graph_path):
        """Read the communication graph: one (src, dst, vol) literal per line."""
        full_comm_graph_path = root + "/" + comm_graph_path
        with open(full_comm_graph_path, "r") as f:
            comm_graph = [eval(line) for line in f]
        # NOTE(review): eval on file contents is unsafe for untrusted input;
        # consider ast.literal_eval.  This attribute is overwritten by
        # __readData with the memory-only subset.
        self.comm_graph_with_mem = comm_graph
        return comm_graph

    def __readArchConfig(self, arch_config_path):
        """Load the JSON architecture configuration into self.arch_arg."""
        full_arch_config_path = root + "/" + arch_config_path
        if not os.path.exists(full_arch_config_path):
            raise Exception("Invalid configuration path!")
        with open(full_arch_config_path, "r") as f:
            self.arch_arg = json.load(f)
        return self.arch_arg

    def __writeTaskGraph(self, task_graph_path, task_graph):
        """Write the mapped task graph as comma-separated triples, one per line."""
        full_task_graph_path = root + "/" + task_graph_path
        with open(full_task_graph_path, "w") as f:
            for req in task_graph:
                f.write(",".join(str(x) for x in req) + "\n")
if __name__ == "__main__":
    # Command-line entry point: search for a mapping strategy with SA.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", help="Path for communication graph, with the root directory as NoCPerformanceModel")
    parser.add_argument("-o", help="Path for task graph, with the root directory as NoCPerformanceModel")
    parser.add_argument("-c", help="Path for architecture configruation.")
    args = parser.parse_args()
    print("\n".join([
        "\nlog: Searching for mapping strategy",
        "Path for communication graph: " + args.i,
        "Path for task graph:" + args.o,
        "Path for configuration file:" + args.c,
    ]))
    SA().execute(args.o, args.i, args.c)
| [
"os.path.exists",
"random.randint",
"Util.XYRouting.XYRouting",
"argparse.ArgumentParser",
"numpy.max",
"json.load",
"numpy.zeros",
"random.random",
"copy.deepcopy",
"os.path.abspath",
"math.exp",
"sys.path.append",
"Driver.SDriver.Driver"
] | [((217, 238), 'sys.path.append', 'sys.path.append', (['root'], {}), '(root)\n', (232, 238), False, 'import sys\n'), ((239, 272), 'sys.path.append', 'sys.path.append', (["(root + '/Driver')"], {}), "(root + '/Driver')\n", (254, 272), False, 'import sys\n'), ((273, 304), 'sys.path.append', 'sys.path.append', (["(root + '/Util')"], {}), "(root + '/Util')\n", (288, 304), False, 'import sys\n'), ((7652, 7677), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7675, 7677), False, 'import argparse\n'), ((189, 214), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (204, 214), False, 'import os\n'), ((875, 883), 'Driver.SDriver.Driver', 'Driver', ([], {}), '()\n', (881, 883), False, 'from Driver.SDriver import Driver\n'), ((4664, 4712), 'numpy.zeros', 'np.zeros', (['((d + 1) * (d + 1), 4)'], {'dtype': 'np.int32'}), '(((d + 1) * (d + 1), 4), dtype=np.int32)\n', (4672, 4712), True, 'import numpy as np\n'), ((4728, 4752), 'Util.XYRouting.XYRouting', 'XYRouting', (['self.arch_arg'], {}), '(self.arch_arg)\n', (4737, 4752), False, 'from Util.XYRouting import XYRouting\n'), ((4996, 5017), 'numpy.max', 'np.max', (['router_dropin'], {}), '(router_dropin)\n', (5002, 5017), True, 'import numpy as np\n'), ((3825, 3842), 'random.randint', 'randint', (['(0)', '(n - 1)'], {}), '(0, n - 1)\n', (3832, 3842), False, 'from random import sample, random, randint\n'), ((7120, 7157), 'os.path.exists', 'os.path.exists', (['full_arch_config_path'], {}), '(full_arch_config_path)\n', (7134, 7157), False, 'import os\n'), ((7298, 7310), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7307, 7310), False, 'import json\n'), ((3916, 3933), 'random.randint', 'randint', (['(0)', '(n - 1)'], {}), '(0, n - 1)\n', (3923, 3933), False, 'from random import sample, random, randint\n'), ((5146, 5178), 'math.exp', 'math.exp', (['(-delta_E / tempreature)'], {}), '(-delta_E / tempreature)\n', (5154, 5178), False, 'import math\n'), ((5181, 5189), 'random.random', 
'random', ([], {}), '()\n', (5187, 5189), False, 'from random import sample, random, randint\n'), ((1605, 1626), 'copy.deepcopy', 'deepcopy', (['self.labels'], {}), '(self.labels)\n', (1613, 1626), False, 'from copy import deepcopy\n'), ((1628, 1654), 'copy.deepcopy', 'deepcopy', (['self.asgn_labels'], {}), '(self.asgn_labels)\n', (1636, 1654), False, 'from copy import deepcopy\n')] |
import pytest
from nbgwas.network import igraph_adj_matrix
import igraph as ig
import numpy as np
# Test fixture: complete (fully connected) graph on 4 vertices -> 6 edges.
G = ig.Graph.Full(4)
# One weight per edge, assigned in igraph's edge-index order.
weights = np.array([1, 2, 3, 4, 5, 6])
G.es['weight'] = weights
def test_adj_matrix():
    """Unweighted adjacency of K4: zero diagonal, ones everywhere else."""
    mat = igraph_adj_matrix(G, weighted=False)

    assert mat.shape == (4, 4)

    dense = mat.todense()
    expected_rows = [
        np.array([0, 1, 1, 1]),
        np.array([1, 0, 1, 1]),
        np.array([1, 1, 0, 1]),
        np.array([1, 1, 1, 0]),
    ]
    for row_index, expected in enumerate(expected_rows):
        assert (dense[row_index] == expected).all()
def test_adj_matrix_weights():
    """Weighted adjacency of K4 with edge weights 1..6 is symmetric."""
    print(weights)
    print(weights[0])

    mat = igraph_adj_matrix(G, weighted='weight')

    assert mat.shape == (4, 4)

    dense = mat.todense()
    expected_rows = [
        np.array([0, 1, 2, 3]),
        np.array([1, 0, 4, 5]),
        np.array([2, 4, 0, 6]),
        np.array([3, 5, 6, 0]),
    ]
    for row_index, expected in enumerate(expected_rows):
        assert (dense[row_index] == expected).all()
"numpy.array",
"nbgwas.network.igraph_adj_matrix",
"igraph.Graph.Full"
] | [((107, 123), 'igraph.Graph.Full', 'ig.Graph.Full', (['(4)'], {}), '(4)\n', (120, 123), True, 'import igraph as ig\n'), ((134, 162), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (142, 162), True, 'import numpy as np\n'), ((223, 259), 'nbgwas.network.igraph_adj_matrix', 'igraph_adj_matrix', (['G'], {'weighted': '(False)'}), '(G, weighted=False)\n', (240, 259), False, 'from nbgwas.network import igraph_adj_matrix\n'), ((583, 622), 'nbgwas.network.igraph_adj_matrix', 'igraph_adj_matrix', (['G'], {'weighted': '"""weight"""'}), "(G, weighted='weight')\n", (600, 622), False, 'from nbgwas.network import igraph_adj_matrix\n'), ((323, 345), 'numpy.array', 'np.array', (['[0, 1, 1, 1]'], {}), '([0, 1, 1, 1])\n', (331, 345), True, 'import numpy as np\n'), ((385, 407), 'numpy.array', 'np.array', (['[1, 0, 1, 1]'], {}), '([1, 0, 1, 1])\n', (393, 407), True, 'import numpy as np\n'), ((447, 469), 'numpy.array', 'np.array', (['[1, 1, 0, 1]'], {}), '([1, 1, 0, 1])\n', (455, 469), True, 'import numpy as np\n'), ((509, 531), 'numpy.array', 'np.array', (['[1, 1, 1, 0]'], {}), '([1, 1, 1, 0])\n', (517, 531), True, 'import numpy as np\n'), ((727, 749), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (735, 749), True, 'import numpy as np\n'), ((789, 811), 'numpy.array', 'np.array', (['[1, 0, 4, 5]'], {}), '([1, 0, 4, 5])\n', (797, 811), True, 'import numpy as np\n'), ((851, 873), 'numpy.array', 'np.array', (['[2, 4, 0, 6]'], {}), '([2, 4, 0, 6])\n', (859, 873), True, 'import numpy as np\n'), ((913, 935), 'numpy.array', 'np.array', (['[3, 5, 6, 0]'], {}), '([3, 5, 6, 0])\n', (921, 935), True, 'import numpy as np\n')] |
import json
import numpy
from PIL import Image, ImageOps
import os
import torch
class SegmentationDataset:
    """In-memory image segmentation dataset with random augmentation.

    Loads paired input/target PNGs described by a JSON config, creates
    ``augmentations_count`` randomly flipped/mirrored/cropped/noised
    variants per image (variant 0 is always the unmodified image), and
    serves random mini-batches as torch tensors.

    JSON keys used: input_channels, input_height, input_width,
    target_channels, input_path, target_path, augmentations_count,
    crop_area_level, noise_level.
    """

    def __init__(self, json_file_name):
        with open(json_file_name) as json_file:
            self.json_config = json.load(json_file)

        self.channels = int(self.json_config["input_channels"])
        self.height = int(self.json_config["input_height"])
        self.width = int(self.json_config["input_width"])
        self.target_channels = int(self.json_config["target_channels"])

        self.input_shape = (self.channels, self.height, self.width)
        self.output_shape = (self.target_channels, self.height, self.width)

        self.input_path = str(self.json_config["input_path"])
        self.target_path = str(self.json_config["target_path"])

        # +1: slot 0 of every image is the un-augmented original.
        augmentations_count = int(self.json_config["augmentations_count"]) + 1

        input_files = self._find_files(self.input_path)
        target_files = self._find_files(self.target_path)

        self.count = len(input_files)

        self.input = numpy.zeros((self.count*augmentations_count, self.channels, self.height, self.width))
        self.target = numpy.zeros((self.count*augmentations_count, self.target_channels, self.height, self.width))

        for i in range(self.count):
            for augmentation in range(augmentations_count):
                flip = numpy.random.randint(2) == 0
                mirror = numpy.random.randint(2) == 0

                # Crop factor drawn uniformly from [crop_area_level, 1.0].
                crop_area_level = float(self.json_config["crop_area_level"])
                crop_area = crop_area_level + (1.0 - crop_area_level)*numpy.random.rand()
                noise_level = numpy.random.rand()*float(self.json_config["noise_level"])

                if augmentation == 0:
                    # Keep one pristine copy of every image.
                    flip = False
                    mirror = False
                    crop_area = 1.0
                    noise_level = 0.0

                print("loading ", input_files[i], target_files[i])
                input_img = self._load_image(input_files[i], True, flip, mirror, crop_area)
                target_img = self._load_image(target_files[i], False, flip, mirror, crop_area)

                to_rgb = self.channels == 3

                input_np = self._image_to_numpy(input_img, to_rgb, True, noise_level)
                target_np = self._image_to_numpy(target_img, False, False)
                target_np = numpy.clip(target_np, 0, self.target_channels)

                # BUG FIX: the original wrote every augmentation to row i,
                # overwriting earlier variants and leaving all rows beyond
                # self.count zero-filled.  Give each variant its own row.
                row = i*augmentations_count + augmentation
                self.input[row] = input_np.copy()
                self.target[row] = target_np.copy()

    def get_count(self):
        """Number of distinct source images (excluding augmentations)."""
        return self.count

    def get_batch(self, batch_size = 32):
        """Sample a random mini-batch (with replacement) as torch tensors.

        Returns:
            (input, target) float tensors of shape (batch, C, H, W).
        """
        input = torch.zeros((batch_size, self.channels, self.height, self.width))
        target = torch.zeros((batch_size, self.target_channels, self.height, self.width))

        for i in range(batch_size):
            # Draw from all stored rows (originals + augmentations); the
            # original sampled only the first self.count rows.
            idx = numpy.random.randint(len(self.input))
            input[i] = torch.from_numpy(self.input[idx])
            target[i] = torch.from_numpy(self.target[idx])

        return input, target

    def _find_files(self, path):
        """Return sorted full paths of all .png files directly under path."""
        result = sorted(path + name for name in os.listdir(path) if name.endswith(".png"))
        return result

    def _load_image(self, file_name, to_rgb, flip = False, mirror = False, crop_area = 1.0 ):
        """Load, crop to crop_area, resize, and optionally flip/mirror.

        Returns a PIL image converted to 'RGB' (to_rgb) or grayscale 'L'.
        """
        image = Image.open(file_name)
        image = image.crop((image.width*(1 - crop_area), image.height*(1 - crop_area), image.width*crop_area, image.height*crop_area))
        image = image.resize((self.width, self.height), Image.LANCZOS)

        if flip:
            image = ImageOps.flip(image)
        if mirror:
            image = ImageOps.mirror(image)

        if to_rgb:
            image = image.convert('RGB')
        else:
            image = image.convert('L')

        return image

    def _image_to_numpy(self, image, to_rgb, normalize, noise_level = 0):
        """Convert a PIL image to a channels-first numpy array.

        Optionally scales to [0, 1] (normalize) and blends in Gaussian
        noise with weight noise_level.
        """
        if to_rgb:
            image = image.convert('RGB')
            image_np = numpy.array(image).astype(numpy.uint8)
            image_np = numpy.rollaxis(image_np, 2, 0)
        else:
            image = image.convert('L')
            image_np = numpy.array(image).astype(numpy.uint8)
            image_np = numpy.expand_dims(image_np, axis = 0)

        if normalize:
            image_np = image_np/255.0
            noise_mul = 1
        else:
            noise_mul = 255

        if noise_level > 0:
            rnd = noise_mul*noise_level*numpy.random.randn(*image_np.shape)
            image_np = (1.0 - noise_level)*image_np + noise_level*rnd

        if normalize:
            image_np = numpy.clip(image_np, 0, 1)
        else:
            image_np = numpy.clip(image_np, 0, 255)

        return image_np
| [
"numpy.clip",
"PIL.ImageOps.mirror",
"os.listdir",
"PIL.Image.open",
"numpy.random.rand",
"numpy.rollaxis",
"torch.from_numpy",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"PIL.ImageOps.flip",
"numpy.expand_dims",
"json.load",
"numpy.random.randn",
"torch.zeros"
] | [((1071, 1162), 'numpy.zeros', 'numpy.zeros', (['(self.count * augmentations_count, self.channels, self.height, self.width)'], {}), '((self.count * augmentations_count, self.channels, self.height,\n self.width))\n', (1082, 1162), False, 'import numpy\n'), ((1186, 1285), 'numpy.zeros', 'numpy.zeros', (['(self.count * augmentations_count, self.target_channels, self.height, self.\n width)'], {}), '((self.count * augmentations_count, self.target_channels, self.\n height, self.width))\n', (1197, 1285), False, 'import numpy\n'), ((2986, 3051), 'torch.zeros', 'torch.zeros', (['(batch_size, self.channels, self.height, self.width)'], {}), '((batch_size, self.channels, self.height, self.width))\n', (2997, 3051), False, 'import torch\n'), ((3070, 3142), 'torch.zeros', 'torch.zeros', (['(batch_size, self.target_channels, self.height, self.width)'], {}), '((batch_size, self.target_channels, self.height, self.width))\n', (3081, 3142), False, 'import torch\n'), ((3466, 3482), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3476, 3482), False, 'import os\n'), ((3826, 3847), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (3836, 3847), False, 'from PIL import Image, ImageOps\n'), ((229, 249), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (238, 249), False, 'import json\n'), ((3198, 3230), 'numpy.random.randint', 'numpy.random.randint', (['self.count'], {}), '(self.count)\n', (3218, 3230), False, 'import numpy\n'), ((3255, 3288), 'torch.from_numpy', 'torch.from_numpy', (['self.input[idx]'], {}), '(self.input[idx])\n', (3271, 3288), False, 'import torch\n'), ((3313, 3347), 'torch.from_numpy', 'torch.from_numpy', (['self.target[idx]'], {}), '(self.target[idx])\n', (3329, 3347), False, 'import torch\n'), ((4094, 4114), 'PIL.ImageOps.flip', 'ImageOps.flip', (['image'], {}), '(image)\n', (4107, 4114), False, 'from PIL import Image, ImageOps\n'), ((4155, 4177), 'PIL.ImageOps.mirror', 'ImageOps.mirror', (['image'], {}), '(image)\n', 
(4170, 4177), False, 'from PIL import Image, ImageOps\n'), ((4559, 4589), 'numpy.rollaxis', 'numpy.rollaxis', (['image_np', '(2)', '(0)'], {}), '(image_np, 2, 0)\n', (4573, 4589), False, 'import numpy\n'), ((4741, 4776), 'numpy.expand_dims', 'numpy.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (4758, 4776), False, 'import numpy\n'), ((5150, 5176), 'numpy.clip', 'numpy.clip', (['image_np', '(0)', '(1)'], {}), '(image_np, 0, 1)\n', (5160, 5176), False, 'import numpy\n'), ((5214, 5242), 'numpy.clip', 'numpy.clip', (['image_np', '(0)', '(255)'], {}), '(image_np, 0, 255)\n', (5224, 5242), False, 'import numpy\n'), ((2711, 2757), 'numpy.clip', 'numpy.clip', (['target_np', '(0)', 'self.target_channels'], {}), '(target_np, 0, self.target_channels)\n', (2721, 2757), False, 'import numpy\n'), ((4998, 5033), 'numpy.random.randn', 'numpy.random.randn', (['*image_np.shape'], {}), '(*image_np.shape)\n', (5016, 5033), False, 'import numpy\n'), ((1396, 1419), 'numpy.random.randint', 'numpy.random.randint', (['(2)'], {}), '(2)\n', (1416, 1419), False, 'import numpy\n'), ((1539, 1562), 'numpy.random.randint', 'numpy.random.randint', (['(2)'], {}), '(2)\n', (1559, 1562), False, 'import numpy\n'), ((1863, 1882), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (1880, 1882), False, 'import numpy\n'), ((4494, 4512), 'numpy.array', 'numpy.array', (['image'], {}), '(image)\n', (4505, 4512), False, 'import numpy\n'), ((4676, 4694), 'numpy.array', 'numpy.array', (['image'], {}), '(image)\n', (4687, 4694), False, 'import numpy\n'), ((1813, 1832), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (1830, 1832), False, 'import numpy\n')] |
# Module: PySDMs/internal
# Author: <NAME> <<EMAIL>>
# License: MIT
# Last modified : 8/9/21
# https://github.com/daniel-furman/PySDMs
import pandas as pd
import numpy as np
import sklearn
from matplotlib import pyplot as plt, style
from pycaret import classification as pycaret
import pickle as pk
def internal_validation_visuals(min_seed, max_seed, pycaret_outdir, species_name):
    """Plot internal-validation diagnostics for a range of seeded model runs.

    Produces two kinds of figures:
      * a box-and-whisker plot of the per-seed hold-out metric scores,
        saved to ``pycaret_outdir + 'metric_scores.png'``; and
      * a 10-fold stratified cross-validation ROC curve for each seed's
        saved model, shown interactively.

    min_seed/max_seed: int
        The half-open seed range [min_seed, max_seed) whose hold-out score
        files and pickled models are loaded from disk.
    pycaret_outdir: str
        Directory containing the 'holdout_<seed>.csv' score files and the
        '<species_name>_<seed>.pkl' pickled classifiers.
    species_name: str
        Basename used for the per-seed model and training-data files.
    """
    # ####################################################################
    # Hold-out metric scores data IO: one row per seed, one column per model.
    validation_scores_ensemble = []
    for seed in np.arange(min_seed, max_seed).tolist():
        validation_scores_ensemble.append(pd.read_csv(pycaret_outdir +
            'holdout_' + str(seed) + '.csv', index_col='Unnamed: 0')['0'])
    validation_scores_ensemble = pd.DataFrame(validation_scores_ensemble)
    # For each seed keep the best score across the candidate models; these
    # maxima are what the box plot summarizes.
    validation_scores_individual = []
    for i in np.arange(0, len(validation_scores_ensemble[0])):
        validation_scores_individual.append(np.max(
            validation_scores_ensemble.iloc[i,:]))
    # ####################################################################
    # Metric Score Hold-Out Set BoxPlots
    style.use('ggplot')
    plt.rcParams["figure.figsize"] = (2.25, 5.25)
    plt.boxplot(validation_scores_individual, 'k+', 'k^',
        medianprops = dict(color='black'), labels=['final_voter'])
    plt.title('Validation-Set self.metric Scores')
    plt.ylabel('self.metric')
    y = validation_scores_individual
    # Jitter the x positions so overlapping seed scores stay visible.
    x = np.random.normal(1, 0.030, size=len(y))
    plt.plot(x, y, 'r.', alpha=0.35, markersize=11.5)
    plt.savefig(pycaret_outdir + 'metric_scores.png', dpi=400)
    plt.show()
    # ####################################################################
    # AUC K-fold Cross Validation ROC Plots, one figure per seed.
    style.use('default')
    for seed in np.arange(min_seed, max_seed).tolist():
        # Data IO
        X = pd.read_csv('test/data/env_train/env_train_'+species_name+'_'+str(seed)+'.csv')
        y = X['pa']
        X = X.drop(['pa'], axis=1)
        X = pd.DataFrame(sklearn.preprocessing.StandardScaler(
            ).fit_transform(X), columns=list(X))
        n_samples, n_features = X.shape
        cv = sklearn.model_selection.StratifiedKFold(n_splits=10)
        with open(pycaret_outdir + species_name + '_' + str(seed) + '.pkl', 'rb') as f:
            classifier = pk.load(f)
        # Classification and ROC Analysis
        tprs, aucs = [], []
        mean_fpr = np.linspace(0, 1, 100)
        fig, ax = plt.subplots(figsize=(7.5,4.5))
        for i, (train, test) in enumerate(cv.split(X, y)):
            classifier.fit(X.iloc[train, :], y.iloc[train])
            viz = sklearn.metrics.plot_roc_curve(classifier, X.iloc[
                test, :], y.iloc[test], alpha=0.4, lw=1, ax=ax)
            # Interpolate each fold's TPR onto a common FPR grid so the
            # folds can be averaged point-wise.
            interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
            interp_tpr[0] = 0.0
            tprs.append(interp_tpr)
            aucs.append(viz.roc_auc)
        ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
            label='Chance', alpha=.8)
        mean_tpr = np.mean(tprs, axis=0)
        mean_tpr[-1] = 1.0
        mean_auc = sklearn.metrics.auc(mean_fpr, mean_tpr)
        std_auc = np.std(aucs)
        ax.plot(mean_fpr, mean_tpr, color='b',
            label=r'final_voter (AUC = %0.3f $\pm$ %0.3f)' % (
            mean_auc, std_auc), lw=2, alpha=.8)
        std_tpr = np.std(tprs, axis=0)
        tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
        tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
        ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey',
            alpha=.2, label=r'$\pm$ 1 std. deviation')
        ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
            title="ROC Curve: 10-fold C-V (random stratified) for seed "+str(seed))
        ax.legend(loc="lower right")
        handles, labels = ax.get_legend_handles_labels()
        # Keep only the last three legend entries (chance line, mean ROC,
        # std band) instead of one entry per fold.
        ax.legend(handles[-3:], labels[-3:])
        plt.show()
| [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"sklearn.model_selection.StratifiedKFold",
"matplotlib.style.use",
"sklearn.metrics.plot_roc_curve",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.linspace",
"pandas.DataFrame",
"numpy.maximum",
"matplotlib.pypl... | [((1161, 1201), 'pandas.DataFrame', 'pd.DataFrame', (['validation_scores_ensemble'], {}), '(validation_scores_ensemble)\n', (1173, 1201), True, 'import pandas as pd\n'), ((1678, 1697), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1687, 1697), False, 'from matplotlib import pyplot as plt, style\n'), ((1885, 1931), 'matplotlib.pyplot.title', 'plt.title', (['"""Validation-Set self.metric Scores"""'], {}), "('Validation-Set self.metric Scores')\n", (1894, 1931), True, 'from matplotlib import pyplot as plt, style\n'), ((1938, 1963), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""self.metric"""'], {}), "('self.metric')\n", (1948, 1963), True, 'from matplotlib import pyplot as plt, style\n'), ((2059, 2108), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r."""'], {'alpha': '(0.35)', 'markersize': '(11.5)'}), "(x, y, 'r.', alpha=0.35, markersize=11.5)\n", (2067, 2108), True, 'from matplotlib import pyplot as plt, style\n'), ((2115, 2173), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(pycaret_outdir + 'metric_scores.png')"], {'dpi': '(400)'}), "(pycaret_outdir + 'metric_scores.png', dpi=400)\n", (2126, 2173), True, 'from matplotlib import pyplot as plt, style\n'), ((2180, 2190), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2188, 2190), True, 'from matplotlib import pyplot as plt, style\n'), ((2322, 2342), 'matplotlib.style.use', 'style.use', (['"""default"""'], {}), "('default')\n", (2331, 2342), False, 'from matplotlib import pyplot as plt, style\n'), ((2781, 2833), 'sklearn.model_selection.StratifiedKFold', 'sklearn.model_selection.StratifiedKFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (2820, 2833), False, 'import sklearn\n'), ((3063, 3085), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (3074, 3085), True, 'import numpy as np\n'), ((3106, 3138), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7.5, 4.5)'}), '(figsize=(7.5, 4.5))\n', 
(3118, 3138), True, 'from matplotlib import pyplot as plt, style\n'), ((3706, 3727), 'numpy.mean', 'np.mean', (['tprs'], {'axis': '(0)'}), '(tprs, axis=0)\n', (3713, 3727), True, 'import numpy as np\n'), ((3778, 3817), 'sklearn.metrics.auc', 'sklearn.metrics.auc', (['mean_fpr', 'mean_tpr'], {}), '(mean_fpr, mean_tpr)\n', (3797, 3817), False, 'import sklearn\n'), ((3838, 3850), 'numpy.std', 'np.std', (['aucs'], {}), '(aucs)\n', (3844, 3850), True, 'import numpy as np\n'), ((4039, 4059), 'numpy.std', 'np.std', (['tprs'], {'axis': '(0)'}), '(tprs, axis=0)\n', (4045, 4059), True, 'import numpy as np\n'), ((4083, 4116), 'numpy.minimum', 'np.minimum', (['(mean_tpr + std_tpr)', '(1)'], {}), '(mean_tpr + std_tpr, 1)\n', (4093, 4116), True, 'import numpy as np\n'), ((4140, 4173), 'numpy.maximum', 'np.maximum', (['(mean_tpr - std_tpr)', '(0)'], {}), '(mean_tpr - std_tpr, 0)\n', (4150, 4173), True, 'import numpy as np\n'), ((4611, 4621), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4619, 4621), True, 'from matplotlib import pyplot as plt, style\n'), ((941, 970), 'numpy.arange', 'np.arange', (['min_seed', 'max_seed'], {}), '(min_seed, max_seed)\n', (950, 970), True, 'import numpy as np\n'), ((1489, 1534), 'numpy.max', 'np.max', (['validation_scores_ensemble.iloc[i, :]'], {}), '(validation_scores_ensemble.iloc[i, :])\n', (1495, 1534), True, 'import numpy as np\n'), ((2358, 2387), 'numpy.arange', 'np.arange', (['min_seed', 'max_seed'], {}), '(min_seed, max_seed)\n', (2367, 2387), True, 'import numpy as np\n'), ((2957, 2967), 'pickle.load', 'pk.load', (['f'], {}), '(f)\n', (2964, 2967), True, 'import pickle as pk\n'), ((3281, 3382), 'sklearn.metrics.plot_roc_curve', 'sklearn.metrics.plot_roc_curve', (['classifier', 'X.iloc[test, :]', 'y.iloc[test]'], {'alpha': '(0.4)', 'lw': '(1)', 'ax': 'ax'}), '(classifier, X.iloc[test, :], y.iloc[test],\n alpha=0.4, lw=1, ax=ax)\n', (3311, 3382), False, 'import sklearn\n'), ((3427, 3464), 'numpy.interp', 'np.interp', (['mean_fpr', 
'viz.fpr', 'viz.tpr'], {}), '(mean_fpr, viz.fpr, viz.tpr)\n', (3436, 3464), True, 'import numpy as np\n'), ((2633, 2671), 'sklearn.preprocessing.StandardScaler', 'sklearn.preprocessing.StandardScaler', ([], {}), '()\n', (2669, 2671), False, 'import sklearn\n')] |
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 4, 10001)

# Piecewise distance profile: flat, then a linear ramp, then a circular-arc
# blend, then flat again.
y = 0 * x
ramp = np.logical_and(1 < x, x <= 2)
arc = np.logical_and(2 < x, x <= 3)
y[ramp] = x[ramp] - 1
y[arc] = 1 - np.sqrt(.3 - .25) + np.sqrt(0.3 - (x[arc] - 2.5) ** 2)
y[x > 3] = 1

fig = plt.figure(figsize=(4, 3))
plt.plot(x, y, linewidth=2.0)
plt.xticks([0, 1, 2, 3, 4])
plt.tick_params(labelsize=14, left="off", labelleft="off")
plt.xlabel("time", fontsize=14)
plt.ylabel("distance", fontsize=14)
fig.set_tight_layout(True)
plt.savefig("calcQ.pdf")

# Velocity is the finite difference of distance; plot each constant-slope
# segment separately so the jumps are not joined by vertical lines.
fig = plt.figure(figsize=(4, 3))
x = x[1:]
dydx = np.diff(y)
segments = [
    x < 1,
    np.logical_and(1 < x, x < 2),
    np.logical_and(2 < x, x < 3),
    x > 3,
]
for seg in segments:
    plt.plot(x[seg], dydx[seg], 'b', linewidth=2.0)
plt.xticks([0, 1, 2, 3, 4])
plt.yticks([0])
plt.tick_params(labelsize=14)
plt.xlabel("time", fontsize=14)
plt.ylabel("velocity", fontsize=14)
fig.set_tight_layout(True)
plt.savefig("calcA.pdf")
| [
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.logical_and",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks"
... | [((57, 81), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(10001)'], {}), '(0, 4, 10001)\n', (68, 81), True, 'import numpy as np\n'), ((270, 296), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (280, 296), True, 'import matplotlib.pyplot as plt\n'), ((297, 326), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'linewidth': '(2.0)'}), '(x, y, linewidth=2.0)\n', (305, 326), True, 'import matplotlib.pyplot as plt\n'), ((376, 403), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (386, 403), True, 'import matplotlib.pyplot as plt\n'), ((400, 458), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(14)', 'left': '"""off"""', 'labelleft': '"""off"""'}), "(labelsize=14, left='off', labelleft='off')\n", (415, 458), True, 'import matplotlib.pyplot as plt\n'), ((459, 490), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {'fontsize': '(14)'}), "('time', fontsize=14)\n", (469, 490), True, 'import matplotlib.pyplot as plt\n'), ((491, 526), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""distance"""'], {'fontsize': '(14)'}), "('distance', fontsize=14)\n", (501, 526), True, 'import matplotlib.pyplot as plt\n'), ((554, 578), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""calcQ.pdf"""'], {}), "('calcQ.pdf')\n", (565, 578), True, 'import matplotlib.pyplot as plt\n'), ((588, 614), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (598, 614), True, 'import matplotlib.pyplot as plt\n'), ((676, 686), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (683, 686), True, 'import numpy as np\n'), ((701, 729), 'numpy.logical_and', 'np.logical_and', (['(1 < x)', '(x < 2)'], {}), '(1 < x, x < 2)\n', (715, 729), True, 'import numpy as np\n'), ((730, 758), 'numpy.logical_and', 'np.logical_and', (['(2 < x)', '(x < 3)'], {}), '(2 < x, x < 3)\n', (744, 758), True, 'import numpy as np\n'), ((763, 808), 
'matplotlib.pyplot.plot', 'plt.plot', (['x[d1]', 'dydx[d1]', '"""b"""'], {'linewidth': '(2.0)'}), "(x[d1], dydx[d1], 'b', linewidth=2.0)\n", (771, 808), True, 'import matplotlib.pyplot as plt\n'), ((808, 853), 'matplotlib.pyplot.plot', 'plt.plot', (['x[d2]', 'dydx[d2]', '"""b"""'], {'linewidth': '(2.0)'}), "(x[d2], dydx[d2], 'b', linewidth=2.0)\n", (816, 853), True, 'import matplotlib.pyplot as plt\n'), ((853, 898), 'matplotlib.pyplot.plot', 'plt.plot', (['x[d3]', 'dydx[d3]', '"""b"""'], {'linewidth': '(2.0)'}), "(x[d3], dydx[d3], 'b', linewidth=2.0)\n", (861, 898), True, 'import matplotlib.pyplot as plt\n'), ((898, 943), 'matplotlib.pyplot.plot', 'plt.plot', (['x[d4]', 'dydx[d4]', '"""b"""'], {'linewidth': '(2.0)'}), "(x[d4], dydx[d4], 'b', linewidth=2.0)\n", (906, 943), True, 'import matplotlib.pyplot as plt\n'), ((944, 971), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2, 3, 4]'], {}), '([0, 1, 2, 3, 4])\n', (954, 971), True, 'import matplotlib.pyplot as plt\n'), ((968, 983), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0]'], {}), '([0])\n', (978, 983), True, 'import matplotlib.pyplot as plt\n'), ((984, 1013), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(14)'}), '(labelsize=14)\n', (999, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1045), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {'fontsize': '(14)'}), "('time', fontsize=14)\n", (1024, 1045), True, 'import matplotlib.pyplot as plt\n'), ((1046, 1081), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""velocity"""'], {'fontsize': '(14)'}), "('velocity', fontsize=14)\n", (1056, 1081), True, 'import matplotlib.pyplot as plt\n'), ((1109, 1133), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""calcA.pdf"""'], {}), "('calcA.pdf')\n", (1120, 1133), True, 'import matplotlib.pyplot as plt\n'), ((90, 119), 'numpy.logical_and', 'np.logical_and', (['(1 < x)', '(x <= 2)'], {}), '(1 < x, x <= 2)\n', (104, 119), True, 'import numpy as np\n'), ((150, 179), 
'numpy.logical_and', 'np.logical_and', (['(2 < x)', '(x <= 3)'], {}), '(2 < x, x <= 3)\n', (164, 179), True, 'import numpy as np\n'), ((120, 149), 'numpy.logical_and', 'np.logical_and', (['(1 < x)', '(x <= 2)'], {}), '(1 < x, x <= 2)\n', (134, 149), True, 'import numpy as np\n'), ((182, 201), 'numpy.sqrt', 'np.sqrt', (['(0.3 - 0.25)'], {}), '(0.3 - 0.25)\n', (189, 201), True, 'import numpy as np\n'), ((217, 246), 'numpy.logical_and', 'np.logical_and', (['(2 < x)', '(x <= 3)'], {}), '(2 < x, x <= 3)\n', (231, 246), True, 'import numpy as np\n')] |
from .base_model import BaseModel
import numpy as np
from sklearn.linear_model import LinearRegression
class LinearModel(BaseModel):
    """Ordinary least-squares regression of y on x."""

    plot_name = 'Linear'

    def train(self):
        """Fit a LinearRegression to the training data and flag completion."""
        features = np.reshape(self.x_train, (-1, 1))
        targets = np.reshape(self.y_train, (-1, 1))
        self.model = LinearRegression().fit(features, targets)
        self.is_trained = True

    def predict(self):
        """Predict over x_pred, storing the result flattened to 1-D."""
        raw = self.model.predict(self.x_pred.reshape(-1, 1))
        self.y_pred = raw.reshape(raw.size)
        self.is_predicted = True
| [
"numpy.reshape",
"sklearn.linear_model.LinearRegression"
] | [((198, 231), 'numpy.reshape', 'np.reshape', (['self.x_train', '(-1, 1)'], {}), '(self.x_train, (-1, 1))\n', (208, 231), True, 'import numpy as np\n'), ((233, 266), 'numpy.reshape', 'np.reshape', (['self.y_train', '(-1, 1)'], {}), '(self.y_train, (-1, 1))\n', (243, 266), True, 'import numpy as np\n'), ((288, 306), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (304, 306), False, 'from sklearn.linear_model import LinearRegression\n')] |
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
# # fix result, otherwise may predict all flat
# np.random.seed(1271)
# tf.compat.v1.set_random_seed(1271)
from matplotlib import patches
from matplotlib.pyplot import figure
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, f1_score, recall_score, precision_score, log_loss
#error occurs when directly import keras without tensorflow.python
from tensorflow.python.keras import layers, Input, regularizers
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.utils import to_categorical, model_to_dot, plot_model
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
def get_f1_pre_recall(model, x, y, batch_size):
    """Compute macro-averaged F1, precision, and recall for a classifier.

    model: Keras-style model whose ``predict`` returns per-class
        probabilities of shape (n_samples, n_classes).
    x: model input features.
    y: integer class labels, length n_samples.
    batch_size: int, batch size forwarded to ``model.predict``.

    Returns (f1, precision, recall), each macro-averaged over classes.
    """
    y_pred = model.predict(x, verbose=2, batch_size=batch_size)
    # Collapse the class probabilities to the arg-max class index.
    # (A previous no-op ``y_pred.tolist()`` whose result was discarded
    # has been removed.)
    y_pred = np.argmax(y_pred, axis=1)
    f1 = f1_score(y, y_pred, average='macro')
    precision = precision_score(y, y_pred, average='macro')
    recall = recall_score(y, y_pred, average='macro')
    return f1, precision, recall
def train_model(model, save_dir, train_x, train_y, valid_x, valid_y, batch_size=32, epochs=500):
    """Train ``model`` one epoch at a time, tracking accuracy/loss/F1/precision/recall.

    Every 50 epochs the model is checkpointed to ``save_dir/model`` and all
    metric histories are written as .npy files to ``save_dir/output``.

    model: compiled Keras model.
    save_dir: root output directory; 'model' and 'output' subdirectories are
        created beneath it if missing.
    train_x/train_y, valid_x/valid_y: training and validation data; the
        labels are expected to be one-hot encoded.
    batch_size: int, batch size for fitting and metric prediction.
    epochs: int, total number of training epochs.
    """
    save_dir_model = os.path.join(save_dir, 'model')
    if not os.path.isdir(save_dir_model):
        os.makedirs(save_dir_model)
    save_dir_output = os.path.join(save_dir, 'output')
    if not os.path.isdir(save_dir_output):
        os.makedirs(save_dir_output)
    train_acc = []
    valid_acc = []
    train_loss = []
    valid_loss = []
    train_f1 = []
    valid_f1 = []
    train_precision = []
    valid_precision = []
    train_recall = []
    valid_recall = []
    # sklearn metrics need integer labels, so invert the one-hot encoding.
    train_y_label = [np.where(r==1)[0][0] for r in train_y]
    valid_y_label = [np.where(r==1)[0][0] for r in valid_y]
    for i in range(epochs):
        print('starting epoch {}/{}'.format(i+1, epochs))
        history1 = model.fit(train_x, train_y, batch_size=batch_size, epochs=1,
                             validation_data=(valid_x, valid_y), verbose=2)
        if (i+1) % 50 == 0:
            # Checkpoint under save_dir/model.  (Fixed: this previously
            # wrote to a hard-coded 'result/model/' path, silently
            # ignoring the save_dir argument.)
            model_dir = os.path.join(save_dir_model, 'model_epoch_{}.h5'.format(i+1))
            if os.path.isfile(model_dir):
                os.remove(model_dir)
            model.save(model_dir)
        train_acc = train_acc + history1.history['acc']
        train_loss = train_loss + history1.history['loss']
        valid_acc = valid_acc + history1.history['val_acc']
        valid_loss = valid_loss + history1.history['val_loss']
        f1, precision, recall = get_f1_pre_recall(model, train_x, train_y_label, batch_size)
        train_f1 = train_f1 + [f1]
        train_precision = train_precision + [precision]
        train_recall = train_recall + [recall]
        f1, precision, recall = get_f1_pre_recall(model, valid_x, valid_y_label, batch_size)
        valid_f1 = valid_f1 + [f1]
        valid_precision = valid_precision + [precision]
        valid_recall = valid_recall + [recall]
        if (i+1) % 50 == 0:
            # Persist every metric history so training can be inspected
            # offline; paths are identical to save_dir/output/<name>.npy.
            metric_histories = {
                'train_acc': train_acc, 'valid_acc': valid_acc,
                'train_loss': train_loss, 'valid_loss': valid_loss,
                'train_f1': train_f1, 'valid_f1': valid_f1,
                'train_precision': train_precision,
                'valid_precision': valid_precision,
                'train_recall': train_recall, 'valid_recall': valid_recall,
            }
            for name, values in metric_histories.items():
                np.save(os.path.join(save_dir_output, name + '.npy'),
                        np.array(values))
def deeplob_model():
    """Build and compile the DeepLOB network (Zhang, Zohren & Roberts, 2019).

    Input shape (100, 20, 1): windows of 100 order-book snapshots with 20
    features each.  Three convolutional stages compress the feature axis,
    an inception module extracts multi-scale temporal features, and an
    LSTM followed by a softmax head predicts one of 3 movement classes.

    Returns a compiled Keras Model (categorical cross-entropy, Adam with
    the paper's learning rate and epsilon).
    """
    input_tensor = Input(shape=(100,20,1))

    # Stage 1: (1,2) conv with stride (1,2) pairs adjacent price/volume
    # columns, then two (4,1) temporal convs; each conv is followed by
    # batch norm and LeakyReLU.
    layer_x = layers.Conv2D(16, (1,2), strides=(1,2))(input_tensor)
    layer_x = layers.BatchNormalization()(layer_x)
    layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
    layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
    layer_x = layers.BatchNormalization()(layer_x)
    layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
    layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
    layer_x = layers.BatchNormalization()(layer_x)
    layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)

    # Stage 2: same pattern, halving the feature axis again.
    layer_x = layers.Conv2D(16, (1,2), strides=(1,2))(layer_x)
    layer_x = layers.BatchNormalization()(layer_x)
    layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
    layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
    layer_x = layers.BatchNormalization()(layer_x)
    layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
    layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
    layer_x = layers.BatchNormalization()(layer_x)
    layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)

    # Stage 3: (1,5) conv collapses the remaining feature columns.
    layer_x = layers.Conv2D(16, (1,5))(layer_x)
    layer_x = layers.BatchNormalization()(layer_x)
    layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
    layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
    layer_x = layers.BatchNormalization()(layer_x)
    layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
    layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
    layer_x = layers.BatchNormalization()(layer_x)
    layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)

    # Inception Module.  Fixed: batch norm is applied to each tower's own
    # conv output, matching the conv -> BN -> LeakyReLU pattern above.
    # (Previously `layer_x = BatchNormalization()(layer_x)` re-normalized
    # the shared input tensor and left the tower outputs unnormalized.)
    tower_1 = layers.Conv2D(32, (1,1), padding='same')(layer_x)
    tower_1 = layers.BatchNormalization()(tower_1)
    tower_1 = layers.LeakyReLU(alpha=0.01)(tower_1)
    tower_1 = layers.Conv2D(32, (3,1), padding='same')(tower_1)
    tower_1 = layers.BatchNormalization()(tower_1)
    tower_1 = layers.LeakyReLU(alpha=0.01)(tower_1)

    tower_2 = layers.Conv2D(32, (1,1), padding='same')(layer_x)
    tower_2 = layers.BatchNormalization()(tower_2)
    tower_2 = layers.LeakyReLU(alpha=0.01)(tower_2)
    tower_2 = layers.Conv2D(32, (5,1), padding='same')(tower_2)
    tower_2 = layers.BatchNormalization()(tower_2)
    tower_2 = layers.LeakyReLU(alpha=0.01)(tower_2)

    tower_3 = layers.MaxPooling2D((3,1), padding='same', strides=(1,1))(layer_x)
    tower_3 = layers.Conv2D(32, (1,1), padding='same')(tower_3)
    tower_3 = layers.BatchNormalization()(tower_3)
    tower_3 = layers.LeakyReLU(alpha=0.01)(tower_3)

    # Concatenate the tower features: 3 towers x 32 filters = 96 channels.
    layer_x = layers.concatenate([tower_1, tower_2, tower_3], axis=-1)
    layer_x = layers.Reshape((100,96))(layer_x)

    # 64 LSTM units.  CuDNNLSTM requires a GPU; swap in layers.LSTM(64)
    # to run on CPU.
    layer_x = layers.CuDNNLSTM(64)(layer_x)
    # The last output layer uses a softmax activation function
    output = layers.Dense(3, activation='softmax')(layer_x)
    model = Model(input_tensor, output)
    opt = Adam(lr=0.01, epsilon=1)  # learning rate and epsilon follow the DeepLOB paper
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
def main():
    """Entry point: load the prepared arrays, build DeepLOB, and train it."""
    cwd = os.getcwd()
    load_dir = os.path.join(cwd, 'data_set')
    if not os.path.isdir(load_dir):
        sys.exit("no data set directory")
    # Load the four prepared arrays; labels are one-hot encoded.
    names = ('train_x', 'train_y_onehot', 'valid_x', 'valid_y_onehot')
    arrays = {name: np.load(load_dir + '/' + name + '.npy') for name in names}
    save_dir = os.path.join(cwd, 'result')
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    train_model(deeplob_model(), save_dir,
                arrays['train_x'], arrays['train_y_onehot'],
                arrays['valid_x'], arrays['valid_y_onehot'],
                batch_size=512, epochs=300)

if __name__ == '__main__':
    main()
| [
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"tensorflow.python.keras.Input",
"numpy.array",
"sys.exit",
"os.remove",
"tensorflow.python.keras.layers.concatenate",
"tensorflow.python.keras.models.Model",
"tensorflow.python.keras.layers.MaxPooling2D",
"numpy.where",
"tensorf... | [((965, 990), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (974, 990), True, 'import numpy as np\n'), ((1021, 1057), 'sklearn.metrics.f1_score', 'f1_score', (['y', 'y_pred'], {'average': '"""macro"""'}), "(y, y_pred, average='macro')\n", (1029, 1057), False, 'from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, f1_score, recall_score, precision_score, log_loss\n'), ((1074, 1117), 'sklearn.metrics.precision_score', 'precision_score', (['y', 'y_pred'], {'average': '"""macro"""'}), "(y, y_pred, average='macro')\n", (1089, 1117), False, 'from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, f1_score, recall_score, precision_score, log_loss\n'), ((1131, 1171), 'sklearn.metrics.recall_score', 'recall_score', (['y', 'y_pred'], {'average': '"""macro"""'}), "(y, y_pred, average='macro')\n", (1143, 1171), False, 'from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, f1_score, recall_score, precision_score, log_loss\n'), ((1326, 1357), 'os.path.join', 'os.path.join', (['save_dir', '"""model"""'], {}), "(save_dir, 'model')\n", (1338, 1357), False, 'import os, sys\n'), ((1459, 1491), 'os.path.join', 'os.path.join', (['save_dir', '"""output"""'], {}), "(save_dir, 'output')\n", (1471, 1491), False, 'import os, sys\n'), ((4566, 4591), 'tensorflow.python.keras.Input', 'Input', ([], {'shape': '(100, 20, 1)'}), '(shape=(100, 20, 1))\n', (4571, 4591), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((7099, 7155), 'tensorflow.python.keras.layers.concatenate', 'layers.concatenate', (['[tower_1, tower_2, tower_3]'], {'axis': '(-1)'}), '([tower_1, tower_2, tower_3], axis=-1)\n', (7117, 7155), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((7555, 7582), 'tensorflow.python.keras.models.Model', 'Model', (['input_tensor', 'output'], {}), '(input_tensor, output)\n', (7560, 7582), False, 'from 
tensorflow.python.keras.models import Model, load_model\n'), ((7593, 7617), 'tensorflow.python.keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.01)', 'epsilon': '(1)'}), '(lr=0.01, epsilon=1)\n', (7597, 7617), False, 'from tensorflow.python.keras.optimizers import Adam\n'), ((7942, 7976), 'numpy.load', 'np.load', (["(load_dir + '/train_x.npy')"], {}), "(load_dir + '/train_x.npy')\n", (7949, 7976), True, 'import numpy as np\n'), ((7989, 8030), 'numpy.load', 'np.load', (["(load_dir + '/train_y_onehot.npy')"], {}), "(load_dir + '/train_y_onehot.npy')\n", (7996, 8030), True, 'import numpy as np\n'), ((8044, 8078), 'numpy.load', 'np.load', (["(load_dir + '/valid_x.npy')"], {}), "(load_dir + '/valid_x.npy')\n", (8051, 8078), True, 'import numpy as np\n'), ((8091, 8132), 'numpy.load', 'np.load', (["(load_dir + '/valid_y_onehot.npy')"], {}), "(load_dir + '/valid_y_onehot.npy')\n", (8098, 8132), True, 'import numpy as np\n'), ((1369, 1398), 'os.path.isdir', 'os.path.isdir', (['save_dir_model'], {}), '(save_dir_model)\n', (1382, 1398), False, 'import os, sys\n'), ((1408, 1435), 'os.makedirs', 'os.makedirs', (['save_dir_model'], {}), '(save_dir_model)\n', (1419, 1435), False, 'import os, sys\n'), ((1503, 1533), 'os.path.isdir', 'os.path.isdir', (['save_dir_output'], {}), '(save_dir_output)\n', (1516, 1533), False, 'import os, sys\n'), ((1543, 1571), 'os.makedirs', 'os.makedirs', (['save_dir_output'], {}), '(save_dir_output)\n', (1554, 1571), False, 'import os, sys\n'), ((4662, 4703), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(16)', '(1, 2)'], {'strides': '(1, 2)'}), '(16, (1, 2), strides=(1, 2))\n', (4675, 4703), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((4730, 4757), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4755, 4757), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((4781, 4809), 'tensorflow.python.keras.layers.LeakyReLU', 
'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (4797, 4809), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((4833, 4874), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(16)', '(4, 1)'], {'padding': '"""same"""'}), "(16, (4, 1), padding='same')\n", (4846, 4874), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((4897, 4924), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4922, 4924), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((4948, 4976), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (4964, 4976), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5000, 5041), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(16)', '(4, 1)'], {'padding': '"""same"""'}), "(16, (4, 1), padding='same')\n", (5013, 5041), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5064, 5091), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (5089, 5091), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5115, 5143), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (5131, 5143), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5168, 5209), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(16)', '(1, 2)'], {'strides': '(1, 2)'}), '(16, (1, 2), strides=(1, 2))\n', (5181, 5209), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5231, 5258), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (5256, 5258), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5282, 5310), 'tensorflow.python.keras.layers.LeakyReLU', 
'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (5298, 5310), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5334, 5375), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(16)', '(4, 1)'], {'padding': '"""same"""'}), "(16, (4, 1), padding='same')\n", (5347, 5375), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5398, 5425), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (5423, 5425), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5449, 5477), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (5465, 5477), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5501, 5542), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(16)', '(4, 1)'], {'padding': '"""same"""'}), "(16, (4, 1), padding='same')\n", (5514, 5542), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5565, 5592), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (5590, 5592), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5616, 5644), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (5632, 5644), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5669, 5694), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(16)', '(1, 5)'], {}), '(16, (1, 5))\n', (5682, 5694), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5717, 5744), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (5742, 5744), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5768, 5796), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': 
'(0.01)'}), '(alpha=0.01)\n', (5784, 5796), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5820, 5861), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(16)', '(4, 1)'], {'padding': '"""same"""'}), "(16, (4, 1), padding='same')\n", (5833, 5861), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5884, 5911), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (5909, 5911), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5935, 5963), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (5951, 5963), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((5987, 6028), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(16)', '(4, 1)'], {'padding': '"""same"""'}), "(16, (4, 1), padding='same')\n", (6000, 6028), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6051, 6078), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6076, 6078), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6102, 6130), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (6118, 6130), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6178, 6219), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(1, 1)'], {'padding': '"""same"""'}), "(32, (1, 1), padding='same')\n", (6191, 6219), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6242, 6269), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6267, 6269), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6293, 6321), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], 
{'alpha': '(0.01)'}), '(alpha=0.01)\n', (6309, 6321), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6345, 6386), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(3, 1)'], {'padding': '"""same"""'}), "(32, (3, 1), padding='same')\n", (6358, 6386), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6409, 6436), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6434, 6436), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6460, 6488), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (6476, 6488), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6513, 6554), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(1, 1)'], {'padding': '"""same"""'}), "(32, (1, 1), padding='same')\n", (6526, 6554), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6577, 6604), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6602, 6604), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6628, 6656), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (6644, 6656), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6680, 6721), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(5, 1)'], {'padding': '"""same"""'}), "(32, (5, 1), padding='same')\n", (6693, 6721), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6744, 6771), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6769, 6771), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6795, 6823), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', 
([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (6811, 6823), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6850, 6909), 'tensorflow.python.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(3, 1)'], {'padding': '"""same"""', 'strides': '(1, 1)'}), "((3, 1), padding='same', strides=(1, 1))\n", (6869, 6909), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6931, 6972), 'tensorflow.python.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(1, 1)'], {'padding': '"""same"""'}), "(32, (1, 1), padding='same')\n", (6944, 6972), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((6995, 7022), 'tensorflow.python.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (7020, 7022), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((7046, 7074), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (7062, 7074), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((7227, 7252), 'tensorflow.python.keras.layers.Reshape', 'layers.Reshape', (['(100, 96)'], {}), '((100, 96))\n', (7241, 7252), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((7389, 7409), 'tensorflow.python.keras.layers.CuDNNLSTM', 'layers.CuDNNLSTM', (['(64)'], {}), '(64)\n', (7405, 7409), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((7495, 7532), 'tensorflow.python.keras.layers.Dense', 'layers.Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (7507, 7532), False, 'from tensorflow.python.keras import layers, Input, regularizers\n'), ((7824, 7835), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7833, 7835), False, 'import os, sys\n'), ((7860, 7883), 'os.path.isdir', 'os.path.isdir', (['load_dir'], {}), '(load_dir)\n', (7873, 7883), False, 'import os, sys\n'), ((7893, 7926), 'sys.exit', 'sys.exit', 
(['"""no data set directory"""'], {}), "('no data set directory')\n", (7901, 7926), False, 'import os, sys\n'), ((8160, 8171), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8169, 8171), False, 'import os, sys\n'), ((8194, 8217), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (8207, 8217), False, 'import os, sys\n'), ((8227, 8248), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (8238, 8248), False, 'import os, sys\n'), ((2321, 2346), 'os.path.isfile', 'os.path.isfile', (['model_dir'], {}), '(model_dir)\n', (2335, 2346), False, 'import os, sys\n'), ((3181, 3200), 'numpy.array', 'np.array', (['train_acc'], {}), '(train_acc)\n', (3189, 3200), True, 'import numpy as np\n'), ((3228, 3247), 'numpy.array', 'np.array', (['valid_acc'], {}), '(valid_acc)\n', (3236, 3247), True, 'import numpy as np\n'), ((3277, 3297), 'numpy.array', 'np.array', (['train_loss'], {}), '(train_loss)\n', (3285, 3297), True, 'import numpy as np\n'), ((3326, 3346), 'numpy.array', 'np.array', (['valid_loss'], {}), '(valid_loss)\n', (3334, 3346), True, 'import numpy as np\n'), ((3374, 3392), 'numpy.array', 'np.array', (['train_f1'], {}), '(train_f1)\n', (3382, 3392), True, 'import numpy as np\n'), ((3419, 3437), 'numpy.array', 'np.array', (['valid_f1'], {}), '(valid_f1)\n', (3427, 3437), True, 'import numpy as np\n'), ((3472, 3497), 'numpy.array', 'np.array', (['train_precision'], {}), '(train_precision)\n', (3480, 3497), True, 'import numpy as np\n'), ((3531, 3556), 'numpy.array', 'np.array', (['valid_precision'], {}), '(valid_precision)\n', (3539, 3556), True, 'import numpy as np\n'), ((3588, 3610), 'numpy.array', 'np.array', (['train_recall'], {}), '(train_recall)\n', (3596, 3610), True, 'import numpy as np\n'), ((3641, 3663), 'numpy.array', 'np.array', (['valid_recall'], {}), '(valid_recall)\n', (3649, 3663), True, 'import numpy as np\n'), ((1807, 1823), 'numpy.where', 'np.where', (['(r == 1)'], {}), '(r == 1)\n', (1815, 1823), True, 'import numpy as 
np\n'), ((1867, 1883), 'numpy.where', 'np.where', (['(r == 1)'], {}), '(r == 1)\n', (1875, 1883), True, 'import numpy as np\n'), ((2364, 2384), 'os.remove', 'os.remove', (['model_dir'], {}), '(model_dir)\n', (2373, 2384), False, 'import os, sys\n'), ((3685, 3731), 'os.path.join', 'os.path.join', (['save_dir', '"""output/train_acc.npy"""'], {}), "(save_dir, 'output/train_acc.npy')\n", (3697, 3731), False, 'import os, sys\n'), ((3767, 3813), 'os.path.join', 'os.path.join', (['save_dir', '"""output/valid_acc.npy"""'], {}), "(save_dir, 'output/valid_acc.npy')\n", (3779, 3813), False, 'import os, sys\n'), ((3850, 3897), 'os.path.join', 'os.path.join', (['save_dir', '"""output/train_loss.npy"""'], {}), "(save_dir, 'output/train_loss.npy')\n", (3862, 3897), False, 'import os, sys\n'), ((3934, 3981), 'os.path.join', 'os.path.join', (['save_dir', '"""output/valid_loss.npy"""'], {}), "(save_dir, 'output/valid_loss.npy')\n", (3946, 3981), False, 'import os, sys\n'), ((4019, 4064), 'os.path.join', 'os.path.join', (['save_dir', '"""output/train_f1.npy"""'], {}), "(save_dir, 'output/train_f1.npy')\n", (4031, 4064), False, 'import os, sys\n'), ((4099, 4144), 'os.path.join', 'os.path.join', (['save_dir', '"""output/valid_f1.npy"""'], {}), "(save_dir, 'output/valid_f1.npy')\n", (4111, 4144), False, 'import os, sys\n'), ((4180, 4232), 'os.path.join', 'os.path.join', (['save_dir', '"""output/train_precision.npy"""'], {}), "(save_dir, 'output/train_precision.npy')\n", (4192, 4232), False, 'import os, sys\n'), ((4274, 4326), 'os.path.join', 'os.path.join', (['save_dir', '"""output/valid_precision.npy"""'], {}), "(save_dir, 'output/valid_precision.npy')\n", (4286, 4326), False, 'import os, sys\n'), ((4369, 4418), 'os.path.join', 'os.path.join', (['save_dir', '"""output/train_recall.npy"""'], {}), "(save_dir, 'output/train_recall.npy')\n", (4381, 4418), False, 'import os, sys\n'), ((4457, 4506), 'os.path.join', 'os.path.join', (['save_dir', '"""output/valid_recall.npy"""'], {}), 
"(save_dir, 'output/valid_recall.npy')\n", (4469, 4506), False, 'import os, sys\n')] |
import random
from copy import deepcopy
from dataclasses import dataclass
from typing import List
from PIL import Image, ImageDraw, ImageFont
import numpy as np
WIDTH = 1100  # canvas width/height in pixels; peaks are laid out inside this square
SIZE = (0, WIDTH)  # inclusive range for random edge weights; SIZE[1]//2 is also the circle centre
N = 6  # number of peaks (vertices) in the demo graph
K = 3  # number of clusters carved out of the spanning tree
@dataclass
class Peak:
    """A graph vertex located at pixel coordinates (x, y)."""
    x: int
    y: int

    def __eq__(self, other):
        # Two peaks are equal when they sit at the same coordinates.
        return (self.x, self.y) == (other.x, other.y)

    def __hash__(self):
        return hash((self.x, self.y))
@dataclass
class Edge:
    """An undirected weighted connection between two peaks."""
    first: Peak
    second: Peak
    weight: int = 0

    def __eq__(self, other):
        # Undirected: (a, b) matches both (a, b) and (b, a); weight is ignored.
        same_order = self.first == other.first and self.second == other.second
        flipped = self.first == other.second and self.second == other.first
        return same_order or flipped
@dataclass
class Graph:
    """A set of peaks (vertices) plus the weighted edges connecting them."""
    peaks: List[Peak]
    edges: List[Edge]
def generate_random_graph(peaks_count: int):
    """Build a complete graph with vertices evenly spaced on a circle.

    Edge weights are drawn uniformly at random from SIZE.
    """
    radius = WIDTH / 2
    centre = SIZE[1] // 2
    x_c = [
        centre + radius * np.cos(2 * np.pi * j / peaks_count)
        for j in range(peaks_count)
    ]
    y_c = [
        centre + radius * np.sin(2 * np.pi * j / peaks_count)
        for j in range(peaks_count)
    ]
    peaks = [Peak(int(xv), int(yv)) for xv, yv in zip(x_c, y_c)]
    edges = []
    for i, left in enumerate(peaks):
        for right in peaks[i:]:
            if left == right:
                continue  # no self-loops (or coincident peaks)
            edges.append(Edge(left, right, random.randint(*SIZE)))
    return Graph(peaks, edges)
def find_min_edge_to_min_path(min_path, left_edges):
    """Pick the cheapest edge that can legally extend the spanning tree.

    Args:
        min_path: edges already in the tree.
        left_edges: remaining candidate edges; sorted in place by weight
            as a side effect.

    Returns:
        The cheapest candidate with exactly one endpoint inside the tree,
        or the overall cheapest edge when the tree is still empty.
        Returns None if no candidate can extend the tree.
    """
    left_edges.sort(key=lambda e: e.weight)
    if len(min_path) == 0:
        return left_edges[0]
    insulated_peaks = []
    for e in min_path:
        insulated_peaks.append(e.first)
        insulated_peaks.append(e.second)
    for edge in left_edges:
        # Bug fix: require *exactly one* endpoint inside the tree.  The old
        # 'or' check also accepted edges with both endpoints already in the
        # tree, which introduces cycles and breaks the spanning-tree property.
        inside_first = edge.first in insulated_peaks
        inside_second = edge.second in insulated_peaks
        if inside_first != inside_second:
            return edge
def find_max_edge(edges: List[Edge]):
    """Return the edge carrying the largest weight (first one on ties)."""
    def weight_of(edge):
        return edge.weight
    return max(edges, key=weight_of)
def knp(graph: Graph):
    """Grow a minimum spanning tree (Prim/KNP style), then cluster it."""
    min_path = []
    left_edges = deepcopy(graph.edges)  # selection mutates the pool, so work on a copy
    while True:
        # Peaks already reached by the tree built so far.
        covered = {p for e in min_path for p in (e.first, e.second)}
        if len(covered) == len(graph.peaks):
            break
        edge = find_min_edge_to_min_path(min_path, left_edges)
        left_edges.remove(edge)
        min_path.append(edge)
    make_clusters(min_path)
def make_clusters(edges):
    """Drop the K-1 heaviest tree edges so the drawing splits into K clusters."""
    edges.sort(key=lambda e: e.weight, reverse=True)  # heaviest first
    im, d = get_peaks_image(edges)  # every peak is drawn regardless
    draw(edges[K - 1:], im=im, d=d)  # skip the K-1 heaviest edges
def get_peaks_image(edges: List[Edge]):
    """Create a blank canvas and paint every peak touched by *edges* as a red circle.

    Returns the image together with its draw handle so callers can keep drawing.
    """
    canvas = Image.new("RGBA", (WIDTH, WIDTH), (255, 255, 255, 255))
    painter = ImageDraw.Draw(canvas)
    for edge in edges:
        for peak in (edge.first, edge.second):
            painter.ellipse(
                (peak.x - 15, peak.y - 15, peak.x + 15, peak.y + 15),
                fill=(255, 0, 0),
                outline=(0, 0, 0),
            )
    return canvas, painter
def draw(edges: List[Edge], im=None, d=None):
    """Render edges with jittered weight labels, overlay the peaks, and show the result.

    A fresh canvas is created unless *im*/*d* are supplied by the caller.
    """
    im = im or Image.new("RGBA", (WIDTH, WIDTH), (255, 255, 255, 255))
    d = d or ImageDraw.Draw(im)
    font = ImageFont.truetype("arial.ttf", size=20)
    for edge in edges:
        d.line(
            (edge.first.x, edge.first.y, edge.second.x, edge.second.y),
            fill=128,
            width=5,
        )
        # Random jitter keeps labels of nearby edges from overlapping.
        label_x = (edge.first.x + edge.second.x) // 2 + random.randint(-30, 30)
        label_y = (edge.first.y + edge.second.y) // 2 + random.randint(-30, 30)
        d.text(
            (label_x, label_y),
            str(edge.weight),
            fill=(255, 0, 0),
            font=font,
        )
    # Peaks are drawn last so the circles sit on top of the edge lines.
    for edge in edges:
        for peak in (edge.first, edge.second):
            d.ellipse(
                (peak.x - 15, peak.y - 15, peak.x + 15, peak.y + 15),
                fill=(255, 0, 0),
                outline=(0, 0, 0),
            )
    im.show()
if __name__ == "__main__":
    # Demo: build a random complete graph on N peaks, preview it, then
    # cluster it via the spanning tree.
    graph = generate_random_graph(N)
    print(graph.edges)
    draw(graph.edges)
    knp(graph)
| [
"PIL.Image.new",
"PIL.ImageFont.truetype",
"PIL.ImageDraw.Draw",
"numpy.cos",
"copy.deepcopy",
"numpy.sin",
"random.randint"
] | [((2132, 2153), 'copy.deepcopy', 'deepcopy', (['graph.edges'], {}), '(graph.edges)\n', (2140, 2153), False, 'from copy import deepcopy\n'), ((2752, 2807), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(WIDTH, WIDTH)', '(255, 255, 255, 255)'], {}), "('RGBA', (WIDTH, WIDTH), (255, 255, 255, 255))\n", (2761, 2807), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2817, 2835), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (2831, 2835), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3588, 3628), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""'], {'size': '(20)'}), "('arial.ttf', size=20)\n", (3606, 3628), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3487, 3542), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(WIDTH, WIDTH)', '(255, 255, 255, 255)'], {}), "('RGBA', (WIDTH, WIDTH), (255, 255, 255, 255))\n", (3496, 3542), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3557, 3575), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (3571, 3575), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((935, 970), 'numpy.cos', 'np.cos', (['(2 * np.pi * j / peaks_count)'], {}), '(2 * np.pi * j / peaks_count)\n', (941, 970), True, 'import numpy as np\n'), ((1061, 1096), 'numpy.sin', 'np.sin', (['(2 * np.pi * j / peaks_count)'], {}), '(2 * np.pi * j / peaks_count)\n', (1067, 1096), True, 'import numpy as np\n'), ((1419, 1440), 'random.randint', 'random.randint', (['*SIZE'], {}), '(*SIZE)\n', (1433, 1440), False, 'import random\n'), ((3886, 3909), 'random.randint', 'random.randint', (['(-30)', '(30)'], {}), '(-30, 30)\n', (3900, 3909), False, 'import random\n'), ((3966, 3989), 'random.randint', 'random.randint', (['(-30)', '(30)'], {}), '(-30, 30)\n', (3980, 3989), False, 'import random\n')] |
# @Author : bamtercelboo
# @Datetime : 2018/1/31 9:24
# @File : model_PNC.py
# @Last Modify Time : 2018/1/31 9:24
# @Contact : <EMAIL>, <EMAIL>
"""
FILE : model_PNC.py
FUNCTION : Part-of-Speech Tagging(POS), Named Entity Recognition(NER) and Chunking
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable as Variable
import random
import torch.nn.init as init
import numpy as np
import hyperparams
torch.manual_seed(hyperparams.seed_num)  # reproducible parameter initialisation
random.seed(hyperparams.seed_num)  # reproducible python-level randomness
class PNC(nn.Module):
    """Window-based tagger for POS / NER / Chunking.

    Each token is represented by the concatenated embeddings of a 5-token
    window centred on it; the windows are fed through an LSTM and projected
    to the tag space.
    """

    def __init__(self, args):
        super(PNC, self).__init__()
        self.args = args
        # Window width: 2 tokens of left context + the token + 2 of right context.
        self.cat_size = 5
        V = args.embed_num   # vocabulary size
        D = args.embed_dim   # embedding dimension
        C = args.class_num   # number of output tags
        paddingId = args.paddingId
        self.embed = nn.Embedding(V, D, padding_idx=paddingId)
        if args.word_Embedding:
            self.embed.weight.data.copy_(args.pretrained_weight)
        # self.embed.weight.requires_grad = False
        self.dropout_embed = nn.Dropout(args.dropout_embed)
        self.dropout = nn.Dropout(args.dropout)
        self.batchNorm = nn.BatchNorm1d(D * 5)
        # NOTE(review): named "bilstm" but bidirectional=False, and
        # input_size=500 assumes embed_dim * cat_size == 500 -- confirm
        # against the training configuration.
        self.bilstm = nn.LSTM(input_size=500, hidden_size=100, bidirectional=False, bias=True)
        # self.linear = nn.Linear(in_features=D * self.cat_size, out_features=C, bias=True)
        # NOTE(review): in_features=D assumes embed_dim equals the LSTM
        # hidden size (100) -- confirm.
        self.linear = nn.Linear(in_features=D, out_features=C, bias=True)
        # In-place initializer; the non-underscore xavier_uniform is a
        # long-deprecated alias.
        init.xavier_uniform_(self.linear.weight)
        # self.linear.bias.data.uniform_(-np.sqrt(6 / (D + 1)), np.sqrt(6 / (D + 1)))

    def cat_embedding(self, x):
        """Concatenate, for every token, the embeddings of its 5-token window.

        Args:
            x: embedded batch of shape (batch, words, embed_dim).

        Returns:
            Variable of shape (batch, words, embed_dim * cat_size).
        """
        batch = x.size(0)
        word_size = x.size(1)
        cated_embed = torch.zeros(batch, word_size, self.args.embed_dim * self.cat_size)
        for id_batch in range(batch):
            batch_word_list = np.array(x[id_batch].data).tolist()
            # Two zero rows of left padding ...
            batch_word_list.insert(0, [0] * self.args.embed_dim)
            batch_word_list.insert(1, [0] * self.args.embed_dim)
            # ... and two appended at the back so every token has a full
            # 5-row window.  (Bug fix: the old insert(word_size, ...) calls
            # landed *inside* the sentence once the two front pads shifted
            # the indices, corrupting the windows of the last two tokens.)
            batch_word_list.append([0] * self.args.embed_dim)
            batch_word_list.append([0] * self.args.embed_dim)
            batch_word_embed = torch.from_numpy(np.array(batch_word_list)).type(torch.FloatTensor)
            cat_list = []
            for id_word in range(word_size):
                # Rows id_word .. id_word+4 flattened into one (1, 5*D) vector.
                # (Bug fix: torch.cat on a raw Tensor slice is invalid on
                # current PyTorch, which requires a sequence of tensors.)
                window = batch_word_embed[id_word:(id_word + self.cat_size)]
                cat_list.append(window.contiguous().view(1, -1))
            sentence_cated_embed = torch.cat(cat_list)
            cated_embed[id_batch] = sentence_cated_embed
        if self.args.use_cuda is True:
            cated_embed = Variable(cated_embed).cuda()
        else:
            cated_embed = Variable(cated_embed)
        return cated_embed

    def forward(self, batch_features):
        """Tag a batch: embed -> window-concat -> LSTM -> tanh -> linear logits."""
        word = batch_features.word_features
        x = self.embed(word)  # (N, W, D)
        cated_embed = self.cat_embedding(x)
        cated_embed = self.dropout_embed(cated_embed)
        # NOTE(review): the LSTM is batch_first=False, so (batch, words,
        # features) is interpreted as (seq, batch, features) -- confirm
        # this is intended.
        cated_embed, _ = self.bilstm(cated_embed)
        # cated_embed = self.batchNorm(cated_embed.permute(0, 2, 1))
        cated_embed = F.tanh(cated_embed)
        logit = self.linear(cated_embed)
        return logit
| [
"torch.manual_seed",
"torch.nn.functional.tanh",
"torch.nn.Dropout",
"torch.nn.LSTM",
"random.seed",
"torch.nn.BatchNorm1d",
"torch.cat",
"numpy.array",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform",
"torch.autograd.Variable",
"torch.zeros",
"torch.nn.Embedding"
] | [((482, 521), 'torch.manual_seed', 'torch.manual_seed', (['hyperparams.seed_num'], {}), '(hyperparams.seed_num)\n', (499, 521), False, 'import torch\n'), ((522, 555), 'random.seed', 'random.seed', (['hyperparams.seed_num'], {}), '(hyperparams.seed_num)\n', (533, 555), False, 'import random\n'), ((837, 878), 'torch.nn.Embedding', 'nn.Embedding', (['V', 'D'], {'padding_idx': 'paddingId'}), '(V, D, padding_idx=paddingId)\n', (849, 878), True, 'import torch.nn as nn\n'), ((1057, 1087), 'torch.nn.Dropout', 'nn.Dropout', (['args.dropout_embed'], {}), '(args.dropout_embed)\n', (1067, 1087), True, 'import torch.nn as nn\n'), ((1111, 1135), 'torch.nn.Dropout', 'nn.Dropout', (['args.dropout'], {}), '(args.dropout)\n', (1121, 1135), True, 'import torch.nn as nn\n'), ((1162, 1183), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(D * 5)'], {}), '(D * 5)\n', (1176, 1183), True, 'import torch.nn as nn\n'), ((1207, 1279), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(500)', 'hidden_size': '(100)', 'bidirectional': '(False)', 'bias': '(True)'}), '(input_size=500, hidden_size=100, bidirectional=False, bias=True)\n', (1214, 1279), True, 'import torch.nn as nn\n'), ((1395, 1446), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'D', 'out_features': 'C', 'bias': '(True)'}), '(in_features=D, out_features=C, bias=True)\n', (1404, 1446), True, 'import torch.nn as nn\n'), ((1455, 1494), 'torch.nn.init.xavier_uniform', 'init.xavier_uniform', (['self.linear.weight'], {}), '(self.linear.weight)\n', (1474, 1494), True, 'import torch.nn.init as init\n'), ((1721, 1787), 'torch.zeros', 'torch.zeros', (['batch', 'word_size', '(self.args.embed_dim * self.cat_size)'], {}), '(batch, word_size, self.args.embed_dim * self.cat_size)\n', (1732, 1787), False, 'import torch\n'), ((3252, 3271), 'torch.nn.functional.tanh', 'F.tanh', (['cated_embed'], {}), '(cated_embed)\n', (3258, 3271), True, 'import torch.nn.functional as F\n'), ((2486, 2505), 'torch.cat', 'torch.cat', (['cat_list'], {}), 
'(cat_list)\n', (2495, 2505), False, 'import torch\n'), ((2697, 2718), 'torch.autograd.Variable', 'Variable', (['cated_embed'], {}), '(cated_embed)\n', (2705, 2718), True, 'from torch.autograd import Variable as Variable\n'), ((1856, 1882), 'numpy.array', 'np.array', (['x[id_batch].data'], {}), '(x[id_batch].data)\n', (1864, 1882), True, 'import numpy as np\n'), ((2628, 2649), 'torch.autograd.Variable', 'Variable', (['cated_embed'], {}), '(cated_embed)\n', (2636, 2649), True, 'from torch.autograd import Variable as Variable\n'), ((2220, 2245), 'numpy.array', 'np.array', (['batch_word_list'], {}), '(batch_word_list)\n', (2228, 2245), True, 'import numpy as np\n'), ((2374, 2434), 'torch.cat', 'torch.cat', (['batch_word_embed[id_word:id_word + self.cat_size]'], {}), '(batch_word_embed[id_word:id_word + self.cat_size])\n', (2383, 2434), False, 'import torch\n')] |
import os, numpy as np, settings as sett

# Number of observed data points (assumed sample size used in the STKDE
# normalisation -- confirm it matches the data set).
numPts = 10000

# Grid of sample points with associated ST-density values: [x, y, t, hs, ht, ks, kt]
inArr = np.load("astkdeArr.npy")

# Output table: ID, x, y, t, STKDE, max clustering scale s, max clustering scale t
dim = inArr.shape
nRows = dim[0] * dim[1] * dim[2]
nCols = 7
outArr = np.zeros((nRows, nCols))

ID = 0
for xIndex in range(dim[0]):
    for yIndex in range(dim[1]):
        for tIndex in range(dim[2]):
            point = inArr[xIndex][yIndex][tIndex]
            hs, ht = point[3], point[4]  # optimal spatial and temporal bandwidth
            ks, kt = point[5], point[6]  # spatial and temporal density
            outArr[ID][0] = ID
            outArr[ID][1] = point[0]  # x
            outArr[ID][2] = point[1]  # y
            outArr[ID][3] = point[2]  # t
            outArr[ID][4] = (1 / (numPts * pow(hs, 2) * ht)) * (ks * kt)  # STKDE
            outArr[ID][5] = point[3]  # max clustering scale s
            outArr[ID][6] = point[4]  # max clustering scale t
            ID += 1

# Eliminate zeroes (sample points outside city limits) while writing out.
with open("aSTKDE.txt", "w") as outFile:
    for row in outArr:
        if row[1] == 0 and row[2] == 0 and row[3] == 0:
            continue
        outFile.write(str(row[1]) + "," + str(row[2]) + "," + str(row[3]) + "," +
                      str(row[4]) + "," + str(row[5]) + "," + str(row[6]) + "\n")
"numpy.zeros",
"numpy.load"
] | [((184, 208), 'numpy.load', 'np.load', (['"""astkdeArr.npy"""'], {}), "('astkdeArr.npy')\n", (191, 208), True, 'import os, numpy as np, settings as sett\n'), ((406, 430), 'numpy.zeros', 'np.zeros', (['(nRows, nCols)'], {}), '((nRows, nCols))\n', (414, 430), True, 'import os, numpy as np, settings as sett\n')] |
# Copyright 2017 Sidewalk Labs | https://www.apache.org/licenses/LICENSE-2.0
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from collections import defaultdict, namedtuple
import numpy as np
import pandas
from doppelganger.listbalancer import (
balance_multi_cvx, discretize_multi_weights
)
from doppelganger import inputs
HIGH_PASS_THRESHOLD = .1  # Filter controls which are present in less than 10% of HHs

# These are the minimum fields needed to allocate households
DEFAULT_PERSON_FIELDS = {
    inputs.STATE,
    inputs.PUMA,
    inputs.SERIAL_NUMBER,
    inputs.AGE,
    inputs.SEX,
    inputs.PERSON_WEIGHT,
}
DEFAULT_HOUSEHOLD_FIELDS = {
    inputs.STATE,
    inputs.PUMA,
    inputs.SERIAL_NUMBER,
    inputs.NUM_PEOPLE,
    inputs.HOUSEHOLD_WEIGHT,
}

# (tract, count): how many copies of one household record belong in a tract.
CountInformation = namedtuple('CountInformation', ['tract', 'count'])
class HouseholdAllocator(object):
    """Assigns weighted PUMS household/person sample records to census tracts."""

    @staticmethod
    def from_csvs(households_csv, persons_csv):
        """Load saved household and person allocations.

        Args:
            households_csv (unicode): path to households file
            persons_csv (unicode): path to persons file

        Returns:
            HouseholdAllocator: allocated persons & households_csv
        """
        allocated_households = pandas.read_csv(households_csv)
        allocated_persons = pandas.read_csv(persons_csv)
        return HouseholdAllocator(allocated_households, allocated_persons)

    @staticmethod
    def from_cleaned_data(marginals, households_data, persons_data):
        """Allocate households based on the given data.

        Args:
            marginals (Marginals): controls to match when allocating
            households_data (CleanedData): data about households. Must contain
                DEFAULT_HOUSEHOLD_FIELDS.
            persons_data (CleanedData): data about persons. Must contain
                DEFAULT_PERSON_FIELDS.

        Returns:
            HouseholdAllocator: allocated households & persons
        """
        for field in DEFAULT_HOUSEHOLD_FIELDS:
            assert field.name in households_data.data, \
                'Missing required field {}'.format(field.name)
        for field in DEFAULT_PERSON_FIELDS:
            assert field.name in persons_data.data, \
                'Missing required field {}'.format(field.name)

        households, persons = HouseholdAllocator._format_data(
            households_data.data, persons_data.data)
        allocated_households, allocated_persons = \
            HouseholdAllocator._allocate_households(households, persons, marginals)
        return HouseholdAllocator(allocated_households, allocated_persons)

    def __init__(self, allocated_households, allocated_persons):
        """Wrap allocation results and index household counts by serial number.

        Args:
            allocated_households (pandas.DataFrame): households with tract/count info
            allocated_persons (pandas.DataFrame): person records
        """
        self.allocated_households = allocated_households
        self.allocated_persons = allocated_persons
        # serialno -> [CountInformation(tract, count), ...]
        self.serialno_to_counts = defaultdict(list)
        for _, row in self.allocated_households.iterrows():
            serialno = row[inputs.SERIAL_NUMBER.name]
            tract = row[inputs.TRACT.name]
            count = int(row[inputs.COUNT.name])
            self.serialno_to_counts[serialno].append(CountInformation(tract, count))

    def get_counts(self, serialno):
        """Return the information about weights for a given serial number.

        A household is repeated for a certain number of times for each tract.
        This returns a list of (tract, repeat count). The repeat count
        indicates the number of times this serial number should be repeated in
        this tract.

        Args:
            seriano (unicode): the household's serial number

        Returns:
            list(CountInformation): the weighted repetitions for this serialno
        """
        return self.serialno_to_counts[serialno]

    def write(self, household_file, person_file):
        """Write allocated households and persons to the given files.

        Args:
            household_file (unicode): path to write households to
            person_file (unicode): path to write persons to
        """
        self.allocated_households.to_csv(household_file)
        self.allocated_persons.to_csv(person_file)

    @staticmethod
    def _filter_sparse_columns(df, cols):
        '''Filter out variables which are so sparse they would break the solver.

        Columns are assumed to be of an indicator type (0/1).

        Args
            df (pandas.DataFrame): dataframe to filter
            cols (list(str)): column names

        Returns
            filtered column list (list(str))
        '''
        return df[cols]\
            .loc[:, df[cols].sum()/float(len(df)) > HIGH_PASS_THRESHOLD]\
            .columns.tolist()

    @staticmethod
    def _allocate_households(households, persons, tract_controls):
        """Balance household weights against tract-level marginal controls.

        Args:
            households (pandas.DataFrame): formatted household records
            persons (pandas.DataFrame): formatted person records
            tract_controls (Marginals): tract-level control totals

        Returns:
            (pandas.DataFrame, pandas.DataFrame): households repeated per
            tract with a count column, and the untouched persons table
        """
        # Only take nonzero weights
        households = households[households[inputs.HOUSEHOLD_WEIGHT.name] > 0]

        # Initial weights from PUMS.
        # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0.
        w = households[inputs.HOUSEHOLD_WEIGHT.name].values.T

        allocation_inputs = [inputs.NUM_PEOPLE, inputs.NUM_VEHICLES]  # Hard-coded for now
        # Prepend column name to bin name to prevent bin collision
        hh_columns = []
        for a_input in allocation_inputs:
            subset_values = households[a_input.name].unique().tolist()
            hh_columns += HouseholdAllocator._str_broadcast(a_input.name, subset_values)

        hh_columns = HouseholdAllocator._filter_sparse_columns(households, hh_columns)

        hh_table = households[hh_columns].values
        A = tract_controls.data[hh_columns].values
        n_tracts, n_controls = A.shape
        n_samples = len(households.index.values)

        # Control importance weights
        # < 1 means not important (thus relaxing the constraint in the solver)
        mu = np.mat([1] * n_controls)

        w_extend = np.tile(w, (n_tracts, 1))
        mu_extend = np.mat(np.tile(mu, (n_tracts, 1)))
        B = np.mat(np.dot(np.ones((1, n_tracts)), A)[0])

        # Our trade-off coefficient gamma
        # Low values (~1) mean we trust our initial weights, high values
        # (~10000) mean want to fit the marginals.
        gamma = 100.

        # Meta-balancing coefficient
        meta_gamma = 100.

        hh_weights = balance_multi_cvx(
            hh_table, A, B, w_extend, gamma * mu_extend.T, meta_gamma
        )

        # We're running discretization independently for each tract
        tract_ids = tract_controls.data['TRACTCE'].values
        sample_weights_int = hh_weights.astype(int)
        discretized_hh_weights = discretize_multi_weights(hh_table, hh_weights)
        total_weights = sample_weights_int + discretized_hh_weights

        # Extend households and add the weights and ids
        households_extend = pandas.concat([households] * n_tracts)
        households_extend[inputs.COUNT.name] = total_weights.flatten().T
        tracts = np.repeat(tract_ids, n_samples)
        households_extend[inputs.TRACT.name] = tracts
        return households_extend, persons

    @staticmethod
    def _str_broadcast(string, list1):
        """Prefix every element of list1 with `string` and an underscore."""
        return ['_'.join([string, element]) for element in list1]

    @staticmethod
    def _format_data(households_data, persons_data):
        """One-hot encode household/person bins and merge person counts onto households.

        Returns:
            (pandas.DataFrame, pandas.DataFrame): formatted households & persons
        """
        hh_size = pandas.get_dummies(households_data[inputs.NUM_PEOPLE.name])
        # Prepend column name to bin name to prevent bin collision
        hh_size.columns = HouseholdAllocator\
            ._str_broadcast(inputs.NUM_PEOPLE.name, hh_size.columns.tolist())
        hh_vehicles = pandas.get_dummies(households_data[inputs.NUM_VEHICLES.name])
        hh_vehicles.columns = HouseholdAllocator\
            ._str_broadcast(inputs.NUM_VEHICLES.name, hh_vehicles.columns.tolist())
        households_data = pandas.concat([households_data, hh_size, hh_vehicles], axis=1)

        hp_ages = pandas.get_dummies(persons_data[inputs.AGE.name])
        hp_ages.columns = HouseholdAllocator\
            ._str_broadcast(inputs.AGE.name, list(inputs.AGE.possible_values))
        persons_data = pandas.concat([persons_data, hp_ages], axis=1)

        persons_trimmed = persons_data[[
            inputs.SERIAL_NUMBER.name
        ] + hp_ages.columns.tolist()
        ]
        # Get counts we need
        persons_trimmed = persons_trimmed.groupby(inputs.SERIAL_NUMBER.name).sum()
        households_trimmed = households_data[[
            inputs.SERIAL_NUMBER.name,
            inputs.NUM_PEOPLE.name,
            inputs.NUM_VEHICLES.name,
            inputs.HOUSEHOLD_WEIGHT.name
        ] + hh_size.columns.tolist()
            + hh_vehicles.columns.tolist()
        ]
        # Merge
        households_out = pandas.merge(
            households_trimmed, persons_trimmed, how='inner',
            left_on=inputs.SERIAL_NUMBER.name, right_index=True, sort=True
        )
        persons_out = persons_data[[
            inputs.SERIAL_NUMBER.name,
            inputs.SEX.name,
            inputs.AGE.name
        ]]
        return households_out, persons_out
| [
"numpy.tile",
"numpy.mat",
"collections.namedtuple",
"doppelganger.listbalancer.discretize_multi_weights",
"numpy.repeat",
"pandas.read_csv",
"numpy.ones",
"doppelganger.listbalancer.balance_multi_cvx",
"pandas.merge",
"numpy.zeros",
"collections.defaultdict",
"pandas.get_dummies",
"pandas.c... | [((837, 887), 'collections.namedtuple', 'namedtuple', (['"""CountInformation"""', "['tract', 'count']"], {}), "('CountInformation', ['tract', 'count'])\n", (847, 887), False, 'from collections import defaultdict, namedtuple\n'), ((1309, 1340), 'pandas.read_csv', 'pandas.read_csv', (['households_csv'], {}), '(households_csv)\n', (1324, 1340), False, 'import pandas\n'), ((1369, 1397), 'pandas.read_csv', 'pandas.read_csv', (['persons_csv'], {}), '(persons_csv)\n', (1384, 1397), False, 'import pandas\n'), ((2779, 2796), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2790, 2796), False, 'from collections import defaultdict, namedtuple\n'), ((5693, 5717), 'numpy.mat', 'np.mat', (['([1] * n_controls)'], {}), '([1] * n_controls)\n', (5699, 5717), True, 'import numpy as np\n'), ((5738, 5763), 'numpy.tile', 'np.tile', (['w', '(n_tracts, 1)'], {}), '(w, (n_tracts, 1))\n', (5745, 5763), True, 'import numpy as np\n'), ((6150, 6226), 'doppelganger.listbalancer.balance_multi_cvx', 'balance_multi_cvx', (['hh_table', 'A', 'B', 'w_extend', '(gamma * mu_extend.T)', 'meta_gamma'], {}), '(hh_table, A, B, w_extend, gamma * mu_extend.T, meta_gamma)\n', (6167, 6226), False, 'from doppelganger.listbalancer import balance_multi_cvx, discretize_multi_weights\n'), ((6400, 6426), 'numpy.zeros', 'np.zeros', (['hh_weights.shape'], {}), '(hh_weights.shape)\n', (6408, 6426), True, 'import numpy as np\n'), ((6512, 6558), 'doppelganger.listbalancer.discretize_multi_weights', 'discretize_multi_weights', (['hh_table', 'hh_weights'], {}), '(hh_table, hh_weights)\n', (6536, 6558), False, 'from doppelganger.listbalancer import balance_multi_cvx, discretize_multi_weights\n'), ((6712, 6750), 'pandas.concat', 'pandas.concat', (['([households] * n_tracts)'], {}), '([households] * n_tracts)\n', (6725, 6750), False, 'import pandas\n'), ((6841, 6872), 'numpy.repeat', 'np.repeat', (['tract_ids', 'n_samples'], {}), '(tract_ids, n_samples)\n', (6850, 6872), True, 'import 
numpy as np\n'), ((7184, 7243), 'pandas.get_dummies', 'pandas.get_dummies', (['households_data[inputs.NUM_PEOPLE.name]'], {}), '(households_data[inputs.NUM_PEOPLE.name])\n', (7202, 7243), False, 'import pandas\n'), ((7458, 7519), 'pandas.get_dummies', 'pandas.get_dummies', (['households_data[inputs.NUM_VEHICLES.name]'], {}), '(households_data[inputs.NUM_VEHICLES.name])\n', (7476, 7519), False, 'import pandas\n'), ((7680, 7742), 'pandas.concat', 'pandas.concat', (['[households_data, hh_size, hh_vehicles]'], {'axis': '(1)'}), '([households_data, hh_size, hh_vehicles], axis=1)\n', (7693, 7742), False, 'import pandas\n'), ((7762, 7811), 'pandas.get_dummies', 'pandas.get_dummies', (['persons_data[inputs.AGE.name]'], {}), '(persons_data[inputs.AGE.name])\n', (7780, 7811), False, 'import pandas\n'), ((7960, 8006), 'pandas.concat', 'pandas.concat', (['[persons_data, hp_ages]'], {'axis': '(1)'}), '([persons_data, hp_ages], axis=1)\n', (7973, 8006), False, 'import pandas\n'), ((8591, 8722), 'pandas.merge', 'pandas.merge', (['households_trimmed', 'persons_trimmed'], {'how': '"""inner"""', 'left_on': 'inputs.SERIAL_NUMBER.name', 'right_index': '(True)', 'sort': '(True)'}), "(households_trimmed, persons_trimmed, how='inner', left_on=\n inputs.SERIAL_NUMBER.name, right_index=True, sort=True)\n", (8603, 8722), False, 'import pandas\n'), ((5791, 5817), 'numpy.tile', 'np.tile', (['mu', '(n_tracts, 1)'], {}), '(mu, (n_tracts, 1))\n', (5798, 5817), True, 'import numpy as np\n'), ((5845, 5867), 'numpy.ones', 'np.ones', (['(1, n_tracts)'], {}), '((1, n_tracts))\n', (5852, 5867), True, 'import numpy as np\n')] |
import streamlit as st
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn') # change the default style
def app():
    """Render the Streamlit EDA page for the Portuguese bank-marketing dataset.

    Reads 'projectdataset-1.csv' from the working directory, recodes the
    numeric target column 'Class' to 'no'/'yes', and renders a sequence of
    summary tables and seaborn/matplotlib charts. All output goes to the
    Streamlit page; nothing is returned.
    """
    st.set_option('deprecation.showPyplotGlobalUse', False)
    st.title('EDA')
    st.subheader("Portuguese Bank Marketing Dataset")
    df = pd.read_csv('projectdataset-1.csv')
    # Map the numeric target codes (1, 2) to readable labels ('no', 'yes')
    df.Class.replace((1, 2), ('no', 'yes'), inplace=True)
    # the table
    st.write(df)
    #Pie Chart so show % of sub/not sub
    st.subheader("Subscribed vs Not Subscribed")
    labels = ["Not \nsubscribed", "Subscribed"]
    explode = (0, 0.1)
    fig = plt.figure()
    ax = fig.add_axes([0,0,1,1])
    # value_counts() orders by frequency, so the majority class pairs with
    # the first label ("Not subscribed")
    ax.pie(df['Class'].value_counts(),
           labels = labels,
           explode = explode,
           autopct ='%1.2f%%',
           frame = True,
           textprops = dict(color ="black", size=12))
    ax.axis('equal')
    plt.title('Subcription to the term deposit\n% of Total Clients',
              loc='left',
              color = 'black',
              fontsize = '18')
    plt.show()
    st.pyplot()
    #correlation matrix for 9 features
    # Drop categorical / target columns so corr() only sees numeric features
    X = df.drop(['Class', 'job', 'marital', 'education','default','loan', 'campaign', 'previous'], axis=1)
    plt.subplots(figsize=(15,10))
    ax = plt.axes()
    ax.set_title("Marketing Characteristic Heatmap")
    corr = X.corr()
    sns.heatmap(corr,
                xticklabels=corr.columns.values,
                yticklabels=corr.columns.values,
                cmap="Blues")
    plt.show()
    st.subheader("Correlation Matrix")
    st.pyplot()
    #Chart for Age
    st.subheader("Age Feature")
    plt.figure(figsize=(19, 9))
    sns.countplot(data=df, x='age', hue='Class')
    st.pyplot()
    st.write("")
    # Chart for month
    # NOTE(review): df_age aliases df (no copy), so the month recoding below
    # mutates df as well -- confirm that is intended.
    df_age = df
    st.subheader("Month Feature")
    fig, ax = plt.subplots(figsize = (15, 5))
    sns.countplot(x = 'month', data = df_age, order = ['jan','feb','mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'])
    ax.set_xlabel("Months")
    ax.set_ylabel("Count")
    ax.set_title("Count of contacts made in each month")
    st.pyplot()
    # Recode month abbreviations to integers 1-12 for grouping and sorting
    df_age.loc[df_age['month']=='jan','month']=1
    df_age.loc[df_age['month']=='feb','month']=2
    df_age.loc[df_age['month']=='mar','month']=3
    df_age.loc[df_age['month']=='apr','month']=4
    df_age.loc[df_age['month']=='may','month']=5
    df_age.loc[df_age['month']=='jun','month']=6
    df_age.loc[df_age['month']=='jul','month']=7
    df_age.loc[df_age['month']=='aug','month']=8
    df_age.loc[df_age['month']=='sep','month']=9
    df_age.loc[df_age['month']=='oct','month']=10
    df_age.loc[df_age['month']=='nov','month']=11
    df_age.loc[df_age['month']=='dec','month']=12
    # dict1 maps (month, class) -> sub-DataFrame for per-month yes/no counts
    dict1=dict(list(df_age.groupby(['month','Class'])))
    list1=[1,2,3,4,5,6,7,8,9,10,11,12]
    no=[]
    yes=[]
    months=[]
    for i in list1:
        months.append(i)
        for j in ['no','yes']:
            if(j=='no'):
                no.append(dict1[i,j].count()['Class'])
            else:
                yes.append(dict1[i,j].count()['Class'])
    total_count_per_month=[]
    dict2=dict(list(df.groupby(['month'])))
    for i in list1:
        total_count_per_month.append(dict2[i].count()['Class'])
    # Build a per-month summary table with subscription rates
    month_wise=pd.DataFrame()
    month_wise['Months']=months
    month_wise['Total Enteries per month']=total_count_per_month
    month_wise['Count of Subscribed']=yes
    month_wise['Count of Not Sub']=no
    month_wise['Subscription Rate %']=round((month_wise['Count of Subscribed']/month_wise['Total Enteries per month'])*100)
    month_wise['Not Sub Rate %']=round((month_wise['Count of Not Sub']/month_wise['Total Enteries per month'])*100)
    month_wise=month_wise.sort_values("Months",ascending=True)
    # NOTE(review): the figures below (13766, 7%, 477, 52%) are hard-coded,
    # not derived from month_wise -- confirm they still match the CSV.
    st.write("Based off the chart above, May has the most contact made to the customer (",13766,") but the least amount of subscription rate (",7,"%) compared to March which has the second least contact rate (",477,") but the highest subscription rate (",52,"%)")
    st.dataframe(month_wise)
    ##Housing
    st.subheader("Housing Feature")
    housing_n=df[df['housing']=='no']
    housing_y=df[df['housing']=='yes']
    total=df.shape[0]
    no=housing_n.count()['Class']
    yes=housing_y.count()['Class']
    sns.countplot(df['housing'])
    st.pyplot()
    st.write(round((yes/total)*100,2),"% of customers, which mean",yes," has a house loan and" ,round((no/total)*100,2),"% do not have a house loan with a count of",no)
    # Within the house-loan holders only: how many subscribed vs not
    no=housing_y[housing_y['Class']=='no'].count()['Class']
    yes=housing_y[housing_y['Class']=='yes'].count()['Class']
    total=housing_y.count()['Class']
    st.write("Out of the total amount of customers that have a house loan",round((yes/total)*100,2)," % subscribed to term deposit and",round((no/total)*100,2),"% did not subscribe")
    ##Contact
    st.subheader("Contact Feature")
    sns.countplot(df['contact'],hue=df['Class'])
    st.pyplot()
    ##duration
    st.subheader("Duration Feature")
    plt.figure(figsize=(20,5))
    plt.xticks(np.arange(0,5000,150))
    sns.scatterplot(df['duration'],df['campaign'],hue=df['Class'])
    st.pyplot()
    st.write("For duration of the calls, if the call had a shorter duration the customers least likely subscribed to the term deposit while when calls lasted longer you can see more customers subscribing.")
| [
"streamlit.pyplot",
"pandas.read_csv",
"matplotlib.pyplot.show",
"pandas.DataFrame",
"streamlit.write",
"matplotlib.pyplot.style.use",
"seaborn.heatmap",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"streamlit.subheader",
"streamlit.dataframe",
"seaborn.scatterplot",
"seaborn.countp... | [((198, 222), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (211, 222), True, 'import matplotlib.pyplot as plt\n'), ((268, 323), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showPyplotGlobalUse"""', '(False)'], {}), "('deprecation.showPyplotGlobalUse', False)\n", (281, 323), True, 'import streamlit as st\n'), ((329, 344), 'streamlit.title', 'st.title', (['"""EDA"""'], {}), "('EDA')\n", (337, 344), True, 'import streamlit as st\n'), ((350, 399), 'streamlit.subheader', 'st.subheader', (['"""Portuguese Bank Marketing Dataset"""'], {}), "('Portuguese Bank Marketing Dataset')\n", (362, 399), True, 'import streamlit as st\n'), ((409, 444), 'pandas.read_csv', 'pd.read_csv', (['"""projectdataset-1.csv"""'], {}), "('projectdataset-1.csv')\n", (420, 444), True, 'import pandas as pd\n'), ((521, 533), 'streamlit.write', 'st.write', (['df'], {}), '(df)\n', (529, 533), True, 'import streamlit as st\n'), ((575, 619), 'streamlit.subheader', 'st.subheader', (['"""Subscribed vs Not Subscribed"""'], {}), "('Subscribed vs Not Subscribed')\n", (587, 619), True, 'import streamlit as st\n'), ((701, 713), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (711, 713), True, 'import matplotlib.pyplot as plt\n'), ((986, 1100), 'matplotlib.pyplot.title', 'plt.title', (['"""Subcription to the term deposit\n% of Total Clients"""'], {'loc': '"""left"""', 'color': '"""black"""', 'fontsize': '"""18"""'}), '("""Subcription to the term deposit\n% of Total Clients""", loc=\n \'left\', color=\'black\', fontsize=\'18\')\n', (995, 1100), True, 'import matplotlib.pyplot as plt\n'), ((1130, 1140), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1138, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1145, 1156), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (1154, 1156), True, 'import streamlit as st\n'), ((1304, 1334), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 10)'}), 
'(figsize=(15, 10))\n', (1316, 1334), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1353), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1351, 1353), True, 'import matplotlib.pyplot as plt\n'), ((1431, 1533), 'seaborn.heatmap', 'sns.heatmap', (['corr'], {'xticklabels': 'corr.columns.values', 'yticklabels': 'corr.columns.values', 'cmap': '"""Blues"""'}), "(corr, xticklabels=corr.columns.values, yticklabels=corr.columns\n .values, cmap='Blues')\n", (1442, 1533), True, 'import seaborn as sns\n'), ((1581, 1591), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1589, 1591), True, 'import matplotlib.pyplot as plt\n'), ((1596, 1630), 'streamlit.subheader', 'st.subheader', (['"""Correlation Matrix"""'], {}), "('Correlation Matrix')\n", (1608, 1630), True, 'import streamlit as st\n'), ((1636, 1647), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (1645, 1647), True, 'import streamlit as st\n'), ((1668, 1695), 'streamlit.subheader', 'st.subheader', (['"""Age Feature"""'], {}), "('Age Feature')\n", (1680, 1695), True, 'import streamlit as st\n'), ((1700, 1727), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(19, 9)'}), '(figsize=(19, 9))\n', (1710, 1727), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1776), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'df', 'x': '"""age"""', 'hue': '"""Class"""'}), "(data=df, x='age', hue='Class')\n", (1745, 1776), True, 'import seaborn as sns\n'), ((1781, 1792), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (1790, 1792), True, 'import streamlit as st\n'), ((1798, 1810), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (1806, 1810), True, 'import streamlit as st\n'), ((1849, 1878), 'streamlit.subheader', 'st.subheader', (['"""Month Feature"""'], {}), "('Month Feature')\n", (1861, 1878), True, 'import streamlit as st\n'), ((1893, 1922), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (1905, 1922), True, 'import 
matplotlib.pyplot as plt\n'), ((1929, 2062), 'seaborn.countplot', 'sns.countplot', ([], {'x': '"""month"""', 'data': 'df_age', 'order': "['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',\n 'nov', 'dec']"}), "(x='month', data=df_age, order=['jan', 'feb', 'mar', 'apr',\n 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'])\n", (1942, 2062), True, 'import seaborn as sns\n'), ((2179, 2190), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (2188, 2190), True, 'import streamlit as st\n'), ((3297, 3311), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3309, 3311), True, 'import pandas as pd\n'), ((3801, 4086), 'streamlit.write', 'st.write', (['"""Based off the chart above, May has the most contact made to the customer ("""', '(13766)', '""") but the least amount of subscription rate ("""', '(7)', '"""%) compared to March which has the second least contact rate ("""', '(477)', '""") but the highest subscription rate ("""', '(52)', '"""%)"""'], {}), "(\n 'Based off the chart above, May has the most contact made to the customer ('\n , 13766, ') but the least amount of subscription rate (', 7,\n '%) compared to March which has the second least contact rate (', 477,\n ') but the highest subscription rate (', 52, '%)')\n", (3809, 4086), True, 'import streamlit as st\n'), ((4065, 4089), 'streamlit.dataframe', 'st.dataframe', (['month_wise'], {}), '(month_wise)\n', (4077, 4089), True, 'import streamlit as st\n'), ((4106, 4137), 'streamlit.subheader', 'st.subheader', (['"""Housing Feature"""'], {}), "('Housing Feature')\n", (4118, 4137), True, 'import streamlit as st\n'), ((4311, 4339), 'seaborn.countplot', 'sns.countplot', (["df['housing']"], {}), "(df['housing'])\n", (4324, 4339), True, 'import seaborn as sns\n'), ((4344, 4355), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (4353, 4355), True, 'import streamlit as st\n'), ((4887, 4918), 'streamlit.subheader', 'st.subheader', (['"""Contact Feature"""'], {}), "('Contact Feature')\n", (4899, 
4918), True, 'import streamlit as st\n'), ((4923, 4968), 'seaborn.countplot', 'sns.countplot', (["df['contact']"], {'hue': "df['Class']"}), "(df['contact'], hue=df['Class'])\n", (4936, 4968), True, 'import seaborn as sns\n'), ((4972, 4983), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (4981, 4983), True, 'import streamlit as st\n'), ((5000, 5032), 'streamlit.subheader', 'st.subheader', (['"""Duration Feature"""'], {}), "('Duration Feature')\n", (5012, 5032), True, 'import streamlit as st\n'), ((5037, 5064), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (5047, 5064), True, 'import matplotlib.pyplot as plt\n'), ((5106, 5170), 'seaborn.scatterplot', 'sns.scatterplot', (["df['duration']", "df['campaign']"], {'hue': "df['Class']"}), "(df['duration'], df['campaign'], hue=df['Class'])\n", (5121, 5170), True, 'import seaborn as sns\n'), ((5173, 5184), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (5182, 5184), True, 'import streamlit as st\n'), ((5189, 5401), 'streamlit.write', 'st.write', (['"""For duration of the calls, if the call had a shorter duration the customers least likely subscribed to the term deposit while when calls lasted longer you can see more customers subscribing."""'], {}), "(\n 'For duration of the calls, if the call had a shorter duration the customers least likely subscribed to the term deposit while when calls lasted longer you can see more customers subscribing.'\n )\n", (5197, 5401), True, 'import streamlit as st\n'), ((5079, 5102), 'numpy.arange', 'np.arange', (['(0)', '(5000)', '(150)'], {}), '(0, 5000, 150)\n', (5088, 5102), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 13:19:48 2018
@author: <NAME>
"""
# Libraries
import math
import numpy as np
# Shifts array to the right by n elements
# and inserts n zeros at the beginning of the array
def arr_shift(A,n):
    """Shift A to the right by n samples, zero-padding the front.

    The result has the same length as A; the last n samples fall off the end.
    """
    padded = np.insert(A, 0, np.zeros(n))
    return padded[:len(A)]
# Population AIF from Parker MRM 2006
def AIF(t0,t):
    """Population arterial input function of Parker et al. (MRM 2006).

    Evaluates Eq. 1 of the paper at the sample times t (seconds; the model
    constants are in minutes, hence the /60) and shifts the curve right so
    the bolus starts at baseline time t0.
    # assumes t is uniformly sampled from 0, so t[1] is the sample spacing
    """
    # Parameter values from Table 1 (Parker MRM 2006)
    A1 = 0.809
    A2 = 0.330
    T1 = 0.17046
    T2 = 0.365
    sigma1 = 0.0563
    sigma2 = 0.132
    alpha = 1.050
    beta = 0.1685
    s = 38.078
    tau = 0.483
    # Eq. 1 (Parker 2006): two Gaussian bolus terms plus a sigmoid-gated
    # exponential washout
    gauss_norm = 1/math.sqrt(2*math.pi)
    Ca = []
    for i in t:
        m = i/60  # seconds -> minutes
        g1 = (A1/sigma1)*gauss_norm*math.exp(-(m-T1)**2/(2*sigma1**2))
        g2 = (A2/sigma2)*gauss_norm*math.exp(-(m-T2)**2/(2*sigma2**2))
        washout = alpha*math.exp(-beta*i/60)/(1+math.exp(-s*(m-tau)))
        Ca.append(g1 + g2 + washout)
    # baseline shift
    Ca = arr_shift(Ca,int(t0/t[1])-1)
    return(Ca)
# Population AIF from Parker MRM 2006 - modified for a longer injection time
# Population AIF from Parker MRM 2006 - modified for a longer injection time
def variableAIF(inj_time,t,t0):
    """Parker (MRM 2006) population AIF broadened for a finite injection time.

    The standard AIF assumes an injection of 0.1 mmol/kg of 0.5 mmol/ml
    contrast agent at 3 ml/s; for a 70 kg body weight that takes
    I = 70*(1/5)*(1/3) seconds. To approximate a longer injection lasting
    inj_time seconds, n = round(inj_time/I) copies of the standard AIF,
    each delayed by one nominal interval I, are superimposed and averaged.

    Parameters: inj_time - requested injection time [s]; t - sample times
    [s] (assumed uniform, so t[1] is the spacing); t0 - baseline delay [s].
    Returns the broadened AIF sampled at t.
    """
    I = 70*(1/5)*(1/3) #seconds
    Ca = AIF(t0,t) # standard AIF
    # Number of times the standard AIF must be shifted by I to match the
    # required injection time
    n = int(round(inj_time/I))
    # Superimpose the original plus n shifted copies of the AIF
    shift = int(I/t[1])
    Ca_sup = np.zeros(shape=(n+1,len(Ca)))
    Ca_sup[0] = Ca
    for i in range(1,n+1):
        Ca_sup[i] = arr_shift(Ca,shift*i)
    # NOTE(review): n+1 rows are summed but divided by n -- confirm the
    # intended normalisation. The actual injection time realised is I*n,
    # which may differ slightly from the requested inj_time.
    Ca_new = (1/n)*np.sum(Ca_sup,axis=0)
    return(Ca_new)
# Population AIF for a preclinical case - from McGrath MRM 2009
def preclinicalAIF(t0,t):
    """Population AIF for a preclinical case, Model B of McGrath (MRM 2009).

    Piecewise: linear rise up to t1, then bi-exponential decay (Eq. 5).
    # assumes t is uniformly sampled from 0, so t[1] is the sample spacing
    """
    # Model B - parameter values defined in table 1 (McGrath MRM 2009)
    A1 = 3.4
    A2 = 1.81
    k1 = 0.045
    k2 = 0.0015
    t1 = 7
    # Eq. 5 (McGrath MRM 2009): (A1+A2)*(i/t1) ramp for i<=t1, then decay
    Ca = [A1*(i/t1)+A2*(i/t1) if i<=t1 else A1*np.exp(-k1*(i-t1))+A2*np.exp(-k2*(i-t1)) for i in t]
    # baseline shift
    Ca = arr_shift(Ca,int(t0/t[1])-1)
return(Ca) | [
"numpy.insert",
"math.sqrt",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"math.exp"
] | [((279, 290), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (287, 290), True, 'import numpy as np\n'), ((307, 329), 'numpy.insert', 'np.insert', (['A', '(0)', 'shift'], {}), '(A, 0, shift)\n', (316, 329), True, 'import numpy as np\n'), ((1779, 1801), 'numpy.sum', 'np.sum', (['Ca_sup'], {'axis': '(0)'}), '(Ca_sup, axis=0)\n', (1785, 1801), True, 'import numpy as np\n'), ((744, 793), 'math.exp', 'math.exp', (['(-(i / 60 - T1) ** 2 / (2 * sigma1 ** 2))'], {}), '(-(i / 60 - T1) ** 2 / (2 * sigma1 ** 2))\n', (752, 793), False, 'import math\n'), ((831, 880), 'math.exp', 'math.exp', (['(-(i / 60 - T2) ** 2 / (2 * sigma2 ** 2))'], {}), '(-(i / 60 - T2) ** 2 / (2 * sigma2 ** 2))\n', (839, 880), False, 'import math\n'), ((887, 911), 'math.exp', 'math.exp', (['(-beta * i / 60)'], {}), '(-beta * i / 60)\n', (895, 911), False, 'import math\n'), ((911, 940), 'math.exp', 'math.exp', (['(-s * (i / 60 - tau))'], {}), '(-s * (i / 60 - tau))\n', (919, 940), False, 'import math\n'), ((2210, 2232), 'numpy.exp', 'np.exp', (['(-k1 * (i - t1))'], {}), '(-k1 * (i - t1))\n', (2216, 2232), True, 'import numpy as np\n'), ((2232, 2254), 'numpy.exp', 'np.exp', (['(-k2 * (i - t1))'], {}), '(-k2 * (i - t1))\n', (2238, 2254), True, 'import numpy as np\n'), ((722, 744), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (731, 744), False, 'import math\n'), ((809, 831), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (818, 831), False, 'import math\n')] |
import os, datetime, requests
import tensorflow as tf
from sklearn.model_selection import train_test_split as sk_train_test_split
import matplotlib.pyplot as plt
import numpy as np
from .datagen import DataGen
def train_test_split(ids):
    """Split ids into a 75% training / 25% test partition."""
    train_ids, test_ids = sk_train_test_split(ids, train_size=0.75)
    return train_ids, test_ids
def train_val_test_split(ids):
    """Split ids into 70% train, 15% validation and 15% test partitions."""
    train_ids, remainder = sk_train_test_split(ids, train_size=0.7)
    val_ids, test_ids = sk_train_test_split(remainder, train_size=0.5)
    return train_ids, val_ids, test_ids
def build_datagens(ids, x=None, y=None, batch_size=32, helper=None):
    """Build train/validation/test DataGen instances from a 70/15/15 id split.

    Each generator shares the same x, y, batch_size and helper arguments;
    only the id subset differs. Returns (train_gen, val_gen, test_gen).
    """
    splits = train_val_test_split(ids)
    train_gen, val_gen, test_gen = (
        DataGen(subset, x=x, y=y, batch_size=batch_size, helper=helper)
        for subset in splits
    )
    return train_gen, val_gen, test_gen
def _moving_average(x, w=5):
return np.convolve(x, np.ones(w), 'valid') / w
def history_fit_plots(name, history, base_dir='./model_plots', smoothing=False):
    """Save a row of training-curve plots for a Keras fit history.

    Always plots loss; additionally plots accuracy, MSE and MAE when those
    keys are present in history.history. If smoothing is True, each series
    is smoothed with _moving_average before plotting. The figure is saved
    as '<name>_plots.png' under base_dir (created if needed).

    NOTE(review): curr can reach 4 when all three optional metrics are
    present, but only 3 subplot slots exist (1x3) -- confirm no model logs
    accuracy, MSE and MAE simultaneously.
    """
    fig = plt.figure(figsize=(18, 6))
    fig.set_facecolor('white')
    plt.ioff()
    curr = 1
    # loss
    loss = history.history['loss']
    if smoothing:
        loss = _moving_average(loss)
    val_loss = history.history['val_loss']
    if smoothing:
        val_loss = _moving_average(val_loss)
    # epochs_range is derived from the (possibly smoothed) loss length and
    # reused for every metric below
    epochs_range = range(1, len(loss)+1)
    plt.subplot(1, 3, curr)
    plt.plot(epochs_range, loss, label='Training Loss')
    plt.plot(epochs_range, val_loss, label='Validation Loss')
    plt.xlabel('Epochs')
    plt.xticks([x for x in epochs_range if x==1 or x % 5 == 0])
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.grid(visible=True, color='#eeeeee', linestyle='-', linewidth=1)
    curr += 1
    # accuracy
    if 'accuracy' in history.history:
        accuracy = history.history['accuracy']
        if smoothing:
            accuracy = _moving_average(accuracy)
        val_accuracy = history.history['val_accuracy']
        if smoothing:
            val_accuracy = _moving_average(val_accuracy)
        plt.subplot(1, 3, curr)
        plt.plot(epochs_range, accuracy, label='Training Accuracy')
        plt.plot(epochs_range, val_accuracy, label='Validation Accuracy')
        plt.xlabel('Epochs')
        plt.xticks([x for x in epochs_range if x==1 or x % 5 == 0])
        plt.legend(loc='lower right')
        plt.title('Training and Validation Accuracy')
        plt.grid(visible=True, color='#eeeeee', linestyle='-', linewidth=1)
        curr += 1
    # mean squared error
    if 'mean_squared_error' in history.history:
        mean_squared_error = history.history['mean_squared_error']
        if smoothing:
            mean_squared_error = _moving_average(mean_squared_error)
        val_mean_squared_error = history.history['val_mean_squared_error']
        if smoothing:
            val_mean_squared_error = _moving_average(val_mean_squared_error)
        plt.subplot(1, 3, curr)
        plt.plot(epochs_range, mean_squared_error, label='Training MSE')
        plt.plot(epochs_range, val_mean_squared_error, label='Validation MSE')
        plt.xlabel('Epochs')
        plt.xticks([x for x in epochs_range if x==1 or x % 5 == 0])
        plt.legend(loc='upper right')
        plt.title('Training and Validation MSE')
        plt.grid(visible=True, color='#eeeeee', linestyle='-', linewidth=1)
        curr += 1
    # mean absolute error
    if 'mean_absolute_error' in history.history:
        mean_absolute_error = history.history['mean_absolute_error']
        if smoothing:
            mean_absolute_error = _moving_average(mean_absolute_error)
        val_mean_absolute_error = history.history['val_mean_absolute_error']
        if smoothing:
            val_mean_absolute_error = _moving_average(val_mean_absolute_error)
        plt.subplot(1, 3, curr)
        plt.plot(epochs_range, mean_absolute_error, label='Training MAE')
        plt.plot(epochs_range, val_mean_absolute_error, label='Validation MAE')
        plt.xlabel('Epochs')
        plt.xticks([x for x in epochs_range if x==1 or x % 5 == 0])
        plt.legend(loc='upper right')
        plt.title('Training and Validation MAE')
        plt.grid(visible=True, color='#eeeeee', linestyle='-', linewidth=1)
        curr += 1
    # save plot
    os.makedirs(base_dir, exist_ok=True)
    plt.savefig(os.path.join(base_dir, f'{ name }_plots.png'), bbox_inches='tight')
    plt.close(fig)
def my_callbacks(name=None, path=None, check_point=False, monitor='val_loss', mode='min', tensor_board=True):
    """Assemble a list of Keras callbacks.

    Optionally adds a best-only ModelCheckpoint (saved to path/name,
    watching `monitor` with `mode`) and a TensorBoard callback logging
    under logs/<YYYY-MM-DD>.
    """
    callbacks = []
    log_dir = os.path.join('logs', datetime.datetime.now().strftime("%Y-%m-%d"))
    if check_point:
        checkpoint = tf.keras.callbacks.ModelCheckpoint(
            filepath=os.path.join(path, name),
            monitor=monitor, mode=mode,
            save_best_only=True, verbose=1)
        callbacks.append(checkpoint)
    if tensor_board:
        callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1))
    return callbacks
| [
"matplotlib.pyplot.grid",
"numpy.ones",
"matplotlib.pyplot.xticks",
"os.makedirs",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.callbacks.TensorBoard",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.close",
"d... | [((266, 307), 'sklearn.model_selection.train_test_split', 'sk_train_test_split', (['ids'], {'train_size': '(0.75)'}), '(ids, train_size=0.75)\n', (285, 307), True, 'from sklearn.model_selection import train_test_split as sk_train_test_split\n'), ((398, 438), 'sklearn.model_selection.train_test_split', 'sk_train_test_split', (['ids'], {'train_size': '(0.7)'}), '(ids, train_size=0.7)\n', (417, 438), True, 'from sklearn.model_selection import train_test_split as sk_train_test_split\n'), ((463, 508), 'sklearn.model_selection.train_test_split', 'sk_train_test_split', (['IDs_rest'], {'train_size': '(0.5)'}), '(IDs_rest, train_size=0.5)\n', (482, 508), True, 'from sklearn.model_selection import train_test_split as sk_train_test_split\n'), ((1139, 1166), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 6)'}), '(figsize=(18, 6))\n', (1149, 1166), True, 'import matplotlib.pyplot as plt\n'), ((1202, 1212), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1210, 1212), True, 'import matplotlib.pyplot as plt\n'), ((1479, 1502), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', 'curr'], {}), '(1, 3, curr)\n', (1490, 1502), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1558), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'loss'], {'label': '"""Training Loss"""'}), "(epochs_range, loss, label='Training Loss')\n", (1515, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1563, 1620), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_loss'], {'label': '"""Validation Loss"""'}), "(epochs_range, val_loss, label='Validation Loss')\n", (1571, 1620), True, 'import matplotlib.pyplot as plt\n'), ((1625, 1645), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (1635, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1650, 1711), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[x for x in epochs_range if x == 1 or x % 5 == 0]'], {}), '([x for x in epochs_range if x == 1 or x % 5 == 0])\n', 
(1660, 1711), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1743), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (1724, 1743), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1789), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (1757, 1789), True, 'import matplotlib.pyplot as plt\n'), ((1794, 1861), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'visible': '(True)', 'color': '"""#eeeeee"""', 'linestyle': '"""-"""', 'linewidth': '(1)'}), "(visible=True, color='#eeeeee', linestyle='-', linewidth=1)\n", (1802, 1861), True, 'import matplotlib.pyplot as plt\n'), ((4408, 4444), 'os.makedirs', 'os.makedirs', (['base_dir'], {'exist_ok': '(True)'}), '(base_dir, exist_ok=True)\n', (4419, 4444), False, 'import os, datetime, requests\n'), ((4533, 4547), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4542, 4547), True, 'import matplotlib.pyplot as plt\n'), ((2190, 2213), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', 'curr'], {}), '(1, 3, curr)\n', (2201, 2213), True, 'import matplotlib.pyplot as plt\n'), ((2222, 2281), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'accuracy'], {'label': '"""Training Accuracy"""'}), "(epochs_range, accuracy, label='Training Accuracy')\n", (2230, 2281), True, 'import matplotlib.pyplot as plt\n'), ((2290, 2355), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_accuracy'], {'label': '"""Validation Accuracy"""'}), "(epochs_range, val_accuracy, label='Validation Accuracy')\n", (2298, 2355), True, 'import matplotlib.pyplot as plt\n'), ((2364, 2384), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (2374, 2384), True, 'import matplotlib.pyplot as plt\n'), ((2393, 2454), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[x for x in epochs_range if x == 1 or x % 5 == 0]'], {}), '([x for x in epochs_range if x == 1 or x % 5 == 
0])\n', (2403, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2461, 2490), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2471, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2499, 2544), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), "('Training and Validation Accuracy')\n", (2508, 2544), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2620), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'visible': '(True)', 'color': '"""#eeeeee"""', 'linestyle': '"""-"""', 'linewidth': '(1)'}), "(visible=True, color='#eeeeee', linestyle='-', linewidth=1)\n", (2561, 2620), True, 'import matplotlib.pyplot as plt\n'), ((3053, 3076), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', 'curr'], {}), '(1, 3, curr)\n', (3064, 3076), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3149), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'mean_squared_error'], {'label': '"""Training MSE"""'}), "(epochs_range, mean_squared_error, label='Training MSE')\n", (3093, 3149), True, 'import matplotlib.pyplot as plt\n'), ((3158, 3228), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_mean_squared_error'], {'label': '"""Validation MSE"""'}), "(epochs_range, val_mean_squared_error, label='Validation MSE')\n", (3166, 3228), True, 'import matplotlib.pyplot as plt\n'), ((3237, 3257), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (3247, 3257), True, 'import matplotlib.pyplot as plt\n'), ((3266, 3327), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[x for x in epochs_range if x == 1 or x % 5 == 0]'], {}), '([x for x in epochs_range if x == 1 or x % 5 == 0])\n', (3276, 3327), True, 'import matplotlib.pyplot as plt\n'), ((3334, 3363), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3344, 3363), True, 'import matplotlib.pyplot as plt\n'), ((3372, 3412), 
'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation MSE"""'], {}), "('Training and Validation MSE')\n", (3381, 3412), True, 'import matplotlib.pyplot as plt\n'), ((3421, 3488), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'visible': '(True)', 'color': '"""#eeeeee"""', 'linestyle': '"""-"""', 'linewidth': '(1)'}), "(visible=True, color='#eeeeee', linestyle='-', linewidth=1)\n", (3429, 3488), True, 'import matplotlib.pyplot as plt\n'), ((3931, 3954), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', 'curr'], {}), '(1, 3, curr)\n', (3942, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3963, 4028), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'mean_absolute_error'], {'label': '"""Training MAE"""'}), "(epochs_range, mean_absolute_error, label='Training MAE')\n", (3971, 4028), True, 'import matplotlib.pyplot as plt\n'), ((4037, 4108), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_mean_absolute_error'], {'label': '"""Validation MAE"""'}), "(epochs_range, val_mean_absolute_error, label='Validation MAE')\n", (4045, 4108), True, 'import matplotlib.pyplot as plt\n'), ((4117, 4137), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (4127, 4137), True, 'import matplotlib.pyplot as plt\n'), ((4146, 4207), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[x for x in epochs_range if x == 1 or x % 5 == 0]'], {}), '([x for x in epochs_range if x == 1 or x % 5 == 0])\n', (4156, 4207), True, 'import matplotlib.pyplot as plt\n'), ((4214, 4243), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4224, 4243), True, 'import matplotlib.pyplot as plt\n'), ((4252, 4292), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation MAE"""'], {}), "('Training and Validation MAE')\n", (4261, 4292), True, 'import matplotlib.pyplot as plt\n'), ((4301, 4368), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'visible': '(True)', 'color': '"""#eeeeee"""', 
'linestyle': '"""-"""', 'linewidth': '(1)'}), "(visible=True, color='#eeeeee', linestyle='-', linewidth=1)\n", (4309, 4368), True, 'import matplotlib.pyplot as plt\n'), ((4461, 4504), 'os.path.join', 'os.path.join', (['base_dir', 'f"""{name}_plots.png"""'], {}), "(base_dir, f'{name}_plots.png')\n", (4473, 4504), False, 'import os, datetime, requests\n'), ((1022, 1032), 'numpy.ones', 'np.ones', (['w'], {}), '(w)\n', (1029, 1032), True, 'import numpy as np\n'), ((5077, 5142), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(1)'}), '(log_dir=log_dir, histogram_freq=1)\n', (5107, 5142), True, 'import tensorflow as tf\n'), ((4694, 4717), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4715, 4717), False, 'import os, datetime, requests\n'), ((4855, 4879), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (4867, 4879), False, 'import os, datetime, requests\n')] |
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from numpy.linalg import inv
sys.path.append(os.path.join('../'))
from lib.BeamDynamicsTools.Boundary import Boundary
from lib.BeamDynamicsTools.Bfield import Bfield, BfieldTF, BfieldVF
from lib.BeamDynamicsTools.Trajectory import Trajectory
from lib.BeamDynamicsTools.Beam import Beam
from lib.BeamDynamicsTools.Ellipse import Ellipse
# ==============================================================================
# ====== Convert 6x6 Basis Matrix to 3x3 Basis Matrix ==========================
# ==============================================================================
def ConverM6toM3(M6):
    """Reduce a 6x6 basis matrix to the 3x3 sub-matrix built from rows and
    columns 0, 2 and 4 (every other index), returned as a float np.matrix."""
    picked = [0, 2, 4]
    M3 = np.matrix([[M6[r, c] for c in picked] for r in picked], float)
    return M3
#BS3=[]; Bt3=[];
# for i in range(len(BS)):
# BS3.append( ConverM6toM3(BS[i]) )
# Bt3.append( ConverM6toM3(Bt[i]) )
# ==============================================================================
# ====== Convert from trace 3D modified Sigma to Standard Sigma Matrix =========
# ==============================================================================
# Reference (baseline) 6x6 sigma matrix; used as the default S0 in
# ConvertT3D when computing the fractional change dS.
Sigma0 = np.matrix([
    [0.5771000, 0.3980000, 0.000000, 0.000000, 0.000000, 0.000000],
    [0.3980000, 171.8262, 0.000000, 0.000000, 0.000000, 0.000000],
    [0.000000, 0.000000, 0.3439000, -.2715000, 0.000000, 0.000000],
    [0.000000, 0.000000, -.2715000, 238.3722, 0.000000, 0.000000],
    [0.000000, 0.000000, 0.000000, 0.000000, 1.297156, 2.343722],
    [0.000000, 0.000000, 0.000000, 0.000000, 2.343722, 134.9344]])
def ConvertT3D(MS, S0=Sigma0):
S = np.zeros((6, 6), float)
# Calculate Diagonal Elements
for i in range(6):
S[i, i] = MS[i, 0]**2
S[5, 5] = S[5, 5] * 100.0 # correct Units
# Calculate Off Diagonal Elements in the lower left triangle
for i in range(6):
for j in range(6):
if i != j and i < 5:
S[i, j] = MS[i + 1, j] * np.sqrt(S[i, i] * S[j, j])
# Copy lower right triangle to upper right to symmetrize
for i in range(6):
for j in range(6):
S[j, i] = S[i, j]
# calculate change
dS = np.zeros((6, 6), float)
for i in range(6):
for j in range(6):
if S0[i, j] != 0:
dS[i, j] = (S[i, j] - S0[i, j]) / S0[i, j]
return S, dS
# Output directory for the converted sigma matrices.
Path0 = './sigma_trace3d/'

# ---------------------------------------------------------------------------
# Trace-3D "modified" sigma matrices (inputs for ConvertT3D): column 0 holds
# the RMS sizes, the remaining non-zero entries hold correlation
# coefficients.  One matrix per magnet-current setting; "LC" variants are
# low-current runs.  TODO confirm what the "NG"/"NGH" suffixes denote.
# ---------------------------------------------------------------------------
Sinjection = np.matrix([
    [2.8834, 0.000, 0.000, 0.0000, 0.000, 0.000],
    [3.4922, -0.154, 0.000, 0.0000, 0.000, 0.000],
    [3.7727, 0.000, 0.000, 0.0000, 0.000, 0.000],
    [5.5126, 0.000, 0.000, -0.9000, 0.000, 0.000],
    [5.8750, 0.000, 0.000, 0.0000, 0.000, 0.000],
    [1.2536, 0.000, 0.000, 0.0000, 0.000, 0.984]], float)

SinjectionLC = np.matrix([
    [2.9989, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
    [3.4066, -0.2140, 0.0000, 0.0000, 0.0000, 0.0000],
    [3.5736, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
    [5.8419, 0.0000, 0.0000, -0.9000, 0.0000, 0.0000],
    [5.5170, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
    [1.1617, 0.0000, 0.0000, 0.0000, 0.0000, 0.9790]], float)

Bend90 = np.matrix([
    [6.2429, 0.000, 0.000, 0.000, 0.000, 0.000],
    [3.7663, 0.906, 0.000, 0.000, 0.000, 0.000],
    [13.8997, 0.000, 0.000, 0.000, 0.000, 0.000],
    [13.4632, 0.000, 0.000, 0.982, 0.000, 0.000],
    [18.8279, 0.000, 0.000, -0.869, -0.932, 0.000],
    [1.2944, 0.000, 0.000, -0.921, -0.960, 0.989]], float)

# Final Modified Sigma Matrices
# ==================================================================== 0000 A
S0000 = np.matrix([
    [5.9546, 0.000, 0.000, 0.000, 0.000, 0.000],
    [3.7470, 0.895, 0.000, 0.000, 0.000, 0.000],
    [4.9859, 0.000, 0.000, 0.000, 0.000, 0.000],
    [5.4023, 0.000, 0.000, 0.942, 0.000, 0.000],
    [24.8496, 0.000, 0.000, 0.000, 0.000, 0.000],
    [1.2935, 0.000, 0.000, 0.000, 0.000, 0.999]], float)

S0000LC = np.matrix([
    [5.3088, 0.000, 0.000, 0.000, 0.000, 0.000],
    [3.4066, 0.835, 0.000, 0.000, 0.000, 0.000],
    [5.6937, 0.000, 0.000, 0.000, 0.000, 0.000],
    [5.8423, 0.000, 0.000, 0.962, 0.000, 0.000],
    [22.7092, 0.000, 0.000, 0.000, 0.000, 0.000],
    [1.1616, 0.000, 0.000, 0.000, 0.000, 0.999]], float)

# ==================================================================== 1600 A
S1600 = np.matrix([
    [5.7018, 0.000, 0.000, 0.000, 0.000, 0.000],
    [3.9816, 0.899, 0.000, 0.000, 0.000, 0.000],
    [5.2000, 0.000, 0.000, 0.000, 0.000, 0.000],
    [5.4841, 0.000, 0.000, 0.894, 0.000, 0.000],
    [25.1032, 0.000, 0.000, 0.188, 0.473, 0.000],
    [1.2938, 0.000, 0.000, 0.215, 0.493, 0.999]], float)

S1600NG = np.matrix([
    [6.3816, 0.000, 0.000, 0.000, 0.000, 0.000],
    [3.7599, 0.910, 0.000, 0.000, 0.000, 0.000],
    [5.6911, 0.000, 0.000, 0.000, 0.000, 0.000],
    [5.9285, 0.000, 0.000, 0.915, 0.000, 0.000],
    [26.2963, 0.000, 0.000, 0.170, 0.448, 0.000],
    [1.2944, 0.000, 0.000, 0.199, 0.472, 0.999]], float)

S1600LC = np.matrix([
    [5.0726, 0.000, 0.000, 0.000, 0.000, 0.000],
    [3.6450, 0.843, 0.000, 0.000, 0.000, 0.000],
    [5.8895, 0.000, 0.000, 0.000, 0.000, 0.000],
    [5.6828, 0.000, 0.000, 0.921, 0.000, 0.000],
    [22.9414, 0.000, 0.000, 0.137, 0.400, 0.000],
    [1.1616, 0.000, 0.000, 0.171, 0.429, 0.998]], float)

# NOTE(review): S1600NGH is defined but never converted or saved below --
# confirm whether it is still needed.
S1600NGH = np.matrix([
    [6.0681, 0.000, 0.000, 0.000, 0.000, 0.000],
    [4.5034, 0.794, 0.000, 0.000, 0.000, 0.000],
    [5.2285, 0.000, 0.000, 0.000, 0.000, 0.000],
    [5.4055, 0.000, 0.000, 0.945, 0.000, 0.000],
    [25.1140, 0.146, 0.594, 0.000, 0.000, 0.000],
    [1.2937, 0.186, 0.621, 0.000, 0.000, 0.998]], float)

# ==================================================================== 3200 A
# (a second, byte-identical assignment of S3120 was removed here)
S3120 = np.matrix([
    [5.5371, 0.000, 0.000, 0.000, 0.000, 0.000],
    [4.5848, 0.920, 0.000, 0.000, 0.000, 0.000],
    [5.4908, 0.000, 0.000, 0.000, 0.000, 0.000],
    [6.0884, 0.000, 0.000, 0.789, 0.000, 0.000],
    [25.2144, 0.000, 0.000, 0.357, 0.787, 0.000],
    [1.2941, 0.000, 0.000, 0.409, 0.813, 0.998]], float)

S3120NG = np.matrix([
    [6.4777, 0.000, 0.000, 0.000, 0.000, 0.000],
    [3.7630, 0.913, 0.000, 0.000, 0.000, 0.000],
    [6.0167, 0.000, 0.000, 0.000, 0.000, 0.000],
    [7.1834, 0.000, 0.000, 0.856, 0.000, 0.000],
    [26.5075, 0.000, 0.000, 0.330, 0.722, 0.000],
    [1.2946, 0.000, 0.000, 0.384, 0.757, 0.998]], float)

# ==================================================================== 4450 A
S4450 = np.matrix([
    [6.0062, 0.000, 0.000, 0.000, 0.000, 0.000],
    [6.5468, 0.967, 0.000, 0.000, 0.000, 0.000],
    [6.2312, 0.000, 0.000, 0.000, 0.000, 0.000],
    [6.7172, 0.000, 0.000, 0.598, 0.000, 0.000],
    [26.1901, 0.000, 0.000, 0.546, 0.962, 0.000],
    [1.2951, 0.000, 0.000, 0.662, 0.961, 0.995]], float)

# NOTE(review): S4450NG is numerically identical to S4450 -- confirm whether
# the "no gradient" data was ever filled in.
S4450NG = np.matrix([
    [6.0062, 0.000, 0.000, 0.000, 0.000, 0.000],
    [6.5468, 0.967, 0.000, 0.000, 0.000, 0.000],
    [6.2312, 0.000, 0.000, 0.000, 0.000, 0.000],
    [6.7172, 0.000, 0.000, 0.598, 0.000, 0.000],
    [26.1901, 0.000, 0.000, 0.546, 0.962, 0.000],
    [1.2951, 0.000, 0.000, 0.662, 0.961, 0.995]], float)
# Convert every Trace-3D modified sigma matrix to standard form.  The dS
# (relative change) output is not used here and is discarded each time.
SigmaInj, dS = ConvertT3D(Sinjection)
SigmaInjLC, dS = ConvertT3D(SinjectionLC)
Sigma0000, dS = ConvertT3D(S0000)
Sigma0000LC, dS = ConvertT3D(S0000LC)
Sigma1600, dS = ConvertT3D(S1600)
Sigma1600LC, dS = ConvertT3D(S1600LC)
Sigma1600NG, dS = ConvertT3D(S1600NG)
Sigma3120, dS = ConvertT3D(S3120)
Sigma3120NG, dS = ConvertT3D(S3120NG)
Sigma4450, dS = ConvertT3D(S4450)
Sigma4450NG, dS = ConvertT3D(S4450NG)
SigmaBend90, dS = ConvertT3D(Bend90)

# Write each converted matrix to its own text file under Path0.
for _fname, _sigma in (
        ('SigmaInjection.dat', SigmaInj),
        ('SigmaInjectionLC.dat', SigmaInjLC),
        ('Trace3DSigma_I_0.dat', Sigma0000),
        ('Trace3DSigma_I_0LC.dat', Sigma0000LC),
        ('Trace3DSigma_I_1600.dat', Sigma1600),
        ('Trace3DSigma_I_1600LC.dat', Sigma1600LC),
        ('Trace3DSigma_I_1600NG.dat', Sigma1600NG),
        ('Trace3DSigma_I_3120.dat', Sigma3120),
        ('Trace3DSigma_I_3120NG.dat', Sigma3120NG),
        ('Trace3DSigma_I_4450.dat', Sigma4450),
        ('Trace3DSigma_I_4450NG.dat', Sigma4450NG),
        ('Trace3DSigmaBend90.dat', SigmaBend90),
):
    np.savetxt(Path0 + _fname, _sigma)
# --- Compare the 0 A sigma matrices at full current vs. low current -------
plt.figure(1, figsize=(8, 8))
E0 = Ellipse(Sigma0000)    # beam ellipse at 1.000 mA (see legend below)
E1 = Ellipse(Sigma0000LC)  # beam ellipse at 0.001 mA (low-current run)
# Mismatch factors between the two ellipses (Type=1 variant).
M = E0.MismatchFactor(E1, Type=1)
plt.subplot(2, 2, 1)
E0.PlotXX1()
E1.PlotXX1()
# NOTE(review): this x-x' panel annotates M[1], the same index used for the
# y-y' panel below -- confirm whether M[0] was intended here.
plt.text(0, 0, 'M=%0.4f' % M[1], va='center', ha='center', color='r', size=16)
plt.legend((r'1.000 mA', '0.001 mA'), loc=2)
plt.subplot(2, 2, 2)
E0.PlotYY1()
E1.PlotYY1()
plt.text(0, 0, 'M=%0.4f' % M[1], va='center', ha='center', color='r', size=16)
plt.subplot(2, 2, 3)
E0.PlotZZ1()
E1.PlotZZ1()
plt.text(0, 0, 'M=%0.4f' % M[2], va='center', ha='center', color='r', size=16)
plt.subplot(2, 2, 4)
E0.PlotXY()
E1.PlotXY()
plt.text(0, 0, 'M=%0.4f' % M[3], va='center', ha='center', color='r', size=16)
plt.suptitle(r'Space Charge Effects: 1mA versus 1$\mu$A', size=16)
plt.savefig("./DataTRACE3D.png")
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"os.path.join",
"matplotlib.pyplot.figure",
"lib.BeamDynamicsTools.Ellipse.Ellipse",
"numpy.zeros",
"numpy.savetxt",
"numpy.matrix",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.legend"
] | [((1224, 1489), 'numpy.matrix', 'np.matrix', (['[[0.5771, 0.398, 0.0, 0.0, 0.0, 0.0], [0.398, 171.8262, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.3439, -0.2715, 0.0, 0.0], [0.0, 0.0, -0.2715, 238.3722, \n 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.297156, 2.343722], [0.0, 0.0, 0.0, \n 0.0, 2.343722, 134.9344]]'], {}), '([[0.5771, 0.398, 0.0, 0.0, 0.0, 0.0], [0.398, 171.8262, 0.0, 0.0,\n 0.0, 0.0], [0.0, 0.0, 0.3439, -0.2715, 0.0, 0.0], [0.0, 0.0, -0.2715, \n 238.3722, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.297156, 2.343722], [0.0, \n 0.0, 0.0, 0.0, 2.343722, 134.9344]])\n', (1233, 1489), True, 'import numpy as np\n'), ((2441, 2689), 'numpy.matrix', 'np.matrix', (['[[2.8834, 0.0, 0.0, 0.0, 0.0, 0.0], [3.4922, -0.154, 0.0, 0.0, 0.0, 0.0], [\n 3.7727, 0.0, 0.0, 0.0, 0.0, 0.0], [5.5126, 0.0, 0.0, -0.9, 0.0, 0.0], [\n 5.875, 0.0, 0.0, 0.0, 0.0, 0.0], [1.2536, 0.0, 0.0, 0.0, 0.0, 0.984]]', 'float'], {}), '([[2.8834, 0.0, 0.0, 0.0, 0.0, 0.0], [3.4922, -0.154, 0.0, 0.0, \n 0.0, 0.0], [3.7727, 0.0, 0.0, 0.0, 0.0, 0.0], [5.5126, 0.0, 0.0, -0.9, \n 0.0, 0.0], [5.875, 0.0, 0.0, 0.0, 0.0, 0.0], [1.2536, 0.0, 0.0, 0.0, \n 0.0, 0.984]], float)\n', (2450, 2689), True, 'import numpy as np\n'), ((2779, 3027), 'numpy.matrix', 'np.matrix', (['[[2.9989, 0.0, 0.0, 0.0, 0.0, 0.0], [3.4066, -0.214, 0.0, 0.0, 0.0, 0.0], [\n 3.5736, 0.0, 0.0, 0.0, 0.0, 0.0], [5.8419, 0.0, 0.0, -0.9, 0.0, 0.0], [\n 5.517, 0.0, 0.0, 0.0, 0.0, 0.0], [1.1617, 0.0, 0.0, 0.0, 0.0, 0.979]]', 'float'], {}), '([[2.9989, 0.0, 0.0, 0.0, 0.0, 0.0], [3.4066, -0.214, 0.0, 0.0, \n 0.0, 0.0], [3.5736, 0.0, 0.0, 0.0, 0.0, 0.0], [5.8419, 0.0, 0.0, -0.9, \n 0.0, 0.0], [5.517, 0.0, 0.0, 0.0, 0.0, 0.0], [1.1617, 0.0, 0.0, 0.0, \n 0.0, 0.979]], float)\n', (2788, 3027), True, 'import numpy as np\n'), ((3135, 3396), 'numpy.matrix', 'np.matrix', (['[[6.2429, 0.0, 0.0, 0.0, 0.0, 0.0], [3.7663, 0.906, 0.0, 0.0, 0.0, 0.0], [\n 13.8997, 0.0, 0.0, 0.0, 0.0, 0.0], [13.4632, 0.0, 0.0, 0.982, 0.0, 0.0],\n [18.8279, 0.0, 0.0, -0.869, -0.932, 0.0], [1.2944, 
0.0, 0.0, -0.921, -\n 0.96, 0.989]]', 'float'], {}), '([[6.2429, 0.0, 0.0, 0.0, 0.0, 0.0], [3.7663, 0.906, 0.0, 0.0, 0.0,\n 0.0], [13.8997, 0.0, 0.0, 0.0, 0.0, 0.0], [13.4632, 0.0, 0.0, 0.982, \n 0.0, 0.0], [18.8279, 0.0, 0.0, -0.869, -0.932, 0.0], [1.2944, 0.0, 0.0,\n -0.921, -0.96, 0.989]], float)\n', (3144, 3396), True, 'import numpy as np\n'), ((3576, 3823), 'numpy.matrix', 'np.matrix', (['[[5.9546, 0.0, 0.0, 0.0, 0.0, 0.0], [3.747, 0.895, 0.0, 0.0, 0.0, 0.0], [\n 4.9859, 0.0, 0.0, 0.0, 0.0, 0.0], [5.4023, 0.0, 0.0, 0.942, 0.0, 0.0],\n [24.8496, 0.0, 0.0, 0.0, 0.0, 0.0], [1.2935, 0.0, 0.0, 0.0, 0.0, 0.999]]', 'float'], {}), '([[5.9546, 0.0, 0.0, 0.0, 0.0, 0.0], [3.747, 0.895, 0.0, 0.0, 0.0,\n 0.0], [4.9859, 0.0, 0.0, 0.0, 0.0, 0.0], [5.4023, 0.0, 0.0, 0.942, 0.0,\n 0.0], [24.8496, 0.0, 0.0, 0.0, 0.0, 0.0], [1.2935, 0.0, 0.0, 0.0, 0.0, \n 0.999]], float)\n', (3585, 3823), True, 'import numpy as np\n'), ((3902, 4150), 'numpy.matrix', 'np.matrix', (['[[5.3088, 0.0, 0.0, 0.0, 0.0, 0.0], [3.4066, 0.835, 0.0, 0.0, 0.0, 0.0], [\n 5.6937, 0.0, 0.0, 0.0, 0.0, 0.0], [5.8423, 0.0, 0.0, 0.962, 0.0, 0.0],\n [22.7092, 0.0, 0.0, 0.0, 0.0, 0.0], [1.1616, 0.0, 0.0, 0.0, 0.0, 0.999]]', 'float'], {}), '([[5.3088, 0.0, 0.0, 0.0, 0.0, 0.0], [3.4066, 0.835, 0.0, 0.0, 0.0,\n 0.0], [5.6937, 0.0, 0.0, 0.0, 0.0, 0.0], [5.8423, 0.0, 0.0, 0.962, 0.0,\n 0.0], [22.7092, 0.0, 0.0, 0.0, 0.0, 0.0], [1.1616, 0.0, 0.0, 0.0, 0.0, \n 0.999]], float)\n', (3911, 4150), True, 'import numpy as np\n'), ((4304, 4557), 'numpy.matrix', 'np.matrix', (['[[5.7018, 0.0, 0.0, 0.0, 0.0, 0.0], [3.9816, 0.899, 0.0, 0.0, 0.0, 0.0], [\n 5.2, 0.0, 0.0, 0.0, 0.0, 0.0], [5.4841, 0.0, 0.0, 0.894, 0.0, 0.0], [\n 25.1032, 0.0, 0.0, 0.188, 0.473, 0.0], [1.2938, 0.0, 0.0, 0.215, 0.493,\n 0.999]]', 'float'], {}), '([[5.7018, 0.0, 0.0, 0.0, 0.0, 0.0], [3.9816, 0.899, 0.0, 0.0, 0.0,\n 0.0], [5.2, 0.0, 0.0, 0.0, 0.0, 0.0], [5.4841, 0.0, 0.0, 0.894, 0.0, \n 0.0], [25.1032, 0.0, 0.0, 0.188, 0.473, 0.0], [1.2938, 0.0, 0.0, 
0.215,\n 0.493, 0.999]], float)\n', (4313, 4557), True, 'import numpy as np\n'), ((4969, 5223), 'numpy.matrix', 'np.matrix', (['[[6.3816, 0.0, 0.0, 0.0, 0.0, 0.0], [3.7599, 0.91, 0.0, 0.0, 0.0, 0.0], [\n 5.6911, 0.0, 0.0, 0.0, 0.0, 0.0], [5.9285, 0.0, 0.0, 0.915, 0.0, 0.0],\n [26.2963, 0.0, 0.0, 0.17, 0.448, 0.0], [1.2944, 0.0, 0.0, 0.199, 0.472,\n 0.999]]', 'float'], {}), '([[6.3816, 0.0, 0.0, 0.0, 0.0, 0.0], [3.7599, 0.91, 0.0, 0.0, 0.0,\n 0.0], [5.6911, 0.0, 0.0, 0.0, 0.0, 0.0], [5.9285, 0.0, 0.0, 0.915, 0.0,\n 0.0], [26.2963, 0.0, 0.0, 0.17, 0.448, 0.0], [1.2944, 0.0, 0.0, 0.199, \n 0.472, 0.999]], float)\n', (4978, 5223), True, 'import numpy as np\n'), ((5295, 5548), 'numpy.matrix', 'np.matrix', (['[[5.0726, 0.0, 0.0, 0.0, 0.0, 0.0], [3.645, 0.843, 0.0, 0.0, 0.0, 0.0], [\n 5.8895, 0.0, 0.0, 0.0, 0.0, 0.0], [5.6828, 0.0, 0.0, 0.921, 0.0, 0.0],\n [22.9414, 0.0, 0.0, 0.137, 0.4, 0.0], [1.1616, 0.0, 0.0, 0.171, 0.429, \n 0.998]]', 'float'], {}), '([[5.0726, 0.0, 0.0, 0.0, 0.0, 0.0], [3.645, 0.843, 0.0, 0.0, 0.0,\n 0.0], [5.8895, 0.0, 0.0, 0.0, 0.0, 0.0], [5.6828, 0.0, 0.0, 0.921, 0.0,\n 0.0], [22.9414, 0.0, 0.0, 0.137, 0.4, 0.0], [1.1616, 0.0, 0.0, 0.171, \n 0.429, 0.998]], float)\n', (5304, 5548), True, 'import numpy as np\n'), ((5622, 5876), 'numpy.matrix', 'np.matrix', (['[[6.0681, 0.0, 0.0, 0.0, 0.0, 0.0], [4.5034, 0.794, 0.0, 0.0, 0.0, 0.0], [\n 5.2285, 0.0, 0.0, 0.0, 0.0, 0.0], [5.4055, 0.0, 0.0, 0.945, 0.0, 0.0],\n [25.114, 0.146, 0.594, 0.0, 0.0, 0.0], [1.2937, 0.186, 0.621, 0.0, 0.0,\n 0.998]]', 'float'], {}), '([[6.0681, 0.0, 0.0, 0.0, 0.0, 0.0], [4.5034, 0.794, 0.0, 0.0, 0.0,\n 0.0], [5.2285, 0.0, 0.0, 0.0, 0.0, 0.0], [5.4055, 0.0, 0.0, 0.945, 0.0,\n 0.0], [25.114, 0.146, 0.594, 0.0, 0.0, 0.0], [1.2937, 0.186, 0.621, 0.0,\n 0.0, 0.998]], float)\n', (5631, 5876), True, 'import numpy as np\n'), ((6025, 6279), 'numpy.matrix', 'np.matrix', (['[[5.5371, 0.0, 0.0, 0.0, 0.0, 0.0], [4.5848, 0.92, 0.0, 0.0, 0.0, 0.0], [\n 5.4908, 0.0, 0.0, 0.0, 0.0, 0.0], 
[6.0884, 0.0, 0.0, 0.789, 0.0, 0.0],\n [25.2144, 0.0, 0.0, 0.357, 0.787, 0.0], [1.2941, 0.0, 0.0, 0.409, 0.813,\n 0.998]]', 'float'], {}), '([[5.5371, 0.0, 0.0, 0.0, 0.0, 0.0], [4.5848, 0.92, 0.0, 0.0, 0.0,\n 0.0], [5.4908, 0.0, 0.0, 0.0, 0.0, 0.0], [6.0884, 0.0, 0.0, 0.789, 0.0,\n 0.0], [25.2144, 0.0, 0.0, 0.357, 0.787, 0.0], [1.2941, 0.0, 0.0, 0.409,\n 0.813, 0.998]], float)\n', (6034, 6279), True, 'import numpy as np\n'), ((6349, 6603), 'numpy.matrix', 'np.matrix', (['[[5.5371, 0.0, 0.0, 0.0, 0.0, 0.0], [4.5848, 0.92, 0.0, 0.0, 0.0, 0.0], [\n 5.4908, 0.0, 0.0, 0.0, 0.0, 0.0], [6.0884, 0.0, 0.0, 0.789, 0.0, 0.0],\n [25.2144, 0.0, 0.0, 0.357, 0.787, 0.0], [1.2941, 0.0, 0.0, 0.409, 0.813,\n 0.998]]', 'float'], {}), '([[5.5371, 0.0, 0.0, 0.0, 0.0, 0.0], [4.5848, 0.92, 0.0, 0.0, 0.0,\n 0.0], [5.4908, 0.0, 0.0, 0.0, 0.0, 0.0], [6.0884, 0.0, 0.0, 0.789, 0.0,\n 0.0], [25.2144, 0.0, 0.0, 0.357, 0.787, 0.0], [1.2941, 0.0, 0.0, 0.409,\n 0.813, 0.998]], float)\n', (6358, 6603), True, 'import numpy as np\n'), ((6675, 6929), 'numpy.matrix', 'np.matrix', (['[[6.4777, 0.0, 0.0, 0.0, 0.0, 0.0], [3.763, 0.913, 0.0, 0.0, 0.0, 0.0], [\n 6.0167, 0.0, 0.0, 0.0, 0.0, 0.0], [7.1834, 0.0, 0.0, 0.856, 0.0, 0.0],\n [26.5075, 0.0, 0.0, 0.33, 0.722, 0.0], [1.2946, 0.0, 0.0, 0.384, 0.757,\n 0.998]]', 'float'], {}), '([[6.4777, 0.0, 0.0, 0.0, 0.0, 0.0], [3.763, 0.913, 0.0, 0.0, 0.0,\n 0.0], [6.0167, 0.0, 0.0, 0.0, 0.0, 0.0], [7.1834, 0.0, 0.0, 0.856, 0.0,\n 0.0], [26.5075, 0.0, 0.0, 0.33, 0.722, 0.0], [1.2946, 0.0, 0.0, 0.384, \n 0.757, 0.998]], float)\n', (6684, 6929), True, 'import numpy as np\n'), ((7078, 7333), 'numpy.matrix', 'np.matrix', (['[[6.0062, 0.0, 0.0, 0.0, 0.0, 0.0], [6.5468, 0.967, 0.0, 0.0, 0.0, 0.0], [\n 6.2312, 0.0, 0.0, 0.0, 0.0, 0.0], [6.7172, 0.0, 0.0, 0.598, 0.0, 0.0],\n [26.1901, 0.0, 0.0, 0.546, 0.962, 0.0], [1.2951, 0.0, 0.0, 0.662, 0.961,\n 0.995]]', 'float'], {}), '([[6.0062, 0.0, 0.0, 0.0, 0.0, 0.0], [6.5468, 0.967, 0.0, 0.0, 0.0,\n 0.0], [6.2312, 0.0, 0.0, 0.0, 
0.0, 0.0], [6.7172, 0.0, 0.0, 0.598, 0.0,\n 0.0], [26.1901, 0.0, 0.0, 0.546, 0.962, 0.0], [1.2951, 0.0, 0.0, 0.662,\n 0.961, 0.995]], float)\n', (7087, 7333), True, 'import numpy as np\n'), ((7404, 7659), 'numpy.matrix', 'np.matrix', (['[[6.0062, 0.0, 0.0, 0.0, 0.0, 0.0], [6.5468, 0.967, 0.0, 0.0, 0.0, 0.0], [\n 6.2312, 0.0, 0.0, 0.0, 0.0, 0.0], [6.7172, 0.0, 0.0, 0.598, 0.0, 0.0],\n [26.1901, 0.0, 0.0, 0.546, 0.962, 0.0], [1.2951, 0.0, 0.0, 0.662, 0.961,\n 0.995]]', 'float'], {}), '([[6.0062, 0.0, 0.0, 0.0, 0.0, 0.0], [6.5468, 0.967, 0.0, 0.0, 0.0,\n 0.0], [6.2312, 0.0, 0.0, 0.0, 0.0, 0.0], [6.7172, 0.0, 0.0, 0.598, 0.0,\n 0.0], [26.1901, 0.0, 0.0, 0.546, 0.962, 0.0], [1.2951, 0.0, 0.0, 0.662,\n 0.961, 0.995]], float)\n', (7413, 7659), True, 'import numpy as np\n'), ((8165, 8215), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'SigmaInjection.dat')", 'SigmaInj'], {}), "(Path0 + 'SigmaInjection.dat', SigmaInj)\n", (8175, 8215), True, 'import numpy as np\n'), ((8216, 8270), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'SigmaInjectionLC.dat')", 'SigmaInjLC'], {}), "(Path0 + 'SigmaInjectionLC.dat', SigmaInjLC)\n", (8226, 8270), True, 'import numpy as np\n'), ((8272, 8325), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'Trace3DSigma_I_0.dat')", 'Sigma0000'], {}), "(Path0 + 'Trace3DSigma_I_0.dat', Sigma0000)\n", (8282, 8325), True, 'import numpy as np\n'), ((8326, 8383), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'Trace3DSigma_I_0LC.dat')", 'Sigma0000LC'], {}), "(Path0 + 'Trace3DSigma_I_0LC.dat', Sigma0000LC)\n", (8336, 8383), True, 'import numpy as np\n'), ((8385, 8441), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'Trace3DSigma_I_1600.dat')", 'Sigma1600'], {}), "(Path0 + 'Trace3DSigma_I_1600.dat', Sigma1600)\n", (8395, 8441), True, 'import numpy as np\n'), ((8442, 8502), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'Trace3DSigma_I_1600LC.dat')", 'Sigma1600LC'], {}), "(Path0 + 'Trace3DSigma_I_1600LC.dat', Sigma1600LC)\n", (8452, 8502), True, 'import numpy as np\n'), ((8503, 8563), 
'numpy.savetxt', 'np.savetxt', (["(Path0 + 'Trace3DSigma_I_1600NG.dat')", 'Sigma1600NG'], {}), "(Path0 + 'Trace3DSigma_I_1600NG.dat', Sigma1600NG)\n", (8513, 8563), True, 'import numpy as np\n'), ((8565, 8621), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'Trace3DSigma_I_3120.dat')", 'Sigma3120'], {}), "(Path0 + 'Trace3DSigma_I_3120.dat', Sigma3120)\n", (8575, 8621), True, 'import numpy as np\n'), ((8622, 8682), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'Trace3DSigma_I_3120NG.dat')", 'Sigma3120NG'], {}), "(Path0 + 'Trace3DSigma_I_3120NG.dat', Sigma3120NG)\n", (8632, 8682), True, 'import numpy as np\n'), ((8684, 8740), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'Trace3DSigma_I_4450.dat')", 'Sigma4450'], {}), "(Path0 + 'Trace3DSigma_I_4450.dat', Sigma4450)\n", (8694, 8740), True, 'import numpy as np\n'), ((8741, 8801), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'Trace3DSigma_I_4450NG.dat')", 'Sigma4450NG'], {}), "(Path0 + 'Trace3DSigma_I_4450NG.dat', Sigma4450NG)\n", (8751, 8801), True, 'import numpy as np\n'), ((8804, 8861), 'numpy.savetxt', 'np.savetxt', (["(Path0 + 'Trace3DSigmaBend90.dat')", 'SigmaBend90'], {}), "(Path0 + 'Trace3DSigmaBend90.dat', SigmaBend90)\n", (8814, 8861), True, 'import numpy as np\n'), ((8864, 8893), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(8, 8)'}), '(1, figsize=(8, 8))\n', (8874, 8893), True, 'import matplotlib.pyplot as plt\n'), ((8899, 8917), 'lib.BeamDynamicsTools.Ellipse.Ellipse', 'Ellipse', (['Sigma0000'], {}), '(Sigma0000)\n', (8906, 8917), False, 'from lib.BeamDynamicsTools.Ellipse import Ellipse\n'), ((8923, 8943), 'lib.BeamDynamicsTools.Ellipse.Ellipse', 'Ellipse', (['Sigma0000LC'], {}), '(Sigma0000LC)\n', (8930, 8943), False, 'from lib.BeamDynamicsTools.Ellipse import Ellipse\n'), ((8979, 8999), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (8990, 8999), True, 'import matplotlib.pyplot as plt\n'), ((9026, 9104), 'matplotlib.pyplot.text', 'plt.text', (['(0)', 
'(0)', "('M=%0.4f' % M[1])"], {'va': '"""center"""', 'ha': '"""center"""', 'color': '"""r"""', 'size': '(16)'}), "(0, 0, 'M=%0.4f' % M[1], va='center', ha='center', color='r', size=16)\n", (9034, 9104), True, 'import matplotlib.pyplot as plt\n'), ((9105, 9148), 'matplotlib.pyplot.legend', 'plt.legend', (["('1.000 mA', '0.001 mA')"], {'loc': '(2)'}), "(('1.000 mA', '0.001 mA'), loc=2)\n", (9115, 9148), True, 'import matplotlib.pyplot as plt\n'), ((9151, 9171), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (9162, 9171), True, 'import matplotlib.pyplot as plt\n'), ((9198, 9276), 'matplotlib.pyplot.text', 'plt.text', (['(0)', '(0)', "('M=%0.4f' % M[1])"], {'va': '"""center"""', 'ha': '"""center"""', 'color': '"""r"""', 'size': '(16)'}), "(0, 0, 'M=%0.4f' % M[1], va='center', ha='center', color='r', size=16)\n", (9206, 9276), True, 'import matplotlib.pyplot as plt\n'), ((9278, 9298), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (9289, 9298), True, 'import matplotlib.pyplot as plt\n'), ((9325, 9403), 'matplotlib.pyplot.text', 'plt.text', (['(0)', '(0)', "('M=%0.4f' % M[2])"], {'va': '"""center"""', 'ha': '"""center"""', 'color': '"""r"""', 'size': '(16)'}), "(0, 0, 'M=%0.4f' % M[2], va='center', ha='center', color='r', size=16)\n", (9333, 9403), True, 'import matplotlib.pyplot as plt\n'), ((9405, 9425), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (9416, 9425), True, 'import matplotlib.pyplot as plt\n'), ((9450, 9528), 'matplotlib.pyplot.text', 'plt.text', (['(0)', '(0)', "('M=%0.4f' % M[3])"], {'va': '"""center"""', 'ha': '"""center"""', 'color': '"""r"""', 'size': '(16)'}), "(0, 0, 'M=%0.4f' % M[3], va='center', ha='center', color='r', size=16)\n", (9458, 9528), True, 'import matplotlib.pyplot as plt\n'), ((9529, 9595), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Space Charge Effects: 1mA versus 1$\\\\mu$A"""'], {'size': '(16)'}), 
"('Space Charge Effects: 1mA versus 1$\\\\mu$A', size=16)\n", (9541, 9595), True, 'import matplotlib.pyplot as plt\n'), ((9597, 9629), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./DataTRACE3D.png"""'], {}), "('./DataTRACE3D.png')\n", (9608, 9629), True, 'import matplotlib.pyplot as plt\n'), ((118, 137), 'os.path.join', 'os.path.join', (['"""../"""'], {}), "('../')\n", (130, 137), False, 'import os\n'), ((1680, 1703), 'numpy.zeros', 'np.zeros', (['(6, 6)', 'float'], {}), '((6, 6), float)\n', (1688, 1703), True, 'import numpy as np\n'), ((2218, 2241), 'numpy.zeros', 'np.zeros', (['(6, 6)', 'float'], {}), '((6, 6), float)\n', (2226, 2241), True, 'import numpy as np\n'), ((735, 758), 'numpy.zeros', 'np.zeros', (['(3, 3)', 'float'], {}), '((3, 3), float)\n', (743, 758), True, 'import numpy as np\n'), ((2020, 2046), 'numpy.sqrt', 'np.sqrt', (['(S[i, i] * S[j, j])'], {}), '(S[i, i] * S[j, j])\n', (2027, 2046), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: data_helper
   Description :  Data preprocessing (text cleaning, vocabulary, batching)
Author : charl
date: 2018/8/3
-------------------------------------------------
Change Activity:
2018/8/3:
-------------------------------------------------
"""
import sys
import numpy as np
import pickle as pkl
import re
import itertools
import time
import gc
import gzip
from collections import Counter
from tensorflow.contrib import learn
from gensim.models.word2vec import Word2Vec
from random import random
from preprocess import MyVocabularyProcessor
class InputHelper(object):
    """Helpers for loading, cleaning and batching sentence-pair data.

    Training files are tab-separated with three columns (text1, text2,
    label); test files are (label, text1, text2).  Vocabulary handling is
    delegated to MyVocabularyProcessor and pre-trained embeddings are
    loaded via gensim.
    """

    # Pre-trained embedding lookup (word -> vector); filled by loadW2V().
    pre_emb = dict()
    # Restored vocabulary processor, cached by getVocab().
    vocab_processor = None

    def cleanText(self, s):
        """Normalize a sentence: drop non-ASCII and special characters, pad
        numbers and dollar signs with spaces, collapse repeated spaces and
        lower-case the result."""
        s = re.sub(r"[^\x00-\x7F]+", " ", s)
        s = re.sub(r'[\~\!\`\^\*\{\}\[\]\#\<\>\?\+\=\-\_\(\)]+', "", s)
        s = re.sub(r'( [0-9,\.]+)', r"\1 ", s)
        s = re.sub(r'\$', " $ ", s)
        s = re.sub('[ ]+', ' ', s)
        return s.lower()

    def getVocab(self, vocab_path, max_document_length, filter_h_pad):
        """Restore (and cache) the vocabulary processor saved at vocab_path."""
        if self.vocab_processor is None:
            print("Loading vocab")
            vocab_processor = MyVocabularyProcessor(max_document_length - filter_h_pad, min_frequency=0)
            self.vocab_processor = vocab_processor.restore(vocab_path)
        return self.vocab_processor

    def loadW2V(self, emb_path, type="bin"):
        """Load pre-trained word2vec embeddings into self.pre_emb.

        type="textgz" reads a gzipped text file of "word v1 v2 ..." lines;
        any other value loads a binary word2vec model via gensim.
        """
        print("Loading W2V data...")
        num_keys = 0
        if type == "textgz":
            # One embedding per line: token followed by its vector values.
            for line in gzip.open(emb_path):
                l = line.strip().split()
                st = l[0].lower()
                self.pre_emb[st] = np.asarray(l[1:])
            num_keys = len(self.pre_emb)
        else:
            self.pre_emb = Word2Vec.load_word2vec_format(emb_path, binary=True)
            # Normalize vectors in place to reduce memory use.
            self.pre_emb.init_sims(replace=True)
            num_keys = len(self.pre_emb.vocab)
        print("Loaded word2vec len", num_keys)
        gc.collect()

    def deletePreEmb(self):
        """Release the embedding table and reclaim its memory."""
        self.pre_emb = dict()
        gc.collect()

    def getTsvData(self, filePath):
        """Load training pairs from a 3-column TSV (text1, text2, label).

        The two texts are swapped at random half of the time so the model
        does not learn an ordering bias.  Returns (x1, x2, y) arrays.
        """
        print("Loading training data from " + filePath)
        x1 = []
        x2 = []
        y = []
        # positive samples from file
        for line in open(filePath):
            l = line.strip().split("\t")
            if len(l) < 3:  # need text1, text2 and the label column
                continue
            if random() > 0.5:
                x1.append(l[0].lower())
                x2.append(l[1].lower())
            else:
                # BUGFIX: the swapped branch previously appended l[1]/l[2],
                # which placed the *label* text into x2.  Swap the two
                # texts, mirroring getTsvDataCharBased.
                x1.append(l[1].lower())
                x2.append(l[0].lower())
            y.append(int(l[2]))
        return np.asarray(x1), np.asarray(x2), np.asarray(y)

    def getTsvDataCharBased(self, filepath):
        """Load training pairs and augment with random negative pairs.

        Every input line contributes a positive pair (label 1, texts
        randomly swapped); an equal-sized set of negative pairs (label 0)
        is created by pairing each text with a randomly permuted one.
        """
        print("Loading training data from " + filepath)
        x1 = []
        x2 = []
        y = []
        # positive samples from file
        for line in open(filepath):
            l = line.strip().split("\t")
            if len(l) < 2:
                continue
            if random() > 0.5:
                x1.append(l[0].lower())
                x2.append(l[1].lower())
            else:
                x1.append(l[1].lower())
                x2.append(l[0].lower())
            y.append(1)
        # Generate random negative samples by pairing each text with a
        # shuffled copy of the corpus.  np.random.permutation returns a
        # shuffled copy (unlike np.random.shuffle, which works in place
        # and returns None).
        combined = np.asarray(x1 + x2)
        shuffle_indices = np.random.permutation(np.arange(len(combined)))
        combined_shuff = combined[shuffle_indices]
        for i in range(len(combined)):
            x1.append(combined[i])
            x2.append(combined_shuff[i])
            y.append(0)
        return np.asarray(x1), np.asarray(x2), np.asarray(y)

    def getTsvTestData(self, filepath):
        """Load labelled test pairs from a 3-column TSV (label, text1, text2)."""
        print("Loading testing/labelled data from " + filepath)
        x1 = []
        x2 = []
        y = []
        for line in open(filepath):
            l = line.strip().split("\t")
            if len(l) < 3:
                continue
            x1.append(l[1].lower())
            x2.append(l[2].lower())
            y.append(int(l[0]))
        return np.asarray(x1), np.asarray(x2), np.asarray(y)

    def batch_iter(self, data, batch_size, num_epochs, shuffle=True):
        """Yield mini-batches of `data` for `num_epochs` epochs.

        When shuffle is True, the order is re-randomised at the start of
        each epoch.  The final batch of an epoch may be shorter than
        batch_size.
        """
        data = np.asarray(data)
        data_size = len(data)
        num_batches_per_epoch = int(len(data) / batch_size) + 1
        for epoch in range(num_epochs):
            # Shuffle the data at each epoch
            if shuffle:
                shuffle_indices = np.random.permutation(np.arange(data_size))
                shuffled_data = data[shuffle_indices]
            else:
                shuffled_data = data
            for batch_num in range(num_batches_per_epoch):
                start_index = batch_num * batch_size
                end_index = min((batch_num + 1) * batch_size, data_size)
                yield shuffled_data[start_index:end_index]

    def dumpValidation(self, x1_text, x2_text, y, shuffled_index, dev_idx, i):
        """Write the validation split (label\\ttext1\\ttext2 per line) to
        'validation.txt<i>' for later inspection."""
        print("Dumping validation " + str(i))
        x1_shuffled = x1_text[shuffled_index]
        x2_shuffled = x2_text[shuffled_index]
        y_shuffled = y[shuffled_index]
        x1_dev = x1_shuffled[dev_idx:]
        x2_dev = x2_shuffled[dev_idx:]
        y_dev = y_shuffled[dev_idx:]
        del x1_shuffled
        del y_shuffled
        # The context manager closes the file; no explicit close() needed.
        with open('validation.txt' + str(i), 'w') as f:
            for text1, text2, label in zip(x1_dev, x2_dev, y_dev):
                f.write(str(label) + "\t" + text1 + "\t" + text2 + "\n")
        del x1_dev
        del y_dev

    def getDataSets(self, training_paths, max_document_length, percent_dev, batch_size, is_char_based):
        """Load the training file, build the vocabulary and split train/dev.

        Returns (train_set, dev_set, vocab_processor, sum_no_of_batches),
        where each set is an (x1, x2, y) triple of index arrays.
        """
        if is_char_based:
            x1_text, x2_text, y = self.getTsvDataCharBased(training_paths)
        else:
            x1_text, x2_text, y = self.getTsvData(training_paths)
        # Build vocabulary over both sentence columns.
        print("Building vocabulary")
        vocab_processor = MyVocabularyProcessor(max_document_length, min_frequency=0, is_char_based=is_char_based)
        vocab_processor.fit_transform(np.concatenate((x2_text, x1_text), axis=0))
        print("Length of loaded vocabulary = {}".format(len(vocab_processor.vocabulary_)))
        x1 = np.asarray(list(vocab_processor.transform(x1_text)))
        x2 = np.asarray(list(vocab_processor.transform(x2_text)))
        # Random shuffle with a fixed seed for reproducibility.
        np.random.seed(131)
        shuffle_indices = np.random.permutation(np.arange(len(y)))
        x1_shuffled = x1[shuffle_indices]
        x2_shuffled = x2[shuffle_indices]
        y_shuffled = y[shuffle_indices]
        dev_idx = -1 * len(y_shuffled) * percent_dev // 100
        del x1
        del x2
        # Persist the validation split for inspection.
        self.dumpValidation(x1_text, x2_text, y, shuffle_indices, dev_idx, 0)
        # TODO: This is very crude, should use cross-validation
        x1_train, x1_dev = x1_shuffled[:dev_idx], x1_shuffled[dev_idx:]
        x2_train, x2_dev = x2_shuffled[:dev_idx], x2_shuffled[dev_idx:]
        y_train, y_dev = y_shuffled[:dev_idx], y_shuffled[dev_idx:]
        print("Train/Dev split for {}: {:d}/{:d}".format(training_paths, len(y_train), len(y_dev)))
        sum_no_of_batches = len(y_train) // batch_size
        train_set = (x1_train, x2_train, y_train)
        dev_set = (x1_dev, x2_dev, y_dev)
        gc.collect()
        return train_set, dev_set, vocab_processor, sum_no_of_batches

    def getTestDataSet(self, data_path, vocab_path, max_document_length):
        """Load a labelled test file and map it through a saved vocabulary.

        Returns (x1, x2, y) where the texts are index arrays produced by
        the vocabulary processor restored from vocab_path.
        """
        x1_temp, x2_temp, y = self.getTsvTestData(data_path)
        vocab_processor = MyVocabularyProcessor(max_document_length, min_frequency=0)
        vocab_processor = vocab_processor.restore(vocab_path)
        print(len(vocab_processor.vocabulary_))
        x1 = np.asarray(list(vocab_processor.transform(x1_temp)))
        x2 = np.asarray(list(vocab_processor.transform(x2_temp)))
        del vocab_processor
        gc.collect()
        return x1, x2, y
| [
"gensim.models.word2vec.Word2Vec.load_word2vec_format",
"preprocess.MyVocabularyProcessor",
"gzip.open",
"numpy.asarray",
"numpy.random.seed",
"gc.collect",
"numpy.concatenate",
"re.sub",
"random.random",
"numpy.arange"
] | [((752, 785), 're.sub', 're.sub', (['"""[^\\\\x00-\\\\x7F]+"""', '""" """', 's'], {}), "('[^\\\\x00-\\\\x7F]+', ' ', s)\n", (758, 785), False, 'import re\n'), ((797, 874), 're.sub', 're.sub', (['"""[\\\\~\\\\!\\\\`\\\\^\\\\*\\\\{\\\\}\\\\[\\\\]\\\\#\\\\<\\\\>\\\\?\\\\+\\\\=\\\\-\\\\_\\\\(\\\\)]+"""', '""""""', 's'], {}), "('[\\\\~\\\\!\\\\`\\\\^\\\\*\\\\{\\\\}\\\\[\\\\]\\\\#\\\\<\\\\>\\\\?\\\\+\\\\=\\\\-\\\\_\\\\(\\\\)]+', '', s)\n", (803, 874), False, 'import re\n'), ((869, 903), 're.sub', 're.sub', (['"""( [0-9,\\\\.]+)"""', '"""\\\\1 """', 's'], {}), "('( [0-9,\\\\.]+)', '\\\\1 ', s)\n", (875, 903), False, 'import re\n'), ((916, 939), 're.sub', 're.sub', (['"""\\\\$"""', '""" $ """', 's'], {}), "('\\\\$', ' $ ', s)\n", (922, 939), False, 'import re\n'), ((952, 974), 're.sub', 're.sub', (['"""[ ]+"""', '""" """', 's'], {}), "('[ ]+', ' ', s)\n", (958, 974), False, 'import re\n'), ((1984, 1996), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1994, 1996), False, 'import gc\n'), ((2064, 2076), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2074, 2076), False, 'import gc\n'), ((3314, 3333), 'numpy.asarray', 'np.asarray', (['(x1 + x2)'], {}), '(x1 + x2)\n', (3324, 3333), True, 'import numpy as np\n'), ((4749, 4765), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (4759, 4765), True, 'import numpy as np\n'), ((6498, 6591), 'preprocess.MyVocabularyProcessor', 'MyVocabularyProcessor', (['max_document_length'], {'min_frequency': '(0)', 'is_char_based': 'is_char_based'}), '(max_document_length, min_frequency=0, is_char_based=\n is_char_based)\n', (6519, 6591), False, 'from preprocess import MyVocabularyProcessor\n'), ((6999, 7018), 'numpy.random.seed', 'np.random.seed', (['(131)'], {}), '(131)\n', (7013, 7018), True, 'import numpy as np\n'), ((7964, 7976), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7974, 7976), False, 'import gc\n'), ((8237, 8296), 'preprocess.MyVocabularyProcessor', 'MyVocabularyProcessor', (['max_document_length'], {'min_frequency': 
'(0)'}), '(max_document_length, min_frequency=0)\n', (8258, 8296), False, 'from preprocess import MyVocabularyProcessor\n'), ((8608, 8620), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8618, 8620), False, 'import gc\n'), ((1178, 1252), 'preprocess.MyVocabularyProcessor', 'MyVocabularyProcessor', (['(max_document_length - filter_h_pad)'], {'min_frequency': '(0)'}), '(max_document_length - filter_h_pad, min_frequency=0)\n', (1199, 1252), False, 'from preprocess import MyVocabularyProcessor\n'), ((1517, 1536), 'gzip.open', 'gzip.open', (['emb_path'], {}), '(emb_path)\n', (1526, 1536), False, 'import gzip\n'), ((1748, 1800), 'gensim.models.word2vec.Word2Vec.load_word2vec_format', 'Word2Vec.load_word2vec_format', (['emb_path'], {'binary': '(True)'}), '(emb_path, binary=True)\n', (1777, 1800), False, 'from gensim.models.word2vec import Word2Vec\n'), ((2638, 2652), 'numpy.asarray', 'np.asarray', (['x1'], {}), '(x1)\n', (2648, 2652), True, 'import numpy as np\n'), ((2654, 2668), 'numpy.asarray', 'np.asarray', (['x2'], {}), '(x2)\n', (2664, 2668), True, 'import numpy as np\n'), ((2670, 2683), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (2680, 2683), True, 'import numpy as np\n'), ((3925, 3939), 'numpy.asarray', 'np.asarray', (['x1'], {}), '(x1)\n', (3935, 3939), True, 'import numpy as np\n'), ((3941, 3955), 'numpy.asarray', 'np.asarray', (['x2'], {}), '(x2)\n', (3951, 3955), True, 'import numpy as np\n'), ((3957, 3970), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (3967, 3970), True, 'import numpy as np\n'), ((4428, 4442), 'numpy.asarray', 'np.asarray', (['x1'], {}), '(x1)\n', (4438, 4442), True, 'import numpy as np\n'), ((4444, 4458), 'numpy.asarray', 'np.asarray', (['x2'], {}), '(x2)\n', (4454, 4458), True, 'import numpy as np\n'), ((4460, 4473), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (4470, 4473), True, 'import numpy as np\n'), ((6625, 6667), 'numpy.concatenate', 'np.concatenate', (['(x2_text, x1_text)'], {'axis': '(0)'}), '((x2_text, 
x1_text), axis=0)\n', (6639, 6667), True, 'import numpy as np\n'), ((1648, 1665), 'numpy.asarray', 'np.asarray', (['l[1:]'], {}), '(l[1:])\n', (1658, 1665), True, 'import numpy as np\n'), ((2397, 2405), 'random.random', 'random', ([], {}), '()\n', (2403, 2405), False, 'from random import random\n'), ((3014, 3022), 'random.random', 'random', ([], {}), '()\n', (3020, 3022), False, 'from random import random\n'), ((5071, 5091), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (5080, 5091), True, 'import numpy as np\n')] |
import numpy as np
from fuzzrl.core.conf import Defuzz as dfz
from fuzzrl.core.test import *
LIN_VARS_FILE = "../res/linvarsGFT7.xml"
GFT_FILE = "../res/gft9.xml"
class TestGenAlg(unittest.TestCase):
    """Tests for the genetic algorithm's initial population generation."""

    def test_initPopulation(self):
        """An initial population should contain exactly ``pop_size`` genomes."""
        pop_size = 1
        reg = Registry("test_reg")
        # Load the linguistic variables and the GFT definition into the
        # registry. ``with`` blocks close the XML files deterministically —
        # the original bare ``open()`` calls leaked the file handles until
        # garbage collection.
        with open(LIN_VARS_FILE) as linvars_file:
            xmlToLinvars(linvars_file.read(), registry=reg)
        with open(GFT_FILE) as gft_file:
            xmlToGFT(gft_file.read(), registry=reg,
                     defuzz_method=dfz.max_of_maximum)
        ga = GeneticAlgorithm(registry=reg)
        population = ga.generate_initial_population(pop_size)
        # Compute the shape once instead of building the array twice.
        shape = np.array(population).shape
        self.assertEqual(shape[0], pop_size)
        log.debug("Population size = {}".format(shape))
        print(str(population))
| [
"numpy.array"
] | [((576, 596), 'numpy.array', 'np.array', (['population'], {}), '(population)\n', (584, 596), True, 'import numpy as np\n'), ((665, 685), 'numpy.array', 'np.array', (['population'], {}), '(population)\n', (673, 685), True, 'import numpy as np\n')] |
"""
Based on cellfinder detected cells, and amap registration, generate a heatmap
image
"""
import logging
import argparse
from pathlib import Path
import numpy as np
from skimage.filters import gaussian
from brainio import brainio
from imlib.cells.utils import get_cell_location_array
from imlib.image.scale import scale_and_convert_to_16_bits
from imlib.image.binning import get_bins
from imlib.image.shape import convert_shape_dict_to_array_shape
from imlib.image.masking import mask_image_threshold
from imlib.general.numerical import check_positive_float
from imlib.general.system import ensure_directory_exists
from imlib.image.size import resize_array
def run(
    cells_file,
    output_filename,
    target_size,
    raw_image_shape,
    raw_image_bin_sizes,
    transformation_matrix,
    atlas_scale,
    smoothing=10,
    mask=True,
    atlas=None,
    cells_only=True,
    convert_16bit=True,
):
    """Build a 3D cell-density heatmap and write it out as a nifti image.

    The detected cell coordinates are binned into a 3D histogram in raw
    image space, resized to the target image size, optionally smoothed and
    masked against the registered atlas, then saved.

    :param cells_file: Cellfinder output cells file.
    :param output_filename: File to save heatmap into
    :param target_size: Size of the final heatmap
    :param raw_image_shape: Size of the raw data (coordinate space of the
        cells)
    :param raw_image_bin_sizes: List/tuple of the sizes of the bins in the
        raw data space
    :param transformation_matrix: Transformation matrix so that the
        resulting nifti can be processed using other tools.
    :param atlas_scale: Image scaling so that the resulting nifti can be
        processed using other tools.
    :param smoothing: Smoothing kernel size, in the target image space
    :param mask: Whether or not to mask the heatmap based on an atlas file
    :param atlas: Atlas file to mask the heatmap
    :param cells_only: Only use "cells", not artefacts
    :param convert_16bit: Convert final image to 16 bit
    """
    # TODO: compare the smoothing effects of gaussian filtering, and upsampling
    target_size = convert_shape_dict_to_array_shape(target_size, type="fiji")
    raw_image_shape = convert_shape_dict_to_array_shape(
        raw_image_shape, type="fiji"
    )

    cell_locations = get_cell_location_array(cells_file, cells_only=cells_only)
    histogram_bins = get_bins(raw_image_shape, raw_image_bin_sizes)

    logging.debug("Generating heatmap (3D histogram)")
    heatmap, _ = np.histogramdd(cell_locations, bins=histogram_bins)
    # uint16 keeps the array small enough to resize without exhausting RAM
    heatmap = heatmap.astype(np.uint16)

    logging.debug("Resizing heatmap to the size of the target image")
    heatmap = resize_array(heatmap, target_size)

    if smoothing is not None:
        logging.debug(
            "Applying Gaussian smoothing with a kernel sigma of: "
            "{}".format(smoothing)
        )
        heatmap = gaussian(heatmap, sigma=smoothing)

    if mask:
        logging.debug("Masking image based on registered atlas")
        # work on a copy of the atlas — it is otherwise modified in place,
        # which would affect later figure generation
        heatmap = mask_image_threshold(heatmap, np.copy(atlas))
        del atlas

    if convert_16bit:
        logging.debug("Converting to 16 bit")
        heatmap = scale_and_convert_to_16_bits(heatmap)

    logging.debug("Ensuring output directory exists")
    ensure_directory_exists(Path(output_filename).parent)

    logging.debug("Saving heatmap image")
    brainio.to_nii(
        heatmap,
        output_filename,
        scale=atlas_scale,
        affine_transform=transformation_matrix,
    )
def get_parser():
    """Build the command line argument parser for heatmap generation."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Positional arguments — declaration order defines the CLI order.
    parser.add_argument(
        dest="cells_file", type=str, help="Cellfinder output cell file",
    )
    parser.add_argument(
        dest="output_filename",
        type=str,
        help="Output filename. Should end with '.nii'",
    )
    parser.add_argument(
        dest="raw_image", type=str, help="Paths to raw data",
    )
    parser.add_argument(
        dest="downsampled_image", type=str, help="Downsampled_atlas .nii file",
    )
    parser.add_argument(
        "--bin-size",
        dest="bin_size_um",
        type=check_positive_float,
        default=100,
        help="Heatmap bin size (um of each edge of histogram cube)",
    )
    # One identical pixel-spacing option per image axis.
    for axis, ordinal in (("x", "first"), ("y", "second"), ("z", "third")):
        parser.add_argument(
            "-{}".format(axis),
            "--{}-pixel-um".format(axis),
            dest="{}_pixel_um".format(axis),
            type=check_positive_float,
            help=(
                "Pixel spacing of the data in the {} dimension, "
                "specified in um.".format(ordinal)
            ),
            required=True,
        )
    parser.add_argument(
        "--heatmap-smoothing",
        dest="heatmap_smooth",
        type=check_positive_float,
        default=100,
        help="Gaussian smoothing sigma, in um.",
    )
    parser.add_argument(
        "--no-mask-figs",
        dest="mask_figures",
        action="store_false",
        help="Don't mask the figures (removing any areas outside the brain,"
        "from e.g. smoothing)",
    )
    return parser
class HeatmapParams:
    """Pre-compute everything :func:`run` needs from the input images.

    Assumes an isotropic target (atlas) space.
    """

    def __init__(
        self,
        raw_image,
        downsampled_image,
        bin_size_um,
        x_pixel_um,
        y_pixel_um,
        z_pixel_um,
        smoothing_target_space,
    ):
        self._input_image = raw_image
        self._target_image = downsampled_image
        self._bin_um = bin_size_um
        self._x_pixel_um = x_pixel_um
        self._y_pixel_um = y_pixel_um
        self._z_pixel_um = z_pixel_um
        self._smooth_um = smoothing_target_space

        self._downsampled_image = None

        # Results, populated by the _get_* methods below.
        self.figure_image_shape = None
        self.raw_image_shape = None
        self.bin_size_raw_voxels = None
        self.atlas_scale = None
        self.transformation_matrix = None
        self.smoothing_target_voxel = None

        self._get_raw_image_shape()
        self._get_figure_image_shape()
        self._get_atlas_data()
        self._get_atlas_scale()
        self._get_transformation_matrix()
        self._get_binning()
        self._get_smoothing()

    def _get_raw_image_shape(self):
        # Size of the raw data, i.e. the coordinate space of the cells.
        logging.debug("Checking raw image size")
        self.raw_image_shape = brainio.get_size_image_from_file_paths(
            self._input_image
        )
        logging.debug(f"Raw image size: {self.raw_image_shape}")

    def _get_figure_image_shape(self):
        # Size of the downsampled (target) image the heatmap is resized to.
        logging.debug(
            "Loading file: {} to check target image size"
            "".format(self._target_image)
        )
        self._downsampled_image = brainio.load_nii(
            self._target_image, as_array=False
        )
        shape = self._downsampled_image.shape
        self.figure_image_shape = {"x": shape[0], "y": shape[1], "z": shape[2]}
        logging.debug("Target image size: {}".format(self.figure_image_shape))

    def _get_binning(self):
        # Convert the bin size from um to raw-image voxels, per axis.
        logging.debug("Calculating bin size in raw image space voxels")
        bin_raw_x = int(self._bin_um / self._x_pixel_um)
        bin_raw_y = int(self._bin_um / self._y_pixel_um)
        bin_raw_z = int(self._bin_um / self._z_pixel_um)
        self.bin_size_raw_voxels = [bin_raw_x, bin_raw_y, bin_raw_z]
        logging.debug(
            f"Bin size in raw image space is x:{bin_raw_x}, "
            f"y:{bin_raw_y}, z:{bin_raw_z}."
        )

    def _get_atlas_data(self):
        self.atlas_data = brainio.load_nii(self._target_image, as_array=True)

    def _get_atlas_scale(self):
        # Voxel sizes of the atlas, used to scale the output nifti.
        self.atlas_scale = self._downsampled_image.header.get_zooms()

    def _get_transformation_matrix(self):
        self.transformation_matrix = self._downsampled_image.affine

    def _get_smoothing(self):
        logging.debug(
            "Calculating smoothing in target image volume. Assumes "
            "an isotropic target image"
        )
        # Bug fix: the original used ``is not 0``, which tests object
        # identity rather than value (and raises a SyntaxWarning on
        # Python >= 3.8); it only worked because CPython caches small ints.
        if self._smooth_um != 0:
            # 1000 is to scale to um
            self.smoothing_target_voxel = int(
                self._smooth_um / (self.atlas_scale[0] * 1000)
            )
def main(
    cells_file,
    output_filename,
    raw_image,
    downsampled_image,
    bin_size_um,
    x_pixel_um,
    y_pixel_um,
    z_pixel_um,
    heatmap_smooth,
    masking,
):
    """Derive heatmap parameters from the images, then generate the heatmap."""
    heatmap_params = HeatmapParams(
        raw_image,
        downsampled_image,
        bin_size_um,
        x_pixel_um,
        y_pixel_um,
        z_pixel_um,
        heatmap_smooth,
    )
    run(
        cells_file,
        output_filename,
        heatmap_params.figure_image_shape,
        heatmap_params.raw_image_shape,
        heatmap_params.bin_size_raw_voxels,
        heatmap_params.transformation_matrix,
        heatmap_params.atlas_scale,
        smoothing=heatmap_params.smoothing_target_voxel,
        mask=masking,
        atlas=heatmap_params.atlas_data,
    )
def cli():
    """Command line entry point: parse the arguments and build the heatmap."""
    parser = get_parser()
    args = parser.parse_args()
    main(
        args.cells_file,
        args.output_filename,
        args.raw_image,
        args.downsampled_image,
        args.bin_size_um,
        args.x_pixel_um,
        args.y_pixel_um,
        args.z_pixel_um,
        args.heatmap_smooth,
        args.mask_figures,
    )


if __name__ == "__main__":
    cli()
| [
"imlib.image.size.resize_array",
"numpy.copy",
"numpy.histogramdd",
"logging.debug",
"imlib.image.binning.get_bins",
"argparse.ArgumentParser",
"pathlib.Path",
"brainio.brainio.to_nii",
"brainio.brainio.load_nii",
"imlib.image.shape.convert_shape_dict_to_array_shape",
"imlib.cells.utils.get_cell... | [((1903, 1962), 'imlib.image.shape.convert_shape_dict_to_array_shape', 'convert_shape_dict_to_array_shape', (['target_size'], {'type': '"""fiji"""'}), "(target_size, type='fiji')\n", (1936, 1962), False, 'from imlib.image.shape import convert_shape_dict_to_array_shape\n'), ((1985, 2048), 'imlib.image.shape.convert_shape_dict_to_array_shape', 'convert_shape_dict_to_array_shape', (['raw_image_shape'], {'type': '"""fiji"""'}), "(raw_image_shape, type='fiji')\n", (2018, 2048), False, 'from imlib.image.shape import convert_shape_dict_to_array_shape\n'), ((2081, 2139), 'imlib.cells.utils.get_cell_location_array', 'get_cell_location_array', (['cells_file'], {'cells_only': 'cells_only'}), '(cells_file, cells_only=cells_only)\n', (2104, 2139), False, 'from imlib.cells.utils import get_cell_location_array\n'), ((2151, 2197), 'imlib.image.binning.get_bins', 'get_bins', (['raw_image_shape', 'raw_image_bin_sizes'], {}), '(raw_image_shape, raw_image_bin_sizes)\n', (2159, 2197), False, 'from imlib.image.binning import get_bins\n'), ((2203, 2253), 'logging.debug', 'logging.debug', (['"""Generating heatmap (3D histogram)"""'], {}), "('Generating heatmap (3D histogram)')\n", (2216, 2253), False, 'import logging\n'), ((2277, 2315), 'numpy.histogramdd', 'np.histogramdd', (['cells_array'], {'bins': 'bins'}), '(cells_array, bins=bins)\n', (2291, 2315), True, 'import numpy as np\n'), ((2430, 2495), 'logging.debug', 'logging.debug', (['"""Resizing heatmap to the size of the target image"""'], {}), "('Resizing heatmap to the size of the target image')\n", (2443, 2495), False, 'import logging\n'), ((2516, 2556), 'imlib.image.size.resize_array', 'resize_array', (['heatmap_array', 'target_size'], {}), '(heatmap_array, target_size)\n', (2528, 2556), False, 'from imlib.image.size import resize_array\n'), ((3182, 3231), 'logging.debug', 'logging.debug', (['"""Ensuring output directory exists"""'], {}), "('Ensuring output directory exists')\n", (3195, 3231), 
False, 'import logging\n'), ((3295, 3332), 'logging.debug', 'logging.debug', (['"""Saving heatmap image"""'], {}), "('Saving heatmap image')\n", (3308, 3332), False, 'import logging\n'), ((3337, 3446), 'brainio.brainio.to_nii', 'brainio.to_nii', (['heatmap_array', 'output_filename'], {'scale': 'atlas_scale', 'affine_transform': 'transformation_matrix'}), '(heatmap_array, output_filename, scale=atlas_scale,\n affine_transform=transformation_matrix)\n', (3351, 3446), False, 'from brainio import brainio\n'), ((3515, 3594), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (3538, 3594), False, 'import argparse\n'), ((2747, 2787), 'skimage.filters.gaussian', 'gaussian', (['heatmap_array'], {'sigma': 'smoothing'}), '(heatmap_array, sigma=smoothing)\n', (2755, 2787), False, 'from skimage.filters import gaussian\n'), ((2810, 2866), 'logging.debug', 'logging.debug', (['"""Masking image based on registered atlas"""'], {}), "('Masking image based on registered atlas')\n", (2823, 2866), False, 'import logging\n'), ((3071, 3108), 'logging.debug', 'logging.debug', (['"""Converting to 16 bit"""'], {}), "('Converting to 16 bit')\n", (3084, 3108), False, 'import logging\n'), ((3133, 3176), 'imlib.image.scale.scale_and_convert_to_16_bits', 'scale_and_convert_to_16_bits', (['heatmap_array'], {}), '(heatmap_array)\n', (3161, 3176), False, 'from imlib.image.scale import scale_and_convert_to_16_bits\n'), ((6549, 6589), 'logging.debug', 'logging.debug', (['"""Checking raw image size"""'], {}), "('Checking raw image size')\n", (6562, 6589), False, 'import logging\n'), ((6621, 6678), 'brainio.brainio.get_size_image_from_file_paths', 'brainio.get_size_image_from_file_paths', (['self._input_image'], {}), '(self._input_image)\n', (6659, 6678), False, 'from brainio import brainio\n'), ((6709, 6765), 'logging.debug', 'logging.debug', (['f"""Raw image size: 
{self.raw_image_shape}"""'], {}), "(f'Raw image size: {self.raw_image_shape}')\n", (6722, 6765), False, 'import logging\n'), ((6973, 7025), 'brainio.brainio.load_nii', 'brainio.load_nii', (['self._target_image'], {'as_array': '(False)'}), '(self._target_image, as_array=False)\n', (6989, 7025), False, 'from brainio import brainio\n'), ((7290, 7353), 'logging.debug', 'logging.debug', (['"""Calculating bin size in raw image space voxels"""'], {}), "('Calculating bin size in raw image space voxels')\n", (7303, 7353), False, 'import logging\n'), ((7602, 7705), 'logging.debug', 'logging.debug', (['f"""Bin size in raw image space is x:{bin_raw_x}, y:{bin_raw_y}, z:{bin_raw_z}."""'], {}), "(\n f'Bin size in raw image space is x:{bin_raw_x}, y:{bin_raw_y}, z:{bin_raw_z}.'\n )\n", (7615, 7705), False, 'import logging\n'), ((7792, 7843), 'brainio.brainio.load_nii', 'brainio.load_nii', (['self._target_image'], {'as_array': '(True)'}), '(self._target_image, as_array=True)\n', (7808, 7843), False, 'from brainio import brainio\n'), ((8097, 8203), 'logging.debug', 'logging.debug', (['"""Calculating smoothing in target image volume. Assumes an isotropic target image"""'], {}), "(\n 'Calculating smoothing in target image volume. Assumes an isotropic target image'\n )\n", (8110, 8203), False, 'import logging\n'), ((3006, 3020), 'numpy.copy', 'np.copy', (['atlas'], {}), '(atlas)\n', (3013, 3020), True, 'import numpy as np\n'), ((3260, 3281), 'pathlib.Path', 'Path', (['output_filename'], {}), '(output_filename)\n', (3264, 3281), False, 'from pathlib import Path\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.